From 75940a411ed0f4bcdca4d0aaca7396dd3768f1c9 Mon Sep 17 00:00:00 2001
From: Daniel Alm
Date: Tue, 29 May 2018 13:55:44 +0200
Subject: [PATCH 1/9] Update CgRPC to v1.12.0 and BoringSSL to 10.0.4 (I guess?).

This change requires Swift 4.1 and Ruby (with the 'xcodeproj' gem) to be installed in order to compile when building via SPM (a sketch of the corresponding Package.swift tools-version pin follows the file summary below).

To cut through the noise, here are the non-vendored files with changes:
- Makefile
- Package.swift
- SwiftGRPC.podspec
- fix-project-settings.rb
- vendor-boringssl.sh

While I was at it, I have also re-run RootsEncoder.
---
Assets/roots.pem | 998 +--- Makefile | 5 +- Package.swift | 4 +- Sources/BoringSSL/crypto/aes/internal.h | 87 - Sources/BoringSSL/crypto/asn1/a_bitstr.c | 9 +- Sources/BoringSSL/crypto/asn1/a_d2i_fp.c | 15 + Sources/BoringSSL/crypto/asn1/a_enum.c | 29 +- Sources/BoringSSL/crypto/asn1/a_gentm.c | 2 +- Sources/BoringSSL/crypto/asn1/a_i2d_fp.c | 3 + Sources/BoringSSL/crypto/asn1/a_int.c | 27 +- Sources/BoringSSL/crypto/asn1/a_object.c | 128 - Sources/BoringSSL/crypto/asn1/a_print.c | 28 - Sources/BoringSSL/crypto/asn1/a_strnid.c | 3 + Sources/BoringSSL/crypto/asn1/a_time.c | 26 +- Sources/BoringSSL/crypto/asn1/a_utctm.c | 2 +- Sources/BoringSSL/crypto/asn1/asn1_lib.c | 54 +- Sources/BoringSSL/crypto/asn1/asn1_locl.h | 5 +- Sources/BoringSSL/crypto/asn1/tasn_dec.c | 18 +- Sources/BoringSSL/crypto/asn1/tasn_enc.c | 6 - Sources/BoringSSL/crypto/asn1/tasn_fre.c | 6 +- Sources/BoringSSL/crypto/asn1/tasn_new.c | 5 +- Sources/BoringSSL/crypto/asn1/time_support.c | 10 +- Sources/BoringSSL/crypto/asn1/x_long.c | 200 - Sources/BoringSSL/crypto/base64/base64.c | 108 +- Sources/BoringSSL/crypto/bio/bio.c | 244 +- Sources/BoringSSL/crypto/bio/bio_mem.c | 18 +- Sources/BoringSSL/crypto/bio/connect.c | 34 +- Sources/BoringSSL/crypto/bio/fd.c | 3 +- Sources/BoringSSL/crypto/bio/file.c | 28 +- Sources/BoringSSL/crypto/bio/hexdump.c | 31 +- Sources/BoringSSL/crypto/bio/internal.h | 28 +- Sources/BoringSSL/crypto/bio/pair.c | 90 +- Sources/BoringSSL/crypto/bio/printf.c | 16 +- Sources/BoringSSL/crypto/bn/internal.h | 258 - Sources/BoringSSL/crypto/bn/montgomery_inv.c | 207 - .../crypto/{bn => bn_extra}/bn_asn1.c | 24 +- .../crypto/{bn => bn_extra}/convert.c | 241 +- Sources/BoringSSL/crypto/buf/buf.c | 76 +- Sources/BoringSSL/crypto/bytestring/ber.c | 70 +- Sources/BoringSSL/crypto/bytestring/cbb.c | 141 +- Sources/BoringSSL/crypto/bytestring/cbs.c | 70 +- .../BoringSSL/crypto/bytestring/internal.h | 76 +- Sources/BoringSSL/crypto/chacha/chacha.c | 14 +- Sources/BoringSSL/crypto/cipher/aead.c | 156 - Sources/BoringSSL/crypto/cipher/e_aes.c | 1771 ------ .../crypto/cipher/e_chacha20poly1305.c | 276 - Sources/BoringSSL/crypto/cipher/tls_cbc.c | 482 -- .../t_bitst.c => cipher_extra/cipher_extra.c} | 87 +- .../{cipher => cipher_extra}/derive_key.c | 2 - .../crypto/cipher_extra/e_aesctrhmac.c | 281 + .../crypto/cipher_extra/e_aesgcmsiv.c | 867 +++ .../crypto/cipher_extra/e_chacha20poly1305.c | 326 ++ .../crypto/{cipher => cipher_extra}/e_null.c | 1 - .../crypto/{cipher => cipher_extra}/e_rc2.c | 32 +- .../crypto/{cipher => cipher_extra}/e_rc4.c | 0 .../crypto/{cipher => cipher_extra}/e_ssl3.c | 184 +- .../crypto/{cipher => cipher_extra}/e_tls.c | 361 +- .../x_bignum.c => cipher_extra/internal.h} | 147 +- .../BoringSSL/crypto/cipher_extra/tls_cbc.c | 482 ++ Sources/BoringSSL/crypto/cmac/cmac.c | 40 +- Sources/BoringSSL/crypto/conf/conf.c | 56 +- Sources/BoringSSL/crypto/conf/internal.h | 6 +- Sources/BoringSSL/crypto/cpu-aarch64-linux.c | 10 +- Sources/BoringSSL/crypto/cpu-arm-linux.c | 85 +- 
Sources/BoringSSL/crypto/cpu-intel.c | 129 +- Sources/BoringSSL/crypto/cpu-ppc64le.c | 12 +- Sources/BoringSSL/crypto/crypto.c | 104 +- .../BoringSSL/crypto/curve25519/curve25519.c | 4938 ---------------- .../BoringSSL/crypto/curve25519/internal.h | 109 - .../BoringSSL/crypto/curve25519/spake25519.c | 290 +- .../crypto/curve25519/x25519-x86_64.c | 20 +- Sources/BoringSSL/crypto/dh/check.c | 67 +- Sources/BoringSSL/crypto/dh/dh.c | 108 +- Sources/BoringSSL/crypto/dh/dh_asn1.c | 2 +- Sources/BoringSSL/crypto/dh/params.c | 162 +- .../crypto/digest_extra/digest_extra.c | 240 + Sources/BoringSSL/crypto/dsa/dsa.c | 244 +- Sources/BoringSSL/crypto/dsa/dsa_asn1.c | 2 +- Sources/BoringSSL/crypto/ec/ec.c | 847 --- Sources/BoringSSL/crypto/ec/p224-64.c | 1143 ---- Sources/BoringSSL/crypto/ec/util-64.c | 109 - .../crypto/{ec => ec_extra}/ec_asn1.c | 157 +- Sources/BoringSSL/crypto/ecdh/ecdh.c | 2 +- Sources/BoringSSL/crypto/ecdsa/ecdsa.c | 479 -- .../{ecdsa => ecdsa_extra}/ecdsa_asn1.c | 110 +- Sources/BoringSSL/crypto/engine/engine.c | 12 +- Sources/BoringSSL/crypto/err/err.c | 303 +- Sources/BoringSSL/crypto/err/err_data.c | 63 + Sources/BoringSSL/crypto/err/internal.h | 58 + Sources/BoringSSL/crypto/evp/digestsign.c | 100 +- Sources/BoringSSL/crypto/evp/evp.c | 17 +- Sources/BoringSSL/crypto/evp/evp_asn1.c | 34 +- Sources/BoringSSL/crypto/evp/evp_ctx.c | 26 +- Sources/BoringSSL/crypto/evp/internal.h | 117 +- Sources/BoringSSL/crypto/evp/p_dsa_asn1.c | 22 +- Sources/BoringSSL/crypto/evp/p_ec.c | 18 +- Sources/BoringSSL/crypto/evp/p_ec_asn1.c | 23 +- Sources/BoringSSL/crypto/evp/p_ed25519.c | 71 + Sources/BoringSSL/crypto/evp/p_ed25519_asn1.c | 190 + Sources/BoringSSL/crypto/evp/p_rsa.c | 145 +- Sources/BoringSSL/crypto/evp/p_rsa_asn1.c | 27 +- Sources/BoringSSL/crypto/evp/pbkdf.c | 105 +- Sources/BoringSSL/crypto/evp/print.c | 41 +- Sources/BoringSSL/crypto/evp/scrypt.c | 209 + Sources/BoringSSL/crypto/ex_data.c | 61 +- .../crypto/{ => fipsmodule}/aes/aes.c | 166 +- .../crypto/fipsmodule/aes/internal.h | 100 + .../crypto/{ => fipsmodule}/aes/key_wrap.c | 8 +- .../{ => fipsmodule}/aes/mode_wrappers.c | 2 +- .../crypto/{ => fipsmodule}/bn/add.c | 72 +- .../BoringSSL/crypto/{ => fipsmodule}/bn/bn.c | 47 +- .../BoringSSL/crypto/fipsmodule/bn/bytes.c | 269 + .../crypto/{ => fipsmodule}/bn/cmp.c | 19 +- .../crypto/{ => fipsmodule}/bn/ctx.c | 90 +- .../crypto/{ => fipsmodule}/bn/div.c | 279 +- .../{ => fipsmodule}/bn/exponentiation.c | 630 +- .../crypto/{ => fipsmodule}/bn/gcd.c | 204 +- .../crypto/{ => fipsmodule}/bn/generic.c | 125 +- .../BoringSSL/crypto/fipsmodule/bn/internal.h | 413 ++ .../kronecker.c => fipsmodule/bn/jacobi.c} | 106 +- .../crypto/{ => fipsmodule}/bn/montgomery.c | 262 +- .../crypto/fipsmodule/bn/montgomery_inv.c | 207 + .../crypto/{ => fipsmodule}/bn/mul.c | 385 +- .../crypto/{ => fipsmodule}/bn/prime.c | 369 +- .../crypto/{ => fipsmodule}/bn/random.c | 224 +- .../crypto/{ => fipsmodule}/bn/rsaz_exp.c | 2 +- .../crypto/{ => fipsmodule}/bn/rsaz_exp.h | 0 .../crypto/{ => fipsmodule}/bn/shift.c | 68 +- .../crypto/{ => fipsmodule}/bn/sqrt.c | 220 +- .../BoringSSL/crypto/fipsmodule/cipher/aead.c | 284 + .../crypto/{ => fipsmodule}/cipher/cipher.c | 84 +- .../crypto/fipsmodule/cipher/e_aes.c | 1437 +++++ .../crypto/{ => fipsmodule}/cipher/e_des.c | 106 +- .../crypto/{ => fipsmodule}/cipher/internal.h | 101 +- .../BoringSSL/crypto/fipsmodule/delocate.h | 88 + .../crypto/{ => fipsmodule}/des/des.c | 98 +- .../crypto/{ => fipsmodule}/des/internal.h | 22 +- .../crypto/{ => 
fipsmodule}/digest/digest.c | 93 +- .../crypto/{ => fipsmodule}/digest/digests.c | 244 +- .../crypto/{ => fipsmodule}/digest/internal.h | 36 +- .../{ => fipsmodule}/digest/md32_common.h | 122 +- Sources/BoringSSL/crypto/fipsmodule/ec/ec.c | 943 +++ .../crypto/{ => fipsmodule}/ec/ec_key.c | 96 +- .../{ => fipsmodule}/ec/ec_montgomery.c | 86 +- .../crypto/{ => fipsmodule}/ec/internal.h | 178 +- .../crypto/{ => fipsmodule}/ec/oct.c | 38 +- .../BoringSSL/crypto/fipsmodule/ec/p224-64.c | 1131 ++++ .../crypto/{ => fipsmodule}/ec/p256-64.c | 1023 ++-- .../{ => fipsmodule}/ec/p256-x86_64-table.h | 22 +- .../crypto/{ => fipsmodule}/ec/p256-x86_64.c | 281 +- .../crypto/{ => fipsmodule}/ec/p256-x86_64.h | 64 +- .../crypto/{ => fipsmodule}/ec/simple.c | 308 +- .../BoringSSL/crypto/fipsmodule/ec/util-64.c | 109 + .../crypto/{ => fipsmodule}/ec/wnaf.c | 148 +- .../BoringSSL/crypto/fipsmodule/ecdsa/ecdsa.c | 442 ++ .../crypto/{ => fipsmodule}/hmac/hmac.c | 55 +- .../{rand/internal.h => fipsmodule/is_fips.c} | 25 +- .../crypto/{ => fipsmodule}/md4/md4.c | 34 +- .../crypto/{ => fipsmodule}/md5/md5.c | 45 +- .../crypto/{ => fipsmodule}/modes/cbc.c | 45 +- .../crypto/{ => fipsmodule}/modes/cfb.c | 36 +- .../crypto/{ => fipsmodule}/modes/ctr.c | 57 +- .../crypto/{ => fipsmodule}/modes/gcm.c | 184 +- .../crypto/{ => fipsmodule}/modes/internal.h | 233 +- .../crypto/{ => fipsmodule}/modes/ofb.c | 2 +- .../crypto/{ => fipsmodule}/modes/polyval.c | 33 +- .../crypto/fipsmodule/rand/ctrdrbg.c | 200 + .../crypto/fipsmodule/rand/internal.h | 92 + .../BoringSSL/crypto/fipsmodule/rand/rand.c | 358 ++ .../crypto/fipsmodule/rand/urandom.c | 302 + .../crypto/{ => fipsmodule}/rsa/blinding.c | 36 +- .../crypto/{ => fipsmodule}/rsa/internal.h | 83 +- .../crypto/{ => fipsmodule}/rsa/padding.c | 402 +- .../crypto/{ => fipsmodule}/rsa/rsa.c | 449 +- .../crypto/{ => fipsmodule}/rsa/rsa_impl.c | 765 ++- .../{ => fipsmodule}/sha/sha1-altivec.c | 93 +- .../crypto/{ => fipsmodule}/sha/sha1.c | 58 +- .../crypto/{ => fipsmodule}/sha/sha256.c | 60 +- .../crypto/{ => fipsmodule}/sha/sha512.c | 75 +- Sources/BoringSSL/crypto/hkdf/hkdf.c | 12 +- Sources/BoringSSL/crypto/internal.h | 534 +- Sources/BoringSSL/crypto/lhash/lhash.c | 90 +- Sources/BoringSSL/crypto/mem.c | 109 +- Sources/BoringSSL/crypto/obj/obj.c | 162 +- Sources/BoringSSL/crypto/obj/obj_dat.h | 136 +- Sources/BoringSSL/crypto/obj/obj_xref.c | 12 +- Sources/BoringSSL/crypto/pem/pem_info.c | 8 +- Sources/BoringSSL/crypto/pem/pem_lib.c | 11 +- Sources/BoringSSL/crypto/pem/pem_pk8.c | 1 + Sources/BoringSSL/crypto/pem/pem_pkey.c | 2 +- Sources/BoringSSL/crypto/pem/pem_xaux.c | 2 - Sources/BoringSSL/crypto/pkcs7/internal.h | 49 + Sources/BoringSSL/crypto/pkcs7/pkcs7.c | 166 + .../{x509/pkcs7.c => pkcs7/pkcs7_x509.c} | 174 +- Sources/BoringSSL/crypto/pkcs8/internal.h | 50 +- Sources/BoringSSL/crypto/pkcs8/p5_pbev2.c | 159 +- Sources/BoringSSL/crypto/pkcs8/p8_pkey.c | 85 - Sources/BoringSSL/crypto/pkcs8/pkcs8.c | 1001 +--- Sources/BoringSSL/crypto/pkcs8/pkcs8_x509.c | 789 +++ Sources/BoringSSL/crypto/poly1305/internal.h | 7 +- Sources/BoringSSL/crypto/poly1305/poly1305.c | 28 +- .../BoringSSL/crypto/poly1305/poly1305_arm.c | 22 +- .../BoringSSL/crypto/poly1305/poly1305_vec.c | 137 +- Sources/BoringSSL/crypto/pool/internal.h | 4 +- Sources/BoringSSL/crypto/pool/pool.c | 30 +- Sources/BoringSSL/crypto/rand/rand.c | 244 - Sources/BoringSSL/crypto/rand/urandom.c | 335 -- .../{rand => rand_extra}/deterministic.c | 14 +- .../BoringSSL/crypto/rand_extra/forkunsafe.c | 46 + .../crypto/{rand 
=> rand_extra}/fuchsia.c | 14 +- .../BoringSSL/crypto/rand_extra/rand_extra.c | 70 + .../crypto/{rand => rand_extra}/windows.c | 10 +- Sources/BoringSSL/crypto/refcount_c11.c | 4 +- Sources/BoringSSL/crypto/refcount_lock.c | 2 +- .../crypto/{rsa => rsa_extra}/rsa_asn1.c | 149 +- Sources/BoringSSL/crypto/stack/stack.c | 26 +- Sources/BoringSSL/crypto/thread_none.c | 2 +- Sources/BoringSSL/crypto/thread_pthread.c | 2 +- Sources/BoringSSL/crypto/thread_win.c | 80 +- Sources/BoringSSL/crypto/x509/a_sign.c | 17 +- Sources/BoringSSL/crypto/x509/a_verify.c | 24 +- Sources/BoringSSL/crypto/x509/algorithm.c | 28 +- Sources/BoringSSL/crypto/x509/asn1_gen.c | 37 +- Sources/BoringSSL/crypto/x509/by_dir.c | 8 +- Sources/BoringSSL/crypto/x509/by_file.c | 5 +- Sources/BoringSSL/crypto/x509/rsa_pss.c | 10 +- Sources/BoringSSL/crypto/x509/t_x509.c | 3 +- Sources/BoringSSL/crypto/x509/x509_def.c | 5 + Sources/BoringSSL/crypto/x509/x509_lu.c | 40 +- Sources/BoringSSL/crypto/x509/x509_obj.c | 4 +- Sources/BoringSSL/crypto/x509/x509_set.c | 10 + Sources/BoringSSL/crypto/x509/x509_txt.c | 6 - Sources/BoringSSL/crypto/x509/x509_vfy.c | 38 +- Sources/BoringSSL/crypto/x509/x509_vpm.c | 1 - Sources/BoringSSL/crypto/x509/x509type.c | 126 - Sources/BoringSSL/crypto/x509/x_algor.c | 4 +- Sources/BoringSSL/crypto/x509/x_name.c | 29 +- Sources/BoringSSL/crypto/x509/x_x509.c | 6 +- Sources/BoringSSL/crypto/x509/x_x509a.c | 7 - Sources/BoringSSL/crypto/x509v3/ext_dat.h | 8 + Sources/BoringSSL/crypto/x509v3/pcy_int.h | 4 +- Sources/BoringSSL/crypto/x509v3/pcy_lib.c | 9 - Sources/BoringSSL/crypto/x509v3/pcy_node.c | 2 +- Sources/BoringSSL/crypto/x509v3/pcy_tree.c | 40 +- Sources/BoringSSL/crypto/x509v3/v3_alt.c | 39 +- Sources/BoringSSL/crypto/x509v3/v3_cpols.c | 12 +- Sources/BoringSSL/crypto/x509v3/v3_genn.c | 1 + Sources/BoringSSL/crypto/x509v3/v3_info.c | 39 +- Sources/BoringSSL/crypto/x509v3/v3_lib.c | 22 +- Sources/BoringSSL/crypto/x509v3/v3_ncons.c | 38 +- Sources/BoringSSL/crypto/x509v3/v3_pci.c | 33 - Sources/BoringSSL/crypto/x509v3/v3_utl.c | 51 +- Sources/BoringSSL/err_data.c | 1288 ---- Sources/BoringSSL/include/openssl/aead.h | 471 +- Sources/BoringSSL/include/openssl/aes.h | 100 +- Sources/BoringSSL/include/openssl/arm_arch.h | 24 +- Sources/BoringSSL/include/openssl/asn1.h | 92 +- Sources/BoringSSL/include/openssl/asn1t.h | 26 +- Sources/BoringSSL/include/openssl/base.h | 129 +- Sources/BoringSSL/include/openssl/base64.h | 136 +- Sources/BoringSSL/include/openssl/bio.h | 881 +-- Sources/BoringSSL/include/openssl/blowfish.h | 2 +- Sources/BoringSSL/include/openssl/bn.h | 888 +-- Sources/BoringSSL/include/openssl/buf.h | 58 +- .../BoringSSL/include/openssl/bytestring.h | 560 +- Sources/BoringSSL/include/openssl/cast.h | 4 +- Sources/BoringSSL/include/openssl/chacha.h | 16 +- Sources/BoringSSL/include/openssl/cipher.h | 419 +- Sources/BoringSSL/include/openssl/cmac.h | 54 +- Sources/BoringSSL/include/openssl/conf.h | 99 +- Sources/BoringSSL/include/openssl/cpu.h | 105 +- Sources/BoringSSL/include/openssl/crypto.h | 94 +- .../BoringSSL/include/openssl/curve25519.h | 189 +- Sources/BoringSSL/include/openssl/des.h | 50 +- Sources/BoringSSL/include/openssl/dh.h | 195 +- Sources/BoringSSL/include/openssl/digest.h | 257 +- Sources/BoringSSL/include/openssl/dsa.h | 423 +- Sources/BoringSSL/include/openssl/ec.h | 267 +- Sources/BoringSSL/include/openssl/ec_key.h | 260 +- Sources/BoringSSL/include/openssl/ecdh.h | 18 +- Sources/BoringSSL/include/openssl/ecdsa.h | 144 +- Sources/BoringSSL/include/openssl/engine.h | 76 +- 
Sources/BoringSSL/include/openssl/err.h | 408 +- Sources/BoringSSL/include/openssl/evp.h | 866 +-- Sources/BoringSSL/include/openssl/ex_data.h | 102 +- Sources/BoringSSL/include/openssl/hkdf.h | 34 +- Sources/BoringSSL/include/openssl/hmac.h | 98 +- .../BoringSSL/include/openssl/is_boringssl.h | 16 + Sources/BoringSSL/include/openssl/lhash.h | 137 +- .../BoringSSL/include/openssl/lhash_macros.h | 8 +- Sources/BoringSSL/include/openssl/md4.h | 28 +- Sources/BoringSSL/include/openssl/md5.h | 28 +- Sources/BoringSSL/include/openssl/mem.h | 72 +- Sources/BoringSSL/include/openssl/nid.h | 43 + Sources/BoringSSL/include/openssl/obj.h | 181 +- .../BoringSSL/include/openssl/opensslconf.h | 9 +- Sources/BoringSSL/include/openssl/pem.h | 124 +- Sources/BoringSSL/include/openssl/pkcs7.h | 70 +- Sources/BoringSSL/include/openssl/pkcs8.h | 147 +- Sources/BoringSSL/include/openssl/poly1305.h | 22 +- Sources/BoringSSL/include/openssl/pool.h | 54 +- Sources/BoringSSL/include/openssl/rand.h | 93 +- Sources/BoringSSL/include/openssl/rc4.h | 18 +- Sources/BoringSSL/include/openssl/ripemd.h | 26 +- Sources/BoringSSL/include/openssl/rsa.h | 714 +-- Sources/BoringSSL/include/openssl/sha.h | 142 +- Sources/BoringSSL/include/openssl/span.h | 191 + Sources/BoringSSL/include/openssl/ssl.h | 5177 +++++++++-------- Sources/BoringSSL/include/openssl/ssl3.h | 163 +- Sources/BoringSSL/include/openssl/stack.h | 519 +- .../BoringSSL/include/openssl/stack_macros.h | 3987 ------------- Sources/BoringSSL/include/openssl/thread.h | 86 +- Sources/BoringSSL/include/openssl/tls1.h | 140 +- .../BoringSSL/include/openssl/type_check.h | 24 +- Sources/BoringSSL/include/openssl/x509.h | 162 +- Sources/BoringSSL/include/openssl/x509_vfy.h | 44 +- Sources/BoringSSL/include/openssl/x509v3.h | 49 +- .../BoringSSL/ssl/{bio_ssl.c => bio_ssl.cc} | 14 +- ...stom_extensions.c => custom_extensions.cc} | 33 +- .../BoringSSL/ssl/{d1_both.c => d1_both.cc} | 592 +- Sources/BoringSSL/ssl/{d1_lib.c => d1_lib.cc} | 199 +- Sources/BoringSSL/ssl/{d1_pkt.c => d1_pkt.cc} | 246 +- .../BoringSSL/ssl/{d1_srtp.c => d1_srtp.cc} | 28 +- .../ssl/{dtls_method.c => dtls_method.cc} | 100 +- .../ssl/{dtls_record.c => dtls_record.cc} | 149 +- Sources/BoringSSL/ssl/handshake.cc | 616 ++ Sources/BoringSSL/ssl/handshake_client.c | 1883 ------ Sources/BoringSSL/ssl/handshake_client.cc | 1836 ++++++ Sources/BoringSSL/ssl/handshake_server.c | 1950 ------- Sources/BoringSSL/ssl/handshake_server.cc | 1662 ++++++ Sources/BoringSSL/ssl/internal.h | 3742 +++++++----- Sources/BoringSSL/ssl/s3_both.c | 895 --- Sources/BoringSSL/ssl/s3_both.cc | 585 ++ Sources/BoringSSL/ssl/{s3_lib.c => s3_lib.cc} | 64 +- Sources/BoringSSL/ssl/{s3_pkt.c => s3_pkt.cc} | 400 +- Sources/BoringSSL/ssl/ssl_aead_ctx.c | 335 -- Sources/BoringSSL/ssl/ssl_aead_ctx.cc | 415 ++ .../BoringSSL/ssl/{ssl_asn1.c => ssl_asn1.cc} | 562 +- Sources/BoringSSL/ssl/ssl_buffer.c | 312 - Sources/BoringSSL/ssl/ssl_buffer.cc | 286 + .../BoringSSL/ssl/{ssl_cert.c => ssl_cert.cc} | 729 +-- .../ssl/{ssl_cipher.c => ssl_cipher.cc} | 971 ++-- Sources/BoringSSL/ssl/ssl_ecdh.c | 465 -- .../BoringSSL/ssl/{ssl_file.c => ssl_file.cc} | 40 +- Sources/BoringSSL/ssl/ssl_key_share.cc | 250 + .../BoringSSL/ssl/{ssl_lib.c => ssl_lib.cc} | 1704 +++--- Sources/BoringSSL/ssl/ssl_privkey.c | 683 --- Sources/BoringSSL/ssl/ssl_privkey.cc | 488 ++ .../ssl/{ssl_session.c => ssl_session.cc} | 1069 ++-- .../BoringSSL/ssl/{ssl_stat.c => ssl_stat.cc} | 231 +- .../{ssl_transcript.c => ssl_transcript.cc} | 291 +- Sources/BoringSSL/ssl/ssl_versions.cc | 
472 ++ .../BoringSSL/ssl/{ssl_x509.c => ssl_x509.cc} | 1018 +++- Sources/BoringSSL/ssl/t1_enc.c | 561 -- Sources/BoringSSL/ssl/t1_enc.cc | 503 ++ Sources/BoringSSL/ssl/{t1_lib.c => t1_lib.cc} | 2618 ++++----- Sources/BoringSSL/ssl/tls13_both.c | 634 -- Sources/BoringSSL/ssl/tls13_both.cc | 551 ++ Sources/BoringSSL/ssl/tls13_client.c | 712 --- Sources/BoringSSL/ssl/tls13_client.cc | 977 ++++ Sources/BoringSSL/ssl/tls13_enc.c | 430 -- Sources/BoringSSL/ssl/tls13_enc.cc | 563 ++ Sources/BoringSSL/ssl/tls13_server.c | 680 --- Sources/BoringSSL/ssl/tls13_server.cc | 1068 ++++ .../ssl/{tls_method.c => tls_method.cc} | 185 +- Sources/BoringSSL/ssl/tls_record.c | 556 -- Sources/BoringSSL/ssl/tls_record.cc | 712 +++ .../BoringSSL/third_party/fiat/curve25519.c | 5062 ++++++++++++++++ Sources/BoringSSL/third_party/fiat/internal.h | 142 + Sources/CgRPC/include/grpc/byte_buffer.h | 2 + .../CgRPC/include/grpc/byte_buffer_reader.h | 2 + Sources/CgRPC/include/grpc/census.h | 435 +- Sources/CgRPC/include/grpc/compression.h | 47 +- Sources/CgRPC/include/grpc/fork.h | 2 + Sources/CgRPC/include/grpc/grpc.h | 245 +- Sources/CgRPC/include/grpc/grpc_posix.h | 11 +- Sources/CgRPC/include/grpc/grpc_security.h | 347 +- .../include/grpc/grpc_security_constants.h | 8 + Sources/CgRPC/include/grpc/impl/codegen/atm.h | 10 +- .../grpc/impl/codegen/atm_gcc_atomic.h | 17 +- .../include/grpc/impl/codegen/atm_gcc_sync.h | 11 +- .../include/grpc/impl/codegen/atm_windows.h | 45 +- .../include/grpc/impl/codegen/byte_buffer.h | 30 +- .../grpc/impl/codegen/byte_buffer_reader.h | 4 +- .../grpc/impl/codegen/compression_types.h | 67 +- .../grpc/impl/codegen/connectivity_state.h | 2 - .../include/grpc/impl/codegen/exec_ctx_fwd.h | 26 - .../include/grpc/impl/codegen/grpc_types.h | 101 +- .../include/grpc/impl/codegen/port_platform.h | 100 + .../CgRPC/include/grpc/impl/codegen/slice.h | 23 +- .../CgRPC/include/grpc/impl/codegen/sync.h | 1 + .../include/grpc/impl/codegen/sync_custom.h | 2 + .../include/grpc/impl/codegen/sync_generic.h | 14 +- .../include/grpc/impl/codegen/sync_posix.h | 2 + .../include/grpc/impl/codegen/sync_windows.h | 2 + Sources/CgRPC/include/grpc/slice.h | 35 +- Sources/CgRPC/include/grpc/slice_buffer.h | 45 +- Sources/CgRPC/include/grpc/status.h | 2 + Sources/CgRPC/include/grpc/support/alloc.h | 29 +- Sources/CgRPC/include/grpc/support/atm.h | 2 + .../include/grpc/support/atm_gcc_atomic.h | 2 + .../CgRPC/include/grpc/support/atm_gcc_sync.h | 2 + .../CgRPC/include/grpc/support/atm_windows.h | 2 + Sources/CgRPC/include/grpc/support/cmdline.h | 88 - .../CgRPC/include/grpc/support/histogram.h | 64 - Sources/CgRPC/include/grpc/support/log.h | 39 +- .../CgRPC/include/grpc/support/log_windows.h | 2 +- .../CgRPC/include/grpc/support/string_util.h | 4 +- .../CgRPC/include/grpc/support/subprocess.h | 44 - Sources/CgRPC/include/grpc/support/sync.h | 72 +- .../CgRPC/include/grpc/support/sync_custom.h | 2 + .../CgRPC/include/grpc/support/sync_generic.h | 2 + .../CgRPC/include/grpc/support/sync_posix.h | 2 + .../CgRPC/include/grpc/support/sync_windows.h | 2 + Sources/CgRPC/include/grpc/support/thd.h | 76 - Sources/CgRPC/include/grpc/support/thd_id.h | 44 + Sources/CgRPC/include/grpc/support/time.h | 2 + .../CgRPC/src/core/ext/census/aggregation.h | 51 - .../src/core/ext/census/base_resources.c | 56 - .../src/core/ext/census/base_resources.h | 24 - .../src/core/ext/census/census_interface.h | 61 - .../src/core/ext/census/census_rpc_stats.h | 86 - Sources/CgRPC/src/core/ext/census/context.c | 496 -- 
.../CgRPC/src/core/ext/census/gen/census.pb.c | 161 - .../CgRPC/src/core/ext/census/gen/census.pb.h | 280 - .../core/ext/census/gen/trace_context.pb.c | 39 - .../core/ext/census/gen/trace_context.pb.h | 78 - .../{grpc_context.c => grpc_context.cc} | 16 +- .../CgRPC/src/core/ext/census/grpc_filter.c | 196 - .../CgRPC/src/core/ext/census/grpc_plugin.c | 70 - .../CgRPC/src/core/ext/census/initialize.c | 51 - .../src/core/ext/census/intrusive_hash_map.c | 305 - .../src/core/ext/census/intrusive_hash_map.h | 152 - .../ext/census/intrusive_hash_map_internal.h | 48 - Sources/CgRPC/src/core/ext/census/mlog.c | 586 -- Sources/CgRPC/src/core/ext/census/mlog.h | 80 - Sources/CgRPC/src/core/ext/census/operation.c | 48 - .../CgRPC/src/core/ext/census/placeholders.c | 49 - Sources/CgRPC/src/core/ext/census/resource.c | 303 - Sources/CgRPC/src/core/ext/census/resource.h | 48 - .../CgRPC/src/core/ext/census/rpc_metric_id.h | 36 - .../CgRPC/src/core/ext/census/trace_context.c | 71 - .../CgRPC/src/core/ext/census/trace_context.h | 56 - .../CgRPC/src/core/ext/census/trace_label.h | 46 - .../src/core/ext/census/trace_propagation.h | 48 - .../CgRPC/src/core/ext/census/trace_status.h | 30 - .../CgRPC/src/core/ext/census/trace_string.h | 35 - Sources/CgRPC/src/core/ext/census/tracing.c | 55 - Sources/CgRPC/src/core/ext/census/tracing.h | 109 - .../filters/client_channel/backup_poller.cc | 174 + .../filters/client_channel/backup_poller.h} | 26 +- ...connectivity.c => channel_connectivity.cc} | 121 +- .../filters/client_channel/client_channel.c | 1657 ------ .../filters/client_channel/client_channel.cc | 3304 +++++++++++ .../filters/client_channel/client_channel.h | 18 +- ...el_factory.c => client_channel_factory.cc} | 29 +- .../client_channel/client_channel_factory.h | 39 +- ...nnel_plugin.c => client_channel_plugin.cc} | 50 +- .../{connector.c => connector.cc} | 15 +- .../ext/filters/client_channel/connector.h | 42 +- ...andshaker.c => http_connect_handshaker.cc} | 140 +- .../{http_proxy.c => http_proxy.cc} | 56 +- .../ext/filters/client_channel/lb_policy.c | 164 - .../ext/filters/client_channel/lb_policy.cc | 59 + .../ext/filters/client_channel/lb_policy.h | 354 +- .../grpclb/client_load_reporting_filter.c | 137 - .../grpclb/client_load_reporting_filter.cc | 138 + .../grpclb/client_load_reporting_filter.h | 4 +- .../client_channel/lb_policy/grpclb/grpclb.c | 2021 ------- .../client_channel/lb_policy/grpclb/grpclb.cc | 1909 ++++++ .../client_channel/lb_policy/grpclb/grpclb.h | 29 - .../lb_policy/grpclb/grpclb_channel.h | 30 +- .../lb_policy/grpclb/grpclb_channel_secure.c | 99 - .../lb_policy/grpclb/grpclb_channel_secure.cc | 108 + ..._client_stats.c => grpclb_client_stats.cc} | 23 +- .../lb_policy/grpclb/grpclb_client_stats.h | 4 +- ...ad_balancer_api.c => load_balancer_api.cc} | 150 +- .../lb_policy/grpclb/load_balancer_api.h | 52 +- .../proto/grpc/lb/v1/load_balancer.pb.c | 7 +- .../proto/grpc/lb/v1/load_balancer.pb.h | 21 +- .../lb_policy/pick_first/pick_first.c | 714 --- .../lb_policy/pick_first/pick_first.cc | 562 ++ .../lb_policy/round_robin/round_robin.c | 924 --- .../lb_policy/round_robin/round_robin.cc | 682 +++ .../lb_policy/subchannel_list.h | 536 ++ ..._policy_factory.c => lb_policy_factory.cc} | 60 +- .../client_channel/lb_policy_factory.h | 106 +- .../client_channel/lb_policy_registry.c | 70 - .../client_channel/lb_policy_registry.cc | 97 + .../client_channel/lb_policy_registry.h | 40 +- .../filters/client_channel/method_params.cc | 178 + .../filters/client_channel/method_params.h | 78 + 
.../{parse_address.c => parse_address.cc} | 82 +- .../filters/client_channel/parse_address.h | 14 +- .../{proxy_mapper.c => proxy_mapper.cc} | 16 +- .../ext/filters/client_channel/proxy_mapper.h | 16 +- ...er_registry.c => proxy_mapper_registry.cc} | 36 +- .../client_channel/proxy_mapper_registry.h | 8 +- .../ext/filters/client_channel/resolver.c | 83 - .../ext/filters/client_channel/resolver.cc | 35 + .../ext/filters/client_channel/resolver.h | 168 +- .../resolver/dns/c_ares/dns_resolver_ares.c | 458 -- .../resolver/dns/c_ares/dns_resolver_ares.cc | 502 ++ .../resolver/dns/c_ares/grpc_ares_ev_driver.h | 20 +- ...r_posix.c => grpc_ares_ev_driver_posix.cc} | 131 +- ...pc_ares_wrapper.c => grpc_ares_wrapper.cc} | 348 +- .../resolver/dns/c_ares/grpc_ares_wrapper.h | 37 +- .../dns/c_ares/grpc_ares_wrapper_fallback.c | 60 - .../dns/c_ares/grpc_ares_wrapper_fallback.cc | 59 + .../resolver/dns/native/dns_resolver.c | 310 - .../resolver/dns/native/dns_resolver.cc | 348 ++ .../resolver/fake/fake_resolver.c | 265 - .../resolver/fake/fake_resolver.cc | 297 + .../resolver/fake/fake_resolver.h | 87 +- .../resolver/sockaddr/sockaddr_resolver.c | 222 - .../resolver/sockaddr/sockaddr_resolver.cc | 214 + .../filters/client_channel/resolver_factory.c | 41 - .../filters/client_channel/resolver_factory.h | 69 +- .../client_channel/resolver_registry.c | 159 - .../client_channel/resolver_registry.cc | 178 + .../client_channel/resolver_registry.h | 88 +- .../filters/client_channel/retry_throttle.c | 202 - .../filters/client_channel/retry_throttle.cc | 191 + .../filters/client_channel/retry_throttle.h | 81 +- .../ext/filters/client_channel/subchannel.c | 816 --- .../ext/filters/client_channel/subchannel.cc | 815 +++ .../ext/filters/client_channel/subchannel.h | 190 +- .../filters/client_channel/subchannel_index.c | 251 - .../client_channel/subchannel_index.cc | 254 + .../filters/client_channel/subchannel_index.h | 26 +- .../{uri_parser.c => uri_parser.cc} | 85 +- .../ext/filters/client_channel/uri_parser.h | 24 +- .../{deadline_filter.c => deadline_filter.cc} | 179 +- .../ext/filters/deadline/deadline_filter.h | 17 +- ..._client_filter.c => http_client_filter.cc} | 296 +- .../filters/http/client/http_client_filter.h | 2 + .../filters/http/client_authority_filter.cc | 156 + .../http/client_authority_filter.h} | 21 +- ...ilters_plugin.c => http_filters_plugin.cc} | 38 +- ...ss_filter.c => message_compress_filter.cc} | 414 +- .../message_compress_filter.h | 4 +- ..._server_filter.c => http_server_filter.cc} | 248 +- .../filters/http/server/http_server_filter.h | 2 + ...lter.c => server_load_reporting_filter.cc} | 81 +- .../server_load_reporting_filter.h | 4 +- ...ugin.c => server_load_reporting_plugin.cc} | 19 +- .../server_load_reporting_plugin.h | 12 +- .../{max_age_filter.c => max_age_filter.cc} | 380 +- .../core/ext/filters/max_age/max_age_filter.h | 2 + ...e_size_filter.c => message_size_filter.cc} | 187 +- .../message_size/message_size_filter.h | 2 + ...> workaround_cronet_compression_filter.cc} | 61 +- .../workaround_cronet_compression_filter.h | 2 + ...workaround_utils.c => workaround_utils.cc} | 20 +- .../filters/workarounds/workaround_utils.h | 4 +- .../transport/chttp2/alpn/{alpn.c => alpn.cc} | 13 +- .../src/core/ext/transport/chttp2/alpn/alpn.h | 6 +- .../ext/transport/chttp2/client/authority.cc | 42 + .../ext/transport/chttp2/client/authority.h | 36 + ...chttp2_connector.c => chttp2_connector.cc} | 131 +- .../chttp2/client/chttp2_connector.h | 2 + .../{channel_create.c => channel_create.cc} | 78 +- 
...create_posix.c => channel_create_posix.cc} | 45 +- .../client/secure/secure_channel_create.c | 222 - .../client/secure/secure_channel_create.cc | 230 + .../{chttp2_server.c => chttp2_server.cc} | 231 +- .../transport/chttp2/server/chttp2_server.h | 9 +- .../{server_chttp2.c => server_chttp2.cc} | 13 +- ..._chttp2_posix.c => server_chttp2_posix.cc} | 37 +- ...ecure_chttp2.c => server_secure_chttp2.cc} | 33 +- .../{bin_decoder.c => bin_decoder.cc} | 75 +- .../transport/chttp2/transport/bin_decoder.h | 20 +- .../{bin_encoder.c => bin_encoder.cc} | 53 +- .../transport/chttp2/transport/bin_encoder.h | 4 +- .../{chttp2_plugin.c => chttp2_plugin.cc} | 15 +- ...chttp2_transport.c => chttp2_transport.cc} | 2418 ++++---- .../chttp2/transport/chttp2_transport.h | 21 +- .../transport/chttp2/transport/flow_control.c | 502 -- .../chttp2/transport/flow_control.cc | 405 ++ .../transport/chttp2/transport/flow_control.h | 482 ++ .../ext/transport/chttp2/transport/frame.h | 3 +- .../transport/{frame_data.c => frame_data.cc} | 200 +- .../transport/chttp2/transport/frame_data.h | 41 +- .../{frame_goaway.c => frame_goaway.cc} | 85 +- .../transport/chttp2/transport/frame_goaway.h | 23 +- .../transport/{frame_ping.c => frame_ping.cc} | 66 +- .../transport/chttp2/transport/frame_ping.h | 11 +- ...frame_rst_stream.c => frame_rst_stream.cc} | 64 +- .../chttp2/transport/frame_rst_stream.h | 16 +- .../{frame_settings.c => frame_settings.cc} | 94 +- .../chttp2/transport/frame_settings.h | 21 +- ...window_update.c => frame_window_update.cc} | 78 +- .../chttp2/transport/frame_window_update.h | 17 +- .../{hpack_encoder.c => hpack_encoder.cc} | 458 +- .../chttp2/transport/hpack_encoder.h | 29 +- .../{hpack_parser.c => hpack_parser.cc} | 925 ++- .../transport/chttp2/transport/hpack_parser.h | 43 +- .../{hpack_table.c => hpack_table.cc} | 83 +- .../transport/chttp2/transport/hpack_table.h | 22 +- .../{http2_settings.c => http2_settings.cc} | 8 +- .../chttp2/transport/http2_settings.h | 7 +- .../transport/{huffsyms.c => huffsyms.cc} | 2 + ...coming_metadata.c => incoming_metadata.cc} | 39 +- .../chttp2/transport/incoming_metadata.h | 21 +- .../ext/transport/chttp2/transport/internal.h | 606 +- .../transport/{parsing.c => parsing.cc} | 413 +- .../{stream_lists.c => stream_lists.cc} | 116 +- .../transport/{stream_map.c => stream_map.cc} | 78 +- .../transport/chttp2/transport/stream_map.h | 28 +- .../chttp2/transport/{varint.c => varint.cc} | 12 +- .../ext/transport/chttp2/transport/writing.c | 534 -- .../ext/transport/chttp2/transport/writing.cc | 641 ++ .../{inproc_plugin.c => inproc_plugin.cc} | 9 +- .../ext/transport/inproc/inproc_transport.c | 1299 ----- .../ext/transport/inproc/inproc_transport.cc | 1238 ++++ .../ext/transport/inproc/inproc_transport.h | 18 +- .../core/lib/{support/avl.c => avl/avl.cc} | 159 +- .../grpc/support => src/core/lib/avl}/avl.h | 68 +- Sources/CgRPC/src/core/lib/backoff/backoff.cc | 78 + Sources/CgRPC/src/core/lib/backoff/backoff.h | 89 + .../{channel_args.c => channel_args.cc} | 298 +- .../CgRPC/src/core/lib/channel/channel_args.h | 105 +- .../{channel_stack.c => channel_stack.cc} | 152 +- .../src/core/lib/channel/channel_stack.h | 167 +- ...ack_builder.c => channel_stack_builder.cc} | 198 +- .../core/lib/channel/channel_stack_builder.h | 99 +- .../src/core/lib/channel/channel_trace.cc | 239 + .../src/core/lib/channel/channel_trace.h | 133 + .../lib/channel/channel_trace_registry.cc | 80 + .../core/lib/channel/channel_trace_registry.h | 43 + ...nnected_channel.c => connected_channel.cc} | 160 
+- .../src/core/lib/channel/connected_channel.h | 9 +- Sources/CgRPC/src/core/lib/channel/context.h | 4 +- .../channel/{handshaker.c => handshaker.cc} | 150 +- .../CgRPC/src/core/lib/channel/handshaker.h | 39 +- ...shaker_factory.c => handshaker_factory.cc} | 22 +- .../src/core/lib/channel/handshaker_factory.h | 21 +- ...aker_registry.c => handshaker_registry.cc} | 29 +- .../core/lib/channel/handshaker_registry.h | 8 +- .../CgRPC/src/core/lib/channel/status_util.cc | 100 + .../CgRPC/src/core/lib/channel/status_util.h | 58 + .../core/lib/compression/algorithm_metadata.h | 21 +- .../src/core/lib/compression/compression.c | 283 - .../src/core/lib/compression/compression.cc | 174 + .../lib/compression/compression_internal.cc | 276 + .../lib/compression/compression_internal.h | 88 + ...message_compress.c => message_compress.cc} | 68 +- .../core/lib/compression/message_compress.h | 11 +- ...am_compression.c => stream_compression.cc} | 24 +- .../core/lib/compression/stream_compression.h | 40 +- ...sion_gzip.c => stream_compression_gzip.cc} | 102 +- .../lib/compression/stream_compression_gzip.h | 2 + ...ntity.c => stream_compression_identity.cc} | 42 +- .../compression/stream_compression_identity.h | 2 + .../src/core/lib/debug/{stats.c => stats.cc} | 58 +- Sources/CgRPC/src/core/lib/debug/stats.h | 36 +- .../lib/debug/{stats_data.c => stats_data.cc} | 247 +- Sources/CgRPC/src/core/lib/debug/stats_data.h | 537 +- Sources/CgRPC/src/core/lib/debug/trace.c | 146 - Sources/CgRPC/src/core/lib/debug/trace.cc | 145 + Sources/CgRPC/src/core/lib/debug/trace.h | 88 +- .../lib/{support/alloc.c => gpr/alloc.cc} | 61 +- Sources/CgRPC/src/core/lib/gpr/arena.cc | 152 + .../src/core/lib/{support => gpr}/arena.h | 14 +- .../core/lib/{support/atm.c => gpr/atm.cc} | 7 +- .../cpu_iphone.c => gpr/cpu_iphone.cc} | 2 + .../{support/cpu_linux.c => gpr/cpu_linux.cc} | 20 +- .../{support/cpu_posix.c => gpr/cpu_posix.cc} | 34 +- .../cpu_windows.c => gpr/cpu_windows.cc} | 1 + .../CgRPC/src/core/lib/{support => gpr}/env.h | 24 +- .../{support/env_linux.c => gpr/env_linux.cc} | 28 +- .../{support/env_posix.c => gpr/env_posix.cc} | 16 +- .../env_windows.c => gpr/env_windows.cc} | 18 +- .../core/lib/{support/fork.c => gpr/fork.cc} | 30 +- .../src/core/lib/{support => gpr}/fork.h | 6 +- .../{support/host_port.c => gpr/host_port.cc} | 43 +- .../support => src/core/lib/gpr}/host_port.h | 18 +- .../core/lib/{support/log.c => gpr/log.cc} | 38 +- .../log_android.c => gpr/log_android.cc} | 18 +- .../{support/log_linux.c => gpr/log_linux.cc} | 23 +- .../{support/log_posix.c => gpr/log_posix.cc} | 27 +- .../log_windows.c => gpr/log_windows.cc} | 23 +- Sources/CgRPC/src/core/lib/gpr/mpscq.cc | 117 + Sources/CgRPC/src/core/lib/gpr/mpscq.h | 86 + .../murmur_hash.c => gpr/murmur_hash.cc} | 27 +- .../core/lib/{support => gpr}/murmur_hash.h | 8 +- .../src/core/lib/{support => gpr}/spinlock.h | 17 +- .../lib/{support/string.c => gpr/string.cc} | 131 +- .../src/core/lib/{support => gpr}/string.h | 59 +- .../string_posix.c => gpr/string_posix.cc} | 13 +- .../string_util_windows.cc} | 17 +- .../string_windows.cc} | 7 +- .../lib/{support => gpr}/string_windows.h | 6 +- .../core/lib/{support/sync.c => gpr/sync.cc} | 46 +- .../sync_posix.c => gpr/sync_posix.cc} | 31 +- .../sync_windows.c => gpr/sync_windows.cc} | 28 +- .../core/lib/{support/time.c => gpr/time.cc} | 12 +- .../time_posix.c => gpr/time_posix.cc} | 18 +- .../time_precise.c => gpr/time_precise.cc} | 12 +- .../core/lib/{support => gpr}/time_precise.h | 10 +- .../time_windows.c => 
gpr/time_windows.cc} | 5 +- .../grpc/support => src/core/lib/gpr}/tls.h | 18 +- .../support => src/core/lib/gpr}/tls_gcc.h | 53 +- .../support => src/core/lib/gpr}/tls_msvc.h | 17 +- .../tls_pthread.c => gpr/tls_pthread.cc} | 6 +- .../core/lib/gpr}/tls_pthread.h | 19 +- .../src/core/lib/{support => gpr}/tmpfile.h | 18 +- .../tmpfile_msys.c => gpr/tmpfile_msys.cc} | 8 +- .../tmpfile_posix.c => gpr/tmpfile_posix.cc} | 18 +- .../tmpfile_windows.cc} | 8 +- .../support => src/core/lib/gpr}/useful.h | 6 +- .../wrap_memcpy.c => gpr/wrap_memcpy.cc} | 6 +- Sources/CgRPC/src/core/lib/gprpp/abstract.h | 34 + .../src/core/lib/{support => gprpp}/atomic.h | 10 +- .../lib/{support => gprpp}/atomic_with_atm.h | 8 +- .../lib/{support => gprpp}/atomic_with_std.h | 8 +- .../CgRPC/src/core/lib/gprpp/debug_location.h | 52 + .../CgRPC/src/core/lib/gprpp/inlined_vector.h | 132 + .../src/core/lib/gprpp/manual_constructor.h | 213 + Sources/CgRPC/src/core/lib/gprpp/memory.h | 111 + Sources/CgRPC/src/core/lib/gprpp/orphanable.h | 199 + .../CgRPC/src/core/lib/gprpp/ref_counted.h | 169 + .../src/core/lib/gprpp/ref_counted_ptr.h | 112 + Sources/CgRPC/src/core/lib/gprpp/thd.h | 135 + Sources/CgRPC/src/core/lib/gprpp/thd_posix.cc | 209 + .../CgRPC/src/core/lib/gprpp/thd_windows.cc | 162 + .../{format_request.c => format_request.cc} | 28 +- .../CgRPC/src/core/lib/http/format_request.h | 10 +- Sources/CgRPC/src/core/lib/http/httpcli.c | 321 - Sources/CgRPC/src/core/lib/http/httpcli.cc | 303 + Sources/CgRPC/src/core/lib/http/httpcli.h | 69 +- .../lib/http/httpcli_security_connector.c | 185 - .../lib/http/httpcli_security_connector.cc | 202 + .../src/core/lib/http/{parser.c => parser.cc} | 108 +- Sources/CgRPC/src/core/lib/http/parser.h | 43 +- .../lib/{support => iomgr}/block_annotate.h | 34 +- .../{call_combiner.c => call_combiner.cc} | 114 +- .../CgRPC/src/core/lib/iomgr/call_combiner.h | 47 +- Sources/CgRPC/src/core/lib/iomgr/closure.c | 219 - Sources/CgRPC/src/core/lib/iomgr/closure.h | 295 +- .../lib/iomgr/{combiner.c => combiner.cc} | 236 +- Sources/CgRPC/src/core/lib/iomgr/combiner.h | 26 +- .../lib/iomgr/{endpoint.c => endpoint.cc} | 38 +- Sources/CgRPC/src/core/lib/iomgr/endpoint.h | 64 +- .../CgRPC/src/core/lib/iomgr/endpoint_pair.h | 10 +- ...nt_pair_posix.c => endpoint_pair_posix.cc} | 17 +- ...endpoint_pair_uv.c => endpoint_pair_uv.cc} | 6 +- ...air_windows.c => endpoint_pair_windows.cc} | 25 +- .../src/core/lib/iomgr/{error.c => error.cc} | 318 +- Sources/CgRPC/src/core/lib/iomgr/error.h | 74 +- .../CgRPC/src/core/lib/iomgr/error_internal.h | 7 +- .../{ev_epoll1_linux.c => ev_epoll1_linux.cc} | 617 +- .../src/core/lib/iomgr/ev_epoll1_linux.h | 4 +- .../src/core/lib/iomgr/ev_epollex_linux.c | 1461 ----- .../src/core/lib/iomgr/ev_epollex_linux.cc | 1513 +++++ .../src/core/lib/iomgr/ev_epollex_linux.h | 4 +- ..._epollsig_linux.c => ev_epollsig_linux.cc} | 708 ++- .../src/core/lib/iomgr/ev_epollsig_linux.h | 14 +- .../{ev_poll_posix.c => ev_poll_posix.cc} | 792 +-- .../CgRPC/src/core/lib/iomgr/ev_poll_posix.h | 6 +- Sources/CgRPC/src/core/lib/iomgr/ev_posix.c | 266 - Sources/CgRPC/src/core/lib/iomgr/ev_posix.cc | 330 ++ Sources/CgRPC/src/core/lib/iomgr/ev_posix.h | 118 +- .../lib/iomgr/{ev_windows.c => ev_windows.cc} | 6 +- Sources/CgRPC/src/core/lib/iomgr/exec_ctx.c | 113 - Sources/CgRPC/src/core/lib/iomgr/exec_ctx.cc | 147 + Sources/CgRPC/src/core/lib/iomgr/exec_ctx.h | 207 +- .../lib/iomgr/{executor.c => executor.cc} | 166 +- Sources/CgRPC/src/core/lib/iomgr/executor.h | 10 +- .../lib/iomgr/{fork_posix.c => 
fork_posix.cc} | 31 +- .../iomgr/{fork_windows.c => fork_windows.cc} | 2 + .../CgRPC/src/core/lib/iomgr/gethostname.h | 2 +- ...ame_fallback.c => gethostname_fallback.cc} | 5 +- ...ame_max.c => gethostname_host_name_max.cc} | 9 +- ...tname_sysconf.c => gethostname_sysconf.cc} | 9 +- .../iomgr/{iocp_windows.c => iocp_windows.cc} | 63 +- .../CgRPC/src/core/lib/iomgr/iocp_windows.h | 14 +- Sources/CgRPC/src/core/lib/iomgr/iomgr.c | 170 - Sources/CgRPC/src/core/lib/iomgr/iomgr.cc | 178 + Sources/CgRPC/src/core/lib/iomgr/iomgr.h | 9 +- .../CgRPC/src/core/lib/iomgr/iomgr_custom.cc | 63 + .../CgRPC/src/core/lib/iomgr/iomgr_custom.h | 47 + .../src/core/lib/iomgr/iomgr_internal.cc | 43 + .../CgRPC/src/core/lib/iomgr/iomgr_internal.h | 24 +- .../CgRPC/src/core/lib/iomgr/iomgr_posix.c | 41 - .../CgRPC/src/core/lib/iomgr/iomgr_posix.cc | 67 + .../CgRPC/src/core/lib/iomgr/iomgr_posix.h | 2 + Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.c | 42 - Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.cc | 40 + Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.h | 37 - .../{iomgr_windows.c => iomgr_windows.cc} | 32 +- ...lable.c => is_epollexclusive_available.cc} | 15 +- .../lib/iomgr/is_epollexclusive_available.h | 10 + .../lib/iomgr/{load_file.c => load_file.cc} | 30 +- Sources/CgRPC/src/core/lib/iomgr/load_file.h | 14 +- .../{lockfree_event.c => lockfree_event.cc} | 149 +- .../CgRPC/src/core/lib/iomgr/lockfree_event.h | 54 +- Sources/CgRPC/src/core/lib/iomgr/nameser.h | 2 + ...us_tracker.c => network_status_tracker.cc} | 7 +- .../core/lib/iomgr/network_status_tracker.h | 6 +- .../{polling_entity.c => polling_entity.cc} | 46 +- .../CgRPC/src/core/lib/iomgr/polling_entity.h | 27 +- Sources/CgRPC/src/core/lib/iomgr/pollset.cc | 56 + Sources/CgRPC/src/core/lib/iomgr/pollset.h | 40 +- .../src/core/lib/iomgr/pollset_custom.cc | 106 + .../CgRPC/src/core/lib/iomgr/pollset_custom.h | 35 + .../CgRPC/src/core/lib/iomgr/pollset_set.cc | 55 + .../CgRPC/src/core/lib/iomgr/pollset_set.h | 38 +- .../src/core/lib/iomgr/pollset_set_custom.cc | 48 + .../src/core/lib/iomgr/pollset_set_custom.h | 26 + .../CgRPC/src/core/lib/iomgr/pollset_set_uv.c | 48 - .../src/core/lib/iomgr/pollset_set_windows.c | 49 - .../src/core/lib/iomgr/pollset_set_windows.cc | 51 + .../src/core/lib/iomgr/pollset_set_windows.h | 2 + Sources/CgRPC/src/core/lib/iomgr/pollset_uv.c | 155 - .../CgRPC/src/core/lib/iomgr/pollset_uv.cc | 93 + Sources/CgRPC/src/core/lib/iomgr/pollset_uv.h | 27 - .../{pollset_windows.c => pollset_windows.cc} | 79 +- .../src/core/lib/iomgr/pollset_windows.h | 15 +- Sources/CgRPC/src/core/lib/iomgr/port.h | 42 +- .../src/core/lib/iomgr/resolve_address.cc | 50 + .../src/core/lib/iomgr/resolve_address.h | 54 +- .../core/lib/iomgr/resolve_address_custom.cc | 187 + .../core/lib/iomgr/resolve_address_custom.h | 43 + ...dress_posix.c => resolve_address_posix.cc} | 99 +- .../src/core/lib/iomgr/resolve_address_uv.c | 280 - ...s_windows.c => resolve_address_windows.cc} | 84 +- .../{resource_quota.c => resource_quota.cc} | 500 +- .../CgRPC/src/core/lib/iomgr/resource_quota.h | 71 +- Sources/CgRPC/src/core/lib/iomgr/sockaddr.h | 14 +- .../src/core/lib/iomgr/sockaddr_custom.h | 54 + .../CgRPC/src/core/lib/iomgr/sockaddr_posix.h | 26 + .../CgRPC/src/core/lib/iomgr/sockaddr_utils.c | 262 - .../src/core/lib/iomgr/sockaddr_utils.cc | 298 + .../CgRPC/src/core/lib/iomgr/sockaddr_utils.h | 34 +- .../src/core/lib/iomgr/sockaddr_windows.h | 27 + ...actory_posix.c => socket_factory_posix.cc} | 44 +- .../src/core/lib/iomgr/socket_factory_posix.h | 40 +- 
.../{socket_mutator.c => socket_mutator.cc} | 44 +- .../CgRPC/src/core/lib/iomgr/socket_mutator.h | 34 +- .../CgRPC/src/core/lib/iomgr/socket_utils.h | 13 +- ...n_posix.c => socket_utils_common_posix.cc} | 102 +- ...et_utils_linux.c => socket_utils_linux.cc} | 10 +- ...et_utils_posix.c => socket_utils_posix.cc} | 10 +- .../src/core/lib/iomgr/socket_utils_posix.h | 41 +- .../{socket_utils_uv.c => socket_utils_uv.cc} | 17 +- ...tils_windows.c => socket_utils_windows.cc} | 14 +- .../{socket_windows.c => socket_windows.cc} | 43 +- .../CgRPC/src/core/lib/iomgr/socket_windows.h | 31 +- .../src/core/lib/iomgr/sys_epoll_wrapper.h | 2 + .../CgRPC/src/core/lib/iomgr/tcp_client.cc | 36 + Sources/CgRPC/src/core/lib/iomgr/tcp_client.h | 24 +- .../src/core/lib/iomgr/tcp_client_custom.cc | 151 + ...tcp_client_posix.c => tcp_client_posix.cc} | 261 +- .../src/core/lib/iomgr/tcp_client_posix.h | 44 +- .../CgRPC/src/core/lib/iomgr/tcp_client_uv.c | 183 - ...client_windows.c => tcp_client_windows.cc} | 106 +- .../CgRPC/src/core/lib/iomgr/tcp_custom.cc | 365 ++ Sources/CgRPC/src/core/lib/iomgr/tcp_custom.h | 81 + Sources/CgRPC/src/core/lib/iomgr/tcp_posix.c | 819 --- Sources/CgRPC/src/core/lib/iomgr/tcp_posix.cc | 814 +++ Sources/CgRPC/src/core/lib/iomgr/tcp_posix.h | 15 +- .../CgRPC/src/core/lib/iomgr/tcp_server.cc | 73 + Sources/CgRPC/src/core/lib/iomgr/tcp_server.h | 67 +- .../src/core/lib/iomgr/tcp_server_custom.cc | 472 ++ ...tcp_server_posix.c => tcp_server_posix.cc} | 244 +- .../core/lib/iomgr/tcp_server_utils_posix.h | 40 +- ...mon.c => tcp_server_utils_posix_common.cc} | 64 +- ...rs.c => tcp_server_utils_posix_ifaddrs.cc} | 57 +- ....c => tcp_server_utils_posix_noifaddrs.cc} | 6 +- .../CgRPC/src/core/lib/iomgr/tcp_server_uv.c | 454 -- ...server_windows.c => tcp_server_windows.cc} | 209 +- Sources/CgRPC/src/core/lib/iomgr/tcp_uv.c | 381 -- Sources/CgRPC/src/core/lib/iomgr/tcp_uv.cc | 420 ++ Sources/CgRPC/src/core/lib/iomgr/tcp_uv.h | 45 - .../iomgr/{tcp_windows.c => tcp_windows.cc} | 242 +- .../CgRPC/src/core/lib/iomgr/tcp_windows.h | 15 +- ...veraged_stats.c => time_averaged_stats.cc} | 2 + Sources/CgRPC/src/core/lib/iomgr/timer.cc | 45 + Sources/CgRPC/src/core/lib/iomgr/timer.h | 67 +- .../CgRPC/src/core/lib/iomgr/timer_custom.cc | 93 + .../CgRPC/src/core/lib/iomgr/timer_custom.h | 43 + .../{timer_generic.c => timer_generic.cc} | 370 +- .../CgRPC/src/core/lib/iomgr/timer_generic.h | 37 - .../lib/iomgr/{timer_heap.c => timer_heap.cc} | 54 +- Sources/CgRPC/src/core/lib/iomgr/timer_heap.h | 18 +- .../{timer_manager.c => timer_manager.cc} | 160 +- .../CgRPC/src/core/lib/iomgr/timer_manager.h | 2 + Sources/CgRPC/src/core/lib/iomgr/timer_uv.c | 101 - Sources/CgRPC/src/core/lib/iomgr/timer_uv.cc | 66 + Sources/CgRPC/src/core/lib/iomgr/timer_uv.h | 32 - Sources/CgRPC/src/core/lib/iomgr/udp_server.c | 549 -- .../CgRPC/src/core/lib/iomgr/udp_server.cc | 747 +++ Sources/CgRPC/src/core/lib/iomgr/udp_server.h | 85 +- ..._sockets_posix.c => unix_sockets_posix.cc} | 59 +- .../src/core/lib/iomgr/unix_sockets_posix.h | 14 +- ...osix_noop.c => unix_sockets_posix_noop.cc} | 12 +- .../iomgr/{wakeup_fd_cv.c => wakeup_fd_cv.cc} | 21 +- .../CgRPC/src/core/lib/iomgr/wakeup_fd_cv.h | 28 +- ...keup_fd_eventfd.c => wakeup_fd_eventfd.cc} | 5 +- ..._fd_nospecial.c => wakeup_fd_nospecial.cc} | 4 +- .../{wakeup_fd_pipe.c => wakeup_fd_pipe.cc} | 3 + .../CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.h | 4 +- .../{wakeup_fd_posix.c => wakeup_fd_posix.cc} | 15 +- .../src/core/lib/iomgr/wakeup_fd_posix.h | 2 + 
Sources/CgRPC/src/core/lib/json/json.c | 48 - Sources/CgRPC/src/core/lib/json/json.cc | 86 + Sources/CgRPC/src/core/lib/json/json.h | 23 +- .../json/{json_reader.c => json_reader.cc} | 54 +- Sources/CgRPC/src/core/lib/json/json_reader.h | 37 +- .../json/{json_string.c => json_string.cc} | 119 +- .../json/{json_writer.c => json_writer.cc} | 62 +- Sources/CgRPC/src/core/lib/json/json_writer.h | 32 +- .../{basic_timers.c => basic_timers.cc} | 88 +- .../{stap_timers.c => stap_timers.cc} | 10 +- Sources/CgRPC/src/core/lib/profiling/timers.h | 52 +- ...security_context.c => security_context.cc} | 214 +- .../lib/security/context/security_context.h | 66 +- .../credentials/alts/alts_credentials.cc | 119 + .../credentials/alts/alts_credentials.h | 82 + .../credentials/alts/check_gcp_environment.cc | 72 + .../credentials/alts/check_gcp_environment.h | 57 + .../alts/check_gcp_environment_linux.cc | 67 + .../alts/check_gcp_environment_no_op.cc | 33 + .../alts/check_gcp_environment_windows.cc | 114 + .../grpc_alts_credentials_client_options.cc | 126 + .../alts/grpc_alts_credentials_options.cc | 46 + .../alts/grpc_alts_credentials_options.h | 75 + .../grpc_alts_credentials_server_options.cc | 58 + ...credentials.c => composite_credentials.cc} | 180 +- .../composite/composite_credentials.h | 20 +- .../lib/security/credentials/credentials.c | 287 - .../lib/security/credentials/credentials.cc | 286 + .../lib/security/credentials/credentials.h | 145 +- ...als_metadata.c => credentials_metadata.cc} | 17 +- .../credentials/fake/fake_credentials.c | 141 - .../credentials/fake/fake_credentials.cc | 136 + .../credentials/fake/fake_credentials.h | 15 +- ...tials_generic.c => credentials_generic.cc} | 18 +- ...ntials.c => google_default_credentials.cc} | 159 +- .../google_default_credentials.h | 2 +- .../{iam_credentials.c => iam_credentials.cc} | 51 +- .../credentials/iam/iam_credentials.h | 2 + .../jwt/{json_token.c => json_token.cc} | 162 +- .../lib/security/credentials/jwt/json_token.h | 32 +- .../{jwt_credentials.c => jwt_credentials.cc} | 106 +- .../credentials/jwt/jwt_credentials.h | 9 +- .../jwt/{jwt_verifier.c => jwt_verifier.cc} | 581 +- .../security/credentials/jwt/jwt_verifier.h | 58 +- ...h2_credentials.c => oauth2_credentials.cc} | 334 +- .../credentials/oauth2/oauth2_credentials.h | 45 +- ...in_credentials.c => plugin_credentials.cc} | 142 +- .../credentials/plugin/plugin_credentials.h | 16 +- .../credentials/ssl/ssl_credentials.c | 194 - .../credentials/ssl/ssl_credentials.cc | 349 ++ .../credentials/ssl/ssl_credentials.h | 21 + .../alts_security_connector.cc | 287 + .../alts_security_connector.h | 69 + .../security_connector/security_connector.cc | 1203 ++++ .../security_connector/security_connector.h | 284 + .../lib/security/transport/auth_filters.h | 10 + .../security/transport/client_auth_filter.c | 428 -- .../security/transport/client_auth_filter.cc | 418 ++ .../lib/security/transport/lb_targets_info.c | 54 - .../lib/security/transport/lb_targets_info.h | 32 - .../{secure_endpoint.c => secure_endpoint.cc} | 252 +- .../lib/security/transport/secure_endpoint.h | 12 +- .../security/transport/security_connector.c | 914 --- .../security/transport/security_connector.h | 248 - ...ty_handshaker.c => security_handshaker.cc} | 361 +- .../security/transport/security_handshaker.h | 10 +- ...er_auth_filter.c => server_auth_filter.cc} | 181 +- .../transport/target_authority_table.cc | 75 + .../transport/target_authority_table.h | 40 + .../transport/{tsi_error.c => tsi_error.cc} | 4 +- 
.../core/lib/security/transport/tsi_error.h | 4 +- .../util/{json_util.c => json_util.cc} | 24 +- .../src/core/lib/security/util/json_util.h | 10 +- .../src/core/lib/slice/{b64.c => b64.cc} | 70 +- Sources/CgRPC/src/core/lib/slice/b64.h | 13 +- ...percent_encoding.c => percent_encoding.cc} | 42 +- .../src/core/lib/slice/percent_encoding.h | 8 +- .../src/core/lib/slice/{slice.c => slice.cc} | 173 +- .../slice/{slice_buffer.c => slice_buffer.cc} | 127 +- .../src/core/lib/slice/slice_hash_table.c | 146 - .../src/core/lib/slice/slice_hash_table.h | 229 +- .../slice/{slice_intern.c => slice_intern.cc} | 96 +- .../CgRPC/src/core/lib/slice/slice_internal.h | 16 +- .../src/core/lib/slice/slice_string_helpers.c | 80 - .../core/lib/slice/slice_string_helpers.cc | 118 + .../src/core/lib/slice/slice_string_helpers.h | 23 +- .../core/lib/slice/slice_weak_hash_table.h | 109 + Sources/CgRPC/src/core/lib/support/arena.c | 83 - Sources/CgRPC/src/core/lib/support/backoff.c | 72 - Sources/CgRPC/src/core/lib/support/backoff.h | 56 - Sources/CgRPC/src/core/lib/support/cmdline.c | 330 -- .../CgRPC/src/core/lib/support/histogram.c | 228 - Sources/CgRPC/src/core/lib/support/memory.h | 59 - Sources/CgRPC/src/core/lib/support/mpscq.c | 79 - Sources/CgRPC/src/core/lib/support/mpscq.h | 53 - .../src/core/lib/support/stack_lockfree.c | 137 - .../src/core/lib/support/stack_lockfree.h | 38 - .../src/core/lib/support/subprocess_posix.c | 99 - .../src/core/lib/support/subprocess_windows.c | 126 - Sources/CgRPC/src/core/lib/support/thd.c | 49 - .../CgRPC/src/core/lib/support/thd_internal.h | 30 - .../CgRPC/src/core/lib/support/thd_posix.c | 136 - .../CgRPC/src/core/lib/support/thd_windows.c | 104 - Sources/CgRPC/src/core/lib/surface/alarm.c | 139 - .../src/core/lib/surface/alarm_internal.h | 40 - .../lib/surface/{api_trace.c => api_trace.cc} | 6 +- .../CgRPC/src/core/lib/surface/api_trace.h | 6 +- .../surface/{byte_buffer.c => byte_buffer.cc} | 34 +- ..._buffer_reader.c => byte_buffer_reader.cc} | 44 +- .../src/core/lib/surface/{call.c => call.cc} | 1383 ++--- Sources/CgRPC/src/core/lib/surface/call.h | 85 +- .../{call_details.c => call_details.cc} | 9 +- .../{call_log_batch.c => call_log_batch.cc} | 28 +- .../src/core/lib/surface/call_test_only.h | 30 +- Sources/CgRPC/src/core/lib/surface/channel.c | 454 -- Sources/CgRPC/src/core/lib/surface/channel.cc | 450 ++ Sources/CgRPC/src/core/lib/surface/channel.h | 49 +- .../{channel_init.c => channel_init.cc} | 35 +- .../CgRPC/src/core/lib/surface/channel_init.h | 20 +- .../{channel_ping.c => channel_ping.cc} | 36 +- ...nel_stack_type.c => channel_stack_type.cc} | 7 +- .../src/core/lib/surface/channel_stack_type.h | 4 +- .../src/core/lib/surface/completion_queue.c | 1249 ---- .../src/core/lib/surface/completion_queue.cc | 1262 ++++ .../src/core/lib/surface/completion_queue.h | 71 +- ..._factory.c => completion_queue_factory.cc} | 6 +- .../lib/surface/completion_queue_factory.h | 2 + .../{event_string.c => event_string.cc} | 22 +- .../CgRPC/src/core/lib/surface/event_string.h | 4 +- .../src/core/lib/surface/{init.c => init.cc} | 101 +- .../surface/{init_secure.c => init_secure.cc} | 46 +- .../CgRPC/src/core/lib/surface/lame_client.cc | 107 +- .../CgRPC/src/core/lib/surface/lame_client.h | 2 + .../{metadata_array.c => metadata_array.cc} | 2 + .../core/lib/surface/{server.c => server.cc} | 961 ++- Sources/CgRPC/src/core/lib/surface/server.h | 31 +- ...lidate_metadata.c => validate_metadata.cc} | 22 +- .../src/core/lib/surface/validate_metadata.h | 6 +- .../lib/surface/{version.c => 
version.cc} | 6 +- .../src/core/lib/transport/bdp_estimator.c | 110 - .../src/core/lib/transport/bdp_estimator.cc | 87 + .../src/core/lib/transport/bdp_estimator.h | 109 +- .../src/core/lib/transport/byte_stream.c | 187 - .../src/core/lib/transport/byte_stream.cc | 160 + .../src/core/lib/transport/byte_stream.h | 208 +- ...ectivity_state.c => connectivity_state.cc} | 121 +- .../core/lib/transport/connectivity_state.h | 44 +- .../{error_utils.c => error_utils.cc} | 59 +- .../src/core/lib/transport/error_utils.h | 17 +- .../lib/transport/{metadata.c => metadata.cc} | 233 +- .../CgRPC/src/core/lib/transport/metadata.h | 60 +- .../src/core/lib/transport/metadata_batch.c | 315 - .../src/core/lib/transport/metadata_batch.cc | 329 ++ .../src/core/lib/transport/metadata_batch.h | 104 +- .../src/core/lib/transport/pid_controller.c | 63 - .../src/core/lib/transport/pid_controller.cc | 51 + .../src/core/lib/transport/pid_controller.h | 118 +- .../src/core/lib/transport/service_config.c | 246 - .../src/core/lib/transport/service_config.cc | 106 + .../src/core/lib/transport/service_config.h | 259 +- .../src/core/lib/transport/static_metadata.c | 582 -- .../src/core/lib/transport/static_metadata.cc | 601 ++ .../src/core/lib/transport/static_metadata.h | 230 +- ...atus_conversion.c => status_conversion.cc} | 6 +- .../core/lib/transport/status_conversion.h | 6 +- .../src/core/lib/transport/status_metadata.cc | 54 + .../src/core/lib/transport/status_metadata.h | 30 + ...timeout_encoding.c => timeout_encoding.cc} | 97 +- .../src/core/lib/transport/timeout_encoding.h | 9 +- .../CgRPC/src/core/lib/transport/transport.c | 289 - .../CgRPC/src/core/lib/transport/transport.cc | 278 + .../CgRPC/src/core/lib/transport/transport.h | 214 +- .../src/core/lib/transport/transport_impl.h | 37 +- ...ort_op_string.c => transport_op_string.cc} | 78 +- ...gin_registry.c => grpc_plugin_registry.cc} | 82 +- .../CgRPC/src/core/tsi/alts/crypt/aes_gcm.cc | 687 +++ Sources/CgRPC/src/core/tsi/alts/crypt/gsec.cc | 189 + Sources/CgRPC/src/core/tsi/alts/crypt/gsec.h | 454 ++ .../tsi/alts/frame_protector/alts_counter.cc | 118 + .../tsi/alts/frame_protector/alts_counter.h | 98 + .../tsi/alts/frame_protector/alts_crypter.cc | 66 + .../tsi/alts/frame_protector/alts_crypter.h | 255 + .../frame_protector/alts_frame_protector.cc | 407 ++ .../frame_protector/alts_frame_protector.h | 55 + .../alts_record_protocol_crypter_common.cc | 114 + .../alts_record_protocol_crypter_common.h | 114 + .../alts_seal_privacy_integrity_crypter.cc | 105 + .../alts_unseal_privacy_integrity_crypter.cc | 103 + .../tsi/alts/frame_protector/frame_handler.cc | 218 + .../tsi/alts/frame_protector/frame_handler.h | 236 + .../alts/handshaker/alts_handshaker_client.cc | 316 + .../alts/handshaker/alts_handshaker_client.h | 137 + .../handshaker/alts_handshaker_service_api.cc | 520 ++ .../handshaker/alts_handshaker_service_api.h | 323 + .../alts_handshaker_service_api_util.cc | 143 + .../alts_handshaker_service_api_util.h | 149 + .../tsi/alts/handshaker/alts_tsi_event.cc | 73 + .../core/tsi/alts/handshaker/alts_tsi_event.h | 93 + .../alts/handshaker/alts_tsi_handshaker.cc | 483 ++ .../tsi/alts/handshaker/alts_tsi_handshaker.h | 83 + .../handshaker/alts_tsi_handshaker_private.h | 52 + .../tsi/alts/handshaker/alts_tsi_utils.cc | 58 + .../core/tsi/alts/handshaker/alts_tsi_utils.h | 52 + .../core/tsi/alts/handshaker/altscontext.pb.c | 48 + .../core/tsi/alts/handshaker/altscontext.pb.h | 64 + .../core/tsi/alts/handshaker/handshaker.pb.c | 123 + 
.../core/tsi/alts/handshaker/handshaker.pb.h | 255 + .../handshaker/transport_security_common.pb.c | 50 + .../handshaker/transport_security_common.pb.h | 78 + .../transport_security_common_api.cc | 196 + .../transport_security_common_api.h | 163 + ...lts_grpc_integrity_only_record_protocol.cc | 180 + ...alts_grpc_integrity_only_record_protocol.h | 52 + ..._grpc_privacy_integrity_record_protocol.cc | 144 + ...s_grpc_privacy_integrity_record_protocol.h | 49 + .../alts_grpc_record_protocol.h | 91 + .../alts_grpc_record_protocol_common.cc | 174 + .../alts_grpc_record_protocol_common.h | 100 + .../alts_iovec_record_protocol.cc | 476 ++ .../alts_iovec_record_protocol.h | 199 + .../alts_zero_copy_grpc_protector.cc | 296 + .../alts_zero_copy_grpc_protector.h | 52 + .../src/core/tsi/alts_transport_security.cc | 63 + .../src/core/tsi/alts_transport_security.h | 47 + ..._security.c => fake_transport_security.cc} | 323 +- .../src/core/tsi/fake_transport_security.h | 20 +- .../src/core/tsi/gts_transport_security.c | 40 - .../core/tsi/ssl/session_cache/ssl_session.h | 73 + .../session_cache/ssl_session_boringssl.cc | 58 + .../ssl/session_cache/ssl_session_cache.cc | 211 + .../tsi/ssl/session_cache/ssl_session_cache.h | 97 + .../ssl/session_cache/ssl_session_openssl.cc | 76 + ...t_security.c => ssl_transport_security.cc} | 1041 ++-- .../src/core/tsi/ssl_transport_security.h | 194 +- Sources/CgRPC/src/core/tsi/ssl_types.h | 2 + ...sport_security.c => transport_security.cc} | 183 +- .../CgRPC/src/core/tsi/transport_security.h | 120 +- ...dapter.c => transport_security_adapter.cc} | 121 +- .../src/core/tsi/transport_security_adapter.h | 14 +- .../src/core/tsi/transport_security_grpc.c | 71 - .../src/core/tsi/transport_security_grpc.cc | 66 + .../src/core/tsi/transport_security_grpc.h | 44 +- .../core/tsi/transport_security_interface.h | 92 +- Sources/CgRPC/third_party/nanopb/pb.h | 2 +- Sources/SwiftGRPC/Core/Roots.swift | 9 +- SwiftGRPC.podspec | 2 +- fix-indentation-settings.rb | 7 - fix-project-settings.rb | 15 + vendor-boringssl.sh | 17 +- 1154 files changed, 112994 insertions(+), 100683 deletions(-) delete mode 100644 Sources/BoringSSL/crypto/aes/internal.h delete mode 100644 Sources/BoringSSL/crypto/asn1/x_long.c delete mode 100644 Sources/BoringSSL/crypto/bn/internal.h delete mode 100644 Sources/BoringSSL/crypto/bn/montgomery_inv.c rename Sources/BoringSSL/crypto/{bn => bn_extra}/bn_asn1.c (70%) rename Sources/BoringSSL/crypto/{bn => bn_extra}/convert.c (63%) delete mode 100644 Sources/BoringSSL/crypto/cipher/aead.c delete mode 100644 Sources/BoringSSL/crypto/cipher/e_aes.c delete mode 100644 Sources/BoringSSL/crypto/cipher/e_chacha20poly1305.c delete mode 100644 Sources/BoringSSL/crypto/cipher/tls_cbc.c rename Sources/BoringSSL/crypto/{asn1/t_bitst.c => cipher_extra/cipher_extra.c} (67%) rename Sources/BoringSSL/crypto/{cipher => cipher_extra}/derive_key.c (99%) create mode 100644 Sources/BoringSSL/crypto/cipher_extra/e_aesctrhmac.c create mode 100644 Sources/BoringSSL/crypto/cipher_extra/e_aesgcmsiv.c create mode 100644 Sources/BoringSSL/crypto/cipher_extra/e_chacha20poly1305.c rename Sources/BoringSSL/crypto/{cipher => cipher_extra}/e_null.c (99%) rename Sources/BoringSSL/crypto/{cipher => cipher_extra}/e_rc2.c (93%) rename Sources/BoringSSL/crypto/{cipher => cipher_extra}/e_rc4.c (100%) rename Sources/BoringSSL/crypto/{cipher => cipher_extra}/e_ssl3.c (65%) rename Sources/BoringSSL/crypto/{cipher => cipher_extra}/e_tls.c (61%) rename Sources/BoringSSL/crypto/{asn1/x_bignum.c => 
cipher_extra/internal.h} (51%) create mode 100644 Sources/BoringSSL/crypto/cipher_extra/tls_cbc.c delete mode 100644 Sources/BoringSSL/crypto/curve25519/curve25519.c delete mode 100644 Sources/BoringSSL/crypto/curve25519/internal.h create mode 100644 Sources/BoringSSL/crypto/digest_extra/digest_extra.c delete mode 100644 Sources/BoringSSL/crypto/ec/ec.c delete mode 100644 Sources/BoringSSL/crypto/ec/p224-64.c delete mode 100644 Sources/BoringSSL/crypto/ec/util-64.c rename Sources/BoringSSL/crypto/{ec => ec_extra}/ec_asn1.c (76%) delete mode 100644 Sources/BoringSSL/crypto/ecdsa/ecdsa.c rename Sources/BoringSSL/crypto/{ecdsa => ecdsa_extra}/ecdsa_asn1.c (74%) create mode 100644 Sources/BoringSSL/crypto/err/err_data.c create mode 100644 Sources/BoringSSL/crypto/err/internal.h create mode 100644 Sources/BoringSSL/crypto/evp/p_ed25519.c create mode 100644 Sources/BoringSSL/crypto/evp/p_ed25519_asn1.c create mode 100644 Sources/BoringSSL/crypto/evp/scrypt.c rename Sources/BoringSSL/crypto/{ => fipsmodule}/aes/aes.c (95%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/aes/internal.h rename Sources/BoringSSL/crypto/{ => fipsmodule}/aes/key_wrap.c (96%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/aes/mode_wrappers.c (98%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/add.c (86%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/bn.c (92%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/bn/bytes.c rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/cmp.c (90%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/ctx.c (80%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/div.c (68%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/exponentiation.c (63%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/gcd.c (78%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/generic.c (87%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/bn/internal.h rename Sources/BoringSSL/crypto/{bn/kronecker.c => fipsmodule/bn/jacobi.c} (68%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/montgomery.c (61%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/bn/montgomery_inv.c rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/mul.c (66%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/prime.c (83%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/random.c (66%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/rsaz_exp.c (99%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/rsaz_exp.h (100%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/shift.c (87%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/bn/sqrt.c (68%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/cipher/aead.c rename Sources/BoringSSL/crypto/{ => fipsmodule}/cipher/cipher.c (87%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/cipher/e_aes.c rename Sources/BoringSSL/crypto/{ => fipsmodule}/cipher/e_des.c (75%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/cipher/internal.h (54%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/delocate.h rename Sources/BoringSSL/crypto/{ => fipsmodule}/des/des.c (93%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/des/internal.h (91%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/digest/digest.c (82%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/digest/digests.c (52%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/digest/internal.h (80%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/digest/md32_common.h (73%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/ec/ec.c rename Sources/BoringSSL/crypto/{ => 
fipsmodule}/ec/ec_key.c (84%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/ec/ec_montgomery.c (81%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/ec/internal.h (62%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/ec/oct.c (92%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/ec/p224-64.c rename Sources/BoringSSL/crypto/{ => fipsmodule}/ec/p256-64.c (61%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/ec/p256-x86_64-table.h (99%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/ec/p256-x86_64.c (64%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/ec/p256-x86_64.h (64%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/ec/simple.c (78%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/ec/util-64.c rename Sources/BoringSSL/crypto/{ => fipsmodule}/ec/wnaf.c (72%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/ecdsa/ecdsa.c rename Sources/BoringSSL/crypto/{ => fipsmodule}/hmac/hmac.c (87%) rename Sources/BoringSSL/crypto/{rand/internal.h => fipsmodule/is_fips.c} (64%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/md4/md4.c (92%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/md5/md5.c (94%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/modes/cbc.c (81%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/modes/cfb.c (88%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/modes/ctr.c (79%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/modes/gcm.c (85%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/modes/internal.h (62%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/modes/ofb.c (99%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/modes/polyval.c (77%) create mode 100644 Sources/BoringSSL/crypto/fipsmodule/rand/ctrdrbg.c create mode 100644 Sources/BoringSSL/crypto/fipsmodule/rand/internal.h create mode 100644 Sources/BoringSSL/crypto/fipsmodule/rand/rand.c create mode 100644 Sources/BoringSSL/crypto/fipsmodule/rand/urandom.c rename Sources/BoringSSL/crypto/{ => fipsmodule}/rsa/blinding.c (89%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/rsa/internal.h (62%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/rsa/padding.c (59%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/rsa/rsa.c (67%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/rsa/rsa_impl.c (53%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/sha/sha1-altivec.c (85%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/sha/sha1.c (92%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/sha/sha256.c (91%) rename Sources/BoringSSL/crypto/{ => fipsmodule}/sha/sha512.c (93%) create mode 100644 Sources/BoringSSL/crypto/pkcs7/internal.h create mode 100644 Sources/BoringSSL/crypto/pkcs7/pkcs7.c rename Sources/BoringSSL/crypto/{x509/pkcs7.c => pkcs7/pkcs7_x509.c} (51%) delete mode 100644 Sources/BoringSSL/crypto/pkcs8/p8_pkey.c create mode 100644 Sources/BoringSSL/crypto/pkcs8/pkcs8_x509.c delete mode 100644 Sources/BoringSSL/crypto/rand/rand.c delete mode 100644 Sources/BoringSSL/crypto/rand/urandom.c rename Sources/BoringSSL/crypto/{rand => rand_extra}/deterministic.c (79%) create mode 100644 Sources/BoringSSL/crypto/rand_extra/forkunsafe.c rename Sources/BoringSSL/crypto/{rand => rand_extra}/fuchsia.c (79%) create mode 100644 Sources/BoringSSL/crypto/rand_extra/rand_extra.c rename Sources/BoringSSL/crypto/{rand => rand_extra}/windows.c (84%) rename Sources/BoringSSL/crypto/{rsa => rsa_extra}/rsa_asn1.c (64%) delete mode 100644 Sources/BoringSSL/crypto/x509/x509type.c create mode 100644 Sources/BoringSSL/include/openssl/is_boringssl.h create mode 100644 Sources/BoringSSL/include/openssl/span.h 
delete mode 100644 Sources/BoringSSL/include/openssl/stack_macros.h rename Sources/BoringSSL/ssl/{bio_ssl.c => bio_ssl.cc} (94%) rename Sources/BoringSSL/ssl/{custom_extensions.c => custom_extensions.cc} (90%) rename Sources/BoringSSL/ssl/{d1_both.c => d1_both.cc} (57%) rename Sources/BoringSSL/ssl/{d1_lib.c => d1_lib.cc} (67%) rename Sources/BoringSSL/ssl/{d1_pkt.c => d1_pkt.cc} (57%) rename Sources/BoringSSL/ssl/{d1_srtp.c => d1_srtp.cc} (94%) rename Sources/BoringSSL/ssl/{dtls_method.c => dtls_method.cc} (71%) rename Sources/BoringSSL/ssl/{dtls_record.c => dtls_record.cc} (73%) create mode 100644 Sources/BoringSSL/ssl/handshake.cc delete mode 100644 Sources/BoringSSL/ssl/handshake_client.c create mode 100644 Sources/BoringSSL/ssl/handshake_client.cc delete mode 100644 Sources/BoringSSL/ssl/handshake_server.c create mode 100644 Sources/BoringSSL/ssl/handshake_server.cc delete mode 100644 Sources/BoringSSL/ssl/s3_both.c create mode 100644 Sources/BoringSSL/ssl/s3_both.cc rename Sources/BoringSSL/ssl/{s3_lib.c => s3_lib.cc} (88%) rename Sources/BoringSSL/ssl/{s3_pkt.c => s3_pkt.cc} (54%) delete mode 100644 Sources/BoringSSL/ssl/ssl_aead_ctx.c create mode 100644 Sources/BoringSSL/ssl/ssl_aead_ctx.cc rename Sources/BoringSSL/ssl/{ssl_asn1.c => ssl_asn1.cc} (70%) delete mode 100644 Sources/BoringSSL/ssl/ssl_buffer.c create mode 100644 Sources/BoringSSL/ssl/ssl_buffer.cc rename Sources/BoringSSL/ssl/{ssl_cert.c => ssl_cert.cc} (61%) rename Sources/BoringSSL/ssl/{ssl_cipher.c => ssl_cipher.cc} (67%) delete mode 100644 Sources/BoringSSL/ssl/ssl_ecdh.c rename Sources/BoringSSL/ssl/{ssl_file.c => ssl_file.cc} (93%) create mode 100644 Sources/BoringSSL/ssl/ssl_key_share.cc rename Sources/BoringSSL/ssl/{ssl_lib.c => ssl_lib.cc} (66%) delete mode 100644 Sources/BoringSSL/ssl/ssl_privkey.c create mode 100644 Sources/BoringSSL/ssl/ssl_privkey.cc rename Sources/BoringSSL/ssl/{ssl_session.c => ssl_session.cc} (60%) rename Sources/BoringSSL/ssl/{ssl_stat.c => ssl_stat.cc} (59%) rename Sources/BoringSSL/ssl/{ssl_transcript.c => ssl_transcript.cc} (60%) create mode 100644 Sources/BoringSSL/ssl/ssl_versions.cc rename Sources/BoringSSL/ssl/{ssl_x509.c => ssl_x509.cc} (50%) delete mode 100644 Sources/BoringSSL/ssl/t1_enc.c create mode 100644 Sources/BoringSSL/ssl/t1_enc.cc rename Sources/BoringSSL/ssl/{t1_lib.c => t1_lib.cc} (56%) delete mode 100644 Sources/BoringSSL/ssl/tls13_both.c create mode 100644 Sources/BoringSSL/ssl/tls13_both.cc delete mode 100644 Sources/BoringSSL/ssl/tls13_client.c create mode 100644 Sources/BoringSSL/ssl/tls13_client.cc delete mode 100644 Sources/BoringSSL/ssl/tls13_enc.c create mode 100644 Sources/BoringSSL/ssl/tls13_enc.cc delete mode 100644 Sources/BoringSSL/ssl/tls13_server.c create mode 100644 Sources/BoringSSL/ssl/tls13_server.cc rename Sources/BoringSSL/ssl/{tls_method.c => tls_method.cc} (70%) delete mode 100644 Sources/BoringSSL/ssl/tls_record.c create mode 100644 Sources/BoringSSL/ssl/tls_record.cc create mode 100644 Sources/BoringSSL/third_party/fiat/curve25519.c create mode 100644 Sources/BoringSSL/third_party/fiat/internal.h delete mode 100644 Sources/CgRPC/include/grpc/impl/codegen/exec_ctx_fwd.h delete mode 100644 Sources/CgRPC/include/grpc/support/cmdline.h delete mode 100644 Sources/CgRPC/include/grpc/support/histogram.h delete mode 100644 Sources/CgRPC/include/grpc/support/subprocess.h delete mode 100644 Sources/CgRPC/include/grpc/support/thd.h create mode 100644 Sources/CgRPC/include/grpc/support/thd_id.h delete mode 100644 
Sources/CgRPC/src/core/ext/census/aggregation.h delete mode 100644 Sources/CgRPC/src/core/ext/census/base_resources.c delete mode 100644 Sources/CgRPC/src/core/ext/census/base_resources.h delete mode 100644 Sources/CgRPC/src/core/ext/census/census_interface.h delete mode 100644 Sources/CgRPC/src/core/ext/census/census_rpc_stats.h delete mode 100644 Sources/CgRPC/src/core/ext/census/context.c delete mode 100644 Sources/CgRPC/src/core/ext/census/gen/census.pb.c delete mode 100644 Sources/CgRPC/src/core/ext/census/gen/census.pb.h delete mode 100644 Sources/CgRPC/src/core/ext/census/gen/trace_context.pb.c delete mode 100644 Sources/CgRPC/src/core/ext/census/gen/trace_context.pb.h rename Sources/CgRPC/src/core/ext/census/{grpc_context.c => grpc_context.cc} (70%) delete mode 100644 Sources/CgRPC/src/core/ext/census/grpc_filter.c delete mode 100644 Sources/CgRPC/src/core/ext/census/grpc_plugin.c delete mode 100644 Sources/CgRPC/src/core/ext/census/initialize.c delete mode 100644 Sources/CgRPC/src/core/ext/census/intrusive_hash_map.c delete mode 100644 Sources/CgRPC/src/core/ext/census/intrusive_hash_map.h delete mode 100644 Sources/CgRPC/src/core/ext/census/intrusive_hash_map_internal.h delete mode 100644 Sources/CgRPC/src/core/ext/census/mlog.c delete mode 100644 Sources/CgRPC/src/core/ext/census/mlog.h delete mode 100644 Sources/CgRPC/src/core/ext/census/operation.c delete mode 100644 Sources/CgRPC/src/core/ext/census/placeholders.c delete mode 100644 Sources/CgRPC/src/core/ext/census/resource.c delete mode 100644 Sources/CgRPC/src/core/ext/census/resource.h delete mode 100644 Sources/CgRPC/src/core/ext/census/rpc_metric_id.h delete mode 100644 Sources/CgRPC/src/core/ext/census/trace_context.c delete mode 100644 Sources/CgRPC/src/core/ext/census/trace_context.h delete mode 100644 Sources/CgRPC/src/core/ext/census/trace_label.h delete mode 100644 Sources/CgRPC/src/core/ext/census/trace_propagation.h delete mode 100644 Sources/CgRPC/src/core/ext/census/trace_status.h delete mode 100644 Sources/CgRPC/src/core/ext/census/trace_string.h delete mode 100644 Sources/CgRPC/src/core/ext/census/tracing.c delete mode 100644 Sources/CgRPC/src/core/ext/census/tracing.h create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/backup_poller.cc rename Sources/CgRPC/src/core/{tsi/gts_transport_security.h => ext/filters/client_channel/backup_poller.h} (52%) rename Sources/CgRPC/src/core/ext/filters/client_channel/{channel_connectivity.c => channel_connectivity.cc} (64%) delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.cc rename Sources/CgRPC/src/core/ext/filters/client_channel/{client_channel_factory.c => client_channel_factory.cc} (63%) rename Sources/CgRPC/src/core/ext/filters/client_channel/{client_channel_plugin.c => client_channel_plugin.cc} (50%) rename Sources/CgRPC/src/core/ext/filters/client_channel/{connector.c => connector.cc} (67%) rename Sources/CgRPC/src/core/ext/filters/client_channel/{http_connect_handshaker.c => http_connect_handshaker.cc} (72%) rename Sources/CgRPC/src/core/ext/filters/client_channel/{http_proxy.c => http_proxy.cc} (82%) delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c create mode 100644 
Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc rename Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/{grpclb_client_stats.c => grpclb_client_stats.cc} (88%) rename Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/{load_balancer_api.c => load_balancer_api.cc} (65%) delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h rename Sources/CgRPC/src/core/ext/filters/client_channel/{lb_policy_factory.c => lb_policy_factory.cc} (74%) delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.cc create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/method_params.cc create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/method_params.h rename Sources/CgRPC/src/core/ext/filters/client_channel/{parse_address.c => parse_address.cc} (68%) rename Sources/CgRPC/src/core/ext/filters/client_channel/{proxy_mapper.c => proxy_mapper.cc} (74%) rename Sources/CgRPC/src/core/ext/filters/client_channel/{proxy_mapper_registry.c => proxy_mapper_registry.cc} (74%) delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc rename Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/{grpc_ares_ev_driver_posix.c => grpc_ares_ev_driver_posix.cc} (74%) rename Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/{grpc_ares_wrapper.c => grpc_ares_wrapper.cc} (59%) delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc delete mode 100644 
Sources/CgRPC/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver_factory.c delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.cc delete mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.c create mode 100644 Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.cc rename Sources/CgRPC/src/core/ext/filters/client_channel/{uri_parser.c => uri_parser.cc} (79%) rename Sources/CgRPC/src/core/ext/filters/deadline/{deadline_filter.c => deadline_filter.cc} (65%) rename Sources/CgRPC/src/core/ext/filters/http/client/{http_client_filter.c => http_client_filter.cc} (63%) create mode 100644 Sources/CgRPC/src/core/ext/filters/http/client_authority_filter.cc rename Sources/CgRPC/src/core/ext/{census/grpc_filter.h => filters/http/client_authority_filter.h} (51%) rename Sources/CgRPC/src/core/ext/filters/http/{http_filters_plugin.c => http_filters_plugin.cc} (71%) rename Sources/CgRPC/src/core/ext/filters/http/message_compress/{message_compress_filter.c => message_compress_filter.cc} (50%) rename Sources/CgRPC/src/core/ext/filters/http/server/{http_server_filter.c => http_server_filter.cc} (63%) rename Sources/CgRPC/src/core/ext/filters/load_reporting/{server_load_reporting_filter.c => server_load_reporting_filter.cc} (73%) rename Sources/CgRPC/src/core/ext/filters/load_reporting/{server_load_reporting_plugin.c => server_load_reporting_plugin.cc} (80%) rename Sources/CgRPC/src/core/ext/filters/max_age/{max_age_filter.c => max_age_filter.cc} (50%) rename Sources/CgRPC/src/core/ext/filters/message_size/{message_size_filter.c => message_size_filter.cc} (63%) rename Sources/CgRPC/src/core/ext/filters/workarounds/{workaround_cronet_compression_filter.c => workaround_cronet_compression_filter.cc} (79%) rename Sources/CgRPC/src/core/ext/filters/workarounds/{workaround_utils.c => workaround_utils.cc} (67%) rename Sources/CgRPC/src/core/ext/transport/chttp2/alpn/{alpn.c => alpn.cc} (80%) create mode 100644 Sources/CgRPC/src/core/ext/transport/chttp2/client/authority.cc create mode 100644 Sources/CgRPC/src/core/ext/transport/chttp2/client/authority.h rename Sources/CgRPC/src/core/ext/transport/chttp2/client/{chttp2_connector.c => chttp2_connector.cc} (59%) rename Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/{channel_create.c => channel_create.cc} (52%) rename Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/{channel_create_posix.c => channel_create_posix.cc} (58%) delete mode 100644 Sources/CgRPC/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c create mode 100644 Sources/CgRPC/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc rename Sources/CgRPC/src/core/ext/transport/chttp2/server/{chttp2_server.c => chttp2_server.cc} (54%) rename Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/{server_chttp2.c => server_chttp2.cc} (80%) rename 
Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/{server_chttp2_posix.c => server_chttp2_posix.cc} (60%) rename Sources/CgRPC/src/core/ext/transport/chttp2/server/secure/{server_secure_chttp2.c => server_secure_chttp2.cc} (77%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{bin_decoder.c => bin_decoder.cc} (79%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{bin_encoder.c => bin_encoder.cc} (80%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{chttp2_plugin.c => chttp2_plugin.cc} (74%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{chttp2_transport.c => chttp2_transport.cc} (50%) delete mode 100644 Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.c create mode 100644 Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.cc create mode 100644 Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.h rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{frame_data.c => frame_data.cc} (55%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{frame_goaway.c => frame_goaway.cc} (65%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{frame_ping.c => frame_ping.cc} (61%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{frame_rst_stream.c => frame_rst_stream.cc} (62%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{frame_settings.c => frame_settings.cc} (69%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{frame_window_update.c => frame_window_update.cc} (52%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{hpack_encoder.c => hpack_encoder.cc} (63%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{hpack_parser.c => hpack_parser.cc} (65%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{hpack_table.c => hpack_table.cc} (80%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{http2_settings.c => http2_settings.cc} (93%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{huffsyms.c => huffsyms.cc} (99%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{incoming_metadata.c => incoming_metadata.cc} (54%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{parsing.c => parsing.cc} (61%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{stream_lists.c => stream_lists.cc} (67%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{stream_map.c => stream_map.cc} (58%) rename Sources/CgRPC/src/core/ext/transport/chttp2/transport/{varint.c => varint.cc} (78%) delete mode 100644 Sources/CgRPC/src/core/ext/transport/chttp2/transport/writing.c create mode 100644 Sources/CgRPC/src/core/ext/transport/chttp2/transport/writing.cc rename Sources/CgRPC/src/core/ext/transport/inproc/{inproc_plugin.c => inproc_plugin.cc} (80%) delete mode 100644 Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.c create mode 100644 Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.cc rename Sources/CgRPC/src/core/lib/{support/avl.c => avl/avl.cc} (59%) rename Sources/CgRPC/{include/grpc/support => src/core/lib/avl}/avl.h (59%) create mode 100644 Sources/CgRPC/src/core/lib/backoff/backoff.cc create mode 100644 Sources/CgRPC/src/core/lib/backoff/backoff.h rename Sources/CgRPC/src/core/lib/channel/{channel_args.c => channel_args.cc} (54%) rename Sources/CgRPC/src/core/lib/channel/{channel_stack.c => channel_stack.cc} (56%) rename Sources/CgRPC/src/core/lib/channel/{channel_stack_builder.c => channel_stack_builder.cc} (53%) create mode 
100644 Sources/CgRPC/src/core/lib/channel/channel_trace.cc create mode 100644 Sources/CgRPC/src/core/lib/channel/channel_trace.h create mode 100644 Sources/CgRPC/src/core/lib/channel/channel_trace_registry.cc create mode 100644 Sources/CgRPC/src/core/lib/channel/channel_trace_registry.h rename Sources/CgRPC/src/core/lib/channel/{connected_channel.c => connected_channel.cc} (52%) rename Sources/CgRPC/src/core/lib/channel/{handshaker.c => handshaker.cc} (59%) rename Sources/CgRPC/src/core/lib/channel/{handshaker_factory.c => handshaker_factory.cc} (55%) rename Sources/CgRPC/src/core/lib/channel/{handshaker_registry.c => handshaker_registry.cc} (71%) create mode 100644 Sources/CgRPC/src/core/lib/channel/status_util.cc create mode 100644 Sources/CgRPC/src/core/lib/channel/status_util.h delete mode 100644 Sources/CgRPC/src/core/lib/compression/compression.c create mode 100644 Sources/CgRPC/src/core/lib/compression/compression.cc create mode 100644 Sources/CgRPC/src/core/lib/compression/compression_internal.cc create mode 100644 Sources/CgRPC/src/core/lib/compression/compression_internal.h rename Sources/CgRPC/src/core/lib/compression/{message_compress.c => message_compress.cc} (69%) rename Sources/CgRPC/src/core/lib/compression/{stream_compression.c => stream_compression.cc} (77%) rename Sources/CgRPC/src/core/lib/compression/{stream_compression_gzip.c => stream_compression_gzip.cc} (70%) rename Sources/CgRPC/src/core/lib/compression/{stream_compression_identity.c => stream_compression_identity.cc} (71%) rename Sources/CgRPC/src/core/lib/debug/{stats.c => stats.cc} (74%) rename Sources/CgRPC/src/core/lib/debug/{stats_data.c => stats_data.cc} (75%) delete mode 100644 Sources/CgRPC/src/core/lib/debug/trace.c create mode 100644 Sources/CgRPC/src/core/lib/debug/trace.cc rename Sources/CgRPC/src/core/lib/{support/alloc.c => gpr/alloc.cc} (56%) create mode 100644 Sources/CgRPC/src/core/lib/gpr/arena.cc rename Sources/CgRPC/src/core/lib/{support => gpr}/arena.h (78%) rename Sources/CgRPC/src/core/lib/{support/atm.c => gpr/atm.cc} (87%) rename Sources/CgRPC/src/core/lib/{support/cpu_iphone.c => gpr/cpu_iphone.cc} (97%) rename Sources/CgRPC/src/core/lib/{support/cpu_linux.c => gpr/cpu_linux.cc} (76%) rename Sources/CgRPC/src/core/lib/{support/cpu_posix.c => gpr/cpu_posix.cc} (63%) rename Sources/CgRPC/src/core/lib/{support/cpu_windows.c => gpr/cpu_windows.cc} (96%) rename Sources/CgRPC/src/core/lib/{support => gpr}/env.h (74%) rename Sources/CgRPC/src/core/lib/{support/env_linux.c => gpr/env_linux.cc} (74%) rename Sources/CgRPC/src/core/lib/{support/env_posix.c => gpr/env_posix.cc} (72%) rename Sources/CgRPC/src/core/lib/{support/env_windows.c => gpr/env_windows.cc} (81%) rename Sources/CgRPC/src/core/lib/{support/fork.c => gpr/fork.cc} (67%) rename Sources/CgRPC/src/core/lib/{support => gpr}/fork.h (89%) rename Sources/CgRPC/src/core/lib/{support/host_port.c => gpr/host_port.cc} (68%) rename Sources/CgRPC/{include/grpc/support => src/core/lib/gpr}/host_port.h (81%) rename Sources/CgRPC/src/core/lib/{support/log.c => gpr/log.cc} (71%) rename Sources/CgRPC/src/core/lib/{support/log_android.c => gpr/log_android.cc} (82%) rename Sources/CgRPC/src/core/lib/{support/log_linux.c => gpr/log_linux.cc} (82%) rename Sources/CgRPC/src/core/lib/{support/log_posix.c => gpr/log_posix.cc} (80%) rename Sources/CgRPC/src/core/lib/{support/log_windows.c => gpr/log_windows.cc} (83%) create mode 100644 Sources/CgRPC/src/core/lib/gpr/mpscq.cc create mode 100644 Sources/CgRPC/src/core/lib/gpr/mpscq.h rename 
Sources/CgRPC/src/core/lib/{support/murmur_hash.c => gpr/murmur_hash.cc} (71%) rename Sources/CgRPC/src/core/lib/{support => gpr}/murmur_hash.h (78%) rename Sources/CgRPC/src/core/lib/{support => gpr}/spinlock.h (79%) rename Sources/CgRPC/src/core/lib/{support/string.c => gpr/string.cc} (61%) rename Sources/CgRPC/src/core/lib/{support => gpr}/string.h (69%) rename Sources/CgRPC/src/core/lib/{support/string_posix.c => gpr/string_posix.cc} (83%) rename Sources/CgRPC/src/core/lib/{support/string_util_windows.c => gpr/string_util_windows.cc} (81%) rename Sources/CgRPC/src/core/lib/{support/string_windows.c => gpr/string_windows.cc} (88%) rename Sources/CgRPC/src/core/lib/{support => gpr}/string_windows.h (85%) rename Sources/CgRPC/src/core/lib/{support/sync.c => gpr/sync.cc} (68%) rename Sources/CgRPC/src/core/lib/{support/sync_posix.c => gpr/sync_posix.cc} (77%) rename Sources/CgRPC/src/core/lib/{support/sync_windows.c => gpr/sync_windows.cc} (79%) rename Sources/CgRPC/src/core/lib/{support/time.c => gpr/time.cc} (94%) rename Sources/CgRPC/src/core/lib/{support/time_posix.c => gpr/time_posix.cc} (91%) rename Sources/CgRPC/src/core/lib/{support/time_precise.c => gpr/time_precise.cc} (87%) rename Sources/CgRPC/src/core/lib/{support => gpr}/time_precise.h (75%) rename Sources/CgRPC/src/core/lib/{support/time_windows.c => gpr/time_windows.cc} (93%) rename Sources/CgRPC/{include/grpc/support => src/core/lib/gpr}/tls.h (79%) rename Sources/CgRPC/{include/grpc/support => src/core/lib/gpr}/tls_gcc.h (51%) rename Sources/CgRPC/{include/grpc/support => src/core/lib/gpr}/tls_msvc.h (66%) rename Sources/CgRPC/src/core/lib/{support/tls_pthread.c => gpr/tls_pthread.cc} (81%) rename Sources/CgRPC/{include/grpc/support => src/core/lib/gpr}/tls_pthread.h (62%) rename Sources/CgRPC/src/core/lib/{support => gpr}/tmpfile.h (77%) rename Sources/CgRPC/src/core/lib/{support/tmpfile_msys.c => gpr/tmpfile_msys.cc} (88%) rename Sources/CgRPC/src/core/lib/{support/tmpfile_posix.c => gpr/tmpfile_posix.cc} (80%) rename Sources/CgRPC/src/core/lib/{support/tmpfile_windows.c => gpr/tmpfile_windows.cc} (90%) rename Sources/CgRPC/{include/grpc/support => src/core/lib/gpr}/useful.h (95%) rename Sources/CgRPC/src/core/lib/{support/wrap_memcpy.c => gpr/wrap_memcpy.cc} (88%) create mode 100644 Sources/CgRPC/src/core/lib/gprpp/abstract.h rename Sources/CgRPC/src/core/lib/{support => gprpp}/atomic.h (75%) rename Sources/CgRPC/src/core/lib/{support => gprpp}/atomic_with_atm.h (88%) rename Sources/CgRPC/src/core/lib/{support => gprpp}/atomic_with_std.h (80%) create mode 100644 Sources/CgRPC/src/core/lib/gprpp/debug_location.h create mode 100644 Sources/CgRPC/src/core/lib/gprpp/inlined_vector.h create mode 100644 Sources/CgRPC/src/core/lib/gprpp/manual_constructor.h create mode 100644 Sources/CgRPC/src/core/lib/gprpp/memory.h create mode 100644 Sources/CgRPC/src/core/lib/gprpp/orphanable.h create mode 100644 Sources/CgRPC/src/core/lib/gprpp/ref_counted.h create mode 100644 Sources/CgRPC/src/core/lib/gprpp/ref_counted_ptr.h create mode 100644 Sources/CgRPC/src/core/lib/gprpp/thd.h create mode 100644 Sources/CgRPC/src/core/lib/gprpp/thd_posix.cc create mode 100644 Sources/CgRPC/src/core/lib/gprpp/thd_windows.cc rename Sources/CgRPC/src/core/lib/http/{format_request.c => format_request.cc} (83%) delete mode 100644 Sources/CgRPC/src/core/lib/http/httpcli.c create mode 100644 Sources/CgRPC/src/core/lib/http/httpcli.cc delete mode 100644 Sources/CgRPC/src/core/lib/http/httpcli_security_connector.c create mode 100644 
Sources/CgRPC/src/core/lib/http/httpcli_security_connector.cc rename Sources/CgRPC/src/core/lib/http/{parser.c => parser.cc} (77%) rename Sources/CgRPC/src/core/lib/{support => iomgr}/block_annotate.h (60%) rename Sources/CgRPC/src/core/lib/iomgr/{call_combiner.c => call_combiner.cc} (61%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/closure.c rename Sources/CgRPC/src/core/lib/iomgr/{combiner.c => combiner.cc} (53%) rename Sources/CgRPC/src/core/lib/iomgr/{endpoint.c => endpoint.cc} (50%) rename Sources/CgRPC/src/core/lib/iomgr/{endpoint_pair_posix.c => endpoint_pair_posix.cc} (83%) rename Sources/CgRPC/src/core/lib/iomgr/{endpoint_pair_uv.c => endpoint_pair_uv.cc} (89%) rename Sources/CgRPC/src/core/lib/iomgr/{endpoint_pair_windows.c => endpoint_pair_windows.cc} (75%) rename Sources/CgRPC/src/core/lib/iomgr/{error.c => error.cc} (68%) rename Sources/CgRPC/src/core/lib/iomgr/{ev_epoll1_linux.c => ev_epoll1_linux.cc} (63%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.cc rename Sources/CgRPC/src/core/lib/iomgr/{ev_epollsig_linux.c => ev_epollsig_linux.cc} (70%) rename Sources/CgRPC/src/core/lib/iomgr/{ev_poll_posix.c => ev_poll_posix.cc} (67%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/ev_posix.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/ev_posix.cc rename Sources/CgRPC/src/core/lib/iomgr/{ev_windows.c => ev_windows.cc} (81%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/exec_ctx.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/exec_ctx.cc rename Sources/CgRPC/src/core/lib/iomgr/{executor.c => executor.cc} (57%) rename Sources/CgRPC/src/core/lib/iomgr/{fork_posix.c => fork_posix.cc} (76%) rename Sources/CgRPC/src/core/lib/iomgr/{fork_windows.c => fork_windows.cc} (96%) rename Sources/CgRPC/src/core/lib/iomgr/{gethostname_fallback.c => gethostname_fallback.cc} (85%) rename Sources/CgRPC/src/core/lib/iomgr/{gethostname_host_name_max.c => gethostname_host_name_max.cc} (81%) rename Sources/CgRPC/src/core/lib/iomgr/{gethostname_sysconf.c => gethostname_sysconf.cc} (83%) rename Sources/CgRPC/src/core/lib/iomgr/{iocp_windows.c => iocp_windows.cc} (66%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr_custom.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr_custom.h create mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr_internal.cc delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.cc delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.cc delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.h rename Sources/CgRPC/src/core/lib/iomgr/{iomgr_windows.c => iomgr_windows.cc} (54%) rename Sources/CgRPC/src/core/lib/iomgr/{is_epollexclusive_available.c => is_epollexclusive_available.cc} (90%) rename Sources/CgRPC/src/core/lib/iomgr/{load_file.c => load_file.cc} (75%) rename Sources/CgRPC/src/core/lib/iomgr/{lockfree_event.c => lockfree_event.cc} (56%) rename Sources/CgRPC/src/core/lib/iomgr/{network_status_tracker.c => network_status_tracker.cc} (80%) rename Sources/CgRPC/src/core/lib/iomgr/{polling_entity.c => polling_entity.cc} (57%) create mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_custom.cc create mode 
100644 Sources/CgRPC/src/core/lib/iomgr/pollset_custom.h create mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_set.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_set_custom.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_set_custom.h delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_set_uv.c delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.cc delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_uv.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_uv.cc delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/pollset_uv.h rename Sources/CgRPC/src/core/lib/iomgr/{pollset_windows.c => pollset_windows.cc} (72%) create mode 100644 Sources/CgRPC/src/core/lib/iomgr/resolve_address.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/resolve_address_custom.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/resolve_address_custom.h rename Sources/CgRPC/src/core/lib/iomgr/{resolve_address_posix.c => resolve_address_posix.cc} (63%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/resolve_address_uv.c rename Sources/CgRPC/src/core/lib/iomgr/{resolve_address_windows.c => resolve_address_windows.cc} (65%) rename Sources/CgRPC/src/core/lib/iomgr/{resource_quota.c => resource_quota.cc} (60%) create mode 100644 Sources/CgRPC/src/core/lib/iomgr/sockaddr_custom.h delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.cc rename Sources/CgRPC/src/core/lib/iomgr/{socket_factory_posix.c => socket_factory_posix.cc} (56%) rename Sources/CgRPC/src/core/lib/iomgr/{socket_mutator.c => socket_mutator.cc} (55%) rename Sources/CgRPC/src/core/lib/iomgr/{socket_utils_common_posix.c => socket_utils_common_posix.cc} (73%) rename Sources/CgRPC/src/core/lib/iomgr/{socket_utils_linux.c => socket_utils_linux.cc} (77%) rename Sources/CgRPC/src/core/lib/iomgr/{socket_utils_posix.c => socket_utils_posix.cc} (83%) rename Sources/CgRPC/src/core/lib/iomgr/{socket_utils_uv.c => socket_utils_uv.cc} (68%) rename Sources/CgRPC/src/core/lib/iomgr/{socket_utils_windows.c => socket_utils_windows.cc} (70%) rename Sources/CgRPC/src/core/lib/iomgr/{socket_windows.c => socket_windows.cc} (73%) create mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_client.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_client_custom.cc rename Sources/CgRPC/src/core/lib/iomgr/{tcp_client_posix.c => tcp_client_posix.cc} (51%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_client_uv.c rename Sources/CgRPC/src/core/lib/iomgr/{tcp_client_windows.c => tcp_client_windows.cc} (64%) create mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_custom.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_custom.h delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_posix.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_posix.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_server.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_server_custom.cc rename Sources/CgRPC/src/core/lib/iomgr/{tcp_server_posix.c => tcp_server_posix.cc} (68%) rename Sources/CgRPC/src/core/lib/iomgr/{tcp_server_utils_posix_common.c => tcp_server_utils_posix_common.cc} (80%) rename Sources/CgRPC/src/core/lib/iomgr/{tcp_server_utils_posix_ifaddrs.c => tcp_server_utils_posix_ifaddrs.cc} (79%) rename Sources/CgRPC/src/core/lib/iomgr/{tcp_server_utils_posix_noifaddrs.c => tcp_server_utils_posix_noifaddrs.cc} 
(86%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_server_uv.c rename Sources/CgRPC/src/core/lib/iomgr/{tcp_server_windows.c => tcp_server_windows.cc} (71%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_uv.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_uv.cc delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/tcp_uv.h rename Sources/CgRPC/src/core/lib/iomgr/{tcp_windows.c => tcp_windows.cc} (63%) rename Sources/CgRPC/src/core/lib/iomgr/{time_averaged_stats.c => time_averaged_stats.cc} (98%) create mode 100644 Sources/CgRPC/src/core/lib/iomgr/timer.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/timer_custom.cc create mode 100644 Sources/CgRPC/src/core/lib/iomgr/timer_custom.h rename Sources/CgRPC/src/core/lib/iomgr/{timer_generic.c => timer_generic.cc} (62%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/timer_generic.h rename Sources/CgRPC/src/core/lib/iomgr/{timer_heap.c => timer_heap.cc} (68%) rename Sources/CgRPC/src/core/lib/iomgr/{timer_manager.c => timer_manager.cc} (66%) delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/timer_uv.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/timer_uv.cc delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/timer_uv.h delete mode 100644 Sources/CgRPC/src/core/lib/iomgr/udp_server.c create mode 100644 Sources/CgRPC/src/core/lib/iomgr/udp_server.cc rename Sources/CgRPC/src/core/lib/iomgr/{unix_sockets_posix.c => unix_sockets_posix.cc} (54%) rename Sources/CgRPC/src/core/lib/iomgr/{unix_sockets_posix_noop.c => unix_sockets_posix_noop.cc} (79%) rename Sources/CgRPC/src/core/lib/iomgr/{wakeup_fd_cv.c => wakeup_fd_cv.cc} (86%) rename Sources/CgRPC/src/core/lib/iomgr/{wakeup_fd_eventfd.c => wakeup_fd_eventfd.cc} (96%) rename Sources/CgRPC/src/core/lib/iomgr/{wakeup_fd_nospecial.c => wakeup_fd_nospecial.cc} (90%) rename Sources/CgRPC/src/core/lib/iomgr/{wakeup_fd_pipe.c => wakeup_fd_pipe.cc} (96%) rename Sources/CgRPC/src/core/lib/iomgr/{wakeup_fd_posix.c => wakeup_fd_posix.cc} (83%) delete mode 100644 Sources/CgRPC/src/core/lib/json/json.c create mode 100644 Sources/CgRPC/src/core/lib/json/json.cc rename Sources/CgRPC/src/core/lib/json/{json_reader.c => json_reader.cc} (93%) rename Sources/CgRPC/src/core/lib/json/{json_string.c => json_string.cc} (73%) rename Sources/CgRPC/src/core/lib/json/{json_writer.c => json_writer.cc} (77%) rename Sources/CgRPC/src/core/lib/profiling/{basic_timers.c => basic_timers.cc} (75%) rename Sources/CgRPC/src/core/lib/profiling/{stap_timers.c => stap_timers.cc} (75%) rename Sources/CgRPC/src/core/lib/security/context/{security_context.c => security_context.cc} (56%) create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/alts_credentials.cc create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/alts_credentials.h create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment.cc create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment.h create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc create mode 100644 
Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc rename Sources/CgRPC/src/core/lib/security/credentials/composite/{composite_credentials.c => composite_credentials.cc} (55%) delete mode 100644 Sources/CgRPC/src/core/lib/security/credentials/credentials.c create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/credentials.cc rename Sources/CgRPC/src/core/lib/security/credentials/{credentials_metadata.c => credentials_metadata.cc} (86%) delete mode 100644 Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.c create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.cc rename Sources/CgRPC/src/core/lib/security/credentials/google_default/{credentials_generic.c => credentials_generic.cc} (76%) rename Sources/CgRPC/src/core/lib/security/credentials/google_default/{google_default_credentials.c => google_default_credentials.cc} (66%) rename Sources/CgRPC/src/core/lib/security/credentials/iam/{iam_credentials.c => iam_credentials.cc} (62%) rename Sources/CgRPC/src/core/lib/security/credentials/jwt/{json_token.c => json_token.cc} (65%) rename Sources/CgRPC/src/core/lib/security/credentials/jwt/{jwt_credentials.c => jwt_credentials.cc} (64%) rename Sources/CgRPC/src/core/lib/security/credentials/jwt/{jwt_verifier.c => jwt_verifier.cc} (57%) rename Sources/CgRPC/src/core/lib/security/credentials/oauth2/{oauth2_credentials.c => oauth2_credentials.cc} (57%) rename Sources/CgRPC/src/core/lib/security/credentials/plugin/{plugin_credentials.c => plugin_credentials.cc} (64%) delete mode 100644 Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.c create mode 100644 Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.cc create mode 100644 Sources/CgRPC/src/core/lib/security/security_connector/alts_security_connector.cc create mode 100644 Sources/CgRPC/src/core/lib/security/security_connector/alts_security_connector.h create mode 100644 Sources/CgRPC/src/core/lib/security/security_connector/security_connector.cc create mode 100644 Sources/CgRPC/src/core/lib/security/security_connector/security_connector.h delete mode 100644 Sources/CgRPC/src/core/lib/security/transport/client_auth_filter.c create mode 100644 Sources/CgRPC/src/core/lib/security/transport/client_auth_filter.cc delete mode 100644 Sources/CgRPC/src/core/lib/security/transport/lb_targets_info.c delete mode 100644 Sources/CgRPC/src/core/lib/security/transport/lb_targets_info.h rename Sources/CgRPC/src/core/lib/security/transport/{secure_endpoint.c => secure_endpoint.cc} (56%) delete mode 100644 Sources/CgRPC/src/core/lib/security/transport/security_connector.c delete mode 100644 Sources/CgRPC/src/core/lib/security/transport/security_connector.h rename Sources/CgRPC/src/core/lib/security/transport/{security_handshaker.c => security_handshaker.cc} (52%) rename Sources/CgRPC/src/core/lib/security/transport/{server_auth_filter.c => server_auth_filter.cc} (56%) create mode 100644 Sources/CgRPC/src/core/lib/security/transport/target_authority_table.cc create mode 100644 Sources/CgRPC/src/core/lib/security/transport/target_authority_table.h rename Sources/CgRPC/src/core/lib/security/transport/{tsi_error.c => tsi_error.cc} (89%) rename Sources/CgRPC/src/core/lib/security/util/{json_util.c => json_util.cc} (60%) rename Sources/CgRPC/src/core/lib/slice/{b64.c => b64.cc} (73%) rename 
Sources/CgRPC/src/core/lib/slice/{percent_encoding.c => percent_encoding.cc} (78%) rename Sources/CgRPC/src/core/lib/slice/{slice.c => slice.cc} (71%) rename Sources/CgRPC/src/core/lib/slice/{slice_buffer.c => slice_buffer.cc} (72%) delete mode 100644 Sources/CgRPC/src/core/lib/slice/slice_hash_table.c rename Sources/CgRPC/src/core/lib/slice/{slice_intern.c => slice_intern.cc} (77%) delete mode 100644 Sources/CgRPC/src/core/lib/slice/slice_string_helpers.c create mode 100644 Sources/CgRPC/src/core/lib/slice/slice_string_helpers.cc create mode 100644 Sources/CgRPC/src/core/lib/slice/slice_weak_hash_table.h delete mode 100644 Sources/CgRPC/src/core/lib/support/arena.c delete mode 100644 Sources/CgRPC/src/core/lib/support/backoff.c delete mode 100644 Sources/CgRPC/src/core/lib/support/backoff.h delete mode 100644 Sources/CgRPC/src/core/lib/support/cmdline.c delete mode 100644 Sources/CgRPC/src/core/lib/support/histogram.c delete mode 100644 Sources/CgRPC/src/core/lib/support/memory.h delete mode 100644 Sources/CgRPC/src/core/lib/support/mpscq.c delete mode 100644 Sources/CgRPC/src/core/lib/support/mpscq.h delete mode 100644 Sources/CgRPC/src/core/lib/support/stack_lockfree.c delete mode 100644 Sources/CgRPC/src/core/lib/support/stack_lockfree.h delete mode 100644 Sources/CgRPC/src/core/lib/support/subprocess_posix.c delete mode 100644 Sources/CgRPC/src/core/lib/support/subprocess_windows.c delete mode 100644 Sources/CgRPC/src/core/lib/support/thd.c delete mode 100644 Sources/CgRPC/src/core/lib/support/thd_internal.h delete mode 100644 Sources/CgRPC/src/core/lib/support/thd_posix.c delete mode 100644 Sources/CgRPC/src/core/lib/support/thd_windows.c delete mode 100644 Sources/CgRPC/src/core/lib/surface/alarm.c delete mode 100644 Sources/CgRPC/src/core/lib/surface/alarm_internal.h rename Sources/CgRPC/src/core/lib/surface/{api_trace.c => api_trace.cc} (88%) rename Sources/CgRPC/src/core/lib/surface/{byte_buffer.c => byte_buffer.cc} (70%) rename Sources/CgRPC/src/core/lib/surface/{byte_buffer_reader.c => byte_buffer_reader.cc} (75%) rename Sources/CgRPC/src/core/lib/surface/{call.c => call.cc} (54%) rename Sources/CgRPC/src/core/lib/surface/{call_details.c => call_details.cc} (86%) rename Sources/CgRPC/src/core/lib/surface/{call_log_batch.c => call_log_batch.cc} (86%) delete mode 100644 Sources/CgRPC/src/core/lib/surface/channel.c create mode 100644 Sources/CgRPC/src/core/lib/surface/channel.cc rename Sources/CgRPC/src/core/lib/surface/{channel_init.c => channel_init.cc} (74%) rename Sources/CgRPC/src/core/lib/surface/{channel_ping.c => channel_ping.cc} (60%) rename Sources/CgRPC/src/core/lib/surface/{channel_stack_type.c => channel_stack_type.cc} (96%) delete mode 100644 Sources/CgRPC/src/core/lib/surface/completion_queue.c create mode 100644 Sources/CgRPC/src/core/lib/surface/completion_queue.cc rename Sources/CgRPC/src/core/lib/surface/{completion_queue_factory.c => completion_queue_factory.cc} (96%) rename Sources/CgRPC/src/core/lib/surface/{event_string.c => event_string.cc} (76%) rename Sources/CgRPC/src/core/lib/surface/{init.c => init.cc} (62%) rename Sources/CgRPC/src/core/lib/surface/{init_secure.c => init_secure.cc} (72%) rename Sources/CgRPC/src/core/lib/surface/{metadata_array.c => metadata_array.cc} (96%) rename Sources/CgRPC/src/core/lib/surface/{server.c => server.cc} (53%) rename Sources/CgRPC/src/core/lib/surface/{validate_metadata.c => validate_metadata.cc} (83%) rename Sources/CgRPC/src/core/lib/surface/{version.c => version.cc} (81%) delete mode 100644 
Sources/CgRPC/src/core/lib/transport/bdp_estimator.c create mode 100644 Sources/CgRPC/src/core/lib/transport/bdp_estimator.cc delete mode 100644 Sources/CgRPC/src/core/lib/transport/byte_stream.c create mode 100644 Sources/CgRPC/src/core/lib/transport/byte_stream.cc rename Sources/CgRPC/src/core/lib/transport/{connectivity_state.c => connectivity_state.cc} (52%) rename Sources/CgRPC/src/core/lib/transport/{error_utils.c => error_utils.cc} (64%) rename Sources/CgRPC/src/core/lib/transport/{metadata.c => metadata.cc} (68%) delete mode 100644 Sources/CgRPC/src/core/lib/transport/metadata_batch.c create mode 100644 Sources/CgRPC/src/core/lib/transport/metadata_batch.cc delete mode 100644 Sources/CgRPC/src/core/lib/transport/pid_controller.c create mode 100644 Sources/CgRPC/src/core/lib/transport/pid_controller.cc delete mode 100644 Sources/CgRPC/src/core/lib/transport/service_config.c create mode 100644 Sources/CgRPC/src/core/lib/transport/service_config.cc delete mode 100644 Sources/CgRPC/src/core/lib/transport/static_metadata.c create mode 100644 Sources/CgRPC/src/core/lib/transport/static_metadata.cc rename Sources/CgRPC/src/core/lib/transport/{status_conversion.c => status_conversion.cc} (94%) create mode 100644 Sources/CgRPC/src/core/lib/transport/status_metadata.cc create mode 100644 Sources/CgRPC/src/core/lib/transport/status_metadata.h rename Sources/CgRPC/src/core/lib/transport/{timeout_encoding.c => timeout_encoding.cc} (51%) delete mode 100644 Sources/CgRPC/src/core/lib/transport/transport.c create mode 100644 Sources/CgRPC/src/core/lib/transport/transport.cc rename Sources/CgRPC/src/core/lib/transport/{transport_op_string.c => transport_op_string.cc} (71%) rename Sources/CgRPC/src/core/plugin_registry/{grpc_plugin_registry.c => grpc_plugin_registry.cc} (55%) create mode 100644 Sources/CgRPC/src/core/tsi/alts/crypt/aes_gcm.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/crypt/gsec.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/crypt/gsec.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_counter.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_counter.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_crypter.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_crypter.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_frame_protector.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_frame_protector.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/frame_handler.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/frame_protector/frame_handler.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_client.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_client.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.cc 
create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_event.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_event.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_utils.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_utils.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/altscontext.pb.c create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/altscontext.pb.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/handshaker.pb.c create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/handshaker.pb.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common.pb.c create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common.pb.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common_api.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common_api.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h create mode 100644 Sources/CgRPC/src/core/tsi/alts_transport_security.cc create mode 100644 Sources/CgRPC/src/core/tsi/alts_transport_security.h rename Sources/CgRPC/src/core/tsi/{fake_transport_security.c => fake_transport_security.cc} (69%) delete mode 100644 Sources/CgRPC/src/core/tsi/gts_transport_security.c create mode 100644 Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session.h create mode 100644 Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc create mode 100644 Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_cache.cc create mode 100644 Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_cache.h create mode 100644 Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc rename Sources/CgRPC/src/core/tsi/{ssl_transport_security.c => ssl_transport_security.cc} (56%) rename Sources/CgRPC/src/core/tsi/{transport_security.c => transport_security.cc} (57%) rename 
Sources/CgRPC/src/core/tsi/{transport_security_adapter.c => transport_security_adapter.cc} (62%) delete mode 100644 Sources/CgRPC/src/core/tsi/transport_security_grpc.c create mode 100644 Sources/CgRPC/src/core/tsi/transport_security_grpc.cc delete mode 100644 fix-indentation-settings.rb create mode 100644 fix-project-settings.rb diff --git a/Assets/roots.pem b/Assets/roots.pem index cd6a0c248..5dbd1ae6e 100644 --- a/Assets/roots.pem +++ b/Assets/roots.pem @@ -359,33 +359,6 @@ LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd 398znM/jra6O1I7mT1GvFpLgXPYHDw== -----END CERTIFICATE----- -# Issuer: CN=Certum CA O=Unizeto Sp. z o.o. -# Subject: CN=Certum CA O=Unizeto Sp. z o.o. -# Label: "Certum Root CA" -# Serial: 65568 -# MD5 Fingerprint: 2c:8f:9f:66:1d:18:90:b1:47:26:9d:8e:86:82:8c:a9 -# SHA1 Fingerprint: 62:52:dc:40:f7:11:43:a2:2f:de:9e:f7:34:8e:06:42:51:b1:81:18 -# SHA256 Fingerprint: d8:e0:fe:bc:1d:b2:e3:8d:00:94:0f:37:d2:7d:41:34:4d:99:3e:73:4b:99:d5:65:6d:97:78:d4:d8:14:36:24 ------BEGIN CERTIFICATE----- -MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM -MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD -QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM -MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD -QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E -jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo -ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI -ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu -Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg -AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7 -HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA -uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa -TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg -xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q -CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x -O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs -6GAqm4VKQPNriiTsBhYscw== ------END CERTIFICATE----- - # Issuer: CN=AAA Certificate Services O=Comodo CA Limited # Subject: CN=AAA Certificate Services O=Comodo CA Limited # Label: "Comodo AAA Services root" @@ -603,78 +576,6 @@ Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M -----END CERTIFICATE----- -# Issuer: CN=Chambers of Commerce Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org -# Subject: CN=Chambers of Commerce Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org -# Label: "Camerfirma Chambers of Commerce Root" -# Serial: 0 -# MD5 Fingerprint: b0:01:ee:14:d9:af:29:18:94:76:8e:f1:69:33:2a:84 -# SHA1 Fingerprint: 6e:3a:55:a4:19:0c:19:5c:93:84:3c:c0:db:72:2e:31:30:61:f0:b1 -# SHA256 Fingerprint: 0c:25:8a:12:a5:67:4a:ef:25:f2:8b:a7:dc:fa:ec:ee:a3:48:e5:41:e6:f5:cc:4e:e6:3b:71:b3:61:60:6a:c3 ------BEGIN CERTIFICATE----- -MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEn -MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL -ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMg -b2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAxNjEzNDNaFw0zNzA5MzAxNjEzNDRa -MH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZpcm1hIFNBIENJRiBB -ODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3JnMSIw -IAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0B 
-AQEFAAOCAQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtb -unXF/KGIJPov7coISjlUxFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0d -BmpAPrMMhe5cG3nCYsS4No41XQEMIwRHNaqbYE6gZj3LJgqcQKH0XZi/caulAGgq -7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jWDA+wWFjbw2Y3npuRVDM3 -0pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFVd9oKDMyX -roDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIG -A1UdEwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5j -aGFtYmVyc2lnbi5vcmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p -26EpW1eLTXYGduHRooowDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIA -BzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hhbWJlcnNpZ24ub3JnMCcGA1Ud -EgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYDVR0gBFEwTzBN -BgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz -aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEB -AAxBl8IahsAifJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZd -p0AJPaxJRUXcLo0waLIJuvvDL8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi -1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wNUPf6s+xCX6ndbcj0dc97wXImsQEc -XCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/nADydb47kMgkdTXg0 -eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1erfu -tGWaIZDgqtCYvDi1czyL+Nw= ------END CERTIFICATE----- - -# Issuer: CN=Global Chambersign Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org -# Subject: CN=Global Chambersign Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org -# Label: "Camerfirma Global Chambersign Root" -# Serial: 0 -# MD5 Fingerprint: c5:e6:7b:bf:06:d0:4f:43:ed:c4:7a:65:8a:fb:6b:19 -# SHA1 Fingerprint: 33:9b:6b:14:50:24:9b:55:7a:01:87:72:84:d9:e0:2f:c3:d2:d8:e9 -# SHA256 Fingerprint: ef:3c:b4:17:fc:8e:bf:6f:97:87:6c:9e:4e:ce:39:de:1e:a5:fe:64:91:41:d1:02:8b:7d:11:c0:b2:29:8c:ed ------BEGIN CERTIFICATE----- -MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEn -MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL -ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENo -YW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYxNDE4WhcNMzcwOTMwMTYxNDE4WjB9 -MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgy -NzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4G -A1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUA -A4IBDQAwggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0 -Mi+ITaFgCPS3CU6gSS9J1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/s -QJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8Oby4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpV -eAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl6DJWk0aJqCWKZQbua795 -B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c8lCrEqWh -z0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0T -AQH/BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1i -ZXJzaWduLm9yZy9jaGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4w -TcbOX60Qq+UDpfqpFDAOBgNVHQ8BAf8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAH -MCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBjaGFtYmVyc2lnbi5vcmcwKgYD -VR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9yZzBbBgNVHSAE -VDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh -bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0B -AQUFAAOCAQEAPDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUM -bKGKfKX0j//U2K0X1S0E0T9YgOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXi -ryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJPJ7oKXqJ1/6v/2j1pReQvayZzKWG -VwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4IBHNfTIzSJRUTN3c -ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/ -AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== ------END CERTIFICATE----- - # Issuer: CN=XRamp Global 
Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com # Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com # Label: "XRamp Global CA Root" @@ -772,58 +673,6 @@ VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= -----END CERTIFICATE----- -# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Label: "StartCom Certification Authority" -# Serial: 1 -# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16 -# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f -# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea ------BEGIN CERTIFICATE----- -MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg -Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9 -MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi -U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh -cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk -pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf -OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C -Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT -Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi -HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM -Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w -+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ -Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 -Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B -26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID -AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE -FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j -ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js -LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM -BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0 -Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy -dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh -cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh -YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg -dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp -bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ -YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT -TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ -9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8 -jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW -FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz -ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1 -ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L -EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu -L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq -yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC 
-O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V -um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh -NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14= ------END CERTIFICATE----- - # Issuer: O=Government Root Certification Authority # Subject: O=Government Root Certification Authority # Label: "Taiwan GRCA" @@ -1013,38 +862,6 @@ JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ -----END CERTIFICATE----- -# Issuer: CN=DST ACES CA X6 O=Digital Signature Trust OU=DST ACES -# Subject: CN=DST ACES CA X6 O=Digital Signature Trust OU=DST ACES -# Label: "DST ACES CA X6" -# Serial: 17771143917277623872238992636097467865 -# MD5 Fingerprint: 21:d8:4c:82:2b:99:09:33:a2:eb:14:24:8d:8e:5f:e8 -# SHA1 Fingerprint: 40:54:da:6f:1c:3f:40:74:ac:ed:0f:ec:cd:db:79:d1:53:fb:90:1d -# SHA256 Fingerprint: 76:7c:95:5a:76:41:2c:89:af:68:8e:90:a1:c7:0f:55:6c:fd:6b:60:25:db:ea:10:41:6d:7e:b6:83:1f:8c:40 ------BEGIN CERTIFICATE----- -MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBb -MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3Qx -ETAPBgNVBAsTCERTVCBBQ0VTMRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0w -MzExMjAyMTE5NThaFw0xNzExMjAyMTE5NThaMFsxCzAJBgNVBAYTAlVTMSAwHgYD -VQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UECxMIRFNUIEFDRVMx -FzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPu -ktKe1jzIDZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7 -gLFViYsx+tC3dr5BPTCapCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZH -fAjIgrrep4c9oW24MFbCswKBXy314powGCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4a -ahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPyMjwmR/onJALJfh1biEIT -ajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1UdEwEB/wQF -MAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rk -c3QuY29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjto -dHRwOi8vd3d3LnRydXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMt -aW5kZXguaHRtbDAdBgNVHQ4EFgQUCXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZI -hvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V25FYrnJmQ6AgwbN99Pe7lv7Uk -QIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6tFr8hlxCBPeP/ -h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq -nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpR -rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2 -9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis= ------END CERTIFICATE----- - # Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG # Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG # Label: "SwissSign Gold CA - G2" @@ -1373,35 +1190,6 @@ fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= -----END CERTIFICATE----- -# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication EV RootCA1 -# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication EV RootCA1 -# Label: "Security Communication EV RootCA1" -# Serial: 0 -# MD5 Fingerprint: 22:2d:a6:01:ea:7c:0a:f7:f0:6c:56:43:3f:77:76:d3 -# SHA1 Fingerprint: fe:b8:c4:32:dc:f9:76:9a:ce:ae:3d:d8:90:8f:fd:28:86:65:64:7d -# SHA256 Fingerprint: a2:2d:ba:68:1e:97:37:6e:2d:39:7d:72:8a:ae:3a:9b:62:96:b9:fd:ba:60:bc:2e:11:f6:47:f2:c6:75:fb:37 ------BEGIN CERTIFICATE----- -MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDEl -MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMh -U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIz -MloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09N -IFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNlY3VyaXR5IENvbW11 -bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSE -RMqm4miO/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gO -zXppFodEtZDkBp2uoQSXWHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5 -bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4zZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDF -MxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4bepJz11sS6/vmsJWXMY1 -VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK9U2vP9eC -OKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G -CSqGSIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HW -tWS3irO4G8za+6xmiEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZ -q51ihPZRwSzJIxXYKLerJRO1RuGGAv8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDb -EJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnWmHyojf6GPgcWkuF75x3sM3Z+ -Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEWT1MKZPlO9L9O -VL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 ------END CERTIFICATE----- - # Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed # Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed # Label: "OISTE WISeKey Global Root GA CA" @@ -1565,44 +1353,6 @@ W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D hNQ+IIX3Sj0rnP0qCglN6oH4EZw= -----END CERTIFICATE----- -# Issuer: CN=TÜBİTAK UEKAE Kök Sertifika Hizmet Sağlayıcısı - Sürüm 3 O=Türkiye Bilimsel ve Teknolojik Araştırma Kurumu - TÜBİTAK OU=Ulusal Elektronik ve Kriptoloji Araştırma Enstitüsü - UEKAE/Kamu Sertifikasyon Merkezi -# Subject: CN=TÜBİTAK UEKAE Kök Sertifika Hizmet Sağlayıcısı - Sürüm 3 O=Türkiye Bilimsel ve Teknolojik Araştırma Kurumu - TÜBİTAK OU=Ulusal Elektronik ve Kriptoloji Araştırma Enstitüsü - UEKAE/Kamu Sertifikasyon Merkezi -# Label: "T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3" -# Serial: 17 -# MD5 Fingerprint: ed:41:f5:8c:50:c5:2b:9c:73:e6:ee:6c:eb:c2:a8:26 -# SHA1 Fingerprint: 1b:4b:39:61:26:27:6b:64:91:a2:68:6d:d7:02:43:21:2d:1f:1d:96 -# SHA256 Fingerprint: e4:c7:34:30:d7:a5:b5:09:25:df:43:37:0a:0d:21:6e:9a:79:b9:d6:db:83:73:a0:c6:9e:b1:cc:31:c7:c5:2a ------BEGIN CERTIFICATE----- -MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS -MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp -bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw -VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy -YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy -dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2 -ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe -Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx -GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls 
-aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU -QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh -xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0 -aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr -IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB -IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h -gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK -O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO -fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw -lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL -hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID -AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP -NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t -wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM -7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh -gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n -oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs -yZyQ2uypQjyttgI= ------END CERTIFICATE----- - # Issuer: O=certSIGN OU=certSIGN ROOT CA # Subject: O=certSIGN OU=certSIGN ROOT CA # Label: "certSIGN ROOT CA" @@ -1940,47 +1690,6 @@ pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN QSdJQO7e5iNEOdyhIta6A/I= -----END CERTIFICATE----- -# Issuer: CN=ACEDICOM Root O=EDICOM OU=PKI -# Subject: CN=ACEDICOM Root O=EDICOM OU=PKI -# Label: "ACEDICOM Root" -# Serial: 7029493972724711941 -# MD5 Fingerprint: 42:81:a0:e2:1c:e3:55:10:de:55:89:42:65:96:22:e6 -# SHA1 Fingerprint: e0:b4:32:2e:b2:f6:a5:68:b6:54:53:84:48:18:4a:50:36:87:43:84 -# SHA256 Fingerprint: 03:95:0f:b4:9a:53:1f:3e:19:91:94:23:98:df:a9:e0:ea:32:d7:ba:1c:dd:9b:c8:5d:b5:7e:d9:40:0b:43:4a ------BEGIN CERTIFICATE----- -MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UE -AwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00x -CzAJBgNVBAYTAkVTMB4XDTA4MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEW -MBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZF -RElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC -AgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHkWLn7 -09gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7 -XBZXehuDYAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5P -Grjm6gSSrj0RuVFCPYewMYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAK -t0SdE3QrwqXrIhWYENiLxQSfHY9g5QYbm8+5eaA9oiM/Qj9r+hwDezCNzmzAv+Yb -X79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbkHQl/Sog4P75n/TSW9R28 -MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTTxKJxqvQU -fecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI -2Sf23EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyH -K9caUPgn6C9D4zq92Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEae -ZAwUswdbxcJzbPEHXEUkFDWug/FqTYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAP -BgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz4SsrSbbXc6GqlPUB53NlTKxQ -MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU9QHnc2VMrFAw -RAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv -bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWIm -fQwng4/F9tqgaHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3 -gvoFNTPhNahXwOf9jU8/kzJPeGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKe -I6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1PwkzQSulgUV1qzOMPPKC8W64iLgpq0i -5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1ThCojz2GuHURwCRi 
-ipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oIKiMn -MCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZ -o5NjEFIqnxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6 -zqylfDJKZ0DcMDQj3dcEI2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacN -GHk0vFQYXlPKNFHtRQrmjseCNj6nOGOpMCwXEGCSn1WHElkQwg9naRHMTh5+Spqt -r0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3otkYNbn5XOmeUwssfnHdK -Z05phkOTOPu220+DkdRgfks+KzgHVZhepA== ------END CERTIFICATE----- - # Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. # Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. # Label: "Microsec e-Szigno Root CA 2009" @@ -2466,46 +2175,6 @@ VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI 03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= -----END CERTIFICATE----- -# Issuer: CN=Certinomis - Autorité Racine O=Certinomis OU=0002 433998903 -# Subject: CN=Certinomis - Autorité Racine O=Certinomis OU=0002 433998903 -# Label: "Certinomis - Autorité Racine" -# Serial: 1 -# MD5 Fingerprint: 7f:30:78:8c:03:e3:ca:c9:0a:e2:c9:ea:1e:aa:55:1a -# SHA1 Fingerprint: 2e:14:da:ec:28:f0:fa:1e:8e:38:9a:4e:ab:eb:26:c0:0a:d3:83:c3 -# SHA256 Fingerprint: fc:bf:e2:88:62:06:f7:2b:27:59:3c:8b:07:02:97:e1:2d:76:9e:d1:0e:d7:93:07:05:a8:09:8e:ff:c1:4d:17 ------BEGIN CERTIFICATE----- -MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET -MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk -BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4 -Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl -cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0 -aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP -ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY -F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N -8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe -rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K -/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu -7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC -28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6 -lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E -nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB -0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09 -5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj -WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN -jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ -KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s -ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM -OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q -619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn -2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj -o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v -nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG -5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq -pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb -dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0 -BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5 ------END CERTIFICATE----- - # Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Label: "TWCA Root Certification Authority" @@ -2706,96 +2375,6 @@ 
jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN ZetX2fNXlrtIzYE= -----END CERTIFICATE----- -# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Label: "StartCom Certification Authority" -# Serial: 45 -# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16 -# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0 -# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11 ------BEGIN CERTIFICATE----- -MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg -Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9 -MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi -U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh -cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk -pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf -OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C -Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT -Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi -HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM -Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w -+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ -Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 -Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B -26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID -AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD -VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul -F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC -ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w -ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk -aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0 -YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg -c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0 -aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93 -d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG -CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1 -dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF -wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS -Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst -0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc -pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl -CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF -P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK -1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm -KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE -JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ -8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm -fyWl8kgAwKQB2j8= ------END CERTIFICATE----- - -# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd. -# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd. 
-# Label: "StartCom Certification Authority G2" -# Serial: 59 -# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64 -# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17 -# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95 ------BEGIN CERTIFICATE----- -MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm -aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1 -OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG -A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ -JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD -vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo -D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/ -Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW -RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK -HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN -nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM -0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i -UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9 -Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg -TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE -AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL -BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K -2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX -UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl -6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK -9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ -HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI -wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY -XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l -IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo -hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr -so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI ------END CERTIFICATE----- - # Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 # Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 # Label: "Buypass Class 2 Root CA" @@ -2937,39 +2516,6 @@ iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= -----END CERTIFICATE----- -# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Aralık 2007 -# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. 
(c) Aralık 2007 -# Label: "TURKTRUST Certificate Services Provider Root 2007" -# Serial: 1 -# MD5 Fingerprint: 2b:70:20:56:86:82:a0:18:c8:07:53:12:28:70:21:72 -# SHA1 Fingerprint: f1:7f:6f:b6:31:dc:99:e3:a3:c8:7f:fe:1c:f1:81:10:88:d9:60:33 -# SHA256 Fingerprint: 97:8c:d9:66:f2:fa:a0:7b:a7:aa:95:00:d9:c0:2e:9d:77:f2:cd:ad:a6:ad:6b:a7:4a:f4:b9:1c:66:59:3c:50 ------BEGIN CERTIFICATE----- -MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc -UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx -c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS -S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg -SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx -OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry -b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC -VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE -sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F -ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY -KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG -+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG -HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P -IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M -733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk -Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G -CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW -AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I -aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5 -mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa -XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ -qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9 ------END CERTIFICATE----- - # Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH # Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH # Label: "D-TRUST Root Class 3 CA 2 2009" @@ -3036,106 +2582,6 @@ xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 -----END CERTIFICATE----- -# Issuer: CN=Autoridad de Certificacion Raiz del Estado Venezolano O=Sistema Nacional de Certificacion Electronica OU=Superintendencia de Servicios de Certificacion Electronica -# Subject: CN=PSCProcert O=Sistema Nacional de Certificacion Electronica OU=Proveedor de Certificados PROCERT -# Label: "PSCProcert" -# Serial: 11 -# MD5 Fingerprint: e6:24:e9:12:01:ae:0c:de:8e:85:c4:ce:a3:12:dd:ec -# SHA1 Fingerprint: 70:c1:8d:74:b4:28:81:0a:e4:fd:a5:75:d7:01:9f:99:b0:3d:50:74 -# SHA256 Fingerprint: 3c:fc:3c:14:d1:f6:84:ff:17:e3:8c:43:ca:44:0c:00:b9:67:ec:93:3e:8b:fe:06:4c:a1:d7:2c:90:f2:ad:b0 ------BEGIN CERTIFICATE----- -MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1 -dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s -YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz -dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0 -aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh -IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ -KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEw -MFoXDTIwMTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHBy -b2NlcnQubmV0LnZlMQ8wDQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGEx -KjAoBgNVBAsTIVByb3ZlZWRvciBkZSBDZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQG 
-A1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9u -aWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIwDQYJKoZI -hvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo9 -7BVCwfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74 -BCXfgI8Qhd19L3uA3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38G -ieU89RLAu9MLmV+QfI4tL3czkkohRqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9 -JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmOEO8GqQKJ/+MMbpfg353bIdD0 -PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG20qCZyFSTXai2 -0b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH -0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/ -6mnbVSKVUyqUtd+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1m -v6JpIzi4mWCZDlZTOpx+FIywBm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7 -K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvpr2uKGcfLFFb14dq12fy/czja+eev -bqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/AgEBMDcGA1UdEgQw -MC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0w -MB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFD -gBStuyIdxuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0 -b3JpZGFkIGRlIENlcnRpZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xh -bm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0 -cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRp -ZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEg -ZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkq -hkiG9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQD -AgEGME0GA1UdEQRGMESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0w -MDAwMDKgGwYFYIZeAgKgEgwQUklGLUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEag -RKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9sY3IvQ0VSVElGSUNBRE8t -UkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNyYWl6LnN1c2Nl -cnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v -Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsG -AQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcN -AQELBQADggIBACtZ6yKZu4SqT96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS -1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmNg7+mvTV+LFwxNG9s2/NkAZiqlCxB -3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4quxtxj7mkoP3Yldmv -Wb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1n8Gh -HVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHm -pHmJWhSnFFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXz -sOfIt+FTvZLm8wyWuevo5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bE -qCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq3TNWOByyrYDT13K9mmyZY+gAu0F2Bbdb -mRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5poLWccret9W6aAjtmcz9 -opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3YeMLEYC/H -YvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km ------END CERTIFICATE----- - -# Issuer: CN=CA Disig Root R1 O=Disig a.s. -# Subject: CN=CA Disig Root R1 O=Disig a.s. 
-# Label: "CA Disig Root R1" -# Serial: 14052245610670616104 -# MD5 Fingerprint: be:ec:11:93:9a:f5:69:21:bc:d7:c1:c0:67:89:cc:2a -# SHA1 Fingerprint: 8e:1c:74:f8:a6:20:b9:e5:8a:f4:61:fa:ec:2b:47:56:51:1a:52:c6 -# SHA256 Fingerprint: f9:6f:23:f4:c3:e7:9c:07:7a:46:98:8d:5a:f5:90:06:76:a0:f0:39:cb:64:5d:d1:75:49:b2:16:c8:24:40:ce ------BEGIN CERTIFICATE----- -MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNV -BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu -MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQy -MDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx -EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjEw -ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy3QRk -D2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/o -OI7bm+V8u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3A -fQ+lekLZWnDZv6fXARz2m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJe -IgpFy4QxTaz+29FHuvlglzmxZcfe+5nkCiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8n -oc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTaYVKvJrT1cU/J19IG32PK -/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6vpmumwKj -rckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD -3AjLLhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE -7cderVC6xkGbrPAXZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkC -yC2fg69naQanMVXVz0tv/wQFx1isXxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLd -qvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud -DwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ04IwDQYJKoZI -hvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR -xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaA -SfX8MPWbTx9BLxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXo -HqJPYNcHKfyyo6SdbhWSVhlMCrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpB -emOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5GfbVSUZP/3oNn6z4eGBrxEWi1CXYBmC -AMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85YmLLW1AL14FABZyb -7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKSds+x -DzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvk -F7mGnjixlAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqF -a3qdnom2piiZk4hA9z7NUaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsT -Q6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJa7+h89n07eLw4+1knj0vllJPgFOL ------END CERTIFICATE----- - # Issuer: CN=CA Disig Root R2 O=Disig a.s. # Subject: CN=CA Disig Root R2 O=Disig a.s. 
# Label: "CA Disig Root R2" @@ -3671,85 +3117,6 @@ r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ -----END CERTIFICATE----- -# Issuer: CN=Certification Authority of WoSign O=WoSign CA Limited -# Subject: CN=Certification Authority of WoSign O=WoSign CA Limited -# Label: "WoSign" -# Serial: 125491772294754854453622855443212256657 -# MD5 Fingerprint: a1:f2:f9:b5:d2:c8:7a:74:b8:f3:05:f1:d7:e1:84:8d -# SHA1 Fingerprint: b9:42:94:bf:91:ea:8f:b6:4b:e6:10:97:c7:fb:00:13:59:b6:76:cb -# SHA256 Fingerprint: 4b:22:d5:a6:ae:c9:9f:3c:db:79:aa:5e:c0:68:38:47:9c:d5:ec:ba:71:64:f7:f2:2d:c1:d6:5f:63:d8:57:08 ------BEGIN CERTIFICATE----- -MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBV -MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNV -BAMTIUNlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgw -MTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFX -b1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvcqN -rLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1U -fcIiePyOCbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcScc -f+Hb0v1naMQFXQoOXXDX2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2 -ZjC1vt7tj/id07sBMOby8w7gLJKA84X5KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4M -x1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR+ScPewavVIMYe+HdVHpR -aG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ezEC8wQjch -zDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDar -uHqklWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221K -mYo0SLwX3OSACCK28jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvA -Sh0JWzko/amrzgD5LkhLJuYwTKVYyrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWv -HYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0CAwEAAaNCMEAwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R8bNLtwYgFP6H -EtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1 -LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJ -MuYhOZO9sxXqT2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2e -JXLOC62qx1ViC777Y7NhRCOjy+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VN -g64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC2nz4SNAzqfkHx5Xh9T71XXG68pWp -dIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes5cVAWubXbHssw1ab -R80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/EaEQ -PkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGce -xGATVdVhmVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+ -J7x6v+Db9NpSvd4MVHAxkUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMl -OtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGikpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWT -ee5Ehr7XHuQe+w== ------END CERTIFICATE----- - -# Issuer: CN=CA 沃通根证书 O=WoSign CA Limited -# Subject: CN=CA 沃通根证书 O=WoSign CA Limited -# Label: "WoSign China" -# Serial: 106921963437422998931660691310149453965 -# MD5 Fingerprint: 78:83:5b:52:16:76:c4:24:3b:83:78:e8:ac:da:9a:93 -# SHA1 Fingerprint: 16:32:47:8d:89:f9:21:3a:92:00:85:63:f5:a4:a7:d3:12:40:8a:d6 -# SHA256 Fingerprint: d6:f0:34:bd:94:aa:23:3f:02:97:ec:a4:24:5b:28:39:73:e4:47:aa:59:0f:31:0c:77:f4:8f:df:83:11:22:54 ------BEGIN CERTIFICATE----- -MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBG -MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNV -BAMMEkNBIOayg+mAmuagueivgeS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgw -MTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRl -ZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjANBgkqhkiG9w0BAQEF -AAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k8H/r 
-D195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld1 -9AXbbQs5uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExf -v5RxadmWPgxDT74wwJ85dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnk -UkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+L -NVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFyb7Ao65vh4YOhn0pdr8yb -+gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc76DbT52V -qyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6K -yX2m+Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0G -AbQOXDBGVWCvOGU6yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaK -J/kR8slC/k7e3x9cxKSGhxYzoacXGKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwEC -AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O -BBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUAA4ICAQBqinA4 -WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6 -yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj -/feTZU7n85iYr83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6 -jBAyvd0zaziGfjk9DgNyp115j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2 -ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0AkLppRQjbbpCBhqcqBT/mhDn4t/lX -X0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97qA4bLJyuQHCH2u2n -FoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Yjj4D -u9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10l -O1Hm13ZBONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Le -ie2uPAmvylezkolwQOQvT8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR1 -2KvxAmLBsX5VYc8T1yaw15zLKYs4SgsOkI26oQ== ------END CERTIFICATE----- - # Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited # Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited # Label: "COMODO RSA Certification Authority" @@ -4158,39 +3525,6 @@ AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ 5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su -----END CERTIFICATE----- -# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. -# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. 
-# Label: "TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5" -# Serial: 156233699172481 -# MD5 Fingerprint: da:70:8e:f0:22:df:93:26:f6:5f:9f:d3:15:06:52:4e -# SHA1 Fingerprint: c4:18:f6:4d:46:d1:df:00:3d:27:30:13:72:43:a9:12:11:c6:75:fb -# SHA256 Fingerprint: 49:35:1b:90:34:44:c1:85:cc:dc:5c:69:3d:24:d8:55:5c:b2:08:d6:a8:14:13:07:69:9f:4a:f0:63:19:9d:78 ------BEGIN CERTIFICATE----- -MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UE -BhMCVFIxDzANBgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxn -aSDEsGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkg -QS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1QgRWxla3Ryb25payBTZXJ0aWZpa2Eg -SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAwODA3MDFaFw0yMzA0 -MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0wSwYD -VQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8 -dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF -bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIB -IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApCUZ4WWe60ghUEoI5RHwWrom -/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537jVJp45wnEFPzpALFp/kR -Gml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1mep5Fimh3 -4khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z -5UNP9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0 -hO8EuPbJbKoCPrZV4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QID -AQABo0IwQDAdBgNVHQ4EFgQUVpkHHtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJ5FdnsX -SDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPoBP5yCccLqh0l -VX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq -URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nf -peYVhDfwwvJllpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CF -Yv4HAqGEVka+lgqaE9chTLd8B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW -+qtB4Uu2NQvAmxU= ------END CERTIFICATE----- - # Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 # Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903 # Label: "Certinomis - Root CA" @@ -4261,56 +3595,6 @@ aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= -----END CERTIFICATE----- -# Issuer: CN=Certification Authority of WoSign G2 O=WoSign CA Limited -# Subject: CN=Certification Authority of WoSign G2 O=WoSign CA Limited -# Label: "Certification Authority of WoSign G2" -# Serial: 142423943073812161787490648904721057092 -# MD5 Fingerprint: c8:1c:7d:19:aa:cb:71:93:f2:50:f8:52:a8:1e:ba:60 -# SHA1 Fingerprint: fb:ed:dc:90:65:b7:27:20:37:bc:55:0c:9c:56:de:bb:f2:78:94:e1 -# SHA256 Fingerprint: d4:87:a5:6f:83:b0:74:82:e8:5e:96:33:94:c1:ec:c2:c9:e5:1d:09:03:ee:94:6b:02:c3:01:58:1e:d9:9e:16 ------BEGIN CERTIFICATE----- -MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBY -MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNV -BAMTJENlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDEx -MDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgxCzAJBgNVBAYTAkNOMRowGAYDVQQK -ExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlmaWNhdGlvbiBBdXRo -b3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPX -JYY1kBaiXW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgO -gHzKtB0TiGsOqCR3A9DuW/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg -5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg95k4ot+vElbGs/V6r+kHLXZ1L3PR8du9n -fwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BKv0mUYQs4kI9dJGwlezt5 -2eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD 
-VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJ -KoZIhvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8 -fHulwqZm46qwtyeYP0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G -3CE4Q3RM+zD4F3LBMvzIkRfEzFg3TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yy -SrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu+sif/a+RZQp4OBXllxcU3fng -LDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+7Q9LGOHSJDy7 -XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg= ------END CERTIFICATE----- - -# Issuer: CN=CA WoSign ECC Root O=WoSign CA Limited -# Subject: CN=CA WoSign ECC Root O=WoSign CA Limited -# Label: "CA WoSign ECC Root" -# Serial: 138625735294506723296996289575837012112 -# MD5 Fingerprint: 80:c6:53:ee:61:82:28:72:f0:ff:21:b9:17:ca:b2:20 -# SHA1 Fingerprint: d2:7a:d2:be:ed:94:c0:a1:3c:c7:25:21:ea:5d:71:be:81:19:f3:2b -# SHA256 Fingerprint: 8b:45:da:1c:06:f7:91:eb:0c:ab:f2:6b:e5:88:f5:fb:23:16:5c:2e:61:4b:f8:85:56:2d:0d:ce:50:b2:9b:02 ------BEGIN CERTIFICATE----- -MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQsw -CQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMT -EkNBIFdvU2lnbiBFQ0MgUm9vdDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4 -NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEb -MBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACID -YgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiUt5v8 -KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES -1ns2o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUqv3VWqP2h4syhf3RMluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB -1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0Daupn75OcsqF1NnstTJFGG+rrQIwfcf3 -aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYua/GRspBl9JrmkO5K ------END CERTIFICATE----- - # Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. # Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. # Label: "SZAFIR ROOT CA2" @@ -4874,3 +4158,285 @@ lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c 8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= -----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. 
+# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-1" +# Serial: 15752444095811006489 +# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 +# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a +# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ +DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-2" +# Serial: 2711694510199101698 +# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 +# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 +# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK +DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa +ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor ECA-1" +# Serial: 9548242946988625984 +# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c +# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd +# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm 
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq 
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- diff --git a/Makefile b/Makefile index d67ddc5c8..c84f063a5 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ -CFLAGS = -Xcc -ISources/BoringSSL/include +CFLAGS = -Xcc -ISources/BoringSSL/include -Xcc -DPB_FIELD_16BIT=1 all: swift build -v $(CFLAGS) @@ -8,8 +8,7 @@ all: project: swift package generate-xcodeproj -# Optional: set the generated project's indentation settings. 
- -ruby fix-indentation-settings.rb || echo "Consider running 'sudo gem install xcodeproj' to automatically set correct indentation settings for the generated project." + @ruby fix-project-settings.rb || echo "ERROR: Please install Ruby and the 'xcodeproj' gem to automatically fix the Xcode project's settings." test: all swift test -v $(CFLAGS) diff --git a/Package.swift b/Package.swift index ead8900dc..fa723a4d2 100644 --- a/Package.swift +++ b/Package.swift @@ -63,4 +63,6 @@ let package = Package( dependencies: ["SwiftGRPC", "Commander"], path: "Sources/Examples/Simple"), .testTarget(name: "SwiftGRPCTests", dependencies: ["SwiftGRPC"]) - ]) + ], + cLanguageStandard: .gnu11, + cxxLanguageStandard: .cxx11) diff --git a/Sources/BoringSSL/crypto/aes/internal.h b/Sources/BoringSSL/crypto/aes/internal.h deleted file mode 100644 index 3dc5c6379..000000000 --- a/Sources/BoringSSL/crypto/aes/internal.h +++ /dev/null @@ -1,87 +0,0 @@ -/* ==================================================================== - * Copyright (c) 2002-2006 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. 
- * ==================================================================== */ - -#ifndef OPENSSL_HEADER_AES_INTERNAL_H -#define OPENSSL_HEADER_AES_INTERNAL_H - -#include - -#if defined(__cplusplus) -extern "C" { -#endif - - -#if defined(_MSC_VER) && \ - (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64)) -#define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00) -#define GETU32(p) SWAP(*((uint32_t *)(p))) -#define PUTU32(ct, st) \ - { *((uint32_t *)(ct)) = SWAP((st)); } -#else -#define GETU32(pt) \ - (((uint32_t)(pt)[0] << 24) ^ ((uint32_t)(pt)[1] << 16) ^ \ - ((uint32_t)(pt)[2] << 8) ^ ((uint32_t)(pt)[3])) -#define PUTU32(ct, st) \ - { \ - (ct)[0] = (uint8_t)((st) >> 24); \ - (ct)[1] = (uint8_t)((st) >> 16); \ - (ct)[2] = (uint8_t)((st) >> 8); \ - (ct)[3] = (uint8_t)(st); \ - } -#endif - -#define MAXKC (256 / 32) -#define MAXKB (256 / 8) -#define MAXNR 14 - - -#if defined(__cplusplus) -} /* extern C */ -#endif - -#endif /* OPENSSL_HEADER_AES_INTERNAL_H */ diff --git a/Sources/BoringSSL/crypto/asn1/a_bitstr.c b/Sources/BoringSSL/crypto/asn1/a_bitstr.c index ea9da2475..39426389e 100644 --- a/Sources/BoringSSL/crypto/asn1/a_bitstr.c +++ b/Sources/BoringSSL/crypto/asn1/a_bitstr.c @@ -56,6 +56,7 @@ #include +#include #include #include @@ -139,6 +140,11 @@ ASN1_BIT_STRING *c2i_ASN1_BIT_STRING(ASN1_BIT_STRING **a, goto err; } + if (len > INT_MAX) { + OPENSSL_PUT_ERROR(ASN1, ASN1_R_STRING_TOO_LONG); + goto err; + } + if ((a == NULL) || ((*a) == NULL)) { if ((ret = M_ASN1_BIT_STRING_new()) == NULL) return (NULL); @@ -211,8 +217,7 @@ int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *a, int n, int value) if (a->data == NULL) c = (unsigned char *)OPENSSL_malloc(w + 1); else - c = (unsigned char *)OPENSSL_realloc_clean(a->data, - a->length, w + 1); + c = (unsigned char *)OPENSSL_realloc(a->data, w + 1); if (c == NULL) { OPENSSL_PUT_ERROR(ASN1, ERR_R_MALLOC_FAILURE); return 0; diff --git a/Sources/BoringSSL/crypto/asn1/a_d2i_fp.c b/Sources/BoringSSL/crypto/asn1/a_d2i_fp.c index b54497191..3da6df9b0 100644 --- a/Sources/BoringSSL/crypto/asn1/a_d2i_fp.c +++ b/Sources/BoringSSL/crypto/asn1/a_d2i_fp.c @@ -140,6 +140,21 @@ void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *x) } #endif +typedef struct asn1_const_ctx_st + { + const unsigned char *p;/* work char pointer */ + int eos; /* end of sequence read for indefinite encoding */ + int error; /* error code to use when returning an error */ + int inf; /* constructed if 0x20, indefinite is 0x21 */ + int tag; /* tag from last 'get object' */ + int xclass; /* class from last 'get object' */ + long slen; /* length of last 'get object' */ + const unsigned char *max; /* largest value of p allowed */ + const unsigned char *q;/* temporary variable */ + const unsigned char **pp;/* variable */ + int line; /* used in error processing */ + } ASN1_const_CTX; + #define HEADER_SIZE 8 #define ASN1_CHUNK_INITIAL_SIZE (16 * 1024) static int asn1_d2i_read_bio(BIO *in, BUF_MEM **pb) diff --git a/Sources/BoringSSL/crypto/asn1/a_enum.c b/Sources/BoringSSL/crypto/asn1/a_enum.c index cc4690552..4a779718c 100644 --- a/Sources/BoringSSL/crypto/asn1/a_enum.c +++ b/Sources/BoringSSL/crypto/asn1/a_enum.c @@ -56,6 +56,7 @@ #include +#include #include #include @@ -110,7 +111,6 @@ int ASN1_ENUMERATED_set(ASN1_ENUMERATED *a, long v) long ASN1_ENUMERATED_get(ASN1_ENUMERATED *a) { int neg = 0, i; - long r = 0; if (a == NULL) return (0L); @@ -120,20 +120,31 @@ long ASN1_ENUMERATED_get(ASN1_ENUMERATED *a) else if (i != V_ASN1_ENUMERATED) return -1; - if (a->length > 
(int)sizeof(long)) { + OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) >= sizeof(long), + long_larger_than_uint64_t); + + if (a->length > (int)sizeof(uint64_t)) { /* hmm... a bit ugly */ - return (0xffffffffL); + return -1; } - if (a->data == NULL) - return 0; - for (i = 0; i < a->length; i++) { - r <<= 8; - r |= (unsigned char)a->data[i]; + uint64_t r64 = 0; + if (a->data != NULL) { + for (i = 0; i < a->length; i++) { + r64 <<= 8; + r64 |= (unsigned char)a->data[i]; + } + + if (r64 > LONG_MAX) { + return -1; + } } + + long r = (long) r64; if (neg) r = -r; - return (r); + + return r; } ASN1_ENUMERATED *BN_to_ASN1_ENUMERATED(BIGNUM *bn, ASN1_ENUMERATED *ai) diff --git a/Sources/BoringSSL/crypto/asn1/a_gentm.c b/Sources/BoringSSL/crypto/asn1/a_gentm.c index d130cdf80..5fcb65b8b 100644 --- a/Sources/BoringSSL/crypto/asn1/a_gentm.c +++ b/Sources/BoringSSL/crypto/asn1/a_gentm.c @@ -148,7 +148,7 @@ int asn1_generalizedtime_to_tm(struct tm *tm, const ASN1_GENERALIZEDTIME *d) if (a[o] == 'Z') o++; else if ((a[o] == '+') || (a[o] == '-')) { - int offsign = a[o] == '-' ? -1 : 1, offset = 0; + int offsign = a[o] == '-' ? 1 : -1, offset = 0; o++; if (o + 4 > l) goto err; diff --git a/Sources/BoringSSL/crypto/asn1/a_i2d_fp.c b/Sources/BoringSSL/crypto/asn1/a_i2d_fp.c index 486207ed2..7b76d0c54 100644 --- a/Sources/BoringSSL/crypto/asn1/a_i2d_fp.c +++ b/Sources/BoringSSL/crypto/asn1/a_i2d_fp.c @@ -81,6 +81,9 @@ int ASN1_i2d_bio(i2d_of_void *i2d, BIO *out, void *x) int i, j = 0, n, ret = 1; n = i2d(x, NULL); + if (n <= 0) + return 0; + b = (char *)OPENSSL_malloc(n); if (b == NULL) { OPENSSL_PUT_ERROR(ASN1, ERR_R_MALLOC_FAILURE); diff --git a/Sources/BoringSSL/crypto/asn1/a_int.c b/Sources/BoringSSL/crypto/asn1/a_int.c index 617ba9624..8a4edd691 100644 --- a/Sources/BoringSSL/crypto/asn1/a_int.c +++ b/Sources/BoringSSL/crypto/asn1/a_int.c @@ -57,6 +57,7 @@ #include #include +#include #include #include @@ -385,7 +386,6 @@ int ASN1_INTEGER_set(ASN1_INTEGER *a, long v) long ASN1_INTEGER_get(const ASN1_INTEGER *a) { int neg = 0, i; - long r = 0; if (a == NULL) return (0L); @@ -395,20 +395,31 @@ long ASN1_INTEGER_get(const ASN1_INTEGER *a) else if (i != V_ASN1_INTEGER) return -1; - if (a->length > (int)sizeof(long)) { + OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) >= sizeof(long), + long_larger_than_uint64_t); + + if (a->length > (int)sizeof(uint64_t)) { /* hmm... 
a bit ugly, return all ones */ return -1; } - if (a->data == NULL) - return 0; - for (i = 0; i < a->length; i++) { - r <<= 8; - r |= (unsigned char)a->data[i]; + uint64_t r64 = 0; + if (a->data != NULL) { + for (i = 0; i < a->length; i++) { + r64 <<= 8; + r64 |= (unsigned char)a->data[i]; + } + + if (r64 > LONG_MAX) { + return -1; + } } + + long r = (long) r64; if (neg) r = -r; - return (r); + + return r; } ASN1_INTEGER *BN_to_ASN1_INTEGER(const BIGNUM *bn, ASN1_INTEGER *ai) diff --git a/Sources/BoringSSL/crypto/asn1/a_object.c b/Sources/BoringSSL/crypto/asn1/a_object.c index a710addda..005e37d50 100644 --- a/Sources/BoringSSL/crypto/asn1/a_object.c +++ b/Sources/BoringSSL/crypto/asn1/a_object.c @@ -87,134 +87,6 @@ int i2d_ASN1_OBJECT(ASN1_OBJECT *a, unsigned char **pp) return (objsize); } -int a2d_ASN1_OBJECT(unsigned char *out, int olen, const char *buf, int num) -{ - int i, first, len = 0, c, use_bn; - char ftmp[24], *tmp = ftmp; - int tmpsize = sizeof ftmp; - const char *p; - unsigned long l; - BIGNUM *bl = NULL; - - if (num == 0) - return (0); - else if (num == -1) - num = strlen(buf); - - p = buf; - c = *(p++); - num--; - if ((c >= '0') && (c <= '2')) { - first = c - '0'; - } else { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_FIRST_NUM_TOO_LARGE); - goto err; - } - - if (num <= 0) { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_MISSING_SECOND_NUMBER); - goto err; - } - c = *(p++); - num--; - for (;;) { - if (num <= 0) - break; - if ((c != '.') && (c != ' ')) { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_SEPARATOR); - goto err; - } - l = 0; - use_bn = 0; - for (;;) { - if (num <= 0) - break; - num--; - c = *(p++); - if ((c == ' ') || (c == '.')) - break; - if ((c < '0') || (c > '9')) { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_INVALID_DIGIT); - goto err; - } - if (!use_bn && l >= ((ULONG_MAX - 80) / 10L)) { - use_bn = 1; - if (!bl) - bl = BN_new(); - if (!bl || !BN_set_word(bl, l)) - goto err; - } - if (use_bn) { - if (!BN_mul_word(bl, 10L) - || !BN_add_word(bl, c - '0')) - goto err; - } else - l = l * 10L + (long)(c - '0'); - } - if (len == 0) { - if ((first < 2) && (l >= 40)) { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_SECOND_NUMBER_TOO_LARGE); - goto err; - } - if (use_bn) { - if (!BN_add_word(bl, first * 40)) - goto err; - } else - l += (long)first *40; - } - i = 0; - if (use_bn) { - int blsize; - blsize = BN_num_bits(bl); - blsize = (blsize + 6) / 7; - if (blsize > tmpsize) { - if (tmp != ftmp) - OPENSSL_free(tmp); - tmpsize = blsize + 32; - tmp = OPENSSL_malloc(tmpsize); - if (!tmp) - goto err; - } - while (blsize--) { - BN_ULONG t = BN_div_word(bl, 0x80L); - if (t == (BN_ULONG)-1) - goto err; - tmp[i++] = (unsigned char)t; - } - } else { - - for (;;) { - tmp[i++] = (unsigned char)l & 0x7f; - l >>= 7L; - if (l == 0L) - break; - } - - } - if (out != NULL) { - if (len + i > olen) { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_BUFFER_TOO_SMALL); - goto err; - } - while (--i > 0) - out[len++] = tmp[i] | 0x80; - out[len++] = tmp[0]; - } else - len += i; - } - if (tmp != ftmp) - OPENSSL_free(tmp); - if (bl) - BN_free(bl); - return (len); - err: - if (tmp != ftmp) - OPENSSL_free(tmp); - if (bl) - BN_free(bl); - return (0); -} - int i2t_ASN1_OBJECT(char *buf, int buf_len, ASN1_OBJECT *a) { return OBJ_obj2txt(buf, buf_len, a, 0); diff --git a/Sources/BoringSSL/crypto/asn1/a_print.c b/Sources/BoringSSL/crypto/asn1/a_print.c index aee54fa49..2104521e6 100644 --- a/Sources/BoringSSL/crypto/asn1/a_print.c +++ b/Sources/BoringSSL/crypto/asn1/a_print.c @@ -91,31 +91,3 @@ int ASN1_PRINTABLE_type(const unsigned char *s, int len) return 
(V_ASN1_IA5STRING); return (V_ASN1_PRINTABLESTRING); } - -int ASN1_UNIVERSALSTRING_to_string(ASN1_UNIVERSALSTRING *s) -{ - int i; - unsigned char *p; - - if (s->type != V_ASN1_UNIVERSALSTRING) - return (0); - if ((s->length % 4) != 0) - return (0); - p = s->data; - for (i = 0; i < s->length; i += 4) { - if ((p[0] != '\0') || (p[1] != '\0') || (p[2] != '\0')) - break; - else - p += 4; - } - if (i < s->length) - return (0); - p = s->data; - for (i = 3; i < s->length; i += 4) { - *(p++) = s->data[i]; - } - *(p) = '\0'; - s->length /= 4; - s->type = ASN1_PRINTABLE_type(s->data, s->length); - return (1); -} diff --git a/Sources/BoringSSL/crypto/asn1/a_strnid.c b/Sources/BoringSSL/crypto/asn1/a_strnid.c index c558bce6c..379a79fb7 100644 --- a/Sources/BoringSSL/crypto/asn1/a_strnid.c +++ b/Sources/BoringSSL/crypto/asn1/a_strnid.c @@ -62,6 +62,9 @@ #include #include #include +#include + +DEFINE_STACK_OF(ASN1_STRING_TABLE) static STACK_OF(ASN1_STRING_TABLE) *stable = NULL; static void st_free(ASN1_STRING_TABLE *tbl); diff --git a/Sources/BoringSSL/crypto/asn1/a_time.c b/Sources/BoringSSL/crypto/asn1/a_time.c index 4b584297e..c962c0bf0 100644 --- a/Sources/BoringSSL/crypto/asn1/a_time.c +++ b/Sources/BoringSSL/crypto/asn1/a_time.c @@ -114,7 +114,7 @@ int ASN1_TIME_check(ASN1_TIME *t) ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime(ASN1_TIME *t, ASN1_GENERALIZEDTIME **out) { - ASN1_GENERALIZEDTIME *ret; + ASN1_GENERALIZEDTIME *ret = NULL; char *str; int newlen; @@ -123,22 +123,21 @@ ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime(ASN1_TIME *t, if (!out || !*out) { if (!(ret = ASN1_GENERALIZEDTIME_new())) - return NULL; - if (out) - *out = ret; - } else + goto err; + } else { ret = *out; + } /* If already GeneralizedTime just copy across */ if (t->type == V_ASN1_GENERALIZEDTIME) { if (!ASN1_STRING_set(ret, t->data, t->length)) - return NULL; - return ret; + goto err; + goto done; } /* grow the string */ if (!ASN1_STRING_set(ret, NULL, t->length + 2)) - return NULL; + goto err; /* ASN1_STRING_set() allocated 'len + 1' bytes. */ newlen = t->length + 2 + 1; str = (char *)ret->data; @@ -150,9 +149,18 @@ ASN1_GENERALIZEDTIME *ASN1_TIME_to_generalizedtime(ASN1_TIME *t, BUF_strlcat(str, (char *)t->data, newlen); - return ret; + done: + if (out != NULL && *out == NULL) + *out = ret; + return ret; + + err: + if (out == NULL || *out != ret) + ASN1_GENERALIZEDTIME_free(ret); + return NULL; } + int ASN1_TIME_set_string(ASN1_TIME *s, const char *str) { ASN1_TIME t; diff --git a/Sources/BoringSSL/crypto/asn1/a_utctm.c b/Sources/BoringSSL/crypto/asn1/a_utctm.c index 193b83f82..f7519df78 100644 --- a/Sources/BoringSSL/crypto/asn1/a_utctm.c +++ b/Sources/BoringSSL/crypto/asn1/a_utctm.c @@ -127,7 +127,7 @@ int asn1_utctime_to_tm(struct tm *tm, const ASN1_UTCTIME *d) if (a[o] == 'Z') o++; else if ((a[o] == '+') || (a[o] == '-')) { - int offsign = a[o] == '-' ? -1 : 1, offset = 0; + int offsign = a[o] == '-' ? 
1 : -1, offset = 0; o++; if (o + 4 > l) goto err; diff --git a/Sources/BoringSSL/crypto/asn1/asn1_lib.c b/Sources/BoringSSL/crypto/asn1/asn1_lib.c index 774f151cd..ea727f335 100644 --- a/Sources/BoringSSL/crypto/asn1/asn1_lib.c +++ b/Sources/BoringSSL/crypto/asn1/asn1_lib.c @@ -107,30 +107,6 @@ static int asn1_get_length(const unsigned char **pp, int *inf, long *rl, long max); static void asn1_put_length(unsigned char **pp, int length); -static int _asn1_check_infinite_end(const unsigned char **p, long len) -{ - /* - * If there is 0 or 1 byte left, the length check should pick things up - */ - if (len <= 0) - return (1); - else if ((len >= 2) && ((*p)[0] == 0) && ((*p)[1] == 0)) { - (*p) += 2; - return (1); - } - return (0); -} - -int ASN1_check_infinite_end(unsigned char **p, long len) -{ - return _asn1_check_infinite_end((const unsigned char **)p, len); -} - -int ASN1_const_check_infinite_end(const unsigned char **p, long len) -{ - return _asn1_check_infinite_end(p, len); -} - int ASN1_get_object(const unsigned char **pp, long *plength, int *ptag, int *pclass, long omax) { @@ -327,31 +303,6 @@ int ASN1_object_size(int constructed, int length, int tag) return ret + length; } -static int _asn1_Finish(ASN1_const_CTX *c) -{ - if ((c->inf == (1 | V_ASN1_CONSTRUCTED)) && (!c->eos)) { - if (!ASN1_const_check_infinite_end(&c->p, c->slen)) { - c->error = ASN1_R_MISSING_ASN1_EOS; - return (0); - } - } - if (((c->slen != 0) && !(c->inf & 1)) || ((c->slen < 0) && (c->inf & 1))) { - c->error = ASN1_R_ASN1_LENGTH_MISMATCH; - return (0); - } - return (1); -} - -int asn1_Finish(ASN1_CTX *c) -{ - return _asn1_Finish((ASN1_const_CTX *)c); -} - -int asn1_const_Finish(ASN1_const_CTX *c) -{ - return _asn1_Finish(c); -} - int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str) { if (str == NULL) @@ -484,3 +435,8 @@ unsigned char *ASN1_STRING_data(ASN1_STRING *x) { return M_ASN1_STRING_data(x); } + +const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *x) +{ + return x->data; +} diff --git a/Sources/BoringSSL/crypto/asn1/asn1_locl.h b/Sources/BoringSSL/crypto/asn1/asn1_locl.h index ce8146bf3..10a832cd5 100644 --- a/Sources/BoringSSL/crypto/asn1/asn1_locl.h +++ b/Sources/BoringSSL/crypto/asn1/asn1_locl.h @@ -72,7 +72,7 @@ extern "C" { /* Wrapper functions for time functions. */ /* OPENSSL_gmtime wraps |gmtime_r|. See the manual page for that function. */ -struct tm *OPENSSL_gmtime(const time_t *timer, struct tm *result); +struct tm *OPENSSL_gmtime(const time_t *time, struct tm *result); /* OPENSSL_gmtime_adj updates |tm| by adding |offset_day| days and |offset_sec| * seconds. 
*/ @@ -90,6 +90,9 @@ int OPENSSL_gmtime_diff(int *out_days, int *out_secs, const struct tm *from, int asn1_utctime_to_tm(struct tm *tm, const ASN1_UTCTIME *d); int asn1_generalizedtime_to_tm(struct tm *tm, const ASN1_GENERALIZEDTIME *d); +void asn1_item_combine_free(ASN1_VALUE **pval, const ASN1_ITEM *it, + int combine); + #if defined(__cplusplus) } /* extern C */ diff --git a/Sources/BoringSSL/crypto/asn1/tasn_dec.c b/Sources/BoringSSL/crypto/asn1/tasn_dec.c index bf008af1d..2f5f132a5 100644 --- a/Sources/BoringSSL/crypto/asn1/tasn_dec.c +++ b/Sources/BoringSSL/crypto/asn1/tasn_dec.c @@ -56,6 +56,7 @@ #include +#include #include #include @@ -147,15 +148,6 @@ ASN1_VALUE *ASN1_item_d2i(ASN1_VALUE **pval, return NULL; } -int ASN1_template_d2i(ASN1_VALUE **pval, - const unsigned char **in, long len, - const ASN1_TEMPLATE *tt) -{ - ASN1_TLC c; - asn1_tlc_clear_nc(&c); - return asn1_template_ex_d2i(pval, in, len, tt, 0, &c); -} - /* * Decode an item, taking care of IMPLICIT tagging, if any. If 'opt' set and * tag mismatch return -1 to handle OPTIONAL @@ -188,6 +180,14 @@ int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len, else asn1_cb = 0; + /* + * Bound |len| to comfortably fit in an int. Lengths in this module often + * switch between int and long without overflow checks. + */ + if (len > INT_MAX/2) { + len = INT_MAX/2; + } + switch (it->itype) { case ASN1_ITYPE_PRIMITIVE: if (it->templates) { diff --git a/Sources/BoringSSL/crypto/asn1/tasn_enc.c b/Sources/BoringSSL/crypto/asn1/tasn_enc.c index 9286ef641..cc87d3498 100644 --- a/Sources/BoringSSL/crypto/asn1/tasn_enc.c +++ b/Sources/BoringSSL/crypto/asn1/tasn_enc.c @@ -256,12 +256,6 @@ int ASN1_item_ex_i2d(ASN1_VALUE **pval, unsigned char **out, return 0; } -int ASN1_template_i2d(ASN1_VALUE **pval, unsigned char **out, - const ASN1_TEMPLATE *tt) -{ - return asn1_template_ex_i2d(pval, out, tt, -1, 0); -} - static int asn1_template_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt, int tag, int iclass) { diff --git a/Sources/BoringSSL/crypto/asn1/tasn_fre.c b/Sources/BoringSSL/crypto/asn1/tasn_fre.c index 609cb9f98..eabc0fb56 100644 --- a/Sources/BoringSSL/crypto/asn1/tasn_fre.c +++ b/Sources/BoringSSL/crypto/asn1/tasn_fre.c @@ -59,8 +59,7 @@ #include #include -static void asn1_item_combine_free(ASN1_VALUE **pval, const ASN1_ITEM *it, - int combine); +#include "asn1_locl.h" /* Free up an ASN1 structure */ @@ -74,8 +73,7 @@ void ASN1_item_ex_free(ASN1_VALUE **pval, const ASN1_ITEM *it) asn1_item_combine_free(pval, it, 0); } -static void asn1_item_combine_free(ASN1_VALUE **pval, const ASN1_ITEM *it, - int combine) +void asn1_item_combine_free(ASN1_VALUE **pval, const ASN1_ITEM *it, int combine) { const ASN1_TEMPLATE *tt = NULL, *seqtt; const ASN1_EXTERN_FUNCS *ef; diff --git a/Sources/BoringSSL/crypto/asn1/tasn_new.c b/Sources/BoringSSL/crypto/asn1/tasn_new.c index 10cf954f7..5db38bef7 100644 --- a/Sources/BoringSSL/crypto/asn1/tasn_new.c +++ b/Sources/BoringSSL/crypto/asn1/tasn_new.c @@ -63,6 +63,7 @@ #include #include +#include "asn1_locl.h" #include "../internal.h" @@ -201,7 +202,7 @@ static int asn1_item_ex_combine_new(ASN1_VALUE **pval, const ASN1_ITEM *it, return 1; memerr2: - ASN1_item_ex_free(pval, it); + asn1_item_combine_free(pval, it, combine); memerr: OPENSSL_PUT_ERROR(ASN1, ERR_R_MALLOC_FAILURE); #ifdef CRYPTO_MDEBUG @@ -211,7 +212,7 @@ static int asn1_item_ex_combine_new(ASN1_VALUE **pval, const ASN1_ITEM *it, return 0; auxerr2: - ASN1_item_ex_free(pval, it); + asn1_item_combine_free(pval, 
it, combine); auxerr: OPENSSL_PUT_ERROR(ASN1, ASN1_R_AUX_ERROR); #ifdef CRYPTO_MDEBUG diff --git a/Sources/BoringSSL/crypto/asn1/time_support.c b/Sources/BoringSSL/crypto/asn1/time_support.c index 194dc3a79..3efd43e25 100644 --- a/Sources/BoringSSL/crypto/asn1/time_support.c +++ b/Sources/BoringSSL/crypto/asn1/time_support.c @@ -171,7 +171,7 @@ int OPENSSL_gmtime_adj(struct tm *tm, int off_day, long offset_sec) { return 1; } -int OPENSSL_gmtime_diff(int *pday, int *psec, const struct tm *from, +int OPENSSL_gmtime_diff(int *out_days, int *out_secs, const struct tm *from, const struct tm *to) { int from_sec, to_sec, diff_sec; long from_jd, to_jd, diff_day; @@ -195,11 +195,11 @@ int OPENSSL_gmtime_diff(int *pday, int *psec, const struct tm *from, diff_sec -= SECS_PER_DAY; } - if (pday) { - *pday = (int)diff_day; + if (out_days) { + *out_days = (int)diff_day; } - if (psec) { - *psec = diff_sec; + if (out_secs) { + *out_secs = diff_sec; } return 1; diff --git a/Sources/BoringSSL/crypto/asn1/x_long.c b/Sources/BoringSSL/crypto/asn1/x_long.c deleted file mode 100644 index b53127a30..000000000 --- a/Sources/BoringSSL/crypto/asn1/x_long.c +++ /dev/null @@ -1,200 +0,0 @@ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. 
If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] */ - -#include - -#include - -#include -#include -#include -#include - -#include "../internal.h" - - -/* - * Custom primitive type for long handling. This converts between an - * ASN1_INTEGER and a long directly. - */ - -static int long_new(ASN1_VALUE **pval, const ASN1_ITEM *it); -static void long_free(ASN1_VALUE **pval, const ASN1_ITEM *it); - -static int long_i2c(ASN1_VALUE **pval, unsigned char *cont, int *putype, - const ASN1_ITEM *it); -static int long_c2i(ASN1_VALUE **pval, const unsigned char *cont, int len, - int utype, char *free_cont, const ASN1_ITEM *it); -static int long_print(BIO *out, ASN1_VALUE **pval, const ASN1_ITEM *it, - int indent, const ASN1_PCTX *pctx); - -static const ASN1_PRIMITIVE_FUNCS long_pf = { - NULL, 0, - long_new, - long_free, - long_free, /* Clear should set to initial value */ - long_c2i, - long_i2c, - long_print -}; - -ASN1_ITEM_start(LONG) - ASN1_ITYPE_PRIMITIVE, V_ASN1_INTEGER, NULL, 0, &long_pf, ASN1_LONG_UNDEF, "LONG" -ASN1_ITEM_end(LONG) - -ASN1_ITEM_start(ZLONG) - ASN1_ITYPE_PRIMITIVE, V_ASN1_INTEGER, NULL, 0, &long_pf, 0, "ZLONG" -ASN1_ITEM_end(ZLONG) - -static int long_new(ASN1_VALUE **pval, const ASN1_ITEM *it) -{ - *(long *)pval = it->size; - return 1; -} - -static void long_free(ASN1_VALUE **pval, const ASN1_ITEM *it) -{ - *(long *)pval = it->size; -} - -static int long_i2c(ASN1_VALUE **pval, unsigned char *cont, int *putype, - const ASN1_ITEM *it) -{ - long ltmp; - unsigned long utmp; - int clen, pad, i; - /* this exists to bypass broken gcc optimization */ - char *cp = (char *)pval; - - /* use memcpy, because we may not be long aligned */ - OPENSSL_memcpy(<mp, cp, sizeof(long)); - - if (ltmp == it->size) - return -1; - /* - * Convert the long to positive: we subtract one if negative so we can - * cleanly handle the padding if only the MSB of the leading octet is - * set. - */ - if (ltmp < 0) - utmp = -ltmp - 1; - else - utmp = ltmp; - clen = BN_num_bits_word(utmp); - /* If MSB of leading octet set we need to pad */ - if (!(clen & 0x7)) - pad = 1; - else - pad = 0; - - /* Convert number of bits to number of octets */ - clen = (clen + 7) >> 3; - - if (cont) { - if (pad) - *cont++ = (ltmp < 0) ? 
0xff : 0; - for (i = clen - 1; i >= 0; i--) { - cont[i] = (unsigned char)(utmp & 0xff); - if (ltmp < 0) - cont[i] ^= 0xff; - utmp >>= 8; - } - } - return clen + pad; -} - -static int long_c2i(ASN1_VALUE **pval, const unsigned char *cont, int len, - int utype, char *free_cont, const ASN1_ITEM *it) -{ - int neg, i; - long ltmp; - unsigned long utmp = 0; - char *cp = (char *)pval; - if (len > (int)sizeof(long)) { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_INTEGER_TOO_LARGE_FOR_LONG); - return 0; - } - /* Is it negative? */ - if (len && (cont[0] & 0x80)) - neg = 1; - else - neg = 0; - utmp = 0; - for (i = 0; i < len; i++) { - utmp <<= 8; - if (neg) - utmp |= cont[i] ^ 0xff; - else - utmp |= cont[i]; - } - ltmp = (long)utmp; - if (neg) { - ltmp++; - ltmp = -ltmp; - } - if (ltmp == it->size) { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_INTEGER_TOO_LARGE_FOR_LONG); - return 0; - } - OPENSSL_memcpy(cp, <mp, sizeof(long)); - return 1; -} - -static int long_print(BIO *out, ASN1_VALUE **pval, const ASN1_ITEM *it, - int indent, const ASN1_PCTX *pctx) -{ - return BIO_printf(out, "%ld\n", *(long *)pval); -} diff --git a/Sources/BoringSSL/crypto/base64/base64.c b/Sources/BoringSSL/crypto/base64/base64.c index 7afadf746..b701b0d12 100644 --- a/Sources/BoringSSL/crypto/base64/base64.c +++ b/Sources/BoringSSL/crypto/base64/base64.c @@ -65,12 +65,38 @@ #include "../internal.h" -/* Encoding. */ +// constant_time_lt_args_8 behaves like |constant_time_lt_8| but takes |uint8_t| +// arguments for a slightly simpler implementation. +static inline uint8_t constant_time_lt_args_8(uint8_t a, uint8_t b) { + crypto_word_t aw = a; + crypto_word_t bw = b; + // |crypto_word_t| is larger than |uint8_t|, so |aw| and |bw| have the same + // MSB. |aw| < |bw| iff MSB(|aw| - |bw|) is 1. + return constant_time_msb_w(aw - bw); +} -static const unsigned char data_bin2ascii[65] = - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +// constant_time_in_range_8 returns |CONSTTIME_TRUE_8| if |min| <= |a| <= |max| +// and |CONSTTIME_FALSE_8| otherwise. +static inline uint8_t constant_time_in_range_8(uint8_t a, uint8_t min, + uint8_t max) { + a -= min; + return constant_time_lt_args_8(a, max - min + 1); +} -#define conv_bin2ascii(a) (data_bin2ascii[(a) & 0x3f]) +// Encoding. + +static uint8_t conv_bin2ascii(uint8_t a) { + // Since PEM is sometimes used to carry private keys, we encode base64 data + // itself in constant-time. + a &= 0x3f; + uint8_t ret = constant_time_select_8(constant_time_eq_8(a, 62), '+', '/'); + ret = + constant_time_select_8(constant_time_lt_args_8(a, 62), a - 52 + '0', ret); + ret = + constant_time_select_8(constant_time_lt_args_8(a, 52), a - 26 + 'a', ret); + ret = constant_time_select_8(constant_time_lt_args_8(a, 26), a + 'A', ret); + return ret; +} OPENSSL_COMPILE_ASSERT(sizeof(((EVP_ENCODE_CTX *)(NULL))->data) % 3 == 0, data_length_must_be_multiple_of_base64_chunk_size); @@ -157,8 +183,8 @@ void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len, ctx->data_used = (unsigned)in_len; if (total > INT_MAX) { - /* We cannot signal an error, but we can at least avoid making *out_len - * negative. */ + // We cannot signal an error, but we can at least avoid making *out_len + // negative. total = 0; } *out_len = (int)total; @@ -175,8 +201,8 @@ void EVP_EncodeFinal(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len) { out[encoded] = '\0'; ctx->data_used = 0; - /* ctx->data_used is bounded by sizeof(ctx->data), so this does not - * overflow. 
*/ + // ctx->data_used is bounded by sizeof(ctx->data), so this does not + // overflow. assert(encoded <= INT_MAX); *out_len = (int)encoded; } @@ -214,7 +240,7 @@ size_t EVP_EncodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { } -/* Decoding. */ +// Decoding. int EVP_DecodedLength(size_t *out_len, size_t len) { if (len % 4 != 0) { @@ -229,35 +255,31 @@ void EVP_DecodeInit(EVP_ENCODE_CTX *ctx) { OPENSSL_memset(ctx, 0, sizeof(EVP_ENCODE_CTX)); } -/* kBase64ASCIIToBinData maps characters (c < 128) to their base64 value, or - * else 0xff if they are invalid. As a special case, the padding character - * ('=') is mapped to zero. */ -static const uint8_t kBase64ASCIIToBinData[128] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3e, 0xff, 0xff, 0xff, 0x3f, - 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0xff, 0xff, - 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, - 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, - 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, - 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, 0x33, 0xff, 0xff, 0xff, 0xff, 0xff, -}; - static uint8_t base64_ascii_to_bin(uint8_t a) { - if (a >= 128) { - return 0xFF; - } - - return kBase64ASCIIToBinData[a]; + // Since PEM is sometimes used to carry private keys, we decode base64 data + // itself in constant-time. + const uint8_t is_upper = constant_time_in_range_8(a, 'A', 'Z'); + const uint8_t is_lower = constant_time_in_range_8(a, 'a', 'z'); + const uint8_t is_digit = constant_time_in_range_8(a, '0', '9'); + const uint8_t is_plus = constant_time_eq_8(a, '+'); + const uint8_t is_slash = constant_time_eq_8(a, '/'); + const uint8_t is_equals = constant_time_eq_8(a, '='); + + uint8_t ret = 0xff; // 0xff signals invalid. + ret = constant_time_select_8(is_upper, a - 'A', ret); // [0,26) + ret = constant_time_select_8(is_lower, a - 'a' + 26, ret); // [26,52) + ret = constant_time_select_8(is_digit, a - '0' + 52, ret); // [52,62) + ret = constant_time_select_8(is_plus, 62, ret); + ret = constant_time_select_8(is_slash, 63, ret); + // Padding maps to zero, to be further handled by the caller. + ret = constant_time_select_8(is_equals, 0, ret); + return ret; } -/* base64_decode_quad decodes a single “quad” (i.e. four characters) of base64 - * data and writes up to three bytes to |out|. It sets |*out_num_bytes| to the - * number of bytes written, which will be less than three if the quad ended - * with padding. It returns one on success or zero on error. */ +// base64_decode_quad decodes a single “quad” (i.e. four characters) of base64 +// data and writes up to three bytes to |out|. It sets |*out_num_bytes| to the +// number of bytes written, which will be less than three if the quad ended +// with padding. It returns one on success or zero on error. static int base64_decode_quad(uint8_t *out, size_t *out_num_bytes, const uint8_t *in) { const uint8_t a = base64_ascii_to_bin(in[0]); @@ -278,20 +300,20 @@ static int base64_decode_quad(uint8_t *out, size_t *out_num_bytes, switch (padding_pattern) { case 0: - /* The common case of no padding. */ + // The common case of no padding. 
*out_num_bytes = 3; out[0] = v >> 16; out[1] = v >> 8; out[2] = v; break; - case 1: /* xxx= */ + case 1: // xxx= *out_num_bytes = 2; out[0] = v >> 16; out[1] = v >> 8; break; - case 3: /* xx== */ + case 3: // xx== *out_num_bytes = 1; out[0] = v >> 16; break; @@ -322,7 +344,7 @@ int EVP_DecodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len, continue; } - if (base64_ascii_to_bin(c) == 0xff || ctx->eof_seen) { + if (ctx->eof_seen) { ctx->error_encountered = 1; return -1; } @@ -402,7 +424,7 @@ int EVP_DecodeBase64(uint8_t *out, size_t *out_len, size_t max_out, } int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { - /* Trim spaces and tabs from the beginning of the input. */ + // Trim spaces and tabs from the beginning of the input. while (src_len > 0) { if (src[0] != ' ' && src[0] != '\t') { break; @@ -412,7 +434,7 @@ int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { src_len--; } - /* Trim newlines, spaces and tabs from the end of the line. */ + // Trim newlines, spaces and tabs from the end of the line. while (src_len > 0) { switch (src[src_len-1]) { case ' ': @@ -433,8 +455,8 @@ int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len) { return -1; } - /* EVP_DecodeBlock does not take padding into account, so put the - * NULs back in... so the caller can strip them back out. */ + // EVP_DecodeBlock does not take padding into account, so put the + // NULs back in... so the caller can strip them back out. while (dst_len % 3 != 0) { dst[dst_len++] = '\0'; } diff --git a/Sources/BoringSSL/crypto/bio/bio.c b/Sources/BoringSSL/crypto/bio/bio.c index 5cab843b5..3e788b876 100644 --- a/Sources/BoringSSL/crypto/bio/bio.c +++ b/Sources/BoringSSL/crypto/bio/bio.c @@ -96,13 +96,6 @@ int BIO_free(BIO *bio) { return 0; } - if (bio->callback != NULL) { - int i = (int)bio->callback(bio, BIO_CB_FREE, NULL, 0, 0, 1); - if (i <= 0) { - return i; - } - } - next_bio = BIO_pop(bio); if (bio->method != NULL && bio->method->destroy != NULL) { @@ -127,64 +120,61 @@ void BIO_free_all(BIO *bio) { BIO_free(bio); } -static int bio_io(BIO *bio, void *buf, int len, size_t method_offset, - int callback_flags, size_t *num) { - int i; - typedef int (*io_func_t)(BIO *, char *, int); - io_func_t io_func = NULL; - - if (bio != NULL && bio->method != NULL) { - io_func = - *((const io_func_t *)(((const uint8_t *)bio->method) + method_offset)); - } - - if (io_func == NULL) { +int BIO_read(BIO *bio, void *buf, int len) { + if (bio == NULL || bio->method == NULL || bio->method->bread == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD); return -2; } - - if (bio->callback != NULL) { - i = (int) bio->callback(bio, callback_flags, buf, len, 0L, 1L); - if (i <= 0) { - return i; - } - } - if (!bio->init) { OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED); return -2; } - - i = 0; - if (buf != NULL && len > 0) { - i = io_func(bio, buf, len); - } - - if (i > 0) { - *num += i; + if (len <= 0) { + return 0; } - - if (bio->callback != NULL) { - i = (int)(bio->callback(bio, callback_flags | BIO_CB_RETURN, buf, len, 0L, - (long)i)); + int ret = bio->method->bread(bio, buf, len); + if (ret > 0) { + bio->num_read += ret; } - - return i; -} - -int BIO_read(BIO *bio, void *buf, int len) { - return bio_io(bio, buf, len, offsetof(BIO_METHOD, bread), BIO_CB_READ, - &bio->num_read); + return ret; } int BIO_gets(BIO *bio, char *buf, int len) { - return bio_io(bio, buf, len, offsetof(BIO_METHOD, bgets), BIO_CB_GETS, - &bio->num_read); + if (bio == NULL || bio->method == NULL || 
bio->method->bgets == NULL) { + OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD); + return -2; + } + if (!bio->init) { + OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED); + return -2; + } + if (len <= 0) { + return 0; + } + int ret = bio->method->bgets(bio, buf, len); + if (ret > 0) { + bio->num_read += ret; + } + return ret; } int BIO_write(BIO *bio, const void *in, int inl) { - return bio_io(bio, (char *)in, inl, offsetof(BIO_METHOD, bwrite), - BIO_CB_WRITE, &bio->num_write); + if (bio == NULL || bio->method == NULL || bio->method->bwrite == NULL) { + OPENSSL_PUT_ERROR(BIO, BIO_R_UNSUPPORTED_METHOD); + return -2; + } + if (!bio->init) { + OPENSSL_PUT_ERROR(BIO, BIO_R_UNINITIALIZED); + return -2; + } + if (inl <= 0) { + return 0; + } + int ret = bio->method->bwrite(bio, in, inl); + if (ret > 0) { + bio->num_write += ret; + } + return ret; } int BIO_puts(BIO *bio, const char *in) { @@ -196,8 +186,6 @@ int BIO_flush(BIO *bio) { } long BIO_ctrl(BIO *bio, int cmd, long larg, void *parg) { - long ret; - if (bio == NULL) { return 0; } @@ -207,20 +195,7 @@ long BIO_ctrl(BIO *bio, int cmd, long larg, void *parg) { return -2; } - if (bio->callback != NULL) { - ret = bio->callback(bio, BIO_CB_CTRL, parg, cmd, larg, 1); - if (ret <= 0) { - return ret; - } - } - - ret = bio->method->ctrl(bio, cmd, larg, parg); - - if (bio->callback != NULL) { - ret = bio->callback(bio, BIO_CB_CTRL | BIO_CB_RETURN, parg, cmd, larg, ret); - } - - return ret; + return bio->method->ctrl(bio, cmd, larg, parg); } char *BIO_ptr_ctrl(BIO *b, int cmd, long larg) { @@ -305,9 +280,6 @@ void BIO_copy_next_retry(BIO *bio) { } long BIO_callback_ctrl(BIO *bio, int cmd, bio_info_cb fp) { - long ret; - bio_info_cb cb; - if (bio == NULL) { return 0; } @@ -317,22 +289,7 @@ long BIO_callback_ctrl(BIO *bio, int cmd, bio_info_cb fp) { return 0; } - cb = bio->callback; - - if (cb != NULL) { - ret = cb(bio, BIO_CB_CTRL, (void *)&fp, cmd, 0, 1L); - if (ret <= 0) { - return ret; - } - } - - ret = bio->method->callback_ctrl(bio, cmd, fp); - - if (cb != NULL) { - ret = cb(bio, BIO_CB_CTRL | BIO_CB_RETURN, (void *)&fp, cmd, 0, ret); - } - - return ret; + return bio->method->callback_ctrl(bio, cmd, fp); } size_t BIO_pending(const BIO *bio) { @@ -363,18 +320,6 @@ int BIO_set_close(BIO *bio, int close_flag) { return BIO_ctrl(bio, BIO_CTRL_SET_CLOSE, close_flag, NULL); } -void BIO_set_callback(BIO *bio, bio_info_cb callback_func) { - bio->callback = callback_func; -} - -void BIO_set_callback_arg(BIO *bio, char *arg) { - bio->cb_arg = arg; -} - -char *BIO_get_callback_arg(const BIO *bio) { - return bio->cb_arg; -} - OPENSSL_EXPORT size_t BIO_number_read(const BIO *bio) { return bio->num_read; } @@ -464,14 +409,14 @@ void ERR_print_errors(BIO *bio) { ERR_print_errors_cb(print_bio, bio); } -/* bio_read_all reads everything from |bio| and prepends |prefix| to it. On - * success, |*out| is set to an allocated buffer (which should be freed with - * |OPENSSL_free|), |*out_len| is set to its length and one is returned. The - * buffer will contain |prefix| followed by the contents of |bio|. On failure, - * zero is returned. - * - * The function will fail if the size of the output would equal or exceed - * |max_len|. */ +// bio_read_all reads everything from |bio| and prepends |prefix| to it. On +// success, |*out| is set to an allocated buffer (which should be freed with +// |OPENSSL_free|), |*out_len| is set to its length and one is returned. The +// buffer will contain |prefix| followed by the contents of |bio|. On failure, +// zero is returned. 
+// +// The function will fail if the size of the output would equal or exceed +// |max_len|. static int bio_read_all(BIO *bio, uint8_t **out, size_t *out_len, const uint8_t *prefix, size_t prefix_len, size_t max_len) { @@ -535,20 +480,20 @@ int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, size_t max_len) { const uint8_t length_byte = header[1]; if ((tag & 0x1f) == 0x1f) { - /* Long form tags are not supported. */ + // Long form tags are not supported. return 0; } size_t len, header_len; if ((length_byte & 0x80) == 0) { - /* Short form length. */ + // Short form length. len = length_byte; header_len = kInitialHeaderLen; } else { const size_t num_bytes = length_byte & 0x7f; if ((tag & 0x20 /* constructed */) != 0 && num_bytes == 0) { - /* indefinite length. */ + // indefinite length. return bio_read_all(bio, out, out_len, header, kInitialHeaderLen, max_len); } @@ -571,12 +516,12 @@ int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, size_t max_len) { } if (len32 < 128) { - /* Length should have used short-form encoding. */ + // Length should have used short-form encoding. return 0; } if ((len32 >> ((num_bytes-1)*8)) == 0) { - /* Length should have been at least one byte shorter. */ + // Length should have been at least one byte shorter. return 0; } @@ -610,3 +555,82 @@ void BIO_set_retry_special(BIO *bio) { } int BIO_set_write_buffer_size(BIO *bio, int buffer_size) { return 0; } + +static struct CRYPTO_STATIC_MUTEX g_index_lock = CRYPTO_STATIC_MUTEX_INIT; +static int g_index = BIO_TYPE_START; + +int BIO_get_new_index(void) { + CRYPTO_STATIC_MUTEX_lock_write(&g_index_lock); + // If |g_index| exceeds 255, it will collide with the flags bits. + int ret = g_index > 255 ? -1 : g_index++; + CRYPTO_STATIC_MUTEX_unlock_write(&g_index_lock); + return ret; +} + +BIO_METHOD *BIO_meth_new(int type, const char *name) { + BIO_METHOD *method = OPENSSL_malloc(sizeof(BIO_METHOD)); + if (method == NULL) { + return NULL; + } + OPENSSL_memset(method, 0, sizeof(BIO_METHOD)); + method->type = type; + method->name = name; + return method; +} + +void BIO_meth_free(BIO_METHOD *method) { + OPENSSL_free(method); +} + +int BIO_meth_set_create(BIO_METHOD *method, + int (*create)(BIO *)) { + method->create = create; + return 1; +} + +int BIO_meth_set_destroy(BIO_METHOD *method, + int (*destroy)(BIO *)) { + method->destroy = destroy; + return 1; +} + +int BIO_meth_set_write(BIO_METHOD *method, + int (*write)(BIO *, const char *, int)) { + method->bwrite = write; + return 1; +} + +int BIO_meth_set_read(BIO_METHOD *method, + int (*read)(BIO *, char *, int)) { + method->bread = read; + return 1; +} + +int BIO_meth_set_gets(BIO_METHOD *method, + int (*gets)(BIO *, char *, int)) { + method->bgets = gets; + return 1; +} + +int BIO_meth_set_ctrl(BIO_METHOD *method, + long (*ctrl)(BIO *, int, long, void *)) { + method->ctrl = ctrl; + return 1; +} + +void BIO_set_data(BIO *bio, void *ptr) { bio->ptr = ptr; } + +void *BIO_get_data(BIO *bio) { return bio->ptr; } + +void BIO_set_init(BIO *bio, int init) { bio->init = init; } + +int BIO_get_init(BIO *bio) { return bio->init; } + +void BIO_set_shutdown(BIO *bio, int shutdown) { bio->shutdown = shutdown; } + +int BIO_get_shutdown(BIO *bio) { return bio->shutdown; } + +int BIO_meth_set_puts(BIO_METHOD *method, int (*puts)(BIO *, const char *)) { + // Ignore the parameter. We implement |BIO_puts| using |BIO_write|. 
+ return 1; +} diff --git a/Sources/BoringSSL/crypto/bio/bio_mem.c b/Sources/BoringSSL/crypto/bio/bio_mem.c index 1cba8a89a..08dd6e9d7 100644 --- a/Sources/BoringSSL/crypto/bio/bio_mem.c +++ b/Sources/BoringSSL/crypto/bio/bio_mem.c @@ -82,16 +82,16 @@ BIO *BIO_new_mem_buf(const void *buf, int len) { } b = (BUF_MEM *)ret->ptr; - /* BIO_FLAGS_MEM_RDONLY ensures |b->data| is not written to. */ + // BIO_FLAGS_MEM_RDONLY ensures |b->data| is not written to. b->data = (void *)buf; b->length = size; b->max = size; ret->flags |= BIO_FLAGS_MEM_RDONLY; - /* |num| is used to store the value that this BIO will return when it runs - * out of data. If it's negative then the retry flags will also be set. Since - * this is static data, retrying wont help */ + // |num| is used to store the value that this BIO will return when it runs + // out of data. If it's negative then the retry flags will also be set. Since + // this is static data, retrying wont help ret->num = 0; return ret; @@ -105,8 +105,8 @@ static int mem_new(BIO *bio) { return 0; } - /* |shutdown| is used to store the close flag: whether the BIO has ownership - * of the BUF_MEM. */ + // |shutdown| is used to store the close flag: whether the BIO has ownership + // of the BUF_MEM. bio->shutdown = 1; bio->init = 1; bio->num = -1; @@ -214,8 +214,8 @@ static int mem_gets(BIO *bio, char *buf, int size) { } } - /* i is now the max num of bytes to copy, either j or up to and including the - * first newline */ + // i is now the max num of bytes to copy, either j or up to and including the + // first newline i = mem_read(bio, buf, i); if (i > 0) { @@ -233,7 +233,7 @@ static long mem_ctrl(BIO *bio, int cmd, long num, void *ptr) { switch (cmd) { case BIO_CTRL_RESET: if (b->data != NULL) { - /* For read only case reset to the start again */ + // For read only case reset to the start again if (bio->flags & BIO_FLAGS_MEM_RDONLY) { b->data -= b->max - b->length; b->length = b->max; diff --git a/Sources/BoringSSL/crypto/bio/connect.c b/Sources/BoringSSL/crypto/bio/connect.c index d40dd530d..0b60f6a95 100644 --- a/Sources/BoringSSL/crypto/bio/connect.c +++ b/Sources/BoringSSL/crypto/bio/connect.c @@ -98,12 +98,12 @@ typedef struct bio_connect_st { struct sockaddr_storage them; socklen_t them_length; - /* the file descriptor is kept in bio->num in order to match the socket - * BIO. */ + // the file descriptor is kept in bio->num in order to match the socket + // BIO. - /* info_callback is called when the connection is initially made - * callback(BIO,state,ret); The callback should return 'ret', state is for - * compatibility with the SSL info_callback. */ + // info_callback is called when the connection is initially made + // callback(BIO,state,ret); The callback should return 'ret', state is for + // compatibility with the SSL info_callback. int (*info_callback)(const BIO *bio, int state, int ret); } BIO_CONNECT; @@ -113,9 +113,9 @@ static int closesocket(int sock) { } #endif -/* split_host_and_port sets |*out_host| and |*out_port| to the host and port - * parsed from |name|. It returns one on success or zero on error. Even when - * successful, |*out_port| may be NULL on return if no port was specified. */ +// split_host_and_port sets |*out_host| and |*out_port| to the host and port +// parsed from |name|. It returns one on success or zero on error. Even when +// successful, |*out_port| may be NULL on return if no port was specified. 
static int split_host_and_port(char **out_host, char **out_port, const char *name) { const char *host, *port = NULL; size_t host_len = 0; @@ -123,24 +123,24 @@ static int split_host_and_port(char **out_host, char **out_port, const char *nam *out_host = NULL; *out_port = NULL; - if (name[0] == '[') { /* bracketed IPv6 address */ + if (name[0] == '[') { // bracketed IPv6 address const char *close = strchr(name, ']'); if (close == NULL) { return 0; } host = name + 1; host_len = close - host; - if (close[1] == ':') { /* [IP]:port */ + if (close[1] == ':') { // [IP]:port port = close + 2; } else if (close[1] != 0) { return 0; } } else { const char *colon = strchr(name, ':'); - if (colon == NULL || strchr(colon + 1, ':') != NULL) { /* IPv6 address */ + if (colon == NULL || strchr(colon + 1, ':') != NULL) { // IPv6 address host = name; host_len = strlen(name); - } else { /* host:port */ + } else { // host:port host = name; host_len = colon - name; port = colon + 1; @@ -175,9 +175,9 @@ static int conn_state(BIO *bio, BIO_CONNECT *c) { for (;;) { switch (c->state) { case BIO_CONN_S_BEFORE: - /* If there's a hostname and a port, assume that both are - * exactly what they say. If there is only a hostname, try - * (just once) to split it into a hostname and port. */ + // If there's a hostname and a port, assume that both are + // exactly what they say. If there is only a hostname, try + // (just once) to split it into a hostname and port. if (c->param_hostname == NULL) { OPENSSL_PUT_ERROR(BIO, BIO_R_NO_HOSTNAME_SPECIFIED); @@ -330,7 +330,7 @@ static void conn_close_socket(BIO *bio) { return; } - /* Only do a shutdown if things were established */ + // Only do a shutdown if things were established if (c->state == BIO_CONN_S_OK) { shutdown(bio->num, 2); } @@ -415,7 +415,7 @@ static long conn_ctrl(BIO *bio, int cmd, long num, void *ptr) { bio->flags = 0; break; case BIO_C_DO_STATE_MACHINE: - /* use this one to start the connection */ + // use this one to start the connection if (data->state != BIO_CONN_S_OK) { ret = (long)conn_state(bio, data); } else { diff --git a/Sources/BoringSSL/crypto/bio/fd.c b/Sources/BoringSSL/crypto/bio/fd.c index 4e9eeacfa..ea5bfd8cc 100644 --- a/Sources/BoringSSL/crypto/bio/fd.c +++ b/Sources/BoringSSL/crypto/bio/fd.c @@ -138,7 +138,7 @@ BIO *BIO_new_fd(int fd, int close_flag) { } static int fd_new(BIO *bio) { - /* num is used to store the file descriptor. */ + // num is used to store the file descriptor. bio->num = -1; return 1; } @@ -190,6 +190,7 @@ static long fd_ctrl(BIO *b, int cmd, long num, void *ptr) { switch (cmd) { case BIO_CTRL_RESET: num = 0; + OPENSSL_FALLTHROUGH; case BIO_C_FILE_SEEK: ret = 0; if (b->init) { diff --git a/Sources/BoringSSL/crypto/bio/file.c b/Sources/BoringSSL/crypto/bio/file.c index 3580cd1c2..278be1880 100644 --- a/Sources/BoringSSL/crypto/bio/file.c +++ b/Sources/BoringSSL/crypto/bio/file.c @@ -55,18 +55,17 @@ * [including the GNU Public Licence.] */ #if defined(__linux) || defined(__sun) || defined(__hpux) -/* Following definition aliases fopen to fopen64 on above mentioned - * platforms. This makes it possible to open and sequentially access - * files larger than 2GB from 32-bit application. It does not allow to - * traverse them beyond 2GB with fseek/ftell, but on the other hand *no* - * 32-bit platform permits that, not with fseek/ftell. Not to mention - * that breaking 2GB limit for seeking would require surgery to *our* - * API. 
But sequential access suffices for practical cases when you - * can run into large files, such as fingerprinting, so we can let API - * alone. For reference, the list of 32-bit platforms which allow for - * sequential access of large files without extra "magic" comprise *BSD, - * Darwin, IRIX... - */ +// Following definition aliases fopen to fopen64 on above mentioned +// platforms. This makes it possible to open and sequentially access +// files larger than 2GB from 32-bit application. It does not allow to +// traverse them beyond 2GB with fseek/ftell, but on the other hand *no* +// 32-bit platform permits that, not with fseek/ftell. Not to mention +// that breaking 2GB limit for seeking would require surgery to *our* +// API. But sequential access suffices for practical cases when you +// can run into large files, such as fingerprinting, so we can let API +// alone. For reference, the list of 32-bit platforms which allow for +// sequential access of large files without extra "magic" comprise *BSD, +// Darwin, IRIX... #ifndef _FILE_OFFSET_BITS #define _FILE_OFFSET_BITS 64 #endif @@ -157,7 +156,7 @@ static int file_read(BIO *b, char *out, int outl) { return -1; } - /* fread reads at most |outl| bytes, so |ret| fits in an int. */ + // fread reads at most |outl| bytes, so |ret| fits in an int. return (int)ret; } @@ -184,6 +183,7 @@ static long file_ctrl(BIO *b, int cmd, long num, void *ptr) { switch (cmd) { case BIO_CTRL_RESET: num = 0; + OPENSSL_FALLTHROUGH; case BIO_C_FILE_SEEK: ret = (long)fseek(fp, num, 0); break; @@ -232,7 +232,7 @@ static long file_ctrl(BIO *b, int cmd, long num, void *ptr) { b->init = 1; break; case BIO_C_GET_FILE_PTR: - /* the ptr parameter is actually a FILE ** in this case. */ + // the ptr parameter is actually a FILE ** in this case. if (ptr != NULL) { fpp = (FILE **)ptr; *fpp = (FILE *)b->ptr; diff --git a/Sources/BoringSSL/crypto/bio/hexdump.c b/Sources/BoringSSL/crypto/bio/hexdump.c index d55df6209..6d928bc0c 100644 --- a/Sources/BoringSSL/crypto/bio/hexdump.c +++ b/Sources/BoringSSL/crypto/bio/hexdump.c @@ -62,12 +62,12 @@ #include "../internal.h" -/* hexdump_ctx contains the state of a hexdump. */ +// hexdump_ctx contains the state of a hexdump. struct hexdump_ctx { BIO *bio; - char right_chars[18]; /* the contents of the right-hand side, ASCII dump. */ - unsigned used; /* number of bytes in the current line. */ - size_t n; /* number of bytes total. */ + char right_chars[18]; // the contents of the right-hand side, ASCII dump. + unsigned used; // number of bytes in the current line. + size_t n; // number of bytes total. unsigned indent; }; @@ -84,21 +84,20 @@ static char to_char(uint8_t b) { return b; } -/* hexdump_write adds |len| bytes of |data| to the current hex dump described by - * |ctx|. */ +// hexdump_write adds |len| bytes of |data| to the current hex dump described by +// |ctx|. static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data, size_t len) { char buf[10]; unsigned l; - /* Output lines look like: - * 00000010 2e 2f 30 31 32 33 34 35 36 37 38 ... 3c 3d // |./0123456789:;<=| - * ^ offset ^ extra space ^ ASCII of line - */ + // Output lines look like: + // 00000010 2e 2f 30 31 32 33 34 35 36 37 38 ... 3c 3d // |./0123456789:;<=| + // ^ offset ^ extra space ^ ASCII of line for (size_t i = 0; i < len; i++) { if (ctx->used == 0) { - /* The beginning of a line. */ + // The beginning of a line. 
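For reference, the line format documented in the comment above is what the public BIO_hexdump helper produces; a short sketch, assuming the usual BoringSSL declarations BIO *BIO_new_fp(FILE *, int) and int BIO_hexdump(BIO *, const uint8_t *, size_t, unsigned) in <openssl/bio.h>:

#include <openssl/bio.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  BIO *out = BIO_new_fp(stdout, BIO_NOCLOSE);
  if (out == NULL) {
    return 1;
  }
  static const char kMsg[] = "./0123456789:;<=  hexdump demo";
  /* Emits lines roughly of the form
   *   00000000  2e 2f 30 31 32 33 34 35  36 37 38 39 3a 3b 3c 3d  |./0123456789:;<=|
   * i.e. offset, hex bytes with an extra gap after the 8th byte, ASCII column. */
  BIO_hexdump(out, (const uint8_t *)kMsg, strlen(kMsg), 0 /* indent */);
  BIO_free(out);
  return 0;
}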
BIO_indent(ctx->bio, ctx->indent, UINT_MAX); hexbyte(&buf[0], ctx->n >> 24); @@ -115,12 +114,12 @@ static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data, buf[2] = ' '; l = 3; if (ctx->used == 7) { - /* There's an additional space after the 8th byte. */ + // There's an additional space after the 8th byte. buf[3] = ' '; l = 4; } else if (ctx->used == 15) { - /* At the end of the line there's an extra space and the bar for the - * right column. */ + // At the end of the line there's an extra space and the bar for the + // right column. buf[3] = ' '; buf[4] = '|'; l = 5; @@ -145,9 +144,9 @@ static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data, return 1; } -/* finish flushes any buffered data in |ctx|. */ +// finish flushes any buffered data in |ctx|. static int finish(struct hexdump_ctx *ctx) { - /* See the comments in |hexdump| for the details of this format. */ + // See the comments in |hexdump| for the details of this format. const unsigned n_bytes = ctx->used; unsigned l; char buf[5]; diff --git a/Sources/BoringSSL/crypto/bio/internal.h b/Sources/BoringSSL/crypto/bio/internal.h index 4ec77fadb..8ed27dae5 100644 --- a/Sources/BoringSSL/crypto/bio/internal.h +++ b/Sources/BoringSSL/crypto/bio/internal.h @@ -61,7 +61,7 @@ #if !defined(OPENSSL_WINDOWS) #if defined(OPENSSL_PNACL) -/* newlib uses u_short in socket.h without defining it. */ +// newlib uses u_short in socket.h without defining it. typedef unsigned short u_short; #endif #include @@ -78,34 +78,34 @@ extern "C" { #endif -/* BIO_ip_and_port_to_socket_and_addr creates a socket and fills in |*out_addr| - * and |*out_addr_length| with the correct values for connecting to |hostname| - * on |port_str|. It returns one on success or zero on error. */ +// BIO_ip_and_port_to_socket_and_addr creates a socket and fills in |*out_addr| +// and |*out_addr_length| with the correct values for connecting to |hostname| +// on |port_str|. It returns one on success or zero on error. int bio_ip_and_port_to_socket_and_addr(int *out_sock, struct sockaddr_storage *out_addr, socklen_t *out_addr_length, const char *hostname, const char *port_str); -/* BIO_socket_nbio sets whether |sock| is non-blocking. It returns one on - * success and zero otherwise. */ +// BIO_socket_nbio sets whether |sock| is non-blocking. It returns one on +// success and zero otherwise. int bio_socket_nbio(int sock, int on); -/* BIO_clear_socket_error clears the last system socket error. - * - * TODO(fork): remove all callers of this. */ +// BIO_clear_socket_error clears the last system socket error. +// +// TODO(fork): remove all callers of this. void bio_clear_socket_error(void); -/* BIO_sock_error returns the last socket error on |sock|. */ +// BIO_sock_error returns the last socket error on |sock|. int bio_sock_error(int sock); -/* BIO_fd_should_retry returns non-zero if |return_value| indicates an error - * and |errno| indicates that it's non-fatal. */ +// BIO_fd_should_retry returns non-zero if |return_value| indicates an error +// and |errno| indicates that it's non-fatal. int bio_fd_should_retry(int return_value); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_BIO_INTERNAL_H */ +#endif // OPENSSL_HEADER_BIO_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/bio/pair.c b/Sources/BoringSSL/crypto/bio/pair.c index 8ba382d19..f5057ed36 100644 --- a/Sources/BoringSSL/crypto/bio/pair.c +++ b/Sources/BoringSSL/crypto/bio/pair.c @@ -63,22 +63,22 @@ struct bio_bio_st { - BIO *peer; /* NULL if buf == NULL. 
- * If peer != NULL, then peer->ptr is also a bio_bio_st, - * and its "peer" member points back to us. - * peer != NULL iff init != 0 in the BIO. */ - - /* This is for what we write (i.e. reading uses peer's struct): */ - int closed; /* valid iff peer != NULL */ - size_t len; /* valid iff buf != NULL; 0 if peer == NULL */ - size_t offset; /* valid iff buf != NULL; 0 if len == 0 */ + BIO *peer; // NULL if buf == NULL. + // If peer != NULL, then peer->ptr is also a bio_bio_st, + // and its "peer" member points back to us. + // peer != NULL iff init != 0 in the BIO. + + // This is for what we write (i.e. reading uses peer's struct): + int closed; // valid iff peer != NULL + size_t len; // valid iff buf != NULL; 0 if peer == NULL + size_t offset; // valid iff buf != NULL; 0 if len == 0 size_t size; - uint8_t *buf; /* "size" elements (if != NULL) */ + uint8_t *buf; // "size" elements (if != NULL) - size_t request; /* valid iff peer != NULL; 0 if len != 0, - * otherwise set by peer to number of bytes - * it (unsuccessfully) tried to read, - * never more than buffer space (size-len) warrants. */ + size_t request; // valid iff peer != NULL; 0 if len != 0, + // otherwise set by peer to number of bytes + // it (unsuccessfully) tried to read, + // never more than buffer space (size-len) warrants. }; static int bio_new(BIO *bio) { @@ -90,7 +90,7 @@ static int bio_new(BIO *bio) { } OPENSSL_memset(b, 0, sizeof(struct bio_bio_st)); - b->size = 17 * 1024; /* enough for one TLS record (just a default) */ + b->size = 17 * 1024; // enough for one TLS record (just a default) bio->ptr = b; return 1; } @@ -165,7 +165,7 @@ static int bio_read(BIO *bio, char *buf, int size_) { assert(peer_b != NULL); assert(peer_b->buf != NULL); - peer_b->request = 0; /* will be set in "retry_read" situation */ + peer_b->request = 0; // will be set in "retry_read" situation if (buf == NULL || size == 0) { return 0; @@ -173,30 +173,30 @@ static int bio_read(BIO *bio, char *buf, int size_) { if (peer_b->len == 0) { if (peer_b->closed) { - return 0; /* writer has closed, and no data is left */ + return 0; // writer has closed, and no data is left } else { - BIO_set_retry_read(bio); /* buffer is empty */ + BIO_set_retry_read(bio); // buffer is empty if (size <= peer_b->size) { peer_b->request = size; } else { - /* don't ask for more than the peer can - * deliver in one write */ + // don't ask for more than the peer can + // deliver in one write peer_b->request = peer_b->size; } return -1; } } - /* we can read */ + // we can read if (peer_b->len < size) { size = peer_b->len; } - /* now read "size" bytes */ + // now read "size" bytes rest = size; assert(rest > 0); - /* one or two iterations */ + // one or two iterations do { size_t chunk; @@ -204,7 +204,7 @@ static int bio_read(BIO *bio, char *buf, int size_) { if (peer_b->offset + rest <= peer_b->size) { chunk = rest; } else { - /* wrap around ring buffer */ + // wrap around ring buffer chunk = peer_b->size - peer_b->offset; } assert(peer_b->offset + chunk <= peer_b->size); @@ -220,7 +220,7 @@ static int bio_read(BIO *bio, char *buf, int size_) { } buf += chunk; } else { - /* buffer now empty, no need to advance "buf" */ + // buffer now empty, no need to advance "buf" assert(chunk == rest); peer_b->offset = 0; } @@ -248,7 +248,7 @@ static int bio_write(BIO *bio, const char *buf, int num_) { b->request = 0; if (b->closed) { - /* we already closed */ + // we already closed OPENSSL_PUT_ERROR(BIO, BIO_R_BROKEN_PIPE); return -1; } @@ -256,20 +256,20 @@ static int bio_write(BIO *bio, const 
char *buf, int num_) { assert(b->len <= b->size); if (b->len == b->size) { - BIO_set_retry_write(bio); /* buffer is full */ + BIO_set_retry_write(bio); // buffer is full return -1; } - /* we can write */ + // we can write if (num > b->size - b->len) { num = b->size - b->len; } - /* now write "num" bytes */ + // now write "num" bytes rest = num; assert(rest > 0); - /* one or two iterations */ + // one or two iterations do { size_t write_offset; size_t chunk; @@ -280,12 +280,12 @@ static int bio_write(BIO *bio, const char *buf, int num_) { if (write_offset >= b->size) { write_offset -= b->size; } - /* b->buf[write_offset] is the first byte we can write to. */ + // b->buf[write_offset] is the first byte we can write to. if (write_offset + rest <= b->size) { chunk = rest; } else { - /* wrap around ring buffer */ + // wrap around ring buffer chunk = b->size - write_offset; } @@ -363,15 +363,15 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) { assert(b != NULL); switch (cmd) { - /* specific CTRL codes */ + // specific CTRL codes case BIO_C_GET_WRITE_BUF_SIZE: ret = (long)b->size; break; case BIO_C_GET_WRITE_GUARANTEE: - /* How many bytes can the caller feed to the next write - * without having to keep any? */ + // How many bytes can the caller feed to the next write + // without having to keep any? if (b->peer == NULL || b->closed) { ret = 0; } else { @@ -380,28 +380,28 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) { break; case BIO_C_GET_READ_REQUEST: - /* If the peer unsuccessfully tried to read, how many bytes - * were requested? (As with BIO_CTRL_PENDING, that number - * can usually be treated as boolean.) */ + // If the peer unsuccessfully tried to read, how many bytes + // were requested? (As with BIO_CTRL_PENDING, that number + // can usually be treated as boolean.) ret = (long)b->request; break; case BIO_C_RESET_READ_REQUEST: - /* Reset request. (Can be useful after read attempts - * at the other side that are meant to be non-blocking, - * e.g. when probing SSL_read to see if any data is - * available.) */ + // Reset request. (Can be useful after read attempts + // at the other side that are meant to be non-blocking, + // e.g. when probing SSL_read to see if any data is + // available.) b->request = 0; ret = 1; break; case BIO_C_SHUTDOWN_WR: - /* similar to shutdown(..., SHUT_WR) */ + // similar to shutdown(..., SHUT_WR) b->closed = 1; ret = 1; break; - /* standard CTRL codes follow */ + // standard CTRL codes follow case BIO_CTRL_GET_CLOSE: ret = bio->shutdown; @@ -453,7 +453,7 @@ static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) { static const BIO_METHOD methods_biop = { BIO_TYPE_BIO, "BIO pair", bio_write, bio_read, NULL /* puts */, - NULL /* gets */, bio_ctrl, bio_new, bio_free, NULL /* callback_ctrl */ + NULL /* gets */, bio_ctrl, bio_new, bio_free, NULL /* callback_ctrl */, }; static const BIO_METHOD *bio_s_bio(void) { return &methods_biop; } diff --git a/Sources/BoringSSL/crypto/bio/printf.c b/Sources/BoringSSL/crypto/bio/printf.c index 3709fcb2b..4f9d8a187 100644 --- a/Sources/BoringSSL/crypto/bio/printf.c +++ b/Sources/BoringSSL/crypto/bio/printf.c @@ -54,10 +54,6 @@ * copied and put under another distribution licence * [including the GNU Public Licence.] */ -#if !defined(_POSIX_C_SOURCE) -#define _POSIX_C_SOURCE 201410L /* for snprintf, vprintf etc */ -#endif - #include #include @@ -77,13 +73,13 @@ int BIO_printf(BIO *bio, const char *format, ...) 
{ va_end(args); #if defined(OPENSSL_WINDOWS) - /* On Windows, vsnprintf returns -1 rather than the requested length on - * truncation */ + // On Windows, vsnprintf returns -1 rather than the requested length on + // truncation if (out_len < 0) { va_start(args, format); out_len = _vscprintf(format, args); va_end(args); - assert(out_len >= sizeof(buf)); + assert(out_len >= (int)sizeof(buf)); } #endif @@ -93,9 +89,9 @@ int BIO_printf(BIO *bio, const char *format, ...) { if ((size_t) out_len >= sizeof(buf)) { const int requested_len = out_len; - /* The output was truncated. Note that vsnprintf's return value - * does not include a trailing NUL, but the buffer must be sized - * for it. */ + // The output was truncated. Note that vsnprintf's return value + // does not include a trailing NUL, but the buffer must be sized + // for it. out = OPENSSL_malloc(requested_len + 1); out_malloced = 1; if (out == NULL) { diff --git a/Sources/BoringSSL/crypto/bn/internal.h b/Sources/BoringSSL/crypto/bn/internal.h deleted file mode 100644 index 1ee29fd49..000000000 --- a/Sources/BoringSSL/crypto/bn/internal.h +++ /dev/null @@ -1,258 +0,0 @@ -/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ -/* ==================================================================== - * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ -/* ==================================================================== - * Copyright 2002 Sun Microsystems, Inc. 
ALL RIGHTS RESERVED. - * - * Portions of the attached software ("Contribution") are developed by - * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. - * - * The Contribution is licensed pursuant to the Eric Young open source - * license provided above. - * - * The binary polynomial arithmetic software is originally written by - * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems - * Laboratories. */ - -#ifndef OPENSSL_HEADER_BN_INTERNAL_H -#define OPENSSL_HEADER_BN_INTERNAL_H - -#include - -#if defined(OPENSSL_X86_64) && defined(_MSC_VER) -OPENSSL_MSVC_PRAGMA(warning(push, 3)) -#include -OPENSSL_MSVC_PRAGMA(warning(pop)) -#pragma intrinsic(__umulh, _umul128) -#endif - -#include "../internal.h" - -#if defined(__cplusplus) -extern "C" { -#endif - -/* bn_expand acts the same as |bn_wexpand|, but takes a number of bits rather - * than a number of words. */ -BIGNUM *bn_expand(BIGNUM *bn, size_t bits); - -#if defined(OPENSSL_64_BIT) - -#if !defined(_MSC_VER) -/* MSVC doesn't support two-word integers on 64-bit. */ -#define BN_ULLONG uint128_t -#endif - -#define BN_BITS2 64 -#define BN_BYTES 8 -#define BN_BITS4 32 -#define BN_MASK2 (0xffffffffffffffffUL) -#define BN_MASK2l (0xffffffffUL) -#define BN_MASK2h (0xffffffff00000000UL) -#define BN_MASK2h1 (0xffffffff80000000UL) -#define BN_MONT_CTX_N0_LIMBS 1 -#define BN_TBIT (0x8000000000000000UL) -#define BN_DEC_CONV (10000000000000000000UL) -#define BN_DEC_NUM 19 -#define TOBN(hi, lo) ((BN_ULONG)(hi) << 32 | (lo)) - -#elif defined(OPENSSL_32_BIT) - -#define BN_ULLONG uint64_t -#define BN_BITS2 32 -#define BN_BYTES 4 -#define BN_BITS4 16 -#define BN_MASK2 (0xffffffffUL) -#define BN_MASK2l (0xffffUL) -#define BN_MASK2h1 (0xffff8000UL) -#define BN_MASK2h (0xffff0000UL) -/* On some 32-bit platforms, Montgomery multiplication is done using 64-bit - * arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0| - * needs to be two words long. Only certain 32-bit platforms actually make use - * of n0[1] and shorter R value would suffice for the others. However, - * currently only the assembly files know which is which. */ -#define BN_MONT_CTX_N0_LIMBS 2 -#define BN_TBIT (0x80000000UL) -#define BN_DEC_CONV (1000000000UL) -#define BN_DEC_NUM 9 -#define TOBN(hi, lo) (lo), (hi) - -#else -#error "Must define either OPENSSL_32_BIT or OPENSSL_64_BIT" -#endif - - -#define STATIC_BIGNUM(x) \ - { \ - (BN_ULONG *)(x), sizeof(x) / sizeof(BN_ULONG), \ - sizeof(x) / sizeof(BN_ULONG), 0, BN_FLG_STATIC_DATA \ - } - -#if defined(BN_ULLONG) -#define Lw(t) (((BN_ULONG)(t))&BN_MASK2) -#define Hw(t) (((BN_ULONG)((t)>>BN_BITS2))&BN_MASK2) -#endif - -/* bn_set_words sets |bn| to the value encoded in the |num| words in |words|, - * least significant word first. 
*/ -int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num); - -BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w); -BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w); -void bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, int num); -BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int num); -BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int num); - -void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b); -void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b); -void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a); -void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a); - -/* bn_cmp_words returns a value less than, equal to or greater than zero if - * the, length |n|, array |a| is less than, equal to or greater than |b|. */ -int bn_cmp_words(const BN_ULONG *a, const BN_ULONG *b, int n); - -/* bn_cmp_words returns a value less than, equal to or greater than zero if the - * array |a| is less than, equal to or greater than |b|. The arrays can be of - * different lengths: |cl| gives the minimum of the two lengths and |dl| gives - * the length of |a| minus the length of |b|. */ -int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl); - -int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, - const BN_ULONG *np, const BN_ULONG *n0, int num); - -uint64_t bn_mont_n0(const BIGNUM *n); -int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n); - -#if defined(OPENSSL_X86_64) && defined(_MSC_VER) -#define BN_UMULT_LOHI(low, high, a, b) ((low) = _umul128((a), (b), &(high))) -#endif - -#if !defined(BN_ULLONG) && !defined(BN_UMULT_LOHI) -#error "Either BN_ULLONG or BN_UMULT_LOHI must be defined on every platform." -#endif - -/* bn_mod_inverse_prime sets |out| to the modular inverse of |a| modulo |p|, - * computed with Fermat's Little Theorem. It returns one on success and zero on - * error. If |mont_p| is NULL, one will be computed temporarily. */ -int bn_mod_inverse_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p, - BN_CTX *ctx, const BN_MONT_CTX *mont_p); - -/* bn_mod_inverse_secret_prime behaves like |bn_mod_inverse_prime| but uses - * |BN_mod_exp_mont_consttime| instead of |BN_mod_exp_mont| in hopes of - * protecting the exponent. */ -int bn_mod_inverse_secret_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p, - BN_CTX *ctx, const BN_MONT_CTX *mont_p); - - -#if defined(__cplusplus) -} /* extern C */ -#endif - -#endif /* OPENSSL_HEADER_BN_INTERNAL_H */ diff --git a/Sources/BoringSSL/crypto/bn/montgomery_inv.c b/Sources/BoringSSL/crypto/bn/montgomery_inv.c deleted file mode 100644 index 9264adb22..000000000 --- a/Sources/BoringSSL/crypto/bn/montgomery_inv.c +++ /dev/null @@ -1,207 +0,0 @@ -/* Copyright 2016 Brian Smith. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include - -#include "internal.h" -#include "../internal.h" - - -static uint64_t bn_neg_inv_mod_r_u64(uint64_t n); - -OPENSSL_COMPILE_ASSERT(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2, - BN_MONT_CTX_N0_LIMBS_VALUE_INVALID); -OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) == - BN_MONT_CTX_N0_LIMBS * sizeof(BN_ULONG), - BN_MONT_CTX_N0_LIMBS_DOES_NOT_MATCH_UINT64_T); - -/* LG_LITTLE_R is log_2(r). */ -#define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2) - -uint64_t bn_mont_n0(const BIGNUM *n) { - /* These conditions are checked by the caller, |BN_MONT_CTX_set|. */ - assert(!BN_is_zero(n)); - assert(!BN_is_negative(n)); - assert(BN_is_odd(n)); - - /* r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This - * ensures that we can do integer division by |r| by simply ignoring - * |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo - * |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is - * what makes Montgomery multiplication efficient. - * - * As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography - * with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a - * multi-limb Montgomery multiplication of |a * b (mod n)|, given the - * unreduced product |t == a * b|, we repeatedly calculate: - * - * t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph). - * t2 := t1*n0*n - * t3 := t + t2 - * t := t3 / r copy all limbs of |t3| except the lowest to |t|. - * - * In the last step, it would only make sense to ignore the lowest limb of - * |t3| if it were zero. The middle steps ensure that this is the case: - * - * t3 == 0 (mod r) - * t + t2 == 0 (mod r) - * t + t1*n0*n == 0 (mod r) - * t1*n0*n == -t (mod r) - * t*n0*n == -t (mod r) - * n0*n == -1 (mod r) - * n0 == -1/n (mod r) - * - * Thus, in each iteration of the loop, we multiply by the constant factor - * |n0|, the negative inverse of n (mod r). */ - - /* n_mod_r = n % r. As explained above, this is done by taking the lowest - * |BN_MONT_CTX_N0_LIMBS| limbs of |n|. */ - uint64_t n_mod_r = n->d[0]; -#if BN_MONT_CTX_N0_LIMBS == 2 - if (n->top > 1) { - n_mod_r |= (uint64_t)n->d[1] << BN_BITS2; - } -#endif - - return bn_neg_inv_mod_r_u64(n_mod_r); -} - -/* bn_neg_inv_r_mod_n_u64 calculates the -1/n mod r; i.e. it calculates |v| - * such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n| - * must be odd. - * - * This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery - * Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf). - * It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and - * Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000" - * (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21). - * - * This is inspired by Joppe W. Bos's "Constant Time Modular Inversion" - * (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is - * constant-time with respect to |n|. We assume uint64_t additions, - * subtractions, shifts, and bitwise operations are all constant time, which - * may be a large leap of faith on 32-bit targets. 
We avoid division and - * multiplication, which tend to be the most problematic in terms of timing - * leaks. - * - * Most GCD implementations return values such that |u*r + v*n == 1|, so the - * caller would have to negate the resultant |v| for the purpose of Montgomery - * multiplication. This implementation does the negation implicitly by doing - * the computations as a difference instead of a sum. */ -static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) { - assert(n % 2 == 1); - - /* alpha == 2**(lg r - 1) == r / 2. */ - static const uint64_t alpha = UINT64_C(1) << (LG_LITTLE_R - 1); - - const uint64_t beta = n; - - uint64_t u = 1; - uint64_t v = 0; - - /* The invariant maintained from here on is: - * 2**(lg r - i) == u*2*alpha - v*beta. */ - for (size_t i = 0; i < LG_LITTLE_R; ++i) { -#if BN_BITS2 == 64 && defined(BN_ULLONG) - assert((BN_ULLONG)(1) << (LG_LITTLE_R - i) == - ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta)); -#endif - - /* Delete a common factor of 2 in u and v if |u| is even. Otherwise, set - * |u = (u + beta) / 2| and |v = (v / 2) + alpha|. */ - - uint64_t u_is_odd = UINT64_C(0) - (u & 1); /* Either 0xff..ff or 0. */ - - /* The addition can overflow, so use Dietz's method for it. - * - * Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all - * (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values - * (embedded in 64 bits to so that overflow can be ignored): - * - * (declare-fun x () (_ BitVec 64)) - * (declare-fun y () (_ BitVec 64)) - * (assert (let ( - * (one (_ bv1 64)) - * (thirtyTwo (_ bv32 64))) - * (and - * (bvult x (bvshl one thirtyTwo)) - * (bvult y (bvshl one thirtyTwo)) - * (not (= - * (bvadd (bvlshr (bvxor x y) one) (bvand x y)) - * (bvlshr (bvadd x y) one))) - * ))) - * (check-sat) */ - uint64_t beta_if_u_is_odd = beta & u_is_odd; /* Either |beta| or 0. */ - u = ((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd); - - uint64_t alpha_if_u_is_odd = alpha & u_is_odd; /* Either |alpha| or 0. */ - v = (v >> 1) + alpha_if_u_is_odd; - } - - /* The invariant now shows that u*r - v*n == 1 since r == 2 * alpha. */ -#if BN_BITS2 == 64 && defined(BN_ULLONG) - assert(1 == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta)); -#endif - - return v; -} - -/* bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger - * than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and - * odd. */ -int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) { - assert(!BN_is_zero(n)); - assert(!BN_is_negative(n)); - assert(BN_is_odd(n)); - - BN_zero(r); - - unsigned n_bits = BN_num_bits(n); - assert(n_bits != 0); - if (n_bits == 1) { - return 1; - } - - /* Set |r| to the smallest power of two larger than |n|. */ - assert(p > n_bits); - if (!BN_set_bit(r, n_bits)) { - return 0; - } - - /* Unconditionally reduce |r|. */ - assert(BN_cmp(r, n) > 0); - if (!BN_usub(r, r, n)) { - return 0; - } - assert(BN_cmp(r, n) < 0); - - for (unsigned i = n_bits; i < p; ++i) { - /* This is like |BN_mod_lshift1_quick| except using |BN_usub|. - * - * TODO: Replace this with the use of a constant-time variant of - * |BN_mod_lshift1_quick|. 
*/ - if (!BN_lshift1(r, r)) { - return 0; - } - if (BN_cmp(r, n) >= 0) { - if (!BN_usub(r, r, n)) { - return 0; - } - } - } - - return 1; -} diff --git a/Sources/BoringSSL/crypto/bn/bn_asn1.c b/Sources/BoringSSL/crypto/bn_extra/bn_asn1.c similarity index 70% rename from Sources/BoringSSL/crypto/bn/bn_asn1.c rename to Sources/BoringSSL/crypto/bn_extra/bn_asn1.c index efb23355b..0d96573a3 100644 --- a/Sources/BoringSSL/crypto/bn/bn_asn1.c +++ b/Sources/BoringSSL/crypto/bn_extra/bn_asn1.c @@ -31,7 +31,7 @@ int BN_parse_asn1_unsigned(CBS *cbs, BIGNUM *ret) { return 0; } - /* INTEGERs must be minimal. */ + // INTEGERs must be minimal. if (CBS_data(&child)[0] == 0x00 && CBS_len(&child) > 1 && !(CBS_data(&child)[1] & 0x80)) { @@ -42,24 +42,8 @@ int BN_parse_asn1_unsigned(CBS *cbs, BIGNUM *ret) { return BN_bin2bn(CBS_data(&child), CBS_len(&child), ret) != NULL; } -int BN_parse_asn1_unsigned_buggy(CBS *cbs, BIGNUM *ret) { - CBS child; - if (!CBS_get_asn1(cbs, &child, CBS_ASN1_INTEGER) || - CBS_len(&child) == 0) { - OPENSSL_PUT_ERROR(BN, BN_R_BAD_ENCODING); - return 0; - } - - /* This function intentionally does not reject negative numbers or non-minimal - * encodings. Estonian IDs issued between September 2014 to September 2015 are - * broken. See https://crbug.com/532048 and https://crbug.com/534766. - * - * TODO(davidben): Remove this code and callers in March 2016. */ - return BN_bin2bn(CBS_data(&child), CBS_len(&child), ret) != NULL; -} - int BN_marshal_asn1(CBB *cbb, const BIGNUM *bn) { - /* Negative numbers are unsupported. */ + // Negative numbers are unsupported. if (BN_is_negative(bn)) { OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); return 0; @@ -67,8 +51,8 @@ int BN_marshal_asn1(CBB *cbb, const BIGNUM *bn) { CBB child; if (!CBB_add_asn1(cbb, &child, CBS_ASN1_INTEGER) || - /* The number must be padded with a leading zero if the high bit would - * otherwise be set or if |bn| is zero. */ + // The number must be padded with a leading zero if the high bit would + // otherwise be set or if |bn| is zero. (BN_num_bits(bn) % 8 == 0 && !CBB_add_u8(&child, 0x00)) || !BN_bn2cbb_padded(&child, BN_num_bytes(bn), bn) || !CBB_flush(cbb)) { diff --git a/Sources/BoringSSL/crypto/bn/convert.c b/Sources/BoringSSL/crypto/bn_extra/convert.c similarity index 63% rename from Sources/BoringSSL/crypto/bn/convert.c rename to Sources/BoringSSL/crypto/bn_extra/convert.c index 1fa0dd4f5..6f3a062ad 100644 --- a/Sources/BoringSSL/crypto/bn/convert.c +++ b/Sources/BoringSSL/crypto/bn_extra/convert.c @@ -60,191 +60,14 @@ #include #include #include -#include #include #include #include #include -#include "internal.h" +#include "../fipsmodule/bn/internal.h" -BIGNUM *BN_bin2bn(const uint8_t *in, size_t len, BIGNUM *ret) { - size_t num_words; - unsigned m; - BN_ULONG word = 0; - BIGNUM *bn = NULL; - - if (ret == NULL) { - ret = bn = BN_new(); - } - - if (ret == NULL) { - return NULL; - } - - if (len == 0) { - ret->top = 0; - return ret; - } - - num_words = ((len - 1) / BN_BYTES) + 1; - m = (len - 1) % BN_BYTES; - if (bn_wexpand(ret, num_words) == NULL) { - if (bn) { - BN_free(bn); - } - return NULL; - } - - /* |bn_wexpand| must check bounds on |num_words| to write it into - * |ret->dmax|. 
*/ - assert(num_words <= INT_MAX); - ret->top = (int)num_words; - ret->neg = 0; - - while (len--) { - word = (word << 8) | *(in++); - if (m-- == 0) { - ret->d[--num_words] = word; - word = 0; - m = BN_BYTES - 1; - } - } - - /* need to call this due to clear byte at top if avoiding having the top bit - * set (-ve number) */ - bn_correct_top(ret); - return ret; -} - -BIGNUM *BN_le2bn(const uint8_t *in, size_t len, BIGNUM *ret) { - BIGNUM *bn = NULL; - if (ret == NULL) { - bn = BN_new(); - ret = bn; - } - - if (ret == NULL) { - return NULL; - } - - if (len == 0) { - ret->top = 0; - ret->neg = 0; - return ret; - } - - /* Reserve enough space in |ret|. */ - size_t num_words = ((len - 1) / BN_BYTES) + 1; - if (!bn_wexpand(ret, num_words)) { - BN_free(bn); - return NULL; - } - ret->top = num_words; - - /* Make sure the top bytes will be zeroed. */ - ret->d[num_words - 1] = 0; - - /* We only support little-endian platforms, so we can simply memcpy the - * internal representation. */ - OPENSSL_memcpy(ret->d, in, len); - - bn_correct_top(ret); - return ret; -} - -size_t BN_bn2bin(const BIGNUM *in, uint8_t *out) { - size_t n, i; - BN_ULONG l; - - n = i = BN_num_bytes(in); - while (i--) { - l = in->d[i / BN_BYTES]; - *(out++) = (unsigned char)(l >> (8 * (i % BN_BYTES))) & 0xff; - } - return n; -} - -int BN_bn2le_padded(uint8_t *out, size_t len, const BIGNUM *in) { - /* If we don't have enough space, fail out. */ - size_t num_bytes = BN_num_bytes(in); - if (len < num_bytes) { - return 0; - } - - /* We only support little-endian platforms, so we can simply memcpy into the - * internal representation. */ - OPENSSL_memcpy(out, in->d, num_bytes); - - /* Pad out the rest of the buffer with zeroes. */ - OPENSSL_memset(out + num_bytes, 0, len - num_bytes); - - return 1; -} - -/* constant_time_select_ulong returns |x| if |v| is 1 and |y| if |v| is 0. Its - * behavior is undefined if |v| takes any other value. */ -static BN_ULONG constant_time_select_ulong(int v, BN_ULONG x, BN_ULONG y) { - BN_ULONG mask = v; - mask--; - - return (~mask & x) | (mask & y); -} - -/* constant_time_le_size_t returns 1 if |x| <= |y| and 0 otherwise. |x| and |y| - * must not have their MSBs set. */ -static int constant_time_le_size_t(size_t x, size_t y) { - return ((x - y - 1) >> (sizeof(size_t) * 8 - 1)) & 1; -} - -/* read_word_padded returns the |i|'th word of |in|, if it is not out of - * bounds. Otherwise, it returns 0. It does so without branches on the size of - * |in|, however it necessarily does not have the same memory access pattern. If - * the access would be out of bounds, it reads the last word of |in|. |in| must - * not be zero. */ -static BN_ULONG read_word_padded(const BIGNUM *in, size_t i) { - /* Read |in->d[i]| if valid. Otherwise, read the last word. */ - BN_ULONG l = in->d[constant_time_select_ulong( - constant_time_le_size_t(in->dmax, i), in->dmax - 1, i)]; - - /* Clamp to zero if above |d->top|. */ - return constant_time_select_ulong(constant_time_le_size_t(in->top, i), 0, l); -} - -int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) { - /* Special case for |in| = 0. Just branch as the probability is negligible. */ - if (BN_is_zero(in)) { - OPENSSL_memset(out, 0, len); - return 1; - } - - /* Check if the integer is too big. This case can exit early in non-constant - * time. 
*/ - if ((size_t)in->top > (len + (BN_BYTES - 1)) / BN_BYTES) { - return 0; - } - if ((len % BN_BYTES) != 0) { - BN_ULONG l = read_word_padded(in, len / BN_BYTES); - if (l >> (8 * (len % BN_BYTES)) != 0) { - return 0; - } - } - - /* Write the bytes out one by one. Serialization is done without branching on - * the bits of |in| or on |in->top|, but if the routine would otherwise read - * out of bounds, the memory access pattern can't be fixed. However, for an - * RSA key of size a multiple of the word size, the probability of BN_BYTES - * leading zero octets is low. - * - * See Falko Stenzke, "Manger's Attack revisited", ICICS 2010. */ - size_t i = len; - while (i--) { - BN_ULONG l = read_word_padded(in, i / BN_BYTES); - *(out++) = (uint8_t)(l >> (8 * (i % BN_BYTES))) & 0xff; - } - return 1; -} int BN_bn2cbb_padded(CBB *out, size_t len, const BIGNUM *in) { uint8_t *ptr; @@ -273,7 +96,7 @@ char *BN_bn2hex(const BIGNUM *bn) { int z = 0; for (int i = bn->top - 1; i >= 0; i--) { for (int j = BN_BITS2 - 8; j >= 0; j -= 8) { - /* strip leading zeros */ + // strip leading zeros int v = ((int)(bn->d[i] >> (long)j)) & 0xff; if (z || v != 0) { *(p++) = hextable[v >> 4]; @@ -287,20 +110,20 @@ char *BN_bn2hex(const BIGNUM *bn) { return buf; } -/* decode_hex decodes |in_len| bytes of hex data from |in| and updates |bn|. */ +// decode_hex decodes |in_len| bytes of hex data from |in| and updates |bn|. static int decode_hex(BIGNUM *bn, const char *in, int in_len) { if (in_len > INT_MAX/4) { OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG); return 0; } - /* |in_len| is the number of hex digits. */ - if (bn_expand(bn, in_len * 4) == NULL) { + // |in_len| is the number of hex digits. + if (!bn_expand(bn, in_len * 4)) { return 0; } int i = 0; while (in_len > 0) { - /* Decode one |BN_ULONG| at a time. */ + // Decode one |BN_ULONG| at a time. int todo = BN_BYTES * 2; if (todo > in_len) { todo = in_len; @@ -320,7 +143,7 @@ static int decode_hex(BIGNUM *bn, const char *in, int in_len) { hex = c - 'A' + 10; } else { hex = 0; - /* This shouldn't happen. The caller checks |isxdigit|. */ + // This shouldn't happen. The caller checks |isxdigit|. assert(0); } word = (word << 4) | hex; @@ -334,12 +157,12 @@ static int decode_hex(BIGNUM *bn, const char *in, int in_len) { return 1; } -/* decode_dec decodes |in_len| bytes of decimal data from |in| and updates |bn|. */ +// decode_dec decodes |in_len| bytes of decimal data from |in| and updates |bn|. static int decode_dec(BIGNUM *bn, const char *in, int in_len) { int i, j; BN_ULONG l = 0; - /* Decode |BN_DEC_NUM| digits at a time. */ + // Decode |BN_DEC_NUM| digits at a time. j = BN_DEC_NUM - (in_len % BN_DEC_NUM); if (j == BN_DEC_NUM) { j = 0; @@ -384,7 +207,7 @@ static int bn_x2bn(BIGNUM **outp, const char *in, decode_func decode, char_test_ return num; } - /* in is the start of the hex digits, and it is 'i' long */ + // in is the start of the hex digits, and it is 'i' long if (*outp == NULL) { ret = BN_new(); if (ret == NULL) { @@ -420,8 +243,8 @@ int BN_hex2bn(BIGNUM **outp, const char *in) { } char *BN_bn2dec(const BIGNUM *a) { - /* It is easier to print strings little-endian, so we assemble it in reverse - * and fix at the end. */ + // It is easier to print strings little-endian, so we assemble it in reverse + // and fix at the end. BIGNUM *copy = NULL; CBB cbb; if (!CBB_init(&cbb, 16) || @@ -467,7 +290,7 @@ char *BN_bn2dec(const BIGNUM *a) { goto cbb_err; } - /* Reverse the buffer. */ + // Reverse the buffer. 
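The hex and decimal conversions retained above are easiest to follow end to end; a minimal sketch using BN_hex2bn and BN_bn2dec (standard signatures from <openssl/bn.h> assumed):

#include <openssl/bn.h>
#include <openssl/mem.h>
#include <stdio.h>

int main(void) {
  BIGNUM *bn = NULL;
  /* BN_hex2bn allocates |bn| on first use and returns the number of hex
   * digits consumed, or 0 on error. */
  if (!BN_hex2bn(&bn, "1fffffffffffff")) {
    return 1;
  }
  /* BN_bn2dec returns a heap-allocated decimal string; free with OPENSSL_free. */
  char *dec = BN_bn2dec(bn);
  if (dec != NULL) {
    printf("%s\n", dec); /* 9007199254740991, i.e. 2^53 - 1 */
    OPENSSL_free(dec);
  }
  BN_free(bn);
  return 0;
}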
for (size_t i = 0; i < len/2; i++) { uint8_t tmp = data[i]; data[i] = data[len - 1 - i]; @@ -526,7 +349,7 @@ int BN_print(BIO *bp, const BIGNUM *a) { for (i = a->top - 1; i >= 0; i--) { for (j = BN_BITS2 - 4; j >= 0; j -= 4) { - /* strip leading zeros */ + // strip leading zeros v = ((int)(a->d[i] >> (long)j)) & 0x0f; if (z || v != 0) { if (BIO_write(bp, &hextable[v], 1) != 1) { @@ -557,40 +380,12 @@ int BN_print_fp(FILE *fp, const BIGNUM *a) { return ret; } -BN_ULONG BN_get_word(const BIGNUM *bn) { - switch (bn->top) { - case 0: - return 0; - case 1: - return bn->d[0]; - default: - return BN_MASK2; - } -} - -int BN_get_u64(const BIGNUM *bn, uint64_t *out) { - switch (bn->top) { - case 0: - *out = 0; - return 1; - case 1: - *out = bn->d[0]; - return 1; -#if defined(OPENSSL_32_BIT) - case 2: - *out = (uint64_t) bn->d[0] | (((uint64_t) bn->d[1]) << 32); - return 1; -#endif - default: - return 0; - } -} size_t BN_bn2mpi(const BIGNUM *in, uint8_t *out) { const size_t bits = BN_num_bits(in); const size_t bytes = (bits + 7) / 8; - /* If the number of bits is a multiple of 8, i.e. if the MSB is set, - * prefix with a zero byte. */ + // If the number of bits is a multiple of 8, i.e. if the MSB is set, + // prefix with a zero byte. int extend = 0; if (bytes != 0 && (bits & 0x07) == 0) { extend = 1; @@ -600,8 +395,8 @@ size_t BN_bn2mpi(const BIGNUM *in, uint8_t *out) { if (len < bytes || 4 + len < len || (len & 0xffffffff) != len) { - /* If we cannot represent the number then we emit zero as the interface - * doesn't allow an error to be signalled. */ + // If we cannot represent the number then we emit zero as the interface + // doesn't allow an error to be signalled. if (out) { OPENSSL_memset(out, 0, 4); } diff --git a/Sources/BoringSSL/crypto/buf/buf.c b/Sources/BoringSSL/crypto/buf/buf.c index ca1d70b04..146b1e0a3 100644 --- a/Sources/BoringSSL/crypto/buf/buf.c +++ b/Sources/BoringSSL/crypto/buf/buf.c @@ -82,44 +82,30 @@ void BUF_MEM_free(BUF_MEM *buf) { return; } - if (buf->data != NULL) { - OPENSSL_cleanse(buf->data, buf->max); - OPENSSL_free(buf->data); - } - + OPENSSL_free(buf->data); OPENSSL_free(buf); } -static int buf_mem_reserve(BUF_MEM *buf, size_t cap, int clean) { +int BUF_MEM_reserve(BUF_MEM *buf, size_t cap) { if (buf->max >= cap) { return 1; } size_t n = cap + 3; if (n < cap) { - /* overflow */ + // overflow OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE); return 0; } n = n / 3; size_t alloc_size = n * 4; if (alloc_size / 4 != n) { - /* overflow */ + // overflow OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE); return 0; } - char *new_buf; - if (buf->data == NULL) { - new_buf = OPENSSL_malloc(alloc_size); - } else { - if (clean) { - new_buf = OPENSSL_realloc_clean(buf->data, buf->max, alloc_size); - } else { - new_buf = OPENSSL_realloc(buf->data, alloc_size); - } - } - + char *new_buf = OPENSSL_realloc(buf->data, alloc_size); if (new_buf == NULL) { OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE); return 0; @@ -130,12 +116,8 @@ static int buf_mem_reserve(BUF_MEM *buf, size_t cap, int clean) { return 1; } -int BUF_MEM_reserve(BUF_MEM *buf, size_t cap) { - return buf_mem_reserve(buf, cap, 0 /* don't clear old buffer contents. 
*/); -} - -static size_t buf_mem_grow(BUF_MEM *buf, size_t len, int clean) { - if (!buf_mem_reserve(buf, len, clean)) { +size_t BUF_MEM_grow(BUF_MEM *buf, size_t len) { + if (!BUF_MEM_reserve(buf, len)) { return 0; } if (buf->length < len) { @@ -145,20 +127,30 @@ static size_t buf_mem_grow(BUF_MEM *buf, size_t len, int clean) { return len; } -size_t BUF_MEM_grow(BUF_MEM *buf, size_t len) { - return buf_mem_grow(buf, len, 0 /* don't clear old buffer contents. */); +size_t BUF_MEM_grow_clean(BUF_MEM *buf, size_t len) { + return BUF_MEM_grow(buf, len); } -size_t BUF_MEM_grow_clean(BUF_MEM *buf, size_t len) { - return buf_mem_grow(buf, len, 1 /* clear old buffer contents. */); +int BUF_MEM_append(BUF_MEM *buf, const void *in, size_t len) { + size_t new_len = buf->length + len; + if (new_len < len) { + OPENSSL_PUT_ERROR(BUF, ERR_R_OVERFLOW); + return 0; + } + if (!BUF_MEM_reserve(buf, new_len)) { + return 0; + } + OPENSSL_memcpy(buf->data + buf->length, in, len); + buf->length = new_len; + return 1; } -char *BUF_strdup(const char *buf) { - if (buf == NULL) { +char *BUF_strdup(const char *str) { + if (str == NULL) { return NULL; } - return BUF_strndup(buf, strlen(buf)); + return BUF_strndup(str, strlen(str)); } size_t BUF_strnlen(const char *str, size_t max_len) { @@ -173,19 +165,19 @@ size_t BUF_strnlen(const char *str, size_t max_len) { return i; } -char *BUF_strndup(const char *buf, size_t size) { +char *BUF_strndup(const char *str, size_t size) { char *ret; size_t alloc_size; - if (buf == NULL) { + if (str == NULL) { return NULL; } - size = BUF_strnlen(buf, size); + size = BUF_strnlen(str, size); alloc_size = size + 1; if (alloc_size < size) { - /* overflow */ + // overflow OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE); return NULL; } @@ -195,7 +187,7 @@ char *BUF_strndup(const char *buf, size_t size) { return NULL; } - OPENSSL_memcpy(ret, buf, size); + OPENSSL_memcpy(ret, str, size); ret[size] = '\0'; return ret; } @@ -223,19 +215,17 @@ size_t BUF_strlcat(char *dst, const char *src, size_t dst_size) { return l + BUF_strlcpy(dst, src, dst_size); } -void *BUF_memdup(const void *data, size_t dst_size) { - void *ret; - - if (dst_size == 0) { +void *BUF_memdup(const void *data, size_t size) { + if (size == 0) { return NULL; } - ret = OPENSSL_malloc(dst_size); + void *ret = OPENSSL_malloc(size); if (ret == NULL) { OPENSSL_PUT_ERROR(BUF, ERR_R_MALLOC_FAILURE); return NULL; } - OPENSSL_memcpy(ret, data, dst_size); + OPENSSL_memcpy(ret, data, size); return ret; } diff --git a/Sources/BoringSSL/crypto/bytestring/ber.c b/Sources/BoringSSL/crypto/bytestring/ber.c index ee3cd0a06..4dc94f6f7 100644 --- a/Sources/BoringSSL/crypto/bytestring/ber.c +++ b/Sources/BoringSSL/crypto/bytestring/ber.c @@ -21,13 +21,13 @@ #include "../internal.h" -/* kMaxDepth is a just a sanity limit. The code should be such that the length - * of the input being processes always decreases. None the less, a very large - * input could otherwise cause the stack to overflow. */ +// kMaxDepth is a just a sanity limit. The code should be such that the length +// of the input being processes always decreases. None the less, a very large +// input could otherwise cause the stack to overflow. static const unsigned kMaxDepth = 2048; -/* is_string_type returns one if |tag| is a string type and zero otherwise. It - * ignores the constructed bit. */ +// is_string_type returns one if |tag| is a string type and zero otherwise. It +// ignores the constructed bit. 
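As a usage note on the new BUF_MEM_append above: it reserves space (with the same roughly 4/3 over-allocation as BUF_MEM_reserve), copies the bytes, and bumps |length| in one call. A small sketch, assuming BUF_MEM_new/BUF_MEM_free from <openssl/buf.h>:

#include <openssl/buf.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  BUF_MEM *buf = BUF_MEM_new();
  if (buf == NULL) {
    return 1;
  }
  const char *parts[3] = {"hello", ", ", "world"};
  for (size_t i = 0; i < 3; i++) {
    /* Grows |max| if needed, appends |parts[i]|, and advances |length|. */
    if (!BUF_MEM_append(buf, parts[i], strlen(parts[i]))) {
      BUF_MEM_free(buf);
      return 1;
    }
  }
  printf("%.*s\n", (int)buf->length, buf->data); /* hello, world */
  BUF_MEM_free(buf);
  return 0;
}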
static int is_string_type(unsigned tag) { if ((tag & 0xc0) != 0) { return 0; @@ -38,7 +38,7 @@ static int is_string_type(unsigned tag) { case CBS_ASN1_UTF8STRING: case CBS_ASN1_NUMERICSTRING: case CBS_ASN1_PRINTABLESTRING: - case CBS_ASN1_T16STRING: + case CBS_ASN1_T61STRING: case CBS_ASN1_VIDEOTEXSTRING: case CBS_ASN1_IA5STRING: case CBS_ASN1_GRAPHICSTRING: @@ -52,10 +52,10 @@ static int is_string_type(unsigned tag) { } } -/* cbs_find_ber walks an ASN.1 structure in |orig_in| and sets |*ber_found| - * depending on whether an indefinite length element or constructed string was - * found. The value of |orig_in| is not changed. It returns one on success (i.e. - * |*ber_found| was set) and zero on error. */ +// cbs_find_ber walks an ASN.1 structure in |orig_in| and sets |*ber_found| +// depending on whether an indefinite length element or constructed string was +// found. The value of |orig_in| is not changed. It returns one on success (i.e. +// |*ber_found| was set) and zero on error. static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) { CBS in; @@ -77,13 +77,13 @@ static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) { if (CBS_len(&contents) == header_len && header_len > 0 && CBS_data(&contents)[header_len-1] == 0x80) { - /* Found an indefinite-length element. */ + // Found an indefinite-length element. *ber_found = 1; return 1; } if (tag & CBS_ASN1_CONSTRUCTED) { if (is_string_type(tag)) { - /* Constructed strings are only legal in BER and require conversion. */ + // Constructed strings are only legal in BER and require conversion. *ber_found = 1; return 1; } @@ -97,20 +97,20 @@ static int cbs_find_ber(const CBS *orig_in, char *ber_found, unsigned depth) { return 1; } -/* is_eoc returns true if |header_len| and |contents|, as returned by - * |CBS_get_any_ber_asn1_element|, indicate an "end of contents" (EOC) value. */ +// is_eoc returns true if |header_len| and |contents|, as returned by +// |CBS_get_any_ber_asn1_element|, indicate an "end of contents" (EOC) value. static char is_eoc(size_t header_len, CBS *contents) { return header_len == 2 && CBS_len(contents) == 2 && OPENSSL_memcmp(CBS_data(contents), "\x00\x00", 2) == 0; } -/* cbs_convert_ber reads BER data from |in| and writes DER data to |out|. If - * |string_tag| is non-zero, then all elements must match |string_tag| up to the - * constructed bit and primitive element bodies are written to |out| without - * element headers. This is used when concatenating the fragments of a - * constructed string. If |looking_for_eoc| is set then any EOC elements found - * will cause the function to return after consuming it. It returns one on - * success and zero on error. */ +// cbs_convert_ber reads BER data from |in| and writes DER data to |out|. If +// |string_tag| is non-zero, then all elements must match |string_tag| up to the +// constructed bit and primitive element bodies are written to |out| without +// element headers. This is used when concatenating the fragments of a +// constructed string. If |looking_for_eoc| is set then any EOC elements found +// will cause the function to return after consuming it. It returns one on +// success and zero on error. static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, char looking_for_eoc, unsigned depth) { assert(!(string_tag & CBS_ASN1_CONSTRUCTED)); @@ -134,9 +134,9 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, } if (string_tag != 0) { - /* This is part of a constructed string. 
All elements must match - * |string_tag| up to the constructed bit and get appended to |out| - * without a child element. */ + // This is part of a constructed string. All elements must match + // |string_tag| up to the constructed bit and get appended to |out| + // without a child element. if ((tag & ~CBS_ASN1_CONSTRUCTED) != string_tag) { return 0; } @@ -144,8 +144,8 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, } else { unsigned out_tag = tag; if ((tag & CBS_ASN1_CONSTRUCTED) && is_string_type(tag)) { - /* If a constructed string, clear the constructed bit and inform - * children to concatenate bodies. */ + // If a constructed string, clear the constructed bit and inform + // children to concatenate bodies. out_tag &= ~CBS_ASN1_CONSTRUCTED; child_string_tag = out_tag; } @@ -157,7 +157,7 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, if (CBS_len(&contents) == header_len && header_len > 0 && CBS_data(&contents)[header_len - 1] == 0x80) { - /* This is an indefinite length element. */ + // This is an indefinite length element. if (!cbs_convert_ber(in, out_contents, child_string_tag, 1 /* looking for eoc */, depth + 1) || !CBB_flush(out)) { @@ -171,13 +171,13 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, } if (tag & CBS_ASN1_CONSTRUCTED) { - /* Recurse into children. */ + // Recurse into children. if (!cbs_convert_ber(&contents, out_contents, child_string_tag, 0 /* not looking for eoc */, depth + 1)) { return 0; } } else { - /* Copy primitive contents as-is. */ + // Copy primitive contents as-is. if (!CBB_add_bytes(out_contents, CBS_data(&contents), CBS_len(&contents))) { return 0; @@ -195,8 +195,8 @@ static int cbs_convert_ber(CBS *in, CBB *out, unsigned string_tag, int CBS_asn1_ber_to_der(CBS *in, uint8_t **out, size_t *out_len) { CBB cbb; - /* First, do a quick walk to find any indefinite-length elements. Most of the - * time we hope that there aren't any and thus we can quickly return. */ + // First, do a quick walk to find any indefinite-length elements. Most of the + // time we hope that there aren't any and thus we can quickly return. char conversion_needed; if (!cbs_find_ber(in, &conversion_needed, 0)) { return 0; @@ -225,14 +225,14 @@ int CBS_get_asn1_implicit_string(CBS *in, CBS *out, uint8_t **out_storage, assert(is_string_type(inner_tag)); if (CBS_peek_asn1_tag(in, outer_tag)) { - /* Normal implicitly-tagged string. */ + // Normal implicitly-tagged string. *out_storage = NULL; return CBS_get_asn1(in, out, outer_tag); } - /* Otherwise, try to parse an implicitly-tagged constructed string. - * |CBS_asn1_ber_to_der| is assumed to have run, so only allow one level deep - * of nesting. */ + // Otherwise, try to parse an implicitly-tagged constructed string. + // |CBS_asn1_ber_to_der| is assumed to have run, so only allow one level deep + // of nesting. CBB result; CBS child; if (!CBB_init(&result, CBS_len(in)) || diff --git a/Sources/BoringSSL/crypto/bytestring/cbb.c b/Sources/BoringSSL/crypto/bytestring/cbb.c index 14116be57..2853509cb 100644 --- a/Sources/BoringSSL/crypto/bytestring/cbb.c +++ b/Sources/BoringSSL/crypto/bytestring/cbb.c @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -27,7 +28,7 @@ void CBB_zero(CBB *cbb) { } static int cbb_init(CBB *cbb, uint8_t *buf, size_t cap) { - /* This assumes that |cbb| has already been zeroed. */ + // This assumes that |cbb| has already been zeroed. 
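Since the CBB changes begin here, a quick round trip may help readers new to the bytestring API: serialize SEQUENCE { INTEGER 42 } with a CBB, then parse it back with a CBS. A sketch assuming only functions declared in <openssl/bytestring.h> (CBB_init, CBB_add_asn1, CBB_add_asn1_uint64, CBB_finish, CBS_init, CBS_get_asn1, CBS_get_asn1_uint64):

#include <openssl/bytestring.h>
#include <openssl/mem.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Serialize: SEQUENCE { INTEGER 42 } */
  CBB cbb, seq;
  uint8_t *der = NULL;
  size_t der_len = 0;
  if (!CBB_init(&cbb, 16) ||
      !CBB_add_asn1(&cbb, &seq, CBS_ASN1_SEQUENCE) ||
      !CBB_add_asn1_uint64(&seq, 42) ||
      !CBB_finish(&cbb, &der, &der_len)) {
    CBB_cleanup(&cbb);
    return 1;
  }

  /* Parse it back. */
  CBS cbs, body;
  uint64_t value;
  CBS_init(&cbs, der, der_len);
  if (!CBS_get_asn1(&cbs, &body, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1_uint64(&body, &value)) {
    OPENSSL_free(der);
    return 1;
  }
  /* Expect value == 42 and der_len == 5 (30 03 02 01 2a). */
  printf("value = %llu, %zu bytes\n", (unsigned long long)value, der_len);
  OPENSSL_free(der);
  return 0;
}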
struct cbb_buffer_st *base; base = OPENSSL_malloc(sizeof(struct cbb_buffer_st)); @@ -75,8 +76,8 @@ int CBB_init_fixed(CBB *cbb, uint8_t *buf, size_t len) { void CBB_cleanup(CBB *cbb) { if (cbb->base) { - /* Only top-level |CBB|s are cleaned up. Child |CBB|s are non-owning. They - * are implicitly discarded when the parent is flushed or cleaned up. */ + // Only top-level |CBB|s are cleaned up. Child |CBB|s are non-owning. They + // are implicitly discarded when the parent is flushed or cleaned up. assert(cbb->is_top_level); if (cbb->base->can_resize) { @@ -97,7 +98,7 @@ static int cbb_buffer_reserve(struct cbb_buffer_st *base, uint8_t **out, newlen = base->len + len; if (newlen < base->len) { - /* Overflow */ + // Overflow goto err; } @@ -137,7 +138,7 @@ static int cbb_buffer_add(struct cbb_buffer_st *base, uint8_t **out, if (!cbb_buffer_reserve(base, out, len)) { return 0; } - /* This will not overflow or |cbb_buffer_reserve| would have failed. */ + // This will not overflow or |cbb_buffer_reserve| would have failed. base->len += len; return 1; } @@ -176,7 +177,7 @@ int CBB_finish(CBB *cbb, uint8_t **out_data, size_t *out_len) { } if (cbb->base->can_resize && (out_data == NULL || out_len == NULL)) { - /* |out_data| and |out_len| can only be NULL if the CBB is fixed. */ + // |out_data| and |out_len| can only be NULL if the CBB is fixed. return 0; } @@ -191,15 +192,15 @@ int CBB_finish(CBB *cbb, uint8_t **out_data, size_t *out_len) { return 1; } -/* CBB_flush recurses and then writes out any pending length prefix. The - * current length of the underlying base is taken to be the length of the - * length-prefixed data. */ +// CBB_flush recurses and then writes out any pending length prefix. The +// current length of the underlying base is taken to be the length of the +// length-prefixed data. int CBB_flush(CBB *cbb) { size_t child_start, i, len; - /* If |cbb->base| has hit an error, the buffer is in an undefined state, so - * fail all following calls. In particular, |cbb->child| may point to invalid - * memory. */ + // If |cbb->base| has hit an error, the buffer is in an undefined state, so + // fail all following calls. In particular, |cbb->child| may point to invalid + // memory. if (cbb->base == NULL || cbb->base->error) { return 0; } @@ -219,16 +220,16 @@ int CBB_flush(CBB *cbb) { len = cbb->base->len - child_start; if (cbb->child->pending_is_asn1) { - /* For ASN.1 we assume that we'll only need a single byte for the length. - * If that turned out to be incorrect, we have to move the contents along - * in order to make space. */ + // For ASN.1 we assume that we'll only need a single byte for the length. + // If that turned out to be incorrect, we have to move the contents along + // in order to make space. uint8_t len_len; uint8_t initial_length_byte; assert (cbb->child->pending_len_len == 1); if (len > 0xfffffffe) { - /* Too large. */ + // Too large. goto err; } else if (len > 0xffffff) { len_len = 5; @@ -249,7 +250,7 @@ int CBB_flush(CBB *cbb) { } if (len_len != 1) { - /* We need to move the contents along in order to make space. */ + // We need to move the contents along in order to make space. size_t extra_bytes = len_len - 1; if (!cbb_buffer_add(cbb->base, NULL, extra_bytes)) { goto err; @@ -328,17 +329,43 @@ int CBB_add_u24_length_prefixed(CBB *cbb, CBB *out_contents) { return cbb_add_length_prefixed(cbb, out_contents, 3); } +// add_base128_integer encodes |v| as a big-endian base-128 integer where the +// high bit of each byte indicates where there is more data. 
This is the +// encoding used in DER for both high tag number form and OID components. +static int add_base128_integer(CBB *cbb, uint32_t v) { + unsigned len_len = 0; + unsigned copy = v; + while (copy > 0) { + len_len++; + copy >>= 7; + } + if (len_len == 0) { + len_len = 1; // Zero is encoded with one byte. + } + for (unsigned i = len_len - 1; i < len_len; i--) { + uint8_t byte = (v >> (7 * i)) & 0x7f; + if (i != 0) { + // The high bit denotes whether there is more data. + byte |= 0x80; + } + if (!CBB_add_u8(cbb, byte)) { + return 0; + } + } + return 1; +} + int CBB_add_asn1(CBB *cbb, CBB *out_contents, unsigned tag) { if (tag > 0xff || (tag & 0x1f) == 0x1f) { - /* Long form identifier octets are not supported. Further, all current valid - * tag serializations are 8 bits. */ + // Long form identifier octets are not supported. Further, all current valid + // tag serializations are 8 bits. cbb->base->error = 1; return 0; } if (!CBB_flush(cbb) || - /* |tag|'s representation matches the DER encoding. */ + // |tag|'s representation matches the DER encoding. !CBB_add_u8(cbb, (uint8_t)tag)) { return 0; } @@ -451,11 +478,11 @@ int CBB_add_asn1_uint64(CBB *cbb, uint64_t value) { uint8_t byte = (value >> 8*(7-i)) & 0xff; if (!started) { if (byte == 0) { - /* Don't encode leading zeros. */ + // Don't encode leading zeros. continue; } - /* If the high bit is set, add a padding byte to make it - * unsigned. */ + // If the high bit is set, add a padding byte to make it + // unsigned. if ((byte & 0x80) && !CBB_add_u8(&child, 0)) { return 0; } @@ -466,10 +493,76 @@ int CBB_add_asn1_uint64(CBB *cbb, uint64_t value) { } } - /* 0 is encoded as a single 0, not the empty string. */ + // 0 is encoded as a single 0, not the empty string. if (!started && !CBB_add_u8(&child, 0)) { return 0; } return CBB_flush(cbb); } + +// parse_dotted_decimal parses one decimal component from |cbs|, where |cbs| is +// an OID literal, e.g., "1.2.840.113554.4.1.72585". It consumes both the +// component and the dot, so |cbs| may be passed into the function again for the +// next value. +static int parse_dotted_decimal(CBS *cbs, uint32_t *out) { + *out = 0; + int seen_digit = 0; + for (;;) { + // Valid terminators for a component are the end of the string or a + // non-terminal dot. If the string ends with a dot, this is not a valid OID + // string. + uint8_t u; + if (!CBS_get_u8(cbs, &u) || + (u == '.' && CBS_len(cbs) > 0)) { + break; + } + if (u < '0' || u > '9' || + // Forbid stray leading zeros. + (seen_digit && *out == 0) || + // Check for overflow. + *out > UINT32_MAX / 10 || + *out * 10 > UINT32_MAX - (u - '0')) { + return 0; + } + *out = *out * 10 + (u - '0'); + seen_digit = 1; + } + // The empty string is not a legal OID component. + return seen_digit; +} + +int CBB_add_asn1_oid_from_text(CBB *cbb, const char *text, size_t len) { + if (!CBB_flush(cbb)) { + return 0; + } + + CBS cbs; + CBS_init(&cbs, (const uint8_t *)text, len); + + // OIDs must have at least two components. + uint32_t a, b; + if (!parse_dotted_decimal(&cbs, &a) || + !parse_dotted_decimal(&cbs, &b)) { + return 0; + } + + // The first component is encoded as 40 * |a| + |b|. This assumes that |a| is + // 0, 1, or 2 and that, when it is 0 or 1, |b| is at most 39. + if (a > 2 || + (a < 2 && b > 39) || + b > UINT32_MAX - 80 || + !add_base128_integer(cbb, 40 * a + b)) { + return 0; + } + + // The remaining components are encoded unmodified. 
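As a concrete check of the two helpers above (illustrative, not part of the patch): for the example literal "1.2.840.113554.4.1.72585", the first two components collapse to 40*1 + 2 = 42 (0x2a), and each remaining component is written base-128 with the high bit set on every byte except the last, giving content octets 2a 86 48 86 f7 12 04 01 84 b7 09. Wrapped in an OBJECT IDENTIFIER element via the public CBB API:

#include <string.h>
#include <openssl/bytestring.h>
#include <openssl/mem.h>

// Encodes the example OID literal as a complete DER OBJECT IDENTIFIER,
// i.e. 06 0b 2a 86 48 86 f7 12 04 01 84 b7 09.
static int encode_example_oid(uint8_t **out_der, size_t *out_der_len) {
  static const char kOidText[] = "1.2.840.113554.4.1.72585";
  CBB cbb, oid;
  if (!CBB_init(&cbb, 0) ||
      !CBB_add_asn1(&cbb, &oid, CBS_ASN1_OBJECT) ||
      !CBB_add_asn1_oid_from_text(&oid, kOidText, strlen(kOidText)) ||
      !CBB_finish(&cbb, out_der, out_der_len)) {
    CBB_cleanup(&cbb);
    return 0;
  }
  return 1;
}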
+ while (CBS_len(&cbs) > 0) { + if (!parse_dotted_decimal(&cbs, &a) || + !add_base128_integer(cbb, a)) { + return 0; + } + } + + return 1; +} diff --git a/Sources/BoringSSL/crypto/bytestring/cbs.c b/Sources/BoringSSL/crypto/bytestring/cbs.c index 14c55a4dc..ec495d210 100644 --- a/Sources/BoringSSL/crypto/bytestring/cbs.c +++ b/Sources/BoringSSL/crypto/bytestring/cbs.c @@ -190,13 +190,13 @@ static int cbs_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, return 0; } - /* ITU-T X.690 section 8.1.2.3 specifies the format for identifiers with a tag - * number no greater than 30. - * - * If the number portion is 31 (0x1f, the largest value that fits in the - * allotted bits), then the tag is more than one byte long and the - * continuation bytes contain the tag number. This parser only supports tag - * numbers less than 31 (and thus single-byte tags). */ + // ITU-T X.690 section 8.1.2.3 specifies the format for identifiers with a tag + // number no greater than 30. + // + // If the number portion is 31 (0x1f, the largest value that fits in the + // allotted bits), then the tag is more than one byte long and the + // continuation bytes contain the tag number. This parser only supports tag + // numbers less than 31 (and thus single-byte tags). if ((tag & 0x1f) == 0x1f) { return 0; } @@ -206,52 +206,51 @@ static int cbs_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, } size_t len; - /* The format for the length encoding is specified in ITU-T X.690 section - * 8.1.3. */ + // The format for the length encoding is specified in ITU-T X.690 section + // 8.1.3. if ((length_byte & 0x80) == 0) { - /* Short form length. */ + // Short form length. len = ((size_t) length_byte) + 2; if (out_header_len != NULL) { *out_header_len = 2; } } else { - /* The high bit indicate that this is the long form, while the next 7 bits - * encode the number of subsequent octets used to encode the length (ITU-T - * X.690 clause 8.1.3.5.b). */ + // The high bit indicate that this is the long form, while the next 7 bits + // encode the number of subsequent octets used to encode the length (ITU-T + // X.690 clause 8.1.3.5.b). const size_t num_bytes = length_byte & 0x7f; uint32_t len32; if (ber_ok && (tag & CBS_ASN1_CONSTRUCTED) != 0 && num_bytes == 0) { - /* indefinite length */ + // indefinite length if (out_header_len != NULL) { *out_header_len = 2; } return CBS_get_bytes(cbs, out, 2); } - /* ITU-T X.690 clause 8.1.3.5.c specifies that the value 0xff shall not be - * used as the first byte of the length. If this parser encounters that - * value, num_bytes will be parsed as 127, which will fail the check below. - */ + // ITU-T X.690 clause 8.1.3.5.c specifies that the value 0xff shall not be + // used as the first byte of the length. If this parser encounters that + // value, num_bytes will be parsed as 127, which will fail the check below. if (num_bytes == 0 || num_bytes > 4) { return 0; } if (!cbs_get_u(&header, &len32, num_bytes)) { return 0; } - /* ITU-T X.690 section 10.1 (DER length forms) requires encoding the length - * with the minimum number of octets. */ + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. if (len32 < 128) { - /* Length should have used short-form encoding. */ + // Length should have used short-form encoding. return 0; } if ((len32 >> ((num_bytes-1)*8)) == 0) { - /* Length should have been at least one byte shorter. */ + // Length should have been at least one byte shorter. 
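The length checks above have a compact dual on the writing side (illustrative sketch, not from the patch): lengths below 128 use the single-octet short form, anything larger uses 0x80 | n followed by the minimal n big-endian octets, so 5 encodes as 05, 200 as 81 c8 and 300 as 82 01 2c.

#include <stddef.h>
#include <stdint.h>

// Writes a DER length per X.690 sections 8.1.3 and 10.1 and returns the
// number of bytes produced; mirrors the minimality rules checked above.
static size_t der_write_length(uint8_t out[1 + sizeof(size_t)], size_t len) {
  if (len < 128) {
    out[0] = (uint8_t)len;  // short form, e.g. 5 -> 05
    return 1;
  }
  uint8_t tmp[sizeof(size_t)];
  size_t n = 0;
  while (len > 0) {  // minimal number of octets, no leading zeros
    tmp[n++] = (uint8_t)(len & 0xff);
    len >>= 8;
  }
  out[0] = (uint8_t)(0x80 | n);  // long form prefix, e.g. 300 -> 82 01 2c
  for (size_t i = 0; i < n; i++) {
    out[1 + i] = tmp[n - 1 - i];  // big-endian
  }
  return 1 + n;
}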
return 0; } len = len32; if (len + 2 + num_bytes < len) { - /* Overflow. */ + // Overflow. return 0; } len += 2 + num_bytes; @@ -329,35 +328,32 @@ int CBS_peek_asn1_tag(const CBS *cbs, unsigned tag_value) { int CBS_get_asn1_uint64(CBS *cbs, uint64_t *out) { CBS bytes; - const uint8_t *data; - size_t i, len; - if (!CBS_get_asn1(cbs, &bytes, CBS_ASN1_INTEGER)) { return 0; } *out = 0; - data = CBS_data(&bytes); - len = CBS_len(&bytes); + const uint8_t *data = CBS_data(&bytes); + size_t len = CBS_len(&bytes); if (len == 0) { - /* An INTEGER is encoded with at least one octet. */ + // An INTEGER is encoded with at least one octet. return 0; } if ((data[0] & 0x80) != 0) { - /* Negative number. */ + // Negative number. return 0; } if (data[0] == 0 && len > 1 && (data[1] & 0x80) == 0) { - /* Extra leading zeros. */ + // Extra leading zeros. return 0; } - for (i = 0; i < len; i++) { + for (size_t i = 0; i < len; i++) { if ((*out >> 56) != 0) { - /* Too large to represent as a uint64_t. */ + // Too large to represent as a uint64_t. return 0; } *out <<= 8; @@ -465,7 +461,7 @@ int CBS_is_valid_asn1_bitstring(const CBS *cbs) { return 1; } - /* All num_unused_bits bits must exist and be zeros. */ + // All num_unused_bits bits must exist and be zeros. uint8_t last; if (!CBS_get_last_u8(&in, &last) || (last & ((1 << num_unused_bits) - 1)) != 0) { @@ -483,9 +479,9 @@ int CBS_asn1_bitstring_has_bit(const CBS *cbs, unsigned bit) { const unsigned byte_num = (bit >> 3) + 1; const unsigned bit_num = 7 - (bit & 7); - /* Unused bits are zero, and this function does not distinguish between - * missing and unset bits. Thus it is sufficient to do a byte-level length - * check. */ + // Unused bits are zero, and this function does not distinguish between + // missing and unset bits. Thus it is sufficient to do a byte-level length + // check. return byte_num < CBS_len(cbs) && (CBS_data(cbs)[byte_num] & (1 << bit_num)) != 0; } diff --git a/Sources/BoringSSL/crypto/bytestring/internal.h b/Sources/BoringSSL/crypto/bytestring/internal.h index 2fed41390..f6ac32cd9 100644 --- a/Sources/BoringSSL/crypto/bytestring/internal.h +++ b/Sources/BoringSSL/crypto/bytestring/internal.h @@ -22,54 +22,54 @@ extern "C" { #endif -/* CBS_asn1_ber_to_der reads a BER element from |in|. If it finds - * indefinite-length elements or constructed strings then it converts the BER - * data to DER and sets |*out| and |*out_length| to describe a malloced buffer - * containing the DER data. Additionally, |*in| will be advanced over the BER - * element. - * - * If it doesn't find any indefinite-length elements or constructed strings then - * it sets |*out| to NULL and |*in| is unmodified. - * - * This function should successfully process any valid BER input, however it - * will not convert all of BER's deviations from DER. BER is ambiguous between - * implicitly-tagged SEQUENCEs of strings and implicitly-tagged constructed - * strings. Implicitly-tagged strings must be parsed with - * |CBS_get_ber_implicitly_tagged_string| instead of |CBS_get_asn1|. The caller - * must also account for BER variations in the contents of a primitive. - * - * It returns one on success and zero otherwise. */ +// CBS_asn1_ber_to_der reads a BER element from |in|. If it finds +// indefinite-length elements or constructed strings then it converts the BER +// data to DER and sets |*out| and |*out_length| to describe a malloced buffer +// containing the DER data. Additionally, |*in| will be advanced over the BER +// element. 
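To make the INTEGER rules in the cbs.c hunk above concrete (illustrative): 02 01 00 decodes to 0, 02 02 00 80 decodes to 128 (the leading zero octet is required because 0x80 alone would read as negative), while 02 02 00 7f and 02 01 80 are rejected for a redundant leading zero and for being negative, respectively.

#include <assert.h>
#include <openssl/bytestring.h>

// Spot-checks the CBS_get_asn1_uint64 acceptance rules described above.
static void integer_rules_example(void) {
  static const uint8_t kOneTwentyEight[] = {0x02, 0x02, 0x00, 0x80};
  static const uint8_t kNegative[] = {0x02, 0x01, 0x80};
  uint64_t v;
  CBS cbs;

  CBS_init(&cbs, kOneTwentyEight, sizeof(kOneTwentyEight));
  assert(CBS_get_asn1_uint64(&cbs, &v) && v == 128);

  CBS_init(&cbs, kNegative, sizeof(kNegative));
  assert(!CBS_get_asn1_uint64(&cbs, &v));
}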
+// +// If it doesn't find any indefinite-length elements or constructed strings then +// it sets |*out| to NULL and |*in| is unmodified. +// +// This function should successfully process any valid BER input, however it +// will not convert all of BER's deviations from DER. BER is ambiguous between +// implicitly-tagged SEQUENCEs of strings and implicitly-tagged constructed +// strings. Implicitly-tagged strings must be parsed with +// |CBS_get_ber_implicitly_tagged_string| instead of |CBS_get_asn1|. The caller +// must also account for BER variations in the contents of a primitive. +// +// It returns one on success and zero otherwise. OPENSSL_EXPORT int CBS_asn1_ber_to_der(CBS *in, uint8_t **out, size_t *out_len); -/* CBS_get_asn1_implicit_string parses a BER string of primitive type - * |inner_tag| implicitly-tagged with |outer_tag|. It sets |out| to the - * contents. If concatenation was needed, it sets |*out_storage| to a buffer - * which the caller must release with |OPENSSL_free|. Otherwise, it sets - * |*out_storage| to NULL. - * - * This function does not parse all of BER. It requires the string be - * definite-length. Constructed strings are allowed, but all children of the - * outermost element must be primitive. The caller should use - * |CBS_asn1_ber_to_der| before running this function. - * - * It returns one on success and zero otherwise. */ +// CBS_get_asn1_implicit_string parses a BER string of primitive type +// |inner_tag| implicitly-tagged with |outer_tag|. It sets |out| to the +// contents. If concatenation was needed, it sets |*out_storage| to a buffer +// which the caller must release with |OPENSSL_free|. Otherwise, it sets +// |*out_storage| to NULL. +// +// This function does not parse all of BER. It requires the string be +// definite-length. Constructed strings are allowed, but all children of the +// outermost element must be primitive. The caller should use +// |CBS_asn1_ber_to_der| before running this function. +// +// It returns one on success and zero otherwise. OPENSSL_EXPORT int CBS_get_asn1_implicit_string(CBS *in, CBS *out, uint8_t **out_storage, unsigned outer_tag, unsigned inner_tag); -/* CBB_finish_i2d calls |CBB_finish| on |cbb| which must have been initialized - * with |CBB_init|. If |outp| is not NULL then the result is written to |*outp| - * and |*outp| is advanced just past the output. It returns the number of bytes - * in the result, whether written or not, or a negative value on error. On - * error, it calls |CBB_cleanup| on |cbb|. - * - * This function may be used to help implement legacy i2d ASN.1 functions. */ +// CBB_finish_i2d calls |CBB_finish| on |cbb| which must have been initialized +// with |CBB_init|. If |outp| is not NULL then the result is written to |*outp| +// and |*outp| is advanced just past the output. It returns the number of bytes +// in the result, whether written or not, or a negative value on error. On +// error, it calls |CBB_cleanup| on |cbb|. +// +// This function may be used to help implement legacy i2d ASN.1 functions. 
int CBB_finish_i2d(CBB *cbb, uint8_t **outp); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_BYTESTRING_INTERNAL_H */ +#endif // OPENSSL_HEADER_BYTESTRING_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/chacha/chacha.c b/Sources/BoringSSL/crypto/chacha/chacha.c index fe32596a2..646ef7a6c 100644 --- a/Sources/BoringSSL/crypto/chacha/chacha.c +++ b/Sources/BoringSSL/crypto/chacha/chacha.c @@ -12,7 +12,7 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* Adapted from the public domain, estream code by D. Bernstein. */ +// Adapted from the public domain, estream code by D. Bernstein. #include @@ -32,7 +32,7 @@ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) -/* ChaCha20_ctr32 is defined in asm/chacha-*.pl. */ +// ChaCha20_ctr32 is defined in asm/chacha-*.pl. void ChaCha20_ctr32(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t key[8], const uint32_t counter[4]); @@ -48,7 +48,7 @@ void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len, const uint32_t *key_ptr = (const uint32_t *)key; #if !defined(OPENSSL_X86) && !defined(OPENSSL_X86_64) - /* The assembly expects the key to be four-byte aligned. */ + // The assembly expects the key to be four-byte aligned. uint32_t key_u32[8]; if ((((uintptr_t)key) & 3) != 0) { key_u32[0] = U8TO32_LITTLE(key + 0); @@ -69,7 +69,7 @@ void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len, #else -/* sigma contains the ChaCha constants, which happen to be an ASCII string. */ +// sigma contains the ChaCha constants, which happen to be an ASCII string. static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' }; @@ -83,15 +83,15 @@ static const uint8_t sigma[16] = { 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', (p)[3] = (v >> 24) & 0xff; \ } -/* QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round. */ +// QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round. #define QUARTERROUND(a, b, c, d) \ x[a] += x[b]; x[d] = ROTATE(x[d] ^ x[a], 16); \ x[c] += x[d]; x[b] = ROTATE(x[b] ^ x[c], 12); \ x[a] += x[b]; x[d] = ROTATE(x[d] ^ x[a], 8); \ x[c] += x[d]; x[b] = ROTATE(x[b] ^ x[c], 7); -/* chacha_core performs 20 rounds of ChaCha on the input words in - * |input| and writes the 64 output bytes to |output|. */ +// chacha_core performs 20 rounds of ChaCha on the input words in +// |input| and writes the 64 output bytes to |output|. static void chacha_core(uint8_t output[64], const uint32_t input[16]) { uint32_t x[16]; int i; diff --git a/Sources/BoringSSL/crypto/cipher/aead.c b/Sources/BoringSSL/crypto/cipher/aead.c deleted file mode 100644 index b5ff48a02..000000000 --- a/Sources/BoringSSL/crypto/cipher/aead.c +++ /dev/null @@ -1,156 +0,0 @@ -/* Copyright (c) 2014, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
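For reference, the one-shot interface implemented in the chacha.c hunk above is consumed through <openssl/chacha.h>; a minimal wrapper (illustrative, the wrapper name and buffer names are not from the library):

#include <openssl/chacha.h>

// XORs |in| with the ChaCha20 keystream for |key|, |nonce| and the initial
// 32-bit block counter |counter|; |out| may alias |in| for in-place use.
static void chacha20_xor(uint8_t *out, const uint8_t *in, size_t len,
                         const uint8_t key[32], const uint8_t nonce[12],
                         uint32_t counter) {
  CRYPTO_chacha_20(out, in, len, key, nonce, counter);
}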
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include - -#include -#include - -#include "internal.h" -#include "../internal.h" - - -size_t EVP_AEAD_key_length(const EVP_AEAD *aead) { return aead->key_len; } - -size_t EVP_AEAD_nonce_length(const EVP_AEAD *aead) { return aead->nonce_len; } - -size_t EVP_AEAD_max_overhead(const EVP_AEAD *aead) { return aead->overhead; } - -size_t EVP_AEAD_max_tag_len(const EVP_AEAD *aead) { return aead->max_tag_len; } - -void EVP_AEAD_CTX_zero(EVP_AEAD_CTX *ctx) { - OPENSSL_memset(ctx, 0, sizeof(EVP_AEAD_CTX)); -} - -int EVP_AEAD_CTX_init(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead, - const uint8_t *key, size_t key_len, size_t tag_len, - ENGINE *impl) { - if (!aead->init) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_DIRECTION_SET); - ctx->aead = NULL; - return 0; - } - return EVP_AEAD_CTX_init_with_direction(ctx, aead, key, key_len, tag_len, - evp_aead_open); -} - -int EVP_AEAD_CTX_init_with_direction(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead, - const uint8_t *key, size_t key_len, - size_t tag_len, - enum evp_aead_direction_t dir) { - if (key_len != aead->key_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_KEY_SIZE); - ctx->aead = NULL; - return 0; - } - - ctx->aead = aead; - - int ok; - if (aead->init) { - ok = aead->init(ctx, key, key_len, tag_len); - } else { - ok = aead->init_with_direction(ctx, key, key_len, tag_len, dir); - } - - if (!ok) { - ctx->aead = NULL; - } - - return ok; -} - -void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx) { - if (ctx->aead == NULL) { - return; - } - ctx->aead->cleanup(ctx); - ctx->aead = NULL; -} - -/* check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If - * |in| and |out| alias, we require that |in| == |out|. */ -static int check_alias(const uint8_t *in, size_t in_len, const uint8_t *out, - size_t out_len) { - if (!buffers_alias(in, in_len, out, out_len)) { - return 1; - } - - return in == out; -} - -int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, - size_t max_out_len, const uint8_t *nonce, - size_t nonce_len, const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - size_t possible_out_len = in_len + ctx->aead->overhead; - - if (possible_out_len < in_len /* overflow */) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); - goto error; - } - - if (!check_alias(in, in_len, out, max_out_len)) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT); - goto error; - } - - if (ctx->aead->seal(ctx, out, out_len, max_out_len, nonce, nonce_len, in, - in_len, ad, ad_len)) { - return 1; - } - -error: - /* In the event of an error, clear the output buffer so that a caller - * that doesn't check the return value doesn't send raw data. 
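The file being deleted here implements the generic EVP_AEAD front end; the one-shot API it exposes stays public in <openssl/aead.h>. A minimal seal sketch against that API (illustrative; sizes are those of AES-256-GCM, and |out| must have room for the plaintext plus the tag):

#include <openssl/aead.h>

// Seals |pt| with AES-256-GCM and reports the sealed length via |out_len|.
static int seal_aes_256_gcm(uint8_t *out, size_t *out_len, size_t max_out,
                            const uint8_t key[32], const uint8_t nonce[12],
                            const uint8_t *pt, size_t pt_len,
                            const uint8_t *ad, size_t ad_len) {
  EVP_AEAD_CTX ctx;
  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_256_gcm(), key, 32,
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;
  }
  int ok = EVP_AEAD_CTX_seal(&ctx, out, out_len, max_out, nonce, 12, pt,
                             pt_len, ad, ad_len);
  EVP_AEAD_CTX_cleanup(&ctx);
  return ok;
}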
*/ - OPENSSL_memset(out, 0, max_out_len); - *out_len = 0; - return 0; -} - -int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, - size_t max_out_len, const uint8_t *nonce, - size_t nonce_len, const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - if (!check_alias(in, in_len, out, max_out_len)) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT); - goto error; - } - - if (ctx->aead->open(ctx, out, out_len, max_out_len, nonce, nonce_len, in, - in_len, ad, ad_len)) { - return 1; - } - -error: - /* In the event of an error, clear the output buffer so that a caller - * that doesn't check the return value doesn't try and process bad - * data. */ - OPENSSL_memset(out, 0, max_out_len); - *out_len = 0; - return 0; -} - -const EVP_AEAD *EVP_AEAD_CTX_aead(const EVP_AEAD_CTX *ctx) { return ctx->aead; } - -int EVP_AEAD_CTX_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv, - size_t *out_len) { - if (ctx->aead->get_iv == NULL) { - return 0; - } - - return ctx->aead->get_iv(ctx, out_iv, out_len); -} diff --git a/Sources/BoringSSL/crypto/cipher/e_aes.c b/Sources/BoringSSL/crypto/cipher/e_aes.c deleted file mode 100644 index f67cdadbc..000000000 --- a/Sources/BoringSSL/crypto/cipher/e_aes.c +++ /dev/null @@ -1,1771 +0,0 @@ -/* ==================================================================== - * Copyright (c) 2001-2011 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "internal.h" -#include "../internal.h" -#include "../modes/internal.h" - -#if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) -#include -#endif - - -OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) /* Unreachable code. */ - -typedef struct { - union { - double align; - AES_KEY ks; - } ks; - block128_f block; - union { - cbc128_f cbc; - ctr128_f ctr; - } stream; -} EVP_AES_KEY; - -typedef struct { - union { - double align; - AES_KEY ks; - } ks; /* AES key schedule to use */ - int key_set; /* Set if key initialised */ - int iv_set; /* Set if an iv is set */ - GCM128_CONTEXT gcm; - uint8_t *iv; /* Temporary IV store */ - int ivlen; /* IV length */ - int taglen; - int iv_gen; /* It is OK to generate IVs */ - ctr128_f ctr; -} EVP_AES_GCM_CTX; - -#if !defined(OPENSSL_NO_ASM) && \ - (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) -#define VPAES -static char vpaes_capable(void) { - return (OPENSSL_ia32cap_P[1] & (1 << (41 - 32))) != 0; -} - -#if defined(OPENSSL_X86_64) -#define BSAES -static char bsaes_capable(void) { - return vpaes_capable(); -} -#endif - -#elif !defined(OPENSSL_NO_ASM) && \ - (defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) - -#if defined(OPENSSL_ARM) && __ARM_MAX_ARCH__ >= 7 -#define BSAES -static char bsaes_capable(void) { - return CRYPTO_is_NEON_capable(); -} -#endif - -#define HWAES -static int hwaes_capable(void) { - return CRYPTO_is_ARMv8_AES_capable(); -} - -#elif !defined(OPENSSL_NO_ASM) && defined(OPENSSL_PPC64LE) - -#define HWAES -static int hwaes_capable(void) { - return CRYPTO_is_PPC64LE_vcrypto_capable(); -} - -#endif /* OPENSSL_PPC64LE */ - - -#if defined(BSAES) -/* On platforms where BSAES gets defined (just above), then these functions are - * provided by asm. */ -void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, - const AES_KEY *key, uint8_t ivec[16], int enc); -void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len, - const AES_KEY *key, const uint8_t ivec[16]); -#else -static char bsaes_capable(void) { - return 0; -} - -/* On other platforms, bsaes_capable() will always return false and so the - * following will never be called. */ -static void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, - const AES_KEY *key, uint8_t ivec[16], int enc) { - abort(); -} - -static void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, - size_t len, const AES_KEY *key, - const uint8_t ivec[16]) { - abort(); -} -#endif - -#if defined(VPAES) -/* On platforms where VPAES gets defined (just above), then these functions are - * provided by asm. 
*/ -int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); -int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); - -void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); -void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); - -void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, - const AES_KEY *key, uint8_t *ivec, int enc); -#else -static char vpaes_capable(void) { - return 0; -} - -/* On other platforms, vpaes_capable() will always return false and so the - * following will never be called. */ -static int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, - AES_KEY *key) { - abort(); -} -static int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, - AES_KEY *key) { - abort(); -} -static void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { - abort(); -} -static void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { - abort(); -} -static void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, - const AES_KEY *key, uint8_t *ivec, int enc) { - abort(); -} -#endif - -#if defined(HWAES) -int aes_hw_set_encrypt_key(const uint8_t *user_key, const int bits, - AES_KEY *key); -int aes_hw_set_decrypt_key(const uint8_t *user_key, const int bits, - AES_KEY *key); -void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); -void aes_hw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); -void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, - const AES_KEY *key, uint8_t *ivec, const int enc); -void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len, - const AES_KEY *key, const uint8_t ivec[16]); -#else -/* If HWAES isn't defined then we provide dummy functions for each of the hwaes - * functions. */ -static int hwaes_capable(void) { - return 0; -} - -static int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, - AES_KEY *key) { - abort(); -} - -static int aes_hw_set_decrypt_key(const uint8_t *user_key, int bits, - AES_KEY *key) { - abort(); -} - -static void aes_hw_encrypt(const uint8_t *in, uint8_t *out, - const AES_KEY *key) { - abort(); -} - -static void aes_hw_decrypt(const uint8_t *in, uint8_t *out, - const AES_KEY *key) { - abort(); -} - -static void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, - const AES_KEY *key, uint8_t *ivec, int enc) { - abort(); -} - -static void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, - size_t len, const AES_KEY *key, - const uint8_t ivec[16]) { - abort(); -} -#endif - -#if !defined(OPENSSL_NO_ASM) && \ - (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) -int aesni_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); -int aesni_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); - -void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); -void aesni_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); - -void aesni_ecb_encrypt(const uint8_t *in, uint8_t *out, size_t length, - const AES_KEY *key, int enc); -void aesni_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, - const AES_KEY *key, uint8_t *ivec, int enc); - -#else - -/* On other platforms, aesni_capable() will always return false and so the - * following will never be called. 
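The stubs above and below all follow the same shape, sketched here with hypothetical names (this is not BoringSSL code): each accelerated routine is paired with a capability probe, and on platforms without the assembly the stub only satisfies the linker; it aborts, but is unreachable because the probe is compiled to return 0.

#include <stdlib.h>

// EXAMPLE_HAVE_ACCEL stands in for the real OPENSSL_* platform guards.
static int accel_capable(void) {
#if defined(EXAMPLE_HAVE_ACCEL)
  return 1;  // in the real code: a CPUID/HWCAP check
#else
  return 0;
#endif
}

#if defined(EXAMPLE_HAVE_ACCEL)
void accel_encrypt(void);  // provided by assembly in the real code
#else
static void accel_encrypt(void) { abort(); }  // never reached
#endif

static void generic_encrypt(void) { /* portable fallback */ }

void encrypt_dispatch(void) {
  if (accel_capable()) {
    accel_encrypt();
  } else {
    generic_encrypt();
  }
}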
*/ -static void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { - abort(); -} -static int aesni_set_encrypt_key(const uint8_t *userKey, int bits, - AES_KEY *key) { - abort(); -} -static void aesni_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, - size_t blocks, const void *key, - const uint8_t *ivec) { - abort(); -} - -#endif - -static int aes_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, - const uint8_t *iv, int enc) { - int ret, mode; - EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; - - mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK; - if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) { - if (hwaes_capable()) { - ret = aes_hw_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); - dat->block = (block128_f)aes_hw_decrypt; - dat->stream.cbc = NULL; - if (mode == EVP_CIPH_CBC_MODE) { - dat->stream.cbc = (cbc128_f)aes_hw_cbc_encrypt; - } - } else if (bsaes_capable() && mode == EVP_CIPH_CBC_MODE) { - ret = AES_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); - dat->block = (block128_f)AES_decrypt; - dat->stream.cbc = (cbc128_f)bsaes_cbc_encrypt; - } else if (vpaes_capable()) { - ret = vpaes_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); - dat->block = (block128_f)vpaes_decrypt; - dat->stream.cbc = - mode == EVP_CIPH_CBC_MODE ? (cbc128_f)vpaes_cbc_encrypt : NULL; - } else { - ret = AES_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); - dat->block = (block128_f)AES_decrypt; - dat->stream.cbc = - mode == EVP_CIPH_CBC_MODE ? (cbc128_f)AES_cbc_encrypt : NULL; - } - } else if (hwaes_capable()) { - ret = aes_hw_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); - dat->block = (block128_f)aes_hw_encrypt; - dat->stream.cbc = NULL; - if (mode == EVP_CIPH_CBC_MODE) { - dat->stream.cbc = (cbc128_f)aes_hw_cbc_encrypt; - } else if (mode == EVP_CIPH_CTR_MODE) { - dat->stream.ctr = (ctr128_f)aes_hw_ctr32_encrypt_blocks; - } - } else if (bsaes_capable() && mode == EVP_CIPH_CTR_MODE) { - ret = AES_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); - dat->block = (block128_f)AES_encrypt; - dat->stream.ctr = (ctr128_f)bsaes_ctr32_encrypt_blocks; - } else if (vpaes_capable()) { - ret = vpaes_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); - dat->block = (block128_f)vpaes_encrypt; - dat->stream.cbc = - mode == EVP_CIPH_CBC_MODE ? (cbc128_f)vpaes_cbc_encrypt : NULL; - } else { - ret = AES_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); - dat->block = (block128_f)AES_encrypt; - dat->stream.cbc = - mode == EVP_CIPH_CBC_MODE ? 
(cbc128_f)AES_cbc_encrypt : NULL; - } - - if (ret < 0) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_AES_KEY_SETUP_FAILED); - return 0; - } - - return 1; -} - -static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, - size_t len) { - EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; - - if (dat->stream.cbc) { - (*dat->stream.cbc)(in, out, len, &dat->ks, ctx->iv, ctx->encrypt); - } else if (ctx->encrypt) { - CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv, dat->block); - } else { - CRYPTO_cbc128_decrypt(in, out, len, &dat->ks, ctx->iv, dat->block); - } - - return 1; -} - -static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, - size_t len) { - size_t bl = ctx->cipher->block_size; - EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; - - if (len < bl) { - return 1; - } - - len -= bl; - for (size_t i = 0; i <= len; i += bl) { - (*dat->block)(in + i, out + i, &dat->ks); - } - - return 1; -} - -static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, - size_t len) { - EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; - - if (dat->stream.ctr) { - CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks, ctx->iv, ctx->buf, - &ctx->num, dat->stream.ctr); - } else { - CRYPTO_ctr128_encrypt(in, out, len, &dat->ks, ctx->iv, ctx->buf, &ctx->num, - dat->block); - } - return 1; -} - -static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, - size_t len) { - EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; - - CRYPTO_ofb128_encrypt(in, out, len, &dat->ks, ctx->iv, &ctx->num, dat->block); - return 1; -} - -static char aesni_capable(void); - -static ctr128_f aes_ctr_set_key(AES_KEY *aes_key, GCM128_CONTEXT *gcm_ctx, - block128_f *out_block, const uint8_t *key, - size_t key_len) { - if (aesni_capable()) { - aesni_set_encrypt_key(key, key_len * 8, aes_key); - if (gcm_ctx != NULL) { - CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)aesni_encrypt); - } - if (out_block) { - *out_block = (block128_f) aesni_encrypt; - } - return (ctr128_f)aesni_ctr32_encrypt_blocks; - } - - if (hwaes_capable()) { - aes_hw_set_encrypt_key(key, key_len * 8, aes_key); - if (gcm_ctx != NULL) { - CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)aes_hw_encrypt); - } - if (out_block) { - *out_block = (block128_f) aes_hw_encrypt; - } - return (ctr128_f)aes_hw_ctr32_encrypt_blocks; - } - - if (bsaes_capable()) { - AES_set_encrypt_key(key, key_len * 8, aes_key); - if (gcm_ctx != NULL) { - CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)AES_encrypt); - } - if (out_block) { - *out_block = (block128_f) AES_encrypt; - } - return (ctr128_f)bsaes_ctr32_encrypt_blocks; - } - - if (vpaes_capable()) { - vpaes_set_encrypt_key(key, key_len * 8, aes_key); - if (out_block) { - *out_block = (block128_f) vpaes_encrypt; - } - if (gcm_ctx != NULL) { - CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)vpaes_encrypt); - } - return NULL; - } - - AES_set_encrypt_key(key, key_len * 8, aes_key); - if (gcm_ctx != NULL) { - CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)AES_encrypt); - } - if (out_block) { - *out_block = (block128_f) AES_encrypt; - } - return NULL; -} - -static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, - const uint8_t *iv, int enc) { - EVP_AES_GCM_CTX *gctx = ctx->cipher_data; - if (!iv && !key) { - return 1; - } - if (key) { - gctx->ctr = - aes_ctr_set_key(&gctx->ks.ks, &gctx->gcm, NULL, key, ctx->key_len); - /* If we have an iv can set it directly, otherwise use saved IV. 
*/ - if (iv == NULL && gctx->iv_set) { - iv = gctx->iv; - } - if (iv) { - CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); - gctx->iv_set = 1; - } - gctx->key_set = 1; - } else { - /* If key set use IV, otherwise copy */ - if (gctx->key_set) { - CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); - } else { - OPENSSL_memcpy(gctx->iv, iv, gctx->ivlen); - } - gctx->iv_set = 1; - gctx->iv_gen = 0; - } - return 1; -} - -static void aes_gcm_cleanup(EVP_CIPHER_CTX *c) { - EVP_AES_GCM_CTX *gctx = c->cipher_data; - OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm)); - if (gctx->iv != c->iv) { - OPENSSL_free(gctx->iv); - } -} - -/* increment counter (64-bit int) by 1 */ -static void ctr64_inc(uint8_t *counter) { - int n = 8; - uint8_t c; - - do { - --n; - c = counter[n]; - ++c; - counter[n] = c; - if (c) { - return; - } - } while (n); -} - -static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) { - EVP_AES_GCM_CTX *gctx = c->cipher_data; - switch (type) { - case EVP_CTRL_INIT: - gctx->key_set = 0; - gctx->iv_set = 0; - gctx->ivlen = c->cipher->iv_len; - gctx->iv = c->iv; - gctx->taglen = -1; - gctx->iv_gen = 0; - return 1; - - case EVP_CTRL_GCM_SET_IVLEN: - if (arg <= 0) { - return 0; - } - - /* Allocate memory for IV if needed */ - if (arg > EVP_MAX_IV_LENGTH && arg > gctx->ivlen) { - if (gctx->iv != c->iv) { - OPENSSL_free(gctx->iv); - } - gctx->iv = OPENSSL_malloc(arg); - if (!gctx->iv) { - return 0; - } - } - gctx->ivlen = arg; - return 1; - - case EVP_CTRL_GCM_SET_TAG: - if (arg <= 0 || arg > 16 || c->encrypt) { - return 0; - } - OPENSSL_memcpy(c->buf, ptr, arg); - gctx->taglen = arg; - return 1; - - case EVP_CTRL_GCM_GET_TAG: - if (arg <= 0 || arg > 16 || !c->encrypt || gctx->taglen < 0) { - return 0; - } - OPENSSL_memcpy(ptr, c->buf, arg); - return 1; - - case EVP_CTRL_GCM_SET_IV_FIXED: - /* Special case: -1 length restores whole IV */ - if (arg == -1) { - OPENSSL_memcpy(gctx->iv, ptr, gctx->ivlen); - gctx->iv_gen = 1; - return 1; - } - /* Fixed field must be at least 4 bytes and invocation field - * at least 8. */ - if (arg < 4 || (gctx->ivlen - arg) < 8) { - return 0; - } - if (arg) { - OPENSSL_memcpy(gctx->iv, ptr, arg); - } - if (c->encrypt && !RAND_bytes(gctx->iv + arg, gctx->ivlen - arg)) { - return 0; - } - gctx->iv_gen = 1; - return 1; - - case EVP_CTRL_GCM_IV_GEN: - if (gctx->iv_gen == 0 || gctx->key_set == 0) { - return 0; - } - CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, gctx->iv, gctx->ivlen); - if (arg <= 0 || arg > gctx->ivlen) { - arg = gctx->ivlen; - } - OPENSSL_memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg); - /* Invocation field will be at least 8 bytes in size and - * so no need to check wrap around or increment more than - * last 8 bytes. 
*/ - ctr64_inc(gctx->iv + gctx->ivlen - 8); - gctx->iv_set = 1; - return 1; - - case EVP_CTRL_GCM_SET_IV_INV: - if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt) { - return 0; - } - OPENSSL_memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg); - CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, gctx->iv, gctx->ivlen); - gctx->iv_set = 1; - return 1; - - case EVP_CTRL_COPY: { - EVP_CIPHER_CTX *out = ptr; - EVP_AES_GCM_CTX *gctx_out = out->cipher_data; - if (gctx->iv == c->iv) { - gctx_out->iv = out->iv; - } else { - gctx_out->iv = OPENSSL_malloc(gctx->ivlen); - if (!gctx_out->iv) { - return 0; - } - OPENSSL_memcpy(gctx_out->iv, gctx->iv, gctx->ivlen); - } - return 1; - } - - default: - return -1; - } -} - -static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, - size_t len) { - EVP_AES_GCM_CTX *gctx = ctx->cipher_data; - - /* If not set up, return error */ - if (!gctx->key_set) { - return -1; - } - if (!gctx->iv_set) { - return -1; - } - - if (in) { - if (out == NULL) { - if (!CRYPTO_gcm128_aad(&gctx->gcm, in, len)) { - return -1; - } - } else if (ctx->encrypt) { - if (gctx->ctr) { - if (!CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, &gctx->ks.ks, in, out, len, - gctx->ctr)) { - return -1; - } - } else { - if (!CRYPTO_gcm128_encrypt(&gctx->gcm, &gctx->ks.ks, in, out, len)) { - return -1; - } - } - } else { - if (gctx->ctr) { - if (!CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, &gctx->ks.ks, in, out, len, - gctx->ctr)) { - return -1; - } - } else { - if (!CRYPTO_gcm128_decrypt(&gctx->gcm, &gctx->ks.ks, in, out, len)) { - return -1; - } - } - } - return len; - } else { - if (!ctx->encrypt) { - if (gctx->taglen < 0 || - !CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen)) { - return -1; - } - gctx->iv_set = 0; - return 0; - } - CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16); - gctx->taglen = 16; - /* Don't reuse the IV */ - gctx->iv_set = 0; - return 0; - } -} - -static const EVP_CIPHER aes_128_cbc = { - NID_aes_128_cbc, 16 /* block_size */, 16 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE, - NULL /* app_data */, aes_init_key, aes_cbc_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_128_ctr = { - NID_aes_128_ctr, 1 /* block_size */, 16 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE, - NULL /* app_data */, aes_init_key, aes_ctr_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_128_ecb = { - NID_aes_128_ecb, 16 /* block_size */, 16 /* key_size */, - 0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE, - NULL /* app_data */, aes_init_key, aes_ecb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_128_ofb = { - NID_aes_128_ofb128, 1 /* block_size */, 16 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_OFB_MODE, - NULL /* app_data */, aes_init_key, aes_ofb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_128_gcm = { - NID_aes_128_gcm, 1 /* block_size */, 16 /* key_size */, 12 /* iv_len */, - sizeof(EVP_AES_GCM_CTX), - EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER | - EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | - EVP_CIPH_FLAG_AEAD_CIPHER, - NULL /* app_data */, aes_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup, - aes_gcm_ctrl}; - - -static const EVP_CIPHER aes_192_cbc = { - NID_aes_192_cbc, 16 /* block_size */, 24 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE, - NULL /* app_data */, aes_init_key, aes_cbc_cipher, - NULL /* cleanup */, NULL /* 
ctrl */}; - -static const EVP_CIPHER aes_192_ctr = { - NID_aes_192_ctr, 1 /* block_size */, 24 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE, - NULL /* app_data */, aes_init_key, aes_ctr_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_192_ecb = { - NID_aes_192_ecb, 16 /* block_size */, 24 /* key_size */, - 0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE, - NULL /* app_data */, aes_init_key, aes_ecb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_192_gcm = { - NID_aes_192_gcm, 1 /* block_size */, 24 /* key_size */, 12 /* iv_len */, - sizeof(EVP_AES_GCM_CTX), - EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER | - EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | - EVP_CIPH_FLAG_AEAD_CIPHER, - NULL /* app_data */, aes_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup, - aes_gcm_ctrl}; - - -static const EVP_CIPHER aes_256_cbc = { - NID_aes_256_cbc, 16 /* block_size */, 32 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE, - NULL /* app_data */, aes_init_key, aes_cbc_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_256_ctr = { - NID_aes_256_ctr, 1 /* block_size */, 32 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE, - NULL /* app_data */, aes_init_key, aes_ctr_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_256_ecb = { - NID_aes_256_ecb, 16 /* block_size */, 32 /* key_size */, - 0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE, - NULL /* app_data */, aes_init_key, aes_ecb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_256_ofb = { - NID_aes_256_ofb128, 1 /* block_size */, 32 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_OFB_MODE, - NULL /* app_data */, aes_init_key, aes_ofb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aes_256_gcm = { - NID_aes_256_gcm, 1 /* block_size */, 32 /* key_size */, 12 /* iv_len */, - sizeof(EVP_AES_GCM_CTX), - EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER | - EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | - EVP_CIPH_FLAG_AEAD_CIPHER, - NULL /* app_data */, aes_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup, - aes_gcm_ctrl}; - -#if !defined(OPENSSL_NO_ASM) && \ - (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) - -/* AES-NI section. */ - -static char aesni_capable(void) { - return (OPENSSL_ia32cap_P[1] & (1 << (57 - 32))) != 0; -} - -static int aesni_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, - const uint8_t *iv, int enc) { - int ret, mode; - EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; - - mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK; - if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) { - ret = aesni_set_decrypt_key(key, ctx->key_len * 8, ctx->cipher_data); - dat->block = (block128_f)aesni_decrypt; - dat->stream.cbc = - mode == EVP_CIPH_CBC_MODE ? 
(cbc128_f)aesni_cbc_encrypt : NULL; - } else { - ret = aesni_set_encrypt_key(key, ctx->key_len * 8, ctx->cipher_data); - dat->block = (block128_f)aesni_encrypt; - if (mode == EVP_CIPH_CBC_MODE) { - dat->stream.cbc = (cbc128_f)aesni_cbc_encrypt; - } else if (mode == EVP_CIPH_CTR_MODE) { - dat->stream.ctr = (ctr128_f)aesni_ctr32_encrypt_blocks; - } else { - dat->stream.cbc = NULL; - } - } - - if (ret < 0) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_AES_KEY_SETUP_FAILED); - return 0; - } - - return 1; -} - -static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, - const uint8_t *in, size_t len) { - aesni_cbc_encrypt(in, out, len, ctx->cipher_data, ctx->iv, ctx->encrypt); - - return 1; -} - -static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, - const uint8_t *in, size_t len) { - size_t bl = ctx->cipher->block_size; - - if (len < bl) { - return 1; - } - - aesni_ecb_encrypt(in, out, len, ctx->cipher_data, ctx->encrypt); - - return 1; -} - -static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, - const uint8_t *iv, int enc) { - EVP_AES_GCM_CTX *gctx = ctx->cipher_data; - if (!iv && !key) { - return 1; - } - if (key) { - aesni_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks); - CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)aesni_encrypt); - gctx->ctr = (ctr128_f)aesni_ctr32_encrypt_blocks; - /* If we have an iv can set it directly, otherwise use - * saved IV. */ - if (iv == NULL && gctx->iv_set) { - iv = gctx->iv; - } - if (iv) { - CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); - gctx->iv_set = 1; - } - gctx->key_set = 1; - } else { - /* If key set use IV, otherwise copy */ - if (gctx->key_set) { - CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); - } else { - OPENSSL_memcpy(gctx->iv, iv, gctx->ivlen); - } - gctx->iv_set = 1; - gctx->iv_gen = 0; - } - return 1; -} - -static const EVP_CIPHER aesni_128_cbc = { - NID_aes_128_cbc, 16 /* block_size */, 16 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE, - NULL /* app_data */, aesni_init_key, aesni_cbc_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_128_ctr = { - NID_aes_128_ctr, 1 /* block_size */, 16 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE, - NULL /* app_data */, aesni_init_key, aes_ctr_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_128_ecb = { - NID_aes_128_ecb, 16 /* block_size */, 16 /* key_size */, - 0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE, - NULL /* app_data */, aesni_init_key, aesni_ecb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_128_ofb = { - NID_aes_128_ofb128, 1 /* block_size */, 16 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_OFB_MODE, - NULL /* app_data */, aesni_init_key, aes_ofb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_128_gcm = { - NID_aes_128_gcm, 1 /* block_size */, 16 /* key_size */, 12 /* iv_len */, - sizeof(EVP_AES_GCM_CTX), - EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER | - EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | - EVP_CIPH_FLAG_AEAD_CIPHER, - NULL /* app_data */, aesni_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup, - aes_gcm_ctrl}; - - -static const EVP_CIPHER aesni_192_cbc = { - NID_aes_192_cbc, 16 /* block_size */, 24 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE, - NULL /* app_data */, aesni_init_key, aesni_cbc_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - 
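These per-key-size tables are what the EVP_aes_* accessors below hand back, and callers drive them through the generic EVP_CIPHER_CTX machinery plus the EVP_CTRL_GCM_* controls handled by aes_gcm_ctrl above. A compressed encryption sketch (illustrative, error handling collapsed; for new code BoringSSL steers callers toward the EVP_AEAD interface instead):

#include <openssl/cipher.h>

// AES-256-GCM via the EVP_CIPHER interface: AAD is fed with a NULL output
// buffer, and the tag is fetched with EVP_CTRL_GCM_GET_TAG after Final.
static int gcm_encrypt_sketch(uint8_t *out, uint8_t tag[16],
                              const uint8_t key[32], const uint8_t iv[12],
                              const uint8_t *aad, size_t aad_len,
                              const uint8_t *pt, size_t pt_len) {
  EVP_CIPHER_CTX ctx;
  EVP_CIPHER_CTX_init(&ctx);
  int len = 0;
  int ok = EVP_EncryptInit_ex(&ctx, EVP_aes_256_gcm(), NULL, key, iv) &&
           EVP_EncryptUpdate(&ctx, NULL, &len, aad, (int)aad_len) &&
           EVP_EncryptUpdate(&ctx, out, &len, pt, (int)pt_len) &&
           EVP_EncryptFinal_ex(&ctx, out + len, &len) &&
           EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_GCM_GET_TAG, 16, tag);
  EVP_CIPHER_CTX_cleanup(&ctx);
  return ok;
}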
-static const EVP_CIPHER aesni_192_ctr = { - NID_aes_192_ctr, 1 /* block_size */, 24 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE, - NULL /* app_data */, aesni_init_key, aes_ctr_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_192_ecb = { - NID_aes_192_ecb, 16 /* block_size */, 24 /* key_size */, - 0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE, - NULL /* app_data */, aesni_init_key, aesni_ecb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_192_gcm = { - NID_aes_192_gcm, 1 /* block_size */, 24 /* key_size */, 12 /* iv_len */, - sizeof(EVP_AES_GCM_CTX), - EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER | - EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | - EVP_CIPH_FLAG_AEAD_CIPHER, - NULL /* app_data */, aesni_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup, - aes_gcm_ctrl}; - - -static const EVP_CIPHER aesni_256_cbc = { - NID_aes_256_cbc, 16 /* block_size */, 32 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CBC_MODE, - NULL /* app_data */, aesni_init_key, aesni_cbc_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_256_ctr = { - NID_aes_256_ctr, 1 /* block_size */, 32 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_CTR_MODE, - NULL /* app_data */, aesni_init_key, aes_ctr_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_256_ecb = { - NID_aes_256_ecb, 16 /* block_size */, 32 /* key_size */, - 0 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_ECB_MODE, - NULL /* app_data */, aesni_init_key, aesni_ecb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_256_ofb = { - NID_aes_256_ofb128, 1 /* block_size */, 32 /* key_size */, - 16 /* iv_len */, sizeof(EVP_AES_KEY), EVP_CIPH_OFB_MODE, - NULL /* app_data */, aesni_init_key, aes_ofb_cipher, - NULL /* cleanup */, NULL /* ctrl */}; - -static const EVP_CIPHER aesni_256_gcm = { - NID_aes_256_gcm, 1 /* block_size */, 32 /* key_size */, 12 /* iv_len */, - sizeof(EVP_AES_GCM_CTX), - EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER | - EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT | EVP_CIPH_CUSTOM_COPY | - EVP_CIPH_FLAG_AEAD_CIPHER, - NULL /* app_data */, aesni_gcm_init_key, aes_gcm_cipher, aes_gcm_cleanup, - aes_gcm_ctrl}; - -#define EVP_CIPHER_FUNCTION(keybits, mode) \ - const EVP_CIPHER *EVP_aes_##keybits##_##mode(void) { \ - if (aesni_capable()) { \ - return &aesni_##keybits##_##mode; \ - } else { \ - return &aes_##keybits##_##mode; \ - } \ - } - -#else /* ^^^ OPENSSL_X86_64 || OPENSSL_X86 */ - -static char aesni_capable(void) { - return 0; -} - -#define EVP_CIPHER_FUNCTION(keybits, mode) \ - const EVP_CIPHER *EVP_aes_##keybits##_##mode(void) { \ - return &aes_##keybits##_##mode; \ - } - -#endif - -EVP_CIPHER_FUNCTION(128, cbc) -EVP_CIPHER_FUNCTION(128, ctr) -EVP_CIPHER_FUNCTION(128, ecb) -EVP_CIPHER_FUNCTION(128, ofb) -EVP_CIPHER_FUNCTION(128, gcm) - -EVP_CIPHER_FUNCTION(192, cbc) -EVP_CIPHER_FUNCTION(192, ctr) -EVP_CIPHER_FUNCTION(192, ecb) -EVP_CIPHER_FUNCTION(192, gcm) - -EVP_CIPHER_FUNCTION(256, cbc) -EVP_CIPHER_FUNCTION(256, ctr) -EVP_CIPHER_FUNCTION(256, ecb) -EVP_CIPHER_FUNCTION(256, ofb) -EVP_CIPHER_FUNCTION(256, gcm) - - -#define EVP_AEAD_AES_GCM_TAG_LEN 16 - -struct aead_aes_gcm_ctx { - union { - double align; - AES_KEY ks; - } ks; - GCM128_CONTEXT gcm; - ctr128_f ctr; - uint8_t tag_len; -}; - -static int aead_aes_gcm_init(EVP_AEAD_CTX *ctx, const uint8_t *key, - size_t key_len, 
size_t tag_len) { - struct aead_aes_gcm_ctx *gcm_ctx; - const size_t key_bits = key_len * 8; - - if (key_bits != 128 && key_bits != 256) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); - return 0; /* EVP_AEAD_CTX_init should catch this. */ - } - - if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { - tag_len = EVP_AEAD_AES_GCM_TAG_LEN; - } - - if (tag_len > EVP_AEAD_AES_GCM_TAG_LEN) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE); - return 0; - } - - gcm_ctx = OPENSSL_malloc(sizeof(struct aead_aes_gcm_ctx)); - if (gcm_ctx == NULL) { - return 0; - } - - gcm_ctx->ctr = - aes_ctr_set_key(&gcm_ctx->ks.ks, &gcm_ctx->gcm, NULL, key, key_len); - gcm_ctx->tag_len = tag_len; - ctx->aead_state = gcm_ctx; - - return 1; -} - -static void aead_aes_gcm_cleanup(EVP_AEAD_CTX *ctx) { - struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state; - OPENSSL_cleanse(gcm_ctx, sizeof(struct aead_aes_gcm_ctx)); - OPENSSL_free(gcm_ctx); -} - -static int aead_aes_gcm_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state; - GCM128_CONTEXT gcm; - - if (in_len + gcm_ctx->tag_len < in_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); - return 0; - } - - if (max_out_len < in_len + gcm_ctx->tag_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); - return 0; - } - - const AES_KEY *key = &gcm_ctx->ks.ks; - - OPENSSL_memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm)); - CRYPTO_gcm128_setiv(&gcm, key, nonce, nonce_len); - - if (ad_len > 0 && !CRYPTO_gcm128_aad(&gcm, ad, ad_len)) { - return 0; - } - - if (gcm_ctx->ctr) { - if (!CRYPTO_gcm128_encrypt_ctr32(&gcm, key, in, out, in_len, - gcm_ctx->ctr)) { - return 0; - } - } else { - if (!CRYPTO_gcm128_encrypt(&gcm, key, in, out, in_len)) { - return 0; - } - } - - CRYPTO_gcm128_tag(&gcm, out + in_len, gcm_ctx->tag_len); - *out_len = in_len + gcm_ctx->tag_len; - return 1; -} - -static int aead_aes_gcm_open(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state; - uint8_t tag[EVP_AEAD_AES_GCM_TAG_LEN]; - size_t plaintext_len; - GCM128_CONTEXT gcm; - - if (in_len < gcm_ctx->tag_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); - return 0; - } - - plaintext_len = in_len - gcm_ctx->tag_len; - - if (max_out_len < plaintext_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); - return 0; - } - - const AES_KEY *key = &gcm_ctx->ks.ks; - - OPENSSL_memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm)); - CRYPTO_gcm128_setiv(&gcm, key, nonce, nonce_len); - - if (!CRYPTO_gcm128_aad(&gcm, ad, ad_len)) { - return 0; - } - - if (gcm_ctx->ctr) { - if (!CRYPTO_gcm128_decrypt_ctr32(&gcm, key, in, out, - in_len - gcm_ctx->tag_len, gcm_ctx->ctr)) { - return 0; - } - } else { - if (!CRYPTO_gcm128_decrypt(&gcm, key, in, out, in_len - gcm_ctx->tag_len)) { - return 0; - } - } - - CRYPTO_gcm128_tag(&gcm, tag, gcm_ctx->tag_len); - if (CRYPTO_memcmp(tag, in + plaintext_len, gcm_ctx->tag_len) != 0) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); - return 0; - } - - *out_len = plaintext_len; - return 1; -} - -static const EVP_AEAD aead_aes_128_gcm = { - 16, /* key len */ - 12, /* nonce len */ - EVP_AEAD_AES_GCM_TAG_LEN, /* overhead */ - EVP_AEAD_AES_GCM_TAG_LEN, /* max tag length */ - 
aead_aes_gcm_init, - NULL, /* init_with_direction */ - aead_aes_gcm_cleanup, - aead_aes_gcm_seal, - aead_aes_gcm_open, - NULL, /* get_iv */ -}; - -static const EVP_AEAD aead_aes_256_gcm = { - 32, /* key len */ - 12, /* nonce len */ - EVP_AEAD_AES_GCM_TAG_LEN, /* overhead */ - EVP_AEAD_AES_GCM_TAG_LEN, /* max tag length */ - aead_aes_gcm_init, - NULL, /* init_with_direction */ - aead_aes_gcm_cleanup, - aead_aes_gcm_seal, - aead_aes_gcm_open, - NULL, /* get_iv */ -}; - -const EVP_AEAD *EVP_aead_aes_128_gcm(void) { return &aead_aes_128_gcm; } - -const EVP_AEAD *EVP_aead_aes_256_gcm(void) { return &aead_aes_256_gcm; } - - -#define EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN SHA256_DIGEST_LENGTH -#define EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN 12 - -struct aead_aes_ctr_hmac_sha256_ctx { - union { - double align; - AES_KEY ks; - } ks; - ctr128_f ctr; - block128_f block; - SHA256_CTX inner_init_state; - SHA256_CTX outer_init_state; - uint8_t tag_len; -}; - -static void hmac_init(SHA256_CTX *out_inner, SHA256_CTX *out_outer, - const uint8_t hmac_key[32]) { - static const size_t hmac_key_len = 32; - uint8_t block[SHA256_CBLOCK]; - OPENSSL_memcpy(block, hmac_key, hmac_key_len); - OPENSSL_memset(block + hmac_key_len, 0x36, sizeof(block) - hmac_key_len); - - unsigned i; - for (i = 0; i < hmac_key_len; i++) { - block[i] ^= 0x36; - } - - SHA256_Init(out_inner); - SHA256_Update(out_inner, block, sizeof(block)); - - OPENSSL_memset(block + hmac_key_len, 0x5c, sizeof(block) - hmac_key_len); - for (i = 0; i < hmac_key_len; i++) { - block[i] ^= (0x36 ^ 0x5c); - } - - SHA256_Init(out_outer); - SHA256_Update(out_outer, block, sizeof(block)); -} - -static int aead_aes_ctr_hmac_sha256_init(EVP_AEAD_CTX *ctx, const uint8_t *key, - size_t key_len, size_t tag_len) { - struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx; - static const size_t hmac_key_len = 32; - - if (key_len < hmac_key_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); - return 0; /* EVP_AEAD_CTX_init should catch this. */ - } - - const size_t aes_key_len = key_len - hmac_key_len; - if (aes_key_len != 16 && aes_key_len != 32) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); - return 0; /* EVP_AEAD_CTX_init should catch this. 
*/ - } - - if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { - tag_len = EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN; - } - - if (tag_len > EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE); - return 0; - } - - aes_ctx = OPENSSL_malloc(sizeof(struct aead_aes_ctr_hmac_sha256_ctx)); - if (aes_ctx == NULL) { - OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE); - return 0; - } - - aes_ctx->ctr = - aes_ctr_set_key(&aes_ctx->ks.ks, NULL, &aes_ctx->block, key, aes_key_len); - aes_ctx->tag_len = tag_len; - hmac_init(&aes_ctx->inner_init_state, &aes_ctx->outer_init_state, - key + aes_key_len); - - ctx->aead_state = aes_ctx; - - return 1; -} - -static void aead_aes_ctr_hmac_sha256_cleanup(EVP_AEAD_CTX *ctx) { - struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = ctx->aead_state; - OPENSSL_cleanse(aes_ctx, sizeof(struct aead_aes_ctr_hmac_sha256_ctx)); - OPENSSL_free(aes_ctx); -} - -static void hmac_update_uint64(SHA256_CTX *sha256, uint64_t value) { - unsigned i; - uint8_t bytes[8]; - - for (i = 0; i < sizeof(bytes); i++) { - bytes[i] = value & 0xff; - value >>= 8; - } - SHA256_Update(sha256, bytes, sizeof(bytes)); -} - -static void hmac_calculate(uint8_t out[SHA256_DIGEST_LENGTH], - const SHA256_CTX *inner_init_state, - const SHA256_CTX *outer_init_state, - const uint8_t *ad, size_t ad_len, - const uint8_t *nonce, const uint8_t *ciphertext, - size_t ciphertext_len) { - SHA256_CTX sha256; - OPENSSL_memcpy(&sha256, inner_init_state, sizeof(sha256)); - hmac_update_uint64(&sha256, ad_len); - hmac_update_uint64(&sha256, ciphertext_len); - SHA256_Update(&sha256, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN); - SHA256_Update(&sha256, ad, ad_len); - - /* Pad with zeros to the end of the SHA-256 block. */ - const unsigned num_padding = - (SHA256_CBLOCK - ((sizeof(uint64_t)*2 + - EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN + ad_len) % - SHA256_CBLOCK)) % - SHA256_CBLOCK; - uint8_t padding[SHA256_CBLOCK]; - OPENSSL_memset(padding, 0, num_padding); - SHA256_Update(&sha256, padding, num_padding); - - SHA256_Update(&sha256, ciphertext, ciphertext_len); - - uint8_t inner_digest[SHA256_DIGEST_LENGTH]; - SHA256_Final(inner_digest, &sha256); - - OPENSSL_memcpy(&sha256, outer_init_state, sizeof(sha256)); - SHA256_Update(&sha256, inner_digest, sizeof(inner_digest)); - SHA256_Final(out, &sha256); -} - -static void aead_aes_ctr_hmac_sha256_crypt( - const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx, uint8_t *out, - const uint8_t *in, size_t len, const uint8_t *nonce) { - /* Since the AEAD operation is one-shot, keeping a buffer of unused keystream - * bytes is pointless. However, |CRYPTO_ctr128_encrypt| requires it. 
*/ - uint8_t partial_block_buffer[AES_BLOCK_SIZE]; - unsigned partial_block_offset = 0; - OPENSSL_memset(partial_block_buffer, 0, sizeof(partial_block_buffer)); - - uint8_t counter[AES_BLOCK_SIZE]; - OPENSSL_memcpy(counter, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN); - OPENSSL_memset(counter + EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN, 0, 4); - - if (aes_ctx->ctr) { - CRYPTO_ctr128_encrypt_ctr32(in, out, len, &aes_ctx->ks.ks, counter, - partial_block_buffer, &partial_block_offset, - aes_ctx->ctr); - } else { - CRYPTO_ctr128_encrypt(in, out, len, &aes_ctx->ks.ks, counter, - partial_block_buffer, &partial_block_offset, - aes_ctx->block); - } -} - -static int aead_aes_ctr_hmac_sha256_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = ctx->aead_state; - const uint64_t in_len_64 = in_len; - - if (in_len + aes_ctx->tag_len < in_len || - /* This input is so large it would overflow the 32-bit block counter. */ - in_len_64 >= (UINT64_C(1) << 32) * AES_BLOCK_SIZE) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); - return 0; - } - - if (max_out_len < in_len + aes_ctx->tag_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); - return 0; - } - - if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); - return 0; - } - - aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, in_len, nonce); - - uint8_t hmac_result[SHA256_DIGEST_LENGTH]; - hmac_calculate(hmac_result, &aes_ctx->inner_init_state, - &aes_ctx->outer_init_state, ad, ad_len, nonce, out, in_len); - OPENSSL_memcpy(out + in_len, hmac_result, aes_ctx->tag_len); - *out_len = in_len + aes_ctx->tag_len; - - return 1; -} - -static int aead_aes_ctr_hmac_sha256_open(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = ctx->aead_state; - size_t plaintext_len; - - if (in_len < aes_ctx->tag_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); - return 0; - } - - plaintext_len = in_len - aes_ctx->tag_len; - - if (max_out_len < plaintext_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); - return 0; - } - - if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); - return 0; - } - - uint8_t hmac_result[SHA256_DIGEST_LENGTH]; - hmac_calculate(hmac_result, &aes_ctx->inner_init_state, - &aes_ctx->outer_init_state, ad, ad_len, nonce, in, - plaintext_len); - if (CRYPTO_memcmp(hmac_result, in + plaintext_len, aes_ctx->tag_len) != 0) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); - return 0; - } - - aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, plaintext_len, nonce); - - *out_len = plaintext_len; - return 1; -} - -static const EVP_AEAD aead_aes_128_ctr_hmac_sha256 = { - 16 /* AES key */ + 32 /* HMAC key */, - 12, /* nonce length */ - EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* overhead */ - EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* max tag length */ - - aead_aes_ctr_hmac_sha256_init, - NULL /* init_with_direction */, - aead_aes_ctr_hmac_sha256_cleanup, - aead_aes_ctr_hmac_sha256_seal, - aead_aes_ctr_hmac_sha256_open, - NULL /* get_iv */, -}; - -static const EVP_AEAD aead_aes_256_ctr_hmac_sha256 = { - 32 /* AES key */ + 
32 /* HMAC key */, - 12, /* nonce length */ - EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* overhead */ - EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, /* max tag length */ - - aead_aes_ctr_hmac_sha256_init, - NULL /* init_with_direction */, - aead_aes_ctr_hmac_sha256_cleanup, - aead_aes_ctr_hmac_sha256_seal, - aead_aes_ctr_hmac_sha256_open, - NULL /* get_iv */, -}; - -const EVP_AEAD *EVP_aead_aes_128_ctr_hmac_sha256(void) { - return &aead_aes_128_ctr_hmac_sha256; -} - -const EVP_AEAD *EVP_aead_aes_256_ctr_hmac_sha256(void) { - return &aead_aes_256_ctr_hmac_sha256; -} - -#if !defined(OPENSSL_SMALL) - -#define EVP_AEAD_AES_GCM_SIV_NONCE_LEN 12 -#define EVP_AEAD_AES_GCM_SIV_TAG_LEN 16 - -struct aead_aes_gcm_siv_ctx { - union { - double align; - AES_KEY ks; - } ks; - block128_f kgk_block; - unsigned is_256:1; -}; - -static int aead_aes_gcm_siv_init(EVP_AEAD_CTX *ctx, const uint8_t *key, - size_t key_len, size_t tag_len) { - const size_t key_bits = key_len * 8; - - if (key_bits != 128 && key_bits != 256) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); - return 0; /* EVP_AEAD_CTX_init should catch this. */ - } - - if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { - tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN; - } - - if (tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE); - return 0; - } - - struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = - OPENSSL_malloc(sizeof(struct aead_aes_gcm_siv_ctx)); - if (gcm_siv_ctx == NULL) { - return 0; - } - OPENSSL_memset(gcm_siv_ctx, 0, sizeof(struct aead_aes_gcm_siv_ctx)); - - if (aesni_capable()) { - aesni_set_encrypt_key(key, key_len * 8, &gcm_siv_ctx->ks.ks); - gcm_siv_ctx->kgk_block = (block128_f)aesni_encrypt; - } else if (hwaes_capable()) { - aes_hw_set_encrypt_key(key, key_len * 8, &gcm_siv_ctx->ks.ks); - gcm_siv_ctx->kgk_block = (block128_f)aes_hw_encrypt; - } else if (vpaes_capable()) { - vpaes_set_encrypt_key(key, key_len * 8, &gcm_siv_ctx->ks.ks); - gcm_siv_ctx->kgk_block = (block128_f)vpaes_encrypt; - } else { - AES_set_encrypt_key(key, key_len * 8, &gcm_siv_ctx->ks.ks); - gcm_siv_ctx->kgk_block = (block128_f)AES_encrypt; - } - - gcm_siv_ctx->is_256 = (key_len == 32); - ctx->aead_state = gcm_siv_ctx; - - return 1; -} - -static void aead_aes_gcm_siv_cleanup(EVP_AEAD_CTX *ctx) { - struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = ctx->aead_state; - OPENSSL_cleanse(gcm_siv_ctx, sizeof(struct aead_aes_gcm_siv_ctx)); - OPENSSL_free(gcm_siv_ctx); -} - -/* gcm_siv_crypt encrypts (or decrypts—it's the same thing) |in_len| bytes from - * |in| to |out|, using the block function |enc_block| with |key| in counter - * mode, starting at |initial_counter|. This differs from the traditional - * counter mode code in that the counter is handled little-endian, only the - * first four bytes are used and the GCM-SIV tweak to the final byte is - * applied. The |in| and |out| pointers may be equal but otherwise must not - * alias. 
*/ -static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len, - const uint8_t initial_counter[AES_BLOCK_SIZE], - block128_f enc_block, const AES_KEY *key) { - union { - uint32_t w[4]; - uint8_t c[16]; - } counter; - - OPENSSL_memcpy(counter.c, initial_counter, AES_BLOCK_SIZE); - counter.c[15] |= 0x80; - - for (size_t done = 0; done < in_len;) { - uint8_t keystream[AES_BLOCK_SIZE]; - enc_block(counter.c, keystream, key); - counter.w[0]++; - - size_t todo = AES_BLOCK_SIZE; - if (in_len - done < todo) { - todo = in_len - done; - } - - for (size_t i = 0; i < todo; i++) { - out[done + i] = keystream[i] ^ in[done + i]; - } - - done += todo; - } -} - -/* gcm_siv_polyval evaluates POLYVAL at |auth_key| on the given plaintext and - * AD. The result is written to |out_tag|. */ -static void gcm_siv_polyval( - uint8_t out_tag[16], const uint8_t *in, size_t in_len, const uint8_t *ad, - size_t ad_len, const uint8_t auth_key[16], - const uint8_t nonce[EVP_AEAD_AES_GCM_SIV_NONCE_LEN]) { - struct polyval_ctx polyval_ctx; - CRYPTO_POLYVAL_init(&polyval_ctx, auth_key); - - CRYPTO_POLYVAL_update_blocks(&polyval_ctx, ad, ad_len & ~15); - - uint8_t scratch[16]; - if (ad_len & 15) { - OPENSSL_memset(scratch, 0, sizeof(scratch)); - OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15); - CRYPTO_POLYVAL_update_blocks(&polyval_ctx, scratch, sizeof(scratch)); - } - - CRYPTO_POLYVAL_update_blocks(&polyval_ctx, in, in_len & ~15); - if (in_len & 15) { - OPENSSL_memset(scratch, 0, sizeof(scratch)); - OPENSSL_memcpy(scratch, &in[in_len & ~15], in_len & 15); - CRYPTO_POLYVAL_update_blocks(&polyval_ctx, scratch, sizeof(scratch)); - } - - union { - uint8_t c[16]; - struct { - uint64_t ad; - uint64_t in; - } bitlens; - } length_block; - - length_block.bitlens.ad = ad_len * 8; - length_block.bitlens.in = in_len * 8; - CRYPTO_POLYVAL_update_blocks(&polyval_ctx, length_block.c, - sizeof(length_block)); - - CRYPTO_POLYVAL_finish(&polyval_ctx, out_tag); - for (size_t i = 0; i < EVP_AEAD_AES_GCM_SIV_NONCE_LEN; i++) { - out_tag[i] ^= nonce[i]; - } - out_tag[15] &= 0x7f; -} - -/* gcm_siv_record_keys contains the keys used for a specific GCM-SIV record. */ -struct gcm_siv_record_keys { - uint8_t auth_key[16]; - union { - double align; - AES_KEY ks; - } enc_key; - block128_f enc_block; -}; - -/* gcm_siv_keys calculates the keys for a specific GCM-SIV record with the - * given nonce and writes them to |*out_keys|. */ -static void gcm_siv_keys( - const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx, - struct gcm_siv_record_keys *out_keys, - const uint8_t nonce[EVP_AEAD_AES_GCM_SIV_NONCE_LEN]) { - const AES_KEY *const key = &gcm_siv_ctx->ks.ks; - uint8_t key_material[(128 /* POLYVAL key */ + 256 /* max AES key */) / 8]; - const size_t blocks_needed = gcm_siv_ctx->is_256 ? 6 : 4; - - uint8_t counter[AES_BLOCK_SIZE]; - OPENSSL_memset(counter, 0, AES_BLOCK_SIZE - EVP_AEAD_AES_GCM_SIV_NONCE_LEN); - OPENSSL_memcpy(counter + AES_BLOCK_SIZE - EVP_AEAD_AES_GCM_SIV_NONCE_LEN, - nonce, EVP_AEAD_AES_GCM_SIV_NONCE_LEN); - for (size_t i = 0; i < blocks_needed; i++) { - counter[0] = i; - - uint8_t ciphertext[AES_BLOCK_SIZE]; - gcm_siv_ctx->kgk_block(counter, ciphertext, key); - OPENSSL_memcpy(&key_material[i * 8], ciphertext, 8); - } - - OPENSSL_memcpy(out_keys->auth_key, key_material, 16); - aes_ctr_set_key(&out_keys->enc_key.ks, NULL, &out_keys->enc_block, - key_material + 16, gcm_siv_ctx->is_256 ? 
32 : 16); -} - -static int aead_aes_gcm_siv_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = ctx->aead_state; - const uint64_t in_len_64 = in_len; - const uint64_t ad_len_64 = ad_len; - - if (in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN < in_len || - in_len_64 > (UINT64_C(1) << 36) || - ad_len_64 >= (UINT64_C(1) << 61)) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); - return 0; - } - - if (max_out_len < in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); - return 0; - } - - if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); - return 0; - } - - struct gcm_siv_record_keys keys; - gcm_siv_keys(gcm_siv_ctx, &keys, nonce); - - uint8_t tag[16]; - gcm_siv_polyval(tag, in, in_len, ad, ad_len, keys.auth_key, nonce); - keys.enc_block(tag, tag, &keys.enc_key.ks); - - gcm_siv_crypt(out, in, in_len, tag, keys.enc_block, &keys.enc_key.ks); - - OPENSSL_memcpy(&out[in_len], tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN); - *out_len = in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN; - - return 1; -} - -static int aead_aes_gcm_siv_open(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - const uint64_t ad_len_64 = ad_len; - if (ad_len_64 >= (UINT64_C(1) << 61)) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); - return 0; - } - - const uint64_t in_len_64 = in_len; - if (in_len < EVP_AEAD_AES_GCM_SIV_TAG_LEN || - in_len_64 > (UINT64_C(1) << 36) + AES_BLOCK_SIZE) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); - return 0; - } - - if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); - return 0; - } - - const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = ctx->aead_state; - const size_t plaintext_len = in_len - EVP_AEAD_AES_GCM_SIV_TAG_LEN; - - if (max_out_len < plaintext_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); - return 0; - } - - struct gcm_siv_record_keys keys; - gcm_siv_keys(gcm_siv_ctx, &keys, nonce); - - gcm_siv_crypt(out, in, plaintext_len, &in[plaintext_len], keys.enc_block, - &keys.enc_key.ks); - - uint8_t expected_tag[EVP_AEAD_AES_GCM_SIV_TAG_LEN]; - gcm_siv_polyval(expected_tag, out, plaintext_len, ad, ad_len, keys.auth_key, - nonce); - keys.enc_block(expected_tag, expected_tag, &keys.enc_key.ks); - - if (CRYPTO_memcmp(expected_tag, &in[plaintext_len], sizeof(expected_tag)) != - 0) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); - return 0; - } - - *out_len = plaintext_len; - return 1; -} - -static const EVP_AEAD aead_aes_128_gcm_siv = { - 16, /* key length */ - EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */ - - aead_aes_gcm_siv_init, - NULL /* init_with_direction */, - aead_aes_gcm_siv_cleanup, - aead_aes_gcm_siv_seal, - aead_aes_gcm_siv_open, - NULL /* get_iv */, -}; - -static const EVP_AEAD aead_aes_256_gcm_siv = { - 32, /* key length */ - EVP_AEAD_AES_GCM_SIV_NONCE_LEN, /* nonce length */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* overhead */ - EVP_AEAD_AES_GCM_SIV_TAG_LEN, /* max tag length */ - - aead_aes_gcm_siv_init, - NULL /* init_with_direction */, - aead_aes_gcm_siv_cleanup, - 
aead_aes_gcm_siv_seal, - aead_aes_gcm_siv_open, - NULL /* get_iv */, -}; - -const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void) { - return &aead_aes_128_gcm_siv; -} - -const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) { - return &aead_aes_256_gcm_siv; -} - -#endif /* !OPENSSL_SMALL */ - -int EVP_has_aes_hardware(void) { -#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) - return aesni_capable() && crypto_gcm_clmul_enabled(); -#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) - return hwaes_capable() && CRYPTO_is_ARMv8_PMULL_capable(); -#else - return 0; -#endif -} diff --git a/Sources/BoringSSL/crypto/cipher/e_chacha20poly1305.c b/Sources/BoringSSL/crypto/cipher/e_chacha20poly1305.c deleted file mode 100644 index c6e81ab87..000000000 --- a/Sources/BoringSSL/crypto/cipher/e_chacha20poly1305.c +++ /dev/null @@ -1,276 +0,0 @@ -/* Copyright (c) 2014, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include "internal.h" -#include "../internal.h" - - -#define POLY1305_TAG_LEN 16 - -struct aead_chacha20_poly1305_ctx { - unsigned char key[32]; - unsigned char tag_len; -}; - -#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) && \ - !defined(OPENSSL_WINDOWS) -static int asm_capable(void) { - const int sse41_capable = (OPENSSL_ia32cap_P[1] & (1 << 19)) != 0; - return sse41_capable; -} - -// chacha20_poly1305_open is defined in chacha20_poly1305_x86_64.pl. It -// decrypts |plaintext_len| bytes from |ciphertext| and writes them to -// |out_plaintext|. On entry, |aead_data| must contain the final 48 bytes of -// the initial ChaCha20 block, i.e. the key, followed by four zeros, followed -// by the nonce. On exit, it will contain the calculated tag value, which the -// caller must check. -extern void chacha20_poly1305_open(uint8_t *out_plaintext, - const uint8_t *ciphertext, - size_t plaintext_len, const uint8_t *ad, - size_t ad_len, uint8_t *aead_data); - -// chacha20_poly1305_open is defined in chacha20_poly1305_x86_64.pl. It -// encrypts |plaintext_len| bytes from |plaintext| and writes them to -// |out_ciphertext|. On entry, |aead_data| must contain the final 48 bytes of -// the initial ChaCha20 block, i.e. the key, followed by four zeros, followed -// by the nonce. On exit, it will contain the calculated tag value, which the -// caller must append to the ciphertext. 
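All of the AEAD vtables touched above (AES-GCM, AES-CTR-HMAC-SHA256 and, outside OPENSSL_SMALL, AES-GCM-SIV) are reached through BoringSSL's one-shot |EVP_AEAD_CTX| seal/open interface rather than through |EVP_CIPHER|. As a rough caller-side sketch only — the key, nonce and error handling are placeholders and not part of this patch:

#include <stdint.h>
#include <string.h>

#include <openssl/aead.h>

// Seals a short message with AES-256-GCM and opens it again. The all-zero
// key and nonce are placeholders; a real caller must use a fresh,
// unpredictable nonce for every message under a given key.
static int aes_gcm_round_trip(void) {
  static const uint8_t key[32] = {0};
  static const uint8_t nonce[12] = {0};
  static const uint8_t pt[] = "hello";
  static const uint8_t ad[] = "header";

  EVP_AEAD_CTX ctx;
  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_256_gcm(), key, sizeof(key),
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;
  }

  // Ciphertext is at most plaintext length plus the 16-byte GCM tag, the
  // overhead declared for |aead_aes_256_gcm| above.
  uint8_t ct[sizeof(pt) + 16];
  size_t ct_len;
  uint8_t got[sizeof(pt)];
  size_t got_len;
  int ok =
      EVP_AEAD_CTX_seal(&ctx, ct, &ct_len, sizeof(ct), nonce, sizeof(nonce),
                        pt, sizeof(pt), ad, sizeof(ad)) &&
      EVP_AEAD_CTX_open(&ctx, got, &got_len, sizeof(got), nonce, sizeof(nonce),
                        ct, ct_len, ad, sizeof(ad)) &&
      got_len == sizeof(pt) && memcmp(got, pt, sizeof(pt)) == 0;

  EVP_AEAD_CTX_cleanup(&ctx);
  return ok;
}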
-extern void chacha20_poly1305_seal(uint8_t *out_ciphertext, - const uint8_t *plaintext, - size_t plaintext_len, const uint8_t *ad, - size_t ad_len, uint8_t *aead_data); -#else -static int asm_capable(void) { - return 0; -} - - -static void chacha20_poly1305_open(uint8_t *out_plaintext, - const uint8_t *ciphertext, - size_t plaintext_len, const uint8_t *ad, - size_t ad_len, uint8_t *aead_data) {} - -static void chacha20_poly1305_seal(uint8_t *out_ciphertext, - const uint8_t *plaintext, - size_t plaintext_len, const uint8_t *ad, - size_t ad_len, uint8_t *aead_data) {} -#endif - -static int aead_chacha20_poly1305_init(EVP_AEAD_CTX *ctx, const uint8_t *key, - size_t key_len, size_t tag_len) { - struct aead_chacha20_poly1305_ctx *c20_ctx; - - if (tag_len == 0) { - tag_len = POLY1305_TAG_LEN; - } - - if (tag_len > POLY1305_TAG_LEN) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); - return 0; - } - - if (key_len != sizeof(c20_ctx->key)) { - return 0; /* internal error - EVP_AEAD_CTX_init should catch this. */ - } - - c20_ctx = OPENSSL_malloc(sizeof(struct aead_chacha20_poly1305_ctx)); - if (c20_ctx == NULL) { - return 0; - } - - OPENSSL_memcpy(c20_ctx->key, key, key_len); - c20_ctx->tag_len = tag_len; - ctx->aead_state = c20_ctx; - - return 1; -} - -static void aead_chacha20_poly1305_cleanup(EVP_AEAD_CTX *ctx) { - struct aead_chacha20_poly1305_ctx *c20_ctx = ctx->aead_state; - OPENSSL_cleanse(c20_ctx->key, sizeof(c20_ctx->key)); - OPENSSL_free(c20_ctx); -} - -static void poly1305_update_length(poly1305_state *poly1305, size_t data_len) { - uint8_t length_bytes[8]; - - for (unsigned i = 0; i < sizeof(length_bytes); i++) { - length_bytes[i] = data_len; - data_len >>= 8; - } - - CRYPTO_poly1305_update(poly1305, length_bytes, sizeof(length_bytes)); -} - -static void poly1305_update_padded_16(poly1305_state *poly1305, - const uint8_t *data, size_t data_len) { - static const uint8_t padding[16] = { 0 }; /* Padding is all zeros. */ - - CRYPTO_poly1305_update(poly1305, data, data_len); - if (data_len % 16 != 0) { - CRYPTO_poly1305_update(poly1305, padding, - sizeof(padding) - (data_len % 16)); - } -} - -/* calc_tag fills |tag| with the authentication tag for the given inputs. */ -static void calc_tag(uint8_t tag[POLY1305_TAG_LEN], - const struct aead_chacha20_poly1305_ctx *c20_ctx, - const uint8_t nonce[12], const uint8_t *ad, size_t ad_len, - const uint8_t *ciphertext, size_t ciphertext_len) { - alignas(16) uint8_t poly1305_key[32]; - OPENSSL_memset(poly1305_key, 0, sizeof(poly1305_key)); - CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key), - c20_ctx->key, nonce, 0); - - poly1305_state ctx; - CRYPTO_poly1305_init(&ctx, poly1305_key); - poly1305_update_padded_16(&ctx, ad, ad_len); - poly1305_update_padded_16(&ctx, ciphertext, ciphertext_len); - poly1305_update_length(&ctx, ad_len); - poly1305_update_length(&ctx, ciphertext_len); - CRYPTO_poly1305_finish(&ctx, tag); -} - -static int aead_chacha20_poly1305_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - const struct aead_chacha20_poly1305_ctx *c20_ctx = ctx->aead_state; - const uint64_t in_len_64 = in_len; - - if (nonce_len != 12) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); - return 0; - } - - /* |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow - * individual operations that work on more than 256GB at a time. 
- * |in_len_64| is needed because, on 32-bit platforms, size_t is only - * 32-bits and this produces a warning because it's always false. - * Casting to uint64_t inside the conditional is not sufficient to stop - * the warning. */ - if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); - return 0; - } - - if (in_len + c20_ctx->tag_len < in_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); - return 0; - } - - if (max_out_len < in_len + c20_ctx->tag_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); - return 0; - } - - alignas(16) uint8_t tag[48]; - - if (asm_capable()) { - OPENSSL_memcpy(tag, c20_ctx->key, 32); - OPENSSL_memset(tag + 32, 0, 4); - OPENSSL_memcpy(tag + 32 + 4, nonce, 12); - chacha20_poly1305_seal(out, in, in_len, ad, ad_len, tag); - } else { - CRYPTO_chacha_20(out, in, in_len, c20_ctx->key, nonce, 1); - calc_tag(tag, c20_ctx, nonce, ad, ad_len, out, in_len); - } - - OPENSSL_memcpy(out + in_len, tag, c20_ctx->tag_len); - *out_len = in_len + c20_ctx->tag_len; - return 1; -} - -static int aead_chacha20_poly1305_open(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { - const struct aead_chacha20_poly1305_ctx *c20_ctx = ctx->aead_state; - size_t plaintext_len; - const uint64_t in_len_64 = in_len; - - if (nonce_len != 12) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); - return 0; - } - - if (in_len < c20_ctx->tag_len) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); - return 0; - } - - /* |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow - * individual operations that work on more than 256GB at a time. - * |in_len_64| is needed because, on 32-bit platforms, size_t is only - * 32-bits and this produces a warning because it's always false. - * Casting to uint64_t inside the conditional is not sufficient to stop - * the warning. */ - if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); - return 0; - } - - plaintext_len = in_len - c20_ctx->tag_len; - alignas(16) uint8_t tag[48]; - - if (asm_capable()) { - OPENSSL_memcpy(tag, c20_ctx->key, 32); - OPENSSL_memset(tag + 32, 0, 4); - OPENSSL_memcpy(tag + 32 + 4, nonce, 12); - chacha20_poly1305_open(out, in, plaintext_len, ad, ad_len, tag); - } else { - calc_tag(tag, c20_ctx, nonce, ad, ad_len, in, plaintext_len); - CRYPTO_chacha_20(out, in, plaintext_len, c20_ctx->key, nonce, 1); - } - - if (CRYPTO_memcmp(tag, in + plaintext_len, c20_ctx->tag_len) != 0) { - OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); - return 0; - } - - *out_len = plaintext_len; - return 1; -} - -static const EVP_AEAD aead_chacha20_poly1305 = { - 32, /* key len */ - 12, /* nonce len */ - POLY1305_TAG_LEN, /* overhead */ - POLY1305_TAG_LEN, /* max tag length */ - aead_chacha20_poly1305_init, - NULL, /* init_with_direction */ - aead_chacha20_poly1305_cleanup, - aead_chacha20_poly1305_seal, - aead_chacha20_poly1305_open, - NULL, /* get_iv */ -}; - -const EVP_AEAD *EVP_aead_chacha20_poly1305(void) { - return &aead_chacha20_poly1305; -} diff --git a/Sources/BoringSSL/crypto/cipher/tls_cbc.c b/Sources/BoringSSL/crypto/cipher/tls_cbc.c deleted file mode 100644 index 52880b0c6..000000000 --- a/Sources/BoringSSL/crypto/cipher/tls_cbc.c +++ /dev/null @@ -1,482 +0,0 @@ -/* ==================================================================== - * Copyright (c) 2012 The OpenSSL Project. 
All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). */ - -#include -#include - -#include -#include -#include - -#include "../internal.h" -#include "internal.h" - - -/* TODO(davidben): unsigned should be size_t. The various constant_time - * functions need to be switched to size_t. */ - -/* MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length - * field. (SHA-384/512 have 128-bit length.) */ -#define MAX_HASH_BIT_COUNT_BYTES 16 - -/* MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support. - * Currently SHA-384/512 has a 128-byte block size and that's the largest - * supported by TLS.) */ -#define MAX_HASH_BLOCK_SIZE 128 - -int EVP_tls_cbc_remove_padding(unsigned *out_padding_ok, unsigned *out_len, - const uint8_t *in, unsigned in_len, - unsigned block_size, unsigned mac_size) { - unsigned padding_length, good, to_check, i; - const unsigned overhead = 1 /* padding length byte */ + mac_size; - - /* These lengths are all public so we can test them in non-constant time. 
*/ - if (overhead > in_len) { - return 0; - } - - padding_length = in[in_len - 1]; - - good = constant_time_ge(in_len, overhead + padding_length); - /* The padding consists of a length byte at the end of the record and - * then that many bytes of padding, all with the same value as the - * length byte. Thus, with the length byte included, there are i+1 - * bytes of padding. - * - * We can't check just |padding_length+1| bytes because that leaks - * decrypted information. Therefore we always have to check the maximum - * amount of padding possible. (Again, the length of the record is - * public information so we can use it.) */ - to_check = 256; /* maximum amount of padding, inc length byte. */ - if (to_check > in_len) { - to_check = in_len; - } - - for (i = 0; i < to_check; i++) { - uint8_t mask = constant_time_ge_8(padding_length, i); - uint8_t b = in[in_len - 1 - i]; - /* The final |padding_length+1| bytes should all have the value - * |padding_length|. Therefore the XOR should be zero. */ - good &= ~(mask & (padding_length ^ b)); - } - - /* If any of the final |padding_length+1| bytes had the wrong value, - * one or more of the lower eight bits of |good| will be cleared. */ - good = constant_time_eq(0xff, good & 0xff); - - /* Always treat |padding_length| as zero on error. If, assuming block size of - * 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16 - * and returned -1, distinguishing good MAC and bad padding from bad MAC and - * bad padding would give POODLE's padding oracle. */ - padding_length = good & (padding_length + 1); - *out_len = in_len - padding_length; - *out_padding_ok = good; - return 1; -} - -void EVP_tls_cbc_copy_mac(uint8_t *out, unsigned md_size, - const uint8_t *in, unsigned in_len, - unsigned orig_len) { - uint8_t rotated_mac1[EVP_MAX_MD_SIZE], rotated_mac2[EVP_MAX_MD_SIZE]; - uint8_t *rotated_mac = rotated_mac1; - uint8_t *rotated_mac_tmp = rotated_mac2; - - /* mac_end is the index of |in| just after the end of the MAC. */ - unsigned mac_end = in_len; - unsigned mac_start = mac_end - md_size; - - assert(orig_len >= in_len); - assert(in_len >= md_size); - assert(md_size <= EVP_MAX_MD_SIZE); - - /* scan_start contains the number of bytes that we can ignore because - * the MAC's position can only vary by 255 bytes. */ - unsigned scan_start = 0; - /* This information is public so it's safe to branch based on it. */ - if (orig_len > md_size + 255 + 1) { - scan_start = orig_len - (md_size + 255 + 1); - } - - unsigned rotate_offset = 0; - uint8_t mac_started = 0; - OPENSSL_memset(rotated_mac, 0, md_size); - for (unsigned i = scan_start, j = 0; i < orig_len; i++, j++) { - if (j >= md_size) { - j -= md_size; - } - unsigned is_mac_start = constant_time_eq(i, mac_start); - mac_started |= is_mac_start; - uint8_t mac_ended = constant_time_ge_8(i, mac_end); - rotated_mac[j] |= in[i] & mac_started & ~mac_ended; - /* Save the offset that |mac_start| is mapped to. */ - rotate_offset |= j & is_mac_start; - } - - /* Now rotate the MAC. We rotate in log(md_size) steps, one for each bit - * position. */ - for (unsigned offset = 1; offset < md_size; - offset <<= 1, rotate_offset >>= 1) { - /* Rotate by |offset| iff the corresponding bit is set in - * |rotate_offset|, placing the result in |rotated_mac_tmp|. 
*/ - const uint8_t skip_rotate = (rotate_offset & 1) - 1; - for (unsigned i = 0, j = offset; i < md_size; i++, j++) { - if (j >= md_size) { - j -= md_size; - } - rotated_mac_tmp[i] = - constant_time_select_8(skip_rotate, rotated_mac[i], rotated_mac[j]); - } - - /* Swap pointers so |rotated_mac| contains the (possibly) rotated value. - * Note the number of iterations and thus the identity of these pointers is - * public information. */ - uint8_t *tmp = rotated_mac; - rotated_mac = rotated_mac_tmp; - rotated_mac_tmp = tmp; - } - - OPENSSL_memcpy(out, rotated_mac, md_size); -} - -/* u32toBE serialises an unsigned, 32-bit number (n) as four bytes at (p) in - * big-endian order. The value of p is advanced by four. */ -#define u32toBE(n, p) \ - do { \ - *((p)++) = (uint8_t)((n) >> 24); \ - *((p)++) = (uint8_t)((n) >> 16); \ - *((p)++) = (uint8_t)((n) >> 8); \ - *((p)++) = (uint8_t)((n)); \ - } while (0) - -/* u64toBE serialises an unsigned, 64-bit number (n) as eight bytes at (p) in - * big-endian order. The value of p is advanced by eight. */ -#define u64toBE(n, p) \ - do { \ - *((p)++) = (uint8_t)((n) >> 56); \ - *((p)++) = (uint8_t)((n) >> 48); \ - *((p)++) = (uint8_t)((n) >> 40); \ - *((p)++) = (uint8_t)((n) >> 32); \ - *((p)++) = (uint8_t)((n) >> 24); \ - *((p)++) = (uint8_t)((n) >> 16); \ - *((p)++) = (uint8_t)((n) >> 8); \ - *((p)++) = (uint8_t)((n)); \ - } while (0) - -/* These functions serialize the state of a hash and thus perform the standard - * "final" operation without adding the padding and length that such a function - * typically does. */ -static void tls1_sha1_final_raw(void *ctx, uint8_t *md_out) { - SHA_CTX *sha1 = ctx; - u32toBE(sha1->h[0], md_out); - u32toBE(sha1->h[1], md_out); - u32toBE(sha1->h[2], md_out); - u32toBE(sha1->h[3], md_out); - u32toBE(sha1->h[4], md_out); -} -#define LARGEST_DIGEST_CTX SHA_CTX - -static void tls1_sha256_final_raw(void *ctx, uint8_t *md_out) { - SHA256_CTX *sha256 = ctx; - unsigned i; - - for (i = 0; i < 8; i++) { - u32toBE(sha256->h[i], md_out); - } -} -#undef LARGEST_DIGEST_CTX -#define LARGEST_DIGEST_CTX SHA256_CTX - -static void tls1_sha512_final_raw(void *ctx, uint8_t *md_out) { - SHA512_CTX *sha512 = ctx; - unsigned i; - - for (i = 0; i < 8; i++) { - u64toBE(sha512->h[i], md_out); - } -} -#undef LARGEST_DIGEST_CTX -#define LARGEST_DIGEST_CTX SHA512_CTX - -int EVP_tls_cbc_record_digest_supported(const EVP_MD *md) { - switch (EVP_MD_type(md)) { - case NID_sha1: - case NID_sha256: - case NID_sha384: - return 1; - - default: - return 0; - } -} - -int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, - size_t *md_out_size, const uint8_t header[13], - const uint8_t *data, size_t data_plus_mac_size, - size_t data_plus_mac_plus_padding_size, - const uint8_t *mac_secret, - unsigned mac_secret_length) { - union { - double align; - uint8_t c[sizeof(LARGEST_DIGEST_CTX)]; - } md_state; - void (*md_final_raw)(void *ctx, uint8_t *md_out); - void (*md_transform)(void *ctx, const uint8_t *block); - unsigned md_size, md_block_size = 64; - unsigned len, max_mac_bytes, num_blocks, num_starting_blocks, k, - mac_end_offset, c, index_a, index_b; - unsigned int bits; /* at most 18 bits */ - uint8_t length_bytes[MAX_HASH_BIT_COUNT_BYTES]; - /* hmac_pad is the masked HMAC key. */ - uint8_t hmac_pad[MAX_HASH_BLOCK_SIZE]; - uint8_t first_block[MAX_HASH_BLOCK_SIZE]; - uint8_t mac_out[EVP_MAX_MD_SIZE]; - unsigned i, j, md_out_size_u; - EVP_MD_CTX md_ctx; - /* mdLengthSize is the number of bytes in the length field that terminates - * the hash. 
*/ - unsigned md_length_size = 8; - - /* This is a, hopefully redundant, check that allows us to forget about - * many possible overflows later in this function. */ - assert(data_plus_mac_plus_padding_size < 1024 * 1024); - - switch (EVP_MD_type(md)) { - case NID_sha1: - SHA1_Init((SHA_CTX *)md_state.c); - md_final_raw = tls1_sha1_final_raw; - md_transform = - (void (*)(void *ctx, const uint8_t *block))SHA1_Transform; - md_size = 20; - break; - - case NID_sha256: - SHA256_Init((SHA256_CTX *)md_state.c); - md_final_raw = tls1_sha256_final_raw; - md_transform = - (void (*)(void *ctx, const uint8_t *block))SHA256_Transform; - md_size = 32; - break; - - case NID_sha384: - SHA384_Init((SHA512_CTX *)md_state.c); - md_final_raw = tls1_sha512_final_raw; - md_transform = - (void (*)(void *ctx, const uint8_t *block))SHA512_Transform; - md_size = 384 / 8; - md_block_size = 128; - md_length_size = 16; - break; - - default: - /* EVP_tls_cbc_record_digest_supported should have been called first to - * check that the hash function is supported. */ - assert(0); - *md_out_size = 0; - return 0; - } - - assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES); - assert(md_block_size <= MAX_HASH_BLOCK_SIZE); - assert(md_size <= EVP_MAX_MD_SIZE); - - static const unsigned kHeaderLength = 13; - - /* kVarianceBlocks is the number of blocks of the hash that we have to - * calculate in constant time because they could be altered by the - * padding value. - * - * TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not - * required to be minimal. Therefore we say that the final six blocks - * can vary based on the padding. */ - static const unsigned kVarianceBlocks = 6; - - /* From now on we're dealing with the MAC, which conceptually has 13 - * bytes of `header' before the start of the data. */ - len = data_plus_mac_plus_padding_size + kHeaderLength; - /* max_mac_bytes contains the maximum bytes of bytes in the MAC, including - * |header|, assuming that there's no padding. */ - max_mac_bytes = len - md_size - 1; - /* num_blocks is the maximum number of hash blocks. */ - num_blocks = - (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size; - /* In order to calculate the MAC in constant time we have to handle - * the final blocks specially because the padding value could cause the - * end to appear somewhere in the final |kVarianceBlocks| blocks and we - * can't leak where. However, |num_starting_blocks| worth of data can - * be hashed right away because no padding value can affect whether - * they are plaintext. */ - num_starting_blocks = 0; - /* k is the starting byte offset into the conceptual header||data where - * we start processing. */ - k = 0; - /* mac_end_offset is the index just past the end of the data to be - * MACed. */ - mac_end_offset = data_plus_mac_size + kHeaderLength - md_size; - /* c is the index of the 0x80 byte in the final hash block that - * contains application data. */ - c = mac_end_offset % md_block_size; - /* index_a is the hash block number that contains the 0x80 terminating - * value. */ - index_a = mac_end_offset / md_block_size; - /* index_b is the hash block number that contains the 64-bit hash - * length, in bits. */ - index_b = (mac_end_offset + md_length_size) / md_block_size; - /* bits is the hash-length in bits. It includes the additional hash - * block for the masked HMAC key. 
*/ - - if (num_blocks > kVarianceBlocks) { - num_starting_blocks = num_blocks - kVarianceBlocks; - k = md_block_size * num_starting_blocks; - } - - bits = 8 * mac_end_offset; - - /* Compute the initial HMAC block. */ - bits += 8 * md_block_size; - OPENSSL_memset(hmac_pad, 0, md_block_size); - assert(mac_secret_length <= sizeof(hmac_pad)); - OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length); - for (i = 0; i < md_block_size; i++) { - hmac_pad[i] ^= 0x36; - } - - md_transform(md_state.c, hmac_pad); - - OPENSSL_memset(length_bytes, 0, md_length_size - 4); - length_bytes[md_length_size - 4] = (uint8_t)(bits >> 24); - length_bytes[md_length_size - 3] = (uint8_t)(bits >> 16); - length_bytes[md_length_size - 2] = (uint8_t)(bits >> 8); - length_bytes[md_length_size - 1] = (uint8_t)bits; - - if (k > 0) { - /* k is a multiple of md_block_size. */ - OPENSSL_memcpy(first_block, header, 13); - OPENSSL_memcpy(first_block + 13, data, md_block_size - 13); - md_transform(md_state.c, first_block); - for (i = 1; i < k / md_block_size; i++) { - md_transform(md_state.c, data + md_block_size * i - 13); - } - } - - OPENSSL_memset(mac_out, 0, sizeof(mac_out)); - - /* We now process the final hash blocks. For each block, we construct - * it in constant time. If the |i==index_a| then we'll include the 0x80 - * bytes and zero pad etc. For each block we selectively copy it, in - * constant time, to |mac_out|. */ - for (i = num_starting_blocks; i <= num_starting_blocks + kVarianceBlocks; - i++) { - uint8_t block[MAX_HASH_BLOCK_SIZE]; - uint8_t is_block_a = constant_time_eq_8(i, index_a); - uint8_t is_block_b = constant_time_eq_8(i, index_b); - for (j = 0; j < md_block_size; j++) { - uint8_t b = 0, is_past_c, is_past_cp1; - if (k < kHeaderLength) { - b = header[k]; - } else if (k < data_plus_mac_plus_padding_size + kHeaderLength) { - b = data[k - kHeaderLength]; - } - k++; - - is_past_c = is_block_a & constant_time_ge_8(j, c); - is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1); - /* If this is the block containing the end of the - * application data, and we are at the offset for the - * 0x80 value, then overwrite b with 0x80. */ - b = constant_time_select_8(is_past_c, 0x80, b); - /* If this the the block containing the end of the - * application data and we're past the 0x80 value then - * just write zero. */ - b = b & ~is_past_cp1; - /* If this is index_b (the final block), but not - * index_a (the end of the data), then the 64-bit - * length didn't fit into index_a and we're having to - * add an extra block of zeros. */ - b &= ~is_block_b | is_block_a; - - /* The final bytes of one of the blocks contains the - * length. */ - if (j >= md_block_size - md_length_size) { - /* If this is index_b, write a length byte. */ - b = constant_time_select_8( - is_block_b, length_bytes[j - (md_block_size - md_length_size)], b); - } - block[j] = b; - } - - md_transform(md_state.c, block); - md_final_raw(md_state.c, block); - /* If this is index_b, copy the hash value to |mac_out|. */ - for (j = 0; j < md_size; j++) { - mac_out[j] |= block[j] & is_block_b; - } - } - - EVP_MD_CTX_init(&md_ctx); - if (!EVP_DigestInit_ex(&md_ctx, md, NULL /* engine */)) { - EVP_MD_CTX_cleanup(&md_ctx); - return 0; - } - - /* Complete the HMAC in the standard manner. 
*/ - for (i = 0; i < md_block_size; i++) { - hmac_pad[i] ^= 0x6a; - } - - EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size); - EVP_DigestUpdate(&md_ctx, mac_out, md_size); - EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u); - *md_out_size = md_out_size_u; - EVP_MD_CTX_cleanup(&md_ctx); - - return 1; -} diff --git a/Sources/BoringSSL/crypto/asn1/t_bitst.c b/Sources/BoringSSL/crypto/cipher_extra/cipher_extra.c similarity index 67% rename from Sources/BoringSSL/crypto/asn1/t_bitst.c rename to Sources/BoringSSL/crypto/cipher_extra/cipher_extra.c index e754ca73a..fc8e24b65 100644 --- a/Sources/BoringSSL/crypto/asn1/t_bitst.c +++ b/Sources/BoringSSL/crypto/cipher_extra/cipher_extra.c @@ -54,50 +54,61 @@ * copied and put under another distribution licence * [including the GNU Public Licence.] */ -#include +#include +#include #include +#include #include +#include + +#include "internal.h" +#include "../internal.h" -int ASN1_BIT_STRING_name_print(BIO *out, ASN1_BIT_STRING *bs, - BIT_STRING_BITNAME *tbl, int indent) -{ - BIT_STRING_BITNAME *bnam; - char first = 1; - BIO_printf(out, "%*s", indent, ""); - for (bnam = tbl; bnam->lname; bnam++) { - if (ASN1_BIT_STRING_get_bit(bs, bnam->bitnum)) { - if (!first) - BIO_puts(out, ", "); - BIO_puts(out, bnam->lname); - first = 0; - } - } - BIO_puts(out, "\n"); - return 1; -} -int ASN1_BIT_STRING_set_asc(ASN1_BIT_STRING *bs, char *name, int value, - BIT_STRING_BITNAME *tbl) -{ - int bitnum; - bitnum = ASN1_BIT_STRING_num_asc(name, tbl); - if (bitnum < 0) - return 0; - if (bs) { - if (!ASN1_BIT_STRING_set_bit(bs, bitnum, value)) - return 0; - } - return 1; +const EVP_CIPHER *EVP_get_cipherbynid(int nid) { + switch (nid) { + case NID_rc2_cbc: + return EVP_rc2_cbc(); + case NID_rc2_40_cbc: + return EVP_rc2_40_cbc(); + case NID_des_ede3_cbc: + return EVP_des_ede3_cbc(); + case NID_des_ede_cbc: + return EVP_des_cbc(); + case NID_aes_128_cbc: + return EVP_aes_128_cbc(); + case NID_aes_192_cbc: + return EVP_aes_192_cbc(); + case NID_aes_256_cbc: + return EVP_aes_256_cbc(); + default: + return NULL; + } } -int ASN1_BIT_STRING_num_asc(char *name, BIT_STRING_BITNAME *tbl) -{ - BIT_STRING_BITNAME *bnam; - for (bnam = tbl; bnam->lname; bnam++) { - if (!strcmp(bnam->sname, name) || !strcmp(bnam->lname, name)) - return bnam->bitnum; - } - return -1; +const EVP_CIPHER *EVP_get_cipherbyname(const char *name) { + if (OPENSSL_strcasecmp(name, "rc4") == 0) { + return EVP_rc4(); + } else if (OPENSSL_strcasecmp(name, "des-cbc") == 0) { + return EVP_des_cbc(); + } else if (OPENSSL_strcasecmp(name, "des-ede3-cbc") == 0 || + OPENSSL_strcasecmp(name, "3des") == 0) { + return EVP_des_ede3_cbc(); + } else if (OPENSSL_strcasecmp(name, "aes-128-cbc") == 0) { + return EVP_aes_128_cbc(); + } else if (OPENSSL_strcasecmp(name, "aes-256-cbc") == 0) { + return EVP_aes_256_cbc(); + } else if (OPENSSL_strcasecmp(name, "aes-128-ctr") == 0) { + return EVP_aes_128_ctr(); + } else if (OPENSSL_strcasecmp(name, "aes-256-ctr") == 0) { + return EVP_aes_256_ctr(); + } else if (OPENSSL_strcasecmp(name, "aes-128-ecb") == 0) { + return EVP_aes_128_ecb(); + } else if (OPENSSL_strcasecmp(name, "aes-256-ecb") == 0) { + return EVP_aes_256_ecb(); + } + + return NULL; } diff --git a/Sources/BoringSSL/crypto/cipher/derive_key.c b/Sources/BoringSSL/crypto/cipher_extra/derive_key.c similarity index 99% rename from Sources/BoringSSL/crypto/cipher/derive_key.c rename to Sources/BoringSSL/crypto/cipher_extra/derive_key.c index 9e1634ab9..ff5ae0649 100644 --- a/Sources/BoringSSL/crypto/cipher/derive_key.c +++ 
b/Sources/BoringSSL/crypto/cipher_extra/derive_key.c @@ -61,8 +61,6 @@ #include #include -#include "internal.h" - #define PKCS5_SALT_LEN 8 diff --git a/Sources/BoringSSL/crypto/cipher_extra/e_aesctrhmac.c b/Sources/BoringSSL/crypto/cipher_extra/e_aesctrhmac.c new file mode 100644 index 000000000..3a0de9b15 --- /dev/null +++ b/Sources/BoringSSL/crypto/cipher_extra/e_aesctrhmac.c @@ -0,0 +1,281 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include +#include +#include +#include +#include + +#include "../fipsmodule/cipher/internal.h" + + +#define EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN SHA256_DIGEST_LENGTH +#define EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN 12 + +struct aead_aes_ctr_hmac_sha256_ctx { + union { + double align; + AES_KEY ks; + } ks; + ctr128_f ctr; + block128_f block; + SHA256_CTX inner_init_state; + SHA256_CTX outer_init_state; +}; + +static void hmac_init(SHA256_CTX *out_inner, SHA256_CTX *out_outer, + const uint8_t hmac_key[32]) { + static const size_t hmac_key_len = 32; + uint8_t block[SHA256_CBLOCK]; + OPENSSL_memcpy(block, hmac_key, hmac_key_len); + OPENSSL_memset(block + hmac_key_len, 0x36, sizeof(block) - hmac_key_len); + + unsigned i; + for (i = 0; i < hmac_key_len; i++) { + block[i] ^= 0x36; + } + + SHA256_Init(out_inner); + SHA256_Update(out_inner, block, sizeof(block)); + + OPENSSL_memset(block + hmac_key_len, 0x5c, sizeof(block) - hmac_key_len); + for (i = 0; i < hmac_key_len; i++) { + block[i] ^= (0x36 ^ 0x5c); + } + + SHA256_Init(out_outer); + SHA256_Update(out_outer, block, sizeof(block)); +} + +static int aead_aes_ctr_hmac_sha256_init(EVP_AEAD_CTX *ctx, const uint8_t *key, + size_t key_len, size_t tag_len) { + struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx; + static const size_t hmac_key_len = 32; + + if (key_len < hmac_key_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); + return 0; // EVP_AEAD_CTX_init should catch this. + } + + const size_t aes_key_len = key_len - hmac_key_len; + if (aes_key_len != 16 && aes_key_len != 32) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); + return 0; // EVP_AEAD_CTX_init should catch this. 
+ } + + if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { + tag_len = EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN; + } + + if (tag_len > EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE); + return 0; + } + + aes_ctx = OPENSSL_malloc(sizeof(struct aead_aes_ctr_hmac_sha256_ctx)); + if (aes_ctx == NULL) { + OPENSSL_PUT_ERROR(CIPHER, ERR_R_MALLOC_FAILURE); + return 0; + } + + aes_ctx->ctr = + aes_ctr_set_key(&aes_ctx->ks.ks, NULL, &aes_ctx->block, key, aes_key_len); + ctx->tag_len = tag_len; + hmac_init(&aes_ctx->inner_init_state, &aes_ctx->outer_init_state, + key + aes_key_len); + + ctx->aead_state = aes_ctx; + + return 1; +} + +static void aead_aes_ctr_hmac_sha256_cleanup(EVP_AEAD_CTX *ctx) { + OPENSSL_free(ctx->aead_state); +} + +static void hmac_update_uint64(SHA256_CTX *sha256, uint64_t value) { + unsigned i; + uint8_t bytes[8]; + + for (i = 0; i < sizeof(bytes); i++) { + bytes[i] = value & 0xff; + value >>= 8; + } + SHA256_Update(sha256, bytes, sizeof(bytes)); +} + +static void hmac_calculate(uint8_t out[SHA256_DIGEST_LENGTH], + const SHA256_CTX *inner_init_state, + const SHA256_CTX *outer_init_state, + const uint8_t *ad, size_t ad_len, + const uint8_t *nonce, const uint8_t *ciphertext, + size_t ciphertext_len) { + SHA256_CTX sha256; + OPENSSL_memcpy(&sha256, inner_init_state, sizeof(sha256)); + hmac_update_uint64(&sha256, ad_len); + hmac_update_uint64(&sha256, ciphertext_len); + SHA256_Update(&sha256, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN); + SHA256_Update(&sha256, ad, ad_len); + + // Pad with zeros to the end of the SHA-256 block. + const unsigned num_padding = + (SHA256_CBLOCK - ((sizeof(uint64_t)*2 + + EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN + ad_len) % + SHA256_CBLOCK)) % + SHA256_CBLOCK; + uint8_t padding[SHA256_CBLOCK]; + OPENSSL_memset(padding, 0, num_padding); + SHA256_Update(&sha256, padding, num_padding); + + SHA256_Update(&sha256, ciphertext, ciphertext_len); + + uint8_t inner_digest[SHA256_DIGEST_LENGTH]; + SHA256_Final(inner_digest, &sha256); + + OPENSSL_memcpy(&sha256, outer_init_state, sizeof(sha256)); + SHA256_Update(&sha256, inner_digest, sizeof(inner_digest)); + SHA256_Final(out, &sha256); +} + +static void aead_aes_ctr_hmac_sha256_crypt( + const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx, uint8_t *out, + const uint8_t *in, size_t len, const uint8_t *nonce) { + // Since the AEAD operation is one-shot, keeping a buffer of unused keystream + // bytes is pointless. However, |CRYPTO_ctr128_encrypt| requires it. 
+ uint8_t partial_block_buffer[AES_BLOCK_SIZE]; + unsigned partial_block_offset = 0; + OPENSSL_memset(partial_block_buffer, 0, sizeof(partial_block_buffer)); + + uint8_t counter[AES_BLOCK_SIZE]; + OPENSSL_memcpy(counter, nonce, EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN); + OPENSSL_memset(counter + EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN, 0, 4); + + if (aes_ctx->ctr) { + CRYPTO_ctr128_encrypt_ctr32(in, out, len, &aes_ctx->ks.ks, counter, + partial_block_buffer, &partial_block_offset, + aes_ctx->ctr); + } else { + CRYPTO_ctr128_encrypt(in, out, len, &aes_ctx->ks.ks, counter, + partial_block_buffer, &partial_block_offset, + aes_ctx->block); + } +} + +static int aead_aes_ctr_hmac_sha256_seal_scatter( + const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, + size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, + size_t extra_in_len, const uint8_t *ad, size_t ad_len) { + const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = ctx->aead_state; + const uint64_t in_len_64 = in_len; + + if (in_len_64 >= (UINT64_C(1) << 32) * AES_BLOCK_SIZE) { + // This input is so large it would overflow the 32-bit block counter. + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); + return 0; + } + + if (max_out_tag_len < ctx->tag_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); + return 0; + } + + if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); + return 0; + } + + aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, in_len, nonce); + + uint8_t hmac_result[SHA256_DIGEST_LENGTH]; + hmac_calculate(hmac_result, &aes_ctx->inner_init_state, + &aes_ctx->outer_init_state, ad, ad_len, nonce, out, in_len); + OPENSSL_memcpy(out_tag, hmac_result, ctx->tag_len); + *out_tag_len = ctx->tag_len; + + return 1; +} + +static int aead_aes_ctr_hmac_sha256_open_gather( + const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, + size_t in_tag_len, const uint8_t *ad, size_t ad_len) { + const struct aead_aes_ctr_hmac_sha256_ctx *aes_ctx = ctx->aead_state; + + if (in_tag_len != ctx->tag_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + if (nonce_len != EVP_AEAD_AES_CTR_HMAC_SHA256_NONCE_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); + return 0; + } + + uint8_t hmac_result[SHA256_DIGEST_LENGTH]; + hmac_calculate(hmac_result, &aes_ctx->inner_init_state, + &aes_ctx->outer_init_state, ad, ad_len, nonce, in, + in_len); + if (CRYPTO_memcmp(hmac_result, in_tag, ctx->tag_len) != 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + aead_aes_ctr_hmac_sha256_crypt(aes_ctx, out, in, in_len, nonce); + + return 1; +} + +static const EVP_AEAD aead_aes_128_ctr_hmac_sha256 = { + 16 /* AES key */ + 32 /* HMAC key */, + 12, // nonce length + EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // overhead + EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in + + aead_aes_ctr_hmac_sha256_init, + NULL /* init_with_direction */, + aead_aes_ctr_hmac_sha256_cleanup, + NULL /* open */, + aead_aes_ctr_hmac_sha256_seal_scatter, + aead_aes_ctr_hmac_sha256_open_gather, + NULL /* get_iv */, + NULL /* tag_len */, +}; + +static const EVP_AEAD aead_aes_256_ctr_hmac_sha256 = { + 32 /* AES key */ + 32 /* HMAC key */, + 12, // nonce length + EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, // overhead + EVP_AEAD_AES_CTR_HMAC_SHA256_TAG_LEN, 
// max tag length
+ 0, // seal_scatter_supports_extra_in
+
+ aead_aes_ctr_hmac_sha256_init,
+ NULL /* init_with_direction */,
+ aead_aes_ctr_hmac_sha256_cleanup,
+ NULL /* open */,
+ aead_aes_ctr_hmac_sha256_seal_scatter,
+ aead_aes_ctr_hmac_sha256_open_gather,
+ NULL /* get_iv */,
+ NULL /* tag_len */,
+};
+
+const EVP_AEAD *EVP_aead_aes_128_ctr_hmac_sha256(void) {
+ return &aead_aes_128_ctr_hmac_sha256;
+}
+
+const EVP_AEAD *EVP_aead_aes_256_ctr_hmac_sha256(void) {
+ return &aead_aes_256_ctr_hmac_sha256;
+}
diff --git a/Sources/BoringSSL/crypto/cipher_extra/e_aesgcmsiv.c b/Sources/BoringSSL/crypto/cipher_extra/e_aesgcmsiv.c
new file mode 100644
index 000000000..9de23003b
--- /dev/null
+++ b/Sources/BoringSSL/crypto/cipher_extra/e_aesgcmsiv.c
@@ -0,0 +1,867 @@
+/* Copyright (c) 2017, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+
+#include "../fipsmodule/cipher/internal.h"
+
+
+#define EVP_AEAD_AES_GCM_SIV_NONCE_LEN 12
+#define EVP_AEAD_AES_GCM_SIV_TAG_LEN 16
+
+#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM)
+
+// Optimised AES-GCM-SIV
+
+struct aead_aes_gcm_siv_asm_ctx {
+ alignas(16) uint8_t key[16*15];
+ int is_128_bit;
+ // ptr contains the original pointer from |OPENSSL_malloc|, which may only be
+ // 8-byte aligned. When freeing this structure, actually call |OPENSSL_free|
+ // on this pointer.
+ void *ptr;
+};
+
+// aes128gcmsiv_aes_ks writes an AES-128 key schedule for |key| to
+// |out_expanded_key|.
+extern void aes128gcmsiv_aes_ks(
+ const uint8_t key[16], uint8_t out_expanded_key[16*15]);
+
+// aes256gcmsiv_aes_ks writes an AES-256 key schedule for |key| to
+// |out_expanded_key|.
+extern void aes256gcmsiv_aes_ks(
+ const uint8_t key[16], uint8_t out_expanded_key[16*15]);
+
+static int aead_aes_gcm_siv_asm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
+ size_t key_len, size_t tag_len) {
+ const size_t key_bits = key_len * 8;
+
+ if (key_bits != 128 && key_bits != 256) {
+ OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
+ return 0; // EVP_AEAD_CTX_init should catch this.
+ }
+
+ if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
+ tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN;
+ }
+
+ if (tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) {
+ OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE);
+ return 0;
+ }
+
+ char *ptr = OPENSSL_malloc(sizeof(struct aead_aes_gcm_siv_asm_ctx) + 8);
+ if (ptr == NULL) {
+ return 0;
+ }
+ assert((((uintptr_t)ptr) & 7) == 0);
+
+ // gcm_siv_ctx needs to be 16-byte aligned in a cross-platform way.
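+ // |OPENSSL_malloc| only guarantees (and the assert above only checks) 8-byte
+ // alignment, so eight spare bytes are allocated and |ptr| is rounded up by
+ // |((uintptr_t)ptr) & 8|: if |ptr| is 8 mod 16 the offset is 8, otherwise
+ // |ptr| is already 16-byte aligned and the offset is 0.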
+ struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx =
+ (struct aead_aes_gcm_siv_asm_ctx *)(ptr + (((uintptr_t)ptr) & 8));
+
+ assert((((uintptr_t)gcm_siv_ctx) & 15) == 0);
+ gcm_siv_ctx->ptr = ptr;
+
+ if (key_bits == 128) {
+ aes128gcmsiv_aes_ks(key, &gcm_siv_ctx->key[0]);
+ gcm_siv_ctx->is_128_bit = 1;
+ } else {
+ aes256gcmsiv_aes_ks(key, &gcm_siv_ctx->key[0]);
+ gcm_siv_ctx->is_128_bit = 0;
+ }
+ ctx->aead_state = gcm_siv_ctx;
+ ctx->tag_len = tag_len;
+
+ return 1;
+}
+
+static void aead_aes_gcm_siv_asm_cleanup(EVP_AEAD_CTX *ctx) {
+ const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx = ctx->aead_state;
+ OPENSSL_free(gcm_siv_ctx->ptr);
+}
+
+// aesgcmsiv_polyval_horner updates the POLYVAL value in |in_out_poly| to
+// include a number (|in_blocks|) of 16-byte blocks of data from |in|, given
+// the POLYVAL key in |key|.
+extern void aesgcmsiv_polyval_horner(const uint8_t in_out_poly[16],
+ const uint8_t key[16], const uint8_t *in,
+ size_t in_blocks);
+
+// aesgcmsiv_htable_init writes powers 1..8 of |auth_key| to |out_htable|.
+extern void aesgcmsiv_htable_init(uint8_t out_htable[16 * 8],
+ const uint8_t auth_key[16]);
+
+// aesgcmsiv_htable6_init writes powers 1..6 of |auth_key| to |out_htable|.
+extern void aesgcmsiv_htable6_init(uint8_t out_htable[16 * 6],
+ const uint8_t auth_key[16]);
+
+// aesgcmsiv_htable_polyval updates the POLYVAL value in |in_out_poly| to
+// include |in_len| bytes of data from |in|. (Where |in_len| must be a multiple
+// of 16.) It uses the precomputed powers of the key given in |htable|.
+extern void aesgcmsiv_htable_polyval(const uint8_t htable[16 * 8],
+ const uint8_t *in, size_t in_len,
+ uint8_t in_out_poly[16]);
+
+// aes128gcmsiv_dec decrypts |in_len| & ~15 bytes from |in| and writes them to
+// |out|. (The full value of |in_len| is still used to find the authentication
+// tag appended to the ciphertext, however, so must not be pre-masked.)
+//
+// |in| and |out| may be equal, but must not otherwise overlap.
+//
+// While decrypting, it updates the POLYVAL value found at the beginning of
+// |in_out_calculated_tag_and_scratch| and writes the updated value back before
+// return. During execution, it may use the whole of this space for other
+// purposes. In order to decrypt and update the POLYVAL value, it uses the
+// expanded key from |key| and the table of powers in |htable|.
+extern void aes128gcmsiv_dec(const uint8_t *in, uint8_t *out,
+ uint8_t in_out_calculated_tag_and_scratch[16 * 8],
+ const uint8_t htable[16 * 6],
+ const struct aead_aes_gcm_siv_asm_ctx *key,
+ size_t in_len);
+
+// aes256gcmsiv_dec acts like |aes128gcmsiv_dec|, but for AES-256.
+extern void aes256gcmsiv_dec(const uint8_t *in, uint8_t *out,
+ uint8_t in_out_calculated_tag_and_scratch[16 * 8],
+ const uint8_t htable[16 * 6],
+ const struct aead_aes_gcm_siv_asm_ctx *key,
+ size_t in_len);
+
+// aes128gcmsiv_kdf performs the AES-GCM-SIV KDF given the expanded key from
+// |key_schedule| and the nonce in |nonce|. Note that, while only 12 bytes of
+// the nonce are used, 16 bytes are read and so the value must be
+// right-padded.
+extern void aes128gcmsiv_kdf(const uint8_t nonce[16],
+ uint64_t out_key_material[8],
+ const uint8_t *key_schedule);
+
+// aes256gcmsiv_kdf acts like |aes128gcmsiv_kdf|, but for AES-256.
+extern void aes256gcmsiv_kdf(const uint8_t nonce[16], + uint64_t out_key_material[12], + const uint8_t *key_schedule); + +// aes128gcmsiv_aes_ks_enc_x1 performs a key expansion of the AES-128 key in +// |key|, writes the expanded key to |out_expanded_key| and encrypts a single +// block from |in| to |out|. +extern void aes128gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16], + uint8_t out_expanded_key[16 * 15], + const uint64_t key[2]); + +// aes256gcmsiv_aes_ks_enc_x1 acts like |aes128gcmsiv_aes_ks_enc_x1|, but for +// AES-256. +extern void aes256gcmsiv_aes_ks_enc_x1(const uint8_t in[16], uint8_t out[16], + uint8_t out_expanded_key[16 * 15], + const uint64_t key[4]); + +// aes128gcmsiv_ecb_enc_block encrypts a single block from |in| to |out| using +// the expanded key in |expanded_key|. +extern void aes128gcmsiv_ecb_enc_block( + const uint8_t in[16], uint8_t out[16], + const struct aead_aes_gcm_siv_asm_ctx *expanded_key); + +// aes256gcmsiv_ecb_enc_block acts like |aes128gcmsiv_ecb_enc_block|, but for +// AES-256. +extern void aes256gcmsiv_ecb_enc_block( + const uint8_t in[16], uint8_t out[16], + const struct aead_aes_gcm_siv_asm_ctx *expanded_key); + +// aes128gcmsiv_enc_msg_x4 encrypts |in_len| bytes from |in| to |out| using the +// expanded key from |key|. (The value of |in_len| must be a multiple of 16.) +// The |in| and |out| buffers may be equal but must not otherwise overlap. The +// initial counter is constructed from the given |tag| as required by +// AES-GCM-SIV. +extern void aes128gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out, + const uint8_t *tag, + const struct aead_aes_gcm_siv_asm_ctx *key, + size_t in_len); + +// aes256gcmsiv_enc_msg_x4 acts like |aes128gcmsiv_enc_msg_x4|, but for +// AES-256. +extern void aes256gcmsiv_enc_msg_x4(const uint8_t *in, uint8_t *out, + const uint8_t *tag, + const struct aead_aes_gcm_siv_asm_ctx *key, + size_t in_len); + +// aes128gcmsiv_enc_msg_x8 acts like |aes128gcmsiv_enc_msg_x4|, but is +// optimised for longer messages. +extern void aes128gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out, + const uint8_t *tag, + const struct aead_aes_gcm_siv_asm_ctx *key, + size_t in_len); + +// aes256gcmsiv_enc_msg_x8 acts like |aes256gcmsiv_enc_msg_x4|, but is +// optimised for longer messages. +extern void aes256gcmsiv_enc_msg_x8(const uint8_t *in, uint8_t *out, + const uint8_t *tag, + const struct aead_aes_gcm_siv_asm_ctx *key, + size_t in_len); + +// gcm_siv_asm_polyval evaluates POLYVAL at |auth_key| on the given plaintext +// and AD. The result is written to |out_tag|. 
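+//
+// Concretely, the value computed below is built by absorbing the AD
+// (zero-padded to a 16-byte boundary), then the plaintext (padded the same
+// way), then a final block holding the AD and plaintext lengths in bits; the
+// 12-byte nonce is XORed into the first 12 bytes and the top bit of the last
+// byte is cleared, as AES-GCM-SIV requires. The seal path then encrypts this
+// value with the record-encryption key to form the tag.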
+static void gcm_siv_asm_polyval(uint8_t out_tag[16], const uint8_t *in, + size_t in_len, const uint8_t *ad, size_t ad_len, + const uint8_t auth_key[16], + const uint8_t nonce[12]) { + OPENSSL_memset(out_tag, 0, 16); + const size_t ad_blocks = ad_len / 16; + const size_t in_blocks = in_len / 16; + int htable_init = 0; + alignas(16) uint8_t htable[16*8]; + + if (ad_blocks > 8 || in_blocks > 8) { + htable_init = 1; + aesgcmsiv_htable_init(htable, auth_key); + } + + if (htable_init) { + aesgcmsiv_htable_polyval(htable, ad, ad_len & ~15, out_tag); + } else { + aesgcmsiv_polyval_horner(out_tag, auth_key, ad, ad_blocks); + } + + uint8_t scratch[16]; + if (ad_len & 15) { + OPENSSL_memset(scratch, 0, sizeof(scratch)); + OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15); + aesgcmsiv_polyval_horner(out_tag, auth_key, scratch, 1); + } + + if (htable_init) { + aesgcmsiv_htable_polyval(htable, in, in_len & ~15, out_tag); + } else { + aesgcmsiv_polyval_horner(out_tag, auth_key, in, in_blocks); + } + + if (in_len & 15) { + OPENSSL_memset(scratch, 0, sizeof(scratch)); + OPENSSL_memcpy(scratch, &in[in_len & ~15], in_len & 15); + aesgcmsiv_polyval_horner(out_tag, auth_key, scratch, 1); + } + + union { + uint8_t c[16]; + struct { + uint64_t ad; + uint64_t in; + } bitlens; + } length_block; + + length_block.bitlens.ad = ad_len * 8; + length_block.bitlens.in = in_len * 8; + aesgcmsiv_polyval_horner(out_tag, auth_key, length_block.c, 1); + + for (size_t i = 0; i < 12; i++) { + out_tag[i] ^= nonce[i]; + } + + out_tag[15] &= 0x7f; +} + +// aead_aes_gcm_siv_asm_crypt_last_block handles the encryption/decryption +// (same thing in CTR mode) of the final block of a plaintext/ciphertext. It +// writes |in_len| & 15 bytes to |out| + |in_len|, based on an initial counter +// derived from |tag|. +static void aead_aes_gcm_siv_asm_crypt_last_block( + int is_128_bit, uint8_t *out, const uint8_t *in, size_t in_len, + const uint8_t tag[16], + const struct aead_aes_gcm_siv_asm_ctx *enc_key_expanded) { + alignas(16) union { + uint8_t c[16]; + uint32_t u32[4]; + } counter; + OPENSSL_memcpy(&counter, tag, sizeof(counter)); + counter.c[15] |= 0x80; + counter.u32[0] += in_len / 16; + + if (is_128_bit) { + aes128gcmsiv_ecb_enc_block(&counter.c[0], &counter.c[0], enc_key_expanded); + } else { + aes256gcmsiv_ecb_enc_block(&counter.c[0], &counter.c[0], enc_key_expanded); + } + + const size_t last_bytes_offset = in_len & ~15; + const size_t last_bytes_len = in_len & 15; + uint8_t *last_bytes_out = &out[last_bytes_offset]; + const uint8_t *last_bytes_in = &in[last_bytes_offset]; + for (size_t i = 0; i < last_bytes_len; i++) { + last_bytes_out[i] = last_bytes_in[i] ^ counter.c[i]; + } +} + +// aead_aes_gcm_siv_kdf calculates the record encryption and authentication +// keys given the |nonce|. 
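+//
+// The assembly KDF encrypts blocks consisting of a little-endian block counter
+// followed by the 12-byte nonce (see |gcm_siv_keys| below for the generic
+// construction) and, per AES-GCM-SIV, only the first eight bytes of each
+// 16-byte ciphertext block are kept. That is why |key_material| is read in
+// steps of two below: entries 0 and 2 form the POLYVAL authentication key and
+// entries 4 and 6 (plus 8 and 10 for AES-256) form the record-encryption key.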
+static void aead_aes_gcm_siv_kdf( + int is_128_bit, const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx, + uint64_t out_record_auth_key[2], uint64_t out_record_enc_key[4], + const uint8_t nonce[12]) { + alignas(16) uint8_t padded_nonce[16]; + OPENSSL_memcpy(padded_nonce, nonce, 12); + + alignas(16) uint64_t key_material[12]; + if (is_128_bit) { + aes128gcmsiv_kdf(padded_nonce, key_material, &gcm_siv_ctx->key[0]); + out_record_enc_key[0] = key_material[4]; + out_record_enc_key[1] = key_material[6]; + } else { + aes256gcmsiv_kdf(padded_nonce, key_material, &gcm_siv_ctx->key[0]); + out_record_enc_key[0] = key_material[4]; + out_record_enc_key[1] = key_material[6]; + out_record_enc_key[2] = key_material[8]; + out_record_enc_key[3] = key_material[10]; + } + + out_record_auth_key[0] = key_material[0]; + out_record_auth_key[1] = key_material[2]; +} + +static int aead_aes_gcm_siv_asm_seal_scatter( + const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, + size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, + size_t extra_in_len, const uint8_t *ad, size_t ad_len) { + const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx = ctx->aead_state; + const uint64_t in_len_64 = in_len; + const uint64_t ad_len_64 = ad_len; + + if (in_len_64 > (UINT64_C(1) << 36) || + ad_len_64 >= (UINT64_C(1) << 61)) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); + return 0; + } + + if (max_out_tag_len < EVP_AEAD_AES_GCM_SIV_TAG_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); + return 0; + } + + if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); + return 0; + } + + alignas(16) uint64_t record_auth_key[2]; + alignas(16) uint64_t record_enc_key[4]; + aead_aes_gcm_siv_kdf(gcm_siv_ctx->is_128_bit, gcm_siv_ctx, record_auth_key, + record_enc_key, nonce); + + alignas(16) uint8_t tag[16] = {0}; + gcm_siv_asm_polyval(tag, in, in_len, ad, ad_len, + (const uint8_t *)record_auth_key, nonce); + + struct aead_aes_gcm_siv_asm_ctx enc_key_expanded; + + if (gcm_siv_ctx->is_128_bit) { + aes128gcmsiv_aes_ks_enc_x1(tag, tag, &enc_key_expanded.key[0], + record_enc_key); + + if (in_len < 128) { + aes128gcmsiv_enc_msg_x4(in, out, tag, &enc_key_expanded, in_len & ~15); + } else { + aes128gcmsiv_enc_msg_x8(in, out, tag, &enc_key_expanded, in_len & ~15); + } + } else { + aes256gcmsiv_aes_ks_enc_x1(tag, tag, &enc_key_expanded.key[0], + record_enc_key); + + if (in_len < 128) { + aes256gcmsiv_enc_msg_x4(in, out, tag, &enc_key_expanded, in_len & ~15); + } else { + aes256gcmsiv_enc_msg_x8(in, out, tag, &enc_key_expanded, in_len & ~15); + } + } + + if (in_len & 15) { + aead_aes_gcm_siv_asm_crypt_last_block(gcm_siv_ctx->is_128_bit, out, in, + in_len, tag, &enc_key_expanded); + } + + OPENSSL_memcpy(out_tag, tag, sizeof(tag)); + *out_tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN; + + return 1; +} + +// TODO(martinkr): Add aead_aes_gcm_siv_asm_open_gather. N.B. aes128gcmsiv_dec +// expects ciphertext and tag in a contiguous buffer. 
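
// Illustrative sketch (not from the upstream file and unused by it): callers
// normally reach this AEAD through the one-shot API declared in
// <openssl/aead.h>, which appends the 16-byte tag to the ciphertext so that
// ciphertext and tag stay contiguous, as |aes128gcmsiv_dec| above expects.
// The function name and buffer contract here are hypothetical.
static int example_seal_aes_128_gcm_siv(uint8_t *out, size_t *out_len,
                                        size_t max_out_len,
                                        const uint8_t key[16],
                                        const uint8_t nonce[12],
                                        const uint8_t *in, size_t in_len) {
  EVP_AEAD_CTX aead;
  if (!EVP_AEAD_CTX_init(&aead, EVP_aead_aes_128_gcm_siv(), key, 16,
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL /* engine */)) {
    return 0;
  }
  // |out| needs room for in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN bytes; the tag
  // is written directly after the ciphertext.
  const int ok = EVP_AEAD_CTX_seal(&aead, out, out_len, max_out_len, nonce,
                                   EVP_AEAD_AES_GCM_SIV_NONCE_LEN, in, in_len,
                                   NULL /* ad */, 0 /* ad_len */);
  EVP_AEAD_CTX_cleanup(&aead);
  return ok;
}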
+ +static int aead_aes_gcm_siv_asm_open(const EVP_AEAD_CTX *ctx, uint8_t *out, + size_t *out_len, size_t max_out_len, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *ad, size_t ad_len) { + const uint64_t ad_len_64 = ad_len; + if (ad_len_64 >= (UINT64_C(1) << 61)) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); + return 0; + } + + const uint64_t in_len_64 = in_len; + if (in_len < EVP_AEAD_AES_GCM_SIV_TAG_LEN || + in_len_64 > (UINT64_C(1) << 36) + AES_BLOCK_SIZE) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + const struct aead_aes_gcm_siv_asm_ctx *gcm_siv_ctx = ctx->aead_state; + const size_t plaintext_len = in_len - EVP_AEAD_AES_GCM_SIV_TAG_LEN; + const uint8_t *const given_tag = in + plaintext_len; + + if (max_out_len < plaintext_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); + return 0; + } + + alignas(16) uint64_t record_auth_key[2]; + alignas(16) uint64_t record_enc_key[4]; + aead_aes_gcm_siv_kdf(gcm_siv_ctx->is_128_bit, gcm_siv_ctx, record_auth_key, + record_enc_key, nonce); + + struct aead_aes_gcm_siv_asm_ctx expanded_key; + if (gcm_siv_ctx->is_128_bit) { + aes128gcmsiv_aes_ks((const uint8_t *) record_enc_key, &expanded_key.key[0]); + } else { + aes256gcmsiv_aes_ks((const uint8_t *) record_enc_key, &expanded_key.key[0]); + } + // calculated_tag is 16*8 bytes, rather than 16 bytes, because + // aes[128|256]gcmsiv_dec uses the extra as scratch space. + alignas(16) uint8_t calculated_tag[16 * 8] = {0}; + + OPENSSL_memset(calculated_tag, 0, EVP_AEAD_AES_GCM_SIV_TAG_LEN); + const size_t ad_blocks = ad_len / 16; + aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key, ad, + ad_blocks); + + uint8_t scratch[16]; + if (ad_len & 15) { + OPENSSL_memset(scratch, 0, sizeof(scratch)); + OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15); + aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key, + scratch, 1); + } + + alignas(16) uint8_t htable[16 * 6]; + aesgcmsiv_htable6_init(htable, (const uint8_t *)record_auth_key); + + if (gcm_siv_ctx->is_128_bit) { + aes128gcmsiv_dec(in, out, calculated_tag, htable, &expanded_key, + plaintext_len); + } else { + aes256gcmsiv_dec(in, out, calculated_tag, htable, &expanded_key, + plaintext_len); + } + + if (plaintext_len & 15) { + aead_aes_gcm_siv_asm_crypt_last_block(gcm_siv_ctx->is_128_bit, out, in, + plaintext_len, given_tag, + &expanded_key); + OPENSSL_memset(scratch, 0, sizeof(scratch)); + OPENSSL_memcpy(scratch, out + (plaintext_len & ~15), plaintext_len & 15); + aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key, + scratch, 1); + } + + union { + uint8_t c[16]; + struct { + uint64_t ad; + uint64_t in; + } bitlens; + } length_block; + + length_block.bitlens.ad = ad_len * 8; + length_block.bitlens.in = plaintext_len * 8; + aesgcmsiv_polyval_horner(calculated_tag, (const uint8_t *)record_auth_key, + length_block.c, 1); + + for (size_t i = 0; i < 12; i++) { + calculated_tag[i] ^= nonce[i]; + } + + calculated_tag[15] &= 0x7f; + + if (gcm_siv_ctx->is_128_bit) { + aes128gcmsiv_ecb_enc_block(calculated_tag, calculated_tag, &expanded_key); + } else { + aes256gcmsiv_ecb_enc_block(calculated_tag, calculated_tag, &expanded_key); + } + + if (CRYPTO_memcmp(calculated_tag, given_tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN) != + 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + *out_len = in_len - EVP_AEAD_AES_GCM_SIV_TAG_LEN; + return 1; +} + +static const EVP_AEAD aead_aes_128_gcm_siv_asm = { + 16, 
// key length + EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in + + aead_aes_gcm_siv_asm_init, + NULL /* init_with_direction */, + aead_aes_gcm_siv_asm_cleanup, + aead_aes_gcm_siv_asm_open, + aead_aes_gcm_siv_asm_seal_scatter, + NULL /* open_gather */, + NULL /* get_iv */, + NULL /* tag_len */, +}; + +static const EVP_AEAD aead_aes_256_gcm_siv_asm = { + 32, // key length + EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in + + aead_aes_gcm_siv_asm_init, + NULL /* init_with_direction */, + aead_aes_gcm_siv_asm_cleanup, + aead_aes_gcm_siv_asm_open, + aead_aes_gcm_siv_asm_seal_scatter, + NULL /* open_gather */, + NULL /* get_iv */, + NULL /* tag_len */, +}; + +#endif // X86_64 && !NO_ASM + +struct aead_aes_gcm_siv_ctx { + union { + double align; + AES_KEY ks; + } ks; + block128_f kgk_block; + unsigned is_256:1; +}; + +static int aead_aes_gcm_siv_init(EVP_AEAD_CTX *ctx, const uint8_t *key, + size_t key_len, size_t tag_len) { + const size_t key_bits = key_len * 8; + + if (key_bits != 128 && key_bits != 256) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); + return 0; // EVP_AEAD_CTX_init should catch this. + } + + if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { + tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN; + } + if (tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE); + return 0; + } + + struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = + OPENSSL_malloc(sizeof(struct aead_aes_gcm_siv_ctx)); + if (gcm_siv_ctx == NULL) { + return 0; + } + OPENSSL_memset(gcm_siv_ctx, 0, sizeof(struct aead_aes_gcm_siv_ctx)); + + aes_ctr_set_key(&gcm_siv_ctx->ks.ks, NULL, &gcm_siv_ctx->kgk_block, key, + key_len); + gcm_siv_ctx->is_256 = (key_len == 32); + ctx->aead_state = gcm_siv_ctx; + ctx->tag_len = tag_len; + + return 1; +} + +static void aead_aes_gcm_siv_cleanup(EVP_AEAD_CTX *ctx) { + OPENSSL_free(ctx->aead_state); +} + +// gcm_siv_crypt encrypts (or decrypts—it's the same thing) |in_len| bytes from +// |in| to |out|, using the block function |enc_block| with |key| in counter +// mode, starting at |initial_counter|. This differs from the traditional +// counter mode code in that the counter is handled little-endian, only the +// first four bytes are used and the GCM-SIV tweak to the final byte is +// applied. The |in| and |out| pointers may be equal but otherwise must not +// alias. +static void gcm_siv_crypt(uint8_t *out, const uint8_t *in, size_t in_len, + const uint8_t initial_counter[AES_BLOCK_SIZE], + block128_f enc_block, const AES_KEY *key) { + union { + uint32_t w[4]; + uint8_t c[16]; + } counter; + + OPENSSL_memcpy(counter.c, initial_counter, AES_BLOCK_SIZE); + counter.c[15] |= 0x80; + + for (size_t done = 0; done < in_len;) { + uint8_t keystream[AES_BLOCK_SIZE]; + enc_block(counter.c, keystream, key); + counter.w[0]++; + + size_t todo = AES_BLOCK_SIZE; + if (in_len - done < todo) { + todo = in_len - done; + } + + for (size_t i = 0; i < todo; i++) { + out[done + i] = keystream[i] ^ in[done + i]; + } + + done += todo; + } +} + +// gcm_siv_polyval evaluates POLYVAL at |auth_key| on the given plaintext and +// AD. The result is written to |out_tag|. 
+static void gcm_siv_polyval( + uint8_t out_tag[16], const uint8_t *in, size_t in_len, const uint8_t *ad, + size_t ad_len, const uint8_t auth_key[16], + const uint8_t nonce[EVP_AEAD_AES_GCM_SIV_NONCE_LEN]) { + struct polyval_ctx polyval_ctx; + CRYPTO_POLYVAL_init(&polyval_ctx, auth_key); + + CRYPTO_POLYVAL_update_blocks(&polyval_ctx, ad, ad_len & ~15); + + uint8_t scratch[16]; + if (ad_len & 15) { + OPENSSL_memset(scratch, 0, sizeof(scratch)); + OPENSSL_memcpy(scratch, &ad[ad_len & ~15], ad_len & 15); + CRYPTO_POLYVAL_update_blocks(&polyval_ctx, scratch, sizeof(scratch)); + } + + CRYPTO_POLYVAL_update_blocks(&polyval_ctx, in, in_len & ~15); + if (in_len & 15) { + OPENSSL_memset(scratch, 0, sizeof(scratch)); + OPENSSL_memcpy(scratch, &in[in_len & ~15], in_len & 15); + CRYPTO_POLYVAL_update_blocks(&polyval_ctx, scratch, sizeof(scratch)); + } + + union { + uint8_t c[16]; + struct { + uint64_t ad; + uint64_t in; + } bitlens; + } length_block; + + length_block.bitlens.ad = ad_len * 8; + length_block.bitlens.in = in_len * 8; + CRYPTO_POLYVAL_update_blocks(&polyval_ctx, length_block.c, + sizeof(length_block)); + + CRYPTO_POLYVAL_finish(&polyval_ctx, out_tag); + for (size_t i = 0; i < EVP_AEAD_AES_GCM_SIV_NONCE_LEN; i++) { + out_tag[i] ^= nonce[i]; + } + out_tag[15] &= 0x7f; +} + +// gcm_siv_record_keys contains the keys used for a specific GCM-SIV record. +struct gcm_siv_record_keys { + uint8_t auth_key[16]; + union { + double align; + AES_KEY ks; + } enc_key; + block128_f enc_block; +}; + +// gcm_siv_keys calculates the keys for a specific GCM-SIV record with the +// given nonce and writes them to |*out_keys|. +static void gcm_siv_keys( + const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx, + struct gcm_siv_record_keys *out_keys, + const uint8_t nonce[EVP_AEAD_AES_GCM_SIV_NONCE_LEN]) { + const AES_KEY *const key = &gcm_siv_ctx->ks.ks; + uint8_t key_material[(128 /* POLYVAL key */ + 256 /* max AES key */) / 8]; + const size_t blocks_needed = gcm_siv_ctx->is_256 ? 6 : 4; + + uint8_t counter[AES_BLOCK_SIZE]; + OPENSSL_memset(counter, 0, AES_BLOCK_SIZE - EVP_AEAD_AES_GCM_SIV_NONCE_LEN); + OPENSSL_memcpy(counter + AES_BLOCK_SIZE - EVP_AEAD_AES_GCM_SIV_NONCE_LEN, + nonce, EVP_AEAD_AES_GCM_SIV_NONCE_LEN); + for (size_t i = 0; i < blocks_needed; i++) { + counter[0] = i; + + uint8_t ciphertext[AES_BLOCK_SIZE]; + gcm_siv_ctx->kgk_block(counter, ciphertext, key); + OPENSSL_memcpy(&key_material[i * 8], ciphertext, 8); + } + + OPENSSL_memcpy(out_keys->auth_key, key_material, 16); + aes_ctr_set_key(&out_keys->enc_key.ks, NULL, &out_keys->enc_block, + key_material + 16, gcm_siv_ctx->is_256 ? 
32 : 16); +} + +static int aead_aes_gcm_siv_seal_scatter( + const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, + size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, + size_t extra_in_len, const uint8_t *ad, size_t ad_len) { + const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = ctx->aead_state; + const uint64_t in_len_64 = in_len; + const uint64_t ad_len_64 = ad_len; + + if (in_len + EVP_AEAD_AES_GCM_SIV_TAG_LEN < in_len || + in_len_64 > (UINT64_C(1) << 36) || + ad_len_64 >= (UINT64_C(1) << 61)) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); + return 0; + } + + if (max_out_tag_len < EVP_AEAD_AES_GCM_SIV_TAG_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); + return 0; + } + + if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); + return 0; + } + + struct gcm_siv_record_keys keys; + gcm_siv_keys(gcm_siv_ctx, &keys, nonce); + + uint8_t tag[16]; + gcm_siv_polyval(tag, in, in_len, ad, ad_len, keys.auth_key, nonce); + keys.enc_block(tag, tag, &keys.enc_key.ks); + + gcm_siv_crypt(out, in, in_len, tag, keys.enc_block, &keys.enc_key.ks); + + OPENSSL_memcpy(out_tag, tag, EVP_AEAD_AES_GCM_SIV_TAG_LEN); + *out_tag_len = EVP_AEAD_AES_GCM_SIV_TAG_LEN; + + return 1; +} + +static int aead_aes_gcm_siv_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *in_tag, + size_t in_tag_len, const uint8_t *ad, + size_t ad_len) { + const uint64_t ad_len_64 = ad_len; + if (ad_len_64 >= (UINT64_C(1) << 61)) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); + return 0; + } + + const uint64_t in_len_64 = in_len; + if (in_tag_len != EVP_AEAD_AES_GCM_SIV_TAG_LEN || + in_len_64 > (UINT64_C(1) << 36) + AES_BLOCK_SIZE) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + if (nonce_len != EVP_AEAD_AES_GCM_SIV_NONCE_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); + return 0; + } + + const struct aead_aes_gcm_siv_ctx *gcm_siv_ctx = ctx->aead_state; + + struct gcm_siv_record_keys keys; + gcm_siv_keys(gcm_siv_ctx, &keys, nonce); + + gcm_siv_crypt(out, in, in_len, in_tag, keys.enc_block, &keys.enc_key.ks); + + uint8_t expected_tag[EVP_AEAD_AES_GCM_SIV_TAG_LEN]; + gcm_siv_polyval(expected_tag, out, in_len, ad, ad_len, keys.auth_key, nonce); + keys.enc_block(expected_tag, expected_tag, &keys.enc_key.ks); + + if (CRYPTO_memcmp(expected_tag, in_tag, sizeof(expected_tag)) != 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + return 1; +} + +static const EVP_AEAD aead_aes_128_gcm_siv = { + 16, // key length + EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in + + aead_aes_gcm_siv_init, + NULL /* init_with_direction */, + aead_aes_gcm_siv_cleanup, + NULL /* open */, + aead_aes_gcm_siv_seal_scatter, + aead_aes_gcm_siv_open_gather, + NULL /* get_iv */, + NULL /* tag_len */, +}; + +static const EVP_AEAD aead_aes_256_gcm_siv = { + 32, // key length + EVP_AEAD_AES_GCM_SIV_NONCE_LEN, // nonce length + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // overhead + EVP_AEAD_AES_GCM_SIV_TAG_LEN, // max tag length + 0, // seal_scatter_supports_extra_in + + aead_aes_gcm_siv_init, + NULL /* init_with_direction */, + aead_aes_gcm_siv_cleanup, + NULL /* open */, + aead_aes_gcm_siv_seal_scatter, + 
aead_aes_gcm_siv_open_gather, + NULL /* get_iv */, + NULL /* tag_len */, +}; + +#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) + +static char avx_aesni_capable(void) { + const uint32_t ecx = OPENSSL_ia32cap_P[1]; + + return (ecx & (1 << (57 - 32))) != 0 /* AESNI */ && + (ecx & (1 << 28)) != 0 /* AVX */; +} + +const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void) { + if (avx_aesni_capable()) { + return &aead_aes_128_gcm_siv_asm; + } + return &aead_aes_128_gcm_siv; +} + +const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) { + if (avx_aesni_capable()) { + return &aead_aes_256_gcm_siv_asm; + } + return &aead_aes_256_gcm_siv; +} + +#else + +const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void) { + return &aead_aes_128_gcm_siv; +} + +const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void) { + return &aead_aes_256_gcm_siv; +} + +#endif // X86_64 && !NO_ASM diff --git a/Sources/BoringSSL/crypto/cipher_extra/e_chacha20poly1305.c b/Sources/BoringSSL/crypto/cipher_extra/e_chacha20poly1305.c new file mode 100644 index 000000000..64ab457e1 --- /dev/null +++ b/Sources/BoringSSL/crypto/cipher_extra/e_chacha20poly1305.c @@ -0,0 +1,326 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "../fipsmodule/cipher/internal.h" +#include "../internal.h" + + +#define POLY1305_TAG_LEN 16 + +struct aead_chacha20_poly1305_ctx { + uint8_t key[32]; +}; + +// For convenience (the x86_64 calling convention allows only six parameters in +// registers), the final parameter for the assembly functions is both an input +// and output parameter. +union open_data { + struct { + alignas(16) uint8_t key[32]; + uint32_t counter; + uint8_t nonce[12]; + } in; + struct { + uint8_t tag[POLY1305_TAG_LEN]; + } out; +}; + +union seal_data { + struct { + alignas(16) uint8_t key[32]; + uint32_t counter; + uint8_t nonce[12]; + const uint8_t *extra_ciphertext; + size_t extra_ciphertext_len; + } in; + struct { + uint8_t tag[POLY1305_TAG_LEN]; + } out; +}; + +#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) && \ + !defined(OPENSSL_WINDOWS) +static int asm_capable(void) { + const int sse41_capable = (OPENSSL_ia32cap_P[1] & (1 << 19)) != 0; + return sse41_capable; +} + +OPENSSL_COMPILE_ASSERT(sizeof(union open_data) == 48, wrong_open_data_size); +OPENSSL_COMPILE_ASSERT(sizeof(union seal_data) == 48 + 8 + 8, + wrong_seal_data_size); + +// chacha20_poly1305_open is defined in chacha20_poly1305_x86_64.pl. It decrypts +// |plaintext_len| bytes from |ciphertext| and writes them to |out_plaintext|. +// Additional input parameters are passed in |aead_data->in|. On exit, it will +// write calculated tag value to |aead_data->out.tag|, which the caller must +// check. 
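+//
+// The |in| and |out| members of these unions deliberately alias: the assembly
+// reads the key, counter and nonce from |aead_data->in| and reuses the same
+// memory to return the Poly1305 tag in |aead_data->out.tag|. The
+// OPENSSL_COMPILE_ASSERTs above pin the union sizes (48 and 64 bytes) that the
+// assembly was written against.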
+extern void chacha20_poly1305_open(uint8_t *out_plaintext,
+ const uint8_t *ciphertext,
+ size_t plaintext_len, const uint8_t *ad,
+ size_t ad_len, union open_data *aead_data);
+
+// chacha20_poly1305_seal is defined in chacha20_poly1305_x86_64.pl. It encrypts
+// |plaintext_len| bytes from |plaintext| and writes them to |out_ciphertext|.
+// Additional input parameters are passed in |aead_data->in|. The calculated tag
+// value is over the computed ciphertext concatenated with |extra_ciphertext|
+// and written to |aead_data->out.tag|.
+extern void chacha20_poly1305_seal(uint8_t *out_ciphertext,
+ const uint8_t *plaintext,
+ size_t plaintext_len, const uint8_t *ad,
+ size_t ad_len, union seal_data *aead_data);
+#else
+static int asm_capable(void) { return 0; }
+
+
+static void chacha20_poly1305_open(uint8_t *out_plaintext,
+ const uint8_t *ciphertext,
+ size_t plaintext_len, const uint8_t *ad,
+ size_t ad_len, union open_data *aead_data) {}
+
+static void chacha20_poly1305_seal(uint8_t *out_ciphertext,
+ const uint8_t *plaintext,
+ size_t plaintext_len, const uint8_t *ad,
+ size_t ad_len, union seal_data *aead_data) {}
+#endif
+
+static int aead_chacha20_poly1305_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
+ size_t key_len, size_t tag_len) {
+ struct aead_chacha20_poly1305_ctx *c20_ctx;
+
+ if (tag_len == 0) {
+ tag_len = POLY1305_TAG_LEN;
+ }
+
+ if (tag_len > POLY1305_TAG_LEN) {
+ OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
+ return 0;
+ }
+
+ if (key_len != sizeof(c20_ctx->key)) {
+ return 0; // internal error - EVP_AEAD_CTX_init should catch this.
+ }
+
+ c20_ctx = OPENSSL_malloc(sizeof(struct aead_chacha20_poly1305_ctx));
+ if (c20_ctx == NULL) {
+ return 0;
+ }
+
+ OPENSSL_memcpy(c20_ctx->key, key, key_len);
+ ctx->aead_state = c20_ctx;
+ ctx->tag_len = tag_len;
+
+ return 1;
+}
+
+static void aead_chacha20_poly1305_cleanup(EVP_AEAD_CTX *ctx) {
+ OPENSSL_free(ctx->aead_state);
+}
+
+static void poly1305_update_length(poly1305_state *poly1305, size_t data_len) {
+ uint8_t length_bytes[8];
+
+ for (unsigned i = 0; i < sizeof(length_bytes); i++) {
+ length_bytes[i] = data_len;
+ data_len >>= 8;
+ }
+
+ CRYPTO_poly1305_update(poly1305, length_bytes, sizeof(length_bytes));
+}
+
+// calc_tag fills |tag| with the authentication tag for the given inputs.
+static void calc_tag(uint8_t tag[POLY1305_TAG_LEN],
+ const struct aead_chacha20_poly1305_ctx *c20_ctx,
+ const uint8_t nonce[12], const uint8_t *ad, size_t ad_len,
+ const uint8_t *ciphertext, size_t ciphertext_len,
+ const uint8_t *ciphertext_extra,
+ size_t ciphertext_extra_len) {
+ alignas(16) uint8_t poly1305_key[32];
+ OPENSSL_memset(poly1305_key, 0, sizeof(poly1305_key));
+ CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key),
+ c20_ctx->key, nonce, 0);
+
+ static const uint8_t padding[16] = { 0 }; // Padding is all zeros.
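+
+ // The MAC input follows the AEAD construction from RFC 7539: the AD padded
+ // with zeros to a 16-byte boundary, then the ciphertext (including any extra
+ // ciphertext) padded the same way, and finally the AD and ciphertext lengths
+ // as 64-bit little-endian values.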
+ poly1305_state ctx;
+ CRYPTO_poly1305_init(&ctx, poly1305_key);
+ CRYPTO_poly1305_update(&ctx, ad, ad_len);
+ if (ad_len % 16 != 0) {
+ CRYPTO_poly1305_update(&ctx, padding, sizeof(padding) - (ad_len % 16));
+ }
+ CRYPTO_poly1305_update(&ctx, ciphertext, ciphertext_len);
+ CRYPTO_poly1305_update(&ctx, ciphertext_extra, ciphertext_extra_len);
+ const size_t ciphertext_total = ciphertext_len + ciphertext_extra_len;
+ if (ciphertext_total % 16 != 0) {
+ CRYPTO_poly1305_update(&ctx, padding,
+ sizeof(padding) - (ciphertext_total % 16));
+ }
+ poly1305_update_length(&ctx, ad_len);
+ poly1305_update_length(&ctx, ciphertext_total);
+ CRYPTO_poly1305_finish(&ctx, tag);
+}
+
+static int aead_chacha20_poly1305_seal_scatter(
+ const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag,
+ size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce,
+ size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in,
+ size_t extra_in_len, const uint8_t *ad, size_t ad_len) {
+ const struct aead_chacha20_poly1305_ctx *c20_ctx = ctx->aead_state;
+
+ if (extra_in_len + ctx->tag_len < ctx->tag_len) {
+ OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
+ return 0;
+ }
+ if (max_out_tag_len < ctx->tag_len + extra_in_len) {
+ OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
+ return 0;
+ }
+ if (nonce_len != 12) {
+ OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE);
+ return 0;
+ }
+
+ // |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow
+ // individual operations that work on more than 256GB at a time.
+ // |in_len_64| is needed because, on 32-bit platforms, size_t is only
+ // 32-bits and this produces a warning because it's always false.
+ // Casting to uint64_t inside the conditional is not sufficient to stop
+ // the warning.
+ const uint64_t in_len_64 = in_len;
+ if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) {
+ OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE);
+ return 0;
+ }
+
+ if (max_out_tag_len < ctx->tag_len) {
+ OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL);
+ return 0;
+ }
+
+ // If the extra input is given, it is expected to be very short and so is
+ // encrypted byte-by-byte first.
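+ // The bulk input below is encrypted with block counters 1, 2, ..., so the
+ // extra input continues the same keystream: it starts at counter
+ // 1 + in_len / 64 and at offset in_len % 64 within that block.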
+ if (extra_in_len) { + static const size_t kChaChaBlockSize = 64; + uint32_t block_counter = 1 + (in_len / kChaChaBlockSize); + size_t offset = in_len % kChaChaBlockSize; + uint8_t block[64 /* kChaChaBlockSize */]; + + for (size_t done = 0; done < extra_in_len; block_counter++) { + memset(block, 0, sizeof(block)); + CRYPTO_chacha_20(block, block, sizeof(block), c20_ctx->key, nonce, + block_counter); + for (size_t i = offset; i < sizeof(block) && done < extra_in_len; + i++, done++) { + out_tag[done] = extra_in[done] ^ block[i]; + } + offset = 0; + } + } + + union seal_data data; + if (asm_capable()) { + OPENSSL_memcpy(data.in.key, c20_ctx->key, 32); + data.in.counter = 0; + OPENSSL_memcpy(data.in.nonce, nonce, 12); + data.in.extra_ciphertext = out_tag; + data.in.extra_ciphertext_len = extra_in_len; + chacha20_poly1305_seal(out, in, in_len, ad, ad_len, &data); + } else { + CRYPTO_chacha_20(out, in, in_len, c20_ctx->key, nonce, 1); + calc_tag(data.out.tag, c20_ctx, nonce, ad, ad_len, out, in_len, out_tag, + extra_in_len); + } + + OPENSSL_memcpy(out_tag + extra_in_len, data.out.tag, ctx->tag_len); + *out_tag_len = extra_in_len + ctx->tag_len; + return 1; +} + +static int aead_chacha20_poly1305_open_gather( + const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, + size_t in_tag_len, const uint8_t *ad, size_t ad_len) { + const struct aead_chacha20_poly1305_ctx *c20_ctx = ctx->aead_state; + + if (nonce_len != 12) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); + return 0; + } + + if (in_tag_len != ctx->tag_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + // |CRYPTO_chacha_20| uses a 32-bit block counter. Therefore we disallow + // individual operations that work on more than 256GB at a time. + // |in_len_64| is needed because, on 32-bit platforms, size_t is only + // 32-bits and this produces a warning because it's always false. + // Casting to uint64_t inside the conditional is not sufficient to stop + // the warning. 
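+ // Counter zero is consumed deriving the Poly1305 key, so at most 2^32 - 1
+ // 64-byte blocks of payload are available, hence the |- 64| in the bound
+ // below.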
+ const uint64_t in_len_64 = in_len; + if (in_len_64 >= (UINT64_C(1) << 32) * 64 - 64) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); + return 0; + } + + union open_data data; + if (asm_capable()) { + OPENSSL_memcpy(data.in.key, c20_ctx->key, 32); + data.in.counter = 0; + OPENSSL_memcpy(data.in.nonce, nonce, 12); + chacha20_poly1305_open(out, in, in_len, ad, ad_len, &data); + } else { + calc_tag(data.out.tag, c20_ctx, nonce, ad, ad_len, in, in_len, NULL, 0); + CRYPTO_chacha_20(out, in, in_len, c20_ctx->key, nonce, 1); + } + + if (CRYPTO_memcmp(data.out.tag, in_tag, ctx->tag_len) != 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + return 1; +} + +static const EVP_AEAD aead_chacha20_poly1305 = { + 32, // key len + 12, // nonce len + POLY1305_TAG_LEN, // overhead + POLY1305_TAG_LEN, // max tag length + 1, // seal_scatter_supports_extra_in + + aead_chacha20_poly1305_init, + NULL, // init_with_direction + aead_chacha20_poly1305_cleanup, + NULL /* open */, + aead_chacha20_poly1305_seal_scatter, + aead_chacha20_poly1305_open_gather, + NULL, // get_iv + NULL, // tag_len +}; + +const EVP_AEAD *EVP_aead_chacha20_poly1305(void) { + return &aead_chacha20_poly1305; +} diff --git a/Sources/BoringSSL/crypto/cipher/e_null.c b/Sources/BoringSSL/crypto/cipher_extra/e_null.c similarity index 99% rename from Sources/BoringSSL/crypto/cipher/e_null.c rename to Sources/BoringSSL/crypto/cipher_extra/e_null.c index 9f8930850..f5fe8fb7d 100644 --- a/Sources/BoringSSL/crypto/cipher/e_null.c +++ b/Sources/BoringSSL/crypto/cipher_extra/e_null.c @@ -61,7 +61,6 @@ #include #include "../internal.h" -#include "internal.h" static int null_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, diff --git a/Sources/BoringSSL/crypto/cipher/e_rc2.c b/Sources/BoringSSL/crypto/cipher_extra/e_rc2.c similarity index 93% rename from Sources/BoringSSL/crypto/cipher/e_rc2.c rename to Sources/BoringSSL/crypto/cipher_extra/e_rc2.c index e1b4301e6..0b2caf555 100644 --- a/Sources/BoringSSL/crypto/cipher/e_rc2.c +++ b/Sources/BoringSSL/crypto/cipher_extra/e_rc2.c @@ -57,8 +57,6 @@ #include #include -#include "internal.h" - #define c2l(c, l) \ do { \ @@ -75,18 +73,25 @@ switch (n) { \ case 8: \ (l2) = ((uint32_t)(*(--(c)))) << 24L; \ + OPENSSL_FALLTHROUGH; \ case 7: \ (l2) |= ((uint32_t)(*(--(c)))) << 16L; \ + OPENSSL_FALLTHROUGH; \ case 6: \ (l2) |= ((uint32_t)(*(--(c)))) << 8L; \ + OPENSSL_FALLTHROUGH; \ case 5: \ (l2) |= ((uint32_t)(*(--(c)))); \ + OPENSSL_FALLTHROUGH; \ case 4: \ (l1) = ((uint32_t)(*(--(c)))) << 24L; \ + OPENSSL_FALLTHROUGH; \ case 3: \ (l1) |= ((uint32_t)(*(--(c)))) << 16L; \ + OPENSSL_FALLTHROUGH; \ case 2: \ (l1) |= ((uint32_t)(*(--(c)))) << 8L; \ + OPENSSL_FALLTHROUGH; \ case 1: \ (l1) |= ((uint32_t)(*(--(c)))); \ } \ @@ -106,18 +111,25 @@ switch (n) { \ case 8: \ *(--(c)) = (uint8_t)(((l2) >> 24L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 7: \ *(--(c)) = (uint8_t)(((l2) >> 16L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 6: \ *(--(c)) = (uint8_t)(((l2) >> 8L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 5: \ *(--(c)) = (uint8_t)(((l2)) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 4: \ *(--(c)) = (uint8_t)(((l1) >> 24L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 3: \ *(--(c)) = (uint8_t)(((l1) >> 16L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 2: \ *(--(c)) = (uint8_t)(((l1) >> 8L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 1: \ *(--(c)) = (uint8_t)(((l1)) & 0xff); \ } \ @@ -319,7 +331,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { unsigned int c, d; k = (uint8_t 
*)&key->data[0]; - *k = 0; /* for if there is a zero length key */ + *k = 0; // for if there is a zero length key if (len > 128) { len = 128; @@ -335,7 +347,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { k[i] = data[i]; } - /* expand table */ + // expand table d = k[len - 1]; j = 0; for (i = len; i < 128; i++, j++) { @@ -343,7 +355,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { k[i] = d; } - /* hmm.... key reduction to 'bits' bits */ + // hmm.... key reduction to 'bits' bits j = (bits + 7) >> 3; i = 128 - j; @@ -356,7 +368,7 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { k[i] = d; } - /* copy from bytes into uint16_t's */ + // copy from bytes into uint16_t's ki = &(key->data[63]); for (i = 127; i >= 0; i -= 2) { *(ki--) = ((k[i] << 8) | k[i - 1]) & 0xffff; @@ -364,8 +376,8 @@ static void RC2_set_key(RC2_KEY *key, int len, const uint8_t *data, int bits) { } typedef struct { - int key_bits; /* effective key bits */ - RC2_KEY ks; /* key schedule */ + int key_bits; // effective key bits + RC2_KEY ks; // key schedule } EVP_RC2_KEY; static int rc2_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, @@ -401,8 +413,8 @@ static int rc2_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) { key->key_bits = EVP_CIPHER_CTX_key_length(ctx) * 8; return 1; case EVP_CTRL_SET_RC2_KEY_BITS: - /* Should be overridden by later call to |EVP_CTRL_INIT|, but - * people call it, so it may as well work. */ + // Should be overridden by later call to |EVP_CTRL_INIT|, but + // people call it, so it may as well work. key->key_bits = arg; return 1; diff --git a/Sources/BoringSSL/crypto/cipher/e_rc4.c b/Sources/BoringSSL/crypto/cipher_extra/e_rc4.c similarity index 100% rename from Sources/BoringSSL/crypto/cipher/e_rc4.c rename to Sources/BoringSSL/crypto/cipher_extra/e_rc4.c diff --git a/Sources/BoringSSL/crypto/cipher/e_ssl3.c b/Sources/BoringSSL/crypto/cipher_extra/e_ssl3.c similarity index 65% rename from Sources/BoringSSL/crypto/cipher/e_ssl3.c rename to Sources/BoringSSL/crypto/cipher_extra/e_ssl3.c index f1dad2ba1..61f25cad1 100644 --- a/Sources/BoringSSL/crypto/cipher/e_ssl3.c +++ b/Sources/BoringSSL/crypto/cipher_extra/e_ssl3.c @@ -26,6 +26,7 @@ #include "internal.h" #include "../internal.h" +#include "../fipsmodule/cipher/internal.h" typedef struct { @@ -39,8 +40,8 @@ static int ssl3_mac(AEAD_SSL3_CTX *ssl3_ctx, uint8_t *out, unsigned *out_len, size_t md_size = EVP_MD_CTX_size(&ssl3_ctx->md_ctx); size_t pad_len = (md_size == 20) ? 40 : 48; - /* To allow for CBC mode which changes cipher length, |ad| doesn't include the - * length for legacy ciphers. */ + // To allow for CBC mode which changes cipher length, |ad| doesn't include the + // length for legacy ciphers. 
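+ // The two-byte, big-endian record length is therefore appended to the MAC
+ // input here instead.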
uint8_t ad_extra[2]; ad_extra[0] = (uint8_t)(in_len >> 8); ad_extra[1] = (uint8_t)(in_len & 0xff); @@ -122,28 +123,48 @@ static int aead_ssl3_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, return 1; } -static int aead_ssl3_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { +static size_t aead_ssl3_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len, + const size_t extra_in_len) { + assert(extra_in_len == 0); + const AEAD_SSL3_CTX *ssl3_ctx = (AEAD_SSL3_CTX*)ctx->aead_state; + + const size_t digest_len = EVP_MD_CTX_size(&ssl3_ctx->md_ctx); + if (EVP_CIPHER_CTX_mode(&ssl3_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE) { + // The NULL cipher. + return digest_len; + } + + const size_t block_size = EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx); + // An overflow of |in_len + digest_len| doesn't affect the result mod + // |block_size|, provided that |block_size| is a smaller power of two. + assert(block_size != 0 && (block_size & (block_size - 1)) == 0); + const size_t pad_len = block_size - ((in_len + digest_len) % block_size); + return digest_len + pad_len; +} + +static int aead_ssl3_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, + uint8_t *out_tag, size_t *out_tag_len, + const size_t max_out_tag_len, + const uint8_t *nonce, const size_t nonce_len, + const uint8_t *in, const size_t in_len, + const uint8_t *extra_in, + const size_t extra_in_len, const uint8_t *ad, + const size_t ad_len) { AEAD_SSL3_CTX *ssl3_ctx = (AEAD_SSL3_CTX *)ctx->aead_state; - size_t total = 0; if (!ssl3_ctx->cipher_ctx.encrypt) { - /* Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. */ + // Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); return 0; } - if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len || - in_len > INT_MAX) { - /* EVP_CIPHER takes int as input. */ + if (in_len > INT_MAX) { + // EVP_CIPHER takes int as input. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } - if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) { + if (max_out_tag_len < aead_ssl3_tag_len(ctx, in_len, extra_in_len)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } @@ -158,52 +179,71 @@ static int aead_ssl3_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, return 0; } - /* Compute the MAC. This must be first in case the operation is being done - * in-place. */ + // Compute the MAC. This must be first in case the operation is being done + // in-place. uint8_t mac[EVP_MAX_MD_SIZE]; unsigned mac_len; if (!ssl3_mac(ssl3_ctx, mac, &mac_len, ad, ad_len, in, in_len)) { return 0; } - /* Encrypt the input. */ + // Encrypt the input. int len; if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out, &len, in, (int)in_len)) { return 0; } - total = len; - /* Feed the MAC into the cipher. */ - if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out + total, &len, mac, - (int)mac_len)) { + const size_t block_size = EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx); + + // Feed the MAC into the cipher in two steps. First complete the final partial + // block from encrypting the input and split the result between |out| and + // |out_tag|. Then encrypt the remainder. 
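+ // |early_mac_len| is how many MAC bytes are needed to fill the cipher's
+ // pending partial block. For example, with a 16-byte block and in_len == 10,
+ // the first six MAC bytes complete that block; of the resulting ciphertext
+ // block, ten bytes belong in |out| and the remaining six start |out_tag|.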
+ + size_t early_mac_len = (block_size - (in_len % block_size)) % block_size; + if (early_mac_len != 0) { + assert(len + block_size - early_mac_len == in_len); + uint8_t buf[EVP_MAX_BLOCK_LENGTH]; + int buf_len; + if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, buf, &buf_len, mac, + (int)early_mac_len)) { + return 0; + } + assert(buf_len == (int)block_size); + OPENSSL_memcpy(out + len, buf, block_size - early_mac_len); + OPENSSL_memcpy(out_tag, buf + block_size - early_mac_len, early_mac_len); + } + size_t tag_len = early_mac_len; + + if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out_tag + tag_len, &len, + mac + tag_len, mac_len - tag_len)) { return 0; } - total += len; + tag_len += len; - unsigned block_size = EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx); if (block_size > 1) { assert(block_size <= 256); assert(EVP_CIPHER_CTX_mode(&ssl3_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE); - /* Compute padding and feed that into the cipher. */ + // Compute padding and feed that into the cipher. uint8_t padding[256]; - unsigned padding_len = block_size - ((in_len + mac_len) % block_size); + size_t padding_len = block_size - ((in_len + mac_len) % block_size); OPENSSL_memset(padding, 0, padding_len - 1); padding[padding_len - 1] = padding_len - 1; - if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out + total, &len, padding, + if (!EVP_EncryptUpdate(&ssl3_ctx->cipher_ctx, out_tag + tag_len, &len, padding, (int)padding_len)) { return 0; } - total += len; + tag_len += len; } - if (!EVP_EncryptFinal_ex(&ssl3_ctx->cipher_ctx, out + total, &len)) { + if (!EVP_EncryptFinal_ex(&ssl3_ctx->cipher_ctx, out_tag + tag_len, &len)) { return 0; } - total += len; + tag_len += len; + assert(tag_len == aead_ssl3_tag_len(ctx, in_len, extra_in_len)); - *out_len = total; + *out_tag_len = tag_len; return 1; } @@ -215,7 +255,7 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, AEAD_SSL3_CTX *ssl3_ctx = (AEAD_SSL3_CTX *)ctx->aead_state; if (ssl3_ctx->cipher_ctx.encrypt) { - /* Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. */ + // Unlike a normal AEAD, an SSL3 AEAD may only be used in one direction. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); return 0; } @@ -227,8 +267,8 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, } if (max_out_len < in_len) { - /* This requires that the caller provide space for the MAC, even though it - * will always be removed on return. */ + // This requires that the caller provide space for the MAC, even though it + // will always be removed on return. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } @@ -244,12 +284,12 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, } if (in_len > INT_MAX) { - /* EVP_CIPHER takes int as input. */ + // EVP_CIPHER takes int as input. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } - /* Decrypt to get the plaintext + MAC + padding. */ + // Decrypt to get the plaintext + MAC + padding. size_t total = 0; int len; if (!EVP_DecryptUpdate(&ssl3_ctx->cipher_ctx, out, &len, in, (int)in_len)) { @@ -262,9 +302,9 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, total += len; assert(total == in_len); - /* Remove CBC padding and MAC. This would normally be timing-sensitive, but - * SSLv3 CBC ciphers are already broken. Support will be removed eventually. - * https://www.openssl.org/~bodo/ssl-poodle.pdf */ + // Remove CBC padding and MAC. This would normally be timing-sensitive, but + // SSLv3 CBC ciphers are already broken. 
Support will be removed eventually. + // https://www.openssl.org/~bodo/ssl-poodle.pdf size_t data_len; if (EVP_CIPHER_CTX_mode(&ssl3_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) { unsigned padding_length = out[total - 1]; @@ -272,7 +312,7 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } - /* The padding must be minimal. */ + // The padding must be minimal. if (padding_length + 1 > EVP_CIPHER_CTX_block_size(&ssl3_ctx->cipher_ctx)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; @@ -282,7 +322,7 @@ static int aead_ssl3_open(const EVP_AEAD_CTX *ctx, uint8_t *out, data_len = total - mac_len; } - /* Compute the MAC and compare against the one in the record. */ + // Compute the MAC and compare against the one in the record. uint8_t mac[EVP_MAX_MD_SIZE]; if (!ssl3_mac(ssl3_ctx, mac, NULL, ad, ad_len, out, data_len)) { return 0; @@ -338,55 +378,71 @@ static int aead_null_sha1_ssl3_init(EVP_AEAD_CTX *ctx, const uint8_t *key, } static const EVP_AEAD aead_aes_128_cbc_sha1_ssl3 = { - SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */ - 0, /* nonce len */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH + 16 + 16, // key len (SHA1 + AES128 + IV) + 0, // nonce len + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_aes_128_cbc_sha1_ssl3_init, aead_ssl3_cleanup, - aead_ssl3_seal, aead_ssl3_open, + aead_ssl3_seal_scatter, + NULL, // open_gather aead_ssl3_get_iv, + aead_ssl3_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha1_ssl3 = { - SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */ - 0, /* nonce len */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH + 32 + 16, // key len (SHA1 + AES256 + IV) + 0, // nonce len + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_aes_256_cbc_sha1_ssl3_init, aead_ssl3_cleanup, - aead_ssl3_seal, aead_ssl3_open, + aead_ssl3_seal_scatter, + NULL, // open_gather aead_ssl3_get_iv, + aead_ssl3_tag_len, }; static const EVP_AEAD aead_des_ede3_cbc_sha1_ssl3 = { - SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */ - 0, /* nonce len */ - 8 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH + 24 + 8, // key len (SHA1 + 3DES + IV) + 0, // nonce len + 8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_des_ede3_cbc_sha1_ssl3_init, aead_ssl3_cleanup, - aead_ssl3_seal, aead_ssl3_open, + aead_ssl3_seal_scatter, + NULL, // open_gather aead_ssl3_get_iv, + aead_ssl3_tag_len, }; static const EVP_AEAD aead_null_sha1_ssl3 = { - SHA_DIGEST_LENGTH, /* key len */ - 0, /* nonce len */ - SHA_DIGEST_LENGTH, /* overhead (SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH, // key len + 0, // nonce len + SHA_DIGEST_LENGTH, // overhead (SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_null_sha1_ssl3_init, aead_ssl3_cleanup, - aead_ssl3_seal, aead_ssl3_open, - NULL, /* get_iv */ + aead_ssl3_seal_scatter, + NULL, // 
open_gather + NULL, // get_iv + aead_ssl3_tag_len, }; const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_ssl3(void) { diff --git a/Sources/BoringSSL/crypto/cipher/e_tls.c b/Sources/BoringSSL/crypto/cipher_extra/e_tls.c similarity index 61% rename from Sources/BoringSSL/crypto/cipher/e_tls.c rename to Sources/BoringSSL/crypto/cipher_extra/e_tls.c index 7d9bbeea5..72754c0f1 100644 --- a/Sources/BoringSSL/crypto/cipher/e_tls.c +++ b/Sources/BoringSSL/crypto/cipher_extra/e_tls.c @@ -25,6 +25,7 @@ #include #include +#include "../fipsmodule/cipher/internal.h" #include "../internal.h" #include "internal.h" @@ -32,12 +33,12 @@ typedef struct { EVP_CIPHER_CTX cipher_ctx; HMAC_CTX hmac_ctx; - /* mac_key is the portion of the key used for the MAC. It is retained - * separately for the constant-time CBC code. */ + // mac_key is the portion of the key used for the MAC. It is retained + // separately for the constant-time CBC code. uint8_t mac_key[EVP_MAX_MD_SIZE]; uint8_t mac_key_len; - /* implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit - * IV. */ + // implicit_iv is one iff this is a pre-TLS-1.1 CBC cipher without an explicit + // IV. char implicit_iv; } AEAD_TLS_CTX; @@ -47,7 +48,6 @@ static void aead_tls_cleanup(EVP_AEAD_CTX *ctx) { AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state; EVP_CIPHER_CTX_cleanup(&tls_ctx->cipher_ctx); HMAC_CTX_cleanup(&tls_ctx->hmac_ctx); - OPENSSL_cleanse(&tls_ctx->mac_key, sizeof(tls_ctx->mac_key)); OPENSSL_free(tls_ctx); ctx->aead_state = NULL; } @@ -98,28 +98,48 @@ static int aead_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, size_t key_len, return 1; } -static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, - const uint8_t *ad, size_t ad_len) { +static size_t aead_tls_tag_len(const EVP_AEAD_CTX *ctx, const size_t in_len, + const size_t extra_in_len) { + assert(extra_in_len == 0); + AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state; + + const size_t hmac_len = HMAC_size(&tls_ctx->hmac_ctx); + if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE) { + // The NULL cipher. + return hmac_len; + } + + const size_t block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx); + // An overflow of |in_len + hmac_len| doesn't affect the result mod + // |block_size|, provided that |block_size| is a smaller power of two. + assert(block_size != 0 && (block_size & (block_size - 1)) == 0); + const size_t pad_len = block_size - (in_len + hmac_len) % block_size; + return hmac_len + pad_len; +} + +static int aead_tls_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, + uint8_t *out_tag, size_t *out_tag_len, + const size_t max_out_tag_len, + const uint8_t *nonce, const size_t nonce_len, + const uint8_t *in, const size_t in_len, + const uint8_t *extra_in, + const size_t extra_in_len, const uint8_t *ad, + const size_t ad_len) { AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state; - size_t total = 0; if (!tls_ctx->cipher_ctx.encrypt) { - /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */ + // Unlike a normal AEAD, a TLS AEAD may only be used in one direction. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); return 0; } - if (in_len + EVP_AEAD_max_overhead(ctx->aead) < in_len || - in_len > INT_MAX) { - /* EVP_CIPHER takes int as input. */ + if (in_len > INT_MAX) { + // EVP_CIPHER takes int as input. 
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } - if (max_out_len < in_len + EVP_AEAD_max_overhead(ctx->aead)) { + if (max_out_tag_len < aead_tls_tag_len(ctx, in_len, extra_in_len)) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } @@ -134,14 +154,14 @@ static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, return 0; } - /* To allow for CBC mode which changes cipher length, |ad| doesn't include the - * length for legacy ciphers. */ + // To allow for CBC mode which changes cipher length, |ad| doesn't include the + // length for legacy ciphers. uint8_t ad_extra[2]; ad_extra[0] = (uint8_t)(in_len >> 8); ad_extra[1] = (uint8_t)(in_len & 0xff); - /* Compute the MAC. This must be first in case the operation is being done - * in-place. */ + // Compute the MAC. This must be first in case the operation is being done + // in-place. uint8_t mac[EVP_MAX_MD_SIZE]; unsigned mac_len; if (!HMAC_Init_ex(&tls_ctx->hmac_ctx, NULL, 0, NULL, NULL) || @@ -152,62 +172,80 @@ static int aead_tls_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, return 0; } - /* Configure the explicit IV. */ + // Configure the explicit IV. if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE && !tls_ctx->implicit_iv && !EVP_EncryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) { return 0; } - /* Encrypt the input. */ + // Encrypt the input. int len; - if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, - (int)in_len)) { + if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) { return 0; } - total = len; - /* Feed the MAC into the cipher. */ - if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, mac, - (int)mac_len)) { + unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx); + + // Feed the MAC into the cipher in two steps. First complete the final partial + // block from encrypting the input and split the result between |out| and + // |out_tag|. Then feed the rest. + + const size_t early_mac_len = + (block_size - (in_len % block_size) % block_size); + if (early_mac_len != 0) { + assert(len + block_size - early_mac_len == in_len); + uint8_t buf[EVP_MAX_BLOCK_LENGTH]; + int buf_len; + if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, buf, &buf_len, mac, + (int)early_mac_len)) { + return 0; + } + assert(buf_len == (int)block_size); + OPENSSL_memcpy(out + len, buf, block_size - early_mac_len); + OPENSSL_memcpy(out_tag, buf + block_size - early_mac_len, early_mac_len); + } + size_t tag_len = early_mac_len; + + if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len, + mac + tag_len, mac_len - tag_len)) { return 0; } - total += len; + tag_len += len; - unsigned block_size = EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx); if (block_size > 1) { assert(block_size <= 256); assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE); - /* Compute padding and feed that into the cipher. */ + // Compute padding and feed that into the cipher. 
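The tag-length and MAC-splitting arithmetic above is easiest to follow with concrete numbers. The sketch below is purely illustrative (hypothetical sizes: 16-byte AES-CBC blocks, a 20-byte SHA-1 MAC, a 10-byte record) and only restates the relationships used by aead_tls_tag_len and aead_tls_seal_scatter:

    #include <assert.h>
    #include <stddef.h>

    int main(void) {
      const size_t block_size = 16, hmac_len = 20, in_len = 10;

      /* aead_tls_tag_len: the tag is the MAC plus the CBC padding needed to
       * round plaintext + MAC up to whole blocks. 10 + 20 = 30, so two
       * padding bytes complete the second block. */
      const size_t pad_len = block_size - (in_len + hmac_len) % block_size;
      assert(pad_len == 2);
      assert(in_len + hmac_len + pad_len == 2 * block_size);

      /* aead_tls_seal_scatter: the first six MAC bytes top up the partial
       * plaintext block, so that ciphertext block is split between |out|
       * (10 bytes) and |out_tag| (6 bytes); the remaining 14 MAC bytes plus
       * the padding fill one more block written entirely to |out_tag|. */
      const size_t early_mac_len = block_size - in_len % block_size;
      assert(early_mac_len == 6);
      assert(hmac_len + pad_len == 22);  /* total bytes placed in |out_tag| */
      return 0;
    }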
uint8_t padding[256]; unsigned padding_len = block_size - ((in_len + mac_len) % block_size); OPENSSL_memset(padding, padding_len - 1, padding_len); - if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out + total, &len, padding, - (int)padding_len)) { + if (!EVP_EncryptUpdate(&tls_ctx->cipher_ctx, out_tag + tag_len, &len, + padding, (int)padding_len)) { return 0; } - total += len; + tag_len += len; } - if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out + total, &len)) { + if (!EVP_EncryptFinal_ex(&tls_ctx->cipher_ctx, out_tag + tag_len, &len)) { return 0; } - total += len; + assert(len == 0); // Padding is explicit. + assert(tag_len == aead_tls_tag_len(ctx, in_len, extra_in_len)); - *out_len = total; + *out_tag_len = tag_len; return 1; } -static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, - size_t *out_len, size_t max_out_len, - const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, +static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, + size_t max_out_len, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len) { AEAD_TLS_CTX *tls_ctx = (AEAD_TLS_CTX *)ctx->aead_state; if (tls_ctx->cipher_ctx.encrypt) { - /* Unlike a normal AEAD, a TLS AEAD may only be used in one direction. */ + // Unlike a normal AEAD, a TLS AEAD may only be used in one direction. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); return 0; } @@ -218,8 +256,8 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, } if (max_out_len < in_len) { - /* This requires that the caller provide space for the MAC, even though it - * will always be removed on return. */ + // This requires that the caller provide space for the MAC, even though it + // will always be removed on return. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); return 0; } @@ -235,19 +273,19 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, } if (in_len > INT_MAX) { - /* EVP_CIPHER takes int as input. */ + // EVP_CIPHER takes int as input. OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); return 0; } - /* Configure the explicit IV. */ + // Configure the explicit IV. if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE && !tls_ctx->implicit_iv && !EVP_DecryptInit_ex(&tls_ctx->cipher_ctx, NULL, NULL, NULL, nonce)) { return 0; } - /* Decrypt to get the plaintext + MAC + padding. */ + // Decrypt to get the plaintext + MAC + padding. size_t total = 0; int len; if (!EVP_DecryptUpdate(&tls_ctx->cipher_ctx, out, &len, in, (int)in_len)) { @@ -260,40 +298,41 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, total += len; assert(total == in_len); - /* Remove CBC padding. Code from here on is timing-sensitive with respect to - * |padding_ok| and |data_plus_mac_len| for CBC ciphers. */ - unsigned padding_ok, data_plus_mac_len; + // Remove CBC padding. Code from here on is timing-sensitive with respect to + // |padding_ok| and |data_plus_mac_len| for CBC ciphers. + size_t data_plus_mac_len; + crypto_word_t padding_ok; if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE) { if (!EVP_tls_cbc_remove_padding( &padding_ok, &data_plus_mac_len, out, total, EVP_CIPHER_CTX_block_size(&tls_ctx->cipher_ctx), - (unsigned)HMAC_size(&tls_ctx->hmac_ctx))) { - /* Publicly invalid. This can be rejected in non-constant time. */ + HMAC_size(&tls_ctx->hmac_ctx))) { + // Publicly invalid. This can be rejected in non-constant time. 
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } } else { - padding_ok = ~0u; + padding_ok = CONSTTIME_TRUE_W; data_plus_mac_len = total; - /* |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has - * already been checked against the MAC size at the top of the function. */ + // |data_plus_mac_len| = |total| = |in_len| at this point. |in_len| has + // already been checked against the MAC size at the top of the function. assert(data_plus_mac_len >= HMAC_size(&tls_ctx->hmac_ctx)); } - unsigned data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx); + size_t data_len = data_plus_mac_len - HMAC_size(&tls_ctx->hmac_ctx); - /* At this point, if the padding is valid, the first |data_plus_mac_len| bytes - * after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is - * still large enough to extract a MAC, but it will be irrelevant. */ + // At this point, if the padding is valid, the first |data_plus_mac_len| bytes + // after |out| are the plaintext and MAC. Otherwise, |data_plus_mac_len| is + // still large enough to extract a MAC, but it will be irrelevant. - /* To allow for CBC mode which changes cipher length, |ad| doesn't include the - * length for legacy ciphers. */ + // To allow for CBC mode which changes cipher length, |ad| doesn't include the + // length for legacy ciphers. uint8_t ad_fixed[13]; OPENSSL_memcpy(ad_fixed, ad, 11); ad_fixed[11] = (uint8_t)(data_len >> 8); ad_fixed[12] = (uint8_t)(data_len & 0xff); ad_len += 2; - /* Compute the MAC and extract the one in the record. */ + // Compute the MAC and extract the one in the record. uint8_t mac[EVP_MAX_MD_SIZE]; size_t mac_len; uint8_t record_mac_tmp[EVP_MAX_MD_SIZE]; @@ -311,8 +350,8 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, record_mac = record_mac_tmp; EVP_tls_cbc_copy_mac(record_mac, mac_len, out, data_plus_mac_len, total); } else { - /* We should support the constant-time path for all CBC-mode ciphers - * implemented. */ + // We should support the constant-time path for all CBC-mode ciphers + // implemented. assert(EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) != EVP_CIPH_CBC_MODE); unsigned mac_len_u; @@ -328,19 +367,19 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, record_mac = &out[data_len]; } - /* Perform the MAC check and the padding check in constant-time. It should be - * safe to simply perform the padding check first, but it would not be under a - * different choice of MAC location on padding failure. See - * EVP_tls_cbc_remove_padding. */ - unsigned good = constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), - 0); + // Perform the MAC check and the padding check in constant-time. It should be + // safe to simply perform the padding check first, but it would not be under a + // different choice of MAC location on padding failure. See + // EVP_tls_cbc_remove_padding. + crypto_word_t good = + constant_time_eq_int(CRYPTO_memcmp(record_mac, mac, mac_len), 0); good &= padding_ok; if (!good) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); return 0; } - /* End of timing-sensitive code. */ + // End of timing-sensitive code. 
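The open path above evaluates both verdicts unconditionally — CRYPTO_memcmp (a constant-time comparison) for the MAC and EVP_tls_cbc_remove_padding for the padding — and branches only once, on their AND. A minimal standalone sketch of that pattern, with hypothetical helpers standing in for BoringSSL's constant-time utilities:

    #include <stddef.h>
    #include <stdint.h>

    /* XOR-accumulate differences: the result is 0 iff the buffers match, and
     * the memory access pattern does not depend on the contents. */
    static uint8_t ct_memcmp8(const uint8_t *a, const uint8_t *b, size_t n) {
      uint8_t diff = 0;
      for (size_t i = 0; i < n; i++) {
        diff |= a[i] ^ b[i];
      }
      return diff;
    }

    /* All-ones if v == 0, else zero, with no data-dependent branch. */
    static uint32_t ct_mask_if_zero(uint32_t v) {
      uint32_t is_zero = (uint32_t)(((uint64_t)v - 1) >> 63);
      return 0u - is_zero;
    }

    /* Combine the two verdicts once, then branch on the public result. */
    static int record_ok(const uint8_t *mac, const uint8_t *record_mac,
                         size_t mac_len,
                         uint32_t padding_ok /* all-ones or zero */) {
      uint32_t good = ct_mask_if_zero(ct_memcmp8(mac, record_mac, mac_len));
      good &= padding_ok;
      return good != 0;
    }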
*out_len = data_len; return 1; @@ -434,133 +473,173 @@ static int aead_null_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key, } static const EVP_AEAD aead_aes_128_cbc_sha1_tls = { - SHA_DIGEST_LENGTH + 16, /* key len (SHA1 + AES128) */ - 16, /* nonce len (IV) */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH + 16, // key len (SHA1 + AES128) + 16, // nonce len (IV) + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_aes_128_cbc_sha1_tls_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - NULL, /* get_iv */ + aead_tls_seal_scatter, + NULL, // open_gather + NULL, // get_iv + aead_tls_tag_len, }; static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = { - SHA_DIGEST_LENGTH + 16 + 16, /* key len (SHA1 + AES128 + IV) */ - 0, /* nonce len */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH + 16 + 16, // key len (SHA1 + AES128 + IV) + 0, // nonce len + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_aes_128_cbc_sha1_tls_implicit_iv_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - aead_tls_get_iv, /* get_iv */ + aead_tls_seal_scatter, + NULL, // open_gather + aead_tls_get_iv, // get_iv + aead_tls_tag_len, }; static const EVP_AEAD aead_aes_128_cbc_sha256_tls = { - SHA256_DIGEST_LENGTH + 16, /* key len (SHA256 + AES128) */ - 16, /* nonce len (IV) */ - 16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */ - SHA256_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA256_DIGEST_LENGTH + 16, // key len (SHA256 + AES128) + 16, // nonce len (IV) + 16 + SHA256_DIGEST_LENGTH, // overhead (padding + SHA256) + SHA256_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_aes_128_cbc_sha256_tls_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - NULL, /* get_iv */ + aead_tls_seal_scatter, + NULL, // open_gather + NULL, // get_iv + aead_tls_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha1_tls = { - SHA_DIGEST_LENGTH + 32, /* key len (SHA1 + AES256) */ - 16, /* nonce len (IV) */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH + 32, // key len (SHA1 + AES256) + 16, // nonce len (IV) + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_aes_256_cbc_sha1_tls_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - NULL, /* get_iv */ + aead_tls_seal_scatter, + NULL, // open_gather + NULL, // get_iv + aead_tls_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = { - SHA_DIGEST_LENGTH + 32 + 16, /* key len (SHA1 + AES256 + IV) */ - 0, /* nonce len */ - 16 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH + 32 + 16, // key len (SHA1 + AES256 + IV) + 0, // nonce len + 16 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_aes_256_cbc_sha1_tls_implicit_iv_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - aead_tls_get_iv, /* get_iv */ + 
aead_tls_seal_scatter, + NULL, // open_gather + aead_tls_get_iv, // get_iv + aead_tls_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha256_tls = { - SHA256_DIGEST_LENGTH + 32, /* key len (SHA256 + AES256) */ - 16, /* nonce len (IV) */ - 16 + SHA256_DIGEST_LENGTH, /* overhead (padding + SHA256) */ - SHA256_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA256_DIGEST_LENGTH + 32, // key len (SHA256 + AES256) + 16, // nonce len (IV) + 16 + SHA256_DIGEST_LENGTH, // overhead (padding + SHA256) + SHA256_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_aes_256_cbc_sha256_tls_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - NULL, /* get_iv */ + aead_tls_seal_scatter, + NULL, // open_gather + NULL, // get_iv + aead_tls_tag_len, }; static const EVP_AEAD aead_aes_256_cbc_sha384_tls = { - SHA384_DIGEST_LENGTH + 32, /* key len (SHA384 + AES256) */ - 16, /* nonce len (IV) */ - 16 + SHA384_DIGEST_LENGTH, /* overhead (padding + SHA384) */ - SHA384_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA384_DIGEST_LENGTH + 32, // key len (SHA384 + AES256) + 16, // nonce len (IV) + 16 + SHA384_DIGEST_LENGTH, // overhead (padding + SHA384) + SHA384_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_aes_256_cbc_sha384_tls_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - NULL, /* get_iv */ + aead_tls_seal_scatter, + NULL, // open_gather + NULL, // get_iv + aead_tls_tag_len, }; static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = { - SHA_DIGEST_LENGTH + 24, /* key len (SHA1 + 3DES) */ - 8, /* nonce len (IV) */ - 8 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH + 24, // key len (SHA1 + 3DES) + 8, // nonce len (IV) + 8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_des_ede3_cbc_sha1_tls_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - NULL, /* get_iv */ + aead_tls_seal_scatter, + NULL, // open_gather + NULL, // get_iv + aead_tls_tag_len, }; static const EVP_AEAD aead_des_ede3_cbc_sha1_tls_implicit_iv = { - SHA_DIGEST_LENGTH + 24 + 8, /* key len (SHA1 + 3DES + IV) */ - 0, /* nonce len */ - 8 + SHA_DIGEST_LENGTH, /* overhead (padding + SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH + 24 + 8, // key len (SHA1 + 3DES + IV) + 0, // nonce len + 8 + SHA_DIGEST_LENGTH, // overhead (padding + SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_des_ede3_cbc_sha1_tls_implicit_iv_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - aead_tls_get_iv, /* get_iv */ + aead_tls_seal_scatter, + NULL, // open_gather + aead_tls_get_iv, // get_iv + aead_tls_tag_len, }; static const EVP_AEAD aead_null_sha1_tls = { - SHA_DIGEST_LENGTH, /* key len */ - 0, /* nonce len */ - SHA_DIGEST_LENGTH, /* overhead (SHA1) */ - SHA_DIGEST_LENGTH, /* max tag length */ - NULL, /* init */ + SHA_DIGEST_LENGTH, // key len + 0, // nonce len + SHA_DIGEST_LENGTH, // overhead (SHA1) + SHA_DIGEST_LENGTH, // max tag length + 0, // seal_scatter_supports_extra_in + + NULL, // init aead_null_sha1_tls_init, aead_tls_cleanup, - aead_tls_seal, aead_tls_open, - NULL, /* get_iv */ + aead_tls_seal_scatter, + NULL, // open_gather + NULL, // get_iv + aead_tls_tag_len, }; const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void) { diff --git 
a/Sources/BoringSSL/crypto/asn1/x_bignum.c b/Sources/BoringSSL/crypto/cipher_extra/internal.h similarity index 51% rename from Sources/BoringSSL/crypto/asn1/x_bignum.c rename to Sources/BoringSSL/crypto/cipher_extra/internal.h index 585d9d4a4..1d2c4e1f4 100644 --- a/Sources/BoringSSL/crypto/asn1/x_bignum.c +++ b/Sources/BoringSSL/crypto/cipher_extra/internal.h @@ -54,100 +54,75 @@ * copied and put under another distribution licence * [including the GNU Public Licence.] */ -#include +#ifndef OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H +#define OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H -#include -#include +#include -/* - * Custom primitive type for BIGNUM handling. This reads in an ASN1_INTEGER - * as a BIGNUM directly. Currently it ignores the sign which isn't a problem - * since all BIGNUMs used are non negative and anything that looks negative - * is normally due to an encoding error. - */ +#include "../internal.h" -#define BN_SENSITIVE 1 +#if defined(__cplusplus) +extern "C" { +#endif -static int bn_new(ASN1_VALUE **pval, const ASN1_ITEM *it); -static void bn_free(ASN1_VALUE **pval, const ASN1_ITEM *it); -static int bn_i2c(ASN1_VALUE **pval, unsigned char *cont, int *putype, - const ASN1_ITEM *it); -static int bn_c2i(ASN1_VALUE **pval, const unsigned char *cont, int len, - int utype, char *free_cont, const ASN1_ITEM *it); +// EVP_tls_cbc_get_padding determines the padding from the decrypted, TLS, CBC +// record in |in|. This decrypted record should not include any "decrypted" +// explicit IV. If the record is publicly invalid, it returns zero. Otherwise, +// it returns one and sets |*out_padding_ok| to all ones (0xfff..f) if the +// padding is valid and zero otherwise. It then sets |*out_len| to the length +// with the padding removed or |in_len| if invalid. +// +// If the function returns one, it runs in time independent of the contents of +// |in|. It is also guaranteed that |*out_len| >= |mac_size|, satisfying +// |EVP_tls_cbc_copy_mac|'s precondition. +int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len, + const uint8_t *in, size_t in_len, + size_t block_size, size_t mac_size); -static const ASN1_PRIMITIVE_FUNCS bignum_pf = { - NULL, 0, - bn_new, - bn_free, - 0, - bn_c2i, - bn_i2c, - NULL /* prim_print */ , -}; +// EVP_tls_cbc_copy_mac copies |md_size| bytes from the end of the first +// |in_len| bytes of |in| to |out| in constant time (independent of the concrete +// value of |in_len|, which may vary within a 256-byte window). |in| must point +// to a buffer of |orig_len| bytes. +// +// On entry: +// orig_len >= in_len >= md_size +// md_size <= EVP_MAX_MD_SIZE +void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, + size_t in_len, size_t orig_len); -ASN1_ITEM_start(BIGNUM) - ASN1_ITYPE_PRIMITIVE, V_ASN1_INTEGER, NULL, 0, &bignum_pf, 0, "BIGNUM" -ASN1_ITEM_end(BIGNUM) +// EVP_tls_cbc_record_digest_supported returns 1 iff |md| is a hash function +// which EVP_tls_cbc_digest_record supports. +int EVP_tls_cbc_record_digest_supported(const EVP_MD *md); -ASN1_ITEM_start(CBIGNUM) - ASN1_ITYPE_PRIMITIVE, V_ASN1_INTEGER, NULL, 0, &bignum_pf, BN_SENSITIVE, "BIGNUM" -ASN1_ITEM_end(CBIGNUM) +// EVP_tls_cbc_digest_record computes the MAC of a decrypted, padded TLS +// record. +// +// md: the hash function used in the HMAC. +// EVP_tls_cbc_record_digest_supported must return true for this hash. +// md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written. +// md_out_size: the number of output bytes is written here. 
+// header: the 13-byte, TLS record header. +// data: the record data itself +// data_plus_mac_size: the secret, reported length of the data and MAC +// once the padding has been removed. +// data_plus_mac_plus_padding_size: the public length of the whole +// record, including padding. +// +// On entry: by virtue of having been through one of the remove_padding +// functions, above, we know that data_plus_mac_size is large enough to contain +// a padding byte and MAC. (If the padding was invalid, it might contain the +// padding too. ) +int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, + size_t *md_out_size, const uint8_t header[13], + const uint8_t *data, size_t data_plus_mac_size, + size_t data_plus_mac_plus_padding_size, + const uint8_t *mac_secret, + unsigned mac_secret_length); -static int bn_new(ASN1_VALUE **pval, const ASN1_ITEM *it) -{ - *pval = (ASN1_VALUE *)BN_new(); - if (*pval) - return 1; - else - return 0; -} -static void bn_free(ASN1_VALUE **pval, const ASN1_ITEM *it) -{ - if (!*pval) - return; - if (it->size & BN_SENSITIVE) - BN_clear_free((BIGNUM *)*pval); - else - BN_free((BIGNUM *)*pval); - *pval = NULL; -} +#if defined(__cplusplus) +} // extern C +#endif -static int bn_i2c(ASN1_VALUE **pval, unsigned char *cont, int *putype, - const ASN1_ITEM *it) -{ - BIGNUM *bn; - int pad; - if (!*pval) - return -1; - bn = (BIGNUM *)*pval; - /* If MSB set in an octet we need a padding byte */ - if (BN_num_bits(bn) & 0x7) - pad = 0; - else - pad = 1; - if (cont) { - if (pad) - *cont++ = 0; - BN_bn2bin(bn, cont); - } - return pad + BN_num_bytes(bn); -} - -static int bn_c2i(ASN1_VALUE **pval, const unsigned char *cont, int len, - int utype, char *free_cont, const ASN1_ITEM *it) -{ - BIGNUM *bn; - if (!*pval) { - if (!bn_new(pval, it)) { - return 0; - } - } - bn = (BIGNUM *)*pval; - if (!BN_bin2bn(cont, len, bn)) { - bn_free(pval, it); - return 0; - } - return 1; -} +#endif // OPENSSL_HEADER_CIPHER_EXTRA_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/cipher_extra/tls_cbc.c b/Sources/BoringSSL/crypto/cipher_extra/tls_cbc.c new file mode 100644 index 000000000..6f95130a3 --- /dev/null +++ b/Sources/BoringSSL/crypto/cipher_extra/tls_cbc.c @@ -0,0 +1,482 @@ +/* ==================================================================== + * Copyright (c) 2012 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. 
Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). */ + +#include +#include + +#include +#include +#include + +#include "../internal.h" +#include "internal.h" +#include "../fipsmodule/cipher/internal.h" + + +// MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length +// field. (SHA-384/512 have 128-bit length.) +#define MAX_HASH_BIT_COUNT_BYTES 16 + +// MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support. +// Currently SHA-384/512 has a 128-byte block size and that's the largest +// supported by TLS.) +#define MAX_HASH_BLOCK_SIZE 128 + +int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len, + const uint8_t *in, size_t in_len, + size_t block_size, size_t mac_size) { + const size_t overhead = 1 /* padding length byte */ + mac_size; + + // These lengths are all public so we can test them in non-constant time. + if (overhead > in_len) { + return 0; + } + + size_t padding_length = in[in_len - 1]; + + crypto_word_t good = constant_time_ge_w(in_len, overhead + padding_length); + // The padding consists of a length byte at the end of the record and + // then that many bytes of padding, all with the same value as the + // length byte. Thus, with the length byte included, there are i+1 + // bytes of padding. + // + // We can't check just |padding_length+1| bytes because that leaks + // decrypted information. Therefore we always have to check the maximum + // amount of padding possible. (Again, the length of the record is + // public information so we can use it.) + size_t to_check = 256; // maximum amount of padding, inc length byte. + if (to_check > in_len) { + to_check = in_len; + } + + for (size_t i = 0; i < to_check; i++) { + uint8_t mask = constant_time_ge_8(padding_length, i); + uint8_t b = in[in_len - 1 - i]; + // The final |padding_length+1| bytes should all have the value + // |padding_length|. Therefore the XOR should be zero. + good &= ~(mask & (padding_length ^ b)); + } + + // If any of the final |padding_length+1| bytes had the wrong value, + // one or more of the lower eight bits of |good| will be cleared. 
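To make the fixed-window scan above concrete, here is a toy version with hypothetical values. It is deliberately simplified — it skips the public length checks and returns the byte-sized |good| mask directly — but it runs the same 16 masked iterations whether the padding is intact or corrupted:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Returns 0xff iff the trailing TLS CBC padding is well formed. */
    static uint8_t toy_padding_ok(const uint8_t *rec, size_t rec_len) {
      uint8_t padding_length = rec[rec_len - 1];
      uint8_t good = 0xff;
      size_t to_check = rec_len < 256 ? rec_len : 256;  /* public length */
      for (size_t i = 0; i < to_check; i++) {
        /* mask is 0xff iff padding_length >= i (cf. constant_time_ge_8). */
        uint32_t d = (uint32_t)padding_length - (uint32_t)i;
        uint8_t mask = (uint8_t)~((d >> 31) * 0xff);
        good &= (uint8_t)~(mask & (padding_length ^ rec[rec_len - 1 - i]));
      }
      return good;
    }

    int main(void) {
      /* Final block of a record: 12 data/MAC bytes, then three padding bytes
       * and the length byte, all equal to 0x03. */
      uint8_t tail[16] = {0xde, 0xad, 0xbe, 0xef, 0x10, 0x11, 0x12, 0x13,
                          0x14, 0x15, 0x16, 0x17, 0x03, 0x03, 0x03, 0x03};
      assert(toy_padding_ok(tail, sizeof(tail)) == 0xff);
      tail[13] = 0x07;  /* corrupt one padding byte */
      assert(toy_padding_ok(tail, sizeof(tail)) != 0xff);
      return 0;
    }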
+ good = constant_time_eq_w(0xff, good & 0xff); + + // Always treat |padding_length| as zero on error. If, assuming block size of + // 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16 + // and returned -1, distinguishing good MAC and bad padding from bad MAC and + // bad padding would give POODLE's padding oracle. + padding_length = good & (padding_length + 1); + *out_len = in_len - padding_length; + *out_padding_ok = good; + return 1; +} + +void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in, + size_t in_len, size_t orig_len) { + uint8_t rotated_mac1[EVP_MAX_MD_SIZE], rotated_mac2[EVP_MAX_MD_SIZE]; + uint8_t *rotated_mac = rotated_mac1; + uint8_t *rotated_mac_tmp = rotated_mac2; + + // mac_end is the index of |in| just after the end of the MAC. + size_t mac_end = in_len; + size_t mac_start = mac_end - md_size; + + assert(orig_len >= in_len); + assert(in_len >= md_size); + assert(md_size <= EVP_MAX_MD_SIZE); + + // scan_start contains the number of bytes that we can ignore because + // the MAC's position can only vary by 255 bytes. + size_t scan_start = 0; + // This information is public so it's safe to branch based on it. + if (orig_len > md_size + 255 + 1) { + scan_start = orig_len - (md_size + 255 + 1); + } + + size_t rotate_offset = 0; + uint8_t mac_started = 0; + OPENSSL_memset(rotated_mac, 0, md_size); + for (size_t i = scan_start, j = 0; i < orig_len; i++, j++) { + if (j >= md_size) { + j -= md_size; + } + crypto_word_t is_mac_start = constant_time_eq_w(i, mac_start); + mac_started |= is_mac_start; + uint8_t mac_ended = constant_time_ge_8(i, mac_end); + rotated_mac[j] |= in[i] & mac_started & ~mac_ended; + // Save the offset that |mac_start| is mapped to. + rotate_offset |= j & is_mac_start; + } + + // Now rotate the MAC. We rotate in log(md_size) steps, one for each bit + // position. + for (size_t offset = 1; offset < md_size; offset <<= 1, rotate_offset >>= 1) { + // Rotate by |offset| iff the corresponding bit is set in + // |rotate_offset|, placing the result in |rotated_mac_tmp|. + const uint8_t skip_rotate = (rotate_offset & 1) - 1; + for (size_t i = 0, j = offset; i < md_size; i++, j++) { + if (j >= md_size) { + j -= md_size; + } + rotated_mac_tmp[i] = + constant_time_select_8(skip_rotate, rotated_mac[i], rotated_mac[j]); + } + + // Swap pointers so |rotated_mac| contains the (possibly) rotated value. + // Note the number of iterations and thus the identity of these pointers is + // public information. + uint8_t *tmp = rotated_mac; + rotated_mac = rotated_mac_tmp; + rotated_mac_tmp = tmp; + } + + OPENSSL_memcpy(out, rotated_mac, md_size); +} + +// u32toBE serialises an unsigned, 32-bit number (n) as four bytes at (p) in +// big-endian order. The value of p is advanced by four. +#define u32toBE(n, p) \ + do { \ + *((p)++) = (uint8_t)((n) >> 24); \ + *((p)++) = (uint8_t)((n) >> 16); \ + *((p)++) = (uint8_t)((n) >> 8); \ + *((p)++) = (uint8_t)((n)); \ + } while (0) + +// u64toBE serialises an unsigned, 64-bit number (n) as eight bytes at (p) in +// big-endian order. The value of p is advanced by eight. 
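EVP_tls_cbc_copy_mac above undoes the secret rotation in log2(md_size) passes, one per bit of |rotate_offset|, so the amount of work never depends on where the MAC started. The toy below only checks the decomposition itself — composing power-of-two rotations equals one direct modular rotation — and uses an ordinary branch where the real loop uses constant_time_select_8 masks; names and sizes are illustrative:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
      enum { MD = 20 };                /* hypothetical SHA-1-sized MAC */
      uint8_t mac[MD], want[MD], got[MD], tmp[MD];
      for (size_t i = 0; i < MD; i++) {
        mac[i] = (uint8_t)i;
      }
      const size_t rotate_offset = 7;  /* hypothetical secret offset */

      /* Direct answer: element j ends up holding mac[(j + offset) % MD]. */
      for (size_t j = 0; j < MD; j++) {
        want[j] = mac[(j + rotate_offset) % MD];
      }

      /* Log-step rotation: rotate by each power of two whose bit is set. */
      memcpy(got, mac, MD);
      for (size_t off = 1, bits = rotate_offset; off < MD;
           off <<= 1, bits >>= 1) {
        if (bits & 1) {  /* the real loop replaces this branch with a mask */
          for (size_t j = 0; j < MD; j++) {
            tmp[j] = got[(j + off) % MD];
          }
          memcpy(got, tmp, MD);
        }
      }
      assert(memcmp(got, want, MD) == 0);
      return 0;
    }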
+#define u64toBE(n, p) \ + do { \ + *((p)++) = (uint8_t)((n) >> 56); \ + *((p)++) = (uint8_t)((n) >> 48); \ + *((p)++) = (uint8_t)((n) >> 40); \ + *((p)++) = (uint8_t)((n) >> 32); \ + *((p)++) = (uint8_t)((n) >> 24); \ + *((p)++) = (uint8_t)((n) >> 16); \ + *((p)++) = (uint8_t)((n) >> 8); \ + *((p)++) = (uint8_t)((n)); \ + } while (0) + +typedef union { + SHA_CTX sha1; + SHA256_CTX sha256; + SHA512_CTX sha512; +} HASH_CTX; + +static void tls1_sha1_transform(HASH_CTX *ctx, const uint8_t *block) { + SHA1_Transform(&ctx->sha1, block); +} + +static void tls1_sha256_transform(HASH_CTX *ctx, const uint8_t *block) { + SHA256_Transform(&ctx->sha256, block); +} + +static void tls1_sha512_transform(HASH_CTX *ctx, const uint8_t *block) { + SHA512_Transform(&ctx->sha512, block); +} + +// These functions serialize the state of a hash and thus perform the standard +// "final" operation without adding the padding and length that such a function +// typically does. +static void tls1_sha1_final_raw(HASH_CTX *ctx, uint8_t *md_out) { + SHA_CTX *sha1 = &ctx->sha1; + u32toBE(sha1->h[0], md_out); + u32toBE(sha1->h[1], md_out); + u32toBE(sha1->h[2], md_out); + u32toBE(sha1->h[3], md_out); + u32toBE(sha1->h[4], md_out); +} + +static void tls1_sha256_final_raw(HASH_CTX *ctx, uint8_t *md_out) { + SHA256_CTX *sha256 = &ctx->sha256; + for (unsigned i = 0; i < 8; i++) { + u32toBE(sha256->h[i], md_out); + } +} + +static void tls1_sha512_final_raw(HASH_CTX *ctx, uint8_t *md_out) { + SHA512_CTX *sha512 = &ctx->sha512; + for (unsigned i = 0; i < 8; i++) { + u64toBE(sha512->h[i], md_out); + } +} + +int EVP_tls_cbc_record_digest_supported(const EVP_MD *md) { + switch (EVP_MD_type(md)) { + case NID_sha1: + case NID_sha256: + case NID_sha384: + return 1; + + default: + return 0; + } +} + +int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, + size_t *md_out_size, const uint8_t header[13], + const uint8_t *data, size_t data_plus_mac_size, + size_t data_plus_mac_plus_padding_size, + const uint8_t *mac_secret, + unsigned mac_secret_length) { + HASH_CTX md_state; + void (*md_final_raw)(HASH_CTX *ctx, uint8_t *md_out); + void (*md_transform)(HASH_CTX *ctx, const uint8_t *block); + unsigned md_size, md_block_size = 64; + // md_length_size is the number of bytes in the length field that terminates + // the hash. + unsigned md_length_size = 8; + + // Bound the acceptable input so we can forget about many possible overflows + // later in this function. This is redundant with the record size limits in + // TLS. + if (data_plus_mac_plus_padding_size >= 1024 * 1024) { + assert(0); + return 0; + } + + switch (EVP_MD_type(md)) { + case NID_sha1: + SHA1_Init(&md_state.sha1); + md_final_raw = tls1_sha1_final_raw; + md_transform = tls1_sha1_transform; + md_size = SHA_DIGEST_LENGTH; + break; + + case NID_sha256: + SHA256_Init(&md_state.sha256); + md_final_raw = tls1_sha256_final_raw; + md_transform = tls1_sha256_transform; + md_size = SHA256_DIGEST_LENGTH; + break; + + case NID_sha384: + SHA384_Init(&md_state.sha512); + md_final_raw = tls1_sha512_final_raw; + md_transform = tls1_sha512_transform; + md_size = SHA384_DIGEST_LENGTH; + md_block_size = 128; + md_length_size = 16; + break; + + default: + // EVP_tls_cbc_record_digest_supported should have been called first to + // check that the hash function is supported. 
+ assert(0); + *md_out_size = 0; + return 0; + } + + assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES); + assert(md_block_size <= MAX_HASH_BLOCK_SIZE); + assert(md_size <= EVP_MAX_MD_SIZE); + + static const size_t kHeaderLength = 13; + + // kVarianceBlocks is the number of blocks of the hash that we have to + // calculate in constant time because they could be altered by the + // padding value. + // + // TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not + // required to be minimal. Therefore we say that the final six blocks + // can vary based on the padding. + static const size_t kVarianceBlocks = 6; + + // From now on we're dealing with the MAC, which conceptually has 13 + // bytes of `header' before the start of the data. + size_t len = data_plus_mac_plus_padding_size + kHeaderLength; + // max_mac_bytes contains the maximum bytes of bytes in the MAC, including + // |header|, assuming that there's no padding. + size_t max_mac_bytes = len - md_size - 1; + // num_blocks is the maximum number of hash blocks. + size_t num_blocks = + (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size; + // In order to calculate the MAC in constant time we have to handle + // the final blocks specially because the padding value could cause the + // end to appear somewhere in the final |kVarianceBlocks| blocks and we + // can't leak where. However, |num_starting_blocks| worth of data can + // be hashed right away because no padding value can affect whether + // they are plaintext. + size_t num_starting_blocks = 0; + // k is the starting byte offset into the conceptual header||data where + // we start processing. + size_t k = 0; + // mac_end_offset is the index just past the end of the data to be + // MACed. + size_t mac_end_offset = data_plus_mac_size + kHeaderLength - md_size; + // c is the index of the 0x80 byte in the final hash block that + // contains application data. + size_t c = mac_end_offset % md_block_size; + // index_a is the hash block number that contains the 0x80 terminating + // value. + size_t index_a = mac_end_offset / md_block_size; + // index_b is the hash block number that contains the 64-bit hash + // length, in bits. + size_t index_b = (mac_end_offset + md_length_size) / md_block_size; + + if (num_blocks > kVarianceBlocks) { + num_starting_blocks = num_blocks - kVarianceBlocks; + k = md_block_size * num_starting_blocks; + } + + // bits is the hash-length in bits. It includes the additional hash + // block for the masked HMAC key. + size_t bits = 8 * mac_end_offset; // at most 18 bits to represent + + // Compute the initial HMAC block. + bits += 8 * md_block_size; + // hmac_pad is the masked HMAC key. + uint8_t hmac_pad[MAX_HASH_BLOCK_SIZE]; + OPENSSL_memset(hmac_pad, 0, md_block_size); + assert(mac_secret_length <= sizeof(hmac_pad)); + OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length); + for (size_t i = 0; i < md_block_size; i++) { + hmac_pad[i] ^= 0x36; + } + + md_transform(&md_state, hmac_pad); + + // The length check means |bits| fits in four bytes. + uint8_t length_bytes[MAX_HASH_BIT_COUNT_BYTES]; + OPENSSL_memset(length_bytes, 0, md_length_size - 4); + length_bytes[md_length_size - 4] = (uint8_t)(bits >> 24); + length_bytes[md_length_size - 3] = (uint8_t)(bits >> 16); + length_bytes[md_length_size - 2] = (uint8_t)(bits >> 8); + length_bytes[md_length_size - 1] = (uint8_t)bits; + + if (k > 0) { + // k is a multiple of md_block_size. 
+ uint8_t first_block[MAX_HASH_BLOCK_SIZE]; + OPENSSL_memcpy(first_block, header, 13); + OPENSSL_memcpy(first_block + 13, data, md_block_size - 13); + md_transform(&md_state, first_block); + for (size_t i = 1; i < k / md_block_size; i++) { + md_transform(&md_state, data + md_block_size * i - 13); + } + } + + uint8_t mac_out[EVP_MAX_MD_SIZE]; + OPENSSL_memset(mac_out, 0, sizeof(mac_out)); + + // We now process the final hash blocks. For each block, we construct + // it in constant time. If the |i==index_a| then we'll include the 0x80 + // bytes and zero pad etc. For each block we selectively copy it, in + // constant time, to |mac_out|. + for (size_t i = num_starting_blocks; + i <= num_starting_blocks + kVarianceBlocks; i++) { + uint8_t block[MAX_HASH_BLOCK_SIZE]; + uint8_t is_block_a = constant_time_eq_8(i, index_a); + uint8_t is_block_b = constant_time_eq_8(i, index_b); + for (size_t j = 0; j < md_block_size; j++) { + uint8_t b = 0; + if (k < kHeaderLength) { + b = header[k]; + } else if (k < data_plus_mac_plus_padding_size + kHeaderLength) { + b = data[k - kHeaderLength]; + } + k++; + + uint8_t is_past_c = is_block_a & constant_time_ge_8(j, c); + uint8_t is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1); + // If this is the block containing the end of the + // application data, and we are at the offset for the + // 0x80 value, then overwrite b with 0x80. + b = constant_time_select_8(is_past_c, 0x80, b); + // If this the the block containing the end of the + // application data and we're past the 0x80 value then + // just write zero. + b = b & ~is_past_cp1; + // If this is index_b (the final block), but not + // index_a (the end of the data), then the 64-bit + // length didn't fit into index_a and we're having to + // add an extra block of zeros. + b &= ~is_block_b | is_block_a; + + // The final bytes of one of the blocks contains the + // length. + if (j >= md_block_size - md_length_size) { + // If this is index_b, write a length byte. + b = constant_time_select_8( + is_block_b, length_bytes[j - (md_block_size - md_length_size)], b); + } + block[j] = b; + } + + md_transform(&md_state, block); + md_final_raw(&md_state, block); + // If this is index_b, copy the hash value to |mac_out|. + for (size_t j = 0; j < md_size; j++) { + mac_out[j] |= block[j] & is_block_b; + } + } + + EVP_MD_CTX md_ctx; + EVP_MD_CTX_init(&md_ctx); + if (!EVP_DigestInit_ex(&md_ctx, md, NULL /* engine */)) { + EVP_MD_CTX_cleanup(&md_ctx); + return 0; + } + + // Complete the HMAC in the standard manner. + for (size_t i = 0; i < md_block_size; i++) { + hmac_pad[i] ^= 0x6a; + } + + EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size); + EVP_DigestUpdate(&md_ctx, mac_out, md_size); + unsigned md_out_size_u; + EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u); + *md_out_size = md_out_size_u; + EVP_MD_CTX_cleanup(&md_ctx); + + return 1; +} diff --git a/Sources/BoringSSL/crypto/cmac/cmac.c b/Sources/BoringSSL/crypto/cmac/cmac.c index a9a527d58..fb4e69c7f 100644 --- a/Sources/BoringSSL/crypto/cmac/cmac.c +++ b/Sources/BoringSSL/crypto/cmac/cmac.c @@ -60,13 +60,13 @@ struct cmac_ctx_st { EVP_CIPHER_CTX cipher_ctx; - /* k1 and k2 are the CMAC subkeys. See - * https://tools.ietf.org/html/rfc4493#section-2.3 */ + // k1 and k2 are the CMAC subkeys. See + // https://tools.ietf.org/html/rfc4493#section-2.3 uint8_t k1[AES_BLOCK_SIZE]; uint8_t k2[AES_BLOCK_SIZE]; - /* Last (possibly partial) scratch */ + // Last (possibly partial) scratch uint8_t block[AES_BLOCK_SIZE]; - /* block_used contains the number of valid bytes in |block|. 
*/ + // block_used contains the number of valid bytes in |block|. unsigned block_used; }; @@ -124,20 +124,20 @@ void CMAC_CTX_free(CMAC_CTX *ctx) { OPENSSL_free(ctx); } -/* binary_field_mul_x treats the 128 bits at |in| as an element of GF(2¹²⁸) - * with a hard-coded reduction polynomial and sets |out| as x times the - * input. - * - * See https://tools.ietf.org/html/rfc4493#section-2.3 */ +// binary_field_mul_x treats the 128 bits at |in| as an element of GF(2¹²⁸) +// with a hard-coded reduction polynomial and sets |out| as x times the +// input. +// +// See https://tools.ietf.org/html/rfc4493#section-2.3 static void binary_field_mul_x(uint8_t out[16], const uint8_t in[16]) { unsigned i; - /* Shift |in| to left, including carry. */ + // Shift |in| to left, including carry. for (i = 0; i < 15; i++) { out[i] = (in[i] << 1) | (in[i+1] >> 7); } - /* If MSB set fixup with R. */ + // If MSB set fixup with R. const uint8_t carry = in[0] >> 7; out[i] = (in[i] << 1) ^ ((0 - carry) & 0x87); } @@ -152,7 +152,7 @@ int CMAC_Init(CMAC_CTX *ctx, const void *key, size_t key_len, EVP_CIPHER_key_length(cipher) != key_len || !EVP_EncryptInit_ex(&ctx->cipher_ctx, cipher, NULL, key, kZeroIV) || !EVP_Cipher(&ctx->cipher_ctx, scratch, kZeroIV, AES_BLOCK_SIZE) || - /* Reset context again ready for first data. */ + // Reset context again ready for first data. !EVP_EncryptInit_ex(&ctx->cipher_ctx, NULL, NULL, NULL, kZeroIV)) { return 0; } @@ -183,11 +183,11 @@ int CMAC_Update(CMAC_CTX *ctx, const uint8_t *in, size_t in_len) { in_len -= todo; ctx->block_used += todo; - /* If |in_len| is zero then either |ctx->block_used| is less than - * |AES_BLOCK_SIZE|, in which case we can stop here, or |ctx->block_used| - * is exactly |AES_BLOCK_SIZE| but there's no more data to process. In the - * latter case we don't want to process this block now because it might be - * the last block and that block is treated specially. */ + // If |in_len| is zero then either |ctx->block_used| is less than + // |AES_BLOCK_SIZE|, in which case we can stop here, or |ctx->block_used| + // is exactly |AES_BLOCK_SIZE| but there's no more data to process. In the + // latter case we don't want to process this block now because it might be + // the last block and that block is treated specially. if (in_len == 0) { return 1; } @@ -199,7 +199,7 @@ int CMAC_Update(CMAC_CTX *ctx, const uint8_t *in, size_t in_len) { } } - /* Encrypt all but one of the remaining blocks. */ + // Encrypt all but one of the remaining blocks. while (in_len > AES_BLOCK_SIZE) { if (!EVP_Cipher(&ctx->cipher_ctx, scratch, in, AES_BLOCK_SIZE)) { return 0; @@ -223,8 +223,8 @@ int CMAC_Final(CMAC_CTX *ctx, uint8_t *out, size_t *out_len) { const uint8_t *mask = ctx->k1; if (ctx->block_used != AES_BLOCK_SIZE) { - /* If the last block is incomplete, terminate it with a single 'one' bit - * followed by zeros. */ + // If the last block is incomplete, terminate it with a single 'one' bit + // followed by zeros. ctx->block[ctx->block_used] = 0x80; OPENSSL_memset(ctx->block + ctx->block_used + 1, 0, AES_BLOCK_SIZE - (ctx->block_used + 1)); diff --git a/Sources/BoringSSL/crypto/conf/conf.c b/Sources/BoringSSL/crypto/conf/conf.c index 5b51d225b..b1982f82a 100644 --- a/Sources/BoringSSL/crypto/conf/conf.c +++ b/Sources/BoringSSL/crypto/conf/conf.c @@ -69,6 +69,10 @@ #include "../internal.h" +// The maximum length we can grow a value to after variable expansion. 64k +// should be more than enough for all reasonable uses. 
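For context on the CMAC hunks above: callers drive this code through CMAC_CTX_new / CMAC_Init / CMAC_Update / CMAC_Final. A minimal AES-128-CMAC sketch against that public API (hypothetical key and message, error handling trimmed):

    #include <stddef.h>
    #include <stdint.h>

    #include <openssl/cipher.h>
    #include <openssl/cmac.h>

    static int cmac_example(const uint8_t key[16], const uint8_t *msg,
                            size_t msg_len, uint8_t out[16], size_t *out_len) {
      CMAC_CTX *ctx = CMAC_CTX_new();
      if (ctx == NULL) {
        return 0;
      }
      int ok = CMAC_Init(ctx, key, 16, EVP_aes_128_cbc(), NULL /* engine */) &&
               CMAC_Update(ctx, msg, msg_len) &&
               CMAC_Final(ctx, out, out_len);  /* writes the 16-byte tag */
      CMAC_CTX_free(ctx);
      return ok;
    }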
+#define MAX_CONF_VALUE_LENGTH 65536 + static uint32_t conf_value_hash(const CONF_VALUE *v) { return (lh_strhash(v->section) << 2) ^ lh_strhash(v->name); } @@ -259,7 +263,7 @@ static int str_copy(CONF *conf, char *section, char **pto, char *from) { } else if (IS_EOF(conf, *from)) { break; } else if (*from == '$') { - /* try to expand it */ + // try to expand it rrp = NULL; s = &(from[1]); if (*s == '{') { @@ -299,14 +303,14 @@ static int str_copy(CONF *conf, char *section, char **pto, char *from) { } e++; } - /* So at this point we have - * np which is the start of the name string which is - * '\0' terminated. - * cp which is the start of the section string which is - * '\0' terminated. - * e is the 'next point after'. - * r and rr are the chars replaced by the '\0' - * rp and rrp is where 'r' and 'rr' came from. */ + // So at this point we have + // np which is the start of the name string which is + // '\0' terminated. + // cp which is the start of the section string which is + // '\0' terminated. + // e is the 'next point after'. + // r and rr are the chars replaced by the '\0' + // rp and rrp is where 'r' and 'rr' came from. p = NCONF_get_string(conf, cp, np); if (rrp != NULL) { *rrp = rr; @@ -316,7 +320,15 @@ static int str_copy(CONF *conf, char *section, char **pto, char *from) { OPENSSL_PUT_ERROR(CONF, CONF_R_VARIABLE_HAS_NO_VALUE); goto err; } - BUF_MEM_grow_clean(buf, (strlen(p) + buf->length - (e - from))); + size_t newsize = strlen(p) + buf->length - (e - from); + if (newsize > MAX_CONF_VALUE_LENGTH) { + OPENSSL_PUT_ERROR(CONF, CONF_R_VARIABLE_EXPANSION_TOO_LONG); + goto err; + } + if (!BUF_MEM_grow_clean(buf, newsize)) { + OPENSSL_PUT_ERROR(CONF, ERR_R_MALLOC_FAILURE); + goto err; + } while (*p) { buf->data[to++] = *(p++); } @@ -554,25 +566,25 @@ static int def_load_bio(CONF *conf, BIO *in, long *out_error_line) { i--; } } - /* we removed some trailing stuff so there is a new - * line on the end. */ + // we removed some trailing stuff so there is a new + // line on the end. 
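The new MAX_CONF_VALUE_LENGTH cap above bounds how far str_copy's $variable expansion may grow a value. For reference, the expansion it guards looks roughly like this from the public NCONF API (hypothetical config text, error handling trimmed); values defined earlier in the same section can be referenced as $name or ${name}, and expansion happens while the file is loaded:

    #include <openssl/bio.h>
    #include <openssl/conf.h>

    static const char kConf[] =
        "prefix = /usr/local\n"
        "libdir = ${prefix}/lib\n";  /* expands to /usr/local/lib on load */

    static int conf_example(void) {
      BIO *bio = BIO_new_mem_buf(kConf, sizeof(kConf) - 1);
      CONF *conf = NCONF_new(NULL);
      long error_line = 0;
      int ok = bio != NULL && conf != NULL &&
               NCONF_load_bio(conf, bio, &error_line) &&
               /* top-level values land in the "default" section */
               NCONF_get_string(conf, "default", "libdir") != NULL;
      NCONF_free(conf);
      BIO_free(bio);
      return ok;
    }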
if (ii && i == ii) { - again = 1; /* long line */ + again = 1; // long line } else { p[i] = '\0'; - eline++; /* another input line */ + eline++; // another input line } - /* we now have a line with trailing \r\n removed */ + // we now have a line with trailing \r\n removed - /* i is the number of bytes */ + // i is the number of bytes bufnum += i; v = NULL; - /* check for line continuation */ + // check for line continuation if (bufnum >= 1) { - /* If we have bytes and the last char '\\' and - * second last char is not '\\' */ + // If we have bytes and the last char '\\' and + // second last char is not '\\' p = &(buff->data[bufnum - 1]); if (IS_ESC(conf, p[0]) && ((bufnum <= 1) || !IS_ESC(conf, p[-1]))) { bufnum--; @@ -588,7 +600,7 @@ static int def_load_bio(CONF *conf, BIO *in, long *out_error_line) { clear_comments(conf, buf); s = eat_ws(conf, buf); if (IS_EOF(conf, *s)) { - continue; /* blank line */ + continue; // blank line } if (*s == '[') { char *ss; @@ -779,13 +791,13 @@ int CONF_parse_list(const char *list, char sep, int remove_whitespace, } } -int CONF_modules_load_file(CONF_MUST_BE_NULL *filename, const char *appname, +int CONF_modules_load_file(const char *filename, const char *appname, unsigned long flags) { return 1; } void CONF_modules_free(void) {} -void OPENSSL_config(CONF_MUST_BE_NULL *config_name) {} +void OPENSSL_config(const char *config_name) {} void OPENSSL_no_config(void) {} diff --git a/Sources/BoringSSL/crypto/conf/internal.h b/Sources/BoringSSL/crypto/conf/internal.h index 03d1a8f39..3e0e57dfe 100644 --- a/Sources/BoringSSL/crypto/conf/internal.h +++ b/Sources/BoringSSL/crypto/conf/internal.h @@ -20,12 +20,12 @@ extern "C" { #endif -/* CONF_VALUE_new returns a freshly allocated and zeroed |CONF_VALUE|. */ +// CONF_VALUE_new returns a freshly allocated and zeroed |CONF_VALUE|. CONF_VALUE *CONF_VALUE_new(void); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CRYPTO_CONF_INTERNAL_H */ +#endif // OPENSSL_HEADER_CRYPTO_CONF_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/cpu-aarch64-linux.c b/Sources/BoringSSL/crypto/cpu-aarch64-linux.c index 1b0f39552..f9fa6c5c5 100644 --- a/Sources/BoringSSL/crypto/cpu-aarch64-linux.c +++ b/Sources/BoringSSL/crypto/cpu-aarch64-linux.c @@ -28,8 +28,8 @@ extern uint32_t OPENSSL_armcap_P; void OPENSSL_cpuid_setup(void) { unsigned long hwcap = getauxval(AT_HWCAP); - /* See /usr/include/asm/hwcap.h on an aarch64 installation for the source of - * these values. */ + // See /usr/include/asm/hwcap.h on an aarch64 installation for the source of + // these values. static const unsigned long kNEON = 1 << 1; static const unsigned long kAES = 1 << 3; static const unsigned long kPMULL = 1 << 4; @@ -37,8 +37,8 @@ void OPENSSL_cpuid_setup(void) { static const unsigned long kSHA256 = 1 << 6; if ((hwcap & kNEON) == 0) { - /* Matching OpenSSL, if NEON is missing, don't report other features - * either. */ + // Matching OpenSSL, if NEON is missing, don't report other features + // either. 
return; } @@ -58,4 +58,4 @@ void OPENSSL_cpuid_setup(void) { } } -#endif /* OPENSSL_AARCH64 && !OPENSSL_STATIC_ARMCAP */ +#endif // OPENSSL_AARCH64 && !OPENSSL_STATIC_ARMCAP diff --git a/Sources/BoringSSL/crypto/cpu-arm-linux.c b/Sources/BoringSSL/crypto/cpu-arm-linux.c index 95bb5ee36..839b632b0 100644 --- a/Sources/BoringSSL/crypto/cpu-arm-linux.c +++ b/Sources/BoringSSL/crypto/cpu-arm-linux.c @@ -34,15 +34,15 @@ #define HWCAP_NEON (1 << 12) -/* See /usr/include/asm/hwcap.h on an ARM installation for the source of - * these values. */ +// See /usr/include/asm/hwcap.h on an ARM installation for the source of +// these values. #define HWCAP2_AES (1 << 0) #define HWCAP2_PMULL (1 << 1) #define HWCAP2_SHA1 (1 << 2) #define HWCAP2_SHA2 (1 << 3) -/* |getauxval| is not available on Android until API level 20. Link it as a weak - * symbol and use other methods as fallback. */ +// |getauxval| is not available on Android until API level 20. Link it as a weak +// symbol and use other methods as fallback. unsigned long getauxval(unsigned long type) __attribute__((weak)); static int open_eintr(const char *path, int flags) { @@ -61,8 +61,8 @@ static ssize_t read_eintr(int fd, void *out, size_t len) { return ret; } -/* read_full reads exactly |len| bytes from |fd| to |out|. On error or end of - * file, it returns zero. */ +// read_full reads exactly |len| bytes from |fd| to |out|. On error or end of +// file, it returns zero. static int read_full(int fd, void *out, size_t len) { char *outp = out; while (len > 0) { @@ -76,9 +76,9 @@ static int read_full(int fd, void *out, size_t len) { return 1; } -/* read_file opens |path| and reads until end-of-file. On success, it returns - * one and sets |*out_ptr| and |*out_len| to a newly-allocated buffer with the - * contents. Otherwise, it returns zero. */ +// read_file opens |path| and reads until end-of-file. On success, it returns +// one and sets |*out_ptr| and |*out_len| to a newly-allocated buffer with the +// contents. Otherwise, it returns zero. static int read_file(char **out_ptr, size_t *out_len, const char *path) { int fd = open_eintr(path, O_RDONLY); if (fd < 0) { @@ -128,7 +128,7 @@ static int read_file(char **out_ptr, size_t *out_len, const char *path) { return ret; } -/* getauxval_proc behaves like |getauxval| but reads from /proc/self/auxv. */ +// getauxval_proc behaves like |getauxval| but reads from /proc/self/auxv. static unsigned long getauxval_proc(unsigned long type) { int fd = open_eintr("/proc/self/auxv", O_RDONLY); if (fd < 0) { @@ -164,16 +164,16 @@ static int STRING_PIECE_equals(const STRING_PIECE *a, const char *b) { return a->len == b_len && OPENSSL_memcmp(a->data, b, b_len) == 0; } -/* STRING_PIECE_split finds the first occurence of |sep| in |in| and, if found, - * sets |*out_left| and |*out_right| to |in| split before and after it. It - * returns one if |sep| was found and zero otherwise. */ +// STRING_PIECE_split finds the first occurence of |sep| in |in| and, if found, +// sets |*out_left| and |*out_right| to |in| split before and after it. It +// returns one if |sep| was found and zero otherwise. static int STRING_PIECE_split(STRING_PIECE *out_left, STRING_PIECE *out_right, const STRING_PIECE *in, char sep) { const char *p = OPENSSL_memchr(in->data, sep, in->len); if (p == NULL) { return 0; } - /* |out_left| or |out_right| may alias |in|, so make a copy. */ + // |out_left| or |out_right| may alias |in|, so make a copy. 
STRING_PIECE in_copy = *in; out_left->data = in_copy.data; out_left->len = p - in_copy.data; @@ -182,7 +182,7 @@ static int STRING_PIECE_split(STRING_PIECE *out_left, STRING_PIECE *out_right, return 1; } -/* STRING_PIECE_trim removes leading and trailing whitespace from |s|. */ +// STRING_PIECE_trim removes leading and trailing whitespace from |s|. static void STRING_PIECE_trim(STRING_PIECE *s) { while (s->len != 0 && (s->data[0] == ' ' || s->data[0] == '\t')) { s->data++; @@ -194,12 +194,12 @@ static void STRING_PIECE_trim(STRING_PIECE *s) { } } -/* extract_cpuinfo_field extracts a /proc/cpuinfo field named |field| from - * |in|. If found, it sets |*out| to the value and returns one. Otherwise, it - * returns zero. */ +// extract_cpuinfo_field extracts a /proc/cpuinfo field named |field| from +// |in|. If found, it sets |*out| to the value and returns one. Otherwise, it +// returns zero. static int extract_cpuinfo_field(STRING_PIECE *out, const STRING_PIECE *in, const char *field) { - /* Process |in| one line at a time. */ + // Process |in| one line at a time. STRING_PIECE remaining = *in, line; while (STRING_PIECE_split(&line, &remaining, &remaining, '\n')) { STRING_PIECE key, value; @@ -224,8 +224,8 @@ static int cpuinfo_field_equals(const STRING_PIECE *cpuinfo, const char *field, STRING_PIECE_equals(&extracted, value); } -/* has_list_item treats |list| as a space-separated list of items and returns - * one if |item| is contained in |list| and zero otherwise. */ +// has_list_item treats |list| as a space-separated list of items and returns +// one if |item| is contained in |list| and zero otherwise. static int has_list_item(const STRING_PIECE *list, const char *item) { STRING_PIECE remaining = *list, feature; while (STRING_PIECE_split(&feature, &remaining, &remaining, ' ')) { @@ -238,11 +238,11 @@ static int has_list_item(const STRING_PIECE *list, const char *item) { static unsigned long get_hwcap_cpuinfo(const STRING_PIECE *cpuinfo) { if (cpuinfo_field_equals(cpuinfo, "CPU architecture", "8")) { - /* This is a 32-bit ARM binary running on a 64-bit kernel. NEON is always - * available on ARMv8. Linux omits required features, so reading the - * "Features" line does not work. (For simplicity, use strict equality. We - * assume everything running on future ARM architectures will have a - * working |getauxval|.) */ + // This is a 32-bit ARM binary running on a 64-bit kernel. NEON is always + // available on ARMv8. Linux omits required features, so reading the + // "Features" line does not work. (For simplicity, use strict equality. We + // assume everything running on future ARM architectures will have a + // working |getauxval|.) return HWCAP_NEON; } @@ -276,8 +276,8 @@ static unsigned long get_hwcap2_cpuinfo(const STRING_PIECE *cpuinfo) { return ret; } -/* has_broken_neon returns one if |in| matches a CPU known to have a broken - * NEON unit. See https://crbug.com/341598. */ +// has_broken_neon returns one if |in| matches a CPU known to have a broken +// NEON unit. See https://crbug.com/341598. 
static int has_broken_neon(const STRING_PIECE *cpuinfo) { return cpuinfo_field_equals(cpuinfo, "CPU implementer", "0x51") && cpuinfo_field_equals(cpuinfo, "CPU architecture", "7") && @@ -288,7 +288,7 @@ static int has_broken_neon(const STRING_PIECE *cpuinfo) { extern uint32_t OPENSSL_armcap_P; -static int g_has_broken_neon; +static int g_has_broken_neon, g_needs_hwcap2_workaround; void OPENSSL_cpuid_setup(void) { char *cpuinfo_data; @@ -300,13 +300,13 @@ void OPENSSL_cpuid_setup(void) { cpuinfo.data = cpuinfo_data; cpuinfo.len = cpuinfo_len; - /* |getauxval| is not available on Android until API level 20. If it is - * unavailable, read from /proc/self/auxv as a fallback. This is unreadable - * on some versions of Android, so further fall back to /proc/cpuinfo. - * - * See - * https://android.googlesource.com/platform/ndk/+/882ac8f3392858991a0e1af33b4b7387ec856bd2 - * and b/13679666 (Google-internal) for details. */ + // |getauxval| is not available on Android until API level 20. If it is + // unavailable, read from /proc/self/auxv as a fallback. This is unreadable + // on some versions of Android, so further fall back to /proc/cpuinfo. + // + // See + // https://android.googlesource.com/platform/ndk/+/882ac8f3392858991a0e1af33b4b7387ec856bd2 + // and b/13679666 (Google-internal) for details. unsigned long hwcap = 0; if (getauxval != NULL) { hwcap = getauxval(AT_HWCAP); @@ -318,24 +318,25 @@ void OPENSSL_cpuid_setup(void) { hwcap = get_hwcap_cpuinfo(&cpuinfo); } - /* Clear NEON support if known broken. */ + // Clear NEON support if known broken. g_has_broken_neon = has_broken_neon(&cpuinfo); if (g_has_broken_neon) { hwcap &= ~HWCAP_NEON; } - /* Matching OpenSSL, only report other features if NEON is present. */ + // Matching OpenSSL, only report other features if NEON is present. if (hwcap & HWCAP_NEON) { OPENSSL_armcap_P |= ARMV7_NEON; - /* Some ARMv8 Android devices don't expose AT_HWCAP2. Fall back to - * /proc/cpuinfo. See https://crbug.com/596156. */ + // Some ARMv8 Android devices don't expose AT_HWCAP2. Fall back to + // /proc/cpuinfo. See https://crbug.com/596156. unsigned long hwcap2 = 0; if (getauxval != NULL) { hwcap2 = getauxval(AT_HWCAP2); } if (hwcap2 == 0) { hwcap2 = get_hwcap2_cpuinfo(&cpuinfo); + g_needs_hwcap2_workaround = hwcap2 != 0; } if (hwcap2 & HWCAP2_AES) { @@ -357,4 +358,6 @@ void OPENSSL_cpuid_setup(void) { int CRYPTO_has_broken_NEON(void) { return g_has_broken_neon; } -#endif /* OPENSSL_ARM && !OPENSSL_STATIC_ARMCAP */ +int CRYPTO_needs_hwcap2_workaround(void) { return g_needs_hwcap2_workaround; } + +#endif // OPENSSL_ARM && !OPENSSL_STATIC_ARMCAP diff --git a/Sources/BoringSSL/crypto/cpu-intel.c b/Sources/BoringSSL/crypto/cpu-intel.c index f2e0c4cbc..1ac280c86 100644 --- a/Sources/BoringSSL/crypto/cpu-intel.c +++ b/Sources/BoringSSL/crypto/cpu-intel.c @@ -68,7 +68,7 @@ #include #include -#if defined(OPENSSL_WINDOWS) +#if defined(_MSC_VER) OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include #include @@ -78,12 +78,12 @@ OPENSSL_MSVC_PRAGMA(warning(pop)) #include "internal.h" -/* OPENSSL_cpuid runs the cpuid instruction. |leaf| is passed in as EAX and ECX - * is set to zero. It writes EAX, EBX, ECX, and EDX to |*out_eax| through - * |*out_edx|. */ +// OPENSSL_cpuid runs the cpuid instruction. |leaf| is passed in as EAX and ECX +// is set to zero. It writes EAX, EBX, ECX, and EDX to |*out_eax| through +// |*out_edx|. 
static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx, uint32_t *out_ecx, uint32_t *out_edx, uint32_t leaf) { -#if defined(OPENSSL_WINDOWS) +#if defined(_MSC_VER) int tmp[4]; __cpuid(tmp, (int)leaf); *out_eax = (uint32_t)tmp[0]; @@ -91,8 +91,8 @@ static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx, *out_ecx = (uint32_t)tmp[2]; *out_edx = (uint32_t)tmp[3]; #elif defined(__pic__) && defined(OPENSSL_32_BIT) - /* Inline assembly may not clobber the PIC register. For 32-bit, this is EBX. - * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47602. */ + // Inline assembly may not clobber the PIC register. For 32-bit, this is EBX. + // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47602. __asm__ volatile ( "xor %%ecx, %%ecx\n" "mov %%ebx, %%edi\n" @@ -111,10 +111,10 @@ static void OPENSSL_cpuid(uint32_t *out_eax, uint32_t *out_ebx, #endif } -/* OPENSSL_xgetbv returns the value of an Intel Extended Control Register (XCR). - * Currently only XCR0 is defined by Intel so |xcr| should always be zero. */ +// OPENSSL_xgetbv returns the value of an Intel Extended Control Register (XCR). +// Currently only XCR0 is defined by Intel so |xcr| should always be zero. static uint64_t OPENSSL_xgetbv(uint32_t xcr) { -#if defined(OPENSSL_WINDOWS) +#if defined(_MSC_VER) return (uint64_t)_xgetbv(xcr); #else uint32_t eax, edx; @@ -123,8 +123,8 @@ static uint64_t OPENSSL_xgetbv(uint32_t xcr) { #endif } -/* handle_cpu_env applies the value from |in| to the CPUID values in |out[0]| - * and |out[1]|. See the comment in |OPENSSL_cpuid_setup| about this. */ +// handle_cpu_env applies the value from |in| to the CPUID values in |out[0]| +// and |out[1]|. See the comment in |OPENSSL_cpuid_setup| about this. static void handle_cpu_env(uint32_t *out, const char *in) { const int invert = in[0] == '~'; uint64_t v; @@ -143,7 +143,7 @@ static void handle_cpu_env(uint32_t *out, const char *in) { } void OPENSSL_cpuid_setup(void) { - /* Determine the vendor and maximum input value. */ + // Determine the vendor and maximum input value. uint32_t eax, ebx, ecx, edx; OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 0); @@ -158,13 +158,13 @@ void OPENSSL_cpuid_setup(void) { int has_amd_xop = 0; if (is_amd) { - /* AMD-specific logic. - * See http://developer.amd.com/wordpress/media/2012/10/254811.pdf */ + // AMD-specific logic. + // See http://developer.amd.com/wordpress/media/2012/10/254811.pdf OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 0x80000000); uint32_t num_extended_ids = eax; if (num_extended_ids >= 0x80000001) { OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 0x80000001); - if (ecx & (1 << 11)) { + if (ecx & (1u << 11)) { has_amd_xop = 1; } } @@ -176,60 +176,85 @@ void OPENSSL_cpuid_setup(void) { extended_features = ebx; } - /* Determine the number of cores sharing an L1 data cache to adjust the - * hyper-threading bit. */ + // Determine the number of cores sharing an L1 data cache to adjust the + // hyper-threading bit. uint32_t cores_per_cache = 0; if (is_amd) { - /* AMD CPUs never share an L1 data cache between threads but do set the HTT - * bit on multi-core CPUs. */ + // AMD CPUs never share an L1 data cache between threads but do set the HTT + // bit on multi-core CPUs. cores_per_cache = 1; } else if (num_ids >= 4) { - /* TODO(davidben): The Intel manual says this CPUID leaf enumerates all - * caches using ECX and doesn't say which is first. Does this matter? */ + // TODO(davidben): The Intel manual says this CPUID leaf enumerates all + // caches using ECX and doesn't say which is first. Does this matter? 
OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 4); cores_per_cache = 1 + ((eax >> 14) & 0xfff); } OPENSSL_cpuid(&eax, &ebx, &ecx, &edx, 1); - /* Adjust the hyper-threading bit. */ - if (edx & (1 << 28)) { + // Adjust the hyper-threading bit. + if (edx & (1u << 28)) { uint32_t num_logical_cores = (ebx >> 16) & 0xff; if (cores_per_cache == 1 || num_logical_cores <= 1) { - edx &= ~(1 << 28); + edx &= ~(1u << 28); } } - /* Reserved bit #20 was historically repurposed to control the in-memory - * representation of RC4 state. Always set it to zero. */ - edx &= ~(1 << 20); + // Reserved bit #20 was historically repurposed to control the in-memory + // representation of RC4 state. Always set it to zero. + edx &= ~(1u << 20); - /* Reserved bit #30 is repurposed to signal an Intel CPU. */ + // Reserved bit #30 is repurposed to signal an Intel CPU. if (is_intel) { - edx |= (1 << 30); + edx |= (1u << 30); + + // Clear the XSAVE bit on Knights Landing to mimic Silvermont. This enables + // some Silvermont-specific codepaths which perform better. See OpenSSL + // commit 64d92d74985ebb3d0be58a9718f9e080a14a8e7f. + if ((eax & 0x0fff0ff0) == 0x00050670 /* Knights Landing */ || + (eax & 0x0fff0ff0) == 0x00080650 /* Knights Mill (per SDE) */) { + ecx &= ~(1u << 26); + } } else { - edx &= ~(1 << 30); + edx &= ~(1u << 30); } - /* The SDBG bit is repurposed to denote AMD XOP support. */ + // The SDBG bit is repurposed to denote AMD XOP support. if (has_amd_xop) { - ecx |= (1 << 11); + ecx |= (1u << 11); } else { - ecx &= ~(1 << 11); + ecx &= ~(1u << 11); } uint64_t xcr0 = 0; - if (ecx & (1 << 27)) { - /* XCR0 may only be queried if the OSXSAVE bit is set. */ + if (ecx & (1u << 27)) { + // XCR0 may only be queried if the OSXSAVE bit is set. xcr0 = OPENSSL_xgetbv(0); } - /* See Intel manual, section 14.3. */ + // See Intel manual, volume 1, section 14.3. if ((xcr0 & 6) != 6) { - /* YMM registers cannot be used. */ - ecx &= ~(1 << 28); /* AVX */ - ecx &= ~(1 << 12); /* FMA */ - ecx &= ~(1 << 11); /* AMD XOP */ - extended_features &= ~(1 << 5); /* AVX2 */ + // YMM registers cannot be used. + ecx &= ~(1u << 28); // AVX + ecx &= ~(1u << 12); // FMA + ecx &= ~(1u << 11); // AMD XOP + // Clear AVX2 and AVX512* bits. + // + // TODO(davidben): Should bits 17 and 26-28 also be cleared? Upstream + // doesn't clear those. + extended_features &= + ~((1u << 5) | (1u << 16) | (1u << 21) | (1u << 30) | (1u << 31)); + } + // See Intel manual, volume 1, section 15.2. + if ((xcr0 & 0xe6) != 0xe6) { + // Clear AVX512F. Note we don't touch other AVX512 extensions because they + // can be used with YMM. + extended_features &= ~(1u << 16); + } + + // Disable ADX instructions on Knights Landing. See OpenSSL commit + // 64d92d74985ebb3d0be58a9718f9e080a14a8e7f. + if ((ecx & (1u << 26)) == 0) { + extended_features &= ~(1u << 19); } OPENSSL_ia32cap_P[0] = edx; @@ -243,15 +268,15 @@ void OPENSSL_cpuid_setup(void) { return; } - /* OPENSSL_ia32cap can contain zero, one or two values, separated with a ':'. - * Each value is a 64-bit, unsigned value which may start with "0x" to - * indicate a hex value. Prior to the 64-bit value, a '~' may be given. - * - * If '~' isn't present, then the value is taken as the result of the CPUID. - * Otherwise the value is inverted and ANDed with the probed CPUID result. - * - * The first value determines OPENSSL_ia32cap_P[0] and [1]. The second [2] - * and [3]. */ + // OPENSSL_ia32cap can contain zero, one or two values, separated with a ':'. 
+ // Each value is a 64-bit, unsigned value which may start with "0x" to + // indicate a hex value. Prior to the 64-bit value, a '~' may be given. + // + // If '~' isn't present, then the value is taken as the result of the CPUID. + // Otherwise the value is inverted and ANDed with the probed CPUID result. + // + // The first value determines OPENSSL_ia32cap_P[0] and [1]. The second [2] + // and [3]. handle_cpu_env(&OPENSSL_ia32cap_P[0], env1); env2 = strchr(env1, ':'); @@ -260,4 +285,4 @@ void OPENSSL_cpuid_setup(void) { } } -#endif /* !OPENSSL_NO_ASM && (OPENSSL_X86 || OPENSSL_X86_64) */ +#endif // !OPENSSL_NO_ASM && (OPENSSL_X86 || OPENSSL_X86_64) diff --git a/Sources/BoringSSL/crypto/cpu-ppc64le.c b/Sources/BoringSSL/crypto/cpu-ppc64le.c index c431c818f..6cc8aee52 100644 --- a/Sources/BoringSSL/crypto/cpu-ppc64le.c +++ b/Sources/BoringSSL/crypto/cpu-ppc64le.c @@ -22,19 +22,17 @@ #if !defined(PPC_FEATURE2_HAS_VCRYPTO) -/* PPC_FEATURE2_HAS_VCRYPTO was taken from section 4.1.2.3 of the “OpenPOWER - * ABI for Linux Supplement”. */ +// PPC_FEATURE2_HAS_VCRYPTO was taken from section 4.1.2.3 of the “OpenPOWER +// ABI for Linux Supplement”. #define PPC_FEATURE2_HAS_VCRYPTO 0x02000000 #endif -static unsigned long g_ppc64le_hwcap2 = 0; - void OPENSSL_cpuid_setup(void) { - g_ppc64le_hwcap2 = getauxval(AT_HWCAP2); + OPENSSL_ppc64le_hwcap2 = getauxval(AT_HWCAP2); } int CRYPTO_is_PPC64LE_vcrypto_capable(void) { - return (g_ppc64le_hwcap2 & PPC_FEATURE2_HAS_VCRYPTO) != 0; + return (OPENSSL_ppc64le_hwcap2 & PPC_FEATURE2_HAS_VCRYPTO) != 0; } -#endif /* OPENSSL_PPC64LE */ +#endif // OPENSSL_PPC64LE diff --git a/Sources/BoringSSL/crypto/crypto.c b/Sources/BoringSSL/crypto/crypto.c index c32f5144d..9f4639f23 100644 --- a/Sources/BoringSSL/crypto/crypto.c +++ b/Sources/BoringSSL/crypto/crypto.c @@ -23,14 +23,14 @@ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) || \ defined(OPENSSL_PPC64LE)) -/* x86, x86_64, the ARMs and ppc64le need to record the result of a - * cpuid/getauxval call for the asm to work correctly, unless compiled without - * asm code. */ +// x86, x86_64, the ARMs and ppc64le need to record the result of a +// cpuid/getauxval call for the asm to work correctly, unless compiled without +// asm code. #define NEED_CPUID #else -/* Otherwise, don't emit a static initialiser. */ +// Otherwise, don't emit a static initialiser. #if !defined(BORINGSSL_NO_STATIC_INITIALIZER) #define BORINGSSL_NO_STATIC_INITIALIZER @@ -40,53 +40,76 @@ OPENSSL_ARM || OPENSSL_AARCH64) */ -/* The capability variables are defined in this file in order to work around a - * linker bug. When linking with a .a, if no symbols in a .o are referenced - * then the .o is discarded, even if it has constructor functions. - * - * This still means that any binaries that don't include some functionality - * that tests the capability values will still skip the constructor but, so - * far, the init constructor function only sets the capability variables. */ +// Our assembly does not use the GOT to reference symbols, which means +// references to visible symbols will often require a TEXTREL. This is +// undesirable, so all assembly-referenced symbols should be hidden. CPU +// capabilities are the only such symbols defined in C. Explicitly hide them, +// rather than rely on being built with -fvisibility=hidden. 
+#if defined(OPENSSL_WINDOWS) +#define HIDDEN +#else +#define HIDDEN __attribute__((visibility("hidden"))) +#endif + + +// The capability variables are defined in this file in order to work around a +// linker bug. When linking with a .a, if no symbols in a .o are referenced +// then the .o is discarded, even if it has constructor functions. +// +// This still means that any binaries that don't include some functionality +// that tests the capability values will still skip the constructor but, so +// far, the init constructor function only sets the capability variables. #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) -/* This value must be explicitly initialised to zero in order to work around a - * bug in libtool or the linker on OS X. - * - * If not initialised then it becomes a "common symbol". When put into an - * archive, linking on OS X will fail to resolve common symbols. By - * initialising it to zero, it becomes a "data symbol", which isn't so - * affected. */ -uint32_t OPENSSL_ia32cap_P[4] = {0}; + +// This value must be explicitly initialised to zero in order to work around a +// bug in libtool or the linker on OS X. +// +// If not initialised then it becomes a "common symbol". When put into an +// archive, linking on OS X will fail to resolve common symbols. By +// initialising it to zero, it becomes a "data symbol", which isn't so +// affected. +HIDDEN uint32_t OPENSSL_ia32cap_P[4] = {0}; + +#elif defined(OPENSSL_PPC64LE) + +HIDDEN unsigned long OPENSSL_ppc64le_hwcap2 = 0; + #elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) #include #if defined(OPENSSL_STATIC_ARMCAP) -uint32_t OPENSSL_armcap_P = +HIDDEN uint32_t OPENSSL_armcap_P = #if defined(OPENSSL_STATIC_ARMCAP_NEON) || defined(__ARM_NEON__) ARMV7_NEON | #endif -#if defined(OPENSSL_STATIC_ARMCAP_AES) +#if defined(OPENSSL_STATIC_ARMCAP_AES) || defined(__ARM_FEATURE_CRYPTO) ARMV8_AES | #endif -#if defined(OPENSSL_STATIC_ARMCAP_SHA1) +#if defined(OPENSSL_STATIC_ARMCAP_SHA1) || defined(__ARM_FEATURE_CRYPTO) ARMV8_SHA1 | #endif -#if defined(OPENSSL_STATIC_ARMCAP_SHA256) +#if defined(OPENSSL_STATIC_ARMCAP_SHA256) || defined(__ARM_FEATURE_CRYPTO) ARMV8_SHA256 | #endif -#if defined(OPENSSL_STATIC_ARMCAP_PMULL) +#if defined(OPENSSL_STATIC_ARMCAP_PMULL) || defined(__ARM_FEATURE_CRYPTO) ARMV8_PMULL | #endif 0; #else -uint32_t OPENSSL_armcap_P = 0; +HIDDEN uint32_t OPENSSL_armcap_P = 0; #endif #endif +#if defined(BORINGSSL_FIPS) +// In FIPS mode, the power-on self-test function calls |CRYPTO_library_init| +// because we have to ensure that CPUID detection occurs first. +#define BORINGSSL_NO_STATIC_INITIALIZER +#endif #if defined(OPENSSL_WINDOWS) && !defined(BORINGSSL_NO_STATIC_INITIALIZER) #define OPENSSL_CDECL __cdecl @@ -96,7 +119,7 @@ uint32_t OPENSSL_armcap_P = 0; #if defined(BORINGSSL_NO_STATIC_INITIALIZER) static CRYPTO_once_t once = CRYPTO_ONCE_INIT; -#elif defined(OPENSSL_WINDOWS) +#elif defined(_MSC_VER) #pragma section(".CRT$XCU", read) static void __cdecl do_library_init(void); __declspec(allocate(".CRT$XCU")) void(*library_init_constructor)(void) = @@ -105,21 +128,21 @@ __declspec(allocate(".CRT$XCU")) void(*library_init_constructor)(void) = static void do_library_init(void) __attribute__ ((constructor)); #endif -/* do_library_init is the actual initialization function. If - * BORINGSSL_NO_STATIC_INITIALIZER isn't defined, this is set as a static - * initializer. Otherwise, it is called by CRYPTO_library_init. */ +// do_library_init is the actual initialization function. 
If +// BORINGSSL_NO_STATIC_INITIALIZER isn't defined, this is set as a static +// initializer. Otherwise, it is called by CRYPTO_library_init. static void OPENSSL_CDECL do_library_init(void) { - /* WARNING: this function may only configure the capability variables. See the - * note above about the linker bug. */ + // WARNING: this function may only configure the capability variables. See the + // note above about the linker bug. #if defined(NEED_CPUID) OPENSSL_cpuid_setup(); #endif } void CRYPTO_library_init(void) { - /* TODO(davidben): It would be tidier if this build knob could be replaced - * with an internal lazy-init mechanism that would handle things correctly - * in-library. https://crbug.com/542879 */ + // TODO(davidben): It would be tidier if this build knob could be replaced + // with an internal lazy-init mechanism that would handle things correctly + // in-library. https://crbug.com/542879 #if defined(BORINGSSL_NO_STATIC_INITIALIZER) CRYPTO_once(&once, do_library_init); #endif @@ -145,10 +168,18 @@ const char *SSLeay_version(int unused) { return "BoringSSL"; } +const char *OpenSSL_version(int unused) { + return "BoringSSL"; +} + unsigned long SSLeay(void) { return OPENSSL_VERSION_NUMBER; } +unsigned long OpenSSL_version_num(void) { + return OPENSSL_VERSION_NUMBER; +} + int CRYPTO_malloc_init(void) { return 1; } @@ -161,4 +192,7 @@ int ENGINE_register_all_complete(void) { void OPENSSL_load_builtin_modules(void) {} -int FIPS_mode(void) { return 0; } +int OPENSSL_init_crypto(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings) { + CRYPTO_library_init(); + return 1; +} diff --git a/Sources/BoringSSL/crypto/curve25519/curve25519.c b/Sources/BoringSSL/crypto/curve25519/curve25519.c deleted file mode 100644 index c91e78eaa..000000000 --- a/Sources/BoringSSL/crypto/curve25519/curve25519.c +++ /dev/null @@ -1,4938 +0,0 @@ -/* Copyright (c) 2015, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -/* This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP - * 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as - * public domain but this file has the ISC license just to keep licencing - * simple. - * - * The field functions are shared by Ed25519 and X25519 where possible. 
*/ - -#include - -#include - -#include -#include -#include -#include - -#include "internal.h" -#include "../internal.h" - - -static const int64_t kBottom25Bits = INT64_C(0x1ffffff); -static const int64_t kBottom26Bits = INT64_C(0x3ffffff); -static const int64_t kTop39Bits = INT64_C(0xfffffffffe000000); -static const int64_t kTop38Bits = INT64_C(0xfffffffffc000000); - -static uint64_t load_3(const uint8_t *in) { - uint64_t result; - result = (uint64_t)in[0]; - result |= ((uint64_t)in[1]) << 8; - result |= ((uint64_t)in[2]) << 16; - return result; -} - -static uint64_t load_4(const uint8_t *in) { - uint64_t result; - result = (uint64_t)in[0]; - result |= ((uint64_t)in[1]) << 8; - result |= ((uint64_t)in[2]) << 16; - result |= ((uint64_t)in[3]) << 24; - return result; -} - -static void fe_frombytes(fe h, const uint8_t *s) { - /* Ignores top bit of h. */ - int64_t h0 = load_4(s); - int64_t h1 = load_3(s + 4) << 6; - int64_t h2 = load_3(s + 7) << 5; - int64_t h3 = load_3(s + 10) << 3; - int64_t h4 = load_3(s + 13) << 2; - int64_t h5 = load_4(s + 16); - int64_t h6 = load_3(s + 20) << 7; - int64_t h7 = load_3(s + 23) << 5; - int64_t h8 = load_3(s + 26) << 4; - int64_t h9 = (load_3(s + 29) & 8388607) << 2; - int64_t carry0; - int64_t carry1; - int64_t carry2; - int64_t carry3; - int64_t carry4; - int64_t carry5; - int64_t carry6; - int64_t carry7; - int64_t carry8; - int64_t carry9; - - carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits; - carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits; - carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits; - carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits; - carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits; - - carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; - carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits; - carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; - carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits; - carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits; - - h[0] = h0; - h[1] = h1; - h[2] = h2; - h[3] = h3; - h[4] = h4; - h[5] = h5; - h[6] = h6; - h[7] = h7; - h[8] = h8; - h[9] = h9; -} - -/* Preconditions: - * |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. - * - * Write p=2^255-19; q=floor(h/p). - * Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). - * - * Proof: - * Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. - * Also have |h-2^230 h9|<2^231 so |19 2^(-255)(h-2^230 h9)|<1/4. - * - * Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). - * Then 0> 25; - q = (h0 + q) >> 26; - q = (h1 + q) >> 25; - q = (h2 + q) >> 26; - q = (h3 + q) >> 25; - q = (h4 + q) >> 26; - q = (h5 + q) >> 25; - q = (h6 + q) >> 26; - q = (h7 + q) >> 25; - q = (h8 + q) >> 26; - q = (h9 + q) >> 25; - - /* Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. */ - h0 += 19 * q; - /* Goal: Output h-2^255 q, which is between 0 and 2^255-20. */ - - h1 += h0 >> 26; h0 &= kBottom26Bits; - h2 += h1 >> 25; h1 &= kBottom25Bits; - h3 += h2 >> 26; h2 &= kBottom26Bits; - h4 += h3 >> 25; h3 &= kBottom25Bits; - h5 += h4 >> 26; h4 &= kBottom26Bits; - h6 += h5 >> 25; h5 &= kBottom25Bits; - h7 += h6 >> 26; h6 &= kBottom26Bits; - h8 += h7 >> 25; h7 &= kBottom25Bits; - h9 += h8 >> 26; h8 &= kBottom26Bits; - h9 &= kBottom25Bits; - /* h10 = carry9 */ - - /* Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. 
- * Have h0+...+2^230 h9 between 0 and 2^255-1; - * evidently 2^255 h10-2^255 q = 0. - * Goal: Output h0+...+2^230 h9. */ - - s[0] = h0 >> 0; - s[1] = h0 >> 8; - s[2] = h0 >> 16; - s[3] = (h0 >> 24) | ((uint32_t)(h1) << 2); - s[4] = h1 >> 6; - s[5] = h1 >> 14; - s[6] = (h1 >> 22) | ((uint32_t)(h2) << 3); - s[7] = h2 >> 5; - s[8] = h2 >> 13; - s[9] = (h2 >> 21) | ((uint32_t)(h3) << 5); - s[10] = h3 >> 3; - s[11] = h3 >> 11; - s[12] = (h3 >> 19) | ((uint32_t)(h4) << 6); - s[13] = h4 >> 2; - s[14] = h4 >> 10; - s[15] = h4 >> 18; - s[16] = h5 >> 0; - s[17] = h5 >> 8; - s[18] = h5 >> 16; - s[19] = (h5 >> 24) | ((uint32_t)(h6) << 1); - s[20] = h6 >> 7; - s[21] = h6 >> 15; - s[22] = (h6 >> 23) | ((uint32_t)(h7) << 3); - s[23] = h7 >> 5; - s[24] = h7 >> 13; - s[25] = (h7 >> 21) | ((uint32_t)(h8) << 4); - s[26] = h8 >> 4; - s[27] = h8 >> 12; - s[28] = (h8 >> 20) | ((uint32_t)(h9) << 6); - s[29] = h9 >> 2; - s[30] = h9 >> 10; - s[31] = h9 >> 18; -} - -/* h = f */ -static void fe_copy(fe h, const fe f) { - OPENSSL_memmove(h, f, sizeof(int32_t) * 10); -} - -/* h = 0 */ -static void fe_0(fe h) { OPENSSL_memset(h, 0, sizeof(int32_t) * 10); } - -/* h = 1 */ -static void fe_1(fe h) { - OPENSSL_memset(h, 0, sizeof(int32_t) * 10); - h[0] = 1; -} - -/* h = f + g - * Can overlap h with f or g. - * - * Preconditions: - * |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * - * Postconditions: - * |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */ -static void fe_add(fe h, const fe f, const fe g) { - unsigned i; - for (i = 0; i < 10; i++) { - h[i] = f[i] + g[i]; - } -} - -/* h = f - g - * Can overlap h with f or g. - * - * Preconditions: - * |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * - * Postconditions: - * |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */ -static void fe_sub(fe h, const fe f, const fe g) { - unsigned i; - for (i = 0; i < 10; i++) { - h[i] = f[i] - g[i]; - } -} - -/* h = f * g - * Can overlap h with f or g. - * - * Preconditions: - * |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. - * |g| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. - * - * Postconditions: - * |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. - * - * Notes on implementation strategy: - * - * Using schoolbook multiplication. - * Karatsuba would save a little in some cost models. - * - * Most multiplications by 2 and 19 are 32-bit precomputations; - * cheaper than 64-bit postcomputations. - * - * There is one remaining multiplication by 19 in the carry chain; - * one *19 precomputation can be merged into this, - * but the resulting data flow is considerably less clean. - * - * There are 12 carries below. - * 10 of them are 2-way parallelizable and vectorizable. - * Can get away with 11 carries, but then data flow is much deeper. - * - * With tighter constraints on inputs can squeeze carries into int32. 
*/ -static void fe_mul(fe h, const fe f, const fe g) { - int32_t f0 = f[0]; - int32_t f1 = f[1]; - int32_t f2 = f[2]; - int32_t f3 = f[3]; - int32_t f4 = f[4]; - int32_t f5 = f[5]; - int32_t f6 = f[6]; - int32_t f7 = f[7]; - int32_t f8 = f[8]; - int32_t f9 = f[9]; - int32_t g0 = g[0]; - int32_t g1 = g[1]; - int32_t g2 = g[2]; - int32_t g3 = g[3]; - int32_t g4 = g[4]; - int32_t g5 = g[5]; - int32_t g6 = g[6]; - int32_t g7 = g[7]; - int32_t g8 = g[8]; - int32_t g9 = g[9]; - int32_t g1_19 = 19 * g1; /* 1.959375*2^29 */ - int32_t g2_19 = 19 * g2; /* 1.959375*2^30; still ok */ - int32_t g3_19 = 19 * g3; - int32_t g4_19 = 19 * g4; - int32_t g5_19 = 19 * g5; - int32_t g6_19 = 19 * g6; - int32_t g7_19 = 19 * g7; - int32_t g8_19 = 19 * g8; - int32_t g9_19 = 19 * g9; - int32_t f1_2 = 2 * f1; - int32_t f3_2 = 2 * f3; - int32_t f5_2 = 2 * f5; - int32_t f7_2 = 2 * f7; - int32_t f9_2 = 2 * f9; - int64_t f0g0 = f0 * (int64_t) g0; - int64_t f0g1 = f0 * (int64_t) g1; - int64_t f0g2 = f0 * (int64_t) g2; - int64_t f0g3 = f0 * (int64_t) g3; - int64_t f0g4 = f0 * (int64_t) g4; - int64_t f0g5 = f0 * (int64_t) g5; - int64_t f0g6 = f0 * (int64_t) g6; - int64_t f0g7 = f0 * (int64_t) g7; - int64_t f0g8 = f0 * (int64_t) g8; - int64_t f0g9 = f0 * (int64_t) g9; - int64_t f1g0 = f1 * (int64_t) g0; - int64_t f1g1_2 = f1_2 * (int64_t) g1; - int64_t f1g2 = f1 * (int64_t) g2; - int64_t f1g3_2 = f1_2 * (int64_t) g3; - int64_t f1g4 = f1 * (int64_t) g4; - int64_t f1g5_2 = f1_2 * (int64_t) g5; - int64_t f1g6 = f1 * (int64_t) g6; - int64_t f1g7_2 = f1_2 * (int64_t) g7; - int64_t f1g8 = f1 * (int64_t) g8; - int64_t f1g9_38 = f1_2 * (int64_t) g9_19; - int64_t f2g0 = f2 * (int64_t) g0; - int64_t f2g1 = f2 * (int64_t) g1; - int64_t f2g2 = f2 * (int64_t) g2; - int64_t f2g3 = f2 * (int64_t) g3; - int64_t f2g4 = f2 * (int64_t) g4; - int64_t f2g5 = f2 * (int64_t) g5; - int64_t f2g6 = f2 * (int64_t) g6; - int64_t f2g7 = f2 * (int64_t) g7; - int64_t f2g8_19 = f2 * (int64_t) g8_19; - int64_t f2g9_19 = f2 * (int64_t) g9_19; - int64_t f3g0 = f3 * (int64_t) g0; - int64_t f3g1_2 = f3_2 * (int64_t) g1; - int64_t f3g2 = f3 * (int64_t) g2; - int64_t f3g3_2 = f3_2 * (int64_t) g3; - int64_t f3g4 = f3 * (int64_t) g4; - int64_t f3g5_2 = f3_2 * (int64_t) g5; - int64_t f3g6 = f3 * (int64_t) g6; - int64_t f3g7_38 = f3_2 * (int64_t) g7_19; - int64_t f3g8_19 = f3 * (int64_t) g8_19; - int64_t f3g9_38 = f3_2 * (int64_t) g9_19; - int64_t f4g0 = f4 * (int64_t) g0; - int64_t f4g1 = f4 * (int64_t) g1; - int64_t f4g2 = f4 * (int64_t) g2; - int64_t f4g3 = f4 * (int64_t) g3; - int64_t f4g4 = f4 * (int64_t) g4; - int64_t f4g5 = f4 * (int64_t) g5; - int64_t f4g6_19 = f4 * (int64_t) g6_19; - int64_t f4g7_19 = f4 * (int64_t) g7_19; - int64_t f4g8_19 = f4 * (int64_t) g8_19; - int64_t f4g9_19 = f4 * (int64_t) g9_19; - int64_t f5g0 = f5 * (int64_t) g0; - int64_t f5g1_2 = f5_2 * (int64_t) g1; - int64_t f5g2 = f5 * (int64_t) g2; - int64_t f5g3_2 = f5_2 * (int64_t) g3; - int64_t f5g4 = f5 * (int64_t) g4; - int64_t f5g5_38 = f5_2 * (int64_t) g5_19; - int64_t f5g6_19 = f5 * (int64_t) g6_19; - int64_t f5g7_38 = f5_2 * (int64_t) g7_19; - int64_t f5g8_19 = f5 * (int64_t) g8_19; - int64_t f5g9_38 = f5_2 * (int64_t) g9_19; - int64_t f6g0 = f6 * (int64_t) g0; - int64_t f6g1 = f6 * (int64_t) g1; - int64_t f6g2 = f6 * (int64_t) g2; - int64_t f6g3 = f6 * (int64_t) g3; - int64_t f6g4_19 = f6 * (int64_t) g4_19; - int64_t f6g5_19 = f6 * (int64_t) g5_19; - int64_t f6g6_19 = f6 * (int64_t) g6_19; - int64_t f6g7_19 = f6 * (int64_t) g7_19; - int64_t f6g8_19 = f6 * (int64_t) g8_19; - int64_t 
f6g9_19 = f6 * (int64_t) g9_19; - int64_t f7g0 = f7 * (int64_t) g0; - int64_t f7g1_2 = f7_2 * (int64_t) g1; - int64_t f7g2 = f7 * (int64_t) g2; - int64_t f7g3_38 = f7_2 * (int64_t) g3_19; - int64_t f7g4_19 = f7 * (int64_t) g4_19; - int64_t f7g5_38 = f7_2 * (int64_t) g5_19; - int64_t f7g6_19 = f7 * (int64_t) g6_19; - int64_t f7g7_38 = f7_2 * (int64_t) g7_19; - int64_t f7g8_19 = f7 * (int64_t) g8_19; - int64_t f7g9_38 = f7_2 * (int64_t) g9_19; - int64_t f8g0 = f8 * (int64_t) g0; - int64_t f8g1 = f8 * (int64_t) g1; - int64_t f8g2_19 = f8 * (int64_t) g2_19; - int64_t f8g3_19 = f8 * (int64_t) g3_19; - int64_t f8g4_19 = f8 * (int64_t) g4_19; - int64_t f8g5_19 = f8 * (int64_t) g5_19; - int64_t f8g6_19 = f8 * (int64_t) g6_19; - int64_t f8g7_19 = f8 * (int64_t) g7_19; - int64_t f8g8_19 = f8 * (int64_t) g8_19; - int64_t f8g9_19 = f8 * (int64_t) g9_19; - int64_t f9g0 = f9 * (int64_t) g0; - int64_t f9g1_38 = f9_2 * (int64_t) g1_19; - int64_t f9g2_19 = f9 * (int64_t) g2_19; - int64_t f9g3_38 = f9_2 * (int64_t) g3_19; - int64_t f9g4_19 = f9 * (int64_t) g4_19; - int64_t f9g5_38 = f9_2 * (int64_t) g5_19; - int64_t f9g6_19 = f9 * (int64_t) g6_19; - int64_t f9g7_38 = f9_2 * (int64_t) g7_19; - int64_t f9g8_19 = f9 * (int64_t) g8_19; - int64_t f9g9_38 = f9_2 * (int64_t) g9_19; - int64_t h0 = f0g0+f1g9_38+f2g8_19+f3g7_38+f4g6_19+f5g5_38+f6g4_19+f7g3_38+f8g2_19+f9g1_38; - int64_t h1 = f0g1+f1g0 +f2g9_19+f3g8_19+f4g7_19+f5g6_19+f6g5_19+f7g4_19+f8g3_19+f9g2_19; - int64_t h2 = f0g2+f1g1_2 +f2g0 +f3g9_38+f4g8_19+f5g7_38+f6g6_19+f7g5_38+f8g4_19+f9g3_38; - int64_t h3 = f0g3+f1g2 +f2g1 +f3g0 +f4g9_19+f5g8_19+f6g7_19+f7g6_19+f8g5_19+f9g4_19; - int64_t h4 = f0g4+f1g3_2 +f2g2 +f3g1_2 +f4g0 +f5g9_38+f6g8_19+f7g7_38+f8g6_19+f9g5_38; - int64_t h5 = f0g5+f1g4 +f2g3 +f3g2 +f4g1 +f5g0 +f6g9_19+f7g8_19+f8g7_19+f9g6_19; - int64_t h6 = f0g6+f1g5_2 +f2g4 +f3g3_2 +f4g2 +f5g1_2 +f6g0 +f7g9_38+f8g8_19+f9g7_38; - int64_t h7 = f0g7+f1g6 +f2g5 +f3g4 +f4g3 +f5g2 +f6g1 +f7g0 +f8g9_19+f9g8_19; - int64_t h8 = f0g8+f1g7_2 +f2g6 +f3g5_2 +f4g4 +f5g3_2 +f6g2 +f7g1_2 +f8g0 +f9g9_38; - int64_t h9 = f0g9+f1g8 +f2g7 +f3g6 +f4g5 +f5g4 +f6g3 +f7g2 +f8g1 +f9g0 ; - int64_t carry0; - int64_t carry1; - int64_t carry2; - int64_t carry3; - int64_t carry4; - int64_t carry5; - int64_t carry6; - int64_t carry7; - int64_t carry8; - int64_t carry9; - - /* |h0| <= (1.65*1.65*2^52*(1+19+19+19+19)+1.65*1.65*2^50*(38+38+38+38+38)) - * i.e. |h0| <= 1.4*2^60; narrower ranges for h2, h4, h6, h8 - * |h1| <= (1.65*1.65*2^51*(1+1+19+19+19+19+19+19+19+19)) - * i.e. 
|h1| <= 1.7*2^59; narrower ranges for h3, h5, h7, h9 */ - - carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; - carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.71*2^59 */ - /* |h5| <= 1.71*2^59 */ - - carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits; - carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits; - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.41*2^60 */ - /* |h6| <= 1.41*2^60 */ - - carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits; - carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits; - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.71*2^59 */ - /* |h7| <= 1.71*2^59 */ - - carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits; - carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits; - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.72*2^34 */ - /* |h8| <= 1.41*2^60 */ - - carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; - carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits; - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.71*2^59 */ - - carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits; - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.1*2^39 */ - - carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = h0; - h[1] = h1; - h[2] = h2; - h[3] = h3; - h[4] = h4; - h[5] = h5; - h[6] = h6; - h[7] = h7; - h[8] = h8; - h[9] = h9; -} - -/* h = f * f - * Can overlap h with f. - * - * Preconditions: - * |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. - * - * Postconditions: - * |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. - * - * See fe_mul.c for discussion of implementation strategy. 
*/ -static void fe_sq(fe h, const fe f) { - int32_t f0 = f[0]; - int32_t f1 = f[1]; - int32_t f2 = f[2]; - int32_t f3 = f[3]; - int32_t f4 = f[4]; - int32_t f5 = f[5]; - int32_t f6 = f[6]; - int32_t f7 = f[7]; - int32_t f8 = f[8]; - int32_t f9 = f[9]; - int32_t f0_2 = 2 * f0; - int32_t f1_2 = 2 * f1; - int32_t f2_2 = 2 * f2; - int32_t f3_2 = 2 * f3; - int32_t f4_2 = 2 * f4; - int32_t f5_2 = 2 * f5; - int32_t f6_2 = 2 * f6; - int32_t f7_2 = 2 * f7; - int32_t f5_38 = 38 * f5; /* 1.959375*2^30 */ - int32_t f6_19 = 19 * f6; /* 1.959375*2^30 */ - int32_t f7_38 = 38 * f7; /* 1.959375*2^30 */ - int32_t f8_19 = 19 * f8; /* 1.959375*2^30 */ - int32_t f9_38 = 38 * f9; /* 1.959375*2^30 */ - int64_t f0f0 = f0 * (int64_t) f0; - int64_t f0f1_2 = f0_2 * (int64_t) f1; - int64_t f0f2_2 = f0_2 * (int64_t) f2; - int64_t f0f3_2 = f0_2 * (int64_t) f3; - int64_t f0f4_2 = f0_2 * (int64_t) f4; - int64_t f0f5_2 = f0_2 * (int64_t) f5; - int64_t f0f6_2 = f0_2 * (int64_t) f6; - int64_t f0f7_2 = f0_2 * (int64_t) f7; - int64_t f0f8_2 = f0_2 * (int64_t) f8; - int64_t f0f9_2 = f0_2 * (int64_t) f9; - int64_t f1f1_2 = f1_2 * (int64_t) f1; - int64_t f1f2_2 = f1_2 * (int64_t) f2; - int64_t f1f3_4 = f1_2 * (int64_t) f3_2; - int64_t f1f4_2 = f1_2 * (int64_t) f4; - int64_t f1f5_4 = f1_2 * (int64_t) f5_2; - int64_t f1f6_2 = f1_2 * (int64_t) f6; - int64_t f1f7_4 = f1_2 * (int64_t) f7_2; - int64_t f1f8_2 = f1_2 * (int64_t) f8; - int64_t f1f9_76 = f1_2 * (int64_t) f9_38; - int64_t f2f2 = f2 * (int64_t) f2; - int64_t f2f3_2 = f2_2 * (int64_t) f3; - int64_t f2f4_2 = f2_2 * (int64_t) f4; - int64_t f2f5_2 = f2_2 * (int64_t) f5; - int64_t f2f6_2 = f2_2 * (int64_t) f6; - int64_t f2f7_2 = f2_2 * (int64_t) f7; - int64_t f2f8_38 = f2_2 * (int64_t) f8_19; - int64_t f2f9_38 = f2 * (int64_t) f9_38; - int64_t f3f3_2 = f3_2 * (int64_t) f3; - int64_t f3f4_2 = f3_2 * (int64_t) f4; - int64_t f3f5_4 = f3_2 * (int64_t) f5_2; - int64_t f3f6_2 = f3_2 * (int64_t) f6; - int64_t f3f7_76 = f3_2 * (int64_t) f7_38; - int64_t f3f8_38 = f3_2 * (int64_t) f8_19; - int64_t f3f9_76 = f3_2 * (int64_t) f9_38; - int64_t f4f4 = f4 * (int64_t) f4; - int64_t f4f5_2 = f4_2 * (int64_t) f5; - int64_t f4f6_38 = f4_2 * (int64_t) f6_19; - int64_t f4f7_38 = f4 * (int64_t) f7_38; - int64_t f4f8_38 = f4_2 * (int64_t) f8_19; - int64_t f4f9_38 = f4 * (int64_t) f9_38; - int64_t f5f5_38 = f5 * (int64_t) f5_38; - int64_t f5f6_38 = f5_2 * (int64_t) f6_19; - int64_t f5f7_76 = f5_2 * (int64_t) f7_38; - int64_t f5f8_38 = f5_2 * (int64_t) f8_19; - int64_t f5f9_76 = f5_2 * (int64_t) f9_38; - int64_t f6f6_19 = f6 * (int64_t) f6_19; - int64_t f6f7_38 = f6 * (int64_t) f7_38; - int64_t f6f8_38 = f6_2 * (int64_t) f8_19; - int64_t f6f9_38 = f6 * (int64_t) f9_38; - int64_t f7f7_38 = f7 * (int64_t) f7_38; - int64_t f7f8_38 = f7_2 * (int64_t) f8_19; - int64_t f7f9_76 = f7_2 * (int64_t) f9_38; - int64_t f8f8_19 = f8 * (int64_t) f8_19; - int64_t f8f9_38 = f8 * (int64_t) f9_38; - int64_t f9f9_38 = f9 * (int64_t) f9_38; - int64_t h0 = f0f0 +f1f9_76+f2f8_38+f3f7_76+f4f6_38+f5f5_38; - int64_t h1 = f0f1_2+f2f9_38+f3f8_38+f4f7_38+f5f6_38; - int64_t h2 = f0f2_2+f1f1_2 +f3f9_76+f4f8_38+f5f7_76+f6f6_19; - int64_t h3 = f0f3_2+f1f2_2 +f4f9_38+f5f8_38+f6f7_38; - int64_t h4 = f0f4_2+f1f3_4 +f2f2 +f5f9_76+f6f8_38+f7f7_38; - int64_t h5 = f0f5_2+f1f4_2 +f2f3_2 +f6f9_38+f7f8_38; - int64_t h6 = f0f6_2+f1f5_4 +f2f4_2 +f3f3_2 +f7f9_76+f8f8_19; - int64_t h7 = f0f7_2+f1f6_2 +f2f5_2 +f3f4_2 +f8f9_38; - int64_t h8 = f0f8_2+f1f7_4 +f2f6_2 +f3f5_4 +f4f4 +f9f9_38; - int64_t h9 = f0f9_2+f1f8_2 +f2f7_2 +f3f6_2 +f4f5_2; - int64_t 
carry0; - int64_t carry1; - int64_t carry2; - int64_t carry3; - int64_t carry4; - int64_t carry5; - int64_t carry6; - int64_t carry7; - int64_t carry8; - int64_t carry9; - - carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; - carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; - - carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits; - carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits; - - carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits; - carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits; - - carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits; - carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits; - - carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; - carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits; - - carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits; - - carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; - - h[0] = h0; - h[1] = h1; - h[2] = h2; - h[3] = h3; - h[4] = h4; - h[5] = h5; - h[6] = h6; - h[7] = h7; - h[8] = h8; - h[9] = h9; -} - -static void fe_invert(fe out, const fe z) { - fe t0; - fe t1; - fe t2; - fe t3; - int i; - - fe_sq(t0, z); - fe_sq(t1, t0); - for (i = 1; i < 2; ++i) { - fe_sq(t1, t1); - } - fe_mul(t1, z, t1); - fe_mul(t0, t0, t1); - fe_sq(t2, t0); - fe_mul(t1, t1, t2); - fe_sq(t2, t1); - for (i = 1; i < 5; ++i) { - fe_sq(t2, t2); - } - fe_mul(t1, t2, t1); - fe_sq(t2, t1); - for (i = 1; i < 10; ++i) { - fe_sq(t2, t2); - } - fe_mul(t2, t2, t1); - fe_sq(t3, t2); - for (i = 1; i < 20; ++i) { - fe_sq(t3, t3); - } - fe_mul(t2, t3, t2); - fe_sq(t2, t2); - for (i = 1; i < 10; ++i) { - fe_sq(t2, t2); - } - fe_mul(t1, t2, t1); - fe_sq(t2, t1); - for (i = 1; i < 50; ++i) { - fe_sq(t2, t2); - } - fe_mul(t2, t2, t1); - fe_sq(t3, t2); - for (i = 1; i < 100; ++i) { - fe_sq(t3, t3); - } - fe_mul(t2, t3, t2); - fe_sq(t2, t2); - for (i = 1; i < 50; ++i) { - fe_sq(t2, t2); - } - fe_mul(t1, t2, t1); - fe_sq(t1, t1); - for (i = 1; i < 5; ++i) { - fe_sq(t1, t1); - } - fe_mul(out, t1, t0); -} - -/* h = -f - * - * Preconditions: - * |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. - * - * Postconditions: - * |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. */ -static void fe_neg(fe h, const fe f) { - unsigned i; - for (i = 0; i < 10; i++) { - h[i] = -f[i]; - } -} - -/* Replace (f,g) with (g,g) if b == 1; - * replace (f,g) with (f,g) if b == 0. - * - * Preconditions: b in {0,1}. */ -static void fe_cmov(fe f, const fe g, unsigned b) { - b = 0-b; - unsigned i; - for (i = 0; i < 10; i++) { - int32_t x = f[i] ^ g[i]; - x &= b; - f[i] ^= x; - } -} - -/* return 0 if f == 0 - * return 1 if f != 0 - * - * Preconditions: - * |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */ -static int fe_isnonzero(const fe f) { - uint8_t s[32]; - fe_tobytes(s, f); - - static const uint8_t zero[32] = {0}; - return CRYPTO_memcmp(s, zero, sizeof(zero)) != 0; -} - -/* return 1 if f is in {1,3,5,...,q-2} - * return 0 if f is in {0,2,4,...,q-1} - * - * Preconditions: - * |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */ -static int fe_isnegative(const fe f) { - uint8_t s[32]; - fe_tobytes(s, f); - return s[0] & 1; -} - -/* h = 2 * f * f - * Can overlap h with f. - * - * Preconditions: - * |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. - * - * Postconditions: - * |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. 
- * - * See fe_mul.c for discussion of implementation strategy. */ -static void fe_sq2(fe h, const fe f) { - int32_t f0 = f[0]; - int32_t f1 = f[1]; - int32_t f2 = f[2]; - int32_t f3 = f[3]; - int32_t f4 = f[4]; - int32_t f5 = f[5]; - int32_t f6 = f[6]; - int32_t f7 = f[7]; - int32_t f8 = f[8]; - int32_t f9 = f[9]; - int32_t f0_2 = 2 * f0; - int32_t f1_2 = 2 * f1; - int32_t f2_2 = 2 * f2; - int32_t f3_2 = 2 * f3; - int32_t f4_2 = 2 * f4; - int32_t f5_2 = 2 * f5; - int32_t f6_2 = 2 * f6; - int32_t f7_2 = 2 * f7; - int32_t f5_38 = 38 * f5; /* 1.959375*2^30 */ - int32_t f6_19 = 19 * f6; /* 1.959375*2^30 */ - int32_t f7_38 = 38 * f7; /* 1.959375*2^30 */ - int32_t f8_19 = 19 * f8; /* 1.959375*2^30 */ - int32_t f9_38 = 38 * f9; /* 1.959375*2^30 */ - int64_t f0f0 = f0 * (int64_t) f0; - int64_t f0f1_2 = f0_2 * (int64_t) f1; - int64_t f0f2_2 = f0_2 * (int64_t) f2; - int64_t f0f3_2 = f0_2 * (int64_t) f3; - int64_t f0f4_2 = f0_2 * (int64_t) f4; - int64_t f0f5_2 = f0_2 * (int64_t) f5; - int64_t f0f6_2 = f0_2 * (int64_t) f6; - int64_t f0f7_2 = f0_2 * (int64_t) f7; - int64_t f0f8_2 = f0_2 * (int64_t) f8; - int64_t f0f9_2 = f0_2 * (int64_t) f9; - int64_t f1f1_2 = f1_2 * (int64_t) f1; - int64_t f1f2_2 = f1_2 * (int64_t) f2; - int64_t f1f3_4 = f1_2 * (int64_t) f3_2; - int64_t f1f4_2 = f1_2 * (int64_t) f4; - int64_t f1f5_4 = f1_2 * (int64_t) f5_2; - int64_t f1f6_2 = f1_2 * (int64_t) f6; - int64_t f1f7_4 = f1_2 * (int64_t) f7_2; - int64_t f1f8_2 = f1_2 * (int64_t) f8; - int64_t f1f9_76 = f1_2 * (int64_t) f9_38; - int64_t f2f2 = f2 * (int64_t) f2; - int64_t f2f3_2 = f2_2 * (int64_t) f3; - int64_t f2f4_2 = f2_2 * (int64_t) f4; - int64_t f2f5_2 = f2_2 * (int64_t) f5; - int64_t f2f6_2 = f2_2 * (int64_t) f6; - int64_t f2f7_2 = f2_2 * (int64_t) f7; - int64_t f2f8_38 = f2_2 * (int64_t) f8_19; - int64_t f2f9_38 = f2 * (int64_t) f9_38; - int64_t f3f3_2 = f3_2 * (int64_t) f3; - int64_t f3f4_2 = f3_2 * (int64_t) f4; - int64_t f3f5_4 = f3_2 * (int64_t) f5_2; - int64_t f3f6_2 = f3_2 * (int64_t) f6; - int64_t f3f7_76 = f3_2 * (int64_t) f7_38; - int64_t f3f8_38 = f3_2 * (int64_t) f8_19; - int64_t f3f9_76 = f3_2 * (int64_t) f9_38; - int64_t f4f4 = f4 * (int64_t) f4; - int64_t f4f5_2 = f4_2 * (int64_t) f5; - int64_t f4f6_38 = f4_2 * (int64_t) f6_19; - int64_t f4f7_38 = f4 * (int64_t) f7_38; - int64_t f4f8_38 = f4_2 * (int64_t) f8_19; - int64_t f4f9_38 = f4 * (int64_t) f9_38; - int64_t f5f5_38 = f5 * (int64_t) f5_38; - int64_t f5f6_38 = f5_2 * (int64_t) f6_19; - int64_t f5f7_76 = f5_2 * (int64_t) f7_38; - int64_t f5f8_38 = f5_2 * (int64_t) f8_19; - int64_t f5f9_76 = f5_2 * (int64_t) f9_38; - int64_t f6f6_19 = f6 * (int64_t) f6_19; - int64_t f6f7_38 = f6 * (int64_t) f7_38; - int64_t f6f8_38 = f6_2 * (int64_t) f8_19; - int64_t f6f9_38 = f6 * (int64_t) f9_38; - int64_t f7f7_38 = f7 * (int64_t) f7_38; - int64_t f7f8_38 = f7_2 * (int64_t) f8_19; - int64_t f7f9_76 = f7_2 * (int64_t) f9_38; - int64_t f8f8_19 = f8 * (int64_t) f8_19; - int64_t f8f9_38 = f8 * (int64_t) f9_38; - int64_t f9f9_38 = f9 * (int64_t) f9_38; - int64_t h0 = f0f0 +f1f9_76+f2f8_38+f3f7_76+f4f6_38+f5f5_38; - int64_t h1 = f0f1_2+f2f9_38+f3f8_38+f4f7_38+f5f6_38; - int64_t h2 = f0f2_2+f1f1_2 +f3f9_76+f4f8_38+f5f7_76+f6f6_19; - int64_t h3 = f0f3_2+f1f2_2 +f4f9_38+f5f8_38+f6f7_38; - int64_t h4 = f0f4_2+f1f3_4 +f2f2 +f5f9_76+f6f8_38+f7f7_38; - int64_t h5 = f0f5_2+f1f4_2 +f2f3_2 +f6f9_38+f7f8_38; - int64_t h6 = f0f6_2+f1f5_4 +f2f4_2 +f3f3_2 +f7f9_76+f8f8_19; - int64_t h7 = f0f7_2+f1f6_2 +f2f5_2 +f3f4_2 +f8f9_38; - int64_t h8 = f0f8_2+f1f7_4 +f2f6_2 +f3f5_4 +f4f4 
+f9f9_38; - int64_t h9 = f0f9_2+f1f8_2 +f2f7_2 +f3f6_2 +f4f5_2; - int64_t carry0; - int64_t carry1; - int64_t carry2; - int64_t carry3; - int64_t carry4; - int64_t carry5; - int64_t carry6; - int64_t carry7; - int64_t carry8; - int64_t carry9; - - h0 += h0; - h1 += h1; - h2 += h2; - h3 += h3; - h4 += h4; - h5 += h5; - h6 += h6; - h7 += h7; - h8 += h8; - h9 += h9; - - carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; - carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; - - carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits; - carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits; - - carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits; - carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits; - - carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits; - carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits; - - carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; - carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits; - - carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits; - - carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; - - h[0] = h0; - h[1] = h1; - h[2] = h2; - h[3] = h3; - h[4] = h4; - h[5] = h5; - h[6] = h6; - h[7] = h7; - h[8] = h8; - h[9] = h9; -} - -static void fe_pow22523(fe out, const fe z) { - fe t0; - fe t1; - fe t2; - int i; - - fe_sq(t0, z); - fe_sq(t1, t0); - for (i = 1; i < 2; ++i) { - fe_sq(t1, t1); - } - fe_mul(t1, z, t1); - fe_mul(t0, t0, t1); - fe_sq(t0, t0); - fe_mul(t0, t1, t0); - fe_sq(t1, t0); - for (i = 1; i < 5; ++i) { - fe_sq(t1, t1); - } - fe_mul(t0, t1, t0); - fe_sq(t1, t0); - for (i = 1; i < 10; ++i) { - fe_sq(t1, t1); - } - fe_mul(t1, t1, t0); - fe_sq(t2, t1); - for (i = 1; i < 20; ++i) { - fe_sq(t2, t2); - } - fe_mul(t1, t2, t1); - fe_sq(t1, t1); - for (i = 1; i < 10; ++i) { - fe_sq(t1, t1); - } - fe_mul(t0, t1, t0); - fe_sq(t1, t0); - for (i = 1; i < 50; ++i) { - fe_sq(t1, t1); - } - fe_mul(t1, t1, t0); - fe_sq(t2, t1); - for (i = 1; i < 100; ++i) { - fe_sq(t2, t2); - } - fe_mul(t1, t2, t1); - fe_sq(t1, t1); - for (i = 1; i < 50; ++i) { - fe_sq(t1, t1); - } - fe_mul(t0, t1, t0); - fe_sq(t0, t0); - for (i = 1; i < 2; ++i) { - fe_sq(t0, t0); - } - fe_mul(out, t0, z); -} - -void x25519_ge_tobytes(uint8_t *s, const ge_p2 *h) { - fe recip; - fe x; - fe y; - - fe_invert(recip, h->Z); - fe_mul(x, h->X, recip); - fe_mul(y, h->Y, recip); - fe_tobytes(s, y); - s[31] ^= fe_isnegative(x) << 7; -} - -static void ge_p3_tobytes(uint8_t *s, const ge_p3 *h) { - fe recip; - fe x; - fe y; - - fe_invert(recip, h->Z); - fe_mul(x, h->X, recip); - fe_mul(y, h->Y, recip); - fe_tobytes(s, y); - s[31] ^= fe_isnegative(x) << 7; -} - -static const fe d = {-10913610, 13857413, -15372611, 6949391, 114729, - -8787816, -6275908, -3247719, -18696448, -12055116}; - -static const fe sqrtm1 = {-32595792, -7943725, 9377950, 3500415, 12389472, - -272473, -25146209, -2005654, 326686, 11406482}; - -int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t *s) { - fe u; - fe v; - fe v3; - fe vxx; - fe check; - - fe_frombytes(h->Y, s); - fe_1(h->Z); - fe_sq(u, h->Y); - fe_mul(v, u, d); - fe_sub(u, u, h->Z); /* u = y^2-1 */ - fe_add(v, v, h->Z); /* v = dy^2+1 */ - - fe_sq(v3, v); - fe_mul(v3, v3, v); /* v3 = v^3 */ - fe_sq(h->X, v3); - fe_mul(h->X, h->X, v); - fe_mul(h->X, h->X, u); /* x = uv^7 */ - - fe_pow22523(h->X, h->X); /* x = (uv^7)^((q-5)/8) */ - fe_mul(h->X, 
h->X, v3); - fe_mul(h->X, h->X, u); /* x = uv^3(uv^7)^((q-5)/8) */ - - fe_sq(vxx, h->X); - fe_mul(vxx, vxx, v); - fe_sub(check, vxx, u); /* vx^2-u */ - if (fe_isnonzero(check)) { - fe_add(check, vxx, u); /* vx^2+u */ - if (fe_isnonzero(check)) { - return -1; - } - fe_mul(h->X, h->X, sqrtm1); - } - - if (fe_isnegative(h->X) != (s[31] >> 7)) { - fe_neg(h->X, h->X); - } - - fe_mul(h->T, h->X, h->Y); - return 0; -} - -static void ge_p2_0(ge_p2 *h) { - fe_0(h->X); - fe_1(h->Y); - fe_1(h->Z); -} - -static void ge_p3_0(ge_p3 *h) { - fe_0(h->X); - fe_1(h->Y); - fe_1(h->Z); - fe_0(h->T); -} - -static void ge_cached_0(ge_cached *h) { - fe_1(h->YplusX); - fe_1(h->YminusX); - fe_1(h->Z); - fe_0(h->T2d); -} - -static void ge_precomp_0(ge_precomp *h) { - fe_1(h->yplusx); - fe_1(h->yminusx); - fe_0(h->xy2d); -} - -/* r = p */ -static void ge_p3_to_p2(ge_p2 *r, const ge_p3 *p) { - fe_copy(r->X, p->X); - fe_copy(r->Y, p->Y); - fe_copy(r->Z, p->Z); -} - -static const fe d2 = {-21827239, -5839606, -30745221, 13898782, 229458, - 15978800, -12551817, -6495438, 29715968, 9444199}; - -/* r = p */ -void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p) { - fe_add(r->YplusX, p->Y, p->X); - fe_sub(r->YminusX, p->Y, p->X); - fe_copy(r->Z, p->Z); - fe_mul(r->T2d, p->T, d2); -} - -/* r = p */ -void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p) { - fe_mul(r->X, p->X, p->T); - fe_mul(r->Y, p->Y, p->Z); - fe_mul(r->Z, p->Z, p->T); -} - -/* r = p */ -void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p) { - fe_mul(r->X, p->X, p->T); - fe_mul(r->Y, p->Y, p->Z); - fe_mul(r->Z, p->Z, p->T); - fe_mul(r->T, p->X, p->Y); -} - -/* r = p */ -static void ge_p1p1_to_cached(ge_cached *r, const ge_p1p1 *p) { - ge_p3 t; - x25519_ge_p1p1_to_p3(&t, p); - x25519_ge_p3_to_cached(r, &t); -} - -/* r = 2 * p */ -static void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p) { - fe t0; - - fe_sq(r->X, p->X); - fe_sq(r->Z, p->Y); - fe_sq2(r->T, p->Z); - fe_add(r->Y, p->X, p->Y); - fe_sq(t0, r->Y); - fe_add(r->Y, r->Z, r->X); - fe_sub(r->Z, r->Z, r->X); - fe_sub(r->X, t0, r->Y); - fe_sub(r->T, r->T, r->Z); -} - -/* r = 2 * p */ -static void ge_p3_dbl(ge_p1p1 *r, const ge_p3 *p) { - ge_p2 q; - ge_p3_to_p2(&q, p); - ge_p2_dbl(r, &q); -} - -/* r = p + q */ -static void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) { - fe t0; - - fe_add(r->X, p->Y, p->X); - fe_sub(r->Y, p->Y, p->X); - fe_mul(r->Z, r->X, q->yplusx); - fe_mul(r->Y, r->Y, q->yminusx); - fe_mul(r->T, q->xy2d, p->T); - fe_add(t0, p->Z, p->Z); - fe_sub(r->X, r->Z, r->Y); - fe_add(r->Y, r->Z, r->Y); - fe_add(r->Z, t0, r->T); - fe_sub(r->T, t0, r->T); -} - -/* r = p - q */ -static void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) { - fe t0; - - fe_add(r->X, p->Y, p->X); - fe_sub(r->Y, p->Y, p->X); - fe_mul(r->Z, r->X, q->yminusx); - fe_mul(r->Y, r->Y, q->yplusx); - fe_mul(r->T, q->xy2d, p->T); - fe_add(t0, p->Z, p->Z); - fe_sub(r->X, r->Z, r->Y); - fe_add(r->Y, r->Z, r->Y); - fe_sub(r->Z, t0, r->T); - fe_add(r->T, t0, r->T); -} - -/* r = p + q */ -void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) { - fe t0; - - fe_add(r->X, p->Y, p->X); - fe_sub(r->Y, p->Y, p->X); - fe_mul(r->Z, r->X, q->YplusX); - fe_mul(r->Y, r->Y, q->YminusX); - fe_mul(r->T, q->T2d, p->T); - fe_mul(r->X, p->Z, q->Z); - fe_add(t0, r->X, r->X); - fe_sub(r->X, r->Z, r->Y); - fe_add(r->Y, r->Z, r->Y); - fe_add(r->Z, t0, r->T); - fe_sub(r->T, t0, r->T); -} - -/* r = p - q */ -void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) { - fe t0; - - fe_add(r->X, p->Y, p->X); - 
fe_sub(r->Y, p->Y, p->X); - fe_mul(r->Z, r->X, q->YminusX); - fe_mul(r->Y, r->Y, q->YplusX); - fe_mul(r->T, q->T2d, p->T); - fe_mul(r->X, p->Z, q->Z); - fe_add(t0, r->X, r->X); - fe_sub(r->X, r->Z, r->Y); - fe_add(r->Y, r->Z, r->Y); - fe_sub(r->Z, t0, r->T); - fe_add(r->T, t0, r->T); -} - -static uint8_t equal(signed char b, signed char c) { - uint8_t ub = b; - uint8_t uc = c; - uint8_t x = ub ^ uc; /* 0: yes; 1..255: no */ - uint32_t y = x; /* 0: yes; 1..255: no */ - y -= 1; /* 4294967295: yes; 0..254: no */ - y >>= 31; /* 1: yes; 0: no */ - return y; -} - -static void cmov(ge_precomp *t, const ge_precomp *u, uint8_t b) { - fe_cmov(t->yplusx, u->yplusx, b); - fe_cmov(t->yminusx, u->yminusx, b); - fe_cmov(t->xy2d, u->xy2d, b); -} - -void x25519_ge_scalarmult_small_precomp( - ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]) { - /* precomp_table is first expanded into matching |ge_precomp| - * elements. */ - ge_precomp multiples[15]; - - unsigned i; - for (i = 0; i < 15; i++) { - const uint8_t *bytes = &precomp_table[i*(2 * 32)]; - fe x, y; - fe_frombytes(x, bytes); - fe_frombytes(y, bytes + 32); - - ge_precomp *out = &multiples[i]; - fe_add(out->yplusx, y, x); - fe_sub(out->yminusx, y, x); - fe_mul(out->xy2d, x, y); - fe_mul(out->xy2d, out->xy2d, d2); - } - - /* See the comment above |k25519SmallPrecomp| about the structure of the - * precomputed elements. This loop does 64 additions and 64 doublings to - * calculate the result. */ - ge_p3_0(h); - - for (i = 63; i < 64; i--) { - unsigned j; - signed char index = 0; - - for (j = 0; j < 4; j++) { - const uint8_t bit = 1 & (a[(8 * j) + (i / 8)] >> (i & 7)); - index |= (bit << j); - } - - ge_precomp e; - ge_precomp_0(&e); - - for (j = 1; j < 16; j++) { - cmov(&e, &multiples[j-1], equal(index, j)); - } - - ge_cached cached; - ge_p1p1 r; - x25519_ge_p3_to_cached(&cached, h); - x25519_ge_add(&r, h, &cached); - x25519_ge_p1p1_to_p3(h, &r); - - ge_madd(&r, h, &e); - x25519_ge_p1p1_to_p3(h, &r); - } -} - -#if defined(OPENSSL_SMALL) - -/* This block of code replaces the standard base-point table with a much smaller - * one. The standard table is 30,720 bytes while this one is just 960. - * - * This table contains 15 pairs of group elements, (x, y), where each field - * element is serialised with |fe_tobytes|. If |i| is the index of the group - * element then consider i+1 as a four-bit number: (i₀, i₁, i₂, i₃) (where i₀ - * is the most significant bit). The value of the group element is then: - * (i₀×2^192 + i₁×2^128 + i₂×2^64 + i₃)G, where G is the generator. 
*/ -static const uint8_t k25519SmallPrecomp[15 * 2 * 32] = { - 0x1a, 0xd5, 0x25, 0x8f, 0x60, 0x2d, 0x56, 0xc9, 0xb2, 0xa7, 0x25, 0x95, - 0x60, 0xc7, 0x2c, 0x69, 0x5c, 0xdc, 0xd6, 0xfd, 0x31, 0xe2, 0xa4, 0xc0, - 0xfe, 0x53, 0x6e, 0xcd, 0xd3, 0x36, 0x69, 0x21, 0x58, 0x66, 0x66, 0x66, - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, - 0x66, 0x66, 0x66, 0x66, 0x02, 0xa2, 0xed, 0xf4, 0x8f, 0x6b, 0x0b, 0x3e, - 0xeb, 0x35, 0x1a, 0xd5, 0x7e, 0xdb, 0x78, 0x00, 0x96, 0x8a, 0xa0, 0xb4, - 0xcf, 0x60, 0x4b, 0xd4, 0xd5, 0xf9, 0x2d, 0xbf, 0x88, 0xbd, 0x22, 0x62, - 0x13, 0x53, 0xe4, 0x82, 0x57, 0xfa, 0x1e, 0x8f, 0x06, 0x2b, 0x90, 0xba, - 0x08, 0xb6, 0x10, 0x54, 0x4f, 0x7c, 0x1b, 0x26, 0xed, 0xda, 0x6b, 0xdd, - 0x25, 0xd0, 0x4e, 0xea, 0x42, 0xbb, 0x25, 0x03, 0xa2, 0xfb, 0xcc, 0x61, - 0x67, 0x06, 0x70, 0x1a, 0xc4, 0x78, 0x3a, 0xff, 0x32, 0x62, 0xdd, 0x2c, - 0xab, 0x50, 0x19, 0x3b, 0xf2, 0x9b, 0x7d, 0xb8, 0xfd, 0x4f, 0x29, 0x9c, - 0xa7, 0x91, 0xba, 0x0e, 0x46, 0x5e, 0x51, 0xfe, 0x1d, 0xbf, 0xe5, 0xe5, - 0x9b, 0x95, 0x0d, 0x67, 0xf8, 0xd1, 0xb5, 0x5a, 0xa1, 0x93, 0x2c, 0xc3, - 0xde, 0x0e, 0x97, 0x85, 0x2d, 0x7f, 0xea, 0xab, 0x3e, 0x47, 0x30, 0x18, - 0x24, 0xe8, 0xb7, 0x60, 0xae, 0x47, 0x80, 0xfc, 0xe5, 0x23, 0xe7, 0xc2, - 0xc9, 0x85, 0xe6, 0x98, 0xa0, 0x29, 0x4e, 0xe1, 0x84, 0x39, 0x2d, 0x95, - 0x2c, 0xf3, 0x45, 0x3c, 0xff, 0xaf, 0x27, 0x4c, 0x6b, 0xa6, 0xf5, 0x4b, - 0x11, 0xbd, 0xba, 0x5b, 0x9e, 0xc4, 0xa4, 0x51, 0x1e, 0xbe, 0xd0, 0x90, - 0x3a, 0x9c, 0xc2, 0x26, 0xb6, 0x1e, 0xf1, 0x95, 0x7d, 0xc8, 0x6d, 0x52, - 0xe6, 0x99, 0x2c, 0x5f, 0x9a, 0x96, 0x0c, 0x68, 0x29, 0xfd, 0xe2, 0xfb, - 0xe6, 0xbc, 0xec, 0x31, 0x08, 0xec, 0xe6, 0xb0, 0x53, 0x60, 0xc3, 0x8c, - 0xbe, 0xc1, 0xb3, 0x8a, 0x8f, 0xe4, 0x88, 0x2b, 0x55, 0xe5, 0x64, 0x6e, - 0x9b, 0xd0, 0xaf, 0x7b, 0x64, 0x2a, 0x35, 0x25, 0x10, 0x52, 0xc5, 0x9e, - 0x58, 0x11, 0x39, 0x36, 0x45, 0x51, 0xb8, 0x39, 0x93, 0xfc, 0x9d, 0x6a, - 0xbe, 0x58, 0xcb, 0xa4, 0x0f, 0x51, 0x3c, 0x38, 0x05, 0xca, 0xab, 0x43, - 0x63, 0x0e, 0xf3, 0x8b, 0x41, 0xa6, 0xf8, 0x9b, 0x53, 0x70, 0x80, 0x53, - 0x86, 0x5e, 0x8f, 0xe3, 0xc3, 0x0d, 0x18, 0xc8, 0x4b, 0x34, 0x1f, 0xd8, - 0x1d, 0xbc, 0xf2, 0x6d, 0x34, 0x3a, 0xbe, 0xdf, 0xd9, 0xf6, 0xf3, 0x89, - 0xa1, 0xe1, 0x94, 0x9f, 0x5d, 0x4c, 0x5d, 0xe9, 0xa1, 0x49, 0x92, 0xef, - 0x0e, 0x53, 0x81, 0x89, 0x58, 0x87, 0xa6, 0x37, 0xf1, 0xdd, 0x62, 0x60, - 0x63, 0x5a, 0x9d, 0x1b, 0x8c, 0xc6, 0x7d, 0x52, 0xea, 0x70, 0x09, 0x6a, - 0xe1, 0x32, 0xf3, 0x73, 0x21, 0x1f, 0x07, 0x7b, 0x7c, 0x9b, 0x49, 0xd8, - 0xc0, 0xf3, 0x25, 0x72, 0x6f, 0x9d, 0xed, 0x31, 0x67, 0x36, 0x36, 0x54, - 0x40, 0x92, 0x71, 0xe6, 0x11, 0x28, 0x11, 0xad, 0x93, 0x32, 0x85, 0x7b, - 0x3e, 0xb7, 0x3b, 0x49, 0x13, 0x1c, 0x07, 0xb0, 0x2e, 0x93, 0xaa, 0xfd, - 0xfd, 0x28, 0x47, 0x3d, 0x8d, 0xd2, 0xda, 0xc7, 0x44, 0xd6, 0x7a, 0xdb, - 0x26, 0x7d, 0x1d, 0xb8, 0xe1, 0xde, 0x9d, 0x7a, 0x7d, 0x17, 0x7e, 0x1c, - 0x37, 0x04, 0x8d, 0x2d, 0x7c, 0x5e, 0x18, 0x38, 0x1e, 0xaf, 0xc7, 0x1b, - 0x33, 0x48, 0x31, 0x00, 0x59, 0xf6, 0xf2, 0xca, 0x0f, 0x27, 0x1b, 0x63, - 0x12, 0x7e, 0x02, 0x1d, 0x49, 0xc0, 0x5d, 0x79, 0x87, 0xef, 0x5e, 0x7a, - 0x2f, 0x1f, 0x66, 0x55, 0xd8, 0x09, 0xd9, 0x61, 0x38, 0x68, 0xb0, 0x07, - 0xa3, 0xfc, 0xcc, 0x85, 0x10, 0x7f, 0x4c, 0x65, 0x65, 0xb3, 0xfa, 0xfa, - 0xa5, 0x53, 0x6f, 0xdb, 0x74, 0x4c, 0x56, 0x46, 0x03, 0xe2, 0xd5, 0x7a, - 0x29, 0x1c, 0xc6, 0x02, 0xbc, 0x59, 0xf2, 0x04, 0x75, 0x63, 0xc0, 0x84, - 0x2f, 0x60, 0x1c, 0x67, 0x76, 0xfd, 0x63, 0x86, 0xf3, 0xfa, 0xbf, 0xdc, - 0xd2, 0x2d, 
0x90, 0x91, 0xbd, 0x33, 0xa9, 0xe5, 0x66, 0x0c, 0xda, 0x42, - 0x27, 0xca, 0xf4, 0x66, 0xc2, 0xec, 0x92, 0x14, 0x57, 0x06, 0x63, 0xd0, - 0x4d, 0x15, 0x06, 0xeb, 0x69, 0x58, 0x4f, 0x77, 0xc5, 0x8b, 0xc7, 0xf0, - 0x8e, 0xed, 0x64, 0xa0, 0xb3, 0x3c, 0x66, 0x71, 0xc6, 0x2d, 0xda, 0x0a, - 0x0d, 0xfe, 0x70, 0x27, 0x64, 0xf8, 0x27, 0xfa, 0xf6, 0x5f, 0x30, 0xa5, - 0x0d, 0x6c, 0xda, 0xf2, 0x62, 0x5e, 0x78, 0x47, 0xd3, 0x66, 0x00, 0x1c, - 0xfd, 0x56, 0x1f, 0x5d, 0x3f, 0x6f, 0xf4, 0x4c, 0xd8, 0xfd, 0x0e, 0x27, - 0xc9, 0x5c, 0x2b, 0xbc, 0xc0, 0xa4, 0xe7, 0x23, 0x29, 0x02, 0x9f, 0x31, - 0xd6, 0xe9, 0xd7, 0x96, 0xf4, 0xe0, 0x5e, 0x0b, 0x0e, 0x13, 0xee, 0x3c, - 0x09, 0xed, 0xf2, 0x3d, 0x76, 0x91, 0xc3, 0xa4, 0x97, 0xae, 0xd4, 0x87, - 0xd0, 0x5d, 0xf6, 0x18, 0x47, 0x1f, 0x1d, 0x67, 0xf2, 0xcf, 0x63, 0xa0, - 0x91, 0x27, 0xf8, 0x93, 0x45, 0x75, 0x23, 0x3f, 0xd1, 0xf1, 0xad, 0x23, - 0xdd, 0x64, 0x93, 0x96, 0x41, 0x70, 0x7f, 0xf7, 0xf5, 0xa9, 0x89, 0xa2, - 0x34, 0xb0, 0x8d, 0x1b, 0xae, 0x19, 0x15, 0x49, 0x58, 0x23, 0x6d, 0x87, - 0x15, 0x4f, 0x81, 0x76, 0xfb, 0x23, 0xb5, 0xea, 0xcf, 0xac, 0x54, 0x8d, - 0x4e, 0x42, 0x2f, 0xeb, 0x0f, 0x63, 0xdb, 0x68, 0x37, 0xa8, 0xcf, 0x8b, - 0xab, 0xf5, 0xa4, 0x6e, 0x96, 0x2a, 0xb2, 0xd6, 0xbe, 0x9e, 0xbd, 0x0d, - 0xb4, 0x42, 0xa9, 0xcf, 0x01, 0x83, 0x8a, 0x17, 0x47, 0x76, 0xc4, 0xc6, - 0x83, 0x04, 0x95, 0x0b, 0xfc, 0x11, 0xc9, 0x62, 0xb8, 0x0c, 0x76, 0x84, - 0xd9, 0xb9, 0x37, 0xfa, 0xfc, 0x7c, 0xc2, 0x6d, 0x58, 0x3e, 0xb3, 0x04, - 0xbb, 0x8c, 0x8f, 0x48, 0xbc, 0x91, 0x27, 0xcc, 0xf9, 0xb7, 0x22, 0x19, - 0x83, 0x2e, 0x09, 0xb5, 0x72, 0xd9, 0x54, 0x1c, 0x4d, 0xa1, 0xea, 0x0b, - 0xf1, 0xc6, 0x08, 0x72, 0x46, 0x87, 0x7a, 0x6e, 0x80, 0x56, 0x0a, 0x8a, - 0xc0, 0xdd, 0x11, 0x6b, 0xd6, 0xdd, 0x47, 0xdf, 0x10, 0xd9, 0xd8, 0xea, - 0x7c, 0xb0, 0x8f, 0x03, 0x00, 0x2e, 0xc1, 0x8f, 0x44, 0xa8, 0xd3, 0x30, - 0x06, 0x89, 0xa2, 0xf9, 0x34, 0xad, 0xdc, 0x03, 0x85, 0xed, 0x51, 0xa7, - 0x82, 0x9c, 0xe7, 0x5d, 0x52, 0x93, 0x0c, 0x32, 0x9a, 0x5b, 0xe1, 0xaa, - 0xca, 0xb8, 0x02, 0x6d, 0x3a, 0xd4, 0xb1, 0x3a, 0xf0, 0x5f, 0xbe, 0xb5, - 0x0d, 0x10, 0x6b, 0x38, 0x32, 0xac, 0x76, 0x80, 0xbd, 0xca, 0x94, 0x71, - 0x7a, 0xf2, 0xc9, 0x35, 0x2a, 0xde, 0x9f, 0x42, 0x49, 0x18, 0x01, 0xab, - 0xbc, 0xef, 0x7c, 0x64, 0x3f, 0x58, 0x3d, 0x92, 0x59, 0xdb, 0x13, 0xdb, - 0x58, 0x6e, 0x0a, 0xe0, 0xb7, 0x91, 0x4a, 0x08, 0x20, 0xd6, 0x2e, 0x3c, - 0x45, 0xc9, 0x8b, 0x17, 0x79, 0xe7, 0xc7, 0x90, 0x99, 0x3a, 0x18, 0x25, -}; - -void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) { - x25519_ge_scalarmult_small_precomp(h, a, k25519SmallPrecomp); -} - -#else - -/* k25519Precomp[i][j] = (j+1)*256^i*B */ -static const ge_precomp k25519Precomp[32][8] = { - { - { - {25967493, -14356035, 29566456, 3660896, -12694345, 4014787, - 27544626, -11754271, -6079156, 2047605}, - {-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, - 5043384, 19500929, -15469378}, - {-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, - 29287919, 11864899, -24514362, -4438546}, - }, - { - {-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, - -11717903, -3814571, -358445, -10211303}, - {-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, - -15616551, 11189268, -26829678, -5319081}, - {26966642, 11152617, 32442495, 15396054, 14353839, -12752335, - -3128826, -9541118, -15472047, -4166697}, - }, - { - {15636291, -9688557, 24204773, -7912398, 616977, -16685262, - 27787600, -14772189, 28944400, -1550024}, - {16568933, 4717097, -11556148, -1102322, 15682896, -11807043, - 16354577, -11775962, 7689662, 11199574}, - 
{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, - 7512774, 10017326, -17749093, -9920357}, - }, - { - {-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, - -28926210, 15006023, 3284568, -6276540}, - {23599295, -8306047, -11193664, -7687416, 13236774, 10506355, - 7464579, 9656445, 13059162, 10374397}, - {7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, - -3839045, -641708, -101325}, - }, - { - {10861363, 11473154, 27284546, 1981175, -30064349, 12577861, - 32867885, 14515107, -15438304, 10819380}, - {4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, - 12483688, -12668491, 5581306}, - {19563160, 16186464, -29386857, 4097519, 10237984, -4348115, - 28542350, 13850243, -23678021, -15815942}, - }, - { - {-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, - -19188627, -15224819, -9818940, -12085777}, - {-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, - -15689887, 1762328, 14866737}, - {-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, - -28236412, 3959421, 27914454, 4383652}, - }, - { - {5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, - 5230134, -23952439, -15175766}, - {-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, - 20654025, 16520125, 30598449, 7715701}, - {28881845, 14381568, 9657904, 3680757, -20181635, 7843316, - -31400660, 1370708, 29794553, -1409300}, - }, - { - {14499471, -2729599, -33191113, -4254652, 28494862, 14271267, - 30290735, 10876454, -33154098, 2381726}, - {-7195431, -2655363, -14730155, 462251, -27724326, 3941372, - -6236617, 3696005, -32300832, 15351955}, - {27431194, 8222322, 16448760, -3907995, -18707002, 11938355, - -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - {-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, - -2378284, -1627556, 10092783, -4764171}, - {27939166, 14210322, 4677035, 16277044, -22964462, -12398139, - -32508754, 12005538, -17810127, 12803510}, - {17228999, -15661624, -1233527, 300140, -1224870, -11714777, - 30364213, -9038194, 18016357, 4397660}, - }, - { - {-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, - -26619106, 14544525, -17477504, 982639}, - {29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, - -4120128, -21047696, 9934963}, - {5793303, 16271923, -24131614, -10116404, 29188560, 1206517, - -14747930, 4559895, -30123922, -10897950}, - }, - { - {-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, - 24191034, 4541697, -13338309, 5500568}, - {12650548, -1497113, 9052871, 11355358, -17680037, -8400164, - -17430592, 12264343, 10874051, 13524335}, - {25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, - 5080568, -22528059, 5376628}, - }, - { - {-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, - -22321305, -9447443, 4535768, 1569007}, - {-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, - -30494562, 3044290, 31848280, 12543772}, - {-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, - -27377195, -2062731, 7718482, 14474653}, - }, - { - {2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, - -7236665, 24316168, -5253567}, - {13741529, 10911568, -33233417, -8603737, -20177830, -1033297, - 33040651, -13424532, -20729456, 8321686}, - {21060490, -2212744, 15712757, -4336099, 1639040, 10656336, - 23845965, -11874838, -9984458, 608372}, - }, - { - {-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, - 1123968, -6780577, 27229399, 23887}, - {-23244140, -294205, -11744728, 14712571, -29465699, 
-2029617, - 12797024, -6440308, -1633405, 16678954}, - {-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, - -1508144, -4795045, -17169265, 4904953}, - }, - { - {24059557, 14617003, 19037157, -15039908, 19766093, -14906429, - 5169211, 16191880, 2128236, -4326833}, - {-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, - -29806336, 916033, -6882542, -2986532}, - {-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, - 285431, 2763829, 15736322, 4143876}, - }, - { - {2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, - -14594663, 23527084, -16458268}, - {33431127, -11130478, -17838966, -15626900, 8909499, 8376530, - -32625340, 4087881, -15188911, -14416214}, - {1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, - 4357868, -4774191, -16323038}, - }, - }, - { - { - {6721966, 13833823, -23523388, -1551314, 26354293, -11863321, - 23365147, -3949732, 7390890, 2759800}, - {4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, - -4264057, 1244380, -12919645}, - {-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, - 9208236, 15886429, 16489664}, - }, - { - {1996075, 10375649, 14346367, 13311202, -6874135, -16438411, - -13693198, 398369, -30606455, -712933}, - {-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, - 13348553, 12076947, -30836462, 5113182}, - {-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, - -30341101, -7336386, 13847711, 5387222}, - }, - { - {-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, - 8763061, 3617786, -19600662, 10370991}, - {20246567, -14369378, 22358229, -543712, 18507283, -10413996, - 14554437, -8746092, 32232924, 16763880}, - {9648505, 10094563, 26416693, 14745928, -30374318, -6472621, - 11094161, 15689506, 3140038, -16510092}, - }, - { - {-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, - -27224800, 9448613, -28774454, 366295}, - {19153450, 11523972, -11096490, -6503142, -24647631, 5420647, - 28344573, 8041113, 719605, 11671788}, - {8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, - -15266516, 27000813, -10195553}, - }, - { - {-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, - 5336097, 6750977, -14521026}, - {11836410, -3979488, 26297894, 16080799, 23455045, 15735944, - 1695823, -8819122, 8169720, 16220347}, - {-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, - -11144307, -2627664, -5990708, -14166033}, - }, - { - {-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, - 27884329, 2847284, 2655861, 1738395}, - {-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, - 21651608, -3239336, -19087449, -11005278}, - {1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, - 5821408, 10478196, 8544890}, - }, - { - {32173121, -16129311, 24896207, 3921497, 22579056, -3410854, - 19270449, 12217473, 17789017, -3395995}, - {-30552961, -2228401, -15578829, -10147201, 13243889, 517024, - 15479401, -3853233, 30460520, 1052596}, - {-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, - 27491595, -4612359, 3179268, -9478891}, - }, - { - {31947069, -14366651, -4640583, -15339921, -15125977, -6039709, - -14756777, -16411740, 19072640, -9511060}, - {11685058, 11822410, 3158003, -13952594, 33402194, -4165066, - 5977896, -5215017, 473099, 5040608}, - {-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, - 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - {7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, - 8076149, -27868496, 11538389}, - 
{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, - 8754525, 7446702, -5676054, 5797016}, - {-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, - 2014099, -9050574, -2369172, -5877341}, - }, - { - {-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, - 1192730, -3714199, 15123619, 10811505}, - {14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, - 15776356, -28886779, -11974553}, - {-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, - -20654173, -16484855, 4714547, -9600655}, - }, - { - {15200332, 8368572, 19679101, 15970074, -31872674, 1959451, - 24611599, -4543832, -11745876, 12340220}, - {12876937, -10480056, 33134381, 6590940, -6307776, 14872440, - 9613953, 8241152, 15370987, 9608631}, - {-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, - 15866074, -28210621, -8814099}, - }, - { - {26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, - 858697, 20571223, 8420556}, - {14620715, 13067227, -15447274, 8264467, 14106269, 15080814, - 33531827, 12516406, -21574435, -12476749}, - {236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, - 7256740, 8791136, 15069930}, - }, - { - {1276410, -9371918, 22949635, -16322807, -23493039, -5702186, - 14711875, 4874229, -30663140, -2331391}, - {5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, - -7912378, -33069337, 9234253}, - {20590503, -9018988, 31529744, -7352666, -2706834, 10650548, - 31559055, -11609587, 18979186, 13396066}, - }, - { - {24474287, 4968103, 22267082, 4407354, 24063882, -8325180, - -18816887, 13594782, 33514650, 7021958}, - {-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, - -25948728, -3916677, -21480480, 12868082}, - {-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, - -21446107, 2244500, -12455797, -8089383}, - }, - { - {-30595528, 13793479, -5852820, 319136, -25723172, -6263899, - 33086546, 8957937, -15233648, 5540521}, - {-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, - -23710744, -1568984, -16128528, -14962807}, - {23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, - 892185, -11513277, -15205948}, - }, - { - {9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, - 4763127, -19179614, 5867134}, - {-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, - 27846559, 5931263, -29749703, -16108455}, - {27461885, -2977536, 22380810, 1815854, -23033753, -3031938, - 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - {-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, - 7585295, -3176626, 18549497, 15302069}, - {-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, - 10458790, -6418461, -8872242, 8424746}, - {24687205, 8613276, -30667046, -3233545, 1863892, -1830544, - 19206234, 7134917, -11284482, -828919}, - }, - { - {11334899, -9218022, 8025293, 12707519, 17523892, -10476071, - 10243738, -14685461, -5066034, 16498837}, - {8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, - -14124238, 6536641, 10543906}, - {-28946384, 15479763, -17466835, 568876, -1497683, 11223454, - -2669190, -16625574, -27235709, 8876771}, - }, - { - {-25742899, -12566864, -15649966, -846607, -33026686, -796288, - -33481822, 15824474, -604426, -9039817}, - {10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, - -4890037, 1657394, 3084098}, - {10477963, -7470260, 12119566, -13250805, 29016247, -5365589, - 31280319, 14396151, -30233575, 15272409}, - }, - { - {-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, - -25173957, 
-12636138, -25014757, 1950504}, - {-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, - -8384306, -8767532, 15341279, 8373727}, - {28685821, 7759505, -14378516, -12002860, -31971820, 4079242, - 298136, -10232602, -2878207, 15190420}, - }, - { - {-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, - 8669718, 2742393, -26033313, -6875003}, - {-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, - 9291594, -16247779, -12154742, 6048605}, - {-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, - 13934231, 5128323, 11213262, 9168384}, - }, - { - {-26280513, 11007847, 19408960, -940758, -18592965, -4328580, - -5088060, -11105150, 20470157, -16398701}, - {-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, - -22783952, 14461608, 14042978, 5230683}, - {29969567, -2741594, -16711867, -8552442, 9175486, -2468974, - 21556951, 3506042, -5933891, -12449708}, - }, - { - {-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, - -21284170, 8971513, -28539189, 15326563}, - {-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, - -15523050, 15300988, -20514118, 9168260}, - {-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, - -28948358, 9601605, 33087103, -9011387}, - }, - { - {-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, - -27444329, -15000531, -5996870, 15664672}, - {23294591, -16632613, -22650781, -8470978, 27844204, 11461195, - 13099750, -2460356, 18151676, 13417686}, - {-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, - 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - {11433042, -13228665, 8239631, -5279517, -1985436, -725718, - -18698764, 2167544, -6921301, -13440182}, - {-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, - -9917708, -8638997, 12215110, 12028277}, - {14098400, 6555944, 23007258, 5757252, -15427832, -12950502, - 30123440, 4617780, -16900089, -655628}, - }, - { - {-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, - -15819999, 10154009, 23973261, -12684474}, - {-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, - 18341390, -11419951, 32013174, -10103539}, - {-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, - 21911214, 6354752, 4425632, -837822}, - }, - { - {-10433389, -14612966, 22229858, -3091047, -13191166, 776729, - -17415375, -12020462, 4725005, 14044970}, - {19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, - -1411784, -19522291, -16109756}, - {-24864089, 12986008, -10898878, -5558584, -11312371, -148526, - 19541418, 8180106, 9282262, 10282508}, - }, - { - {-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, - 15522535, 8372215, 5542595, -10702683}, - {-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, - -2781891, 6993761, -18093885, 10114655}, - {-20107055, -929418, 31422704, 10427861, -7110749, 6150669, - -29091755, -11529146, 25953725, -106158}, - }, - { - {-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, - 19390020, 6094296, -3315279, 12831125}, - {-15998678, 7578152, 5310217, 14408357, -33548620, -224739, - 31575954, 6326196, 7381791, -2421839}, - {-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, - 6295303, 8082724, -15362489, 12339664}, - }, - { - {27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, - 15768922, 25091167, 14856294}, - {-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, - -12695493, -22182473, -9012899}, - {-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, - -27260765, 13866390, 30146206, 9142070}, - }, 
- { - {3924129, -15307516, -13817122, -10054960, 12291820, -668366, - -27702774, 9326384, -8237858, 4171294}, - {-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, - 26396185, 3731949, 345228, -5462949}, - {-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, - 2031539, -12391231, -16253183, -13582083}, - }, - { - {31016211, -16722429, 26371392, -14451233, -5027349, 14854137, - 17477601, 3842657, 28012650, -16405420}, - {-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, - -9189873, 16292057, -8867157, 3507940}, - {29439664, 3537914, 23333589, 6997794, -17555561, -11018068, - -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - {-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, - 17860444, -9273846, -2095802, 9304567}, - {20714564, -4336911, 29088195, 7406487, 11426967, -5095705, - 14792667, -14608617, 5289421, -477127}, - {-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, - 17271490, 12349094, 26939669, -3752294}, - }, - { - {-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, - -27283495, -12348559, -3698806, 117887}, - {22263325, -6560050, 3984570, -11174646, -15114008, -566785, - 28311253, 5358056, -23319780, 541964}, - {16259219, 3261970, 2309254, -15534474, -16885711, -4581916, - 24134070, -16705829, -13337066, -13552195}, - }, - { - {9378160, -13140186, -22845982, -12745264, 28198281, -7244098, - -2399684, -717351, 690426, 14876244}, - {24977353, -314384, -8223969, -13465086, 28432343, -1176353, - -13068804, -12297348, -22380984, 6618999}, - {-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, - 8044829, -13817328, 32239829, -5652762}, - }, - { - {-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, - -10350059, 32779359, 5095274}, - {-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, - -24601656, 14506724, 21639561, -2630236}, - {-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, - -1289502, -6863535, 17874574, 558605}, - }, - { - {-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, - 33499487, 5080151, 2085892, 5119761}, - {-22205145, -2519528, -16381601, 414691, -25019550, 2170430, - 30634760, -8363614, -31999993, -5759884}, - {-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, - 27534430, -7192145, -22351378, 12961482}, - }, - { - {-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, - 16533930, 8206996, -30194652, -5159638}, - {-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, - 7031275, 7589640, 8945490}, - {-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, - 7251489, -11182180, 24099109, -14456170}, - }, - { - {5019558, -7907470, 4244127, -14714356, -26933272, 6453165, - -19118182, -13289025, -6231896, -10280736}, - {10853594, 10721687, 26480089, 5861829, -22995819, 1972175, - -1866647, -10557898, -3363451, -6441124}, - {-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, - -2008168, -13866408, 7421392}, - }, - { - {8139927, -6546497, 32257646, -5890546, 30375719, 1886181, - -21175108, 15441252, 28826358, -4123029}, - {6267086, 9695052, 7709135, -16603597, -32869068, -1886135, - 14795160, -7840124, 13746021, -1742048}, - {28584902, 7787108, -6732942, -15050729, 22846041, -7571236, - -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - {24949256, 6376279, -27466481, -8174608, -18646154, -9930606, - 33543569, -12141695, 3569627, 11342593}, - {26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, - 4608608, 7325975, -14801071}, - {-11618399, -14554430, -24321212, 7655128, -1369274, 
5214312, - -27400540, 10258390, -17646694, -8186692}, - }, - { - {11431204, 15823007, 26570245, 14329124, 18029990, 4796082, - -31446179, 15580664, 9280358, -3973687}, - {-160783, -10326257, -22855316, -4304997, -20861367, -13621002, - -32810901, -11181622, -15545091, 4387441}, - {-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, - -24513992, 8548137, 20617071, -7482001}, - }, - { - {-938825, -3930586, -8714311, 16124718, 24603125, -6225393, - -13775352, -11875822, 24345683, 10325460}, - {-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, - 16318175, -1010689, 4766743, 3552007}, - {-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, - 14481909, 10988822, -3994762}, - }, - { - {15564307, -14311570, 3101243, 5684148, 30446780, -8051356, - 12677127, -6505343, -8295852, 13296005}, - {-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, - 31521204, 9614054, -30000824, 12074674}, - {4771191, -135239, 14290749, -13089852, 27992298, 14998318, - -1413936, -1556716, 29832613, -16391035}, - }, - { - {7064884, -7541174, -19161962, -5067537, -18891269, -2912736, - 25825242, 5293297, -27122660, 13101590}, - {-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, - 32512469, -5317593, -30356070, -4190957}, - {-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, - 14413974, 9515896, 19568978, 9628812}, - }, - { - {33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, - -6106839, -6291786, 3437740}, - {-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, - -22961733, 70104, 7463304, 4176122}, - {-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, - -32719404, -5322751, 24216882, 5944158}, - }, - { - {8894125, 7450974, -2664149, -9765752, -28080517, -12389115, - 19345746, 14680796, 11632993, 5847885}, - {26942781, -2315317, 9129564, -4906607, 26024105, 11769399, - -11518837, 6367194, -9727230, 4782140}, - {19916461, -4828410, -22910704, -11414391, 25606324, -5972441, - 33253853, 8220911, 6358847, -1873857}, - }, - { - {801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, - -4480480, -13538503, 1387155}, - {19646058, 5720633, -11416706, 12814209, 11607948, 12749789, - 14147075, 15156355, -21866831, 11835260}, - {19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, - 15467869, -26560550, 5052483}, - }, - }, - { - { - {-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, - -12618185, 12228557, -7003677}, - {32944382, 14922211, -22844894, 5188528, 21913450, -8719943, - 4001465, 13238564, -6114803, 8653815}, - {22865569, -4652735, 27603668, -12545395, 14348958, 8234005, - 24808405, 5719875, 28483275, 2841751}, - }, - { - {-16420968, -1113305, -327719, -12107856, 21886282, -15552774, - -1887966, -315658, 19932058, -12739203}, - {-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, - 3999228, 13239134, -4777469, -13910208}, - {1382174, -11694719, 17266790, 9194690, -13324356, 9720081, - 20403944, 11284705, -14013818, 3093230}, - }, - { - {16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, - 16271225, -24049421, -6691850}, - {-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, - 24123614, 15193618, -21652117, -16739389}, - {-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, - 31870908, 14690798, 17361620, 11864968}, - }, - { - {-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, - -12331205, -7486601, -25578460, -16240689}, - {14668462, -12270235, 26039039, 15305210, 25515617, 4542480, - 10453892, 6577524, 9145645, -6443880}, - 
{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, - -7972642, 3936128, -5652273, -3050304}, - }, - { - {30625386, -4729400, -25555961, -12792866, -20484575, 7695099, - 17097188, -16303496, -27999779, 1803632}, - {-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, - 14911344, 12196514, -21405489, 7047412}, - {20093277, 9920966, -11138194, -5343857, 13161587, 12044805, - -32856851, 4124601, -32343828, -10257566}, - }, - { - {-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, - 4752377, -8714640, -21679658, 2288038}, - {-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, - 29457502, 14625692, -24819617, 12570232}, - {-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, - -21159943, -3498680, -11974704, 4724943}, - }, - { - {17960970, -11775534, -4140968, -9702530, -8876562, -1410617, - -12907383, -8659932, -29576300, 1903856}, - {23134274, -14279132, -10681997, -1611936, 20684485, 15770816, - -12989750, 3190296, 26955097, 14109738}, - {15308788, 5320727, -30113809, -14318877, 22902008, 7767164, - 29425325, -11277562, 31960942, 11934971}, - }, - { - {-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, - 20638173, 4875028, 10491392, 1379718}, - {-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, - 33518459, 16176658, 21432314, 12180697}, - {-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, - 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - {5414091, -15386041, -21007664, 9643570, 12834970, 1186149, - -2622916, -1342231, 26128231, 6032912}, - {-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, - 3604025, 8316894, -25875034, -10437358}, - {3296484, 6223048, 24680646, -12246460, -23052020, 5903205, - -8862297, -4639164, 12376617, 3188849}, - }, - { - {29190488, -14659046, 27549113, -1183516, 3520066, -10697301, - 32049515, -7309113, -16109234, -9852307}, - {-14744486, -9309156, 735818, -598978, -20407687, -5057904, - 25246078, -15795669, 18640741, -960977}, - {-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, - -31638386, -494430, 10530747, 1053335}, - }, - { - {-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, - -31462369, -2948985, 24018831, 15026644}, - {-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, - 25310643, 13003497, -2314791, -15145616}, - {-27419985, -603321, -8043984, -1669117, -26092265, 13987819, - -27297622, 187899, -23166419, -2531735}, - }, - { - {-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, - 9716667, 16266922, -5070217, 726099}, - {29370922, -6053998, 7334071, -15342259, 9385287, 2247707, - -13661962, -4839461, 30007388, -15823341}, - {-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, - 730663, 9835848, 4555336}, - }, - { - {-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, - 17693930, 544696, -11985298, 12422646}, - {31117226, -12215734, -13502838, 6561947, -9876867, -12757670, - -5118685, -4096706, 29120153, 13924425}, - {-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, - -9383939, -11317700, 7240931, -237388}, - }, - { - {-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, - 1222336, 4389483, 3293637, -15551743}, - {-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, - -24319580, 7733547, 12796905, -6335822}, - {-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, - -28253339, 3647836, 3222231, -11160462}, - }, - { - {18606113, 1693100, -25448386, -15170272, 4112353, 10045021, - 23603893, -2048234, -7550776, 2484985}, - {9255317, -3131197, -12156162, 
-1004256, 13098013, -9214866, - 16377220, -2102812, -19802075, -3034702}, - {-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, - -31718148, 9936966, -30097688, -10618797}, - }, - { - {21878590, -5001297, 4338336, 13643897, -3036865, 13160960, - 19708896, 5415497, -7360503, -4109293}, - {27736861, 10103576, 12500508, 8502413, -3413016, -9633558, - 10436918, -1550276, -23659143, -8132100}, - {19492550, -12104365, -29681976, -852630, -3208171, 12403437, - 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - {12015105, 2801261, 28198131, 10151021, 24818120, -4743133, - -11194191, -5645734, 5150968, 7274186}, - {2831366, -12492146, 1478975, 6122054, 23825128, -12733586, - 31097299, 6083058, 31021603, -9793610}, - {-2529932, -2229646, 445613, 10720828, -13849527, -11505937, - -23507731, 16354465, 15067285, -14147707}, - }, - { - {7840942, 14037873, -33364863, 15934016, -728213, -3642706, - 21403988, 1057586, -19379462, -12403220}, - {915865, -16469274, 15608285, -8789130, -24357026, 6060030, - -17371319, 8410997, -7220461, 16527025}, - {32922597, -556987, 20336074, -16184568, 10903705, -5384487, - 16957574, 52992, 23834301, 6588044}, - }, - { - {32752030, 11232950, 3381995, -8714866, 22652988, -10744103, - 17159699, 16689107, -20314580, -1305992}, - {-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, - 7924251, -2752281, 1976123, -7249027}, - {21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, - -3371252, 12331345, -8237197}, - }, - { - {8651614, -4477032, -16085636, -4996994, 13002507, 2950805, - 29054427, -5106970, 10008136, -4667901}, - {31486080, 15114593, -14261250, 12951354, 14369431, -7387845, - 16347321, -13662089, 8684155, -10532952}, - {19443825, 11385320, 24468943, -9659068, -23919258, 2187569, - -26263207, -6086921, 31316348, 14219878}, - }, - { - {-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, - 27146014, 6992409, 29126555, 9207390}, - {32382935, 1110093, 18477781, 11028262, -27411763, -7548111, - -4980517, 10843782, -7957600, -14435730}, - {2814918, 7836403, 27519878, -7868156, -20894015, -11553689, - -21494559, 8550130, 28346258, 1994730}, - }, - { - {-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, - -19516951, 7174894, 22628102, 8115180}, - {-30405132, 955511, -11133838, -15078069, -32447087, -13278079, - -25651578, 3317160, -9943017, 930272}, - {-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, - 24091212, -1388970, -22765376, -10650715}, - }, - { - {-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, - -14839018, -16554220, -1867018, 8398970}, - {-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, - 22981545, -6291273, 18009408, -15772772}, - {-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, - 29551787, -3727419, 19288549, 1325865}, - }, - { - {15100157, -15835752, -23923978, -1005098, -26450192, 15509408, - 12376730, -3479146, 33166107, -8042750}, - {20909231, 13023121, -9209752, 16251778, -5778415, -8094914, - 12412151, 10018715, 2213263, -13878373}, - {32529814, -11074689, 30361439, -16689753, -9135940, 1513226, - 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - {9923462, 11271500, 12616794, 3544722, -29998368, -1721626, - 12891687, -8193132, -26442943, 10486144}, - {-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, - 2610596, -23921530, -11455195}, - {5408411, -1136691, -4969122, 10561668, 24145918, 14240566, - 31319731, -4235541, 19985175, -3436086}, - }, - { - {-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, - -17577068, 
8849297, 65030, 8370684}, - {-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, - -19442942, 6922164, 12743482, -9800518}, - {-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, - 23783145, 11038569, 18800704, 255233}, - }, - { - {-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, - 9066957, 19258688, -14753793}, - {-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, - -31934921, 2209390, -1524053, 2055794}, - {580882, 16705327, 5468415, -2683018, -30926419, -14696000, - -7203346, -8994389, -30021019, 7394435}, - }, - { - {23838809, 1822728, -15738443, 15242727, 8318092, -3733104, - -21672180, -3492205, -4821741, 14799921}, - {13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, - 13496856, -9056018, 7402518}, - {2286874, -4435931, -20042458, -2008336, -13696227, 5038122, - 11006906, -15760352, 8205061, 1607563}, - }, - { - {14414086, -8002132, 3331830, -3208217, 22249151, -5594188, - 18364661, -2906958, 30019587, -9029278}, - {-27688051, 1585953, -10775053, 931069, -29120221, -11002319, - -14410829, 12029093, 9944378, 8024}, - {4368715, -3709630, 29874200, -15022983, -20230386, -11410704, - -16114594, -999085, -8142388, 5640030}, - }, - { - {10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, - -16694564, 15219798, -14327783}, - {27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, - -1173195, -18342183, 9742717}, - {6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, - 7406442, 12420155, 1994844}, - }, - { - {14012521, -5024720, -18384453, -9578469, -26485342, -3936439, - -13033478, -10909803, 24319929, -6446333}, - {16412690, -4507367, 10772641, 15929391, -17068788, -4658621, - 10555945, -10484049, -30102368, -4739048}, - {22397382, -7767684, -9293161, -12792868, 17166287, -9755136, - -27333065, 6199366, 21880021, -12250760}, - }, - { - {-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, - 16557151, 8890729, 8840445, 4957760}, - {-15447727, 709327, -6919446, -10870178, -29777922, 6522332, - -21720181, 12130072, -14796503, 5005757}, - {-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, - 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - {-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, - -32013908, -3057104, 22208662, 2000468}, - {3065073, -1412761, -25598674, -361432, -17683065, -5703415, - -8164212, 11248527, -3691214, -7414184}, - {10379208, -6045554, 8877319, 1473647, -29291284, -12507580, - 16690915, 2553332, -3132688, 16400289}, - }, - { - {15716668, 1254266, -18472690, 7446274, -8448918, 6344164, - -22097271, -7285580, 26894937, 9132066}, - {24158887, 12938817, 11085297, -8177598, -28063478, -4457083, - -30576463, 64452, -6817084, -2692882}, - {13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, - -3418511, -4688006, 2364226}, - }, - { - {16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, - -11697457, 15445875, -7798101}, - {29004207, -7867081, 28661402, -640412, -12794003, -7943086, - 31863255, -4135540, -278050, -15759279}, - {-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, - 10343412, -6976290, -29828287, -10815811}, - }, - { - {27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, - 15372179, 17293797, 960709}, - {20263915, 11434237, -5765435, 11236810, 13505955, -10857102, - -16111345, 6493122, -19384511, 7639714}, - {-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, - 18006287, -16043750, 29994677, -15808121}, - }, - { - {9769828, 5202651, -24157398, -13631392, 
-28051003, -11561624, - -24613141, -13860782, -31184575, 709464}, - {12286395, 13076066, -21775189, -1176622, -25003198, 4057652, - -32018128, -8890874, 16102007, 13205847}, - {13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, - 8525972, 10151379, 10394400}, - }, - { - {4024660, -16137551, 22436262, 12276534, -9099015, -2686099, - 19698229, 11743039, -33302334, 8934414}, - {-15879800, -4525240, -8580747, -2934061, 14634845, -698278, - -9449077, 3137094, -11536886, 11721158}, - {17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, - 8835153, -9205489, -1280045}, - }, - { - {-461409, -7830014, 20614118, 16688288, -7514766, -4807119, - 22300304, 505429, 6108462, -6183415}, - {-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, - 29880583, -13483331, -26898490, -7867459}, - {-31975283, 5726539, 26934134, 10237677, -3173717, -605053, - 24199304, 3795095, 7592688, -14992079}, - }, - { - {21594432, -14964228, 17466408, -4077222, 32537084, 2739898, - 6407723, 12018833, -28256052, 4298412}, - {-20650503, -11961496, -27236275, 570498, 3767144, -1717540, - 13891942, -1569194, 13717174, 10805743}, - {-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, - -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - {12962541, 5311799, -10060768, 11658280, 18855286, -7954201, - 13286263, -12808704, -4381056, 9882022}, - {18512079, 11319350, -20123124, 15090309, 18818594, 5271736, - -22727904, 3666879, -23967430, -3299429}, - {-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, - -10084880, -6661110, -2403099, 5276065}, - }, - { - {30169808, -5317648, 26306206, -11750859, 27814964, 7069267, - 7152851, 3684982, 1449224, 13082861}, - {10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, - 15056736, -21016438, -8202000}, - {-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, - -26171976, 6482814, -10300080, -11060101}, - }, - { - {32869458, -5408545, 25609743, 15678670, -10687769, -15471071, - 26112421, 2521008, -22664288, 6904815}, - {29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, - 3841096, -29003639, -6657642}, - {10340844, -6630377, -18656632, -2278430, 12621151, -13339055, - 30878497, -11824370, -25584551, 5181966}, - }, - { - {25940115, -12658025, 17324188, -10307374, -8671468, 15029094, - 24396252, -16450922, -2322852, -12388574}, - {-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, - 12641087, 20603771, -6561742}, - {-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, - 1925523, 11914390, 4662781, 7820689}, - }, - { - {12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, - 12172924, 16136752, 15264020}, - {-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, - 10658213, 6671822, 19012087, 3772772}, - {3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, - -15762884, 20527771, 12988982}, - }, - { - {-14822485, -5797269, -3707987, 12689773, -898983, -10914866, - -24183046, -10564943, 3299665, -12424953}, - {-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, - 6461331, -25583147, 8991218}, - {-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, - -32948145, 7417950, -30242287, 1507265}, - }, - { - {29692663, 6829891, -10498800, 4334896, 20945975, -11906496, - -28887608, 8209391, 14606362, -10647073}, - {-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, - 9761487, 4170404, -2085325}, - {-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, - 22186522, 16002000, -14276837, -8400798}, - }, - 
{ - {-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, - -7113572, -9620092, 13240845, 10965870}, - {-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, - 4498947, 14147411, 29514390, 4302863}, - {-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, - -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - {-2244452, -754728, -4597030, -1066309, -6247172, 1455299, - -21647728, -9214789, -5222701, 12650267}, - {-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, - 13770293, -19134326, 10958663}, - {22470984, 12369526, 23446014, -5441109, -21520802, -9698723, - -11772496, -11574455, -25083830, 4271862}, - }, - { - {-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, - 75375, -4278529, -32526221, 8469673}, - {15854970, 4148314, -8893890, 7259002, 11666551, 13824734, - -30531198, 2697372, 24154791, -9460943}, - {15446137, -15806644, 29759747, 14019369, 30811221, -9610191, - -31582008, 12840104, 24913809, 9815020}, - }, - { - {-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, - -9103676, 13438769, 18735128, 9466238}, - {11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, - -10896103, -22728655, 16199064}, - {14576810, 379472, -26786533, -8317236, -29426508, -10812974, - -102766, 1876699, 30801119, 2164795}, - }, - { - {15995086, 3199873, 13672555, 13712240, -19378835, -4647646, - -13081610, -15496269, -13492807, 1268052}, - {-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, - -3470338, -12600221, -17055369, 3565904}, - {29210088, -9419337, -5919792, -4952785, 10834811, -13327726, - -16512102, -10820713, -27162222, -14030531}, - }, - { - {-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, - -29183421, -3769423, 2244111, -14001979}, - {-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, - -25673088, -16180800, 13491506, 4641841}, - {10813417, 643330, -19188515, -728916, 30292062, -16600078, - 27548447, -7721242, 14476989, -12767431}, - }, - { - {10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, - -1644259, -27912810, 12651324}, - {-31185513, -813383, 22271204, 11835308, 10201545, 15351028, - 17099662, 3988035, 21721536, -3148940}, - {10202177, -6545839, -31373232, -9574638, -32150642, -8119683, - -12906320, 3852694, 13216206, 14842320}, - }, - { - {-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, - -31500847, 13765824, -27434397, 9900184}, - {14465505, -13833331, -32133984, -14738873, -27443187, 12990492, - 33046193, 15796406, -7051866, -8040114}, - {30924417, -8279620, 6359016, -12816335, 16508377, 9071735, - -25488601, 15413635, 9524356, -7018878}, - }, - { - {12274201, -13175547, 32627641, -1785326, 6736625, 13267305, - 5237659, -5109483, 15663516, 4035784}, - {-2951309, 8903985, 17349946, 601635, -16432815, -4612556, - -13732739, -15889334, -22258478, 4659091}, - {-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, - 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - {-8858980, -2219056, 28571666, -10155518, -474467, -10105698, - -3801496, 278095, 23440562, -290208}, - {10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, - 11551483, -16571960, -7442864}, - {17932739, -12437276, -24039557, 10749060, 11316803, 7535897, - 22503767, 5561594, -3646624, 3898661}, - }, - { - {7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, - 7152530, 21831162, 1245233}, - {26958459, -14658026, 4314586, 8346991, -5677764, 11960072, - -32589295, -620035, -30402091, -16716212}, - {-12165896, 9166947, 33491384, 
13673479, 29787085, 13096535, - 6280834, 14587357, -22338025, 13987525}, - }, - { - {-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, - -4300898, -5124639, -7469781, -2858068}, - {9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, - 6439245, -14581012, 4091397}, - {-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, - -19622683, 12092163, 29077877, -14741988}, - }, - { - {5269168, -6859726, -13230211, -8020715, 25932563, 1763552, - -5606110, -5505881, -20017847, 2357889}, - {32264008, -15407652, -5387735, -1160093, -2091322, -3946900, - 23104804, -12869908, 5727338, 189038}, - {14609123, -8954470, -6000566, -16622781, -14577387, -7743898, - -26745169, 10942115, -25888931, -14884697}, - }, - { - {20513500, 5557931, -15604613, 7829531, 26413943, -2019404, - -21378968, 7471781, 13913677, -5137875}, - {-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, - -8940970, 14059180, 12878652, 8511905}, - {-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, - -30223418, 6812974, 5568676, -3127656}, - }, - { - {11630004, 12144454, 2116339, 13606037, 27378885, 15676917, - -17408753, -13504373, -14395196, 8070818}, - {27117696, -10007378, -31282771, -5570088, 1127282, 12772488, - -29845906, 10483306, -11552749, -1028714}, - {10637467, -5688064, 5674781, 1072708, -26343588, -6982302, - -1683975, 9177853, -27493162, 15431203}, - }, - { - {20525145, 10892566, -12742472, 12779443, -29493034, 16150075, - -28240519, 14943142, -15056790, -7935931}, - {-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, - -3239766, -3356550, 9594024}, - {-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, - -6492290, 13352335, -10977084}, - }, - { - {-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, - -29783850, -7752482, -13215537, -319204}, - {20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, - 15077870, -22750759, 14523817}, - {27406042, -6041657, 27423596, -4497394, 4996214, 10002360, - -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - {11374242, 12660715, 17861383, -12540833, 10935568, 1099227, - -13886076, -9091740, -27727044, 11358504}, - {-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, - 32676003, 11149336, -26123651, 4985768}, - {-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, - 13794114, -19414307, -15621255}, - }, - { - {6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, - 6970005, -1691065, -9004790}, - {1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, - -5475723, -16796596, -5031438}, - {-22273315, -13524424, -64685, -4334223, -18605636, -10921968, - -20571065, -7007978, -99853, -10237333}, - }, - { - {17747465, 10039260, 19368299, -4050591, -20630635, -16041286, - 31992683, -15857976, -29260363, -5511971}, - {31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, - -3744247, 4882242, -10626905}, - {29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, - 3272828, -5190932, -4162409}, - }, - { - {12501286, 4044383, -8612957, -13392385, -32430052, 5136599, - -19230378, -3529697, 330070, -3659409}, - {6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, - -8573892, -271295, 12071499}, - {-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, - -32769618, 1936675, -5159697, 3829363}, - }, - { - {28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, - -6567787, 26333140, 14267664}, - {-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, - 10004786, -8709488, -21761224, 
8930324}, - {-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, - 1541940, 4757911, -26491501, -16408940}, - }, - { - {13537262, -7759490, -20604840, 10961927, -5922820, -13218065, - -13156584, 6217254, -15943699, 13814990}, - {-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, - 9257833, -1956526, -1776914}, - {-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, - -29171540, 12361135, -18685978, 4578290}, - }, - { - {24579768, 3711570, 1342322, -11180126, -27005135, 14124956, - -22544529, 14074919, 21964432, 8235257}, - {-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, - -2981514, -1669206, 13006806, 2355433}, - {-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, - 27202044, 1719366, 1141648, -12796236}, - }, - { - {-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, - 13475066, -3133972, 32674895, 13715045}, - {11423335, -5468059, 32344216, 8962751, 24989809, 9241752, - -13265253, 16086212, -28740881, -15642093}, - {-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, - -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - {-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, - -4862407, -4906449, 27193557, 6245191}, - {-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, - 3260492, 22510453, 8577507}, - {-12632451, 11257346, -32692994, 13548177, -721004, 10879011, - 31168030, 13952092, -29571492, -3635906}, - }, - { - {3877321, -9572739, 32416692, 5405324, -11004407, -13656635, - 3759769, 11935320, 5611860, 8164018}, - {-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, - 32003002, -8832289, 5773085, -8422109}, - {-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, - 12376320, 31632953, 190926}, - }, - { - {-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, - -8288749, 4508564, -25341555, -3627528}, - {8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, - -14786005, -1672488, 827625}, - {-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, - -1800575, -14108036, -24878478, 1541286}, - }, - { - {2901347, -1117687, 3880376, -10059388, -17620940, -3612781, - -21802117, -3567481, 20456845, -1885033}, - {27019610, 12299467, -13658288, -1603234, -12861660, -4861471, - -19540150, -5016058, 29439641, 15138866}, - {21536104, -6626420, -32447818, -10690208, -22408077, 5175814, - -5420040, -16361163, 7779328, 109896}, - }, - { - {30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, - 12180118, 23177719, -554075}, - {26572847, 3405927, -31701700, 12890905, -19265668, 5335866, - -6493768, 2378492, 4439158, -13279347}, - {-22716706, 3489070, -9225266, -332753, 18875722, -1140095, - 14819434, -12731527, -17717757, -5461437}, - }, - { - {-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, - -820954, 2177225, 8550082, -15114165}, - {-18473302, 16596775, -381660, 15663611, 22860960, 15585581, - -27844109, -3582739, -23260460, -8428588}, - {-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, - -22725137, 15860482, -21902570, 1494193}, - }, - { - {-19562091, -14087393, -25583872, -9299552, 13127842, 759709, - 21923482, 16529112, 8742704, 12967017}, - {-28464899, 1553205, 32536856, -10473729, -24691605, -406174, - -8914625, -2933896, -29903758, 15553883}, - {21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, - 14513274, 19375923, -12647961}, - }, - { - {8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, - -6222716, 2862653, 9455043}, - {29306751, 5123106, 20245049, -14149889, 
9592566, 8447059, -2077124, - -2990080, 15511449, 4789663}, - {-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, - -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - {-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, - -6650416, -12936300, -18319198, 10212860}, - {2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, - 2600940, -9988298, -12506466}, - {-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, - 11344424, 864440, -2499677, -16710063}, - }, - { - {-26432803, 6148329, -17184412, -14474154, 18782929, -275997, - -22561534, 211300, 2719757, 4940997}, - {-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, - 21690126, 8518463, 26699843, 5276295}, - {-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, - 149635, -15452774, 7159369}, - }, - { - {9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, - 8312176, 22477218, -8403385}, - {18155857, -16504990, 19744716, 9006923, 15154154, -10538976, - 24256460, -4864995, -22548173, 9334109}, - {2986088, -4911893, 10776628, -3473844, 10620590, -7083203, - -21413845, 14253545, -22587149, 536906}, - }, - { - {4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, - 10589625, 10838060, -15420424}, - {-19342404, 867880, 9277171, -3218459, -14431572, -1986443, - 19295826, -15796950, 6378260, 699185}, - {7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, - 15693155, -5045064, -13373962}, - }, - { - {-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, - 31730678, -10962840, -3918636, -9669325}, - {10188286, -15770834, -7336361, 13427543, 22223443, 14896287, - 30743455, 7116568, -21786507, 5427593}, - {696102, 13206899, 27047647, -10632082, 15285305, -9853179, - 10798490, -4578720, 19236243, 12477404}, - }, - { - {-11229439, 11243796, -17054270, -8040865, -788228, -8167967, - -3897669, 11180504, -23169516, 7733644}, - {17800790, -14036179, -27000429, -11766671, 23887827, 3149671, - 23466177, -10538171, 10322027, 15313801}, - {26246234, 11968874, 32263343, -5468728, 6830755, -13323031, - -15794704, -101982, -24449242, 10890804}, - }, - { - {-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, - -14982212, 16484931, 25180797, -5334884}, - {-586574, 10376444, -32586414, -11286356, 19801893, 10997610, - 2276632, 9482883, 316878, 13820577}, - {-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, - 30756178, -7515054, 30696930, -3712849}, - }, - { - {32988917, -9603412, 12499366, 7910787, -10617257, -11931514, - -7342816, -9985397, -32349517, 7392473}, - {-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, - -30409476, -9134995, 25112947, -2926644}, - {-2504044, -436966, 25621774, -5678772, 15085042, -5479877, - -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - {-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, - -14876251, -1729667, 31234590, 6090599}, - {-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, - 15878753, -6970405, -9034768}, - {-27757857, 247744, -15194774, -9002551, 23288161, -10011936, - -23869595, 6503646, 20650474, 1804084}, - }, - { - {-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, - -10329713, 27842616, -202328}, - {-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, - 5031932, -11375082, 12714369}, - {20807691, -7270825, 29286141, 11421711, -27876523, -13868230, - -21227475, 1035546, -19733229, 12796920}, - }, - { - {12076899, -14301286, -8785001, -11848922, -25012791, 16400684, - -17591495, -12899438, 3480665, 
-15182815}, - {-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, - -24363064, -15921875, -33374054, 2771025}, - {-21389266, 421932, 26597266, 6860826, 22486084, -6737172, - -17137485, -4210226, -24552282, 15673397}, - }, - { - {-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, - -20271184, 4733254, 3727144, -12934448}, - {6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, - 7975683, 31123697, -10958981}, - {30069250, -11435332, 30434654, 2958439, 18399564, -976289, - 12296869, 9204260, -16432438, 9648165}, - }, - { - {32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, - 5248604, -26008332, -11377501}, - {17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, - 15298639, 2662509, -16297073}, - {-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, - 32087529, -1222777, 32247248, -14389861}, - }, - { - {14312628, 1221556, 17395390, -8700143, -4945741, -8684635, - -28197744, -9637817, -16027623, -13378845}, - {-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, - 9803137, 17597934, 2346211}, - {18510800, 15337574, 26171504, 981392, -22241552, 7827556, - -23491134, -11323352, 3059833, -11782870}, - }, - { - {10141598, 6082907, 17829293, -1947643, 9830092, 13613136, - -25556636, -5544586, -33502212, 3592096}, - {33114168, -15889352, -26525686, -13343397, 33076705, 8716171, - 1151462, 1521897, -982665, -6837803}, - {-32939165, -4255815, 23947181, -324178, -33072974, -12305637, - -16637686, 3891704, 26353178, 693168}, - }, - { - {30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, - -400668, 31375464, 14369965}, - {-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, - 32732230, -13108839, 17901441, 16011505}, - {18171223, -11934626, -12500402, 15197122, -11038147, -15230035, - -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - {5975908, -5243188, -19459362, -9681747, -11541277, 14015782, - -23665757, 1228319, 17544096, -10593782}, - {5811932, -1715293, 3442887, -2269310, -18367348, -8359541, - -18044043, -15410127, -5565381, 12348900}, - {-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, - -24849353, 8141295, -10632534, -585479}, - }, - { - {-12675304, 694026, -5076145, 13300344, 14015258, -14451394, - -9698672, -11329050, 30944593, 1130208}, - {8247766, -6710942, -26562381, -7709309, -14401939, -14648910, - 4652152, 2488540, 23550156, -271232}, - {17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, - -5908146, -408818, -137719}, - }, - { - {16091085, -16253926, 18599252, 7340678, 2137637, -1221657, - -3364161, 14550936, 3260525, -7166271}, - {-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, - -23028869, -13204905, -12748722, 2701326}, - {-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, - -10018363, 9276971, 11329923, 1862132}, - }, - { - {14763076, -15903608, -30918270, 3689867, 3511892, 10313526, - -21951088, 12219231, -9037963, -940300}, - {8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, - -2909717, -15438168, 11595570}, - {15214962, 3537601, -26238722, -14058872, 4418657, -15230761, - 13947276, 10730794, -13489462, -4363670}, - }, - { - {-2538306, 7682793, 32759013, 263109, -29984731, -7955452, - -22332124, -10188635, 977108, 699994}, - {-12466472, 4195084, -9211532, 550904, -15565337, 12917920, - 19118110, -439841, -30534533, -14337913}, - {31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, - -10051775, 12493932, -5409317}, - }, - { - {-25680606, 5260744, -19235809, -6284470, -3695942, 
16566087, - 27218280, 2607121, 29375955, 6024730}, - {842132, -2794693, -4763381, -8722815, 26332018, -12405641, - 11831880, 6985184, -9940361, 2854096}, - {-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, - 960770, 12121869, 16648078}, - }, - { - {-15218652, 14667096, -13336229, 2013717, 30598287, -464137, - -31504922, -7882064, 20237806, 2838411}, - {-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, - 12544294, -13470457, 1068881, -12499905}, - {-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, - -8486907, -2630053, 12521378, 4845654}, - }, - { - {-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, - 3409348, -873400, -6482306, -12885870}, - {-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, - 10477734, -1240216, -3113227, 13974498}, - {12966261, 15550616, -32038948, -1615346, 21025980, -629444, - 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - {14741879, -14946887, 22177208, -11721237, 1279741, 8058600, - 11758140, 789443, 32195181, 3895677}, - {10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, - -3566119, -8982069, 4429647}, - {-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, - -7135870, -11642895, 18047436, -15281743}, - }, - { - {-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, - 10993114, -12850837, -17620701, -9408468}, - {21987233, 700364, -24505048, 14972008, -7774265, -5718395, - 32155026, 2581431, -29958985, 8773375}, - {-25568350, 454463, -13211935, 16126715, 25240068, 8594567, - 20656846, 12017935, -7874389, -13920155}, - }, - { - {6028182, 6263078, -31011806, -11301710, -818919, 2461772, - -31841174, -5468042, -1721788, -2776725}, - {-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, - -4166698, 28408820, 6816612}, - {-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, - 20613181, 13982702, -10339570, 5067943}, - }, - { - {-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, - -19719286, 12746132, 5331210, -10105944}, - {30528811, 3601899, -1957090, 4619785, -27361822, -15436388, - 24180793, -12570394, 27679908, -1648928}, - {9402404, -13957065, 32834043, 10838634, -26580150, -13237195, - 26653274, -8685565, 22611444, -12715406}, - }, - { - {22190590, 1118029, 22736441, 15130463, -30460692, -5991321, - 19189625, -4648942, 4854859, 6622139}, - {-8310738, -2953450, -8262579, -3388049, -10401731, -271929, - 13424426, -3567227, 26404409, 13001963}, - {-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, - -26064365, -11621720, -15405155, 11020693}, - }, - { - {1866042, -7949489, -7898649, -10301010, 12483315, 13477547, - 3175636, -12424163, 28761762, 1406734}, - {-448555, -1777666, 13018551, 3194501, -9580420, -11161737, - 24760585, -4347088, 25577411, -13378680}, - {-24290378, 4759345, -690653, -1852816, 2066747, 10693769, - -29595790, 9884936, -9368926, 4745410}, - }, - { - {-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, - -15462008, -11311852, 10931924, -11931931}, - {-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, - -22853429, 10856641, -20470770, 13434654}, - {22759489, -10073434, -16766264, -1871422, 13637442, -10168091, - 1765144, -12654326, 28445307, -5364710}, - }, - { - {29875063, 12493613, 2795536, -3786330, 1710620, 15181182, - -10195717, -8788675, 9074234, 1167180}, - {-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, - -18716888, -9535498, 3843903, 9367684}, - {-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, - 8601684, -139197, 4242895}, - }, - }, 
- { - { - {22092954, -13191123, -2042793, -11968512, 32186753, -11517388, - -6574341, 2470660, -27417366, 16625501}, - {-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, - 2602725, -27351616, 14247413}, - {6314175, -10264892, -32772502, 15957557, -10157730, 168750, - -8618807, 14290061, 27108877, -1180880}, - }, - { - {-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, - 33547976, -11058889, -27148451, 981874}, - {22833440, 9293594, -32649448, -13618667, -9136966, 14756819, - -22928859, -13970780, -10479804, -16197962}, - {-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, - 22680049, 13906969, -15933690, 3797899}, - }, - { - {21721356, -4212746, -12206123, 9310182, -3882239, -13653110, - 23740224, -2709232, 20491983, -8042152}, - {9209270, -15135055, -13256557, -6167798, -731016, 15289673, - 25947805, 15286587, 30997318, -6703063}, - {7392032, 16618386, 23946583, -8039892, -13265164, -1533858, - -14197445, -2321576, 17649998, -250080}, - }, - { - {-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, - -15241566, -9525724, -2233253, 7662146}, - {-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, - 7335080, -8472199, -3174674, 3440183}, - {-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, - 40450, -4431835, 4862400, 1133}, - }, - { - {-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, - 7258061, 311861, -30594991, -7379421}, - {-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, - 16527196, 18278453, 15405622}, - {-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, - -13313598, 843523, -21875062, 13626197}, - }, - { - {2281448, -13487055, -10915418, -2609910, 1879358, 16164207, - -10783882, 3953792, 13340839, 15928663}, - {31727126, -7179855, -18437503, -8283652, 2875793, -16390330, - -25269894, -7014826, -23452306, 5964753}, - {4100420, -5959452, -17179337, 6017714, -18705837, 12227141, - -26684835, 11344144, 2538215, -7570755}, - }, - { - {-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, - -20474983, 1485421, -629256, -15958862}, - {-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, - -20205425, -13191288, 11659922, -11115118}, - {26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, - -10170080, 33100372, -1306171}, - }, - { - {15121113, -5201871, -10389905, 15427821, -27509937, -15992507, - 21670947, 4486675, -5931810, -14466380}, - {16166486, -9483733, -11104130, 6023908, -31926798, -1364923, - 2340060, -16254968, -10735770, -10039824}, - {28042865, -3557089, -12126526, 12259706, -3717498, -6945899, - 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - {-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, - 4598332, -6159431, -14117438}, - {-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, - 696309, 50292, -20095739, 11763584}, - {-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, - -12613632, -19773211, -10713562}, - }, - { - {30464590, -11262872, -4127476, -12734478, 19835327, -7105613, - -24396175, 2075773, -17020157, 992471}, - {18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, - 8080033, -11574335, -10601610}, - {19598397, 10334610, 12555054, 2555664, 18821899, -10339780, - 21873263, 16014234, 26224780, 16452269}, - }, - { - {-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, - -7618186, -20533829, 3698650}, - {14187449, 3448569, -10636236, -10810935, -22663880, -3433596, - 7268410, -10890444, 27394301, 12015369}, - {19695761, 16087646, 28032085, 12999827, 
6817792, 11427614, - 20244189, -1312777, -13259127, -3402461}, - }, - { - {30860103, 12735208, -1888245, -4699734, -16974906, 2256940, - -8166013, 12298312, -8550524, -10393462}, - {-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, - -5789354, -15118654, -4976164, 12651793}, - {-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, - -13118820, -16517902, 9768698, -2533218}, - }, - { - {-24719459, 1894651, -287698, -4704085, 15348719, -8156530, - 32767513, 12765450, 4940095, 10678226}, - {18860224, 15980149, -18987240, -1562570, -26233012, -11071856, - -7843882, 13944024, -24372348, 16582019}, - {-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, - -11704054, 15444560, -11003761, 7989037}, - }, - { - {31490452, 5568061, -2412803, 2182383, -32336847, 4531686, - -32078269, 6200206, -19686113, -14800171}, - {-17308668, -15879940, -31522777, -2831, -32887382, 16375549, - 8680158, -16371713, 28550068, -6857132}, - {-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, - -30039981, 4364038, 1155602, 5988841}, - }, - { - {21890435, -13272907, -12624011, 12154349, -7831873, 15300496, - 23148983, -4470481, 24618407, 8283181}, - {-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, - 3070187, -7025928, 1466169, 10740210}, - {-1509399, -15488185, -13503385, -10655916, 32799044, 909394, - -13938903, -5779719, -32164649, -15327040}, - }, - { - {3960823, -14267803, -28026090, -15918051, -19404858, 13146868, - 15567327, 951507, -3260321, -573935}, - {24740841, 5052253, -30094131, 8961361, 25877428, 6165135, - -24368180, 14397372, -7380369, -6144105}, - {-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, - -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - {793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, - -4885251, -9906200, -621852}, - {5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, - 1468826, -6171428, -15186581}, - {-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, - -30404353, -9871238, -1558923, -9863646}, - }, - { - {10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, - 14783338, -30581476, -15757844}, - {10566929, 12612572, -31944212, 11118703, -12633376, 12362879, - 21752402, 8822496, 24003793, 14264025}, - {27713862, -7355973, -11008240, 9227530, 27050101, 2504721, - 23886875, -13117525, 13958495, -5732453}, - }, - { - {-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, - -31889399, -10041781, 7340521, -15410068}, - {4646514, -8011124, -22766023, -11532654, 23184553, 8566613, - 31366726, -1381061, -15066784, -10375192}, - {-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, - 27584817, 3093888, -8843694, 3849921}, - }, - { - {-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, - 32477045, -9017955, 5002294, -15550259}, - {-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, - 16489530, 13378448, -25845716, 12741426}, - {-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, - 24306472, 15852464, 28834118, -7646072}, - }, - { - {-17335748, -9107057, -24531279, 9434953, -8472084, -583362, - -13090771, 455841, 20461858, 5491305}, - {13669248, -16095482, -12481974, -10203039, -14569770, -11893198, - -24995986, 11293807, -28588204, -9421832}, - {28497928, 6272777, -33022994, 14470570, 8906179, -1225630, - 18504674, -14165166, 29867745, -8795943}, - }, - { - {-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, - -6367600, -13175392, 22853429, -4012011}, - {24191378, 16712145, -13931797, 15217831, 14542237, 1646131, - 
18603514, -11037887, 12876623, -2112447}, - {17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, - 608397, 16031844, 3723494}, - }, - { - {-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, - 17558842, -7872890, 23896954, -4314245}, - {-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, - 7229064, -9919646, -8826859}, - {28816045, 298879, -28165016, -15920938, 19000928, -1665890, - -12680833, -2949325, -18051778, -2082915}, - }, - { - {16000882, -344896, 3493092, -11447198, -29504595, -13159789, - 12577740, 16041268, -19715240, 7847707}, - {10151868, 10572098, 27312476, 7922682, 14825339, 4723128, - -32855931, -6519018, -10020567, 3852848}, - {-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, - 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - {-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, - 3303702, 15490, -27548796, 12314391}, - {15683520, -6003043, 18109120, -9980648, 15337968, -5997823, - -16717435, 15921866, 16103996, -3731215}, - {-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, - -19273607, 5402699, -29815713, -9841101}, - }, - { - {23190676, 2384583, -32714340, 3462154, -29903655, -1529132, - -11266856, 8911517, -25205859, 2739713}, - {21374101, -3554250, -33524649, 9874411, 15377179, 11831242, - -33529904, 6134907, 4931255, 11987849}, - {-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, - 13861388, -30076310, 10117930}, - }, - { - {-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, - -6325503, 6704079, 12890019, 15728940}, - {-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, - -10428139, 12885167, 8311031}, - {-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, - 26423267, 4384730, 1888765, -5435404}, - }, - { - {-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, - -32251644, -12707869, -19464434, -3340243}, - {-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, - 14845197, 17151279, -9854116}, - {-24830458, -12733720, -15165978, 10367250, -29530908, -265356, - 22825805, -7087279, -16866484, 16176525}, - }, - { - {-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, - -10363426, -28746253, -10197509}, - {-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, - 23632037, -1940610, 32808310, 1099883}, - {15030977, 5768825, -27451236, -2887299, -6427378, -15361371, - -15277896, -6809350, 2051441, -15225865}, - }, - { - {-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, - -14154188, -22686354, 16633660}, - {4577086, -16752288, 13249841, -15304328, 19958763, -14537274, - 18559670, -10759549, 8402478, -9864273}, - {-28406330, -1051581, -26790155, -907698, -17212414, -11030789, - 9453451, -14980072, 17983010, 9967138}, - }, - { - {-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, - 7806337, 17507396, 3651560}, - {-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, - 26556809, -5574557, -18553322, -11357135}, - {2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, - 8459447, -5605463, -7621941}, - }, - { - {-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, - -849066, 17258084, -7977739}, - {18164541, -10595176, -17154882, -1542417, 19237078, -9745295, - 23357533, -15217008, 26908270, 12150756}, - {-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, - -5537701, -32302074, 16215819}, - }, - }, - { - { - {-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, - 32574489, 12532905, -7503072, -8675347}, - 
{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, - 254968, 7168080, 21676107, -1943028}, - {21260961, -8424752, -16831886, -11920822, -23677961, 3968121, - -3651949, -6215466, -3556191, -7913075}, - }, - { - {16544754, 13250366, -16804428, 15546242, -4583003, 12757258, - -2462308, -8680336, -18907032, -9662799}, - {-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, - 26820651, 16690659, 25459437, -4564609}, - {-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, - 9142795, -2391602, -6432418, -1644817}, - }, - { - {-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, - -27457225, -16344658, 6335692, 7249989}, - {-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, - -30272269, 2682242, 25993170, -12478523}, - {4364628, 5930691, 32304656, -10044554, -8054781, 15091131, - 22857016, -10598955, 31820368, 15075278}, - }, - { - {31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, - -9650886, -17970238, 12833045}, - {19073683, 14851414, -24403169, -11860168, 7625278, 11091125, - -19619190, 2074449, -9413939, 14905377}, - {24483667, -11935567, -2518866, -11547418, -1553130, 15355506, - -25282080, 9253129, 27628530, -7555480}, - }, - { - {17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, - -9157582, -14110875, 15297016}, - {510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, - -11864220, 8683221, 2921426}, - {18606791, 11874196, 27155355, -5281482, -24031742, 6265446, - -25178240, -1278924, 4674690, 13890525}, - }, - { - {13609624, 13069022, -27372361, -13055908, 24360586, 9592974, - 14977157, 9835105, 4389687, 288396}, - {9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, - 8317628, 23388070, 16052080}, - {12720016, 11937594, -31970060, -5028689, 26900120, 8561328, - -20155687, -11632979, -14754271, -10812892}, - }, - { - {15961858, 14150409, 26716931, -665832, -22794328, 13603569, - 11829573, 7467844, -28822128, 929275}, - {11038231, -11582396, -27310482, -7316562, -10498527, -16307831, - -23479533, -9371869, -21393143, 2465074}, - {20017163, -4323226, 27915242, 1529148, 12396362, 15675764, - 13817261, -9658066, 2463391, -4622140}, - }, - { - {-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, - 9583558, 12851107, 4003896, 12673717}, - {-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, - 14741514, -9103726, 7903886, 2348101}, - {24536016, -16515207, 12715592, -3862155, 1511293, 10047386, - -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - {-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, - 18873298, -7297090, -32297756, 15221632}, - {-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, - -21343950, 2095755, 29769758, 6593415}, - {-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, - -6118678, 30958054, 8292160}, - }, - { - {31429822, -13959116, 29173532, 15632448, 12174511, -2760094, - 32808831, 3977186, 26143136, -3148876}, - {22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, - -1674433, -3758243, -2304625}, - {-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, - -1612713, -1535569, -16664475, 8194478}, - }, - { - {27338066, -7507420, -7414224, 10140405, -19026427, -6589889, - 27277191, 8855376, 28572286, 3005164}, - {26287124, 4821776, 25476601, -4145903, -3764513, -15788984, - -18008582, 1182479, -26094821, -13079595}, - {-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, - -21876275, -13982627, 32208683, -1198248}, - }, - { - {-16657702, 2817643, -10286362, 14811298, 
6024667, 13349505, - -27315504, -10497842, -27672585, -11539858}, - {15941029, -9405932, -21367050, 8062055, 31876073, -238629, - -15278393, -1444429, 15397331, -4130193}, - {8934485, -13485467, -23286397, -13423241, -32446090, 14047986, - 31170398, -1441021, -27505566, 15087184}, - }, - { - {-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, - -15502406, 11461896, 16788528, -5868942}, - {-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, - -3770287, -10323320, 31322514, -11615635}, - {21426655, -5650218, -13648287, -5347537, -28812189, -4920970, - -18275391, -14621414, 13040862, -12112948}, - }, - { - {11293895, 12478086, -27136401, 15083750, -29307421, 14748872, - 14555558, -13417103, 1613711, 4896935}, - {-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, - 2825960, -4897045, -23971776, -11267415}, - {-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, - 20615400, 12405433, -23753030, -8436416}, - }, - { - {-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, - 4378436, 2432030, 23097949, -566018}, - {4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, - 10103221, -18512313, 2424778}, - {366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, - 1344109, -3642553, 12412659}, - }, - { - {-24001791, 7690286, 14929416, -168257, -32210835, -13412986, - 24162697, -15326504, -3141501, 11179385}, - {18289522, -14724954, 8056945, 16430056, -21729724, 7842514, - -6001441, -1486897, -18684645, -11443503}, - {476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, - 13403813, 11052904, 5219329}, - }, - }, - { - { - {20678546, -8375738, -32671898, 8849123, -5009758, 14574752, - 31186971, -3973730, 9014762, -8579056}, - {-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, - -33102500, 9160280, 8473550, -3256838}, - {24900749, 14435722, 17209120, -15292541, -22592275, 9878983, - -7689309, -16335821, -24568481, 11788948}, - }, - { - {-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, - -20037437, 10410733, -24568470, -1458691}, - {-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, - 11871841, -12505194, -18513325, 8464118}, - {-23400612, 8348507, -14585951, -861714, -3950205, -6373419, - 14325289, 8628612, 33313881, -8370517}, - }, - { - {-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, - -24805667, -10236854, -8940735, -5818269}, - {-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, - 15989197, -12838188, 28358192, -4253904}, - {-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, - -16637684, 4072016, -5351664, 5596589}, - }, - { - {-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, - 29266239, 2557221, 1768301, 15373193}, - {-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, - -4504991, -24660491, 3442910}, - {-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, - 22597931, 7176455, -18585478, 13365930}, - }, - { - {-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, - -8570186, -9689599, -3031667}, - {25008904, -10771599, -4305031, -9638010, 16265036, 15721635, - 683793, -11823784, 15723479, -15163481}, - {-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, - 11879682, 5400171, 519526, -1235876}, - }, - { - {22258397, -16332233, -7869817, 14613016, -22520255, -2950923, - -20353881, 7315967, 16648397, 7605640}, - {-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, - 23994942, -5281555, -9468848, 4763278}, - {-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, - 31088447, -7764523, 
-11356529, 728112}, - }, - { - {26047220, -11751471, -6900323, -16521798, 24092068, 9158119, - -4273545, -12555558, -29365436, -5498272}, - {17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, - 12327945, 10750447, 10014012}, - {-10312768, 3936952, 9156313, -8897683, 16498692, -994647, - -27481051, -666732, 3424691, 7540221}, - }, - { - {30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, - -16317219, -9244265, 15258046}, - {13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, - 2711395, 1062915, -5136345}, - {-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, - -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - {19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, - -6101885, 18638003, -11174937}, - {31395534, 15098109, 26581030, 8030562, -16527914, -5007134, - 9012486, -7584354, -6643087, -5442636}, - {-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, - 9677543, -32294889, -6456008}, - }, - { - {-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, - -7839692, -7852844, -8138429}, - {-15236356, -15433509, 7766470, 746860, 26346930, -10221762, - -27333451, 10754588, -9431476, 5203576}, - {31834314, 14135496, -770007, 5159118, 20917671, -16768096, - -7467973, -7337524, 31809243, 7347066}, - }, - { - {-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, - 19797970, -12211255, 15192876, -2087490}, - {-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, - 10609330, 12694420, 33473243, -13382104}, - {33184999, 11180355, 15832085, -11385430, -1633671, 225884, - 15089336, -11023903, -6135662, 14480053}, - }, - { - {31308717, -5619998, 31030840, -1897099, 15674547, -6582883, - 5496208, 13685227, 27595050, 8737275}, - {-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, - -31008351, -12610604, 26498114, 66511}, - {22644454, -8761729, -16671776, 4884562, -3105614, -13559366, - 30540766, -4286747, -13327787, -7515095}, - }, - { - {-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, - 8205540, 13585437, -17127465, 15115439}, - {23711543, -672915, 31206561, -8362711, 6164647, -9709987, - -33535882, -1426096, 8236921, 16492939}, - {-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, - 19574902, 10071562, 6708380, -6222424}, - }, - { - {2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, - 9328700, 29955601, -11678310}, - {3096359, 9271816, -21620864, -15521844, -14847996, -7592937, - -25892142, -12635595, -9917575, 6216608}, - {-32615849, 338663, -25195611, 2510422, -29213566, -13820213, - 24822830, -6146567, -26767480, 7525079}, - }, - { - {-23066649, -13985623, 16133487, -7896178, -3389565, 778788, - -910336, -2782495, -19386633, 11994101}, - {21691500, -13624626, -641331, -14367021, 3285881, -3483596, - -25064666, 9718258, -7477437, 13381418}, - {18445390, -4202236, 14979846, 11622458, -1727110, -3582980, - 23111648, -6375247, 28535282, 15779576}, - }, - { - {30098053, 3089662, -9234387, 16662135, -21306940, 11308411, - -14068454, 12021730, 9955285, -16303356}, - {9734894, -14576830, -7473633, -9138735, 2060392, 11313496, - -18426029, 9924399, 20194861, 13380996}, - {-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, - -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - {-26016874, -219943, 21339191, -41388, 19745256, -2878700, - -29637280, 2227040, 21612326, -545728}, - {-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, - 25764461, 12243797, -20856566, 11649658}, - {-10031494, 11262626, 27384172, 
2271902, 26947504, -15997771, 39944, - 6114064, 33514190, 2333242}, - }, - { - {-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, - -6679750, -12670638, 24350578, -13450001}, - {-4116307, -11271533, -23886186, 4843615, -30088339, 690623, - -31536088, -10406836, 8317860, 12352766}, - {18200138, -14475911, -33087759, -2696619, -23702521, -9102511, - -23552096, -2287550, 20712163, 6719373}, - }, - { - {26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, - -3763210, 26224235, -3297458}, - {-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, - 21728352, 9493610, 18620611, -16428628}, - {-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, - -5269471, -9725556, -30701573, -16479657}, - }, - { - {-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, - 12248509, -5240639, 13735342, 1934062}, - {25089769, 6742589, 17081145, -13406266, 21909293, -16067981, - -15136294, -3765346, -21277997, 5473616}, - {31883677, -7961101, 1083432, -11572403, 22828471, 13290673, - -7125085, 12469656, 29111212, -5451014}, - }, - { - {24244947, -15050407, -26262976, 2791540, -14997599, 16666678, - 24367466, 6388839, -10295587, 452383}, - {-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, - -24236251, -5915248, 15766062, 8407814}, - {-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, - -8917023, -4388953, -8067909, 2276718}, - }, - { - {30157918, 12924066, -17712050, 9245753, 19895028, 3368142, - -23827587, 5096219, 22740376, -7303417}, - {2041139, -14256350, 7783687, 13876377, -25946985, -13352459, - 24051124, 13742383, -15637599, 13295222}, - {33338237, -8505733, 12532113, 7977527, 9106186, -1715251, - -17720195, -4612972, -4451357, -14669444}, - }, - { - {-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, - -2469266, -4141880, 7770569, 9620597}, - {23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, - -1694323, -33502340, -14767970}, - {1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, - 1220118, 30494170, -11440799}, - }, - { - {-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, - -26739026, 926050, -1684339, -13333647}, - {13908495, -3549272, 30919928, -6273825, -21521863, 7989039, - 9021034, 9078865, 3353509, 4033511}, - {-29663431, -15113610, 32259991, -344482, 24295849, -12912123, - 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - {9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, - 2625015, 28431036, -16771834}, - {-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, - -22545972, 14150565, 15970762, 4099461}, - {29262576, 16756590, 26350592, -8793563, 8529671, -11208050, - 13617293, -9937143, 11465739, 8317062}, - }, - { - {-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, - 14898637, 3848455, 20969334, -5157516}, - {-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, - -21610826, -3649888, 11177095, 14989547}, - {-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, - 13515641, 2581286, -28487508, 9930240}, - }, - { - {-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, - 18345767, -13403753, 16291481, -5314038}, - {-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, - 6957617, 4368891, 9788741}, - {16660756, 7281060, -10830758, 12911820, 20108584, -8101676, - -21722536, -8613148, 16250552, -11111103}, - }, - { - {-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, - 10604807, -30190403, 4782747}, - {-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, - -9981571, 4383045, 
22546403, 437323}, - {31665577, -12180464, -16186830, 1491339, -18368625, 3294682, - 27343084, 2786261, -30633590, -14097016}, - }, - { - {-14467279, -683715, -33374107, 7448552, 19294360, 14334329, - -19690631, 2355319, -19284671, -6114373}, - {15121312, -15796162, 6377020, -6031361, -10798111, -12957845, - 18952177, 15496498, -29380133, 11754228}, - {-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, - 7141596, 11724556, 22761615, -10134141}, - }, - { - {16918416, 11729663, -18083579, 3022987, -31015732, -13339659, - -28741185, -12227393, 32851222, 11717399}, - {11166634, 7338049, -6722523, 4531520, -29468672, -7302055, - 31474879, 3483633, -1193175, -4030831}, - {-185635, 9921305, 31456609, -13536438, -12013818, 13348923, - 33142652, 6546660, -19985279, -3948376}, - }, - { - {-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, - -8537131, -12833048, -30772034, -15486313}, - {-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, - -31135347, -16049879, 10928917, 3011958}, - {-6957757, -15594337, 31696059, 334240, 29576716, 14796075, - -30831056, -12805180, 18008031, 10258577}, - }, - { - {-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, - -1853465, 1367120, 25127874, 6671743}, - {29701166, -14373934, -10878120, 9279288, -17568, 13127210, - 21382910, 11042292, 25838796, 4642684}, - {-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, - 30468147, -13900640, 18423289, 4177476}, - }, - }, -}; - -static uint8_t negative(signed char b) { - uint32_t x = b; - x >>= 31; /* 1: yes; 0: no */ - return x; -} - -static void table_select(ge_precomp *t, int pos, signed char b) { - ge_precomp minust; - uint8_t bnegative = negative(b); - uint8_t babs = b - ((uint8_t)((-bnegative) & b) << 1); - - ge_precomp_0(t); - cmov(t, &k25519Precomp[pos][0], equal(babs, 1)); - cmov(t, &k25519Precomp[pos][1], equal(babs, 2)); - cmov(t, &k25519Precomp[pos][2], equal(babs, 3)); - cmov(t, &k25519Precomp[pos][3], equal(babs, 4)); - cmov(t, &k25519Precomp[pos][4], equal(babs, 5)); - cmov(t, &k25519Precomp[pos][5], equal(babs, 6)); - cmov(t, &k25519Precomp[pos][6], equal(babs, 7)); - cmov(t, &k25519Precomp[pos][7], equal(babs, 8)); - fe_copy(minust.yplusx, t->yminusx); - fe_copy(minust.yminusx, t->yplusx); - fe_neg(minust.xy2d, t->xy2d); - cmov(t, &minust, bnegative); -} - -/* h = a * B - * where a = a[0]+256*a[1]+...+256^31 a[31] - * B is the Ed25519 base point (x,4/5) with x positive. 
- * - * Preconditions: - * a[31] <= 127 */ -void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) { - signed char e[64]; - signed char carry; - ge_p1p1 r; - ge_p2 s; - ge_precomp t; - int i; - - for (i = 0; i < 32; ++i) { - e[2 * i + 0] = (a[i] >> 0) & 15; - e[2 * i + 1] = (a[i] >> 4) & 15; - } - /* each e[i] is between 0 and 15 */ - /* e[63] is between 0 and 7 */ - - carry = 0; - for (i = 0; i < 63; ++i) { - e[i] += carry; - carry = e[i] + 8; - carry >>= 4; - e[i] -= carry << 4; - } - e[63] += carry; - /* each e[i] is between -8 and 8 */ - - ge_p3_0(h); - for (i = 1; i < 64; i += 2) { - table_select(&t, i / 2, e[i]); - ge_madd(&r, h, &t); - x25519_ge_p1p1_to_p3(h, &r); - } - - ge_p3_dbl(&r, h); - x25519_ge_p1p1_to_p2(&s, &r); - ge_p2_dbl(&r, &s); - x25519_ge_p1p1_to_p2(&s, &r); - ge_p2_dbl(&r, &s); - x25519_ge_p1p1_to_p2(&s, &r); - ge_p2_dbl(&r, &s); - x25519_ge_p1p1_to_p3(h, &r); - - for (i = 0; i < 64; i += 2) { - table_select(&t, i / 2, e[i]); - ge_madd(&r, h, &t); - x25519_ge_p1p1_to_p3(h, &r); - } -} - -#endif - -static void cmov_cached(ge_cached *t, ge_cached *u, uint8_t b) { - fe_cmov(t->YplusX, u->YplusX, b); - fe_cmov(t->YminusX, u->YminusX, b); - fe_cmov(t->Z, u->Z, b); - fe_cmov(t->T2d, u->T2d, b); -} - -/* r = scalar * A. - * where a = a[0]+256*a[1]+...+256^31 a[31]. */ -void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A) { - ge_p2 Ai_p2[8]; - ge_cached Ai[16]; - ge_p1p1 t; - - ge_cached_0(&Ai[0]); - x25519_ge_p3_to_cached(&Ai[1], A); - ge_p3_to_p2(&Ai_p2[1], A); - - unsigned i; - for (i = 2; i < 16; i += 2) { - ge_p2_dbl(&t, &Ai_p2[i / 2]); - ge_p1p1_to_cached(&Ai[i], &t); - if (i < 8) { - x25519_ge_p1p1_to_p2(&Ai_p2[i], &t); - } - x25519_ge_add(&t, A, &Ai[i]); - ge_p1p1_to_cached(&Ai[i + 1], &t); - if (i < 7) { - x25519_ge_p1p1_to_p2(&Ai_p2[i + 1], &t); - } - } - - ge_p2_0(r); - ge_p3 u; - - for (i = 0; i < 256; i += 4) { - ge_p2_dbl(&t, r); - x25519_ge_p1p1_to_p2(r, &t); - ge_p2_dbl(&t, r); - x25519_ge_p1p1_to_p2(r, &t); - ge_p2_dbl(&t, r); - x25519_ge_p1p1_to_p2(r, &t); - ge_p2_dbl(&t, r); - x25519_ge_p1p1_to_p3(&u, &t); - - uint8_t index = scalar[31 - i/8]; - index >>= 4 - (i & 4); - index &= 0xf; - - unsigned j; - ge_cached selected; - ge_cached_0(&selected); - for (j = 0; j < 16; j++) { - cmov_cached(&selected, &Ai[j], equal(j, index)); - } - - x25519_ge_add(&t, &u, &selected); - x25519_ge_p1p1_to_p2(r, &t); - } -} - -static void slide(signed char *r, const uint8_t *a) { - int i; - int b; - int k; - - for (i = 0; i < 256; ++i) { - r[i] = 1 & (a[i >> 3] >> (i & 7)); - } - - for (i = 0; i < 256; ++i) { - if (r[i]) { - for (b = 1; b <= 6 && i + b < 256; ++b) { - if (r[i + b]) { - if (r[i] + (r[i + b] << b) <= 15) { - r[i] += r[i + b] << b; - r[i + b] = 0; - } else if (r[i] - (r[i + b] << b) >= -15) { - r[i] -= r[i + b] << b; - for (k = i + b; k < 256; ++k) { - if (!r[k]) { - r[k] = 1; - break; - } - r[k] = 0; - } - } else { - break; - } - } - } - } - } -} - -static const ge_precomp Bi[8] = { - { - {25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, - -11754271, -6079156, 2047605}, - {-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, - 5043384, 19500929, -15469378}, - {-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, - 11864899, -24514362, -4438546}, - }, - { - {15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, - -14772189, 28944400, -1550024}, - {16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, - -11775962, 7689662, 11199574}, - {30464156, -5976125, 
-11779434, -15670865, 23220365, 15915852, 7512774, - 10017326, -17749093, -9920357}, - }, - { - {10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, - 14515107, -15438304, 10819380}, - {4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, - 12483688, -12668491, 5581306}, - {19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, - 13850243, -23678021, -15815942}, - }, - { - {5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, - 5230134, -23952439, -15175766}, - {-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, - 16520125, 30598449, 7715701}, - {28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, - 1370708, 29794553, -1409300}, - }, - { - {-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, - -1361450, -13062696, 13821877}, - {-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, - -7212327, 18853322, -14220951}, - {4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, - -10431137, 2207753, -3209784}, - }, - { - {-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, - -663000, -31111463, -16132436}, - {25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, - 15725684, 171356, 6466918}, - {23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, - -14088058, -30714912, 16193877}, - }, - { - {-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, - 4729455, -18074513, 9256800}, - {-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, - 9761698, -19827198, 630305}, - {-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, - -15960994, -2449256, -14291300}, - }, - { - {-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, - 15033784, 25105118, -7894876}, - {-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, - 1573892, -2625887, 2198790, -15804619}, - {-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, - -16236442, -32461234, -12290683}, - }, -}; - -/* r = a * A + b * B - * where a = a[0]+256*a[1]+...+256^31 a[31]. - * and b = b[0]+256*b[1]+...+256^31 b[31]. - * B is the Ed25519 base point (x,4/5) with x positive. 
*/ -static void ge_double_scalarmult_vartime(ge_p2 *r, const uint8_t *a, - const ge_p3 *A, const uint8_t *b) { - signed char aslide[256]; - signed char bslide[256]; - ge_cached Ai[8]; /* A,3A,5A,7A,9A,11A,13A,15A */ - ge_p1p1 t; - ge_p3 u; - ge_p3 A2; - int i; - - slide(aslide, a); - slide(bslide, b); - - x25519_ge_p3_to_cached(&Ai[0], A); - ge_p3_dbl(&t, A); - x25519_ge_p1p1_to_p3(&A2, &t); - x25519_ge_add(&t, &A2, &Ai[0]); - x25519_ge_p1p1_to_p3(&u, &t); - x25519_ge_p3_to_cached(&Ai[1], &u); - x25519_ge_add(&t, &A2, &Ai[1]); - x25519_ge_p1p1_to_p3(&u, &t); - x25519_ge_p3_to_cached(&Ai[2], &u); - x25519_ge_add(&t, &A2, &Ai[2]); - x25519_ge_p1p1_to_p3(&u, &t); - x25519_ge_p3_to_cached(&Ai[3], &u); - x25519_ge_add(&t, &A2, &Ai[3]); - x25519_ge_p1p1_to_p3(&u, &t); - x25519_ge_p3_to_cached(&Ai[4], &u); - x25519_ge_add(&t, &A2, &Ai[4]); - x25519_ge_p1p1_to_p3(&u, &t); - x25519_ge_p3_to_cached(&Ai[5], &u); - x25519_ge_add(&t, &A2, &Ai[5]); - x25519_ge_p1p1_to_p3(&u, &t); - x25519_ge_p3_to_cached(&Ai[6], &u); - x25519_ge_add(&t, &A2, &Ai[6]); - x25519_ge_p1p1_to_p3(&u, &t); - x25519_ge_p3_to_cached(&Ai[7], &u); - - ge_p2_0(r); - - for (i = 255; i >= 0; --i) { - if (aslide[i] || bslide[i]) { - break; - } - } - - for (; i >= 0; --i) { - ge_p2_dbl(&t, r); - - if (aslide[i] > 0) { - x25519_ge_p1p1_to_p3(&u, &t); - x25519_ge_add(&t, &u, &Ai[aslide[i] / 2]); - } else if (aslide[i] < 0) { - x25519_ge_p1p1_to_p3(&u, &t); - x25519_ge_sub(&t, &u, &Ai[(-aslide[i]) / 2]); - } - - if (bslide[i] > 0) { - x25519_ge_p1p1_to_p3(&u, &t); - ge_madd(&t, &u, &Bi[bslide[i] / 2]); - } else if (bslide[i] < 0) { - x25519_ge_p1p1_to_p3(&u, &t); - ge_msub(&t, &u, &Bi[(-bslide[i]) / 2]); - } - - x25519_ge_p1p1_to_p2(r, &t); - } -} - -/* The set of scalars is \Z/l - * where l = 2^252 + 27742317777372353535851937790883648493. */ - -/* Input: - * s[0]+256*s[1]+...+256^63*s[63] = s - * - * Output: - * s[0]+256*s[1]+...+256^31*s[31] = s mod l - * where l = 2^252 + 27742317777372353535851937790883648493. - * Overwrites s in place. 
*/ -void x25519_sc_reduce(uint8_t *s) { - int64_t s0 = 2097151 & load_3(s); - int64_t s1 = 2097151 & (load_4(s + 2) >> 5); - int64_t s2 = 2097151 & (load_3(s + 5) >> 2); - int64_t s3 = 2097151 & (load_4(s + 7) >> 7); - int64_t s4 = 2097151 & (load_4(s + 10) >> 4); - int64_t s5 = 2097151 & (load_3(s + 13) >> 1); - int64_t s6 = 2097151 & (load_4(s + 15) >> 6); - int64_t s7 = 2097151 & (load_3(s + 18) >> 3); - int64_t s8 = 2097151 & load_3(s + 21); - int64_t s9 = 2097151 & (load_4(s + 23) >> 5); - int64_t s10 = 2097151 & (load_3(s + 26) >> 2); - int64_t s11 = 2097151 & (load_4(s + 28) >> 7); - int64_t s12 = 2097151 & (load_4(s + 31) >> 4); - int64_t s13 = 2097151 & (load_3(s + 34) >> 1); - int64_t s14 = 2097151 & (load_4(s + 36) >> 6); - int64_t s15 = 2097151 & (load_3(s + 39) >> 3); - int64_t s16 = 2097151 & load_3(s + 42); - int64_t s17 = 2097151 & (load_4(s + 44) >> 5); - int64_t s18 = 2097151 & (load_3(s + 47) >> 2); - int64_t s19 = 2097151 & (load_4(s + 49) >> 7); - int64_t s20 = 2097151 & (load_4(s + 52) >> 4); - int64_t s21 = 2097151 & (load_3(s + 55) >> 1); - int64_t s22 = 2097151 & (load_4(s + 57) >> 6); - int64_t s23 = (load_4(s + 60) >> 3); - int64_t carry0; - int64_t carry1; - int64_t carry2; - int64_t carry3; - int64_t carry4; - int64_t carry5; - int64_t carry6; - int64_t carry7; - int64_t carry8; - int64_t carry9; - int64_t carry10; - int64_t carry11; - int64_t carry12; - int64_t carry13; - int64_t carry14; - int64_t carry15; - int64_t carry16; - - s11 += s23 * 666643; - s12 += s23 * 470296; - s13 += s23 * 654183; - s14 -= s23 * 997805; - s15 += s23 * 136657; - s16 -= s23 * 683901; - s23 = 0; - - s10 += s22 * 666643; - s11 += s22 * 470296; - s12 += s22 * 654183; - s13 -= s22 * 997805; - s14 += s22 * 136657; - s15 -= s22 * 683901; - s22 = 0; - - s9 += s21 * 666643; - s10 += s21 * 470296; - s11 += s21 * 654183; - s12 -= s21 * 997805; - s13 += s21 * 136657; - s14 -= s21 * 683901; - s21 = 0; - - s8 += s20 * 666643; - s9 += s20 * 470296; - s10 += s20 * 654183; - s11 -= s20 * 997805; - s12 += s20 * 136657; - s13 -= s20 * 683901; - s20 = 0; - - s7 += s19 * 666643; - s8 += s19 * 470296; - s9 += s19 * 654183; - s10 -= s19 * 997805; - s11 += s19 * 136657; - s12 -= s19 * 683901; - s19 = 0; - - s6 += s18 * 666643; - s7 += s18 * 470296; - s8 += s18 * 654183; - s9 -= s18 * 997805; - s10 += s18 * 136657; - s11 -= s18 * 683901; - s18 = 0; - - carry6 = (s6 + (1 << 20)) >> 21; - s7 += carry6; - s6 -= carry6 << 21; - carry8 = (s8 + (1 << 20)) >> 21; - s9 += carry8; - s8 -= carry8 << 21; - carry10 = (s10 + (1 << 20)) >> 21; - s11 += carry10; - s10 -= carry10 << 21; - carry12 = (s12 + (1 << 20)) >> 21; - s13 += carry12; - s12 -= carry12 << 21; - carry14 = (s14 + (1 << 20)) >> 21; - s15 += carry14; - s14 -= carry14 << 21; - carry16 = (s16 + (1 << 20)) >> 21; - s17 += carry16; - s16 -= carry16 << 21; - - carry7 = (s7 + (1 << 20)) >> 21; - s8 += carry7; - s7 -= carry7 << 21; - carry9 = (s9 + (1 << 20)) >> 21; - s10 += carry9; - s9 -= carry9 << 21; - carry11 = (s11 + (1 << 20)) >> 21; - s12 += carry11; - s11 -= carry11 << 21; - carry13 = (s13 + (1 << 20)) >> 21; - s14 += carry13; - s13 -= carry13 << 21; - carry15 = (s15 + (1 << 20)) >> 21; - s16 += carry15; - s15 -= carry15 << 21; - - s5 += s17 * 666643; - s6 += s17 * 470296; - s7 += s17 * 654183; - s8 -= s17 * 997805; - s9 += s17 * 136657; - s10 -= s17 * 683901; - s17 = 0; - - s4 += s16 * 666643; - s5 += s16 * 470296; - s6 += s16 * 654183; - s7 -= s16 * 997805; - s8 += s16 * 136657; - s9 -= s16 * 683901; - s16 = 0; - - s3 += s15 * 666643; - s4 += s15 * 
470296; - s5 += s15 * 654183; - s6 -= s15 * 997805; - s7 += s15 * 136657; - s8 -= s15 * 683901; - s15 = 0; - - s2 += s14 * 666643; - s3 += s14 * 470296; - s4 += s14 * 654183; - s5 -= s14 * 997805; - s6 += s14 * 136657; - s7 -= s14 * 683901; - s14 = 0; - - s1 += s13 * 666643; - s2 += s13 * 470296; - s3 += s13 * 654183; - s4 -= s13 * 997805; - s5 += s13 * 136657; - s6 -= s13 * 683901; - s13 = 0; - - s0 += s12 * 666643; - s1 += s12 * 470296; - s2 += s12 * 654183; - s3 -= s12 * 997805; - s4 += s12 * 136657; - s5 -= s12 * 683901; - s12 = 0; - - carry0 = (s0 + (1 << 20)) >> 21; - s1 += carry0; - s0 -= carry0 << 21; - carry2 = (s2 + (1 << 20)) >> 21; - s3 += carry2; - s2 -= carry2 << 21; - carry4 = (s4 + (1 << 20)) >> 21; - s5 += carry4; - s4 -= carry4 << 21; - carry6 = (s6 + (1 << 20)) >> 21; - s7 += carry6; - s6 -= carry6 << 21; - carry8 = (s8 + (1 << 20)) >> 21; - s9 += carry8; - s8 -= carry8 << 21; - carry10 = (s10 + (1 << 20)) >> 21; - s11 += carry10; - s10 -= carry10 << 21; - - carry1 = (s1 + (1 << 20)) >> 21; - s2 += carry1; - s1 -= carry1 << 21; - carry3 = (s3 + (1 << 20)) >> 21; - s4 += carry3; - s3 -= carry3 << 21; - carry5 = (s5 + (1 << 20)) >> 21; - s6 += carry5; - s5 -= carry5 << 21; - carry7 = (s7 + (1 << 20)) >> 21; - s8 += carry7; - s7 -= carry7 << 21; - carry9 = (s9 + (1 << 20)) >> 21; - s10 += carry9; - s9 -= carry9 << 21; - carry11 = (s11 + (1 << 20)) >> 21; - s12 += carry11; - s11 -= carry11 << 21; - - s0 += s12 * 666643; - s1 += s12 * 470296; - s2 += s12 * 654183; - s3 -= s12 * 997805; - s4 += s12 * 136657; - s5 -= s12 * 683901; - s12 = 0; - - carry0 = s0 >> 21; - s1 += carry0; - s0 -= carry0 << 21; - carry1 = s1 >> 21; - s2 += carry1; - s1 -= carry1 << 21; - carry2 = s2 >> 21; - s3 += carry2; - s2 -= carry2 << 21; - carry3 = s3 >> 21; - s4 += carry3; - s3 -= carry3 << 21; - carry4 = s4 >> 21; - s5 += carry4; - s4 -= carry4 << 21; - carry5 = s5 >> 21; - s6 += carry5; - s5 -= carry5 << 21; - carry6 = s6 >> 21; - s7 += carry6; - s6 -= carry6 << 21; - carry7 = s7 >> 21; - s8 += carry7; - s7 -= carry7 << 21; - carry8 = s8 >> 21; - s9 += carry8; - s8 -= carry8 << 21; - carry9 = s9 >> 21; - s10 += carry9; - s9 -= carry9 << 21; - carry10 = s10 >> 21; - s11 += carry10; - s10 -= carry10 << 21; - carry11 = s11 >> 21; - s12 += carry11; - s11 -= carry11 << 21; - - s0 += s12 * 666643; - s1 += s12 * 470296; - s2 += s12 * 654183; - s3 -= s12 * 997805; - s4 += s12 * 136657; - s5 -= s12 * 683901; - s12 = 0; - - carry0 = s0 >> 21; - s1 += carry0; - s0 -= carry0 << 21; - carry1 = s1 >> 21; - s2 += carry1; - s1 -= carry1 << 21; - carry2 = s2 >> 21; - s3 += carry2; - s2 -= carry2 << 21; - carry3 = s3 >> 21; - s4 += carry3; - s3 -= carry3 << 21; - carry4 = s4 >> 21; - s5 += carry4; - s4 -= carry4 << 21; - carry5 = s5 >> 21; - s6 += carry5; - s5 -= carry5 << 21; - carry6 = s6 >> 21; - s7 += carry6; - s6 -= carry6 << 21; - carry7 = s7 >> 21; - s8 += carry7; - s7 -= carry7 << 21; - carry8 = s8 >> 21; - s9 += carry8; - s8 -= carry8 << 21; - carry9 = s9 >> 21; - s10 += carry9; - s9 -= carry9 << 21; - carry10 = s10 >> 21; - s11 += carry10; - s10 -= carry10 << 21; - - s[0] = s0 >> 0; - s[1] = s0 >> 8; - s[2] = (s0 >> 16) | (s1 << 5); - s[3] = s1 >> 3; - s[4] = s1 >> 11; - s[5] = (s1 >> 19) | (s2 << 2); - s[6] = s2 >> 6; - s[7] = (s2 >> 14) | (s3 << 7); - s[8] = s3 >> 1; - s[9] = s3 >> 9; - s[10] = (s3 >> 17) | (s4 << 4); - s[11] = s4 >> 4; - s[12] = s4 >> 12; - s[13] = (s4 >> 20) | (s5 << 1); - s[14] = s5 >> 7; - s[15] = (s5 >> 15) | (s6 << 6); - s[16] = s6 >> 2; - s[17] = s6 >> 10; - s[18] = (s6 >> 18) 
| (s7 << 3); - s[19] = s7 >> 5; - s[20] = s7 >> 13; - s[21] = s8 >> 0; - s[22] = s8 >> 8; - s[23] = (s8 >> 16) | (s9 << 5); - s[24] = s9 >> 3; - s[25] = s9 >> 11; - s[26] = (s9 >> 19) | (s10 << 2); - s[27] = s10 >> 6; - s[28] = (s10 >> 14) | (s11 << 7); - s[29] = s11 >> 1; - s[30] = s11 >> 9; - s[31] = s11 >> 17; -} - -/* Input: - * a[0]+256*a[1]+...+256^31*a[31] = a - * b[0]+256*b[1]+...+256^31*b[31] = b - * c[0]+256*c[1]+...+256^31*c[31] = c - * - * Output: - * s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l - * where l = 2^252 + 27742317777372353535851937790883648493. */ -static void sc_muladd(uint8_t *s, const uint8_t *a, const uint8_t *b, - const uint8_t *c) { - int64_t a0 = 2097151 & load_3(a); - int64_t a1 = 2097151 & (load_4(a + 2) >> 5); - int64_t a2 = 2097151 & (load_3(a + 5) >> 2); - int64_t a3 = 2097151 & (load_4(a + 7) >> 7); - int64_t a4 = 2097151 & (load_4(a + 10) >> 4); - int64_t a5 = 2097151 & (load_3(a + 13) >> 1); - int64_t a6 = 2097151 & (load_4(a + 15) >> 6); - int64_t a7 = 2097151 & (load_3(a + 18) >> 3); - int64_t a8 = 2097151 & load_3(a + 21); - int64_t a9 = 2097151 & (load_4(a + 23) >> 5); - int64_t a10 = 2097151 & (load_3(a + 26) >> 2); - int64_t a11 = (load_4(a + 28) >> 7); - int64_t b0 = 2097151 & load_3(b); - int64_t b1 = 2097151 & (load_4(b + 2) >> 5); - int64_t b2 = 2097151 & (load_3(b + 5) >> 2); - int64_t b3 = 2097151 & (load_4(b + 7) >> 7); - int64_t b4 = 2097151 & (load_4(b + 10) >> 4); - int64_t b5 = 2097151 & (load_3(b + 13) >> 1); - int64_t b6 = 2097151 & (load_4(b + 15) >> 6); - int64_t b7 = 2097151 & (load_3(b + 18) >> 3); - int64_t b8 = 2097151 & load_3(b + 21); - int64_t b9 = 2097151 & (load_4(b + 23) >> 5); - int64_t b10 = 2097151 & (load_3(b + 26) >> 2); - int64_t b11 = (load_4(b + 28) >> 7); - int64_t c0 = 2097151 & load_3(c); - int64_t c1 = 2097151 & (load_4(c + 2) >> 5); - int64_t c2 = 2097151 & (load_3(c + 5) >> 2); - int64_t c3 = 2097151 & (load_4(c + 7) >> 7); - int64_t c4 = 2097151 & (load_4(c + 10) >> 4); - int64_t c5 = 2097151 & (load_3(c + 13) >> 1); - int64_t c6 = 2097151 & (load_4(c + 15) >> 6); - int64_t c7 = 2097151 & (load_3(c + 18) >> 3); - int64_t c8 = 2097151 & load_3(c + 21); - int64_t c9 = 2097151 & (load_4(c + 23) >> 5); - int64_t c10 = 2097151 & (load_3(c + 26) >> 2); - int64_t c11 = (load_4(c + 28) >> 7); - int64_t s0; - int64_t s1; - int64_t s2; - int64_t s3; - int64_t s4; - int64_t s5; - int64_t s6; - int64_t s7; - int64_t s8; - int64_t s9; - int64_t s10; - int64_t s11; - int64_t s12; - int64_t s13; - int64_t s14; - int64_t s15; - int64_t s16; - int64_t s17; - int64_t s18; - int64_t s19; - int64_t s20; - int64_t s21; - int64_t s22; - int64_t s23; - int64_t carry0; - int64_t carry1; - int64_t carry2; - int64_t carry3; - int64_t carry4; - int64_t carry5; - int64_t carry6; - int64_t carry7; - int64_t carry8; - int64_t carry9; - int64_t carry10; - int64_t carry11; - int64_t carry12; - int64_t carry13; - int64_t carry14; - int64_t carry15; - int64_t carry16; - int64_t carry17; - int64_t carry18; - int64_t carry19; - int64_t carry20; - int64_t carry21; - int64_t carry22; - - s0 = c0 + a0 * b0; - s1 = c1 + a0 * b1 + a1 * b0; - s2 = c2 + a0 * b2 + a1 * b1 + a2 * b0; - s3 = c3 + a0 * b3 + a1 * b2 + a2 * b1 + a3 * b0; - s4 = c4 + a0 * b4 + a1 * b3 + a2 * b2 + a3 * b1 + a4 * b0; - s5 = c5 + a0 * b5 + a1 * b4 + a2 * b3 + a3 * b2 + a4 * b1 + a5 * b0; - s6 = c6 + a0 * b6 + a1 * b5 + a2 * b4 + a3 * b3 + a4 * b2 + a5 * b1 + a6 * b0; - s7 = c7 + a0 * b7 + a1 * b6 + a2 * b5 + a3 * b4 + a4 * b3 + a5 * b2 + - a6 * b1 + a7 * b0; - s8 = c8 + a0 
* b8 + a1 * b7 + a2 * b6 + a3 * b5 + a4 * b4 + a5 * b3 + - a6 * b2 + a7 * b1 + a8 * b0; - s9 = c9 + a0 * b9 + a1 * b8 + a2 * b7 + a3 * b6 + a4 * b5 + a5 * b4 + - a6 * b3 + a7 * b2 + a8 * b1 + a9 * b0; - s10 = c10 + a0 * b10 + a1 * b9 + a2 * b8 + a3 * b7 + a4 * b6 + a5 * b5 + - a6 * b4 + a7 * b3 + a8 * b2 + a9 * b1 + a10 * b0; - s11 = c11 + a0 * b11 + a1 * b10 + a2 * b9 + a3 * b8 + a4 * b7 + a5 * b6 + - a6 * b5 + a7 * b4 + a8 * b3 + a9 * b2 + a10 * b1 + a11 * b0; - s12 = a1 * b11 + a2 * b10 + a3 * b9 + a4 * b8 + a5 * b7 + a6 * b6 + a7 * b5 + - a8 * b4 + a9 * b3 + a10 * b2 + a11 * b1; - s13 = a2 * b11 + a3 * b10 + a4 * b9 + a5 * b8 + a6 * b7 + a7 * b6 + a8 * b5 + - a9 * b4 + a10 * b3 + a11 * b2; - s14 = a3 * b11 + a4 * b10 + a5 * b9 + a6 * b8 + a7 * b7 + a8 * b6 + a9 * b5 + - a10 * b4 + a11 * b3; - s15 = a4 * b11 + a5 * b10 + a6 * b9 + a7 * b8 + a8 * b7 + a9 * b6 + a10 * b5 + - a11 * b4; - s16 = a5 * b11 + a6 * b10 + a7 * b9 + a8 * b8 + a9 * b7 + a10 * b6 + a11 * b5; - s17 = a6 * b11 + a7 * b10 + a8 * b9 + a9 * b8 + a10 * b7 + a11 * b6; - s18 = a7 * b11 + a8 * b10 + a9 * b9 + a10 * b8 + a11 * b7; - s19 = a8 * b11 + a9 * b10 + a10 * b9 + a11 * b8; - s20 = a9 * b11 + a10 * b10 + a11 * b9; - s21 = a10 * b11 + a11 * b10; - s22 = a11 * b11; - s23 = 0; - - carry0 = (s0 + (1 << 20)) >> 21; - s1 += carry0; - s0 -= carry0 << 21; - carry2 = (s2 + (1 << 20)) >> 21; - s3 += carry2; - s2 -= carry2 << 21; - carry4 = (s4 + (1 << 20)) >> 21; - s5 += carry4; - s4 -= carry4 << 21; - carry6 = (s6 + (1 << 20)) >> 21; - s7 += carry6; - s6 -= carry6 << 21; - carry8 = (s8 + (1 << 20)) >> 21; - s9 += carry8; - s8 -= carry8 << 21; - carry10 = (s10 + (1 << 20)) >> 21; - s11 += carry10; - s10 -= carry10 << 21; - carry12 = (s12 + (1 << 20)) >> 21; - s13 += carry12; - s12 -= carry12 << 21; - carry14 = (s14 + (1 << 20)) >> 21; - s15 += carry14; - s14 -= carry14 << 21; - carry16 = (s16 + (1 << 20)) >> 21; - s17 += carry16; - s16 -= carry16 << 21; - carry18 = (s18 + (1 << 20)) >> 21; - s19 += carry18; - s18 -= carry18 << 21; - carry20 = (s20 + (1 << 20)) >> 21; - s21 += carry20; - s20 -= carry20 << 21; - carry22 = (s22 + (1 << 20)) >> 21; - s23 += carry22; - s22 -= carry22 << 21; - - carry1 = (s1 + (1 << 20)) >> 21; - s2 += carry1; - s1 -= carry1 << 21; - carry3 = (s3 + (1 << 20)) >> 21; - s4 += carry3; - s3 -= carry3 << 21; - carry5 = (s5 + (1 << 20)) >> 21; - s6 += carry5; - s5 -= carry5 << 21; - carry7 = (s7 + (1 << 20)) >> 21; - s8 += carry7; - s7 -= carry7 << 21; - carry9 = (s9 + (1 << 20)) >> 21; - s10 += carry9; - s9 -= carry9 << 21; - carry11 = (s11 + (1 << 20)) >> 21; - s12 += carry11; - s11 -= carry11 << 21; - carry13 = (s13 + (1 << 20)) >> 21; - s14 += carry13; - s13 -= carry13 << 21; - carry15 = (s15 + (1 << 20)) >> 21; - s16 += carry15; - s15 -= carry15 << 21; - carry17 = (s17 + (1 << 20)) >> 21; - s18 += carry17; - s17 -= carry17 << 21; - carry19 = (s19 + (1 << 20)) >> 21; - s20 += carry19; - s19 -= carry19 << 21; - carry21 = (s21 + (1 << 20)) >> 21; - s22 += carry21; - s21 -= carry21 << 21; - - s11 += s23 * 666643; - s12 += s23 * 470296; - s13 += s23 * 654183; - s14 -= s23 * 997805; - s15 += s23 * 136657; - s16 -= s23 * 683901; - s23 = 0; - - s10 += s22 * 666643; - s11 += s22 * 470296; - s12 += s22 * 654183; - s13 -= s22 * 997805; - s14 += s22 * 136657; - s15 -= s22 * 683901; - s22 = 0; - - s9 += s21 * 666643; - s10 += s21 * 470296; - s11 += s21 * 654183; - s12 -= s21 * 997805; - s13 += s21 * 136657; - s14 -= s21 * 683901; - s21 = 0; - - s8 += s20 * 666643; - s9 += s20 * 470296; - s10 += s20 * 654183; - s11 
-= s20 * 997805; - s12 += s20 * 136657; - s13 -= s20 * 683901; - s20 = 0; - - s7 += s19 * 666643; - s8 += s19 * 470296; - s9 += s19 * 654183; - s10 -= s19 * 997805; - s11 += s19 * 136657; - s12 -= s19 * 683901; - s19 = 0; - - s6 += s18 * 666643; - s7 += s18 * 470296; - s8 += s18 * 654183; - s9 -= s18 * 997805; - s10 += s18 * 136657; - s11 -= s18 * 683901; - s18 = 0; - - carry6 = (s6 + (1 << 20)) >> 21; - s7 += carry6; - s6 -= carry6 << 21; - carry8 = (s8 + (1 << 20)) >> 21; - s9 += carry8; - s8 -= carry8 << 21; - carry10 = (s10 + (1 << 20)) >> 21; - s11 += carry10; - s10 -= carry10 << 21; - carry12 = (s12 + (1 << 20)) >> 21; - s13 += carry12; - s12 -= carry12 << 21; - carry14 = (s14 + (1 << 20)) >> 21; - s15 += carry14; - s14 -= carry14 << 21; - carry16 = (s16 + (1 << 20)) >> 21; - s17 += carry16; - s16 -= carry16 << 21; - - carry7 = (s7 + (1 << 20)) >> 21; - s8 += carry7; - s7 -= carry7 << 21; - carry9 = (s9 + (1 << 20)) >> 21; - s10 += carry9; - s9 -= carry9 << 21; - carry11 = (s11 + (1 << 20)) >> 21; - s12 += carry11; - s11 -= carry11 << 21; - carry13 = (s13 + (1 << 20)) >> 21; - s14 += carry13; - s13 -= carry13 << 21; - carry15 = (s15 + (1 << 20)) >> 21; - s16 += carry15; - s15 -= carry15 << 21; - - s5 += s17 * 666643; - s6 += s17 * 470296; - s7 += s17 * 654183; - s8 -= s17 * 997805; - s9 += s17 * 136657; - s10 -= s17 * 683901; - s17 = 0; - - s4 += s16 * 666643; - s5 += s16 * 470296; - s6 += s16 * 654183; - s7 -= s16 * 997805; - s8 += s16 * 136657; - s9 -= s16 * 683901; - s16 = 0; - - s3 += s15 * 666643; - s4 += s15 * 470296; - s5 += s15 * 654183; - s6 -= s15 * 997805; - s7 += s15 * 136657; - s8 -= s15 * 683901; - s15 = 0; - - s2 += s14 * 666643; - s3 += s14 * 470296; - s4 += s14 * 654183; - s5 -= s14 * 997805; - s6 += s14 * 136657; - s7 -= s14 * 683901; - s14 = 0; - - s1 += s13 * 666643; - s2 += s13 * 470296; - s3 += s13 * 654183; - s4 -= s13 * 997805; - s5 += s13 * 136657; - s6 -= s13 * 683901; - s13 = 0; - - s0 += s12 * 666643; - s1 += s12 * 470296; - s2 += s12 * 654183; - s3 -= s12 * 997805; - s4 += s12 * 136657; - s5 -= s12 * 683901; - s12 = 0; - - carry0 = (s0 + (1 << 20)) >> 21; - s1 += carry0; - s0 -= carry0 << 21; - carry2 = (s2 + (1 << 20)) >> 21; - s3 += carry2; - s2 -= carry2 << 21; - carry4 = (s4 + (1 << 20)) >> 21; - s5 += carry4; - s4 -= carry4 << 21; - carry6 = (s6 + (1 << 20)) >> 21; - s7 += carry6; - s6 -= carry6 << 21; - carry8 = (s8 + (1 << 20)) >> 21; - s9 += carry8; - s8 -= carry8 << 21; - carry10 = (s10 + (1 << 20)) >> 21; - s11 += carry10; - s10 -= carry10 << 21; - - carry1 = (s1 + (1 << 20)) >> 21; - s2 += carry1; - s1 -= carry1 << 21; - carry3 = (s3 + (1 << 20)) >> 21; - s4 += carry3; - s3 -= carry3 << 21; - carry5 = (s5 + (1 << 20)) >> 21; - s6 += carry5; - s5 -= carry5 << 21; - carry7 = (s7 + (1 << 20)) >> 21; - s8 += carry7; - s7 -= carry7 << 21; - carry9 = (s9 + (1 << 20)) >> 21; - s10 += carry9; - s9 -= carry9 << 21; - carry11 = (s11 + (1 << 20)) >> 21; - s12 += carry11; - s11 -= carry11 << 21; - - s0 += s12 * 666643; - s1 += s12 * 470296; - s2 += s12 * 654183; - s3 -= s12 * 997805; - s4 += s12 * 136657; - s5 -= s12 * 683901; - s12 = 0; - - carry0 = s0 >> 21; - s1 += carry0; - s0 -= carry0 << 21; - carry1 = s1 >> 21; - s2 += carry1; - s1 -= carry1 << 21; - carry2 = s2 >> 21; - s3 += carry2; - s2 -= carry2 << 21; - carry3 = s3 >> 21; - s4 += carry3; - s3 -= carry3 << 21; - carry4 = s4 >> 21; - s5 += carry4; - s4 -= carry4 << 21; - carry5 = s5 >> 21; - s6 += carry5; - s5 -= carry5 << 21; - carry6 = s6 >> 21; - s7 += carry6; - s6 -= carry6 << 21; - carry7 = 
s7 >> 21; - s8 += carry7; - s7 -= carry7 << 21; - carry8 = s8 >> 21; - s9 += carry8; - s8 -= carry8 << 21; - carry9 = s9 >> 21; - s10 += carry9; - s9 -= carry9 << 21; - carry10 = s10 >> 21; - s11 += carry10; - s10 -= carry10 << 21; - carry11 = s11 >> 21; - s12 += carry11; - s11 -= carry11 << 21; - - s0 += s12 * 666643; - s1 += s12 * 470296; - s2 += s12 * 654183; - s3 -= s12 * 997805; - s4 += s12 * 136657; - s5 -= s12 * 683901; - s12 = 0; - - carry0 = s0 >> 21; - s1 += carry0; - s0 -= carry0 << 21; - carry1 = s1 >> 21; - s2 += carry1; - s1 -= carry1 << 21; - carry2 = s2 >> 21; - s3 += carry2; - s2 -= carry2 << 21; - carry3 = s3 >> 21; - s4 += carry3; - s3 -= carry3 << 21; - carry4 = s4 >> 21; - s5 += carry4; - s4 -= carry4 << 21; - carry5 = s5 >> 21; - s6 += carry5; - s5 -= carry5 << 21; - carry6 = s6 >> 21; - s7 += carry6; - s6 -= carry6 << 21; - carry7 = s7 >> 21; - s8 += carry7; - s7 -= carry7 << 21; - carry8 = s8 >> 21; - s9 += carry8; - s8 -= carry8 << 21; - carry9 = s9 >> 21; - s10 += carry9; - s9 -= carry9 << 21; - carry10 = s10 >> 21; - s11 += carry10; - s10 -= carry10 << 21; - - s[0] = s0 >> 0; - s[1] = s0 >> 8; - s[2] = (s0 >> 16) | (s1 << 5); - s[3] = s1 >> 3; - s[4] = s1 >> 11; - s[5] = (s1 >> 19) | (s2 << 2); - s[6] = s2 >> 6; - s[7] = (s2 >> 14) | (s3 << 7); - s[8] = s3 >> 1; - s[9] = s3 >> 9; - s[10] = (s3 >> 17) | (s4 << 4); - s[11] = s4 >> 4; - s[12] = s4 >> 12; - s[13] = (s4 >> 20) | (s5 << 1); - s[14] = s5 >> 7; - s[15] = (s5 >> 15) | (s6 << 6); - s[16] = s6 >> 2; - s[17] = s6 >> 10; - s[18] = (s6 >> 18) | (s7 << 3); - s[19] = s7 >> 5; - s[20] = s7 >> 13; - s[21] = s8 >> 0; - s[22] = s8 >> 8; - s[23] = (s8 >> 16) | (s9 << 5); - s[24] = s9 >> 3; - s[25] = s9 >> 11; - s[26] = (s9 >> 19) | (s10 << 2); - s[27] = s10 >> 6; - s[28] = (s10 >> 14) | (s11 << 7); - s[29] = s11 >> 1; - s[30] = s11 >> 9; - s[31] = s11 >> 17; -} - -void ED25519_keypair(uint8_t out_public_key[32], uint8_t out_private_key[64]) { - uint8_t seed[32]; - RAND_bytes(seed, 32); - ED25519_keypair_from_seed(out_public_key, out_private_key, seed); -} - -int ED25519_sign(uint8_t *out_sig, const uint8_t *message, size_t message_len, - const uint8_t private_key[64]) { - uint8_t az[SHA512_DIGEST_LENGTH]; - SHA512(private_key, 32, az); - - az[0] &= 248; - az[31] &= 63; - az[31] |= 64; - - SHA512_CTX hash_ctx; - SHA512_Init(&hash_ctx); - SHA512_Update(&hash_ctx, az + 32, 32); - SHA512_Update(&hash_ctx, message, message_len); - uint8_t nonce[SHA512_DIGEST_LENGTH]; - SHA512_Final(nonce, &hash_ctx); - - x25519_sc_reduce(nonce); - ge_p3 R; - x25519_ge_scalarmult_base(&R, nonce); - ge_p3_tobytes(out_sig, &R); - - SHA512_Init(&hash_ctx); - SHA512_Update(&hash_ctx, out_sig, 32); - SHA512_Update(&hash_ctx, private_key + 32, 32); - SHA512_Update(&hash_ctx, message, message_len); - uint8_t hram[SHA512_DIGEST_LENGTH]; - SHA512_Final(hram, &hash_ctx); - - x25519_sc_reduce(hram); - sc_muladd(out_sig + 32, hram, az, nonce); - - return 1; -} - -int ED25519_verify(const uint8_t *message, size_t message_len, - const uint8_t signature[64], const uint8_t public_key[32]) { - ge_p3 A; - if ((signature[63] & 224) != 0 || - x25519_ge_frombytes_vartime(&A, public_key) != 0) { - return 0; - } - - fe_neg(A.X, A.X); - fe_neg(A.T, A.T); - - uint8_t pkcopy[32]; - OPENSSL_memcpy(pkcopy, public_key, 32); - uint8_t rcopy[32]; - OPENSSL_memcpy(rcopy, signature, 32); - uint8_t scopy[32]; - OPENSSL_memcpy(scopy, signature + 32, 32); - - SHA512_CTX hash_ctx; - SHA512_Init(&hash_ctx); - SHA512_Update(&hash_ctx, signature, 32); - SHA512_Update(&hash_ctx, 
public_key, 32); - SHA512_Update(&hash_ctx, message, message_len); - uint8_t h[SHA512_DIGEST_LENGTH]; - SHA512_Final(h, &hash_ctx); - - x25519_sc_reduce(h); - - ge_p2 R; - ge_double_scalarmult_vartime(&R, h, &A, scopy); - - uint8_t rcheck[32]; - x25519_ge_tobytes(rcheck, &R); - - return CRYPTO_memcmp(rcheck, rcopy, sizeof(rcheck)) == 0; -} - -void ED25519_keypair_from_seed(uint8_t out_public_key[32], - uint8_t out_private_key[64], - const uint8_t seed[32]) { - uint8_t az[SHA512_DIGEST_LENGTH]; - SHA512(seed, 32, az); - - az[0] &= 248; - az[31] &= 63; - az[31] |= 64; - - ge_p3 A; - x25519_ge_scalarmult_base(&A, az); - ge_p3_tobytes(out_public_key, &A); - - OPENSSL_memcpy(out_private_key, seed, 32); - OPENSSL_memcpy(out_private_key + 32, out_public_key, 32); -} - - -#if defined(BORINGSSL_X25519_X86_64) - -static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32], - const uint8_t point[32]) { - x25519_x86_64(out, scalar, point); -} - -#else - -/* Replace (f,g) with (g,f) if b == 1; - * replace (f,g) with (f,g) if b == 0. - * - * Preconditions: b in {0,1}. */ -static void fe_cswap(fe f, fe g, unsigned int b) { - b = 0-b; - unsigned i; - for (i = 0; i < 10; i++) { - int32_t x = f[i] ^ g[i]; - x &= b; - f[i] ^= x; - g[i] ^= x; - } -} - -/* h = f * 121666 - * Can overlap h with f. - * - * Preconditions: - * |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. - * - * Postconditions: - * |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. */ -static void fe_mul121666(fe h, fe f) { - int32_t f0 = f[0]; - int32_t f1 = f[1]; - int32_t f2 = f[2]; - int32_t f3 = f[3]; - int32_t f4 = f[4]; - int32_t f5 = f[5]; - int32_t f6 = f[6]; - int32_t f7 = f[7]; - int32_t f8 = f[8]; - int32_t f9 = f[9]; - int64_t h0 = f0 * (int64_t) 121666; - int64_t h1 = f1 * (int64_t) 121666; - int64_t h2 = f2 * (int64_t) 121666; - int64_t h3 = f3 * (int64_t) 121666; - int64_t h4 = f4 * (int64_t) 121666; - int64_t h5 = f5 * (int64_t) 121666; - int64_t h6 = f6 * (int64_t) 121666; - int64_t h7 = f7 * (int64_t) 121666; - int64_t h8 = f8 * (int64_t) 121666; - int64_t h9 = f9 * (int64_t) 121666; - int64_t carry0; - int64_t carry1; - int64_t carry2; - int64_t carry3; - int64_t carry4; - int64_t carry5; - int64_t carry6; - int64_t carry7; - int64_t carry8; - int64_t carry9; - - carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits; - carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits; - carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits; - carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits; - carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits; - - carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits; - carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits; - carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits; - carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits; - carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits; - - h[0] = h0; - h[1] = h1; - h[2] = h2; - h[3] = h3; - h[4] = h4; - h[5] = h5; - h[6] = h6; - h[7] = h7; - h[8] = h8; - h[9] = h9; -} - -static void x25519_scalar_mult_generic(uint8_t out[32], - const uint8_t scalar[32], - const uint8_t point[32]) { - fe x1, x2, z2, x3, z3, tmp0, tmp1; - - uint8_t e[32]; - OPENSSL_memcpy(e, scalar, 32); - e[0] &= 248; - e[31] &= 127; - e[31] |= 64; - fe_frombytes(x1, point); - fe_1(x2); - fe_0(z2); - fe_copy(x3, x1); - fe_1(z3); - - unsigned swap = 0; - int pos; - for (pos 
= 254; pos >= 0; --pos) { - unsigned b = 1 & (e[pos / 8] >> (pos & 7)); - swap ^= b; - fe_cswap(x2, x3, swap); - fe_cswap(z2, z3, swap); - swap = b; - fe_sub(tmp0, x3, z3); - fe_sub(tmp1, x2, z2); - fe_add(x2, x2, z2); - fe_add(z2, x3, z3); - fe_mul(z3, tmp0, x2); - fe_mul(z2, z2, tmp1); - fe_sq(tmp0, tmp1); - fe_sq(tmp1, x2); - fe_add(x3, z3, z2); - fe_sub(z2, z3, z2); - fe_mul(x2, tmp1, tmp0); - fe_sub(tmp1, tmp1, tmp0); - fe_sq(z2, z2); - fe_mul121666(z3, tmp1); - fe_sq(x3, x3); - fe_add(tmp0, tmp0, z3); - fe_mul(z3, x1, z2); - fe_mul(z2, tmp1, tmp0); - } - fe_cswap(x2, x3, swap); - fe_cswap(z2, z3, swap); - - fe_invert(z2, z2); - fe_mul(x2, x2, z2); - fe_tobytes(out, x2); -} - -static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32], - const uint8_t point[32]) { -#if defined(BORINGSSL_X25519_NEON) - if (CRYPTO_is_NEON_capable()) { - x25519_NEON(out, scalar, point); - return; - } -#endif - - x25519_scalar_mult_generic(out, scalar, point); -} - -#endif /* BORINGSSL_X25519_X86_64 */ - - -void X25519_keypair(uint8_t out_public_value[32], uint8_t out_private_key[32]) { - RAND_bytes(out_private_key, 32); - - /* All X25519 implementations should decode scalars correctly (see - * https://tools.ietf.org/html/rfc7748#section-5). However, if an - * implementation doesn't then it might interoperate with random keys a - * fraction of the time because they'll, randomly, happen to be correctly - * formed. - * - * Thus we do the opposite of the masking here to make sure that our private - * keys are never correctly masked and so, hopefully, any incorrect - * implementations are deterministically broken. - * - * This does not affect security because, although we're throwing away - * entropy, a valid implementation of scalarmult should throw away the exact - * same bits anyway. */ - out_private_key[0] |= 7; - out_private_key[31] &= 63; - out_private_key[31] |= 128; - - X25519_public_from_private(out_public_value, out_private_key); -} - -int X25519(uint8_t out_shared_key[32], const uint8_t private_key[32], - const uint8_t peer_public_value[32]) { - static const uint8_t kZeros[32] = {0}; - x25519_scalar_mult(out_shared_key, private_key, peer_public_value); - /* The all-zero output results when the input is a point of small order. */ - return CRYPTO_memcmp(kZeros, out_shared_key, 32) != 0; -} - -#if defined(BORINGSSL_X25519_X86_64) - -/* When |BORINGSSL_X25519_X86_64| is set, base point multiplication is done with - * the Montgomery ladder because it's faster. Otherwise it's done using the - * Ed25519 tables. */ - -void X25519_public_from_private(uint8_t out_public_value[32], - const uint8_t private_key[32]) { - static const uint8_t kMongomeryBasePoint[32] = {9}; - x25519_scalar_mult(out_public_value, private_key, kMongomeryBasePoint); -} - -#else - -void X25519_public_from_private(uint8_t out_public_value[32], - const uint8_t private_key[32]) { -#if defined(BORINGSSL_X25519_NEON) - if (CRYPTO_is_NEON_capable()) { - static const uint8_t kMongomeryBasePoint[32] = {9}; - x25519_NEON(out_public_value, private_key, kMongomeryBasePoint); - return; - } -#endif - - uint8_t e[32]; - OPENSSL_memcpy(e, private_key, 32); - e[0] &= 248; - e[31] &= 127; - e[31] |= 64; - - ge_p3 A; - x25519_ge_scalarmult_base(&A, e); - - /* We only need the u-coordinate of the curve25519 point. The map is - * u=(y+1)/(1-y). Since y=Y/Z, this gives u=(Z+Y)/(Z-Y). 
*/ - fe zplusy, zminusy, zminusy_inv; - fe_add(zplusy, A.Z, A.Y); - fe_sub(zminusy, A.Z, A.Y); - fe_invert(zminusy_inv, zminusy); - fe_mul(zplusy, zplusy, zminusy_inv); - fe_tobytes(out_public_value, zplusy); -} - -#endif /* BORINGSSL_X25519_X86_64 */ diff --git a/Sources/BoringSSL/crypto/curve25519/internal.h b/Sources/BoringSSL/crypto/curve25519/internal.h deleted file mode 100644 index ea206a3e9..000000000 --- a/Sources/BoringSSL/crypto/curve25519/internal.h +++ /dev/null @@ -1,109 +0,0 @@ -/* Copyright (c) 2015, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#ifndef OPENSSL_HEADER_CURVE25519_INTERNAL_H -#define OPENSSL_HEADER_CURVE25519_INTERNAL_H - -#if defined(__cplusplus) -extern "C" { -#endif - - -#if defined(OPENSSL_X86_64) && !defined(OPENSSL_SMALL) && \ - !defined(OPENSSL_WINDOWS) && !defined(OPENSSL_NO_ASM) -#define BORINGSSL_X25519_X86_64 - -void x25519_x86_64(uint8_t out[32], const uint8_t scalar[32], - const uint8_t point[32]); -#endif - - -#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) -#define BORINGSSL_X25519_NEON - -/* x25519_NEON is defined in asm/x25519-arm.S. */ -void x25519_NEON(uint8_t out[32], const uint8_t scalar[32], - const uint8_t point[32]); -#endif - -/* fe means field element. Here the field is \Z/(2^255-19). An element t, - * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 - * t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on - * context. */ -typedef int32_t fe[10]; - -/* ge means group element. - - * Here the group is the set of pairs (x,y) of field elements (see fe.h) - * satisfying -x^2 + y^2 = 1 + d x^2y^2 - * where d = -121665/121666. 
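The deleted internal.h spells out the field-element representation the C implementation used: an element of Z/(2^255-19) is ten int32_t limbs in alternating 26/25-bit radix, so limb i carries bit offset ceil(25.5*i). A small hypothetical helper that reproduces the offsets named in that comment (0, 26, 51, 77, 102, 128, 153, 179, 204, 230), included only for orientation:

#include <stdio.h>

int main(void) {
  for (int i = 0; i < 10; i++) {
    int offset = (51 * i + 1) / 2;  // ceil(25.5 * i)
    printf("t[%d] is scaled by 2^%d\n", i, offset);
  }
  return 0;
}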
- * - * Representations: - * ge_p2 (projective): (X:Y:Z) satisfying x=X/Z, y=Y/Z - * ge_p3 (extended): (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT - * ge_p1p1 (completed): ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T - * ge_precomp (Duif): (y+x,y-x,2dxy) */ - -typedef struct { - fe X; - fe Y; - fe Z; -} ge_p2; - -typedef struct { - fe X; - fe Y; - fe Z; - fe T; -} ge_p3; - -typedef struct { - fe X; - fe Y; - fe Z; - fe T; -} ge_p1p1; - -typedef struct { - fe yplusx; - fe yminusx; - fe xy2d; -} ge_precomp; - -typedef struct { - fe YplusX; - fe YminusX; - fe Z; - fe T2d; -} ge_cached; - -void x25519_ge_tobytes(uint8_t *s, const ge_p2 *h); -int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t *s); -void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p); -void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p); -void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p); -void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q); -void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q); -void x25519_ge_scalarmult_small_precomp( - ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]); -void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]); -void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A); -void x25519_sc_reduce(uint8_t *s); - - -#if defined(__cplusplus) -} /* extern C */ -#endif - -#endif /* OPENSSL_HEADER_CURVE25519_INTERNAL_H */ diff --git a/Sources/BoringSSL/crypto/curve25519/spake25519.c b/Sources/BoringSSL/crypto/curve25519/spake25519.c index 5b794b377..e0ff9baee 100644 --- a/Sources/BoringSSL/crypto/curve25519/spake25519.c +++ b/Sources/BoringSSL/crypto/curve25519/spake25519.c @@ -14,6 +14,7 @@ #include +#include #include #include @@ -21,84 +22,86 @@ #include #include -#include "internal.h" #include "../internal.h" +#include "../../third_party/fiat/internal.h" + + +// The following precomputation tables are for the following +// points used in the SPAKE2 protocol. +// +// N: +// x: 49918732221787544735331783592030787422991506689877079631459872391322455579424 +// y: 54629554431565467720832445949441049581317094546788069926228343916274969994000 +// encoded: 10e3df0ae37d8e7a99b5fe74b44672103dbddcbd06af680d71329a11693bc778 +// +// M: +// x: 31406539342727633121250288103050113562375374900226415211311216773867585644232 +// y: 21177308356423958466833845032658859666296341766942662650232962324899758529114 +// encoded: 5ada7e4bf6ddd9adb6626d32131c6b5c51a1e347a3478f53cfcf441b88eed12e +// +// These points and their precomputation tables are generated with the +// following Python code. For a description of the precomputation table, +// see curve25519.c in this directory. +// +// Exact copies of the source code are kept in bug 27296743. 
+ +/* +import hashlib +import ed25519 as E # http://ed25519.cr.yp.to/python/ed25519.py + +SEED_N = 'edwards25519 point generation seed (N)' +SEED_M = 'edwards25519 point generation seed (M)' + +def genpoint(seed): + v = hashlib.sha256(seed).digest() + it = 1 + while True: + try: + x,y = E.decodepoint(v) + except Exception, e: + print e + it += 1 + v = hashlib.sha256(v).digest() + continue + print "Found in %d iterations:" % it + print " x = %d" % x + print " y = %d" % y + print " Encoded (hex)" + print E.encodepoint((x,y)).encode('hex') + return (x,y) + +def gentable(P): + t = [] + for i in range(1,16): + k = ((i >> 3 & 1) * (1 << 192) + + (i >> 2 & 1) * (1 << 128) + + (i >> 1 & 1) * (1 << 64) + + (i & 1)) + t.append(E.scalarmult(P, k)) + return ''.join(E.encodeint(x) + E.encodeint(y) for (x,y) in t) + +def printtable(table, name): + print "static const uint8_t %s[15 * 2 * 32] = {" % name, + for i in range(15 * 2 * 32): + if i % 12 == 0: + print "\n ", + print " 0x%02x," % ord(table[i]), + print "\n};" + +if __name__ == "__main__": + print "Searching for N" + N = genpoint(SEED_N) + print "Generating precomputation table for N" + Ntable = gentable(N) + printtable(Ntable, "kSpakeNSmallPrecomp") + + print "Searching for M" + M = genpoint(SEED_M) + print "Generating precomputation table for M" + Mtable = gentable(M) + printtable(Mtable, "kSpakeMSmallPrecomp") +*/ - -/* The following precomputation tables are for the following - * points used in the SPAKE2 protocol. - * - * N: - * x: 49918732221787544735331783592030787422991506689877079631459872391322455579424 - * y: 54629554431565467720832445949441049581317094546788069926228343916274969994000 - * encoded: 10e3df0ae37d8e7a99b5fe74b44672103dbddcbd06af680d71329a11693bc778 - * - * M: - * x: 31406539342727633121250288103050113562375374900226415211311216773867585644232 - * y: 21177308356423958466833845032658859666296341766942662650232962324899758529114 - * encoded: 5ada7e4bf6ddd9adb6626d32131c6b5c51a1e347a3478f53cfcf441b88eed12e - * - * These points and their precomputation tables are generated with the - * following Python code. For a description of the precomputation table, - * see curve25519.c in this directory. - * - * Exact copies of the source code are kept in bug 27296743. 
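For orientation, the gentable() helper above walks table indices 1..15 and stores P times a multiplier whose four flag bits sit at positions 0, 64, 128 and 192. This hypothetical snippet only prints those multipliers; it performs no curve arithmetic and is not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  for (unsigned i = 1; i < 16; i++) {
    uint64_t k[4] = {i & 1, (i >> 1) & 1, (i >> 2) & 1, (i >> 3) & 1};
    printf("table[%2u]: k = %llu + %llu*2^64 + %llu*2^128 + %llu*2^192\n",
           i - 1, (unsigned long long)k[0], (unsigned long long)k[1],
           (unsigned long long)k[2], (unsigned long long)k[3]);
  }
  return 0;
}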
- * - * import hashlib - * import ed25519 as E # http://ed25519.cr.yp.to/python/ed25519.py - * - * SEED_N = 'edwards25519 point generation seed (N)' - * SEED_M = 'edwards25519 point generation seed (M)' - * - * def genpoint(seed): - * v = hashlib.sha256(seed).digest() - * it = 1 - * while True: - * try: - * x,y = E.decodepoint(v) - * except Exception, e: - * print e - * it += 1 - * v = hashlib.sha256(v).digest() - * continue - * print "Found in %d iterations:" % it - * print " x = %d" % x - * print " y = %d" % y - * print " Encoded (hex)" - * print E.encodepoint((x,y)).encode('hex') - * return (x,y) - * - * def gentable(P): - * t = [] - * for i in range(1,16): - * k = (i >> 3 & 1) * (1 << 192) + \ - * (i >> 2 & 1) * (1 << 128) + \ - * (i >> 1 & 1) * (1 << 64) + \ - * (i & 1) - * t.append(E.scalarmult(P, k)) - * return ''.join(E.encodeint(x) + E.encodeint(y) for (x,y) in t) - * - * def printtable(table, name): - * print "static const uint8_t %s[15 * 2 * 32] = {" % name, - * for i in range(15 * 2 * 32): - * if i % 12 == 0: - * print "\n ", - * print " 0x%02x," % ord(table[i]), - * print "\n};" - * - * if __name__ == "__main__": - * print "Searching for N" - * N = genpoint(SEED_N) - * print "Generating precomputation table for N" - * Ntable = gentable(N) - * printtable(Ntable, "kSpakeNSmallPrecomp") - * - * print "Searching for M" - * M = genpoint(SEED_M) - * print "Generating precomputation table for M" - * Mtable = gentable(M) - * printtable(Mtable, "kSpakeMSmallPrecomp") - */ static const uint8_t kSpakeNSmallPrecomp[15 * 2 * 32] = { 0x20, 0x1b, 0xc5, 0xb3, 0x43, 0x17, 0x71, 0x10, 0x44, 0x1e, 0x73, 0xb3, 0xae, 0x3f, 0xbf, 0x9f, 0xf5, 0x44, 0xc8, 0x13, 0x8f, 0xd1, 0x01, 0xc2, @@ -265,25 +268,6 @@ static const uint8_t kSpakeMSmallPrecomp[15 * 2 * 32] = { 0xa6, 0x76, 0x81, 0x28, 0xb2, 0x65, 0xe8, 0x47, 0x14, 0xc6, 0x39, 0x06, }; -enum spake2_state_t { - spake2_state_init = 0, - spake2_state_msg_generated, - spake2_state_key_generated, -}; - -struct spake2_ctx_st { - uint8_t private_key[32]; - uint8_t my_msg[32]; - uint8_t password_scalar[32]; - uint8_t password_hash[SHA512_DIGEST_LENGTH]; - uint8_t *my_name; - size_t my_name_len; - uint8_t *their_name; - size_t their_name_len; - enum spake2_role_t my_role; - enum spake2_state_t state; -}; - SPAKE2_CTX *SPAKE2_CTX_new(enum spake2_role_t my_role, const uint8_t *my_name, size_t my_name_len, const uint8_t *their_name, size_t their_name_len) { @@ -317,8 +301,8 @@ void SPAKE2_CTX_free(SPAKE2_CTX *ctx) { OPENSSL_free(ctx); } -/* left_shift_3 sets |n| to |n|*8, where |n| is represented in little-endian - * order. */ +// left_shift_3 sets |n| to |n|*8, where |n| is represented in little-endian +// order. static void left_shift_3(uint8_t n[32]) { uint8_t carry = 0; unsigned i; @@ -330,6 +314,48 @@ static void left_shift_3(uint8_t n[32]) { } } +typedef union { + uint8_t bytes[32]; + uint32_t words[8]; +} scalar; + +// kOrder is the order of the prime-order subgroup of curve25519 in +// little-endian order. +static const scalar kOrder = {{0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, + 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}}; + +// scalar_cmov copies |src| to |dest| if |mask| is all ones. +static void scalar_cmov(scalar *dest, const scalar *src, crypto_word_t mask) { + for (size_t i = 0; i < 8; i++) { + dest->words[i] = + constant_time_select_w(mask, src->words[i], dest->words[i]); + } +} + +// scalar_double sets |s| to |2×s|. 
+static void scalar_double(scalar *s) { + uint32_t carry = 0; + + for (size_t i = 0; i < 8; i++) { + const uint32_t carry_out = s->words[i] >> 31; + s->words[i] = (s->words[i] << 1) | carry; + carry = carry_out; + } +} + +// scalar_add sets |dest| to |dest| plus |src|. +static void scalar_add(scalar *dest, const scalar *src) { + uint32_t carry = 0; + + for (size_t i = 0; i < 8; i++) { + uint64_t tmp = ((uint64_t)dest->words[i] + src->words[i]) + carry; + dest->words[i] = (uint32_t)tmp; + carry = (uint32_t)(tmp >> 32); + } +} + int SPAKE2_generate_msg(SPAKE2_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *password, size_t password_len) { @@ -344,34 +370,82 @@ int SPAKE2_generate_msg(SPAKE2_CTX *ctx, uint8_t *out, size_t *out_len, uint8_t private_tmp[64]; RAND_bytes(private_tmp, sizeof(private_tmp)); x25519_sc_reduce(private_tmp); - /* Multiply by the cofactor (eight) so that we'll clear it when operating on - * the peer's point later in the protocol. */ + // Multiply by the cofactor (eight) so that we'll clear it when operating on + // the peer's point later in the protocol. left_shift_3(private_tmp); OPENSSL_memcpy(ctx->private_key, private_tmp, sizeof(ctx->private_key)); ge_p3 P; x25519_ge_scalarmult_base(&P, ctx->private_key); - /* mask = h(password) * . */ + // mask = h(password) * . uint8_t password_tmp[SHA512_DIGEST_LENGTH]; SHA512(password, password_len, password_tmp); OPENSSL_memcpy(ctx->password_hash, password_tmp, sizeof(ctx->password_hash)); x25519_sc_reduce(password_tmp); - OPENSSL_memcpy(ctx->password_scalar, password_tmp, sizeof(ctx->password_scalar)); + + // Due to a copy-paste error, the call to |left_shift_3| was omitted after + // the |x25519_sc_reduce|, just above. This meant that |ctx->password_scalar| + // was not a multiple of eight to clear the cofactor and thus three bits of + // the password hash would leak. In order to fix this in a unilateral way, + // points of small order are added to the mask point such that it is in the + // prime-order subgroup. Since the ephemeral scalar is a multiple of eight, + // these points will cancel out when calculating the shared secret. + // + // Adding points of small order is the same as adding multiples of the prime + // order to the password scalar. Since that's faster, that is what is done + // below. The prime order (kOrder) is a large prime, thus odd, thus the LSB + // is one. So adding it will flip the LSB. Adding twice it will flip the next + // bit and so one for all the bottom three bits. + + scalar password_scalar; + OPENSSL_memcpy(&password_scalar, password_tmp, sizeof(password_scalar)); + + // |password_scalar| is the result of |x25519_sc_reduce| and thus is, at + // most, $l-1$ (where $l$ is |kOrder|, the order of the prime-order subgroup + // of Ed25519). In the following, we may add $l + 2×l + 4×l$ for a max value + // of $8×l-1$. That is < 2**256, as required. 
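The new scalar helpers are plain 256-bit little-endian arithmetic over eight 32-bit words: doubling is a left shift with carry, addition is schoolbook with a 64-bit accumulator. A self-contained sanity check (simplified to the word view only, not the byte/word union the patch defines):

#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint32_t words[8]; } scalar;  // word view only, for the test

static void scalar_double(scalar *s) {
  uint32_t carry = 0;
  for (size_t i = 0; i < 8; i++) {
    const uint32_t carry_out = s->words[i] >> 31;
    s->words[i] = (s->words[i] << 1) | carry;
    carry = carry_out;
  }
}

static void scalar_add(scalar *dest, const scalar *src) {
  uint32_t carry = 0;
  for (size_t i = 0; i < 8; i++) {
    uint64_t tmp = ((uint64_t)dest->words[i] + src->words[i]) + carry;
    dest->words[i] = (uint32_t)tmp;
    carry = (uint32_t)(tmp >> 32);
  }
}

int main(void) {
  scalar a = {{0xffffffffu, 1, 0, 0, 0, 0, 0, 0}};  // forces a carry out of word 0
  scalar doubled = a, summed = a;
  scalar_double(&doubled);
  scalar_add(&summed, &a);
  assert(memcmp(&doubled, &summed, sizeof(scalar)) == 0);  // 2*a == a + a
  return 0;
}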
+ + if (!ctx->disable_password_scalar_hack) { + scalar order = kOrder; + scalar tmp; + + OPENSSL_memset(&tmp, 0, sizeof(tmp)); + scalar_cmov(&tmp, &order, + constant_time_eq_w(password_scalar.bytes[0] & 1, 1)); + scalar_add(&password_scalar, &tmp); + + scalar_double(&order); + OPENSSL_memset(&tmp, 0, sizeof(tmp)); + scalar_cmov(&tmp, &order, + constant_time_eq_w(password_scalar.bytes[0] & 2, 2)); + scalar_add(&password_scalar, &tmp); + + scalar_double(&order); + OPENSSL_memset(&tmp, 0, sizeof(tmp)); + scalar_cmov(&tmp, &order, + constant_time_eq_w(password_scalar.bytes[0] & 4, 4)); + scalar_add(&password_scalar, &tmp); + + assert((password_scalar.bytes[0] & 7) == 0); + } + + OPENSSL_memcpy(ctx->password_scalar, password_scalar.bytes, + sizeof(ctx->password_scalar)); ge_p3 mask; x25519_ge_scalarmult_small_precomp(&mask, ctx->password_scalar, - ctx->my_role == spake2_role_alice - ? kSpakeMSmallPrecomp - : kSpakeNSmallPrecomp); + ctx->my_role == spake2_role_alice + ? kSpakeMSmallPrecomp + : kSpakeNSmallPrecomp); - /* P* = P + mask. */ + // P* = P + mask. ge_cached mask_cached; x25519_ge_p3_to_cached(&mask_cached, &mask); ge_p1p1 Pstar; x25519_ge_add(&Pstar, &P, &mask_cached); - /* Encode P* */ + // Encode P* ge_p2 Pstar_proj; x25519_ge_p1p1_to_p2(&Pstar_proj, &Pstar); x25519_ge_tobytes(ctx->my_msg, &Pstar_proj); @@ -408,11 +482,11 @@ int SPAKE2_process_msg(SPAKE2_CTX *ctx, uint8_t *out_key, size_t *out_key_len, ge_p3 Qstar; if (0 != x25519_ge_frombytes_vartime(&Qstar, their_msg)) { - /* Point received from peer was not on the curve. */ + // Point received from peer was not on the curve. return 0; } - /* Unmask peer's value. */ + // Unmask peer's value. ge_p3 peers_mask; x25519_ge_scalarmult_small_precomp(&peers_mask, ctx->password_scalar, ctx->my_role == spake2_role_alice diff --git a/Sources/BoringSSL/crypto/curve25519/x25519-x86_64.c b/Sources/BoringSSL/crypto/curve25519/x25519-x86_64.c index 9c3d41447..41db0bddc 100644 --- a/Sources/BoringSSL/crypto/curve25519/x25519-x86_64.c +++ b/Sources/BoringSSL/crypto/curve25519/x25519-x86_64.c @@ -12,26 +12,26 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP - * 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as - * public domain but this file has the ISC license just to keep licencing - * simple. - * - * The field functions are shared by Ed25519 and X25519 where possible. */ +// This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP +// 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as +// public domain but this file has the ISC license just to keep licencing +// simple. +// +// The field functions are shared by Ed25519 and X25519 where possible. 
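The password-scalar fix just above can be modelled with ordinary integers: conditionally adding l, 2*l and 4*l (l odd) zeroes the low three bits while leaving the value unchanged mod l and bounded by 8*l, exactly the properties the comments claim. A hypothetical toy check with a small odd modulus standing in for the group order:

#include <assert.h>
#include <stdint.h>

int main(void) {
  const uint64_t ell = 1000003;  // any odd stand-in for the (much larger) group order
  for (uint64_t s = 0; s < ell; s++) {
    uint64_t t = s;
    if (t & 1) t += ell;         // l is odd, so this clears bit 0
    if (t & 2) t += 2 * ell;     // 2*l has bit 1 set and bit 0 clear
    if (t & 4) t += 4 * ell;     // 4*l has bit 2 set and bits 0-1 clear
    assert((t & 7) == 0);        // low three bits cleared: a multiple of the cofactor
    assert(t % ell == s % ell);  // value unchanged mod l
    assert(t < 8 * ell);         // the "at most 8*l - 1" bound quoted above
  }
  return 0;
}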
#include #include #include "../internal.h" -#include "internal.h" +#include "../../third_party/fiat/internal.h" #if defined(BORINGSSL_X25519_X86_64) typedef struct { uint64_t v[5]; } fe25519; -/* These functions are defined in asm/x25519-x86_64.S */ +// These functions are defined in asm/x25519-x86_64.S void x25519_x86_64_work_cswap(fe25519 *, uint64_t); void x25519_x86_64_mul(fe25519 *out, const fe25519 *a, const fe25519 *b); void x25519_x86_64_square(fe25519 *out, const fe25519 *a); @@ -46,7 +46,7 @@ static void fe25519_setint(fe25519 *r, unsigned v) { r->v[4] = 0; } -/* Assumes input x being reduced below 2^255 */ +// Assumes input x being reduced below 2^255 static void fe25519_pack(unsigned char r[32], const fe25519 *x) { fe25519 t; t = *x; @@ -244,4 +244,4 @@ void x25519_x86_64(uint8_t out[32], const uint8_t scalar[32], fe25519_pack(out, &t); } -#endif /* BORINGSSL_X25519_X86_64 */ +#endif // BORINGSSL_X25519_X86_64 diff --git a/Sources/BoringSSL/crypto/dh/check.c b/Sources/BoringSSL/crypto/dh/check.c index f40e03419..454ad44a1 100644 --- a/Sources/BoringSSL/crypto/dh/check.c +++ b/Sources/BoringSSL/crypto/dh/check.c @@ -59,8 +59,8 @@ #include -int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *ret) { - *ret = 0; +int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags) { + *out_flags = 0; BN_CTX *ctx = BN_CTX_new(); if (ctx == NULL) { @@ -70,34 +70,34 @@ int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *ret) { int ok = 0; - /* Check |pub_key| is greater than 1. */ + // Check |pub_key| is greater than 1. BIGNUM *tmp = BN_CTX_get(ctx); if (tmp == NULL || !BN_set_word(tmp, 1)) { goto err; } if (BN_cmp(pub_key, tmp) <= 0) { - *ret |= DH_CHECK_PUBKEY_TOO_SMALL; + *out_flags |= DH_CHECK_PUBKEY_TOO_SMALL; } - /* Check |pub_key| is less than |dh->p| - 1. */ + // Check |pub_key| is less than |dh->p| - 1. if (!BN_copy(tmp, dh->p) || !BN_sub_word(tmp, 1)) { goto err; } if (BN_cmp(pub_key, tmp) >= 0) { - *ret |= DH_CHECK_PUBKEY_TOO_LARGE; + *out_flags |= DH_CHECK_PUBKEY_TOO_LARGE; } if (dh->q != NULL) { - /* Check |pub_key|^|dh->q| is 1 mod |dh->p|. This is necessary for RFC 5114 - * groups which are not safe primes but pick a generator on a prime-order - * subgroup of size |dh->q|. */ - if (!BN_mod_exp(tmp, pub_key, dh->q, dh->p, ctx)) { + // Check |pub_key|^|dh->q| is 1 mod |dh->p|. This is necessary for RFC 5114 + // groups which are not safe primes but pick a generator on a prime-order + // subgroup of size |dh->q|. + if (!BN_mod_exp_mont(tmp, pub_key, dh->q, dh->p, ctx, NULL)) { goto err; } if (!BN_is_one(tmp)) { - *ret |= DH_CHECK_PUBKEY_INVALID; + *out_flags |= DH_CHECK_PUBKEY_INVALID; } } @@ -110,20 +110,19 @@ int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *ret) { } -int DH_check(const DH *dh, int *ret) { - /* Check that p is a safe prime and if g is 2, 3 or 5, check that it is a - * suitable generator where: - * for 2, p mod 24 == 11 - * for 3, p mod 12 == 5 - * for 5, p mod 10 == 3 or 7 - * should hold. - */ +int DH_check(const DH *dh, int *out_flags) { + // Check that p is a safe prime and if g is 2, 3 or 5, check that it is a + // suitable generator where: + // for 2, p mod 24 == 11 + // for 3, p mod 12 == 5 + // for 5, p mod 10 == 3 or 7 + // should hold. 
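The residue tests DH_check relies on (the body continues in the hunk below) are standard facts about safe primes: for p = 2q+1 with q an odd prime, 2 is a primitive root mod p exactly when p mod 24 == 11, and 5 exactly when p mod 10 is 3 or 7. A hypothetical brute-force confirmation over small safe primes, not part of the patch:

#include <assert.h>
#include <stdio.h>

static int is_prime(unsigned n) {
  if (n < 2) return 0;
  for (unsigned d = 2; d * d <= n; d++) {
    if (n % d == 0) return 0;
  }
  return 1;
}

// Multiplicative order of g mod p, p prime, by brute force.
static unsigned order_mod(unsigned g, unsigned p) {
  unsigned x = g % p, k = 1;
  while (x != 1) {
    x = x * g % p;
    k++;
  }
  return k;
}

int main(void) {
  for (unsigned q = 3; q < 2000; q++) {
    unsigned p = 2 * q + 1;
    if (!is_prime(q) || !is_prime(p)) continue;  // only safe primes
    int two_generates = order_mod(2, p) == p - 1;
    int five_generates = order_mod(5, p) == p - 1;
    assert(two_generates == (p % 24 == 11));
    assert(five_generates == (p % 10 == 3 || p % 10 == 7));
  }
  printf("residue conditions hold for all small safe primes\n");
  return 0;
}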
int ok = 0, r; BN_CTX *ctx = NULL; BN_ULONG l; BIGNUM *t1 = NULL, *t2 = NULL; - *ret = 0; + *out_flags = 0; ctx = BN_CTX_new(); if (ctx == NULL) { goto err; @@ -140,16 +139,16 @@ int DH_check(const DH *dh, int *ret) { if (dh->q) { if (BN_cmp(dh->g, BN_value_one()) <= 0) { - *ret |= DH_CHECK_NOT_SUITABLE_GENERATOR; + *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR; } else if (BN_cmp(dh->g, dh->p) >= 0) { - *ret |= DH_CHECK_NOT_SUITABLE_GENERATOR; + *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR; } else { - /* Check g^q == 1 mod p */ - if (!BN_mod_exp(t1, dh->g, dh->q, dh->p, ctx)) { + // Check g^q == 1 mod p + if (!BN_mod_exp_mont(t1, dh->g, dh->q, dh->p, ctx, NULL)) { goto err; } if (!BN_is_one(t1)) { - *ret |= DH_CHECK_NOT_SUITABLE_GENERATOR; + *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR; } } r = BN_is_prime_ex(dh->q, BN_prime_checks, ctx, NULL); @@ -157,17 +156,17 @@ int DH_check(const DH *dh, int *ret) { goto err; } if (!r) { - *ret |= DH_CHECK_Q_NOT_PRIME; + *out_flags |= DH_CHECK_Q_NOT_PRIME; } - /* Check p == 1 mod q i.e. q divides p - 1 */ + // Check p == 1 mod q i.e. q divides p - 1 if (!BN_div(t1, t2, dh->p, dh->q, ctx)) { goto err; } if (!BN_is_one(t2)) { - *ret |= DH_CHECK_INVALID_Q_VALUE; + *out_flags |= DH_CHECK_INVALID_Q_VALUE; } if (dh->j && BN_cmp(dh->j, t1)) { - *ret |= DH_CHECK_INVALID_J_VALUE; + *out_flags |= DH_CHECK_INVALID_J_VALUE; } } else if (BN_is_word(dh->g, DH_GENERATOR_2)) { l = BN_mod_word(dh->p, 24); @@ -175,7 +174,7 @@ int DH_check(const DH *dh, int *ret) { goto err; } if (l != 11) { - *ret |= DH_CHECK_NOT_SUITABLE_GENERATOR; + *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR; } } else if (BN_is_word(dh->g, DH_GENERATOR_5)) { l = BN_mod_word(dh->p, 10); @@ -183,10 +182,10 @@ int DH_check(const DH *dh, int *ret) { goto err; } if (l != 3 && l != 7) { - *ret |= DH_CHECK_NOT_SUITABLE_GENERATOR; + *out_flags |= DH_CHECK_NOT_SUITABLE_GENERATOR; } } else { - *ret |= DH_CHECK_UNABLE_TO_CHECK_GENERATOR; + *out_flags |= DH_CHECK_UNABLE_TO_CHECK_GENERATOR; } r = BN_is_prime_ex(dh->p, BN_prime_checks, ctx, NULL); @@ -194,7 +193,7 @@ int DH_check(const DH *dh, int *ret) { goto err; } if (!r) { - *ret |= DH_CHECK_P_NOT_PRIME; + *out_flags |= DH_CHECK_P_NOT_PRIME; } else if (!dh->q) { if (!BN_rshift1(t1, dh->p)) { goto err; @@ -204,7 +203,7 @@ int DH_check(const DH *dh, int *ret) { goto err; } if (!r) { - *ret |= DH_CHECK_P_NOT_SAFE_PRIME; + *out_flags |= DH_CHECK_P_NOT_SAFE_PRIME; } } ok = 1; diff --git a/Sources/BoringSSL/crypto/dh/dh.c b/Sources/BoringSSL/crypto/dh/dh.c index 33c36f31f..7b7b83359 100644 --- a/Sources/BoringSSL/crypto/dh/dh.c +++ b/Sources/BoringSSL/crypto/dh/dh.c @@ -124,6 +124,20 @@ void DH_get0_key(const DH *dh, const BIGNUM **out_pub_key, } } +int DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key) { + if (pub_key != NULL) { + BN_free(dh->pub_key); + dh->pub_key = pub_key; + } + + if (priv_key != NULL) { + BN_free(dh->priv_key); + dh->priv_key = priv_key; + } + + return 1; +} + void DH_get0_pqg(const DH *dh, const BIGNUM **out_p, const BIGNUM **out_q, const BIGNUM **out_g) { if (out_p != NULL) { @@ -137,33 +151,55 @@ void DH_get0_pqg(const DH *dh, const BIGNUM **out_p, const BIGNUM **out_q, } } +int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g) { + if ((dh->p == NULL && p == NULL) || + (dh->g == NULL && g == NULL)) { + return 0; + } + + if (p != NULL) { + BN_free(dh->p); + dh->p = p; + } + + if (q != NULL) { + BN_free(dh->q); + dh->q = q; + } + + if (g != NULL) { + BN_free(dh->g); + dh->g = g; + } + + return 1; +} + int 
DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *cb) { - /* We generate DH parameters as follows - * find a prime q which is prime_bits/2 bits long. - * p=(2*q)+1 or (p-1)/2 = q - * For this case, g is a generator if - * g^((p-1)/q) mod p != 1 for values of q which are the factors of p-1. - * Since the factors of p-1 are q and 2, we just need to check - * g^2 mod p != 1 and g^q mod p != 1. - * - * Having said all that, - * there is another special case method for the generators 2, 3 and 5. - * for 2, p mod 24 == 11 - * for 3, p mod 12 == 5 <<<<< does not work for safe primes. - * for 5, p mod 10 == 3 or 7 - * - * Thanks to Phil Karn for the pointers about the - * special generators and for answering some of my questions. - * - * I've implemented the second simple method :-). - * Since DH should be using a safe prime (both p and q are prime), - * this generator function can take a very very long time to run. - */ - - /* Actually there is no reason to insist that 'generator' be a generator. - * It's just as OK (and in some sense better) to use a generator of the - * order-q subgroup. - */ + // We generate DH parameters as follows + // find a prime q which is prime_bits/2 bits long. + // p=(2*q)+1 or (p-1)/2 = q + // For this case, g is a generator if + // g^((p-1)/q) mod p != 1 for values of q which are the factors of p-1. + // Since the factors of p-1 are q and 2, we just need to check + // g^2 mod p != 1 and g^q mod p != 1. + // + // Having said all that, + // there is another special case method for the generators 2, 3 and 5. + // for 2, p mod 24 == 11 + // for 3, p mod 12 == 5 <<<<< does not work for safe primes. + // for 5, p mod 10 == 3 or 7 + // + // Thanks to Phil Karn for the pointers about the + // special generators and for answering some of my questions. + // + // I've implemented the second simple method :-). + // Since DH should be using a safe prime (both p and q are prime), + // this generator function can take a very very long time to run. + + // Actually there is no reason to insist that 'generator' be a generator. + // It's just as OK (and in some sense better) to use a generator of the + // order-q subgroup. 
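The "<<<<< does not work for safe primes" note for g = 3 has a one-line reason: a safe prime p = 2q+1 with q odd is always 3 mod 4, while "p mod 12 == 5" would force p to be 1 mod 4. A hypothetical check of that observation:

#include <assert.h>

int main(void) {
  for (unsigned q = 3; q < 100000; q += 2) {  // any odd q, prime or not
    unsigned p = 2 * q + 1;
    assert(p % 4 == 3);   // 2q is 2 mod 4 when q is odd
    assert(p % 12 != 5);  // so the g == 3 residue test can never fire
  }
  return 0;
}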
BIGNUM *t1, *t2; int g, ok = 0; @@ -180,7 +216,7 @@ int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *c goto err; } - /* Make sure |dh| has the necessary elements */ + // Make sure |dh| has the necessary elements if (dh->p == NULL) { dh->p = BN_new(); if (dh->p == NULL) { @@ -213,14 +249,14 @@ int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *c if (!BN_set_word(t2, 3)) { goto err; } - /* BN_set_word(t3,7); just have to miss - * out on these ones :-( */ + // BN_set_word(t3,7); just have to miss + // out on these ones :-( g = 5; } else { - /* in the general case, don't worry if 'generator' is a - * generator or not: since we are using safe primes, - * it will generate either an order-q or an order-2q group, - * which both is OK */ + // in the general case, don't worry if 'generator' is a + // generator or not: since we are using safe primes, + // it will generate either an order-q or an order-2q group, + // which both is OK if (!BN_set_word(t1, 2)) { goto err; } @@ -299,7 +335,7 @@ int DH_generate_key(DH *dh) { goto err; } } else { - /* secret exponent length */ + // secret exponent length unsigned priv_bits = dh->priv_length; if (priv_bits == 0) { const unsigned p_bits = BN_num_bits(dh->p); @@ -465,9 +501,9 @@ DH *DHparams_dup(const DH *dh) { } int DH_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func) { + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { int index; - if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, dup_func, + if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, free_func)) { return -1; } diff --git a/Sources/BoringSSL/crypto/dh/dh_asn1.c b/Sources/BoringSSL/crypto/dh/dh_asn1.c index 1a147eea9..9d3218079 100644 --- a/Sources/BoringSSL/crypto/dh/dh_asn1.c +++ b/Sources/BoringSSL/crypto/dh/dh_asn1.c @@ -76,7 +76,7 @@ static int parse_integer(CBS *cbs, BIGNUM **out) { static int marshal_integer(CBB *cbb, BIGNUM *bn) { if (bn == NULL) { - /* A DH object may be missing some components. */ + // A DH object may be missing some components. 
OPENSSL_PUT_ERROR(DH, ERR_R_PASSED_NULL_PARAMETER); return 0; } diff --git a/Sources/BoringSSL/crypto/dh/params.c b/Sources/BoringSSL/crypto/dh/params.c index 4cec700a2..333602916 100644 --- a/Sources/BoringSSL/crypto/dh/params.c +++ b/Sources/BoringSSL/crypto/dh/params.c @@ -54,169 +54,9 @@ #include -#include "../bn/internal.h" +#include "../fipsmodule/bn/internal.h" -static const BN_ULONG dh1024_160_p[] = { - TOBN(0xDF1FB2BC, 0x2E4A4371), TOBN(0xE68CFDA7, 0x6D4DA708), - TOBN(0x45BF37DF, 0x365C1A65), TOBN(0xA151AF5F, 0x0DC8B4BD), - TOBN(0xFAA31A4F, 0xF55BCCC0), TOBN(0x4EFFD6FA, 0xE5644738), - TOBN(0x98488E9C, 0x219A7372), TOBN(0xACCBDD7D, 0x90C4BD70), - TOBN(0x24975C3C, 0xD49B83BF), TOBN(0x13ECB4AE, 0xA9061123), - TOBN(0x9838EF1E, 0x2EE652C0), TOBN(0x6073E286, 0x75A23D18), - TOBN(0x9A6A9DCA, 0x52D23B61), TOBN(0x52C99FBC, 0xFB06A3C6), - TOBN(0xDE92DE5E, 0xAE5D54EC), TOBN(0xB10B8F96, 0xA080E01D), -}; -static const BN_ULONG dh1024_160_g[] = { - TOBN(0x855E6EEB, 0x22B3B2E5), TOBN(0x858F4DCE, 0xF97C2A24), - TOBN(0x2D779D59, 0x18D08BC8), TOBN(0xD662A4D1, 0x8E73AFA3), - TOBN(0x1DBF0A01, 0x69B6A28A), TOBN(0xA6A24C08, 0x7A091F53), - TOBN(0x909D0D22, 0x63F80A76), TOBN(0xD7FBD7D3, 0xB9A92EE1), - TOBN(0x5E91547F, 0x9E2749F4), TOBN(0x160217B4, 0xB01B886A), - TOBN(0x777E690F, 0x5504F213), TOBN(0x266FEA1E, 0x5C41564B), - TOBN(0xD6406CFF, 0x14266D31), TOBN(0xF8104DD2, 0x58AC507F), - TOBN(0x6765A442, 0xEFB99905), TOBN(0xA4D1CBD5, 0xC3FD3412), -}; -static const BN_ULONG dh1024_160_q[] = { - TOBN(0x64B7CB9D, 0x49462353), TOBN(0x81A8DF27, 0x8ABA4E7D), 0xF518AA87, -}; - -static const BN_ULONG dh2048_224_p[] = { - TOBN(0x0AC4DFFE, 0x0C10E64F), TOBN(0xCF9DE538, 0x4E71B81C), - TOBN(0x7EF363E2, 0xFFA31F71), TOBN(0xE3FB73C1, 0x6B8E75B9), - TOBN(0xC9B53DCF, 0x4BA80A29), TOBN(0x23F10B0E, 0x16E79763), - TOBN(0xC52172E4, 0x13042E9B), TOBN(0xBE60E69C, 0xC928B2B9), - TOBN(0x80CD86A1, 0xB9E587E8), TOBN(0x315D75E1, 0x98C641A4), - TOBN(0xCDF93ACC, 0x44328387), TOBN(0x15987D9A, 0xDC0A486D), - TOBN(0x7310F712, 0x1FD5A074), TOBN(0x278273C7, 0xDE31EFDC), - TOBN(0x1602E714, 0x415D9330), TOBN(0x81286130, 0xBC8985DB), - TOBN(0xB3BF8A31, 0x70918836), TOBN(0x6A00E0A0, 0xB9C49708), - TOBN(0xC6BA0B2C, 0x8BBC27BE), TOBN(0xC9F98D11, 0xED34DBF6), - TOBN(0x7AD5B7D0, 0xB6C12207), TOBN(0xD91E8FEF, 0x55B7394B), - TOBN(0x9037C9ED, 0xEFDA4DF8), TOBN(0x6D3F8152, 0xAD6AC212), - TOBN(0x1DE6B85A, 0x1274A0A6), TOBN(0xEB3D688A, 0x309C180E), - TOBN(0xAF9A3C40, 0x7BA1DF15), TOBN(0xE6FA141D, 0xF95A56DB), - TOBN(0xB54B1597, 0xB61D0A75), TOBN(0xA20D64E5, 0x683B9FD1), - TOBN(0xD660FAA7, 0x9559C51F), TOBN(0xAD107E1E, 0x9123A9D0), -}; - -static const BN_ULONG dh2048_224_g[] = { - TOBN(0x84B890D3, 0x191F2BFA), TOBN(0x81BC087F, 0x2A7065B3), - TOBN(0x19C418E1, 0xF6EC0179), TOBN(0x7B5A0F1C, 0x71CFFF4C), - TOBN(0xEDFE72FE, 0x9B6AA4BD), TOBN(0x81E1BCFE, 0x94B30269), - TOBN(0x566AFBB4, 0x8D6C0191), TOBN(0xB539CCE3, 0x409D13CD), - TOBN(0x6AA21E7F, 0x5F2FF381), TOBN(0xD9E263E4, 0x770589EF), - TOBN(0x10E183ED, 0xD19963DD), TOBN(0xB70A8137, 0x150B8EEB), - TOBN(0x051AE3D4, 0x28C8F8AC), TOBN(0xBB77A86F, 0x0C1AB15B), - TOBN(0x6E3025E3, 0x16A330EF), TOBN(0x19529A45, 0xD6F83456), - TOBN(0xF180EB34, 0x118E98D1), TOBN(0xB5F6C6B2, 0x50717CBE), - TOBN(0x09939D54, 0xDA7460CD), TOBN(0xE2471504, 0x22EA1ED4), - TOBN(0xB8A762D0, 0x521BC98A), TOBN(0xF4D02727, 0x5AC1348B), - TOBN(0xC1766910, 0x1999024A), TOBN(0xBE5E9001, 0xA8D66AD7), - TOBN(0xC57DB17C, 0x620A8652), TOBN(0xAB739D77, 0x00C29F52), - TOBN(0xDD921F01, 0xA70C4AFA), TOBN(0xA6824A4E, 0x10B9A6F0), - TOBN(0x74866A08, 
0xCFE4FFE3), TOBN(0x6CDEBE7B, 0x89998CAF), - TOBN(0x9DF30B5C, 0x8FFDAC50), TOBN(0xAC4032EF, 0x4F2D9AE3), -}; - -static const BN_ULONG dh2048_224_q[] = { - TOBN(0xBF389A99, 0xB36371EB), TOBN(0x1F80535A, 0x4738CEBC), - TOBN(0xC58D93FE, 0x99717710), 0x801C0D34, -}; - -static const BN_ULONG dh2048_256_p[] = { - TOBN(0xDB094AE9, 0x1E1A1597), TOBN(0x693877FA, 0xD7EF09CA), - TOBN(0x6116D227, 0x6E11715F), TOBN(0xA4B54330, 0xC198AF12), - TOBN(0x75F26375, 0xD7014103), TOBN(0xC3A3960A, 0x54E710C3), - TOBN(0xDED4010A, 0xBD0BE621), TOBN(0xC0B857F6, 0x89962856), - TOBN(0xB3CA3F79, 0x71506026), TOBN(0x1CCACB83, 0xE6B486F6), - TOBN(0x67E144E5, 0x14056425), TOBN(0xF6A167B5, 0xA41825D9), - TOBN(0x3AD83477, 0x96524D8E), TOBN(0xF13C6D9A, 0x51BFA4AB), - TOBN(0x2D525267, 0x35488A0E), TOBN(0xB63ACAE1, 0xCAA6B790), - TOBN(0x4FDB70C5, 0x81B23F76), TOBN(0xBC39A0BF, 0x12307F5C), - TOBN(0xB941F54E, 0xB1E59BB8), TOBN(0x6C5BFC11, 0xD45F9088), - TOBN(0x22E0B1EF, 0x4275BF7B), TOBN(0x91F9E672, 0x5B4758C0), - TOBN(0x5A8A9D30, 0x6BCF67ED), TOBN(0x209E0C64, 0x97517ABD), - TOBN(0x3BF4296D, 0x830E9A7C), TOBN(0x16C3D911, 0x34096FAA), - TOBN(0xFAF7DF45, 0x61B2AA30), TOBN(0xE00DF8F1, 0xD61957D4), - TOBN(0x5D2CEED4, 0x435E3B00), TOBN(0x8CEEF608, 0x660DD0F2), - TOBN(0xFFBBD19C, 0x65195999), TOBN(0x87A8E61D, 0xB4B6663C), -}; -static const BN_ULONG dh2048_256_g[] = { - TOBN(0x664B4C0F, 0x6CC41659), TOBN(0x5E2327CF, 0xEF98C582), - TOBN(0xD647D148, 0xD4795451), TOBN(0x2F630784, 0x90F00EF8), - TOBN(0x184B523D, 0x1DB246C3), TOBN(0xC7891428, 0xCDC67EB6), - TOBN(0x7FD02837, 0x0DF92B52), TOBN(0xB3353BBB, 0x64E0EC37), - TOBN(0xECD06E15, 0x57CD0915), TOBN(0xB7D2BBD2, 0xDF016199), - TOBN(0xC8484B1E, 0x052588B9), TOBN(0xDB2A3B73, 0x13D3FE14), - TOBN(0xD052B985, 0xD182EA0A), TOBN(0xA4BD1BFF, 0xE83B9C80), - TOBN(0xDFC967C1, 0xFB3F2E55), TOBN(0xB5045AF2, 0x767164E1), - TOBN(0x1D14348F, 0x6F2F9193), TOBN(0x64E67982, 0x428EBC83), - TOBN(0x8AC376D2, 0x82D6ED38), TOBN(0x777DE62A, 0xAAB8A862), - TOBN(0xDDF463E5, 0xE9EC144B), TOBN(0x0196F931, 0xC77A57F2), - TOBN(0xA55AE313, 0x41000A65), TOBN(0x901228F8, 0xC28CBB18), - TOBN(0xBC3773BF, 0x7E8C6F62), TOBN(0xBE3A6C1B, 0x0C6B47B1), - TOBN(0xFF4FED4A, 0xAC0BB555), TOBN(0x10DBC150, 0x77BE463F), - TOBN(0x07F4793A, 0x1A0BA125), TOBN(0x4CA7B18F, 0x21EF2054), - TOBN(0x2E775066, 0x60EDBD48), TOBN(0x3FB32C9B, 0x73134D0B), -}; -static const BN_ULONG dh2048_256_q[] = { - TOBN(0xA308B0FE, 0x64F5FBD3), TOBN(0x99B1A47D, 0x1EB3750B), - TOBN(0xB4479976, 0x40129DA2), TOBN(0x8CF83642, 0xA709A097), -}; - -struct standard_parameters { - BIGNUM p, q, g; -}; - -static const struct standard_parameters dh1024_160 = { - STATIC_BIGNUM(dh1024_160_p), - STATIC_BIGNUM(dh1024_160_q), - STATIC_BIGNUM(dh1024_160_g), -}; - -static const struct standard_parameters dh2048_224 = { - STATIC_BIGNUM(dh2048_224_p), - STATIC_BIGNUM(dh2048_224_q), - STATIC_BIGNUM(dh2048_224_g), -}; - -static const struct standard_parameters dh2048_256 = { - STATIC_BIGNUM(dh2048_256_p), - STATIC_BIGNUM(dh2048_256_q), - STATIC_BIGNUM(dh2048_256_g), -}; - -static DH *get_standard_parameters(const struct standard_parameters *params, - const ENGINE *engine) { - DH *dh = DH_new(); - if (!dh) { - return NULL; - } - - dh->p = BN_dup(¶ms->p); - dh->q = BN_dup(¶ms->q); - dh->g = BN_dup(¶ms->g); - if (!dh->p || !dh->q || !dh->g) { - DH_free(dh); - return NULL; - } - - return dh; -} - -DH *DH_get_1024_160(const ENGINE *engine) { - return get_standard_parameters(&dh1024_160, engine); -} - -DH *DH_get_2048_224(const ENGINE *engine) { - return get_standard_parameters(&dh2048_224, 
engine); -} - -DH *DH_get_2048_256(const ENGINE *engine) { - return get_standard_parameters(&dh2048_256, engine); -} - BIGNUM *BN_get_rfc3526_prime_1536(BIGNUM *ret) { static const BN_ULONG kPrime1536Data[] = { TOBN(0xffffffff, 0xffffffff), TOBN(0xf1746c08, 0xca237327), diff --git a/Sources/BoringSSL/crypto/digest_extra/digest_extra.c b/Sources/BoringSSL/crypto/digest_extra/digest_extra.c new file mode 100644 index 000000000..4b4bb3813 --- /dev/null +++ b/Sources/BoringSSL/crypto/digest_extra/digest_extra.c @@ -0,0 +1,240 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] */ + +#include + +#include + +#include +#include +#include + +#include "../internal.h" + + +struct nid_to_digest { + int nid; + const EVP_MD* (*md_func)(void); + const char *short_name; + const char *long_name; +}; + +static const struct nid_to_digest nid_to_digest_mapping[] = { + {NID_md4, EVP_md4, SN_md4, LN_md4}, + {NID_md5, EVP_md5, SN_md5, LN_md5}, + {NID_sha1, EVP_sha1, SN_sha1, LN_sha1}, + {NID_sha224, EVP_sha224, SN_sha224, LN_sha224}, + {NID_sha256, EVP_sha256, SN_sha256, LN_sha256}, + {NID_sha384, EVP_sha384, SN_sha384, LN_sha384}, + {NID_sha512, EVP_sha512, SN_sha512, LN_sha512}, + {NID_md5_sha1, EVP_md5_sha1, SN_md5_sha1, LN_md5_sha1}, + // As a remnant of signing |EVP_MD|s, OpenSSL returned the corresponding + // hash function when given a signature OID. To avoid unintended lax parsing + // of hash OIDs, this is no longer supported for lookup by OID or NID. + // Node.js, however, exposes |EVP_get_digestbyname|'s full behavior to + // consumers so we retain it there. + {NID_undef, EVP_sha1, SN_dsaWithSHA, LN_dsaWithSHA}, + {NID_undef, EVP_sha1, SN_dsaWithSHA1, LN_dsaWithSHA1}, + {NID_undef, EVP_sha1, SN_ecdsa_with_SHA1, NULL}, + {NID_undef, EVP_md5, SN_md5WithRSAEncryption, LN_md5WithRSAEncryption}, + {NID_undef, EVP_sha1, SN_sha1WithRSAEncryption, LN_sha1WithRSAEncryption}, + {NID_undef, EVP_sha224, SN_sha224WithRSAEncryption, + LN_sha224WithRSAEncryption}, + {NID_undef, EVP_sha256, SN_sha256WithRSAEncryption, + LN_sha256WithRSAEncryption}, + {NID_undef, EVP_sha384, SN_sha384WithRSAEncryption, + LN_sha384WithRSAEncryption}, + {NID_undef, EVP_sha512, SN_sha512WithRSAEncryption, + LN_sha512WithRSAEncryption}, +}; + +const EVP_MD* EVP_get_digestbynid(int nid) { + if (nid == NID_undef) { + // Skip the |NID_undef| entries in |nid_to_digest_mapping|. + return NULL; + } + + for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(nid_to_digest_mapping); i++) { + if (nid_to_digest_mapping[i].nid == nid) { + return nid_to_digest_mapping[i].md_func(); + } + } + + return NULL; +} + +static const struct { + uint8_t oid[9]; + uint8_t oid_len; + int nid; +} kMDOIDs[] = { + // 1.2.840.113549.2.4 + { {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x04}, 8, NID_md4 }, + // 1.2.840.113549.2.5 + { {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05}, 8, NID_md5 }, + // 1.3.14.3.2.26 + { {0x2b, 0x0e, 0x03, 0x02, 0x1a}, 5, NID_sha1 }, + // 2.16.840.1.101.3.4.2.1 + { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01}, 9, NID_sha256 }, + // 2.16.840.1.101.3.4.2.2 + { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02}, 9, NID_sha384 }, + // 2.16.840.1.101.3.4.2.3 + { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03}, 9, NID_sha512 }, + // 2.16.840.1.101.3.4.2.4 + { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04}, 9, NID_sha224 }, +}; + +static const EVP_MD *cbs_to_md(const CBS *cbs) { + for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMDOIDs); i++) { + if (CBS_len(cbs) == kMDOIDs[i].oid_len && + OPENSSL_memcmp(CBS_data(cbs), kMDOIDs[i].oid, kMDOIDs[i].oid_len) == + 0) { + return EVP_get_digestbynid(kMDOIDs[i].nid); + } + } + + return NULL; +} + +const EVP_MD *EVP_get_digestbyobj(const ASN1_OBJECT *obj) { + // Handle objects with no corresponding OID. 
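The kMDOIDs bytes in the new digest_extra.c are the DER bodies of the dotted OIDs shown in the comments: the first two arcs are merged into 40*a1 + a2 and every arc is written base-128, high bit set on all but the last byte. A hypothetical encoder that reproduces the SHA-256 entry (2.16.840.1.101.3.4.2.1 -> 60 86 48 01 65 03 04 02 01), shown only to make the table easier to audit:

#include <stdint.h>
#include <stdio.h>

// Writes one OID arc in base-128, most significant group first.
static size_t encode_arc(uint8_t *out, uint32_t arc) {
  uint8_t tmp[5];
  size_t n = 0;
  do {
    tmp[n++] = arc & 0x7f;
    arc >>= 7;
  } while (arc != 0);
  for (size_t i = 0; i < n; i++) {
    out[i] = tmp[n - 1 - i] | (i + 1 < n ? 0x80 : 0);
  }
  return n;
}

int main(void) {
  const uint32_t sha256_oid[] = {2, 16, 840, 1, 101, 3, 4, 2, 1};
  uint8_t der[32];
  size_t len = encode_arc(der, 40 * sha256_oid[0] + sha256_oid[1]);
  for (size_t i = 2; i < sizeof(sha256_oid) / sizeof(sha256_oid[0]); i++) {
    len += encode_arc(der + len, sha256_oid[i]);
  }
  for (size_t i = 0; i < len; i++) {
    printf("%02x ", der[i]);  // 60 86 48 01 65 03 04 02 01
  }
  printf("\n");
  return 0;
}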
+ if (obj->nid != NID_undef) { + return EVP_get_digestbynid(obj->nid); + } + + CBS cbs; + CBS_init(&cbs, obj->data, obj->length); + return cbs_to_md(&cbs); +} + +const EVP_MD *EVP_parse_digest_algorithm(CBS *cbs) { + CBS algorithm, oid; + if (!CBS_get_asn1(cbs, &algorithm, CBS_ASN1_SEQUENCE) || + !CBS_get_asn1(&algorithm, &oid, CBS_ASN1_OBJECT)) { + OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_DECODE_ERROR); + return NULL; + } + + const EVP_MD *ret = cbs_to_md(&oid); + if (ret == NULL) { + OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_UNKNOWN_HASH); + return NULL; + } + + // The parameters, if present, must be NULL. Historically, whether the NULL + // was included or omitted was not well-specified. When parsing an + // AlgorithmIdentifier, we allow both. (Note this code is not used when + // verifying RSASSA-PKCS1-v1_5 signatures.) + if (CBS_len(&algorithm) > 0) { + CBS param; + if (!CBS_get_asn1(&algorithm, ¶m, CBS_ASN1_NULL) || + CBS_len(¶m) != 0 || + CBS_len(&algorithm) != 0) { + OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_DECODE_ERROR); + return NULL; + } + } + + return ret; +} + +int EVP_marshal_digest_algorithm(CBB *cbb, const EVP_MD *md) { + CBB algorithm, oid, null; + if (!CBB_add_asn1(cbb, &algorithm, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT)) { + OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE); + return 0; + } + + int found = 0; + int nid = EVP_MD_type(md); + for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMDOIDs); i++) { + if (nid == kMDOIDs[i].nid) { + if (!CBB_add_bytes(&oid, kMDOIDs[i].oid, kMDOIDs[i].oid_len)) { + OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE); + return 0; + } + found = 1; + break; + } + } + + if (!found) { + OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_UNKNOWN_HASH); + return 0; + } + + if (!CBB_add_asn1(&algorithm, &null, CBS_ASN1_NULL) || + !CBB_flush(cbb)) { + OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE); + return 0; + } + + return 1; +} + +const EVP_MD *EVP_get_digestbyname(const char *name) { + for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(nid_to_digest_mapping); i++) { + const char *short_name = nid_to_digest_mapping[i].short_name; + const char *long_name = nid_to_digest_mapping[i].long_name; + if ((short_name && strcmp(short_name, name) == 0) || + (long_name && strcmp(long_name, name) == 0)) { + return nid_to_digest_mapping[i].md_func(); + } + } + + return NULL; +} diff --git a/Sources/BoringSSL/crypto/dsa/dsa.c b/Sources/BoringSSL/crypto/dsa/dsa.c index e2b6695e8..f3d4f859d 100644 --- a/Sources/BoringSSL/crypto/dsa/dsa.c +++ b/Sources/BoringSSL/crypto/dsa/dsa.c @@ -72,16 +72,19 @@ #include #include -#include "../bn/internal.h" +#include "../fipsmodule/bn/internal.h" #include "../internal.h" #define OPENSSL_DSA_MAX_MODULUS_BITS 10000 -/* Primality test according to FIPS PUB 186[-1], Appendix 2.1: 50 rounds of - * Rabin-Miller */ +// Primality test according to FIPS PUB 186[-1], Appendix 2.1: 50 rounds of +// Rabin-Miller #define DSS_prime_checks 50 +static int dsa_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, + BIGNUM **out_r); + static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT; DSA *DSA_new(void) { @@ -117,8 +120,6 @@ void DSA_free(DSA *dsa) { BN_clear_free(dsa->g); BN_clear_free(dsa->pub_key); BN_clear_free(dsa->priv_key); - BN_clear_free(dsa->kinv); - BN_clear_free(dsa->r); BN_MONT_CTX_free(dsa->method_mont_p); BN_MONT_CTX_free(dsa->method_mont_q); CRYPTO_MUTEX_cleanup(&dsa->method_mont_lock); @@ -153,6 +154,46 @@ void DSA_get0_pqg(const DSA *dsa, const BIGNUM **out_p, const BIGNUM **out_q, } } +int DSA_set0_key(DSA 
*dsa, BIGNUM *pub_key, BIGNUM *priv_key) { + if (dsa->pub_key == NULL && pub_key == NULL) { + return 0; + } + + if (pub_key != NULL) { + BN_free(dsa->pub_key); + dsa->pub_key = pub_key; + } + if (priv_key != NULL) { + BN_free(dsa->priv_key); + dsa->priv_key = priv_key; + } + + return 1; +} + +int DSA_set0_pqg(DSA *dsa, BIGNUM *p, BIGNUM *q, BIGNUM *g) { + if ((dsa->p == NULL && p == NULL) || + (dsa->q == NULL && q == NULL) || + (dsa->g == NULL && g == NULL)) { + return 0; + } + + if (p != NULL) { + BN_free(dsa->p); + dsa->p = p; + } + if (q != NULL) { + BN_free(dsa->q); + dsa->q = q; + } + if (g != NULL) { + BN_free(dsa->g); + dsa->g = g; + } + + return 1; +} + int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, size_t seed_len, int *out_counter, unsigned long *out_h, BN_GENCB *cb) { @@ -186,7 +227,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, return 0; } if (seed_len > (size_t)qsize) { - /* Only consume as much seed as is expected. */ + // Only consume as much seed as is expected. seed_len = qsize; } OPENSSL_memcpy(seed, seed_in, seed_len); @@ -217,9 +258,9 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, } for (;;) { - /* Find q. */ + // Find q. for (;;) { - /* step 1 */ + // step 1 if (!BN_GENCB_call(cb, 0, m++)) { goto err; } @@ -230,12 +271,12 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } } else { - /* If we come back through, use random seed next time. */ + // If we come back through, use random seed next time. seed_in = NULL; } OPENSSL_memcpy(buf, seed, qsize); OPENSSL_memcpy(buf2, seed, qsize); - /* precompute "SEED + 1" for step 7: */ + // precompute "SEED + 1" for step 7: for (i = qsize - 1; i < qsize; i--) { buf[i]++; if (buf[i] != 0) { @@ -243,7 +284,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, } } - /* step 2 */ + // step 2 if (!EVP_Digest(seed, qsize, md, NULL, evpmd, NULL) || !EVP_Digest(buf, qsize, buf2, NULL, evpmd, NULL)) { goto err; @@ -252,14 +293,14 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, md[i] ^= buf2[i]; } - /* step 3 */ + // step 3 md[0] |= 0x80; md[qsize - 1] |= 0x01; if (!BN_bin2bn(md, qsize, q)) { goto err; } - /* step 4 */ + // step 4 r = BN_is_prime_fasttest_ex(q, DSS_prime_checks, ctx, use_random_seed, cb); if (r > 0) { break; @@ -268,17 +309,17 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } - /* do a callback call */ - /* step 5 */ + // do a callback call + // step 5 } if (!BN_GENCB_call(cb, 2, 0) || !BN_GENCB_call(cb, 3, 0)) { goto err; } - /* step 6 */ + // step 6 counter = 0; - /* "offset = 2" */ + // "offset = 2" n = (bits - 1) / 160; @@ -287,11 +328,11 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } - /* step 7 */ + // step 7 BN_zero(W); - /* now 'buf' contains "SEED + offset - 1" */ + // now 'buf' contains "SEED + offset - 1" for (k = 0; k <= n; k++) { - /* obtain "SEED + offset + k" by incrementing: */ + // obtain "SEED + offset + k" by incrementing: for (i = qsize - 1; i < qsize; i--) { buf[i]++; if (buf[i] != 0) { @@ -303,7 +344,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } - /* step 8 */ + // step 8 if (!BN_bin2bn(md, qsize, r0) || !BN_lshift(r0, r0, (qsize << 3) * k) || !BN_add(W, W, r0)) { @@ -311,14 +352,14 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const 
uint8_t *seed_in, } } - /* more of step 8 */ + // more of step 8 if (!BN_mask_bits(W, bits - 1) || !BN_copy(X, W) || !BN_add(X, X, test)) { goto err; } - /* step 9 */ + // step 9 if (!BN_lshift1(r0, q) || !BN_mod(c, X, r0, ctx) || !BN_sub(r0, c, BN_value_one()) || @@ -326,23 +367,23 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } - /* step 10 */ + // step 10 if (BN_cmp(p, test) >= 0) { - /* step 11 */ + // step 11 r = BN_is_prime_fasttest_ex(p, DSS_prime_checks, ctx, 1, cb); if (r > 0) { - goto end; /* found it */ + goto end; // found it } if (r != 0) { goto err; } } - /* step 13 */ + // step 13 counter++; - /* "offset = offset + n + 1" */ + // "offset = offset + n + 1" - /* step 14 */ + // step 14 if (counter >= 4096) { break; } @@ -353,8 +394,8 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, goto err; } - /* We now need to generate g */ - /* Set r0=(p-1)/q */ + // We now need to generate g + // Set r0=(p-1)/q if (!BN_sub(test, p, BN_value_one()) || !BN_div(r0, NULL, test, q, ctx)) { goto err; @@ -366,7 +407,7 @@ int DSA_generate_parameters_ex(DSA *dsa, unsigned bits, const uint8_t *seed_in, } for (;;) { - /* g=test^r0%p */ + // g=test^r0%p if (!BN_mod_exp_mont(g, test, r0, p, ctx, mont)) { goto err; } @@ -504,14 +545,13 @@ void DSA_SIG_free(DSA_SIG *sig) { OPENSSL_free(sig); } -DSA_SIG *DSA_do_sign(const uint8_t *digest, size_t digest_len, DSA *dsa) { +DSA_SIG *DSA_do_sign(const uint8_t *digest, size_t digest_len, const DSA *dsa) { BIGNUM *kinv = NULL, *r = NULL, *s = NULL; BIGNUM m; BIGNUM xr; BN_CTX *ctx = NULL; int reason = ERR_R_BN_LIB; DSA_SIG *ret = NULL; - int noredo = 0; BN_init(&m); BN_init(&xr); @@ -531,22 +571,14 @@ DSA_SIG *DSA_do_sign(const uint8_t *digest, size_t digest_len, DSA *dsa) { } redo: - if (dsa->kinv == NULL || dsa->r == NULL) { - if (!DSA_sign_setup(dsa, ctx, &kinv, &r)) { - goto err; - } - } else { - kinv = dsa->kinv; - dsa->kinv = NULL; - r = dsa->r; - dsa->r = NULL; - noredo = 1; + if (!dsa_sign_setup(dsa, ctx, &kinv, &r)) { + goto err; } if (digest_len > BN_num_bytes(dsa->q)) { - /* if the digest length is greater than the size of q use the - * BN_num_bits(dsa->q) leftmost bits of the digest, see - * fips 186-3, 4.2 */ + // if the digest length is greater than the size of q use the + // BN_num_bits(dsa->q) leftmost bits of the digest, see + // fips 186-3, 4.2 digest_len = BN_num_bytes(dsa->q); } @@ -554,12 +586,12 @@ DSA_SIG *DSA_do_sign(const uint8_t *digest, size_t digest_len, DSA *dsa) { goto err; } - /* Compute s = inv(k) (m + xr) mod q */ + // Compute s = inv(k) (m + xr) mod q if (!BN_mod_mul(&xr, dsa->priv_key, r, dsa->q, ctx)) { - goto err; /* s = xr */ + goto err; // s = xr } if (!BN_add(s, &xr, &m)) { - goto err; /* s = m + xr */ + goto err; // s = m + xr } if (BN_cmp(s, dsa->q) > 0) { if (!BN_sub(s, s, dsa->q)) { @@ -570,13 +602,9 @@ DSA_SIG *DSA_do_sign(const uint8_t *digest, size_t digest_len, DSA *dsa) { goto err; } - /* Redo if r or s is zero as required by FIPS 186-3: this is - * very unlikely. */ + // Redo if r or s is zero as required by FIPS 186-3: this is + // very unlikely. 
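For reference while reviewing DSA_do_sign above and DSA_do_check_signature just below, a toy walk-through of the equations with tiny, insecure parameters (p=23, q=11, g=2); nothing here comes from the library, it only mirrors r = (g^k mod p) mod q, s = k^{-1}(m + x*r) mod q and the w/u1/u2 verification:

#include <assert.h>
#include <stdint.h>

static uint64_t mod_pow(uint64_t b, uint64_t e, uint64_t m) {
  uint64_t r = 1;
  b %= m;
  while (e) {
    if (e & 1) r = r * b % m;
    b = b * b % m;
    e >>= 1;
  }
  return r;
}

// Inverse mod a prime via Fermat's little theorem, as dsa_sign_setup also does.
static uint64_t mod_inv(uint64_t a, uint64_t prime) {
  return mod_pow(a, prime - 2, prime);
}

int main(void) {
  const uint64_t p = 23, q = 11, g = 2;        // q divides p - 1; g has order q
  const uint64_t x = 7, y = mod_pow(g, x, p);  // private / public key
  const uint64_t m = 5, k = 3;                 // digest reduced mod q, and the nonce

  // Sign: r = (g^k mod p) mod q, s = k^{-1} (m + x*r) mod q.
  uint64_t r = mod_pow(g, k, p) % q;
  uint64_t s = mod_inv(k, q) * (m + x * r) % q;

  // Verify: w = s^{-1} mod q, v = (g^{m*w} * y^{r*w} mod p) mod q must equal r.
  uint64_t w = mod_inv(s, q);
  uint64_t u1 = m * w % q, u2 = r * w % q;
  uint64_t v = mod_pow(g, u1, p) * mod_pow(y, u2, p) % p % q;
  assert(v == r);
  return 0;
}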
if (BN_is_zero(r) || BN_is_zero(s)) { - if (noredo) { - reason = DSA_R_NEED_NEW_SETUP_VALUES; - goto err; - } goto redo; } ret = DSA_SIG_new(); @@ -624,7 +652,7 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest, } i = BN_num_bits(dsa->q); - /* fips 186-3 allows only different sizes for q */ + // fips 186-3 allows only different sizes for q if (i != 160 && i != 224 && i != 256) { OPENSSL_PUT_ERROR(DSA, DSA_R_BAD_Q_VALUE); return 0; @@ -655,17 +683,17 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest, goto err; } - /* Calculate W = inv(S) mod Q - * save W in u2 */ + // Calculate W = inv(S) mod Q + // save W in u2 if (BN_mod_inverse(&u2, sig->s, dsa->q, ctx) == NULL) { goto err; } - /* save M in u1 */ + // save M in u1 if (digest_len > (i >> 3)) { - /* if the digest length is greater than the size of q use the - * BN_num_bits(dsa->q) leftmost bits of the digest, see - * fips 186-3, 4.2 */ + // if the digest length is greater than the size of q use the + // BN_num_bits(dsa->q) leftmost bits of the digest, see + // fips 186-3, 4.2 digest_len = (i >> 3); } @@ -673,12 +701,12 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest, goto err; } - /* u1 = M * w mod q */ + // u1 = M * w mod q if (!BN_mod_mul(&u1, &u1, &u2, dsa->q, ctx)) { goto err; } - /* u2 = r * w mod q */ + // u2 = r * w mod q if (!BN_mod_mul(&u2, sig->r, &u2, dsa->q, ctx)) { goto err; } @@ -694,14 +722,14 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest, goto err; } - /* BN_copy(&u1,&t1); */ - /* let u1 = u1 mod q */ + // BN_copy(&u1,&t1); + // let u1 = u1 mod q if (!BN_mod(&u1, &t1, dsa->q, ctx)) { goto err; } - /* V is now in u1. If the signature is correct, it will be - * equal to R. */ + // V is now in u1. If the signature is correct, it will be + // equal to R. *out_valid = BN_ucmp(&u1, sig->r) == 0; ret = 1; @@ -718,7 +746,7 @@ int DSA_do_check_signature(int *out_valid, const uint8_t *digest, } int DSA_sign(int type, const uint8_t *digest, size_t digest_len, - uint8_t *out_sig, unsigned int *out_siglen, DSA *dsa) { + uint8_t *out_sig, unsigned int *out_siglen, const DSA *dsa) { DSA_SIG *s; s = DSA_do_sign(digest, digest_len, dsa); @@ -758,7 +786,7 @@ int DSA_check_signature(int *out_valid, const uint8_t *digest, goto err; } - /* Ensure that the signature uses DER and doesn't have trailing garbage. */ + // Ensure that the signature uses DER and doesn't have trailing garbage. int der_len = i2d_DSA_SIG(s, &der); if (der_len < 0 || (size_t)der_len != sig_len || OPENSSL_memcmp(sig, der, sig_len)) { @@ -773,8 +801,8 @@ int DSA_check_signature(int *out_valid, const uint8_t *digest, return ret; } -/* der_len_len returns the number of bytes needed to represent a length of |len| - * in DER. */ +// der_len_len returns the number of bytes needed to represent a length of |len| +// in DER. static size_t der_len_len(size_t len) { if (len < 0x80) { return 1; @@ -789,18 +817,18 @@ static size_t der_len_len(size_t len) { int DSA_size(const DSA *dsa) { size_t order_len = BN_num_bytes(dsa->q); - /* Compute the maximum length of an |order_len| byte integer. Defensively - * assume that the leading 0x00 is included. */ + // Compute the maximum length of an |order_len| byte integer. Defensively + // assume that the leading 0x00 is included. size_t integer_len = 1 /* tag */ + der_len_len(order_len + 1) + 1 + order_len; if (integer_len < order_len) { return 0; } - /* A DSA signature is two INTEGERs. */ + // A DSA signature is two INTEGERs. 
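DSA_size bounds the DER encoding as a SEQUENCE of two INTEGERs, each allowed one defensive 0x00 for the sign bit, plus tag and length overhead. A hypothetical recomputation of that bound for the q sizes DSA_do_check_signature accepts (160, 224 and 256 bits), with der_len_len re-derived as a simple loop:

#include <stddef.h>
#include <stdio.h>

// Bytes needed to encode a DER length: short form, or 0x8N plus N length bytes.
static size_t der_len_len(size_t len) {
  if (len < 0x80) {
    return 1;
  }
  size_t n = 1;
  while (len > 0) {
    len >>= 8;
    n++;
  }
  return n;
}

int main(void) {
  const size_t order_lens[] = {20, 28, 32};  // 160-, 224- and 256-bit q
  for (size_t j = 0; j < sizeof(order_lens) / sizeof(order_lens[0]); j++) {
    const size_t order_len = order_lens[j];
    size_t integer_len = 1 /* tag */ + der_len_len(order_len + 1) + 1 + order_len;
    size_t value_len = 2 * integer_len;
    size_t total = 1 /* tag */ + der_len_len(value_len) + value_len;
    printf("order_len=%zu -> at most %zu signature bytes\n", order_len, total);
  }
  return 0;  // prints 48, 64 and 72 bytes respectively
}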
size_t value_len = 2 * integer_len; if (value_len < integer_len) { return 0; } - /* Add the header. */ + // Add the header. size_t ret = 1 /* tag */ + der_len_len(value_len) + value_len; if (ret < value_len) { return 0; @@ -808,8 +836,8 @@ int DSA_size(const DSA *dsa) { return ret; } -int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, - BIGNUM **out_r) { +static int dsa_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, + BIGNUM **out_r) { BN_CTX *ctx; BIGNUM k, kq, *kinv = NULL, *r = NULL; int ret = 0; @@ -835,7 +863,7 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, goto err; } - /* Get random k */ + // Get random k if (!BN_rand_range_ex(&k, 1, dsa->q)) { goto err; } @@ -849,16 +877,16 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, goto err; } - /* Compute r = (g^k mod p) mod q */ + // Compute r = (g^k mod p) mod q if (!BN_copy(&kq, &k)) { goto err; } - /* We do not want timing information to leak the length of k, - * so we compute g^k using an equivalent exponent of fixed length. - * - * (This is a kludge that we need because the BN_mod_exp_mont() - * does not let us specify the desired timing behaviour.) */ + // We do not want timing information to leak the length of k, + // so we compute g^k using an equivalent exponent of fixed length. + // + // (This is a kludge that we need because the BN_mod_exp_mont() + // does not let us specify the desired timing behaviour.) if (!BN_add(&kq, &kq, dsa->q)) { goto err; @@ -875,8 +903,8 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, goto err; } - /* Compute part of 's = inv(k) (m + xr) mod q' using Fermat's Little - * Theorem. */ + // Compute part of 's = inv(k) (m + xr) mod q' using Fermat's Little + // Theorem. 
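The fixed-length-exponent remark in dsa_sign_setup works because g has order q mod p, so exponents that differ by q or 2q produce the same power; the addition only pins the bit length of the exponent fed to BN_mod_exp_mont. A toy check in the same illustrative p=23, q=11, g=2 group used above:

#include <assert.h>
#include <stdint.h>

static uint64_t mod_pow(uint64_t b, uint64_t e, uint64_t m) {
  uint64_t r = 1;
  b %= m;
  while (e) {
    if (e & 1) r = r * b % m;
    b = b * b % m;
    e >>= 1;
  }
  return r;
}

int main(void) {
  const uint64_t p = 23, q = 11, g = 2;  // g has multiplicative order q mod p
  for (uint64_t k = 1; k < q; k++) {
    assert(mod_pow(g, k, p) == mod_pow(g, k + q, p));      // g^q == 1, so the powers match
    assert(mod_pow(g, k, p) == mod_pow(g, k + 2 * q, p));  // same for k + 2q
  }
  return 0;
}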
kinv = BN_new(); if (kinv == NULL || !bn_mod_inverse_prime(kinv, &k, dsa->q, ctx, dsa->method_mont_q)) { @@ -908,44 +936,44 @@ int DSA_sign_setup(const DSA *dsa, BN_CTX *ctx_in, BIGNUM **out_kinv, } int DSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func) { + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { int index; - if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, dup_func, + if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, free_func)) { return -1; } return index; } -int DSA_set_ex_data(DSA *d, int idx, void *arg) { - return CRYPTO_set_ex_data(&d->ex_data, idx, arg); +int DSA_set_ex_data(DSA *dsa, int idx, void *arg) { + return CRYPTO_set_ex_data(&dsa->ex_data, idx, arg); } -void *DSA_get_ex_data(const DSA *d, int idx) { - return CRYPTO_get_ex_data(&d->ex_data, idx); +void *DSA_get_ex_data(const DSA *dsa, int idx) { + return CRYPTO_get_ex_data(&dsa->ex_data, idx); } -DH *DSA_dup_DH(const DSA *r) { - DH *ret = NULL; - - if (r == NULL) { - goto err; +DH *DSA_dup_DH(const DSA *dsa) { + if (dsa == NULL) { + return NULL; } - ret = DH_new(); + + DH *ret = DH_new(); if (ret == NULL) { goto err; } - if (r->q != NULL) { - ret->priv_length = BN_num_bits(r->q); - if ((ret->q = BN_dup(r->q)) == NULL) { + if (dsa->q != NULL) { + ret->priv_length = BN_num_bits(dsa->q); + if ((ret->q = BN_dup(dsa->q)) == NULL) { goto err; } } - if ((r->p != NULL && (ret->p = BN_dup(r->p)) == NULL) || - (r->g != NULL && (ret->g = BN_dup(r->g)) == NULL) || - (r->pub_key != NULL && (ret->pub_key = BN_dup(r->pub_key)) == NULL) || - (r->priv_key != NULL && (ret->priv_key = BN_dup(r->priv_key)) == NULL)) { - goto err; + if ((dsa->p != NULL && (ret->p = BN_dup(dsa->p)) == NULL) || + (dsa->g != NULL && (ret->g = BN_dup(dsa->g)) == NULL) || + (dsa->pub_key != NULL && (ret->pub_key = BN_dup(dsa->pub_key)) == NULL) || + (dsa->priv_key != NULL && + (ret->priv_key = BN_dup(dsa->priv_key)) == NULL)) { + goto err; } return ret; diff --git a/Sources/BoringSSL/crypto/dsa/dsa_asn1.c b/Sources/BoringSSL/crypto/dsa/dsa_asn1.c index ff5ee0039..97fd07fee 100644 --- a/Sources/BoringSSL/crypto/dsa/dsa_asn1.c +++ b/Sources/BoringSSL/crypto/dsa/dsa_asn1.c @@ -75,7 +75,7 @@ static int parse_integer(CBS *cbs, BIGNUM **out) { static int marshal_integer(CBB *cbb, BIGNUM *bn) { if (bn == NULL) { - /* A DSA object may be missing some components. */ + // A DSA object may be missing some components. OPENSSL_PUT_ERROR(DSA, ERR_R_PASSED_NULL_PARAMETER); return 0; } diff --git a/Sources/BoringSSL/crypto/ec/ec.c b/Sources/BoringSSL/crypto/ec/ec.c deleted file mode 100644 index 96bb70370..000000000 --- a/Sources/BoringSSL/crypto/ec/ec.c +++ /dev/null @@ -1,847 +0,0 @@ -/* Originally written by Bodo Moeller for the OpenSSL project. - * ==================================================================== - * Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. 
All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ -/* ==================================================================== - * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. - * - * Portions of the attached software ("Contribution") are developed by - * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. - * - * The Contribution is licensed pursuant to the OpenSSL open source - * license provided above. - * - * The elliptic curve binary polynomial software is originally written by - * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems - * Laboratories. 
*/ - -#include - -#include -#include - -#include -#include -#include -#include - -#include "internal.h" -#include "../internal.h" - - -static const struct curve_data P224 = { - "NIST P-224", - 28, - {/* p */ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - /* a */ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFE, - /* b */ - 0xB4, 0x05, 0x0A, 0x85, 0x0C, 0x04, 0xB3, 0xAB, 0xF5, 0x41, 0x32, 0x56, - 0x50, 0x44, 0xB0, 0xB7, 0xD7, 0xBF, 0xD8, 0xBA, 0x27, 0x0B, 0x39, 0x43, - 0x23, 0x55, 0xFF, 0xB4, - /* x */ - 0xB7, 0x0E, 0x0C, 0xBD, 0x6B, 0xB4, 0xBF, 0x7F, 0x32, 0x13, 0x90, 0xB9, - 0x4A, 0x03, 0xC1, 0xD3, 0x56, 0xC2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xD6, - 0x11, 0x5C, 0x1D, 0x21, - /* y */ - 0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, 0xdf, 0xe6, - 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64, 0x44, 0xd5, 0x81, 0x99, - 0x85, 0x00, 0x7e, 0x34, - /* order */ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0x16, 0xA2, 0xE0, 0xB8, 0xF0, 0x3E, 0x13, 0xDD, 0x29, 0x45, - 0x5C, 0x5C, 0x2A, 0x3D, - }}; - -static const struct curve_data P256 = { - "NIST P-256", - 32, - {/* p */ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - /* a */ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, - /* b */ - 0x5A, 0xC6, 0x35, 0xD8, 0xAA, 0x3A, 0x93, 0xE7, 0xB3, 0xEB, 0xBD, 0x55, - 0x76, 0x98, 0x86, 0xBC, 0x65, 0x1D, 0x06, 0xB0, 0xCC, 0x53, 0xB0, 0xF6, - 0x3B, 0xCE, 0x3C, 0x3E, 0x27, 0xD2, 0x60, 0x4B, - /* x */ - 0x6B, 0x17, 0xD1, 0xF2, 0xE1, 0x2C, 0x42, 0x47, 0xF8, 0xBC, 0xE6, 0xE5, - 0x63, 0xA4, 0x40, 0xF2, 0x77, 0x03, 0x7D, 0x81, 0x2D, 0xEB, 0x33, 0xA0, - 0xF4, 0xA1, 0x39, 0x45, 0xD8, 0x98, 0xC2, 0x96, - /* y */ - 0x4f, 0xe3, 0x42, 0xe2, 0xfe, 0x1a, 0x7f, 0x9b, 0x8e, 0xe7, 0xeb, 0x4a, - 0x7c, 0x0f, 0x9e, 0x16, 0x2b, 0xce, 0x33, 0x57, 0x6b, 0x31, 0x5e, 0xce, - 0xcb, 0xb6, 0x40, 0x68, 0x37, 0xbf, 0x51, 0xf5, - /* order */ - 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0xE6, 0xFA, 0xAD, 0xA7, 0x17, 0x9E, 0x84, - 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63, 0x25, 0x51}}; - -static const struct curve_data P384 = { - "NIST P-384", - 48, - {/* p */ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, - /* a */ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, - /* b */ - 0xB3, 0x31, 0x2F, 0xA7, 0xE2, 0x3E, 0xE7, 0xE4, 0x98, 0x8E, 0x05, 0x6B, - 0xE3, 0xF8, 0x2D, 0x19, 0x18, 0x1D, 0x9C, 0x6E, 0xFE, 0x81, 0x41, 0x12, - 0x03, 0x14, 0x08, 0x8F, 0x50, 0x13, 0x87, 0x5A, 0xC6, 0x56, 0x39, 0x8D, - 0x8A, 
0x2E, 0xD1, 0x9D, 0x2A, 0x85, 0xC8, 0xED, 0xD3, 0xEC, 0x2A, 0xEF, - /* x */ - 0xAA, 0x87, 0xCA, 0x22, 0xBE, 0x8B, 0x05, 0x37, 0x8E, 0xB1, 0xC7, 0x1E, - 0xF3, 0x20, 0xAD, 0x74, 0x6E, 0x1D, 0x3B, 0x62, 0x8B, 0xA7, 0x9B, 0x98, - 0x59, 0xF7, 0x41, 0xE0, 0x82, 0x54, 0x2A, 0x38, 0x55, 0x02, 0xF2, 0x5D, - 0xBF, 0x55, 0x29, 0x6C, 0x3A, 0x54, 0x5E, 0x38, 0x72, 0x76, 0x0A, 0xB7, - /* y */ - 0x36, 0x17, 0xde, 0x4a, 0x96, 0x26, 0x2c, 0x6f, 0x5d, 0x9e, 0x98, 0xbf, - 0x92, 0x92, 0xdc, 0x29, 0xf8, 0xf4, 0x1d, 0xbd, 0x28, 0x9a, 0x14, 0x7c, - 0xe9, 0xda, 0x31, 0x13, 0xb5, 0xf0, 0xb8, 0xc0, 0x0a, 0x60, 0xb1, 0xce, - 0x1d, 0x7e, 0x81, 0x9d, 0x7a, 0x43, 0x1d, 0x7c, 0x90, 0xea, 0x0e, 0x5f, - /* order */ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xC7, 0x63, 0x4D, 0x81, 0xF4, 0x37, 0x2D, 0xDF, 0x58, 0x1A, 0x0D, 0xB2, - 0x48, 0xB0, 0xA7, 0x7A, 0xEC, 0xEC, 0x19, 0x6A, 0xCC, 0xC5, 0x29, 0x73}}; - -static const struct curve_data P521 = { - "NIST P-521", - 66, - {/* p */ - 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - /* a */ - 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, - /* b */ - 0x00, 0x51, 0x95, 0x3E, 0xB9, 0x61, 0x8E, 0x1C, 0x9A, 0x1F, 0x92, 0x9A, - 0x21, 0xA0, 0xB6, 0x85, 0x40, 0xEE, 0xA2, 0xDA, 0x72, 0x5B, 0x99, 0xB3, - 0x15, 0xF3, 0xB8, 0xB4, 0x89, 0x91, 0x8E, 0xF1, 0x09, 0xE1, 0x56, 0x19, - 0x39, 0x51, 0xEC, 0x7E, 0x93, 0x7B, 0x16, 0x52, 0xC0, 0xBD, 0x3B, 0xB1, - 0xBF, 0x07, 0x35, 0x73, 0xDF, 0x88, 0x3D, 0x2C, 0x34, 0xF1, 0xEF, 0x45, - 0x1F, 0xD4, 0x6B, 0x50, 0x3F, 0x00, - /* x */ - 0x00, 0xC6, 0x85, 0x8E, 0x06, 0xB7, 0x04, 0x04, 0xE9, 0xCD, 0x9E, 0x3E, - 0xCB, 0x66, 0x23, 0x95, 0xB4, 0x42, 0x9C, 0x64, 0x81, 0x39, 0x05, 0x3F, - 0xB5, 0x21, 0xF8, 0x28, 0xAF, 0x60, 0x6B, 0x4D, 0x3D, 0xBA, 0xA1, 0x4B, - 0x5E, 0x77, 0xEF, 0xE7, 0x59, 0x28, 0xFE, 0x1D, 0xC1, 0x27, 0xA2, 0xFF, - 0xA8, 0xDE, 0x33, 0x48, 0xB3, 0xC1, 0x85, 0x6A, 0x42, 0x9B, 0xF9, 0x7E, - 0x7E, 0x31, 0xC2, 0xE5, 0xBD, 0x66, - /* y */ - 0x01, 0x18, 0x39, 0x29, 0x6a, 0x78, 0x9a, 0x3b, 0xc0, 0x04, 0x5c, 0x8a, - 0x5f, 0xb4, 0x2c, 0x7d, 0x1b, 0xd9, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b, - 0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17, 0x27, 0x3e, 0x66, 0x2c, 0x97, 0xee, - 0x72, 0x99, 0x5e, 0xf4, 0x26, 0x40, 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad, - 0x07, 0x61, 0x35, 0x3c, 0x70, 0x86, 0xa2, 0x72, 0xc2, 0x40, 0x88, 0xbe, - 0x94, 0x76, 0x9f, 0xd1, 0x66, 0x50, - /* order */ - 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFA, 0x51, 0x86, - 0x87, 0x83, 0xBF, 0x2F, 0x96, 0x6B, 0x7F, 0xCC, 0x01, 0x48, 0xF7, 0x09, - 0xA5, 0xD0, 0x3B, 0xB5, 0xC9, 0xB8, 0x89, 0x9C, 0x47, 0xAE, 0xBB, 0x6F, - 0xB7, 0x1E, 0x91, 
0x38, 0x64, 0x09}}; - -/* MSan appears to have a bug that causes code to be miscompiled in opt mode. - * While that is being looked at, don't run the uint128_t code under MSan. */ -#if defined(OPENSSL_64_BIT) && !defined(OPENSSL_WINDOWS) && \ - !defined(MEMORY_SANITIZER) -#define BORINGSSL_USE_INT128_CODE -#endif - -const struct built_in_curve OPENSSL_built_in_curves[] = { - { - NID_secp521r1, - /* 1.3.132.0.35 */ - {0x2b, 0x81, 0x04, 0x00, 0x23}, 5, - &P521, - &EC_GFp_mont_method, - }, - { - NID_secp384r1, - /* 1.3.132.0.34 */ - {0x2b, 0x81, 0x04, 0x00, 0x22}, 5, - &P384, - &EC_GFp_mont_method, - }, - { - NID_X9_62_prime256v1, - /* 1.2.840.10045.3.1.7 */ - {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07}, 8, - &P256, -#if defined(BORINGSSL_USE_INT128_CODE) -#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ - !defined(OPENSSL_SMALL) - &EC_GFp_nistz256_method, -#else - &EC_GFp_nistp256_method, -#endif -#else - &EC_GFp_mont_method, -#endif - }, - { - NID_secp224r1, - /* 1.3.132.0.33 */ - {0x2b, 0x81, 0x04, 0x00, 0x21}, 5, - &P224, -#if defined(BORINGSSL_USE_INT128_CODE) && !defined(OPENSSL_SMALL) - &EC_GFp_nistp224_method, -#else - &EC_GFp_mont_method, -#endif - }, - {NID_undef, {0}, 0, NULL, NULL}, -}; - -/* built_in_curve_scalar_field_monts contains Montgomery contexts for - * performing inversions in the scalar fields of each of the built-in - * curves. It's protected by |built_in_curve_scalar_field_monts_once|. */ -static const BN_MONT_CTX **built_in_curve_scalar_field_monts; - -static CRYPTO_once_t built_in_curve_scalar_field_monts_once; - -static void built_in_curve_scalar_field_monts_init(void) { - unsigned num_built_in_curves; - for (num_built_in_curves = 0;; num_built_in_curves++) { - if (OPENSSL_built_in_curves[num_built_in_curves].nid == NID_undef) { - break; - } - } - - assert(0 < num_built_in_curves); - - built_in_curve_scalar_field_monts = - OPENSSL_malloc(sizeof(BN_MONT_CTX *) * num_built_in_curves); - if (built_in_curve_scalar_field_monts == NULL) { - return; - } - - BIGNUM *order = BN_new(); - BN_CTX *bn_ctx = BN_CTX_new(); - BN_MONT_CTX *mont_ctx = NULL; - - if (bn_ctx == NULL || - order == NULL) { - goto err; - } - - unsigned i; - for (i = 0; i < num_built_in_curves; i++) { - const struct curve_data *curve = OPENSSL_built_in_curves[i].data; - const unsigned param_len = curve->param_len; - const uint8_t *params = curve->data; - - mont_ctx = BN_MONT_CTX_new(); - if (mont_ctx == NULL) { - goto err; - } - - if (!BN_bin2bn(params + 5 * param_len, param_len, order) || - !BN_MONT_CTX_set(mont_ctx, order, bn_ctx)) { - goto err; - } - - built_in_curve_scalar_field_monts[i] = mont_ctx; - mont_ctx = NULL; - } - - goto out; - -err: - BN_MONT_CTX_free(mont_ctx); - OPENSSL_free((BN_MONT_CTX**) built_in_curve_scalar_field_monts); - built_in_curve_scalar_field_monts = NULL; - -out: - BN_free(order); - BN_CTX_free(bn_ctx); -} - -EC_GROUP *ec_group_new(const EC_METHOD *meth) { - EC_GROUP *ret; - - if (meth == NULL) { - OPENSSL_PUT_ERROR(EC, EC_R_SLOT_FULL); - return NULL; - } - - if (meth->group_init == 0) { - OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); - return NULL; - } - - ret = OPENSSL_malloc(sizeof(EC_GROUP)); - if (ret == NULL) { - OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); - return NULL; - } - OPENSSL_memset(ret, 0, sizeof(EC_GROUP)); - - ret->meth = meth; - BN_init(&ret->order); - - if (!meth->group_init(ret)) { - OPENSSL_free(ret); - return NULL; - } - - return ret; -} - -EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, - const BIGNUM 
*b, BN_CTX *ctx) { - EC_GROUP *ret = ec_group_new(&EC_GFp_mont_method); - if (ret == NULL) { - return NULL; - } - - if (ret->meth->group_set_curve == 0) { - OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); - return 0; - } - if (!ret->meth->group_set_curve(ret, p, a, b, ctx)) { - EC_GROUP_free(ret); - return NULL; - } - return ret; -} - -int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator, - const BIGNUM *order, const BIGNUM *cofactor) { - if (group->curve_name != NID_undef || group->generator != NULL) { - /* |EC_GROUP_set_generator| may only be used with |EC_GROUP|s returned by - * |EC_GROUP_new_curve_GFp| and may only used once on each group. */ - return 0; - } - - /* Require a cofactor of one for custom curves, which implies prime order. */ - if (!BN_is_one(cofactor)) { - OPENSSL_PUT_ERROR(EC, EC_R_INVALID_COFACTOR); - return 0; - } - - group->generator = EC_POINT_new(group); - return group->generator != NULL && - EC_POINT_copy(group->generator, generator) && - BN_copy(&group->order, order); -} - -static EC_GROUP *ec_group_new_from_data(unsigned built_in_index) { - const struct built_in_curve *curve = &OPENSSL_built_in_curves[built_in_index]; - EC_GROUP *group = NULL; - EC_POINT *P = NULL; - BIGNUM *p = NULL, *a = NULL, *b = NULL, *x = NULL, *y = NULL; - int ok = 0; - - BN_CTX *ctx = BN_CTX_new(); - if (ctx == NULL) { - OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); - goto err; - } - - const struct curve_data *data = curve->data; - const unsigned param_len = data->param_len; - const uint8_t *params = data->data; - - if (!(p = BN_bin2bn(params + 0 * param_len, param_len, NULL)) || - !(a = BN_bin2bn(params + 1 * param_len, param_len, NULL)) || - !(b = BN_bin2bn(params + 2 * param_len, param_len, NULL))) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - - group = ec_group_new(curve->method); - if (group == NULL || - !group->meth->group_set_curve(group, p, a, b, ctx)) { - OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); - goto err; - } - - if ((P = EC_POINT_new(group)) == NULL) { - OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); - goto err; - } - - if (!(x = BN_bin2bn(params + 3 * param_len, param_len, NULL)) || - !(y = BN_bin2bn(params + 4 * param_len, param_len, NULL))) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - - if (!EC_POINT_set_affine_coordinates_GFp(group, P, x, y, ctx)) { - OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); - goto err; - } - if (!BN_bin2bn(params + 5 * param_len, param_len, &group->order)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - - CRYPTO_once(&built_in_curve_scalar_field_monts_once, - built_in_curve_scalar_field_monts_init); - if (built_in_curve_scalar_field_monts != NULL) { - group->mont_data = built_in_curve_scalar_field_monts[built_in_index]; - } - - group->generator = P; - P = NULL; - ok = 1; - -err: - if (!ok) { - EC_GROUP_free(group); - group = NULL; - } - EC_POINT_free(P); - BN_CTX_free(ctx); - BN_free(p); - BN_free(a); - BN_free(b); - BN_free(x); - BN_free(y); - return group; -} - -EC_GROUP *EC_GROUP_new_by_curve_name(int nid) { - unsigned i; - const struct built_in_curve *curve; - EC_GROUP *ret = NULL; - - for (i = 0; OPENSSL_built_in_curves[i].nid != NID_undef; i++) { - curve = &OPENSSL_built_in_curves[i]; - if (curve->nid == nid) { - ret = ec_group_new_from_data(i); - break; - } - } - - if (ret == NULL) { - OPENSSL_PUT_ERROR(EC, EC_R_UNKNOWN_GROUP); - return NULL; - } - - ret->curve_name = nid; - return ret; -} - -void EC_GROUP_free(EC_GROUP *group) { - if (!group) { - return; - } - - if (group->meth->group_finish != 0) { - 
group->meth->group_finish(group); - } - - EC_POINT_free(group->generator); - BN_free(&group->order); - - OPENSSL_free(group); -} - -const BN_MONT_CTX *ec_group_get_mont_data(const EC_GROUP *group) { - return group->mont_data; -} - -EC_GROUP *EC_GROUP_dup(const EC_GROUP *a) { - if (a == NULL) { - return NULL; - } - - if (a->meth->group_copy == NULL) { - OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); - return NULL; - } - - EC_GROUP *ret = ec_group_new(a->meth); - if (ret == NULL) { - return NULL; - } - - ret->mont_data = a->mont_data; - ret->curve_name = a->curve_name; - - if (a->generator != NULL) { - ret->generator = EC_POINT_dup(a->generator, ret); - if (ret->generator == NULL) { - goto err; - } - } - - if (!BN_copy(&ret->order, &a->order) || - !ret->meth->group_copy(ret, a)) { - goto err; - } - - return ret; - -err: - EC_GROUP_free(ret); - return NULL; -} - -int EC_GROUP_cmp(const EC_GROUP *a, const EC_GROUP *b, BN_CTX *ignored) { - return a->curve_name == NID_undef || - b->curve_name == NID_undef || - a->curve_name != b->curve_name; -} - -const EC_POINT *EC_GROUP_get0_generator(const EC_GROUP *group) { - return group->generator; -} - -const BIGNUM *EC_GROUP_get0_order(const EC_GROUP *group) { - assert(!BN_is_zero(&group->order)); - return &group->order; -} - -int EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx) { - if (BN_copy(order, EC_GROUP_get0_order(group)) == NULL) { - return 0; - } - return 1; -} - -int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, - BN_CTX *ctx) { - /* All |EC_GROUP|s have cofactor 1. */ - return BN_set_word(cofactor, 1); -} - -int EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *out_p, BIGNUM *out_a, - BIGNUM *out_b, BN_CTX *ctx) { - return ec_GFp_simple_group_get_curve(group, out_p, out_a, out_b, ctx); -} - -int EC_GROUP_get_curve_name(const EC_GROUP *group) { return group->curve_name; } - -unsigned EC_GROUP_get_degree(const EC_GROUP *group) { - return ec_GFp_simple_group_get_degree(group); -} - -EC_POINT *EC_POINT_new(const EC_GROUP *group) { - EC_POINT *ret; - - if (group == NULL) { - OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); - return NULL; - } - - ret = OPENSSL_malloc(sizeof *ret); - if (ret == NULL) { - OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); - return NULL; - } - - ret->meth = group->meth; - - if (!ec_GFp_simple_point_init(ret)) { - OPENSSL_free(ret); - return NULL; - } - - return ret; -} - -void EC_POINT_free(EC_POINT *point) { - if (!point) { - return; - } - - ec_GFp_simple_point_finish(point); - - OPENSSL_free(point); -} - -void EC_POINT_clear_free(EC_POINT *point) { - if (!point) { - return; - } - - ec_GFp_simple_point_clear_finish(point); - - OPENSSL_cleanse(point, sizeof *point); - OPENSSL_free(point); -} - -int EC_POINT_copy(EC_POINT *dest, const EC_POINT *src) { - if (dest->meth != src->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - if (dest == src) { - return 1; - } - return ec_GFp_simple_point_copy(dest, src); -} - -EC_POINT *EC_POINT_dup(const EC_POINT *a, const EC_GROUP *group) { - if (a == NULL) { - return NULL; - } - - EC_POINT *ret = EC_POINT_new(group); - if (ret == NULL || - !EC_POINT_copy(ret, a)) { - EC_POINT_free(ret); - return NULL; - } - - return ret; -} - -int EC_POINT_set_to_infinity(const EC_GROUP *group, EC_POINT *point) { - if (group->meth != point->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - return ec_GFp_simple_point_set_to_infinity(group, point); -} - -int EC_POINT_is_at_infinity(const EC_GROUP 
*group, const EC_POINT *point) { - if (group->meth != point->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - return ec_GFp_simple_is_at_infinity(group, point); -} - -int EC_POINT_is_on_curve(const EC_GROUP *group, const EC_POINT *point, - BN_CTX *ctx) { - if (group->meth != point->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - return ec_GFp_simple_is_on_curve(group, point, ctx); -} - -int EC_POINT_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b, - BN_CTX *ctx) { - if ((group->meth != a->meth) || (a->meth != b->meth)) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return -1; - } - return ec_GFp_simple_cmp(group, a, b, ctx); -} - -int EC_POINT_make_affine(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx) { - if (group->meth != point->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - return ec_GFp_simple_make_affine(group, point, ctx); -} - -int EC_POINTs_make_affine(const EC_GROUP *group, size_t num, EC_POINT *points[], - BN_CTX *ctx) { - for (size_t i = 0; i < num; i++) { - if (group->meth != points[i]->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - } - return ec_GFp_simple_points_make_affine(group, num, points, ctx); -} - -int EC_POINT_get_affine_coordinates_GFp(const EC_GROUP *group, - const EC_POINT *point, BIGNUM *x, - BIGNUM *y, BN_CTX *ctx) { - if (group->meth->point_get_affine_coordinates == 0) { - OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); - return 0; - } - if (group->meth != point->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - return group->meth->point_get_affine_coordinates(group, point, x, y, ctx); -} - -int EC_POINT_set_affine_coordinates_GFp(const EC_GROUP *group, EC_POINT *point, - const BIGNUM *x, const BIGNUM *y, - BN_CTX *ctx) { - if (group->meth != point->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - if (!ec_GFp_simple_point_set_affine_coordinates(group, point, x, y, ctx)) { - return 0; - } - - if (!EC_POINT_is_on_curve(group, point, ctx)) { - OPENSSL_PUT_ERROR(EC, EC_R_POINT_IS_NOT_ON_CURVE); - return 0; - } - - return 1; -} - -int EC_POINT_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, - const EC_POINT *b, BN_CTX *ctx) { - if ((group->meth != r->meth) || (r->meth != a->meth) || - (a->meth != b->meth)) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - return ec_GFp_simple_add(group, r, a, b, ctx); -} - - -int EC_POINT_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, - BN_CTX *ctx) { - if ((group->meth != r->meth) || (r->meth != a->meth)) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - return ec_GFp_simple_dbl(group, r, a, ctx); -} - - -int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx) { - if (group->meth != a->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - return ec_GFp_simple_invert(group, a, ctx); -} - -int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, - const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx) { - /* Previously, this function set |r| to the point at infinity if there was - * nothing to multiply. But, nobody should be calling this function with - * nothing to multiply in the first place. 
*/ - if ((g_scalar == NULL && p_scalar == NULL) || - ((p == NULL) != (p_scalar == NULL))) { - OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); - return 0; - } - - if (group->meth != r->meth || - (p != NULL && group->meth != p->meth)) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - - return group->meth->mul(group, r, g_scalar, p, p_scalar, ctx); -} - -int ec_point_set_Jprojective_coordinates_GFp(const EC_GROUP *group, EC_POINT *point, - const BIGNUM *x, const BIGNUM *y, - const BIGNUM *z, BN_CTX *ctx) { - if (group->meth != point->meth) { - OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); - return 0; - } - return ec_GFp_simple_set_Jprojective_coordinates_GFp(group, point, x, y, z, - ctx); -} - -void EC_GROUP_set_asn1_flag(EC_GROUP *group, int flag) {} - -const EC_METHOD *EC_GROUP_method_of(const EC_GROUP *group) { - return NULL; -} - -int EC_METHOD_get_field_type(const EC_METHOD *meth) { - return NID_X9_62_prime_field; -} - -void EC_GROUP_set_point_conversion_form(EC_GROUP *group, - point_conversion_form_t form) { - if (form != POINT_CONVERSION_UNCOMPRESSED) { - abort(); - } -} - -size_t EC_get_builtin_curves(EC_builtin_curve *out_curves, - size_t max_num_curves) { - unsigned num_built_in_curves; - for (num_built_in_curves = 0;; num_built_in_curves++) { - if (OPENSSL_built_in_curves[num_built_in_curves].nid == NID_undef) { - break; - } - } - - unsigned i; - for (i = 0; i < max_num_curves && i < num_built_in_curves; i++) { - out_curves[i].comment = OPENSSL_built_in_curves[i].data->comment; - out_curves[i].nid = OPENSSL_built_in_curves[i].nid; - } - - return num_built_in_curves; -} diff --git a/Sources/BoringSSL/crypto/ec/p224-64.c b/Sources/BoringSSL/crypto/ec/p224-64.c deleted file mode 100644 index 7b2ae68c9..000000000 --- a/Sources/BoringSSL/crypto/ec/p224-64.c +++ /dev/null @@ -1,1143 +0,0 @@ -/* Copyright (c) 2015, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -/* A 64-bit implementation of the NIST P-224 elliptic curve point multiplication - * - * Inspired by Daniel J. Bernstein's public domain nistp224 implementation - * and Adam Langley's public domain 64-bit C implementation of curve25519. */ - -#include - -#if defined(OPENSSL_64_BIT) && !defined(OPENSSL_WINDOWS) && \ - !defined(OPENSSL_SMALL) - -#include -#include -#include -#include - -#include - -#include "internal.h" -#include "../internal.h" - - -typedef uint8_t u8; -typedef uint64_t u64; -typedef int64_t s64; - -/* Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3 - * using 64-bit coefficients called 'limbs', and sometimes (for multiplication - * results) as b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 + - * 2^336*b_6 using 128-bit coefficients called 'widelimbs'. A 4-limb - * representation is an 'felem'; a 7-widelimb representation is a 'widefelem'. 
- * Even within felems, bits of adjacent limbs overlap, and we don't always - * reduce the representations: we ensure that inputs to each felem - * multiplication satisfy a_i < 2^60, so outputs satisfy b_i < 4*2^60*2^60, and - * fit into a 128-bit word without overflow. The coefficients are then again - * partially reduced to obtain an felem satisfying a_i < 2^57. We only reduce - * to the unique minimal representation at the end of the computation. */ - -typedef uint64_t limb; -typedef uint128_t widelimb; - -typedef limb felem[4]; -typedef widelimb widefelem[7]; - -/* Field element represented as a byte arrary. 28*8 = 224 bits is also the - * group order size for the elliptic curve, and we also use this type for - * scalars for point multiplication. */ -typedef u8 felem_bytearray[28]; - -/* Precomputed multiples of the standard generator - * Points are given in coordinates (X, Y, Z) where Z normally is 1 - * (0 for the point at infinity). - * For each field element, slice a_0 is word 0, etc. - * - * The table has 2 * 16 elements, starting with the following: - * index | bits | point - * ------+---------+------------------------------ - * 0 | 0 0 0 0 | 0G - * 1 | 0 0 0 1 | 1G - * 2 | 0 0 1 0 | 2^56G - * 3 | 0 0 1 1 | (2^56 + 1)G - * 4 | 0 1 0 0 | 2^112G - * 5 | 0 1 0 1 | (2^112 + 1)G - * 6 | 0 1 1 0 | (2^112 + 2^56)G - * 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G - * 8 | 1 0 0 0 | 2^168G - * 9 | 1 0 0 1 | (2^168 + 1)G - * 10 | 1 0 1 0 | (2^168 + 2^56)G - * 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G - * 12 | 1 1 0 0 | (2^168 + 2^112)G - * 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G - * 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G - * 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G - * followed by a copy of this with each element multiplied by 2^28. - * - * The reason for this is so that we can clock bits into four different - * locations when doing simple scalar multiplies against the base point, - * and then another four locations using the second 16 elements. 
*/ -static const felem g_pre_comp[2][16][3] = { - {{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, - {{0x3280d6115c1d21, 0xc1d356c2112234, 0x7f321390b94a03, 0xb70e0cbd6bb4bf}, - {0xd5819985007e34, 0x75a05a07476444, 0xfb4c22dfe6cd43, 0xbd376388b5f723}, - {1, 0, 0, 0}}, - {{0xfd9675666ebbe9, 0xbca7664d40ce5e, 0x2242df8d8a2a43, 0x1f49bbb0f99bc5}, - {0x29e0b892dc9c43, 0xece8608436e662, 0xdc858f185310d0, 0x9812dd4eb8d321}, - {1, 0, 0, 0}}, - {{0x6d3e678d5d8eb8, 0x559eed1cb362f1, 0x16e9a3bbce8a3f, 0xeedcccd8c2a748}, - {0xf19f90ed50266d, 0xabf2b4bf65f9df, 0x313865468fafec, 0x5cb379ba910a17}, - {1, 0, 0, 0}}, - {{0x0641966cab26e3, 0x91fb2991fab0a0, 0xefec27a4e13a0b, 0x0499aa8a5f8ebe}, - {0x7510407766af5d, 0x84d929610d5450, 0x81d77aae82f706, 0x6916f6d4338c5b}, - {1, 0, 0, 0}}, - {{0xea95ac3b1f15c6, 0x086000905e82d4, 0xdd323ae4d1c8b1, 0x932b56be7685a3}, - {0x9ef93dea25dbbf, 0x41665960f390f0, 0xfdec76dbe2a8a7, 0x523e80f019062a}, - {1, 0, 0, 0}}, - {{0x822fdd26732c73, 0xa01c83531b5d0f, 0x363f37347c1ba4, 0xc391b45c84725c}, - {0xbbd5e1b2d6ad24, 0xddfbcde19dfaec, 0xc393da7e222a7f, 0x1efb7890ede244}, - {1, 0, 0, 0}}, - {{0x4c9e90ca217da1, 0xd11beca79159bb, 0xff8d33c2c98b7c, 0x2610b39409f849}, - {0x44d1352ac64da0, 0xcdbb7b2c46b4fb, 0x966c079b753c89, 0xfe67e4e820b112}, - {1, 0, 0, 0}}, - {{0xe28cae2df5312d, 0xc71b61d16f5c6e, 0x79b7619a3e7c4c, 0x05c73240899b47}, - {0x9f7f6382c73e3a, 0x18615165c56bda, 0x641fab2116fd56, 0x72855882b08394}, - {1, 0, 0, 0}}, - {{0x0469182f161c09, 0x74a98ca8d00fb5, 0xb89da93489a3e0, 0x41c98768fb0c1d}, - {0xe5ea05fb32da81, 0x3dce9ffbca6855, 0x1cfe2d3fbf59e6, 0x0e5e03408738a7}, - {1, 0, 0, 0}}, - {{0xdab22b2333e87f, 0x4430137a5dd2f6, 0xe03ab9f738beb8, 0xcb0c5d0dc34f24}, - {0x764a7df0c8fda5, 0x185ba5c3fa2044, 0x9281d688bcbe50, 0xc40331df893881}, - {1, 0, 0, 0}}, - {{0xb89530796f0f60, 0xade92bd26909a3, 0x1a0c83fb4884da, 0x1765bf22a5a984}, - {0x772a9ee75db09e, 0x23bc6c67cec16f, 0x4c1edba8b14e2f, 0xe2a215d9611369}, - {1, 0, 0, 0}}, - {{0x571e509fb5efb3, 0xade88696410552, 0xc8ae85fada74fe, 0x6c7e4be83bbde3}, - {0xff9f51160f4652, 0xb47ce2495a6539, 0xa2946c53b582f4, 0x286d2db3ee9a60}, - {1, 0, 0, 0}}, - {{0x40bbd5081a44af, 0x0995183b13926c, 0xbcefba6f47f6d0, 0x215619e9cc0057}, - {0x8bc94d3b0df45e, 0xf11c54a3694f6f, 0x8631b93cdfe8b5, 0xe7e3f4b0982db9}, - {1, 0, 0, 0}}, - {{0xb17048ab3e1c7b, 0xac38f36ff8a1d8, 0x1c29819435d2c6, 0xc813132f4c07e9}, - {0x2891425503b11f, 0x08781030579fea, 0xf5426ba5cc9674, 0x1e28ebf18562bc}, - {1, 0, 0, 0}}, - {{0x9f31997cc864eb, 0x06cd91d28b5e4c, 0xff17036691a973, 0xf1aef351497c58}, - {0xdd1f2d600564ff, 0xdead073b1402db, 0x74a684435bd693, 0xeea7471f962558}, - {1, 0, 0, 0}}}, - {{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, - {{0x9665266dddf554, 0x9613d78b60ef2d, 0xce27a34cdba417, 0xd35ab74d6afc31}, - {0x85ccdd22deb15e, 0x2137e5783a6aab, 0xa141cffd8c93c6, 0x355a1830e90f2d}, - {1, 0, 0, 0}}, - {{0x1a494eadaade65, 0xd6da4da77fe53c, 0xe7992996abec86, 0x65c3553c6090e3}, - {0xfa610b1fb09346, 0xf1c6540b8a4aaf, 0xc51a13ccd3cbab, 0x02995b1b18c28a}, - {1, 0, 0, 0}}, - {{0x7874568e7295ef, 0x86b419fbe38d04, 0xdc0690a7550d9a, 0xd3966a44beac33}, - {0x2b7280ec29132f, 0xbeaa3b6a032df3, 0xdc7dd88ae41200, 0xd25e2513e3a100}, - {1, 0, 0, 0}}, - {{0x924857eb2efafd, 0xac2bce41223190, 0x8edaa1445553fc, 0x825800fd3562d5}, - {0x8d79148ea96621, 0x23a01c3dd9ed8d, 0xaf8b219f9416b5, 0xd8db0cc277daea}, - {1, 0, 0, 0}}, - {{0x76a9c3b1a700f0, 0xe9acd29bc7e691, 0x69212d1a6b0327, 0x6322e97fe154be}, - {0x469fc5465d62aa, 0x8d41ed18883b05, 0x1f8eae66c52b88, 0xe4fcbe9325be51}, - {1, 0, 0, 0}}, - 
{{0x825fdf583cac16, 0x020b857c7b023a, 0x683c17744b0165, 0x14ffd0a2daf2f1}, - {0x323b36184218f9, 0x4944ec4e3b47d4, 0xc15b3080841acf, 0x0bced4b01a28bb}, - {1, 0, 0, 0}}, - {{0x92ac22230df5c4, 0x52f33b4063eda8, 0xcb3f19870c0c93, 0x40064f2ba65233}, - {0xfe16f0924f8992, 0x012da25af5b517, 0x1a57bb24f723a6, 0x06f8bc76760def}, - {1, 0, 0, 0}}, - {{0x4a7084f7817cb9, 0xbcab0738ee9a78, 0x3ec11e11d9c326, 0xdc0fe90e0f1aae}, - {0xcf639ea5f98390, 0x5c350aa22ffb74, 0x9afae98a4047b7, 0x956ec2d617fc45}, - {1, 0, 0, 0}}, - {{0x4306d648c1be6a, 0x9247cd8bc9a462, 0xf5595e377d2f2e, 0xbd1c3caff1a52e}, - {0x045e14472409d0, 0x29f3e17078f773, 0x745a602b2d4f7d, 0x191837685cdfbb}, - {1, 0, 0, 0}}, - {{0x5b6ee254a8cb79, 0x4953433f5e7026, 0xe21faeb1d1def4, 0xc4c225785c09de}, - {0x307ce7bba1e518, 0x31b125b1036db8, 0x47e91868839e8f, 0xc765866e33b9f3}, - {1, 0, 0, 0}}, - {{0x3bfece24f96906, 0x4794da641e5093, 0xde5df64f95db26, 0x297ecd89714b05}, - {0x701bd3ebb2c3aa, 0x7073b4f53cb1d5, 0x13c5665658af16, 0x9895089d66fe58}, - {1, 0, 0, 0}}, - {{0x0fef05f78c4790, 0x2d773633b05d2e, 0x94229c3a951c94, 0xbbbd70df4911bb}, - {0xb2c6963d2c1168, 0x105f47a72b0d73, 0x9fdf6111614080, 0x7b7e94b39e67b0}, - {1, 0, 0, 0}}, - {{0xad1a7d6efbe2b3, 0xf012482c0da69d, 0x6b3bdf12438345, 0x40d7558d7aa4d9}, - {0x8a09fffb5c6d3d, 0x9a356e5d9ffd38, 0x5973f15f4f9b1c, 0xdcd5f59f63c3ea}, - {1, 0, 0, 0}}, - {{0xacf39f4c5ca7ab, 0x4c8071cc5fd737, 0xc64e3602cd1184, 0x0acd4644c9abba}, - {0x6c011a36d8bf6e, 0xfecd87ba24e32a, 0x19f6f56574fad8, 0x050b204ced9405}, - {1, 0, 0, 0}}, - {{0xed4f1cae7d9a96, 0x5ceef7ad94c40a, 0x778e4a3bf3ef9b, 0x7405783dc3b55e}, - {0x32477c61b6e8c6, 0xb46a97570f018b, 0x91176d0a7e95d1, 0x3df90fbc4c7d0e}, - {1, 0, 0, 0}}}}; - -/* Helper functions to convert field elements to/from internal representation */ -static void bin28_to_felem(felem out, const u8 in[28]) { - out[0] = *((const uint64_t *)(in)) & 0x00ffffffffffffff; - out[1] = (*((const uint64_t *)(in + 7))) & 0x00ffffffffffffff; - out[2] = (*((const uint64_t *)(in + 14))) & 0x00ffffffffffffff; - out[3] = (*((const uint64_t *)(in + 20))) >> 8; -} - -static void felem_to_bin28(u8 out[28], const felem in) { - for (size_t i = 0; i < 7; ++i) { - out[i] = in[0] >> (8 * i); - out[i + 7] = in[1] >> (8 * i); - out[i + 14] = in[2] >> (8 * i); - out[i + 21] = in[3] >> (8 * i); - } -} - -/* To preserve endianness when using BN_bn2bin and BN_bin2bn */ -static void flip_endian(u8 *out, const u8 *in, size_t len) { - for (size_t i = 0; i < len; ++i) { - out[i] = in[len - 1 - i]; - } -} - -/* From OpenSSL BIGNUM to internal representation */ -static int BN_to_felem(felem out, const BIGNUM *bn) { - /* BN_bn2bin eats leading zeroes */ - felem_bytearray b_out; - OPENSSL_memset(b_out, 0, sizeof(b_out)); - size_t num_bytes = BN_num_bytes(bn); - if (num_bytes > sizeof(b_out) || - BN_is_negative(bn)) { - OPENSSL_PUT_ERROR(EC, EC_R_BIGNUM_OUT_OF_RANGE); - return 0; - } - - felem_bytearray b_in; - num_bytes = BN_bn2bin(bn, b_in); - flip_endian(b_out, b_in, num_bytes); - bin28_to_felem(out, b_out); - return 1; -} - -/* From internal representation to OpenSSL BIGNUM */ -static BIGNUM *felem_to_BN(BIGNUM *out, const felem in) { - felem_bytearray b_in, b_out; - felem_to_bin28(b_in, in); - flip_endian(b_out, b_in, sizeof(b_out)); - return BN_bin2bn(b_out, sizeof(b_out), out); -} - -/* Field operations, using the internal representation of field elements. - * NB! 
These operations are specific to our point multiplication and cannot be - * expected to be correct in general - e.g., multiplication with a large scalar - * will cause an overflow. */ - -static void felem_assign(felem out, const felem in) { - out[0] = in[0]; - out[1] = in[1]; - out[2] = in[2]; - out[3] = in[3]; -} - -/* Sum two field elements: out += in */ -static void felem_sum(felem out, const felem in) { - out[0] += in[0]; - out[1] += in[1]; - out[2] += in[2]; - out[3] += in[3]; -} - -/* Get negative value: out = -in */ -/* Assumes in[i] < 2^57 */ -static void felem_neg(felem out, const felem in) { - static const limb two58p2 = (((limb)1) << 58) + (((limb)1) << 2); - static const limb two58m2 = (((limb)1) << 58) - (((limb)1) << 2); - static const limb two58m42m2 = - (((limb)1) << 58) - (((limb)1) << 42) - (((limb)1) << 2); - - /* Set to 0 mod 2^224-2^96+1 to ensure out > in */ - out[0] = two58p2 - in[0]; - out[1] = two58m42m2 - in[1]; - out[2] = two58m2 - in[2]; - out[3] = two58m2 - in[3]; -} - -/* Subtract field elements: out -= in */ -/* Assumes in[i] < 2^57 */ -static void felem_diff(felem out, const felem in) { - static const limb two58p2 = (((limb)1) << 58) + (((limb)1) << 2); - static const limb two58m2 = (((limb)1) << 58) - (((limb)1) << 2); - static const limb two58m42m2 = - (((limb)1) << 58) - (((limb)1) << 42) - (((limb)1) << 2); - - /* Add 0 mod 2^224-2^96+1 to ensure out > in */ - out[0] += two58p2; - out[1] += two58m42m2; - out[2] += two58m2; - out[3] += two58m2; - - out[0] -= in[0]; - out[1] -= in[1]; - out[2] -= in[2]; - out[3] -= in[3]; -} - -/* Subtract in unreduced 128-bit mode: out -= in */ -/* Assumes in[i] < 2^119 */ -static void widefelem_diff(widefelem out, const widefelem in) { - static const widelimb two120 = ((widelimb)1) << 120; - static const widelimb two120m64 = - (((widelimb)1) << 120) - (((widelimb)1) << 64); - static const widelimb two120m104m64 = - (((widelimb)1) << 120) - (((widelimb)1) << 104) - (((widelimb)1) << 64); - - /* Add 0 mod 2^224-2^96+1 to ensure out > in */ - out[0] += two120; - out[1] += two120m64; - out[2] += two120m64; - out[3] += two120; - out[4] += two120m104m64; - out[5] += two120m64; - out[6] += two120m64; - - out[0] -= in[0]; - out[1] -= in[1]; - out[2] -= in[2]; - out[3] -= in[3]; - out[4] -= in[4]; - out[5] -= in[5]; - out[6] -= in[6]; -} - -/* Subtract in mixed mode: out128 -= in64 */ -/* in[i] < 2^63 */ -static void felem_diff_128_64(widefelem out, const felem in) { - static const widelimb two64p8 = (((widelimb)1) << 64) + (((widelimb)1) << 8); - static const widelimb two64m8 = (((widelimb)1) << 64) - (((widelimb)1) << 8); - static const widelimb two64m48m8 = - (((widelimb)1) << 64) - (((widelimb)1) << 48) - (((widelimb)1) << 8); - - /* Add 0 mod 2^224-2^96+1 to ensure out > in */ - out[0] += two64p8; - out[1] += two64m48m8; - out[2] += two64m8; - out[3] += two64m8; - - out[0] -= in[0]; - out[1] -= in[1]; - out[2] -= in[2]; - out[3] -= in[3]; -} - -/* Multiply a field element by a scalar: out = out * scalar - * The scalars we actually use are small, so results fit without overflow */ -static void felem_scalar(felem out, const limb scalar) { - out[0] *= scalar; - out[1] *= scalar; - out[2] *= scalar; - out[3] *= scalar; -} - -/* Multiply an unreduced field element by a scalar: out = out * scalar - * The scalars we actually use are small, so results fit without overflow */ -static void widefelem_scalar(widefelem out, const widelimb scalar) { - out[0] *= scalar; - out[1] *= scalar; - out[2] *= scalar; - out[3] *= scalar; - out[4] 
*= scalar; - out[5] *= scalar; - out[6] *= scalar; -} - -/* Square a field element: out = in^2 */ -static void felem_square(widefelem out, const felem in) { - limb tmp0, tmp1, tmp2; - tmp0 = 2 * in[0]; - tmp1 = 2 * in[1]; - tmp2 = 2 * in[2]; - out[0] = ((widelimb)in[0]) * in[0]; - out[1] = ((widelimb)in[0]) * tmp1; - out[2] = ((widelimb)in[0]) * tmp2 + ((widelimb)in[1]) * in[1]; - out[3] = ((widelimb)in[3]) * tmp0 + ((widelimb)in[1]) * tmp2; - out[4] = ((widelimb)in[3]) * tmp1 + ((widelimb)in[2]) * in[2]; - out[5] = ((widelimb)in[3]) * tmp2; - out[6] = ((widelimb)in[3]) * in[3]; -} - -/* Multiply two field elements: out = in1 * in2 */ -static void felem_mul(widefelem out, const felem in1, const felem in2) { - out[0] = ((widelimb)in1[0]) * in2[0]; - out[1] = ((widelimb)in1[0]) * in2[1] + ((widelimb)in1[1]) * in2[0]; - out[2] = ((widelimb)in1[0]) * in2[2] + ((widelimb)in1[1]) * in2[1] + - ((widelimb)in1[2]) * in2[0]; - out[3] = ((widelimb)in1[0]) * in2[3] + ((widelimb)in1[1]) * in2[2] + - ((widelimb)in1[2]) * in2[1] + ((widelimb)in1[3]) * in2[0]; - out[4] = ((widelimb)in1[1]) * in2[3] + ((widelimb)in1[2]) * in2[2] + - ((widelimb)in1[3]) * in2[1]; - out[5] = ((widelimb)in1[2]) * in2[3] + ((widelimb)in1[3]) * in2[2]; - out[6] = ((widelimb)in1[3]) * in2[3]; -} - -/* Reduce seven 128-bit coefficients to four 64-bit coefficients. - * Requires in[i] < 2^126, - * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 */ -static void felem_reduce(felem out, const widefelem in) { - static const widelimb two127p15 = - (((widelimb)1) << 127) + (((widelimb)1) << 15); - static const widelimb two127m71 = - (((widelimb)1) << 127) - (((widelimb)1) << 71); - static const widelimb two127m71m55 = - (((widelimb)1) << 127) - (((widelimb)1) << 71) - (((widelimb)1) << 55); - widelimb output[5]; - - /* Add 0 mod 2^224-2^96+1 to ensure all differences are positive */ - output[0] = in[0] + two127p15; - output[1] = in[1] + two127m71m55; - output[2] = in[2] + two127m71; - output[3] = in[3]; - output[4] = in[4]; - - /* Eliminate in[4], in[5], in[6] */ - output[4] += in[6] >> 16; - output[3] += (in[6] & 0xffff) << 40; - output[2] -= in[6]; - - output[3] += in[5] >> 16; - output[2] += (in[5] & 0xffff) << 40; - output[1] -= in[5]; - - output[2] += output[4] >> 16; - output[1] += (output[4] & 0xffff) << 40; - output[0] -= output[4]; - - /* Carry 2 -> 3 -> 4 */ - output[3] += output[2] >> 56; - output[2] &= 0x00ffffffffffffff; - - output[4] = output[3] >> 56; - output[3] &= 0x00ffffffffffffff; - - /* Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 */ - - /* Eliminate output[4] */ - output[2] += output[4] >> 16; - /* output[2] < 2^56 + 2^56 = 2^57 */ - output[1] += (output[4] & 0xffff) << 40; - output[0] -= output[4]; - - /* Carry 0 -> 1 -> 2 -> 3 */ - output[1] += output[0] >> 56; - out[0] = output[0] & 0x00ffffffffffffff; - - output[2] += output[1] >> 56; - /* output[2] < 2^57 + 2^72 */ - out[1] = output[1] & 0x00ffffffffffffff; - output[3] += output[2] >> 56; - /* output[3] <= 2^56 + 2^16 */ - out[2] = output[2] & 0x00ffffffffffffff; - - /* out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, - * out[3] <= 2^56 + 2^16 (due to final carry), - * so out < 2*p */ - out[3] = output[3]; -} - -/* Reduce to unique minimal representation. 
- * Requires 0 <= in < 2*p (always call felem_reduce first) */ -static void felem_contract(felem out, const felem in) { - static const int64_t two56 = ((limb)1) << 56; - /* 0 <= in < 2*p, p = 2^224 - 2^96 + 1 */ - /* if in > p , reduce in = in - 2^224 + 2^96 - 1 */ - int64_t tmp[4], a; - tmp[0] = in[0]; - tmp[1] = in[1]; - tmp[2] = in[2]; - tmp[3] = in[3]; - /* Case 1: a = 1 iff in >= 2^224 */ - a = (in[3] >> 56); - tmp[0] -= a; - tmp[1] += a << 40; - tmp[3] &= 0x00ffffffffffffff; - /* Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1 and - * the lower part is non-zero */ - a = ((in[3] & in[2] & (in[1] | 0x000000ffffffffff)) + 1) | - (((int64_t)(in[0] + (in[1] & 0x000000ffffffffff)) - 1) >> 63); - a &= 0x00ffffffffffffff; - /* turn a into an all-one mask (if a = 0) or an all-zero mask */ - a = (a - 1) >> 63; - /* subtract 2^224 - 2^96 + 1 if a is all-one */ - tmp[3] &= a ^ 0xffffffffffffffff; - tmp[2] &= a ^ 0xffffffffffffffff; - tmp[1] &= (a ^ 0xffffffffffffffff) | 0x000000ffffffffff; - tmp[0] -= 1 & a; - - /* eliminate negative coefficients: if tmp[0] is negative, tmp[1] must - * be non-zero, so we only need one step */ - a = tmp[0] >> 63; - tmp[0] += two56 & a; - tmp[1] -= 1 & a; - - /* carry 1 -> 2 -> 3 */ - tmp[2] += tmp[1] >> 56; - tmp[1] &= 0x00ffffffffffffff; - - tmp[3] += tmp[2] >> 56; - tmp[2] &= 0x00ffffffffffffff; - - /* Now 0 <= out < p */ - out[0] = tmp[0]; - out[1] = tmp[1]; - out[2] = tmp[2]; - out[3] = tmp[3]; -} - -/* Zero-check: returns 1 if input is 0, and 0 otherwise. We know that field - * elements are reduced to in < 2^225, so we only need to check three cases: 0, - * 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2 */ -static limb felem_is_zero(const felem in) { - limb zero = in[0] | in[1] | in[2] | in[3]; - zero = (((int64_t)(zero)-1) >> 63) & 1; - - limb two224m96p1 = (in[0] ^ 1) | (in[1] ^ 0x00ffff0000000000) | - (in[2] ^ 0x00ffffffffffffff) | - (in[3] ^ 0x00ffffffffffffff); - two224m96p1 = (((int64_t)(two224m96p1)-1) >> 63) & 1; - limb two225m97p2 = (in[0] ^ 2) | (in[1] ^ 0x00fffe0000000000) | - (in[2] ^ 0x00ffffffffffffff) | - (in[3] ^ 0x01ffffffffffffff); - two225m97p2 = (((int64_t)(two225m97p2)-1) >> 63) & 1; - return (zero | two224m96p1 | two225m97p2); -} - -/* Invert a field element */ -/* Computation chain copied from djb's code */ -static void felem_inv(felem out, const felem in) { - felem ftmp, ftmp2, ftmp3, ftmp4; - widefelem tmp; - - felem_square(tmp, in); - felem_reduce(ftmp, tmp); /* 2 */ - felem_mul(tmp, in, ftmp); - felem_reduce(ftmp, tmp); /* 2^2 - 1 */ - felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^3 - 2 */ - felem_mul(tmp, in, ftmp); - felem_reduce(ftmp, tmp); /* 2^3 - 1 */ - felem_square(tmp, ftmp); - felem_reduce(ftmp2, tmp); /* 2^4 - 2 */ - felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^5 - 4 */ - felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^6 - 8 */ - felem_mul(tmp, ftmp2, ftmp); - felem_reduce(ftmp, tmp); /* 2^6 - 1 */ - felem_square(tmp, ftmp); - felem_reduce(ftmp2, tmp); /* 2^7 - 2 */ - for (size_t i = 0; i < 5; ++i) { /* 2^12 - 2^6 */ - felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); - } - felem_mul(tmp, ftmp2, ftmp); - felem_reduce(ftmp2, tmp); /* 2^12 - 1 */ - felem_square(tmp, ftmp2); - felem_reduce(ftmp3, tmp); /* 2^13 - 2 */ - for (size_t i = 0; i < 11; ++i) {/* 2^24 - 2^12 */ - felem_square(tmp, ftmp3); - felem_reduce(ftmp3, tmp); - } - felem_mul(tmp, ftmp3, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^24 - 1 */ - felem_square(tmp, ftmp2); - felem_reduce(ftmp3, tmp); /* 2^25 - 2 */ - 
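/* Editorial aside, not part of the upstream diff: the bracketed annotations in
 * this inversion chain track the exponent of |in| accumulated so far.  Each
 * squaring doubles the exponent and each multiplication of two intermediates
 * adds theirs, e.g. at this point ftmp3 == in^(2^25 - 2); the next 23
 * squarings give in^((2^25 - 2) * 2^23) == in^(2^48 - 2^24), and multiplying
 * by ftmp2 == in^(2^24 - 1) yields in^(2^48 - 1).  The chain terminates at
 * in^(2^224 - 2^96 - 1) == in^(p - 2), which is the inverse of |in| modulo the
 * curve prime p = 2^224 - 2^96 + 1 by Fermat's little theorem. */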
for (size_t i = 0; i < 23; ++i) {/* 2^48 - 2^24 */ - felem_square(tmp, ftmp3); - felem_reduce(ftmp3, tmp); - } - felem_mul(tmp, ftmp3, ftmp2); - felem_reduce(ftmp3, tmp); /* 2^48 - 1 */ - felem_square(tmp, ftmp3); - felem_reduce(ftmp4, tmp); /* 2^49 - 2 */ - for (size_t i = 0; i < 47; ++i) {/* 2^96 - 2^48 */ - felem_square(tmp, ftmp4); - felem_reduce(ftmp4, tmp); - } - felem_mul(tmp, ftmp3, ftmp4); - felem_reduce(ftmp3, tmp); /* 2^96 - 1 */ - felem_square(tmp, ftmp3); - felem_reduce(ftmp4, tmp); /* 2^97 - 2 */ - for (size_t i = 0; i < 23; ++i) {/* 2^120 - 2^24 */ - felem_square(tmp, ftmp4); - felem_reduce(ftmp4, tmp); - } - felem_mul(tmp, ftmp2, ftmp4); - felem_reduce(ftmp2, tmp); /* 2^120 - 1 */ - for (size_t i = 0; i < 6; ++i) { /* 2^126 - 2^6 */ - felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); - } - felem_mul(tmp, ftmp2, ftmp); - felem_reduce(ftmp, tmp); /* 2^126 - 1 */ - felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^127 - 2 */ - felem_mul(tmp, ftmp, in); - felem_reduce(ftmp, tmp); /* 2^127 - 1 */ - for (size_t i = 0; i < 97; ++i) {/* 2^224 - 2^97 */ - felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); - } - felem_mul(tmp, ftmp, ftmp3); - felem_reduce(out, tmp); /* 2^224 - 2^96 - 1 */ -} - -/* Copy in constant time: - * if icopy == 1, copy in to out, - * if icopy == 0, copy out to itself. */ -static void copy_conditional(felem out, const felem in, limb icopy) { - /* icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one */ - const limb copy = -icopy; - for (size_t i = 0; i < 4; ++i) { - const limb tmp = copy & (in[i] ^ out[i]); - out[i] ^= tmp; - } -} - -/* ELLIPTIC CURVE POINT OPERATIONS - * - * Points are represented in Jacobian projective coordinates: - * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3), - * or to the point at infinity if Z == 0. */ - -/* Double an elliptic curve point: - * (X', Y', Z') = 2 * (X, Y, Z), where - * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2 - * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^2 - * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z - * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed, - * while x_out == y_in is not (maybe this works, but it's not tested). 
*/ -static void point_double(felem x_out, felem y_out, felem z_out, - const felem x_in, const felem y_in, const felem z_in) { - widefelem tmp, tmp2; - felem delta, gamma, beta, alpha, ftmp, ftmp2; - - felem_assign(ftmp, x_in); - felem_assign(ftmp2, x_in); - - /* delta = z^2 */ - felem_square(tmp, z_in); - felem_reduce(delta, tmp); - - /* gamma = y^2 */ - felem_square(tmp, y_in); - felem_reduce(gamma, tmp); - - /* beta = x*gamma */ - felem_mul(tmp, x_in, gamma); - felem_reduce(beta, tmp); - - /* alpha = 3*(x-delta)*(x+delta) */ - felem_diff(ftmp, delta); - /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */ - felem_sum(ftmp2, delta); - /* ftmp2[i] < 2^57 + 2^57 = 2^58 */ - felem_scalar(ftmp2, 3); - /* ftmp2[i] < 3 * 2^58 < 2^60 */ - felem_mul(tmp, ftmp, ftmp2); - /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */ - felem_reduce(alpha, tmp); - - /* x' = alpha^2 - 8*beta */ - felem_square(tmp, alpha); - /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */ - felem_assign(ftmp, beta); - felem_scalar(ftmp, 8); - /* ftmp[i] < 8 * 2^57 = 2^60 */ - felem_diff_128_64(tmp, ftmp); - /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */ - felem_reduce(x_out, tmp); - - /* z' = (y + z)^2 - gamma - delta */ - felem_sum(delta, gamma); - /* delta[i] < 2^57 + 2^57 = 2^58 */ - felem_assign(ftmp, y_in); - felem_sum(ftmp, z_in); - /* ftmp[i] < 2^57 + 2^57 = 2^58 */ - felem_square(tmp, ftmp); - /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */ - felem_diff_128_64(tmp, delta); - /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */ - felem_reduce(z_out, tmp); - - /* y' = alpha*(4*beta - x') - 8*gamma^2 */ - felem_scalar(beta, 4); - /* beta[i] < 4 * 2^57 = 2^59 */ - felem_diff(beta, x_out); - /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */ - felem_mul(tmp, alpha, beta); - /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */ - felem_square(tmp2, gamma); - /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */ - widefelem_scalar(tmp2, 8); - /* tmp2[i] < 8 * 2^116 = 2^119 */ - widefelem_diff(tmp, tmp2); - /* tmp[i] < 2^119 + 2^120 < 2^121 */ - felem_reduce(y_out, tmp); -} - -/* Add two elliptic curve points: - * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where - * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 - - * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 - * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * - * X_1)^2 - X_3) - - * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3 - * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2) - * - * This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0. */ - -/* This function is not entirely constant-time: it includes a branch for - * checking whether the two input points are equal, (while not equal to the - * point at infinity). This case never happens during single point - * multiplication, so there is no timing leak for ECDH or ECDSA signing. 
*/ -static void point_add(felem x3, felem y3, felem z3, const felem x1, - const felem y1, const felem z1, const int mixed, - const felem x2, const felem y2, const felem z2) { - felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, x_out, y_out, z_out; - widefelem tmp, tmp2; - limb z1_is_zero, z2_is_zero, x_equal, y_equal; - - if (!mixed) { - /* ftmp2 = z2^2 */ - felem_square(tmp, z2); - felem_reduce(ftmp2, tmp); - - /* ftmp4 = z2^3 */ - felem_mul(tmp, ftmp2, z2); - felem_reduce(ftmp4, tmp); - - /* ftmp4 = z2^3*y1 */ - felem_mul(tmp2, ftmp4, y1); - felem_reduce(ftmp4, tmp2); - - /* ftmp2 = z2^2*x1 */ - felem_mul(tmp2, ftmp2, x1); - felem_reduce(ftmp2, tmp2); - } else { - /* We'll assume z2 = 1 (special case z2 = 0 is handled later) */ - - /* ftmp4 = z2^3*y1 */ - felem_assign(ftmp4, y1); - - /* ftmp2 = z2^2*x1 */ - felem_assign(ftmp2, x1); - } - - /* ftmp = z1^2 */ - felem_square(tmp, z1); - felem_reduce(ftmp, tmp); - - /* ftmp3 = z1^3 */ - felem_mul(tmp, ftmp, z1); - felem_reduce(ftmp3, tmp); - - /* tmp = z1^3*y2 */ - felem_mul(tmp, ftmp3, y2); - /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */ - - /* ftmp3 = z1^3*y2 - z2^3*y1 */ - felem_diff_128_64(tmp, ftmp4); - /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */ - felem_reduce(ftmp3, tmp); - - /* tmp = z1^2*x2 */ - felem_mul(tmp, ftmp, x2); - /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */ - - /* ftmp = z1^2*x2 - z2^2*x1 */ - felem_diff_128_64(tmp, ftmp2); - /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */ - felem_reduce(ftmp, tmp); - - /* the formulae are incorrect if the points are equal - * so we check for this and do doubling if this happens */ - x_equal = felem_is_zero(ftmp); - y_equal = felem_is_zero(ftmp3); - z1_is_zero = felem_is_zero(z1); - z2_is_zero = felem_is_zero(z2); - /* In affine coordinates, (X_1, Y_1) == (X_2, Y_2) */ - if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) { - point_double(x3, y3, z3, x1, y1, z1); - return; - } - - /* ftmp5 = z1*z2 */ - if (!mixed) { - felem_mul(tmp, z1, z2); - felem_reduce(ftmp5, tmp); - } else { - /* special case z2 = 0 is handled later */ - felem_assign(ftmp5, z1); - } - - /* z_out = (z1^2*x2 - z2^2*x1)*(z1*z2) */ - felem_mul(tmp, ftmp, ftmp5); - felem_reduce(z_out, tmp); - - /* ftmp = (z1^2*x2 - z2^2*x1)^2 */ - felem_assign(ftmp5, ftmp); - felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); - - /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */ - felem_mul(tmp, ftmp, ftmp5); - felem_reduce(ftmp5, tmp); - - /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */ - felem_mul(tmp, ftmp2, ftmp); - felem_reduce(ftmp2, tmp); - - /* tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */ - felem_mul(tmp, ftmp4, ftmp5); - /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */ - - /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */ - felem_square(tmp2, ftmp3); - /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */ - - /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */ - felem_diff_128_64(tmp2, ftmp5); - /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */ - - /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */ - felem_assign(ftmp5, ftmp2); - felem_scalar(ftmp5, 2); - /* ftmp5[i] < 2 * 2^57 = 2^58 */ - - /* x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 - - 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */ - felem_diff_128_64(tmp2, ftmp5); - /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */ - felem_reduce(x_out, tmp2); - - /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out */ - felem_diff(ftmp2, x_out); - /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */ - - /* tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) */ - felem_mul(tmp2, ftmp3, ftmp2); - /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */ - - /* y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - 
x_out) - - z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */ - widefelem_diff(tmp2, tmp); - /* tmp2[i] < 2^118 + 2^120 < 2^121 */ - felem_reduce(y_out, tmp2); - - /* the result (x_out, y_out, z_out) is incorrect if one of the inputs is - * the point at infinity, so we need to check for this separately */ - - /* if point 1 is at infinity, copy point 2 to output, and vice versa */ - copy_conditional(x_out, x2, z1_is_zero); - copy_conditional(x_out, x1, z2_is_zero); - copy_conditional(y_out, y2, z1_is_zero); - copy_conditional(y_out, y1, z2_is_zero); - copy_conditional(z_out, z2, z1_is_zero); - copy_conditional(z_out, z1, z2_is_zero); - felem_assign(x3, x_out); - felem_assign(y3, y_out); - felem_assign(z3, z_out); -} - -/* select_point selects the |idx|th point from a precomputation table and - * copies it to out. */ -static void select_point(const u64 idx, size_t size, - const felem pre_comp[/*size*/][3], felem out[3]) { - limb *outlimbs = &out[0][0]; - OPENSSL_memset(outlimbs, 0, 3 * sizeof(felem)); - - for (size_t i = 0; i < size; i++) { - const limb *inlimbs = &pre_comp[i][0][0]; - u64 mask = i ^ idx; - mask |= mask >> 4; - mask |= mask >> 2; - mask |= mask >> 1; - mask &= 1; - mask--; - for (size_t j = 0; j < 4 * 3; j++) { - outlimbs[j] |= inlimbs[j] & mask; - } - } -} - -/* get_bit returns the |i|th bit in |in| */ -static char get_bit(const felem_bytearray in, size_t i) { - if (i >= 224) { - return 0; - } - return (in[i >> 3] >> (i & 7)) & 1; -} - -/* Interleaved point multiplication using precomputed point multiples: - * The small point multiples 0*P, 1*P, ..., 16*P are in p_pre_comp, the scalars - * in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple - * of the generator, using certain (large) precomputed multiples in g_pre_comp. - * Output point (X, Y, Z) is stored in x_out, y_out, z_out */ -static void batch_mul(felem x_out, felem y_out, felem z_out, const u8 *p_scalar, - const u8 *g_scalar, const felem p_pre_comp[17][3]) { - felem nq[3], tmp[4]; - u64 bits; - u8 sign, digit; - - /* set nq to the point at infinity */ - OPENSSL_memset(nq, 0, 3 * sizeof(felem)); - - /* Loop over both scalars msb-to-lsb, interleaving additions of multiples of - * the generator (two in each of the last 28 rounds) and additions of p (every - * 5th round). */ - int skip = 1; /* save two point operations in the first round */ - size_t i = p_scalar != NULL ? 
220 : 27; - for (;;) { - /* double */ - if (!skip) { - point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]); - } - - /* add multiples of the generator */ - if (g_scalar != NULL && i <= 27) { - /* first, look 28 bits upwards */ - bits = get_bit(g_scalar, i + 196) << 3; - bits |= get_bit(g_scalar, i + 140) << 2; - bits |= get_bit(g_scalar, i + 84) << 1; - bits |= get_bit(g_scalar, i + 28); - /* select the point to add, in constant time */ - select_point(bits, 16, g_pre_comp[1], tmp); - - if (!skip) { - point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */, - tmp[0], tmp[1], tmp[2]); - } else { - OPENSSL_memcpy(nq, tmp, 3 * sizeof(felem)); - skip = 0; - } - - /* second, look at the current position */ - bits = get_bit(g_scalar, i + 168) << 3; - bits |= get_bit(g_scalar, i + 112) << 2; - bits |= get_bit(g_scalar, i + 56) << 1; - bits |= get_bit(g_scalar, i); - /* select the point to add, in constant time */ - select_point(bits, 16, g_pre_comp[0], tmp); - point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */, tmp[0], - tmp[1], tmp[2]); - } - - /* do other additions every 5 doublings */ - if (p_scalar != NULL && i % 5 == 0) { - bits = get_bit(p_scalar, i + 4) << 5; - bits |= get_bit(p_scalar, i + 3) << 4; - bits |= get_bit(p_scalar, i + 2) << 3; - bits |= get_bit(p_scalar, i + 1) << 2; - bits |= get_bit(p_scalar, i) << 1; - bits |= get_bit(p_scalar, i - 1); - ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits); - - /* select the point to add or subtract */ - select_point(digit, 17, p_pre_comp, tmp); - felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative point */ - copy_conditional(tmp[1], tmp[3], sign); - - if (!skip) { - point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 0 /* mixed */, - tmp[0], tmp[1], tmp[2]); - } else { - OPENSSL_memcpy(nq, tmp, 3 * sizeof(felem)); - skip = 0; - } - } - - if (i == 0) { - break; - } - --i; - } - felem_assign(x_out, nq[0]); - felem_assign(y_out, nq[1]); - felem_assign(z_out, nq[2]); -} - -/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns - * (X', Y') = (X/Z^2, Y/Z^3) */ -static int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group, - const EC_POINT *point, - BIGNUM *x, BIGNUM *y, - BN_CTX *ctx) { - felem z1, z2, x_in, y_in, x_out, y_out; - widefelem tmp; - - if (EC_POINT_is_at_infinity(group, point)) { - OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY); - return 0; - } - - if (!BN_to_felem(x_in, &point->X) || - !BN_to_felem(y_in, &point->Y) || - !BN_to_felem(z1, &point->Z)) { - return 0; - } - - felem_inv(z2, z1); - felem_square(tmp, z2); - felem_reduce(z1, tmp); - felem_mul(tmp, x_in, z1); - felem_reduce(x_in, tmp); - felem_contract(x_out, x_in); - if (x != NULL && !felem_to_BN(x, x_out)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - return 0; - } - - felem_mul(tmp, z1, z2); - felem_reduce(z1, tmp); - felem_mul(tmp, y_in, z1); - felem_reduce(y_in, tmp); - felem_contract(y_out, y_in); - if (y != NULL && !felem_to_BN(y, y_out)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - return 0; - } - - return 1; -} - -static int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r, - const BIGNUM *g_scalar, const EC_POINT *p, - const BIGNUM *p_scalar, BN_CTX *ctx) { - int ret = 0; - BN_CTX *new_ctx = NULL; - BIGNUM *x, *y, *z, *tmp_scalar; - felem_bytearray g_secret, p_secret; - felem p_pre_comp[17][3]; - felem_bytearray tmp; - felem x_in, y_in, z_in, x_out, y_out, z_out; - - if (ctx == NULL) { - ctx = BN_CTX_new(); - new_ctx = ctx; - if (ctx == NULL) { - return 0; - } - } - - BN_CTX_start(ctx); - if ((x = 
BN_CTX_get(ctx)) == NULL || - (y = BN_CTX_get(ctx)) == NULL || - (z = BN_CTX_get(ctx)) == NULL || - (tmp_scalar = BN_CTX_get(ctx)) == NULL) { - goto err; - } - - if (p != NULL && p_scalar != NULL) { - /* We treat NULL scalars as 0, and NULL points as points at infinity, i.e., - * they contribute nothing to the linear combination. */ - OPENSSL_memset(&p_secret, 0, sizeof(p_secret)); - OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp)); - size_t num_bytes; - /* reduce g_scalar to 0 <= g_scalar < 2^224 */ - if (BN_num_bits(p_scalar) > 224 || BN_is_negative(p_scalar)) { - /* this is an unusual input, and we don't guarantee - * constant-timeness */ - if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - num_bytes = BN_bn2bin(tmp_scalar, tmp); - } else { - num_bytes = BN_bn2bin(p_scalar, tmp); - } - - flip_endian(p_secret, tmp, num_bytes); - /* precompute multiples */ - if (!BN_to_felem(x_out, &p->X) || - !BN_to_felem(y_out, &p->Y) || - !BN_to_felem(z_out, &p->Z)) { - goto err; - } - - felem_assign(p_pre_comp[1][0], x_out); - felem_assign(p_pre_comp[1][1], y_out); - felem_assign(p_pre_comp[1][2], z_out); - - for (size_t j = 2; j <= 16; ++j) { - if (j & 1) { - point_add(p_pre_comp[j][0], p_pre_comp[j][1], p_pre_comp[j][2], - p_pre_comp[1][0], p_pre_comp[1][1], p_pre_comp[1][2], - 0, p_pre_comp[j - 1][0], p_pre_comp[j - 1][1], - p_pre_comp[j - 1][2]); - } else { - point_double(p_pre_comp[j][0], p_pre_comp[j][1], - p_pre_comp[j][2], p_pre_comp[j / 2][0], - p_pre_comp[j / 2][1], p_pre_comp[j / 2][2]); - } - } - } - - if (g_scalar != NULL) { - OPENSSL_memset(g_secret, 0, sizeof(g_secret)); - size_t num_bytes; - /* reduce g_scalar to 0 <= g_scalar < 2^224 */ - if (BN_num_bits(g_scalar) > 224 || BN_is_negative(g_scalar)) { - /* this is an unusual input, and we don't guarantee constant-timeness */ - if (!BN_nnmod(tmp_scalar, g_scalar, &group->order, ctx)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - num_bytes = BN_bn2bin(tmp_scalar, tmp); - } else { - num_bytes = BN_bn2bin(g_scalar, tmp); - } - - flip_endian(g_secret, tmp, num_bytes); - } - batch_mul(x_out, y_out, z_out, - (p != NULL && p_scalar != NULL) ? p_secret : NULL, - g_scalar != NULL ? g_secret : NULL, (const felem(*)[3])p_pre_comp); - - /* reduce the output to its unique minimal representation */ - felem_contract(x_in, x_out); - felem_contract(y_in, y_out); - felem_contract(z_in, z_out); - if (!felem_to_BN(x, x_in) || - !felem_to_BN(y, y_in) || - !felem_to_BN(z, z_in)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - ret = ec_point_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx); - -err: - BN_CTX_end(ctx); - BN_CTX_free(new_ctx); - return ret; -} - -const EC_METHOD EC_GFp_nistp224_method = { - ec_GFp_simple_group_init, - ec_GFp_simple_group_finish, - ec_GFp_simple_group_copy, - ec_GFp_simple_group_set_curve, - ec_GFp_nistp224_point_get_affine_coordinates, - ec_GFp_nistp224_points_mul, - ec_GFp_simple_field_mul, - ec_GFp_simple_field_sqr, - NULL /* field_encode */, - NULL /* field_decode */, -}; - -#endif /* 64_BIT && !WINDOWS && !SMALL */ diff --git a/Sources/BoringSSL/crypto/ec/util-64.c b/Sources/BoringSSL/crypto/ec/util-64.c deleted file mode 100644 index 400627125..000000000 --- a/Sources/BoringSSL/crypto/ec/util-64.c +++ /dev/null @@ -1,109 +0,0 @@ -/* Copyright (c) 2015, Google Inc. 
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - - -#if defined(OPENSSL_64_BIT) && !defined(OPENSSL_WINDOWS) - -#include - -#include "internal.h" - -/* This function looks at 5+1 scalar bits (5 current, 1 adjacent less - * significant bit), and recodes them into a signed digit for use in fast point - * multiplication: the use of signed rather than unsigned digits means that - * fewer points need to be precomputed, given that point inversion is easy (a - * precomputed point dP makes -dP available as well). - * - * BACKGROUND: - * - * Signed digits for multiplication were introduced by Booth ("A signed binary - * multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV, - * pt. 2 (1951), pp. 236-240), in that case for multiplication of integers. - * Booth's original encoding did not generally improve the density of nonzero - * digits over the binary representation, and was merely meant to simplify the - * handling of signed factors given in two's complement; but it has since been - * shown to be the basis of various signed-digit representations that do have - * further advantages, including the wNAF, using the following general - * approach: - * - * (1) Given a binary representation - * - * b_k ... b_2 b_1 b_0, - * - * of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1 - * by using bit-wise subtraction as follows: - * - * b_k b_(k-1) ... b_2 b_1 b_0 - * - b_k ... b_3 b_2 b_1 b_0 - * ------------------------------------- - * s_k b_(k-1) ... s_3 s_2 s_1 s_0 - * - * A left-shift followed by subtraction of the original value yields a new - * representation of the same value, using signed bits s_i = b_(i+1) - b_i. - * This representation from Booth's paper has since appeared in the - * literature under a variety of different names including "reversed binary - * form", "alternating greedy expansion", "mutual opposite form", and - * "sign-alternating {+-1}-representation". - * - * An interesting property is that among the nonzero bits, values 1 and -1 - * strictly alternate. - * - * (2) Various window schemes can be applied to the Booth representation of - * integers: for example, right-to-left sliding windows yield the wNAF - * (a signed-digit encoding independently discovered by various researchers - * in the 1990s), and left-to-right sliding windows yield a left-to-right - * equivalent of the wNAF (independently discovered by various researchers - * around 2004). - * - * To prevent leaking information through side channels in point multiplication, - * we need to recode the given integer into a regular pattern: sliding windows - * as in wNAFs won't do, we need their fixed-window equivalent -- which is a few - * decades older: we'll be using the so-called "modified Booth encoding" due to - * MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 
49 - * (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five - * signed bits into a signed digit: - * - * s_(4j + 4) s_(4j + 3) s_(4j + 2) s_(4j + 1) s_(4j) - * - * The sign-alternating property implies that the resulting digit values are - * integers from -16 to 16. - * - * Of course, we don't actually need to compute the signed digits s_i as an - * intermediate step (that's just a nice way to see how this scheme relates - * to the wNAF): a direct computation obtains the recoded digit from the - * six bits b_(4j + 4) ... b_(4j - 1). - * - * This function takes those five bits as an integer (0 .. 63), writing the - * recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute - * value, in the range 0 .. 8). Note that this integer essentially provides the - * input bits "shifted to the left" by one position: for example, the input to - * compute the least significant recoded digit, given that there's no bit b_-1, - * has to be b_4 b_3 b_2 b_1 b_0 0. */ -void ec_GFp_nistp_recode_scalar_bits(uint8_t *sign, uint8_t *digit, - uint8_t in) { - uint8_t s, d; - - s = ~((in >> 5) - 1); /* sets all bits to MSB(in), 'in' seen as - * 6-bit value */ - d = (1 << 6) - in - 1; - d = (d & s) | (in & ~s); - d = (d >> 1) + (d & 1); - - *sign = s & 1; - *digit = d; -} - -#endif /* 64_BIT && !WINDOWS */ diff --git a/Sources/BoringSSL/crypto/ec/ec_asn1.c b/Sources/BoringSSL/crypto/ec_extra/ec_asn1.c similarity index 76% rename from Sources/BoringSSL/crypto/ec/ec_asn1.c rename to Sources/BoringSSL/crypto/ec_extra/ec_asn1.c index 35c8f2771..c125af2b4 100644 --- a/Sources/BoringSSL/crypto/ec/ec_asn1.c +++ b/Sources/BoringSSL/crypto/ec_extra/ec_asn1.c @@ -62,14 +62,14 @@ #include #include -#include "internal.h" +#include "../fipsmodule/ec/internal.h" #include "../bytestring/internal.h" #include "../internal.h" -static const uint8_t kParametersTag = +static const unsigned kParametersTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0; -static const uint8_t kPublicKeyTag = +static const unsigned kPublicKeyTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 1; EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { @@ -83,14 +83,14 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { return NULL; } - /* Parse the optional parameters field. */ + // Parse the optional parameters field. EC_GROUP *inner_group = NULL; EC_KEY *ret = NULL; if (CBS_peek_asn1_tag(&ec_private_key, kParametersTag)) { - /* Per SEC 1, as an alternative to omitting it, one is allowed to specify - * this field and put in a NULL to mean inheriting this value. This was - * omitted in a previous version of this logic without problems, so leave it - * unimplemented. */ + // Per SEC 1, as an alternative to omitting it, one is allowed to specify + // this field and put in a NULL to mean inheriting this value. This was + // omitted in a previous version of this logic without problems, so leave it + // unimplemented. CBS child; if (!CBS_get_asn1(&ec_private_key, &child, kParametersTag)) { OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR); @@ -103,7 +103,7 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { if (group == NULL) { group = inner_group; } else if (EC_GROUP_cmp(group, inner_group, NULL) != 0) { - /* If a group was supplied externally, it must match. */ + // If a group was supplied externally, it must match. 
OPENSSL_PUT_ERROR(EC, EC_R_GROUP_MISMATCH); goto err; } @@ -123,9 +123,9 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { goto err; } - /* Although RFC 5915 specifies the length of the key, OpenSSL historically - * got this wrong, so accept any length. See upstream's - * 30cd4ff294252c4b6a4b69cbef6a5b4117705d22. */ + // Although RFC 5915 specifies the length of the key, OpenSSL historically + // got this wrong, so accept any length. See upstream's + // 30cd4ff294252c4b6a4b69cbef6a5b4117705d22. ret->priv_key = BN_bin2bn(CBS_data(&private_key), CBS_len(&private_key), NULL); ret->pub_key = EC_POINT_new(group); @@ -143,12 +143,12 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { uint8_t padding; if (!CBS_get_asn1(&ec_private_key, &child, kPublicKeyTag) || !CBS_get_asn1(&child, &public_key, CBS_ASN1_BITSTRING) || - /* As in a SubjectPublicKeyInfo, the byte-encoded public key is then - * encoded as a BIT STRING with bits ordered as in the DER encoding. */ + // As in a SubjectPublicKeyInfo, the byte-encoded public key is then + // encoded as a BIT STRING with bits ordered as in the DER encoding. !CBS_get_u8(&public_key, &padding) || padding != 0 || - /* Explicitly check |public_key| is non-empty to save the conversion - * form later. */ + // Explicitly check |public_key| is non-empty to save the conversion + // form later. CBS_len(&public_key) == 0 || !EC_POINT_oct2point(group, ret->pub_key, CBS_data(&public_key), CBS_len(&public_key), NULL) || @@ -157,16 +157,17 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { goto err; } - /* Save the point conversion form. - * TODO(davidben): Consider removing this. */ - ret->conv_form = (point_conversion_form_t)(CBS_data(&public_key)[0] & ~0x01); + // Save the point conversion form. + // TODO(davidben): Consider removing this. + ret->conv_form = + (point_conversion_form_t)(CBS_data(&public_key)[0] & ~0x01); } else { - /* Compute the public key instead. */ + // Compute the public key instead. if (!EC_POINT_mul(group, ret->pub_key, ret->priv_key, NULL, NULL, NULL)) { goto err; } - /* Remember the original private-key-only encoding. - * TODO(davidben): Consider removing this. */ + // Remember the original private-key-only encoding. + // TODO(davidben): Consider removing this. ret->enc_flag |= EC_PKEY_NO_PUBKEY; } @@ -175,7 +176,7 @@ EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group) { goto err; } - /* Ensure the resulting key is valid. */ + // Ensure the resulting key is valid. if (!EC_KEY_check_key(ret)) { goto err; } @@ -217,13 +218,13 @@ int EC_KEY_marshal_private_key(CBB *cbb, const EC_KEY *key, } } - /* TODO(fork): replace this flexibility with sensible default? */ + // TODO(fork): replace this flexibility with sensible default? if (!(enc_flags & EC_PKEY_NO_PUBKEY) && key->pub_key != NULL) { CBB child, public_key; if (!CBB_add_asn1(&ec_private_key, &child, kPublicKeyTag) || !CBB_add_asn1(&child, &public_key, CBS_ASN1_BITSTRING) || - /* As in a SubjectPublicKeyInfo, the byte-encoded public key is then - * encoded as a BIT STRING with bits ordered as in the DER encoding. */ + // As in a SubjectPublicKeyInfo, the byte-encoded public key is then + // encoded as a BIT STRING with bits ordered as in the DER encoding. 
!CBB_add_u8(&public_key, 0 /* padding */) || !EC_POINT_point2cbb(&public_key, key->group, key->pub_key, key->conv_form, NULL) || @@ -241,8 +242,8 @@ int EC_KEY_marshal_private_key(CBB *cbb, const EC_KEY *key, return 1; } -/* is_unsigned_integer returns one if |cbs| is a valid unsigned DER INTEGER and - * zero otherwise. */ +// is_unsigned_integer returns one if |cbs| is a valid unsigned DER INTEGER and +// zero otherwise. static int is_unsigned_integer(const CBS *cbs) { if (CBS_len(cbs) == 0) { return 0; @@ -250,20 +251,20 @@ static int is_unsigned_integer(const CBS *cbs) { uint8_t byte = CBS_data(cbs)[0]; if ((byte & 0x80) || (byte == 0 && CBS_len(cbs) > 1 && (CBS_data(cbs)[1] & 0x80) == 0)) { - /* Negative or not minimally-encoded. */ + // Negative or not minimally-encoded. return 0; } return 1; } -/* kPrimeFieldOID is the encoding of 1.2.840.10045.1.1. */ +// kPrimeFieldOID is the encoding of 1.2.840.10045.1.1. static const uint8_t kPrimeField[] = {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x01, 0x01}; static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a, CBS *out_b, CBS *out_base_x, CBS *out_base_y, CBS *out_order) { - /* See RFC 3279, section 2.3.5. Note that RFC 3279 calls this structure an - * ECParameters while RFC 5480 calls it a SpecifiedECDomain. */ + // See RFC 3279, section 2.3.5. Note that RFC 3279 calls this structure an + // ECParameters while RFC 5480 calls it a SpecifiedECDomain. CBS params, field_id, field_type, curve, base; uint64_t version; if (!CBS_get_asn1(in, ¶ms, CBS_ASN1_SEQUENCE) || @@ -279,7 +280,7 @@ static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a, !CBS_get_asn1(¶ms, &curve, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&curve, out_a, CBS_ASN1_OCTETSTRING) || !CBS_get_asn1(&curve, out_b, CBS_ASN1_OCTETSTRING) || - /* |curve| has an optional BIT STRING seed which we ignore. */ + // |curve| has an optional BIT STRING seed which we ignore. !CBS_get_asn1(¶ms, &base, CBS_ASN1_OCTETSTRING) || !CBS_get_asn1(¶ms, out_order, CBS_ASN1_INTEGER) || !is_unsigned_integer(out_order)) { @@ -287,11 +288,11 @@ static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a, return 0; } - /* |params| has an optional cofactor which we ignore. With the optional seed - * in |curve|, a group already has arbitrarily many encodings. Parse enough to - * uniquely determine the curve. */ + // |params| has an optional cofactor which we ignore. With the optional seed + // in |curve|, a group already has arbitrarily many encodings. Parse enough to + // uniquely determine the curve. - /* Require that the base point use uncompressed form. */ + // Require that the base point use uncompressed form. uint8_t form; if (!CBS_get_u8(&base, &form) || form != POINT_CONVERSION_UNCOMPRESSED) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FORM); @@ -309,10 +310,10 @@ static int parse_explicit_prime_curve(CBS *in, CBS *out_prime, CBS *out_a, return 1; } -/* integers_equal returns one if |a| and |b| are equal, up to leading zeros, and - * zero otherwise. */ +// integers_equal returns one if |a| and |b| are equal, up to leading zeros, and +// zero otherwise. static int integers_equal(const CBS *a, const uint8_t *b, size_t b_len) { - /* Remove leading zeros from |a| and |b|. */ + // Remove leading zeros from |a| and |b|. CBS a_copy = *a; while (CBS_len(&a_copy) > 0 && CBS_data(&a_copy)[0] == 0) { CBS_skip(&a_copy, 1); @@ -331,12 +332,13 @@ EC_GROUP *EC_KEY_parse_curve_name(CBS *cbs) { return NULL; } - /* Look for a matching curve. 
*/ - unsigned i; - for (i = 0; OPENSSL_built_in_curves[i].nid != NID_undef; i++) { - const struct built_in_curve *curve = &OPENSSL_built_in_curves[i]; + // Look for a matching curve. + const struct built_in_curves *const curves = OPENSSL_built_in_curves(); + for (size_t i = 0; i < OPENSSL_NUM_BUILT_IN_CURVES; i++) { + const struct built_in_curve *curve = &curves->curves[i]; if (CBS_len(&named_curve) == curve->oid_len && - OPENSSL_memcmp(CBS_data(&named_curve), curve->oid, curve->oid_len) == 0) { + OPENSSL_memcmp(CBS_data(&named_curve), curve->oid, curve->oid_len) == + 0) { return EC_GROUP_new_by_curve_name(curve->nid); } } @@ -352,9 +354,9 @@ int EC_KEY_marshal_curve_name(CBB *cbb, const EC_GROUP *group) { return 0; } - unsigned i; - for (i = 0; OPENSSL_built_in_curves[i].nid != NID_undef; i++) { - const struct built_in_curve *curve = &OPENSSL_built_in_curves[i]; + const struct built_in_curves *const curves = OPENSSL_built_in_curves(); + for (size_t i = 0; i < OPENSSL_NUM_BUILT_IN_CURVES; i++) { + const struct built_in_curve *curve = &curves->curves[i]; if (curve->nid == nid) { CBB child; return CBB_add_asn1(cbb, &child, CBS_ASN1_OBJECT) && @@ -372,32 +374,32 @@ EC_GROUP *EC_KEY_parse_parameters(CBS *cbs) { return EC_KEY_parse_curve_name(cbs); } - /* OpenSSL sometimes produces ECPrivateKeys with explicitly-encoded versions - * of named curves. - * - * TODO(davidben): Remove support for this. */ + // OpenSSL sometimes produces ECPrivateKeys with explicitly-encoded versions + // of named curves. + // + // TODO(davidben): Remove support for this. CBS prime, a, b, base_x, base_y, order; if (!parse_explicit_prime_curve(cbs, &prime, &a, &b, &base_x, &base_y, &order)) { return NULL; } - /* Look for a matching prime curve. */ - unsigned i; - for (i = 0; OPENSSL_built_in_curves[i].nid != NID_undef; i++) { - const struct built_in_curve *curve = &OPENSSL_built_in_curves[i]; - const unsigned param_len = curve->data->param_len; - /* |curve->data->data| is ordered p, a, b, x, y, order, each component - * zero-padded up to the field length. Although SEC 1 states that the - * Field-Element-to-Octet-String conversion also pads, OpenSSL mis-encodes - * |a| and |b|, so this comparison must allow omitting leading zeros. (This - * is relevant for P-521 whose |b| has a leading 0.) */ - if (integers_equal(&prime, curve->data->data, param_len) && - integers_equal(&a, curve->data->data + param_len, param_len) && - integers_equal(&b, curve->data->data + param_len * 2, param_len) && - integers_equal(&base_x, curve->data->data + param_len * 3, param_len) && - integers_equal(&base_y, curve->data->data + param_len * 4, param_len) && - integers_equal(&order, curve->data->data + param_len * 5, param_len)) { + // Look for a matching prime curve. + const struct built_in_curves *const curves = OPENSSL_built_in_curves(); + for (size_t i = 0; i < OPENSSL_NUM_BUILT_IN_CURVES; i++) { + const struct built_in_curve *curve = &curves->curves[i]; + const unsigned param_len = curve->param_len; + // |curve->params| is ordered p, a, b, x, y, order, each component + // zero-padded up to the field length. Although SEC 1 states that the + // Field-Element-to-Octet-String conversion also pads, OpenSSL mis-encodes + // |a| and |b|, so this comparison must allow omitting leading zeros. (This + // is relevant for P-521 whose |b| has a leading 0.) 
+ if (integers_equal(&prime, curve->params, param_len) && + integers_equal(&a, curve->params + param_len, param_len) && + integers_equal(&b, curve->params + param_len * 2, param_len) && + integers_equal(&base_x, curve->params + param_len * 3, param_len) && + integers_equal(&base_y, curve->params + param_len * 4, param_len) && + integers_equal(&order, curve->params + param_len * 5, param_len)) { return EC_GROUP_new_by_curve_name(curve->nid); } } @@ -406,9 +408,20 @@ EC_GROUP *EC_KEY_parse_parameters(CBS *cbs) { return NULL; } +int EC_POINT_point2cbb(CBB *out, const EC_GROUP *group, const EC_POINT *point, + point_conversion_form_t form, BN_CTX *ctx) { + size_t len = EC_POINT_point2oct(group, point, form, NULL, 0, ctx); + if (len == 0) { + return 0; + } + uint8_t *p; + return CBB_add_space(out, &p, len) && + EC_POINT_point2oct(group, point, form, p, len, ctx) == len; +} + EC_KEY *d2i_ECPrivateKey(EC_KEY **out, const uint8_t **inp, long len) { - /* This function treats its |out| parameter differently from other |d2i| - * functions. If supplied, take the group from |*out|. */ + // This function treats its |out| parameter differently from other |d2i| + // functions. If supplied, take the group from |*out|. const EC_GROUP *group = NULL; if (out != NULL && *out != NULL) { group = EC_KEY_get0_group(*out); @@ -502,7 +515,7 @@ EC_KEY *o2i_ECPublicKey(EC_KEY **keyp, const uint8_t **inp, long len) { OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); return NULL; } - /* save the point conversion form */ + // save the point conversion form ret->conv_form = (point_conversion_form_t)(*inp[0] & ~0x01); *inp += len; return ret; @@ -521,7 +534,7 @@ int i2o_ECPublicKey(const EC_KEY *key, uint8_t **outp) { 0, NULL); if (outp == NULL || buf_len == 0) { - /* out == NULL => just return the length of the octet string */ + // out == NULL => just return the length of the octet string return buf_len; } diff --git a/Sources/BoringSSL/crypto/ecdh/ecdh.c b/Sources/BoringSSL/crypto/ecdh/ecdh.c index 22b216ef4..f38de2f1f 100644 --- a/Sources/BoringSSL/crypto/ecdh/ecdh.c +++ b/Sources/BoringSSL/crypto/ecdh/ecdh.c @@ -138,7 +138,7 @@ int ECDH_compute_key(void *out, size_t outlen, const EC_POINT *pub_key, goto err; } } else { - /* no KDF, just copy as much as we can */ + // no KDF, just copy as much as we can if (buflen < outlen) { outlen = buflen; } diff --git a/Sources/BoringSSL/crypto/ecdsa/ecdsa.c b/Sources/BoringSSL/crypto/ecdsa/ecdsa.c deleted file mode 100644 index e1a0525fc..000000000 --- a/Sources/BoringSSL/crypto/ecdsa/ecdsa.c +++ /dev/null @@ -1,479 +0,0 @@ -/* ==================================================================== - * Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" - * - * 4. 
The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@OpenSSL.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). */ - -#include - -#include -#include - -#include -#include -#include -#include - -#include "../bn/internal.h" -#include "../ec/internal.h" -#include "../internal.h" - - -int ECDSA_sign(int type, const uint8_t *digest, size_t digest_len, uint8_t *sig, - unsigned int *sig_len, const EC_KEY *eckey) { - if (eckey->ecdsa_meth && eckey->ecdsa_meth->sign) { - return eckey->ecdsa_meth->sign(digest, digest_len, sig, sig_len, - (EC_KEY*) eckey /* cast away const */); - } - - return ECDSA_sign_ex(type, digest, digest_len, sig, sig_len, NULL, NULL, - eckey); -} - -int ECDSA_verify(int type, const uint8_t *digest, size_t digest_len, - const uint8_t *sig, size_t sig_len, const EC_KEY *eckey) { - ECDSA_SIG *s; - int ret = 0; - uint8_t *der = NULL; - - /* Decode the ECDSA signature. */ - s = ECDSA_SIG_from_bytes(sig, sig_len); - if (s == NULL) { - goto err; - } - - /* Defend against potential laxness in the DER parser. */ - size_t der_len; - if (!ECDSA_SIG_to_bytes(&der, &der_len, s) || - der_len != sig_len || OPENSSL_memcmp(sig, der, sig_len) != 0) { - /* This should never happen. crypto/bytestring is strictly DER. */ - OPENSSL_PUT_ERROR(ECDSA, ERR_R_INTERNAL_ERROR); - goto err; - } - - ret = ECDSA_do_verify(digest, digest_len, s, eckey); - -err: - OPENSSL_free(der); - ECDSA_SIG_free(s); - return ret; -} - -/* digest_to_bn interprets |digest_len| bytes from |digest| as a big-endian - * number and sets |out| to that value. It then truncates |out| so that it's, - * at most, as long as |order|. It returns one on success and zero otherwise. */ -static int digest_to_bn(BIGNUM *out, const uint8_t *digest, size_t digest_len, - const BIGNUM *order) { - size_t num_bits; - - num_bits = BN_num_bits(order); - /* Need to truncate digest if it is too long: first truncate whole - * bytes. 
*/ - if (8 * digest_len > num_bits) { - digest_len = (num_bits + 7) / 8; - } - if (!BN_bin2bn(digest, digest_len, out)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - return 0; - } - - /* If still too long truncate remaining bits with a shift */ - if ((8 * digest_len > num_bits) && - !BN_rshift(out, out, 8 - (num_bits & 0x7))) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - return 0; - } - - return 1; -} - -ECDSA_SIG *ECDSA_do_sign(const uint8_t *digest, size_t digest_len, - const EC_KEY *key) { - return ECDSA_do_sign_ex(digest, digest_len, NULL, NULL, key); -} - -int ECDSA_do_verify(const uint8_t *digest, size_t digest_len, - const ECDSA_SIG *sig, const EC_KEY *eckey) { - int ret = 0; - BN_CTX *ctx; - BIGNUM *u1, *u2, *m, *X; - EC_POINT *point = NULL; - const EC_GROUP *group; - const EC_POINT *pub_key; - - /* check input values */ - if ((group = EC_KEY_get0_group(eckey)) == NULL || - (pub_key = EC_KEY_get0_public_key(eckey)) == NULL || - sig == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_MISSING_PARAMETERS); - return 0; - } - - ctx = BN_CTX_new(); - if (!ctx) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); - return 0; - } - BN_CTX_start(ctx); - u1 = BN_CTX_get(ctx); - u2 = BN_CTX_get(ctx); - m = BN_CTX_get(ctx); - X = BN_CTX_get(ctx); - if (u1 == NULL || u2 == NULL || m == NULL || X == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - - const BIGNUM *order = EC_GROUP_get0_order(group); - if (BN_is_zero(sig->r) || BN_is_negative(sig->r) || - BN_ucmp(sig->r, order) >= 0 || BN_is_zero(sig->s) || - BN_is_negative(sig->s) || BN_ucmp(sig->s, order) >= 0) { - OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); - ret = 0; /* signature is invalid */ - goto err; - } - /* calculate tmp1 = inv(S) mod order */ - int no_inverse; - if (!BN_mod_inverse_odd(u2, &no_inverse, sig->s, order, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - if (!digest_to_bn(m, digest, digest_len, order)) { - goto err; - } - /* u1 = m * tmp mod order */ - if (!BN_mod_mul(u1, m, u2, order, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - /* u2 = r * w mod q */ - if (!BN_mod_mul(u2, sig->r, u2, order, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - - point = EC_POINT_new(group); - if (point == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); - goto err; - } - if (!EC_POINT_mul(group, point, u1, pub_key, u2, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); - goto err; - } - if (!EC_POINT_get_affine_coordinates_GFp(group, point, X, NULL, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); - goto err; - } - if (!BN_nnmod(u1, X, order, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - /* if the signature is correct u1 is equal to sig->r */ - ret = (BN_ucmp(u1, sig->r) == 0); - -err: - BN_CTX_end(ctx); - BN_CTX_free(ctx); - EC_POINT_free(point); - return ret; -} - -static int ecdsa_sign_setup(const EC_KEY *eckey, BN_CTX *ctx_in, BIGNUM **kinvp, - BIGNUM **rp, const uint8_t *digest, - size_t digest_len) { - BN_CTX *ctx = NULL; - BIGNUM *k = NULL, *r = NULL, *tmp = NULL; - EC_POINT *tmp_point = NULL; - const EC_GROUP *group; - int ret = 0; - - if (eckey == NULL || (group = EC_KEY_get0_group(eckey)) == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_PASSED_NULL_PARAMETER); - return 0; - } - - if (ctx_in == NULL) { - if ((ctx = BN_CTX_new()) == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); - return 0; - } - } else { - ctx = ctx_in; - } - - k = BN_new(); /* this value is later returned in *kinvp */ - r = BN_new(); /* this value is 
later returned in *rp */ - tmp = BN_new(); - if (k == NULL || r == NULL || tmp == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); - goto err; - } - tmp_point = EC_POINT_new(group); - if (tmp_point == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); - goto err; - } - - const BIGNUM *order = EC_GROUP_get0_order(group); - - do { - /* If possible, we'll include the private key and message digest in the k - * generation. The |digest| argument is only empty if |ECDSA_sign_setup| is - * being used. */ - if (digest_len > 0) { - do { - if (!BN_generate_dsa_nonce(k, order, EC_KEY_get0_private_key(eckey), - digest, digest_len, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_RANDOM_NUMBER_GENERATION_FAILED); - goto err; - } - } while (BN_is_zero(k)); - } else if (!BN_rand_range_ex(k, 1, order)) { - OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_RANDOM_NUMBER_GENERATION_FAILED); - goto err; - } - - /* We do not want timing information to leak the length of k, - * so we compute G*k using an equivalent scalar of fixed - * bit-length. */ - - if (!BN_add(k, k, order)) { - goto err; - } - if (BN_num_bits(k) <= BN_num_bits(order)) { - if (!BN_add(k, k, order)) { - goto err; - } - } - - /* compute r the x-coordinate of generator * k */ - if (!EC_POINT_mul(group, tmp_point, k, NULL, NULL, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); - goto err; - } - if (!EC_POINT_get_affine_coordinates_GFp(group, tmp_point, tmp, NULL, - ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); - goto err; - } - - if (!BN_nnmod(r, tmp, order, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - } while (BN_is_zero(r)); - - /* Compute the inverse of k. The order is a prime, so use Fermat's Little - * Theorem. Note |ec_group_get_mont_data| may return NULL but - * |bn_mod_inverse_prime| allows this. 
*/ - if (!bn_mod_inverse_prime(k, k, order, ctx, ec_group_get_mont_data(group))) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - /* clear old values if necessary */ - BN_clear_free(*rp); - BN_clear_free(*kinvp); - - /* save the pre-computed values */ - *rp = r; - *kinvp = k; - ret = 1; - -err: - if (!ret) { - BN_clear_free(k); - BN_clear_free(r); - } - if (ctx_in == NULL) { - BN_CTX_free(ctx); - } - EC_POINT_free(tmp_point); - BN_clear_free(tmp); - return ret; -} - -int ECDSA_sign_setup(const EC_KEY *eckey, BN_CTX *ctx, BIGNUM **kinv, - BIGNUM **rp) { - return ecdsa_sign_setup(eckey, ctx, kinv, rp, NULL, 0); -} - -ECDSA_SIG *ECDSA_do_sign_ex(const uint8_t *digest, size_t digest_len, - const BIGNUM *in_kinv, const BIGNUM *in_r, - const EC_KEY *eckey) { - int ok = 0; - BIGNUM *kinv = NULL, *s, *m = NULL, *tmp = NULL; - const BIGNUM *ckinv; - BN_CTX *ctx = NULL; - const EC_GROUP *group; - ECDSA_SIG *ret; - const BIGNUM *priv_key; - - if (eckey->ecdsa_meth && eckey->ecdsa_meth->sign) { - OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_NOT_IMPLEMENTED); - return NULL; - } - - group = EC_KEY_get0_group(eckey); - priv_key = EC_KEY_get0_private_key(eckey); - - if (group == NULL || priv_key == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_PASSED_NULL_PARAMETER); - return NULL; - } - - ret = ECDSA_SIG_new(); - if (!ret) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); - return NULL; - } - s = ret->s; - - if ((ctx = BN_CTX_new()) == NULL || - (tmp = BN_new()) == NULL || - (m = BN_new()) == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); - goto err; - } - - const BIGNUM *order = EC_GROUP_get0_order(group); - - if (!digest_to_bn(m, digest, digest_len, order)) { - goto err; - } - for (;;) { - if (in_kinv == NULL || in_r == NULL) { - if (!ecdsa_sign_setup(eckey, ctx, &kinv, &ret->r, digest, digest_len)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_ECDSA_LIB); - goto err; - } - ckinv = kinv; - } else { - ckinv = in_kinv; - if (BN_copy(ret->r, in_r) == NULL) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); - goto err; - } - } - - if (!BN_mod_mul(tmp, priv_key, ret->r, order, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - if (!BN_mod_add_quick(s, tmp, m, order)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - if (!BN_mod_mul(s, s, ckinv, order, ctx)) { - OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); - goto err; - } - if (BN_is_zero(s)) { - /* if kinv and r have been supplied by the caller - * don't to generate new kinv and r values */ - if (in_kinv != NULL && in_r != NULL) { - OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_NEED_NEW_SETUP_VALUES); - goto err; - } - } else { - /* s != 0 => we have a valid signature */ - break; - } - } - - ok = 1; - -err: - if (!ok) { - ECDSA_SIG_free(ret); - ret = NULL; - } - BN_CTX_free(ctx); - BN_clear_free(m); - BN_clear_free(tmp); - BN_clear_free(kinv); - return ret; -} - -int ECDSA_sign_ex(int type, const uint8_t *digest, size_t digest_len, - uint8_t *sig, unsigned int *sig_len, const BIGNUM *kinv, - const BIGNUM *r, const EC_KEY *eckey) { - int ret = 0; - ECDSA_SIG *s = NULL; - - if (eckey->ecdsa_meth && eckey->ecdsa_meth->sign) { - OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_NOT_IMPLEMENTED); - *sig_len = 0; - goto err; - } - - s = ECDSA_do_sign_ex(digest, digest_len, kinv, r, eckey); - if (s == NULL) { - *sig_len = 0; - goto err; - } - - CBB cbb; - CBB_zero(&cbb); - size_t len; - if (!CBB_init_fixed(&cbb, sig, ECDSA_size(eckey)) || - !ECDSA_SIG_marshal(&cbb, s) || - !CBB_finish(&cbb, NULL, &len)) { - OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_ENCODE_ERROR); - 
CBB_cleanup(&cbb); - *sig_len = 0; - goto err; - } - *sig_len = (unsigned)len; - ret = 1; - -err: - ECDSA_SIG_free(s); - return ret; -} diff --git a/Sources/BoringSSL/crypto/ecdsa/ecdsa_asn1.c b/Sources/BoringSSL/crypto/ecdsa_extra/ecdsa_asn1.c similarity index 74% rename from Sources/BoringSSL/crypto/ecdsa/ecdsa_asn1.c rename to Sources/BoringSSL/crypto/ecdsa_extra/ecdsa_asn1.c index d41a53667..fbf4ccafe 100644 --- a/Sources/BoringSSL/crypto/ecdsa/ecdsa_asn1.c +++ b/Sources/BoringSSL/crypto/ecdsa_extra/ecdsa_asn1.c @@ -62,7 +62,79 @@ #include #include "../bytestring/internal.h" -#include "../ec/internal.h" +#include "../fipsmodule/ec/internal.h" +#include "../internal.h" + + +int ECDSA_sign(int type, const uint8_t *digest, size_t digest_len, uint8_t *sig, + unsigned int *sig_len, const EC_KEY *eckey) { + if (eckey->ecdsa_meth && eckey->ecdsa_meth->sign) { + return eckey->ecdsa_meth->sign(digest, digest_len, sig, sig_len, + (EC_KEY*) eckey /* cast away const */); + } + + int ret = 0; + ECDSA_SIG *s = NULL; + + if (eckey->ecdsa_meth && eckey->ecdsa_meth->sign) { + OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_NOT_IMPLEMENTED); + *sig_len = 0; + goto err; + } + + s = ECDSA_do_sign(digest, digest_len, eckey); + if (s == NULL) { + *sig_len = 0; + goto err; + } + + CBB cbb; + CBB_zero(&cbb); + size_t len; + if (!CBB_init_fixed(&cbb, sig, ECDSA_size(eckey)) || + !ECDSA_SIG_marshal(&cbb, s) || + !CBB_finish(&cbb, NULL, &len)) { + OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_ENCODE_ERROR); + CBB_cleanup(&cbb); + *sig_len = 0; + goto err; + } + *sig_len = (unsigned)len; + ret = 1; + +err: + ECDSA_SIG_free(s); + return ret; +} + +int ECDSA_verify(int type, const uint8_t *digest, size_t digest_len, + const uint8_t *sig, size_t sig_len, const EC_KEY *eckey) { + ECDSA_SIG *s; + int ret = 0; + uint8_t *der = NULL; + + // Decode the ECDSA signature. + s = ECDSA_SIG_from_bytes(sig, sig_len); + if (s == NULL) { + goto err; + } + + // Defend against potential laxness in the DER parser. + size_t der_len; + if (!ECDSA_SIG_to_bytes(&der, &der_len, s) || + der_len != sig_len || OPENSSL_memcmp(sig, der, sig_len) != 0) { + // This should never happen. crypto/bytestring is strictly DER. + OPENSSL_PUT_ERROR(ECDSA, ERR_R_INTERNAL_ERROR); + goto err; + } + + ret = ECDSA_do_verify(digest, digest_len, s, eckey); + +err: + OPENSSL_free(der); + ECDSA_SIG_free(s); + return ret; +} size_t ECDSA_size(const EC_KEY *key) { @@ -85,30 +157,6 @@ size_t ECDSA_size(const EC_KEY *key) { return ECDSA_SIG_max_len(group_order_size); } -ECDSA_SIG *ECDSA_SIG_new(void) { - ECDSA_SIG *sig = OPENSSL_malloc(sizeof(ECDSA_SIG)); - if (sig == NULL) { - return NULL; - } - sig->r = BN_new(); - sig->s = BN_new(); - if (sig->r == NULL || sig->s == NULL) { - ECDSA_SIG_free(sig); - return NULL; - } - return sig; -} - -void ECDSA_SIG_free(ECDSA_SIG *sig) { - if (sig == NULL) { - return; - } - - BN_free(sig->r); - BN_free(sig->s); - OPENSSL_free(sig); -} - ECDSA_SIG *ECDSA_SIG_parse(CBS *cbs) { ECDSA_SIG *ret = ECDSA_SIG_new(); if (ret == NULL) { @@ -164,8 +212,8 @@ int ECDSA_SIG_to_bytes(uint8_t **out_bytes, size_t *out_len, return 1; } -/* der_len_len returns the number of bytes needed to represent a length of |len| - * in DER. */ +// der_len_len returns the number of bytes needed to represent a length of |len| +// in DER. static size_t der_len_len(size_t len) { if (len < 0x80) { return 1; @@ -179,18 +227,18 @@ static size_t der_len_len(size_t len) { } size_t ECDSA_SIG_max_len(size_t order_len) { - /* Compute the maximum length of an |order_len| byte integer. 
Defensively - * assume that the leading 0x00 is included. */ + // Compute the maximum length of an |order_len| byte integer. Defensively + // assume that the leading 0x00 is included. size_t integer_len = 1 /* tag */ + der_len_len(order_len + 1) + 1 + order_len; if (integer_len < order_len) { return 0; } - /* An ECDSA signature is two INTEGERs. */ + // An ECDSA signature is two INTEGERs. size_t value_len = 2 * integer_len; if (value_len < integer_len) { return 0; } - /* Add the header. */ + // Add the header. size_t ret = 1 /* tag */ + der_len_len(value_len) + value_len; if (ret < value_len) { return 0; diff --git a/Sources/BoringSSL/crypto/engine/engine.c b/Sources/BoringSSL/crypto/engine/engine.c index 141ed230f..875f1480d 100644 --- a/Sources/BoringSSL/crypto/engine/engine.c +++ b/Sources/BoringSSL/crypto/engine/engine.c @@ -42,15 +42,15 @@ ENGINE *ENGINE_new(void) { } void ENGINE_free(ENGINE *engine) { - /* Methods are currently required to be static so are not unref'ed. */ + // Methods are currently required to be static so are not unref'ed. OPENSSL_free(engine); } -/* set_method takes a pointer to a method and its given size and sets - * |*out_member| to point to it. This function might want to be extended in the - * future to support making a copy of the method so that a stable ABI for - * ENGINEs can be supported. But, for the moment, all *_METHODS must be - * static. */ +// set_method takes a pointer to a method and its given size and sets +// |*out_member| to point to it. This function might want to be extended in the +// future to support making a copy of the method so that a stable ABI for +// ENGINEs can be supported. But, for the moment, all *_METHODS must be +// static. static int set_method(void **out_member, const void *method, size_t method_size, size_t compiled_size) { const struct openssl_method_common_st *common = method; diff --git a/Sources/BoringSSL/crypto/err/err.c b/Sources/BoringSSL/crypto/err/err.c index cbb1260e5..c7bff1680 100644 --- a/Sources/BoringSSL/crypto/err/err.c +++ b/Sources/BoringSSL/crypto/err/err.c @@ -123,32 +123,64 @@ OPENSSL_MSVC_PRAGMA(warning(pop)) #include #include "../internal.h" +#include "./internal.h" + + +struct err_error_st { + // file contains the filename where the error occurred. + const char *file; + // data contains a NUL-terminated string with optional data. It must be freed + // with |OPENSSL_free|. + char *data; + // packed contains the error library and reason, as packed by ERR_PACK. + uint32_t packed; + // line contains the line number where the error occurred. + uint16_t line; + // mark indicates a reversion point in the queue. See |ERR_pop_to_mark|. + unsigned mark : 1; +}; +// ERR_STATE contains the per-thread, error queue. +typedef struct err_state_st { + // errors contains the ERR_NUM_ERRORS most recent errors, organised as a ring + // buffer. + struct err_error_st errors[ERR_NUM_ERRORS]; + // top contains the index one past the most recent error. If |top| equals + // |bottom| then the queue is empty. + unsigned top; + // bottom contains the index of the last error in the queue. + unsigned bottom; + + // to_free, if not NULL, contains a pointer owned by this structure that was + // previously a |data| pointer of one of the elements of |errors|. + void *to_free; +} ERR_STATE; extern const uint32_t kOpenSSLReasonValues[]; extern const size_t kOpenSSLReasonValuesLen; extern const char kOpenSSLReasonStringData[]; -/* err_clear_data frees the optional |data| member of the given error. 
*/ -static void err_clear_data(struct err_error_st *error) { - if ((error->flags & ERR_FLAG_MALLOCED) != 0) { - OPENSSL_free(error->data); - } - error->data = NULL; - error->flags &= ~ERR_FLAG_MALLOCED; -} - -/* err_clear clears the given queued error. */ +// err_clear clears the given queued error. static void err_clear(struct err_error_st *error) { - err_clear_data(error); + OPENSSL_free(error->data); OPENSSL_memset(error, 0, sizeof(struct err_error_st)); } -/* global_next_library contains the next custom library value to return. */ +static void err_copy(struct err_error_st *dst, const struct err_error_st *src) { + err_clear(dst); + dst->file = src->file; + if (src->data != NULL) { + dst->data = OPENSSL_strdup(src->data); + } + dst->packed = src->packed; + dst->line = src->line; +} + +// global_next_library contains the next custom library value to return. static int global_next_library = ERR_NUM_LIBS; -/* global_next_library_mutex protects |global_next_library| from concurrent - * updates. */ +// global_next_library_mutex protects |global_next_library| from concurrent +// updates. static struct CRYPTO_STATIC_MUTEX global_next_library_mutex = CRYPTO_STATIC_MUTEX_INIT; @@ -159,15 +191,14 @@ static void err_state_free(void *statep) { return; } - unsigned i; - for (i = 0; i < ERR_NUM_ERRORS; i++) { + for (unsigned i = 0; i < ERR_NUM_ERRORS; i++) { err_clear(&state->errors[i]); } OPENSSL_free(state->to_free); OPENSSL_free(state); } -/* err_get_state gets the ERR_STATE object for the current thread. */ +// err_get_state gets the ERR_STATE object for the current thread. static ERR_STATE *err_get_state(void) { ERR_STATE *state = CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_ERR); if (state == NULL) { @@ -199,7 +230,7 @@ static uint32_t get_error_values(int inc, int top, const char **file, int *line, if (top) { assert(!inc); - /* last error */ + // last error i = state->top; } else { i = (state->bottom + 1) % ERR_NUM_ERRORS; @@ -227,20 +258,19 @@ static uint32_t get_error_values(int inc, int top, const char **file, int *line, } else { *data = error->data; if (flags != NULL) { - *flags = error->flags & ERR_FLAG_PUBLIC_MASK; + *flags = ERR_FLAG_STRING; } - /* If this error is being removed, take ownership of data from - * the error. The semantics are such that the caller doesn't - * take ownership either. Instead the error system takes - * ownership and retains it until the next call that affects the - * error queue. */ + // If this error is being removed, take ownership of data from + // the error. The semantics are such that the caller doesn't + // take ownership either. Instead the error system takes + // ownership and retains it until the next call that affects the + // error queue. if (inc) { - if (error->flags & ERR_FLAG_MALLOCED) { + if (error->data != NULL) { OPENSSL_free(state->to_free); state->to_free = error->data; } error->data = NULL; - error->flags = 0; } } } @@ -342,13 +372,13 @@ char *ERR_error_string(uint32_t packed_error, char *ret) { static char buf[ERR_ERROR_STRING_BUF_LEN]; if (ret == NULL) { - /* TODO(fork): remove this. */ + // TODO(fork): remove this. ret = buf; } #if !defined(NDEBUG) - /* This is aimed to help catch callers who don't provide - * |ERR_ERROR_STRING_BUF_LEN| bytes of space. */ + // This is aimed to help catch callers who don't provide + // |ERR_ERROR_STRING_BUF_LEN| bytes of space. 
OPENSSL_memset(ret, 0, ERR_ERROR_STRING_BUF_LEN); #endif @@ -386,15 +416,15 @@ void ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) { packed_error, lib_str, reason_str); if (strlen(buf) == len - 1) { - /* output may be truncated; make sure we always have 5 colon-separated - * fields, i.e. 4 colons. */ + // output may be truncated; make sure we always have 5 colon-separated + // fields, i.e. 4 colons. static const unsigned num_colons = 4; unsigned i; char *s = buf; if (len <= num_colons) { - /* In this situation it's not possible to ensure that the correct number - * of colons are included in the output. */ + // In this situation it's not possible to ensure that the correct number + // of colons are included in the output. return; } @@ -403,10 +433,10 @@ void ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) { char *last_pos = &buf[len - 1] - num_colons + i; if (colon == NULL || colon > last_pos) { - /* set colon |i| at last possible position (buf[len-1] is the - * terminating 0). If we're setting this colon, then all whole of the - * rest of the string must be colons in order to have the correct - * number. */ + // set colon |i| at last possible position (buf[len-1] is the + // terminating 0). If we're setting this colon, then all whole of the + // rest of the string must be colons in order to have the correct + // number. OPENSSL_memset(last_pos, ':', num_colons - i); break; } @@ -431,25 +461,25 @@ static int err_string_cmp(const void *a, const void *b) { } } -/* err_string_lookup looks up the string associated with |lib| and |key| in - * |values| and |string_data|. It returns the string or NULL if not found. */ +// err_string_lookup looks up the string associated with |lib| and |key| in +// |values| and |string_data|. It returns the string or NULL if not found. static const char *err_string_lookup(uint32_t lib, uint32_t key, const uint32_t *values, size_t num_values, const char *string_data) { - /* |values| points to data in err_data.h, which is generated by - * err_data_generate.go. It's an array of uint32_t values. Each value has the - * following structure: - * | lib | key | offset | - * |6 bits| 11 bits | 15 bits | - * - * The |lib| value is a library identifier: one of the |ERR_LIB_*| values. - * The |key| is a reason code, depending on the context. - * The |offset| is the number of bytes from the start of |string_data| where - * the (NUL terminated) string for this value can be found. - * - * Values are sorted based on treating the |lib| and |key| part as an - * unsigned integer. */ + // |values| points to data in err_data.h, which is generated by + // err_data_generate.go. It's an array of uint32_t values. Each value has the + // following structure: + // | lib | key | offset | + // |6 bits| 11 bits | 15 bits | + // + // The |lib| value is a library identifier: one of the |ERR_LIB_*| values. + // The |key| is a reason code, depending on the context. + // The |offset| is the number of bytes from the start of |string_data| where + // the (NUL terminated) string for this value can be found. + // + // Values are sorted based on treating the |lib| and |key| part as an + // unsigned integer. 
if (lib >= (1 << 6) || key >= (1 << 11)) { return NULL; } @@ -465,38 +495,38 @@ static const char *err_string_lookup(uint32_t lib, uint32_t key, static const char *const kLibraryNames[ERR_NUM_LIBS] = { "invalid library (0)", - "unknown library", /* ERR_LIB_NONE */ - "system library", /* ERR_LIB_SYS */ - "bignum routines", /* ERR_LIB_BN */ - "RSA routines", /* ERR_LIB_RSA */ - "Diffie-Hellman routines", /* ERR_LIB_DH */ - "public key routines", /* ERR_LIB_EVP */ - "memory buffer routines", /* ERR_LIB_BUF */ - "object identifier routines", /* ERR_LIB_OBJ */ - "PEM routines", /* ERR_LIB_PEM */ - "DSA routines", /* ERR_LIB_DSA */ - "X.509 certificate routines", /* ERR_LIB_X509 */ - "ASN.1 encoding routines", /* ERR_LIB_ASN1 */ - "configuration file routines", /* ERR_LIB_CONF */ - "common libcrypto routines", /* ERR_LIB_CRYPTO */ - "elliptic curve routines", /* ERR_LIB_EC */ - "SSL routines", /* ERR_LIB_SSL */ - "BIO routines", /* ERR_LIB_BIO */ - "PKCS7 routines", /* ERR_LIB_PKCS7 */ - "PKCS8 routines", /* ERR_LIB_PKCS8 */ - "X509 V3 routines", /* ERR_LIB_X509V3 */ - "random number generator", /* ERR_LIB_RAND */ - "ENGINE routines", /* ERR_LIB_ENGINE */ - "OCSP routines", /* ERR_LIB_OCSP */ - "UI routines", /* ERR_LIB_UI */ - "COMP routines", /* ERR_LIB_COMP */ - "ECDSA routines", /* ERR_LIB_ECDSA */ - "ECDH routines", /* ERR_LIB_ECDH */ - "HMAC routines", /* ERR_LIB_HMAC */ - "Digest functions", /* ERR_LIB_DIGEST */ - "Cipher functions", /* ERR_LIB_CIPHER */ - "HKDF functions", /* ERR_LIB_HKDF */ - "User defined functions", /* ERR_LIB_USER */ + "unknown library", // ERR_LIB_NONE + "system library", // ERR_LIB_SYS + "bignum routines", // ERR_LIB_BN + "RSA routines", // ERR_LIB_RSA + "Diffie-Hellman routines", // ERR_LIB_DH + "public key routines", // ERR_LIB_EVP + "memory buffer routines", // ERR_LIB_BUF + "object identifier routines", // ERR_LIB_OBJ + "PEM routines", // ERR_LIB_PEM + "DSA routines", // ERR_LIB_DSA + "X.509 certificate routines", // ERR_LIB_X509 + "ASN.1 encoding routines", // ERR_LIB_ASN1 + "configuration file routines", // ERR_LIB_CONF + "common libcrypto routines", // ERR_LIB_CRYPTO + "elliptic curve routines", // ERR_LIB_EC + "SSL routines", // ERR_LIB_SSL + "BIO routines", // ERR_LIB_BIO + "PKCS7 routines", // ERR_LIB_PKCS7 + "PKCS8 routines", // ERR_LIB_PKCS8 + "X509 V3 routines", // ERR_LIB_X509V3 + "random number generator", // ERR_LIB_RAND + "ENGINE routines", // ERR_LIB_ENGINE + "OCSP routines", // ERR_LIB_OCSP + "UI routines", // ERR_LIB_UI + "COMP routines", // ERR_LIB_COMP + "ECDSA routines", // ERR_LIB_ECDSA + "ECDH routines", // ERR_LIB_ECDH + "HMAC routines", // ERR_LIB_HMAC + "Digest functions", // ERR_LIB_DIGEST + "Cipher functions", // ERR_LIB_CIPHER + "HKDF functions", // ERR_LIB_HKDF + "User defined functions", // ERR_LIB_USER }; const char *ERR_lib_error_string(uint32_t packed_error) { @@ -555,8 +585,8 @@ void ERR_print_errors_cb(ERR_print_errors_callback_t callback, void *ctx) { int line, flags; uint32_t packed_error; - /* thread_hash is the least-significant bits of the |ERR_STATE| pointer value - * for this thread. */ + // thread_hash is the least-significant bits of the |ERR_STATE| pointer value + // for this thread. const unsigned long thread_hash = (uintptr_t) err_get_state(); for (;;) { @@ -585,24 +615,20 @@ void ERR_print_errors_fp(FILE *file) { ERR_print_errors_cb(print_errors_to_file, file); } -/* err_set_error_data sets the data on the most recent error. The |flags| - * argument is a combination of the |ERR_FLAG_*| values. 
*/ -static void err_set_error_data(char *data, int flags) { +// err_set_error_data sets the data on the most recent error. +static void err_set_error_data(char *data) { ERR_STATE *const state = err_get_state(); struct err_error_st *error; if (state == NULL || state->top == state->bottom) { - if (flags & ERR_FLAG_MALLOCED) { - OPENSSL_free(data); - } + OPENSSL_free(data); return; } error = &state->errors[state->top]; - err_clear_data(error); + OPENSSL_free(error->data); error->data = data; - error->flags = flags; } void ERR_put_error(int library, int unused, int reason, const char *file, @@ -634,9 +660,9 @@ void ERR_put_error(int library, int unused, int reason, const char *file, error->packed = ERR_PACK(library, reason); } -/* ERR_add_error_data_vdata takes a variable number of const char* pointers, - * concatenates them and sets the result as the data on the most recent - * error. */ +// ERR_add_error_data_vdata takes a variable number of const char* pointers, +// concatenates them and sets the result as the data on the most recent +// error. static void err_add_error_vdata(unsigned num, va_list args) { size_t alloced, new_len, len = 0, substr_len; char *buf; @@ -661,7 +687,7 @@ static void err_add_error_vdata(unsigned num, va_list args) { char *new_buf; if (alloced + 20 + 1 < alloced) { - /* overflow. */ + // overflow. OPENSSL_free(buf); return; } @@ -680,7 +706,7 @@ static void err_add_error_vdata(unsigned num, va_list args) { } buf[len] = 0; - err_set_error_data(buf, ERR_FLAG_MALLOCED | ERR_FLAG_STRING); + err_set_error_data(buf); } void ERR_add_error_data(unsigned count, ...) { @@ -695,9 +721,9 @@ void ERR_add_error_dataf(const char *format, ...) { char *buf; static const unsigned buf_len = 256; - /* A fixed-size buffer is used because va_copy (which would be needed in - * order to call vsnprintf twice and measure the buffer) wasn't defined until - * C99. */ + // A fixed-size buffer is used because va_copy (which would be needed in + // order to call vsnprintf twice and measure the buffer) wasn't defined until + // C99. buf = OPENSSL_malloc(buf_len + 1); if (buf == NULL) { return; @@ -708,7 +734,7 @@ void ERR_add_error_dataf(const char *format, ...) 
{ buf[buf_len] = 0; va_end(ap); - err_set_error_data(buf, ERR_FLAG_MALLOCED | ERR_FLAG_STRING); + err_set_error_data(buf); } int ERR_set_mark(void) { @@ -717,7 +743,7 @@ int ERR_set_mark(void) { if (state == NULL || state->bottom == state->top) { return 0; } - state->errors[state->top].flags |= ERR_FLAG_MARK; + state->errors[state->top].mark = 1; return 1; } @@ -731,8 +757,8 @@ int ERR_pop_to_mark(void) { while (state->bottom != state->top) { struct err_error_st *error = &state->errors[state->top]; - if ((error->flags & ERR_FLAG_MARK) != 0) { - error->flags &= ~ERR_FLAG_MARK; + if (error->mark) { + error->mark = 0; return 1; } @@ -754,3 +780,68 @@ void ERR_free_strings(void) {} void ERR_load_BIO_strings(void) {} void ERR_load_ERR_strings(void) {} + +struct err_save_state_st { + struct err_error_st *errors; + size_t num_errors; +}; + +void ERR_SAVE_STATE_free(ERR_SAVE_STATE *state) { + if (state == NULL) { + return; + } + for (size_t i = 0; i < state->num_errors; i++) { + err_clear(&state->errors[i]); + } + OPENSSL_free(state->errors); + OPENSSL_free(state); +} + +ERR_SAVE_STATE *ERR_save_state(void) { + ERR_STATE *const state = err_get_state(); + if (state == NULL || state->top == state->bottom) { + return NULL; + } + + ERR_SAVE_STATE *ret = OPENSSL_malloc(sizeof(ERR_SAVE_STATE)); + if (ret == NULL) { + return NULL; + } + + // Errors are stored in the range (bottom, top]. + size_t num_errors = state->top >= state->bottom + ? state->top - state->bottom + : ERR_NUM_ERRORS + state->top - state->bottom; + assert(num_errors < ERR_NUM_ERRORS); + ret->errors = OPENSSL_malloc(num_errors * sizeof(struct err_error_st)); + if (ret->errors == NULL) { + OPENSSL_free(ret); + return NULL; + } + OPENSSL_memset(ret->errors, 0, num_errors * sizeof(struct err_error_st)); + ret->num_errors = num_errors; + + for (size_t i = 0; i < num_errors; i++) { + size_t j = (state->bottom + i + 1) % ERR_NUM_ERRORS; + err_copy(&ret->errors[i], &state->errors[j]); + } + return ret; +} + +void ERR_restore_state(const ERR_SAVE_STATE *state) { + if (state == NULL || state->num_errors == 0) { + ERR_clear_error(); + return; + } + + ERR_STATE *const dst = err_get_state(); + if (dst == NULL) { + return; + } + + for (size_t i = 0; i < state->num_errors; i++) { + err_copy(&dst->errors[i], &state->errors[i]); + } + dst->top = state->num_errors - 1; + dst->bottom = ERR_NUM_ERRORS - 1; +} diff --git a/Sources/BoringSSL/crypto/err/err_data.c b/Sources/BoringSSL/crypto/err/err_data.c new file mode 100644 index 000000000..c2be4abba --- /dev/null +++ b/Sources/BoringSSL/crypto/err/err_data.c @@ -0,0 +1,63 @@ +/* Copyright (c) 2015, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + + /* This file was generated by err_data_generate.go. 
*/ + +#include +#include +#include + + +OPENSSL_COMPILE_ASSERT(ERR_LIB_NONE == 1, library_values_changed_1); +OPENSSL_COMPILE_ASSERT(ERR_LIB_SYS == 2, library_values_changed_2); +OPENSSL_COMPILE_ASSERT(ERR_LIB_BN == 3, library_values_changed_3); +OPENSSL_COMPILE_ASSERT(ERR_LIB_RSA == 4, library_values_changed_4); +OPENSSL_COMPILE_ASSERT(ERR_LIB_DH == 5, library_values_changed_5); +OPENSSL_COMPILE_ASSERT(ERR_LIB_EVP == 6, library_values_changed_6); +OPENSSL_COMPILE_ASSERT(ERR_LIB_BUF == 7, library_values_changed_7); +OPENSSL_COMPILE_ASSERT(ERR_LIB_OBJ == 8, library_values_changed_8); +OPENSSL_COMPILE_ASSERT(ERR_LIB_PEM == 9, library_values_changed_9); +OPENSSL_COMPILE_ASSERT(ERR_LIB_DSA == 10, library_values_changed_10); +OPENSSL_COMPILE_ASSERT(ERR_LIB_X509 == 11, library_values_changed_11); +OPENSSL_COMPILE_ASSERT(ERR_LIB_ASN1 == 12, library_values_changed_12); +OPENSSL_COMPILE_ASSERT(ERR_LIB_CONF == 13, library_values_changed_13); +OPENSSL_COMPILE_ASSERT(ERR_LIB_CRYPTO == 14, library_values_changed_14); +OPENSSL_COMPILE_ASSERT(ERR_LIB_EC == 15, library_values_changed_15); +OPENSSL_COMPILE_ASSERT(ERR_LIB_SSL == 16, library_values_changed_16); +OPENSSL_COMPILE_ASSERT(ERR_LIB_BIO == 17, library_values_changed_17); +OPENSSL_COMPILE_ASSERT(ERR_LIB_PKCS7 == 18, library_values_changed_18); +OPENSSL_COMPILE_ASSERT(ERR_LIB_PKCS8 == 19, library_values_changed_19); +OPENSSL_COMPILE_ASSERT(ERR_LIB_X509V3 == 20, library_values_changed_20); +OPENSSL_COMPILE_ASSERT(ERR_LIB_RAND == 21, library_values_changed_21); +OPENSSL_COMPILE_ASSERT(ERR_LIB_ENGINE == 22, library_values_changed_22); +OPENSSL_COMPILE_ASSERT(ERR_LIB_OCSP == 23, library_values_changed_23); +OPENSSL_COMPILE_ASSERT(ERR_LIB_UI == 24, library_values_changed_24); +OPENSSL_COMPILE_ASSERT(ERR_LIB_COMP == 25, library_values_changed_25); +OPENSSL_COMPILE_ASSERT(ERR_LIB_ECDSA == 26, library_values_changed_26); +OPENSSL_COMPILE_ASSERT(ERR_LIB_ECDH == 27, library_values_changed_27); +OPENSSL_COMPILE_ASSERT(ERR_LIB_HMAC == 28, library_values_changed_28); +OPENSSL_COMPILE_ASSERT(ERR_LIB_DIGEST == 29, library_values_changed_29); +OPENSSL_COMPILE_ASSERT(ERR_LIB_CIPHER == 30, library_values_changed_30); +OPENSSL_COMPILE_ASSERT(ERR_LIB_HKDF == 31, library_values_changed_31); +OPENSSL_COMPILE_ASSERT(ERR_LIB_USER == 32, library_values_changed_32); +OPENSSL_COMPILE_ASSERT(ERR_NUM_LIBS == 33, library_values_changed_num); + +const uint32_t kOpenSSLReasonValues[] = { +}; + +const size_t kOpenSSLReasonValuesLen = sizeof(kOpenSSLReasonValues) / sizeof(kOpenSSLReasonValues[0]); + +const char kOpenSSLReasonStringData[] = + ""; + diff --git a/Sources/BoringSSL/crypto/err/internal.h b/Sources/BoringSSL/crypto/err/internal.h new file mode 100644 index 000000000..3f2397c1c --- /dev/null +++ b/Sources/BoringSSL/crypto/err/internal.h @@ -0,0 +1,58 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#ifndef OPENSSL_HEADER_CRYPTO_ERR_INTERNAL_H +#define OPENSSL_HEADER_CRYPTO_ERR_INTERNAL_H + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + + +// Private error queue functions. + +// ERR_SAVE_STATE contains a saved representation of the error queue. It is +// slightly more compact than |ERR_STATE| as the error queue will typically not +// contain |ERR_NUM_ERRORS| entries. +typedef struct err_save_state_st ERR_SAVE_STATE; + +// ERR_SAVE_STATE_free releases all memory associated with |state|. +OPENSSL_EXPORT void ERR_SAVE_STATE_free(ERR_SAVE_STATE *state); + +// ERR_save_state returns a newly-allocated |ERR_SAVE_STATE| structure +// containing the current state of the error queue or NULL on allocation +// error. It should be released with |ERR_SAVE_STATE_free|. +OPENSSL_EXPORT ERR_SAVE_STATE *ERR_save_state(void); + +// ERR_restore_state clears the error queue and replaces it with |state|. +OPENSSL_EXPORT void ERR_restore_state(const ERR_SAVE_STATE *state); + + +#if defined(__cplusplus) +} // extern C + +extern "C++" { + +namespace bssl { + +BORINGSSL_MAKE_DELETER(ERR_SAVE_STATE, ERR_SAVE_STATE_free) + +} // namespace bssl + +} // extern C++ +#endif + +#endif // OPENSSL_HEADER_CRYPTO_ERR_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/evp/digestsign.c b/Sources/BoringSSL/crypto/evp/digestsign.c index 69c483ad7..6e4d305ff 100644 --- a/Sources/BoringSSL/crypto/evp/digestsign.c +++ b/Sources/BoringSSL/crypto/evp/digestsign.c @@ -58,17 +58,27 @@ #include #include "internal.h" -#include "../digest/internal.h" +#include "../fipsmodule/digest/internal.h" +enum evp_sign_verify_t { + evp_sign, + evp_verify, +}; + static const struct evp_md_pctx_ops md_pctx_ops = { EVP_PKEY_CTX_free, EVP_PKEY_CTX_dup, }; +static int uses_prehash(EVP_MD_CTX *ctx, enum evp_sign_verify_t op) { + return (op == evp_sign) ? 
(ctx->pctx->pmeth->sign != NULL) + : (ctx->pctx->pmeth->verify != NULL); +} + static int do_sigver_init(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey, - int is_verify) { + enum evp_sign_verify_t op) { if (ctx->pctx == NULL) { ctx->pctx = EVP_PKEY_CTX_new(pkey, e); } @@ -77,12 +87,7 @@ static int do_sigver_init(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, } ctx->pctx_ops = &md_pctx_ops; - if (type == NULL) { - OPENSSL_PUT_ERROR(EVP, EVP_R_NO_DEFAULT_DIGEST); - return 0; - } - - if (is_verify) { + if (op == evp_verify) { if (!EVP_PKEY_verify_init(ctx->pctx)) { return 0; } @@ -91,38 +96,63 @@ static int do_sigver_init(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, return 0; } } - if (!EVP_PKEY_CTX_set_signature_md(ctx->pctx, type)) { + + if (type != NULL && + !EVP_PKEY_CTX_set_signature_md(ctx->pctx, type)) { return 0; } + + if (uses_prehash(ctx, op)) { + if (type == NULL) { + OPENSSL_PUT_ERROR(EVP, EVP_R_NO_DEFAULT_DIGEST); + return 0; + } + if (!EVP_DigestInit_ex(ctx, type, e)) { + return 0; + } + } + if (pctx) { *pctx = ctx->pctx; } - if (!EVP_DigestInit_ex(ctx, type, e)) { - return 0; - } return 1; } int EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey) { - return do_sigver_init(ctx, pctx, type, e, pkey, 0); + return do_sigver_init(ctx, pctx, type, e, pkey, evp_sign); } int EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey) { - return do_sigver_init(ctx, pctx, type, e, pkey, 1); + return do_sigver_init(ctx, pctx, type, e, pkey, evp_verify); } int EVP_DigestSignUpdate(EVP_MD_CTX *ctx, const void *data, size_t len) { + if (!uses_prehash(ctx, evp_sign)) { + OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); + return 0; + } + return EVP_DigestUpdate(ctx, data, len); } int EVP_DigestVerifyUpdate(EVP_MD_CTX *ctx, const void *data, size_t len) { + if (!uses_prehash(ctx, evp_verify)) { + OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); + return 0; + } + return EVP_DigestUpdate(ctx, data, len); } int EVP_DigestSignFinal(EVP_MD_CTX *ctx, uint8_t *out_sig, size_t *out_sig_len) { + if (!uses_prehash(ctx, evp_sign)) { + OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); + return 0; + } + if (out_sig) { EVP_MD_CTX tmp_ctx; int ret; @@ -144,6 +174,11 @@ int EVP_DigestSignFinal(EVP_MD_CTX *ctx, uint8_t *out_sig, int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len) { + if (!uses_prehash(ctx, evp_verify)) { + OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); + return 0; + } + EVP_MD_CTX tmp_ctx; int ret; uint8_t md[EVP_MAX_MD_SIZE]; @@ -157,3 +192,40 @@ int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig, return ret; } + +int EVP_DigestSign(EVP_MD_CTX *ctx, uint8_t *out_sig, size_t *out_sig_len, + const uint8_t *data, size_t data_len) { + if (uses_prehash(ctx, evp_sign)) { + // If |out_sig| is NULL, the caller is only querying the maximum output + // length. |data| should only be incorporated in the final call. 
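// A typical caller of this one-shot API makes two passes under that
// NULL-query convention (an illustrative usage sketch; |msg| and |msg_len|
// are placeholder names, not part of this file):
//   size_t sig_len;
//   if (!EVP_DigestSign(ctx, NULL, &sig_len, msg, msg_len)) { /* error */ }
//   uint8_t *sig = OPENSSL_malloc(sig_len);
//   if (sig == NULL ||
//       !EVP_DigestSign(ctx, sig, &sig_len, msg, msg_len)) { /* error */ }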
+ if (out_sig != NULL && + !EVP_DigestSignUpdate(ctx, data, data_len)) { + return 0; + } + + return EVP_DigestSignFinal(ctx, out_sig, out_sig_len); + } + + if (ctx->pctx->pmeth->sign_message == NULL) { + OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); + return 0; + } + + return ctx->pctx->pmeth->sign_message(ctx->pctx, out_sig, out_sig_len, data, + data_len); +} + +int EVP_DigestVerify(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len, + const uint8_t *data, size_t len) { + if (uses_prehash(ctx, evp_verify)) { + return EVP_DigestVerifyUpdate(ctx, data, len) && + EVP_DigestVerifyFinal(ctx, sig, sig_len); + } + + if (ctx->pctx->pmeth->verify_message == NULL) { + OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); + return 0; + } + + return ctx->pctx->pmeth->verify_message(ctx->pctx, sig, sig_len, data, len); +} diff --git a/Sources/BoringSSL/crypto/evp/evp.c b/Sources/BoringSSL/crypto/evp/evp.c index f08387978..ad5f85bf3 100644 --- a/Sources/BoringSSL/crypto/evp/evp.c +++ b/Sources/BoringSSL/crypto/evp/evp.c @@ -120,13 +120,6 @@ int EVP_PKEY_is_opaque(const EVP_PKEY *pkey) { return 0; } -int EVP_PKEY_supports_digest(const EVP_PKEY *pkey, const EVP_MD *md) { - if (pkey->ameth && pkey->ameth->pkey_supports_digest) { - return pkey->ameth->pkey_supports_digest(pkey, md); - } - return 1; -} - int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { if (a->type != b->type) { return -1; @@ -134,7 +127,7 @@ int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { if (a->ameth) { int ret; - /* Compare parameters if the algorithm has them */ + // Compare parameters if the algorithm has them if (a->ameth->param_cmp) { ret = a->ameth->param_cmp(a, b); if (ret <= 0) { @@ -194,9 +187,9 @@ int EVP_PKEY_id(const EVP_PKEY *pkey) { return pkey->type; } -/* evp_pkey_asn1_find returns the ASN.1 method table for the given |nid|, which - * should be one of the |EVP_PKEY_*| values. It returns NULL if |nid| is - * unknown. */ +// evp_pkey_asn1_find returns the ASN.1 method table for the given |nid|, which +// should be one of the |EVP_PKEY_*| values. It returns NULL if |nid| is +// unknown. static const EVP_PKEY_ASN1_METHOD *evp_pkey_asn1_find(int nid) { switch (nid) { case EVP_PKEY_RSA: @@ -205,6 +198,8 @@ static const EVP_PKEY_ASN1_METHOD *evp_pkey_asn1_find(int nid) { return &ec_asn1_meth; case EVP_PKEY_DSA: return &dsa_asn1_meth; + case EVP_PKEY_ED25519: + return &ed25519_asn1_meth; default: return NULL; } diff --git a/Sources/BoringSSL/crypto/evp/evp_asn1.c b/Sources/BoringSSL/crypto/evp/evp_asn1.c index 6c9057191..bcb86d760 100644 --- a/Sources/BoringSSL/crypto/evp/evp_asn1.c +++ b/Sources/BoringSSL/crypto/evp/evp_asn1.c @@ -72,6 +72,7 @@ static const EVP_PKEY_ASN1_METHOD *const kASN1Methods[] = { &rsa_asn1_meth, &ec_asn1_meth, &dsa_asn1_meth, + &ed25519_asn1_meth, }; static int parse_key_type(CBS *cbs, int *out_type) { @@ -80,8 +81,7 @@ static int parse_key_type(CBS *cbs, int *out_type) { return 0; } - unsigned i; - for (i = 0; i < OPENSSL_ARRAY_SIZE(kASN1Methods); i++) { + for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(kASN1Methods); i++) { const EVP_PKEY_ASN1_METHOD *method = kASN1Methods[i]; if (CBS_len(&oid) == method->oid_len && OPENSSL_memcmp(CBS_data(&oid), method->oid, method->oid_len) == 0) { @@ -94,7 +94,7 @@ static int parse_key_type(CBS *cbs, int *out_type) { } EVP_PKEY *EVP_parse_public_key(CBS *cbs) { - /* Parse the SubjectPublicKeyInfo. */ + // Parse the SubjectPublicKeyInfo. 
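// For reference, the CBS calls below walk the structure defined in RFC 5280:
//   SubjectPublicKeyInfo ::= SEQUENCE {
//     algorithm         AlgorithmIdentifier,
//     subjectPublicKey  BIT STRING }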
CBS spki, algorithm, key; int type; uint8_t padding; @@ -103,22 +103,22 @@ EVP_PKEY *EVP_parse_public_key(CBS *cbs) { !parse_key_type(&algorithm, &type) || !CBS_get_asn1(&spki, &key, CBS_ASN1_BITSTRING) || CBS_len(&spki) != 0 || - /* Every key type defined encodes the key as a byte string with the same - * conversion to BIT STRING. */ + // Every key type defined encodes the key as a byte string with the same + // conversion to BIT STRING. !CBS_get_u8(&key, &padding) || padding != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); return NULL; } - /* Set up an |EVP_PKEY| of the appropriate type. */ + // Set up an |EVP_PKEY| of the appropriate type. EVP_PKEY *ret = EVP_PKEY_new(); if (ret == NULL || !EVP_PKEY_set_type(ret, type)) { goto err; } - /* Call into the type-specific SPKI decoding function. */ + // Call into the type-specific SPKI decoding function. if (ret->ameth->pub_decode == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); goto err; @@ -144,7 +144,7 @@ int EVP_marshal_public_key(CBB *cbb, const EVP_PKEY *key) { } EVP_PKEY *EVP_parse_private_key(CBS *cbs) { - /* Parse the PrivateKeyInfo. */ + // Parse the PrivateKeyInfo. CBS pkcs8, algorithm, key; uint64_t version; int type; @@ -158,16 +158,16 @@ EVP_PKEY *EVP_parse_private_key(CBS *cbs) { return NULL; } - /* A PrivateKeyInfo ends with a SET of Attributes which we ignore. */ + // A PrivateKeyInfo ends with a SET of Attributes which we ignore. - /* Set up an |EVP_PKEY| of the appropriate type. */ + // Set up an |EVP_PKEY| of the appropriate type. EVP_PKEY *ret = EVP_PKEY_new(); if (ret == NULL || !EVP_PKEY_set_type(ret, type)) { goto err; } - /* Call into the type-specific PrivateKeyInfo decoding function. */ + // Call into the type-specific PrivateKeyInfo decoding function. if (ret->ameth->priv_decode == NULL) { OPENSSL_PUT_ERROR(EVP, EVP_R_UNSUPPORTED_ALGORITHM); goto err; @@ -240,12 +240,12 @@ EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **out, const uint8_t **inp, return NULL; } - /* Parse with the legacy format. */ + // Parse with the legacy format. CBS cbs; CBS_init(&cbs, *inp, (size_t)len); EVP_PKEY *ret = old_priv_decode(&cbs, type); if (ret == NULL) { - /* Try again with PKCS#8. */ + // Try again with PKCS#8. ERR_clear_error(); CBS_init(&cbs, *inp, (size_t)len); ret = EVP_parse_private_key(&cbs); @@ -267,8 +267,8 @@ EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **out, const uint8_t **inp, return ret; } -/* num_elements parses one SEQUENCE from |in| and returns the number of elements - * in it. On parse error, it returns zero. */ +// num_elements parses one SEQUENCE from |in| and returns the number of elements +// in it. On parse error, it returns zero. static size_t num_elements(const uint8_t *in, size_t in_len) { CBS cbs, sequence; CBS_init(&cbs, in, (size_t)in_len); @@ -295,7 +295,7 @@ EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **out, const uint8_t **inp, long len) { return NULL; } - /* Parse the input as a PKCS#8 PrivateKeyInfo. */ + // Parse the input as a PKCS#8 PrivateKeyInfo. CBS cbs; CBS_init(&cbs, *inp, (size_t)len); EVP_PKEY *ret = EVP_parse_private_key(&cbs); @@ -309,7 +309,7 @@ EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **out, const uint8_t **inp, long len) { } ERR_clear_error(); - /* Count the elements to determine the legacy key format. */ + // Count the elements to determine the legacy key format. 
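// (Illustrative note: an OpenSSL-style SEC 1 ECPrivateKey typically carries
// four elements, version, privateKey and the two optional tagged fields,
// which is what the EC case below keys on. A hypothetical caller simply does
//   EVP_PKEY *pkey = d2i_AutoPrivateKey(NULL, &der_ptr, (long)der_len);
// with |der_ptr| and |der_len| standing in for the caller's buffer.)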
switch (num_elements(*inp, (size_t)len)) { case 4: return d2i_PrivateKey(EVP_PKEY_EC, out, inp, len); diff --git a/Sources/BoringSSL/crypto/evp/evp_ctx.c b/Sources/BoringSSL/crypto/evp/evp_ctx.c index a17a8ccc2..3599f7783 100644 --- a/Sources/BoringSSL/crypto/evp/evp_ctx.c +++ b/Sources/BoringSSL/crypto/evp/evp_ctx.c @@ -58,6 +58,7 @@ #include +#include #include #include @@ -68,6 +69,7 @@ static const EVP_PKEY_METHOD *const evp_methods[] = { &rsa_pkey_meth, &ec_pkey_meth, + &ed25519_pkey_meth, }; static const EVP_PKEY_METHOD *evp_pkey_meth_find(int type) { @@ -211,7 +213,8 @@ int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd, } int EVP_PKEY_sign_init(EVP_PKEY_CTX *ctx) { - if (!ctx || !ctx->pmeth || !ctx->pmeth->sign) { + if (ctx == NULL || ctx->pmeth == NULL || + (ctx->pmeth->sign == NULL && ctx->pmeth->sign_message == NULL)) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } @@ -221,7 +224,7 @@ int EVP_PKEY_sign_init(EVP_PKEY_CTX *ctx) { } int EVP_PKEY_sign(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *sig_len, - const uint8_t *data, size_t data_len) { + const uint8_t *digest, size_t digest_len) { if (!ctx || !ctx->pmeth || !ctx->pmeth->sign) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; @@ -230,11 +233,12 @@ int EVP_PKEY_sign(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *sig_len, OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } - return ctx->pmeth->sign(ctx, sig, sig_len, data, data_len); + return ctx->pmeth->sign(ctx, sig, sig_len, digest, digest_len); } int EVP_PKEY_verify_init(EVP_PKEY_CTX *ctx) { - if (!ctx || !ctx->pmeth || !ctx->pmeth->verify) { + if (ctx == NULL || ctx->pmeth == NULL || + (ctx->pmeth->verify == NULL && ctx->pmeth->verify_message == NULL)) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; } @@ -243,7 +247,7 @@ int EVP_PKEY_verify_init(EVP_PKEY_CTX *ctx) { } int EVP_PKEY_verify(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t sig_len, - const uint8_t *data, size_t data_len) { + const uint8_t *digest, size_t digest_len) { if (!ctx || !ctx->pmeth || !ctx->pmeth->verify) { OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE); return 0; @@ -252,7 +256,7 @@ int EVP_PKEY_verify(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t sig_len, OPENSSL_PUT_ERROR(EVP, EVP_R_OPERATON_NOT_INITIALIZED); return 0; } - return ctx->pmeth->verify(ctx, sig, sig_len, data, data_len); + return ctx->pmeth->verify(ctx, sig, sig_len, digest, digest_len); } int EVP_PKEY_encrypt_init(EVP_PKEY_CTX *ctx) { @@ -365,11 +369,11 @@ int EVP_PKEY_derive_set_peer(EVP_PKEY_CTX *ctx, EVP_PKEY *peer) { return 0; } - /* ran@cryptocom.ru: For clarity. The error is if parameters in peer are - * present (!missing) but don't match. EVP_PKEY_cmp_parameters may return - * 1 (match), 0 (don't match) and -2 (comparison is not defined). -1 - * (different key types) is impossible here because it is checked earlier. - * -2 is OK for us here, as well as 1, so we can check for 0 only. */ + // ran@cryptocom.ru: For clarity. The error is if parameters in peer are + // present (!missing) but don't match. EVP_PKEY_cmp_parameters may return + // 1 (match), 0 (don't match) and -2 (comparison is not defined). -1 + // (different key types) is impossible here because it is checked earlier. + // -2 is OK for us here, as well as 1, so we can check for 0 only. 
if (!EVP_PKEY_missing_parameters(peer) && !EVP_PKEY_cmp_parameters(ctx->pkey, peer)) { OPENSSL_PUT_ERROR(EVP, EVP_R_DIFFERENT_PARAMETERS); diff --git a/Sources/BoringSSL/crypto/evp/internal.h b/Sources/BoringSSL/crypto/evp/internal.h index 0783143d3..4aefa3526 100644 --- a/Sources/BoringSSL/crypto/evp/internal.h +++ b/Sources/BoringSSL/crypto/evp/internal.h @@ -71,41 +71,35 @@ struct evp_pkey_asn1_method_st { uint8_t oid[9]; uint8_t oid_len; - /* pub_decode decodes |params| and |key| as a SubjectPublicKeyInfo - * and writes the result into |out|. It returns one on success and zero on - * error. |params| is the AlgorithmIdentifier after the OBJECT IDENTIFIER - * type field, and |key| is the contents of the subjectPublicKey with the - * leading padding byte checked and removed. Although X.509 uses BIT STRINGs - * to represent SubjectPublicKeyInfo, every key type defined encodes the key - * as a byte string with the same conversion to BIT STRING. */ + // pub_decode decodes |params| and |key| as a SubjectPublicKeyInfo + // and writes the result into |out|. It returns one on success and zero on + // error. |params| is the AlgorithmIdentifier after the OBJECT IDENTIFIER + // type field, and |key| is the contents of the subjectPublicKey with the + // leading padding byte checked and removed. Although X.509 uses BIT STRINGs + // to represent SubjectPublicKeyInfo, every key type defined encodes the key + // as a byte string with the same conversion to BIT STRING. int (*pub_decode)(EVP_PKEY *out, CBS *params, CBS *key); - /* pub_encode encodes |key| as a SubjectPublicKeyInfo and appends the result - * to |out|. It returns one on success and zero on error. */ + // pub_encode encodes |key| as a SubjectPublicKeyInfo and appends the result + // to |out|. It returns one on success and zero on error. int (*pub_encode)(CBB *out, const EVP_PKEY *key); int (*pub_cmp)(const EVP_PKEY *a, const EVP_PKEY *b); - /* priv_decode decodes |params| and |key| as a PrivateKeyInfo and writes the - * result into |out|. It returns one on success and zero on error. |params| is - * the AlgorithmIdentifier after the OBJECT IDENTIFIER type field, and |key| - * is the contents of the OCTET STRING privateKey field. */ + // priv_decode decodes |params| and |key| as a PrivateKeyInfo and writes the + // result into |out|. It returns one on success and zero on error. |params| is + // the AlgorithmIdentifier after the OBJECT IDENTIFIER type field, and |key| + // is the contents of the OCTET STRING privateKey field. int (*priv_decode)(EVP_PKEY *out, CBS *params, CBS *key); - /* priv_encode encodes |key| as a PrivateKeyInfo and appends the result to - * |out|. It returns one on success and zero on error. */ + // priv_encode encodes |key| as a PrivateKeyInfo and appends the result to + // |out|. It returns one on success and zero on error. int (*priv_encode)(CBB *out, const EVP_PKEY *key); - /* pkey_opaque returns 1 if the |pk| is opaque. Opaque keys are backed by - * custom implementations which do not expose key material and parameters.*/ + // pkey_opaque returns 1 if the |pk| is opaque. Opaque keys are backed by + // custom implementations which do not expose key material and parameters. int (*pkey_opaque)(const EVP_PKEY *pk); - /* pkey_supports_digest returns one if |pkey| supports digests of - * type |md|. This is intended for use with EVP_PKEYs backing custom - * implementations which can't sign all digests. If null, it is - * assumed that all digests are supported. 
*/ - int (*pkey_supports_digest)(const EVP_PKEY *pkey, const EVP_MD *md); - int (*pkey_size)(const EVP_PKEY *pk); int (*pkey_bits)(const EVP_PKEY *pk); @@ -136,33 +130,33 @@ struct evp_pkey_asn1_method_st { #define EVP_PKEY_OP_TYPE_GEN EVP_PKEY_OP_KEYGEN -/* EVP_PKEY_CTX_ctrl performs |cmd| on |ctx|. The |keytype| and |optype| - * arguments can be -1 to specify that any type and operation are acceptable, - * otherwise |keytype| must match the type of |ctx| and the bits of |optype| - * must intersect the operation flags set on |ctx|. - * - * The |p1| and |p2| arguments depend on the value of |cmd|. - * - * It returns one on success and zero on error. */ +// EVP_PKEY_CTX_ctrl performs |cmd| on |ctx|. The |keytype| and |optype| +// arguments can be -1 to specify that any type and operation are acceptable, +// otherwise |keytype| must match the type of |ctx| and the bits of |optype| +// must intersect the operation flags set on |ctx|. +// +// The |p1| and |p2| arguments depend on the value of |cmd|. +// +// It returns one on success and zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd, int p1, void *p2); #define EVP_PKEY_CTRL_MD 1 #define EVP_PKEY_CTRL_GET_MD 2 -/* EVP_PKEY_CTRL_PEER_KEY is called with different values of |p1|: - * 0: Is called from |EVP_PKEY_derive_set_peer| and |p2| contains a peer key. - * If the return value is <= 0, the key is rejected. - * 1: Is called at the end of |EVP_PKEY_derive_set_peer| and |p2| contains a - * peer key. If the return value is <= 0, the key is rejected. - * 2: Is called with |p2| == NULL to test whether the peer's key was used. - * (EC)DH always return one in this case. - * 3: Is called with |p2| == NULL to set whether the peer's key was used. - * (EC)DH always return one in this case. This was only used for GOST. */ +// EVP_PKEY_CTRL_PEER_KEY is called with different values of |p1|: +// 0: Is called from |EVP_PKEY_derive_set_peer| and |p2| contains a peer key. +// If the return value is <= 0, the key is rejected. +// 1: Is called at the end of |EVP_PKEY_derive_set_peer| and |p2| contains a +// peer key. If the return value is <= 0, the key is rejected. +// 2: Is called with |p2| == NULL to test whether the peer's key was used. +// (EC)DH always return one in this case. +// 3: Is called with |p2| == NULL to set whether the peer's key was used. +// (EC)DH always return one in this case. This was only used for GOST. #define EVP_PKEY_CTRL_PEER_KEY 3 -/* EVP_PKEY_ALG_CTRL is the base value from which key-type specific ctrl - * commands are numbered. */ +// EVP_PKEY_ALG_CTRL is the base value from which key-type specific ctrl +// commands are numbered. 
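// As a worked example (for orientation, not text from this header): with the
// base value 0x1000 below, EVP_PKEY_CTRL_RSA_PADDING works out to
// EVP_PKEY_ALG_CTRL + 1 = 0x1001.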
#define EVP_PKEY_ALG_CTRL 0x1000 #define EVP_PKEY_CTRL_RSA_PADDING (EVP_PKEY_ALG_CTRL + 1) @@ -170,7 +164,7 @@ OPENSSL_EXPORT int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, #define EVP_PKEY_CTRL_RSA_PSS_SALTLEN (EVP_PKEY_ALG_CTRL + 3) #define EVP_PKEY_CTRL_GET_RSA_PSS_SALTLEN (EVP_PKEY_ALG_CTRL + 4) #define EVP_PKEY_CTRL_RSA_KEYGEN_BITS (EVP_PKEY_ALG_CTRL + 5) -#define EVP_PKEY_CTRL_RSA_KEYGEN_PUBEXP (EVP_PKEY_ALG_CTRL + 6) +#define EVP_PKEY_CTRL_RSA_KEYGEN_PUBEXP (EVP_PKEY_ALG_CTRL + 6) #define EVP_PKEY_CTRL_RSA_OAEP_MD (EVP_PKEY_ALG_CTRL + 7) #define EVP_PKEY_CTRL_GET_RSA_OAEP_MD (EVP_PKEY_ALG_CTRL + 8) #define EVP_PKEY_CTRL_RSA_MGF1_MD (EVP_PKEY_ALG_CTRL + 9) @@ -179,17 +173,17 @@ OPENSSL_EXPORT int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, #define EVP_PKEY_CTRL_GET_RSA_OAEP_LABEL (EVP_PKEY_ALG_CTRL + 12) struct evp_pkey_ctx_st { - /* Method associated with this operation */ + // Method associated with this operation const EVP_PKEY_METHOD *pmeth; - /* Engine that implements this method or NULL if builtin */ + // Engine that implements this method or NULL if builtin ENGINE *engine; - /* Key: may be NULL */ + // Key: may be NULL EVP_PKEY *pkey; - /* Peer key for key agreement, may be NULL */ + // Peer key for key agreement, may be NULL EVP_PKEY *peerkey; - /* operation contains one of the |EVP_PKEY_OP_*| values. */ + // operation contains one of the |EVP_PKEY_OP_*| values. int operation; - /* Algorithm specific data */ + // Algorithm specific data void *data; } /* EVP_PKEY_CTX */; @@ -205,9 +199,15 @@ struct evp_pkey_method_st { int (*sign)(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *siglen, const uint8_t *tbs, size_t tbslen); + int (*sign_message)(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *siglen, + const uint8_t *tbs, size_t tbslen); + int (*verify)(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t siglen, const uint8_t *tbs, size_t tbslen); + int (*verify_message)(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t siglen, + const uint8_t *tbs, size_t tbslen); + int (*verify_recover)(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len, const uint8_t *sig, size_t sig_len); @@ -222,16 +222,31 @@ struct evp_pkey_method_st { int (*ctrl)(EVP_PKEY_CTX *ctx, int type, int p1, void *p2); } /* EVP_PKEY_METHOD */; +typedef struct { + union { + uint8_t priv[64]; + struct { + // Shift the location of the public key to align with where it is in the + // private key representation. + uint8_t pad[32]; + uint8_t value[32]; + } pub; + } key; + char has_private; +} ED25519_KEY; + extern const EVP_PKEY_ASN1_METHOD dsa_asn1_meth; extern const EVP_PKEY_ASN1_METHOD ec_asn1_meth; extern const EVP_PKEY_ASN1_METHOD rsa_asn1_meth; +extern const EVP_PKEY_ASN1_METHOD ed25519_asn1_meth; extern const EVP_PKEY_METHOD rsa_pkey_meth; extern const EVP_PKEY_METHOD ec_pkey_meth; +extern const EVP_PKEY_METHOD ed25519_pkey_meth; #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_EVP_INTERNAL_H */ +#endif // OPENSSL_HEADER_EVP_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/evp/p_dsa_asn1.c b/Sources/BoringSSL/crypto/evp/p_dsa_asn1.c index 1f022f1a8..34b2e70c2 100644 --- a/Sources/BoringSSL/crypto/evp/p_dsa_asn1.c +++ b/Sources/BoringSSL/crypto/evp/p_dsa_asn1.c @@ -65,9 +65,9 @@ static int dsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See RFC 3279, section 2.3.2. */ + // See RFC 3279, section 2.3.2. - /* Parameters may or may not be present. */ + // Parameters may or may not be present. 
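// When present, the parameters are the Dss-Parms structure (quoted from
// RFC 3279 for reference, not from this file):
//   Dss-Parms ::= SEQUENCE { p INTEGER, q INTEGER, g INTEGER }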
DSA *dsa; if (CBS_len(params) == 0) { dsa = DSA_new(); @@ -105,7 +105,7 @@ static int dsa_pub_encode(CBB *out, const EVP_PKEY *key) { const DSA *dsa = key->pkey.dsa; const int has_params = dsa->p != NULL && dsa->q != NULL && dsa->g != NULL; - /* See RFC 5480, section 2. */ + // See RFC 5480, section 2. CBB spki, algorithm, oid, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || @@ -125,9 +125,9 @@ static int dsa_pub_encode(CBB *out, const EVP_PKEY *key) { } static int dsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See PKCS#11, v2.40, section 2.5. */ + // See PKCS#11, v2.40, section 2.5. - /* Decode parameters. */ + // Decode parameters. BN_CTX *ctx = NULL; DSA *dsa = DSA_parse_parameters(params); if (dsa == NULL || CBS_len(params) != 0) { @@ -141,17 +141,18 @@ static int dsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { goto err; } - /* Decode the key. */ + // Decode the key. if (!BN_parse_asn1_unsigned(key, dsa->priv_key) || CBS_len(key) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); goto err; } - /* Calculate the public key. */ + // Calculate the public key. ctx = BN_CTX_new(); if (ctx == NULL || - !BN_mod_exp(dsa->pub_key, dsa->g, dsa->priv_key, dsa->p, ctx)) { + !BN_mod_exp_mont_consttime(dsa->pub_key, dsa->g, dsa->priv_key, dsa->p, + ctx, NULL)) { goto err; } @@ -172,7 +173,7 @@ static int dsa_priv_encode(CBB *out, const EVP_PKEY *key) { return 0; } - /* See PKCS#11, v2.40, section 2.5. */ + // See PKCS#11, v2.40, section 2.5. CBB pkcs8, algorithm, oid, private_key; if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) || @@ -244,7 +245,7 @@ static void int_dsa_free(EVP_PKEY *pkey) { DSA_free(pkey->pkey.dsa); } const EVP_PKEY_ASN1_METHOD dsa_asn1_meth = { EVP_PKEY_DSA, - /* 1.2.840.10040.4.1 */ + // 1.2.840.10040.4.1 {0x2a, 0x86, 0x48, 0xce, 0x38, 0x04, 0x01}, 7, dsa_pub_decode, @@ -255,7 +256,6 @@ const EVP_PKEY_ASN1_METHOD dsa_asn1_meth = { dsa_priv_encode, NULL /* pkey_opaque */, - NULL /* pkey_supports_digest */, int_dsa_size, dsa_bits, diff --git a/Sources/BoringSSL/crypto/evp/p_ec.c b/Sources/BoringSSL/crypto/evp/p_ec.c index dc1ea6f6d..d311d2209 100644 --- a/Sources/BoringSSL/crypto/evp/p_ec.c +++ b/Sources/BoringSSL/crypto/evp/p_ec.c @@ -69,12 +69,12 @@ #include #include "internal.h" -#include "../ec/internal.h" +#include "../fipsmodule/ec/internal.h" #include "../internal.h" typedef struct { - /* message digest */ + // message digest const EVP_MD *md; } EC_PKEY_CTX; @@ -161,8 +161,8 @@ static int pkey_ec_derive(EVP_PKEY_CTX *ctx, uint8_t *key, } pubkey = EC_KEY_get0_public_key(ctx->peerkey->pkey.ec); - /* NB: unlike PKCS#3 DH, if *outlen is less than maximum size this is - * not an error, the result is truncated. */ + // NB: unlike PKCS#3 DH, if *outlen is less than maximum size this is + // not an error, the result is truncated. 
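// An illustrative caller (a usage sketch, not code from this file) would
// size the buffer via a NULL query and may pass a shorter length to
// truncate the shared secret:
//   size_t secret_len;
//   if (!EVP_PKEY_derive(ctx, NULL, &secret_len)) { /* error */ }
//   uint8_t *secret = OPENSSL_malloc(secret_len);
//   if (secret == NULL ||
//       !EVP_PKEY_derive(ctx, secret, &secret_len)) { /* error */ }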
outlen = *keylen; @@ -196,7 +196,7 @@ static int pkey_ec_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2) { return 1; case EVP_PKEY_CTRL_PEER_KEY: - /* Default behaviour is OK */ + // Default behaviour is OK return 1; default: @@ -228,10 +228,12 @@ const EVP_PKEY_METHOD ec_pkey_meth = { pkey_ec_cleanup, pkey_ec_keygen, pkey_ec_sign, + NULL /* sign_message */, pkey_ec_verify, - 0 /* verify_recover */, - 0 /* encrypt */, - 0 /* decrypt */, + NULL /* verify_message */, + NULL /* verify_recover */, + NULL /* encrypt */, + NULL /* decrypt */, pkey_ec_derive, pkey_ec_ctrl, }; diff --git a/Sources/BoringSSL/crypto/evp/p_ec_asn1.c b/Sources/BoringSSL/crypto/evp/p_ec_asn1.c index 8d44dcdce..c5828d932 100644 --- a/Sources/BoringSSL/crypto/evp/p_ec_asn1.c +++ b/Sources/BoringSSL/crypto/evp/p_ec_asn1.c @@ -70,7 +70,7 @@ static int eckey_pub_encode(CBB *out, const EVP_PKEY *key) { const EC_GROUP *group = EC_KEY_get0_group(ec_key); const EC_POINT *public_key = EC_KEY_get0_public_key(ec_key); - /* See RFC 5480, section 2. */ + // See RFC 5480, section 2. CBB spki, algorithm, oid, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || @@ -90,9 +90,9 @@ static int eckey_pub_encode(CBB *out, const EVP_PKEY *key) { } static int eckey_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See RFC 5480, section 2. */ + // See RFC 5480, section 2. - /* The parameters are a named curve. */ + // The parameters are a named curve. EC_POINT *point = NULL; EC_KEY *eckey = NULL; EC_GROUP *group = EC_KEY_parse_curve_name(params); @@ -141,7 +141,7 @@ static int eckey_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { } static int eckey_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See RFC 5915. */ + // See RFC 5915. EC_GROUP *group = EC_KEY_parse_parameters(params); if (group == NULL || CBS_len(params) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); @@ -164,13 +164,13 @@ static int eckey_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { static int eckey_priv_encode(CBB *out, const EVP_PKEY *key) { const EC_KEY *ec_key = key->pkey.ec; - /* Omit the redundant copy of the curve name. This contradicts RFC 5915 but - * aligns with PKCS #11. SEC 1 only says they may be omitted if known by other - * means. Both OpenSSL and NSS omit the redundant parameters, so we omit them - * as well. */ + // Omit the redundant copy of the curve name. This contradicts RFC 5915 but + // aligns with PKCS #11. SEC 1 only says they may be omitted if known by other + // means. Both OpenSSL and NSS omit the redundant parameters, so we omit them + // as well. unsigned enc_flags = EC_KEY_get_enc_flags(ec_key) | EC_PKEY_NO_PARAMETERS; - /* See RFC 5915. */ + // See RFC 5915. 
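// For reference, RFC 5915 defines the structure serialized into the
// privateKey OCTET STRING below as:
//   ECPrivateKey ::= SEQUENCE {
//     version        INTEGER { ecPrivkeyVer1(1) } (ecPrivkeyVer1),
//     privateKey     OCTET STRING,
//     parameters [0] ECParameters {{ NamedCurve }} OPTIONAL,
//     publicKey  [1] BIT STRING OPTIONAL }
// (with the named-curve parameters omitted here, per the note above).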
CBB pkcs8, algorithm, oid, private_key; if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) || @@ -219,7 +219,7 @@ static int ec_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b) { const EC_GROUP *group_a = EC_KEY_get0_group(a->pkey.ec), *group_b = EC_KEY_get0_group(b->pkey.ec); if (EC_GROUP_cmp(group_a, group_b, NULL) != 0) { - /* mismatch */ + // mismatch return 0; } return 1; @@ -233,7 +233,7 @@ static int eckey_opaque(const EVP_PKEY *pkey) { const EVP_PKEY_ASN1_METHOD ec_asn1_meth = { EVP_PKEY_EC, - /* 1.2.840.10045.2.1 */ + // 1.2.840.10045.2.1 {0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01}, 7, eckey_pub_decode, @@ -244,7 +244,6 @@ const EVP_PKEY_ASN1_METHOD ec_asn1_meth = { eckey_priv_encode, eckey_opaque, - 0 /* pkey_supports_digest */, int_ec_size, ec_bits, diff --git a/Sources/BoringSSL/crypto/evp/p_ed25519.c b/Sources/BoringSSL/crypto/evp/p_ed25519.c new file mode 100644 index 000000000..554a379c7 --- /dev/null +++ b/Sources/BoringSSL/crypto/evp/p_ed25519.c @@ -0,0 +1,71 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include +#include + +#include "internal.h" + + +// Ed25519 has no parameters to copy. +static int pkey_ed25519_copy(EVP_PKEY_CTX *dst, EVP_PKEY_CTX *src) { return 1; } + +static int pkey_ed25519_sign_message(EVP_PKEY_CTX *ctx, uint8_t *sig, + size_t *siglen, const uint8_t *tbs, + size_t tbslen) { + ED25519_KEY *key = ctx->pkey->pkey.ptr; + if (!key->has_private) { + OPENSSL_PUT_ERROR(EVP, EVP_R_NOT_A_PRIVATE_KEY); + return 0; + } + + *siglen = 64; + if (sig == NULL) { + return 1; + } + + return ED25519_sign(sig, tbs, tbslen, key->key.priv); +} + +static int pkey_ed25519_verify_message(EVP_PKEY_CTX *ctx, const uint8_t *sig, + size_t siglen, const uint8_t *tbs, + size_t tbslen) { + ED25519_KEY *key = ctx->pkey->pkey.ptr; + if (siglen != 64 || + !ED25519_verify(tbs, tbslen, sig, key->key.pub.value)) { + OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_SIGNATURE); + return 0; + } + + return 1; +} + +const EVP_PKEY_METHOD ed25519_pkey_meth = { + EVP_PKEY_ED25519, + NULL /* init */, + pkey_ed25519_copy, + NULL /* cleanup */, + NULL /* keygen */, + NULL /* sign */, + pkey_ed25519_sign_message, + NULL /* verify */, + pkey_ed25519_verify_message, + NULL /* verify_recover */, + NULL /* encrypt */, + NULL /* decrypt */, + NULL /* derive */, + NULL /* ctrl */, +}; diff --git a/Sources/BoringSSL/crypto/evp/p_ed25519_asn1.c b/Sources/BoringSSL/crypto/evp/p_ed25519_asn1.c new file mode 100644 index 000000000..65b4112a7 --- /dev/null +++ b/Sources/BoringSSL/crypto/evp/p_ed25519_asn1.c @@ -0,0 +1,190 @@ +/* Copyright (c) 2017, Google Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include +#include +#include +#include + +#include "internal.h" +#include "../internal.h" + + +static void ed25519_free(EVP_PKEY *pkey) { + OPENSSL_free(pkey->pkey.ptr); + pkey->pkey.ptr = NULL; +} + +static int set_pubkey(EVP_PKEY *pkey, const uint8_t pubkey[32]) { + ED25519_KEY *key = OPENSSL_malloc(sizeof(ED25519_KEY)); + if (key == NULL) { + OPENSSL_PUT_ERROR(EVP, ERR_R_MALLOC_FAILURE); + return 0; + } + key->has_private = 0; + OPENSSL_memcpy(key->key.pub.value, pubkey, 32); + + ed25519_free(pkey); + pkey->pkey.ptr = key; + return 1; +} + +static int set_privkey(EVP_PKEY *pkey, const uint8_t privkey[64]) { + ED25519_KEY *key = OPENSSL_malloc(sizeof(ED25519_KEY)); + if (key == NULL) { + OPENSSL_PUT_ERROR(EVP, ERR_R_MALLOC_FAILURE); + return 0; + } + key->has_private = 1; + OPENSSL_memcpy(key->key.priv, privkey, 64); + + ed25519_free(pkey); + pkey->pkey.ptr = key; + return 1; +} + +static int ed25519_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { + // See draft-ietf-curdle-pkix-04, section 4. + + // The parameters must be omitted. Public keys have length 32. + if (CBS_len(params) != 0 || + CBS_len(key) != 32) { + OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); + return 0; + } + + return set_pubkey(out, CBS_data(key)); +} + +static int ed25519_pub_encode(CBB *out, const EVP_PKEY *pkey) { + const ED25519_KEY *key = pkey->pkey.ptr; + + // See draft-ietf-curdle-pkix-04, section 4. + CBB spki, algorithm, oid, key_bitstring; + if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || + !CBB_add_bytes(&oid, ed25519_asn1_meth.oid, ed25519_asn1_meth.oid_len) || + !CBB_add_asn1(&spki, &key_bitstring, CBS_ASN1_BITSTRING) || + !CBB_add_u8(&key_bitstring, 0 /* padding */) || + !CBB_add_bytes(&key_bitstring, key->key.pub.value, 32) || + !CBB_flush(out)) { + OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR); + return 0; + } + + return 1; +} + +static int ed25519_pub_cmp(const EVP_PKEY *a, const EVP_PKEY *b) { + const ED25519_KEY *a_key = a->pkey.ptr; + const ED25519_KEY *b_key = b->pkey.ptr; + return OPENSSL_memcmp(a_key->key.pub.value, b_key->key.pub.value, 32) == 0; +} + +static int ed25519_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { + // See draft-ietf-curdle-pkix-04, section 7. + + // Parameters must be empty. The key is a 32-byte value wrapped in an extra + // OCTET STRING layer. + CBS inner; + if (CBS_len(params) != 0 || + !CBS_get_asn1(key, &inner, CBS_ASN1_OCTETSTRING) || + CBS_len(key) != 0 || + CBS_len(&inner) != 32) { + OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); + return 0; + } + + // The PKCS#8 encoding stores only the 32-byte seed, so we must recover the + // full representation which we use from it. 
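// (For orientation, based on the |ED25519_KEY| layout in internal.h: the
// recovered 64-byte private key is the seed followed by the public key, so
// after the call below privkey[32..63] and pubkey hold the same 32 bytes.)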
+ uint8_t pubkey[32], privkey[64]; + ED25519_keypair_from_seed(pubkey, privkey, CBS_data(&inner)); + return set_privkey(out, privkey); +} + +static int ed25519_priv_encode(CBB *out, const EVP_PKEY *pkey) { + ED25519_KEY *key = pkey->pkey.ptr; + if (!key->has_private) { + OPENSSL_PUT_ERROR(EVP, EVP_R_NOT_A_PRIVATE_KEY); + return 0; + } + + // See draft-ietf-curdle-pkix-04, section 7. + CBB pkcs8, algorithm, oid, private_key, inner; + if (!CBB_add_asn1(out, &pkcs8, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1_uint64(&pkcs8, 0 /* version */) || + !CBB_add_asn1(&pkcs8, &algorithm, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || + !CBB_add_bytes(&oid, ed25519_asn1_meth.oid, ed25519_asn1_meth.oid_len) || + !CBB_add_asn1(&pkcs8, &private_key, CBS_ASN1_OCTETSTRING) || + !CBB_add_asn1(&private_key, &inner, CBS_ASN1_OCTETSTRING) || + // The PKCS#8 encoding stores only the 32-byte seed which is the first 32 + // bytes of the private key. + !CBB_add_bytes(&inner, key->key.priv, 32) || + !CBB_flush(out)) { + OPENSSL_PUT_ERROR(EVP, EVP_R_ENCODE_ERROR); + return 0; + } + + return 1; +} + +static int ed25519_size(const EVP_PKEY *pkey) { return 64; } + +static int ed25519_bits(const EVP_PKEY *pkey) { return 256; } + +const EVP_PKEY_ASN1_METHOD ed25519_asn1_meth = { + EVP_PKEY_ED25519, + {0x2b, 0x65, 0x70}, + 3, + ed25519_pub_decode, + ed25519_pub_encode, + ed25519_pub_cmp, + ed25519_priv_decode, + ed25519_priv_encode, + NULL /* pkey_opaque */, + ed25519_size, + ed25519_bits, + NULL /* param_missing */, + NULL /* param_copy */, + NULL /* param_cmp */, + ed25519_free, +}; + +EVP_PKEY *EVP_PKEY_new_ed25519_public(const uint8_t public_key[32]) { + EVP_PKEY *ret = EVP_PKEY_new(); + if (ret == NULL || + !EVP_PKEY_set_type(ret, EVP_PKEY_ED25519) || + !set_pubkey(ret, public_key)) { + EVP_PKEY_free(ret); + return NULL; + } + + return ret; +} + +EVP_PKEY *EVP_PKEY_new_ed25519_private(const uint8_t private_key[64]) { + EVP_PKEY *ret = EVP_PKEY_new(); + if (ret == NULL || + !EVP_PKEY_set_type(ret, EVP_PKEY_ED25519) || + !set_privkey(ret, private_key)) { + EVP_PKEY_free(ret); + return NULL; + } + + return ret; +} diff --git a/Sources/BoringSSL/crypto/evp/p_rsa.c b/Sources/BoringSSL/crypto/evp/p_rsa.c index ea2ba9987..cfc6bea1e 100644 --- a/Sources/BoringSSL/crypto/evp/p_rsa.c +++ b/Sources/BoringSSL/crypto/evp/p_rsa.c @@ -68,30 +68,35 @@ #include #include "../internal.h" -#include "../rsa/internal.h" +#include "../fipsmodule/rsa/internal.h" #include "internal.h" typedef struct { - /* Key gen parameters */ + // Key gen parameters int nbits; BIGNUM *pub_exp; - /* RSA padding mode */ + // RSA padding mode int pad_mode; - /* message digest */ + // message digest const EVP_MD *md; - /* message digest for MGF1 */ + // message digest for MGF1 const EVP_MD *mgf1md; - /* PSS salt length */ + // PSS salt length int saltlen; - /* tbuf is a buffer which is either NULL, or is the size of the RSA modulus. - * It's used to store the output of RSA operations. */ + // tbuf is a buffer which is either NULL, or is the size of the RSA modulus. + // It's used to store the output of RSA operations. 
uint8_t *tbuf; - /* OAEP label */ + // OAEP label uint8_t *oaep_label; size_t oaep_labellen; } RSA_PKEY_CTX; +typedef struct { + uint8_t *data; + size_t len; +} RSA_OAEP_LABEL_PARAMS; + static int pkey_rsa_init(EVP_PKEY_CTX *ctx) { RSA_PKEY_CTX *rctx; rctx = OPENSSL_malloc(sizeof(RSA_PKEY_CTX)); @@ -180,18 +185,7 @@ static int pkey_rsa_sign(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *siglen, } if (rctx->md) { - unsigned int out_len; - - if (tbslen != EVP_MD_size(rctx->md)) { - OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_DIGEST_LENGTH); - return 0; - } - - if (EVP_MD_type(rctx->md) == NID_mdc2) { - OPENSSL_PUT_ERROR(EVP, EVP_R_NO_MDC2_SUPPORT); - return 0; - } - + unsigned out_len; switch (rctx->pad_mode) { case RSA_PKCS1_PADDING: if (!RSA_sign(EVP_MD_type(rctx->md), tbs, tbslen, sig, &out_len, rsa)) { @@ -201,14 +195,8 @@ static int pkey_rsa_sign(EVP_PKEY_CTX *ctx, uint8_t *sig, size_t *siglen, return 1; case RSA_PKCS1_PSS_PADDING: - if (!setup_tbuf(rctx, ctx) || - !RSA_padding_add_PKCS1_PSS_mgf1(rsa, rctx->tbuf, tbs, rctx->md, - rctx->mgf1md, rctx->saltlen) || - !RSA_sign_raw(rsa, siglen, sig, *siglen, rctx->tbuf, key_len, - RSA_NO_PADDING)) { - return 0; - } - return 1; + return RSA_sign_pss_mgf1(rsa, siglen, sig, *siglen, tbs, tbslen, + rctx->md, rctx->mgf1md, rctx->saltlen); default: return 0; @@ -223,8 +211,6 @@ static int pkey_rsa_verify(EVP_PKEY_CTX *ctx, const uint8_t *sig, size_t tbslen) { RSA_PKEY_CTX *rctx = ctx->data; RSA *rsa = ctx->pkey->pkey.rsa; - size_t rslen; - const size_t key_len = EVP_PKEY_size(ctx->pkey); if (rctx->md) { switch (rctx->pad_mode) { @@ -232,25 +218,16 @@ static int pkey_rsa_verify(EVP_PKEY_CTX *ctx, const uint8_t *sig, return RSA_verify(EVP_MD_type(rctx->md), tbs, tbslen, sig, siglen, rsa); case RSA_PKCS1_PSS_PADDING: - if (tbslen != EVP_MD_size(rctx->md)) { - OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_DIGEST_LENGTH); - return 0; - } - - if (!setup_tbuf(rctx, ctx) || - !RSA_verify_raw(rsa, &rslen, rctx->tbuf, key_len, sig, siglen, - RSA_NO_PADDING) || - !RSA_verify_PKCS1_PSS_mgf1(rsa, tbs, rctx->md, rctx->mgf1md, - rctx->tbuf, rctx->saltlen)) { - return 0; - } - return 1; + return RSA_verify_pss_mgf1(rsa, tbs, tbslen, rctx->md, rctx->mgf1md, + rctx->saltlen, sig, siglen); default: return 0; } } + size_t rslen; + const size_t key_len = EVP_PKEY_size(ctx->pkey); if (!setup_tbuf(rctx, ctx) || !RSA_verify_raw(rsa, &rslen, rctx->tbuf, key_len, sig, siglen, rctx->pad_mode) || @@ -279,31 +256,25 @@ static int pkey_rsa_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out, return 0; } - if (!setup_tbuf(rctx, ctx)) { - return 0; - } - if (rctx->md == NULL) { - const int ret = RSA_public_decrypt(sig_len, sig, rctx->tbuf, - ctx->pkey->pkey.rsa, rctx->pad_mode); - if (ret < 0) { - return 0; - } - *out_len = ret; - OPENSSL_memcpy(out, rctx->tbuf, *out_len); - return 1; + return RSA_verify_raw(rsa, out_len, out, *out_len, sig, sig_len, + rctx->pad_mode); } if (rctx->pad_mode != RSA_PKCS1_PADDING) { return 0; } + // Assemble the encoded hash, using a placeholder hash value. 
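// (Illustrative detail, not taken from this file: for SHA-256 the DigestInfo
// produced by |RSA_add_pkcs1_prefix| is the fixed 19-byte prefix
//   30 31 30 0d 06 09 60 86 48 01 65 03 04 02 01 05 00 04 20
// followed by the 32-byte hash, so only the trailing |hash_len| bytes depend
// on the message, which is why the comparison below stops short of them.)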
+ static const uint8_t kDummyHash[EVP_MAX_MD_SIZE] = {0}; + const size_t hash_len = EVP_MD_size(rctx->md); uint8_t *asn1_prefix; size_t asn1_prefix_len; int asn1_prefix_allocated; - if (!RSA_add_pkcs1_prefix(&asn1_prefix, &asn1_prefix_len, - &asn1_prefix_allocated, EVP_MD_type(rctx->md), NULL, - 0)) { + if (!setup_tbuf(rctx, ctx) || + !RSA_add_pkcs1_prefix(&asn1_prefix, &asn1_prefix_len, + &asn1_prefix_allocated, EVP_MD_type(rctx->md), + kDummyHash, hash_len)) { return 0; } @@ -311,8 +282,9 @@ static int pkey_rsa_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out, int ok = 1; if (!RSA_verify_raw(rsa, &rslen, rctx->tbuf, key_len, sig, sig_len, RSA_PKCS1_PADDING) || - rslen < asn1_prefix_len || - CRYPTO_memcmp(rctx->tbuf, asn1_prefix, asn1_prefix_len) != 0) { + rslen != asn1_prefix_len || + // Compare all but the hash suffix. + CRYPTO_memcmp(rctx->tbuf, asn1_prefix, asn1_prefix_len - hash_len) != 0) { ok = 0; } @@ -324,15 +296,10 @@ static int pkey_rsa_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out, return 0; } - const size_t result_len = rslen - asn1_prefix_len; - if (result_len != EVP_MD_size(rctx->md)) { - return 0; - } - if (out != NULL) { - OPENSSL_memcpy(out, rctx->tbuf + asn1_prefix_len, result_len); + OPENSSL_memcpy(out, rctx->tbuf + rslen - hash_len, hash_len); } - *out_len = result_len; + *out_len = hash_len; return 1; } @@ -386,22 +353,15 @@ static int pkey_rsa_decrypt(EVP_PKEY_CTX *ctx, uint8_t *out, } if (rctx->pad_mode == RSA_PKCS1_OAEP_PADDING) { - size_t plaintext_len; - int message_len; - + size_t padded_len; if (!setup_tbuf(rctx, ctx) || - !RSA_decrypt(rsa, &plaintext_len, rctx->tbuf, key_len, in, inlen, - RSA_NO_PADDING)) { + !RSA_decrypt(rsa, &padded_len, rctx->tbuf, key_len, in, inlen, + RSA_NO_PADDING) || + !RSA_padding_check_PKCS1_OAEP_mgf1( + out, outlen, key_len, rctx->tbuf, padded_len, rctx->oaep_label, + rctx->oaep_labellen, rctx->md, rctx->mgf1md)) { return 0; } - - message_len = RSA_padding_check_PKCS1_OAEP_mgf1( - out, key_len, rctx->tbuf, plaintext_len, rctx->oaep_label, - rctx->oaep_labellen, rctx->md, rctx->mgf1md); - if (message_len < 0) { - return 0; - } - *outlen = message_len; return 1; } @@ -530,20 +490,17 @@ static int pkey_rsa_ctrl(EVP_PKEY_CTX *ctx, int type, int p1, void *p2) { } return 1; - case EVP_PKEY_CTRL_RSA_OAEP_LABEL: + case EVP_PKEY_CTRL_RSA_OAEP_LABEL: { if (rctx->pad_mode != RSA_PKCS1_OAEP_PADDING) { OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PADDING_MODE); return 0; } OPENSSL_free(rctx->oaep_label); - if (p2 && p1 > 0) { - rctx->oaep_label = p2; - rctx->oaep_labellen = p1; - } else { - rctx->oaep_label = NULL; - rctx->oaep_labellen = 0; - } + RSA_OAEP_LABEL_PARAMS *params = p2; + rctx->oaep_label = params->data; + rctx->oaep_labellen = params->len; return 1; + } case EVP_PKEY_CTRL_GET_RSA_OAEP_LABEL: if (rctx->pad_mode != RSA_PKCS1_OAEP_PADDING) { @@ -590,7 +547,9 @@ const EVP_PKEY_METHOD rsa_pkey_meth = { pkey_rsa_cleanup, pkey_rsa_keygen, pkey_rsa_sign, + NULL /* sign_message */, pkey_rsa_verify, + NULL /* verify_message */, pkey_rsa_verify_recover, pkey_rsa_encrypt, pkey_rsa_decrypt, @@ -654,13 +613,9 @@ int EVP_PKEY_CTX_get_rsa_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md) { int EVP_PKEY_CTX_set0_rsa_oaep_label(EVP_PKEY_CTX *ctx, uint8_t *label, size_t label_len) { - if (label_len > INT_MAX) { - return 0; - } - + RSA_OAEP_LABEL_PARAMS params = {label, label_len}; return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT, - EVP_PKEY_CTRL_RSA_OAEP_LABEL, (int)label_len, - (void *)label); + EVP_PKEY_CTRL_RSA_OAEP_LABEL, 0, ¶ms); } int 
EVP_PKEY_CTX_get0_rsa_oaep_label(EVP_PKEY_CTX *ctx, diff --git a/Sources/BoringSSL/crypto/evp/p_rsa_asn1.c b/Sources/BoringSSL/crypto/evp/p_rsa_asn1.c index 2c4b266d9..85f6fc837 100644 --- a/Sources/BoringSSL/crypto/evp/p_rsa_asn1.c +++ b/Sources/BoringSSL/crypto/evp/p_rsa_asn1.c @@ -62,12 +62,12 @@ #include #include -#include "../rsa/internal.h" +#include "../fipsmodule/rsa/internal.h" #include "internal.h" static int rsa_pub_encode(CBB *out, const EVP_PKEY *key) { - /* See RFC 3279, section 2.3.1. */ + // See RFC 3279, section 2.3.1. CBB spki, algorithm, oid, null, key_bitstring; if (!CBB_add_asn1(out, &spki, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&spki, &algorithm, CBS_ASN1_SEQUENCE) || @@ -86,9 +86,9 @@ static int rsa_pub_encode(CBB *out, const EVP_PKEY *key) { } static int rsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* See RFC 3279, section 2.3.1. */ + // See RFC 3279, section 2.3.1. - /* The parameters must be NULL. */ + // The parameters must be NULL. CBS null; if (!CBS_get_asn1(params, &null, CBS_ASN1_NULL) || CBS_len(&null) != 0 || @@ -97,13 +97,7 @@ static int rsa_pub_decode(EVP_PKEY *out, CBS *params, CBS *key) { return 0; } - /* Estonian IDs issued between September 2014 to September 2015 are - * broken. See https://crbug.com/532048 and https://crbug.com/534766. - * - * TODO(davidben): Switch this to the strict version in March 2016 or when - * Chromium can force client certificates down a different codepath, whichever - * comes first. */ - RSA *rsa = RSA_parse_public_key_buggy(key); + RSA *rsa = RSA_parse_public_key(key); if (rsa == NULL || CBS_len(key) != 0) { OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR); RSA_free(rsa); @@ -138,7 +132,7 @@ static int rsa_priv_encode(CBB *out, const EVP_PKEY *key) { } static int rsa_priv_decode(EVP_PKEY *out, CBS *params, CBS *key) { - /* Per RFC 3447, A.1, the parameters have type NULL. */ + // Per RFC 3447, A.1, the parameters have type NULL. CBS null; if (!CBS_get_asn1(params, &null, CBS_ASN1_NULL) || CBS_len(&null) != 0 || @@ -162,23 +156,19 @@ static int rsa_opaque(const EVP_PKEY *pkey) { return RSA_is_opaque(pkey->pkey.rsa); } -static int rsa_supports_digest(const EVP_PKEY *pkey, const EVP_MD *md) { - return RSA_supports_digest(pkey->pkey.rsa, md); -} - static int int_rsa_size(const EVP_PKEY *pkey) { return RSA_size(pkey->pkey.rsa); } static int rsa_bits(const EVP_PKEY *pkey) { - return BN_num_bits(pkey->pkey.rsa->n); + return RSA_bits(pkey->pkey.rsa); } static void int_rsa_free(EVP_PKEY *pkey) { RSA_free(pkey->pkey.rsa); } const EVP_PKEY_ASN1_METHOD rsa_asn1_meth = { EVP_PKEY_RSA, - /* 1.2.840.113549.1.1.1 */ + // 1.2.840.113549.1.1.1 {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01}, 9, rsa_pub_decode, @@ -189,7 +179,6 @@ const EVP_PKEY_ASN1_METHOD rsa_asn1_meth = { rsa_priv_encode, rsa_opaque, - rsa_supports_digest, int_rsa_size, rsa_bits, diff --git a/Sources/BoringSSL/crypto/evp/pbkdf.c b/Sources/BoringSSL/crypto/evp/pbkdf.c index 1792cdc81..f23a74bd8 100644 --- a/Sources/BoringSSL/crypto/evp/pbkdf.c +++ b/Sources/BoringSSL/crypto/evp/pbkdf.c @@ -65,83 +65,76 @@ int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len, const uint8_t *salt, size_t salt_len, unsigned iterations, const EVP_MD *digest, size_t key_len, uint8_t *out_key) { - uint8_t digest_tmp[EVP_MAX_MD_SIZE], *p, itmp[4]; - size_t cplen, mdlen, tkeylen, k; - unsigned j; + // See RFC 8018, section 5.2. 
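// In RFC 8018's notation, the loop below computes each output block as
//   T_i = U_1 XOR U_2 XOR ... XOR U_c, with
//   U_1 = PRF(P, S || INT(i)) and U_j = PRF(P, U_{j-1}),
// where PRF is HMAC keyed with the password. A hypothetical caller deriving
// a 256-bit key (buffer and input names are placeholders):
//   uint8_t key[32];
//   if (!PKCS5_PBKDF2_HMAC(password, password_len, salt, salt_len,
//                          100000, EVP_sha256(), sizeof(key), key)) {
//     /* handle error */
//   }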
+ int ret = 0; + size_t md_len = EVP_MD_size(digest); uint32_t i = 1; - HMAC_CTX hctx_tpl, hctx; - - mdlen = EVP_MD_size(digest); - HMAC_CTX_init(&hctx_tpl); - p = out_key; - tkeylen = key_len; - if (!HMAC_Init_ex(&hctx_tpl, password, password_len, digest, NULL)) { - HMAC_CTX_cleanup(&hctx_tpl); - return 0; + HMAC_CTX hctx; + HMAC_CTX_init(&hctx); + + if (!HMAC_Init_ex(&hctx, password, password_len, digest, NULL)) { + goto err; } - while (tkeylen) { - if (tkeylen > mdlen) { - cplen = mdlen; - } else { - cplen = tkeylen; - } - /* We are unlikely to ever use more than 256 blocks (5120 bits!) - * but just in case... */ - itmp[0] = (uint8_t)((i >> 24) & 0xff); - itmp[1] = (uint8_t)((i >> 16) & 0xff); - itmp[2] = (uint8_t)((i >> 8) & 0xff); - itmp[3] = (uint8_t)(i & 0xff); - if (!HMAC_CTX_copy(&hctx, &hctx_tpl)) { - HMAC_CTX_cleanup(&hctx_tpl); - return 0; + + while (key_len > 0) { + size_t todo = md_len; + if (todo > key_len) { + todo = key_len; } - if (!HMAC_Update(&hctx, salt, salt_len) || - !HMAC_Update(&hctx, itmp, 4) || + + uint8_t i_buf[4]; + i_buf[0] = (uint8_t)((i >> 24) & 0xff); + i_buf[1] = (uint8_t)((i >> 16) & 0xff); + i_buf[2] = (uint8_t)((i >> 8) & 0xff); + i_buf[3] = (uint8_t)(i & 0xff); + + // Compute U_1. + uint8_t digest_tmp[EVP_MAX_MD_SIZE]; + if (!HMAC_Init_ex(&hctx, NULL, 0, NULL, NULL) || + !HMAC_Update(&hctx, salt, salt_len) || + !HMAC_Update(&hctx, i_buf, 4) || !HMAC_Final(&hctx, digest_tmp, NULL)) { - HMAC_CTX_cleanup(&hctx_tpl); - HMAC_CTX_cleanup(&hctx); - return 0; + goto err; } - HMAC_CTX_cleanup(&hctx); - OPENSSL_memcpy(p, digest_tmp, cplen); - for (j = 1; j < iterations; j++) { - if (!HMAC_CTX_copy(&hctx, &hctx_tpl)) { - HMAC_CTX_cleanup(&hctx_tpl); - return 0; - } - if (!HMAC_Update(&hctx, digest_tmp, mdlen) || + + OPENSSL_memcpy(out_key, digest_tmp, todo); + for (unsigned j = 1; j < iterations; j++) { + // Compute the remaining U_* values and XOR. + if (!HMAC_Init_ex(&hctx, NULL, 0, NULL, NULL) || + !HMAC_Update(&hctx, digest_tmp, md_len) || !HMAC_Final(&hctx, digest_tmp, NULL)) { - HMAC_CTX_cleanup(&hctx_tpl); - HMAC_CTX_cleanup(&hctx); - return 0; + goto err; } - HMAC_CTX_cleanup(&hctx); - for (k = 0; k < cplen; k++) { - p[k] ^= digest_tmp[k]; + for (size_t k = 0; k < todo; k++) { + out_key[k] ^= digest_tmp[k]; } } - tkeylen -= cplen; + + key_len -= todo; + out_key += todo; i++; - p += cplen; } - HMAC_CTX_cleanup(&hctx_tpl); - // RFC 2898 describes iterations (c) as being a "positive integer", so a + // RFC 8018 describes iterations (c) as being a "positive integer", so a // value of 0 is an error. // - // Unfortunatley not all consumers of PKCS5_PBKDF2_HMAC() check their return - // value, expecting it to succeed and unconditonally using |out_key|. - // As a precaution for such callsites in external code, the old behavior - // of iterations < 1 being treated as iterations == 1 is preserved, but + // Unfortunately not all consumers of PKCS5_PBKDF2_HMAC() check their return + // value, expecting it to succeed and unconditionally using |out_key|. As a + // precaution for such callsites in external code, the old behavior of + // iterations < 1 being treated as iterations == 1 is preserved, but // additionally an error result is returned. // // TODO(eroman): Figure out how to remove this compatibility hack, or change // the default to something more sensible like 2048. 
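For context, a hedged caller-side sketch of PKCS5_PBKDF2_HMAC as rewritten above. The 100000-iteration count and 32-byte output length are illustrative values chosen for the example, not recommendations made by this patch:

#include <openssl/digest.h>
#include <openssl/evp.h>

// Derives a 32-byte key from |password| and |salt| with PBKDF2-HMAC-SHA256.
// Returns 1 on success, 0 on failure.
static int derive_key(const char *password, size_t password_len,
                      const uint8_t *salt, size_t salt_len,
                      uint8_t out_key[32]) {
  return PKCS5_PBKDF2_HMAC(password, password_len, salt, salt_len,
                           /*iterations=*/100000, EVP_sha256(),
                           /*key_len=*/32, out_key);
}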
if (iterations == 0) { - return 0; + goto err; } - return 1; + ret = 1; + +err: + HMAC_CTX_cleanup(&hctx); + return ret; } int PKCS5_PBKDF2_HMAC_SHA1(const char *password, size_t password_len, diff --git a/Sources/BoringSSL/crypto/evp/print.c b/Sources/BoringSSL/crypto/evp/print.c index b2e350982..3621d5f2c 100644 --- a/Sources/BoringSSL/crypto/evp/print.c +++ b/Sources/BoringSSL/crypto/evp/print.c @@ -61,7 +61,7 @@ #include #include "../internal.h" -#include "../rsa/internal.h" +#include "../fipsmodule/rsa/internal.h" static int bn_print(BIO *bp, const char *number, const BIGNUM *num, @@ -131,7 +131,7 @@ static void update_buflen(const BIGNUM *b, size_t *pbuflen) { } } -/* RSA keys. */ +// RSA keys. static int do_rsa_print(BIO *out, const RSA *rsa, int off, int include_private) { @@ -150,17 +150,6 @@ static int do_rsa_print(BIO *out, const RSA *rsa, int off, update_buflen(rsa->dmp1, &buf_len); update_buflen(rsa->dmq1, &buf_len); update_buflen(rsa->iqmp, &buf_len); - - if (rsa->additional_primes != NULL) { - for (size_t i = 0; - i < sk_RSA_additional_prime_num(rsa->additional_primes); i++) { - const RSA_additional_prime *ap = - sk_RSA_additional_prime_value(rsa->additional_primes, i); - update_buflen(ap->prime, &buf_len); - update_buflen(ap->exp, &buf_len); - update_buflen(ap->coeff, &buf_len); - } - } } m = (uint8_t *)OPENSSL_malloc(buf_len + 10); @@ -204,26 +193,6 @@ static int do_rsa_print(BIO *out, const RSA *rsa, int off, !bn_print(out, "coefficient:", rsa->iqmp, m, off)) { goto err; } - - if (rsa->additional_primes != NULL && - sk_RSA_additional_prime_num(rsa->additional_primes) > 0) { - if (BIO_printf(out, "otherPrimeInfos:\n") <= 0) { - goto err; - } - for (size_t i = 0; - i < sk_RSA_additional_prime_num(rsa->additional_primes); i++) { - const RSA_additional_prime *ap = - sk_RSA_additional_prime_value(rsa->additional_primes, i); - - if (BIO_printf(out, "otherPrimeInfo (prime %u):\n", - (unsigned)(i + 3)) <= 0 || - !bn_print(out, "prime:", ap->prime, m, off) || - !bn_print(out, "exponent:", ap->exp, m, off) || - !bn_print(out, "coeff:", ap->coeff, m, off)) { - goto err; - } - } - } } ret = 1; @@ -243,7 +212,7 @@ static int rsa_priv_print(BIO *bp, const EVP_PKEY *pkey, int indent, } -/* DSA keys. */ +// DSA keys. static int do_dsa_print(BIO *bp, const DSA *x, int off, int ptype) { uint8_t *m = NULL; @@ -319,7 +288,7 @@ static int dsa_priv_print(BIO *bp, const EVP_PKEY *pkey, int indent, } -/* EC keys. */ +// EC keys. static int do_EC_KEY_print(BIO *bp, const EC_KEY *x, int off, int ktype) { uint8_t *buffer = NULL; @@ -410,7 +379,7 @@ static int do_EC_KEY_print(BIO *bp, const EC_KEY *x, int off, int ktype) { if (pub_key_bytes != NULL) { BIO_hexdump(bp, pub_key_bytes, pub_key_bytes_len, off); } - /* TODO(fork): implement */ + // TODO(fork): implement /* if (!ECPKParameters_print(bp, group, off)) goto err; */ diff --git a/Sources/BoringSSL/crypto/evp/scrypt.c b/Sources/BoringSSL/crypto/evp/scrypt.c new file mode 100644 index 000000000..ed186eed6 --- /dev/null +++ b/Sources/BoringSSL/crypto/evp/scrypt.c @@ -0,0 +1,209 @@ +/* + * Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include + +#include + +#include +#include +#include + +#include "../internal.h" + + +// This file implements scrypt, described in RFC 7914. +// +// Note scrypt refers to both "blocks" and a "block size" parameter, r. These +// are two different notions of blocks. A Salsa20 block is 64 bytes long, +// represented in this implementation by 16 |uint32_t|s. |r| determines the +// number of 64-byte Salsa20 blocks in a scryptBlockMix block, which is 2 * |r| +// Salsa20 blocks. This implementation refers to them as Salsa20 blocks and +// scrypt blocks, respectively. + +// A block_t is a Salsa20 block. +typedef struct { uint32_t words[16]; } block_t; + +OPENSSL_COMPILE_ASSERT(sizeof(block_t) == 64, block_t_has_padding); + +#define R(a, b) (((a) << (b)) | ((a) >> (32 - (b)))) + +// salsa208_word_specification implements the Salsa20/8 core function, also +// described in RFC 7914, section 3. It modifies the block at |inout| +// in-place. +static void salsa208_word_specification(block_t *inout) { + block_t x; + OPENSSL_memcpy(&x, inout, sizeof(x)); + + for (int i = 8; i > 0; i -= 2) { + x.words[4] ^= R(x.words[0] + x.words[12], 7); + x.words[8] ^= R(x.words[4] + x.words[0], 9); + x.words[12] ^= R(x.words[8] + x.words[4], 13); + x.words[0] ^= R(x.words[12] + x.words[8], 18); + x.words[9] ^= R(x.words[5] + x.words[1], 7); + x.words[13] ^= R(x.words[9] + x.words[5], 9); + x.words[1] ^= R(x.words[13] + x.words[9], 13); + x.words[5] ^= R(x.words[1] + x.words[13], 18); + x.words[14] ^= R(x.words[10] + x.words[6], 7); + x.words[2] ^= R(x.words[14] + x.words[10], 9); + x.words[6] ^= R(x.words[2] + x.words[14], 13); + x.words[10] ^= R(x.words[6] + x.words[2], 18); + x.words[3] ^= R(x.words[15] + x.words[11], 7); + x.words[7] ^= R(x.words[3] + x.words[15], 9); + x.words[11] ^= R(x.words[7] + x.words[3], 13); + x.words[15] ^= R(x.words[11] + x.words[7], 18); + x.words[1] ^= R(x.words[0] + x.words[3], 7); + x.words[2] ^= R(x.words[1] + x.words[0], 9); + x.words[3] ^= R(x.words[2] + x.words[1], 13); + x.words[0] ^= R(x.words[3] + x.words[2], 18); + x.words[6] ^= R(x.words[5] + x.words[4], 7); + x.words[7] ^= R(x.words[6] + x.words[5], 9); + x.words[4] ^= R(x.words[7] + x.words[6], 13); + x.words[5] ^= R(x.words[4] + x.words[7], 18); + x.words[11] ^= R(x.words[10] + x.words[9], 7); + x.words[8] ^= R(x.words[11] + x.words[10], 9); + x.words[9] ^= R(x.words[8] + x.words[11], 13); + x.words[10] ^= R(x.words[9] + x.words[8], 18); + x.words[12] ^= R(x.words[15] + x.words[14], 7); + x.words[13] ^= R(x.words[12] + x.words[15], 9); + x.words[14] ^= R(x.words[13] + x.words[12], 13); + x.words[15] ^= R(x.words[14] + x.words[13], 18); + } + + for (int i = 0; i < 16; ++i) { + inout->words[i] += x.words[i]; + } +} + +// xor_block sets |*out| to be |*a| XOR |*b|. +static void xor_block(block_t *out, const block_t *a, const block_t *b) { + for (size_t i = 0; i < 16; i++) { + out->words[i] = a->words[i] ^ b->words[i]; + } +} + +// scryptBlockMix implements the function described in RFC 7914, section 4. B' +// is written to |out|. |out| and |B| may not alias and must be each one scrypt +// block (2 * |r| Salsa20 blocks) long. 
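The permutation mentioned in step 3 sends intermediate block Y_i to output slot i/2 for even i and r + i/2 for odd i, which is what the out[i / 2 + (i & 1) * r] store in scryptBlockMix (defined next) implements. A throwaway sketch of that index mapping in plain C, with r = 2 as an illustrative parameter:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint64_t r = 2;  // illustrative scrypt block-size parameter
  for (uint64_t i = 0; i < 2 * r; i++) {
    // Matches out[i / 2 + (i & 1) * r] below: even blocks land in the first
    // half, odd blocks in the second half, preserving relative order.
    printf("Y_%llu -> B'[%llu]\n", (unsigned long long)i,
           (unsigned long long)(i / 2 + (i & 1) * r));
  }
  return 0;
}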
+static void scryptBlockMix(block_t *out, const block_t *B, uint64_t r) { + assert(out != B); + + block_t X; + OPENSSL_memcpy(&X, &B[r * 2 - 1], sizeof(X)); + for (uint64_t i = 0; i < r * 2; i++) { + xor_block(&X, &X, &B[i]); + salsa208_word_specification(&X); + + // This implements the permutation in step 3. + OPENSSL_memcpy(&out[i / 2 + (i & 1) * r], &X, sizeof(X)); + } +} + +// scryptROMix implements the function described in RFC 7914, section 5. |B| is +// an scrypt block (2 * |r| Salsa20 blocks) and is modified in-place. |T| and +// |V| are scratch space allocated by the caller. |T| must have space for one +// scrypt block (2 * |r| Salsa20 blocks). |V| must have space for |N| scrypt +// blocks (2 * |r| * |N| Salsa20 blocks). +static void scryptROMix(block_t *B, uint64_t r, uint64_t N, block_t *T, + block_t *V) { + // Steps 1 and 2. + OPENSSL_memcpy(V, B, 2 * r * sizeof(block_t)); + for (uint64_t i = 1; i < N; i++) { + scryptBlockMix(&V[2 * r * i /* scrypt block i */], + &V[2 * r * (i - 1) /* scrypt block i-1 */], r); + } + scryptBlockMix(B, &V[2 * r * (N - 1) /* scrypt block N-1 */], r); + + // Step 3. + for (uint64_t i = 0; i < N; i++) { + // Note this assumes |N| <= 2^32 and is a power of 2. + uint32_t j = B[2 * r - 1].words[0] & (N - 1); + for (size_t k = 0; k < 2 * r; k++) { + xor_block(&T[k], &B[k], &V[2 * r * j + k]); + } + scryptBlockMix(B, T, r); + } +} + +// SCRYPT_PR_MAX is the maximum value of p * r. This is equivalent to the +// bounds on p in section 6: +// +// p <= ((2^32-1) * hLen) / MFLen iff +// p <= ((2^32-1) * 32) / (128 * r) iff +// p * r <= (2^30-1) +#define SCRYPT_PR_MAX ((1 << 30) - 1) + +// SCRYPT_MAX_MEM is the default maximum memory that may be allocated by +// |EVP_PBE_scrypt|. +#define SCRYPT_MAX_MEM (1024 * 1024 * 32) + +int EVP_PBE_scrypt(const char *password, size_t password_len, + const uint8_t *salt, size_t salt_len, uint64_t N, uint64_t r, + uint64_t p, size_t max_mem, uint8_t *out_key, + size_t key_len) { + if (r == 0 || p == 0 || p > SCRYPT_PR_MAX / r || + // |N| must be a power of two. + N < 2 || (N & (N - 1)) || + // We only support |N| <= 2^32 in |scryptROMix|. + N > UINT64_C(1) << 32 || + // Check that |N| < 2^(128×r / 8). + (16 * r <= 63 && N >= UINT64_C(1) << (16 * r))) { + OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PARAMETERS); + return 0; + } + + // Determine the amount of memory needed. B, T, and V are |p|, 1, and |N| + // scrypt blocks, respectively. Each scrypt block is 2*|r| |block_t|s. + if (max_mem == 0) { + max_mem = SCRYPT_MAX_MEM; + } + + size_t max_scrypt_blocks = max_mem / (2 * r * sizeof(block_t)); + if (max_scrypt_blocks < p + 1 || + max_scrypt_blocks - p - 1 < N) { + OPENSSL_PUT_ERROR(EVP, EVP_R_MEMORY_LIMIT_EXCEEDED); + return 0; + } + + // Allocate and divide up the scratch space. |max_mem| fits in a size_t, which + // is no bigger than uint64_t, so none of these operations may overflow. 
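Restating the parameter limits enforced at the top of EVP_PBE_scrypt as a standalone sketch (a reader aid under the same bounds, not part of the library):

#include <stdint.h>

// Mirrors the checks above: p*r bounded per RFC 7914 section 6, and N a power
// of two with 1 < N <= 2^32 and N < 2^(128*r/8).
static int scrypt_params_ok(uint64_t N, uint64_t r, uint64_t p) {
  const uint64_t kPrMax = (UINT64_C(1) << 30) - 1;
  if (r == 0 || p == 0 || p > kPrMax / r) {
    return 0;
  }
  if (N < 2 || (N & (N - 1)) != 0 || N > UINT64_C(1) << 32) {
    return 0;
  }
  if (16 * r <= 63 && N >= UINT64_C(1) << (16 * r)) {
    return 0;
  }
  return 1;
}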
+ OPENSSL_COMPILE_ASSERT(UINT64_MAX >= ((size_t)-1), size_t_exceeds_u64); + size_t B_blocks = p * 2 * r; + size_t B_bytes = B_blocks * sizeof(block_t); + size_t T_blocks = 2 * r; + size_t V_blocks = N * 2 * r; + block_t *B = OPENSSL_malloc((B_blocks + T_blocks + V_blocks) * sizeof(block_t)); + if (B == NULL) { + OPENSSL_PUT_ERROR(EVP, ERR_R_MALLOC_FAILURE); + return 0; + } + + int ret = 0; + block_t *T = B + B_blocks; + block_t *V = T + T_blocks; + if (!PKCS5_PBKDF2_HMAC(password, password_len, salt, salt_len, 1, + EVP_sha256(), B_bytes, (uint8_t *)B)) { + goto err; + } + + for (uint64_t i = 0; i < p; i++) { + scryptROMix(B + 2 * r * i, r, N, T, V); + } + + if (!PKCS5_PBKDF2_HMAC(password, password_len, (const uint8_t *)B, B_bytes, 1, + EVP_sha256(), key_len, out_key)) { + goto err; + } + + ret = 1; + +err: + OPENSSL_free(B); + return ret; +} diff --git a/Sources/BoringSSL/crypto/ex_data.c b/Sources/BoringSSL/crypto/ex_data.c index 528651331..71d60a528 100644 --- a/Sources/BoringSSL/crypto/ex_data.c +++ b/Sources/BoringSSL/crypto/ex_data.c @@ -113,7 +113,6 @@ #include #include -#include #include #include #include @@ -121,16 +120,16 @@ #include "internal.h" +DEFINE_STACK_OF(CRYPTO_EX_DATA_FUNCS) + struct crypto_ex_data_func_st { - long argl; /* Arbitary long */ - void *argp; /* Arbitary void pointer */ + long argl; // Arbitary long + void *argp; // Arbitary void pointer CRYPTO_EX_free *free_func; - CRYPTO_EX_dup *dup_func; }; int CRYPTO_get_ex_new_index(CRYPTO_EX_DATA_CLASS *ex_data_class, int *out_index, - long argl, void *argp, CRYPTO_EX_dup *dup_func, - CRYPTO_EX_free *free_func) { + long argl, void *argp, CRYPTO_EX_free *free_func) { CRYPTO_EX_DATA_FUNCS *funcs; int ret = 0; @@ -142,7 +141,6 @@ int CRYPTO_get_ex_new_index(CRYPTO_EX_DATA_CLASS *ex_data_class, int *out_index, funcs->argl = argl; funcs->argp = argp; - funcs->dup_func = dup_func; funcs->free_func = free_func; CRYPTO_STATIC_MUTEX_lock_write(&ex_data_class->lock); @@ -180,7 +178,7 @@ int CRYPTO_set_ex_data(CRYPTO_EX_DATA *ad, int index, void *val) { n = sk_void_num(ad->sk); - /* Add NULL values until the stack is long enough. */ + // Add NULL values until the stack is long enough. for (i = n; i <= index; i++) { if (!sk_void_push(ad->sk, NULL)) { OPENSSL_PUT_ERROR(CRYPTO, ERR_R_MALLOC_FAILURE); @@ -199,19 +197,19 @@ void *CRYPTO_get_ex_data(const CRYPTO_EX_DATA *ad, int idx) { return sk_void_value(ad->sk, idx); } -/* get_func_pointers takes a copy of the CRYPTO_EX_DATA_FUNCS pointers, if any, - * for the given class. If there are some pointers, it sets |*out| to point to - * a fresh stack of them. Otherwise it sets |*out| to NULL. It returns one on - * success or zero on error. */ +// get_func_pointers takes a copy of the CRYPTO_EX_DATA_FUNCS pointers, if any, +// for the given class. If there are some pointers, it sets |*out| to point to +// a fresh stack of them. Otherwise it sets |*out| to NULL. It returns one on +// success or zero on error. static int get_func_pointers(STACK_OF(CRYPTO_EX_DATA_FUNCS) **out, CRYPTO_EX_DATA_CLASS *ex_data_class) { size_t n; *out = NULL; - /* CRYPTO_EX_DATA_FUNCS structures are static once set, so we can take a - * shallow copy of the list under lock and then use the structures without - * the lock held. */ + // CRYPTO_EX_DATA_FUNCS structures are static once set, so we can take a + // shallow copy of the list under lock and then use the structures without + // the lock held. 
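A hedged usage sketch for the EVP_PBE_scrypt entry point added in scrypt.c above. N=16384, r=8, p=1 are the commonly cited example cost parameters (roughly 16 MiB of scratch, under the 32 MiB default ceiling selected by max_mem = 0); the key size is illustrative:

#include <openssl/evp.h>

// Derives a 32-byte key with scrypt. Returns 1 on success, 0 on failure.
static int derive_scrypt_key(const char *password, size_t password_len,
                             const uint8_t *salt, size_t salt_len,
                             uint8_t out_key[32]) {
  return EVP_PBE_scrypt(password, password_len, salt, salt_len,
                        /*N=*/16384, /*r=*/8, /*p=*/1, /*max_mem=*/0,
                        out_key, 32);
}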
CRYPTO_STATIC_MUTEX_lock_read(&ex_data_class->lock); n = sk_CRYPTO_EX_DATA_FUNCS_num(ex_data_class->meth); if (n > 0) { @@ -231,45 +229,16 @@ void CRYPTO_new_ex_data(CRYPTO_EX_DATA *ad) { ad->sk = NULL; } -int CRYPTO_dup_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, CRYPTO_EX_DATA *to, - const CRYPTO_EX_DATA *from) { - if (from->sk == NULL) { - /* In this case, |from| is blank, which is also the initial state of |to|, - * so there's nothing to do. */ - return 1; - } - - STACK_OF(CRYPTO_EX_DATA_FUNCS) *func_pointers; - if (!get_func_pointers(&func_pointers, ex_data_class)) { - return 0; - } - - for (size_t i = 0; i < sk_CRYPTO_EX_DATA_FUNCS_num(func_pointers); i++) { - CRYPTO_EX_DATA_FUNCS *func_pointer = - sk_CRYPTO_EX_DATA_FUNCS_value(func_pointers, i); - void *ptr = CRYPTO_get_ex_data(from, i + ex_data_class->num_reserved); - if (func_pointer->dup_func) { - func_pointer->dup_func(to, from, &ptr, i + ex_data_class->num_reserved, - func_pointer->argl, func_pointer->argp); - } - CRYPTO_set_ex_data(to, i + ex_data_class->num_reserved, ptr); - } - - sk_CRYPTO_EX_DATA_FUNCS_free(func_pointers); - - return 1; -} - void CRYPTO_free_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, void *obj, CRYPTO_EX_DATA *ad) { if (ad->sk == NULL) { - /* Nothing to do. */ + // Nothing to do. return; } STACK_OF(CRYPTO_EX_DATA_FUNCS) *func_pointers; if (!get_func_pointers(&func_pointers, ex_data_class)) { - /* TODO(davidben): This leaks memory on malloc error. */ + // TODO(davidben): This leaks memory on malloc error. return; } diff --git a/Sources/BoringSSL/crypto/aes/aes.c b/Sources/BoringSSL/crypto/fipsmodule/aes/aes.c similarity index 95% rename from Sources/BoringSSL/crypto/aes/aes.c rename to Sources/BoringSSL/crypto/fipsmodule/aes/aes.c index 1aed63e51..a988b3959 100644 --- a/Sources/BoringSSL/crypto/aes/aes.c +++ b/Sources/BoringSSL/crypto/fipsmodule/aes/aes.c @@ -49,26 +49,26 @@ #include #include -#include #include #include "internal.h" +#include "../modes/internal.h" #if defined(OPENSSL_NO_ASM) || \ (!defined(OPENSSL_X86) && !defined(OPENSSL_X86_64) && !defined(OPENSSL_ARM)) -/* Te0[x] = S [x].[02, 01, 01, 03]; - * Te1[x] = S [x].[03, 02, 01, 01]; - * Te2[x] = S [x].[01, 03, 02, 01]; - * Te3[x] = S [x].[01, 01, 03, 02]; - * - * Td0[x] = Si[x].[0e, 09, 0d, 0b]; - * Td1[x] = Si[x].[0b, 0e, 09, 0d]; - * Td2[x] = Si[x].[0d, 0b, 0e, 09]; - * Td3[x] = Si[x].[09, 0d, 0b, 0e]; - * Td4[x] = Si[x].[01]; */ +// Te0[x] = S [x].[02, 01, 01, 03]; +// Te1[x] = S [x].[03, 02, 01, 01]; +// Te2[x] = S [x].[01, 03, 02, 01]; +// Te3[x] = S [x].[01, 01, 03, 02]; +// +// Td0[x] = Si[x].[0e, 09, 0d, 0b]; +// Td1[x] = Si[x].[0b, 0e, 09, 0d]; +// Td2[x] = Si[x].[0d, 0b, 0e, 09]; +// Td3[x] = Si[x].[09, 0d, 0b, 0e]; +// Td4[x] = Si[x].[01]; static const uint32_t Te0[256] = { 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, 0xfff2f20dU, @@ -531,7 +531,7 @@ static const uint8_t Td4[256] = { static const uint32_t rcon[] = { 0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000, - /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */ + // for 128-bit blocks, Rijndael never uses more than 10 rcon values }; int AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { @@ -634,7 +634,7 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { int i, j, status; uint32_t temp; - /* first, start with an encryption schedule */ + // first, start with an encryption schedule status = AES_set_encrypt_key(key, bits, aeskey); if 
(status < 0) { return status; @@ -642,7 +642,7 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { rk = aeskey->rd_key; - /* invert the order of the round keys: */ + // invert the order of the round keys: for (i = 0, j = 4 * aeskey->rounds; i < j; i += 4, j -= 4) { temp = rk[i]; rk[i] = rk[j]; @@ -657,8 +657,8 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { rk[i + 3] = rk[j + 3]; rk[j + 3] = temp; } - /* apply the inverse MixColumn transform to all round keys but the first and - * the last: */ + // apply the inverse MixColumn transform to all round keys but the first and + // the last: for (i = 1; i < (int)aeskey->rounds; i++) { rk += 4; rk[0] = @@ -682,19 +682,19 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { uint32_t s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; -#endif /* ?FULL_UNROLL */ +#endif // ?FULL_UNROLL assert(in && out && key); rk = key->rd_key; - /* map byte array block to cipher state - * and add initial round key: */ + // map byte array block to cipher state + // and add initial round key: s0 = GETU32(in) ^ rk[0]; s1 = GETU32(in + 4) ^ rk[1]; s2 = GETU32(in + 8) ^ rk[2]; s3 = GETU32(in + 12) ^ rk[3]; #ifdef FULL_UNROLL - /* round 1: */ + // round 1: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[4]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -703,7 +703,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[s1 & 0xff] ^ rk[6]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[7]; - /* round 2: */ + // round 2: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[8]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -712,7 +712,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[10]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[11]; - /* round 3: */ + // round 3: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[12]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -721,7 +721,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[s1 & 0xff] ^ rk[14]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[15]; - /* round 4: */ + // round 4: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[16]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -730,7 +730,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[18]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[19]; - /* round 5: */ + // round 5: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[20]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -739,7 +739,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[s1 & 0xff] ^ rk[22]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[23]; - /* round 6: */ + // round 6: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[24]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -748,7 +748,7 @@ void AES_encrypt(const uint8_t *in, uint8_t 
*out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[26]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[27]; - /* round 7: */ + // round 7: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[28]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -757,7 +757,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[s1 & 0xff] ^ rk[30]; t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[31]; - /* round 8: */ + // round 8: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[32]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -766,7 +766,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[34]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[35]; - /* round 9: */ + // round 9: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[36]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -776,7 +776,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[39]; if (key->rounds > 10) { - /* round 10: */ + // round 10: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[40]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -785,7 +785,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[42]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[43]; - /* round 11: */ + // round 11: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[44]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -795,7 +795,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ rk[47]; if (key->rounds > 12) { - /* round 12: */ + // round 12: s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ rk[48]; s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ @@ -804,7 +804,7 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Te3[t1 & 0xff] ^ rk[50]; s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ rk[51]; - /* round 13: */ + // round 13: t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ rk[52]; t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ @@ -816,10 +816,8 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { } } rk += key->rounds << 2; -#else /* !FULL_UNROLL */ - /* - * Nr - 1 full rounds: - */ +#else // !FULL_UNROLL + // Nr - 1 full rounds: r = key->rounds >> 1; for (;;) { t0 = Te0[(s0 >> 24)] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ @@ -845,8 +843,8 @@ void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { s3 = Te0[(t3 >> 24)] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[(t2) & 0xff] ^ rk[3]; } -#endif /* ?FULL_UNROLL */ - /* apply last round and map cipher state to byte array block: */ +#endif // ?FULL_UNROLL + // apply last round and map cipher state to byte array block: s0 = (Te2[(t0 >> 24)] & 0xff000000) ^ (Te3[(t1 
>> 16) & 0xff] & 0x00ff0000) ^ (Te0[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t3) & 0xff] & 0x000000ff) ^ rk[0]; @@ -870,19 +868,19 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { uint32_t s0, s1, s2, s3, t0, t1, t2, t3; #ifndef FULL_UNROLL int r; -#endif /* ?FULL_UNROLL */ +#endif // ?FULL_UNROLL assert(in && out && key); rk = key->rd_key; - /* map byte array block to cipher state - * and add initial round key: */ + // map byte array block to cipher state + // and add initial round key: s0 = GETU32(in) ^ rk[0]; s1 = GETU32(in + 4) ^ rk[1]; s2 = GETU32(in + 8) ^ rk[2]; s3 = GETU32(in + 12) ^ rk[3]; #ifdef FULL_UNROLL - /* round 1: */ + // round 1: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[4]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -891,7 +889,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[s3 & 0xff] ^ rk[6]; t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[7]; - /* round 2: */ + // round 2: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[8]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -900,7 +898,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[10]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[11]; - /* round 3: */ + // round 3: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[12]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -909,7 +907,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[s3 & 0xff] ^ rk[14]; t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[15]; - /* round 4: */ + // round 4: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[16]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -918,7 +916,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[18]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[19]; - /* round 5: */ + // round 5: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[20]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -927,7 +925,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[s3 & 0xff] ^ rk[22]; t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[23]; - /* round 6: */ + // round 6: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[24]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -936,7 +934,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[26]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[27]; - /* round 7: */ + // round 7: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[28]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -945,7 +943,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[s3 & 0xff] ^ rk[30]; t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[31]; - /* round 8: */ + // round 8: s0 = Td0[t0 >> 24] ^ 
Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[32]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -954,7 +952,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[34]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[35]; - /* round 9: */ + // round 9: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[36]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -964,7 +962,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[39]; if (key->rounds > 10) { - /* round 10: */ + // round 10: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[40]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -973,7 +971,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[42]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[43]; - /* round 11: */ + // round 11: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[44]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -983,7 +981,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^ Td3[s0 & 0xff] ^ rk[47]; if (key->rounds > 12) { - /* round 12: */ + // round 12: s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^ Td3[t1 & 0xff] ^ rk[48]; s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^ @@ -992,7 +990,7 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { Td3[t3 & 0xff] ^ rk[50]; s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[t0 & 0xff] ^ rk[51]; - /* round 13: */ + // round 13: t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ Td3[s1 & 0xff] ^ rk[52]; t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^ @@ -1004,10 +1002,8 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { } } rk += key->rounds << 2; -#else /* !FULL_UNROLL */ - /* - * Nr - 1 full rounds: - */ +#else // !FULL_UNROLL + // Nr - 1 full rounds: r = key->rounds >> 1; for (;;) { t0 = Td0[(s0 >> 24)] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^ @@ -1033,9 +1029,9 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { s3 = Td0[(t3 >> 24)] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^ Td3[(t0) & 0xff] ^ rk[3]; } -#endif /* ?FULL_UNROLL */ - /* apply last round and - * map cipher state to byte array block: */ +#endif // ?FULL_UNROLL + // apply last round and + // map cipher state to byte array block: s0 = ((uint32_t)Td4[(t0 >> 24)] << 24) ^ ((uint32_t)Td4[(t3 >> 16) & 0xff] << 16) ^ ((uint32_t)Td4[(t2 >> 8) & 0xff] << 8) ^ @@ -1060,48 +1056,10 @@ void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { #else -#if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) - -static int hwaes_capable(void) { - return CRYPTO_is_ARMv8_AES_capable(); -} - -int aes_hw_set_encrypt_key(const uint8_t *user_key, const int bits, - AES_KEY *key); -int aes_hw_set_decrypt_key(const uint8_t *user_key, const int bits, - AES_KEY *key); -void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); -void aes_hw_decrypt(const uint8_t 
*in, uint8_t *out, const AES_KEY *key); - -#else - -static int hwaes_capable(void) { - return 0; -} - -static int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, AES_KEY *key) { - abort(); -} - -static int aes_hw_set_decrypt_key(const uint8_t *user_key, int bits, AES_KEY *key) { - abort(); -} - -static void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { - abort(); -} - -static void aes_hw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { - abort(); -} - -#endif - - -/* In this case several functions are provided by asm code. However, one cannot - * control asm symbol visibility with command line flags and such so they are - * always hidden and wrapped by these C functions, which can be so - * controlled. */ +// In this case several functions are provided by asm code. However, one cannot +// control asm symbol visibility with command line flags and such so they are +// always hidden and wrapped by these C functions, which can be so +// controlled. void asm_AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { @@ -1139,4 +1097,4 @@ int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) { } } -#endif /* OPENSSL_NO_ASM || (!OPENSSL_X86 && !OPENSSL_X86_64 && !OPENSSL_ARM) */ +#endif // OPENSSL_NO_ASM || (!OPENSSL_X86 && !OPENSSL_X86_64 && !OPENSSL_ARM) diff --git a/Sources/BoringSSL/crypto/fipsmodule/aes/internal.h b/Sources/BoringSSL/crypto/fipsmodule/aes/internal.h new file mode 100644 index 000000000..45db9eec6 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/aes/internal.h @@ -0,0 +1,100 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +#ifndef OPENSSL_HEADER_AES_INTERNAL_H +#define OPENSSL_HEADER_AES_INTERNAL_H + +#include + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + + +#if !defined(OPENSSL_NO_ASM) && (defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) +#define HWAES + +static int hwaes_capable(void) { + return CRYPTO_is_ARMv8_AES_capable(); +} +#endif // !NO_ASM && (AES || AARCH64) + +#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_PPC64LE) +#define HWAES + +static int hwaes_capable(void) { + return CRYPTO_is_PPC64LE_vcrypto_capable(); +} +#endif // !NO_ASM && PPC64LE + + +#if defined(HWAES) + +int aes_hw_set_encrypt_key(const uint8_t *user_key, const int bits, + AES_KEY *key); +int aes_hw_set_decrypt_key(const uint8_t *user_key, const int bits, + AES_KEY *key); +void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); +void aes_hw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); +void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, + const AES_KEY *key, uint8_t *ivec, const int enc); +void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len, + const AES_KEY *key, const uint8_t ivec[16]); + +#else + +// If HWAES isn't defined then we provide dummy functions for each of the hwaes +// functions. +static int hwaes_capable(void) { return 0; } + +static int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, + AES_KEY *key) { + abort(); +} + +static int aes_hw_set_decrypt_key(const uint8_t *user_key, int bits, + AES_KEY *key) { + abort(); +} + +static void aes_hw_encrypt(const uint8_t *in, uint8_t *out, + const AES_KEY *key) { + abort(); +} + +static void aes_hw_decrypt(const uint8_t *in, uint8_t *out, + const AES_KEY *key) { + abort(); +} + +static void aes_hw_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, + const AES_KEY *key, uint8_t *ivec, int enc) { + abort(); +} + +static void aes_hw_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, + size_t len, const AES_KEY *key, + const uint8_t ivec[16]) { + abort(); +} + +#endif // !HWAES + +#if defined(__cplusplus) +} // extern C +#endif + +#endif // OPENSSL_HEADER_AES_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/aes/key_wrap.c b/Sources/BoringSSL/crypto/fipsmodule/aes/key_wrap.c similarity index 96% rename from Sources/BoringSSL/crypto/aes/key_wrap.c rename to Sources/BoringSSL/crypto/fipsmodule/aes/key_wrap.c index 23553b7ad..feee0c72c 100644 --- a/Sources/BoringSSL/crypto/aes/key_wrap.c +++ b/Sources/BoringSSL/crypto/fipsmodule/aes/key_wrap.c @@ -53,10 +53,10 @@ #include -#include "../internal.h" +#include "../../internal.h" -/* kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1. */ +// kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1. static const uint8_t kDefaultIV[] = { 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, }; @@ -65,7 +65,7 @@ static const unsigned kBound = 6; int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len) { - /* See RFC 3394, section 2.2.1. */ + // See RFC 3394, section 2.2.1. if (in_len > INT_MAX - 8 || in_len < 8 || in_len % 8 != 0) { return -1; @@ -101,7 +101,7 @@ int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, int AES_unwrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len) { - /* See RFC 3394, section 2.2.2. */ + // See RFC 3394, section 2.2.2. 
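A usage sketch for the relocated RFC 3394 key-wrap routines, wrapping a 16-byte key under a 128-bit KEK. Passing a NULL IV selects the default 0xa6... IV shown above, and the error checks follow the negative-return convention visible in this hunk; the fixed sizes are illustrative:

#include <openssl/aes.h>

// Wraps the 16-byte |plain_key| under |kek|; |wrapped| receives 16 + 8 = 24
// bytes per RFC 3394. Returns 1 on success, 0 on failure.
static int wrap_key16(const uint8_t kek[16], const uint8_t plain_key[16],
                      uint8_t wrapped[24]) {
  AES_KEY aes_key;
  if (AES_set_encrypt_key(kek, 128, &aes_key) != 0) {
    return 0;
  }
  // A NULL IV means "use the RFC 3394 default IV".
  return AES_wrap_key(&aes_key, NULL, wrapped, plain_key, 16) >= 0;
}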
if (in_len > INT_MAX || in_len < 16 || in_len % 8 != 0) { return -1; diff --git a/Sources/BoringSSL/crypto/aes/mode_wrappers.c b/Sources/BoringSSL/crypto/fipsmodule/aes/mode_wrappers.c similarity index 98% rename from Sources/BoringSSL/crypto/aes/mode_wrappers.c rename to Sources/BoringSSL/crypto/fipsmodule/aes/mode_wrappers.c index 4929920f0..34514db51 100644 --- a/Sources/BoringSSL/crypto/aes/mode_wrappers.c +++ b/Sources/BoringSSL/crypto/fipsmodule/aes/mode_wrappers.c @@ -92,7 +92,7 @@ void AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, asm_AES_cbc_encrypt(in, out, len, key, ivec, enc); } -#endif /* OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86) */ +#endif // OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86) void AES_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t length, const AES_KEY *key, uint8_t *ivec, int *num) { diff --git a/Sources/BoringSSL/crypto/bn/add.c b/Sources/BoringSSL/crypto/fipsmodule/bn/add.c similarity index 86% rename from Sources/BoringSSL/crypto/bn/add.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/add.c index cfa3bbe39..201c526d1 100644 --- a/Sources/BoringSSL/crypto/bn/add.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/add.c @@ -68,20 +68,19 @@ int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { const BIGNUM *tmp; int a_neg = a->neg, ret; - /* a + b a+b - * a + -b a-b - * -a + b b-a - * -a + -b -(a+b) - */ + // a + b a+b + // a + -b a-b + // -a + b b-a + // -a + -b -(a+b) if (a_neg ^ b->neg) { - /* only one is negative */ + // only one is negative if (a_neg) { tmp = a; a = b; b = tmp; } - /* we are now a - b */ + // we are now a - b if (BN_ucmp(a, b) < 0) { if (!BN_usub(r, b, a)) { return 0; @@ -115,7 +114,7 @@ int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { min = b->top; dif = max - min; - if (bn_wexpand(r, max + 1) == NULL) { + if (!bn_wexpand(r, max + 1)) { return 0; } @@ -134,7 +133,7 @@ int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { while (dif) { dif--; t1 = *(ap++); - t2 = (t1 + 1) & BN_MASK2; + t2 = t1 + 1; *(rp++) = t2; if (t2) { carry = 0; @@ -142,7 +141,7 @@ int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { } } if (carry) { - /* carry != 0 => dif == 0 */ + // carry != 0 => dif == 0 *rp = 1; r->top++; } @@ -150,7 +149,7 @@ int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { if (dif && rp != ap) { while (dif--) { - /* copy remaining words if ap != rp */ + // copy remaining words if ap != rp *(rp++) = *(ap++); } } @@ -163,19 +162,17 @@ int BN_add_word(BIGNUM *a, BN_ULONG w) { BN_ULONG l; int i; - w &= BN_MASK2; - - /* degenerate case: w is zero */ + // degenerate case: w is zero if (!w) { return 1; } - /* degenerate case: a is zero */ + // degenerate case: a is zero if (BN_is_zero(a)) { return BN_set_word(a, w); } - /* handle 'a' when negative */ + // handle 'a' when negative if (a->neg) { a->neg = 0; i = BN_sub_word(a, w); @@ -186,12 +183,12 @@ int BN_add_word(BIGNUM *a, BN_ULONG w) { } for (i = 0; w != 0 && i < a->top; i++) { - a->d[i] = l = (a->d[i] + w) & BN_MASK2; + a->d[i] = l = a->d[i] + w; w = (w > l) ? 
1 : 0; } if (w && i == a->top) { - if (bn_wexpand(a, a->top + 1) == NULL) { + if (!bn_wexpand(a, a->top + 1)) { return 0; } a->top++; @@ -206,11 +203,10 @@ int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { int add = 0, neg = 0; const BIGNUM *tmp; - /* a - b a-b - * a - -b a+b - * -a - b -(a+b) - * -a - -b b-a - */ + // a - b a-b + // a - -b a+b + // -a - b -(a+b) + // -a - -b b-a if (a->neg) { if (b->neg) { tmp = a; @@ -236,10 +232,10 @@ int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { return 1; } - /* We are actually doing a - b :-) */ + // We are actually doing a - b :-) max = (a->top > b->top) ? a->top : b->top; - if (bn_wexpand(r, max) == NULL) { + if (!bn_wexpand(r, max)) { return 0; } @@ -267,13 +263,13 @@ int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { min = b->top; dif = max - min; - if (dif < 0) /* hmm... should not be happening */ + if (dif < 0) // hmm... should not be happening { OPENSSL_PUT_ERROR(BN, BN_R_ARG2_LT_ARG3); return 0; } - if (bn_wexpand(r, max) == NULL) { + if (!bn_wexpand(r, max)) { return 0; } @@ -287,25 +283,25 @@ int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { t2 = *(bp++); if (carry) { carry = (t1 <= t2); - t1 = (t1 - t2 - 1) & BN_MASK2; + t1 -= t2 + 1; } else { carry = (t1 < t2); - t1 = (t1 - t2) & BN_MASK2; + t1 -= t2; } - *(rp++) = t1 & BN_MASK2; + *(rp++) = t1; } - if (carry) /* subtracted */ + if (carry) // subtracted { if (!dif) { - /* error: a < b */ + // error: a < b return 0; } while (dif) { dif--; t1 = *(ap++); - t2 = (t1 - 1) & BN_MASK2; + t2 = t1 - 1; *(rp++) = t2; if (t1) { break; @@ -327,14 +323,12 @@ int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b) { int BN_sub_word(BIGNUM *a, BN_ULONG w) { int i; - w &= BN_MASK2; - - /* degenerate case: w is zero */ + // degenerate case: w is zero if (!w) { return 1; } - /* degenerate case: a is zero */ + // degenerate case: a is zero if (BN_is_zero(a)) { i = BN_set_word(a, w); if (i != 0) { @@ -343,7 +337,7 @@ int BN_sub_word(BIGNUM *a, BN_ULONG w) { return i; } - /* handle 'a' when negative */ + // handle 'a' when negative if (a->neg) { a->neg = 0; i = BN_add_word(a, w); @@ -363,7 +357,7 @@ int BN_sub_word(BIGNUM *a, BN_ULONG w) { a->d[i] -= w; break; } else { - a->d[i] = (a->d[i] - w) & BN_MASK2; + a->d[i] -= w; i++; w = 1; } diff --git a/Sources/BoringSSL/crypto/bn/bn.c b/Sources/BoringSSL/crypto/fipsmodule/bn/bn.c similarity index 92% rename from Sources/BoringSSL/crypto/bn/bn.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/bn.c index e3c55f281..4ed6ab056 100644 --- a/Sources/BoringSSL/crypto/bn/bn.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/bn.c @@ -63,6 +63,7 @@ #include #include "internal.h" +#include "../delocate.h" BIGNUM *BN_new(void) { @@ -107,16 +108,18 @@ void BN_clear_free(BIGNUM *bn) { } if (bn->d != NULL) { - OPENSSL_cleanse(bn->d, bn->dmax * sizeof(bn->d[0])); if ((bn->flags & BN_FLG_STATIC_DATA) == 0) { OPENSSL_free(bn->d); + } else { + OPENSSL_cleanse(bn->d, bn->dmax * sizeof(bn->d[0])); } } should_free = (bn->flags & BN_FLG_MALLOCED) != 0; - OPENSSL_cleanse(bn, sizeof(BIGNUM)); if (should_free) { OPENSSL_free(bn); + } else { + OPENSSL_cleanse(bn, sizeof(BIGNUM)); } } @@ -145,7 +148,7 @@ BIGNUM *BN_copy(BIGNUM *dest, const BIGNUM *src) { return dest; } - if (bn_wexpand(dest, src->top) == NULL) { + if (!bn_wexpand(dest, src->top)) { return NULL; } @@ -165,15 +168,17 @@ void BN_clear(BIGNUM *bn) { bn->neg = 0; } -const BIGNUM *BN_value_one(void) { +DEFINE_METHOD_FUNCTION(BIGNUM, BN_value_one) { static const BN_ULONG kOneLimbs[1] = { 1 }; - 
static const BIGNUM kOne = STATIC_BIGNUM(kOneLimbs); - - return &kOne; + out->d = (BN_ULONG*) kOneLimbs; + out->top = 1; + out->dmax = 1; + out->neg = 0; + out->flags = BN_FLG_STATIC_DATA; } -/* BN_num_bits_word returns the minimum number of bits needed to represent the - * value in |l|. */ +// BN_num_bits_word returns the minimum number of bits needed to represent the +// value in |l|. unsigned BN_num_bits_word(BN_ULONG l) { static const unsigned char bits[256] = { 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, @@ -250,7 +255,7 @@ int BN_set_word(BIGNUM *bn, BN_ULONG value) { return 1; } - if (bn_wexpand(bn, 1) == NULL) { + if (!bn_wexpand(bn, 1)) { return 0; } @@ -268,7 +273,7 @@ int BN_set_u64(BIGNUM *bn, uint64_t value) { return BN_set_word(bn, (BN_ULONG)value); } - if (bn_wexpand(bn, 2) == NULL) { + if (!bn_wexpand(bn, 2)) { return 0; } @@ -283,11 +288,11 @@ int BN_set_u64(BIGNUM *bn, uint64_t value) { } int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num) { - if (bn_wexpand(bn, num) == NULL) { + if (!bn_wexpand(bn, num)) { return 0; } OPENSSL_memmove(bn->d, words, num * sizeof(BN_ULONG)); - /* |bn_wexpand| verified that |num| isn't too large. */ + // |bn_wexpand| verified that |num| isn't too large. bn->top = (int)num; bn_correct_top(bn); bn->neg = 0; @@ -306,27 +311,27 @@ void BN_set_negative(BIGNUM *bn, int sign) { } } -BIGNUM *bn_wexpand(BIGNUM *bn, size_t words) { +int bn_wexpand(BIGNUM *bn, size_t words) { BN_ULONG *a; if (words <= (size_t)bn->dmax) { - return bn; + return 1; } if (words > (INT_MAX / (4 * BN_BITS2))) { OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG); - return NULL; + return 0; } if (bn->flags & BN_FLG_STATIC_DATA) { OPENSSL_PUT_ERROR(BN, BN_R_EXPAND_ON_STATIC_BIGNUM_DATA); - return NULL; + return 0; } a = OPENSSL_malloc(sizeof(BN_ULONG) * words); if (a == NULL) { OPENSSL_PUT_ERROR(BN, ERR_R_MALLOC_FAILURE); - return NULL; + return 0; } OPENSSL_memcpy(a, bn->d, sizeof(BN_ULONG) * bn->top); @@ -335,13 +340,13 @@ BIGNUM *bn_wexpand(BIGNUM *bn, size_t words) { bn->d = a; bn->dmax = (int)words; - return bn; + return 1; } -BIGNUM *bn_expand(BIGNUM *bn, size_t bits) { +int bn_expand(BIGNUM *bn, size_t bits) { if (bits + BN_BITS2 - 1 < bits) { OPENSSL_PUT_ERROR(BN, BN_R_BIGNUM_TOO_LONG); - return NULL; + return 0; } return bn_wexpand(bn, (bits+BN_BITS2-1)/BN_BITS2); } diff --git a/Sources/BoringSSL/crypto/fipsmodule/bn/bytes.c b/Sources/BoringSSL/crypto/fipsmodule/bn/bytes.c new file mode 100644 index 000000000..328d56e74 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/bytes.c @@ -0,0 +1,269 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. 
+ * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] */ + +#include + +#include +#include + +#include "internal.h" + + +BIGNUM *BN_bin2bn(const uint8_t *in, size_t len, BIGNUM *ret) { + size_t num_words; + unsigned m; + BN_ULONG word = 0; + BIGNUM *bn = NULL; + + if (ret == NULL) { + ret = bn = BN_new(); + } + + if (ret == NULL) { + return NULL; + } + + if (len == 0) { + ret->top = 0; + return ret; + } + + num_words = ((len - 1) / BN_BYTES) + 1; + m = (len - 1) % BN_BYTES; + if (!bn_wexpand(ret, num_words)) { + if (bn) { + BN_free(bn); + } + return NULL; + } + + // |bn_wexpand| must check bounds on |num_words| to write it into + // |ret->dmax|. + assert(num_words <= INT_MAX); + ret->top = (int)num_words; + ret->neg = 0; + + while (len--) { + word = (word << 8) | *(in++); + if (m-- == 0) { + ret->d[--num_words] = word; + word = 0; + m = BN_BYTES - 1; + } + } + + // need to call this due to clear byte at top if avoiding having the top bit + // set (-ve number) + bn_correct_top(ret); + return ret; +} + +BIGNUM *BN_le2bn(const uint8_t *in, size_t len, BIGNUM *ret) { + BIGNUM *bn = NULL; + if (ret == NULL) { + bn = BN_new(); + ret = bn; + } + + if (ret == NULL) { + return NULL; + } + + if (len == 0) { + ret->top = 0; + ret->neg = 0; + return ret; + } + + // Reserve enough space in |ret|. 
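A hedged round-trip sketch for the conversions gathered in the new bytes.c: BN_bin2bn (above) parses big-endian bytes, and BN_bn2bin_padded (further down in this file) re-serializes left-padded to a fixed width without branching on the value. The 32-byte width is illustrative:

#include <openssl/bn.h>

// Parses a big-endian integer of at most 32 bytes and re-serializes it
// left-padded to exactly 32 bytes. Returns 1 on success, 0 on failure.
static int roundtrip32(const uint8_t *in, size_t in_len, uint8_t out[32]) {
  BIGNUM *bn = BN_bin2bn(in, in_len, NULL);
  if (bn == NULL) {
    return 0;
  }
  int ok = BN_bn2bin_padded(out, 32, bn);
  BN_free(bn);
  return ok;
}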
+ size_t num_words = ((len - 1) / BN_BYTES) + 1; + if (!bn_wexpand(ret, num_words)) { + BN_free(bn); + return NULL; + } + ret->top = num_words; + + // Make sure the top bytes will be zeroed. + ret->d[num_words - 1] = 0; + + // We only support little-endian platforms, so we can simply memcpy the + // internal representation. + OPENSSL_memcpy(ret->d, in, len); + + bn_correct_top(ret); + return ret; +} + +size_t BN_bn2bin(const BIGNUM *in, uint8_t *out) { + size_t n, i; + BN_ULONG l; + + n = i = BN_num_bytes(in); + while (i--) { + l = in->d[i / BN_BYTES]; + *(out++) = (unsigned char)(l >> (8 * (i % BN_BYTES))) & 0xff; + } + return n; +} + +int BN_bn2le_padded(uint8_t *out, size_t len, const BIGNUM *in) { + // If we don't have enough space, fail out. + size_t num_bytes = BN_num_bytes(in); + if (len < num_bytes) { + return 0; + } + + // We only support little-endian platforms, so we can simply memcpy into the + // internal representation. + OPENSSL_memcpy(out, in->d, num_bytes); + + // Pad out the rest of the buffer with zeroes. + OPENSSL_memset(out + num_bytes, 0, len - num_bytes); + + return 1; +} + +// constant_time_select_ulong returns |x| if |v| is 1 and |y| if |v| is 0. Its +// behavior is undefined if |v| takes any other value. +static BN_ULONG constant_time_select_ulong(int v, BN_ULONG x, BN_ULONG y) { + BN_ULONG mask = v; + mask--; + + return (~mask & x) | (mask & y); +} + +// constant_time_le_size_t returns 1 if |x| <= |y| and 0 otherwise. |x| and |y| +// must not have their MSBs set. +static int constant_time_le_size_t(size_t x, size_t y) { + return ((x - y - 1) >> (sizeof(size_t) * 8 - 1)) & 1; +} + +// read_word_padded returns the |i|'th word of |in|, if it is not out of +// bounds. Otherwise, it returns 0. It does so without branches on the size of +// |in|, however it necessarily does not have the same memory access pattern. If +// the access would be out of bounds, it reads the last word of |in|. |in| must +// not be zero. +static BN_ULONG read_word_padded(const BIGNUM *in, size_t i) { + // Read |in->d[i]| if valid. Otherwise, read the last word. + BN_ULONG l = in->d[constant_time_select_ulong( + constant_time_le_size_t(in->dmax, i), in->dmax - 1, i)]; + + // Clamp to zero if above |d->top|. + return constant_time_select_ulong(constant_time_le_size_t(in->top, i), 0, l); +} + +int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) { + // Special case for |in| = 0. Just branch as the probability is negligible. + if (BN_is_zero(in)) { + OPENSSL_memset(out, 0, len); + return 1; + } + + // Check if the integer is too big. This case can exit early in non-constant + // time. + if ((size_t)in->top > (len + (BN_BYTES - 1)) / BN_BYTES) { + return 0; + } + if ((len % BN_BYTES) != 0) { + BN_ULONG l = read_word_padded(in, len / BN_BYTES); + if (l >> (8 * (len % BN_BYTES)) != 0) { + return 0; + } + } + + // Write the bytes out one by one. Serialization is done without branching on + // the bits of |in| or on |in->top|, but if the routine would otherwise read + // out of bounds, the memory access pattern can't be fixed. However, for an + // RSA key of size a multiple of the word size, the probability of BN_BYTES + // leading zero octets is low. + // + // See Falko Stenzke, "Manger's Attack revisited", ICICS 2010. 
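constant_time_select_ulong above is the usual mask trick; a standalone illustration in plain C, with no BoringSSL types assumed:

#include <stdint.h>

// If v == 1 the mask underflows to 0 and x is returned; if v == 0 the mask
// becomes all ones and y is returned. No branch depends on v.
static uint64_t select_u64(int v, uint64_t x, uint64_t y) {
  uint64_t mask = (uint64_t)v - 1;
  return (~mask & x) | (mask & y);
}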
+ size_t i = len; + while (i--) { + BN_ULONG l = read_word_padded(in, i / BN_BYTES); + *(out++) = (uint8_t)(l >> (8 * (i % BN_BYTES))) & 0xff; + } + return 1; +} + +BN_ULONG BN_get_word(const BIGNUM *bn) { + switch (bn->top) { + case 0: + return 0; + case 1: + return bn->d[0]; + default: + return BN_MASK2; + } +} + +int BN_get_u64(const BIGNUM *bn, uint64_t *out) { + switch (bn->top) { + case 0: + *out = 0; + return 1; + case 1: + *out = bn->d[0]; + return 1; +#if defined(OPENSSL_32_BIT) + case 2: + *out = (uint64_t) bn->d[0] | (((uint64_t) bn->d[1]) << 32); + return 1; +#endif + default: + return 0; + } +} diff --git a/Sources/BoringSSL/crypto/bn/cmp.c b/Sources/BoringSSL/crypto/fipsmodule/bn/cmp.c similarity index 90% rename from Sources/BoringSSL/crypto/bn/cmp.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/cmp.c index 71c04658c..acc017ff6 100644 --- a/Sources/BoringSSL/crypto/bn/cmp.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/cmp.c @@ -57,8 +57,10 @@ #include #include +#include #include "internal.h" +#include "../../internal.h" int BN_ucmp(const BIGNUM *a, const BIGNUM *b) { @@ -159,14 +161,14 @@ int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl) { if (dl < 0) { for (i = dl; i < 0; i++) { if (b[n - i] != 0) { - return -1; /* a < b */ + return -1; // a < b } } } if (dl > 0) { for (i = dl; i > 0; i--) { if (a[n + i] != 0) { - return 1; /* a > b */ + return 1; // a > b } } } @@ -174,6 +176,19 @@ int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl) { return bn_cmp_words(a, b, cl); } +int bn_less_than_words(const BN_ULONG *a, const BN_ULONG *b, size_t len) { + OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t), + crypto_word_t_too_small); + int ret = 0; + // Process the words in little-endian order. 
+ for (size_t i = 0; i < len; i++) { + crypto_word_t eq = constant_time_eq_w(a[i], b[i]); + crypto_word_t lt = constant_time_lt_w(a[i], b[i]); + ret = constant_time_select_int(eq, ret, constant_time_select_int(lt, 1, 0)); + } + return ret; +} + int BN_abs_is_word(const BIGNUM *bn, BN_ULONG w) { switch (bn->top) { case 1: diff --git a/Sources/BoringSSL/crypto/bn/ctx.c b/Sources/BoringSSL/crypto/fipsmodule/bn/ctx.c similarity index 80% rename from Sources/BoringSSL/crypto/bn/ctx.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/ctx.c index bca6619af..af50de939 100644 --- a/Sources/BoringSSL/crypto/bn/ctx.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/ctx.c @@ -59,27 +59,27 @@ #include #include -#include "../internal.h" +#include "../../internal.h" -/* How many bignums are in each "pool item"; */ +// How many bignums are in each "pool item"; #define BN_CTX_POOL_SIZE 16 -/* The stack frame info is resizing, set a first-time expansion size; */ +// The stack frame info is resizing, set a first-time expansion size; #define BN_CTX_START_FRAMES 32 -/* A bundle of bignums that can be linked with other bundles */ +// A bundle of bignums that can be linked with other bundles typedef struct bignum_pool_item { - /* The bignum values */ + // The bignum values BIGNUM vals[BN_CTX_POOL_SIZE]; - /* Linked-list admin */ + // Linked-list admin struct bignum_pool_item *prev, *next; } BN_POOL_ITEM; typedef struct bignum_pool { - /* Linked-list admin */ + // Linked-list admin BN_POOL_ITEM *head, *current, *tail; - /* Stack depth and allocation size */ + // Stack depth and allocation size unsigned used, size; } BN_POOL; @@ -88,15 +88,14 @@ static void BN_POOL_finish(BN_POOL *); static BIGNUM *BN_POOL_get(BN_POOL *); static void BN_POOL_release(BN_POOL *, unsigned int); -/************/ -/* BN_STACK */ -/************/ -/* A wrapper to manage the "stack frames" */ +// BN_STACK + +// A wrapper to manage the "stack frames" typedef struct bignum_ctx_stack { - /* Array of indexes into the bignum stack */ + // Array of indexes into the bignum stack unsigned int *indexes; - /* Number of stack frames, and the size of the allocated array */ + // Number of stack frames, and the size of the allocated array unsigned int depth, size; } BN_STACK; @@ -105,21 +104,20 @@ static void BN_STACK_finish(BN_STACK *); static int BN_STACK_push(BN_STACK *, unsigned int); static unsigned int BN_STACK_pop(BN_STACK *); -/**********/ -/* BN_CTX */ -/**********/ -/* The opaque BN_CTX type */ +// BN_CTX + +// The opaque BN_CTX type struct bignum_ctx { - /* The bignum bundles */ + // The bignum bundles BN_POOL pool; - /* The "stack frames", if you will */ + // The "stack frames", if you will BN_STACK stack; - /* The number of bignums currently assigned */ + // The number of bignums currently assigned unsigned int used; - /* Depth of stack overflow */ + // Depth of stack overflow int err_stack; - /* Block "gets" until an "end" (compatibility behaviour) */ + // Block "gets" until an "end" (compatibility behaviour) int too_many; }; @@ -130,7 +128,7 @@ BN_CTX *BN_CTX_new(void) { return NULL; } - /* Initialise the structure */ + // Initialise the structure BN_POOL_init(&ret->pool); BN_STACK_init(&ret->stack); ret->used = 0; @@ -150,11 +148,11 @@ void BN_CTX_free(BN_CTX *ctx) { } void BN_CTX_start(BN_CTX *ctx) { - /* If we're already overflowing ... */ + // If we're already overflowing ... 
if (ctx->err_stack || ctx->too_many) { ctx->err_stack++; } else if (!BN_STACK_push(&ctx->stack, ctx->used)) { - /* (Try to) get a new frame pointer */ + // (Try to) get a new frame pointer OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES); ctx->err_stack++; } @@ -168,14 +166,14 @@ BIGNUM *BN_CTX_get(BN_CTX *ctx) { ret = BN_POOL_get(&ctx->pool); if (ret == NULL) { - /* Setting too_many prevents repeated "get" attempts from - * cluttering the error stack. */ + // Setting too_many prevents repeated "get" attempts from + // cluttering the error stack. ctx->too_many = 1; OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_TEMPORARY_VARIABLES); return NULL; } - /* OK, make sure the returned bignum is "zero" */ + // OK, make sure the returned bignum is "zero" BN_zero(ret); ctx->used++; return ret; @@ -186,20 +184,19 @@ void BN_CTX_end(BN_CTX *ctx) { ctx->err_stack--; } else { unsigned int fp = BN_STACK_pop(&ctx->stack); - /* Does this stack frame have anything to release? */ + // Does this stack frame have anything to release? if (fp < ctx->used) { BN_POOL_release(&ctx->pool, ctx->used - fp); } ctx->used = fp; - /* Unjam "too_many" in case "get" had failed */ + // Unjam "too_many" in case "get" had failed ctx->too_many = 0; } } -/************/ -/* BN_STACK */ -/************/ + +// BN_STACK static void BN_STACK_init(BN_STACK *st) { st->indexes = NULL; @@ -212,7 +209,7 @@ static void BN_STACK_finish(BN_STACK *st) { static int BN_STACK_push(BN_STACK *st, unsigned int idx) { if (st->depth == st->size) { - /* Need to expand */ + // Need to expand unsigned int newsize = (st->size ? (st->size * 3 / 2) : BN_CTX_START_FRAMES); unsigned int *newitems = OPENSSL_malloc(newsize * sizeof(unsigned int)); @@ -235,6 +232,7 @@ static unsigned int BN_STACK_pop(BN_STACK *st) { return st->indexes[--(st->depth)]; } + static void BN_POOL_init(BN_POOL *p) { p->head = p->current = p->tail = NULL; p->used = p->size = 0; @@ -242,13 +240,8 @@ static void BN_POOL_init(BN_POOL *p) { static void BN_POOL_finish(BN_POOL *p) { while (p->head) { - unsigned int loop = 0; - BIGNUM *bn = p->head->vals; - while (loop++ < BN_CTX_POOL_SIZE) { - if (bn->d) { - BN_clear_free(bn); - } - bn++; + for (size_t i = 0; i < BN_CTX_POOL_SIZE; i++) { + BN_clear_free(&p->head->vals[i]); } p->current = p->head->next; @@ -259,22 +252,19 @@ static void BN_POOL_finish(BN_POOL *p) { static BIGNUM *BN_POOL_get(BN_POOL *p) { if (p->used == p->size) { - BIGNUM *bn; - unsigned int loop = 0; BN_POOL_ITEM *item = OPENSSL_malloc(sizeof(BN_POOL_ITEM)); if (!item) { return NULL; } - /* Initialise the structure */ - bn = item->vals; - while (loop++ < BN_CTX_POOL_SIZE) { - BN_init(bn++); + // Initialise the structure + for (size_t i = 0; i < BN_CTX_POOL_SIZE; i++) { + BN_init(&item->vals[i]); } item->prev = p->tail; item->next = NULL; - /* Link it in */ + // Link it in if (!p->head) { p->head = p->current = p->tail = item; } else { @@ -285,7 +275,7 @@ static BIGNUM *BN_POOL_get(BN_POOL *p) { p->size += BN_CTX_POOL_SIZE; p->used++; - /* Return the first bignum from the new pool */ + // Return the first bignum from the new pool return item->vals; } diff --git a/Sources/BoringSSL/crypto/bn/div.c b/Sources/BoringSSL/crypto/fipsmodule/bn/div.c similarity index 68% rename from Sources/BoringSSL/crypto/bn/div.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/div.c index de3fa1f1f..c92eab365 100644 --- a/Sources/BoringSSL/crypto/bn/div.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/div.c @@ -65,8 +65,8 @@ #if !defined(BN_ULLONG) -/* bn_div_words divides a double-width |h|,|l| by 
|d| and returns the result, - * which must fit in a |BN_ULONG|. */ +// bn_div_words divides a double-width |h|,|l| by |d| and returns the result, +// which must fit in a |BN_ULONG|. static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) { BN_ULONG dh, dl, q, ret = 0, th, tl, t; int i, count = 2; @@ -128,33 +128,33 @@ static BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d) { } ret = q << BN_BITS4; - h = ((h << BN_BITS4) | (l >> BN_BITS4)) & BN_MASK2; + h = (h << BN_BITS4) | (l >> BN_BITS4); l = (l & BN_MASK2l) << BN_BITS4; } ret |= q; return ret; } -#endif /* !defined(BN_ULLONG) */ +#endif // !defined(BN_ULLONG) static inline void bn_div_rem_words(BN_ULONG *quotient_out, BN_ULONG *rem_out, BN_ULONG n0, BN_ULONG n1, BN_ULONG d0) { - /* GCC and Clang generate function calls to |__udivdi3| and |__umoddi3| when - * the |BN_ULLONG|-based C code is used. - * - * GCC bugs: - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224 - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721 - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54183 - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58897 - * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65668 - * - * Clang bugs: - * * https://llvm.org/bugs/show_bug.cgi?id=6397 - * * https://llvm.org/bugs/show_bug.cgi?id=12418 - * - * These issues aren't specific to x86 and x86_64, so it might be worthwhile - * to add more assembly language implementations. */ + // GCC and Clang generate function calls to |__udivdi3| and |__umoddi3| when + // the |BN_ULLONG|-based C code is used. + // + // GCC bugs: + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=14224 + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721 + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54183 + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58897 + // * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65668 + // + // Clang bugs: + // * https://llvm.org/bugs/show_bug.cgi?id=6397 + // * https://llvm.org/bugs/show_bug.cgi?id=12418 + // + // These issues aren't specific to x86 and x86_64, so it might be worthwhile + // to add more assembly language implementations. #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86) && defined(__GNUC__) __asm__ volatile ( "divl %4" @@ -178,28 +178,33 @@ static inline void bn_div_rem_words(BN_ULONG *quotient_out, BN_ULONG *rem_out, #endif } -/* BN_div computes dv := num / divisor, rounding towards - * zero, and sets up rm such that dv*divisor + rm = num holds. - * Thus: - * dv->neg == num->neg ^ divisor->neg (unless the result is zero) - * rm->neg == num->neg (unless the remainder is zero) - * If 'dv' or 'rm' is NULL, the respective value is not returned. - * - * This was specifically designed to contain fewer branches that may leak - * sensitive information; see "New Branch Prediction Vulnerabilities in OpenSSL - * and Necessary Software Countermeasures" by Onur Acıçmez, Shay Gueron, and - * Jean-Pierre Seifert. */ -int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, - BN_CTX *ctx) { - int norm_shift, i, loop; - BIGNUM *tmp, wnum, *snum, *sdiv, *res; +// BN_div computes "quotient := numerator / divisor", rounding towards zero, +// and sets up |rem| such that "quotient * divisor + rem = numerator" holds. +// +// Thus: +// +// quotient->neg == numerator->neg ^ divisor->neg +// (unless the result is zero) +// rem->neg == numerator->neg +// (unless the remainder is zero) +// +// If |quotient| or |rem| is NULL, the respective value is not returned. 
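+//
+// For example (illustrative values): numerator == -7 and divisor == 2 give
+// quotient == -3 and rem == -1, so (-3) * 2 + (-1) == -7; the quotient is
+// rounded towards zero and the remainder carries the numerator's sign.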
+// +// This was specifically designed to contain fewer branches that may leak +// sensitive information; see "New Branch Prediction Vulnerabilities in OpenSSL +// and Necessary Software Countermeasures" by Onur Acıçmez, Shay Gueron, and +// Jean-Pierre Seifert. +int BN_div(BIGNUM *quotient, BIGNUM *rem, const BIGNUM *numerator, + const BIGNUM *divisor, BN_CTX *ctx) { + int norm_shift, loop; + BIGNUM wnum; BN_ULONG *resp, *wnump; BN_ULONG d0, d1; int num_n, div_n; - /* Invalid zero-padding would have particularly bad consequences - * so don't just rely on bn_check_top() here */ - if ((num->top > 0 && num->d[num->top - 1] == 0) || + // Invalid zero-padding would have particularly bad consequences + // so don't just rely on bn_check_top() here + if ((numerator->top > 0 && numerator->d[numerator->top - 1] == 0) || (divisor->top > 0 && divisor->d[divisor->top - 1] == 0)) { OPENSSL_PUT_ERROR(BN, BN_R_NOT_INITIALIZED); return 0; @@ -211,43 +216,44 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, } BN_CTX_start(ctx); - tmp = BN_CTX_get(ctx); - snum = BN_CTX_get(ctx); - sdiv = BN_CTX_get(ctx); - if (dv == NULL) { + BIGNUM *tmp = BN_CTX_get(ctx); + BIGNUM *snum = BN_CTX_get(ctx); + BIGNUM *sdiv = BN_CTX_get(ctx); + BIGNUM *res = NULL; + if (quotient == NULL) { res = BN_CTX_get(ctx); } else { - res = dv; + res = quotient; } - if (sdiv == NULL || res == NULL || tmp == NULL || snum == NULL) { + if (sdiv == NULL || res == NULL) { goto err; } - /* First we normalise the numbers */ - norm_shift = BN_BITS2 - ((BN_num_bits(divisor)) % BN_BITS2); - if (!(BN_lshift(sdiv, divisor, norm_shift))) { + // First we normalise the numbers + norm_shift = BN_BITS2 - (BN_num_bits(divisor) % BN_BITS2); + if (!BN_lshift(sdiv, divisor, norm_shift)) { goto err; } sdiv->neg = 0; norm_shift += BN_BITS2; - if (!(BN_lshift(snum, num, norm_shift))) { + if (!BN_lshift(snum, numerator, norm_shift)) { goto err; } snum->neg = 0; - /* Since we don't want to have special-case logic for the case where snum is - * larger than sdiv, we pad snum with enough zeroes without changing its - * value. */ + // Since we don't want to have special-case logic for the case where snum is + // larger than sdiv, we pad snum with enough zeroes without changing its + // value. 
if (snum->top <= sdiv->top + 1) { - if (bn_wexpand(snum, sdiv->top + 2) == NULL) { + if (!bn_wexpand(snum, sdiv->top + 2)) { goto err; } - for (i = snum->top; i < sdiv->top + 2; i++) { + for (int i = snum->top; i < sdiv->top + 2; i++) { snum->d[i] = 0; } snum->top = sdiv->top + 2; } else { - if (bn_wexpand(snum, snum->top + 1) == NULL) { + if (!bn_wexpand(snum, snum->top + 1)) { goto err; } snum->d[snum->top] = 0; @@ -257,126 +263,128 @@ int BN_div(BIGNUM *dv, BIGNUM *rm, const BIGNUM *num, const BIGNUM *divisor, div_n = sdiv->top; num_n = snum->top; loop = num_n - div_n; - /* Lets setup a 'window' into snum - * This is the part that corresponds to the current - * 'area' being divided */ + // Lets setup a 'window' into snum + // This is the part that corresponds to the current + // 'area' being divided wnum.neg = 0; wnum.d = &(snum->d[loop]); wnum.top = div_n; - /* only needed when BN_ucmp messes up the values between top and max */ - wnum.dmax = snum->dmax - loop; /* so we don't step out of bounds */ + // only needed when BN_ucmp messes up the values between top and max + wnum.dmax = snum->dmax - loop; // so we don't step out of bounds - /* Get the top 2 words of sdiv */ - /* div_n=sdiv->top; */ + // Get the top 2 words of sdiv + // div_n=sdiv->top; d0 = sdiv->d[div_n - 1]; d1 = (div_n == 1) ? 0 : sdiv->d[div_n - 2]; - /* pointer to the 'top' of snum */ + // pointer to the 'top' of snum wnump = &(snum->d[num_n - 1]); - /* Setup to 'res' */ - res->neg = (num->neg ^ divisor->neg); - if (!bn_wexpand(res, (loop + 1))) { + // Setup to 'res' + res->neg = (numerator->neg ^ divisor->neg); + if (!bn_wexpand(res, loop + 1)) { goto err; } res->top = loop - 1; resp = &(res->d[loop - 1]); - /* space for temp */ - if (!bn_wexpand(tmp, (div_n + 1))) { + // space for temp + if (!bn_wexpand(tmp, div_n + 1)) { goto err; } - /* if res->top == 0 then clear the neg value otherwise decrease - * the resp pointer */ + // if res->top == 0 then clear the neg value otherwise decrease + // the resp pointer if (res->top == 0) { res->neg = 0; } else { resp--; } - for (i = 0; i < loop - 1; i++, wnump--, resp--) { + for (int i = 0; i < loop - 1; i++, wnump--, resp--) { BN_ULONG q, l0; - /* the first part of the loop uses the top two words of snum and sdiv to - * calculate a BN_ULONG q such that | wnum - sdiv * q | < sdiv */ - BN_ULONG n0, n1, rem = 0; + // the first part of the loop uses the top two words of snum and sdiv to + // calculate a BN_ULONG q such that | wnum - sdiv * q | < sdiv + BN_ULONG n0, n1, rm = 0; n0 = wnump[0]; n1 = wnump[-1]; if (n0 == d0) { q = BN_MASK2; } else { - /* n0 < d0 */ - bn_div_rem_words(&q, &rem, n0, n1, d0); + // n0 < d0 + bn_div_rem_words(&q, &rm, n0, n1, d0); #ifdef BN_ULLONG BN_ULLONG t2 = (BN_ULLONG)d1 * q; for (;;) { - if (t2 <= ((((BN_ULLONG)rem) << BN_BITS2) | wnump[-2])) { + if (t2 <= ((((BN_ULLONG)rm) << BN_BITS2) | wnump[-2])) { break; } q--; - rem += d0; - if (rem < d0) { - break; /* don't let rem overflow */ + rm += d0; + if (rm < d0) { + break; // don't let rm overflow } t2 -= d1; } -#else /* !BN_ULLONG */ +#else // !BN_ULLONG BN_ULONG t2l, t2h; BN_UMULT_LOHI(t2l, t2h, d1, q); for (;;) { - if ((t2h < rem) || ((t2h == rem) && (t2l <= wnump[-2]))) { + if (t2h < rm || + (t2h == rm && t2l <= wnump[-2])) { break; } q--; - rem += d0; - if (rem < d0) { - break; /* don't let rem overflow */ + rm += d0; + if (rm < d0) { + break; // don't let rm overflow } if (t2l < d1) { t2h--; } t2l -= d1; } -#endif /* !BN_ULLONG */ +#endif // !BN_ULLONG } l0 = bn_mul_words(tmp->d, sdiv->d, div_n, 
q); tmp->d[div_n] = l0; wnum.d--; - /* ingore top values of the bignums just sub the two - * BN_ULONG arrays with bn_sub_words */ + // ingore top values of the bignums just sub the two + // BN_ULONG arrays with bn_sub_words if (bn_sub_words(wnum.d, wnum.d, tmp->d, div_n + 1)) { - /* Note: As we have considered only the leading - * two BN_ULONGs in the calculation of q, sdiv * q - * might be greater than wnum (but then (q-1) * sdiv - * is less or equal than wnum) - */ + // Note: As we have considered only the leading + // two BN_ULONGs in the calculation of q, sdiv * q + // might be greater than wnum (but then (q-1) * sdiv + // is less or equal than wnum) q--; if (bn_add_words(wnum.d, wnum.d, sdiv->d, div_n)) { - /* we can't have an overflow here (assuming - * that q != 0, but if q == 0 then tmp is - * zero anyway) */ + // we can't have an overflow here (assuming + // that q != 0, but if q == 0 then tmp is + // zero anyway) (*wnump)++; } } - /* store part of the result */ + // store part of the result *resp = q; } + bn_correct_top(snum); - if (rm != NULL) { - /* Keep a copy of the neg flag in num because if rm==num - * BN_rshift() will overwrite it. - */ - int neg = num->neg; - if (!BN_rshift(rm, snum, norm_shift)) { + + if (rem != NULL) { + // Keep a copy of the neg flag in numerator because if |rem| == |numerator| + // |BN_rshift| will overwrite it. + int neg = numerator->neg; + if (!BN_rshift(rem, snum, norm_shift)) { goto err; } - if (!BN_is_zero(rm)) { - rm->neg = neg; + if (!BN_is_zero(rem)) { + rem->neg = neg; } } + bn_correct_top(res); BN_CTX_end(ctx); return 1; @@ -394,7 +402,7 @@ int BN_nnmod(BIGNUM *r, const BIGNUM *m, const BIGNUM *d, BN_CTX *ctx) { return 1; } - /* now -|d| < r < 0, so we have to set r := r + |d|. */ + // now -|d| < r < 0, so we have to set r := r + |d|. return (d->neg ? BN_sub : BN_add)(r, r, d); } @@ -425,8 +433,8 @@ int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, return BN_nnmod(r, r, m, ctx); } -/* BN_mod_sub variant that may be used if both a and b are non-negative - * and less than m */ +// BN_mod_sub variant that may be used if both a and b are non-negative +// and less than m int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m) { if (!BN_sub(r, a, b)) { @@ -475,7 +483,7 @@ int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx) { return 0; } - /* r->neg == 0, thus we don't need BN_nnmod */ + // r->neg == 0, thus we don't need BN_nnmod return BN_mod(r, r, m, ctx); } @@ -512,9 +520,9 @@ int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m) { while (n > 0) { int max_shift; - /* 0 < r < m */ + // 0 < r < m max_shift = BN_num_bits(m) - BN_num_bits(r); - /* max_shift >= 0 */ + // max_shift >= 0 if (max_shift < 0) { OPENSSL_PUT_ERROR(BN, BN_R_INPUT_NOT_REDUCED); @@ -537,7 +545,7 @@ int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m) { --n; } - /* BN_num_bits(r) <= BN_num_bits(m) */ + // BN_num_bits(r) <= BN_num_bits(m) if (BN_cmp(r, m) >= 0) { if (!BN_sub(r, r, m)) { return 0; @@ -571,10 +579,8 @@ BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w) { BN_ULONG ret = 0; int i, j; - w &= BN_MASK2; - if (!w) { - /* actually this an error (division by zero) */ + // actually this an error (division by zero) return (BN_ULONG) - 1; } @@ -582,7 +588,7 @@ BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w) { return 0; } - /* normalize input for |bn_div_rem_words|. */ + // normalize input for |bn_div_rem_words|. 
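+  // For example (assuming a 32-bit build, so BN_BITS2 == 32): w == 0xabcd has
+  // 16 significant bits, so j == 16 and both |w| and |a| are shifted left by
+  // 16 bits. This sets the divisor's most significant bit, as the word
+  // division below expects, and scaling both operands by 2^16 leaves the
+  // quotient words unchanged.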
j = BN_BITS2 - BN_num_bits_word(w); w <<= j; if (!BN_lshift(a, a, j)) { @@ -594,7 +600,7 @@ BN_ULONG BN_div_word(BIGNUM *a, BN_ULONG w) { BN_ULONG d; BN_ULONG unused_rem; bn_div_rem_words(&d, &unused_rem, ret, l, w); - ret = (l - ((d * w) & BN_MASK2)) & BN_MASK2; + ret = l - (d * w); a->d[i] = d; } @@ -623,8 +629,8 @@ BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w) { } #ifndef BN_ULLONG - /* If |w| is too long and we don't have |BN_ULLONG| then we need to fall back - * to using |BN_div_word|. */ + // If |w| is too long and we don't have |BN_ULLONG| then we need to fall back + // to using |BN_div_word|. if (w > ((BN_ULONG)1 << BN_BITS4)) { BIGNUM *tmp = BN_dup(a); if (tmp == NULL) { @@ -636,7 +642,6 @@ BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w) { } #endif - w &= BN_MASK2; for (i = a->top - 1; i >= 0; i--) { #ifndef BN_ULLONG ret = ((ret << BN_BITS4) | ((a->d[i] >> BN_BITS4) & BN_MASK2l)) % w; @@ -656,27 +661,27 @@ int BN_mod_pow2(BIGNUM *r, const BIGNUM *a, size_t e) { size_t num_words = 1 + ((e - 1) / BN_BITS2); - /* If |a| definitely has less than |e| bits, just BN_copy. */ + // If |a| definitely has less than |e| bits, just BN_copy. if ((size_t) a->top < num_words) { return BN_copy(r, a) != NULL; } - /* Otherwise, first make sure we have enough space in |r|. - * Note that this will fail if num_words > INT_MAX. */ - if (bn_wexpand(r, num_words) == NULL) { + // Otherwise, first make sure we have enough space in |r|. + // Note that this will fail if num_words > INT_MAX. + if (!bn_wexpand(r, num_words)) { return 0; } - /* Copy the content of |a| into |r|. */ + // Copy the content of |a| into |r|. OPENSSL_memcpy(r->d, a->d, num_words * sizeof(BN_ULONG)); - /* If |e| isn't word-aligned, we have to mask off some of our bits. */ + // If |e| isn't word-aligned, we have to mask off some of our bits. size_t top_word_exponent = e % (sizeof(BN_ULONG) * 8); if (top_word_exponent != 0) { r->d[num_words - 1] &= (((BN_ULONG) 1) << top_word_exponent) - 1; } - /* Fill in the remaining fields of |r|. */ + // Fill in the remaining fields of |r|. r->neg = a->neg; r->top = (int) num_words; bn_correct_top(r); @@ -688,41 +693,41 @@ int BN_nnmod_pow2(BIGNUM *r, const BIGNUM *a, size_t e) { return 0; } - /* If the returned value was non-negative, we're done. */ + // If the returned value was non-negative, we're done. if (BN_is_zero(r) || !r->neg) { return 1; } size_t num_words = 1 + (e - 1) / BN_BITS2; - /* Expand |r| to the size of our modulus. */ - if (bn_wexpand(r, num_words) == NULL) { + // Expand |r| to the size of our modulus. + if (!bn_wexpand(r, num_words)) { return 0; } - /* Clear the upper words of |r|. */ + // Clear the upper words of |r|. OPENSSL_memset(&r->d[r->top], 0, (num_words - r->top) * BN_BYTES); - /* Set parameters of |r|. */ + // Set parameters of |r|. r->neg = 0; r->top = (int) num_words; - /* Now, invert every word. The idea here is that we want to compute 2^e-|x|, - * which is actually equivalent to the twos-complement representation of |x| - * in |e| bits, which is -x = ~x + 1. */ + // Now, invert every word. The idea here is that we want to compute 2^e-|x|, + // which is actually equivalent to the twos-complement representation of |x| + // in |e| bits, which is -x = ~x + 1. for (int i = 0; i < r->top; i++) { r->d[i] = ~r->d[i]; } - /* If our exponent doesn't span the top word, we have to mask the rest. */ + // If our exponent doesn't span the top word, we have to mask the rest. 
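+  // A small worked example (illustrative values): with e == 4 and |r| == 3,
+  // inverting the word gives ...11111100, masking to the low 4 bits leaves
+  // 0b1100, and adding one below yields 0b1101 == 13 == 2^4 - 3, as required.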
size_t top_word_exponent = e % BN_BITS2; if (top_word_exponent != 0) { r->d[r->top - 1] &= (((BN_ULONG) 1) << top_word_exponent) - 1; } - /* Keep the correct_top invariant for BN_add. */ + // Keep the correct_top invariant for BN_add. bn_correct_top(r); - /* Finally, add one, for the reason described above. */ + // Finally, add one, for the reason described above. return BN_add(r, r, BN_value_one()); } diff --git a/Sources/BoringSSL/crypto/bn/exponentiation.c b/Sources/BoringSSL/crypto/fipsmodule/bn/exponentiation.c similarity index 63% rename from Sources/BoringSSL/crypto/bn/exponentiation.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/exponentiation.c index 933a731c0..a5cb7dab2 100644 --- a/Sources/BoringSSL/crypto/bn/exponentiation.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/exponentiation.c @@ -188,12 +188,9 @@ int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { return ret; } -/* maximum precomputation table size for *variable* sliding windows */ -#define TABLE_SIZE 32 - typedef struct bn_recp_ctx_st { - BIGNUM N; /* the divisor */ - BIGNUM Nr; /* the reciprocal */ + BIGNUM N; // the divisor + BIGNUM Nr; // the reciprocal int num_bits; int shift; int flags; @@ -227,10 +224,10 @@ static int BN_RECP_CTX_set(BN_RECP_CTX *recp, const BIGNUM *d, BN_CTX *ctx) { return 1; } -/* len is the expected size of the result We actually calculate with an extra - * word of precision, so we can do faster division if the remainder is not - * required. - * r := 2^len / m */ +// len is the expected size of the result We actually calculate with an extra +// word of precision, so we can do faster division if the remainder is not +// required. +// r := 2^len / m static int BN_reciprocal(BIGNUM *r, const BIGNUM *m, int len, BN_CTX *ctx) { int ret = -1; BIGNUM *t; @@ -289,34 +286,34 @@ static int BN_div_recp(BIGNUM *dv, BIGNUM *rem, const BIGNUM *m, return 1; } - /* We want the remainder - * Given input of ABCDEF / ab - * we need multiply ABCDEF by 3 digests of the reciprocal of ab */ + // We want the remainder + // Given input of ABCDEF / ab + // we need multiply ABCDEF by 3 digests of the reciprocal of ab - /* i := max(BN_num_bits(m), 2*BN_num_bits(N)) */ + // i := max(BN_num_bits(m), 2*BN_num_bits(N)) i = BN_num_bits(m); j = recp->num_bits << 1; if (j > i) { i = j; } - /* Nr := round(2^i / N) */ + // Nr := round(2^i / N) if (i != recp->shift) { recp->shift = BN_reciprocal(&(recp->Nr), &(recp->N), i, - ctx); /* BN_reciprocal returns i, or -1 for an error */ + ctx); // BN_reciprocal returns i, or -1 for an error } if (recp->shift == -1) { goto err; } - /* d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i - - * BN_num_bits(N)))| - * = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i - - * BN_num_bits(N)))| - * <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)| - * = |m/N| */ + // d := |round(round(m / 2^BN_num_bits(N)) * recp->Nr / 2^(i - + // BN_num_bits(N)))| + // = |round(round(m / 2^BN_num_bits(N)) * round(2^i / N) / 2^(i - + // BN_num_bits(N)))| + // <= |(m / 2^BN_num_bits(N)) * (2^i / N) * (2^BN_num_bits(N) / 2^i)| + // = |m/N| if (!BN_rshift(a, m, recp->num_bits)) { goto err; } @@ -383,7 +380,7 @@ static int BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y, } ca = a; } else { - ca = x; /* Just do the mod */ + ca = x; // Just do the mod } ret = BN_div_recp(NULL, r, ca, recp, ctx); @@ -393,48 +390,72 @@ static int BN_mod_mul_reciprocal(BIGNUM *r, const BIGNUM *x, const BIGNUM *y, return ret; } -/* BN_window_bits_for_exponent_size 
-- macro for sliding window mod_exp - * functions - * - * For window size 'w' (w >= 2) and a random 'b' bits exponent, the number of - * multiplications is a constant plus on average - * - * 2^(w-1) + (b-w)/(w+1); - * - * here 2^(w-1) is for precomputing the table (we actually need entries only - * for windows that have the lowest bit set), and (b-w)/(w+1) is an - * approximation for the expected number of w-bit windows, not counting the - * first one. - * - * Thus we should use - * - * w >= 6 if b > 671 - * w = 5 if 671 > b > 239 - * w = 4 if 239 > b > 79 - * w = 3 if 79 > b > 23 - * w <= 2 if 23 > b - * - * (with draws in between). Very small exponents are often selected - * with low Hamming weight, so we use w = 1 for b <= 23. */ -#define BN_window_bits_for_exponent_size(b) \ - ((b) > 671 ? 6 : \ - (b) > 239 ? 5 : \ - (b) > 79 ? 4 : \ - (b) > 23 ? 3 : 1) +// BN_window_bits_for_exponent_size returns sliding window size for mod_exp with +// a |b| bit exponent. +// +// For window size 'w' (w >= 2) and a random 'b' bits exponent, the number of +// multiplications is a constant plus on average +// +// 2^(w-1) + (b-w)/(w+1); +// +// here 2^(w-1) is for precomputing the table (we actually need entries only +// for windows that have the lowest bit set), and (b-w)/(w+1) is an +// approximation for the expected number of w-bit windows, not counting the +// first one. +// +// Thus we should use +// +// w >= 6 if b > 671 +// w = 5 if 671 > b > 239 +// w = 4 if 239 > b > 79 +// w = 3 if 79 > b > 23 +// w <= 2 if 23 > b +// +// (with draws in between). Very small exponents are often selected +// with low Hamming weight, so we use w = 1 for b <= 23. +static int BN_window_bits_for_exponent_size(int b) { + if (b > 671) { + return 6; + } + if (b > 239) { + return 5; + } + if (b > 79) { + return 4; + } + if (b > 23) { + return 3; + } + return 1; +} + +// TABLE_SIZE is the maximum precomputation table size for *variable* sliding +// windows. This must be 2^(max_window - 1), where max_window is the largest +// value returned from |BN_window_bits_for_exponent_size|. +#define TABLE_SIZE 32 + +// TABLE_BITS_SMALL is the smallest value returned from +// |BN_window_bits_for_exponent_size| when |b| is at most |BN_BITS2| * +// |BN_SMALL_MAX_WORDS| words. +#define TABLE_BITS_SMALL 5 + +// TABLE_SIZE_SMALL is the same as |TABLE_SIZE|, but when |b| is at most +// |BN_BITS2| * |BN_SMALL_MAX_WORDS|. +#define TABLE_SIZE_SMALL (1 << (TABLE_BITS_SMALL - 1)) static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx) { int i, j, bits, ret = 0, wstart, window; int start = 1; BIGNUM *aa; - /* Table of variables obtained from 'ctx' */ + // Table of variables obtained from 'ctx' BIGNUM *val[TABLE_SIZE]; BN_RECP_CTX recp; bits = BN_num_bits(p); if (bits == 0) { - /* x**0 mod 1 is still zero. */ + // x**0 mod 1 is still zero. 
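+    // (Everything reduces to 0 modulo 1, so the usual "return one" answer for
+    // a zero exponent would be wrong when |m| is 1.)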
if (BN_is_one(m)) { BN_zero(r); return 1; @@ -451,7 +472,7 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_RECP_CTX_init(&recp); if (m->neg) { - /* ignore sign of 'm' */ + // ignore sign of 'm' if (!BN_copy(aa, m)) { goto err; } @@ -466,7 +487,7 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, } if (!BN_nnmod(val[0], a, m, ctx)) { - goto err; /* 1 */ + goto err; // 1 } if (BN_is_zero(val[0])) { BN_zero(r); @@ -477,7 +498,7 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, window = BN_window_bits_for_exponent_size(bits); if (window > 1) { if (!BN_mod_mul_reciprocal(aa, val[0], val[0], &recp, ctx)) { - goto err; /* 2 */ + goto err; // 2 } j = 1 << (window - 1); for (i = 1; i < j; i++) { @@ -488,20 +509,20 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, } } - start = 1; /* This is used to avoid multiplication etc - * when there is only the value '1' in the - * buffer. */ - wstart = bits - 1; /* The top bit of the window */ + start = 1; // This is used to avoid multiplication etc + // when there is only the value '1' in the + // buffer. + wstart = bits - 1; // The top bit of the window if (!BN_one(r)) { goto err; } for (;;) { - int wvalue; /* The 'value' of the window */ - int wend; /* The bottom bit of the window */ + int wvalue; // The 'value' of the window + int wend; // The bottom bit of the window - if (BN_is_bit_set(p, wstart) == 0) { + if (!BN_is_bit_set(p, wstart)) { if (!start) { if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx)) { goto err; @@ -514,10 +535,10 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, continue; } - /* We now have wstart on a 'set' bit, we now need to work out - * how bit a window to do. To do this we need to scan - * forward until the last set bit before the end of the - * window */ + // We now have wstart on a 'set' bit, we now need to work out + // how bit a window to do. To do this we need to scan + // forward until the last set bit before the end of the + // window wvalue = 1; wend = 0; for (i = 1; i < window; i++) { @@ -531,9 +552,9 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, } } - /* wend is the size of the current window */ + // wend is the size of the current window j = wend + 1; - /* add the 'bytes above' */ + // add the 'bytes above' if (!start) { for (i = 0; i < j; i++) { if (!BN_mod_mul_reciprocal(r, r, r, &recp, ctx)) { @@ -542,12 +563,12 @@ static int mod_exp_recp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, } } - /* wvalue will be an odd number < 2^window */ + // wvalue will be an odd number < 2^window if (!BN_mod_mul_reciprocal(r, r, val[wvalue >> 1], &recp, ctx)) { goto err; } - /* move the 'window' down further */ + // move the 'window' down further wstart -= wend + 1; start = 0; if (wstart < 0) { @@ -573,21 +594,13 @@ int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont) { - int i, j, bits, ret = 0, wstart, window; - int start = 1; - BIGNUM *d, *r; - const BIGNUM *aa; - /* Table of variables obtained from 'ctx' */ - BIGNUM *val[TABLE_SIZE]; - BN_MONT_CTX *new_mont = NULL; - if (!BN_is_odd(m)) { OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS); return 0; } - bits = BN_num_bits(p); + int bits = BN_num_bits(p); if (bits == 0) { - /* x**0 mod 1 is still zero. */ + // x**0 mod 1 is still zero. 
if (BN_is_one(m)) { BN_zero(rr); return 1; @@ -595,15 +608,19 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, return BN_one(rr); } + int ret = 0; + BIGNUM *val[TABLE_SIZE]; + BN_MONT_CTX *new_mont = NULL; + BN_CTX_start(ctx); - d = BN_CTX_get(ctx); - r = BN_CTX_get(ctx); + BIGNUM *d = BN_CTX_get(ctx); + BIGNUM *r = BN_CTX_get(ctx); val[0] = BN_CTX_get(ctx); if (!d || !r || !val[0]) { goto err; } - /* Allocate a montgomery context if it was not supplied by the caller. */ + // Allocate a montgomery context if it was not supplied by the caller. if (mont == NULL) { new_mont = BN_MONT_CTX_new(); if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) { @@ -612,6 +629,7 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, mont = new_mont; } + const BIGNUM *aa; if (a->neg || BN_ucmp(a, m) >= 0) { if (!BN_nnmod(val[0], a, m, ctx)) { goto err; @@ -626,53 +644,52 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, ret = 1; goto err; } + + // We exponentiate by looking at sliding windows of the exponent and + // precomputing powers of |aa|. Windows may be shifted so they always end on a + // set bit, so only precompute odd powers. We compute val[i] = aa^(2*i + 1) + // for i = 0 to 2^(window-1), all in Montgomery form. + int window = BN_window_bits_for_exponent_size(bits); if (!BN_to_montgomery(val[0], aa, mont, ctx)) { - goto err; /* 1 */ + goto err; } - - window = BN_window_bits_for_exponent_size(bits); if (window > 1) { if (!BN_mod_mul_montgomery(d, val[0], val[0], mont, ctx)) { - goto err; /* 2 */ + goto err; } - j = 1 << (window - 1); - for (i = 1; i < j; i++) { - if (((val[i] = BN_CTX_get(ctx)) == NULL) || + for (int i = 1; i < 1 << (window - 1); i++) { + val[i] = BN_CTX_get(ctx); + if (val[i] == NULL || !BN_mod_mul_montgomery(val[i], val[i - 1], d, mont, ctx)) { goto err; } } } - start = 1; /* This is used to avoid multiplication etc - * when there is only the value '1' in the - * buffer. */ - wstart = bits - 1; /* The top bit of the window */ - - j = m->top; /* borrow j */ - if (m->d[j - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { - if (bn_wexpand(r, j) == NULL) { + // Set |r| to one in Montgomery form. If the high bit of |m| is set, |m| is + // close to R and we subtract rather than perform Montgomery reduction. + if (m->d[m->top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { + if (!bn_wexpand(r, m->top)) { goto err; } - /* 2^(top*BN_BITS2) - m */ - r->d[0] = (0 - m->d[0]) & BN_MASK2; - for (i = 1; i < j; i++) { - r->d[i] = (~m->d[i]) & BN_MASK2; + // r = 2^(top*BN_BITS2) - m + r->d[0] = 0 - m->d[0]; + for (int i = 1; i < m->top; i++) { + r->d[i] = ~m->d[i]; } - r->top = j; - /* Upper words will be zero if the corresponding words of 'm' - * were 0xfff[...], so decrement r->top accordingly. */ + r->top = m->top; + // The upper words will be zero if the corresponding words of |m| were + // 0xfff[...], so call |bn_correct_top|. bn_correct_top(r); } else if (!BN_to_montgomery(r, BN_value_one(), mont, ctx)) { goto err; } + int r_is_one = 1; + int wstart = bits - 1; // The top bit of the window. 
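+  // A sketch of the scan below (example values only): for p == 0b100110
+  // (decimal 38) with window == 3, the loop multiplies in aa for the set bit
+  // at position 5, squares over the zero bits 4 and 3, squares twice more
+  // while absorbing the window "11" at bits 2..1 and multiplies in
+  // val[3 >> 1] == aa^3, then squares once for bit 0, leaving r == aa^38 in
+  // Montgomery form.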
for (;;) { - int wvalue; /* The 'value' of the window */ - int wend; /* The bottom bit of the window */ - - if (BN_is_bit_set(p, wstart) == 0) { - if (!start && !BN_mod_mul_montgomery(r, r, r, mont, ctx)) { + if (!BN_is_bit_set(p, wstart)) { + if (!r_is_one && !BN_mod_mul_montgomery(r, r, r, mont, ctx)) { goto err; } if (wstart == 0) { @@ -682,44 +699,37 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, continue; } - /* We now have wstart on a 'set' bit, we now need to work out how bit a - * window to do. To do this we need to scan forward until the last set bit - * before the end of the window */ - wvalue = 1; - wend = 0; - for (i = 1; i < window; i++) { - if (wstart - i < 0) { - break; - } + // We now have wstart on a set bit. Find the largest window we can use. + int wvalue = 1; + int wsize = 0; + for (int i = 1; i < window && i <= wstart; i++) { if (BN_is_bit_set(p, wstart - i)) { - wvalue <<= (i - wend); + wvalue <<= (i - wsize); wvalue |= 1; - wend = i; + wsize = i; } } - /* wend is the size of the current window */ - j = wend + 1; - /* add the 'bytes above' */ - if (!start) { - for (i = 0; i < j; i++) { + // Shift |r| to the end of the window. + if (!r_is_one) { + for (int i = 0; i < wsize + 1; i++) { if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) { goto err; } } } - /* wvalue will be an odd number < 2^window */ + assert(wvalue & 1); + assert(wvalue < (1 << window)); if (!BN_mod_mul_montgomery(r, r, val[wvalue >> 1], mont, ctx)) { goto err; } - /* move the 'window' down further */ - wstart -= wend + 1; - start = 0; - if (wstart < 0) { + r_is_one = 0; + if (wstart == wsize) { break; } + wstart -= wsize + 1; } if (!BN_from_montgomery(rr, r, mont, ctx)) { @@ -733,25 +743,171 @@ int BN_mod_exp_mont(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, return ret; } -/* BN_mod_exp_mont_consttime() stores the precomputed powers in a specific - * layout so that accessing any of these table values shows the same access - * pattern as far as cache lines are concerned. The following functions are - * used to transfer a BIGNUM from/to that table. */ -static int copy_to_prebuf(const BIGNUM *b, int top, unsigned char *buf, int idx, - int window) { +int bn_mod_exp_mont_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, + size_t num_a, const BN_ULONG *p, size_t num_p, + const BN_MONT_CTX *mont) { + const BN_ULONG *n = mont->N.d; + size_t num_n = mont->N.top; + if (num_n != num_a || num_n != num_r || num_n > BN_SMALL_MAX_WORDS) { + OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return 0; + } + if (!BN_is_odd(&mont->N)) { + OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS); + return 0; + } + unsigned bits = 0; + if (num_p != 0) { + bits = BN_num_bits_word(p[num_p - 1]) + (num_p - 1) * BN_BITS2; + } + if (bits == 0) { + OPENSSL_memset(r, 0, num_r * sizeof(BN_ULONG)); + if (!BN_is_one(&mont->N)) { + r[0] = 1; + } + return 1; + } + + // We exponentiate by looking at sliding windows of the exponent and + // precomputing powers of |a|. Windows may be shifted so they always end on a + // set bit, so only precompute odd powers. We compute val[i] = a^(2*i + 1) for + // i = 0 to 2^(window-1), all in Montgomery form. + unsigned window = BN_window_bits_for_exponent_size(bits); + if (window > TABLE_BITS_SMALL) { + window = TABLE_BITS_SMALL; // Tolerate excessively large |p|. 
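+    // (|val| below holds 2^(TABLE_BITS_SMALL - 1) == TABLE_SIZE_SMALL odd
+    // powers, so the window must not grow past TABLE_BITS_SMALL even when the
+    // exponent is unusually wide for a "small" operand.)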
+ } + int ret = 0; + BN_ULONG val[TABLE_SIZE_SMALL][BN_SMALL_MAX_WORDS]; + OPENSSL_memcpy(val[0], a, num_n * sizeof(BN_ULONG)); + if (window > 1) { + BN_ULONG d[BN_SMALL_MAX_WORDS]; + if (!bn_mod_mul_montgomery_small(d, num_n, val[0], num_n, val[0], num_n, + mont)) { + goto err; + } + for (unsigned i = 1; i < 1u << (window - 1); i++) { + if (!bn_mod_mul_montgomery_small(val[i], num_n, val[i - 1], num_n, d, + num_n, mont)) { + goto err; + } + } + } + + // Set |r| to one in Montgomery form. If the high bit of |m| is set, |m| is + // close to R and we subtract rather than perform Montgomery reduction. + if (n[num_n - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { + // r = 2^(top*BN_BITS2) - m + r[0] = 0 - n[0]; + for (size_t i = 1; i < num_n; i++) { + r[i] = ~n[i]; + } + } else if (!bn_from_montgomery_small(r, num_r, mont->RR.d, mont->RR.top, + mont)) { + goto err; + } + + int r_is_one = 1; + unsigned wstart = bits - 1; // The top bit of the window. + for (;;) { + if (!bn_is_bit_set_words(p, num_p, wstart)) { + if (!r_is_one && + !bn_mod_mul_montgomery_small(r, num_r, r, num_r, r, num_r, mont)) { + goto err; + } + if (wstart == 0) { + break; + } + wstart--; + continue; + } + + // We now have wstart on a set bit. Find the largest window we can use. + unsigned wvalue = 1; + unsigned wsize = 0; + for (unsigned i = 1; i < window && i <= wstart; i++) { + if (bn_is_bit_set_words(p, num_p, wstart - i)) { + wvalue <<= (i - wsize); + wvalue |= 1; + wsize = i; + } + } + + // Shift |r| to the end of the window. + if (!r_is_one) { + for (unsigned i = 0; i < wsize + 1; i++) { + if (!bn_mod_mul_montgomery_small(r, num_r, r, num_r, r, num_r, mont)) { + goto err; + } + } + } + + assert(wvalue & 1); + assert(wvalue < (1u << window)); + if (!bn_mod_mul_montgomery_small(r, num_r, r, num_r, val[wvalue >> 1], + num_n, mont)) { + goto err; + } + + r_is_one = 0; + if (wstart == wsize) { + break; + } + wstart -= wsize + 1; + } + + ret = 1; + +err: + OPENSSL_cleanse(val, sizeof(val)); + return ret; +} + +int bn_mod_inverse_prime_mont_small(BN_ULONG *r, size_t num_r, + const BN_ULONG *a, size_t num_a, + const BN_MONT_CTX *mont) { + const BN_ULONG *p = mont->N.d; + size_t num_p = mont->N.top; + if (num_p > BN_SMALL_MAX_WORDS || num_p == 0) { + OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return 0; + } + + // Per Fermat's Little Theorem, a^-1 = a^(p-2) (mod p) for p prime. + BN_ULONG p_minus_two[BN_SMALL_MAX_WORDS]; + OPENSSL_memcpy(p_minus_two, p, num_p * sizeof(BN_ULONG)); + if (p_minus_two[0] >= 2) { + p_minus_two[0] -= 2; + } else { + p_minus_two[0] -= 2; + for (size_t i = 1; i < num_p; i++) { + if (p_minus_two[i]-- != 0) { + break; + } + } + } + + return bn_mod_exp_mont_small(r, num_r, a, num_a, p_minus_two, num_p, mont); +} + + +// |BN_mod_exp_mont_consttime| stores the precomputed powers in a specific +// layout so that accessing any of these table values shows the same access +// pattern as far as cache lines are concerned. The following functions are +// used to transfer a BIGNUM from/to that table. 
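+//
+// Concretely (illustrative sizes): with window == 2 the table holds
+// width == 4 entries, and entry |idx| keeps its i-th word at
+// table[idx + i * width]. Word 0 of all four entries is stored first, then
+// word 1 of all four entries, and so on, so reading any one entry touches the
+// same cache lines regardless of which |idx| was selected.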
+ +static void copy_to_prebuf(const BIGNUM *b, int top, unsigned char *buf, + int idx, int window) { int i, j; const int width = 1 << window; BN_ULONG *table = (BN_ULONG *) buf; if (top > b->top) { - top = b->top; /* this works because 'buf' is explicitly zeroed */ + top = b->top; // this works because 'buf' is explicitly zeroed } for (i = 0, j = idx; i < top; i++, j += width) { table[j] = b->d[i]; } - - return 1; } static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx, @@ -760,7 +916,7 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx, const int width = 1 << window; volatile BN_ULONG *table = (volatile BN_ULONG *)buf; - if (bn_wexpand(b, top) == NULL) { + if (!bn_wexpand(b, top)) { return 0; } @@ -778,8 +934,8 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx, int xstride = 1 << (window - 2); BN_ULONG y0, y1, y2, y3; - i = idx >> (window - 2); /* equivalent of idx / xstride */ - idx &= xstride - 1; /* equivalent of idx % xstride */ + i = idx >> (window - 2); // equivalent of idx / xstride + idx &= xstride - 1; // equivalent of idx % xstride y0 = (BN_ULONG)0 - (constant_time_eq_int(i, 0) & 1); y1 = (BN_ULONG)0 - (constant_time_eq_int(i, 1) & 1); @@ -804,23 +960,23 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx, return 1; } -/* BN_mod_exp_mont_conttime is based on the assumption that the L1 data cache - * line width of the target processor is at least the following value. */ +// BN_mod_exp_mont_conttime is based on the assumption that the L1 data cache +// line width of the target processor is at least the following value. #define MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH (64) #define MOD_EXP_CTIME_MIN_CACHE_LINE_MASK \ (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - 1) -/* Window sizes optimized for fixed window size modular exponentiation - * algorithm (BN_mod_exp_mont_consttime). - * - * To achieve the security goals of BN_mode_exp_mont_consttime, the maximum - * size of the window must not exceed - * log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH). - * - * Window size thresholds are defined for cache line sizes of 32 and 64, cache - * line sizes where log_2(32)=5 and log_2(64)=6 respectively. A window size of - * 7 should only be used on processors that have a 128 byte or greater cache - * line size. */ +// Window sizes optimized for fixed window size modular exponentiation +// algorithm (BN_mod_exp_mont_consttime). +// +// To achieve the security goals of BN_mode_exp_mont_consttime, the maximum +// size of the window must not exceed +// log_2(MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH). +// +// Window size thresholds are defined for cache line sizes of 32 and 64, cache +// line sizes where log_2(32)=5 and log_2(64)=6 respectively. A window size of +// 7 should only be used on processors that have a 128 byte or greater cache +// line size. #if MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH == 64 #define BN_window_bits_for_ctime_exponent_size(b) \ @@ -835,19 +991,18 @@ static int copy_from_prebuf(BIGNUM *b, int top, unsigned char *buf, int idx, #endif -/* Given a pointer value, compute the next address that is a cache line - * multiple. */ +// Given a pointer value, compute the next address that is a cache line +// multiple. 
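+//
+// For example (using the 64-byte minimum line width assumed above): a pointer
+// whose low bits are 0x07 is advanced by 64 - 7 == 57 bytes to the next
+// 64-byte boundary, and an already aligned pointer is advanced by a full 64
+// bytes, so the result always moves strictly forward.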
#define MOD_EXP_CTIME_ALIGN(x_) \ ((unsigned char *)(x_) + \ (MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH - \ (((size_t)(x_)) & (MOD_EXP_CTIME_MIN_CACHE_LINE_MASK)))) -/* This variant of BN_mod_exp_mont() uses fixed windows and the special - * precomputation memory layout to limit data-dependency to a minimum - * to protect secret exponents (cf. the hyper-threading timing attacks - * pointed out by Colin Percival, - * http://www.daemonology.net/hyperthreading-considered-harmful/) - */ +// This variant of BN_mod_exp_mont() uses fixed windows and the special +// precomputation memory layout to limit data-dependency to a minimum +// to protect secret exponents (cf. the hyper-threading timing attacks +// pointed out by Colin Percival, +// http://www.daemonology.net/hyperthreading-considered-harmful/) int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont) { @@ -871,7 +1026,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, bits = BN_num_bits(p); if (bits == 0) { - /* x**0 mod 1 is still zero. */ + // x**0 mod 1 is still zero. if (BN_is_one(m)) { BN_zero(rr); return 1; @@ -879,7 +1034,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, return BN_one(rr); } - /* Allocate a montgomery context if it was not supplied by the caller. */ + // Allocate a montgomery context if it was not supplied by the caller. if (mont == NULL) { new_mont = BN_MONT_CTX_new(); if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) { @@ -898,12 +1053,12 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } #ifdef RSAZ_ENABLED - /* If the size of the operands allow it, perform the optimized - * RSAZ exponentiation. For further information see - * crypto/bn/rsaz_exp.c and accompanying assembly modules. */ + // If the size of the operands allow it, perform the optimized + // RSAZ exponentiation. For further information see + // crypto/bn/rsaz_exp.c and accompanying assembly modules. if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024) && rsaz_avx2_eligible()) { - if (NULL == bn_wexpand(rr, 16)) { + if (!bn_wexpand(rr, 16)) { goto err; } RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d, mont->n0[0]); @@ -915,19 +1070,18 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } #endif - /* Get the window size to use with size of p. */ + // Get the window size to use with size of p. window = BN_window_bits_for_ctime_exponent_size(bits); #if defined(OPENSSL_BN_ASM_MONT5) if (window >= 5) { - window = 5; /* ~5% improvement for RSA2048 sign, and even for RSA4096 */ - /* reserve space for mont->N.d[] copy */ + window = 5; // ~5% improvement for RSA2048 sign, and even for RSA4096 + // reserve space for mont->N.d[] copy powerbufLen += top * sizeof(mont->N.d[0]); } #endif - /* Allocate a buffer large enough to hold all of the pre-computed - * powers of am, am itself and tmp. - */ + // Allocate a buffer large enough to hold all of the pre-computed + // powers of am, am itself and tmp. 
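+  // A sketch of the layout set up below: 2^window table entries of |top|
+  // words each, followed by |tmp| and |am| of |top| words each and, on the
+  // OPENSSL_BN_ASM_MONT5 path, a cached copy of mont->N.d[] right after |am|.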
numPowers = 1 << window; powerbufLen += sizeof(m->d[0]) * @@ -953,7 +1107,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } #endif - /* lay down tmp and am right after powers table */ + // lay down tmp and am right after powers table tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers); am.d = tmp.d + top; tmp.top = am.top = 0; @@ -961,20 +1115,20 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, tmp.neg = am.neg = 0; tmp.flags = am.flags = BN_FLG_STATIC_DATA; -/* prepare a^0 in Montgomery domain */ -/* by Shay Gueron's suggestion */ +// prepare a^0 in Montgomery domain +// by Shay Gueron's suggestion if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { - /* 2^(top*BN_BITS2) - m */ - tmp.d[0] = (0 - m->d[0]) & BN_MASK2; + // 2^(top*BN_BITS2) - m + tmp.d[0] = 0 - m->d[0]; for (i = 1; i < top; i++) { - tmp.d[i] = (~m->d[i]) & BN_MASK2; + tmp.d[i] = ~m->d[i]; } tmp.top = top; } else if (!BN_to_montgomery(&tmp, BN_value_one(), mont, ctx)) { goto err; } - /* prepare a^1 in Montgomery domain */ + // prepare a^1 in Montgomery domain assert(!a->neg); assert(BN_ucmp(a, m) < 0); if (!BN_to_montgomery(&am, a, mont, ctx)) { @@ -982,18 +1136,18 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } #if defined(OPENSSL_BN_ASM_MONT5) - /* This optimization uses ideas from http://eprint.iacr.org/2011/239, - * specifically optimization of cache-timing attack countermeasures - * and pre-computation optimization. */ + // This optimization uses ideas from http://eprint.iacr.org/2011/239, + // specifically optimization of cache-timing attack countermeasures + // and pre-computation optimization. - /* Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as - * 512-bit RSA is hardly relevant, we omit it to spare size... */ + // Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as + // 512-bit RSA is hardly relevant, we omit it to spare size... if (window == 5 && top > 1) { const BN_ULONG *n0 = mont->n0; BN_ULONG *np; - /* BN_to_montgomery can contaminate words above .top - * [in BN_DEBUG[_DEBUG] build]... */ + // BN_to_montgomery can contaminate words above .top + // [in BN_DEBUG[_DEBUG] build]... for (i = am.top; i < top; i++) { am.d[i] = 0; } @@ -1001,7 +1155,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, tmp.d[i] = 0; } - /* copy mont->N.d[] to improve cache locality */ + // copy mont->N.d[] to improve cache locality for (np = am.d + top, i = 0; i < top; i++) { np[i] = mont->N.d[i]; } @@ -1011,7 +1165,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, bn_mul_mont(tmp.d, am.d, am.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, 2); - /* same as above, but uses squaring for 1/2 of operations */ + // same as above, but uses squaring for 1/2 of operations for (i = 4; i < 32; i *= 2) { bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, i); @@ -1042,13 +1196,12 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } bn_gather5(tmp.d, top, powerbuf, wvalue); - /* At this point |bits| is 4 mod 5 and at least -1. (|bits| is the first bit - * that has not been read yet.) */ + // At this point |bits| is 4 mod 5 and at least -1. (|bits| is the first bit + // that has not been read yet.) assert(bits >= -1 && (bits == -1 || bits % 5 == 4)); - /* Scan the exponent one window at a time starting from the most - * significant bits. 
- */ + // Scan the exponent one window at a time starting from the most + // significant bits. if (top & 7) { while (bits >= 0) { for (wvalue = 0, i = 0; i < 5; i++, bits--) { @@ -1066,16 +1219,16 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, const uint8_t *p_bytes = (const uint8_t *)p->d; int max_bits = p->top * BN_BITS2; assert(bits < max_bits); - /* |p = 0| has been handled as a special case, so |max_bits| is at least - * one word. */ + // |p = 0| has been handled as a special case, so |max_bits| is at least + // one word. assert(max_bits >= 64); - /* If the first bit to be read lands in the last byte, unroll the first - * iteration to avoid reading past the bounds of |p->d|. (After the first - * iteration, we are guaranteed to be past the last byte.) Note |bits| - * here is the top bit, inclusive. */ + // If the first bit to be read lands in the last byte, unroll the first + // iteration to avoid reading past the bounds of |p->d|. (After the first + // iteration, we are guaranteed to be past the last byte.) Note |bits| + // here is the top bit, inclusive. if (bits - 4 >= max_bits - 8) { - /* Read five bits from |bits-4| through |bits|, inclusive. */ + // Read five bits from |bits-4| through |bits|, inclusive. wvalue = p_bytes[p->top * BN_BYTES - 1]; wvalue >>= (bits - 4) & 7; wvalue &= 0x1f; @@ -1083,13 +1236,14 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue); } while (bits >= 0) { - /* Read five bits from |bits-4| through |bits|, inclusive. */ + // Read five bits from |bits-4| through |bits|, inclusive. int first_bit = bits - 4; - wvalue = *(const uint16_t *) (p_bytes + (first_bit >> 3)); - wvalue >>= first_bit & 7; - wvalue &= 0x1f; + uint16_t val; + OPENSSL_memcpy(&val, p_bytes + (first_bit >> 3), sizeof(val)); + val >>= first_bit & 7; + val &= 0x1f; bits -= 5; - bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue); + bn_power5(tmp.d, tmp.d, powerbuf, np, n0, top, val); } } @@ -1100,32 +1254,32 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, if (!BN_copy(rr, &tmp)) { ret = 0; } - goto err; /* non-zero ret means it's not error */ + goto err; // non-zero ret means it's not error } } else #endif { - if (!copy_to_prebuf(&tmp, top, powerbuf, 0, window) || - !copy_to_prebuf(&am, top, powerbuf, 1, window)) { - goto err; - } + copy_to_prebuf(&tmp, top, powerbuf, 0, window); + copy_to_prebuf(&am, top, powerbuf, 1, window); - /* If the window size is greater than 1, then calculate - * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) - * (even powers could instead be computed as (a^(i/2))^2 - * to use the slight performance advantage of sqr over mul). - */ + // If the window size is greater than 1, then calculate + // val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) + // (even powers could instead be computed as (a^(i/2))^2 + // to use the slight performance advantage of sqr over mul). 
if (window > 1) { - if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx) || - !copy_to_prebuf(&tmp, top, powerbuf, 2, window)) { + if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx)) { goto err; } + + copy_to_prebuf(&tmp, top, powerbuf, 2, window); + for (i = 3; i < numPowers; i++) { - /* Calculate a^i = a^(i-1) * a */ - if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx) || - !copy_to_prebuf(&tmp, top, powerbuf, i, window)) { + // Calculate a^i = a^(i-1) * a + if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx)) { goto err; } + + copy_to_prebuf(&tmp, top, powerbuf, i, window); } } @@ -1137,13 +1291,12 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, goto err; } - /* Scan the exponent one window at a time starting from the most - * significant bits. - */ + // Scan the exponent one window at a time starting from the most + // significant bits. while (bits >= 0) { - wvalue = 0; /* The 'value' of the window */ + wvalue = 0; // The 'value' of the window - /* Scan the window, squaring the result as we go */ + // Scan the window, squaring the result as we go for (i = 0; i < window; i++, bits--) { if (!BN_mod_mul_montgomery(&tmp, &tmp, &tmp, mont, ctx)) { goto err; @@ -1151,19 +1304,19 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); } - /* Fetch the appropriate pre-computed value from the pre-buf */ + // Fetch the appropriate pre-computed value from the pre-buf if (!copy_from_prebuf(&am, top, powerbuf, wvalue, window)) { goto err; } - /* Multiply the result into the intermediate result */ + // Multiply the result into the intermediate result if (!BN_mod_mul_montgomery(&tmp, &tmp, &am, mont, ctx)) { goto err; } } } - /* Convert the final result from montgomery to standard format */ + // Convert the final result from montgomery to standard format if (!BN_from_montgomery(rr, &tmp, mont, ctx)) { goto err; } @@ -1172,10 +1325,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, err: BN_MONT_CTX_free(new_mont); BN_clear_free(new_a); - if (powerbuf != NULL) { - OPENSSL_cleanse(powerbuf, powerbufLen); - OPENSSL_free(powerbufFree); - } + OPENSSL_free(powerbufFree); return (ret); } @@ -1211,7 +1361,7 @@ int BN_mod_exp2_mont(BIGNUM *rr, const BIGNUM *a1, const BIGNUM *p1, int ret = 0; BN_MONT_CTX *new_mont = NULL; - /* Allocate a montgomery context if it was not supplied by the caller. */ + // Allocate a montgomery context if it was not supplied by the caller. if (mont == NULL) { new_mont = BN_MONT_CTX_new(); if (new_mont == NULL || !BN_MONT_CTX_set(new_mont, m, ctx)) { @@ -1220,9 +1370,9 @@ int BN_mod_exp2_mont(BIGNUM *rr, const BIGNUM *a1, const BIGNUM *p1, mont = new_mont; } - /* BN_mod_mul_montgomery removes one Montgomery factor, so passing one - * Montgomery-encoded and one non-Montgomery-encoded value gives a - * non-Montgomery-encoded result. */ + // BN_mod_mul_montgomery removes one Montgomery factor, so passing one + // Montgomery-encoded and one non-Montgomery-encoded value gives a + // non-Montgomery-encoded result. 
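+  // In symbols: the Montgomery product of x and y is x * y * R^-1 (mod m), so
+  // multiplying the encoded value a1^p1 * R by the plain value a2^p2 yields
+  // a1^p1 * a2^p2 (mod m) directly, with no final decoding step needed.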
if (!BN_mod_exp_mont(rr, a1, p1, m, ctx, mont) || !BN_mod_exp_mont(&tmp, a2, p2, m, ctx, mont) || !BN_to_montgomery(rr, rr, mont, ctx) || diff --git a/Sources/BoringSSL/crypto/bn/gcd.c b/Sources/BoringSSL/crypto/fipsmodule/bn/gcd.c similarity index 78% rename from Sources/BoringSSL/crypto/bn/gcd.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/gcd.c index 7c20b8e2b..850d44672 100644 --- a/Sources/BoringSSL/crypto/bn/gcd.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/gcd.c @@ -118,9 +118,9 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { BIGNUM *t; int shifts = 0; - /* 0 <= b <= a */ + // 0 <= b <= a while (!BN_is_zero(b)) { - /* 0 < b <= a */ + // 0 < b <= a if (BN_is_odd(a)) { if (BN_is_odd(b)) { @@ -136,7 +136,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { b = t; } } else { - /* a odd - b even */ + // a odd - b even if (!BN_rshift1(b, b)) { goto err; } @@ -147,7 +147,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { } } } else { - /* a is even */ + // a is even if (BN_is_odd(b)) { if (!BN_rshift1(a, a)) { goto err; @@ -158,7 +158,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { b = t; } } else { - /* a even - b even */ + // a even - b even if (!BN_rshift1(a, a)) { goto err; } @@ -168,7 +168,7 @@ static BIGNUM *euclid(BIGNUM *a, BIGNUM *b) { shifts++; } } - /* 0 <= b <= a */ + // 0 <= b <= a } if (shifts) { @@ -224,7 +224,7 @@ int BN_gcd(BIGNUM *r, const BIGNUM *in_a, const BIGNUM *in_b, BN_CTX *ctx) { return ret; } -/* solves ax == 1 (mod n) */ +// solves ax == 1 (mod n) static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx); @@ -264,30 +264,29 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, } A->neg = 0; sign = -1; - /* From B = a mod |n|, A = |n| it follows that - * - * 0 <= B < A, - * -sign*X*a == B (mod |n|), - * sign*Y*a == A (mod |n|). - */ - - /* Binary inversion algorithm; requires odd modulus. This is faster than the - * general algorithm if the modulus is sufficiently small (about 400 .. 500 - * bits on 32-bit systems, but much more on 64-bit systems) */ + // From B = a mod |n|, A = |n| it follows that + // + // 0 <= B < A, + // -sign*X*a == B (mod |n|), + // sign*Y*a == A (mod |n|). + + // Binary inversion algorithm; requires odd modulus. This is faster than the + // general algorithm if the modulus is sufficiently small (about 400 .. 500 + // bits on 32-bit systems, but much more on 64-bit systems) int shift; while (!BN_is_zero(B)) { - /* 0 < B < |n|, - * 0 < A <= |n|, - * (1) -sign*X*a == B (mod |n|), - * (2) sign*Y*a == A (mod |n|) */ - - /* Now divide B by the maximum possible power of two in the integers, - * and divide X by the same value mod |n|. - * When we're done, (1) still holds. */ + // 0 < B < |n|, + // 0 < A <= |n|, + // (1) -sign*X*a == B (mod |n|), + // (2) sign*Y*a == A (mod |n|) + + // Now divide B by the maximum possible power of two in the integers, + // and divide X by the same value mod |n|. + // When we're done, (1) still holds. shift = 0; while (!BN_is_bit_set(B, shift)) { - /* note that 0 < B */ + // note that 0 < B shift++; if (BN_is_odd(X)) { @@ -295,7 +294,7 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, goto err; } } - /* now X is even, so we can easily divide it by two */ + // now X is even, so we can easily divide it by two if (!BN_rshift1(X, X)) { goto err; } @@ -306,10 +305,10 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, } } - /* Same for A and Y. Afterwards, (2) still holds. 
*/ + // Same for A and Y. Afterwards, (2) still holds. shift = 0; while (!BN_is_bit_set(A, shift)) { - /* note that 0 < A */ + // note that 0 < A shift++; if (BN_is_odd(Y)) { @@ -317,7 +316,7 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, goto err; } } - /* now Y is even */ + // now Y is even if (!BN_rshift1(Y, Y)) { goto err; } @@ -328,32 +327,32 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, } } - /* We still have (1) and (2). - * Both A and B are odd. - * The following computations ensure that - * - * 0 <= B < |n|, - * 0 < A < |n|, - * (1) -sign*X*a == B (mod |n|), - * (2) sign*Y*a == A (mod |n|), - * - * and that either A or B is even in the next iteration. */ + // We still have (1) and (2). + // Both A and B are odd. + // The following computations ensure that + // + // 0 <= B < |n|, + // 0 < A < |n|, + // (1) -sign*X*a == B (mod |n|), + // (2) sign*Y*a == A (mod |n|), + // + // and that either A or B is even in the next iteration. if (BN_ucmp(B, A) >= 0) { - /* -sign*(X + Y)*a == B - A (mod |n|) */ + // -sign*(X + Y)*a == B - A (mod |n|) if (!BN_uadd(X, X, Y)) { goto err; } - /* NB: we could use BN_mod_add_quick(X, X, Y, n), but that - * actually makes the algorithm slower */ + // NB: we could use BN_mod_add_quick(X, X, Y, n), but that + // actually makes the algorithm slower if (!BN_usub(B, B, A)) { goto err; } } else { - /* sign*(X + Y)*a == A - B (mod |n|) */ + // sign*(X + Y)*a == A - B (mod |n|) if (!BN_uadd(Y, Y, X)) { goto err; } - /* as above, BN_mod_add_quick(Y, Y, X, n) would slow things down */ + // as above, BN_mod_add_quick(Y, Y, X, n) would slow things down if (!BN_usub(A, A, B)) { goto err; } @@ -366,20 +365,20 @@ int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, goto err; } - /* The while loop (Euclid's algorithm) ends when - * A == gcd(a,n); - * we have - * sign*Y*a == A (mod |n|), - * where Y is non-negative. */ + // The while loop (Euclid's algorithm) ends when + // A == gcd(a,n); + // we have + // sign*Y*a == A (mod |n|), + // where Y is non-negative. if (sign < 0) { if (!BN_sub(Y, n, Y)) { goto err; } } - /* Now Y*a == A (mod |n|). */ + // Now Y*a == A (mod |n|). - /* Y*a == 1 (mod |n|) */ + // Y*a == 1 (mod |n|) if (!Y->neg && BN_ucmp(Y, n) < 0) { if (!BN_copy(R, Y)) { goto err; @@ -470,11 +469,11 @@ int BN_mod_inverse_blinded(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, return ret; } -/* bn_mod_inverse_general is the general inversion algorithm that works for - * both even and odd |n|. It was specifically designed to contain fewer - * branches that may leak sensitive information; see "New Branch Prediction - * Vulnerabilities in OpenSSL and Necessary Software Countermeasures" by - * Onur Acıçmez, Shay Gueron, and Jean-Pierre Seifert. */ +// bn_mod_inverse_general is the general inversion algorithm that works for +// both even and odd |n|. It was specifically designed to contain fewer +// branches that may leak sensitive information; see "New Branch Prediction +// Vulnerabilities in OpenSSL and Necessary Software Countermeasures" by +// Onur Acıçmez, Shay Gueron, and Jean-Pierre Seifert. static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx) { @@ -505,58 +504,53 @@ static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, A->neg = 0; sign = -1; - /* From B = a mod |n|, A = |n| it follows that - * - * 0 <= B < A, - * -sign*X*a == B (mod |n|), - * sign*Y*a == A (mod |n|). 
- */ + // From B = a mod |n|, A = |n| it follows that + // + // 0 <= B < A, + // -sign*X*a == B (mod |n|), + // sign*Y*a == A (mod |n|). while (!BN_is_zero(B)) { BIGNUM *tmp; - /* - * 0 < B < A, - * (*) -sign*X*a == B (mod |n|), - * sign*Y*a == A (mod |n|) - */ + // 0 < B < A, + // (*) -sign*X*a == B (mod |n|), + // sign*Y*a == A (mod |n|) - /* (D, M) := (A/B, A%B) ... */ + // (D, M) := (A/B, A%B) ... if (!BN_div(D, M, A, B, ctx)) { goto err; } - /* Now - * A = D*B + M; - * thus we have - * (**) sign*Y*a == D*B + M (mod |n|). - */ + // Now + // A = D*B + M; + // thus we have + // (**) sign*Y*a == D*B + M (mod |n|). - tmp = A; /* keep the BIGNUM object, the value does not matter */ + tmp = A; // keep the BIGNUM object, the value does not matter - /* (A, B) := (B, A mod B) ... */ + // (A, B) := (B, A mod B) ... A = B; B = M; - /* ... so we have 0 <= B < A again */ - - /* Since the former M is now B and the former B is now A, - * (**) translates into - * sign*Y*a == D*A + B (mod |n|), - * i.e. - * sign*Y*a - D*A == B (mod |n|). - * Similarly, (*) translates into - * -sign*X*a == A (mod |n|). - * - * Thus, - * sign*Y*a + D*sign*X*a == B (mod |n|), - * i.e. - * sign*(Y + D*X)*a == B (mod |n|). - * - * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at - * -sign*X*a == B (mod |n|), - * sign*Y*a == A (mod |n|). - * Note that X and Y stay non-negative all the time. - */ + // ... so we have 0 <= B < A again + + // Since the former M is now B and the former B is now A, + // (**) translates into + // sign*Y*a == D*A + B (mod |n|), + // i.e. + // sign*Y*a - D*A == B (mod |n|). + // Similarly, (*) translates into + // -sign*X*a == A (mod |n|). + // + // Thus, + // sign*Y*a + D*sign*X*a == B (mod |n|), + // i.e. + // sign*(Y + D*X)*a == B (mod |n|). + // + // So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at + // -sign*X*a == B (mod |n|), + // sign*Y*a == A (mod |n|). + // Note that X and Y stay non-negative all the time. if (!BN_mul(tmp, D, X, ctx)) { goto err; @@ -565,7 +559,7 @@ static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, goto err; } - M = Y; /* keep the BIGNUM object, the value does not matter */ + M = Y; // keep the BIGNUM object, the value does not matter Y = X; X = tmp; sign = -sign; @@ -577,22 +571,20 @@ static int bn_mod_inverse_general(BIGNUM *out, int *out_no_inverse, goto err; } - /* - * The while loop (Euclid's algorithm) ends when - * A == gcd(a,n); - * we have - * sign*Y*a == A (mod |n|), - * where Y is non-negative. - */ + // The while loop (Euclid's algorithm) ends when + // A == gcd(a,n); + // we have + // sign*Y*a == A (mod |n|), + // where Y is non-negative. if (sign < 0) { if (!BN_sub(Y, n, Y)) { goto err; } } - /* Now Y*a == A (mod |n|). */ + // Now Y*a == A (mod |n|). - /* Y*a == 1 (mod |n|) */ + // Y*a == 1 (mod |n|) if (!Y->neg && BN_ucmp(Y, n) < 0) { if (!BN_copy(R, Y)) { goto err; diff --git a/Sources/BoringSSL/crypto/bn/generic.c b/Sources/BoringSSL/crypto/fipsmodule/bn/generic.c similarity index 87% rename from Sources/BoringSSL/crypto/bn/generic.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/generic.c index de77cc573..a39a033c1 100644 --- a/Sources/BoringSSL/crypto/bn/generic.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/generic.c @@ -61,8 +61,8 @@ #include "internal.h" -/* This file has two other implementations: x86 assembly language in - * asm/bn-586.pl and x86_64 inline assembly in asm/x86_64-gcc.c. 
*/ +// This file has two other implementations: x86 assembly language in +// asm/bn-586.pl and x86_64 inline assembly in asm/x86_64-gcc.c. #if defined(OPENSSL_NO_ASM) || \ !(defined(OPENSSL_X86) || (defined(OPENSSL_X86_64) && defined(__GNUC__))) @@ -122,14 +122,13 @@ BN_UMULT_LOHI(r0, r1, tmp, tmp); \ } while (0) -#endif /* !BN_ULLONG */ +#endif // !BN_ULLONG -BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, +BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num, BN_ULONG w) { BN_ULONG c1 = 0; - assert(num >= 0); - if (num <= 0) { + if (num == 0) { return c1; } @@ -153,11 +152,11 @@ BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, return c1; } -BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) { +BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num, + BN_ULONG w) { BN_ULONG c1 = 0; - assert(num >= 0); - if (num <= 0) { + if (num == 0) { return c1; } @@ -179,9 +178,8 @@ BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w) { return c1; } -void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n) { - assert(n >= 0); - if (n <= 0) { +void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, size_t n) { + if (n == 0) { return; } @@ -204,26 +202,25 @@ void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n) { #ifdef BN_ULLONG BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, - int n) { + size_t n) { BN_ULLONG ll = 0; - assert(n >= 0); - if (n <= 0) { - return (BN_ULONG)0; + if (n == 0) { + return 0; } while (n & ~3) { ll += (BN_ULLONG)a[0] + b[0]; - r[0] = (BN_ULONG)ll & BN_MASK2; + r[0] = (BN_ULONG)ll; ll >>= BN_BITS2; ll += (BN_ULLONG)a[1] + b[1]; - r[1] = (BN_ULONG)ll & BN_MASK2; + r[1] = (BN_ULONG)ll; ll >>= BN_BITS2; ll += (BN_ULLONG)a[2] + b[2]; - r[2] = (BN_ULONG)ll & BN_MASK2; + r[2] = (BN_ULONG)ll; ll >>= BN_BITS2; ll += (BN_ULLONG)a[3] + b[3]; - r[3] = (BN_ULONG)ll & BN_MASK2; + r[3] = (BN_ULONG)ll; ll >>= BN_BITS2; a += 4; b += 4; @@ -232,7 +229,7 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, } while (n) { ll += (BN_ULLONG)a[0] + b[0]; - r[0] = (BN_ULONG)ll & BN_MASK2; + r[0] = (BN_ULONG)ll; ll >>= BN_BITS2; a++; b++; @@ -242,41 +239,40 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, return (BN_ULONG)ll; } -#else /* !BN_ULLONG */ +#else // !BN_ULLONG BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, - int n) { + size_t n) { BN_ULONG c, l, t; - assert(n >= 0); - if (n <= 0) { + if (n == 0) { return (BN_ULONG)0; } c = 0; while (n & ~3) { t = a[0]; - t = (t + c) & BN_MASK2; + t += c; c = (t < c); - l = (t + b[0]) & BN_MASK2; + l = t + b[0]; c += (l < t); r[0] = l; t = a[1]; - t = (t + c) & BN_MASK2; + t += c; c = (t < c); - l = (t + b[1]) & BN_MASK2; + l = t + b[1]; c += (l < t); r[1] = l; t = a[2]; - t = (t + c) & BN_MASK2; + t += c; c = (t < c); - l = (t + b[2]) & BN_MASK2; + l = t + b[2]; c += (l < t); r[2] = l; t = a[3]; - t = (t + c) & BN_MASK2; + t += c; c = (t < c); - l = (t + b[3]) & BN_MASK2; + l = t + b[3]; c += (l < t); r[3] = l; a += 4; @@ -286,9 +282,9 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, } while (n) { t = a[0]; - t = (t + c) & BN_MASK2; + t += c; c = (t < c); - l = (t + b[0]) & BN_MASK2; + l = t + b[0]; c += (l < t); r[0] = l; a++; @@ -299,40 +295,39 @@ BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, return (BN_ULONG)c; } -#endif /* !BN_ULLONG */ +#endif // !BN_ULLONG BN_ULONG bn_sub_words(BN_ULONG 
*r, const BN_ULONG *a, const BN_ULONG *b, - int n) { + size_t n) { BN_ULONG t1, t2; int c = 0; - assert(n >= 0); - if (n <= 0) { + if (n == 0) { return (BN_ULONG)0; } while (n & ~3) { t1 = a[0]; t2 = b[0]; - r[0] = (t1 - t2 - c) & BN_MASK2; + r[0] = t1 - t2 - c; if (t1 != t2) { c = (t1 < t2); } t1 = a[1]; t2 = b[1]; - r[1] = (t1 - t2 - c) & BN_MASK2; + r[1] = t1 - t2 - c; if (t1 != t2) { c = (t1 < t2); } t1 = a[2]; t2 = b[2]; - r[2] = (t1 - t2 - c) & BN_MASK2; + r[2] = t1 - t2 - c; if (t1 != t2) { c = (t1 < t2); } t1 = a[3]; t2 = b[3]; - r[3] = (t1 - t2 - c) & BN_MASK2; + r[3] = t1 - t2 - c; if (t1 != t2) { c = (t1 < t2); } @@ -344,7 +339,7 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, while (n) { t1 = a[0]; t2 = b[0]; - r[0] = (t1 - t2 - c) & BN_MASK2; + r[0] = t1 - t2 - c; if (t1 != t2) { c = (t1 < t2); } @@ -356,15 +351,15 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, return c; } -/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */ -/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */ -/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */ -/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */ +// mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) +// mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) +// sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) +// sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) #ifdef BN_ULLONG -/* Keep in mind that additions to multiplication result can not overflow, - * because its high half cannot be all-ones. */ +// Keep in mind that additions to multiplication result can not overflow, +// because its high half cannot be all-ones. #define mul_add_c(a, b, c0, c1, c2) \ do { \ BN_ULONG hi; \ @@ -372,7 +367,7 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, t += (c0); /* no carry */ \ (c0) = (BN_ULONG)Lw(t); \ hi = (BN_ULONG)Hw(t); \ - (c1) = ((c1) + (hi)) & BN_MASK2; \ + (c1) += (hi); \ if ((c1) < hi) { \ (c2)++; \ } \ @@ -385,14 +380,14 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, BN_ULLONG tt = t + (c0); /* no carry */ \ (c0) = (BN_ULONG)Lw(tt); \ hi = (BN_ULONG)Hw(tt); \ - (c1) = ((c1) + hi) & BN_MASK2; \ + (c1) += hi; \ if ((c1) < hi) { \ (c2)++; \ } \ t += (c0); /* no carry */ \ (c0) = (BN_ULONG)Lw(t); \ hi = (BN_ULONG)Hw(t); \ - (c1) = ((c1) + hi) & BN_MASK2; \ + (c1) += hi; \ if ((c1) < hi) { \ (c2)++; \ } \ @@ -405,7 +400,7 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, t += (c0); /* no carry */ \ (c0) = (BN_ULONG)Lw(t); \ hi = (BN_ULONG)Hw(t); \ - (c1) = ((c1) + hi) & BN_MASK2; \ + (c1) += hi; \ if ((c1) < hi) { \ (c2)++; \ } \ @@ -415,8 +410,8 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, #else -/* Keep in mind that additions to hi can not overflow, because the high word of - * a multiplication result cannot be all-ones. */ +// Keep in mind that additions to hi can not overflow, because the high word of +// a multiplication result cannot be all-ones. 
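The comment above can be made concrete with 32-bit words: the high half of a 32x32-bit product is at most 0xfffffffe, so adding a value below 2^32 to the full 64-bit product cannot wrap, and only the later c1 += hi step needs an explicit carry into c2. A sketch of the BN_ULLONG-style accumulator (hypothetical name, not BoringSSL code):

#include <stdint.h>

/* Accumulate a*b into the three-word value (c2, c1, c0), 32-bit words. */
static void mul_add_c32(uint32_t a, uint32_t b,
                        uint32_t *c0, uint32_t *c1, uint32_t *c2) {
  uint64_t t = (uint64_t)a * b;  /* high word <= 0xfffffffe */
  t += *c0;                      /* cannot overflow 64 bits */
  *c0 = (uint32_t)t;
  uint32_t hi = (uint32_t)(t >> 32);
  *c1 += hi;
  if (*c1 < hi) {                /* carry out of c1 */
    (*c2)++;
  }
}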
#define mul_add_c(a, b, c0, c1, c2) \ do { \ BN_ULONG ta = (a), tb = (b); \ @@ -456,9 +451,9 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, #define sqr_add_c2(a, i, j, c0, c1, c2) mul_add_c2((a)[i], (a)[j], c0, c1, c2) -#endif /* !BN_ULLONG */ +#endif // !BN_ULLONG -void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) { +void bn_mul_comba8(BN_ULONG r[16], const BN_ULONG a[8], const BN_ULONG b[8]) { BN_ULONG c1, c2, c3; c1 = 0; @@ -560,7 +555,7 @@ void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) { r[15] = c1; } -void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) { +void bn_mul_comba4(BN_ULONG r[8], const BN_ULONG a[4], const BN_ULONG b[4]) { BN_ULONG c1, c2, c3; c1 = 0; @@ -598,7 +593,7 @@ void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b) { r[7] = c2; } -void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a) { +void bn_sqr_comba8(BN_ULONG r[16], const BN_ULONG a[8]) { BN_ULONG c1, c2, c3; c1 = 0; @@ -672,7 +667,7 @@ void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a) { r[15] = c1; } -void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a) { +void bn_sqr_comba4(BN_ULONG r[8], const BN_ULONG a[4]) { BN_ULONG c1, c2, c3; c1 = 0; @@ -704,4 +699,12 @@ void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a) { r[7] = c2; } +#undef mul_add +#undef mul +#undef sqr +#undef mul_add_c +#undef mul_add_c2 +#undef sqr_add_c +#undef sqr_add_c2 + #endif diff --git a/Sources/BoringSSL/crypto/fipsmodule/bn/internal.h b/Sources/BoringSSL/crypto/fipsmodule/bn/internal.h new file mode 100644 index 000000000..75efbfab9 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/internal.h @@ -0,0 +1,413 @@ +/* Copyright (C) 1995-1997 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ +/* ==================================================================== + * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ +/* ==================================================================== + * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. + * + * Portions of the attached software ("Contribution") are developed by + * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. + * + * The Contribution is licensed pursuant to the Eric Young open source + * license provided above. + * + * The binary polynomial arithmetic software is originally written by + * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems + * Laboratories. */ + +#ifndef OPENSSL_HEADER_BN_INTERNAL_H +#define OPENSSL_HEADER_BN_INTERNAL_H + +#include + +#if defined(OPENSSL_X86_64) && defined(_MSC_VER) +OPENSSL_MSVC_PRAGMA(warning(push, 3)) +#include +OPENSSL_MSVC_PRAGMA(warning(pop)) +#pragma intrinsic(__umulh, _umul128) +#endif + +#include "../../internal.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(OPENSSL_64_BIT) + +#if !defined(_MSC_VER) +// MSVC doesn't support two-word integers on 64-bit. +#define BN_ULLONG uint128_t +#endif + +#define BN_BITS2 64 +#define BN_BYTES 8 +#define BN_BITS4 32 +#define BN_MASK2 (0xffffffffffffffffUL) +#define BN_MASK2l (0xffffffffUL) +#define BN_MASK2h (0xffffffff00000000UL) +#define BN_MASK2h1 (0xffffffff80000000UL) +#define BN_MONT_CTX_N0_LIMBS 1 +#define BN_DEC_CONV (10000000000000000000UL) +#define BN_DEC_NUM 19 +#define TOBN(hi, lo) ((BN_ULONG)(hi) << 32 | (lo)) + +#elif defined(OPENSSL_32_BIT) + +#define BN_ULLONG uint64_t +#define BN_BITS2 32 +#define BN_BYTES 4 +#define BN_BITS4 16 +#define BN_MASK2 (0xffffffffUL) +#define BN_MASK2l (0xffffUL) +#define BN_MASK2h1 (0xffff8000UL) +#define BN_MASK2h (0xffff0000UL) +// On some 32-bit platforms, Montgomery multiplication is done using 64-bit +// arithmetic with SIMD instructions. On such platforms, |BN_MONT_CTX::n0| +// needs to be two words long. Only certain 32-bit platforms actually make use +// of n0[1] and shorter R value would suffice for the others. However, +// currently only the assembly files know which is which. +#define BN_MONT_CTX_N0_LIMBS 2 +#define BN_DEC_CONV (1000000000UL) +#define BN_DEC_NUM 9 +#define TOBN(hi, lo) (lo), (hi) + +#else +#error "Must define either OPENSSL_32_BIT or OPENSSL_64_BIT" +#endif + + +#define STATIC_BIGNUM(x) \ + { \ + (BN_ULONG *)(x), sizeof(x) / sizeof(BN_ULONG), \ + sizeof(x) / sizeof(BN_ULONG), 0, BN_FLG_STATIC_DATA \ + } + +#if defined(BN_ULLONG) +#define Lw(t) ((BN_ULONG)(t)) +#define Hw(t) ((BN_ULONG)((t) >> BN_BITS2)) +#endif + +// bn_correct_top decrements |bn->top| until |bn->d[top-1]| is non-zero or +// until |top| is zero. If |bn| is zero, |bn->neg| is set to zero. 
+void bn_correct_top(BIGNUM *bn); + +// bn_wexpand ensures that |bn| has at least |words| works of space without +// altering its value. It returns one on success or zero on allocation +// failure. +int bn_wexpand(BIGNUM *bn, size_t words); + +// bn_expand acts the same as |bn_wexpand|, but takes a number of bits rather +// than a number of words. +int bn_expand(BIGNUM *bn, size_t bits); + +// bn_set_words sets |bn| to the value encoded in the |num| words in |words|, +// least significant word first. +int bn_set_words(BIGNUM *bn, const BN_ULONG *words, size_t num); + +// bn_mul_add_words multiples |ap| by |w|, adds the result to |rp|, and places +// the result in |rp|. |ap| and |rp| must both be |num| words long. It returns +// the carry word of the operation. |ap| and |rp| may be equal but otherwise may +// not alias. +BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num, + BN_ULONG w); + +// bn_mul_words multiples |ap| by |w| and places the result in |rp|. |ap| and +// |rp| must both be |num| words long. It returns the carry word of the +// operation. |ap| and |rp| may be equal but otherwise may not alias. +BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num, BN_ULONG w); + +// bn_sqr_words sets |rp[2*i]| and |rp[2*i+1]| to |ap[i]|'s square, for all |i| +// up to |num|. |ap| is an array of |num| words and |rp| an array of |2*num| +// words. |ap| and |rp| may not alias. +// +// This gives the contribution of the |ap[i]*ap[i]| terms when squaring |ap|. +void bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, size_t num); + +// bn_add_words adds |ap| to |bp| and places the result in |rp|, each of which +// are |num| words long. It returns the carry bit, which is one if the operation +// overflowed and zero otherwise. Any pair of |ap|, |bp|, and |rp| may be equal +// to each other but otherwise may not alias. +BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, + size_t num); + +// bn_sub_words subtracts |bp| from |ap| and places the result in |rp|. It +// returns the borrow bit, which is one if the computation underflowed and zero +// otherwise. Any pair of |ap|, |bp|, and |rp| may be equal to each other but +// otherwise may not alias. +BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, + size_t num); + +// bn_mul_comba4 sets |r| to the product of |a| and |b|. +void bn_mul_comba4(BN_ULONG r[8], const BN_ULONG a[4], const BN_ULONG b[4]); + +// bn_mul_comba8 sets |r| to the product of |a| and |b|. +void bn_mul_comba8(BN_ULONG r[16], const BN_ULONG a[8], const BN_ULONG b[8]); + +// bn_sqr_comba8 sets |r| to |a|^2. +void bn_sqr_comba8(BN_ULONG r[16], const BN_ULONG a[4]); + +// bn_sqr_comba4 sets |r| to |a|^2. +void bn_sqr_comba4(BN_ULONG r[8], const BN_ULONG a[4]); + +// bn_cmp_words returns a value less than, equal to or greater than zero if +// the, length |n|, array |a| is less than, equal to or greater than |b|. +int bn_cmp_words(const BN_ULONG *a, const BN_ULONG *b, int n); + +// bn_cmp_words returns a value less than, equal to or greater than zero if the +// array |a| is less than, equal to or greater than |b|. The arrays can be of +// different lengths: |cl| gives the minimum of the two lengths and |dl| gives +// the length of |a| minus the length of |b|. +int bn_cmp_part_words(const BN_ULONG *a, const BN_ULONG *b, int cl, int dl); + +// bn_less_than_words returns one if |a| < |b| and zero otherwise, where |a| +// and |b| both are |len| words long. It runs in constant time. 
+int bn_less_than_words(const BN_ULONG *a, const BN_ULONG *b, size_t len); + +// bn_in_range_words returns one if |min_inclusive| <= |a| < |max_exclusive|, +// where |a| and |max_exclusive| both are |len| words long. This function leaks +// which of [0, min_inclusive), [min_inclusive, max_exclusive), and +// [max_exclusive, 2^(BN_BITS2*len)) contains |a|, but otherwise the value of +// |a| is secret. +int bn_in_range_words(const BN_ULONG *a, BN_ULONG min_inclusive, + const BN_ULONG *max_exclusive, size_t len); + +// bn_rand_range_words sets |out| to a uniformly distributed random number from +// |min_inclusive| to |max_exclusive|. Both |out| and |max_exclusive| are |len| +// words long. +// +// This function runs in time independent of the result, but |min_inclusive| and +// |max_exclusive| are public data. (Information about the range is unavoidably +// leaked by how many iterations it took to select a number.) +int bn_rand_range_words(BN_ULONG *out, BN_ULONG min_inclusive, + const BN_ULONG *max_exclusive, size_t len, + const uint8_t additional_data[32]); + +int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, + const BN_ULONG *np, const BN_ULONG *n0, int num); + +uint64_t bn_mont_n0(const BIGNUM *n); +int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n); + +#if defined(OPENSSL_X86_64) && defined(_MSC_VER) +#define BN_UMULT_LOHI(low, high, a, b) ((low) = _umul128((a), (b), &(high))) +#endif + +#if !defined(BN_ULLONG) && !defined(BN_UMULT_LOHI) +#error "Either BN_ULLONG or BN_UMULT_LOHI must be defined on every platform." +#endif + +// bn_mod_inverse_prime sets |out| to the modular inverse of |a| modulo |p|, +// computed with Fermat's Little Theorem. It returns one on success and zero on +// error. If |mont_p| is NULL, one will be computed temporarily. +int bn_mod_inverse_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p, + BN_CTX *ctx, const BN_MONT_CTX *mont_p); + +// bn_mod_inverse_secret_prime behaves like |bn_mod_inverse_prime| but uses +// |BN_mod_exp_mont_consttime| instead of |BN_mod_exp_mont| in hopes of +// protecting the exponent. +int bn_mod_inverse_secret_prime(BIGNUM *out, const BIGNUM *a, const BIGNUM *p, + BN_CTX *ctx, const BN_MONT_CTX *mont_p); + +// bn_jacobi returns the Jacobi symbol of |a| and |b| (which is -1, 0 or 1), or +// -2 on error. +int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); + +// bn_is_bit_set_words returns one if bit |bit| is set in |a| and zero +// otherwise. +int bn_is_bit_set_words(const BN_ULONG *a, size_t num, unsigned bit); + + +// Low-level operations for small numbers. +// +// The following functions implement algorithms suitable for use with scalars +// and field elements in elliptic curves. They rely on the number being small +// both to stack-allocate various temporaries and because they do not implement +// optimizations useful for the larger values used in RSA. + +// BN_SMALL_MAX_WORDS is the largest size input these functions handle. This +// limit allows temporaries to be more easily stack-allocated. This limit is set +// to accommodate P-521. +#if defined(OPENSSL_32_BIT) +#define BN_SMALL_MAX_WORDS 17 +#else +#define BN_SMALL_MAX_WORDS 9 +#endif + +// bn_mul_small sets |r| to |a|*|b|. |num_r| must be |num_a| + |num_b|. |r| may +// not alias with |a| or |b|. This function returns one on success and zero if +// lengths are inconsistent. +int bn_mul_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, size_t num_a, + const BN_ULONG *b, size_t num_b); + +// bn_sqr_small sets |r| to |a|^2. 
|num_a| must be at most |BN_SMALL_MAX_WORDS|. +// |num_r| must be |num_a|*2. |r| and |a| may not alias. This function returns +// one on success and zero on programmer error. +int bn_sqr_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, size_t num_a); + +// In the following functions, the modulus must be at most |BN_SMALL_MAX_WORDS| +// words long. + +// bn_to_montgomery_small sets |r| to |a| translated to the Montgomery domain. +// |num_a| and |num_r| must be the length of the modulus, which is +// |mont->N.top|. |a| must be fully reduced. This function returns one on +// success and zero if lengths are inconsistent. |r| and |a| may alias. +int bn_to_montgomery_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, + size_t num_a, const BN_MONT_CTX *mont); + +// bn_from_montgomery_small sets |r| to |a| translated out of the Montgomery +// domain. |num_r| must be the length of the modulus, which is |mont->N.top|. +// |a| must be at most |mont->N.top| * R and |num_a| must be at most 2 * +// |mont->N.top|. This function returns one on success and zero if lengths are +// inconsistent. |r| and |a| may alias. +int bn_from_montgomery_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, + size_t num_a, const BN_MONT_CTX *mont); + +// bn_mod_mul_montgomery_small sets |r| to |a| * |b| mod |mont->N|. Both inputs +// and outputs are in the Montgomery domain. |num_r| must be the length of the +// modulus, which is |mont->N.top|. This function returns one on success and +// zero on internal error or inconsistent lengths. Any two of |r|, |a|, and |b| +// may alias. +// +// This function requires |a| * |b| < N * R, where N is the modulus and R is the +// Montgomery divisor, 2^(N.top * BN_BITS2). This should generally be satisfied +// by ensuring |a| and |b| are fully reduced, however ECDSA has one computation +// which requires the more general bound. +int bn_mod_mul_montgomery_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, + size_t num_a, const BN_ULONG *b, size_t num_b, + const BN_MONT_CTX *mont); + +// bn_mod_exp_mont_small sets |r| to |a|^|p| mod |mont->N|. It returns one on +// success and zero on programmer or internal error. Both inputs and outputs are +// in the Montgomery domain. |num_r| and |num_a| must be |mont->N.top|, which +// must be at most |BN_SMALL_MAX_WORDS|. |a| must be fully-reduced. This +// function runs in time independent of |a|, but |p| and |mont->N| are public +// values. +// +// Note this function differs from |BN_mod_exp_mont| which uses Montgomery +// reduction but takes input and output outside the Montgomery domain. Combine +// this function with |bn_from_montgomery_small| and |bn_to_montgomery_small| +// if necessary. +int bn_mod_exp_mont_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, + size_t num_a, const BN_ULONG *p, size_t num_p, + const BN_MONT_CTX *mont); + +// bn_mod_inverse_prime_mont_small sets |r| to |a|^-1 mod |mont->N|. |mont->N| +// must be a prime. |num_r| and |num_a| must be |mont->N.top|, which must be at +// most |BN_SMALL_MAX_WORDS|. |a| must be fully-reduced. This function runs in +// time independent of |a|, but |mont->N| is a public value. 
+int bn_mod_inverse_prime_mont_small(BN_ULONG *r, size_t num_r, + const BN_ULONG *a, size_t num_a, + const BN_MONT_CTX *mont); + + +#if defined(__cplusplus) +} // extern C +#endif + +#endif // OPENSSL_HEADER_BN_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/bn/kronecker.c b/Sources/BoringSSL/crypto/fipsmodule/bn/jacobi.c similarity index 68% rename from Sources/BoringSSL/crypto/bn/kronecker.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/jacobi.c index 208985185..9c909bb22 100644 --- a/Sources/BoringSSL/crypto/bn/kronecker.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/jacobi.c @@ -52,94 +52,64 @@ #include +#include + #include "internal.h" -/* least significant word */ +// least significant word #define BN_lsw(n) (((n)->top == 0) ? (BN_ULONG) 0 : (n)->d[0]) -/* Returns -2 for errors because both -1 and 0 are valid results. */ -int BN_kronecker(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { - int i; - int ret = -2; - BIGNUM *A, *B, *tmp; - /* In 'tab', only odd-indexed entries are relevant: - * For any odd BIGNUM n, - * tab[BN_lsw(n) & 7] - * is $(-1)^{(n^2-1)/8}$ (using TeX notation). - * Note that the sign of n does not matter. */ +int bn_jacobi(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { + // In 'tab', only odd-indexed entries are relevant: + // For any odd BIGNUM n, + // tab[BN_lsw(n) & 7] + // is $(-1)^{(n^2-1)/8}$ (using TeX notation). + // Note that the sign of n does not matter. static const int tab[8] = {0, 1, 0, -1, 0, -1, 0, 1}; - BN_CTX_start(ctx); - A = BN_CTX_get(ctx); - B = BN_CTX_get(ctx); - if (B == NULL) { - goto end; - } - - if (!BN_copy(A, a) || - !BN_copy(B, b)) { - goto end; + // The Jacobi symbol is only defined for odd modulus. + if (!BN_is_odd(b)) { + OPENSSL_PUT_ERROR(BN, BN_R_CALLED_WITH_EVEN_MODULUS); + return -2; } - /* Kronecker symbol, imlemented according to Henri Cohen, - * "A Course in Computational Algebraic Number Theory" - * (algorithm 1.4.10). */ - - /* Cohen's step 1: */ - - if (BN_is_zero(B)) { - ret = BN_abs_is_word(A, 1); - goto end; + // Require b be positive. + if (BN_is_negative(b)) { + OPENSSL_PUT_ERROR(BN, BN_R_NEGATIVE_NUMBER); + return -2; } - /* Cohen's step 2: */ - - if (!BN_is_odd(A) && !BN_is_odd(B)) { - ret = 0; + int ret = -2; + BN_CTX_start(ctx); + BIGNUM *A = BN_CTX_get(ctx); + BIGNUM *B = BN_CTX_get(ctx); + if (B == NULL) { goto end; } - /* now B is non-zero */ - i = 0; - while (!BN_is_bit_set(B, i)) { - i++; - } - if (!BN_rshift(B, B, i)) { + if (!BN_copy(A, a) || + !BN_copy(B, b)) { goto end; } - if (i & 1) { - /* i is odd */ - /* (thus B was even, thus A must be odd!) */ - - /* set 'ret' to $(-1)^{(A^2-1)/8}$ */ - ret = tab[BN_lsw(A) & 7]; - } else { - /* i is even */ - ret = 1; - } - if (B->neg) { - B->neg = 0; - if (A->neg) { - ret = -ret; - } - } + // Adapted from logic to compute the Kronecker symbol, originally implemented + // according to Henri Cohen, "A Course in Computational Algebraic Number + // Theory" (algorithm 1.4.10). - /* now B is positive and odd, so what remains to be done is to compute the - * Jacobi symbol (A/B) and multiply it by 'ret' */ + ret = 1; while (1) { - /* Cohen's step 3: */ + // Cohen's step 3: - /* B is positive and odd */ + // B is positive and odd if (BN_is_zero(A)) { ret = BN_is_one(B) ? 
ret : 0; goto end; } - /* now A is non-zero */ - i = 0; + // now A is non-zero + int i = 0; while (!BN_is_bit_set(A, i)) { i++; } @@ -148,23 +118,23 @@ int BN_kronecker(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { goto end; } if (i & 1) { - /* i is odd */ - /* multiply 'ret' by $(-1)^{(B^2-1)/8}$ */ + // i is odd + // multiply 'ret' by $(-1)^{(B^2-1)/8}$ ret = ret * tab[BN_lsw(B) & 7]; } - /* Cohen's step 4: */ - /* multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$ */ + // Cohen's step 4: + // multiply 'ret' by $(-1)^{(A-1)(B-1)/4}$ if ((A->neg ? ~BN_lsw(A) : BN_lsw(A)) & BN_lsw(B) & 2) { ret = -ret; } - /* (A, B) := (B mod |A|, |A|) */ + // (A, B) := (B mod |A|, |A|) if (!BN_nnmod(B, B, A, ctx)) { ret = -2; goto end; } - tmp = A; + BIGNUM *tmp = A; A = B; B = tmp; tmp->neg = 0; diff --git a/Sources/BoringSSL/crypto/bn/montgomery.c b/Sources/BoringSSL/crypto/fipsmodule/bn/montgomery.c similarity index 61% rename from Sources/BoringSSL/crypto/bn/montgomery.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/montgomery.c index aa5bc4246..e8505dae0 100644 --- a/Sources/BoringSSL/crypto/bn/montgomery.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/montgomery.c @@ -114,9 +114,10 @@ #include #include #include +#include #include "internal.h" -#include "../internal.h" +#include "../../internal.h" #if !defined(OPENSSL_NO_ASM) && \ @@ -125,6 +126,11 @@ #define OPENSSL_BN_ASM_MONT #endif +static int bn_mod_mul_montgomery_fallback(BIGNUM *r, const BIGNUM *a, + const BIGNUM *b, + const BN_MONT_CTX *mont, BN_CTX *ctx); + + BN_MONT_CTX *BN_MONT_CTX_new(void) { BN_MONT_CTX *ret = OPENSSL_malloc(sizeof(BN_MONT_CTX)); @@ -182,18 +188,18 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) { return 0; } - /* Save the modulus. */ + // Save the modulus. if (!BN_copy(&mont->N, mod)) { OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR); return 0; } - /* Find n0 such that n0 * N == -1 (mod r). - * - * Only certain BN_BITS2<=32 platforms actually make use of n0[1]. For the - * others, we could use a shorter R value and use faster |BN_ULONG|-based - * math instead of |uint64_t|-based math, which would be double-precision. - * However, currently only the assembler files know which is which. */ + // Find n0 such that n0 * N == -1 (mod r). + // + // Only certain BN_BITS2<=32 platforms actually make use of n0[1]. For the + // others, we could use a shorter R value and use faster |BN_ULONG|-based + // math instead of |uint64_t|-based math, which would be double-precision. + // However, currently only the assembler files know which is which. uint64_t n0 = bn_mont_n0(mod); mont->n0[0] = (BN_ULONG)n0; #if BN_MONT_CTX_N0_LIMBS == 2 @@ -202,14 +208,13 @@ int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) { mont->n0[1] = 0; #endif - /* Save RR = R**2 (mod N). R is the smallest power of 2**BN_BITS such that R - * > mod. Even though the assembly on some 32-bit platforms works with 64-bit - * values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS * - * BN_BITS2|, is correct because R**2 will still be a multiple of the latter - * as |BN_MONT_CTX_N0_LIMBS| is either one or two. - * - * XXX: This is not constant time with respect to |mont->N|, but it should - * be. */ + // Save RR = R**2 (mod N). R is the smallest power of 2**BN_BITS2 such that R + // > mod. 
Even though the assembly on some 32-bit platforms works with 64-bit + // values, using |BN_BITS2| here, rather than |BN_MONT_CTX_N0_LIMBS * + // BN_BITS2|, is correct because R**2 will still be a multiple of the latter + // as |BN_MONT_CTX_N0_LIMBS| is either one or two. + // + // XXX: This is not constant time with respect to |mont->N|, but it should be. unsigned lgBigR = (BN_num_bits(mod) + (BN_BITS2 - 1)) / BN_BITS2 * BN_BITS2; if (!bn_mod_exp_base_2_vartime(&mont->RR, lgBigR * 2, &mont->N)) { return 0; @@ -255,87 +260,75 @@ int BN_to_montgomery(BIGNUM *ret, const BIGNUM *a, const BN_MONT_CTX *mont, return BN_mod_mul_montgomery(ret, a, &mont->RR, mont, ctx); } +static int bn_from_montgomery_in_place(BN_ULONG *r, size_t num_r, BN_ULONG *a, + size_t num_a, const BN_MONT_CTX *mont) { + const BN_ULONG *n = mont->N.d; + size_t num_n = mont->N.top; + if (num_r != num_n || num_a != 2 * num_n) { + OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return 0; + } + + // Add multiples of |n| to |r| until R = 2^(nl * BN_BITS2) divides it. On + // input, we had |r| < |n| * R, so now |r| < 2 * |n| * R. Note that |r| + // includes |carry| which is stored separately. + BN_ULONG n0 = mont->n0[0]; + BN_ULONG carry = 0; + for (size_t i = 0; i < num_n; i++) { + BN_ULONG v = bn_mul_add_words(a + i, n, num_n, a[i] * n0); + v += carry + a[i + num_n]; + carry |= (v != a[i + num_n]); + carry &= (v <= a[i + num_n]); + a[i + num_n] = v; + } + + // Shift |num_n| words to divide by R. We have |a| < 2 * |n|. Note that |a| + // includes |carry| which is stored separately. + a += num_n; + + // |a| thus requires at most one additional subtraction |n| to be reduced. + // Subtract |n| and select the answer in constant time. + OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t), + crypto_word_t_too_small); + BN_ULONG v = bn_sub_words(r, a, n, num_n) - carry; + // |v| is one if |a| - |n| underflowed or zero if it did not. Note |v| cannot + // be -1. That would imply the subtraction did not fit in |num_n| words, and + // we know at most one subtraction is needed. + v = 0u - v; + for (size_t i = 0; i < num_n; i++) { + r[i] = constant_time_select_w(v, a[i], r[i]); + a[i] = 0; + } + return 1; +} + static int BN_from_montgomery_word(BIGNUM *ret, BIGNUM *r, const BN_MONT_CTX *mont) { - BN_ULONG *ap, *np, *rp, n0, v, carry; - int nl, max, i; - const BIGNUM *n = &mont->N; - nl = n->top; - if (nl == 0) { + if (n->top == 0) { ret->top = 0; return 1; } - max = (2 * nl); /* carry is stored separately */ - if (bn_wexpand(r, max) == NULL) { + int max = (2 * n->top); // carry is stored separately + if (!bn_wexpand(r, max) || + !bn_wexpand(ret, n->top)) { return 0; } - - r->neg ^= n->neg; - np = n->d; - rp = r->d; - - /* clear the top words of T */ + // Clear the top words of |r|. 
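bn_from_montgomery_in_place, added in the hunk above, is word-level Montgomery reduction: add multiples of |n| until R divides the double-width value, drop the low half, and subtract |n| at most once, selecting the answer in constant time. A rough sketch of the same computation on 32-bit words follows (hypothetical names, not BoringSSL code; the final selection is an ordinary branch rather than a constant-time mask).

#include <stddef.h>
#include <stdint.h>

/* t has 2*num words and holds a value < n*R with R = 2^(32*num); n0 is
   -1/n mod 2^32; r receives the num-word result t*R^-1 mod n. r must not
   overlap t. */
static void redc_u32(uint32_t *r, uint32_t *t, const uint32_t *n, size_t num,
                     uint32_t n0) {
  uint32_t carry = 0;
  for (size_t i = 0; i < num; i++) {
    uint32_t m = t[i] * n0;            /* makes t[i] + m*n[0] == 0 (mod 2^32) */
    uint64_t c = 0;
    for (size_t j = 0; j < num; j++) { /* t[i..i+num) += m * n */
      uint64_t v = (uint64_t)m * n[j] + t[i + j] + c;
      t[i + j] = (uint32_t)v;
      c = v >> 32;
    }
    uint64_t v = (uint64_t)t[i + num] + c + carry; /* fold carries upward */
    t[i + num] = (uint32_t)v;
    carry = (uint32_t)(v >> 32);
  }
  /* The low num words are now zero, so the value is carry*R + t[num..2*num).
     It is < 2*n, so at most one subtraction of n is needed. */
  uint32_t borrow = 0;
  for (size_t i = 0; i < num; i++) {
    uint64_t v = (uint64_t)t[num + i] - n[i] - borrow;
    r[i] = (uint32_t)v;
    borrow = (uint32_t)(v >> 63); /* 1 iff the subtraction wrapped */
  }
  if (borrow && !carry) { /* underflow with no carry to absorb it */
    for (size_t i = 0; i < num; i++) {
      r[i] = t[num + i]; /* keep the unsubtracted, already-reduced value */
    }
  }
}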
if (max > r->top) { - OPENSSL_memset(&rp[r->top], 0, (max - r->top) * sizeof(BN_ULONG)); + OPENSSL_memset(r->d + r->top, 0, (max - r->top) * sizeof(BN_ULONG)); } - r->top = max; - n0 = mont->n0[0]; - - for (carry = 0, i = 0; i < nl; i++, rp++) { - v = bn_mul_add_words(rp, np, nl, (rp[0] * n0) & BN_MASK2); - v = (v + carry + rp[nl]) & BN_MASK2; - carry |= (v != rp[nl]); - carry &= (v <= rp[nl]); - rp[nl] = v; - } + ret->top = n->top; - if (bn_wexpand(ret, nl) == NULL) { + if (!bn_from_montgomery_in_place(ret->d, ret->top, r->d, r->top, mont)) { return 0; } - ret->top = nl; ret->neg = r->neg; - rp = ret->d; - ap = &(r->d[nl]); - - { - BN_ULONG *nrp; - uintptr_t m; - - v = bn_sub_words(rp, ap, np, nl) - carry; - /* if subtraction result is real, then trick unconditional memcpy below to - * perform in-place "refresh" instead of actual copy. */ - m = (0u - (uintptr_t)v); - nrp = (BN_ULONG *)(((uintptr_t)rp & ~m) | ((uintptr_t)ap & m)); - - for (i = 0, nl -= 4; i < nl; i += 4) { - BN_ULONG t1, t2, t3, t4; - - t1 = nrp[i + 0]; - t2 = nrp[i + 1]; - t3 = nrp[i + 2]; - ap[i + 0] = 0; - t4 = nrp[i + 3]; - ap[i + 1] = 0; - rp[i + 0] = t1; - ap[i + 2] = 0; - rp[i + 1] = t2; - ap[i + 3] = 0; - rp[i + 2] = t3; - rp[i + 3] = t4; - } - - for (nl += 4; i < nl; i++) { - rp[i] = nrp[i], ap[i] = 0; - } - } - bn_correct_top(r); bn_correct_top(ret); - return 1; } @@ -361,27 +354,43 @@ int BN_from_montgomery(BIGNUM *r, const BIGNUM *a, const BN_MONT_CTX *mont, int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BN_MONT_CTX *mont, BN_CTX *ctx) { - BIGNUM *tmp; - int ret = 0; - -#if defined(OPENSSL_BN_ASM_MONT) +#if !defined(OPENSSL_BN_ASM_MONT) + return bn_mod_mul_montgomery_fallback(r, a, b, mont, ctx); +#else int num = mont->N.top; - if (num > 1 && a->top == num && b->top == num) { - if (bn_wexpand(r, num) == NULL) { - return 0; - } - if (bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) { - r->neg = a->neg ^ b->neg; - r->top = num; - bn_correct_top(r); - return 1; - } + // |bn_mul_mont| requires at least 128 bits of limbs, at least for x86. + if (num < (128 / BN_BITS2) || + a->top != num || + b->top != num) { + return bn_mod_mul_montgomery_fallback(r, a, b, mont, ctx); + } + + if (!bn_wexpand(r, num)) { + return 0; + } + if (!bn_mul_mont(r->d, a->d, b->d, mont->N.d, mont->n0, num)) { + // The check above ensures this won't happen. 
+ assert(0); + OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR); + return 0; } + r->neg = a->neg ^ b->neg; + r->top = num; + bn_correct_top(r); + + return 1; #endif +} + +static int bn_mod_mul_montgomery_fallback(BIGNUM *r, const BIGNUM *a, + const BIGNUM *b, + const BN_MONT_CTX *mont, + BN_CTX *ctx) { + int ret = 0; BN_CTX_start(ctx); - tmp = BN_CTX_get(ctx); + BIGNUM *tmp = BN_CTX_get(ctx); if (tmp == NULL) { goto err; } @@ -396,7 +405,7 @@ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, } } - /* reduce from aRR to aR */ + // reduce from aRR to aR if (!BN_from_montgomery_word(r, tmp, mont)) { goto err; } @@ -407,3 +416,68 @@ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX_end(ctx); return ret; } + +int bn_to_montgomery_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, + size_t num_a, const BN_MONT_CTX *mont) { + return bn_mod_mul_montgomery_small(r, num_r, a, num_a, mont->RR.d, + mont->RR.top, mont); +} + +int bn_from_montgomery_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, + size_t num_a, const BN_MONT_CTX *mont) { + size_t num_n = mont->N.top; + if (num_a > 2 * num_n || num_r != num_n || num_n > BN_SMALL_MAX_WORDS) { + OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return 0; + } + BN_ULONG tmp[BN_SMALL_MAX_WORDS * 2]; + size_t num_tmp = 2 * num_n; + OPENSSL_memcpy(tmp, a, num_a * sizeof(BN_ULONG)); + OPENSSL_memset(tmp + num_a, 0, (num_tmp - num_a) * sizeof(BN_ULONG)); + int ret = bn_from_montgomery_in_place(r, num_r, tmp, num_tmp, mont); + OPENSSL_cleanse(tmp, num_tmp * sizeof(BN_ULONG)); + return ret; +} + +int bn_mod_mul_montgomery_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, + size_t num_a, const BN_ULONG *b, size_t num_b, + const BN_MONT_CTX *mont) { + size_t num_n = mont->N.top; + if (num_r != num_n || num_a + num_b > 2 * num_n || + num_n > BN_SMALL_MAX_WORDS) { + OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return 0; + } + +#if defined(OPENSSL_BN_ASM_MONT) + // |bn_mul_mont| requires at least 128 bits of limbs, at least for x86. + if (num_n >= (128 / BN_BITS2) && + num_a == num_n && + num_b == num_n) { + if (!bn_mul_mont(r, a, b, mont->N.d, mont->n0, num_n)) { + assert(0); // The check above ensures this won't happen. + OPENSSL_PUT_ERROR(BN, ERR_R_INTERNAL_ERROR); + return 0; + } + return 1; + } +#endif + + // Compute the product. + BN_ULONG tmp[2 * BN_SMALL_MAX_WORDS]; + size_t num_tmp = 2 * num_n; + size_t num_ab = num_a + num_b; + if (a == b && num_a == num_b) { + if (!bn_sqr_small(tmp, num_ab, a, num_a)) { + return 0; + } + } else if (!bn_mul_small(tmp, num_ab, a, num_a, b, num_b)) { + return 0; + } + + // Zero-extend to full width and reduce. + OPENSSL_memset(tmp + num_ab, 0, (num_tmp - num_ab) * sizeof(BN_ULONG)); + int ret = bn_from_montgomery_in_place(r, num_r, tmp, num_tmp, mont); + OPENSSL_cleanse(tmp, num_tmp * sizeof(BN_ULONG)); + return ret; +} diff --git a/Sources/BoringSSL/crypto/fipsmodule/bn/montgomery_inv.c b/Sources/BoringSSL/crypto/fipsmodule/bn/montgomery_inv.c new file mode 100644 index 000000000..c3c788ab1 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/montgomery_inv.c @@ -0,0 +1,207 @@ +/* Copyright 2016 Brian Smith. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include + +#include "internal.h" +#include "../../internal.h" + + +static uint64_t bn_neg_inv_mod_r_u64(uint64_t n); + +OPENSSL_COMPILE_ASSERT(BN_MONT_CTX_N0_LIMBS == 1 || BN_MONT_CTX_N0_LIMBS == 2, + BN_MONT_CTX_N0_LIMBS_VALUE_INVALID_2); +OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) == + BN_MONT_CTX_N0_LIMBS * sizeof(BN_ULONG), + BN_MONT_CTX_N0_LIMBS_DOES_NOT_MATCH_UINT64_T); + +// LG_LITTLE_R is log_2(r). +#define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2) + +uint64_t bn_mont_n0(const BIGNUM *n) { + // These conditions are checked by the caller, |BN_MONT_CTX_set|. + assert(!BN_is_zero(n)); + assert(!BN_is_negative(n)); + assert(BN_is_odd(n)); + + // r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This + // ensures that we can do integer division by |r| by simply ignoring + // |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo + // |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is + // what makes Montgomery multiplication efficient. + // + // As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography + // with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a + // multi-limb Montgomery multiplication of |a * b (mod n)|, given the + // unreduced product |t == a * b|, we repeatedly calculate: + // + // t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph). + // t2 := t1*n0*n + // t3 := t + t2 + // t := t3 / r copy all limbs of |t3| except the lowest to |t|. + // + // In the last step, it would only make sense to ignore the lowest limb of + // |t3| if it were zero. The middle steps ensure that this is the case: + // + // t3 == 0 (mod r) + // t + t2 == 0 (mod r) + // t + t1*n0*n == 0 (mod r) + // t1*n0*n == -t (mod r) + // t*n0*n == -t (mod r) + // n0*n == -1 (mod r) + // n0 == -1/n (mod r) + // + // Thus, in each iteration of the loop, we multiply by the constant factor + // |n0|, the negative inverse of n (mod r). + + // n_mod_r = n % r. As explained above, this is done by taking the lowest + // |BN_MONT_CTX_N0_LIMBS| limbs of |n|. + uint64_t n_mod_r = n->d[0]; +#if BN_MONT_CTX_N0_LIMBS == 2 + if (n->top > 1) { + n_mod_r |= (uint64_t)n->d[1] << BN_BITS2; + } +#endif + + return bn_neg_inv_mod_r_u64(n_mod_r); +} + +// bn_neg_inv_r_mod_n_u64 calculates the -1/n mod r; i.e. it calculates |v| +// such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n| +// must be odd. +// +// This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery +// Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf). +// It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and +// Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000" +// (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21). +// +// This is inspired by Joppe W. Bos's "Constant Time Modular Inversion" +// (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is +// constant-time with respect to |n|. 
We assume uint64_t additions, +// subtractions, shifts, and bitwise operations are all constant time, which +// may be a large leap of faith on 32-bit targets. We avoid division and +// multiplication, which tend to be the most problematic in terms of timing +// leaks. +// +// Most GCD implementations return values such that |u*r + v*n == 1|, so the +// caller would have to negate the resultant |v| for the purpose of Montgomery +// multiplication. This implementation does the negation implicitly by doing +// the computations as a difference instead of a sum. +static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) { + assert(n % 2 == 1); + + // alpha == 2**(lg r - 1) == r / 2. + static const uint64_t alpha = UINT64_C(1) << (LG_LITTLE_R - 1); + + const uint64_t beta = n; + + uint64_t u = 1; + uint64_t v = 0; + + // The invariant maintained from here on is: + // 2**(lg r - i) == u*2*alpha - v*beta. + for (size_t i = 0; i < LG_LITTLE_R; ++i) { +#if BN_BITS2 == 64 && defined(BN_ULLONG) + assert((BN_ULLONG)(1) << (LG_LITTLE_R - i) == + ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta)); +#endif + + // Delete a common factor of 2 in u and v if |u| is even. Otherwise, set + // |u = (u + beta) / 2| and |v = (v / 2) + alpha|. + + uint64_t u_is_odd = UINT64_C(0) - (u & 1); // Either 0xff..ff or 0. + + // The addition can overflow, so use Dietz's method for it. + // + // Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all + // (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values + // (embedded in 64 bits to so that overflow can be ignored): + // + // (declare-fun x () (_ BitVec 64)) + // (declare-fun y () (_ BitVec 64)) + // (assert (let ( + // (one (_ bv1 64)) + // (thirtyTwo (_ bv32 64))) + // (and + // (bvult x (bvshl one thirtyTwo)) + // (bvult y (bvshl one thirtyTwo)) + // (not (= + // (bvadd (bvlshr (bvxor x y) one) (bvand x y)) + // (bvlshr (bvadd x y) one))) + // ))) + // (check-sat) + uint64_t beta_if_u_is_odd = beta & u_is_odd; // Either |beta| or 0. + u = ((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd); + + uint64_t alpha_if_u_is_odd = alpha & u_is_odd; // Either |alpha| or 0. + v = (v >> 1) + alpha_if_u_is_odd; + } + + // The invariant now shows that u*r - v*n == 1 since r == 2 * alpha. +#if BN_BITS2 == 64 && defined(BN_ULLONG) + assert(1 == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta)); +#endif + + return v; +} + +// bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger +// than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and +// odd. +int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) { + assert(!BN_is_zero(n)); + assert(!BN_is_negative(n)); + assert(BN_is_odd(n)); + + BN_zero(r); + + unsigned n_bits = BN_num_bits(n); + assert(n_bits != 0); + if (n_bits == 1) { + return 1; + } + + // Set |r| to the smallest power of two larger than |n|. + assert(p > n_bits); + if (!BN_set_bit(r, n_bits)) { + return 0; + } + + // Unconditionally reduce |r|. + assert(BN_cmp(r, n) > 0); + if (!BN_usub(r, r, n)) { + return 0; + } + assert(BN_cmp(r, n) < 0); + + for (unsigned i = n_bits; i < p; ++i) { + // This is like |BN_mod_lshift1_quick| except using |BN_usub|. + // + // TODO: Replace this with the use of a constant-time variant of + // |BN_mod_lshift1_quick|. 
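To make the quantity n0 concrete: bn_neg_inv_mod_r_u64 above computes -1/n mod r with a constant-time binary xbinGCD. For illustration only, the same value can also be obtained for odd n by a short Hensel/Newton iteration, a common alternative that is not what the code above does:

#include <stdint.h>

/* For odd n, n*n == 1 (mod 8), so x = n inverts n modulo 2^3; each step
   x = x*(2 - n*x) doubles the number of correct low bits, so five steps
   reach 64 bits. All arithmetic is modulo 2^64 via uint64_t wraparound. */
static uint64_t neg_inv_mod_2_64(uint64_t n) {
  uint64_t x = n;         /* inverse of n mod 2^3 */
  x *= 2 - n * x;         /* mod 2^6 */
  x *= 2 - n * x;         /* mod 2^12 */
  x *= 2 - n * x;         /* mod 2^24 */
  x *= 2 - n * x;         /* mod 2^48 */
  x *= 2 - n * x;         /* mod 2^96, i.e. all 64 bits */
  return (uint64_t)0 - x; /* negate to get -1/n mod 2^64 */
}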
+ if (!BN_lshift1(r, r)) { + return 0; + } + if (BN_cmp(r, n) >= 0) { + if (!BN_usub(r, r, n)) { + return 0; + } + } + } + + return 1; +} diff --git a/Sources/BoringSSL/crypto/bn/mul.c b/Sources/BoringSSL/crypto/fipsmodule/bn/mul.c similarity index 66% rename from Sources/BoringSSL/crypto/bn/mul.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/mul.c index fdf2c6927..b93f5587a 100644 --- a/Sources/BoringSSL/crypto/bn/mul.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/mul.c @@ -59,50 +59,48 @@ #include #include +#include +#include + #include "internal.h" +#include "../../internal.h" #define BN_MUL_RECURSIVE_SIZE_NORMAL 16 #define BN_SQR_RECURSIVE_SIZE_NORMAL BN_MUL_RECURSIVE_SIZE_NORMAL -static void bn_mul_normal(BN_ULONG *r, BN_ULONG *a, int na, BN_ULONG *b, - int nb) { - BN_ULONG *rr; - +static void bn_mul_normal(BN_ULONG *r, const BN_ULONG *a, size_t na, + const BN_ULONG *b, size_t nb) { if (na < nb) { - int itmp; - BN_ULONG *ltmp; - - itmp = na; + size_t itmp = na; na = nb; nb = itmp; - ltmp = a; + const BN_ULONG *ltmp = a; a = b; b = ltmp; } - rr = &(r[na]); - if (nb <= 0) { - (void)bn_mul_words(r, a, na, 0); + BN_ULONG *rr = &(r[na]); + if (nb == 0) { + OPENSSL_memset(r, 0, na * sizeof(BN_ULONG)); return; - } else { - rr[0] = bn_mul_words(r, a, na, b[0]); } + rr[0] = bn_mul_words(r, a, na, b[0]); for (;;) { - if (--nb <= 0) { + if (--nb == 0) { return; } rr[1] = bn_mul_add_words(&(r[1]), a, na, b[1]); - if (--nb <= 0) { + if (--nb == 0) { return; } rr[2] = bn_mul_add_words(&(r[2]), a, na, b[2]); - if (--nb <= 0) { + if (--nb == 0) { return; } rr[3] = bn_mul_add_words(&(r[3]), a, na, b[3]); - if (--nb <= 0) { + if (--nb == 0) { return; } rr[4] = bn_mul_add_words(&(r[4]), a, na, b[4]); @@ -113,15 +111,15 @@ static void bn_mul_normal(BN_ULONG *r, BN_ULONG *a, int na, BN_ULONG *b, } #if !defined(OPENSSL_X86) || defined(OPENSSL_NO_ASM) -/* Here follows specialised variants of bn_add_words() and bn_sub_words(). They - * have the property performing operations on arrays of different sizes. The - * sizes of those arrays is expressed through cl, which is the common length ( - * basicall, min(len(a),len(b)) ), and dl, which is the delta between the two - * lengths, calculated as len(a)-len(b). All lengths are the number of - * BN_ULONGs... For the operations that require a result array as parameter, - * it must have the length cl+abs(dl). These functions should probably end up - * in bn_asm.c as soon as there are assembler counterparts for the systems that - * use assembler files. */ +// Here follows specialised variants of bn_add_words() and bn_sub_words(). They +// have the property performing operations on arrays of different sizes. The +// sizes of those arrays is expressed through cl, which is the common length ( +// basicall, min(len(a),len(b)) ), and dl, which is the delta between the two +// lengths, calculated as len(a)-len(b). All lengths are the number of +// BN_ULONGs... For the operations that require a result array as parameter, +// it must have the length cl+abs(dl). These functions should probably end up +// in bn_asm.c as soon as there are assembler counterparts for the systems that +// use assembler files. 
static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int cl, int dl) { @@ -141,7 +139,7 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, if (dl < 0) { for (;;) { t = b[0]; - r[0] = (0 - t - c) & BN_MASK2; + r[0] = 0 - t - c; if (t != 0) { c = 1; } @@ -150,7 +148,7 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, } t = b[1]; - r[1] = (0 - t - c) & BN_MASK2; + r[1] = 0 - t - c; if (t != 0) { c = 1; } @@ -159,7 +157,7 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, } t = b[2]; - r[2] = (0 - t - c) & BN_MASK2; + r[2] = 0 - t - c; if (t != 0) { c = 1; } @@ -168,7 +166,7 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, } t = b[3]; - r[3] = (0 - t - c) & BN_MASK2; + r[3] = 0 - t - c; if (t != 0) { c = 1; } @@ -183,7 +181,7 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, int save_dl = dl; while (c) { t = a[0]; - r[0] = (t - c) & BN_MASK2; + r[0] = t - c; if (t != 0) { c = 0; } @@ -192,7 +190,7 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, } t = a[1]; - r[1] = (t - c) & BN_MASK2; + r[1] = t - c; if (t != 0) { c = 0; } @@ -201,7 +199,7 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, } t = a[2]; - r[2] = (t - c) & BN_MASK2; + r[2] = t - c; if (t != 0) { c = 0; } @@ -210,7 +208,7 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, } t = a[3]; - r[3] = (t - c) & BN_MASK2; + r[3] = t - c; if (t != 0) { c = 0; } @@ -230,11 +228,13 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, if (--dl <= 0) { break; } + OPENSSL_FALLTHROUGH; case 2: r[2] = a[2]; if (--dl <= 0) { break; } + OPENSSL_FALLTHROUGH; case 3: r[3] = a[3]; if (--dl <= 0) { @@ -274,41 +274,39 @@ static BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, return c; } #else -/* On other platforms the function is defined in asm. */ +// On other platforms the function is defined in asm. BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int cl, int dl); #endif -/* Karatsuba recursive multiplication algorithm - * (cf. Knuth, The Art of Computer Programming, Vol. 2) */ - -/* r is 2*n2 words in size, - * a and b are both n2 words in size. - * n2 must be a power of 2. - * We multiply and return the result. - * t must be 2*n2 words in size - * We calculate - * a[0]*b[0] - * a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0]) - * a[1]*b[1] - */ -/* dnX may not be positive, but n2/2+dnX has to be */ -static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, - int dna, int dnb, BN_ULONG *t) { +// Karatsuba recursive multiplication algorithm +// (cf. Knuth, The Art of Computer Programming, Vol. 2) + +// r is 2*n2 words in size, +// a and b are both n2 words in size. +// n2 must be a power of 2. +// We multiply and return the result. 
+// t must be 2*n2 words in size +// We calculate +// a[0]*b[0] +// a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0]) +// a[1]*b[1] +// dnX may not be positive, but n2/2+dnX has to be +static void bn_mul_recursive(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, + int n2, int dna, int dnb, BN_ULONG *t) { int n = n2 / 2, c1, c2; int tna = n + dna, tnb = n + dnb; unsigned int neg, zero; BN_ULONG ln, lo, *p; - /* Only call bn_mul_comba 8 if n2 == 8 and the - * two arrays are complete [steve] - */ + // Only call bn_mul_comba 8 if n2 == 8 and the + // two arrays are complete [steve] if (n2 == 8 && dna == 0 && dnb == 0) { bn_mul_comba8(r, a, b); return; } - /* Else do normal multiply */ + // Else do normal multiply if (n2 < BN_MUL_RECURSIVE_SIZE_NORMAL) { bn_mul_normal(r, a, n2 + dna, b, n2 + dnb); if ((dna + dnb) < 0) { @@ -318,21 +316,21 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, return; } - /* r=(a[0]-a[1])*(b[1]-b[0]) */ + // r=(a[0]-a[1])*(b[1]-b[0]) c1 = bn_cmp_part_words(a, &(a[n]), tna, n - tna); c2 = bn_cmp_part_words(&(b[n]), b, tnb, tnb - n); zero = neg = 0; switch (c1 * 3 + c2) { case -4: - bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */ - bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */ + bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // - + bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // - break; case -3: zero = 1; break; case -2: - bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */ - bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); /* + */ + bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // - + bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); // + neg = 1; break; case -1: @@ -341,8 +339,8 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, zero = 1; break; case 2: - bn_sub_part_words(t, a, &(a[n]), tna, n - tna); /* + */ - bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */ + bn_sub_part_words(t, a, &(a[n]), tna, n - tna); // + + bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // - neg = 1; break; case 3: @@ -355,7 +353,7 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, } if (n == 4 && dna == 0 && dnb == 0) { - /* XXX: bn_mul_comba4 could take extra args to do this well */ + // XXX: bn_mul_comba4 could take extra args to do this well if (!zero) { bn_mul_comba4(&(t[n2]), t, &(t[n])); } else { @@ -365,7 +363,7 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, bn_mul_comba4(r, a, b); bn_mul_comba4(&(r[n2]), &(a[n]), &(b[n])); } else if (n == 8 && dna == 0 && dnb == 0) { - /* XXX: bn_mul_comba8 could take extra args to do this well */ + // XXX: bn_mul_comba8 could take extra args to do this well if (!zero) { bn_mul_comba8(&(t[n2]), t, &(t[n])); } else { @@ -385,49 +383,50 @@ static void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2, bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), n, dna, dnb, p); } - /* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign - * r[10] holds (a[0]*b[0]) - * r[32] holds (b[1]*b[1]) */ + // t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign + // r[10] holds (a[0]*b[0]) + // r[32] holds (b[1]*b[1]) c1 = (int)(bn_add_words(t, r, &(r[n2]), n2)); if (neg) { - /* if t[32] is negative */ + // if t[32] is negative c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2)); } else { - /* Might have a carry */ + // Might have a carry c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2)); } - /* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1]) - * r[10] holds (a[0]*b[0]) 
- * r[32] holds (b[1]*b[1]) - * c1 holds the carry bits */ + // t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1]) + // r[10] holds (a[0]*b[0]) + // r[32] holds (b[1]*b[1]) + // c1 holds the carry bits c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2)); if (c1) { p = &(r[n + n2]); lo = *p; - ln = (lo + c1) & BN_MASK2; + ln = lo + c1; *p = ln; - /* The overflow will stop before we over write - * words we should not overwrite */ + // The overflow will stop before we over write + // words we should not overwrite if (ln < (BN_ULONG)c1) { do { p++; lo = *p; - ln = (lo + 1) & BN_MASK2; + ln = lo + 1; *p = ln; } while (ln == 0); } } } -/* n+tn is the word length - * t needs to be n*4 is size, as does r */ -/* tnX may not be negative but less than n */ -static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, - int tna, int tnb, BN_ULONG *t) { +// n+tn is the word length +// t needs to be n*4 is size, as does r +// tnX may not be negative but less than n +static void bn_mul_part_recursive(BN_ULONG *r, const BN_ULONG *a, + const BN_ULONG *b, int n, int tna, int tnb, + BN_ULONG *t) { int i, j, n2 = n * 2; int c1, c2, neg; BN_ULONG ln, lo, *p; @@ -437,33 +436,33 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, return; } - /* r=(a[0]-a[1])*(b[1]-b[0]) */ + // r=(a[0]-a[1])*(b[1]-b[0]) c1 = bn_cmp_part_words(a, &(a[n]), tna, n - tna); c2 = bn_cmp_part_words(&(b[n]), b, tnb, tnb - n); neg = 0; switch (c1 * 3 + c2) { case -4: - bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */ - bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */ + bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // - + bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // - break; case -3: - /* break; */ + // break; case -2: - bn_sub_part_words(t, &(a[n]), a, tna, tna - n); /* - */ - bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); /* + */ + bn_sub_part_words(t, &(a[n]), a, tna, tna - n); // - + bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); // + neg = 1; break; case -1: case 0: case 1: - /* break; */ + // break; case 2: - bn_sub_part_words(t, a, &(a[n]), tna, n - tna); /* + */ - bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); /* - */ + bn_sub_part_words(t, a, &(a[n]), tna, n - tna); // + + bn_sub_part_words(&(t[n]), b, &(b[n]), tnb, n - tnb); // - neg = 1; break; case 3: - /* break; */ + // break; case 4: bn_sub_part_words(t, a, &(a[n]), tna, n - tna); bn_sub_part_words(&(t[n]), &(b[n]), b, tnb, tnb - n); @@ -480,8 +479,8 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, bn_mul_recursive(&(t[n2]), t, &(t[n]), n, 0, 0, p); bn_mul_recursive(r, a, b, n, 0, 0, p); i = n / 2; - /* If there is only a bottom half to the number, - * just do it */ + // If there is only a bottom half to the number, + // just do it if (tna > tnb) { j = tna - i; } else { @@ -492,12 +491,12 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p); OPENSSL_memset(&(r[n2 + i * 2]), 0, sizeof(BN_ULONG) * (n2 - i * 2)); } else if (j > 0) { - /* eg, n == 16, i == 8 and tn == 11 */ + // eg, n == 16, i == 8 and tn == 11 bn_mul_part_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p); OPENSSL_memset(&(r[n2 + tna + tnb]), 0, sizeof(BN_ULONG) * (n2 - tna - tnb)); } else { - /* (j < 0) eg, n == 16, i == 8 and tn == 5 */ + // (j < 0) eg, n == 16, i == 8 and tn == 5 OPENSSL_memset(&(r[n2]), 0, sizeof(BN_ULONG) * n2); if (tna < 
BN_MUL_RECURSIVE_SIZE_NORMAL && tnb < BN_MUL_RECURSIVE_SIZE_NORMAL) { @@ -505,9 +504,9 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, } else { for (;;) { i /= 2; - /* these simplified conditions work - * exclusively because difference - * between tna and tnb is 1 or 0 */ + // these simplified conditions work + // exclusively because difference + // between tna and tnb is 1 or 0 if (i < tna || i < tnb) { bn_mul_part_recursive(&(r[n2]), &(a[n]), &(b[n]), i, tna - i, tnb - i, p); @@ -522,39 +521,38 @@ static void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n, } } - /* t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign - * r[10] holds (a[0]*b[0]) - * r[32] holds (b[1]*b[1]) - */ + // t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign + // r[10] holds (a[0]*b[0]) + // r[32] holds (b[1]*b[1]) c1 = (int)(bn_add_words(t, r, &(r[n2]), n2)); if (neg) { - /* if t[32] is negative */ + // if t[32] is negative c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2)); } else { - /* Might have a carry */ + // Might have a carry c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2)); } - /* t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1]) - * r[10] holds (a[0]*b[0]) - * r[32] holds (b[1]*b[1]) - * c1 holds the carry bits */ + // t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1]) + // r[10] holds (a[0]*b[0]) + // r[32] holds (b[1]*b[1]) + // c1 holds the carry bits c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2)); if (c1) { p = &(r[n + n2]); lo = *p; - ln = (lo + c1) & BN_MASK2; + ln = lo + c1; *p = ln; - /* The overflow will stop before we over write - * words we should not overwrite */ + // The overflow will stop before we over write + // words we should not overwrite if (ln < (BN_ULONG)c1) { do { p++; lo = *p; - ln = (lo + 1) & BN_MASK2; + ln = lo + 1; *p = ln; } while (ln == 0); } @@ -591,7 +589,7 @@ int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { i = al - bl; if (i == 0) { if (al == 8) { - if (bn_wexpand(rr, 16) == NULL) { + if (!bn_wexpand(rr, 16)) { goto err; } rr->top = 16; @@ -619,19 +617,19 @@ int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { goto err; } if (al > j || bl > j) { - if (bn_wexpand(t, k * 4) == NULL) { + if (!bn_wexpand(t, k * 4)) { goto err; } - if (bn_wexpand(rr, k * 4) == NULL) { + if (!bn_wexpand(rr, k * 4)) { goto err; } bn_mul_part_recursive(rr->d, a->d, b->d, j, al - j, bl - j, t->d); } else { - /* al <= j || bl <= j */ - if (bn_wexpand(t, k * 2) == NULL) { + // al <= j || bl <= j + if (!bn_wexpand(t, k * 2)) { goto err; } - if (bn_wexpand(rr, k * 2) == NULL) { + if (!bn_wexpand(rr, k * 2)) { goto err; } bn_mul_recursive(rr->d, a->d, b->d, j, al - j, bl - j, t->d); @@ -641,7 +639,7 @@ int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { } } - if (bn_wexpand(rr, top) == NULL) { + if (!bn_wexpand(rr, top)) { goto err; } rr->top = top; @@ -659,52 +657,71 @@ int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { return ret; } -/* tmp must have 2*n words */ -static void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, int n, BN_ULONG *tmp) { - int i, j, max; - const BN_ULONG *ap; - BN_ULONG *rp; +int bn_mul_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, size_t num_a, + const BN_ULONG *b, size_t num_b) { + if (num_r != num_a + num_b) { + OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return 0; + } + // TODO(davidben): Should this call |bn_mul_comba4| too? |BN_mul| does not + // hit that code. 
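To make the Karatsuba identity used by bn_mul_recursive above concrete, a worked example in base B = 10 (illustration only): take a = 23 (a1 = 2, a0 = 3) and b = 45 (b1 = 4, b0 = 5). Then a0*b0 = 15, a1*b1 = 8, and (a0 - a1)*(b1 - b0) = 1 * (-1) = -1, so the middle coefficient is 15 + 8 - 1 = 22 and a*b = 8*B^2 + 22*B + 15 = 800 + 220 + 15 = 1035 = 23*45, using three half-size multiplications instead of four. The neg/zero bookkeeping in the code tracks exactly the sign of that (a0 - a1)*(b1 - b0) term.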
+ if (num_a == 8 && num_b == 8) { + bn_mul_comba8(r, a, b); + } else { + bn_mul_normal(r, a, num_a, b, num_b); + } + return 1; +} - max = n * 2; - ap = a; - rp = r; +// tmp must have 2*n words +static void bn_sqr_normal(BN_ULONG *r, const BN_ULONG *a, size_t n, + BN_ULONG *tmp) { + if (n == 0) { + return; + } + + size_t max = n * 2; + const BN_ULONG *ap = a; + BN_ULONG *rp = r; rp[0] = rp[max - 1] = 0; rp++; - j = n; - if (--j > 0) { + // Compute the contribution of a[i] * a[j] for all i < j. + if (n > 1) { ap++; - rp[j] = bn_mul_words(rp, ap, j, ap[-1]); + rp[n - 1] = bn_mul_words(rp, ap, n - 1, ap[-1]); rp += 2; } - - for (i = n - 2; i > 0; i--) { - j--; - ap++; - rp[j] = bn_mul_add_words(rp, ap, j, ap[-1]); - rp += 2; + if (n > 2) { + for (size_t i = n - 2; i > 0; i--) { + ap++; + rp[i] = bn_mul_add_words(rp, ap, i, ap[-1]); + rp += 2; + } } - bn_add_words(r, r, r, max); + // The final result fits in |max| words, so none of the following operations + // will overflow. - /* There will not be a carry */ + // Double |r|, giving the contribution of a[i] * a[j] for all i != j. + bn_add_words(r, r, r, max); + // Add in the contribution of a[i] * a[i] for all i. bn_sqr_words(tmp, a, n); - bn_add_words(r, r, tmp, max); } -/* r is 2*n words in size, - * a and b are both n words in size. (There's not actually a 'b' here ...) - * n must be a power of 2. - * We multiply and return the result. - * t must be 2*n words in size - * We calculate - * a[0]*b[0] - * a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0]) - * a[1]*b[1] - */ -static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t) { +// r is 2*n words in size, +// a and b are both n words in size. (There's not actually a 'b' here ...) +// n must be a power of 2. +// We multiply and return the result. 
+// t must be 2*n words in size +// We calculate +// a[0]*b[0] +// a[0]*b[0]+a[1]*b[1]+(a[0]-a[1])*(b[1]-b[0]) +// a[1]*b[1] +static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, + BN_ULONG *t) { int n = n2 / 2; int zero, c1; BN_ULONG ln, lo, *p; @@ -720,7 +737,7 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t bn_sqr_normal(r, a, n2, t); return; } - /* r=(a[0]-a[1])*(a[1]-a[0]) */ + // r=(a[0]-a[1])*(a[1]-a[0]) c1 = bn_cmp_words(a, &(a[n]), n); zero = 0; if (c1 > 0) { @@ -731,7 +748,7 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t zero = 1; } - /* The result will always be negative unless it is zero */ + // The result will always be negative unless it is zero p = &(t[n2 * 2]); if (!zero) { @@ -742,33 +759,33 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t bn_sqr_recursive(r, a, n, p); bn_sqr_recursive(&(r[n2]), &(a[n]), n, p); - /* t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero - * r[10] holds (a[0]*b[0]) - * r[32] holds (b[1]*b[1]) */ + // t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero + // r[10] holds (a[0]*b[0]) + // r[32] holds (b[1]*b[1]) c1 = (int)(bn_add_words(t, r, &(r[n2]), n2)); - /* t[32] is negative */ + // t[32] is negative c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2)); - /* t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1]) - * r[10] holds (a[0]*a[0]) - * r[32] holds (a[1]*a[1]) - * c1 holds the carry bits */ + // t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1]) + // r[10] holds (a[0]*a[0]) + // r[32] holds (a[1]*a[1]) + // c1 holds the carry bits c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2)); if (c1) { p = &(r[n + n2]); lo = *p; - ln = (lo + c1) & BN_MASK2; + ln = lo + c1; *p = ln; - /* The overflow will stop before we over write - * words we should not overwrite */ + // The overflow will stop before we over write + // words we should not overwrite if (ln < (BN_ULONG)c1) { do { p++; lo = *p; - ln = (lo + 1) & BN_MASK2; + ln = lo + 1; *p = ln; } while (ln == 0); } @@ -776,9 +793,6 @@ static void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t } int BN_mul_word(BIGNUM *bn, BN_ULONG w) { - BN_ULONG ll; - - w &= BN_MASK2; if (!bn->top) { return 1; } @@ -788,9 +802,9 @@ int BN_mul_word(BIGNUM *bn, BN_ULONG w) { return 1; } - ll = bn_mul_words(bn->d, bn->d, bn->top, w); + BN_ULONG ll = bn_mul_words(bn->d, bn->d, bn->top, w); if (ll) { - if (bn_wexpand(bn, bn->top + 1) == NULL) { + if (!bn_wexpand(bn, bn->top + 1)) { return 0; } bn->d[bn->top++] = ll; @@ -818,8 +832,8 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) { goto err; } - max = 2 * al; /* Non-zero (from above) */ - if (bn_wexpand(rr, max) == NULL) { + max = 2 * al; // Non-zero (from above) + if (!bn_wexpand(rr, max)) { goto err; } @@ -838,12 +852,12 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) { j = 1 << (j - 1); k = j + j; if (al == j) { - if (bn_wexpand(tmp, k * 2) == NULL) { + if (!bn_wexpand(tmp, k * 2)) { goto err; } bn_sqr_recursive(rr->d, a->d, al, tmp->d); } else { - if (bn_wexpand(tmp, max) == NULL) { + if (!bn_wexpand(tmp, max)) { goto err; } bn_sqr_normal(rr->d, a->d, al, tmp->d); @@ -852,8 +866,8 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) { } rr->neg = 0; - /* If the most-significant half of the top word of 'a' is zero, then - * the square of 'a' will max-1 words. 
*/ + // If the most-significant half of the top word of 'a' is zero, then + // the square of 'a' will max-1 words. if (a->d[al - 1] == (a->d[al - 1] & BN_MASK2l)) { rr->top = max - 1; } else { @@ -869,3 +883,20 @@ int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx) { BN_CTX_end(ctx); return ret; } + +int bn_sqr_small(BN_ULONG *r, size_t num_r, const BN_ULONG *a, size_t num_a) { + if (num_r != 2 * num_a || num_a > BN_SMALL_MAX_WORDS) { + OPENSSL_PUT_ERROR(BN, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return 0; + } + if (num_a == 4) { + bn_sqr_comba4(r, a); + } else if (num_a == 8) { + bn_sqr_comba8(r, a); + } else { + BN_ULONG tmp[2 * BN_SMALL_MAX_WORDS]; + bn_sqr_normal(r, a, num_a, tmp); + OPENSSL_cleanse(tmp, 2 * num_a * sizeof(BN_ULONG)); + } + return 1; +} diff --git a/Sources/BoringSSL/crypto/bn/prime.c b/Sources/BoringSSL/crypto/fipsmodule/bn/prime.c similarity index 83% rename from Sources/BoringSSL/crypto/bn/prime.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/prime.c index 0f668d728..691d0cba5 100644 --- a/Sources/BoringSSL/crypto/bn/prime.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/prime.c @@ -113,32 +113,13 @@ #include "internal.h" -/* number of Miller-Rabin iterations for an error rate of less than 2^-80 - * for random 'b'-bit input, b >= 100 (taken from table 4.4 in the Handbook - * of Applied Cryptography [Menezes, van Oorschot, Vanstone; CRC Press 1996]; - * original paper: Damgaard, Landrock, Pomerance: Average case error estimates - * for the strong probable prime test. -- Math. Comp. 61 (1993) 177-194) */ -#define BN_prime_checks_for_size(b) ((b) >= 1300 ? 2 : \ - (b) >= 850 ? 3 : \ - (b) >= 650 ? 4 : \ - (b) >= 550 ? 5 : \ - (b) >= 450 ? 6 : \ - (b) >= 400 ? 7 : \ - (b) >= 350 ? 8 : \ - (b) >= 300 ? 9 : \ - (b) >= 250 ? 12 : \ - (b) >= 200 ? 15 : \ - (b) >= 150 ? 18 : \ - /* b >= 100 */ 27) - -/* The quick sieve algorithm approach to weeding out primes is Philip - * Zimmermann's, as implemented in PGP. I have had a read of his comments and - * implemented my own version. */ - -/* NUMPRIMES is the number of primes that fit into a uint16_t. */ +// The quick sieve algorithm approach to weeding out primes is Philip +// Zimmermann's, as implemented in PGP. I have had a read of his comments and +// implemented my own version. + #define NUMPRIMES 2048 -/* primes contains all the primes that fit into a uint16_t. */ +// primes contains all the primes that fit into a uint16_t. static const uint16_t primes[NUMPRIMES] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, @@ -329,8 +310,37 @@ static const uint16_t primes[NUMPRIMES] = { 17851, 17863, }; -static int witness(BIGNUM *w, const BIGNUM *a, const BIGNUM *a1, - const BIGNUM *a1_odd, int k, BN_CTX *ctx, BN_MONT_CTX *mont); +// BN_prime_checks_for_size returns the number of Miller-Rabin iterations +// necessary for a 'bits'-bit prime, in order to maintain an error rate greater +// than the security level for an RSA prime of that many bits (calculated using +// the FIPS SP 800-57 security level and 186-4 Section F.1; original paper: +// Damgaard, Landrock, Pomerance: Average case error estimates for the strong +// probable prime test. -- Math. Comp. 
61 (1993) 177-194) +static int BN_prime_checks_for_size(int bits) { + if (bits >= 3747) { + return 3; + } + if (bits >= 1345) { + return 4; + } + if (bits >= 476) { + return 5; + } + if (bits >= 400) { + return 6; + } + if (bits >= 308) { + return 8; + } + if (bits >= 205) { + return 13; + } + if (bits >= 155) { + return 19; + } + return 28; +} + static int probable_prime(BIGNUM *rnd, int bits); static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add, const BIGNUM *rem, BN_CTX *ctx); @@ -361,11 +371,11 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, int checks = BN_prime_checks_for_size(bits); if (bits < 2) { - /* There are no prime numbers this small. */ + // There are no prime numbers this small. OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL); return 0; } else if (bits == 2 && safe) { - /* The smallest safe prime (7) is three bits. */ + // The smallest safe prime (7) is three bits. OPENSSL_PUT_ERROR(BN, BN_R_BITS_TOO_SMALL); return 0; } @@ -381,7 +391,7 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, } loop: - /* make a random number and set the top and bottom bits */ + // make a random number and set the top and bottom bits if (add == NULL) { if (!probable_prime(ret, bits)) { goto err; @@ -399,7 +409,7 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, } if (!BN_GENCB_call(cb, BN_GENCB_GENERATED, c1++)) { - /* aborted */ + // aborted goto err; } @@ -411,8 +421,8 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, goto loop; } } else { - /* for "safe prime" generation, check that (p-1)/2 is prime. Since a prime - * is odd, We just need to divide by 2 */ + // for "safe prime" generation, check that (p-1)/2 is prime. Since a prime + // is odd, We just need to divide by 2 if (!BN_rshift1(t, ret)) { goto err; } @@ -435,11 +445,11 @@ int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, if (!BN_GENCB_call(cb, i, c1 - 1)) { goto err; } - /* We have a safe prime test pass */ + // We have a safe prime test pass } } - /* we have a prime :-) */ + // we have a prime :-) found = 1; err: @@ -471,176 +481,199 @@ int BN_is_prime_ex(const BIGNUM *candidate, int checks, BN_CTX *ctx, BN_GENCB *c return BN_is_prime_fasttest_ex(candidate, checks, ctx, 0, cb); } -int BN_is_prime_fasttest_ex(const BIGNUM *a, int checks, BN_CTX *ctx_passed, +int BN_is_prime_fasttest_ex(const BIGNUM *a, int checks, BN_CTX *ctx, int do_trial_division, BN_GENCB *cb) { - int i, j, ret = -1; - int k; - BN_CTX *ctx = NULL; - BIGNUM *A1, *A1_odd, *check; /* taken from ctx */ - BN_MONT_CTX *mont = NULL; - const BIGNUM *A = NULL; - if (BN_cmp(a, BN_value_one()) <= 0) { return 0; } - if (checks == BN_prime_checks) { - checks = BN_prime_checks_for_size(BN_num_bits(a)); - } - - /* first look for small factors */ + // first look for small factors if (!BN_is_odd(a)) { - /* a is even => a is prime if and only if a == 2 */ + // a is even => a is prime if and only if a == 2 return BN_is_word(a, 2); } + // Enhanced Miller-Rabin does not work for three. 
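For reference, a minimal caller of the primality API reworked above (illustration only; check_random_prime is a made-up helper, not part of BoringSSL or of this patch):

#include <openssl/bn.h>

static int check_random_prime(void) {
  int ok = 0;
  BIGNUM *p = BN_new();
  BN_CTX *ctx = BN_CTX_new();
  if (p == NULL || ctx == NULL) {
    goto done;
  }
  /* Generate a 512-bit (non-safe) prime. Passing BN_prime_checks below lets
   * BN_prime_checks_for_size pick the Miller-Rabin iteration count from the
   * bit length. */
  if (!BN_generate_prime_ex(p, 512, /*safe=*/0, NULL, NULL, NULL)) {
    goto done;
  }
  /* Returns 1 for probably prime, 0 for composite, -1 on error. */
  ok = BN_is_prime_fasttest_ex(p, BN_prime_checks, ctx,
                               /*do_trial_division=*/1, NULL) == 1;

done:
  BN_CTX_free(ctx);
  BN_free(p);
  return ok;
}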
+ if (BN_is_word(a, 3)) { + return 1; + } + if (do_trial_division) { - for (i = 1; i < NUMPRIMES; i++) { + for (int i = 1; i < NUMPRIMES; i++) { BN_ULONG mod = BN_mod_word(a, primes[i]); if (mod == (BN_ULONG)-1) { - goto err; + return -1; } if (mod == 0) { - return 0; + return BN_is_word(a, primes[i]); } } if (!BN_GENCB_call(cb, 1, -1)) { - goto err; + return -1; } } - if (ctx_passed != NULL) { - ctx = ctx_passed; - } else if ((ctx = BN_CTX_new()) == NULL) { - goto err; - } - BN_CTX_start(ctx); - - /* A := abs(a) */ - if (a->neg) { - BIGNUM *t = BN_CTX_get(ctx); - if (t == NULL || !BN_copy(t, a)) { - goto err; + int ret = -1; + BN_CTX *ctx_allocated = NULL; + if (ctx == NULL) { + ctx_allocated = BN_CTX_new(); + if (ctx_allocated == NULL) { + return -1; } - t->neg = 0; - A = t; - } else { - A = a; + ctx = ctx_allocated; } - A1 = BN_CTX_get(ctx); - A1_odd = BN_CTX_get(ctx); - check = BN_CTX_get(ctx); - if (check == NULL) { + enum bn_primality_result_t result; + if (!BN_enhanced_miller_rabin_primality_test(&result, a, checks, ctx, cb)) { goto err; } - /* compute A1 := A - 1 */ - if (!BN_copy(A1, A)) { - goto err; + ret = (result == bn_probably_prime); + +err: + BN_CTX_free(ctx_allocated); + return ret; +} + +int BN_enhanced_miller_rabin_primality_test( + enum bn_primality_result_t *out_result, const BIGNUM *w, int iterations, + BN_CTX *ctx, BN_GENCB *cb) { + // Enhanced Miller-Rabin is only valid on odd integers greater than 3. + if (!BN_is_odd(w) || BN_cmp_word(w, 3) <= 0) { + OPENSSL_PUT_ERROR(BN, BN_R_INVALID_INPUT); + return 0; } - if (!BN_sub_word(A1, 1)) { - goto err; + + if (iterations == BN_prime_checks) { + iterations = BN_prime_checks_for_size(BN_num_bits(w)); } - if (BN_is_zero(A1)) { - ret = 0; + + int ret = 0; + BN_MONT_CTX *mont = NULL; + + BN_CTX_start(ctx); + + BIGNUM *w1 = BN_CTX_get(ctx); + if (w1 == NULL || + !BN_copy(w1, w) || + !BN_sub_word(w1, 1)) { goto err; } - /* write A1 as A1_odd * 2^k */ - k = 1; - while (!BN_is_bit_set(A1, k)) { - k++; + // Write w1 as m*2^a (Steps 1 and 2). + int a = 0; + while (!BN_is_bit_set(w1, a)) { + a++; } - if (!BN_rshift(A1_odd, A1, k)) { + BIGNUM *m = BN_CTX_get(ctx); + if (m == NULL || + !BN_rshift(m, w1, a)) { goto err; } - /* Montgomery setup for computations mod A */ - mont = BN_MONT_CTX_new(); - if (mont == NULL) { + BIGNUM *b = BN_CTX_get(ctx); + BIGNUM *g = BN_CTX_get(ctx); + BIGNUM *z = BN_CTX_get(ctx); + BIGNUM *x = BN_CTX_get(ctx); + BIGNUM *x1 = BN_CTX_get(ctx); + if (b == NULL || + g == NULL || + z == NULL || + x == NULL || + x1 == NULL) { goto err; } - if (!BN_MONT_CTX_set(mont, A, ctx)) { + + // Montgomery setup for computations mod A + mont = BN_MONT_CTX_new(); + if (mont == NULL || + !BN_MONT_CTX_set(mont, w, ctx)) { goto err; } - for (i = 0; i < checks; i++) { - if (!BN_pseudo_rand_range(check, A1)) { - goto err; - } - if (!BN_add_word(check, 1)) { + // The following loop performs in inner iteration of the Enhanced Miller-Rabin + // Primality test (Step 4). 
+ for (int i = 1; i <= iterations; i++) { + // Step 4.1-4.2 + if (!BN_rand_range_ex(b, 2, w1)) { goto err; } - /* now 1 <= check < A */ - j = witness(check, A, A1, A1_odd, k, ctx, mont); - if (j == -1) { + // Step 4.3-4.4 + if (!BN_gcd(g, b, w, ctx)) { goto err; } - if (j) { - ret = 0; + if (BN_cmp_word(g, 1) > 0) { + *out_result = bn_composite; + ret = 1; goto err; } - if (!BN_GENCB_call(cb, 1, i)) { + + // Step 4.5 + if (!BN_mod_exp_mont(z, b, m, w, ctx, mont)) { goto err; } - } - ret = 1; -err: - if (ctx != NULL) { - BN_CTX_end(ctx); - if (ctx_passed == NULL) { - BN_CTX_free(ctx); + // Step 4.6 + if (BN_is_one(z) || BN_cmp(z, w1) == 0) { + goto loop; } - } - if (mont != NULL) { - BN_MONT_CTX_free(mont); - } - return ret; -} + // Step 4.7 + for (int j = 1; j < a; j++) { + if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) { + goto err; + } + if (BN_cmp(z, w1) == 0) { + goto loop; + } + if (BN_is_one(z)) { + goto composite; + } + } -static int witness(BIGNUM *w, const BIGNUM *a, const BIGNUM *a1, - const BIGNUM *a1_odd, int k, BN_CTX *ctx, - BN_MONT_CTX *mont) { - if (!BN_mod_exp_mont(w, w, a1_odd, a, ctx, mont)) { /* w := w^a1_odd mod a */ - return -1; - } - if (BN_is_one(w)) { - return 0; /* probably prime */ - } - if (BN_cmp(w, a1) == 0) { - return 0; /* w == -1 (mod a), 'a' is probably prime */ - } + // Step 4.8-4.9 + if (!BN_copy(x, z) || !BN_mod_mul(z, x, x, w, ctx)) { + goto err; + } - while (--k) { - if (!BN_mod_mul(w, w, w, a, ctx)) { /* w := w^2 mod a */ - return -1; + // Step 4.10-4.11 + if (!BN_is_one(z) && !BN_copy(x, z)) { + goto err; } - if (BN_is_one(w)) { - return 1; /* 'a' is composite, otherwise a previous 'w' would - * have been == -1 (mod 'a') */ + composite: + // Step 4.12-4.14 + if (!BN_copy(x1, x) || + !BN_sub_word(x1, 1) || + !BN_gcd(g, x1, w, ctx)) { + goto err; } + if (BN_cmp_word(g, 1) > 0) { + *out_result = bn_composite; + } else { + *out_result = bn_non_prime_power_composite; + } + + ret = 1; + goto err; - if (BN_cmp(w, a1) == 0) { - return 0; /* w == -1 (mod a), 'a' is probably prime */ + loop: + // Step 4.15 + if (!BN_GENCB_call(cb, 1, i)) { + goto err; } } - /* If we get here, 'w' is the (a-1)/2-th power of the original 'w', - * and it is neither -1 nor +1 -- so 'a' cannot be prime */ - return 1; -} + *out_result = bn_probably_prime; + ret = 1; -static BN_ULONG get_word(const BIGNUM *bn) { - if (bn->top == 1) { - return bn->d[0]; - } - return 0; +err: + BN_MONT_CTX_free(mont); + BN_CTX_end(ctx); + + return ret; } static int probable_prime(BIGNUM *rnd, int bits) { @@ -655,7 +688,7 @@ static int probable_prime(BIGNUM *rnd, int bits) { return 0; } - /* we now have a random number 'rnd' to test. */ + // we now have a random number 'rnd' to test. for (i = 1; i < NUMPRIMES; i++) { BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]); if (mod == (BN_ULONG)-1) { @@ -663,15 +696,15 @@ static int probable_prime(BIGNUM *rnd, int bits) { } mods[i] = (uint16_t)mod; } - /* If bits is so small that it fits into a single word then we - * additionally don't want to exceed that many bits. */ + // If bits is so small that it fits into a single word then we + // additionally don't want to exceed that many bits. if (is_single_word) { BN_ULONG size_limit; if (bits == BN_BITS2) { - /* Avoid undefined behavior. */ - size_limit = ~((BN_ULONG)0) - get_word(rnd); + // Avoid undefined behavior. 
+ size_limit = ~((BN_ULONG)0) - BN_get_word(rnd); } else { - size_limit = (((BN_ULONG)1) << bits) - get_word(rnd) - 1; + size_limit = (((BN_ULONG)1) << bits) - BN_get_word(rnd) - 1; } if (size_limit < maxdelta) { maxdelta = size_limit; @@ -681,17 +714,17 @@ static int probable_prime(BIGNUM *rnd, int bits) { loop: if (is_single_word) { - BN_ULONG rnd_word = get_word(rnd); - - /* In the case that the candidate prime is a single word then - * we check that: - * 1) It's greater than primes[i] because we shouldn't reject - * 3 as being a prime number because it's a multiple of - * three. - * 2) That it's not a multiple of a known prime. We don't - * check that rnd-1 is also coprime to all the known - * primes because there aren't many small primes where - * that's true. */ + BN_ULONG rnd_word = BN_get_word(rnd); + + // In the case that the candidate prime is a single word then + // we check that: + // 1) It's greater than primes[i] because we shouldn't reject + // 3 as being a prime number because it's a multiple of + // three. + // 2) That it's not a multiple of a known prime. We don't + // check that rnd-1 is also coprime to all the known + // primes because there aren't many small primes where + // that's true. for (i = 1; i < NUMPRIMES && primes[i] < rnd_word; i++) { if ((mods[i] + delta) % primes[i] == 0) { delta += 2; @@ -703,8 +736,8 @@ static int probable_prime(BIGNUM *rnd, int bits) { } } else { for (i = 1; i < NUMPRIMES; i++) { - /* check that rnd is not a prime and also - * that gcd(rnd-1,primes) == 1 (except for 2) */ + // check that rnd is not a prime and also + // that gcd(rnd-1,primes) == 1 (except for 2) if (((mods[i] + delta) % primes[i]) <= 1) { delta += 2; if (delta > maxdelta) { @@ -739,7 +772,7 @@ static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add, goto err; } - /* we need ((rnd-rem) % add) == 0 */ + // we need ((rnd-rem) % add) == 0 if (!BN_mod(t1, rnd, add, ctx)) { goto err; @@ -756,11 +789,11 @@ static int probable_prime_dh(BIGNUM *rnd, int bits, const BIGNUM *add, goto err; } } - /* we now have a random number 'rand' to test. */ + // we now have a random number 'rand' to test. loop: for (i = 1; i < NUMPRIMES; i++) { - /* check that rnd is a prime */ + // check that rnd is a prime BN_ULONG mod = BN_mod_word(rnd, (BN_ULONG)primes[i]); if (mod == (BN_ULONG)-1) { goto err; @@ -802,7 +835,7 @@ static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd, goto err; } - /* we need ((rnd-rem) % add) == 0 */ + // we need ((rnd-rem) % add) == 0 if (!BN_mod(t1, q, qadd, ctx)) { goto err; } @@ -824,7 +857,7 @@ static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd, } } - /* we now have a random number 'rand' to test. */ + // we now have a random number 'rand' to test. 
if (!BN_lshift1(p, q)) { goto err; } @@ -834,9 +867,9 @@ static int probable_prime_dh_safe(BIGNUM *p, int bits, const BIGNUM *padd, loop: for (i = 1; i < NUMPRIMES; i++) { - /* check that p and q are prime */ - /* check that for p and q - * gcd(p-1,primes) == 1 (except for 2) */ + // check that p and q are prime + // check that for p and q + // gcd(p-1,primes) == 1 (except for 2) BN_ULONG pmod = BN_mod_word(p, (BN_ULONG)primes[i]); BN_ULONG qmod = BN_mod_word(q, (BN_ULONG)primes[i]); if (pmod == (BN_ULONG)-1 || qmod == (BN_ULONG)-1) { diff --git a/Sources/BoringSSL/crypto/bn/random.c b/Sources/BoringSSL/crypto/fipsmodule/bn/random.c similarity index 66% rename from Sources/BoringSSL/crypto/bn/random.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/random.c index 6f922c094..61499af47 100644 --- a/Sources/BoringSSL/crypto/bn/random.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/random.c @@ -113,11 +113,15 @@ #include #include #include -#include +#include -#include "../internal.h" +#include "internal.h" +#include "../../internal.h" +#include "../rand/internal.h" +static const uint8_t kDefaultAdditionalData[32] = {0}; + int BN_rand(BIGNUM *rnd, int bits, int top, int bottom) { uint8_t *buf = NULL; int ret = 0, bit, bytes, mask; @@ -152,10 +156,8 @@ int BN_rand(BIGNUM *rnd, int bits, int top, int bottom) { goto err; } - /* Make a random number and set the top and bottom bits. */ - if (!RAND_bytes(buf, bytes)) { - goto err; - } + // Make a random number and set the top and bottom bits. + RAND_bytes(buf, bytes); if (top != BN_RAND_TOP_ANY) { if (top == BN_RAND_TOP_TWO && bits > 1) { @@ -172,7 +174,7 @@ int BN_rand(BIGNUM *rnd, int bits, int top, int bottom) { buf[0] &= ~mask; - /* Set the bottom bit if requested, */ + // Set the bottom bit if requested, if (bottom == BN_RAND_BOTTOM_ODD) { buf[bytes - 1] |= 1; } @@ -184,72 +186,107 @@ int BN_rand(BIGNUM *rnd, int bits, int top, int bottom) { ret = 1; err: - if (buf != NULL) { - OPENSSL_cleanse(buf, bytes); - OPENSSL_free(buf); - } - return (ret); + OPENSSL_free(buf); + return ret; } int BN_pseudo_rand(BIGNUM *rnd, int bits, int top, int bottom) { return BN_rand(rnd, bits, top, bottom); } -int BN_rand_range_ex(BIGNUM *r, BN_ULONG min_inclusive, - const BIGNUM *max_exclusive) { - unsigned n; - unsigned count = 100; - - if (BN_cmp_word(max_exclusive, min_inclusive) <= 0) { - OPENSSL_PUT_ERROR(BN, BN_R_INVALID_RANGE); +// bn_less_than_word returns one if the number represented by |len| words at |a| +// is less than |b| and zero otherwise. It performs this computation in time +// independent of the value of |a|. |b| is assumed public. +static int bn_less_than_word(const BN_ULONG *a, size_t len, BN_ULONG b) { + if (b == 0) { return 0; } + if (len == 0) { + return 1; + } - n = BN_num_bits(max_exclusive); /* n > 0 */ + // |a| < |b| iff a[1..len-1] are all zero and a[0] < b. + OPENSSL_COMPILE_ASSERT(sizeof(BN_ULONG) <= sizeof(crypto_word_t), + crypto_word_t_too_small); + crypto_word_t mask = 0; + for (size_t i = 1; i < len; i++) { + mask |= a[i]; + } + // |mask| is now zero iff a[1..len-1] are all zero. 
+ mask = constant_time_is_zero_w(mask); + mask &= constant_time_lt_w(a[0], b); + return constant_time_select_int(mask, 1, 0); +} - /* BN_is_bit_set(range, n - 1) always holds */ - if (n == 1) { - BN_zero(r); - return 1; +int bn_in_range_words(const BN_ULONG *a, BN_ULONG min_inclusive, + const BN_ULONG *max_exclusive, size_t len) { + return bn_less_than_words(a, max_exclusive, len) && + !bn_less_than_word(a, len, min_inclusive); +} + +int bn_rand_range_words(BN_ULONG *out, BN_ULONG min_inclusive, + const BN_ULONG *max_exclusive, size_t len, + const uint8_t additional_data[32]) { + // This function implements the equivalent of steps 4 through 7 of FIPS 186-4 + // appendices B.4.2 and B.5.2. When called in those contexts, |max_exclusive| + // is n and |min_inclusive| is one. + + // Compute the bit length of |max_exclusive| (step 1), in terms of a number of + // |words| worth of entropy to fill and a mask of bits to clear in the top + // word. + size_t words = len; + while (words > 0 && max_exclusive[words - 1] == 0) { + words--; + } + if (words == 0 || + (words == 1 && max_exclusive[0] <= min_inclusive)) { + OPENSSL_PUT_ERROR(BN, BN_R_INVALID_RANGE); + return 0; } + BN_ULONG mask = max_exclusive[words - 1]; + // This sets all bits in |mask| below the most significant bit. + mask |= mask >> 1; + mask |= mask >> 2; + mask |= mask >> 4; + mask |= mask >> 8; + mask |= mask >> 16; +#if defined(OPENSSL_64_BIT) + mask |= mask >> 32; +#endif + + // Fill any unused words with zero. + OPENSSL_memset(out + words, 0, (len - words) * sizeof(BN_ULONG)); + unsigned count = 100; do { if (!--count) { OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS); return 0; } - if (!BN_is_bit_set(max_exclusive, n - 2) && - !BN_is_bit_set(max_exclusive, n - 3)) { - /* range = 100..._2, so 3*range (= 11..._2) is exactly one bit longer - * than range. This is a common scenario when generating a random value - * modulo an RSA public modulus, e.g. for RSA base blinding. */ - if (!BN_rand(r, n + 1, BN_RAND_TOP_ANY, BN_RAND_BOTTOM_ANY)) { - return 0; - } + // Steps 4 and 5. Use |words| and |mask| together to obtain a string of N + // bits, where N is the bit length of |max_exclusive|. + RAND_bytes_with_additional_data((uint8_t *)out, words * sizeof(BN_ULONG), + additional_data); + out[words - 1] &= mask; - /* If r < 3*range, use r := r MOD range (which is either r, r - range, or - * r - 2*range). Otherwise, iterate again. Since 3*range = 11..._2, each - * iteration succeeds with probability >= .75. */ - if (BN_cmp(r, max_exclusive) >= 0) { - if (!BN_sub(r, r, max_exclusive)) { - return 0; - } - if (BN_cmp(r, max_exclusive) >= 0) { - if (!BN_sub(r, r, max_exclusive)) { - return 0; - } - } - } - } else { - /* range = 11..._2 or range = 101..._2 */ - if (!BN_rand(r, n, BN_RAND_TOP_ANY, BN_RAND_BOTTOM_ANY)) { - return 0; - } - } - } while (BN_cmp_word(r, min_inclusive) < 0 || - BN_cmp(r, max_exclusive) >= 0); + // If out >= max_exclusive or out < min_inclusive, retry. This implements + // the equivalent of steps 6 and 7 without leaking the value of |out|. 
+ } while (!bn_in_range_words(out, min_inclusive, max_exclusive, words)); + return 1; +} +int BN_rand_range_ex(BIGNUM *r, BN_ULONG min_inclusive, + const BIGNUM *max_exclusive) { + if (!bn_wexpand(r, max_exclusive->top) || + !bn_rand_range_words(r->d, min_inclusive, max_exclusive->d, + max_exclusive->top, kDefaultAdditionalData)) { + return 0; + } + + r->neg = 0; + r->top = max_exclusive->top; + bn_correct_top(r); return 1; } @@ -260,84 +297,3 @@ int BN_rand_range(BIGNUM *r, const BIGNUM *range) { int BN_pseudo_rand_range(BIGNUM *r, const BIGNUM *range) { return BN_rand_range(r, range); } - -int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, const BIGNUM *priv, - const uint8_t *message, size_t message_len, - BN_CTX *ctx) { - SHA512_CTX sha; - /* We use 512 bits of random data per iteration to - * ensure that we have at least |range| bits of randomness. */ - uint8_t random_bytes[64]; - uint8_t digest[SHA512_DIGEST_LENGTH]; - size_t done, todo, attempt; - const unsigned num_k_bytes = BN_num_bytes(range); - const unsigned bits_to_mask = (8 - (BN_num_bits(range) % 8)) % 8; - uint8_t private_bytes[96]; - uint8_t *k_bytes = NULL; - int ret = 0; - - if (out == NULL) { - return 0; - } - - if (BN_is_zero(range)) { - OPENSSL_PUT_ERROR(BN, BN_R_DIV_BY_ZERO); - goto err; - } - - k_bytes = OPENSSL_malloc(num_k_bytes); - if (!k_bytes) { - OPENSSL_PUT_ERROR(BN, ERR_R_MALLOC_FAILURE); - goto err; - } - - /* We copy |priv| into a local buffer to avoid furthur exposing its - * length. */ - todo = sizeof(priv->d[0]) * priv->top; - if (todo > sizeof(private_bytes)) { - /* No reasonable DSA or ECDSA key should have a private key - * this large and we don't handle this case in order to avoid - * leaking the length of the private key. */ - OPENSSL_PUT_ERROR(BN, BN_R_PRIVATE_KEY_TOO_LARGE); - goto err; - } - OPENSSL_memcpy(private_bytes, priv->d, todo); - OPENSSL_memset(private_bytes + todo, 0, sizeof(private_bytes) - todo); - - for (attempt = 0;; attempt++) { - for (done = 0; done < num_k_bytes;) { - if (!RAND_bytes(random_bytes, sizeof(random_bytes))) { - goto err; - } - SHA512_Init(&sha); - SHA512_Update(&sha, &attempt, sizeof(attempt)); - SHA512_Update(&sha, &done, sizeof(done)); - SHA512_Update(&sha, private_bytes, sizeof(private_bytes)); - SHA512_Update(&sha, message, message_len); - SHA512_Update(&sha, random_bytes, sizeof(random_bytes)); - SHA512_Final(digest, &sha); - - todo = num_k_bytes - done; - if (todo > SHA512_DIGEST_LENGTH) { - todo = SHA512_DIGEST_LENGTH; - } - OPENSSL_memcpy(k_bytes + done, digest, todo); - done += todo; - } - - k_bytes[0] &= 0xff >> bits_to_mask; - - if (!BN_bin2bn(k_bytes, num_k_bytes, out)) { - goto err; - } - if (BN_cmp(out, range) < 0) { - break; - } - } - - ret = 1; - -err: - OPENSSL_free(k_bytes); - return ret; -} diff --git a/Sources/BoringSSL/crypto/bn/rsaz_exp.c b/Sources/BoringSSL/crypto/fipsmodule/bn/rsaz_exp.c similarity index 99% rename from Sources/BoringSSL/crypto/bn/rsaz_exp.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/rsaz_exp.c index c7eed38e1..d0090a663 100644 --- a/Sources/BoringSSL/crypto/bn/rsaz_exp.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/rsaz_exp.c @@ -48,7 +48,7 @@ #include -#include "../internal.h" +#include "../../internal.h" /* diff --git a/Sources/BoringSSL/crypto/bn/rsaz_exp.h b/Sources/BoringSSL/crypto/fipsmodule/bn/rsaz_exp.h similarity index 100% rename from Sources/BoringSSL/crypto/bn/rsaz_exp.h rename to Sources/BoringSSL/crypto/fipsmodule/bn/rsaz_exp.h diff --git a/Sources/BoringSSL/crypto/bn/shift.c 
b/Sources/BoringSSL/crypto/fipsmodule/bn/shift.c similarity index 87% rename from Sources/BoringSSL/crypto/bn/shift.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/shift.c index dc9b795dc..d4ed79e44 100644 --- a/Sources/BoringSSL/crypto/bn/shift.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/shift.c @@ -75,7 +75,7 @@ int BN_lshift(BIGNUM *r, const BIGNUM *a, int n) { r->neg = a->neg; nw = n / BN_BITS2; - if (bn_wexpand(r, a->top + nw + 1) == NULL) { + if (!bn_wexpand(r, a->top + nw + 1)) { return 0; } lb = n % BN_BITS2; @@ -90,8 +90,8 @@ int BN_lshift(BIGNUM *r, const BIGNUM *a, int n) { } else { for (i = a->top - 1; i >= 0; i--) { l = f[i]; - t[nw + i + 1] |= (l >> rb) & BN_MASK2; - t[nw + i] = (l << lb) & BN_MASK2; + t[nw + i + 1] |= l >> rb; + t[nw + i] = l << lb; } } OPENSSL_memset(t, 0, nw * sizeof(t[0])); @@ -107,12 +107,12 @@ int BN_lshift1(BIGNUM *r, const BIGNUM *a) { if (r != a) { r->neg = a->neg; - if (bn_wexpand(r, a->top + 1) == NULL) { + if (!bn_wexpand(r, a->top + 1)) { return 0; } r->top = a->top; } else { - if (bn_wexpand(r, a->top + 1) == NULL) { + if (!bn_wexpand(r, a->top + 1)) { return 0; } } @@ -121,8 +121,8 @@ int BN_lshift1(BIGNUM *r, const BIGNUM *a) { c = 0; for (i = 0; i < a->top; i++) { t = *(ap++); - *(rp++) = ((t << 1) | c) & BN_MASK2; - c = (t & BN_TBIT) ? 1 : 0; + *(rp++) = (t << 1) | c; + c = t >> (BN_BITS2 - 1); } if (c) { *rp = 1; @@ -152,12 +152,12 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n) { i = (BN_num_bits(a) - n + (BN_BITS2 - 1)) / BN_BITS2; if (r != a) { r->neg = a->neg; - if (bn_wexpand(r, i) == NULL) { + if (!bn_wexpand(r, i)) { return 0; } } else { if (n == 0) { - return 1; /* or the copying loop will go berserk */ + return 1; // or the copying loop will go berserk } } @@ -173,11 +173,12 @@ int BN_rshift(BIGNUM *r, const BIGNUM *a, int n) { } else { l = *(f++); for (i = j - 1; i != 0; i--) { - tmp = (l >> rb) & BN_MASK2; + tmp = l >> rb; l = *(f++); - *(t++) = (tmp | (l << lb)) & BN_MASK2; + *(t++) = tmp | (l << lb); } - if ((l = (l >> rb) & BN_MASK2)) { + l >>= rb; + if (l) { *(t) = l; } } @@ -201,21 +202,21 @@ int BN_rshift1(BIGNUM *r, const BIGNUM *a) { ap = a->d; j = i - (ap[i - 1] == 1); if (a != r) { - if (bn_wexpand(r, j) == NULL) { + if (!bn_wexpand(r, j)) { return 0; } r->neg = a->neg; } rp = r->d; t = ap[--i]; - c = (t & 1) ? BN_TBIT : 0; + c = t << (BN_BITS2 - 1); if (t >>= 1) { rp[i] = t; } while (i > 0) { t = ap[--i]; - rp[i] = ((t >> 1) & BN_MASK2) | c; - c = (t & 1) ? 
BN_TBIT : 0; + rp[i] = (t >> 1) | c; + c = t << (BN_BITS2 - 1); } r->top = j; @@ -227,19 +228,17 @@ int BN_rshift1(BIGNUM *r, const BIGNUM *a) { } int BN_set_bit(BIGNUM *a, int n) { - int i, j, k; - if (n < 0) { return 0; } - i = n / BN_BITS2; - j = n % BN_BITS2; + int i = n / BN_BITS2; + int j = n % BN_BITS2; if (a->top <= i) { - if (bn_wexpand(a, i + 1) == NULL) { + if (!bn_wexpand(a, i + 1)) { return 0; } - for (k = a->top; k < i + 1; k++) { + for (int k = a->top; k < i + 1; k++) { a->d[k] = 0; } a->top = i + 1; @@ -268,30 +267,29 @@ int BN_clear_bit(BIGNUM *a, int n) { return 1; } -int BN_is_bit_set(const BIGNUM *a, int n) { - int i, j; - - if (n < 0) { +int bn_is_bit_set_words(const BN_ULONG *a, size_t num, unsigned bit) { + unsigned i = bit / BN_BITS2; + unsigned j = bit % BN_BITS2; + if (i >= num) { return 0; } - i = n / BN_BITS2; - j = n % BN_BITS2; - if (a->top <= i) { + return (a[i] >> j) & 1; +} + +int BN_is_bit_set(const BIGNUM *a, int n) { + if (n < 0) { return 0; } - - return (a->d[i]>>j)&1; + return bn_is_bit_set_words(a->d, a->top, n); } int BN_mask_bits(BIGNUM *a, int n) { - int b, w; - if (n < 0) { return 0; } - w = n / BN_BITS2; - b = n % BN_BITS2; + int w = n / BN_BITS2; + int b = n % BN_BITS2; if (w >= a->top) { return 0; } diff --git a/Sources/BoringSSL/crypto/bn/sqrt.c b/Sources/BoringSSL/crypto/fipsmodule/bn/sqrt.c similarity index 68% rename from Sources/BoringSSL/crypto/bn/sqrt.c rename to Sources/BoringSSL/crypto/fipsmodule/bn/sqrt.c index fb962a98b..68ccb9199 100644 --- a/Sources/BoringSSL/crypto/bn/sqrt.c +++ b/Sources/BoringSSL/crypto/fipsmodule/bn/sqrt.c @@ -56,11 +56,13 @@ #include +#include "internal.h" + BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { - /* Compute a square root of |a| mod |p| using the Tonelli/Shanks algorithm - * (cf. Henri Cohen, "A Course in Algebraic Computational Number Theory", - * algorithm 1.5.1). |p| is assumed to be a prime. */ + // Compute a square root of |a| mod |p| using the Tonelli/Shanks algorithm + // (cf. Henri Cohen, "A Course in Algebraic Computational Number Theory", + // algorithm 1.5.1). |p| is assumed to be a prime. BIGNUM *ret = in; int err = 1; @@ -123,32 +125,31 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto end; } - /* A = a mod p */ + // A = a mod p if (!BN_nnmod(A, a, p, ctx)) { goto end; } - /* now write |p| - 1 as 2^e*q where q is odd */ + // now write |p| - 1 as 2^e*q where q is odd e = 1; while (!BN_is_bit_set(p, e)) { e++; } - /* we'll set q later (if needed) */ + // we'll set q later (if needed) if (e == 1) { - /* The easy case: (|p|-1)/2 is odd, so 2 has an inverse - * modulo (|p|-1)/2, and square roots can be computed - * directly by modular exponentiation. - * We have - * 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2), - * so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1. - */ + // The easy case: (|p|-1)/2 is odd, so 2 has an inverse + // modulo (|p|-1)/2, and square roots can be computed + // directly by modular exponentiation. + // We have + // 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2), + // so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1. 
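A quick numeric check of the e == 1 shortcut described above (illustration only): for p = 11 we have (p - 1)/2 = 5 odd and exponent (p + 1)/4 = 3, so for a = 4 the code computes 4^3 = 64 == 9 (mod 11); indeed 9^2 = 81 == 4 (mod 11), so 9 (and its negation 2) is a square root of a.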
if (!BN_rshift(q, p, 2)) { goto end; } q->neg = 0; if (!BN_add_word(q, 1) || - !BN_mod_exp(ret, A, q, p, ctx)) { + !BN_mod_exp_mont(ret, A, q, p, ctx, NULL)) { goto end; } err = 0; @@ -156,59 +157,58 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { } if (e == 2) { - /* |p| == 5 (mod 8) - * - * In this case 2 is always a non-square since - * Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime. - * So if a really is a square, then 2*a is a non-square. - * Thus for - * b := (2*a)^((|p|-5)/8), - * i := (2*a)*b^2 - * we have - * i^2 = (2*a)^((1 + (|p|-5)/4)*2) - * = (2*a)^((p-1)/2) - * = -1; - * so if we set - * x := a*b*(i-1), - * then - * x^2 = a^2 * b^2 * (i^2 - 2*i + 1) - * = a^2 * b^2 * (-2*i) - * = a*(-i)*(2*a*b^2) - * = a*(-i)*i - * = a. - * - * (This is due to A.O.L. Atkin, - * , - * November 1992.) - */ - - /* t := 2*a */ + // |p| == 5 (mod 8) + // + // In this case 2 is always a non-square since + // Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime. + // So if a really is a square, then 2*a is a non-square. + // Thus for + // b := (2*a)^((|p|-5)/8), + // i := (2*a)*b^2 + // we have + // i^2 = (2*a)^((1 + (|p|-5)/4)*2) + // = (2*a)^((p-1)/2) + // = -1; + // so if we set + // x := a*b*(i-1), + // then + // x^2 = a^2 * b^2 * (i^2 - 2*i + 1) + // = a^2 * b^2 * (-2*i) + // = a*(-i)*(2*a*b^2) + // = a*(-i)*i + // = a. + // + // (This is due to A.O.L. Atkin, + // , + // November 1992.) + + // t := 2*a if (!BN_mod_lshift1_quick(t, A, p)) { goto end; } - /* b := (2*a)^((|p|-5)/8) */ + // b := (2*a)^((|p|-5)/8) if (!BN_rshift(q, p, 3)) { goto end; } q->neg = 0; - if (!BN_mod_exp(b, t, q, p, ctx)) { + if (!BN_mod_exp_mont(b, t, q, p, ctx, NULL)) { goto end; } - /* y := b^2 */ + // y := b^2 if (!BN_mod_sqr(y, b, p, ctx)) { goto end; } - /* t := (2*a)*b^2 - 1*/ + // t := (2*a)*b^2 - 1 if (!BN_mod_mul(t, t, y, p, ctx) || !BN_sub_word(t, 1)) { goto end; } - /* x = a*b*t */ + // x = a*b*t if (!BN_mod_mul(x, A, b, p, ctx) || !BN_mod_mul(x, x, t, p, ctx)) { goto end; @@ -221,17 +221,16 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto vrfy; } - /* e > 2, so we really have to use the Tonelli/Shanks algorithm. - * First, find some y that is not a square. */ + // e > 2, so we really have to use the Tonelli/Shanks algorithm. + // First, find some y that is not a square. if (!BN_copy(q, p)) { - goto end; /* use 'q' as temp */ + goto end; // use 'q' as temp } q->neg = 0; i = 2; do { - /* For efficiency, try small numbers first; - * if this fails, try random numbers. - */ + // For efficiency, try small numbers first; + // if this fails, try random numbers. if (i < 22) { if (!BN_set_word(y, i)) { goto end; @@ -245,7 +244,7 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto end; } } - /* now 0 <= y < |p| */ + // now 0 <= y < |p| if (BN_is_zero(y)) { if (!BN_set_word(y, i)) { goto end; @@ -253,35 +252,34 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { } } - r = BN_kronecker(y, q, ctx); /* here 'q' is |p| */ + r = bn_jacobi(y, q, ctx); // here 'q' is |p| if (r < -1) { goto end; } if (r == 0) { - /* m divides p */ + // m divides p OPENSSL_PUT_ERROR(BN, BN_R_P_IS_NOT_PRIME); goto end; } } while (r == 1 && ++i < 82); if (r != -1) { - /* Many rounds and still no non-square -- this is more likely - * a bug than just bad luck. - * Even if p is not prime, we should have found some y - * such that r == -1. 
- */ + // Many rounds and still no non-square -- this is more likely + // a bug than just bad luck. + // Even if p is not prime, we should have found some y + // such that r == -1. OPENSSL_PUT_ERROR(BN, BN_R_TOO_MANY_ITERATIONS); goto end; } - /* Here's our actual 'q': */ + // Here's our actual 'q': if (!BN_rshift(q, q, e)) { goto end; } - /* Now that we have some non-square, we can find an element - * of order 2^e by computing its q'th power. */ - if (!BN_mod_exp(y, y, q, p, ctx)) { + // Now that we have some non-square, we can find an element + // of order 2^e by computing its q'th power. + if (!BN_mod_exp_mont(y, y, q, p, ctx, NULL)) { goto end; } if (BN_is_one(y)) { @@ -289,37 +287,36 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto end; } - /* Now we know that (if p is indeed prime) there is an integer - * k, 0 <= k < 2^e, such that - * - * a^q * y^k == 1 (mod p). - * - * As a^q is a square and y is not, k must be even. - * q+1 is even, too, so there is an element - * - * X := a^((q+1)/2) * y^(k/2), - * - * and it satisfies - * - * X^2 = a^q * a * y^k - * = a, - * - * so it is the square root that we are looking for. - */ - - /* t := (q-1)/2 (note that q is odd) */ + // Now we know that (if p is indeed prime) there is an integer + // k, 0 <= k < 2^e, such that + // + // a^q * y^k == 1 (mod p). + // + // As a^q is a square and y is not, k must be even. + // q+1 is even, too, so there is an element + // + // X := a^((q+1)/2) * y^(k/2), + // + // and it satisfies + // + // X^2 = a^q * a * y^k + // = a, + // + // so it is the square root that we are looking for. + + // t := (q-1)/2 (note that q is odd) if (!BN_rshift1(t, q)) { goto end; } - /* x := a^((q-1)/2) */ - if (BN_is_zero(t)) /* special case: p = 2^e + 1 */ + // x := a^((q-1)/2) + if (BN_is_zero(t)) // special case: p = 2^e + 1 { if (!BN_nnmod(t, A, p, ctx)) { goto end; } if (BN_is_zero(t)) { - /* special case: a == 0 (mod p) */ + // special case: a == 0 (mod p) BN_zero(ret); err = 0; goto end; @@ -327,37 +324,36 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { goto end; } } else { - if (!BN_mod_exp(x, A, t, p, ctx)) { + if (!BN_mod_exp_mont(x, A, t, p, ctx, NULL)) { goto end; } if (BN_is_zero(x)) { - /* special case: a == 0 (mod p) */ + // special case: a == 0 (mod p) BN_zero(ret); err = 0; goto end; } } - /* b := a*x^2 (= a^q) */ + // b := a*x^2 (= a^q) if (!BN_mod_sqr(b, x, p, ctx) || !BN_mod_mul(b, b, A, p, ctx)) { goto end; } - /* x := a*x (= a^((q+1)/2)) */ + // x := a*x (= a^((q+1)/2)) if (!BN_mod_mul(x, x, A, p, ctx)) { goto end; } while (1) { - /* Now b is a^q * y^k for some even k (0 <= k < 2^E - * where E refers to the original value of e, which we - * don't keep in a variable), and x is a^((q+1)/2) * y^(k/2). - * - * We have a*b = x^2, - * y^2^(e-1) = -1, - * b^2^(e-1) = 1. - */ + // Now b is a^q * y^k for some even k (0 <= k < 2^E + // where E refers to the original value of e, which we + // don't keep in a variable), and x is a^((q+1)/2) * y^(k/2). + // + // We have a*b = x^2, + // y^2^(e-1) = -1, + // b^2^(e-1) = 1. 
if (BN_is_one(b)) { if (!BN_copy(ret, x)) { @@ -368,7 +364,7 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { } - /* find smallest i such that b^(2^i) = 1 */ + // find smallest i such that b^(2^i) = 1 i = 1; if (!BN_mod_sqr(t, b, p, ctx)) { goto end; @@ -385,7 +381,7 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { } - /* t := y^2^(e - i - 1) */ + // t := y^2^(e - i - 1) if (!BN_copy(t, y)) { goto end; } @@ -404,8 +400,8 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx) { vrfy: if (!err) { - /* verify the result -- the input might have been not a square - * (test added in 0.9.8) */ + // verify the result -- the input might have been not a square + // (test added in 0.9.8) if (!BN_mod_sqr(x, ret, p, ctx)) { err = 1; @@ -455,30 +451,30 @@ int BN_sqrt(BIGNUM *out_sqrt, const BIGNUM *in, BN_CTX *ctx) { goto err; } - /* We estimate that the square root of an n-bit number is 2^{n/2}. */ + // We estimate that the square root of an n-bit number is 2^{n/2}. if (!BN_lshift(estimate, BN_value_one(), BN_num_bits(in)/2)) { goto err; } - /* This is Newton's method for finding a root of the equation |estimate|^2 - - * |in| = 0. */ + // This is Newton's method for finding a root of the equation |estimate|^2 - + // |in| = 0. for (;;) { - /* |estimate| = 1/2 * (|estimate| + |in|/|estimate|) */ + // |estimate| = 1/2 * (|estimate| + |in|/|estimate|) if (!BN_div(tmp, NULL, in, estimate, ctx) || !BN_add(tmp, tmp, estimate) || !BN_rshift1(estimate, tmp) || - /* |tmp| = |estimate|^2 */ + // |tmp| = |estimate|^2 !BN_sqr(tmp, estimate, ctx) || - /* |delta| = |in| - |tmp| */ + // |delta| = |in| - |tmp| !BN_sub(delta, in, tmp)) { OPENSSL_PUT_ERROR(BN, ERR_R_BN_LIB); goto err; } delta->neg = 0; - /* The difference between |in| and |estimate| squared is required to always - * decrease. This ensures that the loop always terminates, but I don't have - * a proof that it always finds the square root for a given square. */ + // The difference between |in| and |estimate| squared is required to always + // decrease. This ensures that the loop always terminates, but I don't have + // a proof that it always finds the square root for a given square. if (last_delta_valid && BN_cmp(delta, last_delta) >= 0) { break; } diff --git a/Sources/BoringSSL/crypto/fipsmodule/cipher/aead.c b/Sources/BoringSSL/crypto/fipsmodule/cipher/aead.c new file mode 100644 index 000000000..8d2ad0487 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/cipher/aead.c @@ -0,0 +1,284 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +#include + +#include +#include + +#include +#include +#include + +#include "internal.h" +#include "../../internal.h" + + +size_t EVP_AEAD_key_length(const EVP_AEAD *aead) { return aead->key_len; } + +size_t EVP_AEAD_nonce_length(const EVP_AEAD *aead) { return aead->nonce_len; } + +size_t EVP_AEAD_max_overhead(const EVP_AEAD *aead) { return aead->overhead; } + +size_t EVP_AEAD_max_tag_len(const EVP_AEAD *aead) { return aead->max_tag_len; } + +void EVP_AEAD_CTX_zero(EVP_AEAD_CTX *ctx) { + OPENSSL_memset(ctx, 0, sizeof(EVP_AEAD_CTX)); +} + +EVP_AEAD_CTX *EVP_AEAD_CTX_new(const EVP_AEAD *aead, const uint8_t *key, + size_t key_len, size_t tag_len) { + EVP_AEAD_CTX *ctx = OPENSSL_malloc(sizeof(EVP_AEAD_CTX)); + EVP_AEAD_CTX_zero(ctx); + + if (EVP_AEAD_CTX_init(ctx, aead, key, key_len, tag_len, NULL)) { + return ctx; + } + + EVP_AEAD_CTX_free(ctx); + return NULL; +} + +void EVP_AEAD_CTX_free(EVP_AEAD_CTX *ctx) { + EVP_AEAD_CTX_cleanup(ctx); + OPENSSL_free(ctx); +} + +int EVP_AEAD_CTX_init(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead, + const uint8_t *key, size_t key_len, size_t tag_len, + ENGINE *impl) { + if (!aead->init) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_NO_DIRECTION_SET); + ctx->aead = NULL; + return 0; + } + return EVP_AEAD_CTX_init_with_direction(ctx, aead, key, key_len, tag_len, + evp_aead_open); +} + +int EVP_AEAD_CTX_init_with_direction(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead, + const uint8_t *key, size_t key_len, + size_t tag_len, + enum evp_aead_direction_t dir) { + if (key_len != aead->key_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_KEY_SIZE); + ctx->aead = NULL; + return 0; + } + + ctx->aead = aead; + + int ok; + if (aead->init) { + ok = aead->init(ctx, key, key_len, tag_len); + } else { + ok = aead->init_with_direction(ctx, key, key_len, tag_len, dir); + } + + if (!ok) { + ctx->aead = NULL; + } + + return ok; +} + +void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx) { + if (ctx->aead == NULL) { + return; + } + ctx->aead->cleanup(ctx); + ctx->aead = NULL; +} + +// check_alias returns 1 if |out| is compatible with |in| and 0 otherwise. If +// |in| and |out| alias, we require that |in| == |out|. +static int check_alias(const uint8_t *in, size_t in_len, const uint8_t *out, + size_t out_len) { + if (!buffers_alias(in, in_len, out, out_len)) { + return 1; + } + + return in == out; +} + +int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, + size_t max_out_len, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, + const uint8_t *ad, size_t ad_len) { + if (in_len + ctx->aead->overhead < in_len /* overflow */) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); + goto error; + } + + if (max_out_len < in_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); + goto error; + } + + if (!check_alias(in, in_len, out, max_out_len)) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT); + goto error; + } + + size_t out_tag_len; + if (ctx->aead->seal_scatter(ctx, out, out + in_len, &out_tag_len, + max_out_len - in_len, nonce, nonce_len, in, + in_len, NULL, 0, ad, ad_len)) { + *out_len = in_len + out_tag_len; + return 1; + } + +error: + // In the event of an error, clear the output buffer so that a caller + // that doesn't check the return value doesn't send raw data. 
+ OPENSSL_memset(out, 0, max_out_len); + *out_len = 0; + return 0; +} + +int EVP_AEAD_CTX_seal_scatter( + const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, size_t + *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, size_t + nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, + size_t extra_in_len, const uint8_t *ad, size_t ad_len) { + // |in| and |out| may alias exactly, |out_tag| may not alias. + if (!check_alias(in, in_len, out, in_len) || + buffers_alias(out, in_len, out_tag, max_out_tag_len) || + buffers_alias(in, in_len, out_tag, max_out_tag_len)) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT); + goto error; + } + + if (!ctx->aead->seal_scatter_supports_extra_in && extra_in_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_OPERATION); + goto error; + } + + if (ctx->aead->seal_scatter(ctx, out, out_tag, out_tag_len, max_out_tag_len, + nonce, nonce_len, in, in_len, extra_in, + extra_in_len, ad, ad_len)) { + return 1; + } + +error: + // In the event of an error, clear the output buffer so that a caller + // that doesn't check the return value doesn't send raw data. + OPENSSL_memset(out, 0, in_len); + OPENSSL_memset(out_tag, 0, max_out_tag_len); + *out_tag_len = 0; + return 0; +} + +int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, + size_t max_out_len, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, + const uint8_t *ad, size_t ad_len) { + if (!check_alias(in, in_len, out, max_out_len)) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT); + goto error; + } + + if (ctx->aead->open) { + if (!ctx->aead->open(ctx, out, out_len, max_out_len, nonce, nonce_len, in, + in_len, ad, ad_len)) { + goto error; + } + return 1; + } + + // AEADs that use the default implementation of open() must set |tag_len| at + // initialization time. + assert(ctx->tag_len); + + if (in_len < ctx->tag_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + goto error; + } + + size_t plaintext_len = in_len - ctx->tag_len; + if (max_out_len < plaintext_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); + goto error; + } + if (EVP_AEAD_CTX_open_gather(ctx, out, nonce, nonce_len, in, plaintext_len, + in + plaintext_len, ctx->tag_len, ad, ad_len)) { + *out_len = plaintext_len; + return 1; + } + +error: + // In the event of an error, clear the output buffer so that a caller + // that doesn't check the return value doesn't try and process bad + // data. + OPENSSL_memset(out, 0, max_out_len); + *out_len = 0; + return 0; +} + +int EVP_AEAD_CTX_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *in_tag, size_t in_tag_len, + const uint8_t *ad, size_t ad_len) { + if (!check_alias(in, in_len, out, in_len)) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_OUTPUT_ALIASES_INPUT); + goto error; + } + + if (!ctx->aead->open_gather) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_CTRL_NOT_IMPLEMENTED); + goto error; + } + + if (ctx->aead->open_gather(ctx, out, nonce, nonce_len, in, in_len, in_tag, + in_tag_len, ad, ad_len)) { + return 1; + } + +error: + // In the event of an error, clear the output buffer so that a caller + // that doesn't check the return value doesn't try and process bad + // data. 
+ OPENSSL_memset(out, 0, in_len); + return 0; +} + +const EVP_AEAD *EVP_AEAD_CTX_aead(const EVP_AEAD_CTX *ctx) { return ctx->aead; } + +int EVP_AEAD_CTX_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv, + size_t *out_len) { + if (ctx->aead->get_iv == NULL) { + return 0; + } + + return ctx->aead->get_iv(ctx, out_iv, out_len); +} + +int EVP_AEAD_CTX_tag_len(const EVP_AEAD_CTX *ctx, size_t *out_tag_len, + const size_t in_len, const size_t extra_in_len) { + assert(ctx->aead->seal_scatter_supports_extra_in || !extra_in_len); + + if (ctx->aead->tag_len) { + *out_tag_len = ctx->aead->tag_len(ctx, in_len, extra_in_len); + return 1; + } + + if (extra_in_len + ctx->tag_len < extra_in_len) { + OPENSSL_PUT_ERROR(CIPHER, ERR_R_OVERFLOW); + *out_tag_len = 0; + return 0; + } + *out_tag_len = extra_in_len + ctx->tag_len; + return 1; +} diff --git a/Sources/BoringSSL/crypto/cipher/cipher.c b/Sources/BoringSSL/crypto/fipsmodule/cipher/cipher.c similarity index 87% rename from Sources/BoringSSL/crypto/cipher/cipher.c rename to Sources/BoringSSL/crypto/fipsmodule/cipher/cipher.c index e46e43ef1..f3d405742 100644 --- a/Sources/BoringSSL/crypto/cipher/cipher.c +++ b/Sources/BoringSSL/crypto/fipsmodule/cipher/cipher.c @@ -64,29 +64,8 @@ #include #include "internal.h" -#include "../internal.h" - - -const EVP_CIPHER *EVP_get_cipherbynid(int nid) { - switch (nid) { - case NID_rc2_cbc: - return EVP_rc2_cbc(); - case NID_rc2_40_cbc: - return EVP_rc2_40_cbc(); - case NID_des_ede3_cbc: - return EVP_des_ede3_cbc(); - case NID_des_ede_cbc: - return EVP_des_cbc(); - case NID_aes_128_cbc: - return EVP_aes_128_cbc(); - case NID_aes_192_cbc: - return EVP_aes_192_cbc(); - case NID_aes_256_cbc: - return EVP_aes_256_cbc(); - default: - return NULL; - } -} +#include "../../internal.h" + void EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *ctx) { OPENSSL_memset(ctx, 0, sizeof(EVP_CIPHER_CTX)); @@ -101,11 +80,8 @@ EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void) { } int EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *c) { - if (c->cipher != NULL) { - if (c->cipher->cleanup) { - c->cipher->cleanup(c); - } - OPENSSL_cleanse(c->cipher_data, c->cipher->ctx_size); + if (c->cipher != NULL && c->cipher->cleanup) { + c->cipher->cleanup(c); } OPENSSL_free(c->cipher_data); @@ -149,6 +125,11 @@ int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in) { return 1; } +void EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx) { + EVP_CIPHER_CTX_cleanup(ctx); + EVP_CIPHER_CTX_init(ctx); +} + int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *engine, const uint8_t *key, const uint8_t *iv, int enc) { @@ -162,12 +143,12 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, } if (cipher) { - /* Ensure a context left from last time is cleared (the previous check - * attempted to avoid this if the same ENGINE and EVP_CIPHER could be - * used). */ + // Ensure a context left from last time is cleared (the previous check + // attempted to avoid this if the same ENGINE and EVP_CIPHER could be + // used). 
if (ctx->cipher) { EVP_CIPHER_CTX_cleanup(ctx); - /* Restore encrypt and flags */ + // Restore encrypt and flags ctx->encrypt = enc; } @@ -198,7 +179,7 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, return 0; } - /* we assume block size is a power of 2 in *cryptUpdate */ + // we assume block size is a power of 2 in *cryptUpdate assert(ctx->cipher->block_size == 1 || ctx->cipher->block_size == 8 || ctx->cipher->block_size == 16); @@ -210,7 +191,7 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, case EVP_CIPH_CFB_MODE: ctx->num = 0; - /* fall-through */ + // fall-through case EVP_CIPH_CBC_MODE: assert(EVP_CIPHER_CTX_iv_length(ctx) <= sizeof(ctx->iv)); @@ -223,7 +204,7 @@ int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, case EVP_CIPH_CTR_MODE: case EVP_CIPH_OFB_MODE: ctx->num = 0; - /* Don't reuse IV for CTR mode */ + // Don't reuse IV for CTR mode if (iv) { OPENSSL_memcpy(ctx->iv, iv, EVP_CIPHER_CTX_iv_length(ctx)); } @@ -409,8 +390,8 @@ int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, return 0; } - /* if we have 'decrypted' a multiple of block size, make sure - * we have a copy of this last block */ + // if we have 'decrypted' a multiple of block size, make sure + // we have a copy of this last block if (b > 1 && !ctx->buf_len) { *out_len -= b; ctx->final_used = 1; @@ -458,8 +439,8 @@ int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len) { } assert(b <= sizeof(ctx->final)); - /* The following assumes that the ciphertext has been authenticated. - * Otherwise it provides a padding oracle. */ + // The following assumes that the ciphertext has been authenticated. + // Otherwise it provides a padding oracle. n = ctx->final[b - 1]; if (n == 0 || n > (int)b) { OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); @@ -631,27 +612,4 @@ int EVP_add_cipher_alias(const char *a, const char *b) { return 1; } -const EVP_CIPHER *EVP_get_cipherbyname(const char *name) { - if (OPENSSL_strcasecmp(name, "rc4") == 0) { - return EVP_rc4(); - } else if (OPENSSL_strcasecmp(name, "des-cbc") == 0) { - return EVP_des_cbc(); - } else if (OPENSSL_strcasecmp(name, "des-ede3-cbc") == 0 || - OPENSSL_strcasecmp(name, "3des") == 0) { - return EVP_des_ede3_cbc(); - } else if (OPENSSL_strcasecmp(name, "aes-128-cbc") == 0) { - return EVP_aes_128_cbc(); - } else if (OPENSSL_strcasecmp(name, "aes-256-cbc") == 0) { - return EVP_aes_256_cbc(); - } else if (OPENSSL_strcasecmp(name, "aes-128-ctr") == 0) { - return EVP_aes_128_ctr(); - } else if (OPENSSL_strcasecmp(name, "aes-256-ctr") == 0) { - return EVP_aes_256_ctr(); - } else if (OPENSSL_strcasecmp(name, "aes-128-ecb") == 0) { - return EVP_aes_128_ecb(); - } else if (OPENSSL_strcasecmp(name, "aes-256-ecb") == 0) { - return EVP_aes_256_ecb(); - } - - return NULL; -} +void EVP_CIPHER_CTX_set_flags(const EVP_CIPHER_CTX *ctx, uint32_t flags) {} diff --git a/Sources/BoringSSL/crypto/fipsmodule/cipher/e_aes.c b/Sources/BoringSSL/crypto/fipsmodule/cipher/e_aes.c new file mode 100644 index 000000000..b46927675 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/cipher/e_aes.c @@ -0,0 +1,1437 @@ +/* ==================================================================== + * Copyright (c) 2001-2011 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" +#include "../../internal.h" +#include "../aes/internal.h" +#include "../modes/internal.h" +#include "../delocate.h" + +#if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) +#include +#endif + + +OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) // Unreachable code. 
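// --- Illustrative aside (not part of the vendored diff): a minimal sketch of
// the EVP_CIPHER_CTX lifecycle handled by cipher.c above (init, update, final,
// cleanup), assuming the public <openssl/cipher.h> interface and AES-128-CBC
// with the default PKCS#7 padding.
#include <assert.h>
#include <stdint.h>
#include <openssl/cipher.h>

static void cbc_encrypt_once(const uint8_t key[16], const uint8_t iv[16],
                             const uint8_t *in, int in_len,
                             uint8_t *out /* at least in_len + 16 bytes */,
                             int *out_len) {
  EVP_CIPHER_CTX ctx;
  EVP_CIPHER_CTX_init(&ctx);
  // EVP_CipherInit_ex (via EVP_EncryptInit_ex) clears any previous cipher
  // state before keying, as the comment in cipher.c above notes.
  assert(EVP_EncryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL, key, iv));
  int n = 0, fin = 0;
  assert(EVP_EncryptUpdate(&ctx, out, &n, in, in_len));
  assert(EVP_EncryptFinal_ex(&ctx, out + n, &fin));  // emits the padding block
  *out_len = n + fin;
  EVP_CIPHER_CTX_cleanup(&ctx);
}
// --- end aside ---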
+ +typedef struct { + union { + double align; + AES_KEY ks; + } ks; + block128_f block; + union { + cbc128_f cbc; + ctr128_f ctr; + } stream; +} EVP_AES_KEY; + +typedef struct { + union { + double align; + AES_KEY ks; + } ks; // AES key schedule to use + int key_set; // Set if key initialised + int iv_set; // Set if an iv is set + GCM128_CONTEXT gcm; + uint8_t *iv; // Temporary IV store + int ivlen; // IV length + int taglen; + int iv_gen; // It is OK to generate IVs + ctr128_f ctr; +} EVP_AES_GCM_CTX; + +#if !defined(OPENSSL_NO_ASM) && \ + (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) +#define VPAES +static char vpaes_capable(void) { + return (OPENSSL_ia32cap_P[1] & (1 << (41 - 32))) != 0; +} + +#if defined(OPENSSL_X86_64) +#define BSAES +static char bsaes_capable(void) { + return vpaes_capable(); +} +#endif + +#elif !defined(OPENSSL_NO_ASM) && \ + (defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)) + +#if defined(OPENSSL_ARM) && __ARM_MAX_ARCH__ >= 7 +#define BSAES +static char bsaes_capable(void) { + return CRYPTO_is_NEON_capable(); +} +#endif + +#endif + + +#if defined(BSAES) +// On platforms where BSAES gets defined (just above), then these functions are +// provided by asm. +void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, + const AES_KEY *key, uint8_t ivec[16], int enc); +void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len, + const AES_KEY *key, const uint8_t ivec[16]); +#else +static char bsaes_capable(void) { + return 0; +} + +// On other platforms, bsaes_capable() will always return false and so the +// following will never be called. +static void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, + const AES_KEY *key, uint8_t ivec[16], int enc) { + abort(); +} + +static void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, + size_t len, const AES_KEY *key, + const uint8_t ivec[16]) { + abort(); +} +#endif + +#if defined(VPAES) +// On platforms where VPAES gets defined (just above), then these functions are +// provided by asm. +int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); +int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); + +void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); +void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); + +void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, + const AES_KEY *key, uint8_t *ivec, int enc); +#else +static char vpaes_capable(void) { + return 0; +} + +// On other platforms, vpaes_capable() will always return false and so the +// following will never be called. 
+static int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, + AES_KEY *key) { + abort(); +} +static int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, + AES_KEY *key) { + abort(); +} +static void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { + abort(); +} +static void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { + abort(); +} +static void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, + const AES_KEY *key, uint8_t *ivec, int enc) { + abort(); +} +#endif + +#if !defined(OPENSSL_NO_ASM) && \ + (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) +int aesni_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); +int aesni_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key); + +void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); +void aesni_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); + +void aesni_ecb_encrypt(const uint8_t *in, uint8_t *out, size_t length, + const AES_KEY *key, int enc); +void aesni_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length, + const AES_KEY *key, uint8_t *ivec, int enc); + +#else + +// On other platforms, aesni_capable() will always return false and so the +// following will never be called. +static void aesni_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) { + abort(); +} +static int aesni_set_encrypt_key(const uint8_t *userKey, int bits, + AES_KEY *key) { + abort(); +} +static void aesni_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, + size_t blocks, const void *key, + const uint8_t *ivec) { + abort(); +} + +#endif + +static int aes_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, + const uint8_t *iv, int enc) { + int ret, mode; + EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; + + mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK; + if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) { + if (hwaes_capable()) { + ret = aes_hw_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); + dat->block = (block128_f)aes_hw_decrypt; + dat->stream.cbc = NULL; + if (mode == EVP_CIPH_CBC_MODE) { + dat->stream.cbc = (cbc128_f)aes_hw_cbc_encrypt; + } + } else if (bsaes_capable() && mode == EVP_CIPH_CBC_MODE) { + ret = AES_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); + dat->block = (block128_f)AES_decrypt; + dat->stream.cbc = (cbc128_f)bsaes_cbc_encrypt; + } else if (vpaes_capable()) { + ret = vpaes_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); + dat->block = (block128_f)vpaes_decrypt; + dat->stream.cbc = + mode == EVP_CIPH_CBC_MODE ? (cbc128_f)vpaes_cbc_encrypt : NULL; + } else { + ret = AES_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks); + dat->block = (block128_f)AES_decrypt; + dat->stream.cbc = + mode == EVP_CIPH_CBC_MODE ? 
(cbc128_f)AES_cbc_encrypt : NULL; + } + } else if (hwaes_capable()) { + ret = aes_hw_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); + dat->block = (block128_f)aes_hw_encrypt; + dat->stream.cbc = NULL; + if (mode == EVP_CIPH_CBC_MODE) { + dat->stream.cbc = (cbc128_f)aes_hw_cbc_encrypt; + } else if (mode == EVP_CIPH_CTR_MODE) { + dat->stream.ctr = (ctr128_f)aes_hw_ctr32_encrypt_blocks; + } + } else if (bsaes_capable() && mode == EVP_CIPH_CTR_MODE) { + ret = AES_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); + dat->block = (block128_f)AES_encrypt; + dat->stream.ctr = (ctr128_f)bsaes_ctr32_encrypt_blocks; + } else if (vpaes_capable()) { + ret = vpaes_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); + dat->block = (block128_f)vpaes_encrypt; + dat->stream.cbc = + mode == EVP_CIPH_CBC_MODE ? (cbc128_f)vpaes_cbc_encrypt : NULL; + } else { + ret = AES_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks); + dat->block = (block128_f)AES_encrypt; + dat->stream.cbc = + mode == EVP_CIPH_CBC_MODE ? (cbc128_f)AES_cbc_encrypt : NULL; + } + + if (ret < 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_AES_KEY_SETUP_FAILED); + return 0; + } + + return 1; +} + +static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, + size_t len) { + EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; + + if (dat->stream.cbc) { + (*dat->stream.cbc)(in, out, len, &dat->ks, ctx->iv, ctx->encrypt); + } else if (ctx->encrypt) { + CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv, dat->block); + } else { + CRYPTO_cbc128_decrypt(in, out, len, &dat->ks, ctx->iv, dat->block); + } + + return 1; +} + +static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, + size_t len) { + size_t bl = ctx->cipher->block_size; + EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; + + if (len < bl) { + return 1; + } + + len -= bl; + for (size_t i = 0; i <= len; i += bl) { + (*dat->block)(in + i, out + i, &dat->ks); + } + + return 1; +} + +static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, + size_t len) { + EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; + + if (dat->stream.ctr) { + CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks, ctx->iv, ctx->buf, + &ctx->num, dat->stream.ctr); + } else { + CRYPTO_ctr128_encrypt(in, out, len, &dat->ks, ctx->iv, ctx->buf, &ctx->num, + dat->block); + } + return 1; +} + +static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, + size_t len) { + EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; + + CRYPTO_ofb128_encrypt(in, out, len, &dat->ks, ctx->iv, &ctx->num, dat->block); + return 1; +} + +static char aesni_capable(void); + +ctr128_f aes_ctr_set_key(AES_KEY *aes_key, GCM128_CONTEXT *gcm_ctx, + block128_f *out_block, const uint8_t *key, + size_t key_bytes) { + if (aesni_capable()) { + aesni_set_encrypt_key(key, key_bytes * 8, aes_key); + if (gcm_ctx != NULL) { + CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)aesni_encrypt, 1); + } + if (out_block) { + *out_block = (block128_f) aesni_encrypt; + } + return (ctr128_f)aesni_ctr32_encrypt_blocks; + } + + if (hwaes_capable()) { + aes_hw_set_encrypt_key(key, key_bytes * 8, aes_key); + if (gcm_ctx != NULL) { + CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)aes_hw_encrypt, 0); + } + if (out_block) { + *out_block = (block128_f) aes_hw_encrypt; + } + return (ctr128_f)aes_hw_ctr32_encrypt_blocks; + } + + if (bsaes_capable()) { + AES_set_encrypt_key(key, key_bytes * 8, aes_key); + if (gcm_ctx != NULL) { + CRYPTO_gcm128_init(gcm_ctx, aes_key, 
(block128_f)AES_encrypt, 0); + } + if (out_block) { + *out_block = (block128_f) AES_encrypt; + } + return (ctr128_f)bsaes_ctr32_encrypt_blocks; + } + + if (vpaes_capable()) { + vpaes_set_encrypt_key(key, key_bytes * 8, aes_key); + if (out_block) { + *out_block = (block128_f) vpaes_encrypt; + } + if (gcm_ctx != NULL) { + CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)vpaes_encrypt, 0); + } + return NULL; + } + + AES_set_encrypt_key(key, key_bytes * 8, aes_key); + if (gcm_ctx != NULL) { + CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)AES_encrypt, 0); + } + if (out_block) { + *out_block = (block128_f) AES_encrypt; + } + return NULL; +} + +static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, + const uint8_t *iv, int enc) { + EVP_AES_GCM_CTX *gctx = ctx->cipher_data; + if (!iv && !key) { + return 1; + } + if (key) { + gctx->ctr = + aes_ctr_set_key(&gctx->ks.ks, &gctx->gcm, NULL, key, ctx->key_len); + // If we have an iv can set it directly, otherwise use saved IV. + if (iv == NULL && gctx->iv_set) { + iv = gctx->iv; + } + if (iv) { + CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); + gctx->iv_set = 1; + } + gctx->key_set = 1; + } else { + // If key set use IV, otherwise copy + if (gctx->key_set) { + CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); + } else { + OPENSSL_memcpy(gctx->iv, iv, gctx->ivlen); + } + gctx->iv_set = 1; + gctx->iv_gen = 0; + } + return 1; +} + +static void aes_gcm_cleanup(EVP_CIPHER_CTX *c) { + EVP_AES_GCM_CTX *gctx = c->cipher_data; + OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm)); + if (gctx->iv != c->iv) { + OPENSSL_free(gctx->iv); + } +} + +// increment counter (64-bit int) by 1 +static void ctr64_inc(uint8_t *counter) { + int n = 8; + uint8_t c; + + do { + --n; + c = counter[n]; + ++c; + counter[n] = c; + if (c) { + return; + } + } while (n); +} + +static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) { + EVP_AES_GCM_CTX *gctx = c->cipher_data; + switch (type) { + case EVP_CTRL_INIT: + gctx->key_set = 0; + gctx->iv_set = 0; + gctx->ivlen = c->cipher->iv_len; + gctx->iv = c->iv; + gctx->taglen = -1; + gctx->iv_gen = 0; + return 1; + + case EVP_CTRL_GCM_SET_IVLEN: + if (arg <= 0) { + return 0; + } + + // Allocate memory for IV if needed + if (arg > EVP_MAX_IV_LENGTH && arg > gctx->ivlen) { + if (gctx->iv != c->iv) { + OPENSSL_free(gctx->iv); + } + gctx->iv = OPENSSL_malloc(arg); + if (!gctx->iv) { + return 0; + } + } + gctx->ivlen = arg; + return 1; + + case EVP_CTRL_GCM_SET_TAG: + if (arg <= 0 || arg > 16 || c->encrypt) { + return 0; + } + OPENSSL_memcpy(c->buf, ptr, arg); + gctx->taglen = arg; + return 1; + + case EVP_CTRL_GCM_GET_TAG: + if (arg <= 0 || arg > 16 || !c->encrypt || gctx->taglen < 0) { + return 0; + } + OPENSSL_memcpy(ptr, c->buf, arg); + return 1; + + case EVP_CTRL_GCM_SET_IV_FIXED: + // Special case: -1 length restores whole IV + if (arg == -1) { + OPENSSL_memcpy(gctx->iv, ptr, gctx->ivlen); + gctx->iv_gen = 1; + return 1; + } + // Fixed field must be at least 4 bytes and invocation field + // at least 8. 
+ if (arg < 4 || (gctx->ivlen - arg) < 8) { + return 0; + } + if (arg) { + OPENSSL_memcpy(gctx->iv, ptr, arg); + } + if (c->encrypt && !RAND_bytes(gctx->iv + arg, gctx->ivlen - arg)) { + return 0; + } + gctx->iv_gen = 1; + return 1; + + case EVP_CTRL_GCM_IV_GEN: + if (gctx->iv_gen == 0 || gctx->key_set == 0) { + return 0; + } + CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, gctx->iv, gctx->ivlen); + if (arg <= 0 || arg > gctx->ivlen) { + arg = gctx->ivlen; + } + OPENSSL_memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg); + // Invocation field will be at least 8 bytes in size and + // so no need to check wrap around or increment more than + // last 8 bytes. + ctr64_inc(gctx->iv + gctx->ivlen - 8); + gctx->iv_set = 1; + return 1; + + case EVP_CTRL_GCM_SET_IV_INV: + if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt) { + return 0; + } + OPENSSL_memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg); + CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, gctx->iv, gctx->ivlen); + gctx->iv_set = 1; + return 1; + + case EVP_CTRL_COPY: { + EVP_CIPHER_CTX *out = ptr; + EVP_AES_GCM_CTX *gctx_out = out->cipher_data; + if (gctx->iv == c->iv) { + gctx_out->iv = out->iv; + } else { + gctx_out->iv = OPENSSL_malloc(gctx->ivlen); + if (!gctx_out->iv) { + return 0; + } + OPENSSL_memcpy(gctx_out->iv, gctx->iv, gctx->ivlen); + } + return 1; + } + + default: + return -1; + } +} + +static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, + size_t len) { + EVP_AES_GCM_CTX *gctx = ctx->cipher_data; + + // If not set up, return error + if (!gctx->key_set) { + return -1; + } + if (!gctx->iv_set) { + return -1; + } + + if (in) { + if (out == NULL) { + if (!CRYPTO_gcm128_aad(&gctx->gcm, in, len)) { + return -1; + } + } else if (ctx->encrypt) { + if (gctx->ctr) { + if (!CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, &gctx->ks.ks, in, out, len, + gctx->ctr)) { + return -1; + } + } else { + if (!CRYPTO_gcm128_encrypt(&gctx->gcm, &gctx->ks.ks, in, out, len)) { + return -1; + } + } + } else { + if (gctx->ctr) { + if (!CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, &gctx->ks.ks, in, out, len, + gctx->ctr)) { + return -1; + } + } else { + if (!CRYPTO_gcm128_decrypt(&gctx->gcm, &gctx->ks.ks, in, out, len)) { + return -1; + } + } + } + return len; + } else { + if (!ctx->encrypt) { + if (gctx->taglen < 0 || + !CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen)) { + return -1; + } + gctx->iv_set = 0; + return 0; + } + CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16); + gctx->taglen = 16; + // Don't reuse the IV + gctx->iv_set = 0; + return 0; + } +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_128_cbc_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_cbc; + out->block_size = 16; + out->key_len = 16; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CBC_MODE; + out->init = aes_init_key; + out->cipher = aes_cbc_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_128_ctr_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_ctr; + out->block_size = 1; + out->key_len = 16; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CTR_MODE; + out->init = aes_init_key; + out->cipher = aes_ctr_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_128_ecb_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_ecb; + out->block_size = 16; + out->key_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_ECB_MODE; + out->init = aes_init_key; + out->cipher = aes_ecb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, 
aes_128_ofb_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_ofb128; + out->block_size = 1; + out->key_len = 16; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_OFB_MODE; + out->init = aes_init_key; + out->cipher = aes_ofb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_128_gcm_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_gcm; + out->block_size = 1; + out->key_len = 16; + out->iv_len = 12; + out->ctx_size = sizeof(EVP_AES_GCM_CTX); + out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | + EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | + EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER; + out->init = aes_gcm_init_key; + out->cipher = aes_gcm_cipher; + out->cleanup = aes_gcm_cleanup; + out->ctrl = aes_gcm_ctrl; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_192_cbc_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_192_cbc; + out->block_size = 16; + out->key_len = 24; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CBC_MODE; + out->init = aes_init_key; + out->cipher = aes_cbc_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_192_ctr_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_192_ctr; + out->block_size = 1; + out->key_len = 24; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CTR_MODE; + out->init = aes_init_key; + out->cipher = aes_ctr_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_192_ecb_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_192_ecb; + out->block_size = 16; + out->key_len = 24; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_ECB_MODE; + out->init = aes_init_key; + out->cipher = aes_ecb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_192_gcm_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_192_gcm; + out->block_size = 1; + out->key_len = 24; + out->iv_len = 12; + out->ctx_size = sizeof(EVP_AES_GCM_CTX); + out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | + EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | + EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER; + out->init = aes_gcm_init_key; + out->cipher = aes_gcm_cipher; + out->cleanup = aes_gcm_cleanup; + out->ctrl = aes_gcm_ctrl; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_256_cbc_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_cbc; + out->block_size = 16; + out->key_len = 32; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CBC_MODE; + out->init = aes_init_key; + out->cipher = aes_cbc_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_256_ctr_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_ctr; + out->block_size = 1; + out->key_len = 32; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CTR_MODE; + out->init = aes_init_key; + out->cipher = aes_ctr_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_256_ecb_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_ecb; + out->block_size = 16; + out->key_len = 32; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_ECB_MODE; + out->init = aes_init_key; + out->cipher = aes_ecb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_256_ofb_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_ofb128; + out->block_size = 1; + out->key_len = 32; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_OFB_MODE; + out->init = 
aes_init_key; + out->cipher = aes_ofb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aes_256_gcm_generic) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_gcm; + out->block_size = 1; + out->key_len = 32; + out->iv_len = 12; + out->ctx_size = sizeof(EVP_AES_GCM_CTX); + out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | + EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | + EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER; + out->init = aes_gcm_init_key; + out->cipher = aes_gcm_cipher; + out->cleanup = aes_gcm_cleanup; + out->ctrl = aes_gcm_ctrl; +} + +#if !defined(OPENSSL_NO_ASM) && \ + (defined(OPENSSL_X86_64) || defined(OPENSSL_X86)) + +// AES-NI section. + +static char aesni_capable(void) { + return (OPENSSL_ia32cap_P[1] & (1 << (57 - 32))) != 0; +} + +static int aesni_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, + const uint8_t *iv, int enc) { + int ret, mode; + EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data; + + mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK; + if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) { + ret = aesni_set_decrypt_key(key, ctx->key_len * 8, ctx->cipher_data); + dat->block = (block128_f)aesni_decrypt; + dat->stream.cbc = + mode == EVP_CIPH_CBC_MODE ? (cbc128_f)aesni_cbc_encrypt : NULL; + } else { + ret = aesni_set_encrypt_key(key, ctx->key_len * 8, ctx->cipher_data); + dat->block = (block128_f)aesni_encrypt; + if (mode == EVP_CIPH_CBC_MODE) { + dat->stream.cbc = (cbc128_f)aesni_cbc_encrypt; + } else if (mode == EVP_CIPH_CTR_MODE) { + dat->stream.ctr = (ctr128_f)aesni_ctr32_encrypt_blocks; + } else { + dat->stream.cbc = NULL; + } + } + + if (ret < 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_AES_KEY_SETUP_FAILED); + return 0; + } + + return 1; +} + +static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, + const uint8_t *in, size_t len) { + aesni_cbc_encrypt(in, out, len, ctx->cipher_data, ctx->iv, ctx->encrypt); + + return 1; +} + +static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, + const uint8_t *in, size_t len) { + size_t bl = ctx->cipher->block_size; + + if (len < bl) { + return 1; + } + + aesni_ecb_encrypt(in, out, len, ctx->cipher_data, ctx->encrypt); + + return 1; +} + +static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, + const uint8_t *iv, int enc) { + EVP_AES_GCM_CTX *gctx = ctx->cipher_data; + if (!iv && !key) { + return 1; + } + if (key) { + aesni_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks); + CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)aesni_encrypt, 1); + gctx->ctr = (ctr128_f)aesni_ctr32_encrypt_blocks; + // If we have an iv can set it directly, otherwise use + // saved IV. 
+ if (iv == NULL && gctx->iv_set) { + iv = gctx->iv; + } + if (iv) { + CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); + gctx->iv_set = 1; + } + gctx->key_set = 1; + } else { + // If key set use IV, otherwise copy + if (gctx->key_set) { + CRYPTO_gcm128_setiv(&gctx->gcm, &gctx->ks.ks, iv, gctx->ivlen); + } else { + OPENSSL_memcpy(gctx->iv, iv, gctx->ivlen); + } + gctx->iv_set = 1; + gctx->iv_gen = 0; + } + return 1; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_128_cbc) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_cbc; + out->block_size = 16; + out->key_len = 16; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CBC_MODE; + out->init = aesni_init_key; + out->cipher = aesni_cbc_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_128_ctr) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_ctr; + out->block_size = 1; + out->key_len = 16; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CTR_MODE; + out->init = aesni_init_key; + out->cipher = aes_ctr_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_128_ecb) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_ecb; + out->block_size = 16; + out->key_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_ECB_MODE; + out->init = aesni_init_key; + out->cipher = aesni_ecb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_128_ofb) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_ofb128; + out->block_size = 1; + out->key_len = 16; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_OFB_MODE; + out->init = aesni_init_key; + out->cipher = aes_ofb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_128_gcm) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_128_gcm; + out->block_size = 1; + out->key_len = 16; + out->iv_len = 12; + out->ctx_size = sizeof(EVP_AES_GCM_CTX); + out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | + EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | + EVP_CIPH_CTRL_INIT | EVP_CIPH_FLAG_AEAD_CIPHER; + out->init = aesni_gcm_init_key; + out->cipher = aes_gcm_cipher; + out->cleanup = aes_gcm_cleanup; + out->ctrl = aes_gcm_ctrl; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_192_cbc) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_192_cbc; + out->block_size = 16; + out->key_len = 24; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CBC_MODE; + out->init = aesni_init_key; + out->cipher = aesni_cbc_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_192_ctr) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_192_ctr; + out->block_size = 1; + out->key_len = 24; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CTR_MODE; + out->init = aesni_init_key; + out->cipher = aes_ctr_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_192_ecb) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_192_ecb; + out->block_size = 16; + out->key_len = 24; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_ECB_MODE; + out->init = aesni_init_key; + out->cipher = aesni_ecb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_192_gcm) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_192_gcm; + out->block_size = 1; + out->key_len = 24; + out->iv_len = 12; + out->ctx_size = sizeof(EVP_AES_GCM_CTX); + out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | + EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | + EVP_CIPH_CTRL_INIT 
| EVP_CIPH_FLAG_AEAD_CIPHER; + out->init = aesni_gcm_init_key; + out->cipher = aes_gcm_cipher; + out->cleanup = aes_gcm_cleanup; + out->ctrl = aes_gcm_ctrl; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_256_cbc) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_cbc; + out->block_size = 16; + out->key_len = 32; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CBC_MODE; + out->init = aesni_init_key; + out->cipher = aesni_cbc_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_256_ctr) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_ctr; + out->block_size = 1; + out->key_len = 32; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_CTR_MODE; + out->init = aesni_init_key; + out->cipher = aes_ctr_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_256_ecb) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_ecb; + out->block_size = 16; + out->key_len = 32; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_ECB_MODE; + out->init = aesni_init_key; + out->cipher = aesni_ecb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_256_ofb) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_ofb128; + out->block_size = 1; + out->key_len = 32; + out->iv_len = 16; + out->ctx_size = sizeof(EVP_AES_KEY); + out->flags = EVP_CIPH_OFB_MODE; + out->init = aesni_init_key; + out->cipher = aes_ofb_cipher; +} + +DEFINE_LOCAL_DATA(EVP_CIPHER, aesni_256_gcm) { + memset(out, 0, sizeof(EVP_CIPHER)); + + out->nid = NID_aes_256_gcm; + out->block_size = 1; + out->key_len = 32; + out->iv_len = 12; + out->ctx_size = sizeof(EVP_AES_GCM_CTX); + out->flags = EVP_CIPH_GCM_MODE | EVP_CIPH_CUSTOM_IV | + EVP_CIPH_FLAG_CUSTOM_CIPHER | EVP_CIPH_ALWAYS_CALL_INIT | + EVP_CIPH_CTRL_INIT | EVP_CIPH_CUSTOM_COPY | + EVP_CIPH_FLAG_AEAD_CIPHER; + out->init = aesni_gcm_init_key; + out->cipher = aes_gcm_cipher; + out->cleanup = aes_gcm_cleanup; + out->ctrl = aes_gcm_ctrl; +} + +#define EVP_CIPHER_FUNCTION(keybits, mode) \ + const EVP_CIPHER *EVP_aes_##keybits##_##mode(void) { \ + if (aesni_capable()) { \ + return aesni_##keybits##_##mode(); \ + } else { \ + return aes_##keybits##_##mode##_generic(); \ + } \ + } + +#else // ^^^ OPENSSL_X86_64 || OPENSSL_X86 + +static char aesni_capable(void) { + return 0; +} + +#define EVP_CIPHER_FUNCTION(keybits, mode) \ + const EVP_CIPHER *EVP_aes_##keybits##_##mode(void) { \ + return aes_##keybits##_##mode##_generic(); \ + } + +#endif + +EVP_CIPHER_FUNCTION(128, cbc) +EVP_CIPHER_FUNCTION(128, ctr) +EVP_CIPHER_FUNCTION(128, ecb) +EVP_CIPHER_FUNCTION(128, ofb) +EVP_CIPHER_FUNCTION(128, gcm) + +EVP_CIPHER_FUNCTION(192, cbc) +EVP_CIPHER_FUNCTION(192, ctr) +EVP_CIPHER_FUNCTION(192, ecb) +EVP_CIPHER_FUNCTION(192, gcm) + +EVP_CIPHER_FUNCTION(256, cbc) +EVP_CIPHER_FUNCTION(256, ctr) +EVP_CIPHER_FUNCTION(256, ecb) +EVP_CIPHER_FUNCTION(256, ofb) +EVP_CIPHER_FUNCTION(256, gcm) + + +#define EVP_AEAD_AES_GCM_TAG_LEN 16 + +struct aead_aes_gcm_ctx { + union { + double align; + AES_KEY ks; + } ks; + GCM128_CONTEXT gcm; + ctr128_f ctr; +}; + +struct aead_aes_gcm_tls12_ctx { + struct aead_aes_gcm_ctx gcm_ctx; + uint64_t counter; +}; + +static int aead_aes_gcm_init_impl(struct aead_aes_gcm_ctx *gcm_ctx, + size_t *out_tag_len, const uint8_t *key, + size_t key_len, size_t tag_len) { + const size_t key_bits = key_len * 8; + + if (key_bits != 128 && key_bits != 256) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH); + return 0; // EVP_AEAD_CTX_init should catch this. 
+ } + + if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) { + tag_len = EVP_AEAD_AES_GCM_TAG_LEN; + } + + if (tag_len > EVP_AEAD_AES_GCM_TAG_LEN) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TAG_TOO_LARGE); + return 0; + } + + gcm_ctx->ctr = + aes_ctr_set_key(&gcm_ctx->ks.ks, &gcm_ctx->gcm, NULL, key, key_len); + *out_tag_len = tag_len; + return 1; +} + +static int aead_aes_gcm_init(EVP_AEAD_CTX *ctx, const uint8_t *key, + size_t key_len, size_t requested_tag_len) { + struct aead_aes_gcm_ctx *gcm_ctx; + gcm_ctx = OPENSSL_malloc(sizeof(struct aead_aes_gcm_ctx)); + if (gcm_ctx == NULL) { + return 0; + } + + size_t actual_tag_len; + if (!aead_aes_gcm_init_impl(gcm_ctx, &actual_tag_len, key, key_len, + requested_tag_len)) { + OPENSSL_free(gcm_ctx); + return 0; + } + + ctx->aead_state = gcm_ctx; + ctx->tag_len = actual_tag_len; + return 1; +} + +static void aead_aes_gcm_cleanup(EVP_AEAD_CTX *ctx) { + OPENSSL_free(ctx->aead_state); +} + +static int aead_aes_gcm_seal_scatter(const EVP_AEAD_CTX *ctx, uint8_t *out, + uint8_t *out_tag, size_t *out_tag_len, + size_t max_out_tag_len, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *extra_in, + size_t extra_in_len, + const uint8_t *ad, size_t ad_len) { + const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state; + GCM128_CONTEXT gcm; + + if (extra_in_len + ctx->tag_len < ctx->tag_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_TOO_LARGE); + return 0; + } + if (max_out_tag_len < extra_in_len + ctx->tag_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BUFFER_TOO_SMALL); + return 0; + } + if (nonce_len == 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); + return 0; + } + + const AES_KEY *key = &gcm_ctx->ks.ks; + + OPENSSL_memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm)); + CRYPTO_gcm128_setiv(&gcm, key, nonce, nonce_len); + + if (ad_len > 0 && !CRYPTO_gcm128_aad(&gcm, ad, ad_len)) { + return 0; + } + + if (gcm_ctx->ctr) { + if (!CRYPTO_gcm128_encrypt_ctr32(&gcm, key, in, out, in_len, + gcm_ctx->ctr)) { + return 0; + } + } else { + if (!CRYPTO_gcm128_encrypt(&gcm, key, in, out, in_len)) { + return 0; + } + } + + if (extra_in_len) { + if (gcm_ctx->ctr) { + if (!CRYPTO_gcm128_encrypt_ctr32(&gcm, key, extra_in, out_tag, + extra_in_len, gcm_ctx->ctr)) { + return 0; + } + } else { + if (!CRYPTO_gcm128_encrypt(&gcm, key, extra_in, out_tag, extra_in_len)) { + return 0; + } + } + } + + CRYPTO_gcm128_tag(&gcm, out_tag + extra_in_len, ctx->tag_len); + *out_tag_len = ctx->tag_len + extra_in_len; + + return 1; +} + +static int aead_aes_gcm_open_gather(const EVP_AEAD_CTX *ctx, uint8_t *out, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *in_tag, size_t in_tag_len, + const uint8_t *ad, size_t ad_len) { + const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state; + uint8_t tag[EVP_AEAD_AES_GCM_TAG_LEN]; + GCM128_CONTEXT gcm; + + if (nonce_len == 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE_SIZE); + return 0; + } + + if (in_tag_len != ctx->tag_len) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + const AES_KEY *key = &gcm_ctx->ks.ks; + + OPENSSL_memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm)); + CRYPTO_gcm128_setiv(&gcm, key, nonce, nonce_len); + + if (!CRYPTO_gcm128_aad(&gcm, ad, ad_len)) { + return 0; + } + + if (gcm_ctx->ctr) { + if (!CRYPTO_gcm128_decrypt_ctr32(&gcm, key, in, out, in_len, + gcm_ctx->ctr)) { + return 0; + } + } else { + if (!CRYPTO_gcm128_decrypt(&gcm, key, in, out, in_len)) { + return 0; + } + } + + CRYPTO_gcm128_tag(&gcm, tag, ctx->tag_len); + if 
(CRYPTO_memcmp(tag, in_tag, ctx->tag_len) != 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT); + return 0; + } + + return 1; +} + +DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm) { + memset(out, 0, sizeof(EVP_AEAD)); + + out->key_len = 16; + out->nonce_len = 12; + out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; + out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; + out->seal_scatter_supports_extra_in = 1; + + out->init = aead_aes_gcm_init; + out->cleanup = aead_aes_gcm_cleanup; + out->seal_scatter = aead_aes_gcm_seal_scatter; + out->open_gather = aead_aes_gcm_open_gather; +} + +DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm) { + memset(out, 0, sizeof(EVP_AEAD)); + + out->key_len = 32; + out->nonce_len = 12; + out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; + out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; + out->seal_scatter_supports_extra_in = 1; + + out->init = aead_aes_gcm_init; + out->cleanup = aead_aes_gcm_cleanup; + out->seal_scatter = aead_aes_gcm_seal_scatter; + out->open_gather = aead_aes_gcm_open_gather; +} + +static int aead_aes_gcm_tls12_init(EVP_AEAD_CTX *ctx, const uint8_t *key, + size_t key_len, size_t requested_tag_len) { + struct aead_aes_gcm_tls12_ctx *gcm_ctx; + gcm_ctx = OPENSSL_malloc(sizeof(struct aead_aes_gcm_tls12_ctx)); + if (gcm_ctx == NULL) { + return 0; + } + + gcm_ctx->counter = 0; + + size_t actual_tag_len; + if (!aead_aes_gcm_init_impl(&gcm_ctx->gcm_ctx, &actual_tag_len, key, key_len, + requested_tag_len)) { + OPENSSL_free(gcm_ctx); + return 0; + } + + ctx->aead_state = gcm_ctx; + ctx->tag_len = actual_tag_len; + return 1; +} + +static void aead_aes_gcm_tls12_cleanup(EVP_AEAD_CTX *ctx) { + OPENSSL_free(ctx->aead_state); +} + +static int aead_aes_gcm_tls12_seal_scatter( + const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, + size_t *out_tag_len, size_t max_out_tag_len, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *extra_in, + size_t extra_in_len, const uint8_t *ad, size_t ad_len) { + struct aead_aes_gcm_tls12_ctx *gcm_ctx = ctx->aead_state; + if (gcm_ctx->counter == UINT64_MAX) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE); + return 0; + } + + if (nonce_len != 12) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_UNSUPPORTED_NONCE_SIZE); + return 0; + } + + const uint64_t be_counter = CRYPTO_bswap8(gcm_ctx->counter); + if (OPENSSL_memcmp((uint8_t *)&be_counter, nonce + nonce_len - 8, 8) != 0) { + OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_INVALID_NONCE); + return 0; + } + + gcm_ctx->counter++; + + return aead_aes_gcm_seal_scatter(ctx, out, out_tag, out_tag_len, + max_out_tag_len, nonce, nonce_len, in, + in_len, extra_in, extra_in_len, ad, ad_len); +} + +DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_128_gcm_tls12) { + memset(out, 0, sizeof(EVP_AEAD)); + + out->key_len = 16; + out->nonce_len = 12; + out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; + out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; + out->seal_scatter_supports_extra_in = 1; + + out->init = aead_aes_gcm_tls12_init; + out->cleanup = aead_aes_gcm_tls12_cleanup; + out->seal_scatter = aead_aes_gcm_tls12_seal_scatter; + out->open_gather = aead_aes_gcm_open_gather; +} + +DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm_tls12) { + memset(out, 0, sizeof(EVP_AEAD)); + + out->key_len = 32; + out->nonce_len = 12; + out->overhead = EVP_AEAD_AES_GCM_TAG_LEN; + out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN; + out->seal_scatter_supports_extra_in = 1; + + out->init = aead_aes_gcm_tls12_init; + out->cleanup = aead_aes_gcm_tls12_cleanup; + out->seal_scatter = 
aead_aes_gcm_tls12_seal_scatter; + out->open_gather = aead_aes_gcm_open_gather; +} + +int EVP_has_aes_hardware(void) { +#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) + return aesni_capable() && crypto_gcm_clmul_enabled(); +#elif defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) + return hwaes_capable() && CRYPTO_is_ARMv8_PMULL_capable(); +#else + return 0; +#endif +} diff --git a/Sources/BoringSSL/crypto/cipher/e_des.c b/Sources/BoringSSL/crypto/fipsmodule/cipher/e_des.c similarity index 75% rename from Sources/BoringSSL/crypto/cipher/e_des.c rename to Sources/BoringSSL/crypto/fipsmodule/cipher/e_des.c index 6834a42c3..eaba6d7ae 100644 --- a/Sources/BoringSSL/crypto/cipher/e_des.c +++ b/Sources/BoringSSL/crypto/fipsmodule/cipher/e_des.c @@ -59,6 +59,7 @@ #include #include "internal.h" +#include "../delocate.h" typedef struct { @@ -87,14 +88,17 @@ static int des_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, return 1; } -static const EVP_CIPHER des_cbc = { - NID_des_cbc, 8 /* block_size */, 8 /* key_size */, - 8 /* iv_len */, sizeof(EVP_DES_KEY), EVP_CIPH_CBC_MODE, - NULL /* app_data */, des_init_key, des_cbc_cipher, - NULL /* cleanup */, NULL /* ctrl */, }; - -const EVP_CIPHER *EVP_des_cbc(void) { return &des_cbc; } - +DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_des_cbc) { + memset(out, 0, sizeof(EVP_CIPHER)); + out->nid = NID_des_cbc; + out->block_size = 8; + out->key_len = 8; + out->iv_len = 8; + out->ctx_size = sizeof(EVP_DES_KEY); + out->flags = EVP_CIPH_CBC_MODE; + out->init = des_init_key; + out->cipher = des_cbc_cipher; +} static int des_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len) { @@ -111,14 +115,17 @@ static int des_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, return 1; } -static const EVP_CIPHER des_ecb = { - NID_des_ecb, 8 /* block_size */, 8 /* key_size */, - 0 /* iv_len */, sizeof(EVP_DES_KEY), EVP_CIPH_ECB_MODE, - NULL /* app_data */, des_init_key, des_ecb_cipher, - NULL /* cleanup */, NULL /* ctrl */, }; - -const EVP_CIPHER *EVP_des_ecb(void) { return &des_ecb; } - +DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_des_ecb) { + memset(out, 0, sizeof(EVP_CIPHER)); + out->nid = NID_des_ecb; + out->block_size = 8; + out->key_len = 8; + out->iv_len = 0; + out->ctx_size = sizeof(EVP_DES_KEY); + out->flags = EVP_CIPH_ECB_MODE; + out->init = des_init_key; + out->cipher = des_ecb_cipher; +} typedef struct { union { @@ -127,7 +134,6 @@ typedef struct { } ks; } DES_EDE_KEY; - static int des_ede3_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { DES_cblock *deskey = (DES_cblock *)key; @@ -150,14 +156,17 @@ static int des_ede3_cbc_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, return 1; } -static const EVP_CIPHER des_ede3_cbc = { - NID_des_ede3_cbc, 8 /* block_size */, 24 /* key_size */, - 8 /* iv_len */, sizeof(DES_EDE_KEY), EVP_CIPH_CBC_MODE, - NULL /* app_data */, des_ede3_init_key, des_ede3_cbc_cipher, - NULL /* cleanup */, NULL /* ctrl */, }; - -const EVP_CIPHER *EVP_des_ede3_cbc(void) { return &des_ede3_cbc; } - +DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_des_ede3_cbc) { + memset(out, 0, sizeof(EVP_CIPHER)); + out->nid = NID_des_ede3_cbc; + out->block_size = 8; + out->key_len = 24; + out->iv_len = 8; + out->ctx_size = sizeof(DES_EDE_KEY); + out->flags = EVP_CIPH_CBC_MODE; + out->init = des_ede3_init_key; + out->cipher = des_ede3_cbc_cipher; +} static int des_ede_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, int enc) { @@ -171,14 +180,17 @@ static int 
des_ede_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key, return 1; } -static const EVP_CIPHER des_ede_cbc = { - NID_des_ede_cbc, 8 /* block_size */, 16 /* key_size */, - 8 /* iv_len */, sizeof(DES_EDE_KEY), EVP_CIPH_CBC_MODE, - NULL /* app_data */, des_ede_init_key , des_ede3_cbc_cipher, - NULL /* cleanup */, NULL /* ctrl */, }; - -const EVP_CIPHER *EVP_des_ede_cbc(void) { return &des_ede_cbc; } - +DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_des_ede_cbc) { + memset(out, 0, sizeof(EVP_CIPHER)); + out->nid = NID_des_ede_cbc; + out->block_size = 8; + out->key_len = 16; + out->iv_len = 8; + out->ctx_size = sizeof(DES_EDE_KEY); + out->flags = EVP_CIPH_CBC_MODE; + out->init = des_ede_init_key; + out->cipher = des_ede3_cbc_cipher; +} static int des_ede_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len) { @@ -196,10 +208,26 @@ static int des_ede_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, return 1; } -static const EVP_CIPHER des_ede_ecb = { - NID_des_ede_cbc, 8 /* block_size */, 16 /* key_size */, - 0 /* iv_len */, sizeof(DES_EDE_KEY), EVP_CIPH_ECB_MODE, - NULL /* app_data */, des_ede_init_key , des_ede_ecb_cipher, - NULL /* cleanup */, NULL /* ctrl */, }; +DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_des_ede) { + memset(out, 0, sizeof(EVP_CIPHER)); + out->nid = NID_des_ede_ecb; + out->block_size = 8; + out->key_len = 16; + out->iv_len = 0; + out->ctx_size = sizeof(DES_EDE_KEY); + out->flags = EVP_CIPH_ECB_MODE; + out->init = des_ede_init_key; + out->cipher = des_ede_ecb_cipher; +} -const EVP_CIPHER *EVP_des_ede(void) { return &des_ede_ecb; } +DEFINE_METHOD_FUNCTION(EVP_CIPHER, EVP_des_ede3) { + memset(out, 0, sizeof(EVP_CIPHER)); + out->nid = NID_des_ede3_ecb; + out->block_size = 8; + out->key_len = 24; + out->iv_len = 0; + out->ctx_size = sizeof(DES_EDE_KEY); + out->flags = EVP_CIPH_ECB_MODE; + out->init = des_ede3_init_key; + out->cipher = des_ede_ecb_cipher; +} diff --git a/Sources/BoringSSL/crypto/cipher/internal.h b/Sources/BoringSSL/crypto/fipsmodule/cipher/internal.h similarity index 54% rename from Sources/BoringSSL/crypto/cipher/internal.h rename to Sources/BoringSSL/crypto/fipsmodule/cipher/internal.h index d29ce599c..7b5f23f0c 100644 --- a/Sources/BoringSSL/crypto/cipher/internal.h +++ b/Sources/BoringSSL/crypto/fipsmodule/cipher/internal.h @@ -60,103 +60,70 @@ #include #include +#include + +#include "../../internal.h" +#include "../modes/internal.h" #if defined(__cplusplus) extern "C" { #endif -/* EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode. */ +// EVP_CIPH_MODE_MASK contains the bits of |flags| that represent the mode. #define EVP_CIPH_MODE_MASK 0x3f - -/* EVP_AEAD represents a specific AEAD algorithm. */ +// EVP_AEAD represents a specific AEAD algorithm. struct evp_aead_st { uint8_t key_len; uint8_t nonce_len; uint8_t overhead; uint8_t max_tag_len; + int seal_scatter_supports_extra_in; - /* init initialises an |EVP_AEAD_CTX|. If this call returns zero then - * |cleanup| will not be called for that context. */ + // init initialises an |EVP_AEAD_CTX|. If this call returns zero then + // |cleanup| will not be called for that context. 
int (*init)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len, size_t tag_len); int (*init_with_direction)(EVP_AEAD_CTX *, const uint8_t *key, size_t key_len, size_t tag_len, enum evp_aead_direction_t dir); void (*cleanup)(EVP_AEAD_CTX *); - int (*seal)(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, - size_t max_out_len, const uint8_t *nonce, size_t nonce_len, - const uint8_t *in, size_t in_len, const uint8_t *ad, - size_t ad_len); - int (*open)(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len); + int (*seal_scatter)(const EVP_AEAD_CTX *ctx, uint8_t *out, uint8_t *out_tag, + size_t *out_tag_len, size_t max_out_tag_len, + const uint8_t *nonce, size_t nonce_len, const uint8_t *in, + size_t in_len, const uint8_t *extra_in, + size_t extra_in_len, const uint8_t *ad, size_t ad_len); + + int (*open_gather)(const EVP_AEAD_CTX *ctx, uint8_t *out, + const uint8_t *nonce, size_t nonce_len, const uint8_t *in, + size_t in_len, const uint8_t *in_tag, size_t in_tag_len, + const uint8_t *ad, size_t ad_len); + int (*get_iv)(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv, size_t *out_len); -}; + size_t (*tag_len)(const EVP_AEAD_CTX *ctx, size_t in_Len, + size_t extra_in_len); +}; -/* EVP_tls_cbc_get_padding determines the padding from the decrypted, TLS, CBC - * record in |in|. This decrypted record should not include any "decrypted" - * explicit IV. If the record is publicly invalid, it returns zero. Otherwise, - * it returns one and sets |*out_padding_ok| to all ones (0xfff..f) if the - * padding is valid and zero otherwise. It then sets |*out_len| to the length - * with the padding removed or |in_len| if invalid. - * - * If the function returns one, it runs in time independent of the contents of - * |in|. It is also guaranteed that |*out_len| >= |mac_size|, satisfying - * |EVP_tls_cbc_copy_mac|'s precondition. */ -int EVP_tls_cbc_remove_padding(unsigned *out_padding_ok, unsigned *out_len, - const uint8_t *in, unsigned in_len, - unsigned block_size, unsigned mac_size); - -/* EVP_tls_cbc_copy_mac copies |md_size| bytes from the end of the first - * |in_len| bytes of |in| to |out| in constant time (independent of the concrete - * value of |in_len|, which may vary within a 256-byte window). |in| must point - * to a buffer of |orig_len| bytes. - * - * On entry: - * orig_len >= in_len >= md_size - * md_size <= EVP_MAX_MD_SIZE */ -void EVP_tls_cbc_copy_mac(uint8_t *out, unsigned md_size, - const uint8_t *in, unsigned in_len, - unsigned orig_len); - -/* EVP_tls_cbc_record_digest_supported returns 1 iff |md| is a hash function - * which EVP_tls_cbc_digest_record supports. */ -int EVP_tls_cbc_record_digest_supported(const EVP_MD *md); - -/* EVP_tls_cbc_digest_record computes the MAC of a decrypted, padded TLS - * record. - * - * md: the hash function used in the HMAC. - * EVP_tls_cbc_record_digest_supported must return true for this hash. - * md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written. - * md_out_size: the number of output bytes is written here. - * header: the 13-byte, TLS record header. - * data: the record data itself - * data_plus_mac_size: the secret, reported length of the data and MAC - * once the padding has been removed. - * data_plus_mac_plus_padding_size: the public length of the whole - * record, including padding. 
- * - * On entry: by virtue of having been through one of the remove_padding - * functions, above, we know that data_plus_mac_size is large enough to contain - * a padding byte and MAC. (If the padding was invalid, it might contain the - * padding too. ) */ -int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out, - size_t *md_out_size, const uint8_t header[13], - const uint8_t *data, size_t data_plus_mac_size, - size_t data_plus_mac_plus_padding_size, - const uint8_t *mac_secret, - unsigned mac_secret_length); +// aes_ctr_set_key initialises |*aes_key| using |key_bytes| bytes from |key|, +// where |key_bytes| must either be 16, 24 or 32. If not NULL, |*out_block| is +// set to a function that encrypts single blocks. If not NULL, |*gcm_ctx| is +// initialised to do GHASH with the given key. It returns a function for +// optimised CTR-mode, or NULL if CTR-mode should be built using +// |*out_block|. +ctr128_f aes_ctr_set_key(AES_KEY *aes_key, GCM128_CONTEXT *gcm_ctx, + block128_f *out_block, const uint8_t *key, + size_t key_bytes); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CIPHER_INTERNAL_H */ +#endif // OPENSSL_HEADER_CIPHER_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/fipsmodule/delocate.h b/Sources/BoringSSL/crypto/fipsmodule/delocate.h new file mode 100644 index 000000000..065a21ca4 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/delocate.h @@ -0,0 +1,88 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#ifndef OPENSSL_HEADER_FIPSMODULE_DELOCATE_H +#define OPENSSL_HEADER_FIPSMODULE_DELOCATE_H + +#include + +#include "../internal.h" + + +#if defined(BORINGSSL_FIPS) && !defined(OPENSSL_ASAN) && !defined(OPENSSL_MSAN) +#define DEFINE_BSS_GET(type, name) \ + static type name __attribute__((used)); \ + type *name##_bss_get(void); +// For FIPS builds we require that CRYPTO_ONCE_INIT be zero. +#define DEFINE_STATIC_ONCE(name) DEFINE_BSS_GET(CRYPTO_once_t, name) +// For FIPS builds we require that CRYPTO_STATIC_MUTEX_INIT be zero. +#define DEFINE_STATIC_MUTEX(name) \ + DEFINE_BSS_GET(struct CRYPTO_STATIC_MUTEX, name) +// For FIPS builds we require that CRYPTO_EX_DATA_CLASS_INIT be zero. 
+#define DEFINE_STATIC_EX_DATA_CLASS(name) \ + DEFINE_BSS_GET(CRYPTO_EX_DATA_CLASS, name) +#else +#define DEFINE_BSS_GET(type, name) \ + static type name; \ + static type *name##_bss_get(void) { return &name; } +#define DEFINE_STATIC_ONCE(name) \ + static CRYPTO_once_t name = CRYPTO_ONCE_INIT; \ + static CRYPTO_once_t *name##_bss_get(void) { return &name; } +#define DEFINE_STATIC_MUTEX(name) \ + static struct CRYPTO_STATIC_MUTEX name = CRYPTO_STATIC_MUTEX_INIT; \ + static struct CRYPTO_STATIC_MUTEX *name##_bss_get(void) { return &name; } +#define DEFINE_STATIC_EX_DATA_CLASS(name) \ + static CRYPTO_EX_DATA_CLASS name = CRYPTO_EX_DATA_CLASS_INIT; \ + static CRYPTO_EX_DATA_CLASS *name##_bss_get(void) { return &name; } +#endif + +#define DEFINE_DATA(type, name, accessor_decorations) \ + DEFINE_BSS_GET(type, name##_storage) \ + DEFINE_STATIC_ONCE(name##_once) \ + static void name##_do_init(type *out); \ + static void name##_init(void) { name##_do_init(name##_storage_bss_get()); } \ + accessor_decorations type *name(void) { \ + CRYPTO_once(name##_once_bss_get(), name##_init); \ + /* See http://c-faq.com/ansi/constmismatch.html for why the following \ + * cast is needed. */ \ + return (const type *)name##_storage_bss_get(); \ + } \ + static void name##_do_init(type *out) + +// DEFINE_METHOD_FUNCTION defines a function named |name| which returns a +// method table of type const |type|*. In FIPS mode, to avoid rel.ro data, it +// is split into a CRYPTO_once_t-guarded initializer in the module and +// unhashed, non-module accessor functions to space reserved in the BSS. The +// method table is initialized by a caller-supplied function which takes a +// parameter named |out| of type |type|*. The caller should follow the macro +// invocation with the body of this function: +// +// DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md4) { +// out->type = NID_md4; +// out->md_size = MD4_DIGEST_LENGTH; +// out->flags = 0; +// out->init = md4_init; +// out->update = md4_update; +// out->final = md4_final; +// out->block_size = 64; +// out->ctx_size = sizeof(MD4_CTX); +// } +// +// This mechanism does not use a static initializer because their execution +// order is undefined. See FIPS.md for more details. 
+#define DEFINE_METHOD_FUNCTION(type, name) DEFINE_DATA(type, name, const) + +#define DEFINE_LOCAL_DATA(type, name) DEFINE_DATA(type, name, static const) + +#endif // OPENSSL_HEADER_FIPSMODULE_DELOCATE_H diff --git a/Sources/BoringSSL/crypto/des/des.c b/Sources/BoringSSL/crypto/fipsmodule/des/des.c similarity index 93% rename from Sources/BoringSSL/crypto/des/des.c rename to Sources/BoringSSL/crypto/fipsmodule/des/des.c index cada7d104..2b0fdcd70 100644 --- a/Sources/BoringSSL/crypto/des/des.c +++ b/Sources/BoringSSL/crypto/fipsmodule/des/des.c @@ -62,7 +62,7 @@ static const uint32_t des_skb[8][64] = { - {/* for C bits (numbered as per FIPS 46) 1 2 3 4 5 6 */ + { // for C bits (numbered as per FIPS 46) 1 2 3 4 5 6 0x00000000L, 0x00000010L, 0x20000000L, 0x20000010L, 0x00010000L, 0x00010010L, 0x20010000L, 0x20010010L, 0x00000800L, 0x00000810L, 0x20000800L, 0x20000810L, 0x00010800L, 0x00010810L, 0x20010800L, @@ -76,7 +76,7 @@ static const uint32_t des_skb[8][64] = { 0x20080020L, 0x20080030L, 0x00090020L, 0x00090030L, 0x20090020L, 0x20090030L, 0x00080820L, 0x00080830L, 0x20080820L, 0x20080830L, 0x00090820L, 0x00090830L, 0x20090820L, 0x20090830L, }, - {/* for C bits (numbered as per FIPS 46) 7 8 10 11 12 13 */ + { // for C bits (numbered as per FIPS 46) 7 8 10 11 12 13 0x00000000L, 0x02000000L, 0x00002000L, 0x02002000L, 0x00200000L, 0x02200000L, 0x00202000L, 0x02202000L, 0x00000004L, 0x02000004L, 0x00002004L, 0x02002004L, 0x00200004L, 0x02200004L, 0x00202004L, @@ -90,7 +90,7 @@ static const uint32_t des_skb[8][64] = { 0x10002400L, 0x12002400L, 0x10200400L, 0x12200400L, 0x10202400L, 0x12202400L, 0x10000404L, 0x12000404L, 0x10002404L, 0x12002404L, 0x10200404L, 0x12200404L, 0x10202404L, 0x12202404L, }, - {/* for C bits (numbered as per FIPS 46) 14 15 16 17 19 20 */ + { // for C bits (numbered as per FIPS 46) 14 15 16 17 19 20 0x00000000L, 0x00000001L, 0x00040000L, 0x00040001L, 0x01000000L, 0x01000001L, 0x01040000L, 0x01040001L, 0x00000002L, 0x00000003L, 0x00040002L, 0x00040003L, 0x01000002L, 0x01000003L, 0x01040002L, @@ -104,7 +104,7 @@ static const uint32_t des_skb[8][64] = { 0x08040200L, 0x08040201L, 0x09000200L, 0x09000201L, 0x09040200L, 0x09040201L, 0x08000202L, 0x08000203L, 0x08040202L, 0x08040203L, 0x09000202L, 0x09000203L, 0x09040202L, 0x09040203L, }, - {/* for C bits (numbered as per FIPS 46) 21 23 24 26 27 28 */ + { // for C bits (numbered as per FIPS 46) 21 23 24 26 27 28 0x00000000L, 0x00100000L, 0x00000100L, 0x00100100L, 0x00000008L, 0x00100008L, 0x00000108L, 0x00100108L, 0x00001000L, 0x00101000L, 0x00001100L, 0x00101100L, 0x00001008L, 0x00101008L, 0x00001108L, @@ -118,7 +118,7 @@ static const uint32_t des_skb[8][64] = { 0x04020100L, 0x04120100L, 0x04020008L, 0x04120008L, 0x04020108L, 0x04120108L, 0x04021000L, 0x04121000L, 0x04021100L, 0x04121100L, 0x04021008L, 0x04121008L, 0x04021108L, 0x04121108L, }, - {/* for D bits (numbered as per FIPS 46) 1 2 3 4 5 6 */ + { // for D bits (numbered as per FIPS 46) 1 2 3 4 5 6 0x00000000L, 0x10000000L, 0x00010000L, 0x10010000L, 0x00000004L, 0x10000004L, 0x00010004L, 0x10010004L, 0x20000000L, 0x30000000L, 0x20010000L, 0x30010000L, 0x20000004L, 0x30000004L, 0x20010004L, @@ -132,7 +132,7 @@ static const uint32_t des_skb[8][64] = { 0x00111000L, 0x10111000L, 0x00101004L, 0x10101004L, 0x00111004L, 0x10111004L, 0x20101000L, 0x30101000L, 0x20111000L, 0x30111000L, 0x20101004L, 0x30101004L, 0x20111004L, 0x30111004L, }, - {/* for D bits (numbered as per FIPS 46) 8 9 11 12 13 14 */ + { // for D bits (numbered as per FIPS 46) 8 9 11 12 13 14 0x00000000L, 
0x08000000L, 0x00000008L, 0x08000008L, 0x00000400L, 0x08000400L, 0x00000408L, 0x08000408L, 0x00020000L, 0x08020000L, 0x00020008L, 0x08020008L, 0x00020400L, 0x08020400L, 0x00020408L, @@ -146,7 +146,7 @@ static const uint32_t des_skb[8][64] = { 0x02000009L, 0x0A000009L, 0x02000401L, 0x0A000401L, 0x02000409L, 0x0A000409L, 0x02020001L, 0x0A020001L, 0x02020009L, 0x0A020009L, 0x02020401L, 0x0A020401L, 0x02020409L, 0x0A020409L, }, - {/* for D bits (numbered as per FIPS 46) 16 17 18 19 20 21 */ + { // for D bits (numbered as per FIPS 46) 16 17 18 19 20 21 0x00000000L, 0x00000100L, 0x00080000L, 0x00080100L, 0x01000000L, 0x01000100L, 0x01080000L, 0x01080100L, 0x00000010L, 0x00000110L, 0x00080010L, 0x00080110L, 0x01000010L, 0x01000110L, 0x01080010L, @@ -160,7 +160,7 @@ static const uint32_t des_skb[8][64] = { 0x00280200L, 0x00280300L, 0x01200200L, 0x01200300L, 0x01280200L, 0x01280300L, 0x00200210L, 0x00200310L, 0x00280210L, 0x00280310L, 0x01200210L, 0x01200310L, 0x01280210L, 0x01280310L, }, - {/* for D bits (numbered as per FIPS 46) 22 23 24 25 27 28 */ + { // for D bits (numbered as per FIPS 46) 22 23 24 25 27 28 0x00000000L, 0x04000000L, 0x00040000L, 0x04040000L, 0x00000002L, 0x04000002L, 0x00040002L, 0x04040002L, 0x00002000L, 0x04002000L, 0x00042000L, 0x04042000L, 0x00002002L, 0x04002002L, 0x00042002L, @@ -176,7 +176,7 @@ static const uint32_t des_skb[8][64] = { 0x00002822L, 0x04002822L, 0x00042822L, 0x04042822L, }}; static const uint32_t DES_SPtrans[8][64] = { - {/* nibble 0 */ + { // nibble 0 0x02080800L, 0x00080000L, 0x02000002L, 0x02080802L, 0x02000000L, 0x00080802L, 0x00080002L, 0x02000002L, 0x00080802L, 0x02080800L, 0x02080000L, 0x00000802L, 0x02000802L, 0x02000000L, 0x00000000L, @@ -190,7 +190,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x02080000L, 0x02000802L, 0x02000000L, 0x00000802L, 0x00080002L, 0x00000000L, 0x00080000L, 0x02000000L, 0x02000802L, 0x02080800L, 0x00000002L, 0x02080002L, 0x00000800L, 0x00080802L, }, - {/* nibble 1 */ + { // nibble 1 0x40108010L, 0x00000000L, 0x00108000L, 0x40100000L, 0x40000010L, 0x00008010L, 0x40008000L, 0x00108000L, 0x00008000L, 0x40100010L, 0x00000010L, 0x40008000L, 0x00100010L, 0x40108000L, 0x40100000L, @@ -204,7 +204,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x00000000L, 0x40000010L, 0x00000010L, 0x40108010L, 0x00108000L, 0x40100000L, 0x40100010L, 0x00100000L, 0x00008010L, 0x40008000L, 0x40008010L, 0x00000010L, 0x40100000L, 0x00108000L, }, - {/* nibble 2 */ + { // nibble 2 0x04000001L, 0x04040100L, 0x00000100L, 0x04000101L, 0x00040001L, 0x04000000L, 0x04000101L, 0x00040100L, 0x04000100L, 0x00040000L, 0x04040000L, 0x00000001L, 0x04040101L, 0x00000101L, 0x00000001L, @@ -218,7 +218,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x04000000L, 0x04040101L, 0x00040000L, 0x04000100L, 0x04000101L, 0x00040100L, 0x04000100L, 0x00000000L, 0x04040001L, 0x00000101L, 0x04000001L, 0x00040101L, 0x00000100L, 0x04040000L, }, - {/* nibble 3 */ + { // nibble 3 0x00401008L, 0x10001000L, 0x00000008L, 0x10401008L, 0x00000000L, 0x10400000L, 0x10001008L, 0x00400008L, 0x10401000L, 0x10000008L, 0x10000000L, 0x00001008L, 0x10000008L, 0x00401008L, 0x00400000L, @@ -232,7 +232,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x00401008L, 0x00400000L, 0x10401008L, 0x00000008L, 0x10001000L, 0x00401008L, 0x00400008L, 0x00401000L, 0x10400000L, 0x10001008L, 0x00001008L, 0x10000000L, 0x10000008L, 0x10401000L, }, - {/* nibble 4 */ + { // nibble 4 0x08000000L, 0x00010000L, 0x00000400L, 0x08010420L, 0x08010020L, 0x08000400L, 0x00010420L, 0x08010000L, 0x00010000L, 
0x00000020L, 0x08000020L, 0x00010400L, 0x08000420L, 0x08010020L, 0x08010400L, @@ -246,7 +246,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x00000000L, 0x08010420L, 0x08010020L, 0x08010400L, 0x00000420L, 0x00010000L, 0x00010400L, 0x08010020L, 0x08000400L, 0x00000420L, 0x00000020L, 0x00010420L, 0x08010000L, 0x08000020L, }, - {/* nibble 5 */ + { // nibble 5 0x80000040L, 0x00200040L, 0x00000000L, 0x80202000L, 0x00200040L, 0x00002000L, 0x80002040L, 0x00200000L, 0x00002040L, 0x80202040L, 0x00202000L, 0x80000000L, 0x80002000L, 0x80000040L, 0x80200000L, @@ -260,7 +260,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x00200000L, 0x80002040L, 0x80000040L, 0x80200000L, 0x00202040L, 0x00000000L, 0x00002000L, 0x80000040L, 0x80002040L, 0x80202000L, 0x80200000L, 0x00002040L, 0x00000040L, 0x80200040L, }, - {/* nibble 6 */ + { // nibble 6 0x00004000L, 0x00000200L, 0x01000200L, 0x01000004L, 0x01004204L, 0x00004004L, 0x00004200L, 0x00000000L, 0x01000000L, 0x01000204L, 0x00000204L, 0x01004000L, 0x00000004L, 0x01004200L, 0x01004000L, @@ -274,7 +274,7 @@ static const uint32_t DES_SPtrans[8][64] = { 0x01000200L, 0x00004200L, 0x00000204L, 0x00004000L, 0x01004204L, 0x01000000L, 0x01004200L, 0x00000004L, 0x00004004L, 0x01004204L, 0x01000004L, 0x01004200L, 0x01004000L, 0x00004004L, }, - {/* nibble 7 */ + { // nibble 7 0x20800080L, 0x20820000L, 0x00020080L, 0x00000000L, 0x20020000L, 0x00800080L, 0x20800000L, 0x20820080L, 0x00000080L, 0x20000000L, 0x00820000L, 0x00020080L, 0x00820080L, 0x20020080L, 0x20000080L, @@ -305,9 +305,9 @@ void DES_set_key(const DES_cblock *key, DES_key_schedule *schedule) { c2l(in, c); c2l(in, d); - /* do PC1 in 47 simple operations :-) - * Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov) - * for the inspiration. :-) */ + // do PC1 in 47 simple operations :-) + // Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov) + // for the inspiration. :-) PERM_OP(d, c, t, 4, 0x0f0f0f0fL); HPERM_OP(c, t, -2, 0xcccc0000L); HPERM_OP(d, t, -2, 0xcccc0000L); @@ -328,8 +328,8 @@ void DES_set_key(const DES_cblock *key, DES_key_schedule *schedule) { } c &= 0x0fffffffL; d &= 0x0fffffffL; - /* could be a few less shifts but I am to lazy at this - * point in time to investigate */ + // could be a few less shifts but I am to lazy at this + // point in time to investigate s = des_skb[0][(c) & 0x3f] | des_skb[1][((c >> 6L) & 0x03) | ((c >> 7L) & 0x3c)] | des_skb[2][((c >> 13L) & 0x0f) | ((c >> 14L) & 0x30)] | @@ -340,7 +340,7 @@ void DES_set_key(const DES_cblock *key, DES_key_schedule *schedule) { des_skb[6][(d >> 15L) & 0x3f] | des_skb[7][((d >> 21L) & 0x0f) | ((d >> 22L) & 0x30)]; - /* table contained 0213 4657 */ + // table contained 0213 4657 t2 = ((t << 16L) | (s & 0x0000ffffL)) & 0xffffffffL; schedule->subkeys[i][0] = ROTATE(t2, 30) & 0xffffffffL; @@ -385,18 +385,18 @@ static void DES_encrypt1(uint32_t *data, const DES_key_schedule *ks, int enc) { l = data[1]; IP(r, l); - /* Things have been modified so that the initial rotate is done outside - * the loop. This required the DES_SPtrans values in sp.h to be - * rotated 1 bit to the right. One perl script later and things have a - * 5% speed up on a sparc2. Thanks to Richard Outerbridge - * <71755.204@CompuServe.COM> for pointing this out. */ - /* clear the top bits on machines with 8byte longs */ - /* shift left by 2 */ + // Things have been modified so that the initial rotate is done outside + // the loop. This required the DES_SPtrans values in sp.h to be + // rotated 1 bit to the right. 
One perl script later and things have a + // 5% speed up on a sparc2. Thanks to Richard Outerbridge + // <71755.204@CompuServe.COM> for pointing this out. + // clear the top bits on machines with 8byte longs + // shift left by 2 r = ROTATE(r, 29) & 0xffffffffL; l = ROTATE(l, 29) & 0xffffffffL; - /* I don't know if it is worth the effort of loop unrolling the - * inner loop */ + // I don't know if it is worth the effort of loop unrolling the + // inner loop if (enc) { D_ENCRYPT(ks, l, r, 0); D_ENCRYPT(ks, r, l, 1); @@ -433,7 +433,7 @@ static void DES_encrypt1(uint32_t *data, const DES_key_schedule *ks, int enc) { D_ENCRYPT(ks, r, l, 0); } - /* rotate and clear the top bits on machines with 8byte longs */ + // rotate and clear the top bits on machines with 8byte longs l = ROTATE(l, 3) & 0xffffffffL; r = ROTATE(r, 3) & 0xffffffffL; @@ -448,17 +448,17 @@ static void DES_encrypt2(uint32_t *data, const DES_key_schedule *ks, int enc) { r = data[0]; l = data[1]; - /* Things have been modified so that the initial rotate is done outside the - * loop. This required the DES_SPtrans values in sp.h to be rotated 1 bit to - * the right. One perl script later and things have a 5% speed up on a - * sparc2. Thanks to Richard Outerbridge <71755.204@CompuServe.COM> for - * pointing this out. */ - /* clear the top bits on machines with 8byte longs */ + // Things have been modified so that the initial rotate is done outside the + // loop. This required the DES_SPtrans values in sp.h to be rotated 1 bit to + // the right. One perl script later and things have a 5% speed up on a + // sparc2. Thanks to Richard Outerbridge <71755.204@CompuServe.COM> for + // pointing this out. + // clear the top bits on machines with 8byte longs r = ROTATE(r, 29) & 0xffffffffL; l = ROTATE(l, 29) & 0xffffffffL; - /* I don't know if it is worth the effort of loop unrolling the - * inner loop */ + // I don't know if it is worth the effort of loop unrolling the + // inner loop if (enc) { D_ENCRYPT(ks, l, r, 0); D_ENCRYPT(ks, r, l, 1); @@ -494,7 +494,7 @@ static void DES_encrypt2(uint32_t *data, const DES_key_schedule *ks, int enc) { D_ENCRYPT(ks, l, r, 1); D_ENCRYPT(ks, r, l, 0); } - /* rotate and clear the top bits on machines with 8byte longs */ + // rotate and clear the top bits on machines with 8byte longs data[0] = ROTATE(l, 3) & 0xffffffffL; data[1] = ROTATE(r, 3) & 0xffffffffL; } @@ -764,8 +764,22 @@ void DES_ede2_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, } -/* Deprecated functions. */ +// Deprecated functions. 
void DES_set_key_unchecked(const DES_cblock *key, DES_key_schedule *schedule) { DES_set_key(key, schedule); } + +#undef HPERM_OP +#undef c2l +#undef l2c +#undef c2ln +#undef l2cn +#undef PERM_OP +#undef IP +#undef FP +#undef LOAD_DATA +#undef D_ENCRYPT +#undef ITERATIONS +#undef HALF_ITERATIONS +#undef ROTATE diff --git a/Sources/BoringSSL/crypto/des/internal.h b/Sources/BoringSSL/crypto/fipsmodule/des/internal.h similarity index 91% rename from Sources/BoringSSL/crypto/des/internal.h rename to Sources/BoringSSL/crypto/fipsmodule/des/internal.h index 21eb93357..4d65ff1d0 100644 --- a/Sources/BoringSSL/crypto/des/internal.h +++ b/Sources/BoringSSL/crypto/fipsmodule/des/internal.h @@ -80,7 +80,7 @@ extern "C" { *((c)++) = (unsigned char)(((l) >> 24L) & 0xff); \ } while (0) -/* NOTE - c is not incremented as per c2l */ +// NOTE - c is not incremented as per c2l #define c2ln(c, l1, l2, n) \ do { \ (c) += (n); \ @@ -88,42 +88,56 @@ extern "C" { switch (n) { \ case 8: \ (l2) = ((uint32_t)(*(--(c)))) << 24L; \ + OPENSSL_FALLTHROUGH; \ case 7: \ (l2) |= ((uint32_t)(*(--(c)))) << 16L; \ + OPENSSL_FALLTHROUGH; \ case 6: \ (l2) |= ((uint32_t)(*(--(c)))) << 8L; \ + OPENSSL_FALLTHROUGH; \ case 5: \ (l2) |= ((uint32_t)(*(--(c)))); \ + OPENSSL_FALLTHROUGH; \ case 4: \ (l1) = ((uint32_t)(*(--(c)))) << 24L; \ + OPENSSL_FALLTHROUGH; \ case 3: \ (l1) |= ((uint32_t)(*(--(c)))) << 16L; \ + OPENSSL_FALLTHROUGH; \ case 2: \ (l1) |= ((uint32_t)(*(--(c)))) << 8L; \ + OPENSSL_FALLTHROUGH; \ case 1: \ (l1) |= ((uint32_t)(*(--(c)))); \ } \ } while (0) -/* NOTE - c is not incremented as per l2c */ +// NOTE - c is not incremented as per l2c #define l2cn(l1, l2, c, n) \ do { \ (c) += (n); \ switch (n) { \ case 8: \ *(--(c)) = (unsigned char)(((l2) >> 24L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 7: \ *(--(c)) = (unsigned char)(((l2) >> 16L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 6: \ *(--(c)) = (unsigned char)(((l2) >> 8L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 5: \ *(--(c)) = (unsigned char)(((l2)) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 4: \ *(--(c)) = (unsigned char)(((l1) >> 24L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 3: \ *(--(c)) = (unsigned char)(((l1) >> 16L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 2: \ *(--(c)) = (unsigned char)(((l1) >> 8L) & 0xff); \ + OPENSSL_FALLTHROUGH; \ case 1: \ *(--(c)) = (unsigned char)(((l1)) & 0xff); \ } \ @@ -218,7 +232,7 @@ how to use xors :-) I got it to its final state. 
#if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_DES_INTERNAL_H */ +#endif // OPENSSL_HEADER_DES_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/digest/digest.c b/Sources/BoringSSL/crypto/fipsmodule/digest/digest.c similarity index 82% rename from Sources/BoringSSL/crypto/digest/digest.c rename to Sources/BoringSSL/crypto/fipsmodule/digest/digest.c index 9c9962b53..1c35809fe 100644 --- a/Sources/BoringSSL/crypto/digest/digest.c +++ b/Sources/BoringSSL/crypto/fipsmodule/digest/digest.c @@ -63,7 +63,7 @@ #include #include "internal.h" -#include "../internal.h" +#include "../../internal.h" int EVP_MD_type(const EVP_MD *md) { return md->type; } @@ -79,7 +79,7 @@ void EVP_MD_CTX_init(EVP_MD_CTX *ctx) { OPENSSL_memset(ctx, 0, sizeof(EVP_MD_CTX)); } -EVP_MD_CTX *EVP_MD_CTX_create(void) { +EVP_MD_CTX *EVP_MD_CTX_new(void) { EVP_MD_CTX *ctx = OPENSSL_malloc(sizeof(EVP_MD_CTX)); if (ctx) { @@ -89,11 +89,10 @@ EVP_MD_CTX *EVP_MD_CTX_create(void) { return ctx; } +EVP_MD_CTX *EVP_MD_CTX_create(void) { return EVP_MD_CTX_new(); } + int EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx) { - if (ctx->digest && ctx->digest->ctx_size && ctx->md_data) { - OPENSSL_cleanse(ctx->md_data, ctx->digest->ctx_size); - OPENSSL_free(ctx->md_data); - } + OPENSSL_free(ctx->md_data); assert(ctx->pctx == NULL || ctx->pctx_ops != NULL); if (ctx->pctx_ops) { @@ -105,7 +104,7 @@ int EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx) { return 1; } -void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx) { +void EVP_MD_CTX_free(EVP_MD_CTX *ctx) { if (!ctx) { return; } @@ -114,18 +113,38 @@ void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx) { OPENSSL_free(ctx); } -int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in) { - uint8_t *tmp_buf = NULL; +void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx) { EVP_MD_CTX_free(ctx); } +int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in) { if (in == NULL || in->digest == NULL) { OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_INPUT_NOT_INITIALIZED); return 0; } - if (out->digest == in->digest) { - /* |md_data| will be the correct size in this case so it's removed from - * |out| at this point so that |EVP_MD_CTX_cleanup| doesn't free it and - * then it's reused. */ + EVP_PKEY_CTX *pctx = NULL; + assert(in->pctx == NULL || in->pctx_ops != NULL); + if (in->pctx) { + pctx = in->pctx_ops->dup(in->pctx); + if (!pctx) { + OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE); + return 0; + } + } + + uint8_t *tmp_buf; + if (out->digest != in->digest) { + assert(in->digest->ctx_size != 0); + tmp_buf = OPENSSL_malloc(in->digest->ctx_size); + if (tmp_buf == NULL) { + if (pctx) { + in->pctx_ops->free(pctx); + } + OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE); + return 0; + } + } else { + // |md_data| will be the correct size in this case. It's removed from |out| + // so that |EVP_MD_CTX_cleanup| doesn't free it, and then it's reused. 
tmp_buf = out->md_data; out->md_data = NULL; } @@ -133,28 +152,11 @@ int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in) { EVP_MD_CTX_cleanup(out); out->digest = in->digest; - if (in->md_data && in->digest->ctx_size) { - if (tmp_buf) { - out->md_data = tmp_buf; - } else { - out->md_data = OPENSSL_malloc(in->digest->ctx_size); - if (!out->md_data) { - OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE); - return 0; - } - } - OPENSSL_memcpy(out->md_data, in->md_data, in->digest->ctx_size); - } - - assert(in->pctx == NULL || in->pctx_ops != NULL); + out->md_data = tmp_buf; + OPENSSL_memcpy(out->md_data, in->md_data, in->digest->ctx_size); + out->pctx = pctx; out->pctx_ops = in->pctx_ops; - if (in->pctx && in->pctx_ops) { - out->pctx = in->pctx_ops->dup(in->pctx); - if (!out->pctx) { - EVP_MD_CTX_cleanup(out); - return 0; - } - } + assert(out->pctx == NULL || out->pctx_ops != NULL); return 1; } @@ -164,20 +166,23 @@ int EVP_MD_CTX_copy(EVP_MD_CTX *out, const EVP_MD_CTX *in) { return EVP_MD_CTX_copy_ex(out, in); } +void EVP_MD_CTX_reset(EVP_MD_CTX *ctx) { + EVP_MD_CTX_cleanup(ctx); + EVP_MD_CTX_init(ctx); +} + int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *engine) { if (ctx->digest != type) { - if (ctx->digest && ctx->digest->ctx_size > 0) { - OPENSSL_free(ctx->md_data); - ctx->md_data = NULL; + assert(type->ctx_size != 0); + uint8_t *md_data = OPENSSL_malloc(type->ctx_size); + if (md_data == NULL) { + OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE); + return 0; } + + OPENSSL_free(ctx->md_data); + ctx->md_data = md_data; ctx->digest = type; - if (type->ctx_size > 0) { - ctx->md_data = OPENSSL_malloc(type->ctx_size); - if (ctx->md_data == NULL) { - OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE); - return 0; - } - } } assert(ctx->pctx == NULL || ctx->pctx_ops != NULL); diff --git a/Sources/BoringSSL/crypto/digest/digests.c b/Sources/BoringSSL/crypto/fipsmodule/digest/digests.c similarity index 52% rename from Sources/BoringSSL/crypto/digest/digests.c rename to Sources/BoringSSL/crypto/fipsmodule/digest/digests.c index fd2a939ab..f2fa349c2 100644 --- a/Sources/BoringSSL/crypto/digest/digests.c +++ b/Sources/BoringSSL/crypto/fipsmodule/digest/digests.c @@ -59,14 +59,14 @@ #include #include -#include #include #include #include #include #include "internal.h" -#include "../internal.h" +#include "../delocate.h" +#include "../../internal.h" #if defined(NDEBUG) #define CHECK(x) (void) (x) @@ -87,12 +87,16 @@ static void md4_final(EVP_MD_CTX *ctx, uint8_t *out) { CHECK(MD4_Final(out, ctx->md_data)); } -static const EVP_MD md4_md = { - NID_md4, MD4_DIGEST_LENGTH, 0 /* flags */, md4_init, - md4_update, md4_final, 64 /* block size */, sizeof(MD4_CTX), -}; - -const EVP_MD *EVP_md4(void) { return &md4_md; } +DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md4) { + out->type = NID_md4; + out->md_size = MD4_DIGEST_LENGTH; + out->flags = 0; + out->init = md4_init; + out->update = md4_update; + out->final = md4_final; + out->block_size = 64; + out->ctx_size = sizeof(MD4_CTX); +} static void md5_init(EVP_MD_CTX *ctx) { @@ -107,12 +111,16 @@ static void md5_final(EVP_MD_CTX *ctx, uint8_t *out) { CHECK(MD5_Final(out, ctx->md_data)); } -static const EVP_MD md5_md = { - NID_md5, MD5_DIGEST_LENGTH, 0 /* flags */, md5_init, - md5_update, md5_final, 64 /* block size */, sizeof(MD5_CTX), -}; - -const EVP_MD *EVP_md5(void) { return &md5_md; } +DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md5) { + out->type = NID_md5; + out->md_size = MD5_DIGEST_LENGTH; + out->flags = 0; + out->init = md5_init; + out->update = 
md5_update; + out->final = md5_final; + out->block_size = 64; + out->ctx_size = sizeof(MD5_CTX); +} static void sha1_init(EVP_MD_CTX *ctx) { @@ -127,12 +135,16 @@ static void sha1_final(EVP_MD_CTX *ctx, uint8_t *md) { CHECK(SHA1_Final(md, ctx->md_data)); } -static const EVP_MD sha1_md = { - NID_sha1, SHA_DIGEST_LENGTH, 0 /* flags */, sha1_init, - sha1_update, sha1_final, 64 /* block size */, sizeof(SHA_CTX), -}; - -const EVP_MD *EVP_sha1(void) { return &sha1_md; } +DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha1) { + out->type = NID_sha1; + out->md_size = SHA_DIGEST_LENGTH; + out->flags = 0; + out->init = sha1_init; + out->update = sha1_update; + out->final = sha1_final; + out->block_size = 64; + out->ctx_size = sizeof(SHA_CTX); +} static void sha224_init(EVP_MD_CTX *ctx) { @@ -147,13 +159,16 @@ static void sha224_final(EVP_MD_CTX *ctx, uint8_t *md) { CHECK(SHA224_Final(md, ctx->md_data)); } -static const EVP_MD sha224_md = { - NID_sha224, SHA224_DIGEST_LENGTH, 0 /* flags */, - sha224_init, sha224_update, sha224_final, - 64 /* block size */, sizeof(SHA256_CTX), -}; - -const EVP_MD *EVP_sha224(void) { return &sha224_md; } +DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha224) { + out->type = NID_sha224; + out->md_size = SHA224_DIGEST_LENGTH; + out->flags = 0; + out->init = sha224_init; + out->update = sha224_update; + out->final = sha224_final; + out->block_size = 64; + out->ctx_size = sizeof(SHA256_CTX); +} static void sha256_init(EVP_MD_CTX *ctx) { @@ -168,13 +183,16 @@ static void sha256_final(EVP_MD_CTX *ctx, uint8_t *md) { CHECK(SHA256_Final(md, ctx->md_data)); } -static const EVP_MD sha256_md = { - NID_sha256, SHA256_DIGEST_LENGTH, 0 /* flags */, - sha256_init, sha256_update, sha256_final, - 64 /* block size */, sizeof(SHA256_CTX), -}; - -const EVP_MD *EVP_sha256(void) { return &sha256_md; } +DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha256) { + out->type = NID_sha256; + out->md_size = SHA256_DIGEST_LENGTH; + out->flags = 0; + out->init = sha256_init; + out->update = sha256_update; + out->final = sha256_final; + out->block_size = 64; + out->ctx_size = sizeof(SHA256_CTX); +} static void sha384_init(EVP_MD_CTX *ctx) { @@ -189,13 +207,16 @@ static void sha384_final(EVP_MD_CTX *ctx, uint8_t *md) { CHECK(SHA384_Final(md, ctx->md_data)); } -static const EVP_MD sha384_md = { - NID_sha384, SHA384_DIGEST_LENGTH, 0 /* flags */, - sha384_init, sha384_update, sha384_final, - 128 /* block size */, sizeof(SHA512_CTX), -}; - -const EVP_MD *EVP_sha384(void) { return &sha384_md; } +DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha384) { + out->type = NID_sha384; + out->md_size = SHA384_DIGEST_LENGTH; + out->flags = 0; + out->init = sha384_init; + out->update = sha384_update; + out->final = sha384_final; + out->block_size = 128; + out->ctx_size = sizeof(SHA512_CTX); +} static void sha512_init(EVP_MD_CTX *ctx) { @@ -210,13 +231,16 @@ static void sha512_final(EVP_MD_CTX *ctx, uint8_t *md) { CHECK(SHA512_Final(md, ctx->md_data)); } -static const EVP_MD sha512_md = { - NID_sha512, SHA512_DIGEST_LENGTH, 0 /* flags */, - sha512_init, sha512_update, sha512_final, - 128 /* block size */, sizeof(SHA512_CTX), -}; - -const EVP_MD *EVP_sha512(void) { return &sha512_md; } +DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha512) { + out->type = NID_sha512; + out->md_size = SHA512_DIGEST_LENGTH; + out->flags = 0; + out->init = sha512_init; + out->update = sha512_update; + out->final = sha512_final; + out->block_size = 128; + out->ctx_size = sizeof(SHA512_CTX); +} typedef struct { @@ -242,117 +266,15 @@ static void md5_sha1_final(EVP_MD_CTX *md_ctx, uint8_t *out) { 
SHA1_Final(out + MD5_DIGEST_LENGTH, &ctx->sha1)); } -static const EVP_MD md5_sha1_md = { - NID_md5_sha1, - MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH, - 0 /* flags */, - md5_sha1_init, - md5_sha1_update, - md5_sha1_final, - 64 /* block size */, - sizeof(MD5_SHA1_CTX), -}; - -const EVP_MD *EVP_md5_sha1(void) { return &md5_sha1_md; } - - -struct nid_to_digest { - int nid; - const EVP_MD* (*md_func)(void); - const char *short_name; - const char *long_name; -}; - -static const struct nid_to_digest nid_to_digest_mapping[] = { - {NID_md4, EVP_md4, SN_md4, LN_md4}, - {NID_md5, EVP_md5, SN_md5, LN_md5}, - {NID_sha1, EVP_sha1, SN_sha1, LN_sha1}, - {NID_sha224, EVP_sha224, SN_sha224, LN_sha224}, - {NID_sha256, EVP_sha256, SN_sha256, LN_sha256}, - {NID_sha384, EVP_sha384, SN_sha384, LN_sha384}, - {NID_sha512, EVP_sha512, SN_sha512, LN_sha512}, - {NID_md5_sha1, EVP_md5_sha1, SN_md5_sha1, LN_md5_sha1}, - /* As a remnant of signing |EVP_MD|s, OpenSSL returned the corresponding - * hash function when given a signature OID. To avoid unintended lax parsing - * of hash OIDs, this is no longer supported for lookup by OID or NID. - * Node.js, however, exposes |EVP_get_digestbyname|'s full behavior to - * consumers so we retain it there. */ - {NID_undef, EVP_sha1, SN_dsaWithSHA, LN_dsaWithSHA}, - {NID_undef, EVP_sha1, SN_dsaWithSHA1, LN_dsaWithSHA1}, - {NID_undef, EVP_sha1, SN_ecdsa_with_SHA1, NULL}, - {NID_undef, EVP_md5, SN_md5WithRSAEncryption, LN_md5WithRSAEncryption}, - {NID_undef, EVP_sha1, SN_sha1WithRSAEncryption, LN_sha1WithRSAEncryption}, - {NID_undef, EVP_sha224, SN_sha224WithRSAEncryption, - LN_sha224WithRSAEncryption}, - {NID_undef, EVP_sha256, SN_sha256WithRSAEncryption, - LN_sha256WithRSAEncryption}, - {NID_undef, EVP_sha384, SN_sha384WithRSAEncryption, - LN_sha384WithRSAEncryption}, - {NID_undef, EVP_sha512, SN_sha512WithRSAEncryption, - LN_sha512WithRSAEncryption}, -}; - -const EVP_MD* EVP_get_digestbynid(int nid) { - if (nid == NID_undef) { - /* Skip the |NID_undef| entries in |nid_to_digest_mapping|. */ - return NULL; - } - - for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(nid_to_digest_mapping); i++) { - if (nid_to_digest_mapping[i].nid == nid) { - return nid_to_digest_mapping[i].md_func(); - } - } - - return NULL; -} - -static const struct { - uint8_t oid[9]; - uint8_t oid_len; - const EVP_MD *(*md_func) (void); -} kMDOIDs[] = { - /* 1.2.840.113549.2.4 */ - { {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x04}, 8, EVP_md4 }, - /* 1.2.840.113549.2.5 */ - { {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05}, 8, EVP_md5 }, - /* 1.3.14.3.2.26 */ - { {0x2b, 0x0e, 0x03, 0x02, 0x1a}, 5, EVP_sha1 }, - /* 2.16.840.1.101.3.4.2.1 */ - { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01}, 9, EVP_sha256 }, - /* 2.16.840.1.101.3.4.2.2 */ - { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02}, 9, EVP_sha384 }, - /* 2.16.840.1.101.3.4.2.3 */ - { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03}, 9, EVP_sha512 }, - /* 2.16.840.1.101.3.4.2.4 */ - { {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04}, 9, EVP_sha224 }, -}; - -const EVP_MD *EVP_get_digestbyobj(const ASN1_OBJECT *obj) { - /* Handle objects with no corresponding OID. 
*/ - if (obj->nid != NID_undef) { - return EVP_get_digestbynid(obj->nid); - } - - for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMDOIDs); i++) { - if (obj->length == kMDOIDs[i].oid_len && - memcmp(obj->data, kMDOIDs[i].oid, obj->length) == 0) { - return kMDOIDs[i].md_func(); - } - } - - return NULL; -} - -const EVP_MD *EVP_get_digestbyname(const char *name) { - for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(nid_to_digest_mapping); i++) { - const char *short_name = nid_to_digest_mapping[i].short_name; - const char *long_name = nid_to_digest_mapping[i].long_name; - if ((short_name && strcmp(short_name, name) == 0) || - (long_name && strcmp(long_name, name) == 0)) { - return nid_to_digest_mapping[i].md_func(); - } - } - - return NULL; +DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md5_sha1) { + out->type = NID_md5_sha1; + out->md_size = MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH; + out->flags = 0; + out->init = md5_sha1_init; + out->update = md5_sha1_update; + out->final = md5_sha1_final; + out->block_size = 64; + out->ctx_size = sizeof(MD5_SHA1_CTX); } + +#undef CHECK diff --git a/Sources/BoringSSL/crypto/digest/internal.h b/Sources/BoringSSL/crypto/fipsmodule/digest/internal.h similarity index 80% rename from Sources/BoringSSL/crypto/digest/internal.h rename to Sources/BoringSSL/crypto/fipsmodule/digest/internal.h index e3d812ad8..2d06ed07b 100644 --- a/Sources/BoringSSL/crypto/digest/internal.h +++ b/Sources/BoringSSL/crypto/fipsmodule/digest/internal.h @@ -65,48 +65,48 @@ extern "C" { struct env_md_st { - /* type contains a NID identifing the digest function. (For example, - * NID_md5.) */ + // type contains a NID identifing the digest function. (For example, + // NID_md5.) int type; - /* md_size contains the size, in bytes, of the resulting digest. */ + // md_size contains the size, in bytes, of the resulting digest. unsigned md_size; - /* flags contains the OR of |EVP_MD_FLAG_*| values. */ + // flags contains the OR of |EVP_MD_FLAG_*| values. uint32_t flags; - /* init initialises the state in |ctx->md_data|. */ + // init initialises the state in |ctx->md_data|. void (*init)(EVP_MD_CTX *ctx); - /* update hashes |len| bytes of |data| into the state in |ctx->md_data|. */ + // update hashes |len| bytes of |data| into the state in |ctx->md_data|. void (*update)(EVP_MD_CTX *ctx, const void *data, size_t count); - /* final completes the hash and writes |md_size| bytes of digest to |out|. */ + // final completes the hash and writes |md_size| bytes of digest to |out|. void (*final)(EVP_MD_CTX *ctx, uint8_t *out); - /* block_size contains the hash's native block size. */ + // block_size contains the hash's native block size. unsigned block_size; - /* ctx_size contains the size, in bytes, of the state of the hash function. */ + // ctx_size contains the size, in bytes, of the state of the hash function. unsigned ctx_size; }; -/* evp_md_pctx_ops contains function pointers to allow the |pctx| member of - * |EVP_MD_CTX| to be manipulated without breaking layering by calling EVP - * functions. */ +// evp_md_pctx_ops contains function pointers to allow the |pctx| member of +// |EVP_MD_CTX| to be manipulated without breaking layering by calling EVP +// functions. struct evp_md_pctx_ops { - /* free is called when an |EVP_MD_CTX| is being freed and the |pctx| also - * needs to be freed. */ + // free is called when an |EVP_MD_CTX| is being freed and the |pctx| also + // needs to be freed. void (*free) (EVP_PKEY_CTX *pctx); - /* dup is called when an |EVP_MD_CTX| is copied and so the |pctx| also needs - * to be copied. 
*/ + // dup is called when an |EVP_MD_CTX| is copied and so the |pctx| also needs + // to be copied. EVP_PKEY_CTX* (*dup) (EVP_PKEY_CTX *pctx); }; #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_DIGEST_INTERNAL */ +#endif // OPENSSL_HEADER_DIGEST_INTERNAL diff --git a/Sources/BoringSSL/crypto/digest/md32_common.h b/Sources/BoringSSL/crypto/fipsmodule/digest/md32_common.h similarity index 73% rename from Sources/BoringSSL/crypto/digest/md32_common.h rename to Sources/BoringSSL/crypto/fipsmodule/digest/md32_common.h index 45fe93951..a0c3665d5 100644 --- a/Sources/BoringSSL/crypto/digest/md32_common.h +++ b/Sources/BoringSSL/crypto/fipsmodule/digest/md32_common.h @@ -46,70 +46,66 @@ * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== */ -#ifndef OPENSSL_HEADER_MD32_COMMON_H -#define OPENSSL_HEADER_MD32_COMMON_H - #include #include -#include "../internal.h" +#include "../../internal.h" #if defined(__cplusplus) extern "C" { #endif -/* This is a generic 32-bit "collector" for message digest algorithms. It - * collects input character stream into chunks of 32-bit values and invokes the - * block function that performs the actual hash calculations. To make use of - * this mechanism, the following macros must be defined before including - * md32_common.h. - * - * One of |DATA_ORDER_IS_BIG_ENDIAN| or |DATA_ORDER_IS_LITTLE_ENDIAN| must be - * defined to specify the byte order of the input stream. - * - * |HASH_CBLOCK| must be defined as the integer block size, in bytes. - * - * |HASH_CTX| must be defined as the name of the context structure, which must - * have at least the following members: - * - * typedef struct _state_st { - * uint32_t h[ / sizeof(uint32_t)]; - * uint32_t Nl, Nh; - * uint8_t data[HASH_CBLOCK]; - * unsigned num; - * ... - * } _CTX; - * - * is the output length of the hash in bytes, before - * any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and - * SHA-512). - * - * |HASH_UPDATE| must be defined as the name of the "Update" function to - * generate. - * - * |HASH_TRANSFORM| must be defined as the the name of the "Transform" - * function to generate. - * - * |HASH_FINAL| must be defined as the name of "Final" function to generate. - * - * |HASH_BLOCK_DATA_ORDER| must be defined as the name of the "Block" function. - * That function must be implemented manually. It must be capable of operating - * on *unaligned* input data in its original (data) byte order. It must have - * this signature: - * - * void HASH_BLOCK_DATA_ORDER(uint32_t *state, const uint8_t *data, - * size_t num); - * - * It must update the hash state |state| with |num| blocks of data from |data|, - * where each block is |HASH_CBLOCK| bytes; i.e. |data| points to a array of - * |HASH_CBLOCK * num| bytes. |state| points to the |h| member of a |HASH_CTX|, - * and so will have | / sizeof(uint32_t)| elements. - * - * |HASH_MAKE_STRING(c, s)| must be defined as a block statement that converts - * the hash state |c->h| into the output byte order, storing the result in |s|. - */ +// This is a generic 32-bit "collector" for message digest algorithms. It +// collects input character stream into chunks of 32-bit values and invokes the +// block function that performs the actual hash calculations. To make use of +// this mechanism, the following macros must be defined before including +// md32_common.h. 
+// +// One of |DATA_ORDER_IS_BIG_ENDIAN| or |DATA_ORDER_IS_LITTLE_ENDIAN| must be +// defined to specify the byte order of the input stream. +// +// |HASH_CBLOCK| must be defined as the integer block size, in bytes. +// +// |HASH_CTX| must be defined as the name of the context structure, which must +// have at least the following members: +// +// typedef struct _state_st { +// uint32_t h[ / sizeof(uint32_t)]; +// uint32_t Nl, Nh; +// uint8_t data[HASH_CBLOCK]; +// unsigned num; +// ... +// } _CTX; +// +// is the output length of the hash in bytes, before +// any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and +// SHA-512). +// +// |HASH_UPDATE| must be defined as the name of the "Update" function to +// generate. +// +// |HASH_TRANSFORM| must be defined as the the name of the "Transform" +// function to generate. +// +// |HASH_FINAL| must be defined as the name of "Final" function to generate. +// +// |HASH_BLOCK_DATA_ORDER| must be defined as the name of the "Block" function. +// That function must be implemented manually. It must be capable of operating +// on *unaligned* input data in its original (data) byte order. It must have +// this signature: +// +// void HASH_BLOCK_DATA_ORDER(uint32_t *state, const uint8_t *data, +// size_t num); +// +// It must update the hash state |state| with |num| blocks of data from |data|, +// where each block is |HASH_CBLOCK| bytes; i.e. |data| points to a array of +// |HASH_CBLOCK * num| bytes. |state| points to the |h| member of a |HASH_CTX|, +// and so will have | / sizeof(uint32_t)| elements. +// +// |HASH_MAKE_STRING(c, s)| must be defined as a block statement that converts +// the hash state |c->h| into the output byte order, storing the result in |s|. #if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN) #error "DATA_ORDER must be defined!" @@ -176,7 +172,7 @@ extern "C" { *((c)++) = (uint8_t)(((l) >> 24) & 0xff); \ } while (0) -#endif /* DATA_ORDER */ +#endif // DATA_ORDER int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) { const uint8_t *data = data_; @@ -187,7 +183,7 @@ int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) { uint32_t l = c->Nl + (((uint32_t)len) << 3); if (l < c->Nl) { - /* Handle carries. */ + // Handle carries. c->Nh++; } c->Nh += (uint32_t)(len >> 29); @@ -202,7 +198,7 @@ int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) { data += n; len -= n; c->num = 0; - /* Keep |c->data| zeroed when unused. */ + // Keep |c->data| zeroed when unused. OPENSSL_memset(c->data, 0, HASH_CBLOCK); } else { OPENSSL_memcpy(c->data + n, data, len); @@ -233,14 +229,14 @@ void HASH_TRANSFORM(HASH_CTX *c, const uint8_t *data) { int HASH_FINAL(uint8_t *md, HASH_CTX *c) { - /* |c->data| always has room for at least one byte. A full block would have - * been consumed. */ + // |c->data| always has room for at least one byte. A full block would have + // been consumed. size_t n = c->num; assert(n < HASH_CBLOCK); c->data[n] = 0x80; n++; - /* Fill the block with zeros if there isn't room for a 64-bit length. */ + // Fill the block with zeros if there isn't room for a 64-bit length. if (n > (HASH_CBLOCK - 8)) { OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - n); n = 0; @@ -248,7 +244,7 @@ int HASH_FINAL(uint8_t *md, HASH_CTX *c) { } OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - 8 - n); - /* Append a 64-bit length to the block and process it. */ + // Append a 64-bit length to the block and process it. 
uint8_t *p = c->data + HASH_CBLOCK - 8; #if defined(DATA_ORDER_IS_BIG_ENDIAN) HOST_l2c(c->Nh, p); @@ -268,7 +264,5 @@ int HASH_FINAL(uint8_t *md, HASH_CTX *c) { #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif - -#endif /* OPENSSL_HEADER_MD32_COMMON_H */ diff --git a/Sources/BoringSSL/crypto/fipsmodule/ec/ec.c b/Sources/BoringSSL/crypto/fipsmodule/ec/ec.c new file mode 100644 index 000000000..977cd265a --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/ec.c @@ -0,0 +1,943 @@ +/* Originally written by Bodo Moeller for the OpenSSL project. + * ==================================================================== + * Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ +/* ==================================================================== + * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. + * + * Portions of the attached software ("Contribution") are developed by + * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. 
+ * + * The Contribution is licensed pursuant to the OpenSSL open source + * license provided above. + * + * The elliptic curve binary polynomial software is originally written by + * Sheueling Chang Shantz and Douglas Stebila of Sun Microsystems + * Laboratories. */ + +#include + +#include +#include + +#include +#include +#include +#include + +#include "internal.h" +#include "../../internal.h" +#include "../bn/internal.h" +#include "../delocate.h" + + +static void ec_point_free(EC_POINT *point, int free_group); + +static const uint8_t kP224Params[6 * 28] = { + // p + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + // a + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFE, + // b + 0xB4, 0x05, 0x0A, 0x85, 0x0C, 0x04, 0xB3, 0xAB, 0xF5, 0x41, 0x32, 0x56, + 0x50, 0x44, 0xB0, 0xB7, 0xD7, 0xBF, 0xD8, 0xBA, 0x27, 0x0B, 0x39, 0x43, + 0x23, 0x55, 0xFF, 0xB4, + // x + 0xB7, 0x0E, 0x0C, 0xBD, 0x6B, 0xB4, 0xBF, 0x7F, 0x32, 0x13, 0x90, 0xB9, + 0x4A, 0x03, 0xC1, 0xD3, 0x56, 0xC2, 0x11, 0x22, 0x34, 0x32, 0x80, 0xD6, + 0x11, 0x5C, 0x1D, 0x21, + // y + 0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, 0xdf, 0xe6, + 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64, 0x44, 0xd5, 0x81, 0x99, + 0x85, 0x00, 0x7e, 0x34, + // order + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x16, 0xA2, 0xE0, 0xB8, 0xF0, 0x3E, 0x13, 0xDD, 0x29, 0x45, + 0x5C, 0x5C, 0x2A, 0x3D, +}; + +static const uint8_t kP256Params[6 * 32] = { + // p + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + // a + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, + // b + 0x5A, 0xC6, 0x35, 0xD8, 0xAA, 0x3A, 0x93, 0xE7, 0xB3, 0xEB, 0xBD, 0x55, + 0x76, 0x98, 0x86, 0xBC, 0x65, 0x1D, 0x06, 0xB0, 0xCC, 0x53, 0xB0, 0xF6, + 0x3B, 0xCE, 0x3C, 0x3E, 0x27, 0xD2, 0x60, 0x4B, + // x + 0x6B, 0x17, 0xD1, 0xF2, 0xE1, 0x2C, 0x42, 0x47, 0xF8, 0xBC, 0xE6, 0xE5, + 0x63, 0xA4, 0x40, 0xF2, 0x77, 0x03, 0x7D, 0x81, 0x2D, 0xEB, 0x33, 0xA0, + 0xF4, 0xA1, 0x39, 0x45, 0xD8, 0x98, 0xC2, 0x96, + // y + 0x4f, 0xe3, 0x42, 0xe2, 0xfe, 0x1a, 0x7f, 0x9b, 0x8e, 0xe7, 0xeb, 0x4a, + 0x7c, 0x0f, 0x9e, 0x16, 0x2b, 0xce, 0x33, 0x57, 0x6b, 0x31, 0x5e, 0xce, + 0xcb, 0xb6, 0x40, 0x68, 0x37, 0xbf, 0x51, 0xf5, + // order + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0xE6, 0xFA, 0xAD, 0xA7, 0x17, 0x9E, 0x84, + 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63, 0x25, 0x51, +}; + +static const uint8_t kP384Params[6 * 48] = { + // p + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + // a + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFC, + // b + 0xB3, 0x31, 0x2F, 0xA7, 0xE2, 0x3E, 0xE7, 0xE4, 0x98, 0x8E, 0x05, 0x6B, + 0xE3, 0xF8, 0x2D, 0x19, 0x18, 0x1D, 0x9C, 0x6E, 0xFE, 0x81, 0x41, 0x12, + 0x03, 0x14, 0x08, 0x8F, 0x50, 0x13, 0x87, 0x5A, 0xC6, 0x56, 0x39, 0x8D, + 0x8A, 0x2E, 0xD1, 0x9D, 0x2A, 0x85, 0xC8, 0xED, 0xD3, 0xEC, 0x2A, 0xEF, + // x + 0xAA, 0x87, 0xCA, 0x22, 0xBE, 0x8B, 0x05, 0x37, 0x8E, 0xB1, 0xC7, 0x1E, + 0xF3, 0x20, 0xAD, 0x74, 0x6E, 0x1D, 0x3B, 0x62, 0x8B, 0xA7, 0x9B, 0x98, + 0x59, 0xF7, 0x41, 0xE0, 0x82, 0x54, 0x2A, 0x38, 0x55, 0x02, 0xF2, 0x5D, + 0xBF, 0x55, 0x29, 0x6C, 0x3A, 0x54, 0x5E, 0x38, 0x72, 0x76, 0x0A, 0xB7, + // y + 0x36, 0x17, 0xde, 0x4a, 0x96, 0x26, 0x2c, 0x6f, 0x5d, 0x9e, 0x98, 0xbf, + 0x92, 0x92, 0xdc, 0x29, 0xf8, 0xf4, 0x1d, 0xbd, 0x28, 0x9a, 0x14, 0x7c, + 0xe9, 0xda, 0x31, 0x13, 0xb5, 0xf0, 0xb8, 0xc0, 0x0a, 0x60, 0xb1, 0xce, + 0x1d, 0x7e, 0x81, 0x9d, 0x7a, 0x43, 0x1d, 0x7c, 0x90, 0xea, 0x0e, 0x5f, + // order + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xC7, 0x63, 0x4D, 0x81, 0xF4, 0x37, 0x2D, 0xDF, 0x58, 0x1A, 0x0D, 0xB2, + 0x48, 0xB0, 0xA7, 0x7A, 0xEC, 0xEC, 0x19, 0x6A, 0xCC, 0xC5, 0x29, 0x73, +}; + +static const uint8_t kP521Params[6 * 66] = { + // p + 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + // a + 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, + // b + 0x00, 0x51, 0x95, 0x3E, 0xB9, 0x61, 0x8E, 0x1C, 0x9A, 0x1F, 0x92, 0x9A, + 0x21, 0xA0, 0xB6, 0x85, 0x40, 0xEE, 0xA2, 0xDA, 0x72, 0x5B, 0x99, 0xB3, + 0x15, 0xF3, 0xB8, 0xB4, 0x89, 0x91, 0x8E, 0xF1, 0x09, 0xE1, 0x56, 0x19, + 0x39, 0x51, 0xEC, 0x7E, 0x93, 0x7B, 0x16, 0x52, 0xC0, 0xBD, 0x3B, 0xB1, + 0xBF, 0x07, 0x35, 0x73, 0xDF, 0x88, 0x3D, 0x2C, 0x34, 0xF1, 0xEF, 0x45, + 0x1F, 0xD4, 0x6B, 0x50, 0x3F, 0x00, + // x + 0x00, 0xC6, 0x85, 0x8E, 0x06, 0xB7, 0x04, 0x04, 0xE9, 0xCD, 0x9E, 0x3E, + 0xCB, 0x66, 0x23, 0x95, 0xB4, 0x42, 0x9C, 0x64, 0x81, 0x39, 0x05, 0x3F, + 0xB5, 0x21, 0xF8, 0x28, 0xAF, 0x60, 0x6B, 0x4D, 0x3D, 0xBA, 0xA1, 0x4B, + 0x5E, 0x77, 0xEF, 0xE7, 0x59, 0x28, 0xFE, 0x1D, 0xC1, 0x27, 0xA2, 0xFF, + 0xA8, 0xDE, 0x33, 0x48, 0xB3, 0xC1, 0x85, 0x6A, 0x42, 0x9B, 0xF9, 0x7E, + 0x7E, 0x31, 0xC2, 0xE5, 0xBD, 0x66, + // y + 0x01, 0x18, 0x39, 0x29, 0x6a, 0x78, 0x9a, 0x3b, 0xc0, 0x04, 0x5c, 0x8a, + 0x5f, 0xb4, 0x2c, 0x7d, 0x1b, 0xd9, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b, + 0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17, 0x27, 0x3e, 0x66, 0x2c, 0x97, 0xee, + 0x72, 0x99, 0x5e, 0xf4, 0x26, 0x40, 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad, + 0x07, 0x61, 0x35, 0x3c, 0x70, 0x86, 0xa2, 0x72, 0xc2, 0x40, 0x88, 0xbe, + 0x94, 0x76, 0x9f, 0xd1, 0x66, 0x50, + // order + 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFA, 0x51, 0x86, + 0x87, 0x83, 0xBF, 0x2F, 0x96, 0x6B, 0x7F, 0xCC, 0x01, 0x48, 0xF7, 0x09, + 0xA5, 0xD0, 0x3B, 0xB5, 0xC9, 0xB8, 0x89, 0x9C, 0x47, 0xAE, 0xBB, 0x6F, + 0xB7, 0x1E, 0x91, 0x38, 0x64, 0x09, +}; + +// MSan appears to have a bug that causes code to be miscompiled in opt mode. +// While that is being looked at, don't run the uint128_t code under MSan. +#if defined(OPENSSL_64_BIT) && !defined(OPENSSL_WINDOWS) && \ + !defined(MEMORY_SANITIZER) +#define BORINGSSL_USE_INT128_CODE +#endif + +DEFINE_METHOD_FUNCTION(struct built_in_curves, OPENSSL_built_in_curves) { + // 1.3.132.0.35 + static const uint8_t kOIDP521[] = {0x2b, 0x81, 0x04, 0x00, 0x23}; + out->curves[0].nid = NID_secp521r1; + out->curves[0].oid = kOIDP521; + out->curves[0].oid_len = sizeof(kOIDP521); + out->curves[0].comment = "NIST P-521"; + out->curves[0].param_len = 66; + out->curves[0].params = kP521Params; + out->curves[0].method = EC_GFp_mont_method(); + + // 1.3.132.0.34 + static const uint8_t kOIDP384[] = {0x2b, 0x81, 0x04, 0x00, 0x22}; + out->curves[1].nid = NID_secp384r1; + out->curves[1].oid = kOIDP384; + out->curves[1].oid_len = sizeof(kOIDP384); + out->curves[1].comment = "NIST P-384"; + out->curves[1].param_len = 48; + out->curves[1].params = kP384Params; + out->curves[1].method = EC_GFp_mont_method(); + + // 1.2.840.10045.3.1.7 + static const uint8_t kOIDP256[] = {0x2a, 0x86, 0x48, 0xce, + 0x3d, 0x03, 0x01, 0x07}; + out->curves[2].nid = NID_X9_62_prime256v1; + out->curves[2].oid = kOIDP256; + out->curves[2].oid_len = sizeof(kOIDP256); + out->curves[2].comment = "NIST P-256"; + out->curves[2].param_len = 32; + out->curves[2].params = kP256Params; + out->curves[2].method = +#if defined(BORINGSSL_USE_INT128_CODE) +#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ + !defined(OPENSSL_SMALL) + EC_GFp_nistz256_method(); +#else + EC_GFp_nistp256_method(); +#endif +#else + EC_GFp_mont_method(); +#endif + + // 1.3.132.0.33 + static const uint8_t kOIDP224[] = {0x2b, 0x81, 0x04, 0x00, 0x21}; + out->curves[3].nid = NID_secp224r1; + out->curves[3].oid = kOIDP224; + out->curves[3].oid_len = sizeof(kOIDP224); + out->curves[3].comment = "NIST P-224"; + out->curves[3].param_len = 28; + out->curves[3].params = kP224Params; + out->curves[3].method = +#if defined(BORINGSSL_USE_INT128_CODE) && !defined(OPENSSL_SMALL) + EC_GFp_nistp224_method(); +#else + EC_GFp_mont_method(); +#endif +} + +EC_GROUP *ec_group_new(const EC_METHOD *meth) { + EC_GROUP *ret; + + if (meth == NULL) { + OPENSSL_PUT_ERROR(EC, EC_R_SLOT_FULL); + return NULL; + } + + if (meth->group_init == 0) { + OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return NULL; + } + + ret = OPENSSL_malloc(sizeof(EC_GROUP)); + if (ret == NULL) { + OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); + return NULL; + } + OPENSSL_memset(ret, 0, sizeof(EC_GROUP)); + + ret->references = 1; + ret->meth = meth; + BN_init(&ret->order); + + if (!meth->group_init(ret)) { + OPENSSL_free(ret); + return NULL; + } + + return ret; +} + +static void ec_group_set0_generator(EC_GROUP *group, EC_POINT *generator) { + assert(group->generator == NULL); + assert(group == generator->group); + + // Avoid a reference cycle. |group->generator| does not maintain an owning + // pointer to |group|. 
+ group->generator = generator; + int is_zero = CRYPTO_refcount_dec_and_test_zero(&group->references); + + assert(!is_zero); + (void)is_zero; +} + +EC_GROUP *EC_GROUP_new_curve_GFp(const BIGNUM *p, const BIGNUM *a, + const BIGNUM *b, BN_CTX *ctx) { + if (BN_num_bytes(p) > EC_MAX_SCALAR_BYTES) { + OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FIELD); + return NULL; + } + + EC_GROUP *ret = ec_group_new(EC_GFp_mont_method()); + if (ret == NULL) { + return NULL; + } + + if (ret->meth->group_set_curve == NULL) { + OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + EC_GROUP_free(ret); + return NULL; + } + if (!ret->meth->group_set_curve(ret, p, a, b, ctx)) { + EC_GROUP_free(ret); + return NULL; + } + return ret; +} + +int EC_GROUP_set_generator(EC_GROUP *group, const EC_POINT *generator, + const BIGNUM *order, const BIGNUM *cofactor) { + if (group->curve_name != NID_undef || group->generator != NULL || + generator->group != group) { + // |EC_GROUP_set_generator| may only be used with |EC_GROUP|s returned by + // |EC_GROUP_new_curve_GFp| and may only used once on each group. + // Additionally, |generator| must been created from + // |EC_GROUP_new_curve_GFp|, not a copy, so that + // |generator->group->generator| is set correctly. + OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return 0; + } + + if (BN_num_bytes(order) > EC_MAX_SCALAR_BYTES) { + OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FIELD); + return 0; + } + + // Require a cofactor of one for custom curves, which implies prime order. + if (!BN_is_one(cofactor)) { + OPENSSL_PUT_ERROR(EC, EC_R_INVALID_COFACTOR); + return 0; + } + + // Require that p < 2×order. This simplifies some ECDSA operations. + // + // Note any curve which did not satisfy this must have been invalid or use a + // tiny prime (less than 17). See the proof in |field_element_to_scalar| in + // the ECDSA implementation. 
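As a rough cross-check of the comment above (it is not the proof referenced there): with cofactor one, Hasse's theorem gives

    order >= p + 1 - 2*sqrt(p)

and p + 1 - 2*sqrt(p) > p/2 whenever sqrt(p) > 2 + sqrt(2), i.e. for every prime p >= 13; only toy fields can violate p < 2*order, consistent with the "tiny prime" remark above.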
+ BIGNUM *tmp = BN_new(); + if (tmp == NULL || + !BN_lshift1(tmp, order)) { + BN_free(tmp); + return 0; + } + int ok = BN_cmp(tmp, &group->field) > 0; + BN_free(tmp); + if (!ok) { + OPENSSL_PUT_ERROR(EC, EC_R_INVALID_GROUP_ORDER); + return 0; + } + + EC_POINT *copy = EC_POINT_new(group); + if (copy == NULL || + !EC_POINT_copy(copy, generator) || + !BN_copy(&group->order, order)) { + EC_POINT_free(copy); + return 0; + } + + BN_MONT_CTX_free(group->order_mont); + group->order_mont = BN_MONT_CTX_new(); + if (group->order_mont == NULL || + !BN_MONT_CTX_set(group->order_mont, &group->order, NULL)) { + return 0; + } + + ec_group_set0_generator(group, copy); + return 1; +} + +static EC_GROUP *ec_group_new_from_data(const struct built_in_curve *curve) { + EC_GROUP *group = NULL; + EC_POINT *P = NULL; + BIGNUM *p = NULL, *a = NULL, *b = NULL, *x = NULL, *y = NULL; + int ok = 0; + + BN_CTX *ctx = BN_CTX_new(); + if (ctx == NULL) { + OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); + goto err; + } + + const unsigned param_len = curve->param_len; + const uint8_t *params = curve->params; + + if (!(p = BN_bin2bn(params + 0 * param_len, param_len, NULL)) || + !(a = BN_bin2bn(params + 1 * param_len, param_len, NULL)) || + !(b = BN_bin2bn(params + 2 * param_len, param_len, NULL))) { + OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); + goto err; + } + + group = ec_group_new(curve->method); + if (group == NULL || + !group->meth->group_set_curve(group, p, a, b, ctx)) { + OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); + goto err; + } + + if ((P = EC_POINT_new(group)) == NULL) { + OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); + goto err; + } + + if (!(x = BN_bin2bn(params + 3 * param_len, param_len, NULL)) || + !(y = BN_bin2bn(params + 4 * param_len, param_len, NULL))) { + OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); + goto err; + } + + if (!EC_POINT_set_affine_coordinates_GFp(group, P, x, y, ctx)) { + OPENSSL_PUT_ERROR(EC, ERR_R_EC_LIB); + goto err; + } + if (!BN_bin2bn(params + 5 * param_len, param_len, &group->order)) { + OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); + goto err; + } + + group->order_mont = BN_MONT_CTX_new(); + if (group->order_mont == NULL || + !BN_MONT_CTX_set(group->order_mont, &group->order, ctx)) { + OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); + goto err; + } + + ec_group_set0_generator(group, P); + P = NULL; + ok = 1; + +err: + if (!ok) { + EC_GROUP_free(group); + group = NULL; + } + EC_POINT_free(P); + BN_CTX_free(ctx); + BN_free(p); + BN_free(a); + BN_free(b); + BN_free(x); + BN_free(y); + return group; +} + +// Built-in groups are allocated lazily and static once allocated. +// TODO(davidben): Make these actually static. https://crbug.com/boringssl/20. 
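The helper above mirrors what an outside caller would do with the public API to build a custom curve in the same two steps that the |EC_GROUP_cmp| comment below alludes to. A minimal illustrative sketch, not taken from the vendored sources; the helper name and the *_hex parameters are placeholders:

    #include <openssl/bn.h>
    #include <openssl/ec.h>

    // Illustrative only: build a custom prime curve the two-step way.
    static EC_GROUP *make_custom_group(const char *p_hex, const char *a_hex,
                                       const char *b_hex, const char *gx_hex,
                                       const char *gy_hex, const char *n_hex) {
      BIGNUM *p = NULL, *a = NULL, *b = NULL, *gx = NULL, *gy = NULL, *n = NULL;
      EC_GROUP *group = NULL;
      EC_POINT *g = NULL;
      if (!BN_hex2bn(&p, p_hex) || !BN_hex2bn(&a, a_hex) ||
          !BN_hex2bn(&b, b_hex) || !BN_hex2bn(&gx, gx_hex) ||
          !BN_hex2bn(&gy, gy_hex) || !BN_hex2bn(&n, n_hex)) {
        goto done;
      }
      // Step 1: the field and curve coefficients.
      group = EC_GROUP_new_curve_GFp(p, a, b, NULL);
      if (group == NULL) {
        goto done;
      }
      // Step 2: generator, order and (required) cofactor one. The generator
      // is copied by EC_GROUP_set_generator, so it is still freed below.
      g = EC_POINT_new(group);
      if (g == NULL ||
          !EC_POINT_set_affine_coordinates_GFp(group, g, gx, gy, NULL) ||
          !EC_GROUP_set_generator(group, g, n, BN_value_one())) {
        EC_GROUP_free(group);
        group = NULL;
      }
    done:
      EC_POINT_free(g);
      BN_free(p); BN_free(a); BN_free(b);
      BN_free(gx); BN_free(gy); BN_free(n);
      return group;
    }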
+struct built_in_groups_st { + EC_GROUP *groups[OPENSSL_NUM_BUILT_IN_CURVES]; +}; +DEFINE_BSS_GET(struct built_in_groups_st, built_in_groups); +DEFINE_STATIC_MUTEX(built_in_groups_lock); + +EC_GROUP *EC_GROUP_new_by_curve_name(int nid) { + struct built_in_groups_st *groups = built_in_groups_bss_get(); + EC_GROUP **group_ptr = NULL; + const struct built_in_curves *const curves = OPENSSL_built_in_curves(); + const struct built_in_curve *curve = NULL; + for (size_t i = 0; i < OPENSSL_NUM_BUILT_IN_CURVES; i++) { + if (curves->curves[i].nid == nid) { + curve = &curves->curves[i]; + group_ptr = &groups->groups[i]; + break; + } + } + + if (curve == NULL) { + OPENSSL_PUT_ERROR(EC, EC_R_UNKNOWN_GROUP); + return NULL; + } + + CRYPTO_STATIC_MUTEX_lock_read(built_in_groups_lock_bss_get()); + EC_GROUP *ret = *group_ptr; + CRYPTO_STATIC_MUTEX_unlock_read(built_in_groups_lock_bss_get()); + if (ret != NULL) { + return ret; + } + + ret = ec_group_new_from_data(curve); + if (ret == NULL) { + return NULL; + } + + EC_GROUP *to_free = NULL; + CRYPTO_STATIC_MUTEX_lock_write(built_in_groups_lock_bss_get()); + if (*group_ptr == NULL) { + *group_ptr = ret; + // Filling in |ret->curve_name| makes |EC_GROUP_free| and |EC_GROUP_dup| + // into no-ops. At this point, |ret| is considered static. + ret->curve_name = nid; + } else { + to_free = ret; + ret = *group_ptr; + } + CRYPTO_STATIC_MUTEX_unlock_write(built_in_groups_lock_bss_get()); + + EC_GROUP_free(to_free); + return ret; +} + +void EC_GROUP_free(EC_GROUP *group) { + if (group == NULL || + // Built-in curves are static. + group->curve_name != NID_undef || + !CRYPTO_refcount_dec_and_test_zero(&group->references)) { + return; + } + + if (group->meth->group_finish != NULL) { + group->meth->group_finish(group); + } + + ec_point_free(group->generator, 0 /* don't free group */); + BN_free(&group->order); + BN_MONT_CTX_free(group->order_mont); + + OPENSSL_free(group); +} + +EC_GROUP *EC_GROUP_dup(const EC_GROUP *a) { + if (a == NULL || + // Built-in curves are static. + a->curve_name != NID_undef) { + return (EC_GROUP *)a; + } + + // Groups are logically immutable (but for |EC_GROUP_set_generator| which must + // be called early on), so we simply take a reference. + EC_GROUP *group = (EC_GROUP *)a; + CRYPTO_refcount_inc(&group->references); + return group; +} + +int EC_GROUP_cmp(const EC_GROUP *a, const EC_GROUP *b, BN_CTX *ignored) { + // Note this function returns 0 if equal and non-zero otherwise. + if (a == b) { + return 0; + } + if (a->curve_name != b->curve_name) { + return 1; + } + if (a->curve_name != NID_undef) { + // Built-in curves may be compared by curve name alone. + return 0; + } + + // |a| and |b| are both custom curves. We compare the entire curve + // structure. If |a| or |b| is incomplete (due to legacy OpenSSL mistakes, + // custom curve construction is sadly done in two parts) but otherwise not the + // same object, we consider them always unequal. 
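For built-in curves, callers normally go through |EC_GROUP_new_by_curve_name| instead. A small illustrative sketch (the helper name and curve choice are arbitrary, and BoringSSL's <openssl/nid.h> is assumed for the NID constant) of how the lazy static caching above looks from the outside:

    #include <openssl/ec.h>
    #include <openssl/nid.h>

    // Illustrative only: because the cached group has |curve_name| set,
    // EC_GROUP_free() below is a cheap no-op rather than a real release.
    static int have_p256(void) {
      EC_GROUP *group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
      if (group == NULL) {
        return 0;
      }
      unsigned degree = EC_GROUP_get_degree(group);  // 256 for P-256
      EC_GROUP_free(group);
      return degree == 256;
    }

Calling this repeatedly is fine; only the first lookup per curve does real work, after which the group is served from the static table above.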
+ return a->generator == NULL || + b->generator == NULL || + BN_cmp(&a->order, &b->order) != 0 || + BN_cmp(&a->field, &b->field) != 0 || + BN_cmp(&a->a, &b->a) != 0 || + BN_cmp(&a->b, &b->b) != 0 || + ec_GFp_simple_cmp(a, a->generator, b->generator, NULL) != 0; +} + +const EC_POINT *EC_GROUP_get0_generator(const EC_GROUP *group) { + return group->generator; +} + +const BIGNUM *EC_GROUP_get0_order(const EC_GROUP *group) { + assert(!BN_is_zero(&group->order)); + return &group->order; +} + +int EC_GROUP_get_order(const EC_GROUP *group, BIGNUM *order, BN_CTX *ctx) { + if (BN_copy(order, EC_GROUP_get0_order(group)) == NULL) { + return 0; + } + return 1; +} + +int EC_GROUP_get_cofactor(const EC_GROUP *group, BIGNUM *cofactor, + BN_CTX *ctx) { + // All |EC_GROUP|s have cofactor 1. + return BN_set_word(cofactor, 1); +} + +int EC_GROUP_get_curve_GFp(const EC_GROUP *group, BIGNUM *out_p, BIGNUM *out_a, + BIGNUM *out_b, BN_CTX *ctx) { + return ec_GFp_simple_group_get_curve(group, out_p, out_a, out_b, ctx); +} + +int EC_GROUP_get_curve_name(const EC_GROUP *group) { return group->curve_name; } + +unsigned EC_GROUP_get_degree(const EC_GROUP *group) { + return ec_GFp_simple_group_get_degree(group); +} + +EC_POINT *EC_POINT_new(const EC_GROUP *group) { + EC_POINT *ret; + + if (group == NULL) { + OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); + return NULL; + } + + ret = OPENSSL_malloc(sizeof *ret); + if (ret == NULL) { + OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); + return NULL; + } + + ret->group = EC_GROUP_dup(group); + if (ret->group == NULL || + !ec_GFp_simple_point_init(ret)) { + OPENSSL_free(ret); + return NULL; + } + + return ret; +} + +static void ec_point_free(EC_POINT *point, int free_group) { + if (!point) { + return; + } + ec_GFp_simple_point_finish(point); + if (free_group) { + EC_GROUP_free(point->group); + } + OPENSSL_free(point); +} + +void EC_POINT_free(EC_POINT *point) { + ec_point_free(point, 1 /* free group */); +} + +void EC_POINT_clear_free(EC_POINT *point) { EC_POINT_free(point); } + +int EC_POINT_copy(EC_POINT *dest, const EC_POINT *src) { + if (EC_GROUP_cmp(dest->group, src->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + if (dest == src) { + return 1; + } + return ec_GFp_simple_point_copy(dest, src); +} + +EC_POINT *EC_POINT_dup(const EC_POINT *a, const EC_GROUP *group) { + if (a == NULL) { + return NULL; + } + + EC_POINT *ret = EC_POINT_new(group); + if (ret == NULL || + !EC_POINT_copy(ret, a)) { + EC_POINT_free(ret); + return NULL; + } + + return ret; +} + +int EC_POINT_set_to_infinity(const EC_GROUP *group, EC_POINT *point) { + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + return ec_GFp_simple_point_set_to_infinity(group, point); +} + +int EC_POINT_is_at_infinity(const EC_GROUP *group, const EC_POINT *point) { + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + return ec_GFp_simple_is_at_infinity(group, point); +} + +int EC_POINT_is_on_curve(const EC_GROUP *group, const EC_POINT *point, + BN_CTX *ctx) { + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + return ec_GFp_simple_is_on_curve(group, point, ctx); +} + +int EC_POINT_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b, + BN_CTX *ctx) { + if (EC_GROUP_cmp(group, a->group, NULL) != 0 || + EC_GROUP_cmp(group, b->group, NULL) != 0) { + 
OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return -1; + } + return ec_GFp_simple_cmp(group, a, b, ctx); +} + +int EC_POINT_make_affine(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx) { + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + return ec_GFp_simple_make_affine(group, point, ctx); +} + +int EC_POINTs_make_affine(const EC_GROUP *group, size_t num, EC_POINT *points[], + BN_CTX *ctx) { + for (size_t i = 0; i < num; i++) { + if (EC_GROUP_cmp(group, points[i]->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + } + return ec_GFp_simple_points_make_affine(group, num, points, ctx); +} + +int EC_POINT_get_affine_coordinates_GFp(const EC_GROUP *group, + const EC_POINT *point, BIGNUM *x, + BIGNUM *y, BN_CTX *ctx) { + if (group->meth->point_get_affine_coordinates == 0) { + OPENSSL_PUT_ERROR(EC, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); + return 0; + } + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + return group->meth->point_get_affine_coordinates(group, point, x, y, ctx); +} + +int EC_POINT_set_affine_coordinates_GFp(const EC_GROUP *group, EC_POINT *point, + const BIGNUM *x, const BIGNUM *y, + BN_CTX *ctx) { + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + if (!ec_GFp_simple_point_set_affine_coordinates(group, point, x, y, ctx)) { + return 0; + } + + if (!EC_POINT_is_on_curve(group, point, ctx)) { + OPENSSL_PUT_ERROR(EC, EC_R_POINT_IS_NOT_ON_CURVE); + return 0; + } + + return 1; +} + +int EC_POINT_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, + const EC_POINT *b, BN_CTX *ctx) { + if (EC_GROUP_cmp(group, r->group, NULL) != 0 || + EC_GROUP_cmp(group, a->group, NULL) != 0 || + EC_GROUP_cmp(group, b->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + return ec_GFp_simple_add(group, r, a, b, ctx); +} + + +int EC_POINT_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, + BN_CTX *ctx) { + if (EC_GROUP_cmp(group, r->group, NULL) != 0 || + EC_GROUP_cmp(group, a->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + return ec_GFp_simple_dbl(group, r, a, ctx); +} + + +int EC_POINT_invert(const EC_GROUP *group, EC_POINT *a, BN_CTX *ctx) { + if (EC_GROUP_cmp(group, a->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + return ec_GFp_simple_invert(group, a, ctx); +} + +int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, + const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx) { + // Previously, this function set |r| to the point at infinity if there was + // nothing to multiply. But, nobody should be calling this function with + // nothing to multiply in the first place. + if ((g_scalar == NULL && p_scalar == NULL) || + (p == NULL) != (p_scalar == NULL)) { + OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); + return 0; + } + + // We cannot easily process arbitrary scalars in constant-time, and there is + // no need to do so. Require that scalars be the same size as the order. + // + // One could require they be fully reduced, but some consumers try to check + // that |order| * |pubkey| is the identity. This comes from following NIST SP + // 800-56A section 5.6.2.3.2. (Though all our curves have cofactor one, so + // this check isn't useful.) 
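From the caller's side, the contract just described simply means passing a non-negative scalar no wider than the group order. An illustrative caller-side sketch (the helper name is made up) of the common case, computing priv * G:

    #include <openssl/bn.h>
    #include <openssl/ec.h>

    // Illustrative only: pub = priv * G, with |priv| already in [0, order)
    // so the size checks in EC_POINT_mul accept it.
    static EC_POINT *derive_public_point(const EC_GROUP *group,
                                         const BIGNUM *priv) {
      EC_POINT *pub = EC_POINT_new(group);
      if (pub == NULL ||
          !EC_POINT_mul(group, pub, priv, /*p=*/NULL, /*p_scalar=*/NULL, NULL)) {
        EC_POINT_free(pub);
        return NULL;
      }
      return pub;
    }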
+ int ret = 0; + EC_SCALAR g_scalar_storage, p_scalar_storage; + EC_SCALAR *g_scalar_arg = NULL, *p_scalar_arg = NULL; + unsigned order_bits = BN_num_bits(&group->order); + if (g_scalar != NULL) { + if (BN_is_negative(g_scalar) || BN_num_bits(g_scalar) > order_bits || + !ec_bignum_to_scalar(group, &g_scalar_storage, g_scalar)) { + OPENSSL_PUT_ERROR(EC, EC_R_INVALID_SCALAR); + goto err; + } + g_scalar_arg = &g_scalar_storage; + } + + if (p_scalar != NULL) { + if (BN_is_negative(p_scalar) || BN_num_bits(p_scalar) > order_bits || + !ec_bignum_to_scalar(group, &p_scalar_storage, p_scalar)) { + OPENSSL_PUT_ERROR(EC, EC_R_INVALID_SCALAR); + goto err; + } + p_scalar_arg = &p_scalar_storage; + } + + ret = ec_point_mul_scalar(group, r, g_scalar_arg, p, p_scalar_arg, ctx); + +err: + OPENSSL_cleanse(&g_scalar_storage, sizeof(g_scalar_storage)); + OPENSSL_cleanse(&p_scalar_storage, sizeof(p_scalar_storage)); + return ret; +} + +int ec_point_mul_scalar(const EC_GROUP *group, EC_POINT *r, + const EC_SCALAR *g_scalar, const EC_POINT *p, + const EC_SCALAR *p_scalar, BN_CTX *ctx) { + if ((g_scalar == NULL && p_scalar == NULL) || + (p == NULL) != (p_scalar == NULL)) { + OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); + return 0; + } + + if (EC_GROUP_cmp(group, r->group, NULL) != 0 || + (p != NULL && EC_GROUP_cmp(group, p->group, NULL) != 0)) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + + return group->meth->mul(group, r, g_scalar, p, p_scalar, ctx); +} + +int ec_point_set_Jprojective_coordinates_GFp(const EC_GROUP *group, + EC_POINT *point, const BIGNUM *x, + const BIGNUM *y, const BIGNUM *z, + BN_CTX *ctx) { + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { + OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); + return 0; + } + return ec_GFp_simple_set_Jprojective_coordinates_GFp(group, point, x, y, z, + ctx); +} + +void EC_GROUP_set_asn1_flag(EC_GROUP *group, int flag) {} + +const EC_METHOD *EC_GROUP_method_of(const EC_GROUP *group) { + return NULL; +} + +int EC_METHOD_get_field_type(const EC_METHOD *meth) { + return NID_X9_62_prime_field; +} + +void EC_GROUP_set_point_conversion_form(EC_GROUP *group, + point_conversion_form_t form) { + if (form != POINT_CONVERSION_UNCOMPRESSED) { + abort(); + } +} + +size_t EC_get_builtin_curves(EC_builtin_curve *out_curves, + size_t max_num_curves) { + const struct built_in_curves *const curves = OPENSSL_built_in_curves(); + + for (size_t i = 0; i < max_num_curves && i < OPENSSL_NUM_BUILT_IN_CURVES; + i++) { + out_curves[i].comment = curves->curves[i].comment; + out_curves[i].nid = curves->curves[i].nid; + } + + return OPENSSL_NUM_BUILT_IN_CURVES; +} + +int ec_bignum_to_scalar(const EC_GROUP *group, EC_SCALAR *out, + const BIGNUM *in) { + if (BN_is_negative(in) || in->top > group->order.top) { + OPENSSL_PUT_ERROR(EC, EC_R_INVALID_SCALAR); + return 0; + } + OPENSSL_memset(out->words, 0, group->order.top * sizeof(BN_ULONG)); + OPENSSL_memcpy(out->words, in->d, in->top * sizeof(BN_ULONG)); + return 1; +} + +int ec_random_nonzero_scalar(const EC_GROUP *group, EC_SCALAR *out, + const uint8_t additional_data[32]) { + return bn_rand_range_words(out->words, 1, group->order.d, group->order.top, + additional_data); +} diff --git a/Sources/BoringSSL/crypto/ec/ec_key.c b/Sources/BoringSSL/crypto/fipsmodule/ec/ec_key.c similarity index 84% rename from Sources/BoringSSL/crypto/ec/ec_key.c rename to Sources/BoringSSL/crypto/fipsmodule/ec/ec_key.c index 1a933462d..bba4402bc 100644 --- a/Sources/BoringSSL/crypto/ec/ec_key.c +++ 
b/Sources/BoringSSL/crypto/fipsmodule/ec/ec_key.c @@ -70,6 +70,7 @@ #include #include +#include #include #include #include @@ -77,10 +78,11 @@ #include #include "internal.h" -#include "../internal.h" +#include "../delocate.h" +#include "../../internal.h" -static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT; +DEFINE_STATIC_EX_DATA_CLASS(g_ec_ex_data_class); EC_KEY *EC_KEY_new(void) { return EC_KEY_new_method(NULL); } @@ -106,7 +108,7 @@ EC_KEY *EC_KEY_new_method(const ENGINE *engine) { CRYPTO_new_ex_data(&ret->ex_data); if (ret->ecdsa_meth && ret->ecdsa_meth->init && !ret->ecdsa_meth->init(ret)) { - CRYPTO_free_ex_data(&g_ex_data_class, ret, &ret->ex_data); + CRYPTO_free_ex_data(g_ec_ex_data_class_bss_get(), ret, &ret->ex_data); if (ret->ecdsa_meth) { METHOD_unref(ret->ecdsa_meth); } @@ -150,10 +152,10 @@ void EC_KEY_free(EC_KEY *r) { EC_GROUP_free(r->group); EC_POINT_free(r->pub_key); BN_clear_free(r->priv_key); + BN_free(r->fixed_k); - CRYPTO_free_ex_data(&g_ex_data_class, r, &r->ex_data); + CRYPTO_free_ex_data(g_ec_ex_data_class_bss_get(), r, &r->ex_data); - OPENSSL_cleanse((void *)r, sizeof(EC_KEY)); OPENSSL_free(r); } @@ -162,9 +164,9 @@ EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); return NULL; } - /* Copy the parameters. */ + // Copy the parameters. if (src->group) { - /* TODO(fork): duplicating the group seems wasteful. */ + // TODO(fork): duplicating the group seems wasteful. EC_GROUP_free(dest->group); dest->group = EC_GROUP_dup(src->group); if (dest->group == NULL) { @@ -172,7 +174,7 @@ EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { } } - /* Copy the public key. */ + // Copy the public key. if (src->pub_key && src->group) { EC_POINT_free(dest->pub_key); dest->pub_key = EC_POINT_dup(src->pub_key, src->group); @@ -181,7 +183,7 @@ EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { } } - /* copy the private key */ + // copy the private key if (src->priv_key) { if (dest->priv_key == NULL) { dest->priv_key = BN_new(); @@ -193,19 +195,14 @@ EC_KEY *EC_KEY_copy(EC_KEY *dest, const EC_KEY *src) { return NULL; } } - /* copy method/extra data */ + // copy method/extra data if (src->ecdsa_meth) { METHOD_unref(dest->ecdsa_meth); dest->ecdsa_meth = src->ecdsa_meth; METHOD_ref(dest->ecdsa_meth); } - CRYPTO_free_ex_data(&g_ex_data_class, dest, &dest->ex_data); - if (!CRYPTO_dup_ex_data(&g_ex_data_class, &dest->ex_data, - &src->ex_data)) { - return NULL; - } - /* copy the rest */ + // copy the rest dest->enc_flag = src->enc_flag; dest->conv_form = src->conv_form; @@ -237,13 +234,13 @@ const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key) { return key->group; } int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group) { EC_GROUP_free(key->group); - /* TODO(fork): duplicating the group seems wasteful but see - * |EC_KEY_set_conv_form|. */ + // TODO(fork): duplicating the group seems wasteful but see + // |EC_KEY_set_conv_form|. key->group = EC_GROUP_dup(group); if (key->group == NULL) { return 0; } - /* XXX: |BN_cmp| is not constant time. */ + // XXX: |BN_cmp| is not constant time. if (key->priv_key != NULL && BN_cmp(key->priv_key, EC_GROUP_get0_order(group)) >= 0) { return 0; @@ -256,7 +253,7 @@ const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key) { } int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *priv_key) { - /* XXX: |BN_cmp| is not constant time. */ + // XXX: |BN_cmp| is not constant time. 
if (key->group != NULL && BN_cmp(priv_key, EC_GROUP_get0_order(key->group)) >= 0) { OPENSSL_PUT_ERROR(EC, EC_R_WRONG_ORDER); @@ -312,16 +309,15 @@ int EC_KEY_check_key(const EC_KEY *eckey) { goto err; } - /* testing whether the pub_key is on the elliptic curve */ + // testing whether the pub_key is on the elliptic curve if (!EC_POINT_is_on_curve(eckey->group, eckey->pub_key, ctx)) { OPENSSL_PUT_ERROR(EC, EC_R_POINT_IS_NOT_ON_CURVE); goto err; } - /* in case the priv_key is present : - * check if generator * priv_key == pub_key - */ + // in case the priv_key is present : + // check if generator * priv_key == pub_key if (eckey->priv_key) { - /* XXX: |BN_cmp| is not constant time. */ + // XXX: |BN_cmp| is not constant time. if (BN_cmp(eckey->priv_key, EC_GROUP_get0_order(eckey->group)) >= 0) { OPENSSL_PUT_ERROR(EC, EC_R_WRONG_ORDER); goto err; @@ -345,6 +341,35 @@ int EC_KEY_check_key(const EC_KEY *eckey) { return ok; } +int EC_KEY_check_fips(const EC_KEY *key) { + if (EC_KEY_is_opaque(key)) { + // Opaque keys can't be checked. + OPENSSL_PUT_ERROR(EC, EC_R_PUBLIC_KEY_VALIDATION_FAILED); + return 0; + } + + if (!EC_KEY_check_key(key)) { + return 0; + } + + if (key->priv_key) { + uint8_t data[16] = {0}; + ECDSA_SIG *sig = ECDSA_do_sign(data, sizeof(data), key); +#if defined(BORINGSSL_FIPS_BREAK_ECDSA_PWCT) + data[0] = ~data[0]; +#endif + int ok = sig != NULL && + ECDSA_do_verify(data, sizeof(data), sig, key); + ECDSA_SIG_free(sig); + if (!ok) { + OPENSSL_PUT_ERROR(EC, EC_R_PUBLIC_KEY_VALIDATION_FAILED); + return 0; + } + } + + return 1; +} + int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, BIGNUM *y) { BN_CTX *ctx = NULL; @@ -381,8 +406,8 @@ int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, goto err; } - /* Check if retrieved coordinates match originals: if not values - * are out of range. */ + // Check if retrieved coordinates match originals: if not values + // are out of range. if (BN_cmp(x, tx) || BN_cmp(y, ty)) { OPENSSL_PUT_ERROR(EC, EC_R_COORDINATES_OUT_OF_RANGE); goto err; @@ -425,6 +450,15 @@ int EC_KEY_generate_key(EC_KEY *eckey) { } const BIGNUM *order = EC_GROUP_get0_order(eckey->group); + + // Check that the size of the group order is FIPS compliant (FIPS 186-4 + // B.4.2). + if (BN_num_bits(order) < 160) { + OPENSSL_PUT_ERROR(EC, EC_R_INVALID_GROUP_ORDER); + goto err; + } + + // Generate the private key by testing candidates (FIPS 186-4 B.4.2). 
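Taken together with the surrounding |EC_KEY_generate_key| hunk, the new FIPS helpers boil down to "generate, then self-check". A hedged sketch of the equivalent public-API flow, assuming the usual |EC_KEY_new_by_curve_name| helper; the curve choice is illustrative only:

    #include <openssl/ec.h>
    #include <openssl/ec_key.h>
    #include <openssl/nid.h>

    // Illustrative only: generate a P-256 key and run the standard checks
    // (point on curve, priv * G == pub), mirroring the FIPS helpers above.
    static EC_KEY *generate_checked_key(void) {
      EC_KEY *key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
      if (key == NULL ||
          !EC_KEY_generate_key(key) ||
          !EC_KEY_check_key(key)) {
        EC_KEY_free(key);
        return NULL;
      }
      return key;
    }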
if (!BN_rand_range_ex(priv_key, 1, order)) { goto err; } @@ -457,11 +491,15 @@ int EC_KEY_generate_key(EC_KEY *eckey) { return ok; } +int EC_KEY_generate_key_fips(EC_KEY *eckey) { + return EC_KEY_generate_key(eckey) && EC_KEY_check_fips(eckey); +} + int EC_KEY_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { int index; - if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, dup_func, + if (!CRYPTO_get_ex_new_index(g_ec_ex_data_class_bss_get(), &index, argl, argp, free_func)) { return -1; } diff --git a/Sources/BoringSSL/crypto/ec/ec_montgomery.c b/Sources/BoringSSL/crypto/fipsmodule/ec/ec_montgomery.c similarity index 81% rename from Sources/BoringSSL/crypto/ec/ec_montgomery.c rename to Sources/BoringSSL/crypto/fipsmodule/ec/ec_montgomery.c index 4643fd2c4..6670b84ee 100644 --- a/Sources/BoringSSL/crypto/ec/ec_montgomery.c +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/ec_montgomery.c @@ -72,6 +72,7 @@ #include #include "../bn/internal.h" +#include "../delocate.h" #include "internal.h" @@ -89,32 +90,6 @@ void ec_GFp_mont_group_finish(EC_GROUP *group) { ec_GFp_simple_group_finish(group); } -int ec_GFp_mont_group_copy(EC_GROUP *dest, const EC_GROUP *src) { - BN_MONT_CTX_free(dest->mont); - dest->mont = NULL; - - if (!ec_GFp_simple_group_copy(dest, src)) { - return 0; - } - - if (src->mont != NULL) { - dest->mont = BN_MONT_CTX_new(); - if (dest->mont == NULL) { - return 0; - } - if (!BN_MONT_CTX_copy(dest->mont, src->mont)) { - goto err; - } - } - - return 1; - -err: - BN_MONT_CTX_free(dest->mont); - dest->mont = NULL; - return 0; -} - int ec_GFp_mont_group_set_curve(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { BN_CTX *new_ctx = NULL; @@ -218,7 +193,7 @@ static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, BN_CTX_start(ctx); if (BN_cmp(&point->Z, &group->one) == 0) { - /* |point| is already affine. */ + // |point| is already affine. if (x != NULL && !BN_from_montgomery(x, &point->X, group->mont, ctx)) { goto err; } @@ -226,7 +201,7 @@ static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, goto err; } } else { - /* transform (X, Y, Z) into (x, y) := (X/Z^2, Y/Z^3) */ + // transform (X, Y, Z) into (x, y) := (X/Z^2, Y/Z^3) BIGNUM *Z_1 = BN_CTX_get(ctx); BIGNUM *Z_2 = BN_CTX_get(ctx); @@ -237,18 +212,18 @@ static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, goto err; } - /* The straightforward way to calculate the inverse of a Montgomery-encoded - * value where the result is Montgomery-encoded is: - * - * |BN_from_montgomery| + invert + |BN_to_montgomery|. - * - * This is equivalent, but more efficient, because |BN_from_montgomery| - * is more efficient (at least in theory) than |BN_to_montgomery|, since it - * doesn't have to do the multiplication before the reduction. - * - * Use Fermat's Little Theorem instead of |BN_mod_inverse_odd| since this - * inversion may be done as the final step of private key operations. - * Unfortunately, this is suboptimal for ECDSA verification. */ + // The straightforward way to calculate the inverse of a Montgomery-encoded + // value where the result is Montgomery-encoded is: + // + // |BN_from_montgomery| + invert + |BN_to_montgomery|. 
+ // + // This is equivalent, but more efficient, because |BN_from_montgomery| + // is more efficient (at least in theory) than |BN_to_montgomery|, since it + // doesn't have to do the multiplication before the reduction. + // + // Use Fermat's Little Theorem instead of |BN_mod_inverse_odd| since this + // inversion may be done as the final step of private key operations. + // Unfortunately, this is suboptimal for ECDSA verification. if (!BN_from_montgomery(Z_1, &point->Z, group->mont, ctx) || !BN_from_montgomery(Z_1, Z_1, group->mont, ctx) || !bn_mod_inverse_prime(Z_1, Z_1, &group->field, ctx, group->mont)) { @@ -259,10 +234,10 @@ static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, goto err; } - /* Instead of using |BN_from_montgomery| to convert the |x| coordinate - * and then calling |BN_from_montgomery| again to convert the |y| - * coordinate below, convert the common factor |Z_2| once now, saving one - * reduction. */ + // Instead of using |BN_from_montgomery| to convert the |x| coordinate + // and then calling |BN_from_montgomery| again to convert the |y| + // coordinate below, convert the common factor |Z_2| once now, saving one + // reduction. if (!BN_from_montgomery(Z_2, Z_2, group->mont, ctx)) { goto err; } @@ -289,15 +264,14 @@ static int ec_GFp_mont_point_get_affine_coordinates(const EC_GROUP *group, return ret; } -const EC_METHOD EC_GFp_mont_method = { - ec_GFp_mont_group_init, - ec_GFp_mont_group_finish, - ec_GFp_mont_group_copy, - ec_GFp_mont_group_set_curve, - ec_GFp_mont_point_get_affine_coordinates, - ec_wNAF_mul /* XXX: Not constant time. */, - ec_GFp_mont_field_mul, - ec_GFp_mont_field_sqr, - ec_GFp_mont_field_encode, - ec_GFp_mont_field_decode, -}; +DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_mont_method) { + out->group_init = ec_GFp_mont_group_init; + out->group_finish = ec_GFp_mont_group_finish; + out->group_set_curve = ec_GFp_mont_group_set_curve; + out->point_get_affine_coordinates = ec_GFp_mont_point_get_affine_coordinates; + out->mul = ec_wNAF_mul /* XXX: Not constant time. */; + out->field_mul = ec_GFp_mont_field_mul; + out->field_sqr = ec_GFp_mont_field_sqr; + out->field_encode = ec_GFp_mont_field_encode; + out->field_decode = ec_GFp_mont_field_decode; +} diff --git a/Sources/BoringSSL/crypto/ec/internal.h b/Sources/BoringSSL/crypto/fipsmodule/ec/internal.h similarity index 62% rename from Sources/BoringSSL/crypto/ec/internal.h rename to Sources/BoringSSL/crypto/fipsmodule/ec/internal.h index b3c2a71f1..7374e8b57 100644 --- a/Sources/BoringSSL/crypto/ec/internal.h +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/internal.h @@ -73,92 +73,132 @@ #include #include #include +#include + +#include "../bn/internal.h" #if defined(__cplusplus) extern "C" { #endif +// Cap the size of all field elements and scalars, including custom curves, to +// 66 bytes, large enough to fit secp521r1 and brainpoolP512r1, which appear to +// be the largest fields anyone plausibly uses. +#define EC_MAX_SCALAR_BYTES 66 +#define EC_MAX_SCALAR_WORDS ((66 + BN_BYTES - 1) / BN_BYTES) + +OPENSSL_COMPILE_ASSERT(EC_MAX_SCALAR_WORDS <= BN_SMALL_MAX_WORDS, + bn_small_functions_applicable); + +// An EC_SCALAR is a |BN_num_bits(order)|-bit integer. Only the first +// |order->top| words are used. An |EC_SCALAR| is specific to an |EC_GROUP| and +// must not be mixed between groups. Unless otherwise specified, it is fully +// reduced modulo the |order|. +typedef union { + // bytes is the representation of the scalar in little-endian order. 
+ uint8_t bytes[EC_MAX_SCALAR_BYTES]; + BN_ULONG words[EC_MAX_SCALAR_WORDS]; +} EC_SCALAR; + struct ec_method_st { int (*group_init)(EC_GROUP *); void (*group_finish)(EC_GROUP *); - int (*group_copy)(EC_GROUP *, const EC_GROUP *); int (*group_set_curve)(EC_GROUP *, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *); int (*point_get_affine_coordinates)(const EC_GROUP *, const EC_POINT *, BIGNUM *x, BIGNUM *y, BN_CTX *); - /* Computes |r = g_scalar*generator + p_scalar*p| if |g_scalar| and |p_scalar| - * are both non-null. Computes |r = g_scalar*generator| if |p_scalar| is null. - * Computes |r = p_scalar*p| if g_scalar is null. At least one of |g_scalar| - * and |p_scalar| must be non-null, and |p| must be non-null if |p_scalar| is - * non-null. */ - int (*mul)(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, - const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx); - - /* 'field_mul' and 'field_sqr' can be used by 'add' and 'dbl' so that the - * same implementations of point operations can be used with different - * optimized implementations of expensive field operations: */ + // Computes |r = g_scalar*generator + p_scalar*p| if |g_scalar| and |p_scalar| + // are both non-null. Computes |r = g_scalar*generator| if |p_scalar| is null. + // Computes |r = p_scalar*p| if g_scalar is null. At least one of |g_scalar| + // and |p_scalar| must be non-null, and |p| must be non-null if |p_scalar| is + // non-null. + int (*mul)(const EC_GROUP *group, EC_POINT *r, const EC_SCALAR *g_scalar, + const EC_POINT *p, const EC_SCALAR *p_scalar, BN_CTX *ctx); + + // 'field_mul' and 'field_sqr' can be used by 'add' and 'dbl' so that the + // same implementations of point operations can be used with different + // optimized implementations of expensive field operations: int (*field_mul)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *); int (*field_sqr)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, BN_CTX *); int (*field_encode)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, - BN_CTX *); /* e.g. to Montgomery */ + BN_CTX *); // e.g. to Montgomery int (*field_decode)(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, - BN_CTX *); /* e.g. from Montgomery */ + BN_CTX *); // e.g. from Montgomery } /* EC_METHOD */; -extern const EC_METHOD EC_GFp_mont_method; +const EC_METHOD *EC_GFp_mont_method(void); struct ec_group_st { const EC_METHOD *meth; + // Unlike all other |EC_POINT|s, |generator| does not own |generator->group| + // to avoid a reference cycle. EC_POINT *generator; BIGNUM order; - int curve_name; /* optional NID for named curve */ + int curve_name; // optional NID for named curve + + BN_MONT_CTX *order_mont; // data for ECDSA inverse - const BN_MONT_CTX *mont_data; /* data for ECDSA inverse */ + // The following members are handled by the method functions, + // even if they appear generic - /* The following members are handled by the method functions, - * even if they appear generic */ + BIGNUM field; // For curves over GF(p), this is the modulus. - BIGNUM field; /* For curves over GF(p), this is the modulus. */ + BIGNUM a, b; // Curve coefficients. - BIGNUM a, b; /* Curve coefficients. */ + int a_is_minus3; // enable optimized point arithmetics for special case - int a_is_minus3; /* enable optimized point arithmetics for special case */ + CRYPTO_refcount_t references; - BN_MONT_CTX *mont; /* Montgomery structure. */ + BN_MONT_CTX *mont; // Montgomery structure. - BIGNUM one; /* The value one. */ + BIGNUM one; // The value one. 
} /* EC_GROUP */; struct ec_point_st { - const EC_METHOD *meth; + // group is an owning reference to |group|, unless this is + // |group->generator|. + EC_GROUP *group; BIGNUM X; BIGNUM Y; - BIGNUM Z; /* Jacobian projective coordinates: - * (X, Y, Z) represents (X/Z^2, Y/Z^3) if Z != 0 */ + BIGNUM Z; // Jacobian projective coordinates: + // (X, Y, Z) represents (X/Z^2, Y/Z^3) if Z != 0 } /* EC_POINT */; EC_GROUP *ec_group_new(const EC_METHOD *meth); -int ec_group_copy(EC_GROUP *dest, const EC_GROUP *src); - -/* ec_group_get_mont_data returns a Montgomery context for operations in the - * scalar field of |group|. It may return NULL in the case that |group| is not - * a built-in group. */ -const BN_MONT_CTX *ec_group_get_mont_data(const EC_GROUP *group); - -int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, - const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx); -/* method functions in simple.c */ +// ec_bignum_to_scalar converts |in| to an |EC_SCALAR| and writes it to |*out|. +// |in| must be non-negative and have at most |BN_num_bits(&group->order)| bits. +// It returns one on success and zero on error. It does not ensure |in| is fully +// reduced. +int ec_bignum_to_scalar(const EC_GROUP *group, EC_SCALAR *out, + const BIGNUM *in); + +// ec_random_nonzero_scalar sets |out| to a uniformly selected random value from +// 1 to |group->order| - 1. It returns one on success and zero on error. +int ec_random_nonzero_scalar(const EC_GROUP *group, EC_SCALAR *out, + const uint8_t additional_data[32]); + +// ec_point_mul_scalar sets |r| to generator * |g_scalar| + |p| * +// |p_scalar|. Unlike other functions which take |EC_SCALAR|, |g_scalar| and +// |p_scalar| need not be fully reduced. They need only contain as many bits as +// the order. 
+int ec_point_mul_scalar(const EC_GROUP *group, EC_POINT *r, + const EC_SCALAR *g_scalar, const EC_POINT *p, + const EC_SCALAR *p_scalar, BN_CTX *ctx); + +int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const EC_SCALAR *g_scalar, + const EC_POINT *p, const EC_SCALAR *p_scalar, BN_CTX *ctx); + +// method functions in simple.c int ec_GFp_simple_group_init(EC_GROUP *); void ec_GFp_simple_group_finish(EC_GROUP *); -int ec_GFp_simple_group_copy(EC_GROUP *, const EC_GROUP *); int ec_GFp_simple_group_set_curve(EC_GROUP *, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *); int ec_GFp_simple_group_get_curve(const EC_GROUP *, BIGNUM *p, BIGNUM *a, @@ -166,17 +206,12 @@ int ec_GFp_simple_group_get_curve(const EC_GROUP *, BIGNUM *p, BIGNUM *a, unsigned ec_GFp_simple_group_get_degree(const EC_GROUP *); int ec_GFp_simple_point_init(EC_POINT *); void ec_GFp_simple_point_finish(EC_POINT *); -void ec_GFp_simple_point_clear_finish(EC_POINT *); int ec_GFp_simple_point_copy(EC_POINT *, const EC_POINT *); int ec_GFp_simple_point_set_to_infinity(const EC_GROUP *, EC_POINT *); int ec_GFp_simple_set_Jprojective_coordinates_GFp(const EC_GROUP *, EC_POINT *, const BIGNUM *x, const BIGNUM *y, const BIGNUM *z, BN_CTX *); -int ec_GFp_simple_get_Jprojective_coordinates_GFp(const EC_GROUP *, - const EC_POINT *, BIGNUM *x, - BIGNUM *y, BIGNUM *z, - BN_CTX *); int ec_GFp_simple_point_set_affine_coordinates(const EC_GROUP *, EC_POINT *, const BIGNUM *x, const BIGNUM *y, BN_CTX *); @@ -200,12 +235,11 @@ int ec_GFp_simple_field_mul(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, int ec_GFp_simple_field_sqr(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, BN_CTX *); -/* method functions in montgomery.c */ +// method functions in montgomery.c int ec_GFp_mont_group_init(EC_GROUP *); int ec_GFp_mont_group_set_curve(EC_GROUP *, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *); void ec_GFp_mont_group_finish(EC_GROUP *); -int ec_GFp_mont_group_copy(EC_GROUP *, const EC_GROUP *); int ec_GFp_mont_field_mul(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *); int ec_GFp_mont_field_sqr(const EC_GROUP *, BIGNUM *r, const BIGNUM *a, @@ -222,12 +256,12 @@ int ec_point_set_Jprojective_coordinates_GFp(const EC_GROUP *group, void ec_GFp_nistp_recode_scalar_bits(uint8_t *sign, uint8_t *digit, uint8_t in); -extern const EC_METHOD EC_GFp_nistp224_method; -extern const EC_METHOD EC_GFp_nistp256_method; +const EC_METHOD *EC_GFp_nistp224_method(void); +const EC_METHOD *EC_GFp_nistp256_method(void); -/* EC_GFp_nistz256_method is a GFp method using montgomery multiplication, with - * x86-64 optimized P256. See http://eprint.iacr.org/2013/816. */ -extern const EC_METHOD EC_GFp_nistz256_method; +// EC_GFp_nistz256_method is a GFp method using montgomery multiplication, with +// x86-64 optimized P256. See http://eprint.iacr.org/2013/816. +const EC_METHOD *EC_GFp_nistz256_method(void); struct ec_key_st { EC_GROUP *group; @@ -235,6 +269,10 @@ struct ec_key_st { EC_POINT *pub_key; BIGNUM *priv_key; + // fixed_k may contain a specific value of 'k', to be used in ECDSA signing. + // This is only for the FIPS power-on tests. + BIGNUM *fixed_k; + unsigned int enc_flag; point_conversion_form_t conv_form; @@ -245,32 +283,34 @@ struct ec_key_st { CRYPTO_EX_DATA ex_data; } /* EC_KEY */; -/* curve_data contains data about a built-in elliptic curve. */ -struct curve_data { - /* comment is a human-readable string describing the curve. 
*/ - const char *comment; - /* param_len is the number of bytes needed to store a field element. */ - uint8_t param_len; - /* data points to an array of 6*|param_len| bytes which hold the field - * elements of the following (in big-endian order): prime, a, b, generator x, - * generator y, order. */ - const uint8_t data[]; -}; - struct built_in_curve { int nid; - uint8_t oid[8]; + const uint8_t *oid; uint8_t oid_len; - const struct curve_data *data; + // comment is a human-readable string describing the curve. + const char *comment; + // param_len is the number of bytes needed to store a field element. + uint8_t param_len; + // params points to an array of 6*|param_len| bytes which hold the field + // elements of the following (in big-endian order): prime, a, b, generator x, + // generator y, order. + const uint8_t *params; const EC_METHOD *method; }; -/* OPENSSL_built_in_curves is terminated with an entry where |nid| is - * |NID_undef|. */ -extern const struct built_in_curve OPENSSL_built_in_curves[]; +#define OPENSSL_NUM_BUILT_IN_CURVES 4 + +struct built_in_curves { + struct built_in_curve curves[OPENSSL_NUM_BUILT_IN_CURVES]; +}; + +// OPENSSL_built_in_curves returns a pointer to static information about +// standard curves. The array is terminated with an entry where |nid| is +// |NID_undef|. +const struct built_in_curves *OPENSSL_built_in_curves(void); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_EC_INTERNAL_H */ +#endif // OPENSSL_HEADER_EC_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/ec/oct.c b/Sources/BoringSSL/crypto/fipsmodule/ec/oct.c similarity index 92% rename from Sources/BoringSSL/crypto/ec/oct.c rename to Sources/BoringSSL/crypto/fipsmodule/ec/oct.c index 4e8272da6..7d6239567 100644 --- a/Sources/BoringSSL/crypto/ec/oct.c +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/oct.c @@ -68,7 +68,6 @@ #include #include -#include #include #include "internal.h" @@ -95,12 +94,12 @@ static size_t ec_GFp_simple_point2oct(const EC_GROUP *group, goto err; } - /* ret := required output buffer length */ + // ret := required output buffer length field_len = BN_num_bytes(&group->field); ret = (form == POINT_CONVERSION_COMPRESSED) ? 
1 + field_len : 1 + 2 * field_len; - /* if 'buf' is NULL, just return required length */ + // if 'buf' is NULL, just return required length if (buf != NULL) { if (len < ret) { OPENSSL_PUT_ERROR(EC, EC_R_BUFFER_TOO_SMALL); @@ -252,7 +251,7 @@ static int ec_GFp_simple_oct2point(const EC_GROUP *group, EC_POINT *point, int EC_POINT_oct2point(const EC_GROUP *group, EC_POINT *point, const uint8_t *buf, size_t len, BN_CTX *ctx) { - if (group->meth != point->meth) { + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); return 0; } @@ -262,24 +261,13 @@ int EC_POINT_oct2point(const EC_GROUP *group, EC_POINT *point, size_t EC_POINT_point2oct(const EC_GROUP *group, const EC_POINT *point, point_conversion_form_t form, uint8_t *buf, size_t len, BN_CTX *ctx) { - if (group->meth != point->meth) { + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); return 0; } return ec_GFp_simple_point2oct(group, point, form, buf, len, ctx); } -int EC_POINT_point2cbb(CBB *out, const EC_GROUP *group, const EC_POINT *point, - point_conversion_form_t form, BN_CTX *ctx) { - size_t len = EC_POINT_point2oct(group, point, form, NULL, 0, ctx); - if (len == 0) { - return 0; - } - uint8_t *p; - return CBB_add_space(out, &p, len) && - EC_POINT_point2oct(group, point, form, p, len, ctx) == len; -} - int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, EC_POINT *point, const BIGNUM *x, int y_bit, BN_CTX *ctx) { @@ -311,13 +299,13 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, goto err; } - /* Recover y. We have a Weierstrass equation - * y^2 = x^3 + a*x + b, - * so y is one of the square roots of x^3 + a*x + b. */ + // Recover y. We have a Weierstrass equation + // y^2 = x^3 + a*x + b, + // so y is one of the square roots of x^3 + a*x + b. 
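The recovery steps that follow undo the standard compressed encoding: one byte of y-parity (0x02 or 0x03) followed by x. A small illustrative sketch (the helper name is made up) of round-tripping such a point through the public octet API:

    #include <stddef.h>
    #include <stdint.h>

    #include <openssl/ec.h>

    // Illustrative only: decode a compressed point (0x02/0x03 || x) and
    // re-encode it uncompressed (0x04 || x || y). Returns the uncompressed
    // length, or 0 on error (bad encoding, x not on the curve, small buffer).
    static size_t decompress_point(const EC_GROUP *group,
                                   const uint8_t *in, size_t in_len,
                                   uint8_t *out, size_t out_len) {
      size_t ret = 0;
      EC_POINT *point = EC_POINT_new(group);
      if (point != NULL &&
          EC_POINT_oct2point(group, point, in, in_len, NULL)) {
        ret = EC_POINT_point2oct(group, point, POINT_CONVERSION_UNCOMPRESSED,
                                 out, out_len, NULL);
      }
      EC_POINT_free(point);
      return ret;
    }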
- /* tmp1 := x^3 */ + // tmp1 := x^3 if (group->meth->field_decode == 0) { - /* field_{sqr,mul} work on standard representation */ + // field_{sqr,mul} work on standard representation if (!group->meth->field_sqr(group, tmp2, x, ctx) || !group->meth->field_mul(group, tmp1, tmp2, x, ctx)) { goto err; @@ -329,7 +317,7 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, } } - /* tmp1 := tmp1 + a*x */ + // tmp1 := tmp1 + a*x if (group->a_is_minus3) { if (!BN_mod_lshift1_quick(tmp2, x, &group->field) || !BN_mod_add_quick(tmp2, tmp2, x, &group->field) || @@ -343,7 +331,7 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, goto err; } } else { - /* field_mul works on standard representation */ + // field_mul works on standard representation if (!group->meth->field_mul(group, tmp2, &group->a, x, ctx)) { goto err; } @@ -354,7 +342,7 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, } } - /* tmp1 := tmp1 + b */ + // tmp1 := tmp1 + b if (group->meth->field_decode) { if (!group->meth->field_decode(group, tmp2, &group->b, ctx) || !BN_mod_add_quick(tmp1, tmp1, tmp2, &group->field)) { @@ -408,7 +396,7 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group, int EC_POINT_set_compressed_coordinates_GFp(const EC_GROUP *group, EC_POINT *point, const BIGNUM *x, int y_bit, BN_CTX *ctx) { - if (group->meth != point->meth) { + if (EC_GROUP_cmp(group, point->group, NULL) != 0) { OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS); return 0; } diff --git a/Sources/BoringSSL/crypto/fipsmodule/ec/p224-64.c b/Sources/BoringSSL/crypto/fipsmodule/ec/p224-64.c new file mode 100644 index 000000000..ba25d22a7 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/p224-64.c @@ -0,0 +1,1131 @@ +/* Copyright (c) 2015, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +// A 64-bit implementation of the NIST P-224 elliptic curve point multiplication +// +// Inspired by Daniel J. Bernstein's public domain nistp224 implementation +// and Adam Langley's public domain 64-bit C implementation of curve25519. + +#include + +#if defined(OPENSSL_64_BIT) && !defined(OPENSSL_WINDOWS) && \ + !defined(OPENSSL_SMALL) + +#include +#include +#include +#include + +#include + +#include "internal.h" +#include "../delocate.h" +#include "../../internal.h" + + +// Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3 +// using 64-bit coefficients called 'limbs', and sometimes (for multiplication +// results) as b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 + +// 2^336*b_6 using 128-bit coefficients called 'widelimbs'. A 4-p224_limb +// representation is an 'p224_felem'; a 7-p224_widelimb representation is a +// 'p224_widefelem'. 
Even within felems, bits of adjacent limbs overlap, and we +// don't always reduce the representations: we ensure that inputs to each +// p224_felem multiplication satisfy a_i < 2^60, so outputs satisfy b_i < +// 4*2^60*2^60, and fit into a 128-bit word without overflow. The coefficients +// are then again partially reduced to obtain an p224_felem satisfying a_i < +// 2^57. We only reduce to the unique minimal representation at the end of the +// computation. + +typedef uint64_t p224_limb; +typedef uint128_t p224_widelimb; + +typedef p224_limb p224_felem[4]; +typedef p224_widelimb p224_widefelem[7]; + +// Field element represented as a byte arrary. 28*8 = 224 bits is also the +// group order size for the elliptic curve, and we also use this type for +// scalars for point multiplication. +typedef uint8_t p224_felem_bytearray[28]; + +// Precomputed multiples of the standard generator +// Points are given in coordinates (X, Y, Z) where Z normally is 1 +// (0 for the point at infinity). +// For each field element, slice a_0 is word 0, etc. +// +// The table has 2 * 16 elements, starting with the following: +// index | bits | point +// ------+---------+------------------------------ +// 0 | 0 0 0 0 | 0G +// 1 | 0 0 0 1 | 1G +// 2 | 0 0 1 0 | 2^56G +// 3 | 0 0 1 1 | (2^56 + 1)G +// 4 | 0 1 0 0 | 2^112G +// 5 | 0 1 0 1 | (2^112 + 1)G +// 6 | 0 1 1 0 | (2^112 + 2^56)G +// 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G +// 8 | 1 0 0 0 | 2^168G +// 9 | 1 0 0 1 | (2^168 + 1)G +// 10 | 1 0 1 0 | (2^168 + 2^56)G +// 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G +// 12 | 1 1 0 0 | (2^168 + 2^112)G +// 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G +// 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G +// 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G +// followed by a copy of this with each element multiplied by 2^28. +// +// The reason for this is so that we can clock bits into four different +// locations when doing simple scalar multiplies against the base point, +// and then another four locations using the second 16 elements. 
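The layout described above suggests the digit extraction used during a base-point multiply: one bit is taken from each of four positions 56 bits apart to form a 4-bit table index. A rough illustrative sketch, not the vendored code, assuming a little-endian 28-byte scalar and bit offsets i < 56:

    #include <stdint.h>

    // Illustrative only: bit j of the index selects the 2^(56*j) component,
    // matching the "index | bits" chart above; the second table plays the
    // same role with an extra factor of 2^28.
    static uint8_t comb_index(const uint8_t scalar[28], unsigned i) {
      uint8_t idx = 0;
      for (unsigned j = 0; j < 4; j++) {
        unsigned bit = i + 56 * j;  // positions i, i+56, i+112, i+168
        idx |= (uint8_t)(((scalar[bit / 8] >> (bit % 8)) & 1) << j);
      }
      return idx;
    }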
+static const p224_felem g_p224_pre_comp[2][16][3] = { + {{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, + {{0x3280d6115c1d21, 0xc1d356c2112234, 0x7f321390b94a03, 0xb70e0cbd6bb4bf}, + {0xd5819985007e34, 0x75a05a07476444, 0xfb4c22dfe6cd43, 0xbd376388b5f723}, + {1, 0, 0, 0}}, + {{0xfd9675666ebbe9, 0xbca7664d40ce5e, 0x2242df8d8a2a43, 0x1f49bbb0f99bc5}, + {0x29e0b892dc9c43, 0xece8608436e662, 0xdc858f185310d0, 0x9812dd4eb8d321}, + {1, 0, 0, 0}}, + {{0x6d3e678d5d8eb8, 0x559eed1cb362f1, 0x16e9a3bbce8a3f, 0xeedcccd8c2a748}, + {0xf19f90ed50266d, 0xabf2b4bf65f9df, 0x313865468fafec, 0x5cb379ba910a17}, + {1, 0, 0, 0}}, + {{0x0641966cab26e3, 0x91fb2991fab0a0, 0xefec27a4e13a0b, 0x0499aa8a5f8ebe}, + {0x7510407766af5d, 0x84d929610d5450, 0x81d77aae82f706, 0x6916f6d4338c5b}, + {1, 0, 0, 0}}, + {{0xea95ac3b1f15c6, 0x086000905e82d4, 0xdd323ae4d1c8b1, 0x932b56be7685a3}, + {0x9ef93dea25dbbf, 0x41665960f390f0, 0xfdec76dbe2a8a7, 0x523e80f019062a}, + {1, 0, 0, 0}}, + {{0x822fdd26732c73, 0xa01c83531b5d0f, 0x363f37347c1ba4, 0xc391b45c84725c}, + {0xbbd5e1b2d6ad24, 0xddfbcde19dfaec, 0xc393da7e222a7f, 0x1efb7890ede244}, + {1, 0, 0, 0}}, + {{0x4c9e90ca217da1, 0xd11beca79159bb, 0xff8d33c2c98b7c, 0x2610b39409f849}, + {0x44d1352ac64da0, 0xcdbb7b2c46b4fb, 0x966c079b753c89, 0xfe67e4e820b112}, + {1, 0, 0, 0}}, + {{0xe28cae2df5312d, 0xc71b61d16f5c6e, 0x79b7619a3e7c4c, 0x05c73240899b47}, + {0x9f7f6382c73e3a, 0x18615165c56bda, 0x641fab2116fd56, 0x72855882b08394}, + {1, 0, 0, 0}}, + {{0x0469182f161c09, 0x74a98ca8d00fb5, 0xb89da93489a3e0, 0x41c98768fb0c1d}, + {0xe5ea05fb32da81, 0x3dce9ffbca6855, 0x1cfe2d3fbf59e6, 0x0e5e03408738a7}, + {1, 0, 0, 0}}, + {{0xdab22b2333e87f, 0x4430137a5dd2f6, 0xe03ab9f738beb8, 0xcb0c5d0dc34f24}, + {0x764a7df0c8fda5, 0x185ba5c3fa2044, 0x9281d688bcbe50, 0xc40331df893881}, + {1, 0, 0, 0}}, + {{0xb89530796f0f60, 0xade92bd26909a3, 0x1a0c83fb4884da, 0x1765bf22a5a984}, + {0x772a9ee75db09e, 0x23bc6c67cec16f, 0x4c1edba8b14e2f, 0xe2a215d9611369}, + {1, 0, 0, 0}}, + {{0x571e509fb5efb3, 0xade88696410552, 0xc8ae85fada74fe, 0x6c7e4be83bbde3}, + {0xff9f51160f4652, 0xb47ce2495a6539, 0xa2946c53b582f4, 0x286d2db3ee9a60}, + {1, 0, 0, 0}}, + {{0x40bbd5081a44af, 0x0995183b13926c, 0xbcefba6f47f6d0, 0x215619e9cc0057}, + {0x8bc94d3b0df45e, 0xf11c54a3694f6f, 0x8631b93cdfe8b5, 0xe7e3f4b0982db9}, + {1, 0, 0, 0}}, + {{0xb17048ab3e1c7b, 0xac38f36ff8a1d8, 0x1c29819435d2c6, 0xc813132f4c07e9}, + {0x2891425503b11f, 0x08781030579fea, 0xf5426ba5cc9674, 0x1e28ebf18562bc}, + {1, 0, 0, 0}}, + {{0x9f31997cc864eb, 0x06cd91d28b5e4c, 0xff17036691a973, 0xf1aef351497c58}, + {0xdd1f2d600564ff, 0xdead073b1402db, 0x74a684435bd693, 0xeea7471f962558}, + {1, 0, 0, 0}}}, + {{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, + {{0x9665266dddf554, 0x9613d78b60ef2d, 0xce27a34cdba417, 0xd35ab74d6afc31}, + {0x85ccdd22deb15e, 0x2137e5783a6aab, 0xa141cffd8c93c6, 0x355a1830e90f2d}, + {1, 0, 0, 0}}, + {{0x1a494eadaade65, 0xd6da4da77fe53c, 0xe7992996abec86, 0x65c3553c6090e3}, + {0xfa610b1fb09346, 0xf1c6540b8a4aaf, 0xc51a13ccd3cbab, 0x02995b1b18c28a}, + {1, 0, 0, 0}}, + {{0x7874568e7295ef, 0x86b419fbe38d04, 0xdc0690a7550d9a, 0xd3966a44beac33}, + {0x2b7280ec29132f, 0xbeaa3b6a032df3, 0xdc7dd88ae41200, 0xd25e2513e3a100}, + {1, 0, 0, 0}}, + {{0x924857eb2efafd, 0xac2bce41223190, 0x8edaa1445553fc, 0x825800fd3562d5}, + {0x8d79148ea96621, 0x23a01c3dd9ed8d, 0xaf8b219f9416b5, 0xd8db0cc277daea}, + {1, 0, 0, 0}}, + {{0x76a9c3b1a700f0, 0xe9acd29bc7e691, 0x69212d1a6b0327, 0x6322e97fe154be}, + {0x469fc5465d62aa, 0x8d41ed18883b05, 0x1f8eae66c52b88, 0xe4fcbe9325be51}, + {1, 0, 0, 0}}, + 
{{0x825fdf583cac16, 0x020b857c7b023a, 0x683c17744b0165, 0x14ffd0a2daf2f1}, + {0x323b36184218f9, 0x4944ec4e3b47d4, 0xc15b3080841acf, 0x0bced4b01a28bb}, + {1, 0, 0, 0}}, + {{0x92ac22230df5c4, 0x52f33b4063eda8, 0xcb3f19870c0c93, 0x40064f2ba65233}, + {0xfe16f0924f8992, 0x012da25af5b517, 0x1a57bb24f723a6, 0x06f8bc76760def}, + {1, 0, 0, 0}}, + {{0x4a7084f7817cb9, 0xbcab0738ee9a78, 0x3ec11e11d9c326, 0xdc0fe90e0f1aae}, + {0xcf639ea5f98390, 0x5c350aa22ffb74, 0x9afae98a4047b7, 0x956ec2d617fc45}, + {1, 0, 0, 0}}, + {{0x4306d648c1be6a, 0x9247cd8bc9a462, 0xf5595e377d2f2e, 0xbd1c3caff1a52e}, + {0x045e14472409d0, 0x29f3e17078f773, 0x745a602b2d4f7d, 0x191837685cdfbb}, + {1, 0, 0, 0}}, + {{0x5b6ee254a8cb79, 0x4953433f5e7026, 0xe21faeb1d1def4, 0xc4c225785c09de}, + {0x307ce7bba1e518, 0x31b125b1036db8, 0x47e91868839e8f, 0xc765866e33b9f3}, + {1, 0, 0, 0}}, + {{0x3bfece24f96906, 0x4794da641e5093, 0xde5df64f95db26, 0x297ecd89714b05}, + {0x701bd3ebb2c3aa, 0x7073b4f53cb1d5, 0x13c5665658af16, 0x9895089d66fe58}, + {1, 0, 0, 0}}, + {{0x0fef05f78c4790, 0x2d773633b05d2e, 0x94229c3a951c94, 0xbbbd70df4911bb}, + {0xb2c6963d2c1168, 0x105f47a72b0d73, 0x9fdf6111614080, 0x7b7e94b39e67b0}, + {1, 0, 0, 0}}, + {{0xad1a7d6efbe2b3, 0xf012482c0da69d, 0x6b3bdf12438345, 0x40d7558d7aa4d9}, + {0x8a09fffb5c6d3d, 0x9a356e5d9ffd38, 0x5973f15f4f9b1c, 0xdcd5f59f63c3ea}, + {1, 0, 0, 0}}, + {{0xacf39f4c5ca7ab, 0x4c8071cc5fd737, 0xc64e3602cd1184, 0x0acd4644c9abba}, + {0x6c011a36d8bf6e, 0xfecd87ba24e32a, 0x19f6f56574fad8, 0x050b204ced9405}, + {1, 0, 0, 0}}, + {{0xed4f1cae7d9a96, 0x5ceef7ad94c40a, 0x778e4a3bf3ef9b, 0x7405783dc3b55e}, + {0x32477c61b6e8c6, 0xb46a97570f018b, 0x91176d0a7e95d1, 0x3df90fbc4c7d0e}, + {1, 0, 0, 0}}}}; + +static uint64_t p224_load_u64(const uint8_t in[8]) { + uint64_t ret; + OPENSSL_memcpy(&ret, in, sizeof(ret)); + return ret; +} + +// Helper functions to convert field elements to/from internal representation +static void p224_bin28_to_felem(p224_felem out, const uint8_t in[28]) { + out[0] = p224_load_u64(in) & 0x00ffffffffffffff; + out[1] = p224_load_u64(in + 7) & 0x00ffffffffffffff; + out[2] = p224_load_u64(in + 14) & 0x00ffffffffffffff; + out[3] = p224_load_u64(in + 20) >> 8; +} + +static void p224_felem_to_bin28(uint8_t out[28], const p224_felem in) { + for (size_t i = 0; i < 7; ++i) { + out[i] = in[0] >> (8 * i); + out[i + 7] = in[1] >> (8 * i); + out[i + 14] = in[2] >> (8 * i); + out[i + 21] = in[3] >> (8 * i); + } +} + +// To preserve endianness when using BN_bn2bin and BN_bin2bn +static void p224_flip_endian(uint8_t *out, const uint8_t *in, size_t len) { + for (size_t i = 0; i < len; ++i) { + out[i] = in[len - 1 - i]; + } +} + +// From OpenSSL BIGNUM to internal representation +static int p224_BN_to_felem(p224_felem out, const BIGNUM *bn) { + // BN_bn2bin eats leading zeroes + p224_felem_bytearray b_out; + OPENSSL_memset(b_out, 0, sizeof(b_out)); + size_t num_bytes = BN_num_bytes(bn); + if (num_bytes > sizeof(b_out) || + BN_is_negative(bn)) { + OPENSSL_PUT_ERROR(EC, EC_R_BIGNUM_OUT_OF_RANGE); + return 0; + } + + p224_felem_bytearray b_in; + num_bytes = BN_bn2bin(bn, b_in); + p224_flip_endian(b_out, b_in, num_bytes); + p224_bin28_to_felem(out, b_out); + return 1; +} + +// From internal representation to OpenSSL BIGNUM +static BIGNUM *p224_felem_to_BN(BIGNUM *out, const p224_felem in) { + p224_felem_bytearray b_in, b_out; + p224_felem_to_bin28(b_in, in); + p224_flip_endian(b_out, b_in, sizeof(b_out)); + return BN_bin2bn(b_out, sizeof(b_out), out); +} + +// Field operations, using the internal representation of 
field elements. +// NB! These operations are specific to our point multiplication and cannot be +// expected to be correct in general - e.g., multiplication with a large scalar +// will cause an overflow. + +static void p224_felem_assign(p224_felem out, const p224_felem in) { + out[0] = in[0]; + out[1] = in[1]; + out[2] = in[2]; + out[3] = in[3]; +} + +// Sum two field elements: out += in +static void p224_felem_sum(p224_felem out, const p224_felem in) { + out[0] += in[0]; + out[1] += in[1]; + out[2] += in[2]; + out[3] += in[3]; +} + +// Get negative value: out = -in +// Assumes in[i] < 2^57 +static void p224_felem_neg(p224_felem out, const p224_felem in) { + static const p224_limb two58p2 = + (((p224_limb)1) << 58) + (((p224_limb)1) << 2); + static const p224_limb two58m2 = + (((p224_limb)1) << 58) - (((p224_limb)1) << 2); + static const p224_limb two58m42m2 = + (((p224_limb)1) << 58) - (((p224_limb)1) << 42) - (((p224_limb)1) << 2); + + // Set to 0 mod 2^224-2^96+1 to ensure out > in + out[0] = two58p2 - in[0]; + out[1] = two58m42m2 - in[1]; + out[2] = two58m2 - in[2]; + out[3] = two58m2 - in[3]; +} + +// Subtract field elements: out -= in +// Assumes in[i] < 2^57 +static void p224_felem_diff(p224_felem out, const p224_felem in) { + static const p224_limb two58p2 = + (((p224_limb)1) << 58) + (((p224_limb)1) << 2); + static const p224_limb two58m2 = + (((p224_limb)1) << 58) - (((p224_limb)1) << 2); + static const p224_limb two58m42m2 = + (((p224_limb)1) << 58) - (((p224_limb)1) << 42) - (((p224_limb)1) << 2); + + // Add 0 mod 2^224-2^96+1 to ensure out > in + out[0] += two58p2; + out[1] += two58m42m2; + out[2] += two58m2; + out[3] += two58m2; + + out[0] -= in[0]; + out[1] -= in[1]; + out[2] -= in[2]; + out[3] -= in[3]; +} + +// Subtract in unreduced 128-bit mode: out -= in +// Assumes in[i] < 2^119 +static void p224_widefelem_diff(p224_widefelem out, const p224_widefelem in) { + static const p224_widelimb two120 = ((p224_widelimb)1) << 120; + static const p224_widelimb two120m64 = + (((p224_widelimb)1) << 120) - (((p224_widelimb)1) << 64); + static const p224_widelimb two120m104m64 = (((p224_widelimb)1) << 120) - + (((p224_widelimb)1) << 104) - + (((p224_widelimb)1) << 64); + + // Add 0 mod 2^224-2^96+1 to ensure out > in + out[0] += two120; + out[1] += two120m64; + out[2] += two120m64; + out[3] += two120; + out[4] += two120m104m64; + out[5] += two120m64; + out[6] += two120m64; + + out[0] -= in[0]; + out[1] -= in[1]; + out[2] -= in[2]; + out[3] -= in[3]; + out[4] -= in[4]; + out[5] -= in[5]; + out[6] -= in[6]; +} + +// Subtract in mixed mode: out128 -= in64 +// in[i] < 2^63 +static void p224_felem_diff_128_64(p224_widefelem out, const p224_felem in) { + static const p224_widelimb two64p8 = + (((p224_widelimb)1) << 64) + (((p224_widelimb)1) << 8); + static const p224_widelimb two64m8 = + (((p224_widelimb)1) << 64) - (((p224_widelimb)1) << 8); + static const p224_widelimb two64m48m8 = (((p224_widelimb)1) << 64) - + (((p224_widelimb)1) << 48) - + (((p224_widelimb)1) << 8); + + // Add 0 mod 2^224-2^96+1 to ensure out > in + out[0] += two64p8; + out[1] += two64m48m8; + out[2] += two64m8; + out[3] += two64m8; + + out[0] -= in[0]; + out[1] -= in[1]; + out[2] -= in[2]; + out[3] -= in[3]; +} + +// Multiply a field element by a scalar: out = out * scalar +// The scalars we actually use are small, so results fit without overflow +static void p224_felem_scalar(p224_felem out, const p224_limb scalar) { + out[0] *= scalar; + out[1] *= scalar; + out[2] *= scalar; + out[3] *= scalar; +} + +// Multiply an 
unreduced field element by a scalar: out = out * scalar +// The scalars we actually use are small, so results fit without overflow +static void p224_widefelem_scalar(p224_widefelem out, + const p224_widelimb scalar) { + out[0] *= scalar; + out[1] *= scalar; + out[2] *= scalar; + out[3] *= scalar; + out[4] *= scalar; + out[5] *= scalar; + out[6] *= scalar; +} + +// Square a field element: out = in^2 +static void p224_felem_square(p224_widefelem out, const p224_felem in) { + p224_limb tmp0, tmp1, tmp2; + tmp0 = 2 * in[0]; + tmp1 = 2 * in[1]; + tmp2 = 2 * in[2]; + out[0] = ((p224_widelimb)in[0]) * in[0]; + out[1] = ((p224_widelimb)in[0]) * tmp1; + out[2] = ((p224_widelimb)in[0]) * tmp2 + ((p224_widelimb)in[1]) * in[1]; + out[3] = ((p224_widelimb)in[3]) * tmp0 + ((p224_widelimb)in[1]) * tmp2; + out[4] = ((p224_widelimb)in[3]) * tmp1 + ((p224_widelimb)in[2]) * in[2]; + out[5] = ((p224_widelimb)in[3]) * tmp2; + out[6] = ((p224_widelimb)in[3]) * in[3]; +} + +// Multiply two field elements: out = in1 * in2 +static void p224_felem_mul(p224_widefelem out, const p224_felem in1, + const p224_felem in2) { + out[0] = ((p224_widelimb)in1[0]) * in2[0]; + out[1] = ((p224_widelimb)in1[0]) * in2[1] + ((p224_widelimb)in1[1]) * in2[0]; + out[2] = ((p224_widelimb)in1[0]) * in2[2] + ((p224_widelimb)in1[1]) * in2[1] + + ((p224_widelimb)in1[2]) * in2[0]; + out[3] = ((p224_widelimb)in1[0]) * in2[3] + ((p224_widelimb)in1[1]) * in2[2] + + ((p224_widelimb)in1[2]) * in2[1] + ((p224_widelimb)in1[3]) * in2[0]; + out[4] = ((p224_widelimb)in1[1]) * in2[3] + ((p224_widelimb)in1[2]) * in2[2] + + ((p224_widelimb)in1[3]) * in2[1]; + out[5] = ((p224_widelimb)in1[2]) * in2[3] + ((p224_widelimb)in1[3]) * in2[2]; + out[6] = ((p224_widelimb)in1[3]) * in2[3]; +} + +// Reduce seven 128-bit coefficients to four 64-bit coefficients. 
+// Requires in[i] < 2^126, +// ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 +static void p224_felem_reduce(p224_felem out, const p224_widefelem in) { + static const p224_widelimb two127p15 = + (((p224_widelimb)1) << 127) + (((p224_widelimb)1) << 15); + static const p224_widelimb two127m71 = + (((p224_widelimb)1) << 127) - (((p224_widelimb)1) << 71); + static const p224_widelimb two127m71m55 = (((p224_widelimb)1) << 127) - + (((p224_widelimb)1) << 71) - + (((p224_widelimb)1) << 55); + p224_widelimb output[5]; + + // Add 0 mod 2^224-2^96+1 to ensure all differences are positive + output[0] = in[0] + two127p15; + output[1] = in[1] + two127m71m55; + output[2] = in[2] + two127m71; + output[3] = in[3]; + output[4] = in[4]; + + // Eliminate in[4], in[5], in[6] + output[4] += in[6] >> 16; + output[3] += (in[6] & 0xffff) << 40; + output[2] -= in[6]; + + output[3] += in[5] >> 16; + output[2] += (in[5] & 0xffff) << 40; + output[1] -= in[5]; + + output[2] += output[4] >> 16; + output[1] += (output[4] & 0xffff) << 40; + output[0] -= output[4]; + + // Carry 2 -> 3 -> 4 + output[3] += output[2] >> 56; + output[2] &= 0x00ffffffffffffff; + + output[4] = output[3] >> 56; + output[3] &= 0x00ffffffffffffff; + + // Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 + + // Eliminate output[4] + output[2] += output[4] >> 16; + // output[2] < 2^56 + 2^56 = 2^57 + output[1] += (output[4] & 0xffff) << 40; + output[0] -= output[4]; + + // Carry 0 -> 1 -> 2 -> 3 + output[1] += output[0] >> 56; + out[0] = output[0] & 0x00ffffffffffffff; + + output[2] += output[1] >> 56; + // output[2] < 2^57 + 2^72 + out[1] = output[1] & 0x00ffffffffffffff; + output[3] += output[2] >> 56; + // output[3] <= 2^56 + 2^16 + out[2] = output[2] & 0x00ffffffffffffff; + + // out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, + // out[3] <= 2^56 + 2^16 (due to final carry), + // so out < 2*p + out[3] = output[3]; +} + +// Reduce to unique minimal representation. +// Requires 0 <= in < 2*p (always call p224_felem_reduce first) +static void p224_felem_contract(p224_felem out, const p224_felem in) { + static const int64_t two56 = ((p224_limb)1) << 56; + // 0 <= in < 2*p, p = 2^224 - 2^96 + 1 + // if in > p , reduce in = in - 2^224 + 2^96 - 1 + int64_t tmp[4], a; + tmp[0] = in[0]; + tmp[1] = in[1]; + tmp[2] = in[2]; + tmp[3] = in[3]; + // Case 1: a = 1 iff in >= 2^224 + a = (in[3] >> 56); + tmp[0] -= a; + tmp[1] += a << 40; + tmp[3] &= 0x00ffffffffffffff; + // Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1 and + // the lower part is non-zero + a = ((in[3] & in[2] & (in[1] | 0x000000ffffffffff)) + 1) | + (((int64_t)(in[0] + (in[1] & 0x000000ffffffffff)) - 1) >> 63); + a &= 0x00ffffffffffffff; + // turn a into an all-one mask (if a = 0) or an all-zero mask + a = (a - 1) >> 63; + // subtract 2^224 - 2^96 + 1 if a is all-one + tmp[3] &= a ^ 0xffffffffffffffff; + tmp[2] &= a ^ 0xffffffffffffffff; + tmp[1] &= (a ^ 0xffffffffffffffff) | 0x000000ffffffffff; + tmp[0] -= 1 & a; + + // eliminate negative coefficients: if tmp[0] is negative, tmp[1] must + // be non-zero, so we only need one step + a = tmp[0] >> 63; + tmp[0] += two56 & a; + tmp[1] -= 1 & a; + + // carry 1 -> 2 -> 3 + tmp[2] += tmp[1] >> 56; + tmp[1] &= 0x00ffffffffffffff; + + tmp[3] += tmp[2] >> 56; + tmp[2] &= 0x00ffffffffffffff; + + // Now 0 <= out < p + out[0] = tmp[0]; + out[1] = tmp[1]; + out[2] = tmp[2]; + out[3] = tmp[3]; +} + +// Zero-check: returns 1 if input is 0, and 0 otherwise. 
We know that field +// elements are reduced to in < 2^225, so we only need to check three cases: 0, +// 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2 +static p224_limb p224_felem_is_zero(const p224_felem in) { + p224_limb zero = in[0] | in[1] | in[2] | in[3]; + zero = (((int64_t)(zero)-1) >> 63) & 1; + + p224_limb two224m96p1 = (in[0] ^ 1) | (in[1] ^ 0x00ffff0000000000) | + (in[2] ^ 0x00ffffffffffffff) | + (in[3] ^ 0x00ffffffffffffff); + two224m96p1 = (((int64_t)(two224m96p1)-1) >> 63) & 1; + p224_limb two225m97p2 = (in[0] ^ 2) | (in[1] ^ 0x00fffe0000000000) | + (in[2] ^ 0x00ffffffffffffff) | + (in[3] ^ 0x01ffffffffffffff); + two225m97p2 = (((int64_t)(two225m97p2)-1) >> 63) & 1; + return (zero | two224m96p1 | two225m97p2); +} + +// Invert a field element +// Computation chain copied from djb's code +static void p224_felem_inv(p224_felem out, const p224_felem in) { + p224_felem ftmp, ftmp2, ftmp3, ftmp4; + p224_widefelem tmp; + + p224_felem_square(tmp, in); + p224_felem_reduce(ftmp, tmp); // 2 + p224_felem_mul(tmp, in, ftmp); + p224_felem_reduce(ftmp, tmp); // 2^2 - 1 + p224_felem_square(tmp, ftmp); + p224_felem_reduce(ftmp, tmp); // 2^3 - 2 + p224_felem_mul(tmp, in, ftmp); + p224_felem_reduce(ftmp, tmp); // 2^3 - 1 + p224_felem_square(tmp, ftmp); + p224_felem_reduce(ftmp2, tmp); // 2^4 - 2 + p224_felem_square(tmp, ftmp2); + p224_felem_reduce(ftmp2, tmp); // 2^5 - 4 + p224_felem_square(tmp, ftmp2); + p224_felem_reduce(ftmp2, tmp); // 2^6 - 8 + p224_felem_mul(tmp, ftmp2, ftmp); + p224_felem_reduce(ftmp, tmp); // 2^6 - 1 + p224_felem_square(tmp, ftmp); + p224_felem_reduce(ftmp2, tmp); // 2^7 - 2 + for (size_t i = 0; i < 5; ++i) { // 2^12 - 2^6 + p224_felem_square(tmp, ftmp2); + p224_felem_reduce(ftmp2, tmp); + } + p224_felem_mul(tmp, ftmp2, ftmp); + p224_felem_reduce(ftmp2, tmp); // 2^12 - 1 + p224_felem_square(tmp, ftmp2); + p224_felem_reduce(ftmp3, tmp); // 2^13 - 2 + for (size_t i = 0; i < 11; ++i) { // 2^24 - 2^12 + p224_felem_square(tmp, ftmp3); + p224_felem_reduce(ftmp3, tmp); + } + p224_felem_mul(tmp, ftmp3, ftmp2); + p224_felem_reduce(ftmp2, tmp); // 2^24 - 1 + p224_felem_square(tmp, ftmp2); + p224_felem_reduce(ftmp3, tmp); // 2^25 - 2 + for (size_t i = 0; i < 23; ++i) { // 2^48 - 2^24 + p224_felem_square(tmp, ftmp3); + p224_felem_reduce(ftmp3, tmp); + } + p224_felem_mul(tmp, ftmp3, ftmp2); + p224_felem_reduce(ftmp3, tmp); // 2^48 - 1 + p224_felem_square(tmp, ftmp3); + p224_felem_reduce(ftmp4, tmp); // 2^49 - 2 + for (size_t i = 0; i < 47; ++i) { // 2^96 - 2^48 + p224_felem_square(tmp, ftmp4); + p224_felem_reduce(ftmp4, tmp); + } + p224_felem_mul(tmp, ftmp3, ftmp4); + p224_felem_reduce(ftmp3, tmp); // 2^96 - 1 + p224_felem_square(tmp, ftmp3); + p224_felem_reduce(ftmp4, tmp); // 2^97 - 2 + for (size_t i = 0; i < 23; ++i) { // 2^120 - 2^24 + p224_felem_square(tmp, ftmp4); + p224_felem_reduce(ftmp4, tmp); + } + p224_felem_mul(tmp, ftmp2, ftmp4); + p224_felem_reduce(ftmp2, tmp); // 2^120 - 1 + for (size_t i = 0; i < 6; ++i) { // 2^126 - 2^6 + p224_felem_square(tmp, ftmp2); + p224_felem_reduce(ftmp2, tmp); + } + p224_felem_mul(tmp, ftmp2, ftmp); + p224_felem_reduce(ftmp, tmp); // 2^126 - 1 + p224_felem_square(tmp, ftmp); + p224_felem_reduce(ftmp, tmp); // 2^127 - 2 + p224_felem_mul(tmp, ftmp, in); + p224_felem_reduce(ftmp, tmp); // 2^127 - 1 + for (size_t i = 0; i < 97; ++i) { // 2^224 - 2^97 + p224_felem_square(tmp, ftmp); + p224_felem_reduce(ftmp, tmp); + } + p224_felem_mul(tmp, ftmp, ftmp3); + p224_felem_reduce(out, tmp); // 2^224 - 2^96 - 1 +} + +// Copy in constant time: +// if icopy == 1, 
copy in to out, +// if icopy == 0, copy out to itself. +static void p224_copy_conditional(p224_felem out, const p224_felem in, + p224_limb icopy) { + // icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one + const p224_limb copy = -icopy; + for (size_t i = 0; i < 4; ++i) { + const p224_limb tmp = copy & (in[i] ^ out[i]); + out[i] ^= tmp; + } +} + +// ELLIPTIC CURVE POINT OPERATIONS +// +// Points are represented in Jacobian projective coordinates: +// (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3), +// or to the point at infinity if Z == 0. + +// Double an elliptic curve point: +// (X', Y', Z') = 2 * (X, Y, Z), where +// X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2 +// Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^2 +// Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z +// Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed, +// while x_out == y_in is not (maybe this works, but it's not tested). +static void p224_point_double(p224_felem x_out, p224_felem y_out, + p224_felem z_out, const p224_felem x_in, + const p224_felem y_in, const p224_felem z_in) { + p224_widefelem tmp, tmp2; + p224_felem delta, gamma, beta, alpha, ftmp, ftmp2; + + p224_felem_assign(ftmp, x_in); + p224_felem_assign(ftmp2, x_in); + + // delta = z^2 + p224_felem_square(tmp, z_in); + p224_felem_reduce(delta, tmp); + + // gamma = y^2 + p224_felem_square(tmp, y_in); + p224_felem_reduce(gamma, tmp); + + // beta = x*gamma + p224_felem_mul(tmp, x_in, gamma); + p224_felem_reduce(beta, tmp); + + // alpha = 3*(x-delta)*(x+delta) + p224_felem_diff(ftmp, delta); + // ftmp[i] < 2^57 + 2^58 + 2 < 2^59 + p224_felem_sum(ftmp2, delta); + // ftmp2[i] < 2^57 + 2^57 = 2^58 + p224_felem_scalar(ftmp2, 3); + // ftmp2[i] < 3 * 2^58 < 2^60 + p224_felem_mul(tmp, ftmp, ftmp2); + // tmp[i] < 2^60 * 2^59 * 4 = 2^121 + p224_felem_reduce(alpha, tmp); + + // x' = alpha^2 - 8*beta + p224_felem_square(tmp, alpha); + // tmp[i] < 4 * 2^57 * 2^57 = 2^116 + p224_felem_assign(ftmp, beta); + p224_felem_scalar(ftmp, 8); + // ftmp[i] < 8 * 2^57 = 2^60 + p224_felem_diff_128_64(tmp, ftmp); + // tmp[i] < 2^116 + 2^64 + 8 < 2^117 + p224_felem_reduce(x_out, tmp); + + // z' = (y + z)^2 - gamma - delta + p224_felem_sum(delta, gamma); + // delta[i] < 2^57 + 2^57 = 2^58 + p224_felem_assign(ftmp, y_in); + p224_felem_sum(ftmp, z_in); + // ftmp[i] < 2^57 + 2^57 = 2^58 + p224_felem_square(tmp, ftmp); + // tmp[i] < 4 * 2^58 * 2^58 = 2^118 + p224_felem_diff_128_64(tmp, delta); + // tmp[i] < 2^118 + 2^64 + 8 < 2^119 + p224_felem_reduce(z_out, tmp); + + // y' = alpha*(4*beta - x') - 8*gamma^2 + p224_felem_scalar(beta, 4); + // beta[i] < 4 * 2^57 = 2^59 + p224_felem_diff(beta, x_out); + // beta[i] < 2^59 + 2^58 + 2 < 2^60 + p224_felem_mul(tmp, alpha, beta); + // tmp[i] < 4 * 2^57 * 2^60 = 2^119 + p224_felem_square(tmp2, gamma); + // tmp2[i] < 4 * 2^57 * 2^57 = 2^116 + p224_widefelem_scalar(tmp2, 8); + // tmp2[i] < 8 * 2^116 = 2^119 + p224_widefelem_diff(tmp, tmp2); + // tmp[i] < 2^119 + 2^120 < 2^121 + p224_felem_reduce(y_out, tmp); +} + +// Add two elliptic curve points: +// (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where +// X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 - +// 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 +// Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * +// X_1)^2 - X_3) - +// Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3 +// Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2) +// +// This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0. 
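For readability, the addition formulas above can be restated with the shorthand U1 = Z_2^2*X_1, U2 = Z_1^2*X_2, S1 = Z_2^3*Y_1, S2 = Z_1^3*Y_2, H = U2 - U1 and R = S2 - S1; these names are used only in this note, and the code below keeps U1 in ftmp2, S1 in ftmp4, H in ftmp and R in ftmp3. The formulas then read:

X_3 = R^2 - H^3 - 2*U1*H^2
Y_3 = R*(U1*H^2 - X_3) - S1*H^3
Z_3 = H*Z_1*Z_2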
+ +// This function is not entirely constant-time: it includes a branch for +// checking whether the two input points are equal, (while not equal to the +// point at infinity). This case never happens during single point +// multiplication, so there is no timing leak for ECDH or ECDSA signing. +static void p224_point_add(p224_felem x3, p224_felem y3, p224_felem z3, + const p224_felem x1, const p224_felem y1, + const p224_felem z1, const int mixed, + const p224_felem x2, const p224_felem y2, + const p224_felem z2) { + p224_felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, x_out, y_out, z_out; + p224_widefelem tmp, tmp2; + p224_limb z1_is_zero, z2_is_zero, x_equal, y_equal; + + if (!mixed) { + // ftmp2 = z2^2 + p224_felem_square(tmp, z2); + p224_felem_reduce(ftmp2, tmp); + + // ftmp4 = z2^3 + p224_felem_mul(tmp, ftmp2, z2); + p224_felem_reduce(ftmp4, tmp); + + // ftmp4 = z2^3*y1 + p224_felem_mul(tmp2, ftmp4, y1); + p224_felem_reduce(ftmp4, tmp2); + + // ftmp2 = z2^2*x1 + p224_felem_mul(tmp2, ftmp2, x1); + p224_felem_reduce(ftmp2, tmp2); + } else { + // We'll assume z2 = 1 (special case z2 = 0 is handled later) + + // ftmp4 = z2^3*y1 + p224_felem_assign(ftmp4, y1); + + // ftmp2 = z2^2*x1 + p224_felem_assign(ftmp2, x1); + } + + // ftmp = z1^2 + p224_felem_square(tmp, z1); + p224_felem_reduce(ftmp, tmp); + + // ftmp3 = z1^3 + p224_felem_mul(tmp, ftmp, z1); + p224_felem_reduce(ftmp3, tmp); + + // tmp = z1^3*y2 + p224_felem_mul(tmp, ftmp3, y2); + // tmp[i] < 4 * 2^57 * 2^57 = 2^116 + + // ftmp3 = z1^3*y2 - z2^3*y1 + p224_felem_diff_128_64(tmp, ftmp4); + // tmp[i] < 2^116 + 2^64 + 8 < 2^117 + p224_felem_reduce(ftmp3, tmp); + + // tmp = z1^2*x2 + p224_felem_mul(tmp, ftmp, x2); + // tmp[i] < 4 * 2^57 * 2^57 = 2^116 + + // ftmp = z1^2*x2 - z2^2*x1 + p224_felem_diff_128_64(tmp, ftmp2); + // tmp[i] < 2^116 + 2^64 + 8 < 2^117 + p224_felem_reduce(ftmp, tmp); + + // the formulae are incorrect if the points are equal + // so we check for this and do doubling if this happens + x_equal = p224_felem_is_zero(ftmp); + y_equal = p224_felem_is_zero(ftmp3); + z1_is_zero = p224_felem_is_zero(z1); + z2_is_zero = p224_felem_is_zero(z2); + // In affine coordinates, (X_1, Y_1) == (X_2, Y_2) + if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) { + p224_point_double(x3, y3, z3, x1, y1, z1); + return; + } + + // ftmp5 = z1*z2 + if (!mixed) { + p224_felem_mul(tmp, z1, z2); + p224_felem_reduce(ftmp5, tmp); + } else { + // special case z2 = 0 is handled later + p224_felem_assign(ftmp5, z1); + } + + // z_out = (z1^2*x2 - z2^2*x1)*(z1*z2) + p224_felem_mul(tmp, ftmp, ftmp5); + p224_felem_reduce(z_out, tmp); + + // ftmp = (z1^2*x2 - z2^2*x1)^2 + p224_felem_assign(ftmp5, ftmp); + p224_felem_square(tmp, ftmp); + p224_felem_reduce(ftmp, tmp); + + // ftmp5 = (z1^2*x2 - z2^2*x1)^3 + p224_felem_mul(tmp, ftmp, ftmp5); + p224_felem_reduce(ftmp5, tmp); + + // ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 + p224_felem_mul(tmp, ftmp2, ftmp); + p224_felem_reduce(ftmp2, tmp); + + // tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 + p224_felem_mul(tmp, ftmp4, ftmp5); + // tmp[i] < 4 * 2^57 * 2^57 = 2^116 + + // tmp2 = (z1^3*y2 - z2^3*y1)^2 + p224_felem_square(tmp2, ftmp3); + // tmp2[i] < 4 * 2^57 * 2^57 < 2^116 + + // tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 + p224_felem_diff_128_64(tmp2, ftmp5); + // tmp2[i] < 2^116 + 2^64 + 8 < 2^117 + + // ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 + p224_felem_assign(ftmp5, ftmp2); + p224_felem_scalar(ftmp5, 2); + // ftmp5[i] < 2 * 2^57 = 2^58 + + /* x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 - + 
2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */ + p224_felem_diff_128_64(tmp2, ftmp5); + // tmp2[i] < 2^117 + 2^64 + 8 < 2^118 + p224_felem_reduce(x_out, tmp2); + + // ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out + p224_felem_diff(ftmp2, x_out); + // ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 + + // tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) + p224_felem_mul(tmp2, ftmp3, ftmp2); + // tmp2[i] < 4 * 2^57 * 2^59 = 2^118 + + /* y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) - + z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */ + p224_widefelem_diff(tmp2, tmp); + // tmp2[i] < 2^118 + 2^120 < 2^121 + p224_felem_reduce(y_out, tmp2); + + // the result (x_out, y_out, z_out) is incorrect if one of the inputs is + // the point at infinity, so we need to check for this separately + + // if point 1 is at infinity, copy point 2 to output, and vice versa + p224_copy_conditional(x_out, x2, z1_is_zero); + p224_copy_conditional(x_out, x1, z2_is_zero); + p224_copy_conditional(y_out, y2, z1_is_zero); + p224_copy_conditional(y_out, y1, z2_is_zero); + p224_copy_conditional(z_out, z2, z1_is_zero); + p224_copy_conditional(z_out, z1, z2_is_zero); + p224_felem_assign(x3, x_out); + p224_felem_assign(y3, y_out); + p224_felem_assign(z3, z_out); +} + +// p224_select_point selects the |idx|th point from a precomputation table and +// copies it to out. +static void p224_select_point(const uint64_t idx, size_t size, + const p224_felem pre_comp[/*size*/][3], + p224_felem out[3]) { + p224_limb *outlimbs = &out[0][0]; + OPENSSL_memset(outlimbs, 0, 3 * sizeof(p224_felem)); + + for (size_t i = 0; i < size; i++) { + const p224_limb *inlimbs = &pre_comp[i][0][0]; + uint64_t mask = i ^ idx; + mask |= mask >> 4; + mask |= mask >> 2; + mask |= mask >> 1; + mask &= 1; + mask--; + for (size_t j = 0; j < 4 * 3; j++) { + outlimbs[j] |= inlimbs[j] & mask; + } + } +} + +// p224_get_bit returns the |i|th bit in |in| +static char p224_get_bit(const p224_felem_bytearray in, size_t i) { + if (i >= 224) { + return 0; + } + return (in[i >> 3] >> (i & 7)) & 1; +} + +// Interleaved point multiplication using precomputed point multiples: +// The small point multiples 0*P, 1*P, ..., 16*P are in p_pre_comp, the scalars +// in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple +// of the generator, using certain (large) precomputed multiples in +// g_p224_pre_comp. Output point (X, Y, Z) is stored in x_out, y_out, z_out +static void p224_batch_mul(p224_felem x_out, p224_felem y_out, p224_felem z_out, + const uint8_t *p_scalar, const uint8_t *g_scalar, + const p224_felem p_pre_comp[17][3]) { + p224_felem nq[3], tmp[4]; + uint64_t bits; + uint8_t sign, digit; + + // set nq to the point at infinity + OPENSSL_memset(nq, 0, 3 * sizeof(p224_felem)); + + // Loop over both scalars msb-to-lsb, interleaving additions of multiples of + // the generator (two in each of the last 28 rounds) and additions of p (every + // 5th round). + int skip = 1; // save two point operations in the first round + size_t i = p_scalar != NULL ? 
220 : 27; + for (;;) { + // double + if (!skip) { + p224_point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]); + } + + // add multiples of the generator + if (g_scalar != NULL && i <= 27) { + // first, look 28 bits upwards + bits = p224_get_bit(g_scalar, i + 196) << 3; + bits |= p224_get_bit(g_scalar, i + 140) << 2; + bits |= p224_get_bit(g_scalar, i + 84) << 1; + bits |= p224_get_bit(g_scalar, i + 28); + // select the point to add, in constant time + p224_select_point(bits, 16, g_p224_pre_comp[1], tmp); + + if (!skip) { + p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */, + tmp[0], tmp[1], tmp[2]); + } else { + OPENSSL_memcpy(nq, tmp, 3 * sizeof(p224_felem)); + skip = 0; + } + + // second, look at the current position + bits = p224_get_bit(g_scalar, i + 168) << 3; + bits |= p224_get_bit(g_scalar, i + 112) << 2; + bits |= p224_get_bit(g_scalar, i + 56) << 1; + bits |= p224_get_bit(g_scalar, i); + // select the point to add, in constant time + p224_select_point(bits, 16, g_p224_pre_comp[0], tmp); + p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */, + tmp[0], tmp[1], tmp[2]); + } + + // do other additions every 5 doublings + if (p_scalar != NULL && i % 5 == 0) { + bits = p224_get_bit(p_scalar, i + 4) << 5; + bits |= p224_get_bit(p_scalar, i + 3) << 4; + bits |= p224_get_bit(p_scalar, i + 2) << 3; + bits |= p224_get_bit(p_scalar, i + 1) << 2; + bits |= p224_get_bit(p_scalar, i) << 1; + bits |= p224_get_bit(p_scalar, i - 1); + ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits); + + // select the point to add or subtract + p224_select_point(digit, 17, p_pre_comp, tmp); + p224_felem_neg(tmp[3], tmp[1]); // (X, -Y, Z) is the negative point + p224_copy_conditional(tmp[1], tmp[3], sign); + + if (!skip) { + p224_point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 0 /* mixed */, + tmp[0], tmp[1], tmp[2]); + } else { + OPENSSL_memcpy(nq, tmp, 3 * sizeof(p224_felem)); + skip = 0; + } + } + + if (i == 0) { + break; + } + --i; + } + p224_felem_assign(x_out, nq[0]); + p224_felem_assign(y_out, nq[1]); + p224_felem_assign(z_out, nq[2]); +} + +// Takes the Jacobian coordinates (X, Y, Z) of a point and returns +// (X', Y') = (X/Z^2, Y/Z^3) +static int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group, + const EC_POINT *point, + BIGNUM *x, BIGNUM *y, + BN_CTX *ctx) { + p224_felem z1, z2, x_in, y_in, x_out, y_out; + p224_widefelem tmp; + + if (EC_POINT_is_at_infinity(group, point)) { + OPENSSL_PUT_ERROR(EC, EC_R_POINT_AT_INFINITY); + return 0; + } + + if (!p224_BN_to_felem(x_in, &point->X) || + !p224_BN_to_felem(y_in, &point->Y) || + !p224_BN_to_felem(z1, &point->Z)) { + return 0; + } + + p224_felem_inv(z2, z1); + p224_felem_square(tmp, z2); + p224_felem_reduce(z1, tmp); + p224_felem_mul(tmp, x_in, z1); + p224_felem_reduce(x_in, tmp); + p224_felem_contract(x_out, x_in); + if (x != NULL && !p224_felem_to_BN(x, x_out)) { + OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); + return 0; + } + + p224_felem_mul(tmp, z1, z2); + p224_felem_reduce(z1, tmp); + p224_felem_mul(tmp, y_in, z1); + p224_felem_reduce(y_in, tmp); + p224_felem_contract(y_out, y_in); + if (y != NULL && !p224_felem_to_BN(y, y_out)) { + OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); + return 0; + } + + return 1; +} + +static int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r, + const EC_SCALAR *g_scalar, + const EC_POINT *p, + const EC_SCALAR *p_scalar, BN_CTX *ctx) { + int ret = 0; + BN_CTX *new_ctx = NULL; + BIGNUM *x, *y, *z, *tmp_scalar; + p224_felem p_pre_comp[17][3]; + p224_felem x_in, 
y_in, z_in, x_out, y_out, z_out; + + if (ctx == NULL) { + ctx = BN_CTX_new(); + new_ctx = ctx; + if (ctx == NULL) { + return 0; + } + } + + BN_CTX_start(ctx); + if ((x = BN_CTX_get(ctx)) == NULL || + (y = BN_CTX_get(ctx)) == NULL || + (z = BN_CTX_get(ctx)) == NULL || + (tmp_scalar = BN_CTX_get(ctx)) == NULL) { + goto err; + } + + if (p != NULL && p_scalar != NULL) { + // We treat NULL scalars as 0, and NULL points as points at infinity, i.e., + // they contribute nothing to the linear combination. + OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp)); + // precompute multiples + if (!p224_BN_to_felem(x_out, &p->X) || + !p224_BN_to_felem(y_out, &p->Y) || + !p224_BN_to_felem(z_out, &p->Z)) { + goto err; + } + + p224_felem_assign(p_pre_comp[1][0], x_out); + p224_felem_assign(p_pre_comp[1][1], y_out); + p224_felem_assign(p_pre_comp[1][2], z_out); + + for (size_t j = 2; j <= 16; ++j) { + if (j & 1) { + p224_point_add(p_pre_comp[j][0], p_pre_comp[j][1], p_pre_comp[j][2], + p_pre_comp[1][0], p_pre_comp[1][1], p_pre_comp[1][2], + 0, p_pre_comp[j - 1][0], p_pre_comp[j - 1][1], + p_pre_comp[j - 1][2]); + } else { + p224_point_double(p_pre_comp[j][0], p_pre_comp[j][1], + p_pre_comp[j][2], p_pre_comp[j / 2][0], + p_pre_comp[j / 2][1], p_pre_comp[j / 2][2]); + } + } + } + + p224_batch_mul(x_out, y_out, z_out, + (p != NULL && p_scalar != NULL) ? p_scalar->bytes : NULL, + g_scalar != NULL ? g_scalar->bytes : NULL, + (const p224_felem(*)[3])p_pre_comp); + + // reduce the output to its unique minimal representation + p224_felem_contract(x_in, x_out); + p224_felem_contract(y_in, y_out); + p224_felem_contract(z_in, z_out); + if (!p224_felem_to_BN(x, x_in) || + !p224_felem_to_BN(y, y_in) || + !p224_felem_to_BN(z, z_in)) { + OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); + goto err; + } + ret = ec_point_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx); + +err: + BN_CTX_end(ctx); + BN_CTX_free(new_ctx); + return ret; +} + +DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_nistp224_method) { + out->group_init = ec_GFp_simple_group_init; + out->group_finish = ec_GFp_simple_group_finish; + out->group_set_curve = ec_GFp_simple_group_set_curve; + out->point_get_affine_coordinates = + ec_GFp_nistp224_point_get_affine_coordinates; + out->mul = ec_GFp_nistp224_points_mul; + out->field_mul = ec_GFp_simple_field_mul; + out->field_sqr = ec_GFp_simple_field_sqr; + out->field_encode = NULL; + out->field_decode = NULL; +}; + +#endif // 64_BIT && !WINDOWS && !SMALL diff --git a/Sources/BoringSSL/crypto/ec/p256-64.c b/Sources/BoringSSL/crypto/fipsmodule/ec/p256-64.c similarity index 61% rename from Sources/BoringSSL/crypto/ec/p256-64.c rename to Sources/BoringSSL/crypto/fipsmodule/ec/p256-64.c index 0f32c2eec..d4a8ff681 100644 --- a/Sources/BoringSSL/crypto/ec/p256-64.c +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/p256-64.c @@ -12,12 +12,12 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* A 64-bit implementation of the NIST P-256 elliptic curve point - * multiplication - * - * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c. - * Otherwise based on Emilia's P224 work, which was inspired by my curve25519 - * work which got its smarts from Daniel J. Bernstein's work on the same. */ +// A 64-bit implementation of the NIST P-256 elliptic curve point +// multiplication +// +// OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c. 
+// Otherwise based on Emilia's P224 work, which was inspired by my curve25519 +// work which got its smarts from Daniel J. Bernstein's work on the same. #include @@ -30,76 +30,83 @@ #include +#include "../delocate.h" +#include "../../internal.h" #include "internal.h" -#include "../internal.h" - -typedef uint8_t u8; -typedef uint64_t u64; -typedef int64_t s64; -/* The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We - * can serialise an element of this field into 32 bytes. We call this an - * felem_bytearray. */ -typedef u8 felem_bytearray[32]; - -/* The representation of field elements. - * ------------------------------------ - * - * We represent field elements with either four 128-bit values, eight 128-bit - * values, or four 64-bit values. The field element represented is: - * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p) - * or: - * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p) - * - * 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits - * apart, but are 128-bits wide, the most significant bits of each limb overlap - * with the least significant bits of the next. - * - * A field element with four limbs is an 'felem'. One with eight limbs is a - * 'longfelem' - * - * A field element with four, 64-bit values is called a 'smallfelem'. Small - * values are used as intermediate values before multiplication. */ +// The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We +// can serialise an element of this field into 32 bytes. We call this an +// felem_bytearray. +typedef uint8_t felem_bytearray[32]; + +// The representation of field elements. +// ------------------------------------ +// +// We represent field elements with either four 128-bit values, eight 128-bit +// values, or four 64-bit values. The field element represented is: +// v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p) +// or: +// v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p) +// +// 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits +// apart, but are 128-bits wide, the most significant bits of each limb overlap +// with the least significant bits of the next. +// +// A field element with four limbs is an 'felem'. One with eight limbs is a +// 'longfelem' +// +// A field element with four, 64-bit values is called a 'smallfelem'. Small +// values are used as intermediate values before multiplication. #define NLIMBS 4 typedef uint128_t limb; typedef limb felem[NLIMBS]; typedef limb longfelem[NLIMBS * 2]; -typedef u64 smallfelem[NLIMBS]; +typedef uint64_t smallfelem[NLIMBS]; -/* This is the value of the prime as four 64-bit words, little-endian. */ -static const u64 kPrime[4] = {0xfffffffffffffffful, 0xffffffff, 0, +// This is the value of the prime as four 64-bit words, little-endian. +static const uint64_t kPrime[4] = {0xfffffffffffffffful, 0xffffffff, 0, 0xffffffff00000001ul}; -static const u64 bottom63bits = 0x7ffffffffffffffful; - -/* bin32_to_felem takes a little-endian byte array and converts it into felem - * form. This assumes that the CPU is little-endian. 
*/ -static void bin32_to_felem(felem out, const u8 in[32]) { - out[0] = *((const u64 *)&in[0]); - out[1] = *((const u64 *)&in[8]); - out[2] = *((const u64 *)&in[16]); - out[3] = *((const u64 *)&in[24]); +static const uint64_t bottom63bits = 0x7ffffffffffffffful; + +static uint64_t load_u64(const uint8_t in[8]) { + uint64_t ret; + OPENSSL_memcpy(&ret, in, sizeof(ret)); + return ret; +} + +static void store_u64(uint8_t out[8], uint64_t in) { + OPENSSL_memcpy(out, &in, sizeof(in)); +} + +// bin32_to_felem takes a little-endian byte array and converts it into felem +// form. This assumes that the CPU is little-endian. +static void bin32_to_felem(felem out, const uint8_t in[32]) { + out[0] = load_u64(&in[0]); + out[1] = load_u64(&in[8]); + out[2] = load_u64(&in[16]); + out[3] = load_u64(&in[24]); } -/* smallfelem_to_bin32 takes a smallfelem and serialises into a little endian, - * 32 byte array. This assumes that the CPU is little-endian. */ -static void smallfelem_to_bin32(u8 out[32], const smallfelem in) { - *((u64 *)&out[0]) = in[0]; - *((u64 *)&out[8]) = in[1]; - *((u64 *)&out[16]) = in[2]; - *((u64 *)&out[24]) = in[3]; +// smallfelem_to_bin32 takes a smallfelem and serialises into a little endian, +// 32 byte array. This assumes that the CPU is little-endian. +static void smallfelem_to_bin32(uint8_t out[32], const smallfelem in) { + store_u64(&out[0], in[0]); + store_u64(&out[8], in[1]); + store_u64(&out[16], in[2]); + store_u64(&out[24], in[3]); } -/* To preserve endianness when using BN_bn2bin and BN_bin2bn. */ -static void flip_endian(u8 *out, const u8 *in, size_t len) { +// To preserve endianness when using BN_bn2bin and BN_bin2bn. +static void flip_endian(uint8_t *out, const uint8_t *in, size_t len) { for (size_t i = 0; i < len; ++i) { out[i] = in[len - 1 - i]; } } -/* BN_to_felem converts an OpenSSL BIGNUM into an felem. */ +// BN_to_felem converts an OpenSSL BIGNUM into an felem. static int BN_to_felem(felem out, const BIGNUM *bn) { if (BN_is_negative(bn)) { OPENSSL_PUT_ERROR(EC, EC_R_BIGNUM_OUT_OF_RANGE); @@ -107,7 +114,7 @@ static int BN_to_felem(felem out, const BIGNUM *bn) { } felem_bytearray b_out; - /* BN_bn2bin eats leading zeroes */ + // BN_bn2bin eats leading zeroes OPENSSL_memset(b_out, 0, sizeof(b_out)); size_t num_bytes = BN_num_bytes(bn); if (num_bytes > sizeof(b_out)) { @@ -122,7 +129,7 @@ static int BN_to_felem(felem out, const BIGNUM *bn) { return 1; } -/* felem_to_BN converts an felem into an OpenSSL BIGNUM. */ +// felem_to_BN converts an felem into an OpenSSL BIGNUM. static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in) { felem_bytearray b_in, b_out; smallfelem_to_bin32(b_in, in); @@ -130,7 +137,7 @@ static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in) { return BN_bin2bn(b_out, sizeof(b_out), out); } -/* Field operations. */ +// Field operations. static void felem_assign(felem out, const felem in) { out[0] = in[0]; @@ -139,7 +146,7 @@ static void felem_assign(felem out, const felem in) { out[3] = in[3]; } -/* felem_sum sets out = out + in. */ +// felem_sum sets out = out + in. static void felem_sum(felem out, const felem in) { out[0] += in[0]; out[1] += in[1]; @@ -147,7 +154,7 @@ static void felem_sum(felem out, const felem in) { out[3] += in[3]; } -/* felem_small_sum sets out = out + in. */ +// felem_small_sum sets out = out + in. 
static void felem_small_sum(felem out, const smallfelem in) { out[0] += in[0]; out[1] += in[1]; @@ -155,16 +162,16 @@ static void felem_small_sum(felem out, const smallfelem in) { out[3] += in[3]; } -/* felem_scalar sets out = out * scalar */ -static void felem_scalar(felem out, const u64 scalar) { +// felem_scalar sets out = out * scalar +static void felem_scalar(felem out, const uint64_t scalar) { out[0] *= scalar; out[1] *= scalar; out[2] *= scalar; out[3] *= scalar; } -/* longfelem_scalar sets out = out * scalar */ -static void longfelem_scalar(longfelem out, const u64 scalar) { +// longfelem_scalar sets out = out * scalar +static void longfelem_scalar(longfelem out, const uint64_t scalar) { out[0] *= scalar; out[1] *= scalar; out[2] *= scalar; @@ -179,27 +186,27 @@ static void longfelem_scalar(longfelem out, const u64 scalar) { #define two105 (((limb)1) << 105) #define two105m41p9 ((((limb)1) << 105) - (((limb)1) << 41) + (((limb)1) << 9)) -/* zero105 is 0 mod p */ +// zero105 is 0 mod p static const felem zero105 = {two105m41m9, two105, two105m41p9, two105m41p9}; -/* smallfelem_neg sets |out| to |-small| - * On exit: - * out[i] < out[i] + 2^105 */ +// smallfelem_neg sets |out| to |-small| +// On exit: +// out[i] < out[i] + 2^105 static void smallfelem_neg(felem out, const smallfelem small) { - /* In order to prevent underflow, we subtract from 0 mod p. */ + // In order to prevent underflow, we subtract from 0 mod p. out[0] = zero105[0] - small[0]; out[1] = zero105[1] - small[1]; out[2] = zero105[2] - small[2]; out[3] = zero105[3] - small[3]; } -/* felem_diff subtracts |in| from |out| - * On entry: - * in[i] < 2^104 - * On exit: - * out[i] < out[i] + 2^105. */ +// felem_diff subtracts |in| from |out| +// On entry: +// in[i] < 2^104 +// On exit: +// out[i] < out[i] + 2^105. static void felem_diff(felem out, const felem in) { - /* In order to prevent underflow, we add 0 mod p before subtracting. */ + // In order to prevent underflow, we add 0 mod p before subtracting. out[0] += zero105[0]; out[1] += zero105[1]; out[2] += zero105[2]; @@ -217,17 +224,17 @@ static void felem_diff(felem out, const felem in) { #define two107m43p11 \ ((((limb)1) << 107) - (((limb)1) << 43) + (((limb)1) << 11)) -/* zero107 is 0 mod p */ +// zero107 is 0 mod p static const felem zero107 = {two107m43m11, two107, two107m43p11, two107m43p11}; -/* An alternative felem_diff for larger inputs |in| - * felem_diff_zero107 subtracts |in| from |out| - * On entry: - * in[i] < 2^106 - * On exit: - * out[i] < out[i] + 2^107. */ +// An alternative felem_diff for larger inputs |in| +// felem_diff_zero107 subtracts |in| from |out| +// On entry: +// in[i] < 2^106 +// On exit: +// out[i] < out[i] + 2^107. static void felem_diff_zero107(felem out, const felem in) { - /* In order to prevent underflow, we add 0 mod p before subtracting. */ + // In order to prevent underflow, we add 0 mod p before subtracting. out[0] += zero107[0]; out[1] += zero107[1]; out[2] += zero107[2]; @@ -239,11 +246,11 @@ static void felem_diff_zero107(felem out, const felem in) { out[3] -= in[3]; } -/* longfelem_diff subtracts |in| from |out| - * On entry: - * in[i] < 7*2^67 - * On exit: - * out[i] < out[i] + 2^70 + 2^40. */ +// longfelem_diff subtracts |in| from |out| +// On entry: +// in[i] < 7*2^67 +// On exit: +// out[i] < out[i] + 2^70 + 2^40. 
static void longfelem_diff(longfelem out, const longfelem in) { static const limb two70m8p6 = (((limb)1) << 70) - (((limb)1) << 8) + (((limb)1) << 6); @@ -253,7 +260,7 @@ static void longfelem_diff(longfelem out, const longfelem in) { (((limb)1) << 38) + (((limb)1) << 6); static const limb two70m6 = (((limb)1) << 70) - (((limb)1) << 6); - /* add 0 mod p to avoid underflow */ + // add 0 mod p to avoid underflow out[0] += two70m8p6; out[1] += two70p40; out[2] += two70; @@ -263,7 +270,7 @@ static void longfelem_diff(longfelem out, const longfelem in) { out[6] += two70m6; out[7] += two70m6; - /* in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6 */ + // in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6 out[0] -= in[0]; out[1] -= in[1]; out[2] -= in[2]; @@ -279,87 +286,88 @@ static void longfelem_diff(longfelem out, const longfelem in) { #define two64m46 ((((limb)1) << 64) - (((limb)1) << 46)) #define two64m32 ((((limb)1) << 64) - (((limb)1) << 32)) -/* zero110 is 0 mod p. */ +// zero110 is 0 mod p. static const felem zero110 = {two64m0, two110p32m0, two64m46, two64m32}; -/* felem_shrink converts an felem into a smallfelem. The result isn't quite - * minimal as the value may be greater than p. - * - * On entry: - * in[i] < 2^109 - * On exit: - * out[i] < 2^64. */ +// felem_shrink converts an felem into a smallfelem. The result isn't quite +// minimal as the value may be greater than p. +// +// On entry: +// in[i] < 2^109 +// On exit: +// out[i] < 2^64. static void felem_shrink(smallfelem out, const felem in) { felem tmp; - u64 a, b, mask; - s64 high, low; - static const u64 kPrime3Test = 0x7fffffff00000001ul; /* 2^63 - 2^32 + 1 */ + uint64_t a, b, mask; + int64_t high, low; + static const uint64_t kPrime3Test = + 0x7fffffff00000001ul; // 2^63 - 2^32 + 1 - /* Carry 2->3 */ - tmp[3] = zero110[3] + in[3] + ((u64)(in[2] >> 64)); - /* tmp[3] < 2^110 */ + // Carry 2->3 + tmp[3] = zero110[3] + in[3] + ((uint64_t)(in[2] >> 64)); + // tmp[3] < 2^110 - tmp[2] = zero110[2] + (u64)in[2]; + tmp[2] = zero110[2] + (uint64_t)in[2]; tmp[0] = zero110[0] + in[0]; tmp[1] = zero110[1] + in[1]; - /* tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65 */ + // tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65 - /* We perform two partial reductions where we eliminate the high-word of - * tmp[3]. We don't update the other words till the end. */ - a = tmp[3] >> 64; /* a < 2^46 */ - tmp[3] = (u64)tmp[3]; + // We perform two partial reductions where we eliminate the high-word of + // tmp[3]. We don't update the other words till the end. + a = tmp[3] >> 64; // a < 2^46 + tmp[3] = (uint64_t)tmp[3]; tmp[3] -= a; tmp[3] += ((limb)a) << 32; - /* tmp[3] < 2^79 */ + // tmp[3] < 2^79 b = a; - a = tmp[3] >> 64; /* a < 2^15 */ - b += a; /* b < 2^46 + 2^15 < 2^47 */ - tmp[3] = (u64)tmp[3]; + a = tmp[3] >> 64; // a < 2^15 + b += a; // b < 2^46 + 2^15 < 2^47 + tmp[3] = (uint64_t)tmp[3]; tmp[3] -= a; tmp[3] += ((limb)a) << 32; - /* tmp[3] < 2^64 + 2^47 */ + // tmp[3] < 2^64 + 2^47 - /* This adjusts the other two words to complete the two partial - * reductions. */ + // This adjusts the other two words to complete the two partial + // reductions. tmp[0] += b; tmp[1] -= (((limb)b) << 32); - /* In order to make space in tmp[3] for the carry from 2 -> 3, we - * conditionally subtract kPrime if tmp[3] is large enough. */ + // In order to make space in tmp[3] for the carry from 2 -> 3, we + // conditionally subtract kPrime if tmp[3] is large enough. 
high = tmp[3] >> 64; - /* As tmp[3] < 2^65, high is either 1 or 0 */ + // As tmp[3] < 2^65, high is either 1 or 0 high = ~(high - 1); - /* high is: - * all ones if the high word of tmp[3] is 1 - * all zeros if the high word of tmp[3] if 0 */ + // high is: + // all ones if the high word of tmp[3] is 1 + // all zeros if the high word of tmp[3] if 0 low = tmp[3]; mask = low >> 63; - /* mask is: - * all ones if the MSB of low is 1 - * all zeros if the MSB of low if 0 */ + // mask is: + // all ones if the MSB of low is 1 + // all zeros if the MSB of low if 0 low &= bottom63bits; low -= kPrime3Test; - /* if low was greater than kPrime3Test then the MSB is zero */ + // if low was greater than kPrime3Test then the MSB is zero low = ~low; low >>= 63; - /* low is: - * all ones if low was > kPrime3Test - * all zeros if low was <= kPrime3Test */ + // low is: + // all ones if low was > kPrime3Test + // all zeros if low was <= kPrime3Test mask = (mask & low) | high; tmp[0] -= mask & kPrime[0]; tmp[1] -= mask & kPrime[1]; - /* kPrime[2] is zero, so omitted */ + // kPrime[2] is zero, so omitted tmp[3] -= mask & kPrime[3]; - /* tmp[3] < 2**64 - 2**32 + 1 */ + // tmp[3] < 2**64 - 2**32 + 1 - tmp[1] += ((u64)(tmp[0] >> 64)); - tmp[0] = (u64)tmp[0]; - tmp[2] += ((u64)(tmp[1] >> 64)); - tmp[1] = (u64)tmp[1]; - tmp[3] += ((u64)(tmp[2] >> 64)); - tmp[2] = (u64)tmp[2]; - /* tmp[i] < 2^64 */ + tmp[1] += ((uint64_t)(tmp[0] >> 64)); + tmp[0] = (uint64_t)tmp[0]; + tmp[2] += ((uint64_t)(tmp[1] >> 64)); + tmp[1] = (uint64_t)tmp[1]; + tmp[3] += ((uint64_t)(tmp[2] >> 64)); + tmp[2] = (uint64_t)tmp[2]; + // tmp[i] < 2^64 out[0] = tmp[0]; out[1] = tmp[1]; @@ -367,7 +375,7 @@ static void felem_shrink(smallfelem out, const felem in) { out[3] = tmp[3]; } -/* smallfelem_expand converts a smallfelem to an felem */ +// smallfelem_expand converts a smallfelem to an felem static void smallfelem_expand(felem out, const smallfelem in) { out[0] = in[0]; out[1] = in[1]; @@ -375,14 +383,14 @@ static void smallfelem_expand(felem out, const smallfelem in) { out[3] = in[3]; } -/* smallfelem_square sets |out| = |small|^2 - * On entry: - * small[i] < 2^64 - * On exit: - * out[i] < 7 * 2^64 < 2^67 */ +// smallfelem_square sets |out| = |small|^2 +// On entry: +// small[i] < 2^64 +// On exit: +// out[i] < 7 * 2^64 < 2^67 static void smallfelem_square(longfelem out, const smallfelem small) { limb a; - u64 high, low; + uint64_t high, low; a = ((uint128_t)small[0]) * small[0]; low = a; @@ -451,27 +459,27 @@ static void smallfelem_square(longfelem out, const smallfelem small) { out[7] = high; } -/*felem_square sets |out| = |in|^2 - * On entry: - * in[i] < 2^109 - * On exit: - * out[i] < 7 * 2^64 < 2^67. */ +//felem_square sets |out| = |in|^2 +// On entry: +// in[i] < 2^109 +// On exit: +// out[i] < 7 * 2^64 < 2^67. static void felem_square(longfelem out, const felem in) { - u64 small[4]; + uint64_t small[4]; felem_shrink(small, in); smallfelem_square(out, small); } -/* smallfelem_mul sets |out| = |small1| * |small2| - * On entry: - * small1[i] < 2^64 - * small2[i] < 2^64 - * On exit: - * out[i] < 7 * 2^64 < 2^67. */ +// smallfelem_mul sets |out| = |small1| * |small2| +// On entry: +// small1[i] < 2^64 +// small2[i] < 2^64 +// On exit: +// out[i] < 7 * 2^64 < 2^67. 
static void smallfelem_mul(longfelem out, const smallfelem small1, const smallfelem small2) { limb a; - u64 high, low; + uint64_t high, low; a = ((uint128_t)small1[0]) * small2[0]; low = a; @@ -570,12 +578,12 @@ static void smallfelem_mul(longfelem out, const smallfelem small1, out[7] = high; } -/* felem_mul sets |out| = |in1| * |in2| - * On entry: - * in1[i] < 2^109 - * in2[i] < 2^109 - * On exit: - * out[i] < 7 * 2^64 < 2^67 */ +// felem_mul sets |out| = |in1| * |in2| +// On entry: +// in1[i] < 2^109 +// in2[i] < 2^109 +// On exit: +// out[i] < 7 * 2^64 < 2^67 static void felem_mul(longfelem out, const felem in1, const felem in2) { smallfelem small1, small2; felem_shrink(small1, in1); @@ -583,12 +591,12 @@ static void felem_mul(longfelem out, const felem in1, const felem in2) { smallfelem_mul(out, small1, small2); } -/* felem_small_mul sets |out| = |small1| * |in2| - * On entry: - * small1[i] < 2^64 - * in2[i] < 2^109 - * On exit: - * out[i] < 7 * 2^64 < 2^67 */ +// felem_small_mul sets |out| = |small1| * |in2| +// On entry: +// small1[i] < 2^64 +// in2[i] < 2^109 +// On exit: +// out[i] < 7 * 2^64 < 2^67 static void felem_small_mul(longfelem out, const smallfelem small1, const felem in2) { smallfelem small2; @@ -600,24 +608,24 @@ static void felem_small_mul(longfelem out, const smallfelem small1, #define two100 (((limb)1) << 100) #define two100m36p4 ((((limb)1) << 100) - (((limb)1) << 36) + (((limb)1) << 4)) -/* zero100 is 0 mod p */ +// zero100 is 0 mod p static const felem zero100 = {two100m36m4, two100, two100m36p4, two100m36p4}; -/* Internal function for the different flavours of felem_reduce. - * felem_reduce_ reduces the higher coefficients in[4]-in[7]. - * On entry: - * out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7] - * out[1] >= in[7] + 2^32*in[4] - * out[2] >= in[5] + 2^32*in[5] - * out[3] >= in[4] + 2^32*in[5] + 2^32*in[6] - * On exit: - * out[0] <= out[0] + in[4] + 2^32*in[5] - * out[1] <= out[1] + in[5] + 2^33*in[6] - * out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7] - * out[3] <= out[3] + 2^32*in[4] + 3*in[7] */ +// Internal function for the different flavours of felem_reduce. +// felem_reduce_ reduces the higher coefficients in[4]-in[7]. 
+// On entry: +// out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7] +// out[1] >= in[7] + 2^32*in[4] +// out[2] >= in[5] + 2^32*in[5] +// out[3] >= in[4] + 2^32*in[5] + 2^32*in[6] +// On exit: +// out[0] <= out[0] + in[4] + 2^32*in[5] +// out[1] <= out[1] + in[5] + 2^33*in[6] +// out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7] +// out[3] <= out[3] + 2^32*in[4] + 3*in[7] static void felem_reduce_(felem out, const longfelem in) { int128_t c; - /* combine common terms from below */ + // combine common terms from below c = in[4] + (in[5] << 32); out[0] += c; out[3] -= c; @@ -626,35 +634,35 @@ static void felem_reduce_(felem out, const longfelem in) { out[1] += c; out[2] -= c; - /* the remaining terms */ - /* 256: [(0,1),(96,-1),(192,-1),(224,1)] */ + // the remaining terms + // 256: [(0,1),(96,-1),(192,-1),(224,1)] out[1] -= (in[4] << 32); out[3] += (in[4] << 32); - /* 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)] */ + // 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)] out[2] -= (in[5] << 32); - /* 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)] */ + // 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)] out[0] -= in[6]; out[0] -= (in[6] << 32); out[1] += (in[6] << 33); out[2] += (in[6] * 2); out[3] -= (in[6] << 32); - /* 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)] */ + // 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)] out[0] -= in[7]; out[0] -= (in[7] << 32); out[2] += (in[7] << 33); out[3] += (in[7] * 3); } -/* felem_reduce converts a longfelem into an felem. - * To be called directly after felem_square or felem_mul. - * On entry: - * in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64 - * in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2*64 - * On exit: - * out[i] < 2^101 */ +// felem_reduce converts a longfelem into an felem. +// To be called directly after felem_square or felem_mul. +// On entry: +// in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64 +// in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2*64 +// On exit: +// out[i] < 2^101 static void felem_reduce(felem out, const longfelem in) { out[0] = zero100[0] + in[0]; out[1] = zero100[1] + in[1]; @@ -663,22 +671,22 @@ static void felem_reduce(felem out, const longfelem in) { felem_reduce_(out, in); - /* out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0 - * out[1] > 2^100 - 2^64 - 7*2^96 > 0 - * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0 - * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0 - * - * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101 - * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101 - * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101 - * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101 */ + // out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0 + // out[1] > 2^100 - 2^64 - 7*2^96 > 0 + // out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0 + // out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0 + // + // out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101 + // out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101 + // out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101 + // out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101 } -/* felem_reduce_zero105 converts a larger longfelem into an felem. - * On entry: - * in[0] < 2^71 - * On exit: - * out[i] < 2^106 */ +// felem_reduce_zero105 converts a larger longfelem into an felem. 
+// On entry: +// in[0] < 2^71 +// On exit: +// out[i] < 2^106 static void felem_reduce_zero105(felem out, const longfelem in) { out[0] = zero105[0] + in[0]; out[1] = zero105[1] + in[1]; @@ -687,48 +695,48 @@ static void felem_reduce_zero105(felem out, const longfelem in) { felem_reduce_(out, in); - /* out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0 - * out[1] > 2^105 - 2^71 - 2^103 > 0 - * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0 - * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0 - * - * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 - * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 - * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106 - * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106 */ + // out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0 + // out[1] > 2^105 - 2^71 - 2^103 > 0 + // out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0 + // out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0 + // + // out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 + // out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106 + // out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106 + // out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106 } -/* subtract_u64 sets *result = *result - v and *carry to one if the - * subtraction underflowed. */ -static void subtract_u64(u64 *result, u64 *carry, u64 v) { +// subtract_u64 sets *result = *result - v and *carry to one if the +// subtraction underflowed. +static void subtract_u64(uint64_t *result, uint64_t *carry, uint64_t v) { uint128_t r = *result; r -= v; *carry = (r >> 64) & 1; - *result = (u64)r; + *result = (uint64_t)r; } -/* felem_contract converts |in| to its unique, minimal representation. On - * entry: in[i] < 2^109. */ +// felem_contract converts |in| to its unique, minimal representation. On +// entry: in[i] < 2^109. static void felem_contract(smallfelem out, const felem in) { - u64 all_equal_so_far = 0, result = 0; + uint64_t all_equal_so_far = 0, result = 0; felem_shrink(out, in); - /* small is minimal except that the value might be > p */ + // small is minimal except that the value might be > p all_equal_so_far--; - /* We are doing a constant time test if out >= kPrime. We need to compare - * each u64, from most-significant to least significant. For each one, if - * all words so far have been equal (m is all ones) then a non-equal - * result is the answer. Otherwise we continue. */ + // We are doing a constant time test if out >= kPrime. We need to compare + // each uint64_t, from most-significant to least significant. For each one, if + // all words so far have been equal (m is all ones) then a non-equal + // result is the answer. Otherwise we continue. for (size_t i = 3; i < 4; i--) { - u64 equal; + uint64_t equal; uint128_t a = ((uint128_t)kPrime[i]) - out[i]; - /* if out[i] > kPrime[i] then a will underflow and the high 64-bits - * will all be set. */ - result |= all_equal_so_far & ((u64)(a >> 64)); + // if out[i] > kPrime[i] then a will underflow and the high 64-bits + // will all be set. + result |= all_equal_so_far & ((uint64_t)(a >> 64)); - /* if kPrime[i] == out[i] then |equal| will be all zeros and the - * decrement will make it all ones. */ + // if kPrime[i] == out[i] then |equal| will be all zeros and the + // decrement will make it all ones. 
equal = kPrime[i] ^ out[i]; equal--; equal &= equal << 32; @@ -737,17 +745,17 @@ static void felem_contract(smallfelem out, const felem in) { equal &= equal << 4; equal &= equal << 2; equal &= equal << 1; - equal = ((s64)equal) >> 63; + equal = ((int64_t)equal) >> 63; all_equal_so_far &= equal; } - /* if all_equal_so_far is still all ones then the two values are equal - * and so out >= kPrime is true. */ + // if all_equal_so_far is still all ones then the two values are equal + // and so out >= kPrime is true. result |= all_equal_so_far; - /* if out >= kPrime then we subtract kPrime. */ - u64 carry; + // if out >= kPrime then we subtract kPrime. + uint64_t carry; subtract_u64(&out[0], &carry, result & kPrime[0]); subtract_u64(&out[1], &carry, carry); subtract_u64(&out[2], &carry, carry); @@ -763,15 +771,15 @@ static void felem_contract(smallfelem out, const felem in) { subtract_u64(&out[3], &carry, result & kPrime[3]); } -/* felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0 - * otherwise. - * On entry: - * small[i] < 2^64 */ +// felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0 +// otherwise. +// On entry: +// small[i] < 2^64 static limb smallfelem_is_zero(const smallfelem small) { limb result; - u64 is_p; + uint64_t is_p; - u64 is_zero = small[0] | small[1] | small[2] | small[3]; + uint64_t is_zero = small[0] | small[1] | small[2] | small[3]; is_zero--; is_zero &= is_zero << 32; is_zero &= is_zero << 16; @@ -779,7 +787,7 @@ static limb smallfelem_is_zero(const smallfelem small) { is_zero &= is_zero << 4; is_zero &= is_zero << 2; is_zero &= is_zero << 1; - is_zero = ((s64)is_zero) >> 63; + is_zero = ((int64_t)is_zero) >> 63; is_p = (small[0] ^ kPrime[0]) | (small[1] ^ kPrime[1]) | (small[2] ^ kPrime[2]) | (small[3] ^ kPrime[3]); @@ -790,7 +798,7 @@ static limb smallfelem_is_zero(const smallfelem small) { is_p &= is_p << 4; is_p &= is_p << 2; is_p &= is_p << 1; - is_p = ((s64)is_p) >> 63; + is_p = ((int64_t)is_p) >> 63; is_zero |= is_p; @@ -799,118 +807,118 @@ static limb smallfelem_is_zero(const smallfelem small) { return result; } -/* felem_inv calculates |out| = |in|^{-1} - * - * Based on Fermat's Little Theorem: - * a^p = a (mod p) - * a^{p-1} = 1 (mod p) - * a^{p-2} = a^{-1} (mod p) */ +// felem_inv calculates |out| = |in|^{-1} +// +// Based on Fermat's Little Theorem: +// a^p = a (mod p) +// a^{p-1} = 1 (mod p) +// a^{p-2} = a^{-1} (mod p) static void felem_inv(felem out, const felem in) { felem ftmp, ftmp2; - /* each e_I will hold |in|^{2^I - 1} */ + // each e_I will hold |in|^{2^I - 1} felem e2, e4, e8, e16, e32, e64; longfelem tmp; felem_square(tmp, in); - felem_reduce(ftmp, tmp); /* 2^1 */ + felem_reduce(ftmp, tmp); // 2^1 felem_mul(tmp, in, ftmp); - felem_reduce(ftmp, tmp); /* 2^2 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^2 - 2^0 felem_assign(e2, ftmp); felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^3 - 2^1 */ + felem_reduce(ftmp, tmp); // 2^3 - 2^1 felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^4 - 2^2 */ + felem_reduce(ftmp, tmp); // 2^4 - 2^2 felem_mul(tmp, ftmp, e2); - felem_reduce(ftmp, tmp); /* 2^4 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^4 - 2^0 felem_assign(e4, ftmp); felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^5 - 2^1 */ + felem_reduce(ftmp, tmp); // 2^5 - 2^1 felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^6 - 2^2 */ + felem_reduce(ftmp, tmp); // 2^6 - 2^2 felem_square(tmp, ftmp); - felem_reduce(ftmp, tmp); /* 2^7 - 2^3 */ + felem_reduce(ftmp, tmp); // 2^7 - 2^3 felem_square(tmp, 
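felem_inv, defined next, evaluates a^{p-2} with an addition chain of squarings and multiplications tuned to the P-256 prime. As a minimal sketch of the underlying idea — illustrative only, not code from this patch, and deliberately not constant-time — a generic square-and-multiply exponentiation computes a modular inverse for any small prime p:

#include <stdint.h>

// Illustrative sketch: Fermat inversion a^(p-2) mod p by square-and-multiply
// for small p. felem_inv below reaches the same result with a fixed addition
// chain and constant-time field arithmetic instead.
static uint32_t mulmod32(uint32_t a, uint32_t b, uint32_t p) {
  return (uint32_t)(((uint64_t)a * b) % p);
}

static uint32_t fermat_inverse(uint32_t a, uint32_t p) {
  // Requires p prime and a not divisible by p.
  uint32_t result = 1, base = a % p, e = p - 2;
  while (e != 0) {
    if (e & 1) {
      result = mulmod32(result, base, p);
    }
    base = mulmod32(base, base, p);
    e >>= 1;
  }
  return result;  // a^(p-2) == a^(-1) (mod p)
}

For example, fermat_inverse(3, 17) returns 6, since 3 * 6 == 1 (mod 17).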
ftmp); - felem_reduce(ftmp, tmp); /* 2^8 - 2^4 */ + felem_reduce(ftmp, tmp); // 2^8 - 2^4 felem_mul(tmp, ftmp, e4); - felem_reduce(ftmp, tmp); /* 2^8 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^8 - 2^0 felem_assign(e8, ftmp); for (size_t i = 0; i < 8; i++) { felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - } /* 2^16 - 2^8 */ + } // 2^16 - 2^8 felem_mul(tmp, ftmp, e8); - felem_reduce(ftmp, tmp); /* 2^16 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^16 - 2^0 felem_assign(e16, ftmp); for (size_t i = 0; i < 16; i++) { felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - } /* 2^32 - 2^16 */ + } // 2^32 - 2^16 felem_mul(tmp, ftmp, e16); - felem_reduce(ftmp, tmp); /* 2^32 - 2^0 */ + felem_reduce(ftmp, tmp); // 2^32 - 2^0 felem_assign(e32, ftmp); for (size_t i = 0; i < 32; i++) { felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - } /* 2^64 - 2^32 */ + } // 2^64 - 2^32 felem_assign(e64, ftmp); felem_mul(tmp, ftmp, in); - felem_reduce(ftmp, tmp); /* 2^64 - 2^32 + 2^0 */ + felem_reduce(ftmp, tmp); // 2^64 - 2^32 + 2^0 for (size_t i = 0; i < 192; i++) { felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - } /* 2^256 - 2^224 + 2^192 */ + } // 2^256 - 2^224 + 2^192 felem_mul(tmp, e64, e32); - felem_reduce(ftmp2, tmp); /* 2^64 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^64 - 2^0 for (size_t i = 0; i < 16; i++) { felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); - } /* 2^80 - 2^16 */ + } // 2^80 - 2^16 felem_mul(tmp, ftmp2, e16); - felem_reduce(ftmp2, tmp); /* 2^80 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^80 - 2^0 for (size_t i = 0; i < 8; i++) { felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); - } /* 2^88 - 2^8 */ + } // 2^88 - 2^8 felem_mul(tmp, ftmp2, e8); - felem_reduce(ftmp2, tmp); /* 2^88 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^88 - 2^0 for (size_t i = 0; i < 4; i++) { felem_square(tmp, ftmp2); felem_reduce(ftmp2, tmp); - } /* 2^92 - 2^4 */ + } // 2^92 - 2^4 felem_mul(tmp, ftmp2, e4); - felem_reduce(ftmp2, tmp); /* 2^92 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^92 - 2^0 felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^93 - 2^1 */ + felem_reduce(ftmp2, tmp); // 2^93 - 2^1 felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^94 - 2^2 */ + felem_reduce(ftmp2, tmp); // 2^94 - 2^2 felem_mul(tmp, ftmp2, e2); - felem_reduce(ftmp2, tmp); /* 2^94 - 2^0 */ + felem_reduce(ftmp2, tmp); // 2^94 - 2^0 felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^95 - 2^1 */ + felem_reduce(ftmp2, tmp); // 2^95 - 2^1 felem_square(tmp, ftmp2); - felem_reduce(ftmp2, tmp); /* 2^96 - 2^2 */ + felem_reduce(ftmp2, tmp); // 2^96 - 2^2 felem_mul(tmp, ftmp2, in); - felem_reduce(ftmp2, tmp); /* 2^96 - 3 */ + felem_reduce(ftmp2, tmp); // 2^96 - 3 felem_mul(tmp, ftmp2, ftmp); - felem_reduce(out, tmp); /* 2^256 - 2^224 + 2^192 + 2^96 - 3 */ + felem_reduce(out, tmp); // 2^256 - 2^224 + 2^192 + 2^96 - 3 } -/* Group operations - * ---------------- - * - * Building on top of the field operations we have the operations on the - * elliptic curve group itself. Points on the curve are represented in Jacobian - * coordinates. */ - -/* point_double calculates 2*(x_in, y_in, z_in) - * - * The method is taken from: - * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b - * - * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed. - * while x_out == y_in is not (maybe this works, but it's not tested). */ +// Group operations +// ---------------- +// +// Building on top of the field operations we have the operations on the +// elliptic curve group itself. 
Points on the curve are represented in Jacobian +// coordinates. + +// point_double calculates 2*(x_in, y_in, z_in) +// +// The method is taken from: +// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b +// +// Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed. +// while x_out == y_in is not (maybe this works, but it's not tested). static void point_double(felem x_out, felem y_out, felem z_out, const felem x_in, const felem y_in, const felem z_in) { longfelem tmp, tmp2; @@ -918,77 +926,77 @@ static void point_double(felem x_out, felem y_out, felem z_out, smallfelem small1, small2; felem_assign(ftmp, x_in); - /* ftmp[i] < 2^106 */ + // ftmp[i] < 2^106 felem_assign(ftmp2, x_in); - /* ftmp2[i] < 2^106 */ + // ftmp2[i] < 2^106 - /* delta = z^2 */ + // delta = z^2 felem_square(tmp, z_in); felem_reduce(delta, tmp); - /* delta[i] < 2^101 */ + // delta[i] < 2^101 - /* gamma = y^2 */ + // gamma = y^2 felem_square(tmp, y_in); felem_reduce(gamma, tmp); - /* gamma[i] < 2^101 */ + // gamma[i] < 2^101 felem_shrink(small1, gamma); - /* beta = x*gamma */ + // beta = x*gamma felem_small_mul(tmp, small1, x_in); felem_reduce(beta, tmp); - /* beta[i] < 2^101 */ + // beta[i] < 2^101 - /* alpha = 3*(x-delta)*(x+delta) */ + // alpha = 3*(x-delta)*(x+delta) felem_diff(ftmp, delta); - /* ftmp[i] < 2^105 + 2^106 < 2^107 */ + // ftmp[i] < 2^105 + 2^106 < 2^107 felem_sum(ftmp2, delta); - /* ftmp2[i] < 2^105 + 2^106 < 2^107 */ + // ftmp2[i] < 2^105 + 2^106 < 2^107 felem_scalar(ftmp2, 3); - /* ftmp2[i] < 3 * 2^107 < 2^109 */ + // ftmp2[i] < 3 * 2^107 < 2^109 felem_mul(tmp, ftmp, ftmp2); felem_reduce(alpha, tmp); - /* alpha[i] < 2^101 */ + // alpha[i] < 2^101 felem_shrink(small2, alpha); - /* x' = alpha^2 - 8*beta */ + // x' = alpha^2 - 8*beta smallfelem_square(tmp, small2); felem_reduce(x_out, tmp); felem_assign(ftmp, beta); felem_scalar(ftmp, 8); - /* ftmp[i] < 8 * 2^101 = 2^104 */ + // ftmp[i] < 8 * 2^101 = 2^104 felem_diff(x_out, ftmp); - /* x_out[i] < 2^105 + 2^101 < 2^106 */ + // x_out[i] < 2^105 + 2^101 < 2^106 - /* z' = (y + z)^2 - gamma - delta */ + // z' = (y + z)^2 - gamma - delta felem_sum(delta, gamma); - /* delta[i] < 2^101 + 2^101 = 2^102 */ + // delta[i] < 2^101 + 2^101 = 2^102 felem_assign(ftmp, y_in); felem_sum(ftmp, z_in); - /* ftmp[i] < 2^106 + 2^106 = 2^107 */ + // ftmp[i] < 2^106 + 2^106 = 2^107 felem_square(tmp, ftmp); felem_reduce(z_out, tmp); felem_diff(z_out, delta); - /* z_out[i] < 2^105 + 2^101 < 2^106 */ + // z_out[i] < 2^105 + 2^101 < 2^106 - /* y' = alpha*(4*beta - x') - 8*gamma^2 */ + // y' = alpha*(4*beta - x') - 8*gamma^2 felem_scalar(beta, 4); - /* beta[i] < 4 * 2^101 = 2^103 */ + // beta[i] < 4 * 2^101 = 2^103 felem_diff_zero107(beta, x_out); - /* beta[i] < 2^107 + 2^103 < 2^108 */ + // beta[i] < 2^107 + 2^103 < 2^108 felem_small_mul(tmp, small2, beta); - /* tmp[i] < 7 * 2^64 < 2^67 */ + // tmp[i] < 7 * 2^64 < 2^67 smallfelem_square(tmp2, small1); - /* tmp2[i] < 7 * 2^64 */ + // tmp2[i] < 7 * 2^64 longfelem_scalar(tmp2, 8); - /* tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67 */ + // tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67 longfelem_diff(tmp, tmp2); - /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */ + // tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 felem_reduce_zero105(y_out, tmp); - /* y_out[i] < 2^106 */ + // y_out[i] < 2^106 } -/* point_double_small is the same as point_double, except that it operates on - * smallfelems. */ +// point_double_small is the same as point_double, except that it operates on +// smallfelems. 
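One point worth spelling out about point_double above: the alpha step computes 3 * (x - delta) * (x + delta) with delta = z^2. Expanding,

  3*(x - z^2)*(x + z^2) = 3*x^2 - 3*z^4 = 3*x^2 + a*z^4   when a = -3,

which is exactly the curve coefficient of P-256; the dbl-2001-b formulas linked above, and hence this routine, assume a = -3.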
static void point_double_small(smallfelem x_out, smallfelem y_out, smallfelem z_out, const smallfelem x_in, const smallfelem y_in, const smallfelem z_in) { @@ -1005,32 +1013,32 @@ static void point_double_small(smallfelem x_out, smallfelem y_out, felem_shrink(z_out, felem_z_out); } -/* copy_conditional copies in to out iff mask is all ones. */ -static void copy_conditional(felem out, const felem in, limb mask) { +// p256_copy_conditional copies in to out iff mask is all ones. +static void p256_copy_conditional(felem out, const felem in, limb mask) { for (size_t i = 0; i < NLIMBS; ++i) { const limb tmp = mask & (in[i] ^ out[i]); out[i] ^= tmp; } } -/* copy_small_conditional copies in to out iff mask is all ones. */ +// copy_small_conditional copies in to out iff mask is all ones. static void copy_small_conditional(felem out, const smallfelem in, limb mask) { - const u64 mask64 = mask; + const uint64_t mask64 = mask; for (size_t i = 0; i < NLIMBS; ++i) { out[i] = ((limb)(in[i] & mask64)) | (out[i] & ~mask); } } -/* point_add calcuates (x1, y1, z1) + (x2, y2, z2) - * - * The method is taken from: - * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl, - * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity). - * - * This function includes a branch for checking whether the two input points - * are equal, (while not equal to the point at infinity). This case never - * happens during single point multiplication, so there is no timing leak for - * ECDH or ECDSA signing. */ +// point_add calcuates (x1, y1, z1) + (x2, y2, z2) +// +// The method is taken from: +// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl, +// adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity). +// +// This function includes a branch for checking whether the two input points +// are equal, (while not equal to the point at infinity). This case never +// happens during single point multiplication, so there is no timing leak for +// ECDH or ECDSA signing. 
static void point_add(felem x3, felem y3, felem z3, const felem x1, const felem y1, const felem z1, const int mixed, const smallfelem x2, const smallfelem y2, @@ -1045,94 +1053,94 @@ static void point_add(felem x3, felem y3, felem z3, const felem x1, z1_is_zero = smallfelem_is_zero(small3); z2_is_zero = smallfelem_is_zero(z2); - /* ftmp = z1z1 = z1**2 */ + // ftmp = z1z1 = z1**2 smallfelem_square(tmp, small3); felem_reduce(ftmp, tmp); - /* ftmp[i] < 2^101 */ + // ftmp[i] < 2^101 felem_shrink(small1, ftmp); if (!mixed) { - /* ftmp2 = z2z2 = z2**2 */ + // ftmp2 = z2z2 = z2**2 smallfelem_square(tmp, z2); felem_reduce(ftmp2, tmp); - /* ftmp2[i] < 2^101 */ + // ftmp2[i] < 2^101 felem_shrink(small2, ftmp2); felem_shrink(small5, x1); - /* u1 = ftmp3 = x1*z2z2 */ + // u1 = ftmp3 = x1*z2z2 smallfelem_mul(tmp, small5, small2); felem_reduce(ftmp3, tmp); - /* ftmp3[i] < 2^101 */ + // ftmp3[i] < 2^101 - /* ftmp5 = z1 + z2 */ + // ftmp5 = z1 + z2 felem_assign(ftmp5, z1); felem_small_sum(ftmp5, z2); - /* ftmp5[i] < 2^107 */ + // ftmp5[i] < 2^107 - /* ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 */ + // ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 felem_square(tmp, ftmp5); felem_reduce(ftmp5, tmp); - /* ftmp2 = z2z2 + z1z1 */ + // ftmp2 = z2z2 + z1z1 felem_sum(ftmp2, ftmp); - /* ftmp2[i] < 2^101 + 2^101 = 2^102 */ + // ftmp2[i] < 2^101 + 2^101 = 2^102 felem_diff(ftmp5, ftmp2); - /* ftmp5[i] < 2^105 + 2^101 < 2^106 */ + // ftmp5[i] < 2^105 + 2^101 < 2^106 - /* ftmp2 = z2 * z2z2 */ + // ftmp2 = z2 * z2z2 smallfelem_mul(tmp, small2, z2); felem_reduce(ftmp2, tmp); - /* s1 = ftmp2 = y1 * z2**3 */ + // s1 = ftmp2 = y1 * z2**3 felem_mul(tmp, y1, ftmp2); felem_reduce(ftmp6, tmp); - /* ftmp6[i] < 2^101 */ + // ftmp6[i] < 2^101 } else { - /* We'll assume z2 = 1 (special case z2 = 0 is handled later). */ + // We'll assume z2 = 1 (special case z2 = 0 is handled later). 
- /* u1 = ftmp3 = x1*z2z2 */ + // u1 = ftmp3 = x1*z2z2 felem_assign(ftmp3, x1); - /* ftmp3[i] < 2^106 */ + // ftmp3[i] < 2^106 - /* ftmp5 = 2z1z2 */ + // ftmp5 = 2z1z2 felem_assign(ftmp5, z1); felem_scalar(ftmp5, 2); - /* ftmp5[i] < 2*2^106 = 2^107 */ + // ftmp5[i] < 2*2^106 = 2^107 - /* s1 = ftmp2 = y1 * z2**3 */ + // s1 = ftmp2 = y1 * z2**3 felem_assign(ftmp6, y1); - /* ftmp6[i] < 2^106 */ + // ftmp6[i] < 2^106 } - /* u2 = x2*z1z1 */ + // u2 = x2*z1z1 smallfelem_mul(tmp, x2, small1); felem_reduce(ftmp4, tmp); - /* h = ftmp4 = u2 - u1 */ + // h = ftmp4 = u2 - u1 felem_diff_zero107(ftmp4, ftmp3); - /* ftmp4[i] < 2^107 + 2^101 < 2^108 */ + // ftmp4[i] < 2^107 + 2^101 < 2^108 felem_shrink(small4, ftmp4); x_equal = smallfelem_is_zero(small4); - /* z_out = ftmp5 * h */ + // z_out = ftmp5 * h felem_small_mul(tmp, small4, ftmp5); felem_reduce(z_out, tmp); - /* z_out[i] < 2^101 */ + // z_out[i] < 2^101 - /* ftmp = z1 * z1z1 */ + // ftmp = z1 * z1z1 smallfelem_mul(tmp, small1, small3); felem_reduce(ftmp, tmp); - /* s2 = tmp = y2 * z1**3 */ + // s2 = tmp = y2 * z1**3 felem_small_mul(tmp, y2, ftmp); felem_reduce(ftmp5, tmp); - /* r = ftmp5 = (s2 - s1)*2 */ + // r = ftmp5 = (s2 - s1)*2 felem_diff_zero107(ftmp5, ftmp6); - /* ftmp5[i] < 2^107 + 2^107 = 2^108 */ + // ftmp5[i] < 2^107 + 2^107 = 2^108 felem_scalar(ftmp5, 2); - /* ftmp5[i] < 2^109 */ + // ftmp5[i] < 2^109 felem_shrink(small1, ftmp5); y_equal = smallfelem_is_zero(small1); @@ -1141,56 +1149,56 @@ static void point_add(felem x3, felem y3, felem z3, const felem x1, return; } - /* I = ftmp = (2h)**2 */ + // I = ftmp = (2h)**2 felem_assign(ftmp, ftmp4); felem_scalar(ftmp, 2); - /* ftmp[i] < 2*2^108 = 2^109 */ + // ftmp[i] < 2*2^108 = 2^109 felem_square(tmp, ftmp); felem_reduce(ftmp, tmp); - /* J = ftmp2 = h * I */ + // J = ftmp2 = h * I felem_mul(tmp, ftmp4, ftmp); felem_reduce(ftmp2, tmp); - /* V = ftmp4 = U1 * I */ + // V = ftmp4 = U1 * I felem_mul(tmp, ftmp3, ftmp); felem_reduce(ftmp4, tmp); - /* x_out = r**2 - J - 2V */ + // x_out = r**2 - J - 2V smallfelem_square(tmp, small1); felem_reduce(x_out, tmp); felem_assign(ftmp3, ftmp4); felem_scalar(ftmp4, 2); felem_sum(ftmp4, ftmp2); - /* ftmp4[i] < 2*2^101 + 2^101 < 2^103 */ + // ftmp4[i] < 2*2^101 + 2^101 < 2^103 felem_diff(x_out, ftmp4); - /* x_out[i] < 2^105 + 2^101 */ + // x_out[i] < 2^105 + 2^101 - /* y_out = r(V-x_out) - 2 * s1 * J */ + // y_out = r(V-x_out) - 2 * s1 * J felem_diff_zero107(ftmp3, x_out); - /* ftmp3[i] < 2^107 + 2^101 < 2^108 */ + // ftmp3[i] < 2^107 + 2^101 < 2^108 felem_small_mul(tmp, small1, ftmp3); felem_mul(tmp2, ftmp6, ftmp2); longfelem_scalar(tmp2, 2); - /* tmp2[i] < 2*2^67 = 2^68 */ + // tmp2[i] < 2*2^67 = 2^68 longfelem_diff(tmp, tmp2); - /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */ + // tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 felem_reduce_zero105(y_out, tmp); - /* y_out[i] < 2^106 */ + // y_out[i] < 2^106 copy_small_conditional(x_out, x2, z1_is_zero); - copy_conditional(x_out, x1, z2_is_zero); + p256_copy_conditional(x_out, x1, z2_is_zero); copy_small_conditional(y_out, y2, z1_is_zero); - copy_conditional(y_out, y1, z2_is_zero); + p256_copy_conditional(y_out, y1, z2_is_zero); copy_small_conditional(z_out, z2, z1_is_zero); - copy_conditional(z_out, z1, z2_is_zero); + p256_copy_conditional(z_out, z1, z2_is_zero); felem_assign(x3, x_out); felem_assign(y3, y_out); felem_assign(z3, z_out); } -/* point_add_small is the same as point_add, except that it operates on - * smallfelems. */ +// point_add_small is the same as point_add, except that it operates on +// smallfelems. 
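Collected in one place, the values that point_add above threads through ftmp..ftmp6 are the add-2007-bl quantities from the per-step comments (with z2 = 1 in the mixed case); this is a restatement of those comments, not additional math:

  z1z1 = z1^2           z2z2 = z2^2
  u1 = x1*z2z2          u2 = x2*z1z1
  s1 = y1*z2^3          s2 = y2*z1^3
  h = u2 - u1           r = 2*(s2 - s1)
  I = (2*h)^2           J = h*I          V = u1*I
  x3 = r^2 - J - 2*V
  y3 = r*(V - x3) - 2*s1*J
  z3 = ((z1 + z2)^2 - z1z1 - z2z2)*h = 2*z1*z2*h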
static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3, smallfelem x1, smallfelem y1, smallfelem z1, smallfelem x2, smallfelem y2, smallfelem z2) { @@ -1206,42 +1214,42 @@ static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3, felem_shrink(z3, felem_z3); } -/* Base point pre computation - * -------------------------- - * - * Two different sorts of precomputed tables are used in the following code. - * Each contain various points on the curve, where each point is three field - * elements (x, y, z). - * - * For the base point table, z is usually 1 (0 for the point at infinity). - * This table has 2 * 16 elements, starting with the following: - * index | bits | point - * ------+---------+------------------------------ - * 0 | 0 0 0 0 | 0G - * 1 | 0 0 0 1 | 1G - * 2 | 0 0 1 0 | 2^64G - * 3 | 0 0 1 1 | (2^64 + 1)G - * 4 | 0 1 0 0 | 2^128G - * 5 | 0 1 0 1 | (2^128 + 1)G - * 6 | 0 1 1 0 | (2^128 + 2^64)G - * 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G - * 8 | 1 0 0 0 | 2^192G - * 9 | 1 0 0 1 | (2^192 + 1)G - * 10 | 1 0 1 0 | (2^192 + 2^64)G - * 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G - * 12 | 1 1 0 0 | (2^192 + 2^128)G - * 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G - * 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G - * 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G - * followed by a copy of this with each element multiplied by 2^32. - * - * The reason for this is so that we can clock bits into four different - * locations when doing simple scalar multiplies against the base point, - * and then another four locations using the second 16 elements. - * - * Tables for other points have table[i] = iG for i in 0 .. 16. */ - -/* g_pre_comp is the table of precomputed base points */ +// Base point pre computation +// -------------------------- +// +// Two different sorts of precomputed tables are used in the following code. +// Each contain various points on the curve, where each point is three field +// elements (x, y, z). +// +// For the base point table, z is usually 1 (0 for the point at infinity). +// This table has 2 * 16 elements, starting with the following: +// index | bits | point +// ------+---------+------------------------------ +// 0 | 0 0 0 0 | 0G +// 1 | 0 0 0 1 | 1G +// 2 | 0 0 1 0 | 2^64G +// 3 | 0 0 1 1 | (2^64 + 1)G +// 4 | 0 1 0 0 | 2^128G +// 5 | 0 1 0 1 | (2^128 + 1)G +// 6 | 0 1 1 0 | (2^128 + 2^64)G +// 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G +// 8 | 1 0 0 0 | 2^192G +// 9 | 1 0 0 1 | (2^192 + 1)G +// 10 | 1 0 1 0 | (2^192 + 2^64)G +// 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G +// 12 | 1 1 0 0 | (2^192 + 2^128)G +// 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G +// 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G +// 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G +// followed by a copy of this with each element multiplied by 2^32. +// +// The reason for this is so that we can clock bits into four different +// locations when doing simple scalar multiplies against the base point, +// and then another four locations using the second 16 elements. +// +// Tables for other points have table[i] = iG for i in 0 .. 16. + +// g_pre_comp is the table of precomputed base points static const smallfelem g_pre_comp[2][16][3] = { {{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}, {{0xf4a13945d898c296, 0x77037d812deb33a0, 0xf8bce6e563a440f2, @@ -1396,17 +1404,17 @@ static const smallfelem g_pre_comp[2][16][3] = { 0x4ab5b6b2b8753f81}, {1, 0, 0, 0}}}}; -/* select_point selects the |idx|th point from a precomputation table and - * copies it to out. 
*/ -static void select_point(const u64 idx, size_t size, +// select_point selects the |idx|th point from a precomputation table and +// copies it to out. +static void select_point(const uint64_t idx, size_t size, const smallfelem pre_comp[/*size*/][3], smallfelem out[3]) { - u64 *outlimbs = &out[0][0]; + uint64_t *outlimbs = &out[0][0]; OPENSSL_memset(outlimbs, 0, 3 * sizeof(smallfelem)); for (size_t i = 0; i < size; i++) { - const u64 *inlimbs = (const u64 *)&pre_comp[i][0][0]; - u64 mask = i ^ idx; + const uint64_t *inlimbs = (const uint64_t *)&pre_comp[i][0][0]; + uint64_t mask = i ^ idx; mask |= mask >> 4; mask |= mask >> 2; mask |= mask >> 1; @@ -1418,7 +1426,7 @@ static void select_point(const u64 idx, size_t size, } } -/* get_bit returns the |i|th bit in |in| */ +// get_bit returns the |i|th bit in |in| static char get_bit(const felem_bytearray in, int i) { if (i < 0 || i >= 256) { return 0; @@ -1426,41 +1434,42 @@ static char get_bit(const felem_bytearray in, int i) { return (in[i >> 3] >> (i & 7)) & 1; } -/* Interleaved point multiplication using precomputed point multiples: The - * small point multiples 0*P, 1*P, ..., 17*P are in p_pre_comp, the scalar - * in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple - * of the generator, using certain (large) precomputed multiples in g_pre_comp. - * Output point (X, Y, Z) is stored in x_out, y_out, z_out. */ -static void batch_mul(felem x_out, felem y_out, felem z_out, const u8 *p_scalar, - const u8 *g_scalar, const smallfelem p_pre_comp[17][3]) { +// Interleaved point multiplication using precomputed point multiples: The +// small point multiples 0*P, 1*P, ..., 17*P are in p_pre_comp, the scalar +// in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple +// of the generator, using certain (large) precomputed multiples in g_pre_comp. +// Output point (X, Y, Z) is stored in x_out, y_out, z_out. +static void batch_mul(felem x_out, felem y_out, felem z_out, + const uint8_t *p_scalar, const uint8_t *g_scalar, + const smallfelem p_pre_comp[17][3]) { felem nq[3], ftmp; smallfelem tmp[3]; - u64 bits; - u8 sign, digit; + uint64_t bits; + uint8_t sign, digit; - /* set nq to the point at infinity */ + // set nq to the point at infinity OPENSSL_memset(nq, 0, 3 * sizeof(felem)); - /* Loop over both scalars msb-to-lsb, interleaving additions of multiples - * of the generator (two in each of the last 32 rounds) and additions of p - * (every 5th round). */ + // Loop over both scalars msb-to-lsb, interleaving additions of multiples + // of the generator (two in each of the last 32 rounds) and additions of p + // (every 5th round). - int skip = 1; /* save two point operations in the first round */ + int skip = 1; // save two point operations in the first round size_t i = p_scalar != NULL ? 
255 : 31; for (;;) { - /* double */ + // double if (!skip) { point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]); } - /* add multiples of the generator */ + // add multiples of the generator if (g_scalar != NULL && i <= 31) { - /* first, look 32 bits upwards */ + // first, look 32 bits upwards bits = get_bit(g_scalar, i + 224) << 3; bits |= get_bit(g_scalar, i + 160) << 2; bits |= get_bit(g_scalar, i + 96) << 1; bits |= get_bit(g_scalar, i + 32); - /* select the point to add, in constant time */ + // select the point to add, in constant time select_point(bits, 16, g_pre_comp[1], tmp); if (!skip) { @@ -1473,18 +1482,18 @@ static void batch_mul(felem x_out, felem y_out, felem z_out, const u8 *p_scalar, skip = 0; } - /* second, look at the current position */ + // second, look at the current position bits = get_bit(g_scalar, i + 192) << 3; bits |= get_bit(g_scalar, i + 128) << 2; bits |= get_bit(g_scalar, i + 64) << 1; bits |= get_bit(g_scalar, i); - /* select the point to add, in constant time */ + // select the point to add, in constant time select_point(bits, 16, g_pre_comp[0], tmp); point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */, tmp[0], tmp[1], tmp[2]); } - /* do other additions every 5 doublings */ + // do other additions every 5 doublings if (p_scalar != NULL && i % 5 == 0) { bits = get_bit(p_scalar, i + 4) << 5; bits |= get_bit(p_scalar, i + 3) << 4; @@ -1494,10 +1503,10 @@ static void batch_mul(felem x_out, felem y_out, felem z_out, const u8 *p_scalar, bits |= get_bit(p_scalar, i - 1); ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits); - /* select the point to add or subtract, in constant time. */ + // select the point to add or subtract, in constant time. select_point(digit, 17, p_pre_comp, tmp); - smallfelem_neg(ftmp, tmp[1]); /* (X, -Y, Z) is the negative - * point */ + smallfelem_neg(ftmp, tmp[1]); // (X, -Y, Z) is the negative + // point copy_small_conditional(ftmp, tmp[1], (((limb)sign) - 1)); felem_contract(tmp[1], ftmp); @@ -1522,13 +1531,10 @@ static void batch_mul(felem x_out, felem y_out, felem z_out, const u8 *p_scalar, felem_assign(z_out, nq[2]); } -/******************************************************************************/ -/* - * OPENSSL EC_METHOD FUNCTIONS - */ +// OPENSSL EC_METHOD FUNCTIONS -/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') = - * (X/Z^2, Y/Z^3). */ +// Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') = +// (X/Z^2, Y/Z^3). static int ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP *group, const EC_POINT *point, BIGNUM *x, BIGNUM *y, @@ -1576,14 +1582,13 @@ static int ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP *group, } static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r, - const BIGNUM *g_scalar, const EC_POINT *p, - const BIGNUM *p_scalar, BN_CTX *ctx) { + const EC_SCALAR *g_scalar, + const EC_POINT *p, + const EC_SCALAR *p_scalar, BN_CTX *ctx) { int ret = 0; BN_CTX *new_ctx = NULL; BIGNUM *x, *y, *z, *tmp_scalar; - felem_bytearray g_secret, p_secret; smallfelem p_pre_comp[17][3]; - felem_bytearray tmp; smallfelem x_in, y_in, z_in; felem x_out, y_out, z_out; @@ -1603,24 +1608,10 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r, } if (p != NULL && p_scalar != NULL) { - /* We treat NULL scalars as 0, and NULL points as points at infinity, i.e., - * they contribute nothing to the linear combination. 
*/ - OPENSSL_memset(&p_secret, 0, sizeof(p_secret)); + // We treat NULL scalars as 0, and NULL points as points at infinity, i.e., + // they contribute nothing to the linear combination. OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp)); - size_t num_bytes; - /* Reduce g_scalar to 0 <= g_scalar < 2^256. */ - if (BN_num_bits(p_scalar) > 256 || BN_is_negative(p_scalar)) { - /* This is an unusual input, and we don't guarantee constant-timeness. */ - if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - num_bytes = BN_bn2bin(tmp_scalar, tmp); - } else { - num_bytes = BN_bn2bin(p_scalar, tmp); - } - flip_endian(p_secret, tmp, num_bytes); - /* Precompute multiples. */ + // Precompute multiples. if (!BN_to_felem(x_out, &p->X) || !BN_to_felem(y_out, &p->Y) || !BN_to_felem(z_out, &p->Z)) { @@ -1644,30 +1635,12 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r, } } - if (g_scalar != NULL) { - size_t num_bytes; - - OPENSSL_memset(g_secret, 0, sizeof(g_secret)); - /* reduce g_scalar to 0 <= g_scalar < 2^256 */ - if (BN_num_bits(g_scalar) > 256 || BN_is_negative(g_scalar)) { - /* this is an unusual input, and we don't guarantee - * constant-timeness. */ - if (!BN_nnmod(tmp_scalar, g_scalar, &group->order, ctx)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - num_bytes = BN_bn2bin(tmp_scalar, tmp); - } else { - num_bytes = BN_bn2bin(g_scalar, tmp); - } - flip_endian(g_secret, tmp, num_bytes); - } batch_mul(x_out, y_out, z_out, - (p != NULL && p_scalar != NULL) ? p_secret : NULL, - g_scalar != NULL ? g_secret : NULL, - (const smallfelem(*)[3]) &p_pre_comp); + (p != NULL && p_scalar != NULL) ? p_scalar->bytes : NULL, + g_scalar != NULL ? g_scalar->bytes : NULL, + (const smallfelem(*)[3]) & p_pre_comp); - /* reduce the output to its unique minimal representation */ + // reduce the output to its unique minimal representation felem_contract(x_in, x_out); felem_contract(y_in, y_out); felem_contract(z_in, z_out); @@ -1685,17 +1658,17 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r, return ret; } -const EC_METHOD EC_GFp_nistp256_method = { - ec_GFp_simple_group_init, - ec_GFp_simple_group_finish, - ec_GFp_simple_group_copy, - ec_GFp_simple_group_set_curve, - ec_GFp_nistp256_point_get_affine_coordinates, - ec_GFp_nistp256_points_mul, - ec_GFp_simple_field_mul, - ec_GFp_simple_field_sqr, - NULL /* field_encode */, - NULL /* field_decode */, +DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_nistp256_method) { + out->group_init = ec_GFp_simple_group_init; + out->group_finish = ec_GFp_simple_group_finish; + out->group_set_curve = ec_GFp_simple_group_set_curve; + out->point_get_affine_coordinates = + ec_GFp_nistp256_point_get_affine_coordinates; + out->mul = ec_GFp_nistp256_points_mul; + out->field_mul = ec_GFp_simple_field_mul; + out->field_sqr = ec_GFp_simple_field_sqr; + out->field_encode = NULL; + out->field_decode = NULL; }; -#endif /* 64_BIT && !WINDOWS */ +#endif // 64_BIT && !WINDOWS diff --git a/Sources/BoringSSL/crypto/ec/p256-x86_64-table.h b/Sources/BoringSSL/crypto/fipsmodule/ec/p256-x86_64-table.h similarity index 99% rename from Sources/BoringSSL/crypto/ec/p256-x86_64-table.h rename to Sources/BoringSSL/crypto/fipsmodule/ec/p256-x86_64-table.h index e4705f8e1..575a2034a 100644 --- a/Sources/BoringSSL/crypto/ec/p256-x86_64-table.h +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/p256-x86_64-table.h @@ -12,17 +12,17 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This is the precomputed constant time access table for the code in - * p256-x86_64.c, for the default generator. The table consists of 37 - * subtables, each subtable contains 64 affine points. The affine points are - * encoded as eight uint64's, four for the x coordinate and four for the y. - * Both values are in little-endian order. There are 37 tables because a - * signed, 6-bit wNAF form of the scalar is used and ceil(256/(6 + 1)) = 37. - * Within each table there are 64 values because the 6-bit wNAF value can take - * 64 values, ignoring the sign bit, which is implemented by performing a - * negation of the affine point when required. We would like to align it to 2MB - * in order to increase the chances of using a large page but that appears to - * lead to invalid ELF files being produced. */ +// This is the precomputed constant time access table for the code in +// p256-x86_64.c, for the default generator. The table consists of 37 +// subtables, each subtable contains 64 affine points. The affine points are +// encoded as eight uint64's, four for the x coordinate and four for the y. +// Both values are in little-endian order. There are 37 tables because a +// signed, 6-bit wNAF form of the scalar is used and ceil(256/(6 + 1)) = 37. +// Within each table there are 64 values because the 6-bit wNAF value can take +// 64 values, ignoring the sign bit, which is implemented by performing a +// negation of the affine point when required. We would like to align it to 2MB +// in order to increase the chances of using a large page but that appears to +// lead to invalid ELF files being produced. static const alignas(4096) BN_ULONG ecp_nistz256_precomputed[37][64 * sizeof(P256_POINT_AFFINE) / diff --git a/Sources/BoringSSL/crypto/ec/p256-x86_64.c b/Sources/BoringSSL/crypto/fipsmodule/ec/p256-x86_64.c similarity index 64% rename from Sources/BoringSSL/crypto/ec/p256-x86_64.c rename to Sources/BoringSSL/crypto/fipsmodule/ec/p256-x86_64.c index 652d10cd1..a9b603aeb 100644 --- a/Sources/BoringSSL/crypto/ec/p256-x86_64.c +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/p256-x86_64.c @@ -12,13 +12,13 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -/* Developers and authors: - * Shay Gueron (1, 2), and Vlad Krasnov (1) - * (1) Intel Corporation, Israel Development Center - * (2) University of Haifa - * Reference: - * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with - * 256 Bit Primes" */ +// Developers and authors: +// Shay Gueron (1, 2), and Vlad Krasnov (1) +// (1) Intel Corporation, Israel Development Center +// (2) University of Haifa +// Reference: +// S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with +// 256 Bit Primes" #include @@ -31,7 +31,8 @@ #include #include "../bn/internal.h" -#include "../internal.h" +#include "../delocate.h" +#include "../../internal.h" #include "internal.h" #include "p256-x86_64.h" @@ -41,16 +42,16 @@ typedef P256_POINT_AFFINE PRECOMP256_ROW[64]; -/* One converted into the Montgomery domain */ +// One converted into the Montgomery domain static const BN_ULONG ONE[P256_LIMBS] = { TOBN(0x00000000, 0x00000001), TOBN(0xffffffff, 0x00000000), TOBN(0xffffffff, 0xffffffff), TOBN(0x00000000, 0xfffffffe), }; -/* Precomputed tables for the default generator */ +// Precomputed tables for the default generator #include "p256-x86_64-table.h" -/* Recode window to a signed digit, see util-64.c for details */ +// Recode window to a signed digit, see util-64.c for details static unsigned booth_recode_w5(unsigned in) { unsigned s, d; @@ -73,11 +74,11 @@ static unsigned booth_recode_w7(unsigned in) { return (d << 1) + (s & 1); } -/* copy_conditional copies |src| to |dst| if |move| is one and leaves it as-is - * if |move| is zero. - * - * WARNING: this breaks the usual convention of constant-time functions - * returning masks. */ +// copy_conditional copies |src| to |dst| if |move| is one and leaves it as-is +// if |move| is zero. +// +// WARNING: this breaks the usual convention of constant-time functions +// returning masks. static void copy_conditional(BN_ULONG dst[P256_LIMBS], const BN_ULONG src[P256_LIMBS], BN_ULONG move) { BN_ULONG mask1 = ((BN_ULONG)0) - move; @@ -95,32 +96,32 @@ static void copy_conditional(BN_ULONG dst[P256_LIMBS], } } -/* is_not_zero returns one iff in != 0 and zero otherwise. - * - * WARNING: this breaks the usual convention of constant-time functions - * returning masks. - * - * (define-fun is_not_zero ((in (_ BitVec 64))) (_ BitVec 64) - * (bvlshr (bvor in (bvsub #x0000000000000000 in)) #x000000000000003f) - * ) - * - * (declare-fun x () (_ BitVec 64)) - * - * (assert (and (= x #x0000000000000000) (= (is_not_zero x) #x0000000000000001))) - * (check-sat) - * - * (assert (and (not (= x #x0000000000000000)) (= (is_not_zero x) #x0000000000000000))) - * (check-sat) - * */ +// is_not_zero returns one iff in != 0 and zero otherwise. +// +// WARNING: this breaks the usual convention of constant-time functions +// returning masks. +// +// (define-fun is_not_zero ((in (_ BitVec 64))) (_ BitVec 64) +// (bvlshr (bvor in (bvsub #x0000000000000000 in)) #x000000000000003f) +// ) +// +// (declare-fun x () (_ BitVec 64)) +// +// (assert (and (= x #x0000000000000000) (= (is_not_zero x) #x0000000000000001))) +// (check-sat) +// +// (assert (and (not (= x #x0000000000000000)) (= (is_not_zero x) #x0000000000000000))) +// (check-sat) +// static BN_ULONG is_not_zero(BN_ULONG in) { in |= (0 - in); in >>= BN_BITS2 - 1; return in; } -/* ecp_nistz256_mod_inverse_mont sets |r| to (|in| * 2^-256)^-1 * 2^256 mod p. - * That is, |r| is the modular inverse of |in| for input and output in the - * Montgomery domain. 
*/ +// ecp_nistz256_mod_inverse_mont sets |r| to (|in| * 2^-256)^-1 * 2^256 mod p. +// That is, |r| is the modular inverse of |in| for input and output in the +// Montgomery domain. static void ecp_nistz256_mod_inverse_mont(BN_ULONG r[P256_LIMBS], const BN_ULONG in[P256_LIMBS]) { /* The poly is ffffffff 00000001 00000000 00000000 00000000 ffffffff ffffffff @@ -135,29 +136,29 @@ static void ecp_nistz256_mod_inverse_mont(BN_ULONG r[P256_LIMBS], int i; ecp_nistz256_sqr_mont(res, in); - ecp_nistz256_mul_mont(p2, res, in); /* 3*p */ + ecp_nistz256_mul_mont(p2, res, in); // 3*p ecp_nistz256_sqr_mont(res, p2); ecp_nistz256_sqr_mont(res, res); - ecp_nistz256_mul_mont(p4, res, p2); /* f*p */ + ecp_nistz256_mul_mont(p4, res, p2); // f*p ecp_nistz256_sqr_mont(res, p4); ecp_nistz256_sqr_mont(res, res); ecp_nistz256_sqr_mont(res, res); ecp_nistz256_sqr_mont(res, res); - ecp_nistz256_mul_mont(p8, res, p4); /* ff*p */ + ecp_nistz256_mul_mont(p8, res, p4); // ff*p ecp_nistz256_sqr_mont(res, p8); for (i = 0; i < 7; i++) { ecp_nistz256_sqr_mont(res, res); } - ecp_nistz256_mul_mont(p16, res, p8); /* ffff*p */ + ecp_nistz256_mul_mont(p16, res, p8); // ffff*p ecp_nistz256_sqr_mont(res, p16); for (i = 0; i < 15; i++) { ecp_nistz256_sqr_mont(res, res); } - ecp_nistz256_mul_mont(p32, res, p16); /* ffffffff*p */ + ecp_nistz256_mul_mont(p32, res, p16); // ffffffff*p ecp_nistz256_sqr_mont(res, p32); for (i = 0; i < 31; i++) { @@ -200,8 +201,8 @@ static void ecp_nistz256_mod_inverse_mont(BN_ULONG r[P256_LIMBS], ecp_nistz256_mul_mont(r, res, in); } -/* ecp_nistz256_bignum_to_field_elem copies the contents of |in| to |out| and - * returns one if it fits. Otherwise it returns zero. */ +// ecp_nistz256_bignum_to_field_elem copies the contents of |in| to |out| and +// returns one if it fits. Otherwise it returns zero. static int ecp_nistz256_bignum_to_field_elem(BN_ULONG out[P256_LIMBS], const BIGNUM *in) { if (in->top > P256_LIMBS) { @@ -213,81 +214,34 @@ static int ecp_nistz256_bignum_to_field_elem(BN_ULONG out[P256_LIMBS], return 1; } -/* r = p * p_scalar */ +// r = p * p_scalar static int ecp_nistz256_windowed_mul(const EC_GROUP *group, P256_POINT *r, - const EC_POINT *p, const BIGNUM *p_scalar, - BN_CTX *ctx) { + const EC_POINT *p, + const EC_SCALAR *p_scalar) { assert(p != NULL); assert(p_scalar != NULL); static const unsigned kWindowSize = 5; static const unsigned kMask = (1 << (5 /* kWindowSize */ + 1)) - 1; - /* A |P256_POINT| is (3 * 32) = 96 bytes, and the 64-byte alignment should - * add no more than 63 bytes of overhead. Thus, |table| should require - * ~1599 ((96 * 16) + 63) bytes of stack space. */ + // A |P256_POINT| is (3 * 32) = 96 bytes, and the 64-byte alignment should + // add no more than 63 bytes of overhead. Thus, |table| should require + // ~1599 ((96 * 16) + 63) bytes of stack space. 
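The windowed multiplication below leans on the same constant-time selection pattern as copy_conditional and is_not_zero above: derive an all-zeros/all-ones mask from a 0/1 flag and blend two sources with it, never branching on secret data. A minimal standalone sketch of that pattern (illustrative only; cmov_u64 is a hypothetical helper name, not a function from this patch):

#include <stddef.h>
#include <stdint.h>

// Constant-time conditional copy of |n| words. |move| must be 0 or 1;
// 0 - move is then either all zeros or all ones.
static void cmov_u64(uint64_t *dst, const uint64_t *src, size_t n,
                     uint64_t move) {
  uint64_t mask = 0 - move;  // 0 -> 0x000...0, 1 -> 0xfff...f
  for (size_t i = 0; i < n; i++) {
    dst[i] = (src[i] & mask) | (dst[i] & ~mask);
  }
}

ecp_nistz256_select_w5 and ecp_nistz256_select_w7, declared in the header further below, perform the analogous constant-time table select.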
alignas(64) P256_POINT table[16]; uint8_t p_str[33]; + OPENSSL_memcpy(p_str, p_scalar->bytes, 32); + p_str[32] = 0; - - int ret = 0; - BN_CTX *new_ctx = NULL; - int ctx_started = 0; - - if (BN_num_bits(p_scalar) > 256 || BN_is_negative(p_scalar)) { - if (ctx == NULL) { - new_ctx = BN_CTX_new(); - if (new_ctx == NULL) { - OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); - goto err; - } - ctx = new_ctx; - } - BN_CTX_start(ctx); - ctx_started = 1; - BIGNUM *mod = BN_CTX_get(ctx); - if (mod == NULL) { - OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); - goto err; - } - if (!BN_nnmod(mod, p_scalar, &group->order, ctx)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - p_scalar = mod; - } - - int j; - for (j = 0; j < p_scalar->top * BN_BYTES; j += BN_BYTES) { - BN_ULONG d = p_scalar->d[j / BN_BYTES]; - - p_str[j + 0] = d & 0xff; - p_str[j + 1] = (d >> 8) & 0xff; - p_str[j + 2] = (d >> 16) & 0xff; - p_str[j + 3] = (d >>= 24) & 0xff; - if (BN_BYTES == 8) { - d >>= 8; - p_str[j + 4] = d & 0xff; - p_str[j + 5] = (d >> 8) & 0xff; - p_str[j + 6] = (d >> 16) & 0xff; - p_str[j + 7] = (d >> 24) & 0xff; - } - } - - for (; j < 33; j++) { - p_str[j] = 0; - } - - /* table[0] is implicitly (0,0,0) (the point at infinity), therefore it is - * not stored. All other values are actually stored with an offset of -1 in - * table. */ + // table[0] is implicitly (0,0,0) (the point at infinity), therefore it is + // not stored. All other values are actually stored with an offset of -1 in + // table. P256_POINT *row = table; if (!ecp_nistz256_bignum_to_field_elem(row[1 - 1].X, &p->X) || !ecp_nistz256_bignum_to_field_elem(row[1 - 1].Y, &p->Y) || !ecp_nistz256_bignum_to_field_elem(row[1 - 1].Z, &p->Z)) { OPENSSL_PUT_ERROR(EC, EC_R_COORDINATES_OUT_OF_RANGE); - goto err; + return 0; } ecp_nistz256_point_double(&row[2 - 1], &row[1 - 1]); @@ -340,7 +294,7 @@ static int ecp_nistz256_windowed_mul(const EC_GROUP *group, P256_POINT *r, ecp_nistz256_point_double(r, r); } - /* Final window */ + // Final window wvalue = p_str[0]; wvalue = (wvalue << 1) & kMask; @@ -353,19 +307,13 @@ static int ecp_nistz256_windowed_mul(const EC_GROUP *group, P256_POINT *r, ecp_nistz256_point_add(r, r, &h); - ret = 1; - -err: - if (ctx_started) { - BN_CTX_end(ctx); - } - BN_CTX_free(new_ctx); - return ret; + return 1; } -static int ecp_nistz256_points_mul( - const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, - const EC_POINT *p_, const BIGNUM *p_scalar, BN_CTX *ctx) { +static int ecp_nistz256_points_mul(const EC_GROUP *group, EC_POINT *r, + const EC_SCALAR *g_scalar, + const EC_POINT *p_, + const EC_SCALAR *p_scalar, BN_CTX *ctx) { assert((p_ != NULL) == (p_scalar != NULL)); static const unsigned kWindowSize = 7; @@ -376,56 +324,12 @@ static int ecp_nistz256_points_mul( P256_POINT_AFFINE a; } t, p; - int ret = 0; - BN_CTX *new_ctx = NULL; - int ctx_started = 0; - if (g_scalar != NULL) { - if (BN_num_bits(g_scalar) > 256 || BN_is_negative(g_scalar)) { - if (ctx == NULL) { - new_ctx = BN_CTX_new(); - if (new_ctx == NULL) { - goto err; - } - ctx = new_ctx; - } - BN_CTX_start(ctx); - ctx_started = 1; - BIGNUM *tmp_scalar = BN_CTX_get(ctx); - if (tmp_scalar == NULL) { - goto err; - } - - if (!BN_nnmod(tmp_scalar, g_scalar, &group->order, ctx)) { - OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB); - goto err; - } - g_scalar = tmp_scalar; - } + uint8_t p_str[33]; + OPENSSL_memcpy(p_str, g_scalar->bytes, 32); + p_str[32] = 0; - uint8_t p_str[33] = {0}; - int i; - for (i = 0; i < g_scalar->top * BN_BYTES; i += BN_BYTES) { - BN_ULONG d = g_scalar->d[i / 
BN_BYTES]; - - p_str[i + 0] = d & 0xff; - p_str[i + 1] = (d >> 8) & 0xff; - p_str[i + 2] = (d >> 16) & 0xff; - p_str[i + 3] = (d >>= 24) & 0xff; - if (BN_BYTES == 8) { - d >>= 8; - p_str[i + 4] = d & 0xff; - p_str[i + 5] = (d >> 8) & 0xff; - p_str[i + 6] = (d >> 16) & 0xff; - p_str[i + 7] = (d >> 24) & 0xff; - } - } - - for (; i < (int) sizeof(p_str); i++) { - p_str[i] = 0; - } - - /* First window */ + // First window unsigned wvalue = (p_str[0] << 1) & kMask; unsigned index = kWindowSize; @@ -438,13 +342,13 @@ static int ecp_nistz256_points_mul( ecp_nistz256_neg(p.p.Z, p.p.Y); copy_conditional(p.p.Y, p.p.Z, wvalue & 1); - /* Convert |p| from affine to Jacobian coordinates. We set Z to zero if |p| - * is infinity and |ONE| otherwise. |p| was computed from the table, so it - * is infinity iff |wvalue >> 1| is zero. */ + // Convert |p| from affine to Jacobian coordinates. We set Z to zero if |p| + // is infinity and |ONE| otherwise. |p| was computed from the table, so it + // is infinity iff |wvalue >> 1| is zero. OPENSSL_memset(p.p.Z, 0, sizeof(p.p.Z)); copy_conditional(p.p.Z, ONE, is_not_zero(wvalue >> 1)); - for (i = 1; i < 37; i++) { + for (int i = 1; i < 37; i++) { unsigned off = (index - 1) / 8; wvalue = p_str[off] | p_str[off + 1] << 8; wvalue = (wvalue >> ((index - 1) % 8)) & kMask; @@ -468,8 +372,8 @@ static int ecp_nistz256_points_mul( out = &p.p; } - if (!ecp_nistz256_windowed_mul(group, out, p_, p_scalar, ctx)) { - goto err; + if (!ecp_nistz256_windowed_mul(group, out, p_, p_scalar)) { + return 0; } if (!p_is_infinity) { @@ -477,21 +381,14 @@ static int ecp_nistz256_points_mul( } } - /* Not constant-time, but we're only operating on the public output. */ + // Not constant-time, but we're only operating on the public output. if (!bn_set_words(&r->X, p.p.X, P256_LIMBS) || !bn_set_words(&r->Y, p.p.Y, P256_LIMBS) || !bn_set_words(&r->Z, p.p.Z, P256_LIMBS)) { return 0; } - ret = 1; - -err: - if (ctx_started) { - BN_CTX_end(ctx); - } - BN_CTX_free(new_ctx); - return ret; + return 1; } static int ecp_nistz256_get_affine(const EC_GROUP *group, const EC_POINT *point, @@ -515,10 +412,10 @@ static int ecp_nistz256_get_affine(const EC_GROUP *group, const EC_POINT *point, ecp_nistz256_mod_inverse_mont(z_inv3, point_z); ecp_nistz256_sqr_mont(z_inv2, z_inv3); - /* Instead of using |ecp_nistz256_from_mont| to convert the |x| coordinate - * and then calling |ecp_nistz256_from_mont| again to convert the |y| - * coordinate below, convert the common factor |z_inv2| once now, saving one - * reduction. */ + // Instead of using |ecp_nistz256_from_mont| to convert the |x| coordinate + // and then calling |ecp_nistz256_from_mont| again to convert the |y| + // coordinate below, convert the common factor |z_inv2| once now, saving one + // reduction. 
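The conversion being finished here is the usual Jacobian-to-affine map,

  x_affine = X / Z^2,   y_affine = Y / Z^3 = (Y / Z^2) * (1 / Z),

so Z^-2 is a factor of both coordinates; converting that shared factor out of the Montgomery domain once, as the comment above notes, lets the remaining Montgomery multiplications produce ordinary (non-Montgomery) x and y directly instead of converting each coordinate separately.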
ecp_nistz256_from_mont(z_inv2, z_inv2); if (x != NULL) { @@ -543,18 +440,16 @@ static int ecp_nistz256_get_affine(const EC_GROUP *group, const EC_POINT *point, return 1; } - -const EC_METHOD EC_GFp_nistz256_method = { - ec_GFp_mont_group_init, - ec_GFp_mont_group_finish, - ec_GFp_mont_group_copy, - ec_GFp_mont_group_set_curve, - ecp_nistz256_get_affine, - ecp_nistz256_points_mul, - ec_GFp_mont_field_mul, - ec_GFp_mont_field_sqr, - ec_GFp_mont_field_encode, - ec_GFp_mont_field_decode, +DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_nistz256_method) { + out->group_init = ec_GFp_mont_group_init; + out->group_finish = ec_GFp_mont_group_finish; + out->group_set_curve = ec_GFp_mont_group_set_curve; + out->point_get_affine_coordinates = ecp_nistz256_get_affine; + out->mul = ecp_nistz256_points_mul; + out->field_mul = ec_GFp_mont_field_mul; + out->field_sqr = ec_GFp_mont_field_sqr; + out->field_encode = ec_GFp_mont_field_encode; + out->field_decode = ec_GFp_mont_field_decode; }; #endif /* !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ diff --git a/Sources/BoringSSL/crypto/ec/p256-x86_64.h b/Sources/BoringSSL/crypto/fipsmodule/ec/p256-x86_64.h similarity index 64% rename from Sources/BoringSSL/crypto/ec/p256-x86_64.h rename to Sources/BoringSSL/crypto/fipsmodule/ec/p256-x86_64.h index 0132348e7..6a0bebb75 100644 --- a/Sources/BoringSSL/crypto/ec/p256-x86_64.h +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/p256-x86_64.h @@ -27,30 +27,30 @@ extern "C" { #if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && \ !defined(OPENSSL_SMALL) -/* P-256 field operations. - * - * An element mod P in P-256 is represented as a little-endian array of - * |P256_LIMBS| |BN_ULONG|s, spanning the full range of values. - * - * The following functions take fully-reduced inputs mod P and give - * fully-reduced outputs. They may be used in-place. */ +// P-256 field operations. +// +// An element mod P in P-256 is represented as a little-endian array of +// |P256_LIMBS| |BN_ULONG|s, spanning the full range of values. +// +// The following functions take fully-reduced inputs mod P and give +// fully-reduced outputs. They may be used in-place. #define P256_LIMBS (256 / BN_BITS2) -/* ecp_nistz256_neg sets |res| to -|a| mod P. */ +// ecp_nistz256_neg sets |res| to -|a| mod P. void ecp_nistz256_neg(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]); -/* ecp_nistz256_mul_mont sets |res| to |a| * |b| * 2^-256 mod P. */ +// ecp_nistz256_mul_mont sets |res| to |a| * |b| * 2^-256 mod P. void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS], const BN_ULONG b[P256_LIMBS]); -/* ecp_nistz256_sqr_mont sets |res| to |a| * |a| * 2^-256 mod P. */ +// ecp_nistz256_sqr_mont sets |res| to |a| * |a| * 2^-256 mod P. void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]); -/* ecp_nistz256_from_mont sets |res| to |in|, converted from Montgomery domain - * by multiplying with 1. */ +// ecp_nistz256_from_mont sets |res| to |in|, converted from Montgomery domain +// by multiplying with 1. static inline void ecp_nistz256_from_mont(BN_ULONG res[P256_LIMBS], const BN_ULONG in[P256_LIMBS]) { static const BN_ULONG ONE[P256_LIMBS] = { 1 }; @@ -58,47 +58,47 @@ static inline void ecp_nistz256_from_mont(BN_ULONG res[P256_LIMBS], } -/* P-256 point operations. - * - * The following functions may be used in-place. All coordinates are in the - * Montgomery domain. */ +// P-256 point operations. +// +// The following functions may be used in-place. All coordinates are in the +// Montgomery domain. 
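Why multiplying by the plain constant one (the local ONE = { 1 } in the inline function above) takes a value out of the Montgomery domain: with R = 2^256, ecp_nistz256_mul_mont computes a*b*R^-1 mod P, and a value x is carried in the domain as x*R mod P. Hence

  mul_mont(x*R, 1) = (x*R) * 1 * R^-1 = x (mod P),

so a single Montgomery multiplication by { 1 } recovers the ordinary representative, which is all ecp_nistz256_from_mont needs to do.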
-/* A P256_POINT represents a P-256 point in Jacobian coordinates. */ +// A P256_POINT represents a P-256 point in Jacobian coordinates. typedef struct { BN_ULONG X[P256_LIMBS]; BN_ULONG Y[P256_LIMBS]; BN_ULONG Z[P256_LIMBS]; } P256_POINT; -/* A P256_POINT_AFFINE represents a P-256 point in affine coordinates. Infinity - * is encoded as (0, 0). */ +// A P256_POINT_AFFINE represents a P-256 point in affine coordinates. Infinity +// is encoded as (0, 0). typedef struct { BN_ULONG X[P256_LIMBS]; BN_ULONG Y[P256_LIMBS]; } P256_POINT_AFFINE; -/* ecp_nistz256_select_w5 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 16 - * and all zeros (the point at infinity) if |index| is 0. This is done in - * constant time. */ +// ecp_nistz256_select_w5 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 16 +// and all zeros (the point at infinity) if |index| is 0. This is done in +// constant time. void ecp_nistz256_select_w5(P256_POINT *val, const P256_POINT in_t[16], int index); -/* ecp_nistz256_select_w7 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 64 - * and all zeros (the point at infinity) if |index| is 0. This is done in - * constant time. */ +// ecp_nistz256_select_w7 sets |*val| to |in_t[index-1]| if 1 <= |index| <= 64 +// and all zeros (the point at infinity) if |index| is 0. This is done in +// constant time. void ecp_nistz256_select_w7(P256_POINT_AFFINE *val, const P256_POINT_AFFINE in_t[64], int index); -/* ecp_nistz256_point_double sets |r| to |a| doubled. */ +// ecp_nistz256_point_double sets |r| to |a| doubled. void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a); -/* ecp_nistz256_point_add adds |a| to |b| and places the result in |r|. */ +// ecp_nistz256_point_add adds |a| to |b| and places the result in |r|. void ecp_nistz256_point_add(P256_POINT *r, const P256_POINT *a, const P256_POINT *b); -/* ecp_nistz256_point_add_affine adds |a| to |b| and places the result in - * |r|. |a| and |b| must not represent the same point unless they are both - * infinity. */ +// ecp_nistz256_point_add_affine adds |a| to |b| and places the result in +// |r|. |a| and |b| must not represent the same point unless they are both +// infinity. void ecp_nistz256_point_add_affine(P256_POINT *r, const P256_POINT *a, const P256_POINT_AFFINE *b); @@ -107,7 +107,7 @@ void ecp_nistz256_point_add_affine(P256_POINT *r, const P256_POINT *a, #if defined(__cplusplus) -} /* extern C++ */ +} // extern C++ #endif -#endif /* OPENSSL_HEADER_EC_P256_X86_64_H */ +#endif // OPENSSL_HEADER_EC_P256_X86_64_H diff --git a/Sources/BoringSSL/crypto/ec/simple.c b/Sources/BoringSSL/crypto/fipsmodule/ec/simple.c similarity index 78% rename from Sources/BoringSSL/crypto/ec/simple.c rename to Sources/BoringSSL/crypto/fipsmodule/ec/simple.c index 880b717c1..bc395252b 100644 --- a/Sources/BoringSSL/crypto/ec/simple.c +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/simple.c @@ -74,19 +74,19 @@ #include #include "internal.h" -#include "../internal.h" +#include "../../internal.h" -/* Most method functions in this file are designed to work with non-trivial - * representations of field elements if necessary (see ecp_mont.c): while - * standard modular addition and subtraction are used, the field_mul and - * field_sqr methods will be used for multiplication, and field_encode and - * field_decode (if defined) will be used for converting between - * representations. - * - * Functions here specifically assume that if a non-trivial representation is - * used, it is a Montgomery representation (i.e. 
'encoding' means multiplying - * by some factor R). */ +// Most method functions in this file are designed to work with non-trivial +// representations of field elements if necessary (see ecp_mont.c): while +// standard modular addition and subtraction are used, the field_mul and +// field_sqr methods will be used for multiplication, and field_encode and +// field_decode (if defined) will be used for converting between +// representations. +// +// Functions here specifically assume that if a non-trivial representation is +// used, it is a Montgomery representation (i.e. 'encoding' means multiplying +// by some factor R). int ec_GFp_simple_group_init(EC_GROUP *group) { BN_init(&group->field); @@ -104,18 +104,6 @@ void ec_GFp_simple_group_finish(EC_GROUP *group) { BN_free(&group->one); } -int ec_GFp_simple_group_copy(EC_GROUP *dest, const EC_GROUP *src) { - if (!BN_copy(&dest->field, &src->field) || - !BN_copy(&dest->a, &src->a) || - !BN_copy(&dest->b, &src->b) || - !BN_copy(&dest->one, &src->one)) { - return 0; - } - - dest->a_is_minus3 = src->a_is_minus3; - return 1; -} - int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx) { @@ -123,7 +111,7 @@ int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p, BN_CTX *new_ctx = NULL; BIGNUM *tmp_a; - /* p must be a prime > 3 */ + // p must be a prime > 3 if (BN_num_bits(p) <= 2 || !BN_is_odd(p)) { OPENSSL_PUT_ERROR(EC, EC_R_INVALID_FIELD); return 0; @@ -142,13 +130,13 @@ int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p, goto err; } - /* group->field */ + // group->field if (!BN_copy(&group->field, p)) { goto err; } BN_set_negative(&group->field, 0); - /* group->a */ + // group->a if (!BN_nnmod(tmp_a, a, p, ctx)) { goto err; } @@ -160,7 +148,7 @@ int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p, goto err; } - /* group->b */ + // group->b if (!BN_nnmod(&group->b, b, p, ctx)) { goto err; } @@ -169,7 +157,7 @@ int ec_GFp_simple_group_set_curve(EC_GROUP *group, const BIGNUM *p, goto err; } - /* group->a_is_minus3 */ + // group->a_is_minus3 if (!BN_add_word(tmp_a, 3)) { goto err; } @@ -249,12 +237,6 @@ void ec_GFp_simple_point_finish(EC_POINT *point) { BN_free(&point->Z); } -void ec_GFp_simple_point_clear_finish(EC_POINT *point) { - BN_clear_free(&point->X); - BN_clear_free(&point->Y); - BN_clear_free(&point->Z); -} - int ec_GFp_simple_point_copy(EC_POINT *dest, const EC_POINT *src) { if (!BN_copy(&dest->X, &src->X) || !BN_copy(&dest->Y, &src->Y) || @@ -313,54 +295,11 @@ int ec_GFp_simple_set_Jprojective_coordinates_GFp( return ret; } -int ec_GFp_simple_get_Jprojective_coordinates_GFp(const EC_GROUP *group, - const EC_POINT *point, - BIGNUM *x, BIGNUM *y, - BIGNUM *z, BN_CTX *ctx) { - BN_CTX *new_ctx = NULL; - int ret = 0; - - if (group->meth->field_decode != 0) { - if (ctx == NULL) { - ctx = new_ctx = BN_CTX_new(); - if (ctx == NULL) { - return 0; - } - } - - if (x != NULL && !group->meth->field_decode(group, x, &point->X, ctx)) { - goto err; - } - if (y != NULL && !group->meth->field_decode(group, y, &point->Y, ctx)) { - goto err; - } - if (z != NULL && !group->meth->field_decode(group, z, &point->Z, ctx)) { - goto err; - } - } else { - if (x != NULL && !BN_copy(x, &point->X)) { - goto err; - } - if (y != NULL && !BN_copy(y, &point->Y)) { - goto err; - } - if (z != NULL && !BN_copy(z, &point->Z)) { - goto err; - } - } - - ret = 1; - -err: - BN_CTX_free(new_ctx); - return ret; -} - int ec_GFp_simple_point_set_affine_coordinates(const EC_GROUP 
*group, EC_POINT *point, const BIGNUM *x, const BIGNUM *y, BN_CTX *ctx) { if (x == NULL || y == NULL) { - /* unlike for projective coordinates, we do not tolerate this */ + // unlike for projective coordinates, we do not tolerate this OPENSSL_PUT_ERROR(EC, ERR_R_PASSED_NULL_PARAMETER); return 0; } @@ -412,88 +351,87 @@ int ec_GFp_simple_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, goto end; } - /* Note that in this function we must not read components of 'a' or 'b' - * once we have written the corresponding components of 'r'. - * ('r' might be one of 'a' or 'b'.) - */ + // Note that in this function we must not read components of 'a' or 'b' + // once we have written the corresponding components of 'r'. + // ('r' might be one of 'a' or 'b'.) - /* n1, n2 */ + // n1, n2 int b_Z_is_one = BN_cmp(&b->Z, &group->one) == 0; if (b_Z_is_one) { if (!BN_copy(n1, &a->X) || !BN_copy(n2, &a->Y)) { goto end; } - /* n1 = X_a */ - /* n2 = Y_a */ + // n1 = X_a + // n2 = Y_a } else { if (!field_sqr(group, n0, &b->Z, ctx) || !field_mul(group, n1, &a->X, n0, ctx)) { goto end; } - /* n1 = X_a * Z_b^2 */ + // n1 = X_a * Z_b^2 if (!field_mul(group, n0, n0, &b->Z, ctx) || !field_mul(group, n2, &a->Y, n0, ctx)) { goto end; } - /* n2 = Y_a * Z_b^3 */ + // n2 = Y_a * Z_b^3 } - /* n3, n4 */ + // n3, n4 int a_Z_is_one = BN_cmp(&a->Z, &group->one) == 0; if (a_Z_is_one) { if (!BN_copy(n3, &b->X) || !BN_copy(n4, &b->Y)) { goto end; } - /* n3 = X_b */ - /* n4 = Y_b */ + // n3 = X_b + // n4 = Y_b } else { if (!field_sqr(group, n0, &a->Z, ctx) || !field_mul(group, n3, &b->X, n0, ctx)) { goto end; } - /* n3 = X_b * Z_a^2 */ + // n3 = X_b * Z_a^2 if (!field_mul(group, n0, n0, &a->Z, ctx) || !field_mul(group, n4, &b->Y, n0, ctx)) { goto end; } - /* n4 = Y_b * Z_a^3 */ + // n4 = Y_b * Z_a^3 } - /* n5, n6 */ + // n5, n6 if (!BN_mod_sub_quick(n5, n1, n3, p) || !BN_mod_sub_quick(n6, n2, n4, p)) { goto end; } - /* n5 = n1 - n3 */ - /* n6 = n2 - n4 */ + // n5 = n1 - n3 + // n6 = n2 - n4 if (BN_is_zero(n5)) { if (BN_is_zero(n6)) { - /* a is the same point as b */ + // a is the same point as b BN_CTX_end(ctx); ret = EC_POINT_dbl(group, r, a, ctx); ctx = NULL; goto end; } else { - /* a is the inverse of b */ + // a is the inverse of b BN_zero(&r->Z); ret = 1; goto end; } } - /* 'n7', 'n8' */ + // 'n7', 'n8' if (!BN_mod_add_quick(n1, n1, n3, p) || !BN_mod_add_quick(n2, n2, n4, p)) { goto end; } - /* 'n7' = n1 + n3 */ - /* 'n8' = n2 + n4 */ + // 'n7' = n1 + n3 + // 'n8' = n2 + n4 - /* Z_r */ + // Z_r if (a_Z_is_one && b_Z_is_one) { if (!BN_copy(&r->Z, n5)) { goto end; @@ -515,28 +453,28 @@ int ec_GFp_simple_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, } } - /* Z_r = Z_a * Z_b * n5 */ + // Z_r = Z_a * Z_b * n5 - /* X_r */ + // X_r if (!field_sqr(group, n0, n6, ctx) || !field_sqr(group, n4, n5, ctx) || !field_mul(group, n3, n1, n4, ctx) || !BN_mod_sub_quick(&r->X, n0, n3, p)) { goto end; } - /* X_r = n6^2 - n5^2 * 'n7' */ + // X_r = n6^2 - n5^2 * 'n7' - /* 'n9' */ + // 'n9' if (!BN_mod_lshift1_quick(n0, &r->X, p) || !BN_mod_sub_quick(n0, n3, n0, p)) { goto end; } - /* n9 = n5^2 * 'n7' - 2 * X_r */ + // n9 = n5^2 * 'n7' - 2 * X_r - /* Y_r */ + // Y_r if (!field_mul(group, n0, n0, n6, ctx) || !field_mul(group, n5, n4, n5, ctx)) { - goto end; /* now n5 is n5^3 */ + goto end; // now n5 is n5^3 } if (!field_mul(group, n1, n2, n5, ctx) || !BN_mod_sub_quick(n0, n0, n1, p)) { @@ -545,17 +483,17 @@ int ec_GFp_simple_add(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, if (BN_is_odd(n0) && !BN_add(n0, n0, p)) { goto 
end; } - /* now 0 <= n0 < 2*p, and n0 is even */ + // now 0 <= n0 < 2*p, and n0 is even if (!BN_rshift1(&r->Y, n0)) { goto end; } - /* Y_r = (n6 * 'n9' - 'n8' * 'n5^3') / 2 */ + // Y_r = (n6 * 'n9' - 'n8' * 'n5^3') / 2 ret = 1; end: if (ctx) { - /* otherwise we already called BN_CTX_end */ + // otherwise we already called BN_CTX_end BN_CTX_end(ctx); } BN_CTX_free(new_ctx); @@ -597,12 +535,11 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, goto err; } - /* Note that in this function we must not read components of 'a' - * once we have written the corresponding components of 'r'. - * ('r' might the same as 'a'.) - */ + // Note that in this function we must not read components of 'a' + // once we have written the corresponding components of 'r'. + // ('r' might the same as 'a'.) - /* n1 */ + // n1 if (BN_cmp(&a->Z, &group->one) == 0) { if (!field_sqr(group, n0, &a->X, ctx) || !BN_mod_lshift1_quick(n1, n0, p) || @@ -610,7 +547,7 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, !BN_mod_add_quick(n1, n0, &group->a, p)) { goto err; } - /* n1 = 3 * X_a^2 + a_curve */ + // n1 = 3 * X_a^2 + a_curve } else if (group->a_is_minus3) { if (!field_sqr(group, n1, &a->Z, ctx) || !BN_mod_add_quick(n0, &a->X, n1, p) || @@ -620,8 +557,8 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, !BN_mod_add_quick(n1, n0, n1, p)) { goto err; } - /* n1 = 3 * (X_a + Z_a^2) * (X_a - Z_a^2) - * = 3 * X_a^2 - 3 * Z_a^4 */ + // n1 = 3 * (X_a + Z_a^2) * (X_a - Z_a^2) + // = 3 * X_a^2 - 3 * Z_a^4 } else { if (!field_sqr(group, n0, &a->X, ctx) || !BN_mod_lshift1_quick(n1, n0, p) || @@ -632,10 +569,10 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, !BN_mod_add_quick(n1, n1, n0, p)) { goto err; } - /* n1 = 3 * X_a^2 + a_curve * Z_a^4 */ + // n1 = 3 * X_a^2 + a_curve * Z_a^4 } - /* Z_r */ + // Z_r if (BN_cmp(&a->Z, &group->one) == 0) { if (!BN_copy(n0, &a->Y)) { goto err; @@ -646,38 +583,38 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, if (!BN_mod_lshift1_quick(&r->Z, n0, p)) { goto err; } - /* Z_r = 2 * Y_a * Z_a */ + // Z_r = 2 * Y_a * Z_a - /* n2 */ + // n2 if (!field_sqr(group, n3, &a->Y, ctx) || !field_mul(group, n2, &a->X, n3, ctx) || !BN_mod_lshift_quick(n2, n2, 2, p)) { goto err; } - /* n2 = 4 * X_a * Y_a^2 */ + // n2 = 4 * X_a * Y_a^2 - /* X_r */ + // X_r if (!BN_mod_lshift1_quick(n0, n2, p) || !field_sqr(group, &r->X, n1, ctx) || !BN_mod_sub_quick(&r->X, &r->X, n0, p)) { goto err; } - /* X_r = n1^2 - 2 * n2 */ + // X_r = n1^2 - 2 * n2 - /* n3 */ + // n3 if (!field_sqr(group, n0, n3, ctx) || !BN_mod_lshift_quick(n3, n0, 3, p)) { goto err; } - /* n3 = 8 * Y_a^4 */ + // n3 = 8 * Y_a^4 - /* Y_r */ + // Y_r if (!BN_mod_sub_quick(n0, n2, &r->X, p) || !field_mul(group, n0, n1, n0, ctx) || !BN_mod_sub_quick(&r->Y, n0, n3, p)) { goto err; } - /* Y_r = n1 * (n2 - X_r) - n3 */ + // Y_r = n1 * (n2 - X_r) - n3 ret = 1; @@ -689,7 +626,7 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a, int ec_GFp_simple_invert(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx) { if (EC_POINT_is_at_infinity(group, point) || BN_is_zero(&point->Y)) { - /* point is its own inverse */ + // point is its own inverse return 1; } @@ -734,17 +671,16 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point, goto err; } - /* We have a curve defined by a Weierstrass equation - * y^2 = x^3 + a*x + b. 
- * The point to consider is given in Jacobian projective coordinates - * where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3). - * Substituting this and multiplying by Z^6 transforms the above equation - * into - * Y^2 = X^3 + a*X*Z^4 + b*Z^6. - * To test this, we add up the right-hand side in 'rh'. - */ + // We have a curve defined by a Weierstrass equation + // y^2 = x^3 + a*x + b. + // The point to consider is given in Jacobian projective coordinates + // where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3). + // Substituting this and multiplying by Z^6 transforms the above equation + // into + // Y^2 = X^3 + a*X*Z^4 + b*Z^6. + // To test this, we add up the right-hand side in 'rh'. - /* rh := X^2 */ + // rh := X^2 if (!field_sqr(group, rh, &point->X, ctx)) { goto err; } @@ -756,7 +692,7 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point, goto err; } - /* rh := (rh + a*Z^4)*X */ + // rh := (rh + a*Z^4)*X if (group->a_is_minus3) { if (!BN_mod_lshift1_quick(tmp, Z4, p) || !BN_mod_add_quick(tmp, tmp, Z4, p) || @@ -772,24 +708,24 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point, } } - /* rh := rh + b*Z^6 */ + // rh := rh + b*Z^6 if (!field_mul(group, tmp, &group->b, Z6, ctx) || !BN_mod_add_quick(rh, rh, tmp, p)) { goto err; } } else { - /* rh := (rh + a)*X */ + // rh := (rh + a)*X if (!BN_mod_add_quick(rh, rh, &group->a, p) || !field_mul(group, rh, rh, &point->X, ctx)) { goto err; } - /* rh := rh + b */ + // rh := rh + b if (!BN_mod_add_quick(rh, rh, &group->b, p)) { goto err; } } - /* 'lh' := Y^2 */ + // 'lh' := Y^2 if (!field_sqr(group, tmp, &point->Y, ctx)) { goto err; } @@ -804,11 +740,10 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point, int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, const EC_POINT *b, BN_CTX *ctx) { - /* return values: - * -1 error - * 0 equal (in affine coordinates) - * 1 not equal - */ + // return values: + // -1 error + // 0 equal (in affine coordinates) + // 1 not equal int (*field_mul)(const EC_GROUP *, BIGNUM *, const BIGNUM *, const BIGNUM *, BN_CTX *); @@ -818,11 +753,11 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, const BIGNUM *tmp1_, *tmp2_; int ret = -1; - if (EC_POINT_is_at_infinity(group, a)) { - return EC_POINT_is_at_infinity(group, b) ? 0 : 1; + if (ec_GFp_simple_is_at_infinity(group, a)) { + return ec_GFp_simple_is_at_infinity(group, b) ? 0 : 1; } - if (EC_POINT_is_at_infinity(group, b)) { + if (ec_GFp_simple_is_at_infinity(group, b)) { return 1; } @@ -852,11 +787,10 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, goto end; } - /* We have to decide whether - * (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3), - * or equivalently, whether - * (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3). - */ + // We have to decide whether + // (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3), + // or equivalently, whether + // (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3). 
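The comparison above avoids any modular inversion by cross-multiplying with Z^2 and Z^3. The following standalone sketch (not part of the patch; it uses a toy prime and plain integers rather than BoringSSL types) checks that identity numerically: it builds two Jacobian representations of the same affine point with different Z values and confirms that X_a*Z_b^2 == X_b*Z_a^2 and Y_a*Z_b^3 == Y_b*Z_a^3 modulo p.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Toy modulus; any prime works for the algebraic identity being checked.
static const uint64_t P = 1000003;

static uint64_t mulmod(uint64_t a, uint64_t b) { return (a * b) % P; }

int main(void) {
  // An arbitrary affine point (x, y); the identity does not depend on the
  // point lying on any particular curve.
  uint64_t x = 1234, y = 5678;

  // Two Jacobian representations of (x, y): (x*Z^2, y*Z^3, Z).
  uint64_t za = 17, zb = 91;
  uint64_t Xa = mulmod(x, mulmod(za, za));
  uint64_t Ya = mulmod(y, mulmod(za, mulmod(za, za)));
  uint64_t Xb = mulmod(x, mulmod(zb, zb));
  uint64_t Yb = mulmod(y, mulmod(zb, mulmod(zb, zb)));

  // (X_a/Z_a^2, Y_a/Z_a^3) == (X_b/Z_b^2, Y_b/Z_b^3) exactly when the cross
  // products agree, which needs no inversion.
  assert(mulmod(Xa, mulmod(zb, zb)) == mulmod(Xb, mulmod(za, za)));
  assert(mulmod(Ya, mulmod(zb, mulmod(zb, zb))) ==
         mulmod(Yb, mulmod(za, mulmod(za, za))));
  printf("cross-multiplication equality check holds\n");
  return 0;
}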
if (!b_Z_is_one) { if (!field_sqr(group, Zb23, &b->Z, ctx) || @@ -877,9 +811,9 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, tmp2_ = &b->X; } - /* compare X_a*Z_b^2 with X_b*Z_a^2 */ + // compare X_a*Z_b^2 with X_b*Z_a^2 if (BN_cmp(tmp1_, tmp2_) != 0) { - ret = 1; /* points differ */ + ret = 1; // points differ goto end; } @@ -889,7 +823,7 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, !field_mul(group, tmp1, &a->Y, Zb23, ctx)) { goto end; } - /* tmp1_ = tmp1 */ + // tmp1_ = tmp1 } else { tmp1_ = &a->Y; } @@ -898,18 +832,18 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a, !field_mul(group, tmp2, &b->Y, Za23, ctx)) { goto end; } - /* tmp2_ = tmp2 */ + // tmp2_ = tmp2 } else { tmp2_ = &b->Y; } - /* compare Y_a*Z_b^3 with Y_b*Z_a^3 */ + // compare Y_a*Z_b^3 with Y_b*Z_a^3 if (BN_cmp(tmp1_, tmp2_) != 0) { - ret = 1; /* points differ */ + ret = 1; // points differ goto end; } - /* points are equal */ + // points are equal ret = 0; end: @@ -997,8 +931,8 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num, } } - /* Set each prod_Z[i] to the product of points[0]->Z .. points[i]->Z, - * skipping any zero-valued inputs (pretend that they're 1). */ + // Set each prod_Z[i] to the product of points[0]->Z .. points[i]->Z, + // skipping any zero-valued inputs (pretend that they're 1). if (!BN_is_zero(&points[0]->Z)) { if (!BN_copy(prod_Z[0], &points[0]->Z)) { @@ -1023,13 +957,13 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num, } } - /* Now use a single explicit inversion to replace every non-zero points[i]->Z - * by its inverse. We use |BN_mod_inverse_odd| instead of doing a constant- - * time inversion using Fermat's Little Theorem because this function is - * usually only used for converting multiples of a public key point to - * affine, and a public key point isn't secret. If we were to use Fermat's - * Little Theorem then the cost of the inversion would usually be so high - * that converting the multiples to affine would be counterproductive. */ + // Now use a single explicit inversion to replace every non-zero points[i]->Z + // by its inverse. We use |BN_mod_inverse_odd| instead of doing a constant- + // time inversion using Fermat's Little Theorem because this function is + // usually only used for converting multiples of a public key point to + // affine, and a public key point isn't secret. If we were to use Fermat's + // Little Theorem then the cost of the inversion would usually be so high + // that converting the multiples to affine would be counterproductive. int no_inverse; if (!BN_mod_inverse_odd(tmp, &no_inverse, prod_Z[num - 1], &group->field, ctx)) { @@ -1038,9 +972,9 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num, } if (group->meth->field_encode != NULL) { - /* In the Montgomery case, we just turned R*H (representing H) - * into 1/(R*H), but we need R*(1/H) (representing 1/H); - * i.e. we need to multiply by the Montgomery factor twice. */ + // In the Montgomery case, we just turned R*H (representing H) + // into 1/(R*H), but we need R*(1/H) (representing 1/H); + // i.e. we need to multiply by the Montgomery factor twice. if (!group->meth->field_encode(group, tmp, tmp, ctx) || !group->meth->field_encode(group, tmp, tmp, ctx)) { goto err; @@ -1048,34 +982,34 @@ int ec_GFp_simple_points_make_affine(const EC_GROUP *group, size_t num, } for (size_t i = num - 1; i > 0; --i) { - /* Loop invariant: tmp is the product of the inverses of - * points[0]->Z .. 
points[i]->Z (zero-valued inputs skipped). */ + // Loop invariant: tmp is the product of the inverses of + // points[0]->Z .. points[i]->Z (zero-valued inputs skipped). if (BN_is_zero(&points[i]->Z)) { continue; } - /* Set tmp_Z to the inverse of points[i]->Z (as product - * of Z inverses 0 .. i, Z values 0 .. i - 1). */ + // Set tmp_Z to the inverse of points[i]->Z (as product + // of Z inverses 0 .. i, Z values 0 .. i - 1). if (!group->meth->field_mul(group, tmp_Z, prod_Z[i - 1], tmp, ctx) || - /* Update tmp to satisfy the loop invariant for i - 1. */ + // Update tmp to satisfy the loop invariant for i - 1. !group->meth->field_mul(group, tmp, tmp, &points[i]->Z, ctx) || - /* Replace points[i]->Z by its inverse. */ + // Replace points[i]->Z by its inverse. !BN_copy(&points[i]->Z, tmp_Z)) { goto err; } } - /* Replace points[0]->Z by its inverse. */ + // Replace points[0]->Z by its inverse. if (!BN_is_zero(&points[0]->Z) && !BN_copy(&points[0]->Z, tmp)) { goto err; } - /* Finally, fix up the X and Y coordinates for all points. */ + // Finally, fix up the X and Y coordinates for all points. for (size_t i = 0; i < num; i++) { EC_POINT *p = points[i]; if (!BN_is_zero(&p->Z)) { - /* turn (X, Y, 1/Z) into (X/Z^2, Y/Z^3, 1). */ + // turn (X, Y, 1/Z) into (X/Z^2, Y/Z^3, 1). if (!group->meth->field_sqr(group, tmp, &p->Z, ctx) || !group->meth->field_mul(group, &p->X, &p->X, tmp, ctx) || !group->meth->field_mul(group, tmp, tmp, &p->Z, ctx) || diff --git a/Sources/BoringSSL/crypto/fipsmodule/ec/util-64.c b/Sources/BoringSSL/crypto/fipsmodule/ec/util-64.c new file mode 100644 index 000000000..0cb117b4d --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/util-64.c @@ -0,0 +1,109 @@ +/* Copyright (c) 2015, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + + +#if defined(OPENSSL_64_BIT) && !defined(OPENSSL_WINDOWS) + +#include + +#include "internal.h" + +// This function looks at 5+1 scalar bits (5 current, 1 adjacent less +// significant bit), and recodes them into a signed digit for use in fast point +// multiplication: the use of signed rather than unsigned digits means that +// fewer points need to be precomputed, given that point inversion is easy (a +// precomputed point dP makes -dP available as well). +// +// BACKGROUND: +// +// Signed digits for multiplication were introduced by Booth ("A signed binary +// multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV, +// pt. 2 (1951), pp. 236-240), in that case for multiplication of integers. 
+// Booth's original encoding did not generally improve the density of nonzero +// digits over the binary representation, and was merely meant to simplify the +// handling of signed factors given in two's complement; but it has since been +// shown to be the basis of various signed-digit representations that do have +// further advantages, including the wNAF, using the following general +// approach: +// +// (1) Given a binary representation +// +// b_k ... b_2 b_1 b_0, +// +// of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1 +// by using bit-wise subtraction as follows: +// +// b_k b_(k-1) ... b_2 b_1 b_0 +// - b_k ... b_3 b_2 b_1 b_0 +// ------------------------------------- +// s_k b_(k-1) ... s_3 s_2 s_1 s_0 +// +// A left-shift followed by subtraction of the original value yields a new +// representation of the same value, using signed bits s_i = b_(i+1) - b_i. +// This representation from Booth's paper has since appeared in the +// literature under a variety of different names including "reversed binary +// form", "alternating greedy expansion", "mutual opposite form", and +// "sign-alternating {+-1}-representation". +// +// An interesting property is that among the nonzero bits, values 1 and -1 +// strictly alternate. +// +// (2) Various window schemes can be applied to the Booth representation of +// integers: for example, right-to-left sliding windows yield the wNAF +// (a signed-digit encoding independently discovered by various researchers +// in the 1990s), and left-to-right sliding windows yield a left-to-right +// equivalent of the wNAF (independently discovered by various researchers +// around 2004). +// +// To prevent leaking information through side channels in point multiplication, +// we need to recode the given integer into a regular pattern: sliding windows +// as in wNAFs won't do, we need their fixed-window equivalent -- which is a few +// decades older: we'll be using the so-called "modified Booth encoding" due to +// MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49 +// (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five +// signed bits into a signed digit: +// +// s_(4j + 4) s_(4j + 3) s_(4j + 2) s_(4j + 1) s_(4j) +// +// The sign-alternating property implies that the resulting digit values are +// integers from -16 to 16. +// +// Of course, we don't actually need to compute the signed digits s_i as an +// intermediate step (that's just a nice way to see how this scheme relates +// to the wNAF): a direct computation obtains the recoded digit from the +// six bits b_(4j + 4) ... b_(4j - 1). +// +// This function takes those five bits as an integer (0 .. 63), writing the +// recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute +// value, in the range 0 .. 8). Note that this integer essentially provides the +// input bits "shifted to the left" by one position: for example, the input to +// compute the least significant recoded digit, given that there's no bit b_-1, +// has to be b_4 b_3 b_2 b_1 b_0 0. 
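As a sanity check on the recoding described above, here is a standalone sketch (not part of the patch) that mirrors the bit manipulations of ec_GFp_nistp_recode_scalar_bits below, compares them against the straightforward signed formula digit = (in >> 1) + (in & 1) - 32*(in >> 5) for every 6-bit input, and then reassembles a small scalar from its recoded radix-2^5 digits.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Mirrors the constant-time recoding below (same bit manipulations).
static void recode(uint8_t *sign, uint8_t *digit, uint8_t in) {
  uint8_t s = ~((in >> 5) - 1);  // all ones iff the top window bit is set
  uint8_t d = (1 << 6) - in - 1;
  d = (d & s) | (in & ~s);
  d = (d >> 1) + (d & 1);
  *sign = s & 1;
  *digit = d;
}

int main(void) {
  // 1. The recoded value equals b_-1 + b_0 + 2*b_1 + 4*b_2 + 8*b_3 - 16*b_4.
  for (unsigned in = 0; in < 64; in++) {
    uint8_t sign, digit;
    recode(&sign, &digit, (uint8_t)in);
    int expected = (int)(in >> 1) + (int)(in & 1) - 32 * (int)(in >> 5);
    int got = sign ? -(int)digit : (int)digit;
    assert(got == expected);
  }

  // 2. Summing digit_j * 2^(5j) over the windows reconstructs the scalar
  // (window j reads bits b_(5j+4) .. b_(5j-1); b_-1 is taken as 0).
  uint32_t scalar = 0x2f9c3b5u;  // arbitrary value well below 2^30
  int64_t acc = 0;
  for (int j = 0; j < 7; j++) {
    uint8_t in = (j == 0) ? (uint8_t)((scalar << 1) & 0x3f)
                          : (uint8_t)((scalar >> (5 * j - 1)) & 0x3f);
    uint8_t sign, digit;
    recode(&sign, &digit, in);
    int64_t d = sign ? -(int64_t)digit : (int64_t)digit;
    acc += d * ((int64_t)1 << (5 * j));
  }
  assert(acc == (int64_t)scalar);
  printf("recoding checks passed\n");
  return 0;
}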
+void ec_GFp_nistp_recode_scalar_bits(uint8_t *sign, uint8_t *digit, + uint8_t in) { + uint8_t s, d; + + s = ~((in >> 5) - 1); /* sets all bits to MSB(in), 'in' seen as + * 6-bit value */ + d = (1 << 6) - in - 1; + d = (d & s) | (in & ~s); + d = (d >> 1) + (d & 1); + + *sign = s & 1; + *digit = d; +} + +#endif // 64_BIT && !WINDOWS diff --git a/Sources/BoringSSL/crypto/ec/wnaf.c b/Sources/BoringSSL/crypto/fipsmodule/ec/wnaf.c similarity index 72% rename from Sources/BoringSSL/crypto/ec/wnaf.c rename to Sources/BoringSSL/crypto/fipsmodule/ec/wnaf.c index 67b7f3489..e3b6437b6 100644 --- a/Sources/BoringSSL/crypto/ec/wnaf.c +++ b/Sources/BoringSSL/crypto/fipsmodule/ec/wnaf.c @@ -75,21 +75,21 @@ #include #include "internal.h" -#include "../internal.h" +#include "../../internal.h" -/* This file implements the wNAF-based interleaving multi-exponentation method - * (); - * */ +// This file implements the wNAF-based interleaving multi-exponentiation method +// at: +// http://link.springer.com/chapter/10.1007%2F3-540-45537-X_13 +// http://www.bmoeller.de/pdf/TI-01-08.multiexp.pdf -/* Determine the modified width-(w+1) Non-Adjacent Form (wNAF) of 'scalar'. - * This is an array r[] of values that are either zero or odd with an - * absolute value less than 2^w satisfying - * scalar = \sum_j r[j]*2^j - * where at most one of any w+1 consecutive digits is non-zero - * with the exception that the most significant digit may be only - * w-1 zeros away from that next non-zero digit. - */ +// Determine the modified width-(w+1) Non-Adjacent Form (wNAF) of 'scalar'. +// This is an array r[] of values that are either zero or odd with an +// absolute value less than 2^w satisfying +// scalar = \sum_j r[j]*2^j +// where at most one of any w+1 consecutive digits is non-zero +// with the exception that the most significant digit may be only +// w-1 zeros away from that next non-zero digit. static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { int window_val; int ok = 0; @@ -109,28 +109,23 @@ static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { return r; } - /* 'int8_t' can represent integers with absolute values less than 2^7. */ + // 'int8_t' can represent integers with absolute values less than 2^7. if (w <= 0 || w > 7) { OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR); goto err; } - bit = 1 << w; /* at most 128 */ - next_bit = bit << 1; /* at most 256 */ - mask = next_bit - 1; /* at most 255 */ + bit = 1 << w; // at most 128 + next_bit = bit << 1; // at most 256 + mask = next_bit - 1; // at most 255 if (BN_is_negative(scalar)) { sign = -1; } - if (scalar->d == NULL || scalar->top == 0) { - OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR); - goto err; - } - len = BN_num_bits(scalar); - /* The modified wNAF may be one digit longer than binary representation - * (*ret_len will be set to the actual length, i.e. at most - * BN_num_bits(scalar) + 1). */ + // The modified wNAF may be one digit longer than binary representation + // (*ret_len will be set to the actual length, i.e. at most + // BN_num_bits(scalar) + 1). r = OPENSSL_malloc(len + 1); if (r == NULL) { OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); @@ -138,30 +133,30 @@ static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { } window_val = scalar->d[0] & mask; j = 0; - /* If j+w+1 >= len, window_val will not increase. */ + // If j+w+1 >= len, window_val will not increase. 
while (window_val != 0 || j + w + 1 < len) { int digit = 0; - /* 0 <= window_val <= 2^(w+1) */ + // 0 <= window_val <= 2^(w+1) if (window_val & 1) { - /* 0 < window_val < 2^(w+1) */ + // 0 < window_val < 2^(w+1) if (window_val & bit) { - digit = window_val - next_bit; /* -2^w < digit < 0 */ + digit = window_val - next_bit; // -2^w < digit < 0 -#if 1 /* modified wNAF */ +#if 1 // modified wNAF if (j + w + 1 >= len) { - /* special case for generating modified wNAFs: - * no new bits will be added into window_val, - * so using a positive digit here will decrease - * the total length of the representation */ + // special case for generating modified wNAFs: + // no new bits will be added into window_val, + // so using a positive digit here will decrease + // the total length of the representation - digit = window_val & (mask >> 1); /* 0 < digit < 2^w */ + digit = window_val & (mask >> 1); // 0 < digit < 2^w } #endif } else { - digit = window_val; /* 0 < digit < 2^w */ + digit = window_val; // 0 < digit < 2^w } if (digit <= -bit || digit >= bit || !(digit & 1)) { @@ -171,8 +166,8 @@ static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { window_val -= digit; - /* Now window_val is 0 or 2^(w+1) in standard wNAF generation; - * for modified window NAFs, it may also be 2^w. */ + // Now window_val is 0 or 2^(w+1) in standard wNAF generation; + // for modified window NAFs, it may also be 2^w. if (window_val != 0 && window_val != next_bit && window_val != bit) { OPENSSL_PUT_ERROR(EC, ERR_R_INTERNAL_ERROR); goto err; @@ -209,10 +204,9 @@ static int8_t *compute_wNAF(const BIGNUM *scalar, int w, size_t *ret_len) { } -/* TODO: table should be optimised for the wNAF-based implementation, - * sometimes smaller windows will give better performance - * (thus the boundaries should be increased) - */ +// TODO: table should be optimised for the wNAF-based implementation, +// sometimes smaller windows will give better performance +// (thus the boundaries should be increased) static size_t window_bits_for_scalar_size(size_t b) { if (b >= 2000) { return 6; @@ -237,8 +231,9 @@ static size_t window_bits_for_scalar_size(size_t b) { return 1; } -int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, - const EC_POINT *p, const BIGNUM *p_scalar, BN_CTX *ctx) { +int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, + const EC_SCALAR *g_scalar_raw, const EC_POINT *p, + const EC_SCALAR *p_scalar_raw, BN_CTX *ctx) { BN_CTX *new_ctx = NULL; const EC_POINT *generator = NULL; EC_POINT *tmp = NULL; @@ -247,14 +242,14 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, int k; int r_is_inverted = 0; int r_is_at_infinity = 1; - size_t *wsize = NULL; /* individual window sizes */ - int8_t **wNAF = NULL; /* individual wNAFs */ + size_t *wsize = NULL; // individual window sizes + int8_t **wNAF = NULL; // individual wNAFs size_t *wNAF_len = NULL; size_t max_len = 0; size_t num_val = 0; - EC_POINT **val = NULL; /* precomputation */ + EC_POINT **val = NULL; // precomputation EC_POINT **v; - EC_POINT ***val_sub = NULL; /* pointers to sub-arrays of 'val' */ + EC_POINT ***val_sub = NULL; // pointers to sub-arrays of 'val' int ret = 0; if (ctx == NULL) { @@ -263,13 +258,32 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, goto err; } } + BN_CTX_start(ctx); + + // Convert from |EC_SCALAR| to |BIGNUM|. |BIGNUM| is not constant-time, but + // neither is the rest of this function. 
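The compute_wNAF routine above produces digits with the properties its comment describes: every digit is zero or odd with absolute value below 2^w, any w+1 consecutive digits contain at most one non-zero entry, and the digits reconstruct the scalar. A minimal standalone sketch follows (not part of the patch; it computes the plain right-to-left wNAF of a machine word rather than the modified variant above, and verifies those properties).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Plain (non-modified) wNAF of a 32-bit scalar. Returns the number of digits.
static size_t compute_wnaf32(uint32_t scalar, int w, int8_t out[40]) {
  const int64_t bit = 1 << w;         // 2^w
  const int64_t next_bit = bit << 1;  // 2^(w+1)
  const int64_t mask = next_bit - 1;
  int64_t work = scalar;
  size_t len = 0;
  while (work != 0) {
    int64_t digit = 0;
    if (work & 1) {
      digit = work & mask;  // odd, 0 < digit < 2^(w+1)
      if (digit & bit) {
        digit -= next_bit;  // take the negative representative
      }
      work -= digit;        // now divisible by 2^(w+1)
    }
    out[len++] = (int8_t)digit;
    work >>= 1;
  }
  return len;
}

int main(void) {
  const int w = 4;
  int8_t naf[40];
  uint32_t scalar = 0xdeadbeef;
  size_t len = compute_wnaf32(scalar, w, naf);

  int64_t acc = 0;
  for (size_t j = 0; j < len; j++) {
    // Digits are zero or odd, with |digit| < 2^w.
    assert(naf[j] == 0 ||
           ((naf[j] & 1) && naf[j] > -(1 << w) && naf[j] < (1 << w)));
    acc += (int64_t)naf[j] * ((int64_t)1 << j);
  }
  assert(acc == (int64_t)scalar);
  printf("wNAF of %u has %zu digits\n", (unsigned)scalar, len);
  return 0;
}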
+ BIGNUM *g_scalar = NULL, *p_scalar = NULL; + if (g_scalar_raw != NULL) { + g_scalar = BN_CTX_get(ctx); + if (g_scalar == NULL || + !bn_set_words(g_scalar, g_scalar_raw->words, group->order.top)) { + goto err; + } + } + if (p_scalar_raw != NULL) { + p_scalar = BN_CTX_get(ctx); + if (p_scalar == NULL || + !bn_set_words(p_scalar, p_scalar_raw->words, group->order.top)) { + goto err; + } + } - /* TODO: This function used to take |points| and |scalars| as arrays of - * |num| elements. The code below should be simplified to work in terms of |p| - * and |p_scalar|. */ + // TODO: This function used to take |points| and |scalars| as arrays of + // |num| elements. The code below should be simplified to work in terms of |p| + // and |p_scalar|. size_t num = p != NULL ? 1 : 0; const EC_POINT **points = p != NULL ? &p : NULL; - const BIGNUM **scalars = p != NULL ? &p_scalar : NULL; + BIGNUM **scalars = p != NULL ? &p_scalar : NULL; total_num = num; @@ -280,7 +294,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, goto err; } - ++total_num; /* treat 'g_scalar' like 'num'-th element of 'scalars' */ + ++total_num; // treat 'g_scalar' like 'num'-th element of 'scalars' } @@ -289,7 +303,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, wNAF = OPENSSL_malloc(total_num * sizeof(wNAF[0])); val_sub = OPENSSL_malloc(total_num * sizeof(val_sub[0])); - /* Ensure wNAF is initialised in case we end up going to err. */ + // Ensure wNAF is initialised in case we end up going to err. if (wNAF != NULL) { OPENSSL_memset(wNAF, 0, total_num * sizeof(wNAF[0])); } @@ -299,7 +313,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, goto err; } - /* num_val will be the total number of temporarily precomputed points */ + // num_val will be the total number of temporarily precomputed points num_val = 0; for (i = 0; i < total_num; i++) { @@ -318,8 +332,8 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, } } - /* All points we precompute now go into a single array 'val'. 'val_sub[i]' is - * a pointer to the subarray for the i-th point. */ + // All points we precompute now go into a single array 'val'. 'val_sub[i]' is + // a pointer to the subarray for the i-th point. val = OPENSSL_malloc(num_val * sizeof(val[0])); if (val == NULL) { OPENSSL_PUT_ERROR(EC, ERR_R_MALLOC_FAILURE); @@ -327,7 +341,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, } OPENSSL_memset(val, 0, num_val * sizeof(val[0])); - /* allocate points for precomputation */ + // allocate points for precomputation v = val; for (i = 0; i < total_num; i++) { val_sub[i] = v; @@ -348,12 +362,11 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, goto err; } - /* prepare precomputed values: - * val_sub[i][0] := points[i] - * val_sub[i][1] := 3 * points[i] - * val_sub[i][2] := 5 * points[i] - * ... - */ + // prepare precomputed values: + // val_sub[i][0] := points[i] + // val_sub[i][1] := 3 * points[i] + // val_sub[i][2] := 5 * points[i] + // ... 
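The table layout sketched in the comment above (odd multiples 1*P, 3*P, 5*P, ...) is exactly what the evaluation loop further down consumes: one doubling per digit position, plus an addition or subtraction of a table entry for each non-zero wNAF digit. A standalone integer stand-in (not part of the patch; "doubling" is *2 and "addition" is +, with P an ordinary integer) makes the bookkeeping easy to follow.

#include <assert.h>
#include <stdio.h>

int main(void) {
  // wNAF digits of 11 for w = 2: 11 = 3*2^0 + 1*2^3, i.e. digits {3, 0, 0, 1}.
  const int digits[] = {3, 0, 0, 1};
  const int num_digits = 4;

  // Integer stand-in for a point: "doubling" is *2, "addition" is +.
  const long P = 7;
  // Precomputed odd multiples, indexed as table[(|d| - 1) / 2]: 1*P, 3*P.
  const long table[] = {1 * P, 3 * P};

  long r = 0;
  for (int j = num_digits - 1; j >= 0; j--) {
    r *= 2;  // one doubling per digit position
    int d = digits[j];
    if (d > 0) {
      r += table[(d - 1) / 2];
    } else if (d < 0) {
      r -= table[(-d - 1) / 2];
    }
  }
  assert(r == 11 * P);
  printf("11 * P computed from wNAF digits: %ld\n", r);
  return 0;
}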
for (i = 0; i < total_num; i++) { if (i < num) { if (!EC_POINT_copy(val_sub[i][0], points[i])) { @@ -375,7 +388,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, } } -#if 1 /* optional; window_bits_for_scalar_size assumes we do this step */ +#if 1 // optional; window_bits_for_scalar_size assumes we do this step if (!EC_POINTs_make_affine(group, num_val, val, ctx)) { goto err; } @@ -407,7 +420,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, r_is_inverted = !r_is_inverted; } - /* digit > 0 */ + // digit > 0 if (r_is_at_infinity) { if (!EC_POINT_copy(r, val_sub[i][digit >> 1])) { @@ -435,6 +448,9 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, ret = 1; err: + if (ctx != NULL) { + BN_CTX_end(ctx); + } BN_CTX_free(new_ctx); EC_POINT_free(tmp); OPENSSL_free(wsize); @@ -448,7 +464,7 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, } if (val != NULL) { for (i = 0; i < num_val; i++) { - EC_POINT_clear_free(val[i]); + EC_POINT_free(val[i]); } OPENSSL_free(val); diff --git a/Sources/BoringSSL/crypto/fipsmodule/ecdsa/ecdsa.c b/Sources/BoringSSL/crypto/fipsmodule/ecdsa/ecdsa.c new file mode 100644 index 000000000..319a934e7 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/ecdsa/ecdsa.c @@ -0,0 +1,442 @@ +/* ==================================================================== + * Copyright (c) 1998-2005 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@OpenSSL.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). */ + +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include "../bn/internal.h" +#include "../ec/internal.h" +#include "../../internal.h" + + +// digest_to_scalar interprets |digest_len| bytes from |digest| as a scalar for +// ECDSA. Note this value is not fully reduced modulo the order, only the +// correct number of bits. +static void digest_to_scalar(const EC_GROUP *group, EC_SCALAR *out, + const uint8_t *digest, size_t digest_len) { + const BIGNUM *order = &group->order; + size_t num_bits = BN_num_bits(order); + // Need to truncate digest if it is too long: first truncate whole bytes. + if (8 * digest_len > num_bits) { + digest_len = (num_bits + 7) / 8; + } + OPENSSL_memset(out, 0, sizeof(EC_SCALAR)); + for (size_t i = 0; i < digest_len; i++) { + out->bytes[i] = digest[digest_len - 1 - i]; + } + + // If still too long truncate remaining bits with a shift + if (8 * digest_len > num_bits) { + size_t shift = 8 - (num_bits & 0x7); + for (int i = 0; i < order->top - 1; i++) { + out->words[i] = + (out->words[i] >> shift) | (out->words[i + 1] << (BN_BITS2 - shift)); + } + out->words[order->top - 1] >>= shift; + } +} + +// field_element_to_scalar reduces |r| modulo |group->order|. |r| must +// previously have been reduced modulo |group->field|. +static int field_element_to_scalar(const EC_GROUP *group, BIGNUM *r) { + // We must have p < 2×order, assuming p is not tiny (p >= 17). Thus rather we + // can reduce by performing at most one subtraction. + // + // Proof: We only work with prime order curves, so the number of points on + // the curve is the order. Thus Hasse's theorem gives: + // + // |order - (p + 1)| <= 2×sqrt(p) + // p + 1 - order <= 2×sqrt(p) + // p + 1 - 2×sqrt(p) <= order + // p + 1 - 2×(p/4) < order (p/4 > sqrt(p) for p >= 17) + // p/2 < p/2 + 1 < order + // p < 2×order + // + // Additionally, one can manually check this property for built-in curves. It + // is enforced for legacy custom curves in |EC_GROUP_set_generator|. + // + // TODO(davidben): Introduce |EC_FIELD_ELEMENT|, make this a function from + // |EC_FIELD_ELEMENT| to |EC_SCALAR|, and cut out the |BIGNUM|. Does this need + // to be constant-time for signing? |r| is the x-coordinate for kG, which is + // public unless k was rerolled because |s| was zero. 
+ assert(!BN_is_negative(r)); + assert(BN_cmp(r, &group->field) < 0); + if (BN_cmp(r, &group->order) >= 0 && + !BN_sub(r, r, &group->order)) { + return 0; + } + assert(!BN_is_negative(r)); + assert(BN_cmp(r, &group->order) < 0); + return 1; +} + +ECDSA_SIG *ECDSA_SIG_new(void) { + ECDSA_SIG *sig = OPENSSL_malloc(sizeof(ECDSA_SIG)); + if (sig == NULL) { + return NULL; + } + sig->r = BN_new(); + sig->s = BN_new(); + if (sig->r == NULL || sig->s == NULL) { + ECDSA_SIG_free(sig); + return NULL; + } + return sig; +} + +void ECDSA_SIG_free(ECDSA_SIG *sig) { + if (sig == NULL) { + return; + } + + BN_free(sig->r); + BN_free(sig->s); + OPENSSL_free(sig); +} + +void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **out_r, + const BIGNUM **out_s) { + if (out_r != NULL) { + *out_r = sig->r; + } + if (out_s != NULL) { + *out_s = sig->s; + } +} + +int ECDSA_SIG_set0(ECDSA_SIG *sig, BIGNUM *r, BIGNUM *s) { + if (r == NULL || s == NULL) { + return 0; + } + BN_free(sig->r); + BN_free(sig->s); + sig->r = r; + sig->s = s; + return 1; +} + +int ECDSA_do_verify(const uint8_t *digest, size_t digest_len, + const ECDSA_SIG *sig, const EC_KEY *eckey) { + const EC_GROUP *group = EC_KEY_get0_group(eckey); + const EC_POINT *pub_key = EC_KEY_get0_public_key(eckey); + if (group == NULL || pub_key == NULL || sig == NULL) { + OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_MISSING_PARAMETERS); + return 0; + } + + BN_CTX *ctx = BN_CTX_new(); + if (!ctx) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); + return 0; + } + int ret = 0; + EC_POINT *point = NULL; + BN_CTX_start(ctx); + BIGNUM *X = BN_CTX_get(ctx); + if (X == NULL) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); + goto err; + } + + EC_SCALAR r, s, m, u1, u2, s_inv_mont; + const BIGNUM *order = EC_GROUP_get0_order(group); + if (BN_is_zero(sig->r) || + BN_is_negative(sig->r) || + BN_ucmp(sig->r, order) >= 0 || + !ec_bignum_to_scalar(group, &r, sig->r) || + BN_is_zero(sig->s) || + BN_is_negative(sig->s) || + BN_ucmp(sig->s, order) >= 0 || + !ec_bignum_to_scalar(group, &s, sig->s)) { + OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); + goto err; + } + // s_inv_mont = s^-1 mod order. We convert the result to Montgomery form for + // the products below. + int no_inverse; + if (!BN_mod_inverse_odd(X, &no_inverse, sig->s, order, ctx) || + !ec_bignum_to_scalar(group, &s_inv_mont, X) || + !bn_to_montgomery_small(s_inv_mont.words, order->top, s_inv_mont.words, + order->top, group->order_mont)) { + goto err; + } + // u1 = m * s_inv_mont mod order + // u2 = r * s_inv_mont mod order + // + // |s_inv_mont| is in Montgomery form while |m| and |r| are not, so |u1| and + // |u2| will be taken out of Montgomery form, as desired. Note that, although + // |m| is not fully reduced, |bn_mod_mul_montgomery_small| only requires the + // product not exceed R * |order|. |s_inv_mont| is fully reduced and |m| < + // 2^BN_num_bits(order) <= R, so this holds. 
+ digest_to_scalar(group, &m, digest, digest_len); + if (!bn_mod_mul_montgomery_small(u1.words, order->top, m.words, order->top, + s_inv_mont.words, order->top, + group->order_mont) || + !bn_mod_mul_montgomery_small(u2.words, order->top, r.words, order->top, + s_inv_mont.words, order->top, + group->order_mont)) { + goto err; + } + + point = EC_POINT_new(group); + if (point == NULL) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); + goto err; + } + if (!ec_point_mul_scalar(group, point, &u1, pub_key, &u2, ctx)) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); + goto err; + } + if (!EC_POINT_get_affine_coordinates_GFp(group, point, X, NULL, ctx)) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); + goto err; + } + if (!field_element_to_scalar(group, X)) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_BN_LIB); + goto err; + } + // The signature is correct iff |X| is equal to |sig->r|. + if (BN_ucmp(X, sig->r) != 0) { + OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_BAD_SIGNATURE); + goto err; + } + + ret = 1; + +err: + BN_CTX_end(ctx); + BN_CTX_free(ctx); + EC_POINT_free(point); + return ret; +} + +static int ecdsa_sign_setup(const EC_KEY *eckey, BN_CTX *ctx, + EC_SCALAR *out_kinv_mont, BIGNUM **rp, + const uint8_t *digest, size_t digest_len, + const EC_SCALAR *priv_key) { + EC_POINT *tmp_point = NULL; + int ret = 0; + EC_SCALAR k; + BIGNUM *r = BN_new(); // this value is later returned in *rp + if (r == NULL) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); + goto err; + } + const EC_GROUP *group = EC_KEY_get0_group(eckey); + const BIGNUM *order = EC_GROUP_get0_order(group); + tmp_point = EC_POINT_new(group); + if (tmp_point == NULL) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_EC_LIB); + goto err; + } + + // Check that the size of the group order is FIPS compliant (FIPS 186-4 + // B.5.2). + if (BN_num_bits(order) < 160) { + OPENSSL_PUT_ERROR(ECDSA, EC_R_INVALID_GROUP_ORDER); + goto err; + } + + do { + // Include the private key and message digest in the k generation. + if (eckey->fixed_k != NULL) { + if (!ec_bignum_to_scalar(group, &k, eckey->fixed_k)) { + goto err; + } + } else { + // Pass a SHA512 hash of the private key and digest as additional data + // into the RBG. This is a hardening measure against entropy failure. + OPENSSL_COMPILE_ASSERT(SHA512_DIGEST_LENGTH >= 32, + additional_data_is_too_large_for_sha512); + SHA512_CTX sha; + uint8_t additional_data[SHA512_DIGEST_LENGTH]; + SHA512_Init(&sha); + SHA512_Update(&sha, priv_key->words, order->top * sizeof(BN_ULONG)); + SHA512_Update(&sha, digest, digest_len); + SHA512_Final(additional_data, &sha); + if (!ec_random_nonzero_scalar(group, &k, additional_data)) { + goto err; + } + } + + // Compute k^-1. We leave it in the Montgomery domain as an optimization for + // later operations. + if (!bn_to_montgomery_small(out_kinv_mont->words, order->top, k.words, + order->top, group->order_mont) || + !bn_mod_inverse_prime_mont_small(out_kinv_mont->words, order->top, + out_kinv_mont->words, order->top, + group->order_mont)) { + goto err; + } + + // Compute r, the x-coordinate of generator * k. 
+ if (!ec_point_mul_scalar(group, tmp_point, &k, NULL, NULL, ctx) || + !EC_POINT_get_affine_coordinates_GFp(group, tmp_point, r, NULL, + ctx)) { + goto err; + } + + if (!field_element_to_scalar(group, r)) { + goto err; + } + } while (BN_is_zero(r)); + + BN_clear_free(*rp); + *rp = r; + r = NULL; + ret = 1; + +err: + OPENSSL_cleanse(&k, sizeof(k)); + BN_clear_free(r); + EC_POINT_free(tmp_point); + return ret; +} + +ECDSA_SIG *ECDSA_do_sign(const uint8_t *digest, size_t digest_len, + const EC_KEY *eckey) { + if (eckey->ecdsa_meth && eckey->ecdsa_meth->sign) { + OPENSSL_PUT_ERROR(ECDSA, ECDSA_R_NOT_IMPLEMENTED); + return NULL; + } + + const EC_GROUP *group = EC_KEY_get0_group(eckey); + const BIGNUM *priv_key_bn = EC_KEY_get0_private_key(eckey); + if (group == NULL || priv_key_bn == NULL) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_PASSED_NULL_PARAMETER); + return NULL; + } + const BIGNUM *order = EC_GROUP_get0_order(group); + + int ok = 0; + ECDSA_SIG *ret = ECDSA_SIG_new(); + BN_CTX *ctx = BN_CTX_new(); + EC_SCALAR kinv_mont, priv_key, r_mont, s, tmp, m; + if (ret == NULL || ctx == NULL) { + OPENSSL_PUT_ERROR(ECDSA, ERR_R_MALLOC_FAILURE); + return NULL; + } + + digest_to_scalar(group, &m, digest, digest_len); + if (!ec_bignum_to_scalar(group, &priv_key, priv_key_bn)) { + goto err; + } + for (;;) { + if (!ecdsa_sign_setup(eckey, ctx, &kinv_mont, &ret->r, digest, digest_len, + &priv_key)) { + goto err; + } + + // Compute priv_key * r (mod order). Note if only one parameter is in the + // Montgomery domain, |bn_mod_mul_montgomery_small| will compute the answer + // in the normal domain. + if (!ec_bignum_to_scalar(group, &r_mont, ret->r) || + !bn_to_montgomery_small(r_mont.words, order->top, r_mont.words, + order->top, group->order_mont) || + !bn_mod_mul_montgomery_small(s.words, order->top, priv_key.words, + order->top, r_mont.words, order->top, + group->order_mont)) { + goto err; + } + + // Compute s += m in constant time. Reduce one copy of |order| if necessary. + // Note this does not leave |s| fully reduced. We have + // |m| < 2^BN_num_bits(order), so subtracting |order| leaves + // 0 <= |s| < 2^BN_num_bits(order). + BN_ULONG carry = bn_add_words(s.words, s.words, m.words, order->top); + BN_ULONG v = bn_sub_words(tmp.words, s.words, order->d, order->top) - carry; + v = 0u - v; + for (int i = 0; i < order->top; i++) { + s.words[i] = constant_time_select_w(v, s.words[i], tmp.words[i]); + } + + // Finally, multiply s by k^-1. That was retained in Montgomery form, so the + // same technique as the previous multiplication works. Although the + // previous step did not fully reduce |s|, |bn_mod_mul_montgomery_small| + // only requires the product not exceed R * |order|. |kinv_mont| is fully + // reduced and |s| < 2^BN_num_bits(order) <= R, so this holds. 
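Stepping back from the internals, the public entry points defined in this file can be exercised end to end. A hedged usage sketch follows (not part of the patch; the header names and NID constant are assumed to follow BoringSSL's layout, and error handling is reduced to early returns): generate a P-256 key, hash a message with SHA-256, sign the digest with ECDSA_do_sign, and check it with ECDSA_do_verify.

#include <stdint.h>
#include <stdio.h>

#include <openssl/ec_key.h>
#include <openssl/ecdsa.h>
#include <openssl/nid.h>
#include <openssl/sha.h>

int main(void) {
  EC_KEY *key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
  if (key == NULL || !EC_KEY_generate_key(key)) {
    return 1;
  }

  const uint8_t msg[] = "example message";
  uint8_t digest[SHA256_DIGEST_LENGTH];
  SHA256(msg, sizeof(msg) - 1, digest);

  // Sign the digest; |r| and |s| live in the returned ECDSA_SIG.
  ECDSA_SIG *sig = ECDSA_do_sign(digest, sizeof(digest), key);
  if (sig == NULL) {
    EC_KEY_free(key);
    return 1;
  }

  // ECDSA_do_verify returns 1 only for a valid signature.
  printf("fresh signature: %d\n",
         ECDSA_do_verify(digest, sizeof(digest), sig, key));
  digest[0] ^= 1;  // corrupt the digest; verification should now fail
  printf("corrupted digest: %d\n",
         ECDSA_do_verify(digest, sizeof(digest), sig, key));

  ECDSA_SIG_free(sig);
  EC_KEY_free(key);
  return 0;
}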
+ if (!bn_mod_mul_montgomery_small(s.words, order->top, s.words, order->top, + kinv_mont.words, order->top, + group->order_mont) || + !bn_set_words(ret->s, s.words, order->top)) { + goto err; + } + if (!BN_is_zero(ret->s)) { + // s != 0 => we have a valid signature + break; + } + } + + ok = 1; + +err: + if (!ok) { + ECDSA_SIG_free(ret); + ret = NULL; + } + BN_CTX_free(ctx); + OPENSSL_cleanse(&kinv_mont, sizeof(kinv_mont)); + OPENSSL_cleanse(&priv_key, sizeof(priv_key)); + OPENSSL_cleanse(&r_mont, sizeof(r_mont)); + OPENSSL_cleanse(&s, sizeof(s)); + OPENSSL_cleanse(&tmp, sizeof(tmp)); + OPENSSL_cleanse(&m, sizeof(m)); + return ret; +} diff --git a/Sources/BoringSSL/crypto/hmac/hmac.c b/Sources/BoringSSL/crypto/fipsmodule/hmac/hmac.c similarity index 87% rename from Sources/BoringSSL/crypto/hmac/hmac.c rename to Sources/BoringSSL/crypto/fipsmodule/hmac/hmac.c index a2526678e..fb57bf24a 100644 --- a/Sources/BoringSSL/crypto/hmac/hmac.c +++ b/Sources/BoringSSL/crypto/fipsmodule/hmac/hmac.c @@ -62,22 +62,13 @@ #include #include -#include "../internal.h" +#include "../../internal.h" uint8_t *HMAC(const EVP_MD *evp_md, const void *key, size_t key_len, const uint8_t *data, size_t data_len, uint8_t *out, unsigned int *out_len) { HMAC_CTX ctx; - static uint8_t static_out_buffer[EVP_MAX_MD_SIZE]; - - /* OpenSSL has traditionally supported using a static buffer if |out| is - * NULL. We maintain that but don't document it. This behaviour should be - * considered to be deprecated. */ - if (out == NULL) { - out = static_out_buffer; - } - HMAC_CTX_init(&ctx); if (!HMAC_Init_ex(&ctx, key, key_len, evp_md, NULL) || !HMAC_Update(&ctx, data, data_len) || @@ -96,6 +87,14 @@ void HMAC_CTX_init(HMAC_CTX *ctx) { EVP_MD_CTX_init(&ctx->md_ctx); } +HMAC_CTX *HMAC_CTX_new(void) { + HMAC_CTX *ctx = OPENSSL_malloc(sizeof(HMAC_CTX)); + if (ctx != NULL) { + HMAC_CTX_init(ctx); + } + return ctx; +} + void HMAC_CTX_cleanup(HMAC_CTX *ctx) { EVP_MD_CTX_cleanup(&ctx->i_ctx); EVP_MD_CTX_cleanup(&ctx->o_ctx); @@ -103,19 +102,28 @@ void HMAC_CTX_cleanup(HMAC_CTX *ctx) { OPENSSL_cleanse(ctx, sizeof(HMAC_CTX)); } +void HMAC_CTX_free(HMAC_CTX *ctx) { + if (ctx == NULL) { + return; + } + + HMAC_CTX_cleanup(ctx); + OPENSSL_free(ctx); +} + int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl) { if (md == NULL) { md = ctx->md; } - /* If either |key| is non-NULL or |md| has changed, initialize with a new key - * rather than rewinding the previous one. - * - * TODO(davidben,eroman): Passing the previous |md| with a NULL |key| is - * ambiguous between using the empty key and reusing the previous key. There - * exist callers which intend the latter, but the former is an awkward edge - * case. Fix to API to avoid this. */ + // If either |key| is non-NULL or |md| has changed, initialize with a new key + // rather than rewinding the previous one. + // + // TODO(davidben,eroman): Passing the previous |md| with a NULL |key| is + // ambiguous between using the empty key and reusing the previous key. There + // exist callers which intend the latter, but the former is an awkward edge + // case. Fix to API to avoid this. if (md != ctx->md || key != NULL) { uint8_t pad[EVP_MAX_MD_BLOCK_SIZE]; uint8_t key_block[EVP_MAX_MD_BLOCK_SIZE]; @@ -124,7 +132,7 @@ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len, size_t block_size = EVP_MD_block_size(md); assert(block_size <= sizeof(key_block)); if (block_size < key_len) { - /* Long keys are hashed. */ + // Long keys are hashed. 
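Note that the HMAC hunk above removes the old static-buffer fallback, so after this change callers of the one-shot HMAC must always supply an output buffer. A minimal usage sketch (not part of the patch; header names are assumed to follow BoringSSL's layout) looks like this:

#include <stdint.h>
#include <stdio.h>

#include <openssl/digest.h>
#include <openssl/hmac.h>

int main(void) {
  const uint8_t key[] = "secret key";
  const uint8_t data[] = "message to authenticate";

  // |out| may no longer be NULL; the static fallback buffer was removed above.
  uint8_t out[EVP_MAX_MD_SIZE];
  unsigned out_len;
  if (HMAC(EVP_sha256(), key, sizeof(key) - 1, data, sizeof(data) - 1, out,
           &out_len) == NULL) {
    return 1;
  }

  for (unsigned i = 0; i < out_len; i++) {
    printf("%02x", out[i]);  // 32 bytes for SHA-256
  }
  printf("\n");
  return 0;
}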
if (!EVP_DigestInit_ex(&ctx->md_ctx, md, impl) || !EVP_DigestUpdate(&ctx->md_ctx, key, key_len) || !EVP_DigestFinal_ex(&ctx->md_ctx, key_block, &key_block_len)) { @@ -135,7 +143,7 @@ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len, OPENSSL_memcpy(key_block, key, key_len); key_block_len = (unsigned)key_len; } - /* Keys are then padded with zeros. */ + // Keys are then padded with zeros. if (key_block_len != EVP_MAX_MD_BLOCK_SIZE) { OPENSSL_memset(&key_block[key_block_len], 0, sizeof(key_block) - key_block_len); } @@ -174,8 +182,8 @@ int HMAC_Final(HMAC_CTX *ctx, uint8_t *out, unsigned int *out_len) { unsigned int i; uint8_t buf[EVP_MAX_MD_SIZE]; - /* TODO(davidben): The only thing that can officially fail here is - * |EVP_MD_CTX_copy_ex|, but even that should be impossible in this case. */ + // TODO(davidben): The only thing that can officially fail here is + // |EVP_MD_CTX_copy_ex|, but even that should be impossible in this case. if (!EVP_DigestFinal_ex(&ctx->md_ctx, buf, &i) || !EVP_MD_CTX_copy_ex(&ctx->md_ctx, &ctx->o_ctx) || !EVP_DigestUpdate(&ctx->md_ctx, buf, i) || @@ -202,6 +210,11 @@ int HMAC_CTX_copy_ex(HMAC_CTX *dest, const HMAC_CTX *src) { return 1; } +void HMAC_CTX_reset(HMAC_CTX *ctx) { + HMAC_CTX_cleanup(ctx); + HMAC_CTX_init(ctx); +} + int HMAC_Init(HMAC_CTX *ctx, const void *key, int key_len, const EVP_MD *md) { if (key && md) { HMAC_CTX_init(ctx); diff --git a/Sources/BoringSSL/crypto/rand/internal.h b/Sources/BoringSSL/crypto/fipsmodule/is_fips.c similarity index 64% rename from Sources/BoringSSL/crypto/rand/internal.h rename to Sources/BoringSSL/crypto/fipsmodule/is_fips.c index dcff3aa6e..4182dfb7f 100644 --- a/Sources/BoringSSL/crypto/rand/internal.h +++ b/Sources/BoringSSL/crypto/fipsmodule/is_fips.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Google Inc. +/* Copyright (c) 2017, Google Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -12,21 +12,16 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H -#define OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H - -#if defined(__cplusplus) -extern "C" { -#endif +#include -/* CRYPTO_sysrand fills |len| bytes at |buf| with entropy from the operating - * system. */ -void CRYPTO_sysrand(uint8_t *buf, size_t len); +// This file exists in order to give the fipsmodule target, in non-FIPS mode, +// something to compile. - -#if defined(__cplusplus) -} /* extern C */ +int FIPS_mode(void) { +#if defined(BORINGSSL_FIPS) && !defined(OPENSSL_ASAN) + return 1; +#else + return 0; #endif - -#endif /* OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H */ +} diff --git a/Sources/BoringSSL/crypto/md4/md4.c b/Sources/BoringSSL/crypto/fipsmodule/md4/md4.c similarity index 92% rename from Sources/BoringSSL/crypto/md4/md4.c rename to Sources/BoringSSL/crypto/fipsmodule/md4/md4.c index 0046c217f..f0c1dcdf1 100644 --- a/Sources/BoringSSL/crypto/md4/md4.c +++ b/Sources/BoringSSL/crypto/fipsmodule/md4/md4.c @@ -59,7 +59,7 @@ #include #include -#include "../internal.h" +#include "../../internal.h" uint8_t *MD4(const uint8_t *data, size_t len, uint8_t *out) { @@ -71,7 +71,7 @@ uint8_t *MD4(const uint8_t *data, size_t len, uint8_t *out) { return out; } -/* Implemented from RFC1186 The MD4 Message-Digest Algorithm. */ +// Implemented from RFC1186 The MD4 Message-Digest Algorithm. 
int MD4_Init(MD4_CTX *md4) { OPENSSL_memset(md4, 0, sizeof(MD4_CTX)); @@ -107,9 +107,9 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num); #include "../digest/md32_common.h" -/* As pointed out by Wei Dai , the above can be - * simplified to the code below. Wei attributes these optimizations - * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. */ +// As pointed out by Wei Dai , the above can be +// simplified to the code below. Wei attributes these optimizations +// to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define G(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d))) #define H(b, c, d) ((b) ^ (c) ^ (d)) @@ -148,7 +148,7 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { X0 = l; HOST_c2l(data, l); X1 = l; - /* Round 0 */ + // Round 0 R0(A, B, C, D, X0, 3, 0); HOST_c2l(data, l); X2 = l; @@ -193,7 +193,7 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { X15 = l; R0(C, D, A, B, X14, 11, 0); R0(B, C, D, A, X15, 19, 0); - /* Round 1 */ + // Round 1 R1(A, B, C, D, X0, 3, 0x5A827999L); R1(D, A, B, C, X4, 5, 0x5A827999L); R1(C, D, A, B, X8, 9, 0x5A827999L); @@ -210,7 +210,7 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { R1(D, A, B, C, X7, 5, 0x5A827999L); R1(C, D, A, B, X11, 9, 0x5A827999L); R1(B, C, D, A, X15, 13, 0x5A827999L); - /* Round 2 */ + // Round 2 R2(A, B, C, D, X0, 3, 0x6ED9EBA1L); R2(D, A, B, C, X8, 9, 0x6ED9EBA1L); R2(C, D, A, B, X4, 11, 0x6ED9EBA1L); @@ -234,3 +234,21 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { D = state[3] += D; } } + +#undef DATA_ORDER_IS_LITTLE_ENDIAN +#undef HASH_CTX +#undef HASH_CBLOCK +#undef HASH_UPDATE +#undef HASH_TRANSFORM +#undef HASH_FINAL +#undef HASH_MAKE_STRING +#undef HASH_BLOCK_DATA_ORDER +#undef F +#undef G +#undef H +#undef ROTATE +#undef R0 +#undef R1 +#undef R2 +#undef HOST_c2l +#undef HOST_l2c diff --git a/Sources/BoringSSL/crypto/md5/md5.c b/Sources/BoringSSL/crypto/fipsmodule/md5/md5.c similarity index 94% rename from Sources/BoringSSL/crypto/md5/md5.c rename to Sources/BoringSSL/crypto/fipsmodule/md5/md5.c index 7712f4757..32429da38 100644 --- a/Sources/BoringSSL/crypto/md5/md5.c +++ b/Sources/BoringSSL/crypto/fipsmodule/md5/md5.c @@ -60,18 +60,11 @@ #include -#include "../internal.h" +#include "../../internal.h" uint8_t *MD5(const uint8_t *data, size_t len, uint8_t *out) { MD5_CTX ctx; - static uint8_t digest[MD5_DIGEST_LENGTH]; - - /* TODO(fork): remove this static buffer. */ - if (out == NULL) { - out = digest; - } - MD5_Init(&ctx); MD5_Update(&ctx, data, len); MD5_Final(out, &ctx); @@ -120,10 +113,9 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num); #include "../digest/md32_common.h" -/* As pointed out by Wei Dai , the above can be - * simplified to the code below. Wei attributes these optimizations - * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. - */ +// As pointed out by Wei Dai , the above can be +// simplified to the code below. Wei attributes these optimizations +// to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. 
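The MD5() hunk above likewise drops the static "TODO(fork)" buffer, so passing out == NULL is no longer supported after this update. A quick sketch of the expected call pattern (not part of the patch):

#include <stdint.h>
#include <stdio.h>

#include <openssl/md5.h>

int main(void) {
  const uint8_t data[] = "abc";
  uint8_t out[MD5_DIGEST_LENGTH];  // caller-provided; NULL is no longer valid
  MD5(data, sizeof(data) - 1, out);
  for (int i = 0; i < MD5_DIGEST_LENGTH; i++) {
    printf("%02x", out[i]);
  }
  printf("\n");  // "abc" hashes to 900150983cd24fb0d6963f7d28e17f72
  return 0;
}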
#define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define G(b, c, d) ((((b) ^ (c)) & (d)) ^ (c)) #define H(b, c, d) ((b) ^ (c) ^ (d)) @@ -179,7 +171,7 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { X(0) = l; HOST_c2l(data, l); X(1) = l; - /* Round 0 */ + // Round 0 R0(A, B, C, D, X(0), 7, 0xd76aa478L); HOST_c2l(data, l); X(2) = l; @@ -224,7 +216,7 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { X(15) = l; R0(C, D, A, B, X(14), 17, 0xa679438eL); R0(B, C, D, A, X(15), 22, 0x49b40821L); - /* Round 1 */ + // Round 1 R1(A, B, C, D, X(1), 5, 0xf61e2562L); R1(D, A, B, C, X(6), 9, 0xc040b340L); R1(C, D, A, B, X(11), 14, 0x265e5a51L); @@ -241,7 +233,7 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { R1(D, A, B, C, X(2), 9, 0xfcefa3f8L); R1(C, D, A, B, X(7), 14, 0x676f02d9L); R1(B, C, D, A, X(12), 20, 0x8d2a4c8aL); - /* Round 2 */ + // Round 2 R2(A, B, C, D, X(5), 4, 0xfffa3942L); R2(D, A, B, C, X(8), 11, 0x8771f681L); R2(C, D, A, B, X(11), 16, 0x6d9d6122L); @@ -258,7 +250,7 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { R2(D, A, B, C, X(12), 11, 0xe6db99e5L); R2(C, D, A, B, X(15), 16, 0x1fa27cf8L); R2(B, C, D, A, X(2), 23, 0xc4ac5665L); - /* Round 3 */ + // Round 3 R3(A, B, C, D, X(0), 6, 0xf4292244L); R3(D, A, B, C, X(7), 10, 0x432aff97L); R3(C, D, A, B, X(14), 15, 0xab9423a7L); @@ -282,4 +274,25 @@ void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { D = state[3] += D; } } +#undef X #endif + +#undef DATA_ORDER_IS_LITTLE_ENDIAN +#undef HASH_CTX +#undef HASH_CBLOCK +#undef HASH_UPDATE +#undef HASH_TRANSFORM +#undef HASH_FINAL +#undef HASH_MAKE_STRING +#undef HASH_BLOCK_DATA_ORDER +#undef F +#undef G +#undef H +#undef I +#undef ROTATE +#undef R0 +#undef R1 +#undef R2 +#undef R3 +#undef HOST_c2l +#undef HOST_l2c diff --git a/Sources/BoringSSL/crypto/modes/cbc.c b/Sources/BoringSSL/crypto/fipsmodule/modes/cbc.c similarity index 81% rename from Sources/BoringSSL/crypto/modes/cbc.c rename to Sources/BoringSSL/crypto/fipsmodule/modes/cbc.c index 12d551ce7..db9f02419 100644 --- a/Sources/BoringSSL/crypto/modes/cbc.c +++ b/Sources/BoringSSL/crypto/fipsmodule/modes/cbc.c @@ -62,7 +62,8 @@ void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len, assert(len == 0 || (in != NULL && out != NULL)); if (STRICT_ALIGNMENT && - ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) { + ((uintptr_t)in | (uintptr_t)out | (uintptr_t)ivec) % sizeof(size_t) != + 0) { while (len >= 16) { for (n = 0; n < 16; ++n) { out[n] = in[n] ^ iv[n]; @@ -76,7 +77,7 @@ void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len, } else { while (len >= 16) { for (n = 0; n < 16; n += sizeof(size_t)) { - *(size_t *)(out + n) = *(size_t *)(in + n) ^ *(size_t *)(iv + n); + store_word_le(out + n, load_word_le(in + n) ^ load_word_le(iv + n)); } (*block)(out, out, key); iv = out; @@ -120,16 +121,17 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, const uintptr_t inptr = (uintptr_t) in; const uintptr_t outptr = (uintptr_t) out; - /* If |in| and |out| alias, |in| must be ahead. */ + // If |in| and |out| alias, |in| must be ahead. assert(inptr >= outptr || inptr + len <= outptr); if ((inptr >= 32 && outptr <= inptr - 32) || inptr < outptr) { - /* If |out| is at least two blocks behind |in| or completely disjoint, there - * is no need to decrypt to a temporary block. 
*/ + // If |out| is at least two blocks behind |in| or completely disjoint, there + // is no need to decrypt to a temporary block. const uint8_t *iv = ivec; if (STRICT_ALIGNMENT && - ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) { + ((uintptr_t)in | (uintptr_t)out | (uintptr_t)ivec) % sizeof(size_t) != + 0) { while (len >= 16) { (*block)(in, out, key); for (n = 0; n < 16; ++n) { @@ -140,13 +142,11 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, in += 16; out += 16; } - } else if (16 % sizeof(size_t) == 0) { /* always true */ + } else if (16 % sizeof(size_t) == 0) { // always true while (len >= 16) { - size_t *out_t = (size_t *)out, *iv_t = (size_t *)iv; - (*block)(in, out, key); - for (n = 0; n < 16 / sizeof(size_t); n++) { - out_t[n] ^= iv_t[n]; + for (n = 0; n < 16; n += sizeof(size_t)) { + store_word_le(out + n, load_word_le(out + n) ^ load_word_le(iv + n)); } iv = in; len -= 16; @@ -156,11 +156,12 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, } OPENSSL_memcpy(ivec, iv, 16); } else { - /* |out| is less than two blocks behind |in|. Decrypting an input block - * directly to |out| would overwrite a ciphertext block before it is used as - * the next block's IV. Decrypt to a temporary block instead. */ + // |out| is less than two blocks behind |in|. Decrypting an input block + // directly to |out| would overwrite a ciphertext block before it is used as + // the next block's IV. Decrypt to a temporary block instead. if (STRICT_ALIGNMENT && - ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) { + ((uintptr_t)in | (uintptr_t)out | (uintptr_t)ivec) % sizeof(size_t) != + 0) { uint8_t c; while (len >= 16) { (*block)(in, tmp.c, key); @@ -173,16 +174,14 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, in += 16; out += 16; } - } else if (16 % sizeof(size_t) == 0) { /* always true */ + } else if (16 % sizeof(size_t) == 0) { // always true while (len >= 16) { - size_t c, *out_t = (size_t *)out, *ivec_t = (size_t *)ivec; - const size_t *in_t = (const size_t *)in; - (*block)(in, tmp.c, key); - for (n = 0; n < 16 / sizeof(size_t); n++) { - c = in_t[n]; - out_t[n] = tmp.t[n] ^ ivec_t[n]; - ivec_t[n] = c; + for (n = 0; n < 16; n += sizeof(size_t)) { + size_t c = load_word_le(in + n); + store_word_le(out + n, + tmp.t[n / sizeof(size_t)] ^ load_word_le(ivec + n)); + store_word_le(ivec + n, c); } len -= 16; in += 16; diff --git a/Sources/BoringSSL/crypto/modes/cfb.c b/Sources/BoringSSL/crypto/fipsmodule/modes/cfb.c similarity index 88% rename from Sources/BoringSSL/crypto/modes/cfb.c rename to Sources/BoringSSL/crypto/fipsmodule/modes/cfb.c index af15255b4..e1b0a80e0 100644 --- a/Sources/BoringSSL/crypto/modes/cfb.c +++ b/Sources/BoringSSL/crypto/fipsmodule/modes/cfb.c @@ -54,7 +54,7 @@ #include "internal.h" -OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size); +OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size_cfb); void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], unsigned *num, @@ -72,7 +72,8 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, n = (n + 1) % 16; } #if STRICT_ALIGNMENT - if (((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) { + if (((uintptr_t)in | (uintptr_t)out | (uintptr_t)ivec) % sizeof(size_t) != + 0) { while (l < len) { if (n == 0) { (*block)(ivec, ivec, key); @@ -88,7 +89,9 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, 
while (len >= 16) { (*block)(ivec, ivec, key); for (; n < 16; n += sizeof(size_t)) { - *(size_t *)(out + n) = *(size_t *)(ivec + n) ^= *(size_t *)(in + n); + size_t tmp = load_word_le(ivec + n) ^ load_word_le(in + n); + store_word_le(ivec + n, tmp); + store_word_le(out + n, tmp); } len -= 16; out += 16; @@ -112,9 +115,11 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, --len; n = (n + 1) % 16; } - if (STRICT_ALIGNMENT && ((size_t)in | (size_t)out | (size_t)ivec) % sizeof(size_t) != 0) { + if (STRICT_ALIGNMENT && + ((uintptr_t)in | (uintptr_t)out | (uintptr_t)ivec) % sizeof(size_t) != + 0) { while (l < len) { - unsigned char c; + uint8_t c; if (n == 0) { (*block)(ivec, ivec, key); } @@ -129,9 +134,9 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, while (len >= 16) { (*block)(ivec, ivec, key); for (; n < 16; n += sizeof(size_t)) { - size_t t = *(size_t *)(in + n); - *(size_t *)(out + n) = *(size_t *)(ivec + n) ^ t; - *(size_t *)(ivec + n) = t; + size_t t = load_word_le(in + n); + store_word_le(out + n, load_word_le(ivec + n) ^ t); + store_word_le(ivec + n, t); } len -= 16; out += 16; @@ -166,23 +171,23 @@ static void cfbr_encrypt_block(const uint8_t *in, uint8_t *out, unsigned nbits, return; } - /* fill in the first half of the new IV with the current IV */ + // fill in the first half of the new IV with the current IV OPENSSL_memcpy(ovec, ivec, 16); - /* construct the new IV */ + // construct the new IV (*block)(ivec, ivec, key); num = (nbits + 7) / 8; if (enc) { - /* encrypt the input */ + // encrypt the input for (n = 0; n < num; ++n) { out[n] = (ovec[16 + n] = in[n] ^ ivec[n]); } } else { - /* decrypt the input */ + // decrypt the input for (n = 0; n < num; ++n) { out[n] = (ovec[16 + n] = in[n]) ^ ivec[n]; } } - /* shift ovec left... */ + // shift ovec left... rem = nbits % 8; num = nbits / 8; if (rem == 0) { @@ -193,10 +198,10 @@ static void cfbr_encrypt_block(const uint8_t *in, uint8_t *out, unsigned nbits, } } - /* it is not necessary to cleanse ovec, since the IV is not secret */ + // it is not necessary to cleanse ovec, since the IV is not secret } -/* N.B. This expects the input to be packed, MS bit first */ +// N.B. This expects the input to be packed, MS bit first void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits, const void *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block) { @@ -227,4 +232,3 @@ void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out, cfbr_encrypt_block(&in[n], &out[n], 8, key, ivec, enc, block); } } - diff --git a/Sources/BoringSSL/crypto/modes/ctr.c b/Sources/BoringSSL/crypto/fipsmodule/modes/ctr.c similarity index 79% rename from Sources/BoringSSL/crypto/modes/ctr.c rename to Sources/BoringSSL/crypto/fipsmodule/modes/ctr.c index c026d1541..63907b43a 100644 --- a/Sources/BoringSSL/crypto/modes/ctr.c +++ b/Sources/BoringSSL/crypto/fipsmodule/modes/ctr.c @@ -54,10 +54,10 @@ #include "internal.h" -/* NOTE: the IV/counter CTR mode is big-endian. The code itself - * is endian-neutral. */ +// NOTE: the IV/counter CTR mode is big-endian. The code itself +// is endian-neutral. -/* increment counter (128-bit int) by 1 */ +// increment counter (128-bit int) by 1 static void ctr128_inc(uint8_t *counter) { uint32_t n = 16, c = 1; @@ -69,18 +69,18 @@ static void ctr128_inc(uint8_t *counter) { } while (n); } -OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size); - -/* The input encrypted as though 128bit counter mode is being used. 
The extra - * state information to record how much of the 128bit block we have used is - * contained in *num, and the encrypted counter is kept in ecount_buf. Both - * *num and ecount_buf must be initialised with zeros before the first call to - * CRYPTO_ctr128_encrypt(). - * - * This algorithm assumes that the counter is in the x lower bits of the IV - * (ivec), and that the application has full control over overflow and the rest - * of the IV. This implementation takes NO responsibility for checking that - * the counter doesn't overflow into the rest of the IV when incremented. */ +OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size_ctr); + +// The input encrypted as though 128bit counter mode is being used. The extra +// state information to record how much of the 128bit block we have used is +// contained in *num, and the encrypted counter is kept in ecount_buf. Both +// *num and ecount_buf must be initialised with zeros before the first call to +// CRYPTO_ctr128_encrypt(). +// +// This algorithm assumes that the counter is in the x lower bits of the IV +// (ivec), and that the application has full control over overflow and the rest +// of the IV. This implementation takes NO responsibility for checking that +// the counter doesn't overflow into the rest of the IV when incremented. void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], uint8_t ecount_buf[16], unsigned int *num, @@ -100,7 +100,8 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, } #if STRICT_ALIGNMENT - if (((size_t)in | (size_t)out | (size_t)ecount_buf) % sizeof(size_t) != 0) { + if (((uintptr_t)in | (uintptr_t)out | + (uintptr_t)ecount_buf) % sizeof(size_t) != 0) { size_t l = 0; while (l < len) { if (n == 0) { @@ -121,8 +122,8 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, (*block)(ivec, ecount_buf, key); ctr128_inc(ivec); for (n = 0; n < 16; n += sizeof(size_t)) { - *(size_t *)(out + n) = *(const size_t *)(in + n) ^ - *(const size_t *)(ecount_buf + n); + store_word_le(out + n, + load_word_le(in + n) ^ load_word_le(ecount_buf + n)); } len -= 16; out += 16; @@ -140,7 +141,7 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, *num = n; } -/* increment upper 96 bits of 128-bit counter by 1 */ +// increment upper 96 bits of 128-bit counter by 1 static void ctr96_inc(uint8_t *counter) { uint32_t n = 12, c = 1; @@ -174,25 +175,25 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, ctr32 = GETU32(ivec + 12); while (len >= 16) { size_t blocks = len / 16; - /* 1<<28 is just a not-so-small yet not-so-large number... - * Below condition is practically never met, but it has to - * be checked for code correctness. */ + // 1<<28 is just a not-so-small yet not-so-large number... + // Below condition is practically never met, but it has to + // be checked for code correctness. if (sizeof(size_t) > sizeof(unsigned int) && blocks > (1U << 28)) { blocks = (1U << 28); } - /* As (*func) operates on 32-bit counter, caller - * has to handle overflow. 'if' below detects the - * overflow, which is then handled by limiting the - * amount of blocks to the exact overflow point... */ + // As (*func) operates on 32-bit counter, caller + // has to handle overflow. 'if' below detects the + // overflow, which is then handled by limiting the + // amount of blocks to the exact overflow point... 
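// The CTR loop above now goes through the memcpy-based load_word_le /
// store_word_le helpers (added to modes/internal.h later in this patch)
// instead of casting byte pointers to size_t *, which keeps the word-at-a-time
// XOR free of unaligned-access and strict-aliasing problems while compilers
// still emit single loads and stores. A standalone sketch of that pattern
// applied to one 16-byte keystream block (the helper and function names here
// are illustrative):
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static inline size_t load_word(const void *in) {
  size_t v;
  memcpy(&v, in, sizeof(v));  // typically compiles to a single load
  return v;
}

static inline void store_word(void *out, size_t v) {
  memcpy(out, &v, sizeof(v));  // typically compiles to a single store
}

// XOR a 16-byte keystream block into the input, one word at a time.
static void xor_block(uint8_t out[16], const uint8_t in[16],
                      const uint8_t keystream[16]) {
  for (size_t n = 0; n < 16; n += sizeof(size_t)) {
    store_word(out + n, load_word(in + n) ^ load_word(keystream + n));
  }
}

int main(void) {
  uint8_t in[16] = {0}, keystream[16], out[16];
  memset(keystream, 0xaa, sizeof(keystream));
  xor_block(out, in, keystream);
  printf("out[0] = 0x%02x\n", out[0]);  // 0xaa
  return 0;
}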
ctr32 += (uint32_t)blocks; if (ctr32 < blocks) { blocks -= ctr32; ctr32 = 0; } (*func)(in, out, blocks, key, ivec); - /* (*func) does not update ivec, caller does: */ + // (*func) does not update ivec, caller does: PUTU32(ivec + 12, ctr32); - /* ... overflow was detected, propogate carry. */ + // ... overflow was detected, propogate carry. if (ctr32 == 0) { ctr96_inc(ivec); } diff --git a/Sources/BoringSSL/crypto/modes/gcm.c b/Sources/BoringSSL/crypto/fipsmodule/modes/gcm.c similarity index 85% rename from Sources/BoringSSL/crypto/modes/gcm.c rename to Sources/BoringSSL/crypto/fipsmodule/modes/gcm.c index 1330ad626..05cd18d4f 100644 --- a/Sources/BoringSSL/crypto/modes/gcm.c +++ b/Sources/BoringSSL/crypto/fipsmodule/modes/gcm.c @@ -55,8 +55,7 @@ #include #include "internal.h" -#include "../internal.h" - +#include "../../internal.h" #if !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ @@ -178,11 +177,11 @@ static void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]) { Xi[1] = CRYPTO_bswap8(Z.lo); } -/* Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for - * details... Compiler-generated code doesn't seem to give any - * performance improvement, at least not on x86[_64]. It's here - * mostly as reference and a placeholder for possible future - * non-trivial optimization[s]... */ +// Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for +// details... Compiler-generated code doesn't seem to give any +// performance improvement, at least not on x86[_64]. It's here +// mostly as reference and a placeholder for possible future +// non-trivial optimization[s]... static void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len) { u128 Z; @@ -238,7 +237,7 @@ static void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], Xi[1] = CRYPTO_bswap8(Z.lo); } while (inp += 16, len -= 16); } -#else /* GHASH_ASM */ +#else // GHASH_ASM void gcm_gmult_4bit(uint64_t Xi[2], const u128 Htable[16]); void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len); @@ -247,9 +246,9 @@ void gcm_ghash_4bit(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, #define GCM_MUL(ctx, Xi) gcm_gmult_4bit((ctx)->Xi.u, (ctx)->Htable) #if defined(GHASH_ASM) #define GHASH(ctx, in, len) gcm_ghash_4bit((ctx)->Xi.u, (ctx)->Htable, in, len) -/* GHASH_CHUNK is "stride parameter" missioned to mitigate cache - * trashing effect. In other words idea is to hash data while it's - * still in L1 cache after encryption pass... */ +// GHASH_CHUNK is "stride parameter" missioned to mitigate cache +// trashing effect. In other words idea is to hash data while it's +// still in L1 cache after encryption pass... #define GHASH_CHUNK (3 * 1024) #endif @@ -270,11 +269,6 @@ void gcm_gmult_avx(uint64_t Xi[2], const u128 Htable[16]); void gcm_ghash_avx(uint64_t Xi[2], const u128 Htable[16], const uint8_t *in, size_t len); #define AESNI_GCM -static int aesni_gcm_enabled(GCM128_CONTEXT *ctx, ctr128_f stream) { - return stream == aesni_ctr32_encrypt_blocks && - ctx->ghash == gcm_ghash_avx; -} - size_t aesni_gcm_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], uint64_t *Xi); size_t aesni_gcm_decrypt(const uint8_t *in, uint8_t *out, size_t len, @@ -304,7 +298,7 @@ void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len); #if defined(OPENSSL_ARM) -/* 32-bit ARM also has support for doing GCM with NEON instructions. 
*/ +// 32-bit ARM also has support for doing GCM with NEON instructions. static int neon_capable(void) { return CRYPTO_is_NEON_capable(); } @@ -314,7 +308,7 @@ void gcm_gmult_neon(uint64_t Xi[2], const u128 Htable[16]); void gcm_ghash_neon(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len); #else -/* AArch64 only has the ARMv8 versions of functions. */ +// AArch64 only has the ARMv8 versions of functions. static int neon_capable(void) { return 0; } @@ -352,7 +346,10 @@ void gcm_ghash_p8(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, u128 *out_key, u128 out_table[16], + int *out_is_avx, const uint8_t *gcm_key) { + *out_is_avx = 0; + union { uint64_t u[2]; uint8_t c[16]; @@ -360,7 +357,7 @@ void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, OPENSSL_memcpy(H.c, gcm_key, 16); - /* H is stored in host byte order */ + // H is stored in host byte order H.u[0] = CRYPTO_bswap8(H.u[0]); H.u[1] = CRYPTO_bswap8(H.u[1]); @@ -368,10 +365,11 @@ void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, #if defined(GHASH_ASM_X86_64) if (crypto_gcm_clmul_enabled()) { - if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */ + if (((OPENSSL_ia32cap_get()[1] >> 22) & 0x41) == 0x41) { // AVX+MOVBE gcm_init_avx(out_table, H.u); *out_mult = gcm_gmult_avx; *out_hash = gcm_ghash_avx; + *out_is_avx = 1; return; } gcm_init_clmul(out_table, H.u); @@ -420,7 +418,7 @@ void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, } void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *aes_key, - block128_f block) { + block128_f block, int is_aesni_encrypt) { OPENSSL_memset(ctx, 0, sizeof(*ctx)); ctx->block = block; @@ -428,7 +426,11 @@ void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *aes_key, OPENSSL_memset(gcm_key, 0, sizeof(gcm_key)); (*block)(gcm_key, gcm_key, aes_key); - CRYPTO_ghash_init(&ctx->gmult, &ctx->ghash, &ctx->H, ctx->Htable, gcm_key); + int is_avx; + CRYPTO_ghash_init(&ctx->gmult, &ctx->ghash, &ctx->H, ctx->Htable, &is_avx, + gcm_key); + + ctx->use_aesni_gcm_crypt = (is_avx && is_aesni_encrypt) ? 1 : 0; } void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key, @@ -442,8 +444,8 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key, ctx->Yi.u[1] = 0; ctx->Xi.u[0] = 0; ctx->Xi.u[1] = 0; - ctx->len.u[0] = 0; /* AAD length */ - ctx->len.u[1] = 0; /* message length */ + ctx->len.u[0] = 0; // AAD length + ctx->len.u[1] = 0; // message length ctx->ares = 0; ctx->mres = 0; @@ -472,12 +474,12 @@ void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key, ctx->Yi.u[1] ^= CRYPTO_bswap8(len0); GCM_MUL(ctx, Yi); - ctr = GETU32_aligned(ctx->Yi.c + 12); + ctr = CRYPTO_bswap4(ctx->Yi.d[3]); } (*ctx->block)(ctx->Yi.c, ctx->EK0.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); } int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) { @@ -516,7 +518,7 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) { } } - /* Process a whole number of blocks. */ + // Process a whole number of blocks. #ifdef GHASH size_t len_blocks = len & kSizeTWithoutLower4Bits; if (len_blocks != 0) { @@ -535,7 +537,7 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) { } #endif - /* Process the remainder. */ + // Process the remainder. 
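// CRYPTO_gcm128_setiv/encrypt/decrypt keep the 32-bit block counter in the
// last four bytes of the counter block Yi, big-endian, and now read and write
// it by byte-swapping the aliased word Yi.d[3] instead of going through
// GETU32_aligned/PUTU32_aligned on the byte pointer. A standalone sketch of
// that round trip, assuming a little-endian host and an illustrative bswap32
// helper (the real code uses CRYPTO_bswap4):
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t bswap32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0xff00u) | ((v << 8) & 0xff0000u) | (v << 24);
}

int main(void) {
  uint8_t Yi[16] = {0};
  Yi[15] = 1;  // counter value 1, stored big-endian in bytes 12..15

  uint32_t word;
  memcpy(&word, Yi + 12, 4);     // native-endian view of the counter tail
  uint32_t ctr = bswap32(word);  // counter as an ordinary integer on LE hosts
  ctr++;                         // advance to the next block
  word = bswap32(ctr);
  memcpy(Yi + 12, &word, 4);     // store it back big-endian

  printf("counter bytes: %02x %02x %02x %02x\n", Yi[12], Yi[13], Yi[14],
         Yi[15]);  // 00 00 00 02
  return 0;
}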
if (len != 0) { n = (unsigned int)len; for (size_t i = 0; i < len; ++i) { @@ -548,8 +550,7 @@ int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len) { } int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, - const unsigned char *in, unsigned char *out, - size_t len) { + const uint8_t *in, uint8_t *out, size_t len) { unsigned int n, ctr; uint64_t mlen = ctx->len.u[1]; block128_f block = ctx->block; @@ -569,12 +570,12 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, ctx->len.u[1] = mlen; if (ctx->ares) { - /* First call to encrypt finalizes GHASH(AAD) */ + // First call to encrypt finalizes GHASH(AAD) GCM_MUL(ctx, Xi); ctx->ares = 0; } - ctr = GETU32_aligned(ctx->Yi.c + 12); + ctr = CRYPTO_bswap4(ctx->Yi.d[3]); n = ctx->mres; if (n) { @@ -590,12 +591,13 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, return 1; } } - if (STRICT_ALIGNMENT && ((size_t)in | (size_t)out) % sizeof(size_t) != 0) { + if (STRICT_ALIGNMENT && + ((uintptr_t)in | (uintptr_t)out) % sizeof(size_t) != 0) { for (size_t i = 0; i < len; ++i) { if (n == 0) { (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); } ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n]; n = (n + 1) % 16; @@ -612,14 +614,12 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, size_t j = GHASH_CHUNK; while (j) { - size_t *out_t = (size_t *)out; - const size_t *in_t = (const size_t *)in; - (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); - for (size_t i = 0; i < 16 / sizeof(size_t); ++i) { - out_t[i] = in_t[i] ^ ctx->EKi.t[i]; + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); + for (size_t i = 0; i < 16; i += sizeof(size_t)) { + store_word_le(out + i, + load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]); } out += 16; in += 16; @@ -631,14 +631,12 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, size_t len_blocks = len & kSizeTWithoutLower4Bits; if (len_blocks != 0) { while (len >= 16) { - size_t *out_t = (size_t *)out; - const size_t *in_t = (const size_t *)in; - (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); - for (size_t i = 0; i < 16 / sizeof(size_t); ++i) { - out_t[i] = in_t[i] ^ ctx->EKi.t[i]; + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); + for (size_t i = 0; i < 16; i += sizeof(size_t)) { + store_word_le(out + i, + load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]); } out += 16; in += 16; @@ -648,14 +646,13 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, } #else while (len >= 16) { - size_t *out_t = (size_t *)out; - const size_t *in_t = (const size_t *)in; - (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); - for (size_t i = 0; i < 16 / sizeof(size_t); ++i) { - ctx->Xi.t[i] ^= out_t[i] = in_t[i] ^ ctx->EKi.t[i]; + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); + for (size_t i = 0; i < 16; i += sizeof(size_t)) { + size_t tmp = load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]; + store_word_le(out + i, tmp); + ctx->Xi.t[i / sizeof(size_t)] ^= tmp; } GCM_MUL(ctx, Xi); out += 16; @@ -666,7 +663,7 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, if (len) { (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); while (len--) { ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n]; ++n; @@ -699,12 +696,12 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key, ctx->len.u[1] = mlen; if (ctx->ares) { - /* 
First call to decrypt finalizes GHASH(AAD) */ + // First call to decrypt finalizes GHASH(AAD) GCM_MUL(ctx, Xi); ctx->ares = 0; } - ctr = GETU32_aligned(ctx->Yi.c + 12); + ctr = CRYPTO_bswap4(ctx->Yi.d[3]); n = ctx->mres; if (n) { @@ -722,13 +719,14 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key, return 1; } } - if (STRICT_ALIGNMENT && ((size_t)in | (size_t)out) % sizeof(size_t) != 0) { + if (STRICT_ALIGNMENT && + ((uintptr_t)in | (uintptr_t)out) % sizeof(size_t) != 0) { for (size_t i = 0; i < len; ++i) { uint8_t c; if (n == 0) { (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); } c = in[i]; out[i] = c ^ ctx->EKi.c[n]; @@ -748,14 +746,12 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key, GHASH(ctx, in, GHASH_CHUNK); while (j) { - size_t *out_t = (size_t *)out; - const size_t *in_t = (const size_t *)in; - (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); - for (size_t i = 0; i < 16 / sizeof(size_t); ++i) { - out_t[i] = in_t[i] ^ ctx->EKi.t[i]; + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); + for (size_t i = 0; i < 16; i += sizeof(size_t)) { + store_word_le(out + i, + load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]); } out += 16; in += 16; @@ -767,14 +763,12 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key, if (len_blocks != 0) { GHASH(ctx, in, len_blocks); while (len >= 16) { - size_t *out_t = (size_t *)out; - const size_t *in_t = (const size_t *)in; - (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); - for (size_t i = 0; i < 16 / sizeof(size_t); ++i) { - out_t[i] = in_t[i] ^ ctx->EKi.t[i]; + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); + for (size_t i = 0; i < 16; i += sizeof(size_t)) { + store_word_le(out + i, + load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]); } out += 16; in += 16; @@ -783,16 +777,13 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key, } #else while (len >= 16) { - size_t *out_t = (size_t *)out; - const size_t *in_t = (const size_t *)in; - (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); - for (size_t i = 0; i < 16 / sizeof(size_t); ++i) { - size_t c = in_t[i]; - out_t[i] = c ^ ctx->EKi.t[i]; - ctx->Xi.t[i] ^= c; + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); + for (size_t i = 0; i < 16; i += sizeof(size_t)) { + size_t c = load_word_le(in + i); + store_word_le(out + i, c ^ ctx->EKi.t[i / sizeof(size_t)]); + ctx->Xi.t[i / sizeof(size_t)] ^= c; } GCM_MUL(ctx, Xi); out += 16; @@ -803,7 +794,7 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key, if (len) { (*block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); while (len--) { uint8_t c = in[n]; ctx->Xi.c[n] ^= c; @@ -837,7 +828,7 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, ctx->len.u[1] = mlen; if (ctx->ares) { - /* First call to encrypt finalizes GHASH(AAD) */ + // First call to encrypt finalizes GHASH(AAD) GCM_MUL(ctx, Xi); ctx->ares = 0; } @@ -858,9 +849,9 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, } #if defined(AESNI_GCM) - if (aesni_gcm_enabled(ctx, stream)) { - /* |aesni_gcm_encrypt| may not process all the input given to it. It may - * not process *any* of its input if it is deemed too small. */ + if (ctx->use_aesni_gcm_crypt) { + // |aesni_gcm_encrypt| may not process all the input given to it. It may + // not process *any* of its input if it is deemed too small. 
size_t bulk = aesni_gcm_encrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u); in += bulk; out += bulk; @@ -868,13 +859,13 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, } #endif - ctr = GETU32_aligned(ctx->Yi.c + 12); + ctr = CRYPTO_bswap4(ctx->Yi.d[3]); #if defined(GHASH) while (len >= GHASH_CHUNK) { (*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c); ctr += GHASH_CHUNK / 16; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); GHASH(ctx, out, GHASH_CHUNK); out += GHASH_CHUNK; in += GHASH_CHUNK; @@ -887,7 +878,7 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, (*stream)(in, out, j, key, ctx->Yi.c); ctr += (unsigned int)j; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); in += i; len -= i; #if defined(GHASH) @@ -906,7 +897,7 @@ int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, if (len) { (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); while (len--) { ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n]; ++n; @@ -938,7 +929,7 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, ctx->len.u[1] = mlen; if (ctx->ares) { - /* First call to decrypt finalizes GHASH(AAD) */ + // First call to decrypt finalizes GHASH(AAD) GCM_MUL(ctx, Xi); ctx->ares = 0; } @@ -961,9 +952,9 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, } #if defined(AESNI_GCM) - if (aesni_gcm_enabled(ctx, stream)) { - /* |aesni_gcm_decrypt| may not process all the input given to it. It may - * not process *any* of its input if it is deemed too small. */ + if (ctx->use_aesni_gcm_crypt) { + // |aesni_gcm_decrypt| may not process all the input given to it. It may + // not process *any* of its input if it is deemed too small. 
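// Both AVX bulk calls follow the same shape: the assembly routine handles some
// prefix of the input (possibly none if the input is deemed too small),
// reports how many bytes it consumed, and the generic loop finishes the rest.
// A minimal standalone sketch of that calling pattern with a stand-in bulk
// routine (fast_xor_bulk and xor_all are hypothetical; the real code calls
// aesni_gcm_encrypt and aesni_gcm_decrypt):
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Pretend "fast" routine: only handles whole 64-byte chunks and returns how
// many bytes it processed.
static size_t fast_xor_bulk(uint8_t *buf, size_t len, uint8_t key) {
  size_t done = len & ~(size_t)63;
  for (size_t i = 0; i < done; i++) {
    buf[i] ^= key;
  }
  return done;
}

static void xor_all(uint8_t *buf, size_t len, uint8_t key) {
  size_t bulk = fast_xor_bulk(buf, len, key);  // may be zero
  buf += bulk;
  len -= bulk;
  for (size_t i = 0; i < len; i++) {  // generic tail, byte at a time
    buf[i] ^= key;
  }
}

int main(void) {
  uint8_t data[100];
  memset(data, 0, sizeof(data));
  xor_all(data, sizeof(data), 0x5c);
  printf("%02x %02x\n", (unsigned)data[0], (unsigned)data[99]);  // 5c 5c
  return 0;
}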
size_t bulk = aesni_gcm_decrypt(in, out, len, key, ctx->Yi.c, ctx->Xi.u); in += bulk; out += bulk; @@ -971,14 +962,14 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, } #endif - ctr = GETU32_aligned(ctx->Yi.c + 12); + ctr = CRYPTO_bswap4(ctx->Yi.d[3]); #if defined(GHASH) while (len >= GHASH_CHUNK) { GHASH(ctx, in, GHASH_CHUNK); (*stream)(in, out, GHASH_CHUNK / 16, key, ctx->Yi.c); ctr += GHASH_CHUNK / 16; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); out += GHASH_CHUNK; in += GHASH_CHUNK; len -= GHASH_CHUNK; @@ -1004,7 +995,7 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, #endif (*stream)(in, out, j, key, ctx->Yi.c); ctr += (unsigned int)j; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); out += i; in += i; len -= i; @@ -1012,7 +1003,7 @@ int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, if (len) { (*ctx->block)(ctx->Yi.c, ctx->EKi.c, key); ++ctr; - PUTU32_aligned(ctx->Yi.c + 12, ctr); + ctx->Yi.d[3] = CRYPTO_bswap4(ctr); while (len--) { uint8_t c = in[n]; ctx->Xi.c[n] ^= c; @@ -1062,8 +1053,9 @@ void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len) { #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) int crypto_gcm_clmul_enabled(void) { #ifdef GHASH_ASM - return OPENSSL_ia32cap_P[0] & (1 << 24) && /* check FXSR bit */ - OPENSSL_ia32cap_P[1] & (1 << 1); /* check PCLMULQDQ bit */ + const uint32_t *ia32cap = OPENSSL_ia32cap_get(); + return (ia32cap[0] & (1 << 24)) && // check FXSR bit + (ia32cap[1] & (1 << 1)); // check PCLMULQDQ bit #else return 0; #endif diff --git a/Sources/BoringSSL/crypto/modes/internal.h b/Sources/BoringSSL/crypto/fipsmodule/modes/internal.h similarity index 62% rename from Sources/BoringSSL/crypto/modes/internal.h rename to Sources/BoringSSL/crypto/fipsmodule/modes/internal.h index 94072ecb7..f6ee8f45a 100644 --- a/Sources/BoringSSL/crypto/modes/internal.h +++ b/Sources/BoringSSL/crypto/fipsmodule/modes/internal.h @@ -53,15 +53,13 @@ #include -#include "../internal.h" +#include "../../internal.h" #if defined(__cplusplus) extern "C" { #endif -#define asm __asm__ - #define STRICT_ALIGNMENT 1 #if defined(OPENSSL_X86_64) || defined(OPENSSL_X86) || defined(OPENSSL_AARCH64) #undef STRICT_ALIGNMENT @@ -111,38 +109,38 @@ static inline void PUTU32(void *out, uint32_t v) { OPENSSL_memcpy(out, &v, sizeof(v)); } -static inline uint32_t GETU32_aligned(const void *in) { - const char *alias = (const char *) in; - return CRYPTO_bswap4(*((const uint32_t *) alias)); +static inline size_t load_word_le(const void *in) { + size_t v; + OPENSSL_memcpy(&v, in, sizeof(v)); + return v; } -static inline void PUTU32_aligned(void *in, uint32_t v) { - char *alias = (char *) in; - *((uint32_t *) alias) = CRYPTO_bswap4(v); +static inline void store_word_le(void *out, size_t v) { + OPENSSL_memcpy(out, &v, sizeof(v)); } -/* block128_f is the type of a 128-bit, block cipher. */ +// block128_f is the type of a 128-bit, block cipher. typedef void (*block128_f)(const uint8_t in[16], uint8_t out[16], const void *key); -/* GCM definitions */ +// GCM definitions typedef struct { uint64_t hi,lo; } u128; -/* gmult_func multiplies |Xi| by the GCM key and writes the result back to - * |Xi|. */ +// gmult_func multiplies |Xi| by the GCM key and writes the result back to +// |Xi|. typedef void (*gmult_func)(uint64_t Xi[2], const u128 Htable[16]); -/* ghash_func repeatedly multiplies |Xi| by the GCM key and adds in blocks from - * |inp|. 
The result is written back to |Xi| and the |len| argument must be a - * multiple of 16. */ +// ghash_func repeatedly multiplies |Xi| by the GCM key and adds in blocks from +// |inp|. The result is written back to |Xi| and the |len| argument must be a +// multiple of 16. typedef void (*ghash_func)(uint64_t Xi[2], const u128 Htable[16], const uint8_t *inp, size_t len); -/* This differs from upstream's |gcm128_context| in that it does not have the - * |key| pointer, in order to make it |memcpy|-friendly. Rather the key is - * passed into each call that needs it. */ +// This differs from upstream's |gcm128_context| in that it does not have the +// |key| pointer, in order to make it |memcpy|-friendly. Rather the key is +// passed into each call that needs it. struct gcm128_context { - /* Following 6 names follow names in GCM specification */ + // Following 6 names follow names in GCM specification union { uint64_t u[2]; uint32_t d[4]; @@ -150,8 +148,8 @@ struct gcm128_context { size_t t[16 / sizeof(size_t)]; } Yi, EKi, EK0, len, Xi; - /* Note that the order of |Xi|, |H| and |Htable| is fixed by the MOVBE-based, - * x86-64, GHASH assembly. */ + // Note that the order of |Xi|, |H| and |Htable| is fixed by the MOVBE-based, + // x86-64, GHASH assembly. u128 H; u128 Htable[16]; gmult_func gmult; @@ -159,36 +157,40 @@ struct gcm128_context { unsigned int mres, ares; block128_f block; + + // use_aesni_gcm_crypt is true if this context should use the assembly + // functions |aesni_gcm_encrypt| and |aesni_gcm_decrypt| to process data. + unsigned use_aesni_gcm_crypt:1; }; #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) -/* crypto_gcm_clmul_enabled returns one if the CLMUL implementation of GCM is - * used. */ +// crypto_gcm_clmul_enabled returns one if the CLMUL implementation of GCM is +// used. int crypto_gcm_clmul_enabled(void); #endif -/* CTR. */ +// CTR. -/* ctr128_f is the type of a function that performs CTR-mode encryption. */ +// ctr128_f is the type of a function that performs CTR-mode encryption. typedef void (*ctr128_f)(const uint8_t *in, uint8_t *out, size_t blocks, const void *key, const uint8_t ivec[16]); -/* CRYPTO_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode) - * |len| bytes from |in| to |out| using |block| in counter mode. There's no - * requirement that |len| be a multiple of any value and any partial blocks are - * stored in |ecount_buf| and |*num|, which must be zeroed before the initial - * call. The counter is a 128-bit, big-endian value in |ivec| and is - * incremented by this function. */ +// CRYPTO_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode) +// |len| bytes from |in| to |out| using |block| in counter mode. There's no +// requirement that |len| be a multiple of any value and any partial blocks are +// stored in |ecount_buf| and |*num|, which must be zeroed before the initial +// call. The counter is a 128-bit, big-endian value in |ivec| and is +// incremented by this function. void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], uint8_t ecount_buf[16], unsigned *num, block128_f block); -/* CRYPTO_ctr128_encrypt_ctr32 acts like |CRYPTO_ctr128_encrypt| but takes - * |ctr|, a function that performs CTR mode but only deals with the lower 32 - * bits of the counter. This is useful when |ctr| can be an optimised - * function. 
*/ +// CRYPTO_ctr128_encrypt_ctr32 acts like |CRYPTO_ctr128_encrypt| but takes +// |ctr|, a function that performs CTR mode but only deals with the lower 32 +// bits of the counter. This is useful when |ctr| can be an optimised +// function. void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], uint8_t ecount_buf[16], unsigned *num, @@ -201,136 +203,137 @@ void aesni_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t blocks, #endif -/* GCM. - * - * This API differs from the upstream API slightly. The |GCM128_CONTEXT| does - * not have a |key| pointer that points to the key as upstream's version does. - * Instead, every function takes a |key| parameter. This way |GCM128_CONTEXT| - * can be safely copied. */ +// GCM. +// +// This API differs from the upstream API slightly. The |GCM128_CONTEXT| does +// not have a |key| pointer that points to the key as upstream's version does. +// Instead, every function takes a |key| parameter. This way |GCM128_CONTEXT| +// can be safely copied. typedef struct gcm128_context GCM128_CONTEXT; -/* CRYPTO_ghash_init writes a precomputed table of powers of |gcm_key| to - * |out_table| and sets |*out_mult| and |*out_hash| to (potentially hardware - * accelerated) functions for performing operations in the GHASH field. */ +// CRYPTO_ghash_init writes a precomputed table of powers of |gcm_key| to +// |out_table| and sets |*out_mult| and |*out_hash| to (potentially hardware +// accelerated) functions for performing operations in the GHASH field. If the +// AVX implementation was used |*out_is_avx| will be true. void CRYPTO_ghash_init(gmult_func *out_mult, ghash_func *out_hash, - u128 *out_key, u128 out_table[16], + u128 *out_key, u128 out_table[16], int *out_is_avx, const uint8_t *gcm_key); -/* CRYPTO_gcm128_init initialises |ctx| to use |block| (typically AES) with - * the given key. */ +// CRYPTO_gcm128_init initialises |ctx| to use |block| (typically AES) with +// the given key. |is_aesni_encrypt| is one if |block| is |aesni_encrypt|. OPENSSL_EXPORT void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, const void *key, - block128_f block); + block128_f block, int is_aesni_encrypt); -/* CRYPTO_gcm128_setiv sets the IV (nonce) for |ctx|. The |key| must be the - * same key that was passed to |CRYPTO_gcm128_init|. */ +// CRYPTO_gcm128_setiv sets the IV (nonce) for |ctx|. The |key| must be the +// same key that was passed to |CRYPTO_gcm128_init|. OPENSSL_EXPORT void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const void *key, const uint8_t *iv, size_t iv_len); -/* CRYPTO_gcm128_aad sets the authenticated data for an instance of GCM. - * This must be called before and data is encrypted. It returns one on success - * and zero otherwise. */ +// CRYPTO_gcm128_aad sets the authenticated data for an instance of GCM. +// This must be called before and data is encrypted. It returns one on success +// and zero otherwise. OPENSSL_EXPORT int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const uint8_t *aad, size_t len); -/* CRYPTO_gcm128_encrypt encrypts |len| bytes from |in| to |out|. The |key| - * must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one - * on success and zero otherwise. */ +// CRYPTO_gcm128_encrypt encrypts |len| bytes from |in| to |out|. The |key| +// must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one +// on success and zero otherwise. 
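// Taken together, the GCM declarations in this header are driven in an
// init / setiv / aad / encrypt / tag sequence; the signature change in this
// patch is the extra is_aesni_encrypt flag on CRYPTO_gcm128_init (plus the
// out_is_avx output on CRYPTO_ghash_init). A rough sketch of a caller inside
// the BoringSSL tree, assuming the portable AES_encrypt as the block function
// (so the flag is 0); the include path and gcm_seal_sketch are illustrative,
// not an existing API:
#include <stddef.h>
#include <stdint.h>

#include <openssl/aes.h>

#include "modes/internal.h"  // adjust to wherever this header sits in the tree

static int gcm_seal_sketch(const uint8_t key_bytes[16], const uint8_t *nonce,
                           size_t nonce_len, const uint8_t *aad, size_t aad_len,
                           const uint8_t *in, uint8_t *out, size_t len,
                           uint8_t tag[16]) {
  AES_KEY aes;
  if (AES_set_encrypt_key(key_bytes, 128, &aes) != 0) {
    return 0;
  }

  GCM128_CONTEXT gcm;
  CRYPTO_gcm128_init(&gcm, &aes, (block128_f)AES_encrypt,
                     /*is_aesni_encrypt=*/0);
  CRYPTO_gcm128_setiv(&gcm, &aes, nonce, nonce_len);
  if (!CRYPTO_gcm128_aad(&gcm, aad, aad_len) ||
      !CRYPTO_gcm128_encrypt(&gcm, &aes, in, out, len)) {
    return 0;
  }
  CRYPTO_gcm128_tag(&gcm, tag, 16);
  return 1;
}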
OPENSSL_EXPORT int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const void *key, const uint8_t *in, uint8_t *out, size_t len); -/* CRYPTO_gcm128_decrypt decrypts |len| bytes from |in| to |out|. The |key| - * must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one - * on success and zero otherwise. */ +// CRYPTO_gcm128_decrypt decrypts |len| bytes from |in| to |out|. The |key| +// must be the same key that was passed to |CRYPTO_gcm128_init|. It returns one +// on success and zero otherwise. OPENSSL_EXPORT int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const void *key, const uint8_t *in, uint8_t *out, size_t len); -/* CRYPTO_gcm128_encrypt_ctr32 encrypts |len| bytes from |in| to |out| using - * a CTR function that only handles the bottom 32 bits of the nonce, like - * |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was - * passed to |CRYPTO_gcm128_init|. It returns one on success and zero - * otherwise. */ +// CRYPTO_gcm128_encrypt_ctr32 encrypts |len| bytes from |in| to |out| using +// a CTR function that only handles the bottom 32 bits of the nonce, like +// |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was +// passed to |CRYPTO_gcm128_init|. It returns one on success and zero +// otherwise. OPENSSL_EXPORT int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, const uint8_t *in, uint8_t *out, size_t len, ctr128_f stream); -/* CRYPTO_gcm128_decrypt_ctr32 decrypts |len| bytes from |in| to |out| using - * a CTR function that only handles the bottom 32 bits of the nonce, like - * |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was - * passed to |CRYPTO_gcm128_init|. It returns one on success and zero - * otherwise. */ +// CRYPTO_gcm128_decrypt_ctr32 decrypts |len| bytes from |in| to |out| using +// a CTR function that only handles the bottom 32 bits of the nonce, like +// |CRYPTO_ctr128_encrypt_ctr32|. The |key| must be the same key that was +// passed to |CRYPTO_gcm128_init|. It returns one on success and zero +// otherwise. OPENSSL_EXPORT int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx, const void *key, const uint8_t *in, uint8_t *out, size_t len, ctr128_f stream); -/* CRYPTO_gcm128_finish calculates the authenticator and compares it against - * |len| bytes of |tag|. It returns one on success and zero otherwise. */ +// CRYPTO_gcm128_finish calculates the authenticator and compares it against +// |len| bytes of |tag|. It returns one on success and zero otherwise. OPENSSL_EXPORT int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const uint8_t *tag, size_t len); -/* CRYPTO_gcm128_tag calculates the authenticator and copies it into |tag|. - * The minimum of |len| and 16 bytes are copied into |tag|. */ +// CRYPTO_gcm128_tag calculates the authenticator and copies it into |tag|. +// The minimum of |len| and 16 bytes are copied into |tag|. OPENSSL_EXPORT void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, uint8_t *tag, size_t len); -/* CBC. */ +// CBC. -/* cbc128_f is the type of a function that performs CBC-mode encryption. */ +// cbc128_f is the type of a function that performs CBC-mode encryption. typedef void (*cbc128_f)(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], int enc); -/* CRYPTO_cbc128_encrypt encrypts |len| bytes from |in| to |out| using the - * given IV and block cipher in CBC mode. The input need not be a multiple of - * 128 bits long, but the output will round up to the nearest 128 bit multiple, - * zero padding the input if needed. The IV will be updated on return. 
*/ +// CRYPTO_cbc128_encrypt encrypts |len| bytes from |in| to |out| using the +// given IV and block cipher in CBC mode. The input need not be a multiple of +// 128 bits long, but the output will round up to the nearest 128 bit multiple, +// zero padding the input if needed. The IV will be updated on return. void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], block128_f block); -/* CRYPTO_cbc128_decrypt decrypts |len| bytes from |in| to |out| using the - * given IV and block cipher in CBC mode. If |len| is not a multiple of 128 - * bits then only that many bytes will be written, but a multiple of 128 bits - * is always read from |in|. The IV will be updated on return. */ +// CRYPTO_cbc128_decrypt decrypts |len| bytes from |in| to |out| using the +// given IV and block cipher in CBC mode. If |len| is not a multiple of 128 +// bits then only that many bytes will be written, but a multiple of 128 bits +// is always read from |in|. The IV will be updated on return. void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], block128_f block); -/* OFB. */ +// OFB. -/* CRYPTO_ofb128_encrypt encrypts (or decrypts, it's the same with OFB mode) - * |len| bytes from |in| to |out| using |block| in OFB mode. There's no - * requirement that |len| be a multiple of any value and any partial blocks are - * stored in |ivec| and |*num|, the latter must be zero before the initial - * call. */ +// CRYPTO_ofb128_encrypt encrypts (or decrypts, it's the same with OFB mode) +// |len| bytes from |in| to |out| using |block| in OFB mode. There's no +// requirement that |len| be a multiple of any value and any partial blocks are +// stored in |ivec| and |*num|, the latter must be zero before the initial +// call. void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], unsigned *num, block128_f block); -/* CFB. */ +// CFB. -/* CRYPTO_cfb128_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes - * from |in| to |out| using |block| in CFB mode. There's no requirement that - * |len| be a multiple of any value and any partial blocks are stored in |ivec| - * and |*num|, the latter must be zero before the initial call. */ +// CRYPTO_cfb128_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes +// from |in| to |out| using |block| in CFB mode. There's no requirement that +// |len| be a multiple of any value and any partial blocks are stored in |ivec| +// and |*num|, the latter must be zero before the initial call. void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block); -/* CRYPTO_cfb128_8_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes - * from |in| to |out| using |block| in CFB-8 mode. Prior to the first call - * |num| should be set to zero. */ +// CRYPTO_cfb128_8_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes +// from |in| to |out| using |block| in CFB-8 mode. Prior to the first call +// |num| should be set to zero. void CRYPTO_cfb128_8_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block); -/* CRYPTO_cfb128_1_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes - * from |in| to |out| using |block| in CFB-1 mode. Prior to the first call - * |num| should be set to zero. 
*/ +// CRYPTO_cfb128_1_encrypt encrypts (or decrypts, if |enc| is zero) |len| bytes +// from |in| to |out| using |block| in CFB-1 mode. Prior to the first call +// |num| should be set to zero. void CRYPTO_cfb128_1_encrypt(const uint8_t *in, uint8_t *out, size_t bits, const void *key, uint8_t ivec[16], unsigned *num, int enc, block128_f block); @@ -340,11 +343,11 @@ size_t CRYPTO_cts128_encrypt_block(const uint8_t *in, uint8_t *out, size_t len, block128_f block); -/* POLYVAL. - * - * POLYVAL is a polynomial authenticator that operates over a field very - * similar to the one that GHASH uses. See - * https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#section-3. */ +// POLYVAL. +// +// POLYVAL is a polynomial authenticator that operates over a field very +// similar to the one that GHASH uses. See +// https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#section-3. typedef union { uint64_t u[2]; @@ -352,8 +355,8 @@ typedef union { } polyval_block; struct polyval_ctx { - /* Note that the order of |S|, |H| and |Htable| is fixed by the MOVBE-based, - * x86-64, GHASH assembly. */ + // Note that the order of |S|, |H| and |Htable| is fixed by the MOVBE-based, + // x86-64, GHASH assembly. polyval_block S; u128 H; u128 Htable[16]; @@ -361,21 +364,21 @@ struct polyval_ctx { ghash_func ghash; }; -/* CRYPTO_POLYVAL_init initialises |ctx| using |key|. */ +// CRYPTO_POLYVAL_init initialises |ctx| using |key|. void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]); -/* CRYPTO_POLYVAL_update_blocks updates the accumulator in |ctx| given the - * blocks from |in|. Only a whole number of blocks can be processed so |in_len| - * must be a multiple of 16. */ +// CRYPTO_POLYVAL_update_blocks updates the accumulator in |ctx| given the +// blocks from |in|. Only a whole number of blocks can be processed so |in_len| +// must be a multiple of 16. void CRYPTO_POLYVAL_update_blocks(struct polyval_ctx *ctx, const uint8_t *in, size_t in_len); -/* CRYPTO_POLYVAL_finish writes the accumulator from |ctx| to |out|. */ +// CRYPTO_POLYVAL_finish writes the accumulator from |ctx| to |out|. 
void CRYPTO_POLYVAL_finish(const struct polyval_ctx *ctx, uint8_t out[16]); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_MODES_INTERNAL_H */ +#endif // OPENSSL_HEADER_MODES_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/modes/ofb.c b/Sources/BoringSSL/crypto/fipsmodule/modes/ofb.c similarity index 99% rename from Sources/BoringSSL/crypto/modes/ofb.c rename to Sources/BoringSSL/crypto/fipsmodule/modes/ofb.c index 95d15c3d4..63bba68b0 100644 --- a/Sources/BoringSSL/crypto/modes/ofb.c +++ b/Sources/BoringSSL/crypto/fipsmodule/modes/ofb.c @@ -54,7 +54,7 @@ #include "internal.h" -OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size); +OPENSSL_COMPILE_ASSERT((16 % sizeof(size_t)) == 0, bad_size_t_size_ofb); void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const void *key, uint8_t ivec[16], unsigned *num, diff --git a/Sources/BoringSSL/crypto/modes/polyval.c b/Sources/BoringSSL/crypto/fipsmodule/modes/polyval.c similarity index 77% rename from Sources/BoringSSL/crypto/modes/polyval.c rename to Sources/BoringSSL/crypto/fipsmodule/modes/polyval.c index 33d37eb79..857dc0e36 100644 --- a/Sources/BoringSSL/crypto/modes/polyval.c +++ b/Sources/BoringSSL/crypto/fipsmodule/modes/polyval.c @@ -14,50 +14,50 @@ #include -#if !defined(OPENSSL_SMALL) - #include #include #include "internal.h" -#include "../internal.h" +#include "../../internal.h" -/* byte_reverse reverses the order of the bytes in |b->c|. */ +// byte_reverse reverses the order of the bytes in |b->c|. static void byte_reverse(polyval_block *b) { const uint64_t t = CRYPTO_bswap8(b->u[0]); b->u[0] = CRYPTO_bswap8(b->u[1]); b->u[1] = t; } -/* reverse_and_mulX_ghash interprets the bytes |b->c| as a reversed element of - * the GHASH field, multiplies that by 'x' and serialises the result back into - * |b|, but with GHASH's backwards bit ordering. */ +// reverse_and_mulX_ghash interprets the bytes |b->c| as a reversed element of +// the GHASH field, multiplies that by 'x' and serialises the result back into +// |b|, but with GHASH's backwards bit ordering. static void reverse_and_mulX_ghash(polyval_block *b) { uint64_t hi = b->u[0]; uint64_t lo = b->u[1]; - const unsigned carry = constant_time_eq(hi & 1, 1); + const crypto_word_t carry = constant_time_eq_w(hi & 1, 1); hi >>= 1; hi |= lo << 63; lo >>= 1; - lo ^= ((uint64_t) constant_time_select(carry, 0xe1, 0)) << 56; + lo ^= ((uint64_t) constant_time_select_w(carry, 0xe1, 0)) << 56; b->u[0] = CRYPTO_bswap8(lo); b->u[1] = CRYPTO_bswap8(hi); } -/* POLYVAL(H, X_1, ..., X_n) = - * ByteReverse(GHASH(mulX_GHASH(ByteReverse(H)), ByteReverse(X_1), ..., - * ByteReverse(X_n))). - * - * See https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#appendix-A. */ +// POLYVAL(H, X_1, ..., X_n) = +// ByteReverse(GHASH(mulX_GHASH(ByteReverse(H)), ByteReverse(X_1), ..., +// ByteReverse(X_n))). +// +// See https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02#appendix-A. 
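// reverse_and_mulX_ghash above now uses the word-sized constant-time helpers
// (constant_time_eq_w / constant_time_select_w), so the conditional XOR of
// 0xe1 in the multiply-by-x step never branches on the shifted-out bit. A
// minimal standalone sketch of that mask-and-select idiom (ct_eq and ct_select
// are illustrative stand-ins, not BoringSSL's internal implementations):
#include <stdint.h>
#include <stdio.h>

// All-ones if a == b, all-zeros otherwise, with no data-dependent branch.
static uint64_t ct_eq(uint64_t a, uint64_t b) {
  uint64_t diff = a ^ b;                                       // zero iff equal
  uint64_t is_nonzero = (diff | ((uint64_t)0 - diff)) >> 63;   // 1 iff diff != 0
  return is_nonzero - 1;                                       // 0 -> ~0, 1 -> 0
}

// Picks a where mask bits are set, b where they are clear.
static uint64_t ct_select(uint64_t mask, uint64_t a, uint64_t b) {
  return (mask & a) | (~mask & b);
}

int main(void) {
  uint64_t lo = 0x0123456789abcdefu;
  uint64_t hi = 0xfedcba9876543211u;                // low bit set: carry out
  uint64_t carry = ct_eq(hi & 1, 1);                // all-ones mask here
  lo ^= ct_select(carry, (uint64_t)0xe1 << 56, 0);  // conditional reduction
  printf("lo after conditional xor: %016llx\n", (unsigned long long)lo);
  return 0;
}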
void CRYPTO_POLYVAL_init(struct polyval_ctx *ctx, const uint8_t key[16]) { polyval_block H; OPENSSL_memcpy(H.c, key, 16); reverse_and_mulX_ghash(&H); - CRYPTO_ghash_init(&ctx->gmult, &ctx->ghash, &ctx->H, ctx->Htable, H.c); + int is_avx; + CRYPTO_ghash_init(&ctx->gmult, &ctx->ghash, &ctx->H, ctx->Htable, &is_avx, + H.c); OPENSSL_memset(&ctx->S, 0, sizeof(ctx->S)); } @@ -89,6 +89,3 @@ void CRYPTO_POLYVAL_finish(const struct polyval_ctx *ctx, uint8_t out[16]) { byte_reverse(&S); OPENSSL_memcpy(out, &S.c, sizeof(polyval_block)); } - - -#endif /* !OPENSSL_SMALL */ diff --git a/Sources/BoringSSL/crypto/fipsmodule/rand/ctrdrbg.c b/Sources/BoringSSL/crypto/fipsmodule/rand/ctrdrbg.c new file mode 100644 index 000000000..9f8be6667 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/rand/ctrdrbg.c @@ -0,0 +1,200 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include +#include + +#include "internal.h" +#include "../cipher/internal.h" + + +// Section references in this file refer to SP 800-90Ar1: +// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf + +// See table 3. +static const uint64_t kMaxReseedCount = UINT64_C(1) << 48; + +int CTR_DRBG_init(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *personalization, size_t personalization_len) { + // Section 10.2.1.3.1 + if (personalization_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + uint8_t seed_material[CTR_DRBG_ENTROPY_LEN]; + OPENSSL_memcpy(seed_material, entropy, CTR_DRBG_ENTROPY_LEN); + + for (size_t i = 0; i < personalization_len; i++) { + seed_material[i] ^= personalization[i]; + } + + // Section 10.2.1.2 + + // kInitMask is the result of encrypting blocks with big-endian value 1, 2 + // and 3 with the all-zero AES-256 key. + static const uint8_t kInitMask[CTR_DRBG_ENTROPY_LEN] = { + 0x53, 0x0f, 0x8a, 0xfb, 0xc7, 0x45, 0x36, 0xb9, 0xa9, 0x63, 0xb4, 0xf1, + 0xc4, 0xcb, 0x73, 0x8b, 0xce, 0xa7, 0x40, 0x3d, 0x4d, 0x60, 0x6b, 0x6e, + 0x07, 0x4e, 0xc5, 0xd3, 0xba, 0xf3, 0x9d, 0x18, 0x72, 0x60, 0x03, 0xca, + 0x37, 0xa6, 0x2a, 0x74, 0xd1, 0xa2, 0xf5, 0x8e, 0x75, 0x06, 0x35, 0x8e, + }; + + for (size_t i = 0; i < sizeof(kInitMask); i++) { + seed_material[i] ^= kInitMask[i]; + } + + drbg->ctr = aes_ctr_set_key(&drbg->ks, NULL, &drbg->block, seed_material, 32); + OPENSSL_memcpy(drbg->counter.bytes, seed_material + 32, 16); + drbg->reseed_counter = 1; + + return 1; +} + +OPENSSL_COMPILE_ASSERT(CTR_DRBG_ENTROPY_LEN % AES_BLOCK_SIZE == 0, + not_a_multiple_of_block_size); + +// ctr_inc adds |n| to the last four bytes of |drbg->counter|, treated as a +// big-endian number. 
+static void ctr32_add(CTR_DRBG_STATE *drbg, uint32_t n) { + drbg->counter.words[3] = + CRYPTO_bswap4(CRYPTO_bswap4(drbg->counter.words[3]) + n); +} + +static int CTR_DRBG_update(CTR_DRBG_STATE *drbg, const uint8_t *data, + size_t data_len) { + // Section 10.2.1.2. A value of |data_len| which less than + // |CTR_DRBG_ENTROPY_LEN| is permitted and acts the same as right-padding + // with zeros. This can save a copy. + if (data_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + uint8_t temp[CTR_DRBG_ENTROPY_LEN]; + for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i += AES_BLOCK_SIZE) { + ctr32_add(drbg, 1); + drbg->block(drbg->counter.bytes, temp + i, &drbg->ks); + } + + for (size_t i = 0; i < data_len; i++) { + temp[i] ^= data[i]; + } + + drbg->ctr = aes_ctr_set_key(&drbg->ks, NULL, &drbg->block, temp, 32); + OPENSSL_memcpy(drbg->counter.bytes, temp + 32, 16); + + return 1; +} + +int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *additional_data, + size_t additional_data_len) { + // Section 10.2.1.4 + uint8_t entropy_copy[CTR_DRBG_ENTROPY_LEN]; + + if (additional_data_len > 0) { + if (additional_data_len > CTR_DRBG_ENTROPY_LEN) { + return 0; + } + + OPENSSL_memcpy(entropy_copy, entropy, CTR_DRBG_ENTROPY_LEN); + for (size_t i = 0; i < additional_data_len; i++) { + entropy_copy[i] ^= additional_data[i]; + } + + entropy = entropy_copy; + } + + if (!CTR_DRBG_update(drbg, entropy, CTR_DRBG_ENTROPY_LEN)) { + return 0; + } + + drbg->reseed_counter = 1; + + return 1; +} + +int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, size_t out_len, + const uint8_t *additional_data, + size_t additional_data_len) { + // See 9.3.1 + if (out_len > CTR_DRBG_MAX_GENERATE_LENGTH) { + return 0; + } + + // See 10.2.1.5.1 + if (drbg->reseed_counter > kMaxReseedCount) { + return 0; + } + + if (additional_data_len != 0 && + !CTR_DRBG_update(drbg, additional_data, additional_data_len)) { + return 0; + } + + // kChunkSize is used to interact better with the cache. Since the AES-CTR + // code assumes that it's encrypting rather than just writing keystream, the + // buffer has to be zeroed first. Without chunking, large reads would zero + // the whole buffer, flushing the L1 cache, and then do another pass (missing + // the cache every time) to “encrypt” it. The code can avoid this by + // chunking. 
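// The generate path here produces keystream by zeroing the output and letting
// the AES-CTR code "encrypt" it in place, working in cache-sized chunks so the
// buffer is still hot in L1 when the second pass overwrites it, and finishing
// any partial block out of a temporary. A rough standalone sketch of that
// chunking shape with a toy XOR routine standing in for the real CTR function
// (ctr_xor_blocks and generate_sketch are hypothetical):
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16
static const size_t kChunkSize = 8 * 1024;  // same chunk size as the code here

// Toy stand-in for a CTR routine that XORs keystream into whole blocks of
// |out| (which is why the caller zeroes |out| to recover raw keystream).
static void ctr_xor_blocks(uint8_t *out, size_t num_blocks, uint64_t *counter) {
  for (size_t i = 0; i < num_blocks; i++) {
    for (size_t j = 0; j < BLOCK_SIZE; j++) {
      out[i * BLOCK_SIZE + j] ^= (uint8_t)(*counter + j);  // not real keystream
    }
    (*counter)++;
  }
}

static void generate_sketch(uint8_t *out, size_t out_len, uint64_t *counter) {
  while (out_len >= BLOCK_SIZE) {
    size_t todo = kChunkSize < out_len ? kChunkSize : out_len;
    todo &= ~(size_t)(BLOCK_SIZE - 1);   // whole blocks only
    memset(out, 0, todo);                // zero, then "encrypt" in place
    ctr_xor_blocks(out, todo / BLOCK_SIZE, counter);
    out += todo;
    out_len -= todo;
  }
  if (out_len > 0) {                     // partial final block via a temporary
    uint8_t block[BLOCK_SIZE] = {0};
    ctr_xor_blocks(block, 1, counter);
    memcpy(out, block, out_len);
  }
}

int main(void) {
  uint8_t buf[100];
  uint64_t counter = 1;
  generate_sketch(buf, sizeof(buf), &counter);
  printf("%u blocks of keystream used\n", (unsigned)(counter - 1));  // 7
  return 0;
}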
+ static const size_t kChunkSize = 8 * 1024; + + while (out_len >= AES_BLOCK_SIZE) { + size_t todo = kChunkSize; + if (todo > out_len) { + todo = out_len; + } + + todo &= ~(AES_BLOCK_SIZE-1); + const size_t num_blocks = todo / AES_BLOCK_SIZE; + + if (drbg->ctr) { + OPENSSL_memset(out, 0, todo); + ctr32_add(drbg, 1); + drbg->ctr(out, out, num_blocks, &drbg->ks, drbg->counter.bytes); + ctr32_add(drbg, num_blocks - 1); + } else { + for (size_t i = 0; i < todo; i += AES_BLOCK_SIZE) { + ctr32_add(drbg, 1); + drbg->block(drbg->counter.bytes, out + i, &drbg->ks); + } + } + + out += todo; + out_len -= todo; + } + + if (out_len > 0) { + uint8_t block[AES_BLOCK_SIZE]; + ctr32_add(drbg, 1); + drbg->block(drbg->counter.bytes, block, &drbg->ks); + + OPENSSL_memcpy(out, block, out_len); + } + + if (!CTR_DRBG_update(drbg, additional_data, additional_data_len)) { + return 0; + } + + drbg->reseed_counter++; + return 1; +} + +void CTR_DRBG_clear(CTR_DRBG_STATE *drbg) { + OPENSSL_cleanse(drbg, sizeof(CTR_DRBG_STATE)); +} diff --git a/Sources/BoringSSL/crypto/fipsmodule/rand/internal.h b/Sources/BoringSSL/crypto/fipsmodule/rand/internal.h new file mode 100644 index 000000000..f73f4a177 --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/rand/internal.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2015, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#ifndef OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H +#define OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H + +#include + +#include "../../internal.h" +#include "../modes/internal.h" + +#if defined(__cplusplus) +extern "C" { +#endif + + +// RAND_bytes_with_additional_data samples from the RNG after mixing 32 bytes +// from |user_additional_data| in. +void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, + const uint8_t user_additional_data[32]); + +// CRYPTO_sysrand fills |len| bytes at |buf| with entropy from the operating +// system. +void CRYPTO_sysrand(uint8_t *buf, size_t len); + +// rand_fork_unsafe_buffering_enabled returns whether fork-unsafe buffering has +// been enabled via |RAND_enable_fork_unsafe_buffering|. +int rand_fork_unsafe_buffering_enabled(void); + +// CTR_DRBG_STATE contains the state of a CTR_DRBG based on AES-256. See SP +// 800-90Ar1. +typedef struct { + AES_KEY ks; + block128_f block; + ctr128_f ctr; + union { + uint8_t bytes[16]; + uint32_t words[4]; + } counter; + uint64_t reseed_counter; +} CTR_DRBG_STATE; + +// See SP 800-90Ar1, table 3. +#define CTR_DRBG_ENTROPY_LEN 48 +#define CTR_DRBG_MAX_GENERATE_LENGTH 65536 + +// CTR_DRBG_init initialises |*drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of +// entropy in |entropy| and, optionally, a personalization string up to +// |CTR_DRBG_ENTROPY_LEN| bytes in length. It returns one on success and zero +// on error. 
+OPENSSL_EXPORT int CTR_DRBG_init(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *personalization, + size_t personalization_len); + +// CTR_DRBG_reseed reseeds |drbg| given |CTR_DRBG_ENTROPY_LEN| bytes of entropy +// in |entropy| and, optionally, up to |CTR_DRBG_ENTROPY_LEN| bytes of +// additional data. It returns one on success or zero on error. +OPENSSL_EXPORT int CTR_DRBG_reseed(CTR_DRBG_STATE *drbg, + const uint8_t entropy[CTR_DRBG_ENTROPY_LEN], + const uint8_t *additional_data, + size_t additional_data_len); + +// CTR_DRBG_generate processes to up |CTR_DRBG_ENTROPY_LEN| bytes of additional +// data (if any) and then writes |out_len| random bytes to |out|, where +// |out_len| <= |CTR_DRBG_MAX_GENERATE_LENGTH|. It returns one on success or +// zero on error. +OPENSSL_EXPORT int CTR_DRBG_generate(CTR_DRBG_STATE *drbg, uint8_t *out, + size_t out_len, + const uint8_t *additional_data, + size_t additional_data_len); + +// CTR_DRBG_clear zeroises the state of |drbg|. +OPENSSL_EXPORT void CTR_DRBG_clear(CTR_DRBG_STATE *drbg); + + +#if defined(__cplusplus) +} // extern C +#endif + +#endif // OPENSSL_HEADER_CRYPTO_RAND_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/fipsmodule/rand/rand.c b/Sources/BoringSSL/crypto/fipsmodule/rand/rand.c new file mode 100644 index 000000000..dafc91f7f --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/rand/rand.c @@ -0,0 +1,358 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include +#include +#include + +#if defined(BORINGSSL_FIPS) +#include +#endif + +#include +#include +#include + +#include "internal.h" +#include "../../internal.h" +#include "../delocate.h" + + +// It's assumed that the operating system always has an unfailing source of +// entropy which is accessed via |CRYPTO_sysrand|. (If the operating system +// entropy source fails, it's up to |CRYPTO_sysrand| to abort the process—we +// don't try to handle it.) +// +// In addition, the hardware may provide a low-latency RNG. Intel's rdrand +// instruction is the canonical example of this. When a hardware RNG is +// available we don't need to worry about an RNG failure arising from fork()ing +// the process or moving a VM, so we can keep thread-local RNG state and use it +// as an additional-data input to CTR-DRBG. +// +// (We assume that the OS entropy is safe from fork()ing and VM duplication. +// This might be a bit of a leap of faith, esp on Windows, but there's nothing +// that we can do about it.) + +// kReseedInterval is the number of generate calls made to CTR-DRBG before +// reseeding. +static const unsigned kReseedInterval = 4096; + +// CRNGT_BLOCK_SIZE is the number of bytes in a “block” for the purposes of the +// continuous random number generator test in FIPS 140-2, section 4.9.2. 
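+// Each newly fetched block of raw entropy is compared against the previous
+// one, and the process aborts if the two are equal (see |rand_get_seed|).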
+#define CRNGT_BLOCK_SIZE 16 + +// rand_thread_state contains the per-thread state for the RNG. +struct rand_thread_state { + CTR_DRBG_STATE drbg; + // calls is the number of generate calls made on |drbg| since it was last + // (re)seeded. This is bound by |kReseedInterval|. + unsigned calls; + // last_block_valid is non-zero iff |last_block| contains data from + // |CRYPTO_sysrand|. + int last_block_valid; + +#if defined(BORINGSSL_FIPS) + // last_block contains the previous block from |CRYPTO_sysrand|. + uint8_t last_block[CRNGT_BLOCK_SIZE]; + // next and prev form a NULL-terminated, double-linked list of all states in + // a process. + struct rand_thread_state *next, *prev; +#endif +}; + +#if defined(BORINGSSL_FIPS) +// thread_states_list is the head of a linked-list of all |rand_thread_state| +// objects in the process, one per thread. This is needed because FIPS requires +// that they be zeroed on process exit, but thread-local destructors aren't +// called when the whole process is exiting. +DEFINE_BSS_GET(struct rand_thread_state *, thread_states_list); +DEFINE_STATIC_MUTEX(thread_states_list_lock); + +static void rand_thread_state_clear_all(void) __attribute__((destructor)); +static void rand_thread_state_clear_all(void) { + CRYPTO_STATIC_MUTEX_lock_write(thread_states_list_lock_bss_get()); + for (struct rand_thread_state *cur = *thread_states_list_bss_get(); + cur != NULL; cur = cur->next) { + CTR_DRBG_clear(&cur->drbg); + } + // |thread_states_list_lock is deliberately left locked so that any threads + // that are still running will hang if they try to call |RAND_bytes|. +} +#endif + +// rand_thread_state_free frees a |rand_thread_state|. This is called when a +// thread exits. +static void rand_thread_state_free(void *state_in) { + struct rand_thread_state *state = state_in; + + if (state_in == NULL) { + return; + } + +#if defined(BORINGSSL_FIPS) + CRYPTO_STATIC_MUTEX_lock_write(thread_states_list_lock_bss_get()); + + if (state->prev != NULL) { + state->prev->next = state->next; + } else { + *thread_states_list_bss_get() = state->next; + } + + if (state->next != NULL) { + state->next->prev = state->prev; + } + + CRYPTO_STATIC_MUTEX_unlock_write(thread_states_list_lock_bss_get()); + + CTR_DRBG_clear(&state->drbg); +#endif + + OPENSSL_free(state); +} + +#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) && \ + !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) + +// These functions are defined in asm/rdrand-x86_64.pl +extern int CRYPTO_rdrand(uint8_t out[8]); +extern int CRYPTO_rdrand_multiple8_buf(uint8_t *buf, size_t len); + +static int have_rdrand(void) { + return (OPENSSL_ia32cap_get()[1] & (1u << 30)) != 0; +} + +static int hwrand(uint8_t *buf, const size_t len) { + if (!have_rdrand()) { + return 0; + } + + const size_t len_multiple8 = len & ~7; + if (!CRYPTO_rdrand_multiple8_buf(buf, len_multiple8)) { + return 0; + } + const size_t remainder = len - len_multiple8; + + if (remainder != 0) { + assert(remainder < 8); + + uint8_t rand_buf[8]; + if (!CRYPTO_rdrand(rand_buf)) { + return 0; + } + OPENSSL_memcpy(buf + len_multiple8, rand_buf, remainder); + } + +#if defined(BORINGSSL_FIPS_BREAK_CRNG) + // This breaks the "continuous random number generator test" defined in FIPS + // 140-2, section 4.9.2, and implemented in rand_get_seed(). 
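+  // Zeroing the output makes consecutive entropy blocks identical, which the
+  // test is required to detect.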
+ OPENSSL_memset(buf, 0, len); +#endif + + return 1; +} + +#else + +static int hwrand(uint8_t *buf, size_t len) { + return 0; +} + +#endif + +#if defined(BORINGSSL_FIPS) + +static void rand_get_seed(struct rand_thread_state *state, + uint8_t seed[CTR_DRBG_ENTROPY_LEN]) { + if (!state->last_block_valid) { + if (!hwrand(state->last_block, sizeof(state->last_block))) { + CRYPTO_sysrand(state->last_block, sizeof(state->last_block)); + } + state->last_block_valid = 1; + } + + // We overread from /dev/urandom or RDRAND by a factor of 10 and XOR to + // whiten. +#define FIPS_OVERREAD 10 + uint8_t entropy[CTR_DRBG_ENTROPY_LEN * FIPS_OVERREAD]; + + if (!hwrand(entropy, sizeof(entropy))) { + CRYPTO_sysrand(entropy, sizeof(entropy)); + } + + // See FIPS 140-2, section 4.9.2. This is the “continuous random number + // generator test” which causes the program to randomly abort. Hopefully the + // rate of failure is small enough not to be a problem in practice. + if (CRYPTO_memcmp(state->last_block, entropy, CRNGT_BLOCK_SIZE) == 0) { + printf("CRNGT failed.\n"); + BORINGSSL_FIPS_abort(); + } + + for (size_t i = CRNGT_BLOCK_SIZE; i < sizeof(entropy); + i += CRNGT_BLOCK_SIZE) { + if (CRYPTO_memcmp(entropy + i - CRNGT_BLOCK_SIZE, entropy + i, + CRNGT_BLOCK_SIZE) == 0) { + printf("CRNGT failed.\n"); + BORINGSSL_FIPS_abort(); + } + } + OPENSSL_memcpy(state->last_block, + entropy + sizeof(entropy) - CRNGT_BLOCK_SIZE, + CRNGT_BLOCK_SIZE); + + OPENSSL_memcpy(seed, entropy, CTR_DRBG_ENTROPY_LEN); + + for (size_t i = 1; i < FIPS_OVERREAD; i++) { + for (size_t j = 0; j < CTR_DRBG_ENTROPY_LEN; j++) { + seed[j] ^= entropy[CTR_DRBG_ENTROPY_LEN * i + j]; + } + } +} + +#else + +static void rand_get_seed(struct rand_thread_state *state, + uint8_t seed[CTR_DRBG_ENTROPY_LEN]) { + // If not in FIPS mode, we don't overread from the system entropy source and + // we don't depend only on the hardware RDRAND. + CRYPTO_sysrand(seed, CTR_DRBG_ENTROPY_LEN); +} + +#endif + +void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len, + const uint8_t user_additional_data[32]) { + if (out_len == 0) { + return; + } + + // Additional data is mixed into every CTR-DRBG call to protect, as best we + // can, against forks & VM clones. We do not over-read this information and + // don't reseed with it so, from the point of view of FIPS, this doesn't + // provide “prediction resistance”. But, in practice, it does. + uint8_t additional_data[32]; + if (!hwrand(additional_data, sizeof(additional_data))) { + // Without a hardware RNG to save us from address-space duplication, the OS + // entropy is used. This can be expensive (one read per |RAND_bytes| call) + // and so can be disabled by applications that we have ensured don't fork + // and aren't at risk of VM cloning. + if (!rand_fork_unsafe_buffering_enabled()) { + CRYPTO_sysrand(additional_data, sizeof(additional_data)); + } else { + OPENSSL_memset(additional_data, 0, sizeof(additional_data)); + } + } + + for (size_t i = 0; i < sizeof(additional_data); i++) { + additional_data[i] ^= user_additional_data[i]; + } + + struct rand_thread_state stack_state; + struct rand_thread_state *state = + CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_RAND); + + if (state == NULL) { + state = OPENSSL_malloc(sizeof(struct rand_thread_state)); + if (state == NULL || + !CRYPTO_set_thread_local(OPENSSL_THREAD_LOCAL_RAND, state, + rand_thread_state_free)) { + // If the system is out of memory, use an ephemeral state on the + // stack. 
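+      // (The stack copy is wiped with |CTR_DRBG_clear| before this function
+      // returns.)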
+ state = &stack_state; + } + + state->last_block_valid = 0; + uint8_t seed[CTR_DRBG_ENTROPY_LEN]; + rand_get_seed(state, seed); + if (!CTR_DRBG_init(&state->drbg, seed, NULL, 0)) { + abort(); + } + state->calls = 0; + +#if defined(BORINGSSL_FIPS) + if (state != &stack_state) { + CRYPTO_STATIC_MUTEX_lock_write(thread_states_list_lock_bss_get()); + struct rand_thread_state **states_list = thread_states_list_bss_get(); + state->next = *states_list; + if (state->next != NULL) { + state->next->prev = state; + } + state->prev = NULL; + *states_list = state; + CRYPTO_STATIC_MUTEX_unlock_write(thread_states_list_lock_bss_get()); + } +#endif + } + + if (state->calls >= kReseedInterval) { + uint8_t seed[CTR_DRBG_ENTROPY_LEN]; + rand_get_seed(state, seed); +#if defined(BORINGSSL_FIPS) + // Take a read lock around accesses to |state->drbg|. This is needed to + // avoid returning bad entropy if we race with + // |rand_thread_state_clear_all|. + // + // This lock must be taken after any calls to |CRYPTO_sysrand| to avoid a + // bug on ppc64le. glibc may implement pthread locks by wrapping user code + // in a hardware transaction, but, on some older versions of glibc and the + // kernel, syscalls made with |syscall| did not abort the transaction. + CRYPTO_STATIC_MUTEX_lock_read(thread_states_list_lock_bss_get()); +#endif + if (!CTR_DRBG_reseed(&state->drbg, seed, NULL, 0)) { + abort(); + } + state->calls = 0; + } else { +#if defined(BORINGSSL_FIPS) + CRYPTO_STATIC_MUTEX_lock_read(thread_states_list_lock_bss_get()); +#endif + } + + int first_call = 1; + while (out_len > 0) { + size_t todo = out_len; + if (todo > CTR_DRBG_MAX_GENERATE_LENGTH) { + todo = CTR_DRBG_MAX_GENERATE_LENGTH; + } + + if (!CTR_DRBG_generate(&state->drbg, out, todo, additional_data, + first_call ? sizeof(additional_data) : 0)) { + abort(); + } + + out += todo; + out_len -= todo; + state->calls++; + first_call = 0; + } + + if (state == &stack_state) { + CTR_DRBG_clear(&state->drbg); + } + +#if defined(BORINGSSL_FIPS) + CRYPTO_STATIC_MUTEX_unlock_read(thread_states_list_lock_bss_get()); +#endif +} + +int RAND_bytes(uint8_t *out, size_t out_len) { + static const uint8_t kZeroAdditionalData[32] = {0}; + RAND_bytes_with_additional_data(out, out_len, kZeroAdditionalData); + return 1; +} + +int RAND_pseudo_bytes(uint8_t *buf, size_t len) { + return RAND_bytes(buf, len); +} diff --git a/Sources/BoringSSL/crypto/fipsmodule/rand/urandom.c b/Sources/BoringSSL/crypto/fipsmodule/rand/urandom.c new file mode 100644 index 000000000..d2be7199a --- /dev/null +++ b/Sources/BoringSSL/crypto/fipsmodule/rand/urandom.c @@ -0,0 +1,302 @@ +/* Copyright (c) 2014, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#if !defined(_GNU_SOURCE) +#define _GNU_SOURCE // needed for syscall() on Linux. 
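+// (The macro has to be visible before the first libc header is included so
+// that <unistd.h> declares syscall().)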
+#endif + +#include + +#if !defined(OPENSSL_WINDOWS) && !defined(OPENSSL_FUCHSIA) && \ + !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) && !defined(OPENSSL_TRUSTY) + +#include +#include +#include +#include +#include +#include + +#if defined(OPENSSL_LINUX) +#if defined(BORINGSSL_FIPS) +#include +#include +#endif +#include +#endif + +#include +#include + +#include "internal.h" +#include "../delocate.h" +#include "../../internal.h" + + +#if defined(OPENSSL_LINUX) + +#if defined(OPENSSL_X86_64) +#define EXPECTED_NR_getrandom 318 +#elif defined(OPENSSL_X86) +#define EXPECTED_NR_getrandom 355 +#elif defined(OPENSSL_AARCH64) +#define EXPECTED_NR_getrandom 278 +#elif defined(OPENSSL_ARM) +#define EXPECTED_NR_getrandom 384 +#elif defined(OPENSSL_PPC64LE) +#define EXPECTED_NR_getrandom 359 +#endif + +#if defined(EXPECTED_NR_getrandom) +#define USE_NR_getrandom + +#if defined(__NR_getrandom) + +#if __NR_getrandom != EXPECTED_NR_getrandom +#error "system call number for getrandom is not the expected value" +#endif + +#else // __NR_getrandom + +#define __NR_getrandom EXPECTED_NR_getrandom + +#endif // __NR_getrandom + +#endif // EXPECTED_NR_getrandom + +#if !defined(GRND_NONBLOCK) +#define GRND_NONBLOCK 1 +#endif + +#endif // OPENSSL_LINUX + +// rand_lock is used to protect the |*_requested| variables. +DEFINE_STATIC_MUTEX(rand_lock); + +// The following constants are magic values of |urandom_fd|. +static const int kUnset = 0; +static const int kHaveGetrandom = -3; + +// urandom_fd_requested is set by |RAND_set_urandom_fd|. It's protected by +// |rand_lock|. +DEFINE_BSS_GET(int, urandom_fd_requested); + +// urandom_fd is a file descriptor to /dev/urandom. It's protected by |once|. +DEFINE_BSS_GET(int, urandom_fd); + +DEFINE_STATIC_ONCE(rand_once); + +#if defined(USE_NR_getrandom) || defined(BORINGSSL_FIPS) +// message writes |msg| to stderr. We use this because referencing |stderr| +// with |fprintf| generates relocations, which is a problem inside the FIPS +// module. +static void message(const char *msg) { + ssize_t r; + do { + r = write(2, msg, strlen(msg)); + } while (r == -1 && errno == EINTR); +} +#endif + +// init_once initializes the state of this module to values previously +// requested. This is the only function that modifies |urandom_fd| and +// |urandom_buffering|, whose values may be read safely after calling the +// once. +static void init_once(void) { + CRYPTO_STATIC_MUTEX_lock_read(rand_lock_bss_get()); + int fd = *urandom_fd_requested_bss_get(); + CRYPTO_STATIC_MUTEX_unlock_read(rand_lock_bss_get()); + +#if defined(USE_NR_getrandom) + uint8_t dummy; + long getrandom_ret = + syscall(__NR_getrandom, &dummy, sizeof(dummy), GRND_NONBLOCK); + + if (getrandom_ret == 1) { + *urandom_fd_bss_get() = kHaveGetrandom; + return; + } else if (getrandom_ret == -1 && errno == EAGAIN) { + message( + "getrandom indicates that the entropy pool has not been initialized. 
" + "Rather than continue with poor entropy, this process will block until " + "entropy is available.\n"); + + do { + getrandom_ret = + syscall(__NR_getrandom, &dummy, sizeof(dummy), 0 /* no flags */); + } while (getrandom_ret == -1 && errno == EINTR); + + if (getrandom_ret == 1) { + *urandom_fd_bss_get() = kHaveGetrandom; + return; + } + } +#endif // USE_NR_getrandom + + if (fd == kUnset) { + do { + fd = open("/dev/urandom", O_RDONLY); + } while (fd == -1 && errno == EINTR); + } + + if (fd < 0) { + abort(); + } + + assert(kUnset == 0); + if (fd == kUnset) { + // Because we want to keep |urandom_fd| in the BSS, we have to initialise + // it to zero. But zero is a valid file descriptor too. Thus if open + // returns zero for /dev/urandom, we dup it to get a non-zero number. + fd = dup(fd); + close(kUnset); + + if (fd <= 0) { + abort(); + } + } + +#if defined(BORINGSSL_FIPS) + // In FIPS mode we ensure that the kernel has sufficient entropy before + // continuing. This is automatically handled by getrandom, which requires + // that the entropy pool has been initialised, but for urandom we have to + // poll. + for (;;) { + int entropy_bits; + if (ioctl(fd, RNDGETENTCNT, &entropy_bits)) { + message( + "RNDGETENTCNT on /dev/urandom failed. We cannot continue in this " + "case when in FIPS mode.\n"); + abort(); + } + + static const int kBitsNeeded = 256; + if (entropy_bits >= kBitsNeeded) { + break; + } + + usleep(250000); + } +#endif + + int flags = fcntl(fd, F_GETFD); + if (flags == -1) { + // Native Client doesn't implement |fcntl|. + if (errno != ENOSYS) { + abort(); + } + } else { + flags |= FD_CLOEXEC; + if (fcntl(fd, F_SETFD, flags) == -1) { + abort(); + } + } + *urandom_fd_bss_get() = fd; +} + +void RAND_set_urandom_fd(int fd) { + fd = dup(fd); + if (fd < 0) { + abort(); + } + + assert(kUnset == 0); + if (fd == kUnset) { + // Because we want to keep |urandom_fd| in the BSS, we have to initialise + // it to zero. But zero is a valid file descriptor too. Thus if dup + // returned zero we dup it again to get a non-zero number. + fd = dup(fd); + close(kUnset); + + if (fd <= 0) { + abort(); + } + } + + CRYPTO_STATIC_MUTEX_lock_write(rand_lock_bss_get()); + *urandom_fd_requested_bss_get() = fd; + CRYPTO_STATIC_MUTEX_unlock_write(rand_lock_bss_get()); + + CRYPTO_once(rand_once_bss_get(), init_once); + if (*urandom_fd_bss_get() == kHaveGetrandom) { + close(fd); + } else if (*urandom_fd_bss_get() != fd) { + abort(); // Already initialized. + } +} + +#if defined(USE_NR_getrandom) && defined(OPENSSL_MSAN) +void __msan_unpoison(void *, size_t); +#endif + +// fill_with_entropy writes |len| bytes of entropy into |out|. It returns one +// on success and zero on error. +static char fill_with_entropy(uint8_t *out, size_t len) { + while (len > 0) { + ssize_t r; + + if (*urandom_fd_bss_get() == kHaveGetrandom) { +#if defined(USE_NR_getrandom) + do { + r = syscall(__NR_getrandom, out, len, 0 /* no flags */); + } while (r == -1 && errno == EINTR); + +#if defined(OPENSSL_MSAN) + if (r > 0) { + // MSAN doesn't recognise |syscall| and thus doesn't notice that we + // have initialised the output buffer. + __msan_unpoison(out, r); + } +#endif // OPENSSL_MSAN + +#else // USE_NR_getrandom + abort(); +#endif + } else { + do { + r = read(*urandom_fd_bss_get(), out, len); + } while (r == -1 && errno == EINTR); + } + + if (r <= 0) { + return 0; + } + out += r; + len -= r; + } + + return 1; +} + +// CRYPTO_sysrand puts |requested| random bytes into |out|. 
+void CRYPTO_sysrand(uint8_t *out, size_t requested) { + if (requested == 0) { + return; + } + + CRYPTO_once(rand_once_bss_get(), init_once); + + if (!fill_with_entropy(out, requested)) { + abort(); + } + +#if defined(BORINGSSL_FIPS_BREAK_CRNG) + // This breaks the "continuous random number generator test" defined in FIPS + // 140-2, section 4.9.2, and implemented in rand_get_seed(). + OPENSSL_memset(out, 0, requested); +#endif +} + +#endif /* !OPENSSL_WINDOWS && !defined(OPENSSL_FUCHSIA) && \ + !BORINGSSL_UNSAFE_DETERMINISTIC_MODE && !OPENSSL_TRUSTY */ diff --git a/Sources/BoringSSL/crypto/rsa/blinding.c b/Sources/BoringSSL/crypto/fipsmodule/rsa/blinding.c similarity index 89% rename from Sources/BoringSSL/crypto/rsa/blinding.c rename to Sources/BoringSSL/crypto/fipsmodule/rsa/blinding.c index 693dced3f..d95605741 100644 --- a/Sources/BoringSSL/crypto/rsa/blinding.c +++ b/Sources/BoringSSL/crypto/fipsmodule/rsa/blinding.c @@ -115,14 +115,14 @@ #include #include "internal.h" -#include "../internal.h" +#include "../../internal.h" #define BN_BLINDING_COUNTER 32 struct bn_blinding_st { - BIGNUM *A; /* The base blinding factor, Montgomery-encoded. */ - BIGNUM *Ai; /* The inverse of the blinding factor, Montgomery-encoded. */ + BIGNUM *A; // The base blinding factor, Montgomery-encoded. + BIGNUM *Ai; // The inverse of the blinding factor, Montgomery-encoded. unsigned counter; }; @@ -147,7 +147,7 @@ BN_BLINDING *BN_BLINDING_new(void) { goto err; } - /* The blinding values need to be created before this blinding can be used. */ + // The blinding values need to be created before this blinding can be used. ret->counter = BN_BLINDING_COUNTER - 1; return ret; @@ -170,7 +170,7 @@ void BN_BLINDING_free(BN_BLINDING *r) { static int bn_blinding_update(BN_BLINDING *b, const BIGNUM *e, const BN_MONT_CTX *mont, BN_CTX *ctx) { if (++b->counter == BN_BLINDING_COUNTER) { - /* re-create blinding parameters */ + // re-create blinding parameters if (!bn_blinding_create_param(b, e, mont, ctx)) { goto err; } @@ -185,10 +185,10 @@ static int bn_blinding_update(BN_BLINDING *b, const BIGNUM *e, return 1; err: - /* |A| and |Ai| may be in an inconsistent state so they both need to be - * replaced the next time this blinding is used. Note that this is only - * sufficient because support for |BN_BLINDING_NO_UPDATE| and - * |BN_BLINDING_NO_RECREATE| was previously dropped. */ + // |A| and |Ai| may be in an inconsistent state so they both need to be + // replaced the next time this blinding is used. Note that this is only + // sufficient because support for |BN_BLINDING_NO_UPDATE| and + // |BN_BLINDING_NO_RECREATE| was previously dropped. b->counter = BN_BLINDING_COUNTER - 1; return 0; @@ -196,9 +196,8 @@ static int bn_blinding_update(BN_BLINDING *b, const BIGNUM *e, int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, const BIGNUM *e, const BN_MONT_CTX *mont, BN_CTX *ctx) { - /* |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery| - * cancels one Montgomery factor, so the resulting value of |n| is unencoded. - */ + // |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery| + // cancels one Montgomery factor, so the resulting value of |n| is unencoded. if (!bn_blinding_update(b, e, mont, ctx) || !BN_mod_mul_montgomery(n, n, b->A, mont, ctx)) { return 0; @@ -209,9 +208,8 @@ int BN_BLINDING_convert(BIGNUM *n, BN_BLINDING *b, const BIGNUM *e, int BN_BLINDING_invert(BIGNUM *n, const BN_BLINDING *b, BN_MONT_CTX *mont, BN_CTX *ctx) { - /* |n| is not Montgomery-encoded and |b->A| is. 
|BN_mod_mul_montgomery| - * cancels one Montgomery factor, so the resulting value of |n| is unencoded. - */ + // |n| is not Montgomery-encoded and |b->A| is. |BN_mod_mul_montgomery| + // cancels one Montgomery factor, so the resulting value of |n| is unencoded. return BN_mod_mul_montgomery(n, n, b->Ai, mont, ctx); } @@ -225,8 +223,8 @@ static int bn_blinding_create_param(BN_BLINDING *b, const BIGNUM *e, return 0; } - /* |BN_from_montgomery| + |BN_mod_inverse_blinded| is equivalent to, but - * more efficient than, |BN_mod_inverse_blinded| + |BN_to_montgomery|. */ + // |BN_from_montgomery| + |BN_mod_inverse_blinded| is equivalent to, but + // more efficient than, |BN_mod_inverse_blinded| + |BN_to_montgomery|. if (!BN_from_montgomery(b->Ai, b->A, mont, ctx)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); return 0; @@ -242,8 +240,8 @@ static int bn_blinding_create_param(BN_BLINDING *b, const BIGNUM *e, return 0; } - /* For reasonably-sized RSA keys, it should almost never be the case that a - * random value doesn't have an inverse. */ + // For reasonably-sized RSA keys, it should almost never be the case that a + // random value doesn't have an inverse. if (retry_counter-- == 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_TOO_MANY_ITERATIONS); return 0; diff --git a/Sources/BoringSSL/crypto/rsa/internal.h b/Sources/BoringSSL/crypto/fipsmodule/rsa/internal.h similarity index 62% rename from Sources/BoringSSL/crypto/rsa/internal.h rename to Sources/BoringSSL/crypto/fipsmodule/rsa/internal.h index c6ea97f09..0f0c763f5 100644 --- a/Sources/BoringSSL/crypto/rsa/internal.h +++ b/Sources/BoringSSL/crypto/fipsmodule/rsa/internal.h @@ -59,19 +59,19 @@ #include +#include + #if defined(__cplusplus) extern "C" { #endif -/* Default implementations of RSA operations. */ +// Default implementations of RSA operations. 
-extern const RSA_METHOD RSA_default_method; +const RSA_METHOD *RSA_default_method(void); size_t rsa_default_size(const RSA *rsa); -int rsa_default_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, - const uint8_t *in, size_t in_len, int padding); int rsa_default_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); @@ -79,12 +79,6 @@ int rsa_default_decrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len); -int rsa_default_multi_prime_keygen(RSA *rsa, int bits, int num_primes, - BIGNUM *e_value, BN_GENCB *cb); -int rsa_default_keygen(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb); - - -#define RSA_PKCS1_PADDING_SIZE 11 BN_BLINDING *BN_BLINDING_new(void); @@ -95,54 +89,43 @@ int BN_BLINDING_invert(BIGNUM *n, const BN_BLINDING *b, BN_MONT_CTX *mont_ctx, BN_CTX *ctx); -int RSA_padding_add_PKCS1_type_1(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len); -int RSA_padding_check_PKCS1_type_1(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len); -int RSA_padding_add_PKCS1_type_2(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len); -int RSA_padding_check_PKCS1_type_2(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len); -int RSA_padding_add_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len, - const uint8_t *param, unsigned plen, - const EVP_MD *md, const EVP_MD *mgf1md); -int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len, - const uint8_t *param, unsigned plen, - const EVP_MD *md, const EVP_MD *mgf1md); -int RSA_padding_add_none(uint8_t *to, unsigned to_len, const uint8_t *from, - unsigned from_len); - -/* RSA_private_transform calls either the method-specific |private_transform| - * function (if given) or the generic one. See the comment for - * |private_transform| in |rsa_meth_st|. */ +int RSA_padding_add_PKCS1_type_1(uint8_t *to, size_t to_len, + const uint8_t *from, size_t from_len); +int RSA_padding_check_PKCS1_type_1(uint8_t *out, size_t *out_len, + size_t max_out, const uint8_t *from, + size_t from_len); +int RSA_padding_add_PKCS1_type_2(uint8_t *to, size_t to_len, + const uint8_t *from, size_t from_len); +int RSA_padding_check_PKCS1_type_2(uint8_t *out, size_t *out_len, + size_t max_out, const uint8_t *from, + size_t from_len); +int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *out, size_t *out_len, + size_t max_out, const uint8_t *from, + size_t from_len, const uint8_t *param, + size_t param_len, const EVP_MD *md, + const EVP_MD *mgf1md); +int RSA_padding_add_none(uint8_t *to, size_t to_len, const uint8_t *from, + size_t from_len); + +// RSA_private_transform calls either the method-specific |private_transform| +// function (if given) or the generic one. See the comment for +// |private_transform| in |rsa_meth_st|. int RSA_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len); -/* RSA_additional_prime contains information about the third, forth etc prime - * in a multi-prime RSA key. */ -typedef struct RSA_additional_prime_st { - BIGNUM *prime; - /* exp is d^{prime-1} mod prime */ - BIGNUM *exp; - /* coeff is such that r×coeff ≡ 1 mod prime. */ - BIGNUM *coeff; - - /* Values below here are not in the ASN.1 serialisation. 
*/ +// The following utility functions are exported for test purposes. - /* r is the product of all primes (including p and q) prior to this one. */ - BIGNUM *r; - /* mont is a |BN_MONT_CTX| modulo |prime|. */ - BN_MONT_CTX *mont; -} RSA_additional_prime; +extern const BN_ULONG kBoringSSLRSASqrtTwo[]; +extern const size_t kBoringSSLRSASqrtTwoLen; -void RSA_additional_prime_free(RSA_additional_prime *ap); +// rsa_greater_than_pow2 returns one if |b| is greater than 2^|n| and zero +// otherwise. +int rsa_greater_than_pow2(const BIGNUM *b, int n); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_RSA_INTERNAL_H */ +#endif // OPENSSL_HEADER_RSA_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/rsa/padding.c b/Sources/BoringSSL/crypto/fipsmodule/rsa/padding.c similarity index 59% rename from Sources/BoringSSL/crypto/rsa/padding.c rename to Sources/BoringSSL/crypto/fipsmodule/rsa/padding.c index 3ed19adc1..9d88dba79 100644 --- a/Sources/BoringSSL/crypto/rsa/padding.c +++ b/Sources/BoringSSL/crypto/fipsmodule/rsa/padding.c @@ -67,206 +67,199 @@ #include #include "internal.h" -#include "../internal.h" +#include "../../internal.h" -/* TODO(fork): don't the check functions have to be constant time? */ -int RSA_padding_add_PKCS1_type_1(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len) { - unsigned j; +#define RSA_PKCS1_PADDING_SIZE 11 +int RSA_padding_add_PKCS1_type_1(uint8_t *to, size_t to_len, + const uint8_t *from, size_t from_len) { + // See RFC 8017, section 9.2. if (to_len < RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } if (from_len > to_len - RSA_PKCS1_PADDING_SIZE) { - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); + OPENSSL_PUT_ERROR(RSA, RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY); return 0; } - uint8_t *p = to; - - *(p++) = 0; - *(p++) = 1; /* Private Key BT (Block Type) */ - - /* pad out with 0xff data */ - j = to_len - 3 - from_len; - OPENSSL_memset(p, 0xff, j); - p += j; - *(p++) = 0; - OPENSSL_memcpy(p, from, from_len); + to[0] = 0; + to[1] = 1; + OPENSSL_memset(to + 2, 0xff, to_len - 3 - from_len); + to[to_len - from_len - 1] = 0; + OPENSSL_memcpy(to + to_len - from_len, from, from_len); return 1; } -int RSA_padding_check_PKCS1_type_1(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len) { - unsigned i, j; - const uint8_t *p; - +int RSA_padding_check_PKCS1_type_1(uint8_t *out, size_t *out_len, + size_t max_out, const uint8_t *from, + size_t from_len) { + // See RFC 8017, section 9.2. This is part of signature verification and thus + // does not need to run in constant-time. if (from_len < 2) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_SMALL); - return -1; + return 0; } - p = from; - if ((*(p++) != 0) || (*(p++) != 1)) { + // Check the header. + if (from[0] != 0 || from[1] != 1) { OPENSSL_PUT_ERROR(RSA, RSA_R_BLOCK_TYPE_IS_NOT_01); - return -1; - } - - /* scan over padding data */ - j = from_len - 2; /* one for leading 00, one for type. */ - for (i = 0; i < j; i++) { - /* should decrypt to 0xff */ - if (*p != 0xff) { - if (*p == 0) { - p++; - break; - } else { - OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_FIXED_HEADER_DECRYPT); - return -1; - } + return 0; + } + + // Scan over padded data, looking for the 00. 
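+  // A valid block has the form 00 01 FF .. FF 00 || data, so every byte
+  // between the header and the 00 separator must be 0xff.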
+ size_t pad; + for (pad = 2 /* header */; pad < from_len; pad++) { + if (from[pad] == 0x00) { + break; + } + + if (from[pad] != 0xff) { + OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_FIXED_HEADER_DECRYPT); + return 0; } - p++; } - if (i == j) { + if (pad == from_len) { OPENSSL_PUT_ERROR(RSA, RSA_R_NULL_BEFORE_BLOCK_MISSING); - return -1; + return 0; } - if (i < 8) { + if (pad < 2 /* header */ + 8) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_PAD_BYTE_COUNT); - return -1; + return 0; } - i++; /* Skip over the '\0' */ - j -= i; - if (j > to_len) { + + // Skip over the 00. + pad++; + + if (from_len - pad > max_out) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); - return -1; + return 0; } - OPENSSL_memcpy(to, p, j); - return j; + OPENSSL_memcpy(out, from + pad, from_len - pad); + *out_len = from_len - pad; + return 1; } -int RSA_padding_add_PKCS1_type_2(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len) { - unsigned i, j; +static int rand_nonzero(uint8_t *out, size_t len) { + if (!RAND_bytes(out, len)) { + return 0; + } + + for (size_t i = 0; i < len; i++) { + while (out[i] == 0) { + if (!RAND_bytes(out + i, 1)) { + return 0; + } + } + } + + return 1; +} +int RSA_padding_add_PKCS1_type_2(uint8_t *to, size_t to_len, + const uint8_t *from, size_t from_len) { + // See RFC 8017, section 7.2.1. if (to_len < RSA_PKCS1_PADDING_SIZE) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } if (from_len > to_len - RSA_PKCS1_PADDING_SIZE) { - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); + OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); return 0; } - uint8_t *p = to; - - *(p++) = 0; - *(p++) = 2; /* Public Key BT (Block Type) */ - - /* pad out with non-zero random data */ - j = to_len - 3 - from_len; + to[0] = 0; + to[1] = 2; - if (!RAND_bytes(p, j)) { + size_t padding_len = to_len - 3 - from_len; + if (!rand_nonzero(to + 2, padding_len)) { return 0; } - for (i = 0; i < j; i++) { - while (*p == 0) { - if (!RAND_bytes(p, 1)) { - return 0; - } - } - p++; - } - - *(p++) = 0; - - OPENSSL_memcpy(p, from, from_len); + to[2 + padding_len] = 0; + OPENSSL_memcpy(to + to_len - from_len, from, from_len); return 1; } -int RSA_padding_check_PKCS1_type_2(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len) { +int RSA_padding_check_PKCS1_type_2(uint8_t *out, size_t *out_len, + size_t max_out, const uint8_t *from, + size_t from_len) { if (from_len == 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_EMPTY_PUBLIC_KEY); - return -1; + return 0; } - /* PKCS#1 v1.5 decryption. See "PKCS #1 v2.2: RSA Cryptography - * Standard", section 7.2.2. */ + // PKCS#1 v1.5 decryption. See "PKCS #1 v2.2: RSA Cryptography + // Standard", section 7.2.2. if (from_len < RSA_PKCS1_PADDING_SIZE) { - /* |from| is zero-padded to the size of the RSA modulus, a public value, so - * this can be rejected in non-constant time. */ + // |from| is zero-padded to the size of the RSA modulus, a public value, so + // this can be rejected in non-constant time. 
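+    // (A conforming input is at least eleven bytes long: the 00 02 header,
+    // eight or more padding bytes and the 00 separator.)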
OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); - return -1; + return 0; } - unsigned first_byte_is_zero = constant_time_eq(from[0], 0); - unsigned second_byte_is_two = constant_time_eq(from[1], 2); + crypto_word_t first_byte_is_zero = constant_time_eq_w(from[0], 0); + crypto_word_t second_byte_is_two = constant_time_eq_w(from[1], 2); - unsigned i, zero_index = 0, looking_for_index = ~0u; - for (i = 2; i < from_len; i++) { - unsigned equals0 = constant_time_is_zero(from[i]); - zero_index = constant_time_select(looking_for_index & equals0, (unsigned)i, - zero_index); - looking_for_index = constant_time_select(equals0, 0, looking_for_index); + crypto_word_t zero_index = 0, looking_for_index = CONSTTIME_TRUE_W; + for (size_t i = 2; i < from_len; i++) { + crypto_word_t equals0 = constant_time_is_zero_w(from[i]); + zero_index = + constant_time_select_w(looking_for_index & equals0, i, zero_index); + looking_for_index = constant_time_select_w(equals0, 0, looking_for_index); } - /* The input must begin with 00 02. */ - unsigned valid_index = first_byte_is_zero; + // The input must begin with 00 02. + crypto_word_t valid_index = first_byte_is_zero; valid_index &= second_byte_is_two; - /* We must have found the end of PS. */ + // We must have found the end of PS. valid_index &= ~looking_for_index; - /* PS must be at least 8 bytes long, and it starts two bytes into |from|. */ - valid_index &= constant_time_ge(zero_index, 2 + 8); + // PS must be at least 8 bytes long, and it starts two bytes into |from|. + valid_index &= constant_time_ge_w(zero_index, 2 + 8); - /* Skip the zero byte. */ + // Skip the zero byte. zero_index++; - /* NOTE: Although this logic attempts to be constant time, the API contracts - * of this function and |RSA_decrypt| with |RSA_PKCS1_PADDING| make it - * impossible to completely avoid Bleichenbacher's attack. Consumers should - * use |RSA_unpad_key_pkcs1|. */ + // NOTE: Although this logic attempts to be constant time, the API contracts + // of this function and |RSA_decrypt| with |RSA_PKCS1_PADDING| make it + // impossible to completely avoid Bleichenbacher's attack. Consumers should + // use |RSA_PADDING_NONE| and perform the padding check in constant-time + // combined with a swap to a random session key or other mitigation. if (!valid_index) { OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR); - return -1; + return 0; } - const unsigned msg_len = from_len - zero_index; - if (msg_len > to_len) { - /* This shouldn't happen because this function is always called with - * |to_len| as the key size and |from_len| is bounded by the key size. */ + const size_t msg_len = from_len - zero_index; + if (msg_len > max_out) { + // This shouldn't happen because this function is always called with + // |max_out| as the key size and |from_len| is bounded by the key size. 
OPENSSL_PUT_ERROR(RSA, RSA_R_PKCS_DECODING_ERROR); - return -1; - } - - if (msg_len > INT_MAX) { - OPENSSL_PUT_ERROR(RSA, ERR_R_OVERFLOW); - return -1; + return 0; } - OPENSSL_memcpy(to, &from[zero_index], msg_len); - return (int)msg_len; + OPENSSL_memcpy(out, &from[zero_index], msg_len); + *out_len = msg_len; + return 1; } -int RSA_padding_add_none(uint8_t *to, unsigned to_len, const uint8_t *from, - unsigned from_len) { +int RSA_padding_add_none(uint8_t *to, size_t to_len, const uint8_t *from, + size_t from_len) { if (from_len > to_len) { - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); + OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); return 0; } if (from_len < to_len) { - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_SMALL_FOR_KEY_SIZE); + OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_SMALL); return 0; } @@ -317,15 +310,10 @@ static int PKCS1_MGF1(uint8_t *out, size_t len, const uint8_t *seed, return ret; } -int RSA_padding_add_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len, - const uint8_t *param, unsigned param_len, +int RSA_padding_add_PKCS1_OAEP_mgf1(uint8_t *to, size_t to_len, + const uint8_t *from, size_t from_len, + const uint8_t *param, size_t param_len, const EVP_MD *md, const EVP_MD *mgf1md) { - unsigned i, emlen, mdlen; - uint8_t *db, *seed; - uint8_t *dbmask = NULL, seedmask[EVP_MAX_MD_SIZE]; - int ret = 0; - if (md == NULL) { md = EVP_sha1(); } @@ -333,16 +321,16 @@ int RSA_padding_add_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, mgf1md = md; } - mdlen = EVP_MD_size(md); + size_t mdlen = EVP_MD_size(md); if (to_len < 2 * mdlen + 2) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; } - emlen = to_len - 1; + size_t emlen = to_len - 1; if (from_len > emlen - 2 * mdlen - 1) { - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); + OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); return 0; } @@ -352,8 +340,8 @@ int RSA_padding_add_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, } to[0] = 0; - seed = to + 1; - db = to + mdlen + 1; + uint8_t *seed = to + 1; + uint8_t *db = to + mdlen + 1; if (!EVP_Digest(param, param_len, db, NULL, md, NULL)) { return 0; @@ -365,23 +353,25 @@ int RSA_padding_add_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, return 0; } - dbmask = OPENSSL_malloc(emlen - mdlen); + uint8_t *dbmask = OPENSSL_malloc(emlen - mdlen); if (dbmask == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); return 0; } + int ret = 0; if (!PKCS1_MGF1(dbmask, emlen - mdlen, seed, mdlen, mgf1md)) { goto out; } - for (i = 0; i < emlen - mdlen; i++) { + for (size_t i = 0; i < emlen - mdlen; i++) { db[i] ^= dbmask[i]; } + uint8_t seedmask[EVP_MAX_MD_SIZE]; if (!PKCS1_MGF1(seedmask, mdlen, db, emlen - mdlen, mgf1md)) { goto out; } - for (i = 0; i < mdlen; i++) { + for (size_t i = 0; i < mdlen; i++) { seed[i] ^= seedmask[i]; } ret = 1; @@ -391,13 +381,12 @@ int RSA_padding_add_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, return ret; } -int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, - const uint8_t *from, unsigned from_len, - const uint8_t *param, unsigned param_len, - const EVP_MD *md, const EVP_MD *mgf1md) { - unsigned i, dblen, mlen = -1, mdlen, bad, looking_for_one_byte, one_index = 0; - const uint8_t *maskeddb, *maskedseed; - uint8_t *db = NULL, seed[EVP_MAX_MD_SIZE], phash[EVP_MAX_MD_SIZE]; +int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *out, size_t *out_len, + size_t max_out, const uint8_t *from, + size_t from_len, const uint8_t *param, + size_t param_len, const EVP_MD *md, + const EVP_MD *mgf1md) { + uint8_t 
*db = NULL; if (md == NULL) { md = EVP_sha1(); @@ -406,56 +395,59 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, mgf1md = md; } - mdlen = EVP_MD_size(md); + size_t mdlen = EVP_MD_size(md); - /* The encoded message is one byte smaller than the modulus to ensure that it - * doesn't end up greater than the modulus. Thus there's an extra "+1" here - * compared to https://tools.ietf.org/html/rfc2437#section-9.1.1.2. */ + // The encoded message is one byte smaller than the modulus to ensure that it + // doesn't end up greater than the modulus. Thus there's an extra "+1" here + // compared to https://tools.ietf.org/html/rfc2437#section-9.1.1.2. if (from_len < 1 + 2*mdlen + 1) { - /* 'from_len' is the length of the modulus, i.e. does not depend on the - * particular ciphertext. */ + // 'from_len' is the length of the modulus, i.e. does not depend on the + // particular ciphertext. goto decoding_err; } - dblen = from_len - mdlen - 1; + size_t dblen = from_len - mdlen - 1; db = OPENSSL_malloc(dblen); if (db == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); goto err; } - maskedseed = from + 1; - maskeddb = from + 1 + mdlen; + const uint8_t *maskedseed = from + 1; + const uint8_t *maskeddb = from + 1 + mdlen; + uint8_t seed[EVP_MAX_MD_SIZE]; if (!PKCS1_MGF1(seed, mdlen, maskeddb, dblen, mgf1md)) { goto err; } - for (i = 0; i < mdlen; i++) { + for (size_t i = 0; i < mdlen; i++) { seed[i] ^= maskedseed[i]; } if (!PKCS1_MGF1(db, dblen, seed, mdlen, mgf1md)) { goto err; } - for (i = 0; i < dblen; i++) { + for (size_t i = 0; i < dblen; i++) { db[i] ^= maskeddb[i]; } + uint8_t phash[EVP_MAX_MD_SIZE]; if (!EVP_Digest(param, param_len, phash, NULL, md, NULL)) { goto err; } - bad = ~constant_time_is_zero(CRYPTO_memcmp(db, phash, mdlen)); - bad |= ~constant_time_is_zero(from[0]); + crypto_word_t bad = ~constant_time_is_zero_w(CRYPTO_memcmp(db, phash, mdlen)); + bad |= ~constant_time_is_zero_w(from[0]); - looking_for_one_byte = ~0u; - for (i = mdlen; i < dblen; i++) { - unsigned equals1 = constant_time_eq(db[i], 1); - unsigned equals0 = constant_time_eq(db[i], 0); - one_index = constant_time_select(looking_for_one_byte & equals1, i, - one_index); + crypto_word_t looking_for_one_byte = CONSTTIME_TRUE_W; + size_t one_index = 0; + for (size_t i = mdlen; i < dblen; i++) { + crypto_word_t equals1 = constant_time_eq_w(db[i], 1); + crypto_word_t equals0 = constant_time_eq_w(db[i], 0); + one_index = + constant_time_select_w(looking_for_one_byte & equals1, i, one_index); looking_for_one_byte = - constant_time_select(equals1, 0, looking_for_one_byte); + constant_time_select_w(equals1, 0, looking_for_one_byte); bad |= looking_for_one_byte & ~equals0; } @@ -466,27 +458,27 @@ int RSA_padding_check_PKCS1_OAEP_mgf1(uint8_t *to, unsigned to_len, } one_index++; - mlen = dblen - one_index; - if (to_len < mlen) { + size_t mlen = dblen - one_index; + if (max_out < mlen) { OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); - mlen = -1; - } else { - OPENSSL_memcpy(to, db + one_index, mlen); + goto err; } + OPENSSL_memcpy(out, db + one_index, mlen); + *out_len = mlen; OPENSSL_free(db); - return mlen; + return 1; decoding_err: - /* to avoid chosen ciphertext attacks, the error message should not reveal - * which kind of decoding error happened */ + // to avoid chosen ciphertext attacks, the error message should not reveal + // which kind of decoding error happened OPENSSL_PUT_ERROR(RSA, RSA_R_OAEP_DECODING_ERROR); err: OPENSSL_free(db); - return -1; + return 0; } -static const unsigned char zeroes[] = 
{0,0,0,0,0,0,0,0}; +static const uint8_t kPSSZeroes[] = {0, 0, 0, 0, 0, 0, 0, 0}; int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const uint8_t *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash, @@ -507,10 +499,10 @@ int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const uint8_t *mHash, hLen = EVP_MD_size(Hash); - /* Negative sLen has special meanings: - * -1 sLen == hLen - * -2 salt length is autorecovered from signature - * -N reserved */ + // Negative sLen has special meanings: + // -1 sLen == hLen + // -2 salt length is autorecovered from signature + // -N reserved if (sLen == -1) { sLen = hLen; } else if (sLen == -2) { @@ -530,8 +522,8 @@ int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const uint8_t *mHash, EM++; emLen--; } - if (emLen < ((int)hLen + sLen + 2)) { - /* sLen can be small negative */ + if (emLen < (int)hLen + 2 || emLen < ((int)hLen + sLen + 2)) { + // sLen can be small negative OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } @@ -567,16 +559,10 @@ int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const uint8_t *mHash, goto err; } if (!EVP_DigestInit_ex(&ctx, Hash, NULL) || - !EVP_DigestUpdate(&ctx, zeroes, sizeof zeroes) || - !EVP_DigestUpdate(&ctx, mHash, hLen)) { - goto err; - } - if (maskedDBLen - i) { - if (!EVP_DigestUpdate(&ctx, DB + i, maskedDBLen - i)) { - goto err; - } - } - if (!EVP_DigestFinal_ex(&ctx, H_, NULL)) { + !EVP_DigestUpdate(&ctx, kPSSZeroes, sizeof(kPSSZeroes)) || + !EVP_DigestUpdate(&ctx, mHash, hLen) || + !EVP_DigestUpdate(&ctx, DB + i, maskedDBLen - i) || + !EVP_DigestFinal_ex(&ctx, H_, NULL)) { goto err; } if (OPENSSL_memcmp(H_, H, hLen)) { @@ -601,7 +587,6 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM, size_t maskedDBLen, MSBits, emLen; size_t hLen; unsigned char *H, *salt = NULL, *p; - EVP_MD_CTX ctx; if (mgf1Hash == NULL) { mgf1Hash = Hash; @@ -623,14 +608,14 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM, } if (emLen < hLen + 2) { - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); + OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } - /* Negative sLenRequested has special meanings: - * -1 sLen == hLen - * -2 salt length is maximized - * -N reserved */ + // Negative sLenRequested has special meanings: + // -1 sLen == hLen + // -2 salt length is maximized + // -N reserved size_t sLen; if (sLenRequested == -1) { sLen = hLen; @@ -644,7 +629,7 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM, } if (emLen - hLen - 2 < sLen) { - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_KEY_SIZE); + OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } @@ -660,30 +645,29 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM, } maskedDBLen = emLen - hLen - 1; H = EM + maskedDBLen; + + EVP_MD_CTX ctx; EVP_MD_CTX_init(&ctx); - if (!EVP_DigestInit_ex(&ctx, Hash, NULL) || - !EVP_DigestUpdate(&ctx, zeroes, sizeof zeroes) || - !EVP_DigestUpdate(&ctx, mHash, hLen)) { - goto err; - } - if (sLen && !EVP_DigestUpdate(&ctx, salt, sLen)) { - goto err; - } - if (!EVP_DigestFinal_ex(&ctx, H, NULL)) { + int digest_ok = EVP_DigestInit_ex(&ctx, Hash, NULL) && + EVP_DigestUpdate(&ctx, kPSSZeroes, sizeof(kPSSZeroes)) && + EVP_DigestUpdate(&ctx, mHash, hLen) && + EVP_DigestUpdate(&ctx, salt, sLen) && + EVP_DigestFinal_ex(&ctx, H, NULL); + EVP_MD_CTX_cleanup(&ctx); + if (!digest_ok) { goto err; } - EVP_MD_CTX_cleanup(&ctx); - /* Generate dbMask in place then perform XOR on it */ + // Generate dbMask in place then perform XOR on it if (!PKCS1_MGF1(EM, maskedDBLen, H, hLen, mgf1Hash)) { goto err; } p = EM; - /* Initial 
PS XORs with all zeroes which is a NOP so just update - * pointer. Note from a test above this value is guaranteed to - * be non-negative. */ + // Initial PS XORs with all zeroes which is a NOP so just update + // pointer. Note from a test above this value is guaranteed to + // be non-negative. p += emLen - sLen - hLen - 2; *p++ ^= 0x1; if (sLen > 0) { @@ -695,7 +679,7 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM, EM[0] &= 0xFF >> (8 - MSBits); } - /* H is already in place so just set final 0xbc */ + // H is already in place so just set final 0xbc EM[emLen - 1] = 0xbc; diff --git a/Sources/BoringSSL/crypto/rsa/rsa.c b/Sources/BoringSSL/crypto/fipsmodule/rsa/rsa.c similarity index 67% rename from Sources/BoringSSL/crypto/rsa/rsa.c rename to Sources/BoringSSL/crypto/fipsmodule/rsa/rsa.c index 731293f84..4a8431419 100644 --- a/Sources/BoringSSL/crypto/rsa/rsa.c +++ b/Sources/BoringSSL/crypto/fipsmodule/rsa/rsa.c @@ -60,18 +60,23 @@ #include #include +#include #include #include #include +#include #include #include +#include #include +#include "../bn/internal.h" +#include "../delocate.h" +#include "../../internal.h" #include "internal.h" -#include "../internal.h" -static CRYPTO_EX_DATA_CLASS g_ex_data_class = CRYPTO_EX_DATA_CLASS_INIT; +DEFINE_STATIC_EX_DATA_CLASS(g_rsa_ex_data_class); RSA *RSA_new(void) { return RSA_new_method(NULL); } @@ -89,7 +94,7 @@ RSA *RSA_new_method(const ENGINE *engine) { } if (rsa->meth == NULL) { - rsa->meth = (RSA_METHOD*) &RSA_default_method; + rsa->meth = (RSA_METHOD *) RSA_default_method(); } METHOD_ref(rsa->meth); @@ -99,7 +104,7 @@ RSA *RSA_new_method(const ENGINE *engine) { CRYPTO_new_ex_data(&rsa->ex_data); if (rsa->meth->init && !rsa->meth->init(rsa)) { - CRYPTO_free_ex_data(&g_ex_data_class, rsa, &rsa->ex_data); + CRYPTO_free_ex_data(g_rsa_ex_data_class_bss_get(), rsa, &rsa->ex_data); CRYPTO_MUTEX_cleanup(&rsa->lock); METHOD_unref(rsa->meth); OPENSSL_free(rsa); @@ -109,19 +114,6 @@ RSA *RSA_new_method(const ENGINE *engine) { return rsa; } -void RSA_additional_prime_free(RSA_additional_prime *ap) { - if (ap == NULL) { - return; - } - - BN_clear_free(ap->prime); - BN_clear_free(ap->exp); - BN_clear_free(ap->coeff); - BN_clear_free(ap->r); - BN_MONT_CTX_free(ap->mont); - OPENSSL_free(ap); -} - void RSA_free(RSA *rsa) { unsigned u; @@ -138,7 +130,7 @@ void RSA_free(RSA *rsa) { } METHOD_unref(rsa->meth); - CRYPTO_free_ex_data(&g_ex_data_class, rsa, &rsa->ex_data); + CRYPTO_free_ex_data(g_rsa_ex_data_class_bss_get(), rsa, &rsa->ex_data); BN_clear_free(rsa->n); BN_clear_free(rsa->e); @@ -156,10 +148,6 @@ void RSA_free(RSA *rsa) { } OPENSSL_free(rsa->blindings); OPENSSL_free(rsa->blindings_inuse); - if (rsa->additional_primes != NULL) { - sk_RSA_additional_prime_pop_free(rsa->additional_primes, - RSA_additional_prime_free); - } CRYPTO_MUTEX_cleanup(&rsa->lock); OPENSSL_free(rsa); } @@ -169,6 +157,8 @@ int RSA_up_ref(RSA *rsa) { return 1; } +unsigned RSA_bits(const RSA *rsa) { return BN_num_bits(rsa->n); } + void RSA_get0_key(const RSA *rsa, const BIGNUM **out_n, const BIGNUM **out_e, const BIGNUM **out_d) { if (out_n != NULL) { @@ -205,30 +195,67 @@ void RSA_get0_crt_params(const RSA *rsa, const BIGNUM **out_dmp1, } } -int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) { - if (rsa->meth->keygen) { - return rsa->meth->keygen(rsa, bits, e_value, cb); +int RSA_set0_key(RSA *rsa, BIGNUM *n, BIGNUM *e, BIGNUM *d) { + if ((rsa->n == NULL && n == NULL) || + (rsa->e == NULL && e == NULL)) { + return 0; } - return 
rsa_default_keygen(rsa, bits, e_value, cb); + if (n != NULL) { + BN_free(rsa->n); + rsa->n = n; + } + if (e != NULL) { + BN_free(rsa->e); + rsa->e = e; + } + if (d != NULL) { + BN_free(rsa->d); + rsa->d = d; + } + + return 1; } -int RSA_generate_multi_prime_key(RSA *rsa, int bits, int num_primes, - BIGNUM *e_value, BN_GENCB *cb) { - if (rsa->meth->multi_prime_keygen) { - return rsa->meth->multi_prime_keygen(rsa, bits, num_primes, e_value, cb); +int RSA_set0_factors(RSA *rsa, BIGNUM *p, BIGNUM *q) { + if ((rsa->p == NULL && p == NULL) || + (rsa->q == NULL && q == NULL)) { + return 0; } - return rsa_default_multi_prime_keygen(rsa, bits, num_primes, e_value, cb); + if (p != NULL) { + BN_free(rsa->p); + rsa->p = p; + } + if (q != NULL) { + BN_free(rsa->q); + rsa->q = q; + } + + return 1; } -int RSA_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, - const uint8_t *in, size_t in_len, int padding) { - if (rsa->meth->encrypt) { - return rsa->meth->encrypt(rsa, out_len, out, max_out, in, in_len, padding); +int RSA_set0_crt_params(RSA *rsa, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp) { + if ((rsa->dmp1 == NULL && dmp1 == NULL) || + (rsa->dmq1 == NULL && dmq1 == NULL) || + (rsa->iqmp == NULL && iqmp == NULL)) { + return 0; } - return rsa_default_encrypt(rsa, out_len, out, max_out, in, in_len, padding); + if (dmp1 != NULL) { + BN_free(rsa->dmp1); + rsa->dmp1 = dmp1; + } + if (dmq1 != NULL) { + BN_free(rsa->dmq1); + rsa->dmq1 = dmq1; + } + if (iqmp != NULL) { + BN_free(rsa->iqmp); + rsa->iqmp = iqmp; + } + + return 1; } int RSA_public_encrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa, @@ -321,87 +348,88 @@ int RSA_is_opaque(const RSA *rsa) { return rsa->meth && (rsa->meth->flags & RSA_FLAG_OPAQUE); } -int RSA_supports_digest(const RSA *rsa, const EVP_MD *md) { - if (rsa->meth && rsa->meth->supports_digest) { - return rsa->meth->supports_digest(rsa, md); - } - return 1; -} - int RSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func) { + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { int index; - if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, dup_func, - free_func)) { + if (!CRYPTO_get_ex_new_index(g_rsa_ex_data_class_bss_get(), &index, argl, + argp, free_func)) { return -1; } return index; } -int RSA_set_ex_data(RSA *d, int idx, void *arg) { - return CRYPTO_set_ex_data(&d->ex_data, idx, arg); +int RSA_set_ex_data(RSA *rsa, int idx, void *arg) { + return CRYPTO_set_ex_data(&rsa->ex_data, idx, arg); } -void *RSA_get_ex_data(const RSA *d, int idx) { - return CRYPTO_get_ex_data(&d->ex_data, idx); +void *RSA_get_ex_data(const RSA *rsa, int idx) { + return CRYPTO_get_ex_data(&rsa->ex_data, idx); } -/* SSL_SIG_LENGTH is the size of an SSL/TLS (prior to TLS 1.2) signature: it's - * the length of an MD5 and SHA1 hash. */ +// SSL_SIG_LENGTH is the size of an SSL/TLS (prior to TLS 1.2) signature: it's +// the length of an MD5 and SHA1 hash. static const unsigned SSL_SIG_LENGTH = 36; -/* pkcs1_sig_prefix contains the ASN.1, DER encoded prefix for a hash that is - * to be signed with PKCS#1. */ +// pkcs1_sig_prefix contains the ASN.1, DER encoded prefix for a hash that is +// to be signed with PKCS#1. struct pkcs1_sig_prefix { - /* nid identifies the hash function. */ + // nid identifies the hash function. int nid; - /* len is the number of bytes of |bytes| which are valid. */ + // hash_len is the expected length of the hash function. 
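+  // |RSA_add_pkcs1_prefix| rejects digests whose length does not match it.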
+ uint8_t hash_len; + // len is the number of bytes of |bytes| which are valid. uint8_t len; - /* bytes contains the DER bytes. */ + // bytes contains the DER bytes. uint8_t bytes[19]; }; -/* kPKCS1SigPrefixes contains the ASN.1 prefixes for PKCS#1 signatures with - * different hash functions. */ +// kPKCS1SigPrefixes contains the ASN.1 prefixes for PKCS#1 signatures with +// different hash functions. static const struct pkcs1_sig_prefix kPKCS1SigPrefixes[] = { { NID_md5, + MD5_DIGEST_LENGTH, 18, {0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, 0x05, 0x00, 0x04, 0x10}, }, { NID_sha1, + SHA_DIGEST_LENGTH, 15, {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14}, }, { NID_sha224, + SHA224_DIGEST_LENGTH, 19, {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c}, }, { NID_sha256, + SHA256_DIGEST_LENGTH, 19, {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20}, }, { NID_sha384, + SHA384_DIGEST_LENGTH, 19, {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30}, }, { NID_sha512, + SHA512_DIGEST_LENGTH, 19, {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40}, }, { - NID_undef, 0, {0}, + NID_undef, 0, 0, {0}, }, }; @@ -411,7 +439,7 @@ int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len, unsigned i; if (hash_nid == NID_md5_sha1) { - /* Special case: SSL signature, just check the length. */ + // Special case: SSL signature, just check the length. if (msg_len != SSL_SIG_LENGTH) { OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH); return 0; @@ -429,6 +457,11 @@ int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len, continue; } + if (msg_len != sig_prefix->hash_len) { + OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH); + return 0; + } + const uint8_t* prefix = sig_prefix->bytes; unsigned prefix_len = sig_prefix->len; unsigned signed_msg_len; @@ -464,8 +497,8 @@ int RSA_sign(int hash_nid, const uint8_t *in, unsigned in_len, uint8_t *out, unsigned *out_len, RSA *rsa) { const unsigned rsa_size = RSA_size(rsa); int ret = 0; - uint8_t *signed_msg; - size_t signed_msg_len; + uint8_t *signed_msg = NULL; + size_t signed_msg_len = 0; int signed_msg_is_alloced = 0; size_t size_t_out_len; @@ -474,26 +507,42 @@ int RSA_sign(int hash_nid, const uint8_t *in, unsigned in_len, uint8_t *out, } if (!RSA_add_pkcs1_prefix(&signed_msg, &signed_msg_len, - &signed_msg_is_alloced, hash_nid, in, in_len)) { - return 0; + &signed_msg_is_alloced, hash_nid, in, in_len) || + !RSA_sign_raw(rsa, &size_t_out_len, out, rsa_size, signed_msg, + signed_msg_len, RSA_PKCS1_PADDING)) { + goto err; } - if (rsa_size < RSA_PKCS1_PADDING_SIZE || - signed_msg_len > rsa_size - RSA_PKCS1_PADDING_SIZE) { - OPENSSL_PUT_ERROR(RSA, RSA_R_DIGEST_TOO_BIG_FOR_RSA_KEY); - goto finish; + *out_len = size_t_out_len; + ret = 1; + +err: + if (signed_msg_is_alloced) { + OPENSSL_free(signed_msg); } + return ret; +} - if (RSA_sign_raw(rsa, &size_t_out_len, out, rsa_size, signed_msg, - signed_msg_len, RSA_PKCS1_PADDING)) { - *out_len = size_t_out_len; - ret = 1; +int RSA_sign_pss_mgf1(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, + const uint8_t *in, size_t in_len, const EVP_MD *md, + const EVP_MD *mgf1_md, int salt_len) { + if (in_len != EVP_MD_size(md)) { + OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH); 
+ return 0; } -finish: - if (signed_msg_is_alloced) { - OPENSSL_free(signed_msg); + size_t padded_len = RSA_size(rsa); + uint8_t *padded = OPENSSL_malloc(padded_len); + if (padded == NULL) { + OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); + return 0; } + + int ret = + RSA_padding_add_PKCS1_PSS_mgf1(rsa, padded, in, md, mgf1_md, salt_len) && + RSA_sign_raw(rsa, out_len, out, max_out, padded, padded_len, + RSA_NO_PADDING); + OPENSSL_free(padded); return ret; } @@ -508,7 +557,7 @@ int RSA_verify(int hash_nid, const uint8_t *msg, size_t msg_len, uint8_t *buf = NULL; int ret = 0; uint8_t *signed_msg = NULL; - size_t signed_msg_len, len; + size_t signed_msg_len = 0, len; int signed_msg_is_alloced = 0; if (hash_nid == NID_md5_sha1 && msg_len != SSL_SIG_LENGTH) { @@ -532,6 +581,8 @@ int RSA_verify(int hash_nid, const uint8_t *msg, size_t msg_len, goto out; } + // Check that no other information follows the hash value (FIPS 186-4 Section + // 5.5) and it matches the expected hash. if (len != signed_msg_len || OPENSSL_memcmp(buf, signed_msg, len) != 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_SIGNATURE); goto out; @@ -547,9 +598,36 @@ int RSA_verify(int hash_nid, const uint8_t *msg, size_t msg_len, return ret; } -static void bn_free_and_null(BIGNUM **bn) { - BN_free(*bn); - *bn = NULL; +int RSA_verify_pss_mgf1(RSA *rsa, const uint8_t *msg, size_t msg_len, + const EVP_MD *md, const EVP_MD *mgf1_md, int salt_len, + const uint8_t *sig, size_t sig_len) { + if (msg_len != EVP_MD_size(md)) { + OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH); + return 0; + } + + size_t em_len = RSA_size(rsa); + uint8_t *em = OPENSSL_malloc(em_len); + if (em == NULL) { + OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); + return 0; + } + + int ret = 0; + if (!RSA_verify_raw(rsa, &em_len, em, em_len, sig, sig_len, RSA_NO_PADDING)) { + goto err; + } + + if (em_len != RSA_size(rsa)) { + OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); + goto err; + } + + ret = RSA_verify_PKCS1_PSS_mgf1(rsa, msg, md, mgf1_md, em, salt_len); + +err: + OPENSSL_free(em); + return ret; } int RSA_check_key(const RSA *key) { @@ -558,7 +636,7 @@ int RSA_check_key(const RSA *key) { int ok = 0, has_crt_values; if (RSA_is_opaque(key)) { - /* Opaque keys can't be checked. */ + // Opaque keys can't be checked. return 1; } @@ -573,8 +651,8 @@ int RSA_check_key(const RSA *key) { } if (!key->d || !key->p) { - /* For a public key, or without p and q, there's nothing that can be - * checked. */ + // For a public key, or without p and q, there's nothing that can be + // checked. 
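
For context, a minimal sketch (illustrative only; the helper name pss_roundtrip is hypothetical) of driving the new RSA_sign_pss_mgf1/RSA_verify_pss_mgf1 entry points added above. It assumes a precomputed SHA-256 digest and takes salt_len = -1 to mean "salt as long as the digest", which is an assumption, not something stated in this hunk:

    #include <openssl/digest.h>
    #include <openssl/rsa.h>
    #include <openssl/sha.h>

    // Illustrative only: sign a precomputed SHA-256 digest with RSASSA-PSS
    // (MGF1 also over SHA-256), then verify the result with the same key.
    static int pss_roundtrip(RSA *key,
                             const uint8_t digest[SHA256_DIGEST_LENGTH]) {
      uint8_t sig[512];  // assumes keys of at most 4096 bits
      size_t sig_len;
      if (RSA_size(key) > sizeof(sig) ||
          !RSA_sign_pss_mgf1(key, &sig_len, sig, sizeof(sig), digest,
                             SHA256_DIGEST_LENGTH, EVP_sha256(), EVP_sha256(),
                             -1 /* assumed: salt length = digest length */)) {
        return 0;
      }
      return RSA_verify_pss_mgf1(key, digest, SHA256_DIGEST_LENGTH,
                                 EVP_sha256(), EVP_sha256(), -1, sig, sig_len);
    }
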
return 1; } @@ -595,7 +673,7 @@ int RSA_check_key(const RSA *key) { BN_init(&iqmp_times_q); if (!BN_mul(&n, key->p, key->q, ctx) || - /* lcm = lcm(prime-1, for all primes) */ + // lcm = lcm(p, q) !BN_sub(&pm1, key->p, BN_value_one()) || !BN_sub(&qm1, key->q, BN_value_one()) || !BN_mul(&lcm, &pm1, &qm1, ctx) || @@ -604,26 +682,9 @@ int RSA_check_key(const RSA *key) { goto out; } - size_t num_additional_primes = 0; - if (key->additional_primes != NULL) { - num_additional_primes = sk_RSA_additional_prime_num(key->additional_primes); - } - - for (size_t i = 0; i < num_additional_primes; i++) { - const RSA_additional_prime *ap = - sk_RSA_additional_prime_value(key->additional_primes, i); - if (!BN_mul(&n, &n, ap->prime, ctx) || - !BN_sub(&pm1, ap->prime, BN_value_one()) || - !BN_mul(&lcm, &lcm, &pm1, ctx) || - !BN_gcd(&gcd, &gcd, &pm1, ctx)) { - OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN); - goto out; - } - } - if (!BN_div(&lcm, NULL, &lcm, &gcd, ctx) || !BN_gcd(&gcd, &pm1, &qm1, ctx) || - /* de = d*e mod lcm(prime-1, for all primes). */ + // de = d*e mod lcm(p, q). !BN_mod_mul(&de, key->d, key->e, &lcm, ctx)) { OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN); goto out; @@ -646,12 +707,12 @@ int RSA_check_key(const RSA *key) { goto out; } - if (has_crt_values && num_additional_primes == 0) { - if (/* dmp1 = d mod (p-1) */ + if (has_crt_values) { + if (// dmp1 = d mod (p-1) !BN_mod(&dmp1, key->d, &pm1, ctx) || - /* dmq1 = d mod (q-1) */ + // dmq1 = d mod (q-1) !BN_mod(&dmq1, key->d, &qm1, ctx) || - /* iqmp = q^-1 mod p */ + // iqmp = q^-1 mod p !BN_mod_mul(&iqmp_times_q, key->iqmp, key->q, key->p, ctx)) { OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN); goto out; @@ -683,137 +744,103 @@ int RSA_check_key(const RSA *key) { return ok; } -int RSA_recover_crt_params(RSA *rsa) { - BN_CTX *ctx; - BIGNUM *totient, *rem, *multiple, *p_plus_q, *p_minus_q; - int ok = 0; - if (rsa->n == NULL || rsa->e == NULL || rsa->d == NULL) { - OPENSSL_PUT_ERROR(RSA, RSA_R_EMPTY_PUBLIC_KEY); - return 0; - } +// This is the product of the 132 smallest odd primes, from 3 to 751. +static const BN_ULONG kSmallFactorsLimbs[] = { + TOBN(0xc4309333, 0x3ef4e3e1), TOBN(0x71161eb6, 0xcd2d655f), + TOBN(0x95e2238c, 0x0bf94862), TOBN(0x3eb233d3, 0x24f7912b), + TOBN(0x6b55514b, 0xbf26c483), TOBN(0x0a84d817, 0x5a144871), + TOBN(0x77d12fee, 0x9b82210a), TOBN(0xdb5b93c2, 0x97f050b3), + TOBN(0x4acad6b9, 0x4d6c026b), TOBN(0xeb7751f3, 0x54aec893), + TOBN(0xdba53368, 0x36bc85c4), TOBN(0xd85a1b28, 0x7f5ec78e), + TOBN(0x2eb072d8, 0x6b322244), TOBN(0xbba51112, 0x5e2b3aea), + TOBN(0x36ed1a6c, 0x0e2486bf), TOBN(0x5f270460, 0xec0c5727), + 0x000017b1 +}; - if (rsa->p || rsa->q || rsa->dmp1 || rsa->dmq1 || rsa->iqmp) { - OPENSSL_PUT_ERROR(RSA, RSA_R_CRT_PARAMS_ALREADY_GIVEN); +DEFINE_LOCAL_DATA(BIGNUM, g_small_factors) { + out->d = (BN_ULONG *) kSmallFactorsLimbs; + out->top = OPENSSL_ARRAY_SIZE(kSmallFactorsLimbs); + out->dmax = out->top; + out->neg = 0; + out->flags = BN_FLG_STATIC_DATA; +} + +int RSA_check_fips(RSA *key) { + if (RSA_is_opaque(key)) { + // Opaque keys can't be checked. 
+ OPENSSL_PUT_ERROR(RSA, RSA_R_PUBLIC_KEY_VALIDATION_FAILED); return 0; } - if (rsa->additional_primes != NULL) { - OPENSSL_PUT_ERROR(RSA, RSA_R_CANNOT_RECOVER_MULTI_PRIME_KEY); + if (!RSA_check_key(key)) { return 0; } - /* This uses the algorithm from section 9B of the RSA paper: - * http://people.csail.mit.edu/rivest/Rsapaper.pdf */ - - ctx = BN_CTX_new(); + BN_CTX *ctx = BN_CTX_new(); if (ctx == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); return 0; } - BN_CTX_start(ctx); - totient = BN_CTX_get(ctx); - rem = BN_CTX_get(ctx); - multiple = BN_CTX_get(ctx); - p_plus_q = BN_CTX_get(ctx); - p_minus_q = BN_CTX_get(ctx); + BIGNUM small_gcd; + BN_init(&small_gcd); - if (totient == NULL || rem == NULL || multiple == NULL || p_plus_q == NULL || - p_minus_q == NULL) { - OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); - goto err; - } + int ret = 1; - /* ed-1 is a small multiple of φ(n). */ - if (!BN_mul(totient, rsa->e, rsa->d, ctx) || - !BN_sub_word(totient, 1) || - /* φ(n) = - * pq - p - q + 1 = - * n - (p + q) + 1 - * - * Thus n is a reasonable estimate for φ(n). So, (ed-1)/n will be very - * close. But, when we calculate the quotient, we'll be truncating it - * because we discard the remainder. Thus (ed-1)/multiple will be >= n, - * which the totient cannot be. So we add one to the estimate. - * - * Consider ed-1 as: - * - * multiple * (n - (p+q) + 1) = - * multiple*n - multiple*(p+q) + multiple - * - * When we divide by n, the first term becomes multiple and, since - * multiple and p+q is tiny compared to n, the second and third terms can - * be ignored. Thus I claim that subtracting one from the estimate is - * sufficient. */ - !BN_div(multiple, NULL, totient, rsa->n, ctx) || - !BN_add_word(multiple, 1) || - !BN_div(totient, rem, totient, multiple, ctx)) { - OPENSSL_PUT_ERROR(RSA, ERR_R_BN_LIB); - goto err; + // Perform partial public key validation of RSA keys (SP 800-89 5.3.3). + enum bn_primality_result_t primality_result; + if (BN_num_bits(key->e) <= 16 || + BN_num_bits(key->e) > 256 || + !BN_is_odd(key->n) || + !BN_is_odd(key->e) || + !BN_gcd(&small_gcd, key->n, g_small_factors(), ctx) || + !BN_is_one(&small_gcd) || + !BN_enhanced_miller_rabin_primality_test(&primality_result, key->n, + BN_prime_checks, ctx, NULL) || + primality_result != bn_non_prime_power_composite) { + OPENSSL_PUT_ERROR(RSA, RSA_R_PUBLIC_KEY_VALIDATION_FAILED); + ret = 0; } - if (!BN_is_zero(rem)) { - OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_RSA_PARAMETERS); - goto err; - } + BN_free(&small_gcd); + BN_CTX_free(ctx); - rsa->p = BN_new(); - rsa->q = BN_new(); - rsa->dmp1 = BN_new(); - rsa->dmq1 = BN_new(); - rsa->iqmp = BN_new(); - if (rsa->p == NULL || rsa->q == NULL || rsa->dmp1 == NULL || rsa->dmq1 == - NULL || rsa->iqmp == NULL) { - OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); - goto err; + if (!ret || key->d == NULL || key->p == NULL) { + // On a failure or on only a public key, there's nothing else can be + // checked. 
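
As I read the RSA_check_fips hunk above, the partial public-key validation it performs (SP 800-89 section 5.3.3, as cited in the code) amounts to the following conditions; this is a summary for reviewers, not text from the patch:

\[
2^{16} < e < 2^{256}, \quad e \text{ odd}, \quad n \text{ odd}, \quad
\gcd\Bigl(n,\ \prod_{3 \le r \le 751,\ r\ \text{prime}} r\Bigr) = 1, \quad
n \text{ composite and not a prime power.}
\]
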
+ return ret; } - /* φ(n) = n - (p + q) + 1 => - * n - totient + 1 = p + q */ - if (!BN_sub(p_plus_q, rsa->n, totient) || - !BN_add_word(p_plus_q, 1) || - /* p - q = sqrt((p+q)^2 - 4n) */ - !BN_sqr(rem, p_plus_q, ctx) || - !BN_lshift(multiple, rsa->n, 2) || - !BN_sub(rem, rem, multiple) || - !BN_sqrt(p_minus_q, rem, ctx) || - /* q is 1/2 (p+q)-(p-q) */ - !BN_sub(rsa->q, p_plus_q, p_minus_q) || - !BN_rshift1(rsa->q, rsa->q) || - !BN_div(rsa->p, NULL, rsa->n, rsa->q, ctx) || - !BN_mul(multiple, rsa->p, rsa->q, ctx)) { - OPENSSL_PUT_ERROR(RSA, ERR_R_BN_LIB); - goto err; + // FIPS pairwise consistency test (FIPS 140-2 4.9.2). Per FIPS 140-2 IG, + // section 9.9, it is not known whether |rsa| will be used for signing or + // encryption, so either pair-wise consistency self-test is acceptable. We + // perform a signing test. + uint8_t data[32] = {0}; + unsigned sig_len = RSA_size(key); + uint8_t *sig = OPENSSL_malloc(sig_len); + if (sig == NULL) { + OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); + return 0; } - if (BN_cmp(multiple, rsa->n) != 0) { - OPENSSL_PUT_ERROR(RSA, RSA_R_INTERNAL_ERROR); - goto err; + if (!RSA_sign(NID_sha256, data, sizeof(data), sig, &sig_len, key)) { + OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); + ret = 0; + goto cleanup; } - - if (!BN_sub(rem, rsa->p, BN_value_one()) || - !BN_mod(rsa->dmp1, rsa->d, rem, ctx) || - !BN_sub(rem, rsa->q, BN_value_one()) || - !BN_mod(rsa->dmq1, rsa->d, rem, ctx) || - !BN_mod_inverse(rsa->iqmp, rsa->q, rsa->p, ctx)) { - OPENSSL_PUT_ERROR(RSA, ERR_R_BN_LIB); - goto err; +#if defined(BORINGSSL_FIPS_BREAK_RSA_PWCT) + data[0] = ~data[0]; +#endif + if (!RSA_verify(NID_sha256, data, sizeof(data), sig, sig_len, key)) { + OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); + ret = 0; } - ok = 1; +cleanup: + OPENSSL_free(sig); -err: - BN_CTX_end(ctx); - BN_CTX_free(ctx); - if (!ok) { - bn_free_and_null(&rsa->p); - bn_free_and_null(&rsa->q); - bn_free_and_null(&rsa->dmp1); - bn_free_and_null(&rsa->dmq1); - bn_free_and_null(&rsa->iqmp); - } - return ok; + return ret; } int RSA_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, diff --git a/Sources/BoringSSL/crypto/rsa/rsa_impl.c b/Sources/BoringSSL/crypto/fipsmodule/rsa/rsa_impl.c similarity index 53% rename from Sources/BoringSSL/crypto/rsa/rsa_impl.c rename to Sources/BoringSSL/crypto/fipsmodule/rsa/rsa_impl.c index 8e0aa9c62..fb27320e4 100644 --- a/Sources/BoringSSL/crypto/rsa/rsa_impl.c +++ b/Sources/BoringSSL/crypto/fipsmodule/rsa/rsa_impl.c @@ -57,16 +57,19 @@ #include #include +#include #include #include #include #include #include +#include #include "internal.h" #include "../bn/internal.h" -#include "../internal.h" +#include "../../internal.h" +#include "../delocate.h" static int check_modulus_and_exponent_sizes(const RSA *rsa) { @@ -77,15 +80,15 @@ static int check_modulus_and_exponent_sizes(const RSA *rsa) { return 0; } - /* Mitigate DoS attacks by limiting the exponent size. 33 bits was chosen as - * the limit based on the recommendations in [1] and [2]. Windows CryptoAPI - * doesn't support values larger than 32 bits [3], so it is unlikely that - * exponents larger than 32 bits are being used for anything Windows commonly - * does. - * - * [1] https://www.imperialviolet.org/2012/03/16/rsae.html - * [2] https://www.imperialviolet.org/2012/03/17/rsados.html - * [3] https://msdn.microsoft.com/en-us/library/aa387685(VS.85).aspx */ + // Mitigate DoS attacks by limiting the exponent size. 33 bits was chosen as + // the limit based on the recommendations in [1] and [2]. 
Windows CryptoAPI + // doesn't support values larger than 32 bits [3], so it is unlikely that + // exponents larger than 32 bits are being used for anything Windows commonly + // does. + // + // [1] https://www.imperialviolet.org/2012/03/16/rsae.html + // [2] https://www.imperialviolet.org/2012/03/17/rsados.html + // [3] https://msdn.microsoft.com/en-us/library/aa387685(VS.85).aspx static const unsigned kMaxExponentBits = 33; if (BN_num_bits(rsa->e) > kMaxExponentBits) { @@ -93,10 +96,10 @@ static int check_modulus_and_exponent_sizes(const RSA *rsa) { return 0; } - /* Verify |n > e|. Comparing |rsa_bits| to |kMaxExponentBits| is a small - * shortcut to comparing |n| and |e| directly. In reality, |kMaxExponentBits| - * is much smaller than the minimum RSA key size that any application should - * accept. */ + // Verify |n > e|. Comparing |rsa_bits| to |kMaxExponentBits| is a small + // shortcut to comparing |n| and |e| directly. In reality, |kMaxExponentBits| + // is much smaller than the minimum RSA key size that any application should + // accept. if (rsa_bits <= kMaxExponentBits) { OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); return 0; @@ -110,8 +113,13 @@ size_t rsa_default_size(const RSA *rsa) { return BN_num_bytes(rsa->n); } -int rsa_default_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, - const uint8_t *in, size_t in_len, int padding) { +int RSA_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, + const uint8_t *in, size_t in_len, int padding) { + if (rsa->n == NULL || rsa->e == NULL) { + OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING); + return 0; + } + const unsigned rsa_size = RSA_size(rsa); BIGNUM *f, *result; uint8_t *buf = NULL; @@ -146,7 +154,7 @@ int rsa_default_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, i = RSA_padding_add_PKCS1_type_2(buf, rsa_size, in, in_len); break; case RSA_PKCS1_OAEP_PADDING: - /* Use the default parameters: SHA-1 for both hashes and no label. */ + // Use the default parameters: SHA-1 for both hashes and no label. i = RSA_padding_add_PKCS1_OAEP_mgf1(buf, rsa_size, in, in_len, NULL, 0, NULL, NULL); break; @@ -167,8 +175,8 @@ int rsa_default_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, } if (BN_ucmp(f, rsa->n) >= 0) { - /* usually the padding functions would catch this */ - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS); + // usually the padding functions would catch this + OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } @@ -177,8 +185,8 @@ int rsa_default_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, goto err; } - /* put in leading 0 bytes if the number is less than the length of the - * modulus */ + // put in leading 0 bytes if the number is less than the length of the + // modulus if (!BN_bn2bin_padded(out, rsa_size, result)) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; @@ -192,26 +200,23 @@ int rsa_default_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, BN_CTX_end(ctx); BN_CTX_free(ctx); } - if (buf != NULL) { - OPENSSL_cleanse(buf, rsa_size); - OPENSSL_free(buf); - } + OPENSSL_free(buf); return ret; } -/* MAX_BLINDINGS_PER_RSA defines the maximum number of cached BN_BLINDINGs per - * RSA*. Then this limit is exceeded, BN_BLINDING objects will be created and - * destroyed as needed. */ +// MAX_BLINDINGS_PER_RSA defines the maximum number of cached BN_BLINDINGs per +// RSA*. Then this limit is exceeded, BN_BLINDING objects will be created and +// destroyed as needed. 
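
A short, hypothetical caller for the renamed RSA_encrypt entry point above (oaep_encrypt is an illustrative name). It relies only on behaviour visible in the hunk: the new up-front n/e check and the default SHA-1, empty-label OAEP parameters:

    #include <openssl/mem.h>
    #include <openssl/rsa.h>

    // Illustrative only: OAEP-encrypt |in| into a freshly allocated buffer of
    // RSA_size(key) bytes. RSA_encrypt rejects keys with a missing n or e.
    static int oaep_encrypt(RSA *key, const uint8_t *in, size_t in_len,
                            uint8_t **out, size_t *out_len) {
      const size_t max_out = RSA_size(key);
      *out = OPENSSL_malloc(max_out);
      if (*out == NULL) {
        return 0;
      }
      if (!RSA_encrypt(key, out_len, *out, max_out, in, in_len,
                       RSA_PKCS1_OAEP_PADDING)) {
        OPENSSL_free(*out);
        *out = NULL;
        return 0;
      }
      return 1;
    }
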
#define MAX_BLINDINGS_PER_RSA 1024 -/* rsa_blinding_get returns a BN_BLINDING to use with |rsa|. It does this by - * allocating one of the cached BN_BLINDING objects in |rsa->blindings|. If - * none are free, the cache will be extended by a extra element and the new - * BN_BLINDING is returned. - * - * On success, the index of the assigned BN_BLINDING is written to - * |*index_used| and must be passed to |rsa_blinding_release| when finished. */ +// rsa_blinding_get returns a BN_BLINDING to use with |rsa|. It does this by +// allocating one of the cached BN_BLINDING objects in |rsa->blindings|. If +// none are free, the cache will be extended by a extra element and the new +// BN_BLINDING is returned. +// +// On success, the index of the assigned BN_BLINDING is written to +// |*index_used| and must be passed to |rsa_blinding_release| when finished. static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used, BN_CTX *ctx) { assert(ctx != NULL); @@ -241,8 +246,8 @@ static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used, overflow = rsa->num_blindings >= MAX_BLINDINGS_PER_RSA; - /* We didn't find a free BN_BLINDING to use so increase the length of - * the arrays by one and use the newly created element. */ + // We didn't find a free BN_BLINDING to use so increase the length of + // the arrays by one and use the newly created element. CRYPTO_MUTEX_unlock_write(&rsa->lock); ret = BN_BLINDING_new(); @@ -251,8 +256,8 @@ static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used, } if (overflow) { - /* We cannot add any more cached BN_BLINDINGs so we use |ret| - * and mark it for destruction in |rsa_blinding_release|. */ + // We cannot add any more cached BN_BLINDINGs so we use |ret| + // and mark it for destruction in |rsa_blinding_release|. *index_used = MAX_BLINDINGS_PER_RSA; return ret; } @@ -294,12 +299,12 @@ static BN_BLINDING *rsa_blinding_get(RSA *rsa, unsigned *index_used, return NULL; } -/* rsa_blinding_release marks the cached BN_BLINDING at the given index as free - * for other threads to use. */ +// rsa_blinding_release marks the cached BN_BLINDING at the given index as free +// for other threads to use. static void rsa_blinding_release(RSA *rsa, BN_BLINDING *blinding, unsigned blinding_index) { if (blinding_index == MAX_BLINDINGS_PER_RSA) { - /* This blinding wasn't cached. */ + // This blinding wasn't cached. BN_BLINDING_free(blinding); return; } @@ -309,7 +314,7 @@ static void rsa_blinding_release(RSA *rsa, BN_BLINDING *blinding, CRYPTO_MUTEX_unlock_write(&rsa->lock); } -/* signing */ +// signing int rsa_default_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding) { @@ -352,10 +357,7 @@ int rsa_default_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, ret = 1; err: - if (buf != NULL) { - OPENSSL_cleanse(buf, rsa_size); - OPENSSL_free(buf); - } + OPENSSL_free(buf); return ret; } @@ -363,7 +365,6 @@ int rsa_default_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, int rsa_default_decrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding) { const unsigned rsa_size = RSA_size(rsa); - int r = -1; uint8_t *buf = NULL; int ret = 0; @@ -375,7 +376,7 @@ int rsa_default_decrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, if (padding == RSA_NO_PADDING) { buf = out; } else { - /* Allocate a temporary buffer to hold the padded plaintext. */ + // Allocate a temporary buffer to hold the padded plaintext. 
buf = OPENSSL_malloc(rsa_size); if (buf == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); @@ -394,31 +395,29 @@ int rsa_default_decrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, switch (padding) { case RSA_PKCS1_PADDING: - r = RSA_padding_check_PKCS1_type_2(out, rsa_size, buf, rsa_size); + ret = + RSA_padding_check_PKCS1_type_2(out, out_len, rsa_size, buf, rsa_size); break; case RSA_PKCS1_OAEP_PADDING: - /* Use the default parameters: SHA-1 for both hashes and no label. */ - r = RSA_padding_check_PKCS1_OAEP_mgf1(out, rsa_size, buf, rsa_size, - NULL, 0, NULL, NULL); + // Use the default parameters: SHA-1 for both hashes and no label. + ret = RSA_padding_check_PKCS1_OAEP_mgf1(out, out_len, rsa_size, buf, + rsa_size, NULL, 0, NULL, NULL); break; case RSA_NO_PADDING: - r = rsa_size; + *out_len = rsa_size; + ret = 1; break; default: OPENSSL_PUT_ERROR(RSA, RSA_R_UNKNOWN_PADDING_TYPE); goto err; } - if (r < 0) { + if (!ret) { OPENSSL_PUT_ERROR(RSA, RSA_R_PADDING_CHECK_FAILED); - } else { - *out_len = r; - ret = 1; } err: - if (padding != RSA_NO_PADDING && buf != NULL) { - OPENSSL_cleanse(buf, rsa_size); + if (padding != RSA_NO_PADDING) { OPENSSL_free(buf); } @@ -436,7 +435,6 @@ int RSA_verify_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const unsigned rsa_size = RSA_size(rsa); BIGNUM *f, *result; - int r = -1; if (max_out < rsa_size) { OPENSSL_PUT_ERROR(RSA, RSA_R_OUTPUT_BUFFER_TOO_SMALL); @@ -471,7 +469,7 @@ int RSA_verify_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, if (padding == RSA_NO_PADDING) { buf = out; } else { - /* Allocate a temporary buffer to hold the padded plaintext. */ + // Allocate a temporary buffer to hold the padded plaintext. buf = OPENSSL_malloc(rsa_size); if (buf == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); @@ -484,7 +482,7 @@ int RSA_verify_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, } if (BN_ucmp(f, rsa->n) >= 0) { - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS); + OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } @@ -500,21 +498,21 @@ int RSA_verify_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, switch (padding) { case RSA_PKCS1_PADDING: - r = RSA_padding_check_PKCS1_type_1(out, rsa_size, buf, rsa_size); + ret = + RSA_padding_check_PKCS1_type_1(out, out_len, rsa_size, buf, rsa_size); break; case RSA_NO_PADDING: - r = rsa_size; + ret = 1; + *out_len = rsa_size; break; default: OPENSSL_PUT_ERROR(RSA, RSA_R_UNKNOWN_PADDING_TYPE); goto err; } - if (r < 0) { + if (!ret) { OPENSSL_PUT_ERROR(RSA, RSA_R_PADDING_CHECK_FAILED); - } else { - *out_len = r; - ret = 1; + goto err; } err: @@ -528,6 +526,11 @@ int RSA_verify_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len) { + if (rsa->n == NULL || rsa->d == NULL) { + OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING); + return 0; + } + BIGNUM *f, *result; BN_CTX *ctx = NULL; unsigned blinding_index = 0; @@ -552,8 +555,8 @@ int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, } if (BN_ucmp(f, rsa->n) >= 0) { - /* Usually the padding functions would catch this. */ - OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS); + // Usually the padding functions would catch this. 
+ OPENSSL_PUT_ERROR(RSA, RSA_R_DATA_TOO_LARGE); goto err; } @@ -562,20 +565,18 @@ int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, goto err; } - /* We cannot do blinding or verification without |e|, and continuing without - * those countermeasures is dangerous. However, the Java/Android RSA API - * requires support for keys where only |d| and |n| (and not |e|) are known. - * The callers that require that bad behavior set |RSA_FLAG_NO_BLINDING|. */ - int disable_security = (rsa->flags & RSA_FLAG_NO_BLINDING) && rsa->e == NULL; + const int do_blinding = (rsa->flags & RSA_FLAG_NO_BLINDING) == 0; - if (!disable_security) { - /* Keys without public exponents must have blinding explicitly disabled to - * be used. */ - if (rsa->e == NULL) { - OPENSSL_PUT_ERROR(RSA, RSA_R_NO_PUBLIC_EXPONENT); - goto err; - } + if (rsa->e == NULL && do_blinding) { + // We cannot do blinding or verification without |e|, and continuing without + // those countermeasures is dangerous. However, the Java/Android RSA API + // requires support for keys where only |d| and |n| (and not |e|) are known. + // The callers that require that bad behavior set |RSA_FLAG_NO_BLINDING|. + OPENSSL_PUT_ERROR(RSA, RSA_R_NO_PUBLIC_EXPONENT); + goto err; + } + if (do_blinding) { blinding = rsa_blinding_get(rsa, &blinding_index, ctx); if (blinding == NULL) { OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); @@ -596,16 +597,16 @@ int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, goto err; } - /* Verify the result to protect against fault attacks as described in the - * 1997 paper "On the Importance of Checking Cryptographic Protocols for - * Faults" by Dan Boneh, Richard A. DeMillo, and Richard J. Lipton. Some - * implementations do this only when the CRT is used, but we do it in all - * cases. Section 6 of the aforementioned paper describes an attack that - * works when the CRT isn't used. That attack is much less likely to succeed - * than the CRT attack, but there have likely been improvements since 1997. - * - * This check is cheap assuming |e| is small; it almost always is. */ - if (!disable_security) { + // Verify the result to protect against fault attacks as described in the + // 1997 paper "On the Importance of Checking Cryptographic Protocols for + // Faults" by Dan Boneh, Richard A. DeMillo, and Richard J. Lipton. Some + // implementations do this only when the CRT is used, but we do it in all + // cases. Section 6 of the aforementioned paper describes an attack that + // works when the CRT isn't used. That attack is much less likely to succeed + // than the CRT attack, but there have likely been improvements since 1997. + // + // This check is cheap assuming |e| is small; it almost always is. 
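
To make the reasoning in the comment above concrete: in the signing case, if a fault corrupts the CRT computation modulo one prime only, the faulty result s' leaks a factor of n, which is why the output is re-checked against the public exponent before it is released:

\[
s' \equiv m^{d} \pmod{p},\quad s' \not\equiv m^{d} \pmod{q}
\;\Longrightarrow\; \gcd\bigl(s'^{\,e} - m,\ n\bigr) = p,
\]

so the code only emits a result s once it has confirmed \(s^{e} \equiv m \pmod{n}\).
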
+ if (rsa->e != NULL) { BIGNUM *vrfy = BN_CTX_get(ctx); if (vrfy == NULL || !BN_mod_exp_mont(vrfy, result, rsa->e, rsa->n, ctx, rsa->mont_n) || @@ -614,9 +615,11 @@ int rsa_default_private_transform(RSA *rsa, uint8_t *out, const uint8_t *in, goto err; } - if (!BN_BLINDING_invert(result, blinding, rsa->mont_n, ctx)) { - goto err; - } + } + + if (do_blinding && + !BN_BLINDING_invert(result, blinding, rsa->mont_n, ctx)) { + goto err; } if (!BN_bn2bin_padded(out, len, result)) { @@ -652,11 +655,6 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { BIGNUM *r1, *m1, *vrfy; int ret = 0; - size_t i, num_additional_primes = 0; - - if (rsa->additional_primes != NULL) { - num_additional_primes = sk_RSA_additional_prime_num(rsa->additional_primes); - } BN_CTX_start(ctx); r1 = BN_CTX_get(ctx); @@ -677,22 +675,22 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { goto err; } - /* compute I mod q */ + // compute I mod q if (!BN_mod(r1, I, rsa->q, ctx)) { goto err; } - /* compute r1^dmq1 mod q */ + // compute r1^dmq1 mod q if (!BN_mod_exp_mont_consttime(m1, r1, rsa->dmq1, rsa->q, ctx, rsa->mont_q)) { goto err; } - /* compute I mod p */ + // compute I mod p if (!BN_mod(r1, I, rsa->p, ctx)) { goto err; } - /* compute r1^dmp1 mod p */ + // compute r1^dmp1 mod p if (!BN_mod_exp_mont_consttime(r0, r1, rsa->dmp1, rsa->p, ctx, rsa->mont_p)) { goto err; } @@ -700,8 +698,8 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { if (!BN_sub(r0, r0, m1)) { goto err; } - /* This will help stop the size of r0 increasing, which does - * affect the multiply if it optimised for a power of 2 size */ + // This will help stop the size of r0 increasing, which does + // affect the multiply if it optimised for a power of 2 size if (BN_is_negative(r0)) { if (!BN_add(r0, r0, rsa->p)) { goto err; @@ -716,12 +714,12 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { goto err; } - /* If p < q it is occasionally possible for the correction of - * adding 'p' if r0 is negative above to leave the result still - * negative. This can break the private key operations: the following - * second correction should *always* correct this rare occurrence. - * This will *never* happen with OpenSSL generated keys because - * they ensure p > q [steve] */ + // If p < q it is occasionally possible for the correction of + // adding 'p' if r0 is negative above to leave the result still + // negative. This can break the private key operations: the following + // second correction should *always* correct this rare occurrence. + // This will *never* happen with OpenSSL generated keys because + // they ensure p > q [steve] if (BN_is_negative(r0)) { if (!BN_add(r0, r0, rsa->p)) { goto err; @@ -734,31 +732,6 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { goto err; } - for (i = 0; i < num_additional_primes; i++) { - /* multi-prime RSA. */ - RSA_additional_prime *ap = - sk_RSA_additional_prime_value(rsa->additional_primes, i); - - /* c will already point to a BIGNUM with the correct flags. 
*/ - if (!BN_mod(r1, I, ap->prime, ctx)) { - goto err; - } - - if (!BN_MONT_CTX_set_locked(&ap->mont, &rsa->lock, ap->prime, ctx) || - !BN_mod_exp_mont_consttime(m1, r1, ap->exp, ap->prime, ctx, ap->mont)) { - goto err; - } - - if (!BN_sub(m1, m1, r0) || - !BN_mul(m1, m1, ap->coeff, ctx) || - !BN_mod(m1, m1, ap->prime, ctx) || - (BN_is_negative(m1) && !BN_add(m1, m1, ap->prime)) || - !BN_mul(m1, m1, ap->r, ctx) || - !BN_add(r0, r0, m1)) { - goto err; - } - } - ret = 1; err: @@ -766,335 +739,313 @@ static int mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx) { return ret; } -int rsa_default_multi_prime_keygen(RSA *rsa, int bits, int num_primes, - BIGNUM *e_value, BN_GENCB *cb) { - BIGNUM *r0 = NULL, *r1 = NULL, *r2 = NULL, *r3 = NULL, *tmp; - int prime_bits, ok = -1, n = 0, i, j; - BN_CTX *ctx = NULL; - STACK_OF(RSA_additional_prime) *additional_primes = NULL; - - if (num_primes < 2) { - ok = 0; /* we set our own err */ - OPENSSL_PUT_ERROR(RSA, RSA_R_MUST_HAVE_AT_LEAST_TWO_PRIMES); - goto err; +static int ensure_bignum(BIGNUM **out) { + if (*out == NULL) { + *out = BN_new(); } + return *out != NULL; +} - ctx = BN_CTX_new(); - if (ctx == NULL) { - goto err; - } - BN_CTX_start(ctx); - r0 = BN_CTX_get(ctx); - r1 = BN_CTX_get(ctx); - r2 = BN_CTX_get(ctx); - r3 = BN_CTX_get(ctx); - if (r0 == NULL || r1 == NULL || r2 == NULL || r3 == NULL) { - goto err; - } +// kBoringSSLRSASqrtTwo is the BIGNUM representation of ⌊2¹⁵³⁵×√2⌋. This is +// chosen to give enough precision for 3072-bit RSA, the largest key size FIPS +// specifies. Key sizes beyond this will round up. +// +// To verify this number, check that n² < 2³⁰⁷¹ < (n+1)², where n is value +// represented here. Note the components are listed in little-endian order. Here +// is some sample Python code to check: +// +// >>> TOBN = lambda a, b: a << 32 | b +// >>> l = [ ] +// >>> n = sum(a * 2**(64*i) for i, a in enumerate(l)) +// >>> n**2 < 2**3071 < (n+1)**2 +// True +const BN_ULONG kBoringSSLRSASqrtTwo[] = { + TOBN(0xdea06241, 0xf7aa81c2), TOBN(0xf6a1be3f, 0xca221307), + TOBN(0x332a5e9f, 0x7bda1ebf), TOBN(0x0104dc01, 0xfe32352f), + TOBN(0xb8cf341b, 0x6f8236c7), TOBN(0x4264dabc, 0xd528b651), + TOBN(0xf4d3a02c, 0xebc93e0c), TOBN(0x81394ab6, 0xd8fd0efd), + TOBN(0xeaa4a089, 0x9040ca4a), TOBN(0xf52f120f, 0x836e582e), + TOBN(0xcb2a6343, 0x31f3c84d), TOBN(0xc6d5a8a3, 0x8bb7e9dc), + TOBN(0x460abc72, 0x2f7c4e33), TOBN(0xcab1bc91, 0x1688458a), + TOBN(0x53059c60, 0x11bc337b), TOBN(0xd2202e87, 0x42af1f4e), + TOBN(0x78048736, 0x3dfa2768), TOBN(0x0f74a85e, 0x439c7b4a), + TOBN(0xa8b1fe6f, 0xdc83db39), TOBN(0x4afc8304, 0x3ab8a2c3), + TOBN(0xed17ac85, 0x83339915), TOBN(0x1d6f60ba, 0x893ba84c), + TOBN(0x597d89b3, 0x754abe9f), TOBN(0xb504f333, 0xf9de6484), +}; +const size_t kBoringSSLRSASqrtTwoLen = OPENSSL_ARRAY_SIZE(kBoringSSLRSASqrtTwo); - if (num_primes > 2) { - additional_primes = sk_RSA_additional_prime_new_null(); - if (additional_primes == NULL) { - goto err; - } +int rsa_greater_than_pow2(const BIGNUM *b, int n) { + if (BN_is_negative(b) || n == INT_MAX) { + return 0; } - for (i = 2; i < num_primes; i++) { - RSA_additional_prime *ap = OPENSSL_malloc(sizeof(RSA_additional_prime)); - if (ap == NULL) { - goto err; - } - OPENSSL_memset(ap, 0, sizeof(RSA_additional_prime)); - ap->prime = BN_new(); - ap->exp = BN_new(); - ap->coeff = BN_new(); - ap->r = BN_new(); - if (ap->prime == NULL || - ap->exp == NULL || - ap->coeff == NULL || - ap->r == NULL || - !sk_RSA_additional_prime_push(additional_primes, ap)) { - RSA_additional_prime_free(ap); - goto 
err; - } - } + int b_bits = BN_num_bits(b); + return b_bits > n + 1 || (b_bits == n + 1 && !BN_is_pow2(b)); +} - /* We need the RSA components non-NULL */ - if (!rsa->n && ((rsa->n = BN_new()) == NULL)) { - goto err; - } - if (!rsa->d && ((rsa->d = BN_new()) == NULL)) { - goto err; - } - if (!rsa->e && ((rsa->e = BN_new()) == NULL)) { - goto err; - } - if (!rsa->p && ((rsa->p = BN_new()) == NULL)) { - goto err; - } - if (!rsa->q && ((rsa->q = BN_new()) == NULL)) { - goto err; - } - if (!rsa->dmp1 && ((rsa->dmp1 = BN_new()) == NULL)) { - goto err; - } - if (!rsa->dmq1 && ((rsa->dmq1 = BN_new()) == NULL)) { - goto err; - } - if (!rsa->iqmp && ((rsa->iqmp = BN_new()) == NULL)) { - goto err; +// generate_prime sets |out| to a prime with length |bits| such that |out|-1 is +// relatively prime to |e|. If |p| is non-NULL, |out| will also not be close to +// |p|. +static int generate_prime(BIGNUM *out, int bits, const BIGNUM *e, + const BIGNUM *p, BN_CTX *ctx, BN_GENCB *cb) { + if (bits < 128 || (bits % BN_BITS2) != 0) { + OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); + return 0; } - if (!BN_copy(rsa->e, e_value)) { - goto err; - } + // See FIPS 186-4 appendix B.3.3, steps 4 and 5. Note |bits| here is nlen/2. - /* generate p and q */ - prime_bits = (bits + (num_primes - 1)) / num_primes; - for (;;) { - if (!BN_generate_prime_ex(rsa->p, prime_bits, 0, NULL, NULL, cb) || - !BN_sub(r2, rsa->p, BN_value_one()) || - !BN_gcd(r1, r2, rsa->e, ctx)) { - goto err; - } - if (BN_is_one(r1)) { - break; - } - if (!BN_GENCB_call(cb, 2, n++)) { - goto err; - } + // Use the limit from steps 4.7 and 5.8 for most values of |e|. When |e| is 3, + // the 186-4 limit is too low, so we use a higher one. Note this case is not + // reachable from |RSA_generate_key_fips|. + if (bits >= INT_MAX/32) { + OPENSSL_PUT_ERROR(RSA, RSA_R_MODULUS_TOO_LARGE); + return 0; } - if (!BN_GENCB_call(cb, 3, 0)) { + int limit = BN_is_word(e, 3) ? bits * 32 : bits * 5; + + int ret = 0, tries = 0, rand_tries = 0; + BN_CTX_start(ctx); + BIGNUM *tmp = BN_CTX_get(ctx); + if (tmp == NULL) { goto err; } - prime_bits = ((bits - prime_bits) + (num_primes - 2)) / (num_primes - 1); + for (;;) { - /* When generating ridiculously small keys, we can get stuck - * continually regenerating the same prime values. Check for - * this and bail if it happens 3 times. */ - unsigned int degenerate = 0; - do { - if (!BN_generate_prime_ex(rsa->q, prime_bits, 0, NULL, NULL, cb)) { - goto err; - } - } while ((BN_cmp(rsa->p, rsa->q) == 0) && (++degenerate < 3)); - if (degenerate == 3) { - ok = 0; /* we set our own err */ - OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); - goto err; - } - if (!BN_sub(r2, rsa->q, BN_value_one()) || - !BN_gcd(r1, r2, rsa->e, ctx)) { - goto err; - } - if (BN_is_one(r1)) { - break; - } - if (!BN_GENCB_call(cb, 2, n++)) { + // Generate a random number of length |bits| where the bottom bit is set + // (steps 4.2, 4.3, 5.2 and 5.3) and the top bit is set (implied by the + // bound checked below in steps 4.4 and 5.5). 
+ if (!BN_rand(out, bits, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ODD) || + !BN_GENCB_call(cb, BN_GENCB_GENERATED, rand_tries++)) { goto err; } - } - if (!BN_GENCB_call(cb, 3, 1) || - !BN_mul(rsa->n, rsa->p, rsa->q, ctx)) { - goto err; - } - - for (i = 2; i < num_primes; i++) { - RSA_additional_prime *ap = - sk_RSA_additional_prime_value(additional_primes, i - 2); - prime_bits = ((bits - BN_num_bits(rsa->n)) + (num_primes - (i + 1))) / - (num_primes - i); - - for (;;) { - if (!BN_generate_prime_ex(ap->prime, prime_bits, 0, NULL, NULL, cb)) { + if (p != NULL) { + // If |p| and |out| are too close, try again (step 5.4). + if (!BN_sub(tmp, out, p)) { goto err; } - if (BN_cmp(rsa->p, ap->prime) == 0 || - BN_cmp(rsa->q, ap->prime) == 0) { + BN_set_negative(tmp, 0); + if (!rsa_greater_than_pow2(tmp, bits - 100)) { continue; } + } - for (j = 0; j < i - 2; j++) { - if (BN_cmp(sk_RSA_additional_prime_value(additional_primes, j)->prime, - ap->prime) == 0) { - break; - } - } - if (j != i - 2) { - continue; - } - - if (!BN_sub(r2, ap->prime, BN_value_one()) || - !BN_gcd(r1, r2, rsa->e, ctx)) { - goto err; - } - - if (!BN_is_one(r1)) { - continue; - } - if (i != num_primes - 1) { - break; - } + // If out < 2^(bits-1)×√2, try again (steps 4.4 and 5.5). + // + // We check the most significant words, so we retry if ⌊out/2^k⌋ <= ⌊b/2^k⌋, + // where b = 2^(bits-1)×√2 and k = max(0, bits - 1536). For key sizes up to + // 3072 (bits = 1536), k = 0, so we are testing that ⌊out⌋ <= ⌊b⌋. out is an + // integer and b is not, so this is equivalent to out < b. That is, the + // comparison is exact for FIPS key sizes. + // + // For larger keys, the comparison is approximate, leaning towards + // retrying. That is, we reject a negligible fraction of primes that are + // within the FIPS bound, but we will never accept a prime outside the + // bound, ensuring the resulting RSA key is the right size. Specifically, if + // the FIPS bound holds, we have ⌊out/2^k⌋ < out/2^k < b/2^k. This implies + // ⌊out/2^k⌋ <= ⌊b/2^k⌋. That is, the FIPS bound implies our bound and so we + // are slightly tighter. + size_t out_len = (size_t)out->top; + assert(out_len == (size_t)bits / BN_BITS2); + size_t to_check = kBoringSSLRSASqrtTwoLen; + if (to_check > out_len) { + to_check = out_len; + } + if (!bn_less_than_words( + kBoringSSLRSASqrtTwo + kBoringSSLRSASqrtTwoLen - to_check, + out->d + out_len - to_check, to_check)) { + continue; + } - /* For the last prime we'll check that it makes n large enough. In the - * two prime case this isn't a problem because we generate primes with - * the top two bits set and so the product is always of the expected - * size. In the multi prime case, this doesn't follow. */ - if (!BN_mul(r1, rsa->n, ap->prime, ctx)) { + // Check gcd(out-1, e) is one (steps 4.5 and 5.6). + if (!BN_sub(tmp, out, BN_value_one()) || + !BN_gcd(tmp, tmp, e, ctx)) { + goto err; + } + if (BN_is_one(tmp)) { + // Test |out| for primality (steps 4.5.1 and 5.6.1). + int is_probable_prime; + if (!BN_primality_test(&is_probable_prime, out, BN_prime_checks, ctx, 1, + cb)) { goto err; } - if (BN_num_bits(r1) == (unsigned) bits) { - break; - } - - if (!BN_GENCB_call(cb, 2, n++)) { + if (is_probable_prime) { + ret = 1; goto err; } } - /* ap->r is is the product of all the primes prior to the current one - * (including p and q). */ - if (!BN_copy(ap->r, rsa->n)) { + // If we've tried too many times to find a prime, abort (steps 4.7 and + // 5.8). 
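
The bn_less_than_words comparison against kBoringSSLRSASqrtTwo above enforces the FIPS 186-4 lower bound on each prime; the factor of √2 is what guarantees the final modulus has the full requested length:

\[
p,\ q \;\ge\; \sqrt{2}\cdot 2^{\,k-1} \;\Longrightarrow\; n = pq \;\ge\; 2^{\,2k-1},
\]

and since each candidate has exactly \(k\) bits (so \(p, q < 2^{k}\)), the modulus \(n\) ends up with exactly \(2k = \text{nlen}\) bits.
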
+ tries++; + if (tries >= limit) { + OPENSSL_PUT_ERROR(RSA, RSA_R_TOO_MANY_ITERATIONS); goto err; } - if (i == num_primes - 1) { - /* In the case of the last prime, we calculated n as |r1| in the loop - * above. */ - if (!BN_copy(rsa->n, r1)) { - goto err; - } - } else if (!BN_mul(rsa->n, rsa->n, ap->prime, ctx)) { - goto err; - } - - if (!BN_GENCB_call(cb, 3, 1)) { + if (!BN_GENCB_call(cb, 2, tries)) { goto err; } } - if (BN_cmp(rsa->p, rsa->q) < 0) { - tmp = rsa->p; - rsa->p = rsa->q; - rsa->q = tmp; - } +err: + BN_CTX_end(ctx); + return ret; +} + +int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) { + // See FIPS 186-4 appendix B.3. This function implements a generalized version + // of the FIPS algorithm. |RSA_generate_key_fips| performs additional checks + // for FIPS-compliant key generation. - /* calculate d */ - if (!BN_sub(r1, rsa->p, BN_value_one())) { - goto err; /* p-1 */ + // Always generate RSA keys which are a multiple of 128 bits. Round |bits| + // down as needed. + bits &= ~127; + + // Reject excessively small keys. + if (bits < 256) { + OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL); + return 0; } - if (!BN_sub(r2, rsa->q, BN_value_one())) { - goto err; /* q-1 */ + + int ret = 0; + BN_CTX *ctx = BN_CTX_new(); + if (ctx == NULL) { + goto bn_err; } - if (!BN_mul(r0, r1, r2, ctx)) { - goto err; /* (p-1)(q-1) */ + BN_CTX_start(ctx); + BIGNUM *totient = BN_CTX_get(ctx); + BIGNUM *pm1 = BN_CTX_get(ctx); + BIGNUM *qm1 = BN_CTX_get(ctx); + BIGNUM *gcd = BN_CTX_get(ctx); + if (totient == NULL || pm1 == NULL || qm1 == NULL || gcd == NULL) { + goto bn_err; + } + + // We need the RSA components non-NULL. + if (!ensure_bignum(&rsa->n) || + !ensure_bignum(&rsa->d) || + !ensure_bignum(&rsa->e) || + !ensure_bignum(&rsa->p) || + !ensure_bignum(&rsa->q) || + !ensure_bignum(&rsa->dmp1) || + !ensure_bignum(&rsa->dmq1) || + !ensure_bignum(&rsa->iqmp)) { + goto bn_err; } - for (i = 2; i < num_primes; i++) { - RSA_additional_prime *ap = - sk_RSA_additional_prime_value(additional_primes, i - 2); - if (!BN_sub(r3, ap->prime, BN_value_one()) || - !BN_mul(r0, r0, r3, ctx)) { - goto err; + + if (!BN_copy(rsa->e, e_value)) { + goto bn_err; + } + + int prime_bits = bits / 2; + do { + // Generate p and q, each of size |prime_bits|, using the steps outlined in + // appendix FIPS 186-4 appendix B.3.3. + if (!generate_prime(rsa->p, prime_bits, rsa->e, NULL, ctx, cb) || + !BN_GENCB_call(cb, 3, 0) || + !generate_prime(rsa->q, prime_bits, rsa->e, rsa->p, ctx, cb) || + !BN_GENCB_call(cb, 3, 1)) { + goto bn_err; } - } - if (!BN_mod_inverse(rsa->d, rsa->e, r0, ctx)) { - goto err; /* d */ - } - /* calculate d mod (p-1) */ - if (!BN_mod(rsa->dmp1, rsa->d, r1, ctx)) { - goto err; + if (BN_cmp(rsa->p, rsa->q) < 0) { + BIGNUM *tmp = rsa->p; + rsa->p = rsa->q; + rsa->q = tmp; + } + + // Calculate d = e^(-1) (mod lcm(p-1, q-1)), per FIPS 186-4. This differs + // from typical RSA implementations which use (p-1)*(q-1). + // + // Note this means the size of d might reveal information about p-1 and + // q-1. However, we do operations with Chinese Remainder Theorem, so we only + // use d (mod p-1) and d (mod q-1) as exponents. Using a minimal totient + // does not affect those two values. 
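
For reference, the private-exponent computation in the hunk just below (the BN_mul, BN_gcd, BN_div, BN_mod_inverse sequence) is the minimal-totient form required by FIPS 186-4:

\[
\lambda(n) = \operatorname{lcm}(p-1,\,q-1) = \frac{(p-1)(q-1)}{\gcd(p-1,\,q-1)}, \qquad
d \equiv e^{-1} \pmod{\lambda(n)}.
\]
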
+ if (!BN_sub(pm1, rsa->p, BN_value_one()) || + !BN_sub(qm1, rsa->q, BN_value_one()) || + !BN_mul(totient, pm1, qm1, ctx) || + !BN_gcd(gcd, pm1, qm1, ctx) || + !BN_div(totient, NULL, totient, gcd, ctx) || + !BN_mod_inverse(rsa->d, rsa->e, totient, ctx)) { + goto bn_err; + } + + // Check that |rsa->d| > 2^|prime_bits| and try again if it fails. See + // appendix B.3.1's guidance on values for d. + } while (!rsa_greater_than_pow2(rsa->d, prime_bits)); + + if (// Calculate n. + !BN_mul(rsa->n, rsa->p, rsa->q, ctx) || + // Calculate d mod (p-1). + !BN_mod(rsa->dmp1, rsa->d, pm1, ctx) || + // Calculate d mod (q-1) + !BN_mod(rsa->dmq1, rsa->d, qm1, ctx)) { + goto bn_err; } - /* calculate d mod (q-1) */ - if (!BN_mod(rsa->dmq1, rsa->d, r2, ctx)) { + // Sanity-check that |rsa->n| has the specified size. This is implied by + // |generate_prime|'s bounds. + if (BN_num_bits(rsa->n) != (unsigned)bits) { + OPENSSL_PUT_ERROR(RSA, ERR_R_INTERNAL_ERROR); goto err; } - /* Calculate inverse of q mod p. Note that although RSA key generation is far - * from constant-time, |bn_mod_inverse_secret_prime| uses the same modular - * exponentation logic as in RSA private key operations and, if the RSAZ-1024 - * code is enabled, will be optimized for common RSA prime sizes. */ + // Calculate inverse of q mod p. Note that although RSA key generation is far + // from constant-time, |bn_mod_inverse_secret_prime| uses the same modular + // exponentation logic as in RSA private key operations and, if the RSAZ-1024 + // code is enabled, will be optimized for common RSA prime sizes. if (!BN_MONT_CTX_set_locked(&rsa->mont_p, &rsa->lock, rsa->p, ctx) || !bn_mod_inverse_secret_prime(rsa->iqmp, rsa->q, rsa->p, ctx, rsa->mont_p)) { - goto err; - } - - for (i = 2; i < num_primes; i++) { - RSA_additional_prime *ap = - sk_RSA_additional_prime_value(additional_primes, i - 2); - if (!BN_sub(ap->exp, ap->prime, BN_value_one()) || - !BN_mod(ap->exp, rsa->d, ap->exp, ctx) || - !BN_MONT_CTX_set_locked(&ap->mont, &rsa->lock, ap->prime, ctx) || - !bn_mod_inverse_secret_prime(ap->coeff, ap->r, ap->prime, ctx, - ap->mont)) { - goto err; - } + goto bn_err; } - rsa->additional_primes = additional_primes; - additional_primes = NULL; - - /* The key generation process is complex and thus error-prone. It could be - * disastrous to generate and then use a bad key so double-check that the key - * makes sense. */ - ok = RSA_check_key(rsa); - if (!ok) { + // The key generation process is complex and thus error-prone. It could be + // disastrous to generate and then use a bad key so double-check that the key + // makes sense. + if (!RSA_check_key(rsa)) { OPENSSL_PUT_ERROR(RSA, RSA_R_INTERNAL_ERROR); + goto err; } -err: - if (ok == -1) { + ret = 1; + +bn_err: + if (!ret) { OPENSSL_PUT_ERROR(RSA, ERR_LIB_BN); - ok = 0; } +err: if (ctx != NULL) { BN_CTX_end(ctx); BN_CTX_free(ctx); } - sk_RSA_additional_prime_pop_free(additional_primes, - RSA_additional_prime_free); - return ok; -} - -int rsa_default_keygen(RSA *rsa, int bits, BIGNUM *e_value, BN_GENCB *cb) { - return rsa_default_multi_prime_keygen(rsa, bits, 2 /* num primes */, e_value, - cb); + return ret; } -/* All of the methods are NULL to make it easier for the compiler/linker to drop - * unused functions. The wrapper functions will select the appropriate - * |rsa_default_*| implementation. 
*/ -const RSA_METHOD RSA_default_method = { - { - 0 /* references */, - 1 /* is_static */, - }, - NULL /* app_data */, - - NULL /* init */, - NULL /* finish (defaults to rsa_default_finish) */, - - NULL /* size (defaults to rsa_default_size) */, - - NULL /* sign */, - NULL /* verify */, - - NULL /* encrypt (defaults to rsa_default_encrypt) */, - NULL /* sign_raw (defaults to rsa_default_sign_raw) */, - NULL /* decrypt (defaults to rsa_default_decrypt) */, - NULL /* verify_raw (defaults to rsa_default_verify_raw) */, - - NULL /* private_transform (defaults to rsa_default_private_transform) */, - - NULL /* mod_exp (ignored) */, - NULL /* bn_mod_exp (ignored) */, - - RSA_FLAG_CACHE_PUBLIC | RSA_FLAG_CACHE_PRIVATE, +int RSA_generate_key_fips(RSA *rsa, int bits, BN_GENCB *cb) { + // FIPS 186-4 allows 2048-bit and 3072-bit RSA keys (1024-bit and 1536-bit + // primes, respectively) with the prime generation method we use. + if (bits != 2048 && bits != 3072) { + OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_RSA_PARAMETERS); + return 0; + } - NULL /* keygen (defaults to rsa_default_keygen) */, - NULL /* multi_prime_keygen (defaults to rsa_default_multi_prime_keygen) */, + BIGNUM *e = BN_new(); + int ret = e != NULL && + BN_set_word(e, RSA_F4) && + RSA_generate_key_ex(rsa, bits, e, cb) && + RSA_check_fips(rsa); + BN_free(e); + return ret; +} - NULL /* supports_digest */, -}; +DEFINE_METHOD_FUNCTION(RSA_METHOD, RSA_default_method) { + // All of the methods are NULL to make it easier for the compiler/linker to + // drop unused functions. The wrapper functions will select the appropriate + // |rsa_default_*| implementation. + OPENSSL_memset(out, 0, sizeof(RSA_METHOD)); + out->common.is_static = 1; + out->flags = RSA_FLAG_CACHE_PUBLIC | RSA_FLAG_CACHE_PRIVATE; +} diff --git a/Sources/BoringSSL/crypto/sha/sha1-altivec.c b/Sources/BoringSSL/crypto/fipsmodule/sha/sha1-altivec.c similarity index 85% rename from Sources/BoringSSL/crypto/sha/sha1-altivec.c rename to Sources/BoringSSL/crypto/fipsmodule/sha/sha1-altivec.c index 500986e14..3152827a0 100644 --- a/Sources/BoringSSL/crypto/sha/sha1-altivec.c +++ b/Sources/BoringSSL/crypto/fipsmodule/sha/sha1-altivec.c @@ -54,14 +54,14 @@ * copied and put under another distribution licence * [including the GNU Public Licence.] */ -/* Altivec-optimized SHA1 in C. This is tested on ppc64le only. - * - * References: - * https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 - * http://arctic.org/~dean/crypto/sha1.html - * - * This code used the generic SHA-1 from OpenSSL as a basis and AltiVec - * optimisations were added on top. */ +// Altivec-optimized SHA1 in C. This is tested on ppc64le only. +// +// References: +// https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 +// http://arctic.org/~dean/crypto/sha1.html +// +// This code used the generic SHA-1 from OpenSSL as a basis and AltiVec +// optimisations were added on top. 
#include @@ -76,11 +76,11 @@ static uint32_t rotate(uint32_t a, int n) { return (a << n) | (a >> (32 - n)); } typedef vector unsigned int vec_uint32_t; typedef vector unsigned char vec_uint8_t; -/* Vector constants */ +// Vector constants static const vec_uint8_t k_swap_endianness = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}; -/* Shift amounts for byte and bit shifts and rotations */ +// Shift amounts for byte and bit shifts and rotations static const vec_uint8_t k_4_bytes = {32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32}; static const vec_uint8_t k_12_bytes = {96, 96, 96, 96, 96, 96, 96, 96, @@ -91,37 +91,39 @@ static const vec_uint8_t k_12_bytes = {96, 96, 96, 96, 96, 96, 96, 96, #define K_40_59 0x8f1bbcdcUL #define K_60_79 0xca62c1d6UL -/* Vector versions of the above. */ +// Vector versions of the above. static const vec_uint32_t K_00_19_x_4 = {K_00_19, K_00_19, K_00_19, K_00_19}; static const vec_uint32_t K_20_39_x_4 = {K_20_39, K_20_39, K_20_39, K_20_39}; static const vec_uint32_t K_40_59_x_4 = {K_40_59, K_40_59, K_40_59, K_40_59}; static const vec_uint32_t K_60_79_x_4 = {K_60_79, K_60_79, K_60_79, K_60_79}; -/* vector message scheduling: compute message schedule for round i..i+3 where i - * is divisible by 4. We return the schedule w[i..i+3] as a vector. In - * addition, we also precompute sum w[i..+3] and an additive constant K. This - * is done to offload some computation of f() in the integer execution units. - * - * Byte shifting code below may not be correct for big-endian systems. */ +// vector message scheduling: compute message schedule for round i..i+3 where i +// is divisible by 4. We return the schedule w[i..i+3] as a vector. In +// addition, we also precompute sum w[i..+3] and an additive constant K. This +// is done to offload some computation of f() in the integer execution units. +// +// Byte shifting code below may not be correct for big-endian systems. static vec_uint32_t sched_00_15(vec_uint32_t *pre_added, const void *data, vec_uint32_t k) { - const vec_uint32_t v = *((const vec_uint32_t *)data); + const vector unsigned char unaligned_data = + vec_vsx_ld(0, (const unsigned char*) data); + const vec_uint32_t v = (vec_uint32_t) unaligned_data; const vec_uint32_t w = vec_perm(v, v, k_swap_endianness); vec_st(w + k, 0, pre_added); return w; } -/* Compute w[i..i+3] using these steps for i in [16, 20, 24, 28] - * - * w'[i ] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1 - * w'[i+1] = (w[i-2] ^ w[i-7] ^ w[i-13] ^ w[i-15]) <<< 1 - * w'[i+2] = (w[i-1] ^ w[i-6] ^ w[i-12] ^ w[i-14]) <<< 1 - * w'[i+3] = ( 0 ^ w[i-5] ^ w[i-11] ^ w[i-13]) <<< 1 - * - * w[ i] = w'[ i] - * w[i+1] = w'[i+1] - * w[i+2] = w'[i+2] - * w[i+3] = w'[i+3] ^ (w'[i] <<< 1) */ +// Compute w[i..i+3] using these steps for i in [16, 20, 24, 28] +// +// w'[i ] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1 +// w'[i+1] = (w[i-2] ^ w[i-7] ^ w[i-13] ^ w[i-15]) <<< 1 +// w'[i+2] = (w[i-1] ^ w[i-6] ^ w[i-12] ^ w[i-14]) <<< 1 +// w'[i+3] = ( 0 ^ w[i-5] ^ w[i-11] ^ w[i-13]) <<< 1 +// +// w[ i] = w'[ i] +// w[i+1] = w'[i+1] +// w[i+2] = w'[i+2] +// w[i+3] = w'[i+3] ^ (w'[i] <<< 1) static vec_uint32_t sched_16_31(vec_uint32_t *pre_added, vec_uint32_t minus_4, vec_uint32_t minus_8, vec_uint32_t minus_12, vec_uint32_t minus_16, vec_uint32_t k) { @@ -136,8 +138,8 @@ static vec_uint32_t sched_16_31(vec_uint32_t *pre_added, vec_uint32_t minus_4, return w; } -/* Compute w[i..i+3] using this relation for i in [32, 36, 40 ... 
76] - * w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]), 2) <<< 2 */ +// Compute w[i..i+3] using this relation for i in [32, 36, 40 ... 76] +// w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]), 2) <<< 2 static vec_uint32_t sched_32_79(vec_uint32_t *pre_added, vec_uint32_t minus_4, vec_uint32_t minus_8, vec_uint32_t minus_16, vec_uint32_t minus_28, vec_uint32_t minus_32, @@ -150,17 +152,17 @@ static vec_uint32_t sched_32_79(vec_uint32_t *pre_added, vec_uint32_t minus_4, return w; } -/* As pointed out by Wei Dai , F() below can be simplified - * to the code in F_00_19. Wei attributes these optimisations to Peter - * Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define - * F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another - * tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a */ +// As pointed out by Wei Dai , F() below can be simplified +// to the code in F_00_19. Wei attributes these optimisations to Peter +// Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define +// F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another +// tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a #define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define F_20_39(b, c, d) ((b) ^ (c) ^ (d)) #define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d))) #define F_60_79(b, c, d) F_20_39(b, c, d) -/* We pre-added the K constants during message scheduling. */ +// We pre-added the K constants during message scheduling. #define BODY_00_19(i, a, b, c, d, e, f) \ do { \ (f) = w[i] + (e) + rotate((a), 5) + F_00_19((b), (c), (d)); \ @@ -316,7 +318,7 @@ void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { BODY_60_79(74, E, T, A, B, C, D); BODY_60_79(75, D, E, T, A, B, C); - /* We don't use the last value */ + // We don't use the last value (void)sched_32_79(vw + 19, w72, w68, w60, w48, w44, k); BODY_60_79(76, C, D, E, T, A, B); BODY_60_79(77, B, C, D, E, T, A); @@ -343,4 +345,17 @@ void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num) { } } -#endif /* OPENSSL_PPC64LE */ +#endif // OPENSSL_PPC64LE + +#undef K_00_19 +#undef K_20_39 +#undef K_40_59 +#undef K_60_79 +#undef F_00_19 +#undef F_20_39 +#undef F_40_59 +#undef F_60_79 +#undef BODY_00_19 +#undef BODY_20_39 +#undef BODY_40_59 +#undef BODY_60_79 diff --git a/Sources/BoringSSL/crypto/sha/sha1.c b/Sources/BoringSSL/crypto/fipsmodule/sha/sha1.c similarity index 92% rename from Sources/BoringSSL/crypto/sha/sha1.c rename to Sources/BoringSSL/crypto/fipsmodule/sha/sha1.c index 7c7271322..e5b4ba622 100644 --- a/Sources/BoringSSL/crypto/sha/sha1.c +++ b/Sources/BoringSSL/crypto/fipsmodule/sha/sha1.c @@ -60,13 +60,13 @@ #include -#include "../internal.h" +#include "../../internal.h" -#if !defined(OPENSSL_NO_ASM) && \ - (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ - defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) || \ - defined(OPENSSL_PPC64LE)) +#if (!defined(OPENSSL_NO_ASM) && \ + (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ + defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64))) || \ + defined(OPENSSL_PPC64LE) #define SHA1_ASM #endif @@ -82,15 +82,7 @@ int SHA1_Init(SHA_CTX *sha) { uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out) { SHA_CTX ctx; - static uint8_t buf[SHA_DIGEST_LENGTH]; - - /* TODO(fork): remove this static buffer. 
*/ - if (out == NULL) { - out = buf; - } - if (!SHA1_Init(&ctx)) { - return NULL; - } + SHA1_Init(&ctx); SHA1_Update(&ctx, data, len); SHA1_Final(out, &ctx); OPENSSL_cleanse(&ctx, sizeof(ctx)); @@ -139,11 +131,11 @@ void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num); #define K_40_59 0x8f1bbcdcUL #define K_60_79 0xca62c1d6UL -/* As pointed out by Wei Dai , F() below can be simplified - * to the code in F_00_19. Wei attributes these optimisations to Peter - * Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define - * F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another - * tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a */ +// As pointed out by Wei Dai , F() below can be simplified +// to the code in F_00_19. Wei attributes these optimisations to Peter +// Gutmann's SHS code, and he attributes it to Rich Schroeppel. #define +// F(x,y,z) (((x) & (y)) | ((~(x)) & (z))) I've just become aware of another +// tweak to be made, again from Wei Dai, in F_40_59, (x&a)|(y&a) -> (x|y)&a #define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d)) #define F_20_39(b, c, d) ((b) ^ (c) ^ (d)) #define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d))) @@ -353,3 +345,31 @@ static void sha1_block_data_order(uint32_t *state, const uint8_t *data, } } #endif + +#undef DATA_ORDER_IS_BIG_ENDIAN +#undef HASH_CTX +#undef HASH_CBLOCK +#undef HASH_MAKE_STRING +#undef HASH_UPDATE +#undef HASH_TRANSFORM +#undef HASH_FINAL +#undef HASH_BLOCK_DATA_ORDER +#undef ROTATE +#undef Xupdate +#undef K_00_19 +#undef K_20_39 +#undef K_40_59 +#undef K_60_79 +#undef F_00_19 +#undef F_20_39 +#undef F_40_59 +#undef F_60_79 +#undef BODY_00_15 +#undef BODY_16_19 +#undef BODY_20_31 +#undef BODY_32_39 +#undef BODY_40_59 +#undef BODY_60_79 +#undef X +#undef HOST_c2l +#undef HOST_l2c diff --git a/Sources/BoringSSL/crypto/sha/sha256.c b/Sources/BoringSSL/crypto/fipsmodule/sha/sha256.c similarity index 91% rename from Sources/BoringSSL/crypto/sha/sha256.c rename to Sources/BoringSSL/crypto/fipsmodule/sha/sha256.c index fb950d75f..6d709a67b 100644 --- a/Sources/BoringSSL/crypto/sha/sha256.c +++ b/Sources/BoringSSL/crypto/fipsmodule/sha/sha256.c @@ -60,7 +60,7 @@ #include -#include "../internal.h" +#include "../../internal.h" #if !defined(OPENSSL_NO_ASM) && \ @@ -99,12 +99,6 @@ int SHA256_Init(SHA256_CTX *sha) { uint8_t *SHA224(const uint8_t *data, size_t len, uint8_t *out) { SHA256_CTX ctx; - static uint8_t buf[SHA224_DIGEST_LENGTH]; - - /* TODO(fork): remove this static buffer. */ - if (out == NULL) { - out = buf; - } SHA224_Init(&ctx); SHA224_Update(&ctx, data, len); SHA224_Final(out, &ctx); @@ -114,12 +108,6 @@ uint8_t *SHA224(const uint8_t *data, size_t len, uint8_t *out) { uint8_t *SHA256(const uint8_t *data, size_t len, uint8_t *out) { SHA256_CTX ctx; - static uint8_t buf[SHA256_DIGEST_LENGTH]; - - /* TODO(fork): remove this static buffer. */ - if (out == NULL) { - out = buf; - } SHA256_Init(&ctx); SHA256_Update(&ctx, data, len); SHA256_Final(out, &ctx); @@ -140,15 +128,15 @@ int SHA224_Final(uint8_t *md, SHA256_CTX *ctx) { #define HASH_CTX SHA256_CTX #define HASH_CBLOCK 64 -/* Note that FIPS180-2 discusses "Truncation of the Hash Function Output." - * default: case below covers for it. It's not clear however if it's permitted - * to truncate to amount of bytes not divisible by 4. I bet not, but if it is, - * then default: case shall be extended. For reference. 
Idea behind separate - * cases for pre-defined lenghts is to let the compiler decide if it's - * appropriate to unroll small loops. - * - * TODO(davidben): The small |md_len| case is one of the few places a low-level - * hash 'final' function can fail. This should never happen. */ +// Note that FIPS180-2 discusses "Truncation of the Hash Function Output." +// default: case below covers for it. It's not clear however if it's permitted +// to truncate to amount of bytes not divisible by 4. I bet not, but if it is, +// then default: case shall be extended. For reference. Idea behind separate +// cases for pre-defined lenghts is to let the compiler decide if it's +// appropriate to unroll small loops. +// +// TODO(davidben): The small |md_len| case is one of the few places a low-level +// hash 'final' function can fail. This should never happen. #define HASH_MAKE_STRING(c, s) \ do { \ uint32_t ll; \ @@ -208,9 +196,9 @@ static const uint32_t K256[64] = { #define ROTATE(a, n) (((a) << (n)) | ((a) >> (32 - (n)))) -/* FIPS specification refers to right rotations, while our ROTATE macro - * is left one. This is why you might notice that rotation coefficients - * differ from those observed in FIPS document by 32-N... */ +// FIPS specification refers to right rotations, while our ROTATE macro +// is left one. This is why you might notice that rotation coefficients +// differ from those observed in FIPS document by 32-N... #define Sigma0(x) (ROTATE((x), 30) ^ ROTATE((x), 19) ^ ROTATE((x), 10)) #define Sigma1(x) (ROTATE((x), 26) ^ ROTATE((x), 21) ^ ROTATE((x), 7)) #define sigma0(x) (ROTATE((x), 25) ^ ROTATE((x), 14) ^ ((x) >> 3)) @@ -326,4 +314,24 @@ static void sha256_block_data_order(uint32_t *state, const uint8_t *data, } } -#endif /* SHA256_ASM */ +#endif // !SHA256_ASM + +#undef DATA_ORDER_IS_BIG_ENDIAN +#undef HASH_CTX +#undef HASH_CBLOCK +#undef HASH_MAKE_STRING +#undef HASH_UPDATE +#undef HASH_TRANSFORM +#undef HASH_FINAL +#undef HASH_BLOCK_DATA_ORDER +#undef ROTATE +#undef Sigma0 +#undef Sigma1 +#undef sigma0 +#undef sigma1 +#undef Ch +#undef Maj +#undef ROUND_00_15 +#undef ROUND_16_63 +#undef HOST_c2l +#undef HOST_l2c diff --git a/Sources/BoringSSL/crypto/sha/sha512.c b/Sources/BoringSSL/crypto/fipsmodule/sha/sha512.c similarity index 93% rename from Sources/BoringSSL/crypto/sha/sha512.c rename to Sources/BoringSSL/crypto/fipsmodule/sha/sha512.c index 876115082..3902f50e5 100644 --- a/Sources/BoringSSL/crypto/sha/sha512.c +++ b/Sources/BoringSSL/crypto/fipsmodule/sha/sha512.c @@ -60,20 +60,20 @@ #include -#include "../internal.h" +#include "../../internal.h" -/* IMPLEMENTATION NOTES. - * - * The 32-bit hash algorithms share a common byte-order neutral collector and - * padding function implementations that operate on unaligned data, - * ../md32_common.h. This SHA-512 implementation does not. Reasons - * [in reverse order] are: - * - * - It's the only 64-bit hash algorithm for the moment of this writing, - * there is no need for common collector/padding implementation [yet]; - * - By supporting only a transform function that operates on *aligned* data - * the collector/padding function is simpler and easier to optimize. */ +// IMPLEMENTATION NOTES. +// +// The 32-bit hash algorithms share a common byte-order neutral collector and +// padding function implementations that operate on unaligned data, +// ../md32_common.h. This SHA-512 implementation does not. 
Reasons +// [in reverse order] are: +// +// - It's the only 64-bit hash algorithm for the moment of this writing, +// there is no need for common collector/padding implementation [yet]; +// - By supporting only a transform function that operates on *aligned* data +// the collector/padding function is simpler and easier to optimize. #if !defined(OPENSSL_NO_ASM) && \ (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \ @@ -123,13 +123,6 @@ int SHA512_Init(SHA512_CTX *sha) { uint8_t *SHA384(const uint8_t *data, size_t len, uint8_t *out) { SHA512_CTX ctx; - static uint8_t buf[SHA384_DIGEST_LENGTH]; - - /* TODO(fork): remove this static buffer. */ - if (out == NULL) { - out = buf; - } - SHA384_Init(&ctx); SHA384_Update(&ctx, data, len); SHA384_Final(out, &ctx); @@ -139,12 +132,6 @@ uint8_t *SHA384(const uint8_t *data, size_t len, uint8_t *out) { uint8_t *SHA512(const uint8_t *data, size_t len, uint8_t *out) { SHA512_CTX ctx; - static uint8_t buf[SHA512_DIGEST_LENGTH]; - - /* TODO(fork): remove this static buffer. */ - if (out == NULL) { - out = buf; - } SHA512_Init(&ctx); SHA512_Update(&ctx, data, len); SHA512_Final(out, &ctx); @@ -240,7 +227,7 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) { uint8_t *p = (uint8_t *)sha->u.p; size_t n = sha->num; - p[n] = 0x80; /* There always is a room for one */ + p[n] = 0x80; // There always is a room for one n++; if (n > (sizeof(sha->u) - 16)) { OPENSSL_memset(p + n, 0, sizeof(sha->u) - n); @@ -269,13 +256,13 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) { sha512_block_data_order(sha->h, (uint64_t *)p, 1); if (md == NULL) { - /* TODO(davidben): This NULL check is absent in other low-level hash 'final' - * functions and is one of the few places one can fail. */ + // TODO(davidben): This NULL check is absent in other low-level hash 'final' + // functions and is one of the few places one can fail. return 0; } switch (sha->md_len) { - /* Let compiler decide if it's appropriate to unroll... */ + // Let compiler decide if it's appropriate to unroll... case SHA384_DIGEST_LENGTH: for (n = 0; n < SHA384_DIGEST_LENGTH / 8; n++) { uint64_t t = sha->h[n]; @@ -304,10 +291,10 @@ int SHA512_Final(uint8_t *md, SHA512_CTX *sha) { *(md++) = (uint8_t)(t); } break; - /* ... as well as make sure md_len is not abused. */ + // ... as well as make sure md_len is not abused. default: - /* TODO(davidben): This bad |md_len| case is one of the few places a - * low-level hash 'final' function can fail. This should never happen. */ + // TODO(davidben): This bad |md_len| case is one of the few places a + // low-level hash 'final' function can fail. This should never happen. return 0; } @@ -405,7 +392,7 @@ static const uint64_t K512[80] = { #endif #endif #elif defined(_MSC_VER) -#if defined(_WIN64) /* applies to both IA-64 and AMD64 */ +#if defined(_WIN64) // applies to both IA-64 and AMD64 #pragma intrinsic(_rotr64) #define ROTR(a, n) _rotr64((a), n) #endif @@ -445,10 +432,8 @@ static uint64_t __fastcall __pull64be(const void *x) { #if defined(__i386) || defined(__i386__) || defined(_M_IX86) -/* - * This code should give better results on 32-bit CPU with less than - * ~24 registers, both size and performance wise... - */ +// This code should give better results on 32-bit CPU with less than +// ~24 registers, both size and performance wise... 
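// The ROTR definitions above all compute an ordinary right rotation; a
// minimal portable sketch (illustrative only, assuming <stdint.h> and
// 0 < n < 64):
static inline uint64_t example_rotr64(uint64_t x, unsigned n) {
  // Right-rotating by |n| equals left-rotating by 64 - |n|, which is the same
  // translation the SHA-256 code above applies with 32 - N for its 32-bit
  // left-rotate macro.
  return (x >> n) | (x << (64 - n));
}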
static void sha512_block_data_order(uint64_t *state, const uint64_t *W, size_t num) { uint64_t A, E, T; @@ -606,4 +591,18 @@ static void sha512_block_data_order(uint64_t *state, const uint64_t *W, #endif -#endif /* SHA512_ASM */ +#endif // !SHA512_ASM + +#undef ROTR +#undef PULL64 +#undef B +#undef Sigma0 +#undef Sigma1 +#undef sigma0 +#undef sigma1 +#undef Ch +#undef Maj +#undef ROUND_00_15 +#undef ROUND_16_80 +#undef HOST_c2l +#undef HOST_l2c diff --git a/Sources/BoringSSL/crypto/hkdf/hkdf.c b/Sources/BoringSSL/crypto/hkdf/hkdf.c index ae43b69fe..23b60afe0 100644 --- a/Sources/BoringSSL/crypto/hkdf/hkdf.c +++ b/Sources/BoringSSL/crypto/hkdf/hkdf.c @@ -26,7 +26,7 @@ int HKDF(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len, const uint8_t *info, size_t info_len) { - /* https://tools.ietf.org/html/rfc5869#section-2 */ + // https://tools.ietf.org/html/rfc5869#section-2 uint8_t prk[EVP_MAX_MD_SIZE]; size_t prk_len; @@ -42,10 +42,10 @@ int HKDF(uint8_t *out_key, size_t out_len, const EVP_MD *digest, int HKDF_extract(uint8_t *out_key, size_t *out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len) { - /* https://tools.ietf.org/html/rfc5869#section-2.2 */ + // https://tools.ietf.org/html/rfc5869#section-2.2 - /* If salt is not given, HashLength zeros are used. However, HMAC does that - * internally already so we can ignore it.*/ + // If salt is not given, HashLength zeros are used. However, HMAC does that + // internally already so we can ignore it. unsigned len; if (HMAC(digest, salt, salt_len, secret, secret_len, out_key, &len) == NULL) { OPENSSL_PUT_ERROR(HKDF, ERR_R_HMAC_LIB); @@ -59,7 +59,7 @@ int HKDF_extract(uint8_t *out_key, size_t *out_len, const EVP_MD *digest, int HKDF_expand(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *prk, size_t prk_len, const uint8_t *info, size_t info_len) { - /* https://tools.ietf.org/html/rfc5869#section-2.3 */ + // https://tools.ietf.org/html/rfc5869#section-2.3 const size_t digest_len = EVP_MD_size(digest); uint8_t previous[EVP_MAX_MD_SIZE]; size_t n, done = 0; @@ -67,7 +67,7 @@ int HKDF_expand(uint8_t *out_key, size_t out_len, const EVP_MD *digest, int ret = 0; HMAC_CTX hmac; - /* Expand key material to desired length. */ + // Expand key material to desired length. n = (out_len + digest_len - 1) / digest_len; if (out_len + digest_len < out_len || n > 255) { OPENSSL_PUT_ERROR(HKDF, HKDF_R_OUTPUT_TOO_LARGE); diff --git a/Sources/BoringSSL/crypto/internal.h b/Sources/BoringSSL/crypto/internal.h index 272495676..76d39b742 100644 --- a/Sources/BoringSSL/crypto/internal.h +++ b/Sources/BoringSSL/crypto/internal.h @@ -110,18 +110,20 @@ #define OPENSSL_HEADER_CRYPTO_INTERNAL_H #include +#include #include +#include #include +#if !defined(__cplusplus) #if defined(_MSC_VER) -#if !defined(__cplusplus) || _MSC_VER < 1900 #define alignas(x) __declspec(align(x)) #define alignof __alignof -#endif #else #include #endif +#endif #if !defined(OPENSSL_NO_THREADS) && \ (!defined(OPENSSL_WINDOWS) || defined(__MINGW32__)) @@ -144,7 +146,7 @@ extern "C" { #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM) || \ defined(OPENSSL_AARCH64) || defined(OPENSSL_PPC64LE) -/* OPENSSL_cpuid_setup initializes the platform-specific feature cache. */ +// OPENSSL_cpuid_setup initializes the platform-specific feature cache. 
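// A minimal calling sketch for the HKDF() one-shot in the hkdf.c hunk above
// (hypothetical caller code; the output length and info string are
// illustrative), assuming <openssl/hkdf.h> and <openssl/digest.h>:
static int example_derive_key(uint8_t out[32], const uint8_t *secret,
                              size_t secret_len, const uint8_t *salt,
                              size_t salt_len) {
  static const uint8_t kInfo[] = "example context";
  // HKDF() runs extract (HMAC over |secret| keyed by |salt|) and then expand;
  // it fails if the requested output needs more than 255 digest-sized blocks
  // or if an underlying HMAC operation fails.
  return HKDF(out, 32, EVP_sha256(), secret, secret_len, salt, salt_len,
              kInfo, sizeof(kInfo) - 1);
}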
void OPENSSL_cpuid_setup(void); #endif @@ -156,160 +158,190 @@ typedef __uint128_t uint128_t; #define OPENSSL_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) -/* buffers_alias returns one if |a| and |b| alias and zero otherwise. */ +// buffers_alias returns one if |a| and |b| alias and zero otherwise. static inline int buffers_alias(const uint8_t *a, size_t a_len, const uint8_t *b, size_t b_len) { - /* Cast |a| and |b| to integers. In C, pointer comparisons between unrelated - * objects are undefined whereas pointer to integer conversions are merely - * implementation-defined. We assume the implementation defined it in a sane - * way. */ + // Cast |a| and |b| to integers. In C, pointer comparisons between unrelated + // objects are undefined whereas pointer to integer conversions are merely + // implementation-defined. We assume the implementation defined it in a sane + // way. uintptr_t a_u = (uintptr_t)a; uintptr_t b_u = (uintptr_t)b; return a_u + a_len > b_u && b_u + b_len > a_u; } -/* Constant-time utility functions. - * - * The following methods return a bitmask of all ones (0xff...f) for true and 0 - * for false. This is useful for choosing a value based on the result of a - * conditional in constant time. For example, - * - * if (a < b) { - * c = a; - * } else { - * c = b; - * } - * - * can be written as - * - * unsigned int lt = constant_time_lt(a, b); - * c = constant_time_select(lt, a, b); */ +// Constant-time utility functions. +// +// The following methods return a bitmask of all ones (0xff...f) for true and 0 +// for false. This is useful for choosing a value based on the result of a +// conditional in constant time. For example, +// +// if (a < b) { +// c = a; +// } else { +// c = b; +// } +// +// can be written as +// +// crypto_word_t lt = constant_time_lt_w(a, b); +// c = constant_time_select_w(lt, a, b); + +// crypto_word_t is the type that most constant-time functions use. Ideally we +// would like it to be |size_t|, but NaCl builds in 64-bit mode with 32-bit +// pointers, which means that |size_t| can be 32 bits when |BN_ULONG| is 64 +// bits. Since we want to be able to do constant-time operations on a +// |BN_ULONG|, |crypto_word_t| is defined as an unsigned value with the native +// word length. +#if defined(OPENSSL_64_BIT) +typedef uint64_t crypto_word_t; +#elif defined(OPENSSL_32_BIT) +typedef uint32_t crypto_word_t; +#else +#error "Must define either OPENSSL_32_BIT or OPENSSL_64_BIT" +#endif + +#define CONSTTIME_TRUE_W ~((crypto_word_t)0) +#define CONSTTIME_FALSE_W ((crypto_word_t)0) +#define CONSTTIME_TRUE_8 ((uint8_t)0xff) -/* constant_time_msb returns the given value with the MSB copied to all the - * other bits. */ -static inline unsigned int constant_time_msb(unsigned int a) { - return (unsigned int)((int)(a) >> (sizeof(int) * 8 - 1)); +#define CONSTTIME_TRUE_W ~((crypto_word_t)0) +#define CONSTTIME_FALSE_W ((crypto_word_t)0) +#define CONSTTIME_TRUE_8 ((uint8_t)0xff) +#define CONSTTIME_FALSE_8 ((uint8_t)0) + +// constant_time_msb_w returns the given value with the MSB copied to all the +// other bits. +static inline crypto_word_t constant_time_msb_w(crypto_word_t a) { + return 0u - (a >> (sizeof(a) * 8 - 1)); } -/* constant_time_lt returns 0xff..f if a < b and 0 otherwise. */ -static inline unsigned int constant_time_lt(unsigned int a, unsigned int b) { - /* Consider the two cases of the problem: - * msb(a) == msb(b): a < b iff the MSB of a - b is set. - * msb(a) != msb(b): a < b iff the MSB of b is set. 
- * - * If msb(a) == msb(b) then the following evaluates as: - * msb(a^((a^b)|((a-b)^a))) == - * msb(a^((a-b) ^ a)) == (because msb(a^b) == 0) - * msb(a^a^(a-b)) == (rearranging) - * msb(a-b) (because ∀x. x^x == 0) - * - * Else, if msb(a) != msb(b) then the following evaluates as: - * msb(a^((a^b)|((a-b)^a))) == - * msb(a^(𝟙 | ((a-b)^a))) == (because msb(a^b) == 1 and 𝟙 - * represents a value s.t. msb(𝟙) = 1) - * msb(a^𝟙) == (because ORing with 1 results in 1) - * msb(b) - * - * - * Here is an SMT-LIB verification of this formula: - * - * (define-fun lt ((a (_ BitVec 32)) (b (_ BitVec 32))) (_ BitVec 32) - * (bvxor a (bvor (bvxor a b) (bvxor (bvsub a b) a))) - * ) - * - * (declare-fun a () (_ BitVec 32)) - * (declare-fun b () (_ BitVec 32)) - * - * (assert (not (= (= #x00000001 (bvlshr (lt a b) #x0000001f)) (bvult a b)))) - * (check-sat) - * (get-model) - */ - return constant_time_msb(a^((a^b)|((a-b)^a))); +// constant_time_lt_w returns 0xff..f if a < b and 0 otherwise. +static inline crypto_word_t constant_time_lt_w(crypto_word_t a, + crypto_word_t b) { + // Consider the two cases of the problem: + // msb(a) == msb(b): a < b iff the MSB of a - b is set. + // msb(a) != msb(b): a < b iff the MSB of b is set. + // + // If msb(a) == msb(b) then the following evaluates as: + // msb(a^((a^b)|((a-b)^a))) == + // msb(a^((a-b) ^ a)) == (because msb(a^b) == 0) + // msb(a^a^(a-b)) == (rearranging) + // msb(a-b) (because ∀x. x^x == 0) + // + // Else, if msb(a) != msb(b) then the following evaluates as: + // msb(a^((a^b)|((a-b)^a))) == + // msb(a^(𝟙 | ((a-b)^a))) == (because msb(a^b) == 1 and 𝟙 + // represents a value s.t. msb(𝟙) = 1) + // msb(a^𝟙) == (because ORing with 1 results in 1) + // msb(b) + // + // + // Here is an SMT-LIB verification of this formula: + // + // (define-fun lt ((a (_ BitVec 32)) (b (_ BitVec 32))) (_ BitVec 32) + // (bvxor a (bvor (bvxor a b) (bvxor (bvsub a b) a))) + // ) + // + // (declare-fun a () (_ BitVec 32)) + // (declare-fun b () (_ BitVec 32)) + // + // (assert (not (= (= #x00000001 (bvlshr (lt a b) #x0000001f)) (bvult a b)))) + // (check-sat) + // (get-model) + return constant_time_msb_w(a^((a^b)|((a-b)^a))); } -/* constant_time_lt_8 acts like |constant_time_lt| but returns an 8-bit mask. */ -static inline uint8_t constant_time_lt_8(unsigned int a, unsigned int b) { - return (uint8_t)(constant_time_lt(a, b)); +// constant_time_lt_8 acts like |constant_time_lt_w| but returns an 8-bit +// mask. +static inline uint8_t constant_time_lt_8(crypto_word_t a, crypto_word_t b) { + return (uint8_t)(constant_time_lt_w(a, b)); } -/* constant_time_gt returns 0xff..f if a >= b and 0 otherwise. */ -static inline unsigned int constant_time_ge(unsigned int a, unsigned int b) { - return ~constant_time_lt(a, b); +// constant_time_ge_w returns 0xff..f if a >= b and 0 otherwise. +static inline crypto_word_t constant_time_ge_w(crypto_word_t a, + crypto_word_t b) { + return ~constant_time_lt_w(a, b); } -/* constant_time_ge_8 acts like |constant_time_ge| but returns an 8-bit mask. */ -static inline uint8_t constant_time_ge_8(unsigned int a, unsigned int b) { - return (uint8_t)(constant_time_ge(a, b)); +// constant_time_ge_8 acts like |constant_time_ge_w| but returns an 8-bit +// mask. +static inline uint8_t constant_time_ge_8(crypto_word_t a, crypto_word_t b) { + return (uint8_t)(constant_time_ge_w(a, b)); } -/* constant_time_is_zero returns 0xff..f if a == 0 and 0 otherwise. 
*/ -static inline unsigned int constant_time_is_zero(unsigned int a) { - /* Here is an SMT-LIB verification of this formula: - * - * (define-fun is_zero ((a (_ BitVec 32))) (_ BitVec 32) - * (bvand (bvnot a) (bvsub a #x00000001)) - * ) - * - * (declare-fun a () (_ BitVec 32)) - * - * (assert (not (= (= #x00000001 (bvlshr (is_zero a) #x0000001f)) (= a #x00000000)))) - * (check-sat) - * (get-model) - */ - return constant_time_msb(~a & (a - 1)); +// constant_time_is_zero returns 0xff..f if a == 0 and 0 otherwise. +static inline crypto_word_t constant_time_is_zero_w(crypto_word_t a) { + // Here is an SMT-LIB verification of this formula: + // + // (define-fun is_zero ((a (_ BitVec 32))) (_ BitVec 32) + // (bvand (bvnot a) (bvsub a #x00000001)) + // ) + // + // (declare-fun a () (_ BitVec 32)) + // + // (assert (not (= (= #x00000001 (bvlshr (is_zero a) #x0000001f)) (= a #x00000000)))) + // (check-sat) + // (get-model) + return constant_time_msb_w(~a & (a - 1)); } -/* constant_time_is_zero_8 acts like constant_time_is_zero but returns an 8-bit - * mask. */ -static inline uint8_t constant_time_is_zero_8(unsigned int a) { - return (uint8_t)(constant_time_is_zero(a)); +// constant_time_is_zero_8 acts like |constant_time_is_zero_w| but returns an +// 8-bit mask. +static inline uint8_t constant_time_is_zero_8(crypto_word_t a) { + return (uint8_t)(constant_time_is_zero_w(a)); } -/* constant_time_eq returns 0xff..f if a == b and 0 otherwise. */ -static inline unsigned int constant_time_eq(unsigned int a, unsigned int b) { - return constant_time_is_zero(a ^ b); +// constant_time_eq_w returns 0xff..f if a == b and 0 otherwise. +static inline crypto_word_t constant_time_eq_w(crypto_word_t a, + crypto_word_t b) { + return constant_time_is_zero_w(a ^ b); } -/* constant_time_eq_8 acts like |constant_time_eq| but returns an 8-bit mask. */ -static inline uint8_t constant_time_eq_8(unsigned int a, unsigned int b) { - return (uint8_t)(constant_time_eq(a, b)); +// constant_time_eq_8 acts like |constant_time_eq_w| but returns an 8-bit +// mask. +static inline uint8_t constant_time_eq_8(crypto_word_t a, crypto_word_t b) { + return (uint8_t)(constant_time_eq_w(a, b)); } -/* constant_time_eq_int acts like |constant_time_eq| but works on int values. */ -static inline unsigned int constant_time_eq_int(int a, int b) { - return constant_time_eq((unsigned)(a), (unsigned)(b)); +// constant_time_eq_int acts like |constant_time_eq_w| but works on int +// values. +static inline crypto_word_t constant_time_eq_int(int a, int b) { + return constant_time_eq_w((crypto_word_t)(a), (crypto_word_t)(b)); } -/* constant_time_eq_int_8 acts like |constant_time_eq_int| but returns an 8-bit - * mask. */ +// constant_time_eq_int_8 acts like |constant_time_eq_int| but returns an 8-bit +// mask. static inline uint8_t constant_time_eq_int_8(int a, int b) { - return constant_time_eq_8((unsigned)(a), (unsigned)(b)); + return constant_time_eq_8((crypto_word_t)(a), (crypto_word_t)(b)); } -/* constant_time_select returns (mask & a) | (~mask & b). When |mask| is all 1s - * or all 0s (as returned by the methods above), the select methods return - * either |a| (if |mask| is nonzero) or |b| (if |mask| is zero). */ -static inline unsigned int constant_time_select(unsigned int mask, - unsigned int a, unsigned int b) { +// constant_time_select_w returns (mask & a) | (~mask & b). When |mask| is all +// 1s or all 0s (as returned by the methods above), the select methods return +// either |a| (if |mask| is nonzero) or |b| (if |mask| is zero). 
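// For instance, two words can be swapped without branching on a secret
// condition (illustrative sketch, assuming |swap| is CONSTTIME_TRUE_W or
// CONSTTIME_FALSE_W):
//
//   crypto_word_t t = swap & (a ^ b);
//   a ^= t;
//   b ^= t;
//
// which has the same effect as assigning constant_time_select_w(swap, b, a)
// to |a| and constant_time_select_w(swap, a, b) to |b|.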
+static inline crypto_word_t constant_time_select_w(crypto_word_t mask, + crypto_word_t a, + crypto_word_t b) { return (mask & a) | (~mask & b); } -/* constant_time_select_8 acts like |constant_time_select| but operates on - * 8-bit values. */ +// constant_time_select_8 acts like |constant_time_select| but operates on +// 8-bit values. static inline uint8_t constant_time_select_8(uint8_t mask, uint8_t a, uint8_t b) { - return (uint8_t)(constant_time_select(mask, a, b)); + return (uint8_t)(constant_time_select_w(mask, a, b)); } -/* constant_time_select_int acts like |constant_time_select| but operates on - * ints. */ -static inline int constant_time_select_int(unsigned int mask, int a, int b) { - return (int)(constant_time_select(mask, (unsigned)(a), (unsigned)(b))); +// constant_time_select_int acts like |constant_time_select| but operates on +// ints. +static inline int constant_time_select_int(crypto_word_t mask, int a, int b) { + return (int)(constant_time_select_w(mask, (crypto_word_t)(a), + (crypto_word_t)(b))); } -/* Thread-safe initialisation. */ +// Thread-safe initialisation. #if defined(OPENSSL_NO_THREADS) typedef uint32_t CRYPTO_once_t; @@ -324,52 +356,52 @@ typedef pthread_once_t CRYPTO_once_t; #error "Unknown threading library" #endif -/* CRYPTO_once calls |init| exactly once per process. This is thread-safe: if - * concurrent threads call |CRYPTO_once| with the same |CRYPTO_once_t| argument - * then they will block until |init| completes, but |init| will have only been - * called once. - * - * The |once| argument must be a |CRYPTO_once_t| that has been initialised with - * the value |CRYPTO_ONCE_INIT|. */ +// CRYPTO_once calls |init| exactly once per process. This is thread-safe: if +// concurrent threads call |CRYPTO_once| with the same |CRYPTO_once_t| argument +// then they will block until |init| completes, but |init| will have only been +// called once. +// +// The |once| argument must be a |CRYPTO_once_t| that has been initialised with +// the value |CRYPTO_ONCE_INIT|. OPENSSL_EXPORT void CRYPTO_once(CRYPTO_once_t *once, void (*init)(void)); -/* Reference counting. */ +// Reference counting. -/* CRYPTO_REFCOUNT_MAX is the value at which the reference count saturates. */ +// CRYPTO_REFCOUNT_MAX is the value at which the reference count saturates. #define CRYPTO_REFCOUNT_MAX 0xffffffff -/* CRYPTO_refcount_inc atomically increments the value at |*count| unless the - * value would overflow. It's safe for multiple threads to concurrently call - * this or |CRYPTO_refcount_dec_and_test_zero| on the same - * |CRYPTO_refcount_t|. */ +// CRYPTO_refcount_inc atomically increments the value at |*count| unless the +// value would overflow. It's safe for multiple threads to concurrently call +// this or |CRYPTO_refcount_dec_and_test_zero| on the same +// |CRYPTO_refcount_t|. OPENSSL_EXPORT void CRYPTO_refcount_inc(CRYPTO_refcount_t *count); -/* CRYPTO_refcount_dec_and_test_zero tests the value at |*count|: - * if it's zero, it crashes the address space. - * if it's the maximum value, it returns zero. - * otherwise, it atomically decrements it and returns one iff the resulting - * value is zero. - * - * It's safe for multiple threads to concurrently call this or - * |CRYPTO_refcount_inc| on the same |CRYPTO_refcount_t|. */ +// CRYPTO_refcount_dec_and_test_zero tests the value at |*count|: +// if it's zero, it crashes the address space. +// if it's the maximum value, it returns zero. +// otherwise, it atomically decrements it and returns one iff the resulting +// value is zero. 
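// A typical caller pattern (hypothetical type |FOO| with a CRYPTO_refcount_t
// |references| field initialised to 1; illustrative only):
//
//   void FOO_up_ref(FOO *foo) { CRYPTO_refcount_inc(&foo->references); }
//
//   void FOO_free(FOO *foo) {
//     if (foo == NULL ||
//         !CRYPTO_refcount_dec_and_test_zero(&foo->references)) {
//       return;
//     }
//     OPENSSL_free(foo);
//   }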
+// +// It's safe for multiple threads to concurrently call this or +// |CRYPTO_refcount_inc| on the same |CRYPTO_refcount_t|. OPENSSL_EXPORT int CRYPTO_refcount_dec_and_test_zero(CRYPTO_refcount_t *count); -/* Locks. - * - * Two types of locks are defined: |CRYPTO_MUTEX|, which can be used in - * structures as normal, and |struct CRYPTO_STATIC_MUTEX|, which can be used as - * a global lock. A global lock must be initialised to the value - * |CRYPTO_STATIC_MUTEX_INIT|. - * - * |CRYPTO_MUTEX| can appear in public structures and so is defined in - * thread.h as a structure large enough to fit the real type. The global lock is - * a different type so it may be initialized with platform initializer macros.*/ +// Locks. +// +// Two types of locks are defined: |CRYPTO_MUTEX|, which can be used in +// structures as normal, and |struct CRYPTO_STATIC_MUTEX|, which can be used as +// a global lock. A global lock must be initialised to the value +// |CRYPTO_STATIC_MUTEX_INIT|. +// +// |CRYPTO_MUTEX| can appear in public structures and so is defined in +// thread.h as a structure large enough to fit the real type. The global lock is +// a different type so it may be initialized with platform initializer macros. #if defined(OPENSSL_NO_THREADS) struct CRYPTO_STATIC_MUTEX { - char padding; /* Empty structs have different sizes in C and C++. */ + char padding; // Empty structs have different sizes in C and C++. }; #define CRYPTO_STATIC_MUTEX_INIT { 0 } #elif defined(OPENSSL_WINDOWS_THREADS) @@ -386,101 +418,138 @@ struct CRYPTO_STATIC_MUTEX { #error "Unknown threading library" #endif -/* CRYPTO_MUTEX_init initialises |lock|. If |lock| is a static variable, use a - * |CRYPTO_STATIC_MUTEX|. */ +// CRYPTO_MUTEX_init initialises |lock|. If |lock| is a static variable, use a +// |CRYPTO_STATIC_MUTEX|. OPENSSL_EXPORT void CRYPTO_MUTEX_init(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_lock_read locks |lock| such that other threads may also have a - * read lock, but none may have a write lock. */ +// CRYPTO_MUTEX_lock_read locks |lock| such that other threads may also have a +// read lock, but none may have a write lock. OPENSSL_EXPORT void CRYPTO_MUTEX_lock_read(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_lock_write locks |lock| such that no other thread has any type - * of lock on it. */ +// CRYPTO_MUTEX_lock_write locks |lock| such that no other thread has any type +// of lock on it. OPENSSL_EXPORT void CRYPTO_MUTEX_lock_write(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_unlock_read unlocks |lock| for reading. */ +// CRYPTO_MUTEX_unlock_read unlocks |lock| for reading. OPENSSL_EXPORT void CRYPTO_MUTEX_unlock_read(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_unlock_write unlocks |lock| for writing. */ +// CRYPTO_MUTEX_unlock_write unlocks |lock| for writing. OPENSSL_EXPORT void CRYPTO_MUTEX_unlock_write(CRYPTO_MUTEX *lock); -/* CRYPTO_MUTEX_cleanup releases all resources held by |lock|. */ +// CRYPTO_MUTEX_cleanup releases all resources held by |lock|. OPENSSL_EXPORT void CRYPTO_MUTEX_cleanup(CRYPTO_MUTEX *lock); -/* CRYPTO_STATIC_MUTEX_lock_read locks |lock| such that other threads may also - * have a read lock, but none may have a write lock. The |lock| variable does - * not need to be initialised by any function, but must have been statically - * initialised with |CRYPTO_STATIC_MUTEX_INIT|. */ +// CRYPTO_STATIC_MUTEX_lock_read locks |lock| such that other threads may also +// have a read lock, but none may have a write lock. 
The |lock| variable does +// not need to be initialised by any function, but must have been statically +// initialised with |CRYPTO_STATIC_MUTEX_INIT|. OPENSSL_EXPORT void CRYPTO_STATIC_MUTEX_lock_read( struct CRYPTO_STATIC_MUTEX *lock); -/* CRYPTO_STATIC_MUTEX_lock_write locks |lock| such that no other thread has - * any type of lock on it. The |lock| variable does not need to be initialised - * by any function, but must have been statically initialised with - * |CRYPTO_STATIC_MUTEX_INIT|. */ +// CRYPTO_STATIC_MUTEX_lock_write locks |lock| such that no other thread has +// any type of lock on it. The |lock| variable does not need to be initialised +// by any function, but must have been statically initialised with +// |CRYPTO_STATIC_MUTEX_INIT|. OPENSSL_EXPORT void CRYPTO_STATIC_MUTEX_lock_write( struct CRYPTO_STATIC_MUTEX *lock); -/* CRYPTO_STATIC_MUTEX_unlock_read unlocks |lock| for reading. */ +// CRYPTO_STATIC_MUTEX_unlock_read unlocks |lock| for reading. OPENSSL_EXPORT void CRYPTO_STATIC_MUTEX_unlock_read( struct CRYPTO_STATIC_MUTEX *lock); -/* CRYPTO_STATIC_MUTEX_unlock_write unlocks |lock| for writing. */ +// CRYPTO_STATIC_MUTEX_unlock_write unlocks |lock| for writing. OPENSSL_EXPORT void CRYPTO_STATIC_MUTEX_unlock_write( struct CRYPTO_STATIC_MUTEX *lock); +#if defined(__cplusplus) +extern "C++" { + +namespace bssl { + +namespace internal { -/* Thread local storage. */ +// MutexLockBase is a RAII helper for CRYPTO_MUTEX locking. +template +class MutexLockBase { + public: + explicit MutexLockBase(CRYPTO_MUTEX *mu) : mu_(mu) { + assert(mu_ != nullptr); + LockFunc(mu_); + } + ~MutexLockBase() { ReleaseFunc(mu_); } + MutexLockBase(const MutexLockBase &) = delete; + MutexLockBase &operator=(const MutexLockBase &) = + delete; + + private: + CRYPTO_MUTEX *const mu_; +}; + +} // namespace internal + +using MutexWriteLock = + internal::MutexLockBase; +using MutexReadLock = + internal::MutexLockBase; + +} // namespace bssl + +} // extern "C++" +#endif // defined(__cplusplus) + + +// Thread local storage. -/* thread_local_data_t enumerates the types of thread-local data that can be - * stored. */ +// thread_local_data_t enumerates the types of thread-local data that can be +// stored. typedef enum { OPENSSL_THREAD_LOCAL_ERR = 0, OPENSSL_THREAD_LOCAL_RAND, - OPENSSL_THREAD_LOCAL_URANDOM_BUF, OPENSSL_THREAD_LOCAL_TEST, NUM_OPENSSL_THREAD_LOCALS, } thread_local_data_t; -/* thread_local_destructor_t is the type of a destructor function that will be - * called when a thread exits and its thread-local storage needs to be freed. */ +// thread_local_destructor_t is the type of a destructor function that will be +// called when a thread exits and its thread-local storage needs to be freed. typedef void (*thread_local_destructor_t)(void *); -/* CRYPTO_get_thread_local gets the pointer value that is stored for the - * current thread for the given index, or NULL if none has been set. */ +// CRYPTO_get_thread_local gets the pointer value that is stored for the +// current thread for the given index, or NULL if none has been set. OPENSSL_EXPORT void *CRYPTO_get_thread_local(thread_local_data_t value); -/* CRYPTO_set_thread_local sets a pointer value for the current thread at the - * given index. This function should only be called once per thread for a given - * |index|: rather than update the pointer value itself, update the data that - * is pointed to. - * - * The destructor function will be called when a thread exits to free this - * thread-local data. 
All calls to |CRYPTO_set_thread_local| with the same - * |index| should have the same |destructor| argument. The destructor may be - * called with a NULL argument if a thread that never set a thread-local - * pointer for |index|, exits. The destructor may be called concurrently with - * different arguments. - * - * This function returns one on success or zero on error. If it returns zero - * then |destructor| has been called with |value| already. */ +// CRYPTO_set_thread_local sets a pointer value for the current thread at the +// given index. This function should only be called once per thread for a given +// |index|: rather than update the pointer value itself, update the data that +// is pointed to. +// +// The destructor function will be called when a thread exits to free this +// thread-local data. All calls to |CRYPTO_set_thread_local| with the same +// |index| should have the same |destructor| argument. The destructor may be +// called with a NULL argument if a thread that never set a thread-local +// pointer for |index|, exits. The destructor may be called concurrently with +// different arguments. +// +// This function returns one on success or zero on error. If it returns zero +// then |destructor| has been called with |value| already. OPENSSL_EXPORT int CRYPTO_set_thread_local( thread_local_data_t index, void *value, thread_local_destructor_t destructor); -/* ex_data */ +// ex_data typedef struct crypto_ex_data_func_st CRYPTO_EX_DATA_FUNCS; -/* CRYPTO_EX_DATA_CLASS tracks the ex_indices registered for a type which - * supports ex_data. It should defined as a static global within the module - * which defines that type. */ +DECLARE_STACK_OF(CRYPTO_EX_DATA_FUNCS) + +// CRYPTO_EX_DATA_CLASS tracks the ex_indices registered for a type which +// supports ex_data. It should defined as a static global within the module +// which defines that type. typedef struct { struct CRYPTO_STATIC_MUTEX lock; STACK_OF(CRYPTO_EX_DATA_FUNCS) *meth; - /* num_reserved is one if the ex_data index zero is reserved for legacy - * |TYPE_get_app_data| functions. */ + // num_reserved is one if the ex_data index zero is reserved for legacy + // |TYPE_get_app_data| functions. uint8_t num_reserved; } CRYPTO_EX_DATA_CLASS; @@ -488,54 +557,47 @@ typedef struct { #define CRYPTO_EX_DATA_CLASS_INIT_WITH_APP_DATA \ {CRYPTO_STATIC_MUTEX_INIT, NULL, 1} -/* CRYPTO_get_ex_new_index allocates a new index for |ex_data_class| and writes - * it to |*out_index|. Each class of object should provide a wrapper function - * that uses the correct |CRYPTO_EX_DATA_CLASS|. It returns one on success and - * zero otherwise. */ +// CRYPTO_get_ex_new_index allocates a new index for |ex_data_class| and writes +// it to |*out_index|. Each class of object should provide a wrapper function +// that uses the correct |CRYPTO_EX_DATA_CLASS|. It returns one on success and +// zero otherwise. OPENSSL_EXPORT int CRYPTO_get_ex_new_index(CRYPTO_EX_DATA_CLASS *ex_data_class, int *out_index, long argl, - void *argp, CRYPTO_EX_dup *dup_func, + void *argp, CRYPTO_EX_free *free_func); -/* CRYPTO_set_ex_data sets an extra data pointer on a given object. Each class - * of object should provide a wrapper function. */ +// CRYPTO_set_ex_data sets an extra data pointer on a given object. Each class +// of object should provide a wrapper function. OPENSSL_EXPORT int CRYPTO_set_ex_data(CRYPTO_EX_DATA *ad, int index, void *val); -/* CRYPTO_get_ex_data returns an extra data pointer for a given object, or NULL - * if no such index exists. 
Each class of object should provide a wrapper - * function. */ +// CRYPTO_get_ex_data returns an extra data pointer for a given object, or NULL +// if no such index exists. Each class of object should provide a wrapper +// function. OPENSSL_EXPORT void *CRYPTO_get_ex_data(const CRYPTO_EX_DATA *ad, int index); -/* CRYPTO_new_ex_data initialises a newly allocated |CRYPTO_EX_DATA|. */ +// CRYPTO_new_ex_data initialises a newly allocated |CRYPTO_EX_DATA|. OPENSSL_EXPORT void CRYPTO_new_ex_data(CRYPTO_EX_DATA *ad); -/* CRYPTO_dup_ex_data duplicates |from| into a freshly allocated - * |CRYPTO_EX_DATA|, |to|. Both of which are inside objects of the given - * class. It returns one on success and zero otherwise. */ -OPENSSL_EXPORT int CRYPTO_dup_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, - CRYPTO_EX_DATA *to, - const CRYPTO_EX_DATA *from); - -/* CRYPTO_free_ex_data frees |ad|, which is embedded inside |obj|, which is an - * object of the given class. */ +// CRYPTO_free_ex_data frees |ad|, which is embedded inside |obj|, which is an +// object of the given class. OPENSSL_EXPORT void CRYPTO_free_ex_data(CRYPTO_EX_DATA_CLASS *ex_data_class, void *obj, CRYPTO_EX_DATA *ad); -/* Language bug workarounds. - * - * Most C standard library functions are undefined if passed NULL, even when the - * corresponding length is zero. This gives them (and, in turn, all functions - * which call them) surprising behavior on empty arrays. Some compilers will - * miscompile code due to this rule. See also - * https://www.imperialviolet.org/2016/06/26/nonnull.html - * - * These wrapper functions behave the same as the corresponding C standard - * functions, but behave as expected when passed NULL if the length is zero. - * - * Note |OPENSSL_memcmp| is a different function from |CRYPTO_memcmp|. */ - -/* C++ defines |memchr| as a const-correct overload. */ +// Language bug workarounds. +// +// Most C standard library functions are undefined if passed NULL, even when the +// corresponding length is zero. This gives them (and, in turn, all functions +// which call them) surprising behavior on empty arrays. Some compilers will +// miscompile code due to this rule. See also +// https://www.imperialviolet.org/2016/06/26/nonnull.html +// +// These wrapper functions behave the same as the corresponding C standard +// functions, but behave as expected when passed NULL if the length is zero. +// +// Note |OPENSSL_memcmp| is a different function from |CRYPTO_memcmp|. + +// C++ defines |memchr| as a const-correct overload. #if defined(__cplusplus) extern "C++" { @@ -555,8 +617,8 @@ static inline void *OPENSSL_memchr(void *s, int c, size_t n) { return memchr(s, c, n); } -} /* extern "C++" */ -#else /* __cplusplus */ +} // extern "C++" +#else // __cplusplus static inline void *OPENSSL_memchr(const void *s, int c, size_t n) { if (n == 0) { @@ -566,7 +628,7 @@ static inline void *OPENSSL_memchr(const void *s, int c, size_t n) { return memchr(s, c, n); } -#endif /* __cplusplus */ +#endif // __cplusplus static inline int OPENSSL_memcmp(const void *s1, const void *s2, size_t n) { if (n == 0) { @@ -600,9 +662,15 @@ static inline void *OPENSSL_memset(void *dst, int c, size_t n) { return memset(dst, c, n); } +#if defined(BORINGSSL_FIPS) +// BORINGSSL_FIPS_abort is called when a FIPS power-on or continuous test +// fails. It prevents any further cryptographic operations by the current +// process. 
+void BORINGSSL_FIPS_abort(void) __attribute__((noreturn)); +#endif #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CRYPTO_INTERNAL_H */ +#endif // OPENSSL_HEADER_CRYPTO_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/lhash/lhash.c b/Sources/BoringSSL/crypto/lhash/lhash.c index 27960d98b..7bfd2897f 100644 --- a/Sources/BoringSSL/crypto/lhash/lhash.c +++ b/Sources/BoringSSL/crypto/lhash/lhash.c @@ -65,14 +65,33 @@ #include "../internal.h" -/* kMinNumBuckets is the minimum size of the buckets array in an |_LHASH|. */ +// kMinNumBuckets is the minimum size of the buckets array in an |_LHASH|. static const size_t kMinNumBuckets = 16; -/* kMaxAverageChainLength contains the maximum, average chain length. When the - * average chain length exceeds this value, the hash table will be resized. */ +// kMaxAverageChainLength contains the maximum, average chain length. When the +// average chain length exceeds this value, the hash table will be resized. static const size_t kMaxAverageChainLength = 2; static const size_t kMinAverageChainLength = 1; +struct lhash_st { + // num_items contains the total number of items in the hash table. + size_t num_items; + // buckets is an array of |num_buckets| pointers. Each points to the head of + // a chain of LHASH_ITEM objects that have the same hash value, mod + // |num_buckets|. + LHASH_ITEM **buckets; + // num_buckets contains the length of |buckets|. This value is always >= + // kMinNumBuckets. + size_t num_buckets; + // callback_depth contains the current depth of |lh_doall| or |lh_doall_arg| + // calls. If non-zero then this suppresses resizing of the |buckets| array, + // which would otherwise disrupt the iteration. + unsigned callback_depth; + + lhash_cmp_func comp; + lhash_hash_func hash; +}; + _LHASH *lh_new(lhash_hash_func hash, lhash_cmp_func comp) { _LHASH *ret = OPENSSL_malloc(sizeof(_LHASH)); if (ret == NULL) { @@ -112,13 +131,13 @@ void lh_free(_LHASH *lh) { size_t lh_num_items(const _LHASH *lh) { return lh->num_items; } -/* get_next_ptr_and_hash returns a pointer to the pointer that points to the - * item equal to |data|. In other words, it searches for an item equal to |data| - * and, if it's at the start of a chain, then it returns a pointer to an - * element of |lh->buckets|, otherwise it returns a pointer to the |next| - * element of the previous item in the chain. If an element equal to |data| is - * not found, it returns a pointer that points to a NULL pointer. If |out_hash| - * is not NULL, then it also puts the hash value of |data| in |*out_hash|. */ +// get_next_ptr_and_hash returns a pointer to the pointer that points to the +// item equal to |data|. In other words, it searches for an item equal to |data| +// and, if it's at the start of a chain, then it returns a pointer to an +// element of |lh->buckets|, otherwise it returns a pointer to the |next| +// element of the previous item in the chain. If an element equal to |data| is +// not found, it returns a pointer that points to a NULL pointer. If |out_hash| +// is not NULL, then it also puts the hash value of |data| in |*out_hash|. 
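// The pointer-to-pointer idiom described above lets lookup, insertion and
// deletion treat a bucket head and an interior |next| field uniformly; the
// same idea on a plain singly-linked list (illustrative sketch, |NODE|,
// |head| and |matches| are hypothetical):
//
//   NODE **where = &head;
//   while (*where != NULL && !matches(*where, data)) {
//     where = &(*where)->next;
//   }
//   /* insert: new_node->next = *where; *where = new_node; */
//   /* delete: *where = (*where)->next; (free the unlinked node as needed) */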
static LHASH_ITEM **get_next_ptr_and_hash(const _LHASH *lh, uint32_t *out_hash, const void *data) { const uint32_t hash = lh->hash(data); @@ -151,9 +170,9 @@ void *lh_retrieve(const _LHASH *lh, const void *data) { return (*next_ptr)->data; } -/* lh_rebucket allocates a new array of |new_num_buckets| pointers and - * redistributes the existing items into it before making it |lh->buckets| and - * freeing the old array. */ +// lh_rebucket allocates a new array of |new_num_buckets| pointers and +// redistributes the existing items into it before making it |lh->buckets| and +// freeing the old array. static void lh_rebucket(_LHASH *lh, const size_t new_num_buckets) { LHASH_ITEM **new_buckets, *cur, *next; size_t i, alloc_size; @@ -184,12 +203,12 @@ static void lh_rebucket(_LHASH *lh, const size_t new_num_buckets) { lh->buckets = new_buckets; } -/* lh_maybe_resize resizes the |buckets| array if needed. */ +// lh_maybe_resize resizes the |buckets| array if needed. static void lh_maybe_resize(_LHASH *lh) { size_t avg_chain_length; if (lh->callback_depth > 0) { - /* Don't resize the hash if we are currently iterating over it. */ + // Don't resize the hash if we are currently iterating over it. return; } @@ -223,14 +242,14 @@ int lh_insert(_LHASH *lh, void **old_data, void *data) { if (*next_ptr != NULL) { - /* An element equal to |data| already exists in the hash table. It will be - * replaced. */ + // An element equal to |data| already exists in the hash table. It will be + // replaced. *old_data = (*next_ptr)->data; (*next_ptr)->data = data; return 1; } - /* An element equal to |data| doesn't exist in the hash table yet. */ + // An element equal to |data| doesn't exist in the hash table yet. item = OPENSSL_malloc(sizeof(LHASH_ITEM)); if (item == NULL) { return 0; @@ -252,7 +271,7 @@ void *lh_delete(_LHASH *lh, const void *data) { next_ptr = get_next_ptr_and_hash(lh, NULL, data); if (*next_ptr == NULL) { - /* No such element. */ + // No such element. return NULL; } @@ -274,7 +293,7 @@ static void lh_doall_internal(_LHASH *lh, void (*no_arg_func)(void *), } if (lh->callback_depth < UINT_MAX) { - /* |callback_depth| is a saturating counter. */ + // |callback_depth| is a saturating counter. lh->callback_depth++; } @@ -294,9 +313,9 @@ static void lh_doall_internal(_LHASH *lh, void (*no_arg_func)(void *), lh->callback_depth--; } - /* The callback may have added or removed elements and the non-zero value of - * |callback_depth| will have suppressed any resizing. Thus any needed - * resizing is done here. */ + // The callback may have added or removed elements and the non-zero value of + // |callback_depth| will have suppressed any resizing. Thus any needed + // resizing is done here. lh_maybe_resize(lh); } @@ -309,28 +328,9 @@ void lh_doall_arg(_LHASH *lh, void (*func)(void *, void *), void *arg) { } uint32_t lh_strhash(const char *c) { - /* The following hash seems to work very well on normal text strings - * no collisions on /usr/dict/words and it distributes on %2^n quite - * well, not as good as MD5, but still good. 
*/ - unsigned long ret = 0; - long n; - unsigned long v; - int r; - - if ((c == NULL) || (*c == '\0')) { - return (ret); - } - - n = 0x100; - while (*c) { - v = n | (*c); - n += 0x100; - r = (int)((v >> 2) ^ v) & 0x0f; - ret = (ret << r) | (ret >> (32 - r)); - ret &= 0xFFFFFFFFL; - ret ^= v * v; - c++; + if (c == NULL) { + return 0; } - return ((ret >> 16) ^ ret); + return OPENSSL_hash32(c, strlen(c)); } diff --git a/Sources/BoringSSL/crypto/mem.c b/Sources/BoringSSL/crypto/mem.c index 390ca2e73..67f74b794 100644 --- a/Sources/BoringSSL/crypto/mem.c +++ b/Sources/BoringSSL/crypto/mem.c @@ -54,10 +54,6 @@ * copied and put under another distribution licence * [including the GNU Public Licence.] */ -#if !defined(_POSIX_C_SOURCE) -#define _POSIX_C_SOURCE 201410L /* needed for strdup, snprintf, vprintf etc */ -#endif - #include #include @@ -76,29 +72,53 @@ OPENSSL_MSVC_PRAGMA(warning(pop)) #include "internal.h" -void *OPENSSL_realloc_clean(void *ptr, size_t old_size, size_t new_size) { +#define OPENSSL_MALLOC_PREFIX 8 + + +void *OPENSSL_malloc(size_t size) { + void *ptr = malloc(size + OPENSSL_MALLOC_PREFIX); if (ptr == NULL) { - return OPENSSL_malloc(new_size); + return NULL; } - if (new_size == 0) { - return NULL; + *(size_t *)ptr = size; + + return ((uint8_t *)ptr) + OPENSSL_MALLOC_PREFIX; +} + +void OPENSSL_free(void *orig_ptr) { + if (orig_ptr == NULL) { + return; } - /* We don't support shrinking the buffer. Note the memcpy that copies - * |old_size| bytes to the new buffer, below. */ - if (new_size < old_size) { - return NULL; + void *ptr = ((uint8_t *)orig_ptr) - OPENSSL_MALLOC_PREFIX; + + size_t size = *(size_t *)ptr; + OPENSSL_cleanse(ptr, size + OPENSSL_MALLOC_PREFIX); + free(ptr); +} + +void *OPENSSL_realloc(void *orig_ptr, size_t new_size) { + if (orig_ptr == NULL) { + return OPENSSL_malloc(new_size); } + void *ptr = ((uint8_t *)orig_ptr) - OPENSSL_MALLOC_PREFIX; + size_t old_size = *(size_t *)ptr; + void *ret = OPENSSL_malloc(new_size); if (ret == NULL) { return NULL; } - OPENSSL_memcpy(ret, ptr, old_size); - OPENSSL_cleanse(ptr, old_size); - OPENSSL_free(ptr); + size_t to_copy = new_size; + if (old_size < to_copy) { + to_copy = old_size; + } + + memcpy(ret, orig_ptr, to_copy); + OPENSSL_free(orig_ptr); + return ret; } @@ -114,7 +134,7 @@ void OPENSSL_cleanse(void *ptr, size_t len) { detect memset_s, it would be better to use that. */ __asm__ __volatile__("" : : "r"(ptr) : "memory"); #endif -#endif /* !OPENSSL_NO_ASM */ +#endif // !OPENSSL_NO_ASM } int CRYPTO_memcmp(const void *in_a, const void *in_b, size_t len) { @@ -130,7 +150,7 @@ int CRYPTO_memcmp(const void *in_a, const void *in_b, size_t len) { } uint32_t OPENSSL_hash32(const void *ptr, size_t len) { - /* These are the FNV-1a parameters for 32 bits. */ + // These are the FNV-1a parameters for 32 bits. 
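// For reference, FNV-1a with these parameters is just (illustrative
// restatement; the real loop lives in the unchanged body of this function):
//
//   uint32_t h = 2166136261u;            /* offset basis */
//   for (size_t i = 0; i < len; i++) {
//     h ^= ((const uint8_t *)ptr)[i];    /* xor the next byte ... */
//     h *= 16777619u;                    /* ... then multiply by the prime */
//   }
//
// and lh_strhash in the lhash.c hunk above now simply delegates to
// OPENSSL_hash32(c, strlen(c)).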
static const uint32_t kPrime = 16777619u; static const uint32_t kOffsetBasis = 2166136261u; @@ -155,31 +175,54 @@ size_t OPENSSL_strnlen(const char *s, size_t len) { return len; } -#if defined(OPENSSL_WINDOWS) - -char *OPENSSL_strdup(const char *s) { return _strdup(s); } - -int OPENSSL_strcasecmp(const char *a, const char *b) { - return _stricmp(a, b); +char *OPENSSL_strdup(const char *s) { + const size_t len = strlen(s) + 1; + char *ret = OPENSSL_malloc(len); + if (ret == NULL) { + return NULL; + } + OPENSSL_memcpy(ret, s, len); + return ret; } -int OPENSSL_strncasecmp(const char *a, const char *b, size_t n) { - return _strnicmp(a, b, n); +int OPENSSL_tolower(int c) { + if (c >= 'A' && c <= 'Z') { + return c + ('a' - 'A'); + } + return c; } -#else - -char *OPENSSL_strdup(const char *s) { return strdup(s); } - int OPENSSL_strcasecmp(const char *a, const char *b) { - return strcasecmp(a, b); + for (size_t i = 0;; i++) { + const int aa = OPENSSL_tolower(a[i]); + const int bb = OPENSSL_tolower(b[i]); + + if (aa < bb) { + return -1; + } else if (aa > bb) { + return 1; + } else if (aa == 0) { + return 0; + } + } } int OPENSSL_strncasecmp(const char *a, const char *b, size_t n) { - return strncasecmp(a, b, n); -} + for (size_t i = 0; i < n; i++) { + const int aa = OPENSSL_tolower(a[i]); + const int bb = OPENSSL_tolower(b[i]); + + if (aa < bb) { + return -1; + } else if (aa > bb) { + return 1; + } else if (aa == 0) { + return 0; + } + } -#endif + return 0; +} int BIO_snprintf(char *buf, size_t n, const char *format, ...) { va_list args; diff --git a/Sources/BoringSSL/crypto/obj/obj.c b/Sources/BoringSSL/crypto/obj/obj.c index 173257fa7..8e1e6b14c 100644 --- a/Sources/BoringSSL/crypto/obj/obj.c +++ b/Sources/BoringSSL/crypto/obj/obj.c @@ -77,7 +77,7 @@ static struct CRYPTO_STATIC_MUTEX global_added_lock = CRYPTO_STATIC_MUTEX_INIT; -/* These globals are protected by |global_added_lock|. */ +// These globals are protected by |global_added_lock|. static LHASH_OF(ASN1_OBJECT) *global_added_by_data = NULL; static LHASH_OF(ASN1_OBJECT) *global_added_by_nid = NULL; static LHASH_OF(ASN1_OBJECT) *global_added_by_short_name = NULL; @@ -107,7 +107,7 @@ ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *o) { } if (!(o->flags & ASN1_OBJECT_FLAG_DYNAMIC)) { - /* TODO(fork): this is a little dangerous. */ + // TODO(fork): this is a little dangerous. return (ASN1_OBJECT *)o; } @@ -126,7 +126,7 @@ ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *o) { OPENSSL_memcpy(data, o->data, o->length); } - /* once data is attached to an object, it remains const */ + // once data is attached to an object, it remains const r->data = data; r->length = o->length; r->nid = o->nid; @@ -172,9 +172,25 @@ int OBJ_cmp(const ASN1_OBJECT *a, const ASN1_OBJECT *b) { return OPENSSL_memcmp(a->data, b->data, a->length); } -/* obj_cmp is called to search the kNIDsInOIDOrder array. The |key| argument is - * an |ASN1_OBJECT|* that we're looking for and |element| is a pointer to an - * unsigned int in the array. */ +const uint8_t *OBJ_get0_data(const ASN1_OBJECT *obj) { + if (obj == NULL) { + return NULL; + } + + return obj->data; +} + +size_t OBJ_length(const ASN1_OBJECT *obj) { + if (obj == NULL || obj->length < 0) { + return 0; + } + + return (size_t)obj->length; +} + +// obj_cmp is called to search the kNIDsInOIDOrder array. The |key| argument is +// an |ASN1_OBJECT|* that we're looking for and |element| is a pointer to an +// unsigned int in the array. 
static int obj_cmp(const void *key, const void *element) { unsigned nid = *((const unsigned*) element); const ASN1_OBJECT *a = key; @@ -233,9 +249,9 @@ int OBJ_cbs2nid(const CBS *cbs) { return OBJ_obj2nid(&obj); } -/* short_name_cmp is called to search the kNIDsInShortNameOrder array. The - * |key| argument is name that we're looking for and |element| is a pointer to - * an unsigned int in the array. */ +// short_name_cmp is called to search the kNIDsInShortNameOrder array. The +// |key| argument is name that we're looking for and |element| is a pointer to +// an unsigned int in the array. static int short_name_cmp(const void *key, const void *element) { const char *name = (const char *) key; unsigned nid = *((unsigned*) element); @@ -269,9 +285,9 @@ int OBJ_sn2nid(const char *short_name) { return kObjects[*nid_ptr].nid; } -/* long_name_cmp is called to search the kNIDsInLongNameOrder array. The - * |key| argument is name that we're looking for and |element| is a pointer to - * an unsigned int in the array. */ +// long_name_cmp is called to search the kNIDsInLongNameOrder array. The +// |key| argument is name that we're looking for and |element| is a pointer to +// an unsigned int in the array. static int long_name_cmp(const void *key, const void *element) { const char *name = (const char *) key; unsigned nid = *((unsigned*) element); @@ -373,16 +389,30 @@ const char *OBJ_nid2ln(int nid) { return obj->ln; } -ASN1_OBJECT *OBJ_txt2obj(const char *s, int dont_search_names) { - int nid = NID_undef; - ASN1_OBJECT *op = NULL; - unsigned char *buf; - unsigned char *p; - const unsigned char *bufp; - int contents_len, total_len; +static ASN1_OBJECT *create_object_with_text_oid(int (*get_nid)(void), + const char *oid, + const char *short_name, + const char *long_name) { + uint8_t *buf; + size_t len; + CBB cbb; + if (!CBB_init(&cbb, 32) || + !CBB_add_asn1_oid_from_text(&cbb, oid, strlen(oid)) || + !CBB_finish(&cbb, &buf, &len)) { + OPENSSL_PUT_ERROR(OBJ, OBJ_R_INVALID_OID_STRING); + CBB_cleanup(&cbb); + return NULL; + } + + ASN1_OBJECT *ret = ASN1_OBJECT_create(get_nid ? get_nid() : NID_undef, buf, + len, short_name, long_name); + OPENSSL_free(buf); + return ret; +} +ASN1_OBJECT *OBJ_txt2obj(const char *s, int dont_search_names) { if (!dont_search_names) { - nid = OBJ_sn2nid(s); + int nid = OBJ_sn2nid(s); if (nid == NID_undef) { nid = OBJ_ln2nid(s); } @@ -392,31 +422,7 @@ ASN1_OBJECT *OBJ_txt2obj(const char *s, int dont_search_names) { } } - /* Work out size of content octets */ - contents_len = a2d_ASN1_OBJECT(NULL, 0, s, -1); - if (contents_len <= 0) { - return NULL; - } - /* Work out total size */ - total_len = ASN1_object_size(0, contents_len, V_ASN1_OBJECT); - - buf = OPENSSL_malloc(total_len); - if (buf == NULL) { - OPENSSL_PUT_ERROR(OBJ, ERR_R_MALLOC_FAILURE); - return NULL; - } - - p = buf; - /* Write out tag+length */ - ASN1_put_object(&p, 0, contents_len, V_ASN1_OBJECT, V_ASN1_UNIVERSAL); - /* Write out contents */ - a2d_ASN1_OBJECT(p, contents_len, s, -1); - - bufp = buf; - op = d2i_ASN1_OBJECT(NULL, &bufp, total_len); - OPENSSL_free(buf); - - return op; + return create_object_with_text_oid(NULL, s, NULL, NULL); } static int strlcpy_int(char *dst, const char *src, int dst_size) { @@ -436,16 +442,16 @@ static int parse_oid_component(CBS *cbs, uint64_t *out) { return 0; } if ((v >> (64 - 7)) != 0) { - /* The component is too large. */ + // The component is too large. return 0; } if (v == 0 && b == 0x80) { - /* The component must be minimally encoded. 
*/ + // The component must be minimally encoded. return 0; } v = (v << 7) | (b & 0x7f); - /* Components end at an octet with the high bit cleared. */ + // Components end at an octet with the high bit cleared. } while (b & 0x80); *out = v; @@ -460,8 +466,8 @@ static int add_decimal(CBB *out, uint64_t v) { int OBJ_obj2txt(char *out, int out_len, const ASN1_OBJECT *obj, int always_return_oid) { - /* Python depends on the empty OID successfully encoding as the empty - * string. */ + // Python depends on the empty OID successfully encoding as the empty + // string. if (obj == NULL || obj->length == 0) { return strlcpy_int(out, "", out_len); } @@ -487,7 +493,7 @@ int OBJ_obj2txt(char *out, int out_len, const ASN1_OBJECT *obj, CBS cbs; CBS_init(&cbs, obj->data, obj->length); - /* The first component is 40 * value1 + value2, where value1 is 0, 1, or 2. */ + // The first component is 40 * value1 + value2, where value1 is 0, 1, or 2. uint64_t v; if (!parse_oid_component(&cbs, &v)) { goto err; @@ -567,8 +573,8 @@ static int cmp_long_name(const ASN1_OBJECT *a, const ASN1_OBJECT *b) { return strcmp(a->ln, b->ln); } -/* obj_add_object inserts |obj| into the various global hashes for run-time - * added objects. It returns one on success or zero otherwise. */ +// obj_add_object inserts |obj| into the various global hashes for run-time +// added objects. It returns one on success or zero otherwise. static int obj_add_object(ASN1_OBJECT *obj) { int ok; ASN1_OBJECT *old_object; @@ -584,10 +590,10 @@ static int obj_add_object(ASN1_OBJECT *obj) { global_added_by_long_name = lh_ASN1_OBJECT_new(hash_long_name, cmp_long_name); } - /* We don't pay attention to |old_object| (which contains any previous object - * that was evicted from the hashes) because we don't have a reference count - * on ASN1_OBJECT values. Also, we should never have duplicates nids and so - * should always have objects in |global_added_by_nid|. */ + // We don't pay attention to |old_object| (which contains any previous object + // that was evicted from the hashes) because we don't have a reference count + // on ASN1_OBJECT values. Also, we should never have duplicates nids and so + // should always have objects in |global_added_by_nid|. 
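// (For the OID text handling above: the first encoded component packs two
// arcs as 40 * value1 + value2. For example, the content octets 2b 65 70
// decode as 0x2b = 43 = 40 * 1 + 3, then 0x65 = 101 and 0x70 = 112, i.e. the
// Ed25519 OID 1.3.101.112 that obj_dat.h gains below.)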
ok = lh_ASN1_OBJECT_insert(global_added_by_nid, &old_object, obj); if (obj->length != 0 && obj->data != NULL) { @@ -605,41 +611,11 @@ static int obj_add_object(ASN1_OBJECT *obj) { } int OBJ_create(const char *oid, const char *short_name, const char *long_name) { - int ret = NID_undef; - ASN1_OBJECT *op = NULL; - unsigned char *buf = NULL; - int len; - - len = a2d_ASN1_OBJECT(NULL, 0, oid, -1); - if (len <= 0) { - goto err; - } - - buf = OPENSSL_malloc(len); - if (buf == NULL) { - OPENSSL_PUT_ERROR(OBJ, ERR_R_MALLOC_FAILURE); - goto err; - } - - len = a2d_ASN1_OBJECT(buf, len, oid, -1); - if (len == 0) { - goto err; - } - - op = (ASN1_OBJECT *)ASN1_OBJECT_create(obj_next_nid(), buf, len, short_name, - long_name); - if (op == NULL) { - goto err; - } - - if (obj_add_object(op)) { - ret = op->nid; + ASN1_OBJECT *op = + create_object_with_text_oid(obj_next_nid, oid, short_name, long_name); + if (op == NULL || + !obj_add_object(op)) { + return NID_undef; } - op = NULL; - -err: - ASN1_OBJECT_free(op); - OPENSSL_free(buf); - - return ret; + return op->nid; } diff --git a/Sources/BoringSSL/crypto/obj/obj_dat.h b/Sources/BoringSSL/crypto/obj/obj_dat.h index 4905f0d02..dceaf03df 100644 --- a/Sources/BoringSSL/crypto/obj/obj_dat.h +++ b/Sources/BoringSSL/crypto/obj/obj_dat.h @@ -56,7 +56,8 @@ /* This file is generated by crypto/obj/objects.go. */ -#define NUM_NID 949 + +#define NUM_NID 959 static const uint8_t kObjectData[] = { /* NID_rsadsi */ @@ -1811,6 +1812,8 @@ static const uint8_t kObjectData[] = { 0x2b, 0x81, 0x04, 0x01, 0x0e, 0x02, /* NID_dhSinglePass_cofactorDH_sha512kdf_scheme */ 0x2b, 0x81, 0x04, 0x01, 0x0e, 0x03, + /* NID_ED25519 */ + 0x2b, 0x65, 0x70, }; static const ASN1_OBJECT kObjects[NUM_NID] = { @@ -3440,6 +3443,17 @@ static const ASN1_OBJECT kObjects[NUM_NID] = { {"dh-std-kdf", "dh-std-kdf", NID_dh_std_kdf, 0, NULL, 0}, {"dh-cofactor-kdf", "dh-cofactor-kdf", NID_dh_cofactor_kdf, 0, NULL, 0}, {"X25519", "X25519", NID_X25519, 0, NULL, 0}, + {"ED25519", "ED25519", NID_ED25519, 3, &kObjectData[6175], 0}, + {"ChaCha20-Poly1305", "chacha20-poly1305", NID_chacha20_poly1305, 0, NULL, + 0}, + {"KxRSA", "kx-rsa", NID_kx_rsa, 0, NULL, 0}, + {"KxECDHE", "kx-ecdhe", NID_kx_ecdhe, 0, NULL, 0}, + {"KxPSK", "kx-psk", NID_kx_psk, 0, NULL, 0}, + {"AuthRSA", "auth-rsa", NID_auth_rsa, 0, NULL, 0}, + {"AuthECDSA", "auth-ecdsa", NID_auth_ecdsa, 0, NULL, 0}, + {"AuthPSK", "auth-psk", NID_auth_psk, 0, NULL, 0}, + {"KxANY", "kx-any", NID_kx_any, 0, NULL, 0}, + {"AuthANY", "auth-any", NID_auth_any, 0, NULL, 0}, }; static const unsigned kNIDsInShortNameOrder[] = { @@ -3470,6 +3484,10 @@ static const unsigned kNIDsInShortNameOrder[] = { 426 /* AES-256-ECB */, 428 /* AES-256-OFB */, 914 /* AES-256-XTS */, + 958 /* AuthANY */, + 955 /* AuthECDSA */, + 956 /* AuthPSK */, + 954 /* AuthRSA */, 91 /* BF-CBC */, 93 /* BF-CFB */, 92 /* BF-ECB */, @@ -3501,6 +3519,7 @@ static const unsigned kNIDsInShortNameOrder[] = { 13 /* CN */, 141 /* CRLReason */, 417 /* CSPName */, + 950 /* ChaCha20-Poly1305 */, 367 /* CrlID */, 391 /* DC */, 31 /* DES-CBC */, @@ -3528,6 +3547,7 @@ static const unsigned kNIDsInShortNameOrder[] = { 70 /* DSA-SHA1-old */, 67 /* DSA-old */, 297 /* DVCS */, + 949 /* ED25519 */, 99 /* GN */, 855 /* HMAC */, 780 /* HMAC-MD5 */, @@ -3542,6 +3562,10 @@ static const unsigned kNIDsInShortNameOrder[] = { 645 /* ITU-T */, 646 /* JOINT-ISO-ITU-T */, 773 /* KISA */, + 957 /* KxANY */, + 952 /* KxECDHE */, + 953 /* KxPSK */, + 951 /* KxRSA */, 15 /* L */, 856 /* LocalKeySet */, 3 /* MD2 */, @@ -4400,6 +4424,7 @@ 
static const unsigned kNIDsInLongNameOrder[] = { 382 /* Directory */, 392 /* Domain */, 132 /* E-mail Protection */, + 949 /* ED25519 */, 389 /* Enterprises */, 384 /* Experimental */, 372 /* Extended OCSP Status */, @@ -4564,6 +4589,10 @@ static const unsigned kNIDsInLongNameOrder[] = { 484 /* associatedDomain */, 485 /* associatedName */, 501 /* audio */, + 958 /* auth-any */, + 955 /* auth-ecdsa */, + 956 /* auth-psk */, + 954 /* auth-rsa */, 882 /* authorityRevocationList */, 91 /* bf-cbc */, 93 /* bf-cfb */, @@ -4634,6 +4663,7 @@ static const unsigned kNIDsInLongNameOrder[] = { 677 /* certicom-arc */, 517 /* certificate extensions */, 883 /* certificateRevocationList */, + 950 /* chacha20-poly1305 */, 54 /* challengePassword */, 407 /* characteristic-two-field */, 395 /* clearance */, @@ -4976,6 +5006,10 @@ static const unsigned kNIDsInLongNameOrder[] = { 646 /* joint-iso-itu-t */, 150 /* keyBag */, 773 /* kisa */, + 957 /* kx-any */, + 952 /* kx-ecdhe */, + 953 /* kx-psk */, + 951 /* kx-rsa */, 477 /* lastModifiedBy */, 476 /* lastModifiedTime */, 157 /* localKeyID */, @@ -5327,26 +5361,39 @@ static const unsigned kNIDsInLongNameOrder[] = { }; static const unsigned kNIDsInOIDOrder[] = { - 434 /* 0.9 (OBJ_data) */, 182 /* 1.2 (OBJ_member_body) */, - 379 /* 1.3 (OBJ_org) */, 676 /* 1.3 (OBJ_identified_organization) */, - 11 /* 2.5 (OBJ_X500) */, 647 /* 2.23 (OBJ_international_organizations) */, - 380 /* 1.3.6 (OBJ_dod) */, 12 /* 2.5.4 (OBJ_X509) */, - 378 /* 2.5.8 (OBJ_X500algorithms) */, 81 /* 2.5.29 (OBJ_id_ce) */, - 512 /* 2.23.42 (OBJ_id_set) */, 678 /* 2.23.43 (OBJ_wap) */, - 435 /* 0.9.2342 (OBJ_pss) */, 183 /* 1.2.840 (OBJ_ISO_US) */, - 381 /* 1.3.6.1 (OBJ_iana) */, 677 /* 1.3.132 (OBJ_certicom_arc) */, + 434 /* 0.9 (OBJ_data) */, + 182 /* 1.2 (OBJ_member_body) */, + 379 /* 1.3 (OBJ_org) */, + 676 /* 1.3 (OBJ_identified_organization) */, + 11 /* 2.5 (OBJ_X500) */, + 647 /* 2.23 (OBJ_international_organizations) */, + 380 /* 1.3.6 (OBJ_dod) */, + 12 /* 2.5.4 (OBJ_X509) */, + 378 /* 2.5.8 (OBJ_X500algorithms) */, + 81 /* 2.5.29 (OBJ_id_ce) */, + 512 /* 2.23.42 (OBJ_id_set) */, + 678 /* 2.23.43 (OBJ_wap) */, + 435 /* 0.9.2342 (OBJ_pss) */, + 183 /* 1.2.840 (OBJ_ISO_US) */, + 381 /* 1.3.6.1 (OBJ_iana) */, + 949 /* 1.3.101.112 (OBJ_ED25519) */, + 677 /* 1.3.132 (OBJ_certicom_arc) */, 394 /* 2.5.1.5 (OBJ_selected_attribute_types) */, - 13 /* 2.5.4.3 (OBJ_commonName) */, 100 /* 2.5.4.4 (OBJ_surname) */, - 105 /* 2.5.4.5 (OBJ_serialNumber) */, 14 /* 2.5.4.6 (OBJ_countryName) */, + 13 /* 2.5.4.3 (OBJ_commonName) */, + 100 /* 2.5.4.4 (OBJ_surname) */, + 105 /* 2.5.4.5 (OBJ_serialNumber) */, + 14 /* 2.5.4.6 (OBJ_countryName) */, 15 /* 2.5.4.7 (OBJ_localityName) */, 16 /* 2.5.4.8 (OBJ_stateOrProvinceName) */, 660 /* 2.5.4.9 (OBJ_streetAddress) */, 17 /* 2.5.4.10 (OBJ_organizationName) */, 18 /* 2.5.4.11 (OBJ_organizationalUnitName) */, - 106 /* 2.5.4.12 (OBJ_title) */, 107 /* 2.5.4.13 (OBJ_description) */, + 106 /* 2.5.4.12 (OBJ_title) */, + 107 /* 2.5.4.13 (OBJ_description) */, 859 /* 2.5.4.14 (OBJ_searchGuide) */, 860 /* 2.5.4.15 (OBJ_businessCategory) */, - 861 /* 2.5.4.16 (OBJ_postalAddress) */, 661 /* 2.5.4.17 (OBJ_postalCode) */, + 861 /* 2.5.4.16 (OBJ_postalAddress) */, + 661 /* 2.5.4.17 (OBJ_postalCode) */, 862 /* 2.5.4.18 (OBJ_postOfficeBox) */, 863 /* 2.5.4.19 (OBJ_physicalDeliveryOfficeName) */, 864 /* 2.5.4.20 (OBJ_telephoneNumber) */, @@ -5360,15 +5407,18 @@ static const unsigned kNIDsInOIDOrder[] = { 872 /* 2.5.4.28 (OBJ_preferredDeliveryMethod) */, 873 /* 2.5.4.29 
(OBJ_presentationAddress) */, 874 /* 2.5.4.30 (OBJ_supportedApplicationContext) */, - 875 /* 2.5.4.31 (OBJ_member) */, 876 /* 2.5.4.32 (OBJ_owner) */, - 877 /* 2.5.4.33 (OBJ_roleOccupant) */, 878 /* 2.5.4.34 (OBJ_seeAlso) */, + 875 /* 2.5.4.31 (OBJ_member) */, + 876 /* 2.5.4.32 (OBJ_owner) */, + 877 /* 2.5.4.33 (OBJ_roleOccupant) */, + 878 /* 2.5.4.34 (OBJ_seeAlso) */, 879 /* 2.5.4.35 (OBJ_userPassword) */, 880 /* 2.5.4.36 (OBJ_userCertificate) */, 881 /* 2.5.4.37 (OBJ_cACertificate) */, 882 /* 2.5.4.38 (OBJ_authorityRevocationList) */, 883 /* 2.5.4.39 (OBJ_certificateRevocationList) */, 884 /* 2.5.4.40 (OBJ_crossCertificatePair) */, - 173 /* 2.5.4.41 (OBJ_name) */, 99 /* 2.5.4.42 (OBJ_givenName) */, + 173 /* 2.5.4.41 (OBJ_name) */, + 99 /* 2.5.4.42 (OBJ_givenName) */, 101 /* 2.5.4.43 (OBJ_initials) */, 509 /* 2.5.4.44 (OBJ_generationQualifier) */, 503 /* 2.5.4.45 (OBJ_x500UniqueIdentifier) */, @@ -5380,7 +5430,8 @@ static const unsigned kNIDsInOIDOrder[] = { 889 /* 2.5.4.51 (OBJ_houseIdentifier) */, 890 /* 2.5.4.52 (OBJ_supportedAlgorithms) */, 891 /* 2.5.4.53 (OBJ_deltaRevocationList) */, - 892 /* 2.5.4.54 (OBJ_dmdName) */, 510 /* 2.5.4.65 (OBJ_pseudonym) */, + 892 /* 2.5.4.54 (OBJ_dmdName) */, + 510 /* 2.5.4.65 (OBJ_pseudonym) */, 400 /* 2.5.4.72 (OBJ_role) */, 769 /* 2.5.29.9 (OBJ_subject_directory_attributes) */, 82 /* 2.5.29.14 (OBJ_subject_key_identifier) */, @@ -5389,7 +5440,8 @@ static const unsigned kNIDsInOIDOrder[] = { 85 /* 2.5.29.17 (OBJ_subject_alt_name) */, 86 /* 2.5.29.18 (OBJ_issuer_alt_name) */, 87 /* 2.5.29.19 (OBJ_basic_constraints) */, - 88 /* 2.5.29.20 (OBJ_crl_number) */, 141 /* 2.5.29.21 (OBJ_crl_reason) */, + 88 /* 2.5.29.20 (OBJ_crl_number) */, + 141 /* 2.5.29.21 (OBJ_crl_reason) */, 430 /* 2.5.29.23 (OBJ_hold_instruction_code) */, 142 /* 2.5.29.24 (OBJ_invalidity_date) */, 140 /* 2.5.29.27 (OBJ_delta_crl) */, @@ -5405,16 +5457,26 @@ static const unsigned kNIDsInOIDOrder[] = { 857 /* 2.5.29.46 (OBJ_freshest_crl) */, 748 /* 2.5.29.54 (OBJ_inhibit_any_policy) */, 402 /* 2.5.29.55 (OBJ_target_information) */, - 403 /* 2.5.29.56 (OBJ_no_rev_avail) */, 513 /* 2.23.42.0 (OBJ_set_ctype) */, - 514 /* 2.23.42.1 (OBJ_set_msgExt) */, 515 /* 2.23.42.3 (OBJ_set_attr) */, - 516 /* 2.23.42.5 (OBJ_set_policy) */, 517 /* 2.23.42.7 (OBJ_set_certExt) */, - 518 /* 2.23.42.8 (OBJ_set_brand) */, 679 /* 2.23.43.1 (OBJ_wap_wsg) */, - 382 /* 1.3.6.1.1 (OBJ_Directory) */, 383 /* 1.3.6.1.2 (OBJ_Management) */, - 384 /* 1.3.6.1.3 (OBJ_Experimental) */, 385 /* 1.3.6.1.4 (OBJ_Private) */, - 386 /* 1.3.6.1.5 (OBJ_Security) */, 387 /* 1.3.6.1.6 (OBJ_SNMPv2) */, - 388 /* 1.3.6.1.7 (OBJ_Mail) */, 376 /* 1.3.14.3.2 (OBJ_algorithm) */, - 395 /* 2.5.1.5.55 (OBJ_clearance) */, 19 /* 2.5.8.1.1 (OBJ_rsa) */, - 96 /* 2.5.8.3.100 (OBJ_mdc2WithRSA) */, 95 /* 2.5.8.3.101 (OBJ_mdc2) */, + 403 /* 2.5.29.56 (OBJ_no_rev_avail) */, + 513 /* 2.23.42.0 (OBJ_set_ctype) */, + 514 /* 2.23.42.1 (OBJ_set_msgExt) */, + 515 /* 2.23.42.3 (OBJ_set_attr) */, + 516 /* 2.23.42.5 (OBJ_set_policy) */, + 517 /* 2.23.42.7 (OBJ_set_certExt) */, + 518 /* 2.23.42.8 (OBJ_set_brand) */, + 679 /* 2.23.43.1 (OBJ_wap_wsg) */, + 382 /* 1.3.6.1.1 (OBJ_Directory) */, + 383 /* 1.3.6.1.2 (OBJ_Management) */, + 384 /* 1.3.6.1.3 (OBJ_Experimental) */, + 385 /* 1.3.6.1.4 (OBJ_Private) */, + 386 /* 1.3.6.1.5 (OBJ_Security) */, + 387 /* 1.3.6.1.6 (OBJ_SNMPv2) */, + 388 /* 1.3.6.1.7 (OBJ_Mail) */, + 376 /* 1.3.14.3.2 (OBJ_algorithm) */, + 395 /* 2.5.1.5.55 (OBJ_clearance) */, + 19 /* 2.5.8.1.1 (OBJ_rsa) */, + 96 /* 2.5.8.3.100 (OBJ_mdc2WithRSA) 
*/, + 95 /* 2.5.8.3.101 (OBJ_mdc2) */, 746 /* 2.5.29.32.0 (OBJ_any_policy) */, 910 /* 2.5.29.37.0 (OBJ_anyExtendedKeyUsage) */, 519 /* 2.23.42.0.0 (OBJ_setct_PANData) */, @@ -5529,22 +5591,27 @@ static const unsigned kNIDsInOIDOrder[] = { 638 /* 2.23.42.8.34 (OBJ_set_brand_AmericanExpress) */, 639 /* 2.23.42.8.35 (OBJ_set_brand_JCB) */, 805 /* 1.2.643.2.2 (OBJ_cryptopro) */, - 806 /* 1.2.643.2.9 (OBJ_cryptocom) */, 184 /* 1.2.840.10040 (OBJ_X9_57) */, + 806 /* 1.2.643.2.9 (OBJ_cryptocom) */, + 184 /* 1.2.840.10040 (OBJ_X9_57) */, 405 /* 1.2.840.10045 (OBJ_ansi_X9_62) */, 389 /* 1.3.6.1.4.1 (OBJ_Enterprises) */, 504 /* 1.3.6.1.7.1 (OBJ_mime_mhs) */, 104 /* 1.3.14.3.2.3 (OBJ_md5WithRSA) */, - 29 /* 1.3.14.3.2.6 (OBJ_des_ecb) */, 31 /* 1.3.14.3.2.7 (OBJ_des_cbc) */, + 29 /* 1.3.14.3.2.6 (OBJ_des_ecb) */, + 31 /* 1.3.14.3.2.7 (OBJ_des_cbc) */, 45 /* 1.3.14.3.2.8 (OBJ_des_ofb64) */, 30 /* 1.3.14.3.2.9 (OBJ_des_cfb64) */, 377 /* 1.3.14.3.2.11 (OBJ_rsaSignature) */, - 67 /* 1.3.14.3.2.12 (OBJ_dsa_2) */, 66 /* 1.3.14.3.2.13 (OBJ_dsaWithSHA) */, + 67 /* 1.3.14.3.2.12 (OBJ_dsa_2) */, + 66 /* 1.3.14.3.2.13 (OBJ_dsaWithSHA) */, 42 /* 1.3.14.3.2.15 (OBJ_shaWithRSAEncryption) */, - 32 /* 1.3.14.3.2.17 (OBJ_des_ede_ecb) */, 41 /* 1.3.14.3.2.18 (OBJ_sha) */, + 32 /* 1.3.14.3.2.17 (OBJ_des_ede_ecb) */, + 41 /* 1.3.14.3.2.18 (OBJ_sha) */, 64 /* 1.3.14.3.2.26 (OBJ_sha1) */, 70 /* 1.3.14.3.2.27 (OBJ_dsaWithSHA1_2) */, 115 /* 1.3.14.3.2.29 (OBJ_sha1WithRSA) */, - 117 /* 1.3.36.3.2.1 (OBJ_ripemd160) */, 143 /* 1.3.101.1.4.1 (OBJ_sxnet) */, + 117 /* 1.3.36.3.2.1 (OBJ_ripemd160) */, + 143 /* 1.3.101.1.4.1 (OBJ_sxnet) */, 721 /* 1.3.132.0.1 (OBJ_sect163k1) */, 722 /* 1.3.132.0.2 (OBJ_sect163r1) */, 728 /* 1.3.132.0.3 (OBJ_sect239k1) */, @@ -5608,7 +5675,8 @@ static const unsigned kNIDsInOIDOrder[] = { 816 /* 1.2.643.2.2.23 (OBJ_id_GostR3411_94_prf) */, 817 /* 1.2.643.2.2.98 (OBJ_id_GostR3410_2001DH) */, 818 /* 1.2.643.2.2.99 (OBJ_id_GostR3410_94DH) */, - 1 /* 1.2.840.113549 (OBJ_rsadsi) */, 185 /* 1.2.840.10040.4 (OBJ_X9cm) */, + 1 /* 1.2.840.113549 (OBJ_rsadsi) */, + 185 /* 1.2.840.10040.4 (OBJ_X9cm) */, 127 /* 1.3.6.1.5.5.7 (OBJ_id_pkix) */, 505 /* 1.3.6.1.7.1.1 (OBJ_mime_mhs_headings) */, 506 /* 1.3.6.1.7.1.2 (OBJ_mime_mhs_bodies) */, diff --git a/Sources/BoringSSL/crypto/obj/obj_xref.c b/Sources/BoringSSL/crypto/obj/obj_xref.c index 7b4ff12ee..21bde279d 100644 --- a/Sources/BoringSSL/crypto/obj/obj_xref.c +++ b/Sources/BoringSSL/crypto/obj/obj_xref.c @@ -66,7 +66,7 @@ typedef struct { } nid_triple; static const nid_triple kTriples[] = { - /* RSA PKCS#1. */ + // RSA PKCS#1. {NID_md4WithRSAEncryption, NID_md4, NID_rsaEncryption}, {NID_md5WithRSAEncryption, NID_md5, NID_rsaEncryption}, {NID_sha1WithRSAEncryption, NID_sha1, NID_rsaEncryption}, @@ -74,21 +74,21 @@ static const nid_triple kTriples[] = { {NID_sha256WithRSAEncryption, NID_sha256, NID_rsaEncryption}, {NID_sha384WithRSAEncryption, NID_sha384, NID_rsaEncryption}, {NID_sha512WithRSAEncryption, NID_sha512, NID_rsaEncryption}, - /* DSA. */ + // DSA. {NID_dsaWithSHA1, NID_sha1, NID_dsa}, {NID_dsaWithSHA1_2, NID_sha1, NID_dsa_2}, {NID_dsa_with_SHA224, NID_sha224, NID_dsa}, {NID_dsa_with_SHA256, NID_sha256, NID_dsa}, - /* ECDSA. */ + // ECDSA. 
{NID_ecdsa_with_SHA1, NID_sha1, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA224, NID_sha224, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA256, NID_sha256, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA384, NID_sha384, NID_X9_62_id_ecPublicKey}, {NID_ecdsa_with_SHA512, NID_sha512, NID_X9_62_id_ecPublicKey}, - /* For PSS the digest algorithm can vary and depends on the included - * AlgorithmIdentifier. The digest "undef" indicates the public key method - * should handle this explicitly. */ + // The following algorithms use more complex (or simpler) parameters. The + // digest "undef" indicates the caller should handle this explicitly. {NID_rsassaPss, NID_undef, NID_rsaEncryption}, + {NID_ED25519, NID_undef, NID_ED25519}, }; int OBJ_find_sigid_algs(int sign_nid, int *out_digest_nid, int *out_pkey_nid) { diff --git a/Sources/BoringSSL/crypto/pem/pem_info.c b/Sources/BoringSSL/crypto/pem/pem_info.c index 57c87d4fc..d707e426b 100644 --- a/Sources/BoringSSL/crypto/pem/pem_info.c +++ b/Sources/BoringSSL/crypto/pem/pem_info.c @@ -297,7 +297,6 @@ int PEM_X509_INFO_write_bio(BIO *bp, X509_INFO *xi, EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u) { - EVP_CIPHER_CTX ctx; int i, ret = 0; unsigned char *data = NULL; const char *objstr = NULL; @@ -374,8 +373,7 @@ int PEM_X509_INFO_write_bio(BIO *bp, X509_INFO *xi, EVP_CIPHER *enc, ret = 1; - err: - OPENSSL_cleanse((char *)&ctx, sizeof(ctx)); - OPENSSL_cleanse(buf, PEM_BUFSIZE); - return (ret); +err: + OPENSSL_cleanse(buf, PEM_BUFSIZE); + return ret; } diff --git a/Sources/BoringSSL/crypto/pem/pem_lib.c b/Sources/BoringSSL/crypto/pem/pem_lib.c index 8b7932e46..8f8909613 100644 --- a/Sources/BoringSSL/crypto/pem/pem_lib.c +++ b/Sources/BoringSSL/crypto/pem/pem_lib.c @@ -343,10 +343,7 @@ int PEM_ASN1_write_bio(i2d_of_void *i2d, const char *name, BIO *bp, OPENSSL_cleanse(iv, sizeof(iv)); OPENSSL_cleanse((char *)&ctx, sizeof(ctx)); OPENSSL_cleanse(buf, PEM_BUFSIZE); - if (data != NULL) { - OPENSSL_cleanse(data, (unsigned int)dsize); - OPENSSL_free(data); - } + OPENSSL_free(data); return (ret); } @@ -562,7 +559,6 @@ int PEM_write_bio(BIO *bp, const char *name, const char *header, EVP_EncodeFinal(&ctx, buf, &outl); if ((outl > 0) && (BIO_write(bp, (char *)buf, outl) != outl)) goto err; - OPENSSL_cleanse(buf, PEM_BUFSIZE * 8); OPENSSL_free(buf); buf = NULL; if ((BIO_write(bp, "-----END ", 9) != 9) || @@ -572,7 +568,6 @@ int PEM_write_bio(BIO *bp, const char *name, const char *header, return (i + outl); err: if (buf) { - OPENSSL_cleanse(buf, PEM_BUFSIZE * 8); OPENSSL_free(buf); } OPENSSL_PUT_ERROR(PEM, reason); @@ -769,13 +764,13 @@ int PEM_read_bio(BIO *bp, char **name, char **header, unsigned char **data, int PEM_def_callback(char *buf, int size, int rwflag, void *userdata) { - if (!buf || !userdata) { + if (!buf || !userdata || size < 0) { return 0; } size_t len = strlen((char *)userdata); if (len >= (size_t)size) { return 0; } - strcpy(buf, (char *)userdata); + BUF_strlcpy(buf, userdata, (size_t)size); return len; } diff --git a/Sources/BoringSSL/crypto/pem/pem_pk8.c b/Sources/BoringSSL/crypto/pem/pem_pk8.c index 550661d64..15385ecaf 100644 --- a/Sources/BoringSSL/crypto/pem/pem_pk8.c +++ b/Sources/BoringSSL/crypto/pem/pem_pk8.c @@ -176,6 +176,7 @@ EVP_PKEY *d2i_PKCS8PrivateKey_bio(BIO *bp, EVP_PKEY **x, pem_password_cb *cb, } p8inf = PKCS8_decrypt(p8, psbuf, klen); X509_SIG_free(p8); + OPENSSL_cleanse(psbuf, klen); if (!p8inf) return NULL; ret = EVP_PKCS82PKEY(p8inf); diff --git a/Sources/BoringSSL/crypto/pem/pem_pkey.c 
b/Sources/BoringSSL/crypto/pem/pem_pkey.c index 058c03112..9fbaeef8d 100644 --- a/Sources/BoringSSL/crypto/pem/pem_pkey.c +++ b/Sources/BoringSSL/crypto/pem/pem_pkey.c @@ -114,6 +114,7 @@ EVP_PKEY *PEM_read_bio_PrivateKey(BIO *bp, EVP_PKEY **x, pem_password_cb *cb, } p8inf = PKCS8_decrypt(p8, psbuf, klen); X509_SIG_free(p8); + OPENSSL_cleanse(psbuf, klen); if (!p8inf) goto p8err; ret = EVP_PKCS82PKEY(p8inf); @@ -139,7 +140,6 @@ EVP_PKEY *PEM_read_bio_PrivateKey(BIO *bp, EVP_PKEY **x, pem_password_cb *cb, err: OPENSSL_free(nm); - OPENSSL_cleanse(data, len); OPENSSL_free(data); return (ret); } diff --git a/Sources/BoringSSL/crypto/pem/pem_xaux.c b/Sources/BoringSSL/crypto/pem/pem_xaux.c index 386dd60d7..b0cceca30 100644 --- a/Sources/BoringSSL/crypto/pem/pem_xaux.c +++ b/Sources/BoringSSL/crypto/pem/pem_xaux.c @@ -63,5 +63,3 @@ #include IMPLEMENT_PEM_rw(X509_AUX, X509, PEM_STRING_X509_TRUSTED, X509_AUX) -IMPLEMENT_PEM_rw(X509_CERT_PAIR, X509_CERT_PAIR, PEM_STRING_X509_PAIR, - X509_CERT_PAIR) diff --git a/Sources/BoringSSL/crypto/pkcs7/internal.h b/Sources/BoringSSL/crypto/pkcs7/internal.h new file mode 100644 index 000000000..9541bea8e --- /dev/null +++ b/Sources/BoringSSL/crypto/pkcs7/internal.h @@ -0,0 +1,49 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#ifndef OPENSSL_HEADER_PKCS7_INTERNAL_H +#define OPENSSL_HEADER_PKCS7_INTERNAL_H + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + + +// pkcs7_parse_header reads the non-certificate/non-CRL prefix of a PKCS#7 +// SignedData blob from |cbs| and sets |*out| to point to the rest of the +// input. If the input is in BER format, then |*der_bytes| will be set to a +// pointer that needs to be freed by the caller once they have finished +// processing |*out| (which will be pointing into |*der_bytes|). +// +// It returns one on success or zero on error. On error, |*der_bytes| is +// NULL. +int pkcs7_parse_header(uint8_t **der_bytes, CBS *out, CBS *cbs); + +// pkcs7_bundle writes a PKCS#7, SignedData structure to |out| and then calls +// |cb| with a CBB to which certificate or CRL data can be written, and the +// opaque context pointer, |arg|. The callback can return zero to indicate an +// error. +// +// pkcs7_bundle returns one on success or zero on error. +int pkcs7_bundle(CBB *out, int (*cb)(CBB *out, const void *arg), + const void *arg); + + +#if defined(__cplusplus) +} // extern C +#endif + +#endif // OPENSSL_HEADER_PKCS7_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/pkcs7/pkcs7.c b/Sources/BoringSSL/crypto/pkcs7/pkcs7.c new file mode 100644 index 000000000..fc175a940 --- /dev/null +++ b/Sources/BoringSSL/crypto/pkcs7/pkcs7.c @@ -0,0 +1,166 @@ +/* Copyright (c) 2014, Google Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include +#include +#include +#include +#include + +#include "internal.h" +#include "../bytestring/internal.h" + + +// 1.2.840.113549.1.7.1 +static const uint8_t kPKCS7Data[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x07, 0x01}; + +// 1.2.840.113549.1.7.2 +static const uint8_t kPKCS7SignedData[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x07, 0x02}; + +// pkcs7_parse_header reads the non-certificate/non-CRL prefix of a PKCS#7 +// SignedData blob from |cbs| and sets |*out| to point to the rest of the +// input. If the input is in BER format, then |*der_bytes| will be set to a +// pointer that needs to be freed by the caller once they have finished +// processing |*out| (which will be pointing into |*der_bytes|). +// +// It returns one on success or zero on error. On error, |*der_bytes| is +// NULL. +int pkcs7_parse_header(uint8_t **der_bytes, CBS *out, CBS *cbs) { + size_t der_len; + CBS in, content_info, content_type, wrapped_signed_data, signed_data; + uint64_t version; + + // The input may be in BER format. 
+ *der_bytes = NULL; + if (!CBS_asn1_ber_to_der(cbs, der_bytes, &der_len)) { + return 0; + } + if (*der_bytes != NULL) { + CBS_init(&in, *der_bytes, der_len); + } else { + CBS_init(&in, CBS_data(cbs), CBS_len(cbs)); + } + + // See https://tools.ietf.org/html/rfc2315#section-7 + if (!CBS_get_asn1(&in, &content_info, CBS_ASN1_SEQUENCE) || + !CBS_get_asn1(&content_info, &content_type, CBS_ASN1_OBJECT)) { + goto err; + } + + if (!CBS_mem_equal(&content_type, kPKCS7SignedData, + sizeof(kPKCS7SignedData))) { + OPENSSL_PUT_ERROR(PKCS7, PKCS7_R_NOT_PKCS7_SIGNED_DATA); + goto err; + } + + // See https://tools.ietf.org/html/rfc2315#section-9.1 + if (!CBS_get_asn1(&content_info, &wrapped_signed_data, + CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || + !CBS_get_asn1(&wrapped_signed_data, &signed_data, CBS_ASN1_SEQUENCE) || + !CBS_get_asn1_uint64(&signed_data, &version) || + !CBS_get_asn1(&signed_data, NULL /* digests */, CBS_ASN1_SET) || + !CBS_get_asn1(&signed_data, NULL /* content */, CBS_ASN1_SEQUENCE)) { + goto err; + } + + if (version < 1) { + OPENSSL_PUT_ERROR(PKCS7, PKCS7_R_BAD_PKCS7_VERSION); + goto err; + } + + CBS_init(out, CBS_data(&signed_data), CBS_len(&signed_data)); + return 1; + +err: + OPENSSL_free(*der_bytes); + *der_bytes = NULL; + return 0; +} + +int PKCS7_get_raw_certificates(STACK_OF(CRYPTO_BUFFER) *out_certs, CBS *cbs, + CRYPTO_BUFFER_POOL *pool) { + CBS signed_data, certificates; + uint8_t *der_bytes = NULL; + int ret = 0; + const size_t initial_certs_len = sk_CRYPTO_BUFFER_num(out_certs); + + if (!pkcs7_parse_header(&der_bytes, &signed_data, cbs)) { + return 0; + } + + // See https://tools.ietf.org/html/rfc2315#section-9.1 + if (!CBS_get_asn1(&signed_data, &certificates, + CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { + OPENSSL_PUT_ERROR(PKCS7, PKCS7_R_NO_CERTIFICATES_INCLUDED); + goto err; + } + + while (CBS_len(&certificates) > 0) { + CBS cert; + if (!CBS_get_asn1_element(&certificates, &cert, CBS_ASN1_SEQUENCE)) { + goto err; + } + + CRYPTO_BUFFER *buf = CRYPTO_BUFFER_new_from_CBS(&cert, pool); + if (buf == NULL || + !sk_CRYPTO_BUFFER_push(out_certs, buf)) { + CRYPTO_BUFFER_free(buf); + goto err; + } + } + + ret = 1; + +err: + OPENSSL_free(der_bytes); + + if (!ret) { + while (sk_CRYPTO_BUFFER_num(out_certs) != initial_certs_len) { + CRYPTO_BUFFER *buf = sk_CRYPTO_BUFFER_pop(out_certs); + CRYPTO_BUFFER_free(buf); + } + } + + return ret; +} + +int pkcs7_bundle(CBB *out, int (*cb)(CBB *out, const void *arg), + const void *arg) { + CBB outer_seq, oid, wrapped_seq, seq, version_bytes, digest_algos_set, + content_info; + + // See https://tools.ietf.org/html/rfc2315#section-7 + if (!CBB_add_asn1(out, &outer_seq, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&outer_seq, &oid, CBS_ASN1_OBJECT) || + !CBB_add_bytes(&oid, kPKCS7SignedData, sizeof(kPKCS7SignedData)) || + !CBB_add_asn1(&outer_seq, &wrapped_seq, + CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || + // See https://tools.ietf.org/html/rfc2315#section-9.1 + !CBB_add_asn1(&wrapped_seq, &seq, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&seq, &version_bytes, CBS_ASN1_INTEGER) || + !CBB_add_u8(&version_bytes, 1) || + !CBB_add_asn1(&seq, &digest_algos_set, CBS_ASN1_SET) || + !CBB_add_asn1(&seq, &content_info, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&content_info, &oid, CBS_ASN1_OBJECT) || + !CBB_add_bytes(&oid, kPKCS7Data, sizeof(kPKCS7Data)) || + !cb(&seq, arg)) { + return 0; + } + + return CBB_flush(out); +} diff --git a/Sources/BoringSSL/crypto/x509/pkcs7.c b/Sources/BoringSSL/crypto/pkcs7/pkcs7_x509.c similarity 
index 51% rename from Sources/BoringSSL/crypto/x509/pkcs7.c rename to Sources/BoringSSL/crypto/pkcs7/pkcs7_x509.c index 9e6a52f26..7bc39d27d 100644 --- a/Sources/BoringSSL/crypto/x509/pkcs7.c +++ b/Sources/BoringSSL/crypto/pkcs7/pkcs7_x509.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, Google Inc. +/* Copyright (c) 2017, Google Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -12,7 +12,7 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include +#include #include #include @@ -20,113 +20,28 @@ #include #include #include -#include #include +#include #include +#include -#include "../bytestring/internal.h" - - -/* pkcs7_parse_header reads the non-certificate/non-CRL prefix of a PKCS#7 - * SignedData blob from |cbs| and sets |*out| to point to the rest of the - * input. If the input is in BER format, then |*der_bytes| will be set to a - * pointer that needs to be freed by the caller once they have finished - * processing |*out| (which will be pointing into |*der_bytes|). - * - * It returns one on success or zero on error. On error, |*der_bytes| is - * NULL. */ -static int pkcs7_parse_header(uint8_t **der_bytes, CBS *out, CBS *cbs) { - size_t der_len; - CBS in, content_info, content_type, wrapped_signed_data, signed_data; - uint64_t version; - - /* The input may be in BER format. */ - *der_bytes = NULL; - if (!CBS_asn1_ber_to_der(cbs, der_bytes, &der_len)) { - return 0; - } - if (*der_bytes != NULL) { - CBS_init(&in, *der_bytes, der_len); - } else { - CBS_init(&in, CBS_data(cbs), CBS_len(cbs)); - } - - /* See https://tools.ietf.org/html/rfc2315#section-7 */ - if (!CBS_get_asn1(&in, &content_info, CBS_ASN1_SEQUENCE) || - !CBS_get_asn1(&content_info, &content_type, CBS_ASN1_OBJECT)) { - goto err; - } - - if (OBJ_cbs2nid(&content_type) != NID_pkcs7_signed) { - OPENSSL_PUT_ERROR(X509, X509_R_NOT_PKCS7_SIGNED_DATA); - goto err; - } - - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ - if (!CBS_get_asn1(&content_info, &wrapped_signed_data, - CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || - !CBS_get_asn1(&wrapped_signed_data, &signed_data, CBS_ASN1_SEQUENCE) || - !CBS_get_asn1_uint64(&signed_data, &version) || - !CBS_get_asn1(&signed_data, NULL /* digests */, CBS_ASN1_SET) || - !CBS_get_asn1(&signed_data, NULL /* content */, CBS_ASN1_SEQUENCE)) { - goto err; - } - - if (version < 1) { - OPENSSL_PUT_ERROR(X509, X509_R_BAD_PKCS7_VERSION); - goto err; - } - - CBS_init(out, CBS_data(&signed_data), CBS_len(&signed_data)); - return 1; - -err: - if (*der_bytes) { - OPENSSL_free(*der_bytes); - *der_bytes = NULL; - } +#include "internal.h" - return 0; -} int PKCS7_get_certificates(STACK_OF(X509) *out_certs, CBS *cbs) { - CBS signed_data, certificates; - uint8_t *der_bytes = NULL; int ret = 0; const size_t initial_certs_len = sk_X509_num(out_certs); - - if (!pkcs7_parse_header(&der_bytes, &signed_data, cbs)) { - return 0; - } - - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ - if (!CBS_get_asn1(&signed_data, &certificates, - CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { - OPENSSL_PUT_ERROR(X509, X509_R_NO_CERTIFICATES_INCLUDED); + STACK_OF(CRYPTO_BUFFER) *raw = sk_CRYPTO_BUFFER_new_null(); + if (raw == NULL || + !PKCS7_get_raw_certificates(raw, cbs, NULL)) { goto err; } - while (CBS_len(&certificates) > 0) { - CBS cert; - X509 *x509; - const uint8_t *inp; - - if 
(!CBS_get_asn1_element(&certificates, &cert, CBS_ASN1_SEQUENCE)) { - goto err; - } - - if (CBS_len(&cert) > LONG_MAX) { - goto err; - } - inp = CBS_data(&cert); - x509 = d2i_X509(NULL, &inp, (long)CBS_len(&cert)); - if (!x509) { - goto err; - } - - assert(inp == CBS_data(&cert) + CBS_len(&cert)); - - if (sk_X509_push(out_certs, x509) == 0) { + for (size_t i = 0; i < sk_CRYPTO_BUFFER_num(raw); i++) { + CRYPTO_BUFFER *buf = sk_CRYPTO_BUFFER_value(raw, i); + X509 *x509 = X509_parse_from_buffer(buf); + if (x509 == NULL || + !sk_X509_push(out_certs, x509)) { X509_free(x509); goto err; } @@ -135,10 +50,7 @@ int PKCS7_get_certificates(STACK_OF(X509) *out_certs, CBS *cbs) { ret = 1; err: - if (der_bytes) { - OPENSSL_free(der_bytes); - } - + sk_CRYPTO_BUFFER_pop_free(raw, CRYPTO_BUFFER_free); if (!ret) { while (sk_X509_num(out_certs) != initial_certs_len) { X509 *x509 = sk_X509_pop(out_certs); @@ -159,10 +71,10 @@ int PKCS7_get_CRLs(STACK_OF(X509_CRL) *out_crls, CBS *cbs) { return 0; } - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ + // See https://tools.ietf.org/html/rfc2315#section-9.1 - /* Even if only CRLs are included, there may be an empty certificates block. - * OpenSSL does this, for example. */ + // Even if only CRLs are included, there may be an empty certificates block. + // OpenSSL does this, for example. if (CBS_peek_asn1_tag(&signed_data, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) && !CBS_get_asn1(&signed_data, NULL /* certificates */, @@ -172,7 +84,7 @@ int PKCS7_get_CRLs(STACK_OF(X509_CRL) *out_crls, CBS *cbs) { if (!CBS_get_asn1(&signed_data, &crls, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 1)) { - OPENSSL_PUT_ERROR(X509, X509_R_NO_CRLS_INCLUDED); + OPENSSL_PUT_ERROR(PKCS7, PKCS7_R_NO_CRLS_INCLUDED); goto err; } @@ -205,9 +117,7 @@ int PKCS7_get_CRLs(STACK_OF(X509_CRL) *out_crls, CBS *cbs) { ret = 1; err: - if (der_bytes) { - OPENSSL_free(der_bytes); - } + OPENSSL_free(der_bytes); if (!ret) { while (sk_X509_CRL_num(out_crls) != initial_crls_len) { @@ -223,9 +133,9 @@ int PKCS7_get_PEM_certificates(STACK_OF(X509) *out_certs, BIO *pem_bio) { long len; int ret; - /* Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM - * internally will actually allow several other values too, including - * "CERTIFICATE". */ + // Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM + // internally will actually allow several other values too, including + // "CERTIFICATE". if (!PEM_bytes_read_bio(&data, &len, NULL /* PEM type output */, PEM_STRING_PKCS7, pem_bio, NULL /* password callback */, @@ -245,9 +155,9 @@ int PKCS7_get_PEM_CRLs(STACK_OF(X509_CRL) *out_crls, BIO *pem_bio) { long len; int ret; - /* Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM - * internally will actually allow several other values too, including - * "CERTIFICATE". */ + // Even though we pass PEM_STRING_PKCS7 as the expected PEM type here, PEM + // internally will actually allow several other values too, including + // "CERTIFICATE". if (!PEM_bytes_read_bio(&data, &len, NULL /* PEM type output */, PEM_STRING_PKCS7, pem_bio, NULL /* password callback */, @@ -262,42 +172,12 @@ int PKCS7_get_PEM_CRLs(STACK_OF(X509_CRL) *out_crls, BIO *pem_bio) { return ret; } -/* pkcs7_bundle writes a PKCS#7, SignedData structure to |out| and then calls - * |cb| with a CBB to which certificate or CRL data can be written, and the - * opaque context pointer, |arg|. The callback can return zero to indicate an - * error. 
- * - * pkcs7_bundle returns one on success or zero on error. */ -static int pkcs7_bundle(CBB *out, int (*cb)(CBB *out, const void *arg), - const void *arg) { - CBB outer_seq, wrapped_seq, seq, version_bytes, digest_algos_set, - content_info; - - /* See https://tools.ietf.org/html/rfc2315#section-7 */ - if (!CBB_add_asn1(out, &outer_seq, CBS_ASN1_SEQUENCE) || - !OBJ_nid2cbb(&outer_seq, NID_pkcs7_signed) || - !CBB_add_asn1(&outer_seq, &wrapped_seq, - CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ - !CBB_add_asn1(&wrapped_seq, &seq, CBS_ASN1_SEQUENCE) || - !CBB_add_asn1(&seq, &version_bytes, CBS_ASN1_INTEGER) || - !CBB_add_u8(&version_bytes, 1) || - !CBB_add_asn1(&seq, &digest_algos_set, CBS_ASN1_SET) || - !CBB_add_asn1(&seq, &content_info, CBS_ASN1_SEQUENCE) || - !OBJ_nid2cbb(&content_info, NID_pkcs7_data) || - !cb(&seq, arg)) { - return 0; - } - - return CBB_flush(out); -} - static int pkcs7_bundle_certificates_cb(CBB *out, const void *arg) { const STACK_OF(X509) *certs = arg; size_t i; CBB certificates; - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ + // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBB_add_asn1(out, &certificates, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { return 0; @@ -327,7 +207,7 @@ static int pkcs7_bundle_crls_cb(CBB *out, const void *arg) { size_t i; CBB crl_data; - /* See https://tools.ietf.org/html/rfc2315#section-9.1 */ + // See https://tools.ietf.org/html/rfc2315#section-9.1 if (!CBB_add_asn1(out, &crl_data, CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 1)) { return 0; diff --git a/Sources/BoringSSL/crypto/pkcs8/internal.h b/Sources/BoringSSL/crypto/pkcs8/internal.h index 9cebe2962..939948984 100644 --- a/Sources/BoringSSL/crypto/pkcs8/internal.h +++ b/Sources/BoringSSL/crypto/pkcs8/internal.h @@ -63,40 +63,58 @@ extern "C" { #endif -#define PBE_UCS2_CONVERT_PASSWORD 0x1 +// pkcs8_pbe_decrypt decrypts |in| using the PBE scheme described by +// |algorithm|, which should be a serialized AlgorithmIdentifier structure. On +// success, it sets |*out| to a newly-allocated buffer containing the decrypted +// result and returns one. Otherwise, it returns zero. +int pkcs8_pbe_decrypt(uint8_t **out, size_t *out_len, CBS *algorithm, + const char *pass, size_t pass_len, const uint8_t *in, + size_t in_len); + +#define PKCS12_KEY_ID 1 +#define PKCS12_IV_ID 2 +#define PKCS12_MAC_ID 3 + +// pkcs12_key_gen runs the PKCS#12 key derivation function as specified in +// RFC 7292, appendix B. On success, it writes the resulting |out_len| bytes of +// key material to |out| and returns one. Otherwise, it returns zero. |id| +// should be one of the |PKCS12_*_ID| values. +int pkcs12_key_gen(const char *pass, size_t pass_len, const uint8_t *salt, + size_t salt_len, uint8_t id, unsigned iterations, + size_t out_len, uint8_t *out, const EVP_MD *md); struct pbe_suite { int pbe_nid; + uint8_t oid[10]; + uint8_t oid_len; const EVP_CIPHER *(*cipher_func)(void); const EVP_MD *(*md_func)(void); - /* decrypt_init initialize |ctx| for decrypting. The password is specified by - * |pass_raw| and |pass_raw_len|. |param| contains the serialized parameters - * field of the AlgorithmIdentifier. - * - * It returns one on success and zero on error. */ + // decrypt_init initialize |ctx| for decrypting. The password is specified by + // |pass| and |pass_len|. |param| contains the serialized parameters field of + // the AlgorithmIdentifier. + // + // It returns one on success and zero on error. 
int (*decrypt_init)(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, - const uint8_t *pass_raw, size_t pass_raw_len, CBS *param); - int flags; + const char *pass, size_t pass_len, CBS *param); }; #define PKCS5_DEFAULT_ITERATIONS 2048 #define PKCS5_SALT_LEN 8 int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, - const uint8_t *pass_raw, size_t pass_raw_len, - CBS *param); + const char *pass, size_t pass_len, CBS *param); -/* PKCS5_pbe2_encrypt_init configures |ctx| for encrypting with PKCS #5 PBES2, - * as defined in RFC 2998, with the specified parameters. It writes the - * corresponding AlgorithmIdentifier to |out|. */ +// PKCS5_pbe2_encrypt_init configures |ctx| for encrypting with PKCS #5 PBES2, +// as defined in RFC 2998, with the specified parameters. It writes the +// corresponding AlgorithmIdentifier to |out|. int PKCS5_pbe2_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, unsigned iterations, - const uint8_t *pass_raw, size_t pass_raw_len, + const char *pass, size_t pass_len, const uint8_t *salt, size_t salt_len); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_PKCS8_INTERNAL_H */ +#endif // OPENSSL_HEADER_PKCS8_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/pkcs8/p5_pbev2.c b/Sources/BoringSSL/crypto/pkcs8/p5_pbev2.c index 59e206771..6686cf379 100644 --- a/Sources/BoringSSL/crypto/pkcs8/p5_pbev2.c +++ b/Sources/BoringSSL/crypto/pkcs8/p5_pbev2.c @@ -62,16 +62,86 @@ #include #include #include -#include +#include #include #include "internal.h" #include "../internal.h" +// 1.2.840.113549.1.5.12 +static const uint8_t kPBKDF2[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x05, 0x0c}; + +// 1.2.840.113549.1.5.13 +static const uint8_t kPBES2[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x05, 0x0d}; + +// 1.2.840.113549.2.7 +static const uint8_t kHMACWithSHA1[] = {0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x02, 0x07}; + +static const struct { + uint8_t oid[9]; + uint8_t oid_len; + int nid; + const EVP_CIPHER *(*cipher_func)(void); +} kCipherOIDs[] = { + // 1.2.840.113549.3.2 + {{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x02}, + 8, + NID_rc2_cbc, + &EVP_rc2_cbc}, + // 1.2.840.113549.3.7 + {{0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x07}, + 8, + NID_des_ede3_cbc, + &EVP_des_ede3_cbc}, + // 2.16.840.1.101.3.4.1.2 + {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x02}, + 9, + NID_aes_128_cbc, + &EVP_aes_128_cbc}, + // 2.16.840.1.101.3.4.1.22 + {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x16}, + 9, + NID_aes_192_cbc, + &EVP_aes_192_cbc}, + // 2.16.840.1.101.3.4.1.42 + {{0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2a}, + 9, + NID_aes_256_cbc, + &EVP_aes_256_cbc}, +}; + +static const EVP_CIPHER *cbs_to_cipher(const CBS *cbs) { + for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kCipherOIDs); i++) { + if (CBS_mem_equal(cbs, kCipherOIDs[i].oid, kCipherOIDs[i].oid_len)) { + return kCipherOIDs[i].cipher_func(); + } + } + + return NULL; +} + +static int add_cipher_oid(CBB *out, int nid) { + for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kCipherOIDs); i++) { + if (kCipherOIDs[i].nid == nid) { + CBB child; + return CBB_add_asn1(out, &child, CBS_ASN1_OBJECT) && + CBB_add_bytes(&child, kCipherOIDs[i].oid, + kCipherOIDs[i].oid_len) && + CBB_flush(out); + } + } + + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_CIPHER); + return 0; +} + static int pkcs5_pbe2_cipher_init(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, - unsigned iterations, const uint8_t *pass_raw, - size_t pass_raw_len, const uint8_t *salt, 
+ unsigned iterations, const char *pass, + size_t pass_len, const uint8_t *salt, size_t salt_len, const uint8_t *iv, size_t iv_len, int enc) { if (iv_len != EVP_CIPHER_iv_length(cipher)) { @@ -80,8 +150,7 @@ static int pkcs5_pbe2_cipher_init(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, } uint8_t key[EVP_MAX_KEY_LENGTH]; - int ret = PKCS5_PBKDF2_HMAC_SHA1((const char *)pass_raw, pass_raw_len, salt, - salt_len, iterations, + int ret = PKCS5_PBKDF2_HMAC_SHA1(pass, pass_len, salt, salt_len, iterations, EVP_CIPHER_key_length(cipher), key) && EVP_CipherInit_ex(ctx, cipher, NULL /* engine */, key, iv, enc); OPENSSL_cleanse(key, EVP_MAX_KEY_LENGTH); @@ -90,7 +159,7 @@ static int pkcs5_pbe2_cipher_init(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, int PKCS5_pbe2_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, unsigned iterations, - const uint8_t *pass_raw, size_t pass_raw_len, + const char *pass, size_t pass_len, const uint8_t *salt, size_t salt_len) { int cipher_nid = EVP_CIPHER_nid(cipher); if (cipher_nid == NID_undef) { @@ -98,45 +167,47 @@ int PKCS5_pbe2_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, return 0; } - /* Generate a random IV. */ + // Generate a random IV. uint8_t iv[EVP_MAX_IV_LENGTH]; if (!RAND_bytes(iv, EVP_CIPHER_iv_length(cipher))) { return 0; } - /* See RFC 2898, appendix A. */ - CBB algorithm, param, kdf, kdf_param, salt_cbb, cipher_cbb, iv_cbb; + // See RFC 2898, appendix A. + CBB algorithm, oid, param, kdf, kdf_oid, kdf_param, salt_cbb, cipher_cbb, + iv_cbb; if (!CBB_add_asn1(out, &algorithm, CBS_ASN1_SEQUENCE) || - !OBJ_nid2cbb(&algorithm, NID_pbes2) || + !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || + !CBB_add_bytes(&oid, kPBES2, sizeof(kPBES2)) || !CBB_add_asn1(&algorithm, ¶m, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(¶m, &kdf, CBS_ASN1_SEQUENCE) || - !OBJ_nid2cbb(&kdf, NID_id_pbkdf2) || + !CBB_add_asn1(&kdf, &kdf_oid, CBS_ASN1_OBJECT) || + !CBB_add_bytes(&kdf_oid, kPBKDF2, sizeof(kPBKDF2)) || !CBB_add_asn1(&kdf, &kdf_param, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(&kdf_param, &salt_cbb, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&salt_cbb, salt, salt_len) || !CBB_add_asn1_uint64(&kdf_param, iterations) || - /* Specify a key length for RC2. */ + // Specify a key length for RC2. (cipher_nid == NID_rc2_cbc && !CBB_add_asn1_uint64(&kdf_param, EVP_CIPHER_key_length(cipher))) || - /* Omit the PRF. We use the default hmacWithSHA1. */ + // Omit the PRF. We use the default hmacWithSHA1. !CBB_add_asn1(¶m, &cipher_cbb, CBS_ASN1_SEQUENCE) || - !OBJ_nid2cbb(&cipher_cbb, cipher_nid) || - /* RFC 2898 says RC2-CBC and RC5-CBC-Pad use a SEQUENCE with version and - * IV, but OpenSSL always uses an OCTET STRING IV, so we do the same. */ + !add_cipher_oid(&cipher_cbb, cipher_nid) || + // RFC 2898 says RC2-CBC and RC5-CBC-Pad use a SEQUENCE with version and + // IV, but OpenSSL always uses an OCTET STRING IV, so we do the same. 
!CBB_add_asn1(&cipher_cbb, &iv_cbb, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&iv_cbb, iv, EVP_CIPHER_iv_length(cipher)) || !CBB_flush(out)) { return 0; } - return pkcs5_pbe2_cipher_init(ctx, cipher, iterations, pass_raw, pass_raw_len, - salt, salt_len, iv, - EVP_CIPHER_iv_length(cipher), 1 /* encrypt */); + return pkcs5_pbe2_cipher_init(ctx, cipher, iterations, pass, pass_len, salt, + salt_len, iv, EVP_CIPHER_iv_length(cipher), + 1 /* encrypt */); } int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, - const uint8_t *pass_raw, size_t pass_raw_len, - CBS *param) { + const char *pass, size_t pass_len, CBS *param) { CBS pbe_param, kdf, kdf_obj, enc_scheme, enc_obj; if (!CBS_get_asn1(param, &pbe_param, CBS_ASN1_SEQUENCE) || CBS_len(param) != 0 || @@ -149,20 +220,20 @@ int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, return 0; } - /* Check that the key derivation function is PBKDF2. */ - if (OBJ_cbs2nid(&kdf_obj) != NID_id_pbkdf2) { + // Only PBKDF2 is supported. + if (!CBS_mem_equal(&kdf_obj, kPBKDF2, sizeof(kPBKDF2))) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_KEY_DERIVATION_FUNCTION); return 0; } - /* See if we recognise the encryption algorithm. */ - const EVP_CIPHER *cipher = EVP_get_cipherbynid(OBJ_cbs2nid(&enc_obj)); + // See if we recognise the encryption algorithm. + const EVP_CIPHER *cipher = cbs_to_cipher(&enc_obj); if (cipher == NULL) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_CIPHER); return 0; } - /* Parse the KDF parameters. */ + // Parse the KDF parameters. See RFC 8018, appendix A.2. CBS pbkdf2_params, salt; uint64_t iterations; if (!CBS_get_asn1(&kdf, &pbkdf2_params, CBS_ASN1_SEQUENCE) || @@ -178,8 +249,8 @@ int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, return 0; } - /* The optional keyLength parameter, if present, must match the key length of - * the cipher. */ + // The optional keyLength parameter, if present, must match the key length of + // the cipher. if (CBS_peek_asn1_tag(&pbkdf2_params, CBS_ASN1_INTEGER)) { uint64_t key_len; if (!CBS_get_asn1_uint64(&pbkdf2_params, &key_len)) { @@ -194,25 +265,35 @@ int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, } if (CBS_len(&pbkdf2_params) != 0) { - CBS prf; - if (!CBS_get_asn1(&pbkdf2_params, &prf, CBS_ASN1_OBJECT) || + CBS alg_id, prf; + if (!CBS_get_asn1(&pbkdf2_params, &alg_id, CBS_ASN1_SEQUENCE) || + !CBS_get_asn1(&alg_id, &prf, CBS_ASN1_OBJECT) || CBS_len(&pbkdf2_params) != 0) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); return 0; } - /* We only support hmacWithSHA1. It is the DEFAULT, so DER requires it be - * omitted, but we match OpenSSL in tolerating it being present. */ - if (OBJ_cbs2nid(&prf) != NID_hmacWithSHA1) { + // We only support hmacWithSHA1. It is the DEFAULT, so DER requires it be + // omitted, but we match OpenSSL in tolerating it being present. + if (!CBS_mem_equal(&prf, kHMACWithSHA1, sizeof(kHMACWithSHA1))) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNSUPPORTED_PRF); return 0; } + + // hmacWithSHA1 has a NULL parameter. + CBS null; + if (!CBS_get_asn1(&alg_id, &null, CBS_ASN1_NULL) || + CBS_len(&null) != 0 || + CBS_len(&alg_id) != 0) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); + return 0; + } } - /* Parse the encryption scheme parameters. Note OpenSSL does not match the - * specification. Per RFC 2898, this should depend on the encryption scheme. - * In particular, RC2-CBC and RC5-CBC-Pad use a SEQUENCE with version and IV. - * We align with OpenSSL. 
*/ + // Parse the encryption scheme parameters. Note OpenSSL does not match the + // specification. Per RFC 2898, this should depend on the encryption scheme. + // In particular, RC2-CBC uses a SEQUENCE with version and IV. We align with + // OpenSSL. CBS iv; if (!CBS_get_asn1(&enc_scheme, &iv, CBS_ASN1_OCTETSTRING) || CBS_len(&enc_scheme) != 0) { @@ -220,7 +301,7 @@ int PKCS5_pbe2_decrypt_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, return 0; } - return pkcs5_pbe2_cipher_init(ctx, cipher, (unsigned)iterations, pass_raw, - pass_raw_len, CBS_data(&salt), CBS_len(&salt), + return pkcs5_pbe2_cipher_init(ctx, cipher, (unsigned)iterations, pass, + pass_len, CBS_data(&salt), CBS_len(&salt), CBS_data(&iv), CBS_len(&iv), 0 /* decrypt */); } diff --git a/Sources/BoringSSL/crypto/pkcs8/p8_pkey.c b/Sources/BoringSSL/crypto/pkcs8/p8_pkey.c deleted file mode 100644 index 69a7e293a..000000000 --- a/Sources/BoringSSL/crypto/pkcs8/p8_pkey.c +++ /dev/null @@ -1,85 +0,0 @@ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project 1999. - */ -/* ==================================================================== - * Copyright (c) 1999-2005 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * licensing@OpenSSL.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. 
- * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). */ - - -#include - -#include -#include -#include - -/* Minor tweak to operation: zero private key data */ -static int pkey_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, - void *exarg) { - /* Since the structure must still be valid use ASN1_OP_FREE_PRE */ - if (operation == ASN1_OP_FREE_PRE) { - PKCS8_PRIV_KEY_INFO *key = (PKCS8_PRIV_KEY_INFO *)*pval; - if (key->pkey && key->pkey->type == V_ASN1_OCTET_STRING && - key->pkey->value.octet_string) { - OPENSSL_cleanse(key->pkey->value.octet_string->data, - key->pkey->value.octet_string->length); - } - } - return 1; -} - -ASN1_SEQUENCE_cb(PKCS8_PRIV_KEY_INFO, pkey_cb) = { - ASN1_SIMPLE(PKCS8_PRIV_KEY_INFO, version, ASN1_INTEGER), - ASN1_SIMPLE(PKCS8_PRIV_KEY_INFO, pkeyalg, X509_ALGOR), - ASN1_SIMPLE(PKCS8_PRIV_KEY_INFO, pkey, ASN1_ANY), - ASN1_IMP_SET_OF_OPT(PKCS8_PRIV_KEY_INFO, attributes, X509_ATTRIBUTE, 0) -} ASN1_SEQUENCE_END_cb(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO) - -IMPLEMENT_ASN1_FUNCTIONS(PKCS8_PRIV_KEY_INFO) diff --git a/Sources/BoringSSL/crypto/pkcs8/pkcs8.c b/Sources/BoringSSL/crypto/pkcs8/pkcs8.c index 64a2d0212..94205e63c 100644 --- a/Sources/BoringSSL/crypto/pkcs8/pkcs8.c +++ b/Sources/BoringSSL/crypto/pkcs8/pkcs8.c @@ -59,27 +59,18 @@ #include #include -#include -#include #include #include #include #include -#include #include -#include +#include #include -#include #include "internal.h" #include "../internal.h" -#include "../bytestring/internal.h" -#define PKCS12_KEY_ID 1 -#define PKCS12_IV_ID 2 -#define PKCS12_MAC_ID 3 - static int ascii_to_ucs2(const char *ascii, size_t ascii_len, uint8_t **out, size_t *out_len) { size_t ulen = ascii_len * 2 + 2; @@ -89,6 +80,7 @@ static int ascii_to_ucs2(const char *ascii, size_t ascii_len, uint8_t *unitmp = OPENSSL_malloc(ulen); if (unitmp == NULL) { + OPENSSL_PUT_ERROR(PKCS8, ERR_R_MALLOC_FAILURE); return 0; } for (size_t i = 0; i < ulen - 2; i += 2) { @@ -96,7 +88,7 @@ static int ascii_to_ucs2(const char *ascii, size_t ascii_len, unitmp[i + 1] = ascii[i >> 1]; } - /* Terminate the result with a UCS-2 NUL. */ + // Terminate the result with a UCS-2 NUL. unitmp[ulen - 2] = 0; unitmp[ulen - 1] = 0; *out_len = ulen; @@ -104,54 +96,64 @@ static int ascii_to_ucs2(const char *ascii, size_t ascii_len, return 1; } -static int pkcs12_key_gen_raw(const uint8_t *pass_raw, size_t pass_raw_len, - const uint8_t *salt, size_t salt_len, - uint8_t id, unsigned iterations, - size_t out_len, uint8_t *out, - const EVP_MD *md) { - /* See https://tools.ietf.org/html/rfc7292#appendix-B. Quoted parts of the - * specification have errata applied and other typos fixed. */ +int pkcs12_key_gen(const char *pass, size_t pass_len, const uint8_t *salt, + size_t salt_len, uint8_t id, unsigned iterations, + size_t out_len, uint8_t *out, const EVP_MD *md) { + // See https://tools.ietf.org/html/rfc7292#appendix-B. Quoted parts of the + // specification have errata applied and other typos fixed. if (iterations < 1) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_ITERATION_COUNT); return 0; } - /* In the spec, |block_size| is called "v", but measured in bits. 
*/ + int ret = 0; + EVP_MD_CTX ctx; + EVP_MD_CTX_init(&ctx); + uint8_t *pass_raw = NULL, *I = NULL; + size_t pass_raw_len = 0, I_len = 0; + // If |pass| is NULL, we use the empty string rather than {0, 0} as the raw + // password. + if (pass != NULL && + !ascii_to_ucs2(pass, pass_len, &pass_raw, &pass_raw_len)) { + goto err; + } + + // In the spec, |block_size| is called "v", but measured in bits. size_t block_size = EVP_MD_block_size(md); - /* 1. Construct a string, D (the "diversifier"), by concatenating v/8 copies - * of ID. */ + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 copies + // of ID. uint8_t D[EVP_MAX_MD_BLOCK_SIZE]; OPENSSL_memset(D, id, block_size); - /* 2. Concatenate copies of the salt together to create a string S of length - * v(ceiling(s/v)) bits (the final copy of the salt may be truncated to - * create S). Note that if the salt is the empty string, then so is S. - * - * 3. Concatenate copies of the password together to create a string P of - * length v(ceiling(p/v)) bits (the final copy of the password may be - * truncated to create P). Note that if the password is the empty string, - * then so is P. - * - * 4. Set I=S||P to be the concatenation of S and P. */ + // 2. Concatenate copies of the salt together to create a string S of length + // v(ceiling(s/v)) bits (the final copy of the salt may be truncated to + // create S). Note that if the salt is the empty string, then so is S. + // + // 3. Concatenate copies of the password together to create a string P of + // length v(ceiling(p/v)) bits (the final copy of the password may be + // truncated to create P). Note that if the password is the empty string, + // then so is P. + // + // 4. Set I=S||P to be the concatenation of S and P. if (salt_len + block_size - 1 < salt_len || pass_raw_len + block_size - 1 < pass_raw_len) { OPENSSL_PUT_ERROR(PKCS8, ERR_R_OVERFLOW); - return 0; + goto err; } size_t S_len = block_size * ((salt_len + block_size - 1) / block_size); size_t P_len = block_size * ((pass_raw_len + block_size - 1) / block_size); - size_t I_len = S_len + P_len; + I_len = S_len + P_len; if (I_len < S_len) { OPENSSL_PUT_ERROR(PKCS8, ERR_R_OVERFLOW); - return 0; + goto err; } - uint8_t *I = OPENSSL_malloc(I_len); + I = OPENSSL_malloc(I_len); if (I_len != 0 && I == NULL) { OPENSSL_PUT_ERROR(PKCS8, ERR_R_MALLOC_FAILURE); - return 0; + goto err; } for (size_t i = 0; i < S_len; i++) { @@ -161,13 +163,9 @@ static int pkcs12_key_gen_raw(const uint8_t *pass_raw, size_t pass_raw_len, I[i + S_len] = pass_raw[i % pass_raw_len]; } - int ret = 0; - EVP_MD_CTX ctx; - EVP_MD_CTX_init(&ctx); - while (out_len != 0) { - /* A. Set A_i=H^r(D||I). (i.e., the r-th hash of D||I, - * H(H(H(... H(D||I)))) */ + // A. Set A_i=H^r(D||I). (i.e., the r-th hash of D||I, + // H(H(H(... H(D||I)))) uint8_t A[EVP_MAX_MD_SIZE]; unsigned A_len; if (!EVP_DigestInit_ex(&ctx, md, NULL) || @@ -192,16 +190,16 @@ static int pkcs12_key_gen_raw(const uint8_t *pass_raw, size_t pass_raw_len, break; } - /* B. Concatenate copies of A_i to create a string B of length v bits (the - * final copy of A_i may be truncated to create B). */ + // B. Concatenate copies of A_i to create a string B of length v bits (the + // final copy of A_i may be truncated to create B). uint8_t B[EVP_MAX_MD_BLOCK_SIZE]; for (size_t i = 0; i < block_size; i++) { B[i] = A[i % A_len]; } - /* C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit blocks, - * where k=ceiling(s/v)+ceiling(p/v), modify I by setting I_j=(I_j+B+1) mod - * 2^v for each j. 
*/ + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit blocks, + // where k=ceiling(s/v)+ceiling(p/v), modify I by setting I_j=(I_j+B+1) mod + // 2^v for each j. assert(I_len % block_size == 0); for (size_t i = 0; i < I_len; i += block_size) { unsigned carry = 1; @@ -216,32 +214,26 @@ static int pkcs12_key_gen_raw(const uint8_t *pass_raw, size_t pass_raw_len, ret = 1; err: - OPENSSL_cleanse(I, I_len); OPENSSL_free(I); + OPENSSL_free(pass_raw); EVP_MD_CTX_cleanup(&ctx); return ret; } static int pkcs12_pbe_cipher_init(const struct pbe_suite *suite, EVP_CIPHER_CTX *ctx, unsigned iterations, - const uint8_t *pass_raw, size_t pass_raw_len, + const char *pass, size_t pass_len, const uint8_t *salt, size_t salt_len, int is_encrypt) { const EVP_CIPHER *cipher = suite->cipher_func(); const EVP_MD *md = suite->md_func(); uint8_t key[EVP_MAX_KEY_LENGTH]; - if (!pkcs12_key_gen_raw(pass_raw, pass_raw_len, salt, - salt_len, PKCS12_KEY_ID, iterations, - EVP_CIPHER_key_length(cipher), key, md)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_KEY_GEN_ERROR); - return 0; - } - uint8_t iv[EVP_MAX_IV_LENGTH]; - if (!pkcs12_key_gen_raw(pass_raw, pass_raw_len, salt, - salt_len, PKCS12_IV_ID, iterations, - EVP_CIPHER_iv_length(cipher), iv, md)) { + if (!pkcs12_key_gen(pass, pass_len, salt, salt_len, PKCS12_KEY_ID, iterations, + EVP_CIPHER_key_length(cipher), key, md) || + !pkcs12_key_gen(pass, pass_len, salt, salt_len, PKCS12_IV_ID, iterations, + EVP_CIPHER_iv_length(cipher), iv, md)) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_KEY_GEN_ERROR); return 0; } @@ -253,8 +245,8 @@ static int pkcs12_pbe_cipher_init(const struct pbe_suite *suite, } static int pkcs12_pbe_decrypt_init(const struct pbe_suite *suite, - EVP_CIPHER_CTX *ctx, const uint8_t *pass_raw, - size_t pass_raw_len, CBS *param) { + EVP_CIPHER_CTX *ctx, const char *pass, + size_t pass_len, CBS *param) { CBS pbe_param, salt; uint64_t iterations; if (!CBS_get_asn1(param, &pbe_param, CBS_ASN1_SEQUENCE) || @@ -271,32 +263,52 @@ static int pkcs12_pbe_decrypt_init(const struct pbe_suite *suite, return 0; } - return pkcs12_pbe_cipher_init(suite, ctx, (unsigned)iterations, pass_raw, - pass_raw_len, CBS_data(&salt), CBS_len(&salt), + return pkcs12_pbe_cipher_init(suite, ctx, (unsigned)iterations, pass, + pass_len, CBS_data(&salt), CBS_len(&salt), 0 /* decrypt */); } static const struct pbe_suite kBuiltinPBE[] = { { - NID_pbe_WithSHA1And40BitRC2_CBC, EVP_rc2_40_cbc, EVP_sha1, - pkcs12_pbe_decrypt_init, PBE_UCS2_CONVERT_PASSWORD, + NID_pbe_WithSHA1And40BitRC2_CBC, + // 1.2.840.113549.1.12.1.6 + {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x06}, + 10, + EVP_rc2_40_cbc, + EVP_sha1, + pkcs12_pbe_decrypt_init, }, { - NID_pbe_WithSHA1And128BitRC4, EVP_rc4, EVP_sha1, - pkcs12_pbe_decrypt_init, PBE_UCS2_CONVERT_PASSWORD, + NID_pbe_WithSHA1And128BitRC4, + // 1.2.840.113549.1.12.1.1 + {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x01}, + 10, + EVP_rc4, + EVP_sha1, + pkcs12_pbe_decrypt_init, }, { - NID_pbe_WithSHA1And3_Key_TripleDES_CBC, EVP_des_ede3_cbc, EVP_sha1, - pkcs12_pbe_decrypt_init, PBE_UCS2_CONVERT_PASSWORD, + NID_pbe_WithSHA1And3_Key_TripleDES_CBC, + // 1.2.840.113549.1.12.1.3 + {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x01, 0x03}, + 10, + EVP_des_ede3_cbc, + EVP_sha1, + pkcs12_pbe_decrypt_init, }, { - NID_pbes2, NULL, NULL, PKCS5_pbe2_decrypt_init, 0, + NID_pbes2, + // 1.2.840.113549.1.5.13 + {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x05, 0x0d}, + 9, + NULL, + NULL, + PKCS5_pbe2_decrypt_init, }, }; static const struct pbe_suite 
*get_pbe_suite(int pbe_nid) { - unsigned i; - for (i = 0; i < OPENSSL_ARRAY_SIZE(kBuiltinPBE); i++) { + for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(kBuiltinPBE); i++) { if (kBuiltinPBE[i].pbe_nid == pbe_nid) { return &kBuiltinPBE[i]; } @@ -305,51 +317,9 @@ static const struct pbe_suite *get_pbe_suite(int pbe_nid) { return NULL; } -/* pass_to_pass_raw performs a password conversion (possibly a no-op) - * appropriate to the supplied |pbe_nid|. The input |pass| is treated as a - * NUL-terminated string if |pass_len| is -1, otherwise it is treated as a - * buffer of the specified length. If the supplied PBE NID sets the - * |PBE_UCS2_CONVERT_PASSWORD| flag, the supplied |pass| will be converted to - * UCS-2. - * - * It sets |*out_pass_raw| to a new buffer that must be freed by the caller. It - * returns one on success and zero on error. */ -static int pass_to_pass_raw(int pbe_nid, const char *pass, int pass_len, - uint8_t **out_pass_raw, size_t *out_pass_raw_len) { - if (pass == NULL) { - *out_pass_raw = NULL; - *out_pass_raw_len = 0; - return 1; - } - - if (pass_len == -1) { - pass_len = strlen(pass); - } else if (pass_len < 0 || pass_len > 2000000000) { - OPENSSL_PUT_ERROR(PKCS8, ERR_R_OVERFLOW); - return 0; - } - - const struct pbe_suite *suite = get_pbe_suite(pbe_nid); - if (suite != NULL && (suite->flags & PBE_UCS2_CONVERT_PASSWORD)) { - if (!ascii_to_ucs2(pass, pass_len, out_pass_raw, out_pass_raw_len)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); - return 0; - } - } else { - *out_pass_raw = BUF_memdup(pass, pass_len); - if (*out_pass_raw == NULL) { - OPENSSL_PUT_ERROR(PKCS8, ERR_R_MALLOC_FAILURE); - return 0; - } - *out_pass_raw_len = (size_t)pass_len; - } - - return 1; -} - static int pkcs12_pbe_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, int alg, - unsigned iterations, const uint8_t *pass_raw, - size_t pass_raw_len, const uint8_t *salt, + unsigned iterations, const char *pass, + size_t pass_len, const uint8_t *salt, size_t salt_len) { const struct pbe_suite *suite = get_pbe_suite(alg); if (suite == NULL) { @@ -357,10 +327,11 @@ static int pkcs12_pbe_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, int alg, return 0; } - /* See RFC 2898, appendix A.3. */ - CBB algorithm, param, salt_cbb; + // See RFC 2898, appendix A.3. 
+ CBB algorithm, oid, param, salt_cbb; if (!CBB_add_asn1(out, &algorithm, CBS_ASN1_SEQUENCE) || - !OBJ_nid2cbb(&algorithm, alg) || + !CBB_add_asn1(&algorithm, &oid, CBS_ASN1_OBJECT) || + !CBB_add_bytes(&oid, suite->oid, suite->oid_len) || !CBB_add_asn1(&algorithm, ¶m, CBS_ASN1_SEQUENCE) || !CBB_add_asn1(¶m, &salt_cbb, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&salt_cbb, salt, salt_len) || @@ -369,13 +340,13 @@ static int pkcs12_pbe_encrypt_init(CBB *out, EVP_CIPHER_CTX *ctx, int alg, return 0; } - return pkcs12_pbe_cipher_init(suite, ctx, iterations, pass_raw, pass_raw_len, - salt, salt_len, 1 /* encrypt */); + return pkcs12_pbe_cipher_init(suite, ctx, iterations, pass, pass_len, salt, + salt_len, 1 /* encrypt */); } -static int pbe_decrypt(uint8_t **out, size_t *out_len, CBS *algorithm, - const uint8_t *pass_raw, size_t pass_raw_len, - const uint8_t *in, size_t in_len) { +int pkcs8_pbe_decrypt(uint8_t **out, size_t *out_len, CBS *algorithm, + const char *pass, size_t pass_len, const uint8_t *in, + size_t in_len) { int ret = 0; uint8_t *buf = NULL;; EVP_CIPHER_CTX ctx; @@ -387,13 +358,19 @@ static int pbe_decrypt(uint8_t **out, size_t *out_len, CBS *algorithm, goto err; } - const struct pbe_suite *suite = get_pbe_suite(OBJ_cbs2nid(&obj)); + const struct pbe_suite *suite = NULL; + for (unsigned i = 0; i < OPENSSL_ARRAY_SIZE(kBuiltinPBE); i++) { + if (CBS_mem_equal(&obj, kBuiltinPBE[i].oid, kBuiltinPBE[i].oid_len)) { + suite = &kBuiltinPBE[i]; + break; + } + } if (suite == NULL) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNKNOWN_ALGORITHM); goto err; } - if (!suite->decrypt_init(suite, &ctx, pass_raw, pass_raw_len, algorithm)) { + if (!suite->decrypt_init(suite, &ctx, pass, pass_len, algorithm)) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_KEYGEN_FAILURE); goto err; } @@ -426,90 +403,44 @@ static int pbe_decrypt(uint8_t **out, size_t *out_len, CBS *algorithm, return ret; } -static PKCS8_PRIV_KEY_INFO *pkcs8_decrypt_raw(X509_SIG *pkcs8, - const uint8_t *pass_raw, - size_t pass_raw_len) { - PKCS8_PRIV_KEY_INFO *ret = NULL; - uint8_t *in = NULL, *out = NULL; - size_t out_len = 0; - - /* Convert the legacy ASN.1 object to a byte string. */ - int in_len = i2d_X509_SIG(pkcs8, &in); - if (in_len < 0) { - goto err; - } - - /* See RFC 5208, section 6. */ - CBS cbs, epki, algorithm, ciphertext; - CBS_init(&cbs, in, in_len); - if (!CBS_get_asn1(&cbs, &epki, CBS_ASN1_SEQUENCE) || +EVP_PKEY *PKCS8_parse_encrypted_private_key(CBS *cbs, const char *pass, + size_t pass_len) { + // See RFC 5208, section 6. + CBS epki, algorithm, ciphertext; + if (!CBS_get_asn1(cbs, &epki, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&epki, &algorithm, CBS_ASN1_SEQUENCE) || !CBS_get_asn1(&epki, &ciphertext, CBS_ASN1_OCTETSTRING) || - CBS_len(&epki) != 0 || - CBS_len(&cbs) != 0) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); - goto err; - } - - if (!pbe_decrypt(&out, &out_len, &algorithm, pass_raw, pass_raw_len, - CBS_data(&ciphertext), CBS_len(&ciphertext))) { - goto err; - } - - if (out_len > LONG_MAX) { + CBS_len(&epki) != 0) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); - goto err; + return 0; } - /* Convert back to legacy ASN.1 objects. 
*/ - const uint8_t *ptr = out; - ret = d2i_PKCS8_PRIV_KEY_INFO(NULL, &ptr, (long)out_len); - OPENSSL_cleanse(out, out_len); - if (ret == NULL || ptr != out + out_len) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); - PKCS8_PRIV_KEY_INFO_free(ret); - ret = NULL; + uint8_t *out; + size_t out_len; + if (!pkcs8_pbe_decrypt(&out, &out_len, &algorithm, pass, pass_len, + CBS_data(&ciphertext), CBS_len(&ciphertext))) { + return 0; } -err: - OPENSSL_free(in); - OPENSSL_cleanse(out, out_len); + CBS pki; + CBS_init(&pki, out, out_len); + EVP_PKEY *ret = EVP_parse_private_key(&pki); OPENSSL_free(out); return ret; } -PKCS8_PRIV_KEY_INFO *PKCS8_decrypt(X509_SIG *pkcs8, const char *pass, - int pass_len) { - uint8_t *pass_raw = NULL; - size_t pass_raw_len = 0; - if (!pass_to_pass_raw(OBJ_obj2nid(pkcs8->algor->algorithm), pass, pass_len, - &pass_raw, &pass_raw_len)) { - return NULL; - } - - PKCS8_PRIV_KEY_INFO *ret = pkcs8_decrypt_raw(pkcs8, pass_raw, pass_raw_len); - - if (pass_raw) { - OPENSSL_cleanse(pass_raw, pass_raw_len); - OPENSSL_free(pass_raw); - } - return ret; -} - -static X509_SIG *pkcs8_encrypt_raw(int pbe_nid, const EVP_CIPHER *cipher, - const uint8_t *pass_raw, size_t pass_raw_len, - const uint8_t *salt, size_t salt_len, - int iterations, PKCS8_PRIV_KEY_INFO *p8inf) { - X509_SIG *ret = NULL; - uint8_t *plaintext = NULL, *salt_buf = NULL, *der = NULL; - int plaintext_len = -1; - size_t der_len; - CBB cbb; - CBB_zero(&cbb); +int PKCS8_marshal_encrypted_private_key(CBB *out, int pbe_nid, + const EVP_CIPHER *cipher, + const char *pass, size_t pass_len, + const uint8_t *salt, size_t salt_len, + int iterations, const EVP_PKEY *pkey) { + int ret = 0; + uint8_t *plaintext = NULL, *salt_buf = NULL; + size_t plaintext_len = 0; EVP_CIPHER_CTX ctx; EVP_CIPHER_CTX_init(&ctx); - /* Generate a random salt if necessary. */ + // Generate a random salt if necessary. if (salt == NULL) { if (salt_len == 0) { salt_len = PKCS5_SALT_LEN; @@ -528,699 +459,55 @@ static X509_SIG *pkcs8_encrypt_raw(int pbe_nid, const EVP_CIPHER *cipher, iterations = PKCS5_DEFAULT_ITERATIONS; } - /* Convert the input from the legacy ASN.1 format. */ - plaintext_len = i2d_PKCS8_PRIV_KEY_INFO(p8inf, &plaintext); - if (plaintext_len < 0) { + // Serialize the input key. 
+ CBB plaintext_cbb; + if (!CBB_init(&plaintext_cbb, 128) || + !EVP_marshal_private_key(&plaintext_cbb, pkey) || + !CBB_finish(&plaintext_cbb, &plaintext, &plaintext_len)) { + CBB_cleanup(&plaintext_cbb); goto err; } CBB epki; - if (!CBB_init(&cbb, 128) || - !CBB_add_asn1(&cbb, &epki, CBS_ASN1_SEQUENCE)) { + if (!CBB_add_asn1(out, &epki, CBS_ASN1_SEQUENCE)) { goto err; } int alg_ok; if (pbe_nid == -1) { alg_ok = PKCS5_pbe2_encrypt_init(&epki, &ctx, cipher, (unsigned)iterations, - pass_raw, pass_raw_len, salt, salt_len); + pass, pass_len, salt, salt_len); } else { alg_ok = pkcs12_pbe_encrypt_init(&epki, &ctx, pbe_nid, (unsigned)iterations, - pass_raw, pass_raw_len, salt, salt_len); + pass, pass_len, salt, salt_len); } if (!alg_ok) { goto err; } - size_t max_out = (size_t)plaintext_len + EVP_CIPHER_CTX_block_size(&ctx); - if (max_out < (size_t)plaintext_len) { + size_t max_out = plaintext_len + EVP_CIPHER_CTX_block_size(&ctx); + if (max_out < plaintext_len) { OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_TOO_LONG); goto err; } CBB ciphertext; - uint8_t *out; + uint8_t *ptr; int n1, n2; if (!CBB_add_asn1(&epki, &ciphertext, CBS_ASN1_OCTETSTRING) || - !CBB_reserve(&ciphertext, &out, max_out) || - !EVP_CipherUpdate(&ctx, out, &n1, plaintext, plaintext_len) || - !EVP_CipherFinal_ex(&ctx, out + n1, &n2) || + !CBB_reserve(&ciphertext, &ptr, max_out) || + !EVP_CipherUpdate(&ctx, ptr, &n1, plaintext, plaintext_len) || + !EVP_CipherFinal_ex(&ctx, ptr + n1, &n2) || !CBB_did_write(&ciphertext, n1 + n2) || - !CBB_finish(&cbb, &der, &der_len)) { + !CBB_flush(out)) { goto err; } - /* Convert back to legacy ASN.1 objects. */ - const uint8_t *ptr = der; - ret = d2i_X509_SIG(NULL, &ptr, der_len); - if (ret == NULL || ptr != der + der_len) { - OPENSSL_PUT_ERROR(PKCS8, ERR_R_INTERNAL_ERROR); - X509_SIG_free(ret); - ret = NULL; - } + ret = 1; err: - if (plaintext_len > 0) { - OPENSSL_cleanse(plaintext, plaintext_len); - } OPENSSL_free(plaintext); OPENSSL_free(salt_buf); - OPENSSL_free(der); - CBB_cleanup(&cbb); EVP_CIPHER_CTX_cleanup(&ctx); return ret; } - -X509_SIG *PKCS8_encrypt(int pbe_nid, const EVP_CIPHER *cipher, const char *pass, - int pass_len, const uint8_t *salt, size_t salt_len, - int iterations, PKCS8_PRIV_KEY_INFO *p8inf) { - uint8_t *pass_raw = NULL; - size_t pass_raw_len = 0; - if (!pass_to_pass_raw(pbe_nid, pass, pass_len, &pass_raw, &pass_raw_len)) { - return NULL; - } - - X509_SIG *ret = pkcs8_encrypt_raw(pbe_nid, cipher, pass_raw, pass_raw_len, - salt, salt_len, iterations, p8inf); - - if (pass_raw) { - OPENSSL_cleanse(pass_raw, pass_raw_len); - OPENSSL_free(pass_raw); - } - return ret; -} - -EVP_PKEY *EVP_PKCS82PKEY(PKCS8_PRIV_KEY_INFO *p8) { - uint8_t *der = NULL; - int der_len = i2d_PKCS8_PRIV_KEY_INFO(p8, &der); - if (der_len < 0) { - return NULL; - } - - CBS cbs; - CBS_init(&cbs, der, (size_t)der_len); - EVP_PKEY *ret = EVP_parse_private_key(&cbs); - if (ret == NULL || CBS_len(&cbs) != 0) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); - EVP_PKEY_free(ret); - OPENSSL_free(der); - return NULL; - } - - OPENSSL_free(der); - return ret; -} - -PKCS8_PRIV_KEY_INFO *EVP_PKEY2PKCS8(EVP_PKEY *pkey) { - CBB cbb; - uint8_t *der = NULL; - size_t der_len; - if (!CBB_init(&cbb, 0) || - !EVP_marshal_private_key(&cbb, pkey) || - !CBB_finish(&cbb, &der, &der_len) || - der_len > LONG_MAX) { - CBB_cleanup(&cbb); - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_ENCODE_ERROR); - goto err; - } - - const uint8_t *p = der; - PKCS8_PRIV_KEY_INFO *p8 = d2i_PKCS8_PRIV_KEY_INFO(NULL, &p, (long)der_len); - if (p8 == NULL || p != der + 
der_len) { - PKCS8_PRIV_KEY_INFO_free(p8); - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); - goto err; - } - - OPENSSL_free(der); - return p8; - -err: - OPENSSL_free(der); - return NULL; -} - -struct pkcs12_context { - EVP_PKEY **out_key; - STACK_OF(X509) *out_certs; - uint8_t *password; - size_t password_len; -}; - -/* PKCS12_handle_sequence parses a BER-encoded SEQUENCE of elements in a PKCS#12 - * structure. */ -static int PKCS12_handle_sequence( - CBS *sequence, struct pkcs12_context *ctx, - int (*handle_element)(CBS *cbs, struct pkcs12_context *ctx)) { - uint8_t *der_bytes = NULL; - size_t der_len; - CBS in; - int ret = 0; - - /* Although a BER->DER conversion is done at the beginning of |PKCS12_parse|, - * the ASN.1 data gets wrapped in OCTETSTRINGs and/or encrypted and the - * conversion cannot see through those wrappings. So each time we step - * through one we need to convert to DER again. */ - if (!CBS_asn1_ber_to_der(sequence, &der_bytes, &der_len)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - return 0; - } - - if (der_bytes != NULL) { - CBS_init(&in, der_bytes, der_len); - } else { - CBS_init(&in, CBS_data(sequence), CBS_len(sequence)); - } - - CBS child; - if (!CBS_get_asn1(&in, &child, CBS_ASN1_SEQUENCE) || - CBS_len(&in) != 0) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - while (CBS_len(&child) > 0) { - CBS element; - if (!CBS_get_asn1(&child, &element, CBS_ASN1_SEQUENCE)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - if (!handle_element(&element, ctx)) { - goto err; - } - } - - ret = 1; - -err: - OPENSSL_free(der_bytes); - return ret; -} - -/* PKCS12_handle_safe_bag parses a single SafeBag element in a PKCS#12 - * structure. */ -static int PKCS12_handle_safe_bag(CBS *safe_bag, struct pkcs12_context *ctx) { - CBS bag_id, wrapped_value; - if (!CBS_get_asn1(safe_bag, &bag_id, CBS_ASN1_OBJECT) || - !CBS_get_asn1(safe_bag, &wrapped_value, - CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) - /* Ignore the bagAttributes field. */) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - return 0; - } - - int nid = OBJ_cbs2nid(&bag_id); - if (nid == NID_pkcs8ShroudedKeyBag) { - /* See RFC 7292, section 4.2.2. */ - if (*ctx->out_key) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_MULTIPLE_PRIVATE_KEYS_IN_PKCS12); - return 0; - } - - if (CBS_len(&wrapped_value) > LONG_MAX) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - return 0; - } - - /* |encrypted| isn't actually an X.509 signature, but it has the same - * structure as one and so |X509_SIG| is reused to store it. */ - const uint8_t *inp = CBS_data(&wrapped_value); - X509_SIG *encrypted = - d2i_X509_SIG(NULL, &inp, (long)CBS_len(&wrapped_value)); - if (encrypted == NULL) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - return 0; - } - if (inp != CBS_data(&wrapped_value) + CBS_len(&wrapped_value)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - X509_SIG_free(encrypted); - return 0; - } - - PKCS8_PRIV_KEY_INFO *pki = - pkcs8_decrypt_raw(encrypted, ctx->password, ctx->password_len); - X509_SIG_free(encrypted); - if (pki == NULL) { - return 0; - } - - *ctx->out_key = EVP_PKCS82PKEY(pki); - PKCS8_PRIV_KEY_INFO_free(pki); - return ctx->out_key != NULL; - } - - if (nid == NID_certBag) { - /* See RFC 7292, section 4.2.3. 
*/ - CBS cert_bag, cert_type, wrapped_cert, cert; - if (!CBS_get_asn1(&wrapped_value, &cert_bag, CBS_ASN1_SEQUENCE) || - !CBS_get_asn1(&cert_bag, &cert_type, CBS_ASN1_OBJECT) || - !CBS_get_asn1(&cert_bag, &wrapped_cert, - CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || - !CBS_get_asn1(&wrapped_cert, &cert, CBS_ASN1_OCTETSTRING)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - return 0; - } - - if (OBJ_cbs2nid(&cert_type) != NID_x509Certificate) { - return 1; - } - - if (CBS_len(&cert) > LONG_MAX) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - return 0; - } - - const uint8_t *inp = CBS_data(&cert); - X509 *x509 = d2i_X509(NULL, &inp, (long)CBS_len(&cert)); - if (!x509) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - return 0; - } - - if (inp != CBS_data(&cert) + CBS_len(&cert)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - X509_free(x509); - return 0; - } - - if (0 == sk_X509_push(ctx->out_certs, x509)) { - X509_free(x509); - return 0; - } - - return 1; - } - - /* Unknown element type - ignore it. */ - return 1; -} - -/* PKCS12_handle_content_info parses a single PKCS#7 ContentInfo element in a - * PKCS#12 structure. */ -static int PKCS12_handle_content_info(CBS *content_info, - struct pkcs12_context *ctx) { - CBS content_type, wrapped_contents, contents; - int nid, ret = 0; - uint8_t *storage = NULL; - - if (!CBS_get_asn1(content_info, &content_type, CBS_ASN1_OBJECT) || - !CBS_get_asn1(content_info, &wrapped_contents, - CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || - CBS_len(content_info) != 0) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - nid = OBJ_cbs2nid(&content_type); - if (nid == NID_pkcs7_encrypted) { - /* See https://tools.ietf.org/html/rfc2315#section-13. - * - * PKCS#7 encrypted data inside a PKCS#12 structure is generally an - * encrypted certificate bag and it's generally encrypted with 40-bit - * RC2-CBC. */ - CBS version_bytes, eci, contents_type, ai, encrypted_contents; - uint8_t *out; - size_t out_len; - - if (!CBS_get_asn1(&wrapped_contents, &contents, CBS_ASN1_SEQUENCE) || - !CBS_get_asn1(&contents, &version_bytes, CBS_ASN1_INTEGER) || - /* EncryptedContentInfo, see - * https://tools.ietf.org/html/rfc2315#section-10.1 */ - !CBS_get_asn1(&contents, &eci, CBS_ASN1_SEQUENCE) || - !CBS_get_asn1(&eci, &contents_type, CBS_ASN1_OBJECT) || - /* AlgorithmIdentifier, see - * https://tools.ietf.org/html/rfc5280#section-4.1.1.2 */ - !CBS_get_asn1(&eci, &ai, CBS_ASN1_SEQUENCE) || - !CBS_get_asn1_implicit_string( - &eci, &encrypted_contents, &storage, - CBS_ASN1_CONTEXT_SPECIFIC | 0, CBS_ASN1_OCTETSTRING)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - if (OBJ_cbs2nid(&contents_type) != NID_pkcs7_data) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - if (!pbe_decrypt(&out, &out_len, &ai, ctx->password, ctx->password_len, - CBS_data(&encrypted_contents), - CBS_len(&encrypted_contents))) { - goto err; - } - - CBS safe_contents; - CBS_init(&safe_contents, out, out_len); - ret = PKCS12_handle_sequence(&safe_contents, ctx, PKCS12_handle_safe_bag); - OPENSSL_free(out); - } else if (nid == NID_pkcs7_data) { - CBS octet_string_contents; - - if (!CBS_get_asn1(&wrapped_contents, &octet_string_contents, - CBS_ASN1_OCTETSTRING)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - ret = PKCS12_handle_sequence(&octet_string_contents, ctx, - PKCS12_handle_safe_bag); - } else { - /* Unknown element type - ignore it. 
*/ - ret = 1; - } - -err: - OPENSSL_free(storage); - return ret; -} - -int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, - CBS *ber_in, const char *password) { - uint8_t *der_bytes = NULL; - size_t der_len; - CBS in, pfx, mac_data, authsafe, content_type, wrapped_authsafes, authsafes; - uint64_t version; - int ret = 0; - struct pkcs12_context ctx; - const size_t original_out_certs_len = sk_X509_num(out_certs); - - /* The input may be in BER format. */ - if (!CBS_asn1_ber_to_der(ber_in, &der_bytes, &der_len)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - return 0; - } - if (der_bytes != NULL) { - CBS_init(&in, der_bytes, der_len); - } else { - CBS_init(&in, CBS_data(ber_in), CBS_len(ber_in)); - } - - *out_key = NULL; - OPENSSL_memset(&ctx, 0, sizeof(ctx)); - - /* See ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-12/pkcs-12v1.pdf, section - * four. */ - if (!CBS_get_asn1(&in, &pfx, CBS_ASN1_SEQUENCE) || - CBS_len(&in) != 0 || - !CBS_get_asn1_uint64(&pfx, &version)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - if (version < 3) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_VERSION); - goto err; - } - - if (!CBS_get_asn1(&pfx, &authsafe, CBS_ASN1_SEQUENCE)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - if (CBS_len(&pfx) == 0) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_MISSING_MAC); - goto err; - } - - if (!CBS_get_asn1(&pfx, &mac_data, CBS_ASN1_SEQUENCE)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - /* authsafe is a PKCS#7 ContentInfo. See - * https://tools.ietf.org/html/rfc2315#section-7. */ - if (!CBS_get_asn1(&authsafe, &content_type, CBS_ASN1_OBJECT) || - !CBS_get_asn1(&authsafe, &wrapped_authsafes, - CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - /* The content type can either be |NID_pkcs7_data| or |NID_pkcs7_signed|. The - * latter indicates that it's signed by a public key, which isn't - * supported. */ - if (OBJ_cbs2nid(&content_type) != NID_pkcs7_data) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_PKCS12_PUBLIC_KEY_INTEGRITY_NOT_SUPPORTED); - goto err; - } - - if (!CBS_get_asn1(&wrapped_authsafes, &authsafes, CBS_ASN1_OCTETSTRING)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - ctx.out_key = out_key; - ctx.out_certs = out_certs; - if (!ascii_to_ucs2(password, password ? strlen(password) : 0, &ctx.password, - &ctx.password_len)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); - goto err; - } - - /* Verify the MAC. */ - { - CBS mac, hash_type_seq, hash_oid, salt, expected_mac; - uint64_t iterations; - int hash_nid; - const EVP_MD *md; - uint8_t hmac_key[EVP_MAX_MD_SIZE]; - uint8_t hmac[EVP_MAX_MD_SIZE]; - unsigned hmac_len; - - if (!CBS_get_asn1(&mac_data, &mac, CBS_ASN1_SEQUENCE) || - !CBS_get_asn1(&mac, &hash_type_seq, CBS_ASN1_SEQUENCE) || - !CBS_get_asn1(&hash_type_seq, &hash_oid, CBS_ASN1_OBJECT) || - !CBS_get_asn1(&mac, &expected_mac, CBS_ASN1_OCTETSTRING) || - !CBS_get_asn1(&mac_data, &salt, CBS_ASN1_OCTETSTRING)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - - /* The iteration count is optional and the default is one. 
*/ - iterations = 1; - if (CBS_len(&mac_data) > 0) { - if (!CBS_get_asn1_uint64(&mac_data, &iterations) || - iterations > UINT_MAX) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); - goto err; - } - } - - hash_nid = OBJ_cbs2nid(&hash_oid); - if (hash_nid == NID_undef || - (md = EVP_get_digestbynid(hash_nid)) == NULL) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_UNKNOWN_HASH); - goto err; - } - - if (!pkcs12_key_gen_raw(ctx.password, ctx.password_len, CBS_data(&salt), - CBS_len(&salt), PKCS12_MAC_ID, iterations, - EVP_MD_size(md), hmac_key, md)) { - goto err; - } - - if (NULL == HMAC(md, hmac_key, EVP_MD_size(md), CBS_data(&authsafes), - CBS_len(&authsafes), hmac, &hmac_len)) { - goto err; - } - - if (!CBS_mem_equal(&expected_mac, hmac, hmac_len)) { - OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_INCORRECT_PASSWORD); - goto err; - } - } - - /* authsafes contains a series of PKCS#7 ContentInfos. */ - if (!PKCS12_handle_sequence(&authsafes, &ctx, PKCS12_handle_content_info)) { - goto err; - } - - ret = 1; - -err: - OPENSSL_free(ctx.password); - OPENSSL_free(der_bytes); - if (!ret) { - EVP_PKEY_free(*out_key); - *out_key = NULL; - while (sk_X509_num(out_certs) > original_out_certs_len) { - X509 *x509 = sk_X509_pop(out_certs); - X509_free(x509); - } - } - - return ret; -} - -void PKCS12_PBE_add(void) {} - -struct pkcs12_st { - uint8_t *ber_bytes; - size_t ber_len; -}; - -PKCS12 *d2i_PKCS12(PKCS12 **out_p12, const uint8_t **ber_bytes, - size_t ber_len) { - PKCS12 *p12; - - p12 = OPENSSL_malloc(sizeof(PKCS12)); - if (!p12) { - return NULL; - } - - p12->ber_bytes = OPENSSL_malloc(ber_len); - if (!p12->ber_bytes) { - OPENSSL_free(p12); - return NULL; - } - - OPENSSL_memcpy(p12->ber_bytes, *ber_bytes, ber_len); - p12->ber_len = ber_len; - *ber_bytes += ber_len; - - if (out_p12) { - PKCS12_free(*out_p12); - - *out_p12 = p12; - } - - return p12; -} - -PKCS12* d2i_PKCS12_bio(BIO *bio, PKCS12 **out_p12) { - size_t used = 0; - BUF_MEM *buf; - const uint8_t *dummy; - static const size_t kMaxSize = 256 * 1024; - PKCS12 *ret = NULL; - - buf = BUF_MEM_new(); - if (buf == NULL) { - return NULL; - } - if (BUF_MEM_grow(buf, 8192) == 0) { - goto out; - } - - for (;;) { - int n = BIO_read(bio, &buf->data[used], buf->length - used); - if (n < 0) { - if (used == 0) { - goto out; - } - /* Workaround a bug in node.js. It uses a memory BIO for this in the wrong - * mode. 
*/ - n = 0; - } - - if (n == 0) { - break; - } - used += n; - - if (used < buf->length) { - continue; - } - - if (buf->length > kMaxSize || - BUF_MEM_grow(buf, buf->length * 2) == 0) { - goto out; - } - } - - dummy = (uint8_t*) buf->data; - ret = d2i_PKCS12(out_p12, &dummy, used); - -out: - BUF_MEM_free(buf); - return ret; -} - -PKCS12* d2i_PKCS12_fp(FILE *fp, PKCS12 **out_p12) { - BIO *bio; - PKCS12 *ret; - - bio = BIO_new_fp(fp, 0 /* don't take ownership */); - if (!bio) { - return NULL; - } - - ret = d2i_PKCS12_bio(bio, out_p12); - BIO_free(bio); - return ret; -} - -int PKCS12_parse(const PKCS12 *p12, const char *password, EVP_PKEY **out_pkey, - X509 **out_cert, STACK_OF(X509) **out_ca_certs) { - CBS ber_bytes; - STACK_OF(X509) *ca_certs = NULL; - char ca_certs_alloced = 0; - - if (out_ca_certs != NULL && *out_ca_certs != NULL) { - ca_certs = *out_ca_certs; - } - - if (!ca_certs) { - ca_certs = sk_X509_new_null(); - if (ca_certs == NULL) { - OPENSSL_PUT_ERROR(PKCS8, ERR_R_MALLOC_FAILURE); - return 0; - } - ca_certs_alloced = 1; - } - - CBS_init(&ber_bytes, p12->ber_bytes, p12->ber_len); - if (!PKCS12_get_key_and_certs(out_pkey, ca_certs, &ber_bytes, password)) { - if (ca_certs_alloced) { - sk_X509_free(ca_certs); - } - return 0; - } - - *out_cert = NULL; - if (sk_X509_num(ca_certs) > 0) { - *out_cert = sk_X509_shift(ca_certs); - } - - if (out_ca_certs) { - *out_ca_certs = ca_certs; - } else { - sk_X509_pop_free(ca_certs, X509_free); - } - - return 1; -} - -int PKCS12_verify_mac(const PKCS12 *p12, const char *password, - int password_len) { - if (password == NULL) { - if (password_len != 0) { - return 0; - } - } else if (password_len != -1 && - (password[password_len] != 0 || - OPENSSL_memchr(password, 0, password_len) != NULL)) { - return 0; - } - - EVP_PKEY *pkey = NULL; - X509 *cert = NULL; - if (!PKCS12_parse(p12, password, &pkey, &cert, NULL)) { - ERR_clear_error(); - return 0; - } - - EVP_PKEY_free(pkey); - X509_free(cert); - - return 1; -} - -void PKCS12_free(PKCS12 *p12) { - if (p12 == NULL) { - return; - } - OPENSSL_free(p12->ber_bytes); - OPENSSL_free(p12); -} diff --git a/Sources/BoringSSL/crypto/pkcs8/pkcs8_x509.c b/Sources/BoringSSL/crypto/pkcs8/pkcs8_x509.c new file mode 100644 index 000000000..b3e2d93fa --- /dev/null +++ b/Sources/BoringSSL/crypto/pkcs8/pkcs8_x509.c @@ -0,0 +1,789 @@ +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL + * project 1999. + */ +/* ==================================================================== + * Copyright (c) 1999 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. 
For written permission, please contact + * licensing@OpenSSL.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" +#include "../bytestring/internal.h" +#include "../internal.h" + + +// Minor tweak to operation: zero private key data +static int pkey_cb(int operation, ASN1_VALUE **pval, const ASN1_ITEM *it, + void *exarg) { + // Since the structure must still be valid use ASN1_OP_FREE_PRE + if (operation == ASN1_OP_FREE_PRE) { + PKCS8_PRIV_KEY_INFO *key = (PKCS8_PRIV_KEY_INFO *)*pval; + if (key->pkey && key->pkey->type == V_ASN1_OCTET_STRING && + key->pkey->value.octet_string) { + OPENSSL_cleanse(key->pkey->value.octet_string->data, + key->pkey->value.octet_string->length); + } + } + return 1; +} + +ASN1_SEQUENCE_cb(PKCS8_PRIV_KEY_INFO, pkey_cb) = { + ASN1_SIMPLE(PKCS8_PRIV_KEY_INFO, version, ASN1_INTEGER), + ASN1_SIMPLE(PKCS8_PRIV_KEY_INFO, pkeyalg, X509_ALGOR), + ASN1_SIMPLE(PKCS8_PRIV_KEY_INFO, pkey, ASN1_ANY), + ASN1_IMP_SET_OF_OPT(PKCS8_PRIV_KEY_INFO, attributes, X509_ATTRIBUTE, 0) +} ASN1_SEQUENCE_END_cb(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO) + +IMPLEMENT_ASN1_FUNCTIONS(PKCS8_PRIV_KEY_INFO) + +EVP_PKEY *EVP_PKCS82PKEY(PKCS8_PRIV_KEY_INFO *p8) { + uint8_t *der = NULL; + int der_len = i2d_PKCS8_PRIV_KEY_INFO(p8, &der); + if (der_len < 0) { + return NULL; + } + + CBS cbs; + CBS_init(&cbs, der, (size_t)der_len); + EVP_PKEY *ret = EVP_parse_private_key(&cbs); + if (ret == NULL || CBS_len(&cbs) != 0) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); + EVP_PKEY_free(ret); + OPENSSL_free(der); + return NULL; + } + + OPENSSL_free(der); + return ret; +} + +PKCS8_PRIV_KEY_INFO *EVP_PKEY2PKCS8(EVP_PKEY *pkey) { + CBB cbb; + uint8_t *der = NULL; + size_t der_len; + if (!CBB_init(&cbb, 0) || + !EVP_marshal_private_key(&cbb, pkey) || + !CBB_finish(&cbb, &der, &der_len) || + der_len > LONG_MAX) { + CBB_cleanup(&cbb); + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_ENCODE_ERROR); + goto err; + } + + const uint8_t *p = der; + PKCS8_PRIV_KEY_INFO *p8 = 
d2i_PKCS8_PRIV_KEY_INFO(NULL, &p, (long)der_len); + if (p8 == NULL || p != der + der_len) { + PKCS8_PRIV_KEY_INFO_free(p8); + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_DECODE_ERROR); + goto err; + } + + OPENSSL_free(der); + return p8; + +err: + OPENSSL_free(der); + return NULL; +} + +PKCS8_PRIV_KEY_INFO *PKCS8_decrypt(X509_SIG *pkcs8, const char *pass, + int pass_len_in) { + size_t pass_len; + if (pass_len_in == -1 && pass != NULL) { + pass_len = strlen(pass); + } else { + pass_len = (size_t)pass_len_in; + } + + PKCS8_PRIV_KEY_INFO *ret = NULL; + EVP_PKEY *pkey = NULL; + uint8_t *in = NULL; + + // Convert the legacy ASN.1 object to a byte string. + int in_len = i2d_X509_SIG(pkcs8, &in); + if (in_len < 0) { + goto err; + } + + CBS cbs; + CBS_init(&cbs, in, in_len); + pkey = PKCS8_parse_encrypted_private_key(&cbs, pass, pass_len); + if (pkey == NULL || CBS_len(&cbs) != 0) { + goto err; + } + + ret = EVP_PKEY2PKCS8(pkey); + +err: + OPENSSL_free(in); + EVP_PKEY_free(pkey); + return ret; +} + +X509_SIG *PKCS8_encrypt(int pbe_nid, const EVP_CIPHER *cipher, const char *pass, + int pass_len_in, const uint8_t *salt, size_t salt_len, + int iterations, PKCS8_PRIV_KEY_INFO *p8inf) { + size_t pass_len; + if (pass_len_in == -1 && pass != NULL) { + pass_len = strlen(pass); + } else { + pass_len = (size_t)pass_len_in; + } + + // Parse out the private key. + EVP_PKEY *pkey = EVP_PKCS82PKEY(p8inf); + if (pkey == NULL) { + return NULL; + } + + X509_SIG *ret = NULL; + uint8_t *der = NULL; + size_t der_len; + CBB cbb; + if (!CBB_init(&cbb, 128) || + !PKCS8_marshal_encrypted_private_key(&cbb, pbe_nid, cipher, pass, + pass_len, salt, salt_len, iterations, + pkey) || + !CBB_finish(&cbb, &der, &der_len)) { + CBB_cleanup(&cbb); + goto err; + } + + // Convert back to legacy ASN.1 objects. + const uint8_t *ptr = der; + ret = d2i_X509_SIG(NULL, &ptr, der_len); + if (ret == NULL || ptr != der + der_len) { + OPENSSL_PUT_ERROR(PKCS8, ERR_R_INTERNAL_ERROR); + X509_SIG_free(ret); + ret = NULL; + } + +err: + OPENSSL_free(der); + EVP_PKEY_free(pkey); + return ret; +} + +struct pkcs12_context { + EVP_PKEY **out_key; + STACK_OF(X509) *out_certs; + const char *password; + size_t password_len; +}; + +// PKCS12_handle_sequence parses a BER-encoded SEQUENCE of elements in a PKCS#12 +// structure. +static int PKCS12_handle_sequence( + CBS *sequence, struct pkcs12_context *ctx, + int (*handle_element)(CBS *cbs, struct pkcs12_context *ctx)) { + uint8_t *der_bytes = NULL; + size_t der_len; + CBS in; + int ret = 0; + + // Although a BER->DER conversion is done at the beginning of |PKCS12_parse|, + // the ASN.1 data gets wrapped in OCTETSTRINGs and/or encrypted and the + // conversion cannot see through those wrappings. So each time we step + // through one we need to convert to DER again. 
+ if (!CBS_asn1_ber_to_der(sequence, &der_bytes, &der_len)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + return 0; + } + + if (der_bytes != NULL) { + CBS_init(&in, der_bytes, der_len); + } else { + CBS_init(&in, CBS_data(sequence), CBS_len(sequence)); + } + + CBS child; + if (!CBS_get_asn1(&in, &child, CBS_ASN1_SEQUENCE) || + CBS_len(&in) != 0) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + while (CBS_len(&child) > 0) { + CBS element; + if (!CBS_get_asn1(&child, &element, CBS_ASN1_SEQUENCE)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + if (!handle_element(&element, ctx)) { + goto err; + } + } + + ret = 1; + +err: + OPENSSL_free(der_bytes); + return ret; +} + +// 1.2.840.113549.1.12.10.1.2 +static const uint8_t kPKCS8ShroudedKeyBag[] = { + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x0c, 0x0a, 0x01, 0x02}; + +// 1.2.840.113549.1.12.10.1.3 +static const uint8_t kCertBag[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x0c, 0x0a, 0x01, 0x03}; + +// 1.2.840.113549.1.9.22.1 +static const uint8_t kX509Certificate[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x09, 0x16, 0x01}; + +// PKCS12_handle_safe_bag parses a single SafeBag element in a PKCS#12 +// structure. +static int PKCS12_handle_safe_bag(CBS *safe_bag, struct pkcs12_context *ctx) { + CBS bag_id, wrapped_value; + if (!CBS_get_asn1(safe_bag, &bag_id, CBS_ASN1_OBJECT) || + !CBS_get_asn1(safe_bag, &wrapped_value, + CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) + /* Ignore the bagAttributes field. */) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + return 0; + } + + if (CBS_mem_equal(&bag_id, kPKCS8ShroudedKeyBag, + sizeof(kPKCS8ShroudedKeyBag))) { + // See RFC 7292, section 4.2.2. + if (*ctx->out_key) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_MULTIPLE_PRIVATE_KEYS_IN_PKCS12); + return 0; + } + + EVP_PKEY *pkey = PKCS8_parse_encrypted_private_key( + &wrapped_value, ctx->password, ctx->password_len); + if (pkey == NULL) { + return 0; + } + + if (CBS_len(&wrapped_value) != 0) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + EVP_PKEY_free(pkey); + return 0; + } + + *ctx->out_key = pkey; + return 1; + } + + if (CBS_mem_equal(&bag_id, kCertBag, sizeof(kCertBag))) { + // See RFC 7292, section 4.2.3. + CBS cert_bag, cert_type, wrapped_cert, cert; + if (!CBS_get_asn1(&wrapped_value, &cert_bag, CBS_ASN1_SEQUENCE) || + !CBS_get_asn1(&cert_bag, &cert_type, CBS_ASN1_OBJECT) || + !CBS_get_asn1(&cert_bag, &wrapped_cert, + CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || + !CBS_get_asn1(&wrapped_cert, &cert, CBS_ASN1_OCTETSTRING)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + return 0; + } + + // Skip unknown certificate types. + if (!CBS_mem_equal(&cert_type, kX509Certificate, + sizeof(kX509Certificate))) { + return 1; + } + + if (CBS_len(&cert) > LONG_MAX) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + return 0; + } + + const uint8_t *inp = CBS_data(&cert); + X509 *x509 = d2i_X509(NULL, &inp, (long)CBS_len(&cert)); + if (!x509) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + return 0; + } + + if (inp != CBS_data(&cert) + CBS_len(&cert)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + X509_free(x509); + return 0; + } + + if (0 == sk_X509_push(ctx->out_certs, x509)) { + X509_free(x509); + return 0; + } + + return 1; + } + + // Unknown element type - ignore it. 
+ return 1; +} + +// 1.2.840.113549.1.7.1 +static const uint8_t kPKCS7Data[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x07, 0x01}; + +// 1.2.840.113549.1.7.6 +static const uint8_t kPKCS7EncryptedData[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x07, 0x06}; + +// PKCS12_handle_content_info parses a single PKCS#7 ContentInfo element in a +// PKCS#12 structure. +static int PKCS12_handle_content_info(CBS *content_info, + struct pkcs12_context *ctx) { + CBS content_type, wrapped_contents, contents; + int ret = 0; + uint8_t *storage = NULL; + + if (!CBS_get_asn1(content_info, &content_type, CBS_ASN1_OBJECT) || + !CBS_get_asn1(content_info, &wrapped_contents, + CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || + CBS_len(content_info) != 0) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + if (CBS_mem_equal(&content_type, kPKCS7EncryptedData, + sizeof(kPKCS7EncryptedData))) { + // See https://tools.ietf.org/html/rfc2315#section-13. + // + // PKCS#7 encrypted data inside a PKCS#12 structure is generally an + // encrypted certificate bag and it's generally encrypted with 40-bit + // RC2-CBC. + CBS version_bytes, eci, contents_type, ai, encrypted_contents; + uint8_t *out; + size_t out_len; + + if (!CBS_get_asn1(&wrapped_contents, &contents, CBS_ASN1_SEQUENCE) || + !CBS_get_asn1(&contents, &version_bytes, CBS_ASN1_INTEGER) || + // EncryptedContentInfo, see + // https://tools.ietf.org/html/rfc2315#section-10.1 + !CBS_get_asn1(&contents, &eci, CBS_ASN1_SEQUENCE) || + !CBS_get_asn1(&eci, &contents_type, CBS_ASN1_OBJECT) || + // AlgorithmIdentifier, see + // https://tools.ietf.org/html/rfc5280#section-4.1.1.2 + !CBS_get_asn1(&eci, &ai, CBS_ASN1_SEQUENCE) || + !CBS_get_asn1_implicit_string( + &eci, &encrypted_contents, &storage, + CBS_ASN1_CONTEXT_SPECIFIC | 0, CBS_ASN1_OCTETSTRING)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + if (!CBS_mem_equal(&contents_type, kPKCS7Data, sizeof(kPKCS7Data))) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + if (!pkcs8_pbe_decrypt(&out, &out_len, &ai, ctx->password, + ctx->password_len, CBS_data(&encrypted_contents), + CBS_len(&encrypted_contents))) { + goto err; + } + + CBS safe_contents; + CBS_init(&safe_contents, out, out_len); + ret = PKCS12_handle_sequence(&safe_contents, ctx, PKCS12_handle_safe_bag); + OPENSSL_free(out); + } else if (CBS_mem_equal(&content_type, kPKCS7Data, sizeof(kPKCS7Data))) { + CBS octet_string_contents; + + if (!CBS_get_asn1(&wrapped_contents, &octet_string_contents, + CBS_ASN1_OCTETSTRING)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + ret = PKCS12_handle_sequence(&octet_string_contents, ctx, + PKCS12_handle_safe_bag); + } else { + // Unknown element type - ignore it. + ret = 1; + } + +err: + OPENSSL_free(storage); + return ret; +} + +int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, + CBS *ber_in, const char *password) { + uint8_t *der_bytes = NULL; + size_t der_len; + CBS in, pfx, mac_data, authsafe, content_type, wrapped_authsafes, authsafes; + uint64_t version; + int ret = 0; + struct pkcs12_context ctx; + const size_t original_out_certs_len = sk_X509_num(out_certs); + + // The input may be in BER format. 
+ if (!CBS_asn1_ber_to_der(ber_in, &der_bytes, &der_len)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + return 0; + } + if (der_bytes != NULL) { + CBS_init(&in, der_bytes, der_len); + } else { + CBS_init(&in, CBS_data(ber_in), CBS_len(ber_in)); + } + + *out_key = NULL; + OPENSSL_memset(&ctx, 0, sizeof(ctx)); + + // See ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-12/pkcs-12v1.pdf, section + // four. + if (!CBS_get_asn1(&in, &pfx, CBS_ASN1_SEQUENCE) || + CBS_len(&in) != 0 || + !CBS_get_asn1_uint64(&pfx, &version)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + if (version < 3) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_VERSION); + goto err; + } + + if (!CBS_get_asn1(&pfx, &authsafe, CBS_ASN1_SEQUENCE)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + if (CBS_len(&pfx) == 0) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_MISSING_MAC); + goto err; + } + + if (!CBS_get_asn1(&pfx, &mac_data, CBS_ASN1_SEQUENCE)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + // authsafe is a PKCS#7 ContentInfo. See + // https://tools.ietf.org/html/rfc2315#section-7. + if (!CBS_get_asn1(&authsafe, &content_type, CBS_ASN1_OBJECT) || + !CBS_get_asn1(&authsafe, &wrapped_authsafes, + CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + // The content type can either be data or signedData. The latter indicates + // that it's signed by a public key, which isn't supported. + if (!CBS_mem_equal(&content_type, kPKCS7Data, sizeof(kPKCS7Data))) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_PKCS12_PUBLIC_KEY_INTEGRITY_NOT_SUPPORTED); + goto err; + } + + if (!CBS_get_asn1(&wrapped_authsafes, &authsafes, CBS_ASN1_OCTETSTRING)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + ctx.out_key = out_key; + ctx.out_certs = out_certs; + ctx.password = password; + ctx.password_len = password != NULL ? strlen(password) : 0; + + // Verify the MAC. + { + CBS mac, salt, expected_mac; + if (!CBS_get_asn1(&mac_data, &mac, CBS_ASN1_SEQUENCE)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + const EVP_MD *md = EVP_parse_digest_algorithm(&mac); + if (md == NULL) { + goto err; + } + + if (!CBS_get_asn1(&mac, &expected_mac, CBS_ASN1_OCTETSTRING) || + !CBS_get_asn1(&mac_data, &salt, CBS_ASN1_OCTETSTRING)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + + // The iteration count is optional and the default is one. + uint64_t iterations = 1; + if (CBS_len(&mac_data) > 0) { + if (!CBS_get_asn1_uint64(&mac_data, &iterations) || + iterations > UINT_MAX) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_BAD_PKCS12_DATA); + goto err; + } + } + + uint8_t hmac_key[EVP_MAX_MD_SIZE]; + if (!pkcs12_key_gen(ctx.password, ctx.password_len, CBS_data(&salt), + CBS_len(&salt), PKCS12_MAC_ID, iterations, + EVP_MD_size(md), hmac_key, md)) { + goto err; + } + + uint8_t hmac[EVP_MAX_MD_SIZE]; + unsigned hmac_len; + if (NULL == HMAC(md, hmac_key, EVP_MD_size(md), CBS_data(&authsafes), + CBS_len(&authsafes), hmac, &hmac_len)) { + goto err; + } + + if (!CBS_mem_equal(&expected_mac, hmac, hmac_len)) { + OPENSSL_PUT_ERROR(PKCS8, PKCS8_R_INCORRECT_PASSWORD); + goto err; + } + } + + // authsafes contains a series of PKCS#7 ContentInfos. 
+ if (!PKCS12_handle_sequence(&authsafes, &ctx, PKCS12_handle_content_info)) { + goto err; + } + + ret = 1; + +err: + OPENSSL_free(der_bytes); + if (!ret) { + EVP_PKEY_free(*out_key); + *out_key = NULL; + while (sk_X509_num(out_certs) > original_out_certs_len) { + X509 *x509 = sk_X509_pop(out_certs); + X509_free(x509); + } + } + + return ret; +} + +void PKCS12_PBE_add(void) {} + +struct pkcs12_st { + uint8_t *ber_bytes; + size_t ber_len; +}; + +PKCS12 *d2i_PKCS12(PKCS12 **out_p12, const uint8_t **ber_bytes, + size_t ber_len) { + PKCS12 *p12; + + p12 = OPENSSL_malloc(sizeof(PKCS12)); + if (!p12) { + return NULL; + } + + p12->ber_bytes = OPENSSL_malloc(ber_len); + if (!p12->ber_bytes) { + OPENSSL_free(p12); + return NULL; + } + + OPENSSL_memcpy(p12->ber_bytes, *ber_bytes, ber_len); + p12->ber_len = ber_len; + *ber_bytes += ber_len; + + if (out_p12) { + PKCS12_free(*out_p12); + + *out_p12 = p12; + } + + return p12; +} + +PKCS12* d2i_PKCS12_bio(BIO *bio, PKCS12 **out_p12) { + size_t used = 0; + BUF_MEM *buf; + const uint8_t *dummy; + static const size_t kMaxSize = 256 * 1024; + PKCS12 *ret = NULL; + + buf = BUF_MEM_new(); + if (buf == NULL) { + return NULL; + } + if (BUF_MEM_grow(buf, 8192) == 0) { + goto out; + } + + for (;;) { + int n = BIO_read(bio, &buf->data[used], buf->length - used); + if (n < 0) { + if (used == 0) { + goto out; + } + // Workaround a bug in node.js. It uses a memory BIO for this in the wrong + // mode. + n = 0; + } + + if (n == 0) { + break; + } + used += n; + + if (used < buf->length) { + continue; + } + + if (buf->length > kMaxSize || + BUF_MEM_grow(buf, buf->length * 2) == 0) { + goto out; + } + } + + dummy = (uint8_t*) buf->data; + ret = d2i_PKCS12(out_p12, &dummy, used); + +out: + BUF_MEM_free(buf); + return ret; +} + +PKCS12* d2i_PKCS12_fp(FILE *fp, PKCS12 **out_p12) { + BIO *bio; + PKCS12 *ret; + + bio = BIO_new_fp(fp, 0 /* don't take ownership */); + if (!bio) { + return NULL; + } + + ret = d2i_PKCS12_bio(bio, out_p12); + BIO_free(bio); + return ret; +} + +int PKCS12_parse(const PKCS12 *p12, const char *password, EVP_PKEY **out_pkey, + X509 **out_cert, STACK_OF(X509) **out_ca_certs) { + CBS ber_bytes; + STACK_OF(X509) *ca_certs = NULL; + char ca_certs_alloced = 0; + + if (out_ca_certs != NULL && *out_ca_certs != NULL) { + ca_certs = *out_ca_certs; + } + + if (!ca_certs) { + ca_certs = sk_X509_new_null(); + if (ca_certs == NULL) { + OPENSSL_PUT_ERROR(PKCS8, ERR_R_MALLOC_FAILURE); + return 0; + } + ca_certs_alloced = 1; + } + + CBS_init(&ber_bytes, p12->ber_bytes, p12->ber_len); + if (!PKCS12_get_key_and_certs(out_pkey, ca_certs, &ber_bytes, password)) { + if (ca_certs_alloced) { + sk_X509_free(ca_certs); + } + return 0; + } + + *out_cert = NULL; + if (sk_X509_num(ca_certs) > 0) { + *out_cert = sk_X509_shift(ca_certs); + } + + if (out_ca_certs) { + *out_ca_certs = ca_certs; + } else { + sk_X509_pop_free(ca_certs, X509_free); + } + + return 1; +} + +int PKCS12_verify_mac(const PKCS12 *p12, const char *password, + int password_len) { + if (password == NULL) { + if (password_len != 0) { + return 0; + } + } else if (password_len != -1 && + (password[password_len] != 0 || + OPENSSL_memchr(password, 0, password_len) != NULL)) { + return 0; + } + + EVP_PKEY *pkey = NULL; + X509 *cert = NULL; + if (!PKCS12_parse(p12, password, &pkey, &cert, NULL)) { + ERR_clear_error(); + return 0; + } + + EVP_PKEY_free(pkey); + X509_free(cert); + + return 1; +} + +void PKCS12_free(PKCS12 *p12) { + if (p12 == NULL) { + return; + } + OPENSSL_free(p12->ber_bytes); + OPENSSL_free(p12); +} 
diff --git a/Sources/BoringSSL/crypto/poly1305/internal.h b/Sources/BoringSSL/crypto/poly1305/internal.h index df6769ea4..251b1f4f9 100644 --- a/Sources/BoringSSL/crypto/poly1305/internal.h +++ b/Sources/BoringSSL/crypto/poly1305/internal.h @@ -22,8 +22,9 @@ extern "C" { #endif +#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_APPLE) +#define OPENSSL_POLY1305_NEON -#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) void CRYPTO_poly1305_init_neon(poly1305_state *state, const uint8_t key[32]); void CRYPTO_poly1305_update_neon(poly1305_state *state, const uint8_t *in, @@ -34,7 +35,7 @@ void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_POLY1305_INTERNAL_H */ +#endif // OPENSSL_HEADER_POLY1305_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/poly1305/poly1305.c b/Sources/BoringSSL/crypto/poly1305/poly1305.c index 77e8046c5..c3e927218 100644 --- a/Sources/BoringSSL/crypto/poly1305/poly1305.c +++ b/Sources/BoringSSL/crypto/poly1305/poly1305.c @@ -12,9 +12,9 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This implementation of poly1305 is by Andrew Moon - * (https://github.com/floodyberry/poly1305-donna) and released as public - * domain. */ +// This implementation of poly1305 is by Andrew Moon +// (https://github.com/floodyberry/poly1305-donna) and released as public +// domain. #include @@ -28,7 +28,7 @@ #if defined(OPENSSL_WINDOWS) || !defined(OPENSSL_X86_64) -/* We can assume little-endian. */ +// We can assume little-endian. static uint32_t U8TO32_LE(const uint8_t *m) { uint32_t r; OPENSSL_memcpy(&r, m, sizeof(r)); @@ -55,9 +55,9 @@ static inline struct poly1305_state_st *poly1305_aligned_state( return (struct poly1305_state_st *)(((uintptr_t)state + 63) & ~63); } -/* poly1305_blocks updates |state| given some amount of input data. This - * function may only be called with a |len| that is not a multiple of 16 at the - * end of the data. Otherwise the input must be buffered into 16 byte blocks. */ +// poly1305_blocks updates |state| given some amount of input data. This +// function may only be called with a |len| that is not a multiple of 16 at the +// end of the data. Otherwise the input must be buffered into 16 byte blocks. 
static void poly1305_update(struct poly1305_state_st *state, const uint8_t *in, size_t len) { uint32_t t0, t1, t2, t3; @@ -123,7 +123,7 @@ static void poly1305_update(struct poly1305_state_st *state, const uint8_t *in, goto poly1305_donna_16bytes; } -/* final bytes */ +// final bytes poly1305_donna_atmost15bytes: if (!len) { return; @@ -156,7 +156,7 @@ void CRYPTO_poly1305_init(poly1305_state *statep, const uint8_t key[32]) { struct poly1305_state_st *state = poly1305_aligned_state(statep); uint32_t t0, t1, t2, t3; -#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) +#if defined(OPENSSL_POLY1305_NEON) if (CRYPTO_is_NEON_capable()) { CRYPTO_poly1305_init_neon(statep, key); return; @@ -168,7 +168,7 @@ void CRYPTO_poly1305_init(poly1305_state *statep, const uint8_t key[32]) { t2 = U8TO32_LE(key + 8); t3 = U8TO32_LE(key + 12); - /* precompute multipliers */ + // precompute multipliers state->r0 = t0 & 0x3ffffff; t0 >>= 26; t0 |= t1 << 6; @@ -187,7 +187,7 @@ void CRYPTO_poly1305_init(poly1305_state *statep, const uint8_t key[32]) { state->s3 = state->r3 * 5; state->s4 = state->r4 * 5; - /* init state */ + // init state state->h0 = 0; state->h1 = 0; state->h2 = 0; @@ -203,7 +203,7 @@ void CRYPTO_poly1305_update(poly1305_state *statep, const uint8_t *in, unsigned int i; struct poly1305_state_st *state = poly1305_aligned_state(statep); -#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) +#if defined(OPENSSL_POLY1305_NEON) if (CRYPTO_is_NEON_capable()) { CRYPTO_poly1305_update_neon(statep, in, in_len); return; @@ -249,7 +249,7 @@ void CRYPTO_poly1305_finish(poly1305_state *statep, uint8_t mac[16]) { uint32_t g0, g1, g2, g3, g4; uint32_t b, nb; -#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) +#if defined(OPENSSL_POLY1305_NEON) if (CRYPTO_is_NEON_capable()) { CRYPTO_poly1305_finish_neon(statep, mac); return; @@ -315,4 +315,4 @@ void CRYPTO_poly1305_finish(poly1305_state *statep, uint8_t mac[16]) { U32TO8_LE(&mac[12], f3); } -#endif /* OPENSSL_WINDOWS || !OPENSSL_X86_64 */ +#endif // OPENSSL_WINDOWS || !OPENSSL_X86_64 diff --git a/Sources/BoringSSL/crypto/poly1305/poly1305_arm.c b/Sources/BoringSSL/crypto/poly1305/poly1305_arm.c index 444413b8b..4aff713f5 100644 --- a/Sources/BoringSSL/crypto/poly1305/poly1305_arm.c +++ b/Sources/BoringSSL/crypto/poly1305/poly1305_arm.c @@ -12,21 +12,21 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This implementation was taken from the public domain, neon2 version in - * SUPERCOP by D. J. Bernstein and Peter Schwabe. */ +// This implementation was taken from the public domain, neon2 version in +// SUPERCOP by D. J. Bernstein and Peter Schwabe. #include -#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) - #include #include "../internal.h" #include "internal.h" +#if defined(OPENSSL_POLY1305_NEON) + typedef struct { - uint32_t v[12]; /* for alignment; only using 10 */ + uint32_t v[12]; // for alignment; only using 10 } fe1305x2; #define addmulmod openssl_poly1305_neon2_addmulmod @@ -125,8 +125,8 @@ static void fe1305x2_tobytearray(uint8_t *r, fe1305x2 *x) { *(uint32_t *)(r + 12) = (x3 >> 18) + (x4 << 8); } -/* load32 exists to avoid breaking strict aliasing rules in - * fe1305x2_frombytearray. */ +// load32 exists to avoid breaking strict aliasing rules in +// fe1305x2_frombytearray. 
static uint32_t load32(uint8_t *t) { uint32_t tmp; OPENSSL_memcpy(&tmp, t, sizeof(tmp)); @@ -197,11 +197,11 @@ void CRYPTO_poly1305_init_neon(poly1305_state *state, const uint8_t key[32]) { r->v[9] = r->v[8] = 0x00fffff & ((*(uint32_t *)(key + 12)) >> 8); for (j = 0; j < 10; j++) { - h->v[j] = 0; /* XXX: should fast-forward a bit */ + h->v[j] = 0; // XXX: should fast-forward a bit } - addmulmod(precomp, r, r, &zero); /* precompute r^2 */ - addmulmod(precomp + 1, precomp, precomp, &zero); /* precompute r^4 */ + addmulmod(precomp, r, r, &zero); // precompute r^2 + addmulmod(precomp + 1, precomp, precomp, &zero); // precompute r^4 OPENSSL_memcpy(st->key, key + 16, 16); st->buf_used = 0; @@ -301,4 +301,4 @@ void CRYPTO_poly1305_finish_neon(poly1305_state *state, uint8_t mac[16]) { fe1305x2_tobytearray(mac, h); } -#endif /* OPENSSL_ARM && !OPENSSL_NO_ASM */ +#endif // OPENSSL_POLY1305_NEON diff --git a/Sources/BoringSSL/crypto/poly1305/poly1305_vec.c b/Sources/BoringSSL/crypto/poly1305/poly1305_vec.c index 3045a2f1b..480d9e56a 100644 --- a/Sources/BoringSSL/crypto/poly1305/poly1305_vec.c +++ b/Sources/BoringSSL/crypto/poly1305/poly1305_vec.c @@ -12,11 +12,11 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This implementation of poly1305 is by Andrew Moon - * (https://github.com/floodyberry/poly1305-donna) and released as public - * domain. It implements SIMD vectorization based on the algorithm described in - * http://cr.yp.to/papers.html#neoncrypto. Unrolled to 2 powers, i.e. 64 byte - * block size */ +// This implementation of poly1305 is by Andrew Moon +// (https://github.com/floodyberry/poly1305-donna) and released as public +// domain. It implements SIMD vectorization based on the algorithm described in +// http://cr.yp.to/papers.html#neoncrypto. Unrolled to 2 powers, i.e. 
64 byte +// block size #include @@ -69,14 +69,14 @@ typedef struct poly1305_state_internal_t { poly1305_power P[2]; /* 288 bytes, top 32 bit halves unused = 144 bytes of free storage */ union { - xmmi H[5]; /* 80 bytes */ + xmmi H[5]; // 80 bytes uint64_t HH[10]; }; - /* uint64_t r0,r1,r2; [24 bytes] */ - /* uint64_t pad0,pad1; [16 bytes] */ - uint64_t started; /* 8 bytes */ - uint64_t leftover; /* 8 bytes */ - uint8_t buffer[64]; /* 64 bytes */ + // uint64_t r0,r1,r2; [24 bytes] + // uint64_t pad0,pad1; [16 bytes] + uint64_t started; // 8 bytes + uint64_t leftover; // 8 bytes + uint8_t buffer[64]; // 64 bytes } poly1305_state_internal; /* 448 bytes total + 63 bytes for alignment = 511 bytes raw */ @@ -85,57 +85,6 @@ static inline poly1305_state_internal *poly1305_aligned_state( return (poly1305_state_internal *)(((uint64_t)state + 63) & ~63); } -/* copy 0-63 bytes */ -static inline void -poly1305_block_copy(uint8_t *dst, const uint8_t *src, size_t bytes) { - size_t offset = src - dst; - if (bytes & 32) { - _mm_storeu_si128((xmmi *)(dst + 0), - _mm_loadu_si128((const xmmi *)(dst + offset + 0))); - _mm_storeu_si128((xmmi *)(dst + 16), - _mm_loadu_si128((const xmmi *)(dst + offset + 16))); - dst += 32; - } - if (bytes & 16) { - _mm_storeu_si128((xmmi *)dst, _mm_loadu_si128((const xmmi *)(dst + offset))); - dst += 16; - } - if (bytes & 8) { - *(uint64_t *)dst = *(const uint64_t *)(dst + offset); - dst += 8; - } - if (bytes & 4) { - *(uint32_t *)dst = *(const uint32_t *)(dst + offset); - dst += 4; - } - if (bytes & 2) { - *(uint16_t *)dst = *(uint16_t *)(dst + offset); - dst += 2; - } - if (bytes & 1) { - *(uint8_t *)dst = *(uint8_t *)(dst + offset); - } -} - -/* zero 0-15 bytes */ -static inline void poly1305_block_zero(uint8_t *dst, size_t bytes) { - if (bytes & 8) { - *(uint64_t *)dst = 0; - dst += 8; - } - if (bytes & 4) { - *(uint32_t *)dst = 0; - dst += 4; - } - if (bytes & 2) { - *(uint16_t *)dst = 0; - dst += 2; - } - if (bytes & 1) { - *(uint8_t *)dst = 0; - } -} - static inline size_t poly1305_min(size_t a, size_t b) { return (a < b) ? 
a : b; } @@ -146,7 +95,7 @@ void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) { uint64_t r0, r1, r2; uint64_t t0, t1; - /* clamp key */ + // clamp key t0 = U8TO64_LE(key + 0); t1 = U8TO64_LE(key + 8); r0 = t0 & 0xffc0fffffff; @@ -156,7 +105,7 @@ void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) { t1 >>= 24; r2 = t1 & 0x00ffffffc0f; - /* store r in un-used space of st->P[1] */ + // store r in un-used space of st->P[1] p = &st->P[1]; p->R20.d[1] = (uint32_t)(r0); p->R20.d[3] = (uint32_t)(r0 >> 32); @@ -165,13 +114,13 @@ void CRYPTO_poly1305_init(poly1305_state *state, const uint8_t key[32]) { p->R22.d[1] = (uint32_t)(r2); p->R22.d[3] = (uint32_t)(r2 >> 32); - /* store pad */ + // store pad p->R23.d[1] = U8TO32_LE(key + 16); p->R23.d[3] = U8TO32_LE(key + 20); p->R24.d[1] = U8TO32_LE(key + 24); p->R24.d[3] = U8TO32_LE(key + 28); - /* H = 0 */ + // H = 0 st->H[0] = _mm_setzero_si128(); st->H[1] = _mm_setzero_si128(); st->H[2] = _mm_setzero_si128(); @@ -196,7 +145,7 @@ static void poly1305_first_block(poly1305_state_internal *st, uint64_t c; uint64_t i; - /* pull out stored info */ + // pull out stored info p = &st->P[1]; r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; @@ -205,7 +154,7 @@ static void poly1305_first_block(poly1305_state_internal *st, pad0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1]; pad1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1]; - /* compute powers r^2,r^4 */ + // compute powers r^2,r^4 r20 = r0; r21 = r1; r22 = r2; @@ -249,7 +198,7 @@ static void poly1305_first_block(poly1305_state_internal *st, p--; } - /* put saved info back */ + // put saved info back p = &st->P[1]; p->R20.d[1] = (uint32_t)(r0); p->R20.d[3] = (uint32_t)(r0 >> 32); @@ -262,7 +211,7 @@ static void poly1305_first_block(poly1305_state_internal *st, p->R24.d[1] = (uint32_t)(pad1); p->R24.d[3] = (uint32_t)(pad1 >> 32); - /* H = [Mx,My] */ + // H = [Mx,My] T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)), _mm_loadl_epi64((const xmmi *)(m + 16))); T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)), @@ -294,7 +243,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, H4 = st->H[4]; while (bytes >= 64) { - /* H *= [r^4,r^4] */ + // H *= [r^4,r^4] p = &st->P[0]; T0 = _mm_mul_epu32(H0, p->R20.v); T1 = _mm_mul_epu32(H0, p->R21.v); @@ -342,7 +291,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5); - /* H += [Mx,My]*[r^2,r^2] */ + // H += [Mx,My]*[r^2,r^2] T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)), _mm_loadl_epi64((const xmmi *)(m + 16))); T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)), @@ -406,7 +355,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, T5 = _mm_mul_epu32(M4, p->R20.v); T4 = _mm_add_epi64(T4, T5); - /* H += [Mx,My] */ + // H += [Mx,My] T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 32)), _mm_loadl_epi64((const xmmi *)(m + 48))); T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 40)), @@ -424,7 +373,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_add_epi64(T3, M3); T4 = _mm_add_epi64(T4, M4); - /* reduce */ + // reduce C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); @@ -447,7 +396,7 @@ static void poly1305_blocks(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1); - /* H 
= (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My]) */ + // H = (H*[r^4,r^4] + [Mx,My]*[r^2,r^2] + [Mx,My]) H0 = T0; H1 = T1; H2 = T2; @@ -488,11 +437,11 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, H3 = st->H[3]; H4 = st->H[4]; - /* p = [r^2,r^2] */ + // p = [r^2,r^2] p = &st->P[1]; if (bytes >= 32) { - /* H *= [r^2,r^2] */ + // H *= [r^2,r^2] T0 = _mm_mul_epu32(H0, p->R20.v); T1 = _mm_mul_epu32(H0, p->R21.v); T2 = _mm_mul_epu32(H0, p->R22.v); @@ -539,7 +488,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, T5 = _mm_mul_epu32(H4, p->R20.v); T4 = _mm_add_epi64(T4, T5); - /* H += [Mx,My] */ + // H += [Mx,My] T5 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 0)), _mm_loadl_epi64((const xmmi *)(m + 16))); T6 = _mm_unpacklo_epi64(_mm_loadl_epi64((const xmmi *)(m + 8)), @@ -557,7 +506,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_add_epi64(T3, M3); T4 = _mm_add_epi64(T4, M4); - /* reduce */ + // reduce C1 = _mm_srli_epi64(T0, 26); C2 = _mm_srli_epi64(T3, 26); T0 = _mm_and_si128(T0, MMASK); @@ -580,7 +529,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1); - /* H = (H*[r^2,r^2] + [Mx,My]) */ + // H = (H*[r^2,r^2] + [Mx,My]) H0 = T0; H1 = T1; H2 = T2; @@ -590,7 +539,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, consumed = 32; } - /* finalize, H *= [r^2,r] */ + // finalize, H *= [r^2,r] r0 = ((uint64_t)p->R20.d[3] << 32) | (uint64_t)p->R20.d[1]; r1 = ((uint64_t)p->R21.d[3] << 32) | (uint64_t)p->R21.d[1]; r2 = ((uint64_t)p->R22.d[3] << 32) | (uint64_t)p->R22.d[1]; @@ -605,7 +554,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, p->S23.d[2] = p->R23.d[2] * 5; p->S24.d[2] = p->R24.d[2] * 5; - /* H *= [r^2,r] */ + // H *= [r^2,r] T0 = _mm_mul_epu32(H0, p->R20.v); T1 = _mm_mul_epu32(H0, p->R21.v); T2 = _mm_mul_epu32(H0, p->R22.v); @@ -674,7 +623,7 @@ static size_t poly1305_combine(poly1305_state_internal *st, const uint8_t *m, T3 = _mm_and_si128(T3, MMASK); T4 = _mm_add_epi64(T4, C1); - /* H = H[0]+H[1] */ + // H = H[0]+H[1] H0 = _mm_add_epi64(T0, _mm_srli_si128(T0, 8)); H1 = _mm_add_epi64(T1, _mm_srli_si128(T1, 8)); H2 = _mm_add_epi64(T2, _mm_srli_si128(T2, 8)); @@ -713,7 +662,7 @@ void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, poly1305_state_internal *st = poly1305_aligned_state(state); size_t want; - /* need at least 32 initial bytes to start the accelerated branch */ + // need at least 32 initial bytes to start the accelerated branch if (!st->started) { if ((st->leftover == 0) && (bytes > 32)) { poly1305_first_block(st, m); @@ -721,7 +670,7 @@ void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, bytes -= 32; } else { want = poly1305_min(32 - st->leftover, bytes); - poly1305_block_copy(st->buffer + st->leftover, m, want); + OPENSSL_memcpy(st->buffer + st->leftover, m, want); bytes -= want; m += want; st->leftover += want; @@ -734,10 +683,10 @@ void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, st->started = 1; } - /* handle leftover */ + // handle leftover if (st->leftover) { want = poly1305_min(64 - st->leftover, bytes); - poly1305_block_copy(st->buffer + st->leftover, m, want); + OPENSSL_memcpy(st->buffer + st->leftover, m, want); bytes -= want; m += want; st->leftover += want; @@ -748,7 +697,7 @@ void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, 
st->leftover = 0; } - /* process 64 byte blocks */ + // process 64 byte blocks if (bytes >= 64) { want = (bytes & ~63); poly1305_blocks(st, m, want); @@ -757,7 +706,7 @@ void CRYPTO_poly1305_update(poly1305_state *state, const uint8_t *m, } if (bytes) { - poly1305_block_copy(st->buffer + st->leftover, m, bytes); + OPENSSL_memcpy(st->buffer + st->leftover, m, bytes); st->leftover += bytes; } } @@ -779,7 +728,7 @@ void CRYPTO_poly1305_finish(poly1305_state *state, uint8_t mac[16]) { m += consumed; } - /* st->HH will either be 0 or have the combined result */ + // st->HH will either be 0 or have the combined result h0 = st->HH[0]; h1 = st->HH[1]; h2 = st->HH[2]; @@ -826,14 +775,14 @@ void CRYPTO_poly1305_finish(poly1305_state *state, uint8_t mac[16]) { goto poly1305_donna_atleast16bytes; } -/* final bytes */ +// final bytes poly1305_donna_atmost15bytes: if (!leftover) { goto poly1305_donna_finish; } m[leftover++] = 1; - poly1305_block_zero(m + leftover, 16 - leftover); + OPENSSL_memset(m + leftover, 0, 16 - leftover); leftover = 16; t0 = U8TO64_LE(m + 0); @@ -870,7 +819,7 @@ void CRYPTO_poly1305_finish(poly1305_state *state, uint8_t mac[16]) { h1 = (h1 & nc) | (g1 & c); h2 = (h2 & nc) | (g2 & c); - /* pad */ + // pad t0 = ((uint64_t)p->R23.d[3] << 32) | (uint64_t)p->R23.d[1]; t1 = ((uint64_t)p->R24.d[3] << 32) | (uint64_t)p->R24.d[1]; h0 += (t0 & 0xfffffffffff); @@ -887,4 +836,4 @@ void CRYPTO_poly1305_finish(poly1305_state *state, uint8_t mac[16]) { U64TO8_LE(mac + 8, ((h1 >> 20) | (h2 << 24))); } -#endif /* !OPENSSL_WINDOWS && OPENSSL_X86_64 */ +#endif // !OPENSSL_WINDOWS && OPENSSL_X86_64 diff --git a/Sources/BoringSSL/crypto/pool/internal.h b/Sources/BoringSSL/crypto/pool/internal.h index 3ec2ec2eb..5b288ebbd 100644 --- a/Sources/BoringSSL/crypto/pool/internal.h +++ b/Sources/BoringSSL/crypto/pool/internal.h @@ -39,7 +39,7 @@ struct crypto_buffer_pool_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_POOL_INTERNAL_H */ +#endif // OPENSSL_HEADER_POOL_INTERNAL_H diff --git a/Sources/BoringSSL/crypto/pool/pool.c b/Sources/BoringSSL/crypto/pool/pool.c index 44d10af62..9cfbf1eeb 100644 --- a/Sources/BoringSSL/crypto/pool/pool.c +++ b/Sources/BoringSSL/crypto/pool/pool.c @@ -125,8 +125,8 @@ CRYPTO_BUFFER *CRYPTO_BUFFER_new(const uint8_t *data, size_t len, CRYPTO_MUTEX_unlock_write(&pool->lock); if (!inserted) { - /* We raced to insert |buf| into the pool and lost, or else there was an - * error inserting. */ + // We raced to insert |buf| into the pool and lost, or else there was an + // error inserting. OPENSSL_free(buf->data); OPENSSL_free(buf); return duplicate; @@ -147,9 +147,9 @@ void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf) { CRYPTO_BUFFER_POOL *const pool = buf->pool; if (pool == NULL) { if (CRYPTO_refcount_dec_and_test_zero(&buf->references)) { - /* If a reference count of zero is observed, there cannot be a reference - * from any pool to this buffer and thus we are able to free this - * buffer. */ + // If a reference count of zero is observed, there cannot be a reference + // from any pool to this buffer and thus we are able to free this + // buffer. OPENSSL_free(buf->data); OPENSSL_free(buf); } @@ -163,10 +163,10 @@ void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf) { return; } - /* We have an exclusive lock on the pool, therefore no concurrent lookups can - * find this buffer and increment the reference count. Thus, if the count is - * zero there are and can never be any more references and thus we can free - * this buffer. 
*/ + // We have an exclusive lock on the pool, therefore no concurrent lookups can + // find this buffer and increment the reference count. Thus, if the count is + // zero there are and can never be any more references and thus we can free + // this buffer. void *found = lh_CRYPTO_BUFFER_delete(pool->bufs, buf); assert(found != NULL); assert(found == buf); @@ -177,12 +177,12 @@ void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf) { } int CRYPTO_BUFFER_up_ref(CRYPTO_BUFFER *buf) { - /* This is safe in the case that |buf->pool| is NULL because it's just - * standard reference counting in that case. - * - * This is also safe if |buf->pool| is non-NULL because, if it were racing - * with |CRYPTO_BUFFER_free| then the two callers must have independent - * references already and so the reference count will never hit zero. */ + // This is safe in the case that |buf->pool| is NULL because it's just + // standard reference counting in that case. + // + // This is also safe if |buf->pool| is non-NULL because, if it were racing + // with |CRYPTO_BUFFER_free| then the two callers must have independent + // references already and so the reference count will never hit zero. CRYPTO_refcount_inc(&buf->references); return 1; } diff --git a/Sources/BoringSSL/crypto/rand/rand.c b/Sources/BoringSSL/crypto/rand/rand.c deleted file mode 100644 index 51da6ba27..000000000 --- a/Sources/BoringSSL/crypto/rand/rand.c +++ /dev/null @@ -1,244 +0,0 @@ -/* Copyright (c) 2014, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include -#include -#include - -#include -#include -#include - -#include "internal.h" -#include "../internal.h" - - -/* It's assumed that the operating system always has an unfailing source of - * entropy which is accessed via |CRYPTO_sysrand|. (If the operating system - * entropy source fails, it's up to |CRYPTO_sysrand| to abort the process—we - * don't try to handle it.) - * - * In addition, the hardware may provide a low-latency RNG. Intel's rdrand - * instruction is the canonical example of this. When a hardware RNG is - * available we don't need to worry about an RNG failure arising from fork()ing - * the process or moving a VM, so we can keep thread-local RNG state and XOR - * the hardware entropy in. - * - * (We assume that the OS entropy is safe from fork()ing and VM duplication. - * This might be a bit of a leap of faith, esp on Windows, but there's nothing - * that we can do about it.) */ - -/* rand_thread_state contains the per-thread state for the RNG. This is only - * used if the system has support for a hardware RNG. 
*/ -struct rand_thread_state { - uint8_t key[32]; - uint64_t calls_used; - size_t bytes_used; - uint8_t partial_block[64]; - unsigned partial_block_used; -}; - -/* kMaxCallsPerRefresh is the maximum number of |RAND_bytes| calls that we'll - * serve before reading a new key from the operating system. This only applies - * if we have a hardware RNG. */ -static const unsigned kMaxCallsPerRefresh = 1024; - -/* kMaxBytesPerRefresh is the maximum number of bytes that we'll return from - * |RAND_bytes| before reading a new key from the operating system. This only - * applies if we have a hardware RNG. */ -static const uint64_t kMaxBytesPerRefresh = 1024 * 1024; - -/* rand_thread_state_free frees a |rand_thread_state|. This is called when a - * thread exits. */ -static void rand_thread_state_free(void *state) { - if (state == NULL) { - return; - } - - OPENSSL_cleanse(state, sizeof(struct rand_thread_state)); - OPENSSL_free(state); -} - -#if defined(OPENSSL_X86_64) && !defined(OPENSSL_NO_ASM) && \ - !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) - -/* These functions are defined in asm/rdrand-x86_64.pl */ -extern int CRYPTO_rdrand(uint8_t out[8]); -extern int CRYPTO_rdrand_multiple8_buf(uint8_t *buf, size_t len); - -static int have_rdrand(void) { - return (OPENSSL_ia32cap_P[1] & (1u << 30)) != 0; -} - -static int hwrand(uint8_t *buf, size_t len) { - if (!have_rdrand()) { - return 0; - } - - const size_t len_multiple8 = len & ~7; - if (!CRYPTO_rdrand_multiple8_buf(buf, len_multiple8)) { - return 0; - } - len -= len_multiple8; - - if (len != 0) { - assert(len < 8); - - uint8_t rand_buf[8]; - if (!CRYPTO_rdrand(rand_buf)) { - return 0; - } - OPENSSL_memcpy(buf + len_multiple8, rand_buf, len); - } - - return 1; -} - -#else - -static int hwrand(uint8_t *buf, size_t len) { - return 0; -} - -#endif - -int RAND_bytes(uint8_t *buf, size_t len) { - if (len == 0) { - return 1; - } - - if (!hwrand(buf, len)) { - /* Without a hardware RNG to save us from address-space duplication, the OS - * entropy is used directly. */ - CRYPTO_sysrand(buf, len); - return 1; - } - - struct rand_thread_state *state = - CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_RAND); - if (state == NULL) { - state = OPENSSL_malloc(sizeof(struct rand_thread_state)); - if (state == NULL || - !CRYPTO_set_thread_local(OPENSSL_THREAD_LOCAL_RAND, state, - rand_thread_state_free)) { - CRYPTO_sysrand(buf, len); - return 1; - } - - OPENSSL_memset(state->partial_block, 0, sizeof(state->partial_block)); - state->calls_used = kMaxCallsPerRefresh; - } - - if (state->calls_used >= kMaxCallsPerRefresh || - state->bytes_used >= kMaxBytesPerRefresh) { - CRYPTO_sysrand(state->key, sizeof(state->key)); - state->calls_used = 0; - state->bytes_used = 0; - state->partial_block_used = sizeof(state->partial_block); - } - - if (len >= sizeof(state->partial_block)) { - size_t remaining = len; - while (remaining > 0) { - /* kMaxBytesPerCall is only 2GB, while ChaCha can handle 256GB. But this - * is sufficient and easier on 32-bit. 
*/ - static const size_t kMaxBytesPerCall = 0x80000000; - size_t todo = remaining; - if (todo > kMaxBytesPerCall) { - todo = kMaxBytesPerCall; - } - uint8_t nonce[12]; - OPENSSL_memset(nonce, 0, 4); - OPENSSL_memcpy(nonce + 4, &state->calls_used, sizeof(state->calls_used)); - CRYPTO_chacha_20(buf, buf, todo, state->key, nonce, 0); - buf += todo; - remaining -= todo; - state->calls_used++; - } - } else { - if (sizeof(state->partial_block) - state->partial_block_used < len) { - uint8_t nonce[12]; - OPENSSL_memset(nonce, 0, 4); - OPENSSL_memcpy(nonce + 4, &state->calls_used, sizeof(state->calls_used)); - CRYPTO_chacha_20(state->partial_block, state->partial_block, - sizeof(state->partial_block), state->key, nonce, 0); - state->partial_block_used = 0; - } - - unsigned i; - for (i = 0; i < len; i++) { - buf[i] ^= state->partial_block[state->partial_block_used++]; - } - state->calls_used++; - } - state->bytes_used += len; - - return 1; -} - -int RAND_pseudo_bytes(uint8_t *buf, size_t len) { - return RAND_bytes(buf, len); -} - -void RAND_seed(const void *buf, int num) { - /* OpenSSH calls |RAND_seed| before jailing on the assumption that any needed - * file descriptors etc will be opened. */ - uint8_t unused; - RAND_bytes(&unused, sizeof(unused)); -} - -int RAND_load_file(const char *path, long num) { - if (num < 0) { /* read the "whole file" */ - return 1; - } else if (num <= INT_MAX) { - return (int) num; - } else { - return INT_MAX; - } -} - -const char *RAND_file_name(char *buf, size_t num) { return NULL; } - -void RAND_add(const void *buf, int num, double entropy) {} - -int RAND_egd(const char *path) { - return 255; -} - -int RAND_poll(void) { - return 1; -} - -int RAND_status(void) { - return 1; -} - -static const struct rand_meth_st kSSLeayMethod = { - RAND_seed, - RAND_bytes, - RAND_cleanup, - RAND_add, - RAND_pseudo_bytes, - RAND_status, -}; - -RAND_METHOD *RAND_SSLeay(void) { - return (RAND_METHOD*) &kSSLeayMethod; -} - -void RAND_set_rand_method(const RAND_METHOD *method) {} - -void RAND_cleanup(void) {} diff --git a/Sources/BoringSSL/crypto/rand/urandom.c b/Sources/BoringSSL/crypto/rand/urandom.c deleted file mode 100644 index 23bdcf487..000000000 --- a/Sources/BoringSSL/crypto/rand/urandom.c +++ /dev/null @@ -1,335 +0,0 @@ -/* Copyright (c) 2014, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#if !defined(_GNU_SOURCE) -#define _GNU_SOURCE /* needed for syscall() on Linux. 
*/ -#endif - -#include - -#if !defined(OPENSSL_WINDOWS) && !defined(OPENSSL_FUCHSIA) && \ - !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) - -#include -#include -#include -#include -#include -#include - -#if defined(OPENSSL_LINUX) -#include -#endif - -#include -#include - -#include "internal.h" -#include "../internal.h" - - -#if defined(OPENSSL_LINUX) - -#if defined(OPENSSL_X86_64) -#define EXPECTED_SYS_getrandom 318 -#elif defined(OPENSSL_X86) -#define EXPECTED_SYS_getrandom 355 -#elif defined(OPENSSL_AARCH64) -#define EXPECTED_SYS_getrandom 278 -#elif defined(OPENSSL_ARM) -#define EXPECTED_SYS_getrandom 384 -#elif defined(OPENSSL_PPC64LE) -#define EXPECTED_SYS_getrandom 359 -#endif - -#if defined(EXPECTED_SYS_getrandom) -#define USE_SYS_getrandom - -#if defined(SYS_getrandom) - -#if SYS_getrandom != EXPECTED_SYS_getrandom -#error "system call number for getrandom is not the expected value" -#endif - -#else /* SYS_getrandom */ - -#define SYS_getrandom EXPECTED_SYS_getrandom - -#endif /* SYS_getrandom */ - -#endif /* EXPECTED_SYS_getrandom */ - -#if !defined(GRND_NONBLOCK) -#define GRND_NONBLOCK 1 -#endif - -#endif /* OPENSSL_LINUX */ - -/* This file implements a PRNG by reading from /dev/urandom, optionally with a - * buffer, which is unsafe across |fork|. */ - -#define BUF_SIZE 4096 - -/* rand_buffer contains unused, random bytes, some of which may have been - * consumed already. */ -struct rand_buffer { - size_t used; - uint8_t rand[BUF_SIZE]; -}; - -/* requested_lock is used to protect the |*_requested| variables. */ -static struct CRYPTO_STATIC_MUTEX requested_lock = CRYPTO_STATIC_MUTEX_INIT; - -/* The following constants are magic values of |urandom_fd|. */ -static const int kUnset = -2; -static const int kHaveGetrandom = -3; - -/* urandom_fd_requested is set by |RAND_set_urandom_fd|. It's protected by - * |requested_lock|. */ -static int urandom_fd_requested = -2 /* kUnset */; - -/* urandom_fd is a file descriptor to /dev/urandom. It's protected by |once|. */ -static int urandom_fd = -2 /* kUnset */; - -/* urandom_buffering_requested is set by |RAND_enable_fork_unsafe_buffering|. - * It's protected by |requested_lock|. */ -static int urandom_buffering_requested = 0; - -/* urandom_buffering controls whether buffering is enabled (1) or not (0). This - * is protected by |once|. */ -static int urandom_buffering = 0; - -static CRYPTO_once_t once = CRYPTO_ONCE_INIT; - -/* init_once initializes the state of this module to values previously - * requested. This is the only function that modifies |urandom_fd| and - * |urandom_buffering|, whose values may be read safely after calling the - * once. */ -static void init_once(void) { - CRYPTO_STATIC_MUTEX_lock_read(&requested_lock); - urandom_buffering = urandom_buffering_requested; - int fd = urandom_fd_requested; - CRYPTO_STATIC_MUTEX_unlock_read(&requested_lock); - -#if defined(USE_SYS_getrandom) - uint8_t dummy; - long getrandom_ret = - syscall(SYS_getrandom, &dummy, sizeof(dummy), GRND_NONBLOCK); - - if (getrandom_ret == 1) { - urandom_fd = kHaveGetrandom; - return; - } else if (getrandom_ret == -1 && errno == EAGAIN) { - fprintf(stderr, - "getrandom indicates that the entropy pool has not been " - "initialized. 
Rather than continue with poor entropy, this process " - "will block until entropy is available.\n"); - do { - getrandom_ret = - syscall(SYS_getrandom, &dummy, sizeof(dummy), 0 /* no flags */); - } while (getrandom_ret == -1 && errno == EINTR); - - if (getrandom_ret == 1) { - urandom_fd = kHaveGetrandom; - return; - } - } -#endif /* USE_SYS_getrandom */ - - if (fd == kUnset) { - do { - fd = open("/dev/urandom", O_RDONLY); - } while (fd == -1 && errno == EINTR); - } - - if (fd < 0) { - abort(); - } - - int flags = fcntl(fd, F_GETFD); - if (flags == -1) { - /* Native Client doesn't implement |fcntl|. */ - if (errno != ENOSYS) { - abort(); - } - } else { - flags |= FD_CLOEXEC; - if (fcntl(fd, F_SETFD, flags) == -1) { - abort(); - } - } - urandom_fd = fd; -} - -void RAND_set_urandom_fd(int fd) { - fd = dup(fd); - if (fd < 0) { - abort(); - } - - CRYPTO_STATIC_MUTEX_lock_write(&requested_lock); - urandom_fd_requested = fd; - CRYPTO_STATIC_MUTEX_unlock_write(&requested_lock); - - CRYPTO_once(&once, init_once); - if (urandom_fd == kHaveGetrandom) { - close(fd); - } else if (urandom_fd != fd) { - abort(); // Already initialized. - } -} - -void RAND_enable_fork_unsafe_buffering(int fd) { - if (fd >= 0) { - fd = dup(fd); - if (fd < 0) { - abort(); - } - } else { - fd = kUnset; - } - - CRYPTO_STATIC_MUTEX_lock_write(&requested_lock); - urandom_buffering_requested = 1; - urandom_fd_requested = fd; - CRYPTO_STATIC_MUTEX_unlock_write(&requested_lock); - - CRYPTO_once(&once, init_once); - if (urandom_buffering != 1) { - abort(); // Already initialized - } - - if (fd >= 0) { - if (urandom_fd == kHaveGetrandom) { - close(fd); - } else if (urandom_fd != fd) { - abort(); // Already initialized. - } - } -} - -static struct rand_buffer *get_thread_local_buffer(void) { - struct rand_buffer *buf = - CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_URANDOM_BUF); - if (buf != NULL) { - return buf; - } - - buf = OPENSSL_malloc(sizeof(struct rand_buffer)); - if (buf == NULL) { - return NULL; - } - buf->used = BUF_SIZE; /* To trigger a |fill_with_entropy| on first use. */ - if (!CRYPTO_set_thread_local(OPENSSL_THREAD_LOCAL_URANDOM_BUF, buf, - OPENSSL_free)) { - OPENSSL_free(buf); - return NULL; - } - - return buf; -} - -#if defined(USE_SYS_getrandom) && defined(__has_feature) -#if __has_feature(memory_sanitizer) -void __msan_unpoison(void *, size_t); -#endif -#endif - -/* fill_with_entropy writes |len| bytes of entropy into |out|. It returns one - * on success and zero on error. */ -static char fill_with_entropy(uint8_t *out, size_t len) { - while (len > 0) { - ssize_t r; - - if (urandom_fd == kHaveGetrandom) { -#if defined(USE_SYS_getrandom) - do { - r = syscall(SYS_getrandom, out, len, 0 /* no flags */); - } while (r == -1 && errno == EINTR); - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) - if (r > 0) { - /* MSAN doesn't recognise |syscall| and thus doesn't notice that we - * have initialised the output buffer. */ - __msan_unpoison(out, r); - } -#endif /* memory_sanitizer */ -#endif /*__has_feature */ - -#else /* USE_SYS_getrandom */ - abort(); -#endif - } else { - do { - r = read(urandom_fd, out, len); - } while (r == -1 && errno == EINTR); - } - - if (r <= 0) { - return 0; - } - out += r; - len -= r; - } - - return 1; -} - -/* read_from_buffer reads |requested| random bytes from the buffer into |out|, - * refilling it if necessary to satisfy the request. 
*/ -static void read_from_buffer(struct rand_buffer *buf, - uint8_t *out, size_t requested) { - size_t remaining = BUF_SIZE - buf->used; - - while (requested > remaining) { - OPENSSL_memcpy(out, &buf->rand[buf->used], remaining); - buf->used += remaining; - out += remaining; - requested -= remaining; - - if (!fill_with_entropy(buf->rand, BUF_SIZE)) { - abort(); - return; - } - buf->used = 0; - remaining = BUF_SIZE; - } - - OPENSSL_memcpy(out, &buf->rand[buf->used], requested); - buf->used += requested; -} - -/* CRYPTO_sysrand puts |requested| random bytes into |out|. */ -void CRYPTO_sysrand(uint8_t *out, size_t requested) { - if (requested == 0) { - return; - } - - CRYPTO_once(&once, init_once); - if (urandom_buffering && requested < BUF_SIZE) { - struct rand_buffer *buf = get_thread_local_buffer(); - if (buf != NULL) { - read_from_buffer(buf, out, requested); - return; - } - } - - if (!fill_with_entropy(out, requested)) { - abort(); - } -} - -#endif /* !OPENSSL_WINDOWS && !defined(OPENSSL_FUCHSIA) && \ - !BORINGSSL_UNSAFE_DETERMINISTIC_MODE */ diff --git a/Sources/BoringSSL/crypto/rand/deterministic.c b/Sources/BoringSSL/crypto/rand_extra/deterministic.c similarity index 79% rename from Sources/BoringSSL/crypto/rand/deterministic.c rename to Sources/BoringSSL/crypto/rand_extra/deterministic.c index d96a50534..17fa71e64 100644 --- a/Sources/BoringSSL/crypto/rand/deterministic.c +++ b/Sources/BoringSSL/crypto/rand_extra/deterministic.c @@ -20,15 +20,15 @@ #include -#include "internal.h" #include "../internal.h" +#include "../fipsmodule/rand/internal.h" -/* g_num_calls is the number of calls to |CRYPTO_sysrand| that have occured. - * - * TODO(davidben): This is intentionally not thread-safe. If the fuzzer mode is - * ever used in a multi-threaded program, replace this with a thread-local. (A - * mutex would not be deterministic.) */ +// g_num_calls is the number of calls to |CRYPTO_sysrand| that have occurred. +// +// This is intentionally not thread-safe. If the fuzzer mode is ever used in a +// multi-threaded program, replace this with a thread-local. (A mutex would not +// be deterministic.) static uint64_t g_num_calls = 0; void RAND_reset_for_fuzzing(void) { g_num_calls = 0; } @@ -45,4 +45,4 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { g_num_calls++; } -#endif /* BORINGSSL_UNSAFE_DETERMINISTIC_MODE */ +#endif // BORINGSSL_UNSAFE_DETERMINISTIC_MODE diff --git a/Sources/BoringSSL/crypto/rand_extra/forkunsafe.c b/Sources/BoringSSL/crypto/rand_extra/forkunsafe.c new file mode 100644 index 000000000..0f1ececc8 --- /dev/null +++ b/Sources/BoringSSL/crypto/rand_extra/forkunsafe.c @@ -0,0 +1,46 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +#include + +#include + +#include "../fipsmodule/rand/internal.h" + + +// g_buffering_enabled is true if fork-unsafe buffering has been enabled. +static int g_buffering_enabled = 0; + +// g_lock protects |g_buffering_enabled|. +static struct CRYPTO_STATIC_MUTEX g_lock = CRYPTO_STATIC_MUTEX_INIT; + +#if !defined(OPENSSL_WINDOWS) +void RAND_enable_fork_unsafe_buffering(int fd) { + // We no longer support setting the file-descriptor with this function. + if (fd != -1) { + abort(); + } + + CRYPTO_STATIC_MUTEX_lock_write(&g_lock); + g_buffering_enabled = 1; + CRYPTO_STATIC_MUTEX_unlock_write(&g_lock); +} +#endif + +int rand_fork_unsafe_buffering_enabled(void) { + CRYPTO_STATIC_MUTEX_lock_read(&g_lock); + const int ret = g_buffering_enabled; + CRYPTO_STATIC_MUTEX_unlock_read(&g_lock); + return ret; +} diff --git a/Sources/BoringSSL/crypto/rand/fuchsia.c b/Sources/BoringSSL/crypto/rand_extra/fuchsia.c similarity index 79% rename from Sources/BoringSSL/crypto/rand/fuchsia.c rename to Sources/BoringSSL/crypto/rand_extra/fuchsia.c index 2e138d0ab..ce8356c0a 100644 --- a/Sources/BoringSSL/crypto/rand/fuchsia.c +++ b/Sources/BoringSSL/crypto/rand_extra/fuchsia.c @@ -19,20 +19,20 @@ #include #include -#include +#include -#include "internal.h" +#include "../fipsmodule/rand/internal.h" void CRYPTO_sysrand(uint8_t *out, size_t requested) { while (requested > 0) { - size_t output_bytes_this_pass = MX_CPRNG_DRAW_MAX_LEN; + size_t output_bytes_this_pass = ZX_CPRNG_DRAW_MAX_LEN; if (requested < output_bytes_this_pass) { output_bytes_this_pass = requested; } size_t bytes_drawn; - mx_status_t status = - mx_cprng_draw(out, output_bytes_this_pass, &bytes_drawn); - if (status != NO_ERROR) { + zx_status_t status = + zx_cprng_draw(out, output_bytes_this_pass, &bytes_drawn); + if (status != ZX_OK) { abort(); } requested -= bytes_drawn; @@ -40,4 +40,4 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { } } -#endif /* OPENSSL_FUCHSIA && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE */ +#endif // OPENSSL_FUCHSIA && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE diff --git a/Sources/BoringSSL/crypto/rand_extra/rand_extra.c b/Sources/BoringSSL/crypto/rand_extra/rand_extra.c new file mode 100644 index 000000000..bed9e1ef0 --- /dev/null +++ b/Sources/BoringSSL/crypto/rand_extra/rand_extra.c @@ -0,0 +1,70 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include + + +void RAND_seed(const void *buf, int num) { + // OpenSSH calls |RAND_seed| before jailing on the assumption that any needed + // file descriptors etc will be opened. 
+ uint8_t unused; + RAND_bytes(&unused, sizeof(unused)); +} + +int RAND_load_file(const char *path, long num) { + if (num < 0) { // read the "whole file" + return 1; + } else if (num <= INT_MAX) { + return (int) num; + } else { + return INT_MAX; + } +} + +const char *RAND_file_name(char *buf, size_t num) { return NULL; } + +void RAND_add(const void *buf, int num, double entropy) {} + +int RAND_egd(const char *path) { + return 255; +} + +int RAND_poll(void) { + return 1; +} + +int RAND_status(void) { + return 1; +} + +static const struct rand_meth_st kSSLeayMethod = { + RAND_seed, + RAND_bytes, + RAND_cleanup, + RAND_add, + RAND_pseudo_bytes, + RAND_status, +}; + +RAND_METHOD *RAND_SSLeay(void) { + return (RAND_METHOD*) &kSSLeayMethod; +} + +const RAND_METHOD *RAND_get_rand_method(void) { return RAND_SSLeay(); } + +void RAND_set_rand_method(const RAND_METHOD *method) {} + +void RAND_cleanup(void) {} diff --git a/Sources/BoringSSL/crypto/rand/windows.c b/Sources/BoringSSL/crypto/rand_extra/windows.c similarity index 84% rename from Sources/BoringSSL/crypto/rand/windows.c rename to Sources/BoringSSL/crypto/rand_extra/windows.c index f47182d90..c9555874e 100644 --- a/Sources/BoringSSL/crypto/rand/windows.c +++ b/Sources/BoringSSL/crypto/rand_extra/windows.c @@ -23,16 +23,16 @@ OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include -/* #define needed to link in RtlGenRandom(), a.k.a. SystemFunction036. See the - * "Community Additions" comment on MSDN here: - * http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx */ +// #define needed to link in RtlGenRandom(), a.k.a. SystemFunction036. See the +// "Community Additions" comment on MSDN here: +// http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx #define SystemFunction036 NTAPI SystemFunction036 #include #undef SystemFunction036 OPENSSL_MSVC_PRAGMA(warning(pop)) -#include "internal.h" +#include "../fipsmodule/rand/internal.h" void CRYPTO_sysrand(uint8_t *out, size_t requested) { @@ -50,4 +50,4 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) { return; } -#endif /* OPENSSL_WINDOWS && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE */ +#endif // OPENSSL_WINDOWS && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE diff --git a/Sources/BoringSSL/crypto/refcount_c11.c b/Sources/BoringSSL/crypto/refcount_c11.c index fbc0343d6..0a331a45d 100644 --- a/Sources/BoringSSL/crypto/refcount_c11.c +++ b/Sources/BoringSSL/crypto/refcount_c11.c @@ -25,7 +25,7 @@ #include -/* See comment above the typedef of CRYPTO_refcount_t about these tests. */ +// See comment above the typedef of CRYPTO_refcount_t about these tests. 
static_assert(alignof(CRYPTO_refcount_t) == alignof(_Atomic CRYPTO_refcount_t), "_Atomic alters the needed alignment of a reference count"); static_assert(sizeof(CRYPTO_refcount_t) == sizeof(_Atomic CRYPTO_refcount_t), @@ -64,4 +64,4 @@ int CRYPTO_refcount_dec_and_test_zero(CRYPTO_refcount_t *in_count) { } } -#endif /* OPENSSL_C11_ATOMIC */ +#endif // OPENSSL_C11_ATOMIC diff --git a/Sources/BoringSSL/crypto/refcount_lock.c b/Sources/BoringSSL/crypto/refcount_lock.c index ea6a06d39..8b855d627 100644 --- a/Sources/BoringSSL/crypto/refcount_lock.c +++ b/Sources/BoringSSL/crypto/refcount_lock.c @@ -50,4 +50,4 @@ int CRYPTO_refcount_dec_and_test_zero(CRYPTO_refcount_t *count) { return ret; } -#endif /* OPENSSL_C11_ATOMIC */ +#endif // OPENSSL_C11_ATOMIC diff --git a/Sources/BoringSSL/crypto/rsa/rsa_asn1.c b/Sources/BoringSSL/crypto/rsa_extra/rsa_asn1.c similarity index 64% rename from Sources/BoringSSL/crypto/rsa/rsa_asn1.c rename to Sources/BoringSSL/crypto/rsa_extra/rsa_asn1.c index 88b1dfb32..3cc6a9c3c 100644 --- a/Sources/BoringSSL/crypto/rsa/rsa_asn1.c +++ b/Sources/BoringSSL/crypto/rsa_extra/rsa_asn1.c @@ -64,44 +64,37 @@ #include #include -#include "internal.h" +#include "../fipsmodule/rsa/internal.h" #include "../bytestring/internal.h" #include "../internal.h" -static int parse_integer_buggy(CBS *cbs, BIGNUM **out, int buggy) { +static int parse_integer(CBS *cbs, BIGNUM **out) { assert(*out == NULL); *out = BN_new(); if (*out == NULL) { return 0; } - if (buggy) { - return BN_parse_asn1_unsigned_buggy(cbs, *out); - } return BN_parse_asn1_unsigned(cbs, *out); } -static int parse_integer(CBS *cbs, BIGNUM **out) { - return parse_integer_buggy(cbs, out, 0 /* not buggy */); -} - static int marshal_integer(CBB *cbb, BIGNUM *bn) { if (bn == NULL) { - /* An RSA object may be missing some components. */ + // An RSA object may be missing some components. OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING); return 0; } return BN_marshal_asn1(cbb, bn); } -static RSA *parse_public_key(CBS *cbs, int buggy) { +RSA *RSA_parse_public_key(CBS *cbs) { RSA *ret = RSA_new(); if (ret == NULL) { return NULL; } CBS child; if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) || - !parse_integer_buggy(&child, &ret->n, buggy) || + !parse_integer(&child, &ret->n) || !parse_integer(&child, &ret->e) || CBS_len(&child) != 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_ENCODING); @@ -119,18 +112,6 @@ static RSA *parse_public_key(CBS *cbs, int buggy) { return ret; } -RSA *RSA_parse_public_key(CBS *cbs) { - return parse_public_key(cbs, 0 /* not buggy */); -} - -RSA *RSA_parse_public_key_buggy(CBS *cbs) { - /* Estonian IDs issued between September 2014 to September 2015 are - * broken. See https://crbug.com/532048 and https://crbug.com/534766. - * - * TODO(davidben): Remove this code and callers in March 2016. */ - return parse_public_key(cbs, 1 /* buggy */); -} - RSA *RSA_public_key_from_bytes(const uint8_t *in, size_t in_len) { CBS cbs; CBS_init(&cbs, in, in_len); @@ -169,40 +150,11 @@ int RSA_public_key_to_bytes(uint8_t **out_bytes, size_t *out_len, return 1; } -/* kVersionTwoPrime and kVersionMulti are the supported values of the version - * field of an RSAPrivateKey structure (RFC 3447). */ +// kVersionTwoPrime is the value of the version field for a two-prime +// RSAPrivateKey structure (RFC 3447). static const uint64_t kVersionTwoPrime = 0; -static const uint64_t kVersionMulti = 1; - -/* rsa_parse_additional_prime parses a DER-encoded OtherPrimeInfo from |cbs| and - * advances |cbs|. 
It returns a newly-allocated |RSA_additional_prime| on - * success or NULL on error. The |r| and |mont| fields of the result are set to - * NULL. */ -static RSA_additional_prime *rsa_parse_additional_prime(CBS *cbs) { - RSA_additional_prime *ret = OPENSSL_malloc(sizeof(RSA_additional_prime)); - if (ret == NULL) { - OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); - return 0; - } - OPENSSL_memset(ret, 0, sizeof(RSA_additional_prime)); - - CBS child; - if (!CBS_get_asn1(cbs, &child, CBS_ASN1_SEQUENCE) || - !parse_integer(&child, &ret->prime) || - !parse_integer(&child, &ret->exp) || - !parse_integer(&child, &ret->coeff) || - CBS_len(&child) != 0) { - OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_ENCODING); - RSA_additional_prime_free(ret); - return NULL; - } - - return ret; -} RSA *RSA_parse_private_key(CBS *cbs) { - BN_CTX *ctx = NULL; - BIGNUM *product_of_primes_so_far = NULL; RSA *ret = RSA_new(); if (ret == NULL) { return NULL; @@ -216,7 +168,7 @@ RSA *RSA_parse_private_key(CBS *cbs) { goto err; } - if (version != kVersionTwoPrime && version != kVersionMulti) { + if (version != kVersionTwoPrime) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_VERSION); goto err; } @@ -232,50 +184,6 @@ RSA *RSA_parse_private_key(CBS *cbs) { goto err; } - if (version == kVersionMulti) { - /* Although otherPrimeInfos is written as OPTIONAL in RFC 3447, it later - * says "[otherPrimeInfos] shall be omitted if version is 0 and shall - * contain at least one instance of OtherPrimeInfo if version is 1." The - * OPTIONAL is just so both versions share a single definition. */ - CBS other_prime_infos; - if (!CBS_get_asn1(&child, &other_prime_infos, CBS_ASN1_SEQUENCE) || - CBS_len(&other_prime_infos) == 0) { - OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_ENCODING); - goto err; - } - ret->additional_primes = sk_RSA_additional_prime_new_null(); - if (ret->additional_primes == NULL) { - OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); - goto err; - } - - ctx = BN_CTX_new(); - product_of_primes_so_far = BN_new(); - if (ctx == NULL || - product_of_primes_so_far == NULL || - !BN_mul(product_of_primes_so_far, ret->p, ret->q, ctx)) { - goto err; - } - - while (CBS_len(&other_prime_infos) > 0) { - RSA_additional_prime *ap = rsa_parse_additional_prime(&other_prime_infos); - if (ap == NULL) { - goto err; - } - if (!sk_RSA_additional_prime_push(ret->additional_primes, ap)) { - OPENSSL_PUT_ERROR(RSA, ERR_R_MALLOC_FAILURE); - RSA_additional_prime_free(ap); - goto err; - } - ap->r = BN_dup(product_of_primes_so_far); - if (ap->r == NULL || - !BN_mul(product_of_primes_so_far, product_of_primes_so_far, - ap->prime, ctx)) { - goto err; - } - } - } - if (CBS_len(&child) != 0) { OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_ENCODING); goto err; @@ -286,13 +194,9 @@ RSA *RSA_parse_private_key(CBS *cbs) { goto err; } - BN_CTX_free(ctx); - BN_free(product_of_primes_so_far); return ret; err: - BN_CTX_free(ctx); - BN_free(product_of_primes_so_far); RSA_free(ret); return NULL; } @@ -310,13 +214,9 @@ RSA *RSA_private_key_from_bytes(const uint8_t *in, size_t in_len) { } int RSA_marshal_private_key(CBB *cbb, const RSA *rsa) { - const int is_multiprime = - sk_RSA_additional_prime_num(rsa->additional_primes) > 0; - CBB child; if (!CBB_add_asn1(cbb, &child, CBS_ASN1_SEQUENCE) || - !CBB_add_asn1_uint64(&child, - is_multiprime ? 
kVersionMulti : kVersionTwoPrime) || + !CBB_add_asn1_uint64(&child, kVersionTwoPrime) || !marshal_integer(&child, rsa->n) || !marshal_integer(&child, rsa->e) || !marshal_integer(&child, rsa->d) || @@ -324,35 +224,8 @@ int RSA_marshal_private_key(CBB *cbb, const RSA *rsa) { !marshal_integer(&child, rsa->q) || !marshal_integer(&child, rsa->dmp1) || !marshal_integer(&child, rsa->dmq1) || - !marshal_integer(&child, rsa->iqmp)) { - OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR); - return 0; - } - - CBB other_prime_infos; - if (is_multiprime) { - if (!CBB_add_asn1(&child, &other_prime_infos, CBS_ASN1_SEQUENCE)) { - OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR); - return 0; - } - for (size_t i = 0; i < sk_RSA_additional_prime_num(rsa->additional_primes); - i++) { - RSA_additional_prime *ap = - sk_RSA_additional_prime_value(rsa->additional_primes, i); - CBB other_prime_info; - if (!CBB_add_asn1(&other_prime_infos, &other_prime_info, - CBS_ASN1_SEQUENCE) || - !marshal_integer(&other_prime_info, ap->prime) || - !marshal_integer(&other_prime_info, ap->exp) || - !marshal_integer(&other_prime_info, ap->coeff) || - !CBB_flush(&other_prime_infos)) { - OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR); - return 0; - } - } - } - - if (!CBB_flush(cbb)) { + !marshal_integer(&child, rsa->iqmp) || + !CBB_flush(cbb)) { OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR); return 0; } diff --git a/Sources/BoringSSL/crypto/stack/stack.c b/Sources/BoringSSL/crypto/stack/stack.c index f78209d5a..f6b44123a 100644 --- a/Sources/BoringSSL/crypto/stack/stack.c +++ b/Sources/BoringSSL/crypto/stack/stack.c @@ -63,8 +63,8 @@ #include "../internal.h" -/* kMinSize is the number of pointers that will be initially allocated in a new - * stack. */ +// kMinSize is the number of pointers that will be initially allocated in a new +// stack. static const size_t kMinSize = 4; _STACK *sk_new(stack_cmp_func comp) { @@ -152,18 +152,18 @@ size_t sk_insert(_STACK *sk, void *p, size_t where) { } if (sk->num_alloc <= sk->num + 1) { - /* Attempt to double the size of the array. */ + // Attempt to double the size of the array. size_t new_alloc = sk->num_alloc << 1; size_t alloc_size = new_alloc * sizeof(void *); void **data; - /* If the doubling overflowed, try to increment. */ + // If the doubling overflowed, try to increment. if (new_alloc < sk->num_alloc || alloc_size / sizeof(void *) != new_alloc) { new_alloc = sk->num_alloc + 1; alloc_size = new_alloc * sizeof(void *); } - /* If the increment also overflowed, fail. */ + // If the increment also overflowed, fail. if (new_alloc < sk->num_alloc || alloc_size / sizeof(void *) != new_alloc) { return 0; } @@ -229,7 +229,7 @@ int sk_find(_STACK *sk, size_t *out_index, void *p) { } if (sk->comp == NULL) { - /* Use pointer equality when no comparison function has been set. */ + // Use pointer equality when no comparison function has been set. for (size_t i = 0; i < sk->num; i++) { if (sk->data[i] == p) { if (out_index) { @@ -247,18 +247,18 @@ int sk_find(_STACK *sk, size_t *out_index, void *p) { sk_sort(sk); - /* sk->comp is a function that takes pointers to pointers to elements, but - * qsort and bsearch take a comparison function that just takes pointers to - * elements. However, since we're passing an array of pointers to - * qsort/bsearch, we can just cast the comparison function and everything - * works. */ + // sk->comp is a function that takes pointers to pointers to elements, but + // qsort and bsearch take a comparison function that just takes pointers to + // elements. 
However, since we're passing an array of pointers to + // qsort/bsearch, we can just cast the comparison function and everything + // works. const void *const *r = bsearch(&p, sk->data, sk->num, sizeof(void *), (int (*)(const void *, const void *))sk->comp); if (r == NULL) { return 0; } size_t idx = ((void **)r) - sk->data; - /* This function always returns the first result. */ + // This function always returns the first result. while (idx > 0 && sk->comp((const void **)&p, (const void **)&sk->data[idx - 1]) == 0) { idx--; @@ -329,7 +329,7 @@ void sk_sort(_STACK *sk) { return; } - /* See the comment in sk_find about this cast. */ + // See the comment in sk_find about this cast. comp_func = (int (*)(const void *, const void *))(sk->comp); qsort(sk->data, sk->num, sizeof(void *), comp_func); sk->sorted = 1; diff --git a/Sources/BoringSSL/crypto/thread_none.c b/Sources/BoringSSL/crypto/thread_none.c index 85768b4bf..718d96016 100644 --- a/Sources/BoringSSL/crypto/thread_none.c +++ b/Sources/BoringSSL/crypto/thread_none.c @@ -56,4 +56,4 @@ int CRYPTO_set_thread_local(thread_local_data_t index, void *value, return 1; } -#endif /* OPENSSL_NO_THREADS */ +#endif // OPENSSL_NO_THREADS diff --git a/Sources/BoringSSL/crypto/thread_pthread.c b/Sources/BoringSSL/crypto/thread_pthread.c index d9e87f2d6..90b3d6051 100644 --- a/Sources/BoringSSL/crypto/thread_pthread.c +++ b/Sources/BoringSSL/crypto/thread_pthread.c @@ -173,4 +173,4 @@ int CRYPTO_set_thread_local(thread_local_data_t index, void *value, return 1; } -#endif /* OPENSSL_PTHREADS */ +#endif // OPENSSL_PTHREADS diff --git a/Sources/BoringSSL/crypto/thread_win.c b/Sources/BoringSSL/crypto/thread_win.c index 62119b4ef..d6fa54849 100644 --- a/Sources/BoringSSL/crypto/thread_win.c +++ b/Sources/BoringSSL/crypto/thread_win.c @@ -63,7 +63,7 @@ void CRYPTO_MUTEX_unlock_write(CRYPTO_MUTEX *lock) { } void CRYPTO_MUTEX_cleanup(CRYPTO_MUTEX *lock) { - /* SRWLOCKs require no cleanup. */ + // SRWLOCKs require no cleanup. } void CRYPTO_STATIC_MUTEX_lock_read(struct CRYPTO_STATIC_MUTEX *lock) { @@ -100,11 +100,11 @@ static void thread_local_init(void) { static void NTAPI thread_local_destructor(PVOID module, DWORD reason, PVOID reserved) { - /* Only free memory on |DLL_THREAD_DETACH|, not |DLL_PROCESS_DETACH|. In - * VS2015's debug runtime, the C runtime has been unloaded by the time - * |DLL_PROCESS_DETACH| runs. See https://crbug.com/575795. This is consistent - * with |pthread_key_create| which does not call destructors on process exit, - * only thread exit. */ + // Only free memory on |DLL_THREAD_DETACH|, not |DLL_PROCESS_DETACH|. In + // VS2015's debug runtime, the C runtime has been unloaded by the time + // |DLL_PROCESS_DETACH| runs. See https://crbug.com/575795. This is consistent + // with |pthread_key_create| which does not call destructors on process exit, + // only thread exit. if (reason != DLL_THREAD_DETACH) { return; } @@ -135,17 +135,17 @@ static void NTAPI thread_local_destructor(PVOID module, DWORD reason, OPENSSL_free(pointers); } -/* Thread Termination Callbacks. - * - * Windows doesn't support a per-thread destructor with its TLS primitives. - * So, we build it manually by inserting a function to be called on each - * thread's exit. This magic is from http://www.codeproject.com/threads/tls.asp - * and it works for VC++ 7.0 and later. - * - * Force a reference to _tls_used to make the linker create the TLS directory - * if it's not already there. (E.g. if __declspec(thread) is not used). 
Force - * a reference to p_thread_callback_boringssl to prevent whole program - * optimization from discarding the variable. */ +// Thread Termination Callbacks. +// +// Windows doesn't support a per-thread destructor with its TLS primitives. +// So, we build it manually by inserting a function to be called on each +// thread's exit. This magic is from http://www.codeproject.com/threads/tls.asp +// and it works for VC++ 7.0 and later. +// +// Force a reference to _tls_used to make the linker create the TLS directory +// if it's not already there. (E.g. if __declspec(thread) is not used). Force +// a reference to p_thread_callback_boringssl to prevent whole program +// optimization from discarding the variable. #ifdef _WIN64 #pragma comment(linker, "/INCLUDE:_tls_used") #pragma comment(linker, "/INCLUDE:p_thread_callback_boringssl") @@ -154,41 +154,41 @@ static void NTAPI thread_local_destructor(PVOID module, DWORD reason, #pragma comment(linker, "/INCLUDE:_p_thread_callback_boringssl") #endif -/* .CRT$XLA to .CRT$XLZ is an array of PIMAGE_TLS_CALLBACK pointers that are - * called automatically by the OS loader code (not the CRT) when the module is - * loaded and on thread creation. They are NOT called if the module has been - * loaded by a LoadLibrary() call. It must have implicitly been loaded at - * process startup. - * - * By implicitly loaded, I mean that it is directly referenced by the main EXE - * or by one of its dependent DLLs. Delay-loaded DLL doesn't count as being - * implicitly loaded. - * - * See VC\crt\src\tlssup.c for reference. */ - -/* The linker must not discard p_thread_callback_boringssl. (We force a reference - * to this variable with a linker /INCLUDE:symbol pragma to ensure that.) If - * this variable is discarded, the OnThreadExit function will never be - * called. */ +// .CRT$XLA to .CRT$XLZ is an array of PIMAGE_TLS_CALLBACK pointers that are +// called automatically by the OS loader code (not the CRT) when the module is +// loaded and on thread creation. They are NOT called if the module has been +// loaded by a LoadLibrary() call. It must have implicitly been loaded at +// process startup. +// +// By implicitly loaded, I mean that it is directly referenced by the main EXE +// or by one of its dependent DLLs. Delay-loaded DLL doesn't count as being +// implicitly loaded. +// +// See VC\crt\src\tlssup.c for reference. + +// The linker must not discard p_thread_callback_boringssl. (We force a +// reference to this variable with a linker /INCLUDE:symbol pragma to ensure +// that.) If this variable is discarded, the OnThreadExit function will never +// be called. #ifdef _WIN64 -/* .CRT section is merged with .rdata on x64 so it must be constant data. */ +// .CRT section is merged with .rdata on x64 so it must be constant data. #pragma const_seg(".CRT$XLC") -/* When defining a const variable, it must have external linkage to be sure the - * linker doesn't discard it. */ +// When defining a const variable, it must have external linkage to be sure the +// linker doesn't discard it. extern const PIMAGE_TLS_CALLBACK p_thread_callback_boringssl; const PIMAGE_TLS_CALLBACK p_thread_callback_boringssl = thread_local_destructor; -/* Reset the default section. */ +// Reset the default section. #pragma const_seg() #else #pragma data_seg(".CRT$XLC") PIMAGE_TLS_CALLBACK p_thread_callback_boringssl = thread_local_destructor; -/* Reset the default section. */ +// Reset the default section. 
#pragma data_seg() -#endif /* _WIN64 */ +#endif // _WIN64 void *CRYPTO_get_thread_local(thread_local_data_t index) { CRYPTO_once(&g_thread_local_init_once, thread_local_init); @@ -234,4 +234,4 @@ int CRYPTO_set_thread_local(thread_local_data_t index, void *value, return 1; } -#endif /* OPENSSL_WINDOWS_THREADS */ +#endif // OPENSSL_WINDOWS_THREADS diff --git a/Sources/BoringSSL/crypto/x509/a_sign.c b/Sources/BoringSSL/crypto/x509/a_sign.c index 13a3ac25e..6c7f7136d 100644 --- a/Sources/BoringSSL/crypto/x509/a_sign.c +++ b/Sources/BoringSSL/crypto/x509/a_sign.c @@ -83,7 +83,7 @@ int ASN1_item_sign_ctx(const ASN1_ITEM *it, { EVP_PKEY *pkey; unsigned char *buf_in = NULL, *buf_out = NULL; - size_t inl = 0, outl = 0, outll = 0; + size_t inl = 0, outl = 0; pkey = EVP_PKEY_CTX_get0_pkey(ctx->pctx); @@ -96,7 +96,7 @@ int ASN1_item_sign_ctx(const ASN1_ITEM *it, } inl = ASN1_item_i2d(asn, &buf_in, it); - outll = outl = EVP_PKEY_size(pkey); + outl = EVP_PKEY_size(pkey); buf_out = OPENSSL_malloc((unsigned int)outl); if ((buf_in == NULL) || (buf_out == NULL)) { outl = 0; @@ -104,8 +104,7 @@ int ASN1_item_sign_ctx(const ASN1_ITEM *it, goto err; } - if (!EVP_DigestSignUpdate(ctx, buf_in, inl) - || !EVP_DigestSignFinal(ctx, buf_out, &outl)) { + if (!EVP_DigestSign(ctx, buf_out, &outl, buf_in, inl)) { outl = 0; OPENSSL_PUT_ERROR(X509, ERR_R_EVP_LIB); goto err; @@ -123,13 +122,7 @@ int ASN1_item_sign_ctx(const ASN1_ITEM *it, signature->flags |= ASN1_STRING_FLAG_BITS_LEFT; err: EVP_MD_CTX_cleanup(ctx); - if (buf_in != NULL) { - OPENSSL_cleanse((char *)buf_in, (unsigned int)inl); - OPENSSL_free(buf_in); - } - if (buf_out != NULL) { - OPENSSL_cleanse((char *)buf_out, outll); - OPENSSL_free(buf_out); - } + OPENSSL_free(buf_in); + OPENSSL_free(buf_out); return (outl); } diff --git a/Sources/BoringSSL/crypto/x509/a_verify.c b/Sources/BoringSSL/crypto/x509/a_verify.c index 0af4197c3..5b751675c 100644 --- a/Sources/BoringSSL/crypto/x509/a_verify.c +++ b/Sources/BoringSSL/crypto/x509/a_verify.c @@ -75,7 +75,7 @@ int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a, { EVP_MD_CTX ctx; uint8_t *buf_in = NULL; - int ret = 0, inl; + int ret = 0, inl = 0; if (!pkey) { OPENSSL_PUT_ERROR(X509, ERR_R_PASSED_NULL_PARAMETER); @@ -100,28 +100,16 @@ int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a, goto err; } - if (!EVP_DigestVerifyUpdate(&ctx, buf_in, inl)) { - OPENSSL_cleanse(buf_in, (unsigned int)inl); - OPENSSL_free(buf_in); + if (!EVP_DigestVerify(&ctx, signature->data, (size_t)signature->length, + buf_in, inl)) { OPENSSL_PUT_ERROR(X509, ERR_R_EVP_LIB); goto err; } - OPENSSL_cleanse(buf_in, (unsigned int)inl); - OPENSSL_free(buf_in); - - if (EVP_DigestVerifyFinal(&ctx, signature->data, - (size_t)signature->length) <= 0) { - OPENSSL_PUT_ERROR(X509, ERR_R_EVP_LIB); - goto err; - } - /* - * we don't need to zero the 'ctx' because we just checked public - * information - */ - /* OPENSSL_memset(&ctx,0,sizeof(ctx)); */ ret = 1; - err: + +err: + OPENSSL_free(buf_in); EVP_MD_CTX_cleanup(&ctx); return ret; } diff --git a/Sources/BoringSSL/crypto/x509/algorithm.c b/Sources/BoringSSL/crypto/x509/algorithm.c index 78ae882b8..8f53fff6d 100644 --- a/Sources/BoringSSL/crypto/x509/algorithm.c +++ b/Sources/BoringSSL/crypto/x509/algorithm.c @@ -66,9 +66,8 @@ int x509_digest_sign_algorithm(EVP_MD_CTX *ctx, X509_ALGOR *algor) { - const EVP_MD *digest = EVP_MD_CTX_md(ctx); EVP_PKEY *pkey = EVP_PKEY_CTX_get0_pkey(ctx->pctx); - if (digest == NULL || pkey == NULL) { + if (pkey == NULL) { OPENSSL_PUT_ERROR(ASN1, 
ASN1_R_CONTEXT_NOT_INITIALISED); return 0; } @@ -84,8 +83,18 @@ int x509_digest_sign_algorithm(EVP_MD_CTX *ctx, X509_ALGOR *algor) { } } + if (EVP_PKEY_id(pkey) == EVP_PKEY_ED25519) { + return X509_ALGOR_set0(algor, OBJ_nid2obj(NID_ED25519), V_ASN1_UNDEF, NULL); + } + /* Default behavior: look up the OID for the algorithm/hash pair and encode * that. */ + const EVP_MD *digest = EVP_MD_CTX_md(ctx); + if (digest == NULL) { + OPENSSL_PUT_ERROR(ASN1, ASN1_R_CONTEXT_NOT_INITIALISED); + return 0; + } + int sign_nid; if (!OBJ_find_sigid_by_algs(&sign_nid, EVP_MD_type(digest), EVP_PKEY_id(pkey))) { @@ -119,11 +128,18 @@ int x509_digest_verify_init(EVP_MD_CTX *ctx, X509_ALGOR *sigalg, /* NID_undef signals that there are custom parameters to set. */ if (digest_nid == NID_undef) { - if (sigalg_nid != NID_rsassaPss) { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM); - return 0; + if (sigalg_nid == NID_rsassaPss) { + return x509_rsa_pss_to_ctx(ctx, sigalg, pkey); } - return x509_rsa_pss_to_ctx(ctx, sigalg, pkey); + if (sigalg_nid == NID_ED25519) { + if (sigalg->parameter != NULL) { + OPENSSL_PUT_ERROR(X509, X509_R_INVALID_PARAMETER); + return 0; + } + return EVP_DigestVerifyInit(ctx, NULL, NULL, NULL, pkey); + } + OPENSSL_PUT_ERROR(ASN1, ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM); + return 0; } /* Otherwise, initialize with the digest from the OID. */ diff --git a/Sources/BoringSSL/crypto/x509/asn1_gen.c b/Sources/BoringSSL/crypto/x509/asn1_gen.c index c52a1ac01..5b74cd1ee 100644 --- a/Sources/BoringSSL/crypto/x509/asn1_gen.c +++ b/Sources/BoringSSL/crypto/x509/asn1_gen.c @@ -84,6 +84,8 @@ #define ASN1_GEN_STR(str,val) {str, sizeof(str) - 1, val} #define ASN1_FLAG_EXP_MAX 20 +/* Maximum number of nested sequences */ +#define ASN1_GEN_SEQ_MAX_DEPTH 50 /* Input formats */ @@ -120,13 +122,16 @@ typedef struct { int exp_count; } tag_exp_arg; +static ASN1_TYPE *generate_v3(char *str, X509V3_CTX *cnf, int depth, + int *perr); static int bitstr_cb(const char *elem, int len, void *bitstr); static int asn1_cb(const char *elem, int len, void *bitstr); static int append_exp(tag_exp_arg *arg, int exp_tag, int exp_class, int exp_constructed, int exp_pad, int imp_ok); static int parse_tagging(const char *vstart, int vlen, int *ptag, int *pclass); -static ASN1_TYPE *asn1_multi(int utype, const char *section, X509V3_CTX *cnf); +static ASN1_TYPE *asn1_multi(int utype, const char *section, X509V3_CTX *cnf, + int depth, int *perr); static ASN1_TYPE *asn1_str2type(const char *str, int format, int utype); static int asn1_str2tag(const char *tagstr, int len); @@ -142,6 +147,16 @@ ASN1_TYPE *ASN1_generate_nconf(char *str, CONF *nconf) } ASN1_TYPE *ASN1_generate_v3(char *str, X509V3_CTX *cnf) +{ + int err = 0; + ASN1_TYPE *ret = generate_v3(str, cnf, 0, &err); + if (err) + OPENSSL_PUT_ERROR(ASN1, err); + return ret; +} + +static ASN1_TYPE *generate_v3(char *str, X509V3_CTX *cnf, int depth, + int *perr) { ASN1_TYPE *ret; tag_exp_arg asn1_tags; @@ -162,16 +177,22 @@ ASN1_TYPE *ASN1_generate_v3(char *str, X509V3_CTX *cnf) asn1_tags.imp_class = -1; asn1_tags.format = ASN1_GEN_FORMAT_ASCII; asn1_tags.exp_count = 0; - if (CONF_parse_list(str, ',', 1, asn1_cb, &asn1_tags) != 0) + if (CONF_parse_list(str, ',', 1, asn1_cb, &asn1_tags) != 0) { + *perr = ASN1_R_UNKNOWN_TAG; return NULL; + } if ((asn1_tags.utype == V_ASN1_SEQUENCE) || (asn1_tags.utype == V_ASN1_SET)) { if (!cnf) { - OPENSSL_PUT_ERROR(ASN1, ASN1_R_SEQUENCE_OR_SET_NEEDS_CONFIG); + *perr = ASN1_R_SEQUENCE_OR_SET_NEEDS_CONFIG; return NULL; } - ret = 
asn1_multi(asn1_tags.utype, asn1_tags.str, cnf); + if (depth >= ASN1_GEN_SEQ_MAX_DEPTH) { + *perr = ASN1_R_ILLEGAL_NESTED_TAGGING; + return NULL; + } + ret = asn1_multi(asn1_tags.utype, asn1_tags.str, cnf, depth, perr); } else ret = asn1_str2type(asn1_tags.str, asn1_tags.format, asn1_tags.utype); @@ -289,7 +310,7 @@ static int asn1_cb(const char *elem, int len, void *bitstr) int tmp_tag, tmp_class; if (elem == NULL) - return 0; + return -1; for (i = 0, p = elem; i < len; p++, i++) { /* Look for the ':' in name value pairs */ @@ -444,7 +465,8 @@ static int parse_tagging(const char *vstart, int vlen, int *ptag, int *pclass) /* Handle multiple types: SET and SEQUENCE */ -static ASN1_TYPE *asn1_multi(int utype, const char *section, X509V3_CTX *cnf) +static ASN1_TYPE *asn1_multi(int utype, const char *section, X509V3_CTX *cnf, + int depth, int *perr) { ASN1_TYPE *ret = NULL; STACK_OF(ASN1_TYPE) *sk = NULL; @@ -463,7 +485,8 @@ static ASN1_TYPE *asn1_multi(int utype, const char *section, X509V3_CTX *cnf) goto bad; for (i = 0; i < sk_CONF_VALUE_num(sect); i++) { ASN1_TYPE *typ = - ASN1_generate_v3(sk_CONF_VALUE_value(sect, i)->value, cnf); + generate_v3(sk_CONF_VALUE_value(sect, i)->value, cnf, + depth + 1, perr); if (!typ) goto bad; if (!sk_ASN1_TYPE_push(sk, typ)) diff --git a/Sources/BoringSSL/crypto/x509/by_dir.c b/Sources/BoringSSL/crypto/x509/by_dir.c index e68ca5a35..635b851f7 100644 --- a/Sources/BoringSSL/crypto/x509/by_dir.c +++ b/Sources/BoringSSL/crypto/x509/by_dir.c @@ -61,7 +61,6 @@ #include #include -#include #include #include #include @@ -84,8 +83,8 @@ typedef struct lookup_dir_st { STACK_OF(BY_DIR_ENTRY) *dirs; } BY_DIR; -DECLARE_STACK_OF(BY_DIR_HASH) -DECLARE_STACK_OF(BY_DIR_ENTRY) +DEFINE_STACK_OF(BY_DIR_HASH) +DEFINE_STACK_OF(BY_DIR_ENTRY) static int dir_ctrl(X509_LOOKUP *ctx, int cmd, const char *argp, long argl, char **ret); @@ -235,8 +234,7 @@ static int add_cert_dir(BY_DIR *ctx, const char *dir, int type) by_dir_entry_free(ent); return 0; } - strncpy(ent->dir, ss, len); - ent->dir[len] = '\0'; + BUF_strlcpy(ent->dir, ss, len + 1); if (!sk_BY_DIR_ENTRY_push(ctx->dirs, ent)) { by_dir_entry_free(ent); return 0; diff --git a/Sources/BoringSSL/crypto/x509/by_file.c b/Sources/BoringSSL/crypto/x509/by_file.c index ebeb72e5e..555cb8541 100644 --- a/Sources/BoringSSL/crypto/x509/by_file.c +++ b/Sources/BoringSSL/crypto/x509/by_file.c @@ -59,7 +59,6 @@ #include #include -#include #include #include @@ -89,12 +88,12 @@ static int by_file_ctrl(X509_LOOKUP *ctx, int cmd, const char *argp, long argl, char **ret) { int ok = 0; - char *file; + const char *file; switch (cmd) { case X509_L_FILE_LOAD: if (argl == X509_FILETYPE_DEFAULT) { - file = (char *)getenv(X509_get_default_cert_file_env()); + file = getenv(X509_get_default_cert_file_env()); if (file) ok = (X509_load_cert_crl_file(ctx, file, X509_FILETYPE_PEM) != 0); diff --git a/Sources/BoringSSL/crypto/x509/rsa_pss.c b/Sources/BoringSSL/crypto/x509/rsa_pss.c index 4913c3d20..923093430 100644 --- a/Sources/BoringSSL/crypto/x509/rsa_pss.c +++ b/Sources/BoringSSL/crypto/x509/rsa_pss.c @@ -279,11 +279,11 @@ int x509_rsa_pss_to_ctx(EVP_MD_CTX *ctx, X509_ALGOR *sigalg, EVP_PKEY *pkey) { goto err; } - EVP_PKEY_CTX *pkctx; - if (!EVP_DigestVerifyInit(ctx, &pkctx, md, NULL, pkey) || - !EVP_PKEY_CTX_set_rsa_padding(pkctx, RSA_PKCS1_PSS_PADDING) || - !EVP_PKEY_CTX_set_rsa_pss_saltlen(pkctx, saltlen) || - !EVP_PKEY_CTX_set_rsa_mgf1_md(pkctx, mgf1md)) { + EVP_PKEY_CTX *pctx; + if (!EVP_DigestVerifyInit(ctx, &pctx, md, NULL, pkey) || + 
!EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING) || + !EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, saltlen) || + !EVP_PKEY_CTX_set_rsa_mgf1_md(pctx, mgf1md)) { goto err; } diff --git a/Sources/BoringSSL/crypto/x509/t_x509.c b/Sources/BoringSSL/crypto/x509/t_x509.c index d4f6bba58..3339523c2 100644 --- a/Sources/BoringSSL/crypto/x509/t_x509.c +++ b/Sources/BoringSSL/crypto/x509/t_x509.c @@ -299,7 +299,8 @@ int X509_ocspid_print(BIO *bp, X509 *x) return (0); } -int X509_signature_print(BIO *bp, X509_ALGOR *sigalg, ASN1_STRING *sig) +int X509_signature_print(BIO *bp, const X509_ALGOR *sigalg, + const ASN1_STRING *sig) { if (BIO_puts(bp, " Signature Algorithm: ") <= 0) return 0; diff --git a/Sources/BoringSSL/crypto/x509/x509_def.c b/Sources/BoringSSL/crypto/x509/x509_def.c index 2bf2240c3..cb34ea4a9 100644 --- a/Sources/BoringSSL/crypto/x509/x509_def.c +++ b/Sources/BoringSSL/crypto/x509/x509_def.c @@ -59,7 +59,12 @@ /* TODO(fork): cleanup */ +#if defined(OPENSSL_FUCHSIA) +#define OPENSSLDIR "/system/data/boringssl" +#else #define OPENSSLDIR "/etc/ssl" +#endif + #define X509_CERT_AREA OPENSSLDIR #define X509_CERT_DIR OPENSSLDIR "/certs" #define X509_CERT_FILE OPENSSLDIR "/cert.pem" diff --git a/Sources/BoringSSL/crypto/x509/x509_lu.c b/Sources/BoringSSL/crypto/x509/x509_lu.c index 9e4596405..1a841dbe6 100644 --- a/Sources/BoringSSL/crypto/x509/x509_lu.c +++ b/Sources/BoringSSL/crypto/x509/x509_lu.c @@ -58,7 +58,6 @@ #include #include -#include #include #include #include @@ -356,8 +355,12 @@ int X509_STORE_add_cert(X509_STORE *ctx, X509 *x) OPENSSL_free(obj); OPENSSL_PUT_ERROR(X509, X509_R_CERT_ALREADY_IN_HASH_TABLE); ret = 0; - } else - sk_X509_OBJECT_push(ctx->objs, obj); + } else if (!sk_X509_OBJECT_push(ctx->objs, obj)) { + X509_OBJECT_free_contents(obj); + OPENSSL_free(obj); + OPENSSL_PUT_ERROR(X509, ERR_R_MALLOC_FAILURE); + ret = 0; + } CRYPTO_MUTEX_unlock_write(&ctx->objs_lock); @@ -388,8 +391,12 @@ int X509_STORE_add_crl(X509_STORE *ctx, X509_CRL *x) OPENSSL_free(obj); OPENSSL_PUT_ERROR(X509, X509_R_CERT_ALREADY_IN_HASH_TABLE); ret = 0; - } else - sk_X509_OBJECT_push(ctx->objs, obj); + } else if (!sk_X509_OBJECT_push(ctx->objs, obj)) { + X509_OBJECT_free_contents(obj); + OPENSSL_free(obj); + OPENSSL_PUT_ERROR(X509, ERR_R_MALLOC_FAILURE); + ret = 0; + } CRYPTO_MUTEX_unlock_write(&ctx->objs_lock); @@ -426,6 +433,19 @@ void X509_OBJECT_free_contents(X509_OBJECT *a) } } +int X509_OBJECT_get_type(const X509_OBJECT *a) +{ + return a->type; +} + +X509 *X509_OBJECT_get0_X509(const X509_OBJECT *a) +{ + if (a == NULL || a->type != X509_LU_X509) { + return NULL; + } + return a->data.x509; +} + static int x509_object_idx_cnt(STACK_OF(X509_OBJECT) *h, int type, X509_NAME *name, int *pnmatch) { @@ -488,6 +508,11 @@ X509_OBJECT *X509_OBJECT_retrieve_by_subject(STACK_OF(X509_OBJECT) *h, return sk_X509_OBJECT_value(h, idx); } +STACK_OF(X509_OBJECT) *X509_STORE_get0_objects(X509_STORE *st) +{ + return st->objs; +} + STACK_OF (X509) * X509_STORE_get1_certs(X509_STORE_CTX *ctx, X509_NAME *nm) { int i, idx, cnt; @@ -676,6 +701,11 @@ int X509_STORE_set1_param(X509_STORE *ctx, X509_VERIFY_PARAM *param) return X509_VERIFY_PARAM_set1(ctx->param, param); } +X509_VERIFY_PARAM *X509_STORE_get0_param(X509_STORE *ctx) +{ + return ctx->param; +} + void X509_STORE_set_verify_cb(X509_STORE *ctx, int (*verify_cb) (int, X509_STORE_CTX *)) { diff --git a/Sources/BoringSSL/crypto/x509/x509_obj.c b/Sources/BoringSSL/crypto/x509/x509_obj.c index 33eafc421..65b1bfb50 100644 --- a/Sources/BoringSSL/crypto/x509/x509_obj.c 
+++ b/Sources/BoringSSL/crypto/x509/x509_obj.c @@ -59,7 +59,6 @@ #include #include -#include #include #include #include @@ -102,8 +101,7 @@ char *X509_NAME_oneline(X509_NAME *a, char *buf, int len) buf = b->data; OPENSSL_free(b); } - strncpy(buf, "NO X509_NAME", len); - buf[len - 1] = '\0'; + BUF_strlcpy(buf, "NO X509_NAME", len); return buf; } diff --git a/Sources/BoringSSL/crypto/x509/x509_set.c b/Sources/BoringSSL/crypto/x509/x509_set.c index 67c1842c2..413a20d05 100644 --- a/Sources/BoringSSL/crypto/x509/x509_set.c +++ b/Sources/BoringSSL/crypto/x509/x509_set.c @@ -124,6 +124,11 @@ int X509_set_notBefore(X509 *x, const ASN1_TIME *tm) return (in != NULL); } +const ASN1_TIME *X509_get0_notBefore(const X509 *x) +{ + return x->cert_info->validity->notBefore; +} + int X509_set_notAfter(X509 *x, const ASN1_TIME *tm) { ASN1_TIME *in; @@ -141,6 +146,11 @@ int X509_set_notAfter(X509 *x, const ASN1_TIME *tm) return (in != NULL); } +const ASN1_TIME *X509_get0_notAfter(const X509 *x) +{ + return x->cert_info->validity->notAfter; +} + int X509_set_pubkey(X509 *x, EVP_PKEY *pkey) { if ((x == NULL) || (x->cert_info == NULL)) diff --git a/Sources/BoringSSL/crypto/x509/x509_txt.c b/Sources/BoringSSL/crypto/x509/x509_txt.c index 17e6cdb9c..753e72027 100644 --- a/Sources/BoringSSL/crypto/x509/x509_txt.c +++ b/Sources/BoringSSL/crypto/x509/x509_txt.c @@ -54,13 +54,7 @@ * copied and put under another distribution licence * [including the GNU Public Licence.] */ -#include -#include -#include -#include -#include #include -#include #include const char *X509_verify_cert_error_string(long n) diff --git a/Sources/BoringSSL/crypto/x509/x509_vfy.c b/Sources/BoringSSL/crypto/x509/x509_vfy.c index 27b58f45b..aff2ee953 100644 --- a/Sources/BoringSSL/crypto/x509/x509_vfy.c +++ b/Sources/BoringSSL/crypto/x509/x509_vfy.c @@ -61,7 +61,6 @@ #include #include #include -#include #include #include #include @@ -146,12 +145,6 @@ static int null_callback(int ok, X509_STORE_CTX *e) return ok; } -#if 0 -static int x509_subject_cmp(X509 **a, X509 **b) -{ - return X509_subject_name_cmp(*a, *b); -} -#endif /* Return 1 is a certificate is self signed */ static int cert_self_signed(X509 *x) { @@ -475,13 +468,6 @@ int X509_verify_cert(X509_STORE_CTX *ctx) /* We have the chain complete: now we need to check its purpose */ ok = check_chain_extensions(ctx); - if (!ok) - goto end; - - /* Check name constraints */ - - ok = check_name_constraints(ctx); - if (!ok) goto end; @@ -517,6 +503,12 @@ int X509_verify_cert(X509_STORE_CTX *ctx) if (!ok) goto end; + /* Check name constraints */ + + ok = check_name_constraints(ctx); + if (!ok) + goto end; + /* If we get this far evaluate policies */ if (!bad_chain && (ctx->param->flags & X509_V_FLAG_POLICY_CHECK)) ok = ctx->check_policy(ctx); @@ -2101,7 +2093,7 @@ X509_CRL *X509_CRL_diff(X509_CRL *base, X509_CRL *newer, int X509_STORE_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused * unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { /* @@ -2110,7 +2102,7 @@ int X509_STORE_CTX_get_ex_new_index(long argl, void *argp, */ int index; if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, - dup_func, free_func)) { + free_func)) { return -1; } return index; @@ -2183,6 +2175,11 @@ void X509_STORE_CTX_set_chain(X509_STORE_CTX *ctx, STACK_OF(X509) *sk) ctx->untrusted = sk; } +STACK_OF(X509) *X509_STORE_CTX_get0_untrusted(X509_STORE_CTX *ctx) +{ + return ctx->untrusted; +} + void X509_STORE_CTX_set0_crls(X509_STORE_CTX *ctx, STACK_OF(X509_CRL) 
*sk) { ctx->crls = sk; @@ -2260,10 +2257,15 @@ X509_STORE_CTX *X509_STORE_CTX_new(void) OPENSSL_PUT_ERROR(X509, ERR_R_MALLOC_FAILURE); return NULL; } - OPENSSL_memset(ctx, 0, sizeof(X509_STORE_CTX)); + X509_STORE_CTX_zero(ctx); return ctx; } +void X509_STORE_CTX_zero(X509_STORE_CTX *ctx) +{ + OPENSSL_memset(ctx, 0, sizeof(X509_STORE_CTX)); +} + void X509_STORE_CTX_free(X509_STORE_CTX *ctx) { if (ctx == NULL) { @@ -2278,7 +2280,7 @@ int X509_STORE_CTX_init(X509_STORE_CTX *ctx, X509_STORE *store, X509 *x509, { int ret = 1; - OPENSSL_memset(ctx, 0, sizeof(X509_STORE_CTX)); + X509_STORE_CTX_zero(ctx); ctx->ctx = store; ctx->cert = x509; ctx->untrusted = chain; diff --git a/Sources/BoringSSL/crypto/x509/x509_vpm.c b/Sources/BoringSSL/crypto/x509/x509_vpm.c index 2317214c1..d0f8f7947 100644 --- a/Sources/BoringSSL/crypto/x509/x509_vpm.c +++ b/Sources/BoringSSL/crypto/x509/x509_vpm.c @@ -57,7 +57,6 @@ #include #include -#include #include #include #include diff --git a/Sources/BoringSSL/crypto/x509/x509type.c b/Sources/BoringSSL/crypto/x509/x509type.c deleted file mode 100644 index d4f5a4dc3..000000000 --- a/Sources/BoringSSL/crypto/x509/x509type.c +++ /dev/null @@ -1,126 +0,0 @@ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. 
If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] */ - -#include -#include -#include - -int X509_certificate_type(X509 *x, EVP_PKEY *pkey) -{ - EVP_PKEY *pk; - int ret = 0, i; - - if (x == NULL) - return (0); - - if (pkey == NULL) - pk = X509_get_pubkey(x); - else - pk = pkey; - - if (pk == NULL) - return (0); - - switch (pk->type) { - case EVP_PKEY_RSA: - ret = EVP_PK_RSA | EVP_PKT_SIGN; -/* if (!sign only extension) */ - ret |= EVP_PKT_ENC; - break; - case EVP_PKEY_DSA: - ret = EVP_PK_DSA | EVP_PKT_SIGN; - break; - case EVP_PKEY_EC: - ret = EVP_PK_EC | EVP_PKT_SIGN | EVP_PKT_EXCH; - break; - case EVP_PKEY_DH: - ret = EVP_PK_DH | EVP_PKT_EXCH; - break; - case NID_id_GostR3410_94: - case NID_id_GostR3410_2001: - ret = EVP_PKT_EXCH | EVP_PKT_SIGN; - break; - default: - break; - } - - i = OBJ_obj2nid(x->sig_alg->algorithm); - if (i && OBJ_find_sigid_algs(i, NULL, &i)) { - - switch (i) { - case NID_rsaEncryption: - case NID_rsa: - ret |= EVP_PKS_RSA; - break; - case NID_dsa: - case NID_dsa_2: - ret |= EVP_PKS_DSA; - break; - case NID_X9_62_id_ecPublicKey: - ret |= EVP_PKS_EC; - break; - default: - break; - } - } - - if (EVP_PKEY_size(pk) <= 1024 / 8) /* /8 because it's 1024 bits we look - * for, not bytes */ - ret |= EVP_PKT_EXP; - if (pkey == NULL) - EVP_PKEY_free(pk); - return (ret); -} diff --git a/Sources/BoringSSL/crypto/x509/x_algor.c b/Sources/BoringSSL/crypto/x509/x_algor.c index abacd061d..13c9a8cc8 100644 --- a/Sources/BoringSSL/crypto/x509/x_algor.c +++ b/Sources/BoringSSL/crypto/x509/x_algor.c @@ -105,8 +105,8 @@ int X509_ALGOR_set0(X509_ALGOR *alg, const ASN1_OBJECT *aobj, int ptype, return 1; } -void X509_ALGOR_get0(ASN1_OBJECT **paobj, int *pptype, void **ppval, - X509_ALGOR *algor) +void X509_ALGOR_get0(const ASN1_OBJECT **paobj, int *pptype, const void **ppval, + const X509_ALGOR *algor) { if (paobj) *paobj = algor->algorithm; diff --git a/Sources/BoringSSL/crypto/x509/x_name.c b/Sources/BoringSSL/crypto/x509/x_name.c index 4abdc9163..f132e6b6e 100644 --- a/Sources/BoringSSL/crypto/x509/x_name.c +++ b/Sources/BoringSSL/crypto/x509/x_name.c @@ -71,7 +71,7 @@ typedef STACK_OF(X509_NAME_ENTRY) STACK_OF_X509_NAME_ENTRY; -DECLARE_STACK_OF(STACK_OF_X509_NAME_ENTRY) +DEFINE_STACK_OF(STACK_OF_X509_NAME_ENTRY) /* * Maximum length of X509_NAME: much larger than anything we should @@ -244,7 +244,7 @@ 
static int x509_name_ex_d2i(ASN1_VALUE **val, entry->set = i; if (!sk_X509_NAME_ENTRY_push(nm.x->entries, entry)) goto err; - sk_X509_NAME_ENTRY_set(entries, j, NULL); + (void)sk_X509_NAME_ENTRY_set(entries, j, NULL); } } ret = x509_name_canon(nm.x); @@ -492,7 +492,7 @@ static int asn1_string_canon(ASN1_STRING *out, ASN1_STRING *in) } while (!(*from & 0x80) && isspace(*from)); } else { - *to++ = tolower(*from); + *to++ = OPENSSL_tolower(*from); from++; i++; } @@ -526,19 +526,16 @@ static int i2d_name_canon(STACK_OF(STACK_OF_X509_NAME_ENTRY) * _intname, int X509_NAME_set(X509_NAME **xn, X509_NAME *name) { - X509_NAME *in; - - if (!xn || !name) - return (0); - - if (*xn != name) { - in = X509_NAME_dup(name); - if (in != NULL) { - X509_NAME_free(*xn); - *xn = in; - } - } - return (*xn != NULL); + if ((name = X509_NAME_dup(name)) == NULL) + return 0; + X509_NAME_free(*xn); + *xn = name; + return 1; } IMPLEMENT_ASN1_SET_OF(X509_NAME_ENTRY) + +int X509_NAME_ENTRY_set(const X509_NAME_ENTRY *ne) +{ + return ne->set; +} diff --git a/Sources/BoringSSL/crypto/x509/x_x509.c b/Sources/BoringSSL/crypto/x509/x_x509.c index 15118d299..01464a1fe 100644 --- a/Sources/BoringSSL/crypto/x509/x_x509.c +++ b/Sources/BoringSSL/crypto/x509/x_x509.c @@ -188,11 +188,11 @@ int X509_up_ref(X509 *x) } int X509_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused * unused, - CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func) + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { int index; if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, - dup_func, free_func)) { + free_func)) { return -1; } return index; @@ -313,7 +313,7 @@ int i2d_X509_AUX(X509 *a, unsigned char **pp) return length; } -void X509_get0_signature(ASN1_BIT_STRING **psig, X509_ALGOR **palg, +void X509_get0_signature(const ASN1_BIT_STRING **psig, const X509_ALGOR **palg, const X509 *x) { if (psig) diff --git a/Sources/BoringSSL/crypto/x509/x_x509a.c b/Sources/BoringSSL/crypto/x509/x_x509a.c index a63ee4224..dccc46aed 100644 --- a/Sources/BoringSSL/crypto/x509/x_x509a.c +++ b/Sources/BoringSSL/crypto/x509/x_x509a.c @@ -196,10 +196,3 @@ void X509_reject_clear(X509 *x) x->aux->reject = NULL; } } - -ASN1_SEQUENCE(X509_CERT_PAIR) = { - ASN1_EXP_OPT(X509_CERT_PAIR, forward, X509, 0), - ASN1_EXP_OPT(X509_CERT_PAIR, reverse, X509, 1) -} ASN1_SEQUENCE_END(X509_CERT_PAIR) - -IMPLEMENT_ASN1_FUNCTIONS(X509_CERT_PAIR) diff --git a/Sources/BoringSSL/crypto/x509v3/ext_dat.h b/Sources/BoringSSL/crypto/x509v3/ext_dat.h index 9ece19c50..78fa79362 100644 --- a/Sources/BoringSSL/crypto/x509v3/ext_dat.h +++ b/Sources/BoringSSL/crypto/x509v3/ext_dat.h @@ -56,6 +56,10 @@ /* This file contains a table of "standard" extensions */ +#if defined(__cplusplus) +extern "C" { +#endif + extern const X509V3_EXT_METHOD v3_bcons, v3_nscert, v3_key_usage, v3_ext_ku; extern const X509V3_EXT_METHOD v3_pkey_usage_period, v3_sxnet, v3_info, v3_sinfo; @@ -133,3 +137,7 @@ static const X509V3_EXT_METHOD *const standard_exts[] = { /* Number of standard extensions */ #define STANDARD_EXTENSION_COUNT (sizeof(standard_exts)/sizeof(X509V3_EXT_METHOD *)) + +#if defined(__cplusplus) +} /* extern C */ +#endif diff --git a/Sources/BoringSSL/crypto/x509v3/pcy_int.h b/Sources/BoringSSL/crypto/x509v3/pcy_int.h index 1e7650374..fc6e20a97 100644 --- a/Sources/BoringSSL/crypto/x509v3/pcy_int.h +++ b/Sources/BoringSSL/crypto/x509v3/pcy_int.h @@ -59,7 +59,7 @@ typedef struct X509_POLICY_DATA_st X509_POLICY_DATA; -DECLARE_STACK_OF(X509_POLICY_DATA) +DEFINE_STACK_OF(X509_POLICY_DATA) /* 
Internal structures */ @@ -207,7 +207,7 @@ X509_POLICY_NODE *tree_find_sk(STACK_OF(X509_POLICY_NODE) *sk, const ASN1_OBJECT *id); X509_POLICY_NODE *level_add_node(X509_POLICY_LEVEL *level, - const X509_POLICY_DATA *data, + X509_POLICY_DATA *data, X509_POLICY_NODE *parent, X509_POLICY_TREE *tree); void policy_node_free(X509_POLICY_NODE *node); diff --git a/Sources/BoringSSL/crypto/x509v3/pcy_lib.c b/Sources/BoringSSL/crypto/x509v3/pcy_lib.c index 764f38f91..7d5f0675f 100644 --- a/Sources/BoringSSL/crypto/x509v3/pcy_lib.c +++ b/Sources/BoringSSL/crypto/x509v3/pcy_lib.c @@ -137,15 +137,6 @@ const ASN1_OBJECT *X509_policy_node_get0_policy(const X509_POLICY_NODE *node) return node->data->valid_policy; } -#if 0 -int X509_policy_node_get_critical(const X509_POLICY_NODE *node) -{ - if (node_critical(node)) - return 1; - return 0; -} -#endif - STACK_OF(POLICYQUALINFO) *X509_policy_node_get0_qualifiers(const X509_POLICY_NODE *node) diff --git a/Sources/BoringSSL/crypto/x509v3/pcy_node.c b/Sources/BoringSSL/crypto/x509v3/pcy_node.c index cf4e79d8b..b3edfe480 100644 --- a/Sources/BoringSSL/crypto/x509v3/pcy_node.c +++ b/Sources/BoringSSL/crypto/x509v3/pcy_node.c @@ -107,7 +107,7 @@ X509_POLICY_NODE *level_find_node(const X509_POLICY_LEVEL *level, } X509_POLICY_NODE *level_add_node(X509_POLICY_LEVEL *level, - const X509_POLICY_DATA *data, + X509_POLICY_DATA *data, X509_POLICY_NODE *parent, X509_POLICY_TREE *tree) { diff --git a/Sources/BoringSSL/crypto/x509v3/pcy_tree.c b/Sources/BoringSSL/crypto/x509v3/pcy_tree.c index a588107b8..256fe88e6 100644 --- a/Sources/BoringSSL/crypto/x509v3/pcy_tree.c +++ b/Sources/BoringSSL/crypto/x509v3/pcy_tree.c @@ -136,11 +136,14 @@ static void tree_print(char *str, X509_POLICY_TREE *tree, #endif -/* - * Initialize policy tree. Return values: 0 Some internal error occured. -1 - * Inconsistent or invalid extensions in certificates. 1 Tree initialized - * OK. 2 Policy tree is empty. 5 Tree OK and requireExplicitPolicy true. 6 - * Tree empty and requireExplicitPolicy true. +/*- + * Initialize policy tree. Return values: + * 0 Some internal error occurred. + * -1 Inconsistent or invalid extensions in certificates. + * 1 Tree initialized OK. + * 2 Policy tree is empty. + * 5 Tree OK and requireExplicitPolicy true. + * 6 Tree empty and requireExplicitPolicy true. */ static int tree_init(X509_POLICY_TREE **ptree, STACK_OF(X509) *certs, @@ -306,7 +309,7 @@ static int tree_init(X509_POLICY_TREE **ptree, STACK_OF(X509) *certs, } static int tree_link_matching_nodes(X509_POLICY_LEVEL *curr, - const X509_POLICY_DATA *data) + X509_POLICY_DATA *data) { X509_POLICY_LEVEL *last = curr - 1; X509_POLICY_NODE *node; @@ -720,10 +723,13 @@ void X509_policy_tree_free(X509_POLICY_TREE *tree) } -/* - * Application policy checking function. Return codes: 0 Internal Error. 1 - * Successful. -1 One or more certificates contain invalid or inconsistent - * extensions -2 User constrained policy set empty and requireExplicit true. +/*- + * Application policy checking function. + * Return codes: + * 0 Internal Error. + * 1 Successful. + * -1 One or more certificates contain invalid or inconsistent extensions + * -2 User constrained policy set empty and requireExplicit true. 
*/ int X509_policy_check(X509_POLICY_TREE **ptree, int *pexplicit_policy, @@ -731,6 +737,7 @@ int X509_policy_check(X509_POLICY_TREE **ptree, int *pexplicit_policy, STACK_OF(ASN1_OBJECT) *policy_oids, unsigned int flags) { int ret; + int calc_ret; X509_POLICY_TREE *tree = NULL; STACK_OF(X509_POLICY_NODE) *nodes, *auth_nodes = NULL; *ptree = NULL; @@ -799,17 +806,20 @@ int X509_policy_check(X509_POLICY_TREE **ptree, int *pexplicit_policy, /* Tree is not empty: continue */ - ret = tree_calculate_authority_set(tree, &auth_nodes); + calc_ret = tree_calculate_authority_set(tree, &auth_nodes); - if (!ret) + if (!calc_ret) goto error; - if (!tree_calculate_user_set(tree, policy_oids, auth_nodes)) - goto error; + ret = tree_calculate_user_set(tree, policy_oids, auth_nodes); - if (ret == 2) + if (calc_ret == 2) sk_X509_POLICY_NODE_free(auth_nodes); + if (!ret) + goto error; + + if (tree) *ptree = tree; diff --git a/Sources/BoringSSL/crypto/x509v3/v3_alt.c b/Sources/BoringSSL/crypto/x509v3/v3_alt.c index 152bd79f8..b78a4105e 100644 --- a/Sources/BoringSSL/crypto/x509v3/v3_alt.c +++ b/Sources/BoringSSL/crypto/x509v3/v3_alt.c @@ -121,32 +121,39 @@ STACK_OF(CONF_VALUE) *i2v_GENERAL_NAME(X509V3_EXT_METHOD *method, int i; switch (gen->type) { case GEN_OTHERNAME: - X509V3_add_value("othername", "", &ret); + if (!X509V3_add_value("othername", "", &ret)) + return NULL; break; case GEN_X400: - X509V3_add_value("X400Name", "", &ret); + if (!X509V3_add_value("X400Name", "", &ret)) + return NULL; break; case GEN_EDIPARTY: - X509V3_add_value("EdiPartyName", "", &ret); + if (!X509V3_add_value("EdiPartyName", "", &ret)) + return NULL; break; case GEN_EMAIL: - X509V3_add_value_uchar("email", gen->d.ia5->data, &ret); + if (!X509V3_add_value_uchar("email", gen->d.ia5->data, &ret)) + return NULL; break; case GEN_DNS: - X509V3_add_value_uchar("DNS", gen->d.ia5->data, &ret); + if (!X509V3_add_value_uchar("DNS", gen->d.ia5->data, &ret)) + return NULL; break; case GEN_URI: - X509V3_add_value_uchar("URI", gen->d.ia5->data, &ret); + if (!X509V3_add_value_uchar("URI", gen->d.ia5->data, &ret)) + return NULL; break; case GEN_DIRNAME: - X509_NAME_oneline(gen->d.dirn, oline, 256); - X509V3_add_value("DirName", oline, &ret); + if (X509_NAME_oneline(gen->d.dirn, oline, 256) == NULL + || !X509V3_add_value("DirName", oline, &ret)) + return NULL; break; case GEN_IPADD: @@ -159,20 +166,23 @@ STACK_OF(CONF_VALUE) *i2v_GENERAL_NAME(X509V3_EXT_METHOD *method, for (i = 0; i < 8; i++) { BIO_snprintf(htmp, sizeof htmp, "%X", p[0] << 8 | p[1]); p += 2; - strcat(oline, htmp); + BUF_strlcat(oline, htmp, sizeof(oline)); if (i != 7) - strcat(oline, ":"); + BUF_strlcat(oline, ":", sizeof(oline)); } } else { - X509V3_add_value("IP Address", "", &ret); + if (!X509V3_add_value("IP Address", "", &ret)) + return NULL; break; } - X509V3_add_value("IP Address", oline, &ret); + if (!X509V3_add_value("IP Address", oline, &ret)) + return NULL; break; case GEN_RID: i2t_ASN1_OBJECT(oline, 256, gen->d.rid); - X509V3_add_value("Registered ID", oline, &ret); + if (!X509V3_add_value("Registered ID", oline, &ret)) + return NULL; break; } return ret; @@ -578,8 +588,7 @@ static int do_othername(GENERAL_NAME *gen, char *value, X509V3_CTX *ctx) objtmp = OPENSSL_malloc(objlen + 1); if (objtmp == NULL) return 0; - strncpy(objtmp, value, objlen); - objtmp[objlen] = 0; + BUF_strlcpy(objtmp, value, objlen + 1); gen->d.otherName->type_id = OBJ_txt2obj(objtmp, 0); OPENSSL_free(objtmp); if (!gen->d.otherName->type_id) diff --git a/Sources/BoringSSL/crypto/x509v3/v3_cpols.c 
b/Sources/BoringSSL/crypto/x509v3/v3_cpols.c index 7de54962b..4def530ab 100644 --- a/Sources/BoringSSL/crypto/x509v3/v3_cpols.c +++ b/Sources/BoringSSL/crypto/x509v3/v3_cpols.c @@ -468,9 +468,15 @@ static void print_notice(BIO *out, USERNOTICE *notice, int indent) num = sk_ASN1_INTEGER_value(ref->noticenos, i); if (i) BIO_puts(out, ", "); - tmp = i2s_ASN1_INTEGER(NULL, num); - BIO_puts(out, tmp); - OPENSSL_free(tmp); + if (num == NULL) + BIO_puts(out, "(null)"); + else { + tmp = i2s_ASN1_INTEGER(NULL, num); + if (tmp == NULL) + return; + BIO_puts(out, tmp); + OPENSSL_free(tmp); + } } BIO_puts(out, "\n"); } diff --git a/Sources/BoringSSL/crypto/x509v3/v3_genn.c b/Sources/BoringSSL/crypto/x509v3/v3_genn.c index 2331cd490..8c926879e 100644 --- a/Sources/BoringSSL/crypto/x509v3/v3_genn.c +++ b/Sources/BoringSSL/crypto/x509v3/v3_genn.c @@ -231,6 +231,7 @@ int GENERAL_NAME_set0_othername(GENERAL_NAME *gen, oth = OTHERNAME_new(); if (!oth) return 0; + ASN1_TYPE_free(oth->value); oth->type_id = oid; oth->value = value; GENERAL_NAME_set0_value(gen, GEN_OTHERNAME, oth); diff --git a/Sources/BoringSSL/crypto/x509v3/v3_info.c b/Sources/BoringSSL/crypto/x509v3/v3_info.c index 482208d7c..ff96489e4 100644 --- a/Sources/BoringSSL/crypto/x509v3/v3_info.c +++ b/Sources/BoringSSL/crypto/x509v3/v3_info.c @@ -112,29 +112,31 @@ ASN1_ITEM_TEMPLATE_END(AUTHORITY_INFO_ACCESS) IMPLEMENT_ASN1_FUNCTIONS(AUTHORITY_INFO_ACCESS) -static STACK_OF(CONF_VALUE) *i2v_AUTHORITY_INFO_ACCESS(X509V3_EXT_METHOD - *method, AUTHORITY_INFO_ACCESS - *ainfo, STACK_OF(CONF_VALUE) - *ret) +static STACK_OF(CONF_VALUE) *i2v_AUTHORITY_INFO_ACCESS( + X509V3_EXT_METHOD *method, AUTHORITY_INFO_ACCESS *ainfo, + STACK_OF(CONF_VALUE) *ret) { ACCESS_DESCRIPTION *desc; size_t i; int nlen; char objtmp[80], *ntmp; CONF_VALUE *vtmp; + STACK_OF(CONF_VALUE) *tret = ret; + for (i = 0; i < sk_ACCESS_DESCRIPTION_num(ainfo); i++) { + STACK_OF(CONF_VALUE) *tmp; + desc = sk_ACCESS_DESCRIPTION_value(ainfo, i); - ret = i2v_GENERAL_NAME(method, desc->location, ret); - if (!ret) - break; - vtmp = sk_CONF_VALUE_value(ret, i); + tmp = i2v_GENERAL_NAME(method, desc->location, tret); + if (tmp == NULL) + goto err; + tret = tmp; + vtmp = sk_CONF_VALUE_value(tret, i); i2t_ASN1_OBJECT(objtmp, sizeof objtmp, desc->method); nlen = strlen(objtmp) + strlen(vtmp->name) + 5; ntmp = OPENSSL_malloc(nlen); - if (!ntmp) { - OPENSSL_PUT_ERROR(X509V3, ERR_R_MALLOC_FAILURE); - return NULL; - } + if (ntmp == NULL) + goto err; BUF_strlcpy(ntmp, objtmp, nlen); BUF_strlcat(ntmp, " - ", nlen); BUF_strlcat(ntmp, vtmp->name, nlen); @@ -142,9 +144,15 @@ static STACK_OF(CONF_VALUE) *i2v_AUTHORITY_INFO_ACCESS(X509V3_EXT_METHOD vtmp->name = ntmp; } - if (!ret) + if (ret == NULL && tret == NULL) return sk_CONF_VALUE_new_null(); - return ret; + + return tret; + err: + OPENSSL_PUT_ERROR(X509V3, ERR_R_MALLOC_FAILURE); + if (ret == NULL && tret != NULL) + sk_CONF_VALUE_pop_free(tret, X509V3_conf_free); + return NULL; } static AUTHORITY_INFO_ACCESS *v2i_AUTHORITY_INFO_ACCESS(X509V3_EXT_METHOD @@ -184,8 +192,7 @@ static AUTHORITY_INFO_ACCESS *v2i_AUTHORITY_INFO_ACCESS(X509V3_EXT_METHOD OPENSSL_PUT_ERROR(X509V3, ERR_R_MALLOC_FAILURE); goto err; } - strncpy(objtmp, cnf->name, objlen); - objtmp[objlen] = 0; + BUF_strlcpy(objtmp, cnf->name, objlen + 1); acc->method = OBJ_txt2obj(objtmp, 0); if (!acc->method) { OPENSSL_PUT_ERROR(X509V3, X509V3_R_BAD_OBJECT); diff --git a/Sources/BoringSSL/crypto/x509v3/v3_lib.c b/Sources/BoringSSL/crypto/x509v3/v3_lib.c index c4718e33b..8f5435d70 100644 --- 
a/Sources/BoringSSL/crypto/x509v3/v3_lib.c +++ b/Sources/BoringSSL/crypto/x509v3/v3_lib.c @@ -288,9 +288,9 @@ void *X509V3_get_d2i(STACK_OF(X509_EXTENSION) *x, int nid, int *crit, int X509V3_add1_i2d(STACK_OF(X509_EXTENSION) **x, int nid, void *value, int crit, unsigned long flags) { - int extidx = -1; - int errcode; - X509_EXTENSION *ext, *extmp; + int errcode, extidx = -1; + X509_EXTENSION *ext = NULL, *extmp; + STACK_OF(X509_EXTENSION) *ret = NULL; unsigned long ext_op = flags & X509V3_ADD_OP_MASK; /* @@ -348,13 +348,21 @@ int X509V3_add1_i2d(STACK_OF(X509_EXTENSION) **x, int nid, void *value, return 1; } - if (!*x && !(*x = sk_X509_EXTENSION_new_null())) - return -1; - if (!sk_X509_EXTENSION_push(*x, ext)) - return -1; + if ((ret = *x) == NULL + && (ret = sk_X509_EXTENSION_new_null()) == NULL) + goto m_fail; + if (!sk_X509_EXTENSION_push(ret, ext)) + goto m_fail; + *x = ret; return 1; + m_fail: + if (ret != *x) + sk_X509_EXTENSION_free(ret); + X509_EXTENSION_free(ext); + return -1; + err: if (!(flags & X509V3_ADD_SILENT)) OPENSSL_PUT_ERROR(X509V3, errcode); diff --git a/Sources/BoringSSL/crypto/x509v3/v3_ncons.c b/Sources/BoringSSL/crypto/x509v3/v3_ncons.c index fc2843ef1..593a520e4 100644 --- a/Sources/BoringSSL/crypto/x509v3/v3_ncons.c +++ b/Sources/BoringSSL/crypto/x509v3/v3_ncons.c @@ -214,17 +214,18 @@ static int print_nc_ipadd(BIO *bp, ASN1_OCTET_STRING *ip) return 1; } -/* - * Check a certificate conforms to a specified set of constraints. Return - * values: X509_V_OK: All constraints obeyed. - * X509_V_ERR_PERMITTED_VIOLATION: Permitted subtree violation. - * X509_V_ERR_EXCLUDED_VIOLATION: Excluded subtree violation. - * X509_V_ERR_SUBTREE_MINMAX: Min or max values present and matching type. - * X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE: Unsupported constraint type. - * X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX: bad unsupported constraint - * syntax. X509_V_ERR_UNSUPPORTED_NAME_SYNTAX: bad or unsupported syntax of - * name - * +/*- + * Check a certificate conforms to a specified set of constraints. + * Return values: + * X509_V_OK: All constraints obeyed. + * X509_V_ERR_PERMITTED_VIOLATION: Permitted subtree violation. + * X509_V_ERR_EXCLUDED_VIOLATION: Excluded subtree violation. + * X509_V_ERR_SUBTREE_MINMAX: Min or max values present and matching type. + * X509_V_ERR_UNSPECIFIED: Unspecified error. + * X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE: Unsupported constraint type. + * X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX: Bad or unsupported constraint + * syntax. + * X509_V_ERR_UNSUPPORTED_NAME_SYNTAX: Bad or unsupported syntax of name. */ int NAME_CONSTRAINTS_check(X509 *x, NAME_CONSTRAINTS *nc) @@ -235,6 +236,21 @@ int NAME_CONSTRAINTS_check(X509 *x, NAME_CONSTRAINTS *nc) nm = X509_get_subject_name(x); + /* Guard against certificates with an excessive number of names or + * constraints causing a computationally expensive name constraints + * check. 
*/ + size_t name_count = + X509_NAME_entry_count(nm) + sk_GENERAL_NAME_num(x->altname); + size_t constraint_count = sk_GENERAL_SUBTREE_num(nc->permittedSubtrees) + + sk_GENERAL_SUBTREE_num(nc->excludedSubtrees); + size_t check_count = constraint_count * name_count; + if (name_count < (size_t)X509_NAME_entry_count(nm) || + constraint_count < sk_GENERAL_SUBTREE_num(nc->permittedSubtrees) || + (constraint_count && check_count / constraint_count != name_count) || + check_count > 1 << 20) { + return X509_V_ERR_UNSPECIFIED; + } + if (X509_NAME_entry_count(nm) > 0) { GENERAL_NAME gntmp; gntmp.type = GEN_DIRNAME; diff --git a/Sources/BoringSSL/crypto/x509v3/v3_pci.c b/Sources/BoringSSL/crypto/x509v3/v3_pci.c index 68dca5e7b..4352abee3 100644 --- a/Sources/BoringSSL/crypto/x509v3/v3_pci.c +++ b/Sources/BoringSSL/crypto/x509v3/v3_pci.c @@ -35,7 +35,6 @@ * SUCH DAMAGE. */ -#include #include #include @@ -153,38 +152,6 @@ static int process_pci_value(CONF_VALUE *val, goto err; } OPENSSL_free(tmp_data2); - } else if (strncmp(val->value, "file:", 5) == 0) { - unsigned char buf[2048]; - int n; - BIO *b = BIO_new_file(val->value + 5, "r"); - if (!b) { - OPENSSL_PUT_ERROR(X509V3, ERR_R_BIO_LIB); - X509V3_conf_err(val); - goto err; - } - while ((n = BIO_read(b, buf, sizeof(buf))) > 0 - || (n == 0 && BIO_should_retry(b))) { - if (!n) - continue; - - tmp_data = OPENSSL_realloc((*policy)->data, - (*policy)->length + n + 1); - - if (!tmp_data) - break; - - (*policy)->data = tmp_data; - OPENSSL_memcpy(&(*policy)->data[(*policy)->length], buf, n); - (*policy)->length += n; - (*policy)->data[(*policy)->length] = '\0'; - } - BIO_free_all(b); - - if (n < 0) { - OPENSSL_PUT_ERROR(X509V3, ERR_R_BIO_LIB); - X509V3_conf_err(val); - goto err; - } } else if (strncmp(val->value, "text:", 5) == 0) { val_len = strlen(val->value + 5); tmp_data = OPENSSL_realloc((*policy)->data, diff --git a/Sources/BoringSSL/crypto/x509v3/v3_utl.c b/Sources/BoringSSL/crypto/x509v3/v3_utl.c index fe7787bbe..7d109ee6e 100644 --- a/Sources/BoringSSL/crypto/x509v3/v3_utl.c +++ b/Sources/BoringSSL/crypto/x509v3/v3_utl.c @@ -155,6 +155,45 @@ int X509V3_add_value_bool_nf(char *name, int asn1_bool, return 1; } +static char *bignum_to_string(const BIGNUM *bn) +{ + char *tmp, *ret; + size_t len; + + /* + * Display large numbers in hex and small numbers in decimal. Converting to + * decimal takes quadratic time and is no more useful than hex for large + * numbers. + */ + if (BN_num_bits(bn) < 32) { + return BN_bn2dec(bn); + } + + tmp = BN_bn2hex(bn); + if (tmp == NULL) { + return NULL; + } + + len = strlen(tmp) + 3; + ret = OPENSSL_malloc(len); + if (ret == NULL) { + OPENSSL_PUT_ERROR(X509V3, ERR_R_MALLOC_FAILURE); + OPENSSL_free(tmp); + return NULL; + } + + /* Prepend "0x", but place it after the "-" if negative. 
*/ + if (tmp[0] == '-') { + BUF_strlcpy(ret, "-0x", len); + BUF_strlcat(ret, tmp + 1, len); + } else { + BUF_strlcpy(ret, "0x", len); + BUF_strlcat(ret, tmp, len); + } + OPENSSL_free(tmp); + return ret; +} + char *i2s_ASN1_ENUMERATED(X509V3_EXT_METHOD *method, ASN1_ENUMERATED *a) { BIGNUM *bntmp = NULL; @@ -162,7 +201,7 @@ char *i2s_ASN1_ENUMERATED(X509V3_EXT_METHOD *method, ASN1_ENUMERATED *a) if (!a) return NULL; if (!(bntmp = ASN1_ENUMERATED_to_BN(a, NULL)) || - !(strtmp = BN_bn2dec(bntmp))) + !(strtmp = bignum_to_string(bntmp))) OPENSSL_PUT_ERROR(X509V3, ERR_R_MALLOC_FAILURE); BN_free(bntmp); return strtmp; @@ -175,7 +214,7 @@ char *i2s_ASN1_INTEGER(X509V3_EXT_METHOD *method, ASN1_INTEGER *a) if (!a) return NULL; if (!(bntmp = ASN1_INTEGER_to_BN(a, NULL)) || - !(strtmp = BN_bn2dec(bntmp))) + !(strtmp = bignum_to_string(bntmp))) OPENSSL_PUT_ERROR(X509V3, ERR_R_MALLOC_FAILURE); BN_free(bntmp); return strtmp; @@ -454,15 +493,13 @@ unsigned char *string_to_hex(const char *str, long *len) OPENSSL_free(hexbuf); return NULL; } - if (isupper(ch)) - ch = tolower(ch); - if (isupper(cl)) - cl = tolower(cl); if ((ch >= '0') && (ch <= '9')) ch -= '0'; else if ((ch >= 'a') && (ch <= 'f')) ch -= 'a' - 10; + else if ((ch >= 'A') && (ch <= 'F')) + ch -= 'A' - 10; else goto badhex; @@ -470,6 +507,8 @@ unsigned char *string_to_hex(const char *str, long *len) cl -= '0'; else if ((cl >= 'a') && (cl <= 'f')) cl -= 'a' - 10; + else if ((cl >= 'A') && (cl <= 'F')) + cl -= 'A' - 10; else goto badhex; diff --git a/Sources/BoringSSL/err_data.c b/Sources/BoringSSL/err_data.c index 88462d137..e69de29bb 100644 --- a/Sources/BoringSSL/err_data.c +++ b/Sources/BoringSSL/err_data.c @@ -1,1288 +0,0 @@ -/* Copyright (c) 2015, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - - /* This file was generated by err_data_generate.go. 
*/ - -#include -#include -#include - - -OPENSSL_COMPILE_ASSERT(ERR_LIB_NONE == 1, library_values_changed_1); -OPENSSL_COMPILE_ASSERT(ERR_LIB_SYS == 2, library_values_changed_2); -OPENSSL_COMPILE_ASSERT(ERR_LIB_BN == 3, library_values_changed_3); -OPENSSL_COMPILE_ASSERT(ERR_LIB_RSA == 4, library_values_changed_4); -OPENSSL_COMPILE_ASSERT(ERR_LIB_DH == 5, library_values_changed_5); -OPENSSL_COMPILE_ASSERT(ERR_LIB_EVP == 6, library_values_changed_6); -OPENSSL_COMPILE_ASSERT(ERR_LIB_BUF == 7, library_values_changed_7); -OPENSSL_COMPILE_ASSERT(ERR_LIB_OBJ == 8, library_values_changed_8); -OPENSSL_COMPILE_ASSERT(ERR_LIB_PEM == 9, library_values_changed_9); -OPENSSL_COMPILE_ASSERT(ERR_LIB_DSA == 10, library_values_changed_10); -OPENSSL_COMPILE_ASSERT(ERR_LIB_X509 == 11, library_values_changed_11); -OPENSSL_COMPILE_ASSERT(ERR_LIB_ASN1 == 12, library_values_changed_12); -OPENSSL_COMPILE_ASSERT(ERR_LIB_CONF == 13, library_values_changed_13); -OPENSSL_COMPILE_ASSERT(ERR_LIB_CRYPTO == 14, library_values_changed_14); -OPENSSL_COMPILE_ASSERT(ERR_LIB_EC == 15, library_values_changed_15); -OPENSSL_COMPILE_ASSERT(ERR_LIB_SSL == 16, library_values_changed_16); -OPENSSL_COMPILE_ASSERT(ERR_LIB_BIO == 17, library_values_changed_17); -OPENSSL_COMPILE_ASSERT(ERR_LIB_PKCS7 == 18, library_values_changed_18); -OPENSSL_COMPILE_ASSERT(ERR_LIB_PKCS8 == 19, library_values_changed_19); -OPENSSL_COMPILE_ASSERT(ERR_LIB_X509V3 == 20, library_values_changed_20); -OPENSSL_COMPILE_ASSERT(ERR_LIB_RAND == 21, library_values_changed_21); -OPENSSL_COMPILE_ASSERT(ERR_LIB_ENGINE == 22, library_values_changed_22); -OPENSSL_COMPILE_ASSERT(ERR_LIB_OCSP == 23, library_values_changed_23); -OPENSSL_COMPILE_ASSERT(ERR_LIB_UI == 24, library_values_changed_24); -OPENSSL_COMPILE_ASSERT(ERR_LIB_COMP == 25, library_values_changed_25); -OPENSSL_COMPILE_ASSERT(ERR_LIB_ECDSA == 26, library_values_changed_26); -OPENSSL_COMPILE_ASSERT(ERR_LIB_ECDH == 27, library_values_changed_27); -OPENSSL_COMPILE_ASSERT(ERR_LIB_HMAC == 28, library_values_changed_28); -OPENSSL_COMPILE_ASSERT(ERR_LIB_DIGEST == 29, library_values_changed_29); -OPENSSL_COMPILE_ASSERT(ERR_LIB_CIPHER == 30, library_values_changed_30); -OPENSSL_COMPILE_ASSERT(ERR_LIB_HKDF == 31, library_values_changed_31); -OPENSSL_COMPILE_ASSERT(ERR_LIB_USER == 32, library_values_changed_32); -OPENSSL_COMPILE_ASSERT(ERR_NUM_LIBS == 33, library_values_changed_num); - -const uint32_t kOpenSSLReasonValues[] = { - 0xc320838, - 0xc328852, - 0xc330861, - 0xc338871, - 0xc340880, - 0xc348899, - 0xc3508a5, - 0xc3588c2, - 0xc3608d4, - 0xc3688e2, - 0xc3708f2, - 0xc3788ff, - 0xc38090f, - 0xc38891a, - 0xc390930, - 0xc39893f, - 0xc3a0953, - 0xc3a8845, - 0xc3b00ea, - 0x10320845, - 0x103293ab, - 0x103313b7, - 0x103393d0, - 0x103413e3, - 0x10348e8b, - 0x10350c19, - 0x103593f6, - 0x1036140b, - 0x1036941e, - 0x1037143d, - 0x10379456, - 0x1038146b, - 0x10389489, - 0x10391498, - 0x103994b4, - 0x103a14cf, - 0x103a94de, - 0x103b14fa, - 0x103b9515, - 0x103c152c, - 0x103c80ea, - 0x103d153d, - 0x103d9551, - 0x103e1570, - 0x103e957f, - 0x103f1596, - 0x103f95a9, - 0x10400bea, - 0x104095bc, - 0x104115da, - 0x104195ed, - 0x10421607, - 0x10429617, - 0x1043162b, - 0x10439641, - 0x10441659, - 0x1044966e, - 0x10451682, - 0x10459694, - 0x104605fb, - 0x1046893f, - 0x104716a9, - 0x104796c0, - 0x104816d5, - 0x104896e3, - 0x14320bcd, - 0x14328bdb, - 0x14330bea, - 0x14338bfc, - 0x143400ac, - 0x143480ea, - 0x18320083, - 0x18328ee1, - 0x183300ac, - 0x18338ef7, - 0x18340f0b, - 0x183480ea, - 0x18350f20, - 0x18358f38, - 0x18360f4d, - 0x18368f61, 
- 0x18370f85, - 0x18378f9b, - 0x18380faf, - 0x18388fbf, - 0x18390a57, - 0x18398fcf, - 0x183a0fe4, - 0x183a8ff8, - 0x183b0c25, - 0x183b9005, - 0x183c1017, - 0x183c9022, - 0x183d1032, - 0x183d9043, - 0x183e1054, - 0x183e9066, - 0x183f108f, - 0x183f90a8, - 0x184010c0, - 0x184086d3, - 0x203210e7, - 0x243210f3, - 0x24328985, - 0x24331105, - 0x24339112, - 0x2434111f, - 0x24349131, - 0x24351140, - 0x2435915d, - 0x2436116a, - 0x24369178, - 0x24371186, - 0x24379194, - 0x2438119d, - 0x243891aa, - 0x243911bd, - 0x28320c0d, - 0x28328c25, - 0x28330bea, - 0x28338c38, - 0x28340c19, - 0x283480ac, - 0x283500ea, - 0x2c3229b1, - 0x2c32a9bf, - 0x2c3329d1, - 0x2c33a9e3, - 0x2c3429f7, - 0x2c34aa09, - 0x2c352a24, - 0x2c35aa36, - 0x2c362a49, - 0x2c36832d, - 0x2c372a56, - 0x2c37aa68, - 0x2c382a7b, - 0x2c38aa92, - 0x2c392aa0, - 0x2c39aab0, - 0x2c3a2ac2, - 0x2c3aaad6, - 0x2c3b2ae7, - 0x2c3bab06, - 0x2c3c2b1a, - 0x2c3cab30, - 0x2c3d2b49, - 0x2c3dab66, - 0x2c3e2b77, - 0x2c3eab85, - 0x2c3f2b9d, - 0x2c3fabb5, - 0x2c402bc2, - 0x2c4090e7, - 0x2c412bd3, - 0x2c41abe6, - 0x2c4210c0, - 0x2c42abf7, - 0x2c430720, - 0x2c43aaf8, - 0x30320000, - 0x30328015, - 0x3033001f, - 0x30338038, - 0x3034004a, - 0x30348064, - 0x3035006b, - 0x30358083, - 0x30360094, - 0x303680ac, - 0x303700b9, - 0x303780c8, - 0x303800ea, - 0x303880f7, - 0x3039010a, - 0x30398125, - 0x303a013a, - 0x303a814e, - 0x303b0162, - 0x303b8173, - 0x303c018c, - 0x303c81a9, - 0x303d01b7, - 0x303d81cb, - 0x303e01db, - 0x303e81f4, - 0x303f0204, - 0x303f8217, - 0x30400226, - 0x30408232, - 0x30410247, - 0x30418257, - 0x3042026e, - 0x3042827b, - 0x3043028e, - 0x3043829d, - 0x304402b2, - 0x304482d3, - 0x304502e6, - 0x304582f9, - 0x30460312, - 0x3046832d, - 0x3047034a, - 0x30478363, - 0x30480371, - 0x30488382, - 0x30490391, - 0x304983a9, - 0x304a03bb, - 0x304a83cf, - 0x304b03ee, - 0x304b8401, - 0x304c040c, - 0x304c841d, - 0x304d0429, - 0x304d843f, - 0x304e044d, - 0x304e8463, - 0x304f0475, - 0x304f8487, - 0x3050049a, - 0x305084ad, - 0x305104be, - 0x305184ce, - 0x305204e6, - 0x305284fb, - 0x30530513, - 0x30538527, - 0x3054053f, - 0x30548558, - 0x30550571, - 0x3055858e, - 0x30560599, - 0x305685b1, - 0x305705c1, - 0x305785d2, - 0x305805e5, - 0x305885fb, - 0x30590604, - 0x30598619, - 0x305a062c, - 0x305a863b, - 0x305b065b, - 0x305b866a, - 0x305c068b, - 0x305c86a7, - 0x305d06b3, - 0x305d86d3, - 0x305e06ef, - 0x305e8700, - 0x305f0716, - 0x305f8720, - 0x34320b47, - 0x34328b5b, - 0x34330b78, - 0x34338b8b, - 0x34340b9a, - 0x34348bb7, - 0x3c320083, - 0x3c328c62, - 0x3c330c7b, - 0x3c338c96, - 0x3c340cb3, - 0x3c348cdd, - 0x3c350cf8, - 0x3c358d1e, - 0x3c360d37, - 0x3c368d4f, - 0x3c370d60, - 0x3c378d6e, - 0x3c380d7b, - 0x3c388d8f, - 0x3c390c25, - 0x3c398da3, - 0x3c3a0db7, - 0x3c3a88ff, - 0x3c3b0dc7, - 0x3c3b8de2, - 0x3c3c0df4, - 0x3c3c8e0a, - 0x3c3d0e14, - 0x3c3d8e28, - 0x3c3e0e36, - 0x3c3e8e5b, - 0x3c3f0c4e, - 0x3c3f8e44, - 0x3c4000ac, - 0x3c4080ea, - 0x3c410cce, - 0x3c418d0d, - 0x403216fa, - 0x40329710, - 0x4033173e, - 0x40339748, - 0x4034175f, - 0x4034977d, - 0x4035178d, - 0x4035979f, - 0x403617ac, - 0x403697b8, - 0x403717cd, - 0x403797df, - 0x403817ea, - 0x403897fc, - 0x40390e8b, - 0x4039980c, - 0x403a181f, - 0x403a9840, - 0x403b1851, - 0x403b9861, - 0x403c0064, - 0x403c8083, - 0x403d18c1, - 0x403d98d7, - 0x403e18e6, - 0x403e98f9, - 0x403f1913, - 0x403f9921, - 0x40401936, - 0x4040994a, - 0x40411967, - 0x40419982, - 0x4042199b, - 0x404299ae, - 0x404319c2, - 0x404399da, - 0x404419f1, - 0x404480ac, - 0x40451a06, - 0x40459a18, - 0x40461a3c, - 0x40469a5c, - 0x40471a6a, - 0x40479a91, - 
0x40481ace, - 0x40489ae7, - 0x40491afe, - 0x40499b18, - 0x404a1b2f, - 0x404a9b4d, - 0x404b1b65, - 0x404b9b7c, - 0x404c1b92, - 0x404c9ba4, - 0x404d1bc5, - 0x404d9be7, - 0x404e1bfb, - 0x404e9c08, - 0x404f1c35, - 0x404f9c5e, - 0x40501c99, - 0x40509cad, - 0x40511cc8, - 0x40519cd8, - 0x40521cef, - 0x40529d13, - 0x40531d2b, - 0x40539d3e, - 0x40541d53, - 0x40549d76, - 0x40551d84, - 0x40559da1, - 0x40561dae, - 0x40569dc7, - 0x40571ddf, - 0x40579df2, - 0x40581e07, - 0x40589e2e, - 0x40591e5d, - 0x40599e8a, - 0x405a1e9e, - 0x405a9eae, - 0x405b1ec6, - 0x405b9ed7, - 0x405c1eea, - 0x405c9f0b, - 0x405d1f18, - 0x405d9f2f, - 0x405e1f6d, - 0x405e8a95, - 0x405f1f8e, - 0x405f9f9b, - 0x40601fa9, - 0x40609fcb, - 0x4061200f, - 0x4061a047, - 0x4062205e, - 0x4062a06f, - 0x40632080, - 0x4063a095, - 0x406420ac, - 0x4064a0d8, - 0x406520f3, - 0x4065a10a, - 0x40662122, - 0x4066a14c, - 0x40672177, - 0x4067a198, - 0x406821ab, - 0x4068a1cc, - 0x406921fe, - 0x4069a22c, - 0x406a224d, - 0x406aa26d, - 0x406b23f5, - 0x406ba418, - 0x406c242e, - 0x406ca690, - 0x406d26bf, - 0x406da6e7, - 0x406e2715, - 0x406ea749, - 0x406f2768, - 0x406fa77d, - 0x40702790, - 0x4070a7ad, - 0x40710800, - 0x4071a7bf, - 0x407227d2, - 0x4072a7eb, - 0x40732803, - 0x4073936d, - 0x40742817, - 0x4074a831, - 0x40752842, - 0x4075a856, - 0x40762864, - 0x407691aa, - 0x40772889, - 0x4077a8ab, - 0x407828c6, - 0x4078a8ff, - 0x40792916, - 0x4079a92c, - 0x407a2938, - 0x407aa94b, - 0x407b2960, - 0x407ba972, - 0x407c2987, - 0x407ca990, - 0x407d21e7, - 0x407d9c6e, - 0x407e28db, - 0x407e9e3e, - 0x407f1a7e, - 0x407f9887, - 0x40801c45, - 0x40809aa6, - 0x40811d01, - 0x40819c1f, - 0x40822700, - 0x4082986d, - 0x40831e19, - 0x4083a0bd, - 0x40841aba, - 0x40849e76, - 0x40851efb, - 0x40859ff3, - 0x40861f4f, - 0x40869c88, - 0x4087272d, - 0x4087a024, - 0x408818aa, - 0x41f42320, - 0x41f923b2, - 0x41fe22a5, - 0x41fea481, - 0x41ff2572, - 0x42032339, - 0x4208235b, - 0x4208a397, - 0x42092289, - 0x4209a3d1, - 0x420a22e0, - 0x420aa2c0, - 0x420b2300, - 0x420ba379, - 0x420c258e, - 0x420ca44e, - 0x420d2468, - 0x420da49f, - 0x421224b9, - 0x42172555, - 0x4217a4fb, - 0x421c251d, - 0x421f24d8, - 0x422125a5, - 0x42262538, - 0x422b2674, - 0x422ba622, - 0x422c265c, - 0x422ca5e1, - 0x422d25c0, - 0x422da641, - 0x422e2607, - 0x4432072b, - 0x4432873a, - 0x44330746, - 0x44338754, - 0x44340767, - 0x44348778, - 0x4435077f, - 0x44358789, - 0x4436079c, - 0x443687b2, - 0x443707c4, - 0x443787d1, - 0x443807e0, - 0x443887e8, - 0x44390800, - 0x4439880e, - 0x443a0821, - 0x4c3211d4, - 0x4c3291e4, - 0x4c3311f7, - 0x4c339217, - 0x4c3400ac, - 0x4c3480ea, - 0x4c351223, - 0x4c359231, - 0x4c36124d, - 0x4c369260, - 0x4c37126f, - 0x4c37927d, - 0x4c381292, - 0x4c38929e, - 0x4c3912be, - 0x4c3992e8, - 0x4c3a1301, - 0x4c3a931a, - 0x4c3b05fb, - 0x4c3b9333, - 0x4c3c1345, - 0x4c3c9354, - 0x4c3d136d, - 0x4c3d937c, - 0x4c3e1389, - 0x50322c09, - 0x5032ac18, - 0x50332c23, - 0x5033ac33, - 0x50342c4c, - 0x5034ac66, - 0x50352c74, - 0x5035ac8a, - 0x50362c9c, - 0x5036acb2, - 0x50372ccb, - 0x5037acde, - 0x50382cf6, - 0x5038ad07, - 0x50392d1c, - 0x5039ad30, - 0x503a2d50, - 0x503aad66, - 0x503b2d7e, - 0x503bad90, - 0x503c2dac, - 0x503cadc3, - 0x503d2ddc, - 0x503dadf2, - 0x503e2dff, - 0x503eae15, - 0x503f2e27, - 0x503f8382, - 0x50402e3a, - 0x5040ae4a, - 0x50412e64, - 0x5041ae73, - 0x50422e8d, - 0x5042aeaa, - 0x50432eba, - 0x5043aeca, - 0x50442ed9, - 0x5044843f, - 0x50452eed, - 0x5045af0b, - 0x50462f1e, - 0x5046af34, - 0x50472f46, - 0x5047af5b, - 0x50482f81, - 0x5048af8f, - 0x50492fa2, - 0x5049afb7, - 0x504a2fcd, - 0x504aafdd, - 0x504b2ffd, 
- 0x504bb010, - 0x504c3033, - 0x504cb061, - 0x504d3073, - 0x504db090, - 0x504e30ab, - 0x504eb0c7, - 0x504f30d9, - 0x504fb0f0, - 0x505030ff, - 0x505086ef, - 0x50513112, - 0x58320ec9, - 0x68320e8b, - 0x68328c25, - 0x68330c38, - 0x68338e99, - 0x68340ea9, - 0x683480ea, - 0x6c320e67, - 0x6c328bfc, - 0x6c330e72, - 0x74320a0b, - 0x78320970, - 0x78328985, - 0x78330991, - 0x78338083, - 0x783409a0, - 0x783489b5, - 0x783509d4, - 0x783589f6, - 0x78360a0b, - 0x78368a21, - 0x78370a31, - 0x78378a44, - 0x78380a57, - 0x78388a69, - 0x78390a76, - 0x78398a95, - 0x783a0aaa, - 0x783a8ab8, - 0x783b0ac2, - 0x783b8ad6, - 0x783c0aed, - 0x783c8b02, - 0x783d0b19, - 0x783d8b2e, - 0x783e0a84, - 0x7c3210d6, -}; - -const size_t kOpenSSLReasonValuesLen = sizeof(kOpenSSLReasonValues) / sizeof(kOpenSSLReasonValues[0]); - -const char kOpenSSLReasonStringData[] = - "ASN1_LENGTH_MISMATCH\0" - "AUX_ERROR\0" - "BAD_GET_ASN1_OBJECT_CALL\0" - "BAD_OBJECT_HEADER\0" - "BMPSTRING_IS_WRONG_LENGTH\0" - "BN_LIB\0" - "BOOLEAN_IS_WRONG_LENGTH\0" - "BUFFER_TOO_SMALL\0" - "CONTEXT_NOT_INITIALISED\0" - "DECODE_ERROR\0" - "DEPTH_EXCEEDED\0" - "DIGEST_AND_KEY_TYPE_NOT_SUPPORTED\0" - "ENCODE_ERROR\0" - "ERROR_GETTING_TIME\0" - "EXPECTING_AN_ASN1_SEQUENCE\0" - "EXPECTING_AN_INTEGER\0" - "EXPECTING_AN_OBJECT\0" - "EXPECTING_A_BOOLEAN\0" - "EXPECTING_A_TIME\0" - "EXPLICIT_LENGTH_MISMATCH\0" - "EXPLICIT_TAG_NOT_CONSTRUCTED\0" - "FIELD_MISSING\0" - "FIRST_NUM_TOO_LARGE\0" - "HEADER_TOO_LONG\0" - "ILLEGAL_BITSTRING_FORMAT\0" - "ILLEGAL_BOOLEAN\0" - "ILLEGAL_CHARACTERS\0" - "ILLEGAL_FORMAT\0" - "ILLEGAL_HEX\0" - "ILLEGAL_IMPLICIT_TAG\0" - "ILLEGAL_INTEGER\0" - "ILLEGAL_NESTED_TAGGING\0" - "ILLEGAL_NULL\0" - "ILLEGAL_NULL_VALUE\0" - "ILLEGAL_OBJECT\0" - "ILLEGAL_OPTIONAL_ANY\0" - "ILLEGAL_OPTIONS_ON_ITEM_TEMPLATE\0" - "ILLEGAL_TAGGED_ANY\0" - "ILLEGAL_TIME_VALUE\0" - "INTEGER_NOT_ASCII_FORMAT\0" - "INTEGER_TOO_LARGE_FOR_LONG\0" - "INVALID_BIT_STRING_BITS_LEFT\0" - "INVALID_BMPSTRING_LENGTH\0" - "INVALID_DIGIT\0" - "INVALID_MODIFIER\0" - "INVALID_NUMBER\0" - "INVALID_OBJECT_ENCODING\0" - "INVALID_SEPARATOR\0" - "INVALID_TIME_FORMAT\0" - "INVALID_UNIVERSALSTRING_LENGTH\0" - "INVALID_UTF8STRING\0" - "LIST_ERROR\0" - "MISSING_ASN1_EOS\0" - "MISSING_EOC\0" - "MISSING_SECOND_NUMBER\0" - "MISSING_VALUE\0" - "MSTRING_NOT_UNIVERSAL\0" - "MSTRING_WRONG_TAG\0" - "NESTED_ASN1_ERROR\0" - "NESTED_ASN1_STRING\0" - "NON_HEX_CHARACTERS\0" - "NOT_ASCII_FORMAT\0" - "NOT_ENOUGH_DATA\0" - "NO_MATCHING_CHOICE_TYPE\0" - "NULL_IS_WRONG_LENGTH\0" - "OBJECT_NOT_ASCII_FORMAT\0" - "ODD_NUMBER_OF_CHARS\0" - "SECOND_NUMBER_TOO_LARGE\0" - "SEQUENCE_LENGTH_MISMATCH\0" - "SEQUENCE_NOT_CONSTRUCTED\0" - "SEQUENCE_OR_SET_NEEDS_CONFIG\0" - "SHORT_LINE\0" - "STREAMING_NOT_SUPPORTED\0" - "STRING_TOO_LONG\0" - "STRING_TOO_SHORT\0" - "TAG_VALUE_TOO_HIGH\0" - "TIME_NOT_ASCII_FORMAT\0" - "TOO_LONG\0" - "TYPE_NOT_CONSTRUCTED\0" - "TYPE_NOT_PRIMITIVE\0" - "UNEXPECTED_EOC\0" - "UNIVERSALSTRING_IS_WRONG_LENGTH\0" - "UNKNOWN_FORMAT\0" - "UNKNOWN_MESSAGE_DIGEST_ALGORITHM\0" - "UNKNOWN_SIGNATURE_ALGORITHM\0" - "UNKNOWN_TAG\0" - "UNSUPPORTED_ANY_DEFINED_BY_TYPE\0" - "UNSUPPORTED_PUBLIC_KEY_TYPE\0" - "UNSUPPORTED_TYPE\0" - "WRONG_PUBLIC_KEY_TYPE\0" - "WRONG_TAG\0" - "WRONG_TYPE\0" - "BAD_FOPEN_MODE\0" - "BROKEN_PIPE\0" - "CONNECT_ERROR\0" - "ERROR_SETTING_NBIO\0" - "INVALID_ARGUMENT\0" - "IN_USE\0" - "KEEPALIVE\0" - "NBIO_CONNECT_ERROR\0" - "NO_HOSTNAME_SPECIFIED\0" - "NO_PORT_SPECIFIED\0" - "NO_SUCH_FILE\0" - "NULL_PARAMETER\0" - "SYS_LIB\0" - "UNABLE_TO_CREATE_SOCKET\0" - "UNINITIALIZED\0" - 
"UNSUPPORTED_METHOD\0" - "WRITE_TO_READ_ONLY_BIO\0" - "ARG2_LT_ARG3\0" - "BAD_ENCODING\0" - "BAD_RECIPROCAL\0" - "BIGNUM_TOO_LONG\0" - "BITS_TOO_SMALL\0" - "CALLED_WITH_EVEN_MODULUS\0" - "DIV_BY_ZERO\0" - "EXPAND_ON_STATIC_BIGNUM_DATA\0" - "INPUT_NOT_REDUCED\0" - "INVALID_RANGE\0" - "NEGATIVE_NUMBER\0" - "NOT_A_SQUARE\0" - "NOT_INITIALIZED\0" - "NO_INVERSE\0" - "PRIVATE_KEY_TOO_LARGE\0" - "P_IS_NOT_PRIME\0" - "TOO_MANY_ITERATIONS\0" - "TOO_MANY_TEMPORARY_VARIABLES\0" - "AES_KEY_SETUP_FAILED\0" - "BAD_DECRYPT\0" - "BAD_KEY_LENGTH\0" - "CTRL_NOT_IMPLEMENTED\0" - "CTRL_OPERATION_NOT_IMPLEMENTED\0" - "DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH\0" - "INITIALIZATION_ERROR\0" - "INPUT_NOT_INITIALIZED\0" - "INVALID_AD_SIZE\0" - "INVALID_KEY_LENGTH\0" - "INVALID_NONCE_SIZE\0" - "INVALID_OPERATION\0" - "IV_TOO_LARGE\0" - "NO_CIPHER_SET\0" - "NO_DIRECTION_SET\0" - "OUTPUT_ALIASES_INPUT\0" - "TAG_TOO_LARGE\0" - "TOO_LARGE\0" - "UNSUPPORTED_AD_SIZE\0" - "UNSUPPORTED_INPUT_SIZE\0" - "UNSUPPORTED_KEY_SIZE\0" - "UNSUPPORTED_NONCE_SIZE\0" - "UNSUPPORTED_TAG_SIZE\0" - "WRONG_FINAL_BLOCK_LENGTH\0" - "LIST_CANNOT_BE_NULL\0" - "MISSING_CLOSE_SQUARE_BRACKET\0" - "MISSING_EQUAL_SIGN\0" - "NO_CLOSE_BRACE\0" - "UNABLE_TO_CREATE_NEW_SECTION\0" - "VARIABLE_HAS_NO_VALUE\0" - "BAD_GENERATOR\0" - "INVALID_PUBKEY\0" - "MODULUS_TOO_LARGE\0" - "NO_PRIVATE_VALUE\0" - "BAD_Q_VALUE\0" - "BAD_VERSION\0" - "MISSING_PARAMETERS\0" - "NEED_NEW_SETUP_VALUES\0" - "BIGNUM_OUT_OF_RANGE\0" - "COORDINATES_OUT_OF_RANGE\0" - "D2I_ECPKPARAMETERS_FAILURE\0" - "EC_GROUP_NEW_BY_NAME_FAILURE\0" - "GROUP2PKPARAMETERS_FAILURE\0" - "GROUP_MISMATCH\0" - "I2D_ECPKPARAMETERS_FAILURE\0" - "INCOMPATIBLE_OBJECTS\0" - "INVALID_COFACTOR\0" - "INVALID_COMPRESSED_POINT\0" - "INVALID_COMPRESSION_BIT\0" - "INVALID_ENCODING\0" - "INVALID_FIELD\0" - "INVALID_FORM\0" - "INVALID_GROUP_ORDER\0" - "INVALID_PRIVATE_KEY\0" - "MISSING_PRIVATE_KEY\0" - "NON_NAMED_CURVE\0" - "PKPARAMETERS2GROUP_FAILURE\0" - "POINT_AT_INFINITY\0" - "POINT_IS_NOT_ON_CURVE\0" - "SLOT_FULL\0" - "UNDEFINED_GENERATOR\0" - "UNKNOWN_GROUP\0" - "UNKNOWN_ORDER\0" - "WRONG_CURVE_PARAMETERS\0" - "WRONG_ORDER\0" - "KDF_FAILED\0" - "POINT_ARITHMETIC_FAILURE\0" - "BAD_SIGNATURE\0" - "NOT_IMPLEMENTED\0" - "RANDOM_NUMBER_GENERATION_FAILED\0" - "OPERATION_NOT_SUPPORTED\0" - "COMMAND_NOT_SUPPORTED\0" - "DIFFERENT_KEY_TYPES\0" - "DIFFERENT_PARAMETERS\0" - "EXPECTING_AN_EC_KEY_KEY\0" - "EXPECTING_AN_RSA_KEY\0" - "EXPECTING_A_DSA_KEY\0" - "ILLEGAL_OR_UNSUPPORTED_PADDING_MODE\0" - "INVALID_DIGEST_LENGTH\0" - "INVALID_DIGEST_TYPE\0" - "INVALID_KEYBITS\0" - "INVALID_MGF1_MD\0" - "INVALID_PADDING_MODE\0" - "INVALID_PSS_SALTLEN\0" - "KEYS_NOT_SET\0" - "NO_DEFAULT_DIGEST\0" - "NO_KEY_SET\0" - "NO_MDC2_SUPPORT\0" - "NO_NID_FOR_CURVE\0" - "NO_OPERATION_SET\0" - "NO_PARAMETERS_SET\0" - "OPERATION_NOT_SUPPORTED_FOR_THIS_KEYTYPE\0" - "OPERATON_NOT_INITIALIZED\0" - "UNKNOWN_PUBLIC_KEY_TYPE\0" - "UNSUPPORTED_ALGORITHM\0" - "OUTPUT_TOO_LARGE\0" - "UNKNOWN_NID\0" - "BAD_BASE64_DECODE\0" - "BAD_END_LINE\0" - "BAD_IV_CHARS\0" - "BAD_PASSWORD_READ\0" - "CIPHER_IS_NULL\0" - "ERROR_CONVERTING_PRIVATE_KEY\0" - "NOT_DEK_INFO\0" - "NOT_ENCRYPTED\0" - "NOT_PROC_TYPE\0" - "NO_START_LINE\0" - "READ_KEY\0" - "SHORT_HEADER\0" - "UNSUPPORTED_CIPHER\0" - "UNSUPPORTED_ENCRYPTION\0" - "BAD_PKCS12_DATA\0" - "BAD_PKCS12_VERSION\0" - "CIPHER_HAS_NO_OBJECT_IDENTIFIER\0" - "CRYPT_ERROR\0" - "ENCRYPT_ERROR\0" - "ERROR_SETTING_CIPHER_PARAMS\0" - "INCORRECT_PASSWORD\0" - "KEYGEN_FAILURE\0" - "KEY_GEN_ERROR\0" - "METHOD_NOT_SUPPORTED\0" - "MISSING_MAC\0" 
- "MULTIPLE_PRIVATE_KEYS_IN_PKCS12\0" - "PKCS12_PUBLIC_KEY_INTEGRITY_NOT_SUPPORTED\0" - "PKCS12_TOO_DEEPLY_NESTED\0" - "PRIVATE_KEY_DECODE_ERROR\0" - "PRIVATE_KEY_ENCODE_ERROR\0" - "UNKNOWN_ALGORITHM\0" - "UNKNOWN_CIPHER\0" - "UNKNOWN_CIPHER_ALGORITHM\0" - "UNKNOWN_DIGEST\0" - "UNKNOWN_HASH\0" - "UNSUPPORTED_PRIVATE_KEY_ALGORITHM\0" - "BAD_E_VALUE\0" - "BAD_FIXED_HEADER_DECRYPT\0" - "BAD_PAD_BYTE_COUNT\0" - "BAD_RSA_PARAMETERS\0" - "BLOCK_TYPE_IS_NOT_01\0" - "BN_NOT_INITIALIZED\0" - "CANNOT_RECOVER_MULTI_PRIME_KEY\0" - "CRT_PARAMS_ALREADY_GIVEN\0" - "CRT_VALUES_INCORRECT\0" - "DATA_LEN_NOT_EQUAL_TO_MOD_LEN\0" - "DATA_TOO_LARGE\0" - "DATA_TOO_LARGE_FOR_KEY_SIZE\0" - "DATA_TOO_LARGE_FOR_MODULUS\0" - "DATA_TOO_SMALL\0" - "DATA_TOO_SMALL_FOR_KEY_SIZE\0" - "DIGEST_TOO_BIG_FOR_RSA_KEY\0" - "D_E_NOT_CONGRUENT_TO_1\0" - "EMPTY_PUBLIC_KEY\0" - "FIRST_OCTET_INVALID\0" - "INCONSISTENT_SET_OF_CRT_VALUES\0" - "INTERNAL_ERROR\0" - "INVALID_MESSAGE_LENGTH\0" - "KEY_SIZE_TOO_SMALL\0" - "LAST_OCTET_INVALID\0" - "MUST_HAVE_AT_LEAST_TWO_PRIMES\0" - "NO_PUBLIC_EXPONENT\0" - "NULL_BEFORE_BLOCK_MISSING\0" - "N_NOT_EQUAL_P_Q\0" - "OAEP_DECODING_ERROR\0" - "ONLY_ONE_OF_P_Q_GIVEN\0" - "OUTPUT_BUFFER_TOO_SMALL\0" - "PADDING_CHECK_FAILED\0" - "PKCS_DECODING_ERROR\0" - "SLEN_CHECK_FAILED\0" - "SLEN_RECOVERY_FAILED\0" - "UNKNOWN_ALGORITHM_TYPE\0" - "UNKNOWN_PADDING_TYPE\0" - "VALUE_MISSING\0" - "WRONG_SIGNATURE_LENGTH\0" - "APP_DATA_IN_HANDSHAKE\0" - "ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT\0" - "BAD_ALERT\0" - "BAD_CHANGE_CIPHER_SPEC\0" - "BAD_DATA_RETURNED_BY_CALLBACK\0" - "BAD_DH_P_LENGTH\0" - "BAD_DIGEST_LENGTH\0" - "BAD_ECC_CERT\0" - "BAD_ECPOINT\0" - "BAD_HANDSHAKE_RECORD\0" - "BAD_HELLO_REQUEST\0" - "BAD_LENGTH\0" - "BAD_PACKET_LENGTH\0" - "BAD_RSA_ENCRYPT\0" - "BAD_SRTP_MKI_VALUE\0" - "BAD_SRTP_PROTECTION_PROFILE_LIST\0" - "BAD_SSL_FILETYPE\0" - "BAD_WRITE_RETRY\0" - "BIO_NOT_SET\0" - "BLOCK_CIPHER_PAD_IS_WRONG\0" - "BUFFERED_MESSAGES_ON_CIPHER_CHANGE\0" - "CANNOT_PARSE_LEAF_CERT\0" - "CA_DN_LENGTH_MISMATCH\0" - "CA_DN_TOO_LONG\0" - "CCS_RECEIVED_EARLY\0" - "CERTIFICATE_VERIFY_FAILED\0" - "CERT_CB_ERROR\0" - "CERT_LENGTH_MISMATCH\0" - "CHANNEL_ID_NOT_P256\0" - "CHANNEL_ID_SIGNATURE_INVALID\0" - "CIPHER_OR_HASH_UNAVAILABLE\0" - "CLIENTHELLO_PARSE_FAILED\0" - "CLIENTHELLO_TLSEXT\0" - "CONNECTION_REJECTED\0" - "CONNECTION_TYPE_NOT_SET\0" - "CUSTOM_EXTENSION_ERROR\0" - "DATA_LENGTH_TOO_LONG\0" - "DECRYPTION_FAILED\0" - "DECRYPTION_FAILED_OR_BAD_RECORD_MAC\0" - "DH_PUBLIC_VALUE_LENGTH_IS_WRONG\0" - "DH_P_TOO_LONG\0" - "DIGEST_CHECK_FAILED\0" - "DOWNGRADE_DETECTED\0" - "DTLS_MESSAGE_TOO_BIG\0" - "DUPLICATE_EXTENSION\0" - "DUPLICATE_KEY_SHARE\0" - "ECC_CERT_NOT_FOR_SIGNING\0" - "EMS_STATE_INCONSISTENT\0" - "ENCRYPTED_LENGTH_TOO_LONG\0" - "ERROR_ADDING_EXTENSION\0" - "ERROR_IN_RECEIVED_CIPHER_LIST\0" - "ERROR_PARSING_EXTENSION\0" - "EXCESSIVE_MESSAGE_SIZE\0" - "EXTRA_DATA_IN_MESSAGE\0" - "FRAGMENT_MISMATCH\0" - "GOT_NEXT_PROTO_WITHOUT_EXTENSION\0" - "HANDSHAKE_FAILURE_ON_CLIENT_HELLO\0" - "HTTPS_PROXY_REQUEST\0" - "HTTP_REQUEST\0" - "INAPPROPRIATE_FALLBACK\0" - "INVALID_ALPN_PROTOCOL\0" - "INVALID_COMMAND\0" - "INVALID_COMPRESSION_LIST\0" - "INVALID_MESSAGE\0" - "INVALID_OUTER_RECORD_TYPE\0" - "INVALID_SCT_LIST\0" - "INVALID_SSL_SESSION\0" - "INVALID_TICKET_KEYS_LENGTH\0" - "LENGTH_MISMATCH\0" - "LIBRARY_HAS_NO_CIPHERS\0" - "MISSING_EXTENSION\0" - "MISSING_KEY_SHARE\0" - "MISSING_RSA_CERTIFICATE\0" - "MISSING_TMP_DH_KEY\0" - "MISSING_TMP_ECDH_KEY\0" - "MIXED_SPECIAL_OPERATOR_WITH_GROUPS\0" - "MTU_TOO_SMALL\0" - 
"NEGOTIATED_BOTH_NPN_AND_ALPN\0" - "NESTED_GROUP\0" - "NO_CERTIFICATES_RETURNED\0" - "NO_CERTIFICATE_ASSIGNED\0" - "NO_CERTIFICATE_SET\0" - "NO_CIPHERS_AVAILABLE\0" - "NO_CIPHERS_PASSED\0" - "NO_CIPHERS_SPECIFIED\0" - "NO_CIPHER_MATCH\0" - "NO_COMMON_SIGNATURE_ALGORITHMS\0" - "NO_COMPRESSION_SPECIFIED\0" - "NO_GROUPS_SPECIFIED\0" - "NO_METHOD_SPECIFIED\0" - "NO_P256_SUPPORT\0" - "NO_PRIVATE_KEY_ASSIGNED\0" - "NO_RENEGOTIATION\0" - "NO_REQUIRED_DIGEST\0" - "NO_SHARED_CIPHER\0" - "NO_SHARED_GROUP\0" - "NULL_SSL_CTX\0" - "NULL_SSL_METHOD_PASSED\0" - "OLD_SESSION_CIPHER_NOT_RETURNED\0" - "OLD_SESSION_PRF_HASH_MISMATCH\0" - "OLD_SESSION_VERSION_NOT_RETURNED\0" - "PARSE_TLSEXT\0" - "PATH_TOO_LONG\0" - "PEER_DID_NOT_RETURN_A_CERTIFICATE\0" - "PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE\0" - "PRE_SHARED_KEY_MUST_BE_LAST\0" - "PROTOCOL_IS_SHUTDOWN\0" - "PSK_IDENTITY_BINDER_COUNT_MISMATCH\0" - "PSK_IDENTITY_NOT_FOUND\0" - "PSK_NO_CLIENT_CB\0" - "PSK_NO_SERVER_CB\0" - "READ_TIMEOUT_EXPIRED\0" - "RECORD_LENGTH_MISMATCH\0" - "RECORD_TOO_LARGE\0" - "RENEGOTIATION_EMS_MISMATCH\0" - "RENEGOTIATION_ENCODING_ERR\0" - "RENEGOTIATION_MISMATCH\0" - "REQUIRED_CIPHER_MISSING\0" - "RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION\0" - "RESUMED_NON_EMS_SESSION_WITH_EMS_EXTENSION\0" - "SCSV_RECEIVED_WHEN_RENEGOTIATING\0" - "SERVERHELLO_TLSEXT\0" - "SESSION_ID_CONTEXT_UNINITIALIZED\0" - "SESSION_MAY_NOT_BE_CREATED\0" - "SHUTDOWN_WHILE_IN_INIT\0" - "SIGNATURE_ALGORITHMS_EXTENSION_SENT_BY_SERVER\0" - "SRTP_COULD_NOT_ALLOCATE_PROFILES\0" - "SRTP_UNKNOWN_PROTECTION_PROFILE\0" - "SSL3_EXT_INVALID_SERVERNAME\0" - "SSLV3_ALERT_BAD_CERTIFICATE\0" - "SSLV3_ALERT_BAD_RECORD_MAC\0" - "SSLV3_ALERT_CERTIFICATE_EXPIRED\0" - "SSLV3_ALERT_CERTIFICATE_REVOKED\0" - "SSLV3_ALERT_CERTIFICATE_UNKNOWN\0" - "SSLV3_ALERT_CLOSE_NOTIFY\0" - "SSLV3_ALERT_DECOMPRESSION_FAILURE\0" - "SSLV3_ALERT_HANDSHAKE_FAILURE\0" - "SSLV3_ALERT_ILLEGAL_PARAMETER\0" - "SSLV3_ALERT_NO_CERTIFICATE\0" - "SSLV3_ALERT_UNEXPECTED_MESSAGE\0" - "SSLV3_ALERT_UNSUPPORTED_CERTIFICATE\0" - "SSL_CTX_HAS_NO_DEFAULT_SSL_VERSION\0" - "SSL_HANDSHAKE_FAILURE\0" - "SSL_SESSION_ID_CONTEXT_TOO_LONG\0" - "TLSV1_ALERT_ACCESS_DENIED\0" - "TLSV1_ALERT_DECODE_ERROR\0" - "TLSV1_ALERT_DECRYPTION_FAILED\0" - "TLSV1_ALERT_DECRYPT_ERROR\0" - "TLSV1_ALERT_EXPORT_RESTRICTION\0" - "TLSV1_ALERT_INAPPROPRIATE_FALLBACK\0" - "TLSV1_ALERT_INSUFFICIENT_SECURITY\0" - "TLSV1_ALERT_INTERNAL_ERROR\0" - "TLSV1_ALERT_NO_RENEGOTIATION\0" - "TLSV1_ALERT_PROTOCOL_VERSION\0" - "TLSV1_ALERT_RECORD_OVERFLOW\0" - "TLSV1_ALERT_UNKNOWN_CA\0" - "TLSV1_ALERT_USER_CANCELLED\0" - "TLSV1_BAD_CERTIFICATE_HASH_VALUE\0" - "TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE\0" - "TLSV1_CERTIFICATE_REQUIRED\0" - "TLSV1_CERTIFICATE_UNOBTAINABLE\0" - "TLSV1_UNKNOWN_PSK_IDENTITY\0" - "TLSV1_UNRECOGNIZED_NAME\0" - "TLSV1_UNSUPPORTED_EXTENSION\0" - "TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST\0" - "TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG\0" - "TOO_MANY_EMPTY_FRAGMENTS\0" - "TOO_MANY_KEY_UPDATES\0" - "TOO_MANY_WARNING_ALERTS\0" - "TOO_MUCH_SKIPPED_EARLY_DATA\0" - "UNABLE_TO_FIND_ECDH_PARAMETERS\0" - "UNEXPECTED_EXTENSION\0" - "UNEXPECTED_MESSAGE\0" - "UNEXPECTED_OPERATOR_IN_GROUP\0" - "UNEXPECTED_RECORD\0" - "UNKNOWN_ALERT_TYPE\0" - "UNKNOWN_CERTIFICATE_TYPE\0" - "UNKNOWN_CIPHER_RETURNED\0" - "UNKNOWN_CIPHER_TYPE\0" - "UNKNOWN_KEY_EXCHANGE_TYPE\0" - "UNKNOWN_PROTOCOL\0" - "UNKNOWN_SSL_VERSION\0" - "UNKNOWN_STATE\0" - "UNSAFE_LEGACY_RENEGOTIATION_DISABLED\0" - "UNSUPPORTED_COMPRESSION_ALGORITHM\0" - "UNSUPPORTED_ELLIPTIC_CURVE\0" - 
"UNSUPPORTED_PROTOCOL\0" - "UNSUPPORTED_PROTOCOL_FOR_CUSTOM_KEY\0" - "WRONG_CERTIFICATE_TYPE\0" - "WRONG_CIPHER_RETURNED\0" - "WRONG_CURVE\0" - "WRONG_MESSAGE_TYPE\0" - "WRONG_SIGNATURE_TYPE\0" - "WRONG_SSL_VERSION\0" - "WRONG_VERSION_NUMBER\0" - "X509_LIB\0" - "X509_VERIFICATION_SETUP_PROBLEMS\0" - "AKID_MISMATCH\0" - "BAD_PKCS7_VERSION\0" - "BAD_X509_FILETYPE\0" - "BASE64_DECODE_ERROR\0" - "CANT_CHECK_DH_KEY\0" - "CERT_ALREADY_IN_HASH_TABLE\0" - "CRL_ALREADY_DELTA\0" - "CRL_VERIFY_FAILURE\0" - "IDP_MISMATCH\0" - "INVALID_DIRECTORY\0" - "INVALID_FIELD_NAME\0" - "INVALID_PSS_PARAMETERS\0" - "INVALID_TRUST\0" - "ISSUER_MISMATCH\0" - "KEY_TYPE_MISMATCH\0" - "KEY_VALUES_MISMATCH\0" - "LOADING_CERT_DIR\0" - "LOADING_DEFAULTS\0" - "NAME_TOO_LONG\0" - "NEWER_CRL_NOT_NEWER\0" - "NOT_PKCS7_SIGNED_DATA\0" - "NO_CERTIFICATES_INCLUDED\0" - "NO_CERT_SET_FOR_US_TO_VERIFY\0" - "NO_CRLS_INCLUDED\0" - "NO_CRL_NUMBER\0" - "PUBLIC_KEY_DECODE_ERROR\0" - "PUBLIC_KEY_ENCODE_ERROR\0" - "SHOULD_RETRY\0" - "UNKNOWN_KEY_TYPE\0" - "UNKNOWN_PURPOSE_ID\0" - "UNKNOWN_TRUST_ID\0" - "WRONG_LOOKUP_TYPE\0" - "BAD_IP_ADDRESS\0" - "BAD_OBJECT\0" - "BN_DEC2BN_ERROR\0" - "BN_TO_ASN1_INTEGER_ERROR\0" - "CANNOT_FIND_FREE_FUNCTION\0" - "DIRNAME_ERROR\0" - "DISTPOINT_ALREADY_SET\0" - "DUPLICATE_ZONE_ID\0" - "ERROR_CONVERTING_ZONE\0" - "ERROR_CREATING_EXTENSION\0" - "ERROR_IN_EXTENSION\0" - "EXPECTED_A_SECTION_NAME\0" - "EXTENSION_EXISTS\0" - "EXTENSION_NAME_ERROR\0" - "EXTENSION_NOT_FOUND\0" - "EXTENSION_SETTING_NOT_SUPPORTED\0" - "EXTENSION_VALUE_ERROR\0" - "ILLEGAL_EMPTY_EXTENSION\0" - "ILLEGAL_HEX_DIGIT\0" - "INCORRECT_POLICY_SYNTAX_TAG\0" - "INVALID_BOOLEAN_STRING\0" - "INVALID_EXTENSION_STRING\0" - "INVALID_MULTIPLE_RDNS\0" - "INVALID_NAME\0" - "INVALID_NULL_ARGUMENT\0" - "INVALID_NULL_NAME\0" - "INVALID_NULL_VALUE\0" - "INVALID_NUMBERS\0" - "INVALID_OBJECT_IDENTIFIER\0" - "INVALID_OPTION\0" - "INVALID_POLICY_IDENTIFIER\0" - "INVALID_PROXY_POLICY_SETTING\0" - "INVALID_PURPOSE\0" - "INVALID_SECTION\0" - "INVALID_SYNTAX\0" - "ISSUER_DECODE_ERROR\0" - "NEED_ORGANIZATION_AND_NUMBERS\0" - "NO_CONFIG_DATABASE\0" - "NO_ISSUER_CERTIFICATE\0" - "NO_ISSUER_DETAILS\0" - "NO_POLICY_IDENTIFIER\0" - "NO_PROXY_CERT_POLICY_LANGUAGE_DEFINED\0" - "NO_PUBLIC_KEY\0" - "NO_SUBJECT_DETAILS\0" - "ODD_NUMBER_OF_DIGITS\0" - "OPERATION_NOT_DEFINED\0" - "OTHERNAME_ERROR\0" - "POLICY_LANGUAGE_ALREADY_DEFINED\0" - "POLICY_PATH_LENGTH\0" - "POLICY_PATH_LENGTH_ALREADY_DEFINED\0" - "POLICY_WHEN_PROXY_LANGUAGE_REQUIRES_NO_POLICY\0" - "SECTION_NOT_FOUND\0" - "UNABLE_TO_GET_ISSUER_DETAILS\0" - "UNABLE_TO_GET_ISSUER_KEYID\0" - "UNKNOWN_BIT_STRING_ARGUMENT\0" - "UNKNOWN_EXTENSION\0" - "UNKNOWN_EXTENSION_NAME\0" - "UNKNOWN_OPTION\0" - "UNSUPPORTED_OPTION\0" - "USER_TOO_LONG\0" - ""; - diff --git a/Sources/BoringSSL/include/openssl/aead.h b/Sources/BoringSSL/include/openssl/aead.h index bd655d6c8..7424e29c0 100644 --- a/Sources/BoringSSL/include/openssl/aead.h +++ b/Sources/BoringSSL/include/openssl/aead.h @@ -22,242 +22,312 @@ extern "C" { #endif -/* Authenticated Encryption with Additional Data. - * - * AEAD couples confidentiality and integrity in a single primitive. AEAD - * algorithms take a key and then can seal and open individual messages. Each - * message has a unique, per-message nonce and, optionally, additional data - * which is authenticated but not included in the ciphertext. - * - * The |EVP_AEAD_CTX_init| function initialises an |EVP_AEAD_CTX| structure and - * performs any precomputation needed to use |aead| with |key|. 
The length of - * the key, |key_len|, is given in bytes. - * - * The |tag_len| argument contains the length of the tags, in bytes, and allows - * for the processing of truncated authenticators. A zero value indicates that - * the default tag length should be used and this is defined as - * |EVP_AEAD_DEFAULT_TAG_LENGTH| in order to make the code clear. Using - * truncated tags increases an attacker's chance of creating a valid forgery. - * Be aware that the attacker's chance may increase more than exponentially as - * would naively be expected. - * - * When no longer needed, the initialised |EVP_AEAD_CTX| structure must be - * passed to |EVP_AEAD_CTX_cleanup|, which will deallocate any memory used. - * - * With an |EVP_AEAD_CTX| in hand, one can seal and open messages. These - * operations are intended to meet the standard notions of privacy and - * authenticity for authenticated encryption. For formal definitions see - * Bellare and Namprempre, "Authenticated encryption: relations among notions - * and analysis of the generic composition paradigm," Lecture Notes in Computer - * Science B<1976> (2000), 531–545, - * http://www-cse.ucsd.edu/~mihir/papers/oem.html. - * - * When sealing messages, a nonce must be given. The length of the nonce is - * fixed by the AEAD in use and is returned by |EVP_AEAD_nonce_length|. *The - * nonce must be unique for all messages with the same key*. This is critically - * important - nonce reuse may completely undermine the security of the AEAD. - * Nonces may be predictable and public, so long as they are unique. Uniqueness - * may be achieved with a simple counter or, if large enough, may be generated - * randomly. The nonce must be passed into the "open" operation by the receiver - * so must either be implicit (e.g. a counter), or must be transmitted along - * with the sealed message. - * - * The "seal" and "open" operations are atomic - an entire message must be - * encrypted or decrypted in a single call. Large messages may have to be split - * up in order to accommodate this. When doing so, be mindful of the need not to - * repeat nonces and the possibility that an attacker could duplicate, reorder - * or drop message chunks. For example, using a single key for a given (large) - * message and sealing chunks with nonces counting from zero would be secure as - * long as the number of chunks was securely transmitted. (Otherwise an - * attacker could truncate the message by dropping chunks from the end.) - * - * The number of chunks could be transmitted by prefixing it to the plaintext, - * for example. This also assumes that no other message would ever use the same - * key otherwise the rule that nonces must be unique for a given key would be - * violated. - * - * The "seal" and "open" operations also permit additional data to be - * authenticated via the |ad| parameter. This data is not included in the - * ciphertext and must be identical for both the "seal" and "open" call. This - * permits implicit context to be authenticated but may be empty if not needed. - * - * The "seal" and "open" operations may work in-place if the |out| and |in| - * arguments are equal. Otherwise, if |out| and |in| alias, input data may be - * overwritten before it is read. This situation will cause an error. - * - * The "seal" and "open" operations return one on success and zero on error. */ - - -/* AEAD algorithms. */ - -/* EVP_aead_aes_128_gcm is AES-128 in Galois Counter Mode. */ +// Authenticated Encryption with Additional Data. 
+// +// AEAD couples confidentiality and integrity in a single primitive. AEAD +// algorithms take a key and then can seal and open individual messages. Each +// message has a unique, per-message nonce and, optionally, additional data +// which is authenticated but not included in the ciphertext. +// +// The |EVP_AEAD_CTX_init| function initialises an |EVP_AEAD_CTX| structure and +// performs any precomputation needed to use |aead| with |key|. The length of +// the key, |key_len|, is given in bytes. +// +// The |tag_len| argument contains the length of the tags, in bytes, and allows +// for the processing of truncated authenticators. A zero value indicates that +// the default tag length should be used and this is defined as +// |EVP_AEAD_DEFAULT_TAG_LENGTH| in order to make the code clear. Using +// truncated tags increases an attacker's chance of creating a valid forgery. +// Be aware that the attacker's chance may increase more than exponentially as +// would naively be expected. +// +// When no longer needed, the initialised |EVP_AEAD_CTX| structure must be +// passed to |EVP_AEAD_CTX_cleanup|, which will deallocate any memory used. +// +// With an |EVP_AEAD_CTX| in hand, one can seal and open messages. These +// operations are intended to meet the standard notions of privacy and +// authenticity for authenticated encryption. For formal definitions see +// Bellare and Namprempre, "Authenticated encryption: relations among notions +// and analysis of the generic composition paradigm," Lecture Notes in Computer +// Science B<1976> (2000), 531–545, +// http://www-cse.ucsd.edu/~mihir/papers/oem.html. +// +// When sealing messages, a nonce must be given. The length of the nonce is +// fixed by the AEAD in use and is returned by |EVP_AEAD_nonce_length|. *The +// nonce must be unique for all messages with the same key*. This is critically +// important - nonce reuse may completely undermine the security of the AEAD. +// Nonces may be predictable and public, so long as they are unique. Uniqueness +// may be achieved with a simple counter or, if large enough, may be generated +// randomly. The nonce must be passed into the "open" operation by the receiver +// so must either be implicit (e.g. a counter), or must be transmitted along +// with the sealed message. +// +// The "seal" and "open" operations are atomic - an entire message must be +// encrypted or decrypted in a single call. Large messages may have to be split +// up in order to accommodate this. When doing so, be mindful of the need not to +// repeat nonces and the possibility that an attacker could duplicate, reorder +// or drop message chunks. For example, using a single key for a given (large) +// message and sealing chunks with nonces counting from zero would be secure as +// long as the number of chunks was securely transmitted. (Otherwise an +// attacker could truncate the message by dropping chunks from the end.) +// +// The number of chunks could be transmitted by prefixing it to the plaintext, +// for example. This also assumes that no other message would ever use the same +// key otherwise the rule that nonces must be unique for a given key would be +// violated. +// +// The "seal" and "open" operations also permit additional data to be +// authenticated via the |ad| parameter. This data is not included in the +// ciphertext and must be identical for both the "seal" and "open" call. This +// permits implicit context to be authenticated but may be empty if not needed. 
+// +// The "seal" and "open" operations may work in-place if the |out| and |in| +// arguments are equal. Otherwise, if |out| and |in| alias, input data may be +// overwritten before it is read. This situation will cause an error. +// +// The "seal" and "open" operations return one on success and zero on error. + + +// AEAD algorithms. + +// EVP_aead_aes_128_gcm is AES-128 in Galois Counter Mode. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm(void); -/* EVP_aead_aes_256_gcm is AES-256 in Galois Counter Mode. */ +// EVP_aead_aes_256_gcm is AES-256 in Galois Counter Mode. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm(void); -/* EVP_aead_chacha20_poly1305 is the AEAD built from ChaCha20 and - * Poly1305 as described in RFC 7539. */ +// EVP_aead_chacha20_poly1305 is the AEAD built from ChaCha20 and +// Poly1305 as described in RFC 7539. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_chacha20_poly1305(void); -/* EVP_aead_aes_128_ctr_hmac_sha256 is AES-128 in CTR mode with HMAC-SHA256 for - * authentication. The nonce is 12 bytes; the bottom 32-bits are used as the - * block counter, thus the maximum plaintext size is 64GB. */ +// EVP_aead_aes_128_ctr_hmac_sha256 is AES-128 in CTR mode with HMAC-SHA256 for +// authentication. The nonce is 12 bytes; the bottom 32-bits are used as the +// block counter, thus the maximum plaintext size is 64GB. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_ctr_hmac_sha256(void); -/* EVP_aead_aes_256_ctr_hmac_sha256 is AES-256 in CTR mode with HMAC-SHA256 for - * authentication. See |EVP_aead_aes_128_ctr_hmac_sha256| for details. */ +// EVP_aead_aes_256_ctr_hmac_sha256 is AES-256 in CTR mode with HMAC-SHA256 for +// authentication. See |EVP_aead_aes_128_ctr_hmac_sha256| for details. OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_ctr_hmac_sha256(void); -/* EVP_aead_aes_128_gcm_siv is AES-128 in GCM-SIV mode. See - * https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02 */ +// EVP_aead_aes_128_gcm_siv is AES-128 in GCM-SIV mode. See +// https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02 OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm_siv(void); -/* EVP_aead_aes_256_gcm_siv is AES-256 in GCM-SIV mode. See - * https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02 */ +// EVP_aead_aes_256_gcm_siv is AES-256 in GCM-SIV mode. See +// https://tools.ietf.org/html/draft-irtf-cfrg-gcmsiv-02 OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm_siv(void); -/* EVP_has_aes_hardware returns one if we enable hardware support for fast and - * constant-time AES-GCM. */ +// EVP_has_aes_hardware returns one if we enable hardware support for fast and +// constant-time AES-GCM. OPENSSL_EXPORT int EVP_has_aes_hardware(void); -/* Utility functions. */ +// Utility functions. -/* EVP_AEAD_key_length returns the length, in bytes, of the keys used by - * |aead|. */ +// EVP_AEAD_key_length returns the length, in bytes, of the keys used by +// |aead|. OPENSSL_EXPORT size_t EVP_AEAD_key_length(const EVP_AEAD *aead); -/* EVP_AEAD_nonce_length returns the length, in bytes, of the per-message nonce - * for |aead|. */ +// EVP_AEAD_nonce_length returns the length, in bytes, of the per-message nonce +// for |aead|. OPENSSL_EXPORT size_t EVP_AEAD_nonce_length(const EVP_AEAD *aead); -/* EVP_AEAD_max_overhead returns the maximum number of additional bytes added - * by the act of sealing data with |aead|. */ +// EVP_AEAD_max_overhead returns the maximum number of additional bytes added +// by the act of sealing data with |aead|. 
OPENSSL_EXPORT size_t EVP_AEAD_max_overhead(const EVP_AEAD *aead); -/* EVP_AEAD_max_tag_len returns the maximum tag length when using |aead|. This - * is the largest value that can be passed as |tag_len| to - * |EVP_AEAD_CTX_init|. */ +// EVP_AEAD_max_tag_len returns the maximum tag length when using |aead|. This +// is the largest value that can be passed as |tag_len| to +// |EVP_AEAD_CTX_init|. OPENSSL_EXPORT size_t EVP_AEAD_max_tag_len(const EVP_AEAD *aead); -/* AEAD operations. */ +// AEAD operations. -/* An EVP_AEAD_CTX represents an AEAD algorithm configured with a specific key - * and message-independent IV. */ +// An EVP_AEAD_CTX represents an AEAD algorithm configured with a specific key +// and message-independent IV. typedef struct evp_aead_ctx_st { const EVP_AEAD *aead; - /* aead_state is an opaque pointer to whatever state the AEAD needs to - * maintain. */ + // aead_state is an opaque pointer to whatever state the AEAD needs to + // maintain. void *aead_state; + // tag_len may contain the actual length of the authentication tag if it is + // known at initialization time. + uint8_t tag_len; } EVP_AEAD_CTX; -/* EVP_AEAD_MAX_KEY_LENGTH contains the maximum key length used by - * any AEAD defined in this header. */ +// EVP_AEAD_MAX_KEY_LENGTH contains the maximum key length used by +// any AEAD defined in this header. #define EVP_AEAD_MAX_KEY_LENGTH 80 -/* EVP_AEAD_MAX_NONCE_LENGTH contains the maximum nonce length used by - * any AEAD defined in this header. */ +// EVP_AEAD_MAX_NONCE_LENGTH contains the maximum nonce length used by +// any AEAD defined in this header. #define EVP_AEAD_MAX_NONCE_LENGTH 16 -/* EVP_AEAD_MAX_OVERHEAD contains the maximum overhead used by any AEAD - * defined in this header. */ +// EVP_AEAD_MAX_OVERHEAD contains the maximum overhead used by any AEAD +// defined in this header. #define EVP_AEAD_MAX_OVERHEAD 64 -/* EVP_AEAD_DEFAULT_TAG_LENGTH is a magic value that can be passed to - * EVP_AEAD_CTX_init to indicate that the default tag length for an AEAD should - * be used. */ +// EVP_AEAD_DEFAULT_TAG_LENGTH is a magic value that can be passed to +// EVP_AEAD_CTX_init to indicate that the default tag length for an AEAD should +// be used. #define EVP_AEAD_DEFAULT_TAG_LENGTH 0 -/* EVP_AEAD_CTX_zero sets an uninitialized |ctx| to the zero state. It must be - * initialized with |EVP_AEAD_CTX_init| before use. It is safe, but not - * necessary, to call |EVP_AEAD_CTX_cleanup| in this state. This may be used for - * more uniform cleanup of |EVP_AEAD_CTX|. */ +// EVP_AEAD_CTX_zero sets an uninitialized |ctx| to the zero state. It must be +// initialized with |EVP_AEAD_CTX_init| before use. It is safe, but not +// necessary, to call |EVP_AEAD_CTX_cleanup| in this state. This may be used for +// more uniform cleanup of |EVP_AEAD_CTX|. OPENSSL_EXPORT void EVP_AEAD_CTX_zero(EVP_AEAD_CTX *ctx); -/* EVP_AEAD_CTX_init initializes |ctx| for the given AEAD algorithm. The |impl| - * argument is ignored and should be NULL. Authentication tags may be truncated - * by passing a size as |tag_len|. A |tag_len| of zero indicates the default - * tag length and this is defined as EVP_AEAD_DEFAULT_TAG_LENGTH for - * readability. - * - * Returns 1 on success. Otherwise returns 0 and pushes to the error stack. In - * the error case, you do not need to call |EVP_AEAD_CTX_cleanup|, but it's - * harmless to do so. */ +// EVP_AEAD_CTX_new allocates an |EVP_AEAD_CTX|, calls |EVP_AEAD_CTX_init| and +// returns the |EVP_AEAD_CTX|, or NULL on error. 
+OPENSSL_EXPORT EVP_AEAD_CTX *EVP_AEAD_CTX_new(const EVP_AEAD *aead, + const uint8_t *key, + size_t key_len, size_t tag_len); + +// EVP_AEAD_CTX_free calls |EVP_AEAD_CTX_cleanup| and |OPENSSL_free| on +// |ctx|. +OPENSSL_EXPORT void EVP_AEAD_CTX_free(EVP_AEAD_CTX *ctx); + +// EVP_AEAD_CTX_init initializes |ctx| for the given AEAD algorithm. The |impl| +// argument is ignored and should be NULL. Authentication tags may be truncated +// by passing a size as |tag_len|. A |tag_len| of zero indicates the default +// tag length and this is defined as EVP_AEAD_DEFAULT_TAG_LENGTH for +// readability. +// +// Returns 1 on success. Otherwise returns 0 and pushes to the error stack. In +// the error case, you do not need to call |EVP_AEAD_CTX_cleanup|, but it's +// harmless to do so. OPENSSL_EXPORT int EVP_AEAD_CTX_init(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead, const uint8_t *key, size_t key_len, size_t tag_len, ENGINE *impl); -/* EVP_AEAD_CTX_cleanup frees any data allocated by |ctx|. It is a no-op to - * call |EVP_AEAD_CTX_cleanup| on a |EVP_AEAD_CTX| that has been |memset| to - * all zeros. */ +// EVP_AEAD_CTX_cleanup frees any data allocated by |ctx|. It is a no-op to +// call |EVP_AEAD_CTX_cleanup| on a |EVP_AEAD_CTX| that has been |memset| to +// all zeros. OPENSSL_EXPORT void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx); -/* EVP_AEAD_CTX_seal encrypts and authenticates |in_len| bytes from |in| and - * authenticates |ad_len| bytes from |ad| and writes the result to |out|. It - * returns one on success and zero otherwise. - * - * This function may be called (with the same |EVP_AEAD_CTX|) concurrently with - * itself or |EVP_AEAD_CTX_open|. - * - * At most |max_out_len| bytes are written to |out| and, in order to ensure - * success, |max_out_len| should be |in_len| plus the result of - * |EVP_AEAD_max_overhead|. On successful return, |*out_len| is set to the - * actual number of bytes written. - * - * The length of |nonce|, |nonce_len|, must be equal to the result of - * |EVP_AEAD_nonce_length| for this AEAD. - * - * |EVP_AEAD_CTX_seal| never results in a partial output. If |max_out_len| is - * insufficient, zero will be returned. (In this case, |*out_len| is set to - * zero.) - * - * If |in| and |out| alias then |out| must be == |in|. */ +// EVP_AEAD_CTX_seal encrypts and authenticates |in_len| bytes from |in| and +// authenticates |ad_len| bytes from |ad| and writes the result to |out|. It +// returns one on success and zero otherwise. +// +// This function may be called concurrently with itself or any other seal/open +// function on the same |EVP_AEAD_CTX|. +// +// At most |max_out_len| bytes are written to |out| and, in order to ensure +// success, |max_out_len| should be |in_len| plus the result of +// |EVP_AEAD_max_overhead|. On successful return, |*out_len| is set to the +// actual number of bytes written. +// +// The length of |nonce|, |nonce_len|, must be equal to the result of +// |EVP_AEAD_nonce_length| for this AEAD. +// +// |EVP_AEAD_CTX_seal| never results in a partial output. If |max_out_len| is +// insufficient, zero will be returned. If any error occurs, |out| will be +// filled with zero bytes and |*out_len| set to zero. +// +// If |in| and |out| alias then |out| must be == |in|. 
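(A minimal usage sketch, not part of the vendored header: it drives the seal/open interface documented above with AES-128-GCM, a caller-supplied 16-byte key and a 12-byte nonce; the helper name and the sample plaintext/AD are invented for illustration only, and in real use the nonce must never repeat under the same key. The heap-allocating EVP_AEAD_CTX_new/EVP_AEAD_CTX_free pair added in this update can be used in the same way.)

#include <stdint.h>
#include <string.h>
#include <openssl/aead.h>

static int aead_roundtrip(const uint8_t key[16], const uint8_t nonce[12]) {
  EVP_AEAD_CTX ctx;
  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_128_gcm(), key, 16,
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;
  }
  static const uint8_t plaintext[] = "hello";
  static const uint8_t ad[] = {0x01, 0x02};
  uint8_t sealed[sizeof(plaintext) + EVP_AEAD_MAX_OVERHEAD];
  size_t sealed_len = 0;
  // |max_out_len| is |in_len| plus the AEAD's maximum overhead, as the
  // documentation above recommends.
  int ok = EVP_AEAD_CTX_seal(&ctx, sealed, &sealed_len, sizeof(sealed),
                             nonce, 12, plaintext, sizeof(plaintext),
                             ad, sizeof(ad));
  uint8_t opened[sizeof(sealed)];
  size_t opened_len = 0;
  ok = ok && EVP_AEAD_CTX_open(&ctx, opened, &opened_len, sizeof(opened),
                               nonce, 12, sealed, sealed_len,
                               ad, sizeof(ad));
  EVP_AEAD_CTX_cleanup(&ctx);
  return ok && opened_len == sizeof(plaintext) &&
         memcmp(opened, plaintext, opened_len) == 0;
}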
OPENSSL_EXPORT int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len); -/* EVP_AEAD_CTX_open authenticates |in_len| bytes from |in| and |ad_len| bytes - * from |ad| and decrypts at most |in_len| bytes into |out|. It returns one on - * success and zero otherwise. - * - * This function may be called (with the same |EVP_AEAD_CTX|) concurrently with - * itself or |EVP_AEAD_CTX_seal|. - * - * At most |in_len| bytes are written to |out|. In order to ensure success, - * |max_out_len| should be at least |in_len|. On successful return, |*out_len| - * is set to the the actual number of bytes written. - * - * The length of |nonce|, |nonce_len|, must be equal to the result of - * |EVP_AEAD_nonce_length| for this AEAD. - * - * |EVP_AEAD_CTX_open| never results in a partial output. If |max_out_len| is - * insufficient, zero will be returned. (In this case, |*out_len| is set to - * zero.) - * - * If |in| and |out| alias then |out| must be == |in|. */ +// EVP_AEAD_CTX_open authenticates |in_len| bytes from |in| and |ad_len| bytes +// from |ad| and decrypts at most |in_len| bytes into |out|. It returns one on +// success and zero otherwise. +// +// This function may be called concurrently with itself or any other seal/open +// function on the same |EVP_AEAD_CTX|. +// +// At most |in_len| bytes are written to |out|. In order to ensure success, +// |max_out_len| should be at least |in_len|. On successful return, |*out_len| +// is set to the the actual number of bytes written. +// +// The length of |nonce|, |nonce_len|, must be equal to the result of +// |EVP_AEAD_nonce_length| for this AEAD. +// +// |EVP_AEAD_CTX_open| never results in a partial output. If |max_out_len| is +// insufficient, zero will be returned. If any error occurs, |out| will be +// filled with zero bytes and |*out_len| set to zero. +// +// If |in| and |out| alias then |out| must be == |in|. OPENSSL_EXPORT int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *nonce, size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *ad, size_t ad_len); -/* EVP_AEAD_CTX_aead returns the underlying AEAD for |ctx|, or NULL if one has - * not been set. */ +// EVP_AEAD_CTX_seal_scatter encrypts and authenticates |in_len| bytes from |in| +// and authenticates |ad_len| bytes from |ad|. It writes |in_len| bytes of +// ciphertext to |out| and the authentication tag to |out_tag|. It returns one +// on success and zero otherwise. +// +// This function may be called concurrently with itself or any other seal/open +// function on the same |EVP_AEAD_CTX|. +// +// Exactly |in_len| bytes are written to |out|, and up to +// |EVP_AEAD_max_overhead+extra_in_len| bytes to |out_tag|. On successful +// return, |*out_tag_len| is set to the actual number of bytes written to +// |out_tag|. +// +// |extra_in| may point to an additional plaintext input buffer if the cipher +// supports it. If present, |extra_in_len| additional bytes of plaintext are +// encrypted and authenticated, and the ciphertext is written (before the tag) +// to |out_tag|. |max_out_tag_len| must be sized to allow for the additional +// |extra_in_len| bytes. +// +// The length of |nonce|, |nonce_len|, must be equal to the result of +// |EVP_AEAD_nonce_length| for this AEAD. +// +// |EVP_AEAD_CTX_seal_scatter| never results in a partial output. 
If +// |max_out_tag_len| is insufficient, zero will be returned. If any error +// occurs, |out| and |out_tag| will be filled with zero bytes and |*out_tag_len| +// set to zero. +// +// If |in| and |out| alias then |out| must be == |in|. |out_tag| may not alias +// any other argument. +OPENSSL_EXPORT int EVP_AEAD_CTX_seal_scatter( + const EVP_AEAD_CTX *ctx, uint8_t *out, + uint8_t *out_tag, size_t *out_tag_len, size_t max_out_tag_len, + const uint8_t *nonce, size_t nonce_len, + const uint8_t *in, size_t in_len, + const uint8_t *extra_in, size_t extra_in_len, + const uint8_t *ad, size_t ad_len); + +// EVP_AEAD_CTX_open_gather decrypts and authenticates |in_len| bytes from |in| +// and authenticates |ad_len| bytes from |ad| using |in_tag_len| bytes of +// authentication tag from |in_tag|. If successful, it writes |in_len| bytes of +// plaintext to |out|. It returns one on success and zero otherwise. +// +// This function may be called concurrently with itself or any other seal/open +// function on the same |EVP_AEAD_CTX|. +// +// The length of |nonce|, |nonce_len|, must be equal to the result of +// |EVP_AEAD_nonce_length| for this AEAD. +// +// |EVP_AEAD_CTX_open_gather| never results in a partial output. If any error +// occurs, |out| will be filled with zero bytes. +// +// If |in| and |out| alias then |out| must be == |in|. +OPENSSL_EXPORT int EVP_AEAD_CTX_open_gather( + const EVP_AEAD_CTX *ctx, uint8_t *out, const uint8_t *nonce, + size_t nonce_len, const uint8_t *in, size_t in_len, const uint8_t *in_tag, + size_t in_tag_len, const uint8_t *ad, size_t ad_len); + +// EVP_AEAD_CTX_aead returns the underlying AEAD for |ctx|, or NULL if one has +// not been set. OPENSSL_EXPORT const EVP_AEAD *EVP_AEAD_CTX_aead(const EVP_AEAD_CTX *ctx); -/* TLS-specific AEAD algorithms. - * - * These AEAD primitives do not meet the definition of generic AEADs. They are - * all specific to TLS and should not be used outside of that context. They must - * be initialized with |EVP_AEAD_CTX_init_with_direction|, are stateful, and may - * not be used concurrently. Any nonces are used as IVs, so they must be - * unpredictable. They only accept an |ad| parameter of length 11 (the standard - * TLS one with length omitted). */ +// TLS-specific AEAD algorithms. +// +// These AEAD primitives do not meet the definition of generic AEADs. They are +// all specific to TLS and should not be used outside of that context. They must +// be initialized with |EVP_AEAD_CTX_init_with_direction|, are stateful, and may +// not be used concurrently. Any nonces are used as IVs, so they must be +// unpredictable. They only accept an |ad| parameter of length 11 (the standard +// TLS one with length omitted). OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void); @@ -273,14 +343,22 @@ OPENSSL_EXPORT const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls_implicit_iv(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_null_sha1_tls(void); +// EVP_aead_aes_128_gcm_tls12 is AES-128 in Galois Counter Mode using the TLS +// 1.2 nonce construction. +OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_gcm_tls12(void); + +// EVP_aead_aes_256_gcm_tls12 is AES-256 in Galois Counter Mode using the TLS +// 1.2 nonce construction. +OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_gcm_tls12(void); -/* SSLv3-specific AEAD algorithms. - * - * These AEAD primitives do not meet the definition of generic AEADs. 
They are - * all specific to SSLv3 and should not be used outside of that context. They - * must be initialized with |EVP_AEAD_CTX_init_with_direction|, are stateful, - * and may not be used concurrently. They only accept an |ad| parameter of - * length 9 (the standard TLS one with length and version omitted). */ + +// SSLv3-specific AEAD algorithms. +// +// These AEAD primitives do not meet the definition of generic AEADs. They are +// all specific to SSLv3 and should not be used outside of that context. They +// must be initialized with |EVP_AEAD_CTX_init_with_direction|, are stateful, +// and may not be used concurrently. They only accept an |ad| parameter of +// length 9 (the standard TLS one with length and version omitted). OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_ssl3(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_ssl3(void); @@ -288,32 +366,41 @@ OPENSSL_EXPORT const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_ssl3(void); OPENSSL_EXPORT const EVP_AEAD *EVP_aead_null_sha1_ssl3(void); -/* Obscure functions. */ +// Obscure functions. -/* evp_aead_direction_t denotes the direction of an AEAD operation. */ +// evp_aead_direction_t denotes the direction of an AEAD operation. enum evp_aead_direction_t { evp_aead_open, evp_aead_seal, }; -/* EVP_AEAD_CTX_init_with_direction calls |EVP_AEAD_CTX_init| for normal - * AEADs. For TLS-specific and SSL3-specific AEADs, it initializes |ctx| for a - * given direction. */ +// EVP_AEAD_CTX_init_with_direction calls |EVP_AEAD_CTX_init| for normal +// AEADs. For TLS-specific and SSL3-specific AEADs, it initializes |ctx| for a +// given direction. OPENSSL_EXPORT int EVP_AEAD_CTX_init_with_direction( EVP_AEAD_CTX *ctx, const EVP_AEAD *aead, const uint8_t *key, size_t key_len, size_t tag_len, enum evp_aead_direction_t dir); -/* EVP_AEAD_CTX_get_iv sets |*out_len| to the length of the IV for |ctx| and - * sets |*out_iv| to point to that many bytes of the current IV. This is only - * meaningful for AEADs with implicit IVs (i.e. CBC mode in SSLv3 and TLS 1.0). - * - * It returns one on success or zero on error. */ +// EVP_AEAD_CTX_get_iv sets |*out_len| to the length of the IV for |ctx| and +// sets |*out_iv| to point to that many bytes of the current IV. This is only +// meaningful for AEADs with implicit IVs (i.e. CBC mode in SSLv3 and TLS 1.0). +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_AEAD_CTX_get_iv(const EVP_AEAD_CTX *ctx, const uint8_t **out_iv, size_t *out_len); +// EVP_AEAD_CTX_tag_len computes the exact byte length of the tag written by +// |EVP_AEAD_CTX_seal_scatter| and writes it to |*out_tag_len|. It returns one +// on success or zero on error. |in_len| and |extra_in_len| must equal the +// arguments of the same names passed to |EVP_AEAD_CTX_seal_scatter|. 
+OPENSSL_EXPORT int EVP_AEAD_CTX_tag_len(const EVP_AEAD_CTX *ctx, + size_t *out_tag_len, + const size_t in_len, + const size_t extra_in_len); + #if defined(__cplusplus) -} /* extern C */ +} // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { @@ -324,6 +411,8 @@ using ScopedEVP_AEAD_CTX = internal::StackAllocated; +BORINGSSL_MAKE_DELETER(EVP_AEAD_CTX, EVP_AEAD_CTX_free) + } // namespace bssl } // extern C++ @@ -331,4 +420,4 @@ using ScopedEVP_AEAD_CTX = #endif -#endif /* OPENSSL_HEADER_AEAD_H */ +#endif // OPENSSL_HEADER_AEAD_H diff --git a/Sources/BoringSSL/include/openssl/aes.h b/Sources/BoringSSL/include/openssl/aes.h index 2aef91825..115658542 100644 --- a/Sources/BoringSSL/include/openssl/aes.h +++ b/Sources/BoringSSL/include/openssl/aes.h @@ -56,115 +56,115 @@ extern "C" { #endif -/* Raw AES functions. */ +// Raw AES functions. #define AES_ENCRYPT 1 #define AES_DECRYPT 0 -/* AES_MAXNR is the maximum number of AES rounds. */ +// AES_MAXNR is the maximum number of AES rounds. #define AES_MAXNR 14 #define AES_BLOCK_SIZE 16 -/* aes_key_st should be an opaque type, but EVP requires that the size be - * known. */ +// aes_key_st should be an opaque type, but EVP requires that the size be +// known. struct aes_key_st { uint32_t rd_key[4 * (AES_MAXNR + 1)]; unsigned rounds; }; typedef struct aes_key_st AES_KEY; -/* AES_set_encrypt_key configures |aeskey| to encrypt with the |bits|-bit key, - * |key|. - * - * WARNING: unlike other OpenSSL functions, this returns zero on success and a - * negative number on error. */ +// AES_set_encrypt_key configures |aeskey| to encrypt with the |bits|-bit key, +// |key|. +// +// WARNING: unlike other OpenSSL functions, this returns zero on success and a +// negative number on error. OPENSSL_EXPORT int AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey); -/* AES_set_decrypt_key configures |aeskey| to decrypt with the |bits|-bit key, - * |key|. - * - * WARNING: unlike other OpenSSL functions, this returns zero on success and a - * negative number on error. */ +// AES_set_decrypt_key configures |aeskey| to decrypt with the |bits|-bit key, +// |key|. +// +// WARNING: unlike other OpenSSL functions, this returns zero on success and a +// negative number on error. OPENSSL_EXPORT int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey); -/* AES_encrypt encrypts a single block from |in| to |out| with |key|. The |in| - * and |out| pointers may overlap. */ +// AES_encrypt encrypts a single block from |in| to |out| with |key|. The |in| +// and |out| pointers may overlap. OPENSSL_EXPORT void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); -/* AES_decrypt decrypts a single block from |in| to |out| with |key|. The |in| - * and |out| pointers may overlap. */ +// AES_decrypt decrypts a single block from |in| to |out| with |key|. The |in| +// and |out| pointers may overlap. OPENSSL_EXPORT void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key); -/* Block cipher modes. */ +// Block cipher modes. -/* AES_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode) |len| - * bytes from |in| to |out|. The |num| parameter must be set to zero on the - * first call and |ivec| will be incremented. */ +// AES_ctr128_encrypt encrypts (or decrypts, it's the same in CTR mode) |len| +// bytes from |in| to |out|. The |num| parameter must be set to zero on the +// first call and |ivec| will be incremented. 
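(Again purely illustrative and not part of the vendored header: a sketch of the raw single-block AES API documented above, assuming a caller-supplied 128-bit key; the helper name is invented. Note the key-setup functions' unusual return convention of zero on success.)

#include <stdint.h>
#include <string.h>
#include <openssl/aes.h>

static int aes_block_roundtrip(const uint8_t key[16],
                               const uint8_t block[AES_BLOCK_SIZE]) {
  AES_KEY enc_key, dec_key;
  // Unlike most other functions in the library, these return 0 on success.
  if (AES_set_encrypt_key(key, 128, &enc_key) != 0 ||
      AES_set_decrypt_key(key, 128, &dec_key) != 0) {
    return 0;
  }
  uint8_t ciphertext[AES_BLOCK_SIZE], recovered[AES_BLOCK_SIZE];
  AES_encrypt(block, ciphertext, &enc_key);
  AES_decrypt(ciphertext, recovered, &dec_key);
  return memcmp(block, recovered, AES_BLOCK_SIZE) == 0;
}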
OPENSSL_EXPORT void AES_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t ivec[AES_BLOCK_SIZE], uint8_t ecount_buf[AES_BLOCK_SIZE], unsigned int *num); -/* AES_ecb_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) a single, - * 16 byte block from |in| to |out|. */ +// AES_ecb_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) a single, +// 16 byte block from |in| to |out|. OPENSSL_EXPORT void AES_ecb_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key, const int enc); -/* AES_cbc_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) |len| - * bytes from |in| to |out|. The length must be a multiple of the block size. */ +// AES_cbc_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) |len| +// bytes from |in| to |out|. The length must be a multiple of the block size. OPENSSL_EXPORT void AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, const int enc); -/* AES_ofb128_encrypt encrypts (or decrypts, it's the same in OFB mode) |len| - * bytes from |in| to |out|. The |num| parameter must be set to zero on the - * first call. */ +// AES_ofb128_encrypt encrypts (or decrypts, it's the same in OFB mode) |len| +// bytes from |in| to |out|. The |num| parameter must be set to zero on the +// first call. OPENSSL_EXPORT void AES_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, int *num); -/* AES_cfb128_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) |len| - * bytes from |in| to |out|. The |num| parameter must be set to zero on the - * first call. */ +// AES_cfb128_encrypt encrypts (or decrypts, if |enc| == |AES_DECRYPT|) |len| +// bytes from |in| to |out|. The |num| parameter must be set to zero on the +// first call. OPENSSL_EXPORT void AES_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len, const AES_KEY *key, uint8_t *ivec, int *num, int enc); -/* AES key wrap. - * - * These functions implement AES Key Wrap mode, as defined in RFC 3394. They - * should never be used except to interoperate with existing systems that use - * this mode. */ - -/* AES_wrap_key performs AES key wrap on |in| which must be a multiple of 8 - * bytes. |iv| must point to an 8 byte value or be NULL to use the default IV. - * |key| must have been configured for encryption. On success, it writes - * |in_len| + 8 bytes to |out| and returns |in_len| + 8. Otherwise, it returns - * -1. */ +// AES key wrap. +// +// These functions implement AES Key Wrap mode, as defined in RFC 3394. They +// should never be used except to interoperate with existing systems that use +// this mode. + +// AES_wrap_key performs AES key wrap on |in| which must be a multiple of 8 +// bytes. |iv| must point to an 8 byte value or be NULL to use the default IV. +// |key| must have been configured for encryption. On success, it writes +// |in_len| + 8 bytes to |out| and returns |in_len| + 8. Otherwise, it returns +// -1. OPENSSL_EXPORT int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len); -/* AES_unwrap_key performs AES key unwrap on |in| which must be a multiple of 8 - * bytes. |iv| must point to an 8 byte value or be NULL to use the default IV. - * |key| must have been configured for decryption. On success, it writes - * |in_len| - 8 bytes to |out| and returns |in_len| - 8. Otherwise, it returns - * -1. */ +// AES_unwrap_key performs AES key unwrap on |in| which must be a multiple of 8 +// bytes. 
|iv| must point to an 8 byte value or be NULL to use the default IV. +// |key| must have been configured for decryption. On success, it writes +// |in_len| - 8 bytes to |out| and returns |in_len| - 8. Otherwise, it returns +// -1. OPENSSL_EXPORT int AES_unwrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out, const uint8_t *in, size_t in_len); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_AES_H */ +#endif // OPENSSL_HEADER_AES_H diff --git a/Sources/BoringSSL/include/openssl/arm_arch.h b/Sources/BoringSSL/include/openssl/arm_arch.h index e7010f402..faa2655e5 100644 --- a/Sources/BoringSSL/include/openssl/arm_arch.h +++ b/Sources/BoringSSL/include/openssl/arm_arch.h @@ -69,10 +69,10 @@ # else # define __ARMEL__ # endif - /* Why doesn't gcc define __ARM_ARCH__? Instead it defines - * bunch of below macros. See all_architectires[] table in - * gcc/config/arm/arm.c. On a side note it defines - * __ARMEL__/__ARMEB__ for little-/big-endian. */ + // Why doesn't gcc define __ARM_ARCH__? Instead it defines + // bunch of below macros. See all_architectires[] table in + // gcc/config/arm/arm.c. On a side note it defines + // __ARMEL__/__ARMEB__ for little-/big-endian. # elif defined(__ARM_ARCH) # define __ARM_ARCH__ __ARM_ARCH # elif defined(__ARM_ARCH_8A__) @@ -98,24 +98,24 @@ # endif #endif -/* Even when building for 32-bit ARM, support for aarch64 crypto instructions - * will be included. */ +// Even when building for 32-bit ARM, support for aarch64 crypto instructions +// will be included. #define __ARM_MAX_ARCH__ 8 -/* ARMV7_NEON is true when a NEON unit is present in the current CPU. */ +// ARMV7_NEON is true when a NEON unit is present in the current CPU. #define ARMV7_NEON (1 << 0) -/* ARMV8_AES indicates support for hardware AES instructions. */ +// ARMV8_AES indicates support for hardware AES instructions. #define ARMV8_AES (1 << 2) -/* ARMV8_SHA1 indicates support for hardware SHA-1 instructions. */ +// ARMV8_SHA1 indicates support for hardware SHA-1 instructions. #define ARMV8_SHA1 (1 << 3) -/* ARMV8_SHA256 indicates support for hardware SHA-256 instructions. */ +// ARMV8_SHA256 indicates support for hardware SHA-256 instructions. #define ARMV8_SHA256 (1 << 4) -/* ARMV8_PMULL indicates support for carryless multiplication. */ +// ARMV8_PMULL indicates support for carryless multiplication. #define ARMV8_PMULL (1 << 5) -#endif /* OPENSSL_HEADER_ARM_ARCH_H */ +#endif // OPENSSL_HEADER_ARM_ARCH_H diff --git a/Sources/BoringSSL/include/openssl/asn1.h b/Sources/BoringSSL/include/openssl/asn1.h index 42386e0cf..5c8bf4cde 100644 --- a/Sources/BoringSSL/include/openssl/asn1.h +++ b/Sources/BoringSSL/include/openssl/asn1.h @@ -71,6 +71,14 @@ extern "C" { #endif + +/* Legacy ASN.1 library. + * + * This header is part of OpenSSL's ASN.1 implementation. It is retained for + * compatibility but otherwise underdocumented and not actively maintained. Use + * the new |CBS| and |CBB| library in instead. 
*/ + + #define V_ASN1_UNIVERSAL 0x00 #define V_ASN1_APPLICATION 0x40 #define V_ASN1_CONTEXT_SPECIFIC 0x80 @@ -78,7 +86,6 @@ extern "C" { #define V_ASN1_CONSTRUCTED 0x20 #define V_ASN1_PRIMITIVE_TAG 0x1f -#define V_ASN1_PRIMATIVE_TAG 0x1f #define V_ASN1_APP_CHOOSE -2 /* let the recipient choose */ #define V_ASN1_OTHER -3 /* used in ASN1_TYPE */ @@ -149,51 +156,12 @@ extern "C" { #define MBSTRING_BMP (MBSTRING_FLAG|2) #define MBSTRING_UNIV (MBSTRING_FLAG|4) -#define SMIME_OLDMIME 0x400 -#define SMIME_CRLFEOL 0x800 -#define SMIME_STREAM 0x1000 - #define DECLARE_ASN1_SET_OF(type) /* filled in by mkstack.pl */ #define IMPLEMENT_ASN1_SET_OF(type) /* nothing, no longer needed */ -/* We MUST make sure that, except for constness, asn1_ctx_st and - asn1_const_ctx are exactly the same. Fortunately, as soon as - the old ASN1 parsing macros are gone, we can throw this away - as well... */ -typedef struct asn1_ctx_st - { - unsigned char *p;/* work char pointer */ - int eos; /* end of sequence read for indefinite encoding */ - int error; /* error code to use when returning an error */ - int inf; /* constructed if 0x20, indefinite is 0x21 */ - int tag; /* tag from last 'get object' */ - int xclass; /* class from last 'get object' */ - long slen; /* length of last 'get object' */ - unsigned char *max; /* largest value of p allowed */ - unsigned char *q;/* temporary variable */ - unsigned char **pp;/* variable */ - int line; /* used in error processing */ - } ASN1_CTX; - -typedef struct asn1_const_ctx_st - { - const unsigned char *p;/* work char pointer */ - int eos; /* end of sequence read for indefinite encoding */ - int error; /* error code to use when returning an error */ - int inf; /* constructed if 0x20, indefinite is 0x21 */ - int tag; /* tag from last 'get object' */ - int xclass; /* class from last 'get object' */ - long slen; /* length of last 'get object' */ - const unsigned char *max; /* largest value of p allowed */ - const unsigned char *q;/* temporary variable */ - const unsigned char **pp;/* variable */ - int line; /* used in error processing */ - } ASN1_const_CTX; - /* These are used internally in the ASN1_OBJECT to keep track of * whether the names and data need to be free()ed */ #define ASN1_OBJECT_FLAG_DYNAMIC 0x01 /* internal use */ -#define ASN1_OBJECT_FLAG_CRITICAL 0x02 /* critical x509v3 object id */ #define ASN1_OBJECT_FLAG_DYNAMIC_STRINGS 0x04 /* internal use */ #define ASN1_OBJECT_FLAG_DYNAMIC_DATA 0x08 /* internal use */ struct asn1_object_st @@ -205,7 +173,7 @@ struct asn1_object_st int flags; /* Should we free this one */ }; -DECLARE_STACK_OF(ASN1_OBJECT) +DEFINE_STACK_OF(ASN1_OBJECT) #define ASN1_STRING_FLAG_BITS_LEFT 0x08 /* Set if 0x07 has bits left value */ /* This indicates that the ASN1_STRING is not a real value but just a place @@ -214,12 +182,6 @@ DECLARE_STACK_OF(ASN1_OBJECT) */ #define ASN1_STRING_FLAG_NDEF 0x010 -/* This flag is used by the CMS code to indicate that a string is not - * complete and is a place holder for content when it had all been - * accessed. The flag will be reset when content has been written to it. - */ - -#define ASN1_STRING_FLAG_CONT 0x020 /* This flag is used by ASN1 code to indicate an ASN1_STRING is an MSTRING * type. 
*/ @@ -257,9 +219,6 @@ typedef struct ASN1_ENCODING_st unsigned alias_only_on_next_parse:1; } ASN1_ENCODING; -/* Used with ASN1 LONG type: if a long is set to this it is omitted */ -#define ASN1_LONG_UNDEF 0x7fffffffL - #define STABLE_FLAGS_MALLOC 0x01 #define STABLE_NO_MASK 0x02 #define DIRSTRING_TYPE \ @@ -349,11 +308,8 @@ typedef struct ASN1_VALUE_st ASN1_VALUE; #define CHECKED_PPTR_OF(type, p) \ ((void**) (1 ? p : (type**)0)) -#define TYPEDEF_D2I_OF(type) typedef type *d2i_of_##type(type **,const unsigned char **,long) -#define TYPEDEF_I2D_OF(type) typedef int i2d_of_##type(const type *,unsigned char **) -#define TYPEDEF_D2I2D_OF(type) TYPEDEF_D2I_OF(type); TYPEDEF_I2D_OF(type) - -TYPEDEF_D2I2D_OF(void); +typedef void *d2i_of_void(void **, const unsigned char **, long); +typedef int i2d_of_void(const void *, unsigned char **); /* The following macros and typedefs allow an ASN1_ITEM * to be embedded in a structure and referenced. Since @@ -483,6 +439,7 @@ typedef const ASN1_ITEM ASN1_ITEM_EXP; ASN1_STRFLGS_DUMP_UNKNOWN | \ ASN1_STRFLGS_DUMP_DER) +DEFINE_STACK_OF(ASN1_INTEGER) DECLARE_ASN1_SET_OF(ASN1_INTEGER) struct asn1_type_st @@ -515,6 +472,7 @@ struct asn1_type_st } value; }; +DEFINE_STACK_OF(ASN1_TYPE) DECLARE_ASN1_SET_OF(ASN1_TYPE) typedef STACK_OF(ASN1_TYPE) ASN1_SEQUENCE_ANY; @@ -530,12 +488,6 @@ struct X509_algor_st DECLARE_ASN1_FUNCTIONS(X509_ALGOR) -typedef struct NETSCAPE_X509_st - { - ASN1_OCTET_STRING *header; - X509 *cert; - } NETSCAPE_X509; - /* This is used to contain a list of bit names */ typedef struct BIT_STRING_BITNAME_st { int bitnum; @@ -708,6 +660,7 @@ OPENSSL_EXPORT int ASN1_STRING_length(const ASN1_STRING *x); OPENSSL_EXPORT void ASN1_STRING_length_set(ASN1_STRING *x, int n); OPENSSL_EXPORT int ASN1_STRING_type(ASN1_STRING *x); OPENSSL_EXPORT unsigned char * ASN1_STRING_data(ASN1_STRING *x); +OPENSSL_EXPORT const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *x); DECLARE_ASN1_FUNCTIONS(ASN1_BIT_STRING) OPENSSL_EXPORT int i2c_ASN1_BIT_STRING(ASN1_BIT_STRING *a,unsigned char **pp); @@ -717,10 +670,6 @@ OPENSSL_EXPORT int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *a, int n, int value OPENSSL_EXPORT int ASN1_BIT_STRING_get_bit(ASN1_BIT_STRING *a, int n); OPENSSL_EXPORT int ASN1_BIT_STRING_check(ASN1_BIT_STRING *a, unsigned char *flags, int flags_len); -OPENSSL_EXPORT int ASN1_BIT_STRING_name_print(BIO *out, ASN1_BIT_STRING *bs, BIT_STRING_BITNAME *tbl, int indent); -OPENSSL_EXPORT int ASN1_BIT_STRING_num_asc(char *name, BIT_STRING_BITNAME *tbl); -OPENSSL_EXPORT int ASN1_BIT_STRING_set_asc(ASN1_BIT_STRING *bs, char *name, int value, BIT_STRING_BITNAME *tbl); - OPENSSL_EXPORT int i2d_ASN1_BOOLEAN(int a,unsigned char **pp); OPENSSL_EXPORT int d2i_ASN1_BOOLEAN(int *a,const unsigned char **pp,long length); @@ -788,7 +737,6 @@ OPENSSL_EXPORT int i2a_ASN1_OBJECT(BIO *bp,ASN1_OBJECT *a); OPENSSL_EXPORT int i2a_ASN1_STRING(BIO *bp, ASN1_STRING *a, int type); OPENSSL_EXPORT int i2t_ASN1_OBJECT(char *buf,int buf_len,ASN1_OBJECT *a); -OPENSSL_EXPORT int a2d_ASN1_OBJECT(unsigned char *out,int olen, const char *buf, int num); OPENSSL_EXPORT ASN1_OBJECT *ASN1_OBJECT_create(int nid, unsigned char *data,int len, const char *sn, const char *ln); OPENSSL_EXPORT int ASN1_INTEGER_set(ASN1_INTEGER *a, long v); @@ -807,14 +755,8 @@ OPENSSL_EXPORT int ASN1_PRINTABLE_type(const unsigned char *s, int max); OPENSSL_EXPORT unsigned long ASN1_tag2bit(int tag); -/* PARSING */ -OPENSSL_EXPORT int asn1_Finish(ASN1_CTX *c); -OPENSSL_EXPORT int asn1_const_Finish(ASN1_const_CTX *c); - /* 
SPECIALS */ OPENSSL_EXPORT int ASN1_get_object(const unsigned char **pp, long *plength, int *ptag, int *pclass, long omax); -OPENSSL_EXPORT int ASN1_check_infinite_end(unsigned char **p,long len); -OPENSSL_EXPORT int ASN1_const_check_infinite_end(const unsigned char **p,long len); OPENSSL_EXPORT void ASN1_put_object(unsigned char **pp, int constructed, int length, int tag, int xclass); OPENSSL_EXPORT int ASN1_put_eoc(unsigned char **pp); OPENSSL_EXPORT int ASN1_object_size(int constructed, int length, int tag); @@ -899,10 +841,6 @@ OPENSSL_EXPORT const char *ASN1_tag2str(int tag); /* Used to load and write netscape format cert */ -DECLARE_ASN1_FUNCTIONS(NETSCAPE_X509) - -int ASN1_UNIVERSALSTRING_to_string(ASN1_UNIVERSALSTRING *s); - OPENSSL_EXPORT void *ASN1_item_unpack(ASN1_STRING *oct, const ASN1_ITEM *it); OPENSSL_EXPORT ASN1_STRING *ASN1_item_pack(void *obj, const ASN1_ITEM *it, ASN1_OCTET_STRING **oct); @@ -938,8 +876,6 @@ extern "C++" { namespace bssl { -BORINGSSL_MAKE_STACK_DELETER(ASN1_OBJECT, ASN1_OBJECT_free) - BORINGSSL_MAKE_DELETER(ASN1_OBJECT, ASN1_OBJECT_free) BORINGSSL_MAKE_DELETER(ASN1_STRING, ASN1_STRING_free) BORINGSSL_MAKE_DELETER(ASN1_TYPE, ASN1_TYPE_free) diff --git a/Sources/BoringSSL/include/openssl/asn1t.h b/Sources/BoringSSL/include/openssl/asn1t.h index 10d32c436..7bd77017a 100644 --- a/Sources/BoringSSL/include/openssl/asn1t.h +++ b/Sources/BoringSSL/include/openssl/asn1t.h @@ -60,18 +60,18 @@ #include #include -#ifdef OPENSSL_BUILD_SHLIBCRYPTO -# undef OPENSSL_EXTERN -# define OPENSSL_EXTERN OPENSSL_EXPORT -#endif - -/* ASN1 template defines, structures and functions */ - #ifdef __cplusplus extern "C" { #endif +/* Legacy ASN.1 library template definitions. + * + * This header is used to define new types in OpenSSL's ASN.1 implementation. It + * is deprecated and will be unexported from the library. Use the new |CBS| and + * |CBB| library in instead. 
*/ + + /* Macro to obtain ASN1_ADB pointer from a type (only used internally) */ #define ASN1_ADB_ptr(iptr) ((const ASN1_ADB *)(iptr)) @@ -407,10 +407,12 @@ ASN1_ITEM_EXP *item; /* Relevant ASN1_ITEM or ASN1_ADB */ typedef struct ASN1_ADB_TABLE_st ASN1_ADB_TABLE; typedef struct ASN1_ADB_st ASN1_ADB; +typedef struct asn1_must_be_null_st ASN1_MUST_BE_NULL; + struct ASN1_ADB_st { unsigned long flags; /* Various flags */ unsigned long offset; /* Offset of selector field */ - STACK_OF(ASN1_ADB_TABLE) **app_items; /* Application defined items */ + ASN1_MUST_BE_NULL *unused; const ASN1_ADB_TABLE *tbl; /* Table of possible types */ long tblcount; /* Number of entries in tbl */ const ASN1_TEMPLATE *default_tt; /* Type to use if no match */ @@ -849,12 +851,8 @@ DECLARE_ASN1_ITEM(ASN1_BOOLEAN) DECLARE_ASN1_ITEM(ASN1_TBOOLEAN) DECLARE_ASN1_ITEM(ASN1_FBOOLEAN) DECLARE_ASN1_ITEM(ASN1_SEQUENCE) -DECLARE_ASN1_ITEM(CBIGNUM) -DECLARE_ASN1_ITEM(BIGNUM) -DECLARE_ASN1_ITEM(LONG) -DECLARE_ASN1_ITEM(ZLONG) -DECLARE_STACK_OF(ASN1_VALUE) +DEFINE_STACK_OF(ASN1_VALUE) /* Functions used internally by the ASN1 code */ @@ -864,12 +862,10 @@ int ASN1_template_new(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt); int ASN1_primitive_new(ASN1_VALUE **pval, const ASN1_ITEM *it); void ASN1_template_free(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt); -int ASN1_template_d2i(ASN1_VALUE **pval, const unsigned char **in, long len, const ASN1_TEMPLATE *tt); int ASN1_item_ex_d2i(ASN1_VALUE **pval, const unsigned char **in, long len, const ASN1_ITEM *it, int tag, int aclass, char opt, ASN1_TLC *ctx); int ASN1_item_ex_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_ITEM *it, int tag, int aclass); -int ASN1_template_i2d(ASN1_VALUE **pval, unsigned char **out, const ASN1_TEMPLATE *tt); void ASN1_primitive_free(ASN1_VALUE **pval, const ASN1_ITEM *it); int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cont, int *putype, const ASN1_ITEM *it); diff --git a/Sources/BoringSSL/include/openssl/base.h b/Sources/BoringSSL/include/openssl/base.h index ceb693df6..1854a143a 100644 --- a/Sources/BoringSSL/include/openssl/base.h +++ b/Sources/BoringSSL/include/openssl/base.h @@ -56,17 +56,21 @@ #define OPENSSL_NO_ASM -/* This file should be the first included by all BoringSSL headers. */ +// This file should be the first included by all BoringSSL headers. #include #include #include #if defined(__MINGW32__) -/* stdio.h is needed on MinGW for __MINGW_PRINTF_FORMAT. */ +// stdio.h is needed on MinGW for __MINGW_PRINTF_FORMAT. #include #endif +// Include a BoringSSL-only header so consumers including this header without +// setting up include paths do not accidentally pick up the system +// opensslconf.h. +#include #include #if defined(BORINGSSL_PREFIX) @@ -105,6 +109,10 @@ extern "C" { #elif defined(__myriad2__) #define OPENSSL_32_BIT #else +// Note BoringSSL only supports standard 32-bit and 64-bit two's-complement, +// little-endian architectures. Functions will not produce the correct answer +// on other systems. Run the crypto_test binary, notably +// crypto/compiler_test.cc, before adding a new architecture. 
#error "Unknown target CPU" #endif @@ -129,21 +137,23 @@ extern "C" { #define OPENSSL_NO_THREADS #endif +#if !defined(OPENSSL_NO_THREADS) +#define OPENSSL_THREADS +#endif + #define OPENSSL_IS_BORINGSSL -#define BORINGSSL_201512 -#define BORINGSSL_201603 -#define OPENSSL_VERSION_NUMBER 0x100020af +#define OPENSSL_VERSION_NUMBER 0x1010007f #define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER -/* BORINGSSL_API_VERSION is a positive integer that increments as BoringSSL - * changes over time. The value itself is not meaningful. It will be incremented - * whenever is convenient to coordinate an API change with consumers. This will - * not denote any special point in development. - * - * A consumer may use this symbol in the preprocessor to temporarily build - * against multiple revisions of BoringSSL at the same time. It is not - * recommended to do so for longer than is necessary. */ -#define BORINGSSL_API_VERSION 2 +// BORINGSSL_API_VERSION is a positive integer that increments as BoringSSL +// changes over time. The value itself is not meaningful. It will be incremented +// whenever is convenient to coordinate an API change with consumers. This will +// not denote any special point in development. +// +// A consumer may use this symbol in the preprocessor to temporarily build +// against multiple revisions of BoringSSL at the same time. It is not +// recommended to do so for longer than is necessary. +#define BORINGSSL_API_VERSION 6 #if defined(BORINGSSL_SHARED_LIBRARY) @@ -155,7 +165,7 @@ extern "C" { #define OPENSSL_EXPORT __declspec(dllimport) #endif -#else /* defined(OPENSSL_WINDOWS) */ +#else // defined(OPENSSL_WINDOWS) #if defined(BORINGSSL_IMPLEMENTATION) #define OPENSSL_EXPORT __attribute__((visibility("default"))) @@ -163,19 +173,19 @@ extern "C" { #define OPENSSL_EXPORT #endif -#endif /* defined(OPENSSL_WINDOWS) */ +#endif // defined(OPENSSL_WINDOWS) -#else /* defined(BORINGSSL_SHARED_LIBRARY) */ +#else // defined(BORINGSSL_SHARED_LIBRARY) #define OPENSSL_EXPORT -#endif /* defined(BORINGSSL_SHARED_LIBRARY) */ +#endif // defined(BORINGSSL_SHARED_LIBRARY) -#if defined(__GNUC__) -/* MinGW has two different printf implementations. Ensure the format macro - * matches the selected implementation. See - * https://sourceforge.net/p/mingw-w64/wiki2/gnu%20printf/. */ +#if defined(__GNUC__) || defined(__clang__) +// MinGW has two different printf implementations. Ensure the format macro +// matches the selected implementation. See +// https://sourceforge.net/p/mingw-w64/wiki2/gnu%20printf/. #if defined(__MINGW_PRINTF_FORMAT) #define OPENSSL_PRINTF_FORMAT_FUNC(string_index, first_to_check) \ __attribute__( \ @@ -188,19 +198,47 @@ extern "C" { #define OPENSSL_PRINTF_FORMAT_FUNC(string_index, first_to_check) #endif -/* OPENSSL_MSVC_PRAGMA emits a pragma on MSVC and nothing on other compilers. */ +// OPENSSL_MSVC_PRAGMA emits a pragma on MSVC and nothing on other compilers. #if defined(_MSC_VER) #define OPENSSL_MSVC_PRAGMA(arg) __pragma(arg) #else #define OPENSSL_MSVC_PRAGMA(arg) #endif +#if defined(__GNUC__) || defined(__clang__) +#define OPENSSL_UNUSED __attribute__((unused)) +#else +#define OPENSSL_UNUSED +#endif + #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) && \ !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) #define BORINGSSL_UNSAFE_DETERMINISTIC_MODE #endif -/* CRYPTO_THREADID is a dummy value. 
*/ +#if defined(__has_feature) +#if __has_feature(address_sanitizer) +#define OPENSSL_ASAN +#endif +#if __has_feature(memory_sanitizer) +#define OPENSSL_MSAN +#endif +#endif + +// Have a generic fall-through for different versions of C/C++. +#if defined(__cplusplus) && __cplusplus >= 201703L +#define OPENSSL_FALLTHROUGH [[fallthrough]] +#elif defined(__cplusplus) && __cplusplus >= 201103L && defined(__clang__) +#define OPENSSL_FALLTHROUGH [[clang::fallthrough]] +#elif defined(__cplusplus) && __cplusplus >= 201103L && __GNUC__ >= 7 +#define OPENSSL_FALLTHROUGH [[gnu::fallthrough]] +#elif __GNUC__ >= 7 // gcc 7 +#define OPENSSL_FALLTHROUGH __attribute__ ((fallthrough)) +#else // C++11 on gcc 6, and all other cases +#define OPENSSL_FALLTHROUGH +#endif + +// CRYPTO_THREADID is a dummy value. typedef int CRYPTO_THREADID; typedef int ASN1_BOOLEAN; @@ -225,14 +263,12 @@ typedef struct asn1_string_st ASN1_UTCTIME; typedef struct asn1_string_st ASN1_UTF8STRING; typedef struct asn1_string_st ASN1_VISIBLESTRING; typedef struct asn1_type_st ASN1_TYPE; - typedef struct AUTHORITY_KEYID_st AUTHORITY_KEYID; typedef struct BASIC_CONSTRAINTS_st BASIC_CONSTRAINTS; typedef struct DIST_POINT_st DIST_POINT; typedef struct DSA_SIG_st DSA_SIG; typedef struct ISSUING_DIST_POINT_st ISSUING_DIST_POINT; typedef struct NAME_CONSTRAINTS_st NAME_CONSTRAINTS; -typedef struct Netscape_certificate_sequence NETSCAPE_CERT_SEQUENCE; typedef struct Netscape_spkac_st NETSCAPE_SPKAC; typedef struct Netscape_spki_st NETSCAPE_SPKI; typedef struct RIPEMD160state_st RIPEMD160_CTX; @@ -248,7 +284,6 @@ typedef struct X509_extension_st X509_EXTENSION; typedef struct X509_info_st X509_INFO; typedef struct X509_name_entry_st X509_NAME_ENTRY; typedef struct X509_name_st X509_NAME; -typedef struct X509_objects_st X509_OBJECTS; typedef struct X509_pubkey_st X509_PUBKEY; typedef struct X509_req_info_st X509_REQ_INFO; typedef struct X509_req_st X509_REQ; @@ -289,6 +324,7 @@ typedef struct evp_pkey_st EVP_PKEY; typedef struct hmac_ctx_st HMAC_CTX; typedef struct md4_state_st MD4_CTX; typedef struct md5_state_st MD5_CTX; +typedef struct ossl_init_settings_st OPENSSL_INIT_SETTINGS; typedef struct pkcs12_st PKCS12; typedef struct pkcs8_priv_key_info_st PKCS8_PRIV_KEY_INFO; typedef struct private_key_st X509_PKEY; @@ -303,15 +339,15 @@ typedef struct spake2_ctx_st SPAKE2_CTX; typedef struct srtp_protection_profile_st SRTP_PROTECTION_PROFILE; typedef struct ssl_cipher_st SSL_CIPHER; typedef struct ssl_ctx_st SSL_CTX; -typedef struct ssl_custom_extension SSL_CUSTOM_EXTENSION; typedef struct ssl_method_st SSL_METHOD; +typedef struct ssl_private_key_method_st SSL_PRIVATE_KEY_METHOD; typedef struct ssl_session_st SSL_SESSION; typedef struct ssl_st SSL; +typedef struct ssl_ticket_aead_method_st SSL_TICKET_AEAD_METHOD; typedef struct st_ERR_FNS ERR_FNS; typedef struct v3_ext_ctx X509V3_CTX; typedef struct x509_attributes_st X509_ATTRIBUTE; typedef struct x509_cert_aux_st X509_CERT_AUX; -typedef struct x509_cert_pair_st X509_CERT_PAIR; typedef struct x509_cinf_st X509_CINF; typedef struct x509_crl_method_st X509_CRL_METHOD; typedef struct x509_lookup_st X509_LOOKUP; @@ -325,7 +361,10 @@ typedef void *OPENSSL_BLOCK; #if defined(__cplusplus) -} /* extern C */ +} // extern C +#elif !defined(BORINGSSL_NO_CXX) +#define BORINGSSL_NO_CXX +#endif // MSVC doesn't set __cplusplus to 201103 to indicate C++11 support (see // https://connect.microsoft.com/VisualStudio/feedback/details/763051/a-value-of-predefined-macro-cplusplus-is-still-199711l) @@ -350,19 +389,18 @@ 
extern "C++" { #if defined(BORINGSSL_NO_CXX) #define BORINGSSL_MAKE_DELETER(type, deleter) -#define BORINGSSL_MAKE_STACK_DELETER(type, deleter) #else extern "C++" { -#include - namespace bssl { namespace internal { -template +// The Enable parameter is ignored and only exists so specializations can use +// SFINAE. +template struct DeleterImpl {}; template @@ -393,6 +431,9 @@ class StackAllocated { T *get() { return &ctx_; } const T *get() const { return &ctx_; } + T *operator->() { return &ctx_; } + const T *operator->() const { return &ctx_; } + void Reset() { cleanup(&ctx_); init(&ctx_); @@ -412,30 +453,16 @@ class StackAllocated { }; \ } -// This makes a unique_ptr to STACK_OF(type) that owns all elements on the -// stack, i.e. it uses sk_pop_free() to clean up. -#define BORINGSSL_MAKE_STACK_DELETER(type, deleter) \ - namespace internal { \ - template <> \ - struct DeleterImpl { \ - static void Free(STACK_OF(type) *ptr) { \ - sk_##type##_pop_free(ptr, deleter); \ - } \ - }; \ - } - // Holds ownership of heap-allocated BoringSSL structures. Sample usage: -// bssl::UniquePtr rsa(RSA_new()); +// bssl::UniquePtr rsa(RSA_new()); // bssl::UniquePtr bio(BIO_new(BIO_s_mem())); template using UniquePtr = std::unique_ptr>; } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif // !BORINGSSL_NO_CXX -#endif - -#endif /* OPENSSL_HEADER_BASE_H */ +#endif // OPENSSL_HEADER_BASE_H diff --git a/Sources/BoringSSL/include/openssl/base64.h b/Sources/BoringSSL/include/openssl/base64.h index 4bf3888c0..ef760886b 100644 --- a/Sources/BoringSSL/include/openssl/base64.h +++ b/Sources/BoringSSL/include/openssl/base64.h @@ -64,124 +64,124 @@ extern "C" { #endif -/* base64 functions. - * - * For historical reasons, these functions have the EVP_ prefix but just do - * base64 encoding and decoding. */ +// base64 functions. +// +// For historical reasons, these functions have the EVP_ prefix but just do +// base64 encoding and decoding. -/* Encoding */ +// Encoding -/* EVP_EncodeBlock encodes |src_len| bytes from |src| and writes the - * result to |dst| with a trailing NUL. It returns the number of bytes - * written, not including this trailing NUL. */ +// EVP_EncodeBlock encodes |src_len| bytes from |src| and writes the +// result to |dst| with a trailing NUL. It returns the number of bytes +// written, not including this trailing NUL. OPENSSL_EXPORT size_t EVP_EncodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len); -/* EVP_EncodedLength sets |*out_len| to the number of bytes that will be needed - * to call |EVP_EncodeBlock| on an input of length |len|. This includes the - * final NUL that |EVP_EncodeBlock| writes. It returns one on success or zero - * on error. */ +// EVP_EncodedLength sets |*out_len| to the number of bytes that will be needed +// to call |EVP_EncodeBlock| on an input of length |len|. This includes the +// final NUL that |EVP_EncodeBlock| writes. It returns one on success or zero +// on error. OPENSSL_EXPORT int EVP_EncodedLength(size_t *out_len, size_t len); -/* Decoding */ +// Decoding -/* EVP_DecodedLength sets |*out_len| to the maximum number of bytes that will - * be needed to call |EVP_DecodeBase64| on an input of length |len|. It returns - * one on success or zero if |len| is not a valid length for a base64-encoded - * string. */ +// EVP_DecodedLength sets |*out_len| to the maximum number of bytes that will +// be needed to call |EVP_DecodeBase64| on an input of length |len|. It returns +// one on success or zero if |len| is not a valid length for a base64-encoded +// string. 
OPENSSL_EXPORT int EVP_DecodedLength(size_t *out_len, size_t len); -/* EVP_DecodeBase64 decodes |in_len| bytes from base64 and writes - * |*out_len| bytes to |out|. |max_out| is the size of the output - * buffer. If it is not enough for the maximum output size, the - * operation fails. It returns one on success or zero on error. */ +// EVP_DecodeBase64 decodes |in_len| bytes from base64 and writes +// |*out_len| bytes to |out|. |max_out| is the size of the output +// buffer. If it is not enough for the maximum output size, the +// operation fails. It returns one on success or zero on error. OPENSSL_EXPORT int EVP_DecodeBase64(uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *in, size_t in_len); -/* Deprecated functions. - * - * OpenSSL provides a streaming base64 implementation, however its behavior is - * very specific to PEM. It is also very lenient of invalid input. Use of any of - * these functions is thus deprecated. */ +// Deprecated functions. +// +// OpenSSL provides a streaming base64 implementation, however its behavior is +// very specific to PEM. It is also very lenient of invalid input. Use of any of +// these functions is thus deprecated. -/* EVP_EncodeInit initialises |*ctx|, which is typically stack - * allocated, for an encoding operation. - * - * NOTE: The encoding operation breaks its output with newlines every - * 64 characters of output (48 characters of input). Use - * EVP_EncodeBlock to encode raw base64. */ +// EVP_EncodeInit initialises |*ctx|, which is typically stack +// allocated, for an encoding operation. +// +// NOTE: The encoding operation breaks its output with newlines every +// 64 characters of output (48 characters of input). Use +// EVP_EncodeBlock to encode raw base64. OPENSSL_EXPORT void EVP_EncodeInit(EVP_ENCODE_CTX *ctx); -/* EVP_EncodeUpdate encodes |in_len| bytes from |in| and writes an encoded - * version of them to |out| and sets |*out_len| to the number of bytes written. - * Some state may be contained in |ctx| so |EVP_EncodeFinal| must be used to - * flush it before using the encoded data. */ +// EVP_EncodeUpdate encodes |in_len| bytes from |in| and writes an encoded +// version of them to |out| and sets |*out_len| to the number of bytes written. +// Some state may be contained in |ctx| so |EVP_EncodeFinal| must be used to +// flush it before using the encoded data. OPENSSL_EXPORT void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, size_t in_len); -/* EVP_EncodeFinal flushes any remaining output bytes from |ctx| to |out| and - * sets |*out_len| to the number of bytes written. */ +// EVP_EncodeFinal flushes any remaining output bytes from |ctx| to |out| and +// sets |*out_len| to the number of bytes written. OPENSSL_EXPORT void EVP_EncodeFinal(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len); -/* EVP_DecodeInit initialises |*ctx|, which is typically stack allocated, for - * a decoding operation. - * - * TODO(davidben): This isn't a straight-up base64 decode either. Document - * and/or fix exactly what's going on here; maximum line length and such. */ +// EVP_DecodeInit initialises |*ctx|, which is typically stack allocated, for +// a decoding operation. +// +// TODO(davidben): This isn't a straight-up base64 decode either. Document +// and/or fix exactly what's going on here; maximum line length and such. 
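The matching one-shot decode, again as an editorial sketch rather than part of the patch: EVP_DecodedLength bounds the output buffer, and EVP_DecodeBase64 rejects invalid input while reporting the exact decoded length.

#include <openssl/base64.h>
#include <openssl/mem.h>

/* Decodes |in_len| bytes of base64 from |in| into a fresh buffer and sets
 * |*out_len| to the decoded length. Returns NULL on invalid input or
 * allocation failure; the caller frees the result with OPENSSL_free. */
static uint8_t *base64_decode(const uint8_t *in, size_t in_len,
                              size_t *out_len) {
  size_t max_len;
  if (!EVP_DecodedLength(&max_len, in_len)) {
    return NULL;  /* |in_len| is not a valid base64 length */
  }
  uint8_t *out = OPENSSL_malloc(max_len);
  if (out == NULL) {
    return NULL;
  }
  if (!EVP_DecodeBase64(out, out_len, max_len, in, in_len)) {
    OPENSSL_free(out);
    return NULL;
  }
  return out;
}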
OPENSSL_EXPORT void EVP_DecodeInit(EVP_ENCODE_CTX *ctx); -/* EVP_DecodeUpdate decodes |in_len| bytes from |in| and writes the decoded - * data to |out| and sets |*out_len| to the number of bytes written. Some state - * may be contained in |ctx| so |EVP_DecodeFinal| must be used to flush it - * before using the encoded data. - * - * It returns -1 on error, one if a full line of input was processed and zero - * if the line was short (i.e. it was the last line). */ +// EVP_DecodeUpdate decodes |in_len| bytes from |in| and writes the decoded +// data to |out| and sets |*out_len| to the number of bytes written. Some state +// may be contained in |ctx| so |EVP_DecodeFinal| must be used to flush it +// before using the encoded data. +// +// It returns -1 on error, one if a full line of input was processed and zero +// if the line was short (i.e. it was the last line). OPENSSL_EXPORT int EVP_DecodeUpdate(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, size_t in_len); -/* EVP_DecodeFinal flushes any remaining output bytes from |ctx| to |out| and - * sets |*out_len| to the number of bytes written. It returns one on success - * and minus one on error. */ +// EVP_DecodeFinal flushes any remaining output bytes from |ctx| to |out| and +// sets |*out_len| to the number of bytes written. It returns one on success +// and minus one on error. OPENSSL_EXPORT int EVP_DecodeFinal(EVP_ENCODE_CTX *ctx, uint8_t *out, int *out_len); -/* EVP_DecodeBlock encodes |src_len| bytes from |src| and writes the result to - * |dst|. It returns the number of bytes written or -1 on error. - * - * WARNING: EVP_DecodeBlock's return value does not take padding into - * account. It also strips leading whitespace and trailing - * whitespace and minuses. */ +// EVP_DecodeBlock encodes |src_len| bytes from |src| and writes the result to +// |dst|. It returns the number of bytes written or -1 on error. +// +// WARNING: EVP_DecodeBlock's return value does not take padding into +// account. It also strips leading whitespace and trailing +// whitespace and minuses. OPENSSL_EXPORT int EVP_DecodeBlock(uint8_t *dst, const uint8_t *src, size_t src_len); struct evp_encode_ctx_st { - /* data_used indicates the number of bytes of |data| that are valid. When - * encoding, |data| will be filled and encoded as a lump. When decoding, only - * the first four bytes of |data| will be used. */ + // data_used indicates the number of bytes of |data| that are valid. When + // encoding, |data| will be filled and encoded as a lump. When decoding, only + // the first four bytes of |data| will be used. unsigned data_used; uint8_t data[48]; - /* eof_seen indicates that the end of the base64 data has been seen when - * decoding. Only whitespace can follow. */ + // eof_seen indicates that the end of the base64 data has been seen when + // decoding. Only whitespace can follow. char eof_seen; - /* error_encountered indicates that invalid base64 data was found. This will - * cause all future calls to fail. */ + // error_encountered indicates that invalid base64 data was found. This will + // cause all future calls to fail. 
char error_encountered; }; #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_BASE64_H */ +#endif // OPENSSL_HEADER_BASE64_H diff --git a/Sources/BoringSSL/include/openssl/bio.h b/Sources/BoringSSL/include/openssl/bio.h index 6ba1421dd..f87167ea6 100644 --- a/Sources/BoringSSL/include/openssl/bio.h +++ b/Sources/BoringSSL/include/openssl/bio.h @@ -59,10 +59,10 @@ #include -#include /* For FILE */ +#include // For FILE #include -#include /* for ERR_print_errors_fp */ +#include // for ERR_print_errors_fp #include #include #include @@ -72,492 +72,498 @@ extern "C" { #endif -/* BIO abstracts over a file-descriptor like interface. */ +// BIO abstracts over a file-descriptor like interface. -/* Allocation and freeing. */ +// Allocation and freeing. -/* BIO_new creates a new BIO with the given type and a reference count of one. - * It returns the fresh |BIO|, or NULL on error. */ -OPENSSL_EXPORT BIO *BIO_new(const BIO_METHOD *type); +DEFINE_STACK_OF(BIO) -/* BIO_free decrements the reference count of |bio|. If the reference count - * drops to zero, it (optionally) calls the BIO's callback with |BIO_CB_FREE|, - * frees the ex_data and then, if the BIO has a destroy callback for the - * method, calls it. Finally it frees |bio| itself. It then repeats that for - * the next BIO in the chain, if any. - * - * It returns one on success or zero otherwise. */ +// BIO_new creates a new BIO with the given method and a reference count of one. +// It returns the fresh |BIO|, or NULL on error. +OPENSSL_EXPORT BIO *BIO_new(const BIO_METHOD *method); + +// BIO_free decrements the reference count of |bio|. If the reference count +// drops to zero, it calls the destroy callback, if present, on the method and +// frees |bio| itself. It then repeats that for the next BIO in the chain, if +// any. +// +// It returns one on success or zero otherwise. OPENSSL_EXPORT int BIO_free(BIO *bio); -/* BIO_vfree performs the same actions as |BIO_free|, but has a void return - * value. This is provided for API-compat. - * - * TODO(fork): remove. */ +// BIO_vfree performs the same actions as |BIO_free|, but has a void return +// value. This is provided for API-compat. +// +// TODO(fork): remove. OPENSSL_EXPORT void BIO_vfree(BIO *bio); -/* BIO_up_ref increments the reference count of |bio| and returns one. */ +// BIO_up_ref increments the reference count of |bio| and returns one. OPENSSL_EXPORT int BIO_up_ref(BIO *bio); -/* Basic I/O. */ +// Basic I/O. -/* BIO_read attempts to read |len| bytes into |data|. It returns the number of - * bytes read, zero on EOF, or a negative number on error. */ +// BIO_read attempts to read |len| bytes into |data|. It returns the number of +// bytes read, zero on EOF, or a negative number on error. OPENSSL_EXPORT int BIO_read(BIO *bio, void *data, int len); -/* BIO_gets "reads a line" from |bio| and puts at most |size| bytes into |buf|. - * It returns the number of bytes read or a negative number on error. The - * phrase "reads a line" is in quotes in the previous sentence because the - * exact operation depends on the BIO's method. For example, a digest BIO will - * return the digest in response to a |BIO_gets| call. - * - * TODO(fork): audit the set of BIOs that we end up needing. If all actually - * return a line for this call, remove the warning above. */ +// BIO_gets "reads a line" from |bio| and puts at most |size| bytes into |buf|. +// It returns the number of bytes read or a negative number on error. 
The +// phrase "reads a line" is in quotes in the previous sentence because the +// exact operation depends on the BIO's method. For example, a digest BIO will +// return the digest in response to a |BIO_gets| call. +// +// TODO(fork): audit the set of BIOs that we end up needing. If all actually +// return a line for this call, remove the warning above. OPENSSL_EXPORT int BIO_gets(BIO *bio, char *buf, int size); -/* BIO_write writes |len| bytes from |data| to BIO. It returns the number of - * bytes written or a negative number on error. */ +// BIO_write writes |len| bytes from |data| to BIO. It returns the number of +// bytes written or a negative number on error. OPENSSL_EXPORT int BIO_write(BIO *bio, const void *data, int len); -/* BIO_puts writes a NUL terminated string from |buf| to |bio|. It returns the - * number of bytes written or a negative number on error. */ +// BIO_puts writes a NUL terminated string from |buf| to |bio|. It returns the +// number of bytes written or a negative number on error. OPENSSL_EXPORT int BIO_puts(BIO *bio, const char *buf); -/* BIO_flush flushes any buffered output. It returns one on success and zero - * otherwise. */ +// BIO_flush flushes any buffered output. It returns one on success and zero +// otherwise. OPENSSL_EXPORT int BIO_flush(BIO *bio); -/* Low-level control functions. - * - * These are generic functions for sending control requests to a BIO. In - * general one should use the wrapper functions like |BIO_get_close|. */ +// Low-level control functions. +// +// These are generic functions for sending control requests to a BIO. In +// general one should use the wrapper functions like |BIO_get_close|. -/* BIO_ctrl sends the control request |cmd| to |bio|. The |cmd| argument should - * be one of the |BIO_C_*| values. */ +// BIO_ctrl sends the control request |cmd| to |bio|. The |cmd| argument should +// be one of the |BIO_C_*| values. OPENSSL_EXPORT long BIO_ctrl(BIO *bio, int cmd, long larg, void *parg); -/* BIO_ptr_ctrl acts like |BIO_ctrl| but passes the address of a |void*| - * pointer as |parg| and returns the value that is written to it, or NULL if - * the control request returns <= 0. */ +// BIO_ptr_ctrl acts like |BIO_ctrl| but passes the address of a |void*| +// pointer as |parg| and returns the value that is written to it, or NULL if +// the control request returns <= 0. OPENSSL_EXPORT char *BIO_ptr_ctrl(BIO *bp, int cmd, long larg); -/* BIO_int_ctrl acts like |BIO_ctrl| but passes the address of a copy of |iarg| - * as |parg|. */ +// BIO_int_ctrl acts like |BIO_ctrl| but passes the address of a copy of |iarg| +// as |parg|. OPENSSL_EXPORT long BIO_int_ctrl(BIO *bp, int cmd, long larg, int iarg); -/* BIO_reset resets |bio| to its initial state, the precise meaning of which - * depends on the concrete type of |bio|. It returns one on success and zero - * otherwise. */ +// BIO_reset resets |bio| to its initial state, the precise meaning of which +// depends on the concrete type of |bio|. It returns one on success and zero +// otherwise. OPENSSL_EXPORT int BIO_reset(BIO *bio); -/* BIO_eof returns non-zero when |bio| has reached end-of-file. The precise - * meaning of which depends on the concrete type of |bio|. Note that in the - * case of BIO_pair this always returns non-zero. */ +// BIO_eof returns non-zero when |bio| has reached end-of-file. The precise +// meaning of which depends on the concrete type of |bio|. Note that in the +// case of BIO_pair this always returns non-zero. 
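As a brief usage sketch for the basic I/O calls documented above (the helper name is illustrative and not from the patch), a caller that must push an entire buffer through a BIO typically loops on BIO_write and finishes with BIO_flush:

#include <limits.h>
#include <stdint.h>
#include <openssl/bio.h>

/* Writes all |len| bytes of |data| to |bio| and flushes any buffered output.
 * Returns one on success and zero on error. */
static int write_all_and_flush(BIO *bio, const uint8_t *data, size_t len) {
  while (len > 0) {
    int chunk = len > INT_MAX ? INT_MAX : (int)len;
    int written = BIO_write(bio, data, chunk);
    if (written <= 0) {
      return 0;  /* error, or a transient condition on a non-blocking BIO */
    }
    data += written;
    len -= (size_t)written;
  }
  return BIO_flush(bio);
}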
OPENSSL_EXPORT int BIO_eof(BIO *bio); -/* BIO_set_flags ORs |flags| with |bio->flags|. */ +// BIO_set_flags ORs |flags| with |bio->flags|. OPENSSL_EXPORT void BIO_set_flags(BIO *bio, int flags); -/* BIO_test_flags returns |bio->flags| AND |flags|. */ +// BIO_test_flags returns |bio->flags| AND |flags|. OPENSSL_EXPORT int BIO_test_flags(const BIO *bio, int flags); -/* BIO_should_read returns non-zero if |bio| encountered a temporary error - * while reading (i.e. EAGAIN), indicating that the caller should retry the - * read. */ +// BIO_should_read returns non-zero if |bio| encountered a temporary error +// while reading (i.e. EAGAIN), indicating that the caller should retry the +// read. OPENSSL_EXPORT int BIO_should_read(const BIO *bio); -/* BIO_should_write returns non-zero if |bio| encountered a temporary error - * while writing (i.e. EAGAIN), indicating that the caller should retry the - * write. */ +// BIO_should_write returns non-zero if |bio| encountered a temporary error +// while writing (i.e. EAGAIN), indicating that the caller should retry the +// write. OPENSSL_EXPORT int BIO_should_write(const BIO *bio); -/* BIO_should_retry returns non-zero if the reason that caused a failed I/O - * operation is temporary and thus the operation should be retried. Otherwise, - * it was a permanent error and it returns zero. */ +// BIO_should_retry returns non-zero if the reason that caused a failed I/O +// operation is temporary and thus the operation should be retried. Otherwise, +// it was a permanent error and it returns zero. OPENSSL_EXPORT int BIO_should_retry(const BIO *bio); -/* BIO_should_io_special returns non-zero if |bio| encountered a temporary - * error while performing a special I/O operation, indicating that the caller - * should retry. The operation that caused the error is returned by - * |BIO_get_retry_reason|. */ +// BIO_should_io_special returns non-zero if |bio| encountered a temporary +// error while performing a special I/O operation, indicating that the caller +// should retry. The operation that caused the error is returned by +// |BIO_get_retry_reason|. OPENSSL_EXPORT int BIO_should_io_special(const BIO *bio); -/* BIO_RR_CONNECT indicates that a connect would have blocked */ +// BIO_RR_CONNECT indicates that a connect would have blocked #define BIO_RR_CONNECT 0x02 -/* BIO_RR_ACCEPT indicates that an accept would have blocked */ +// BIO_RR_ACCEPT indicates that an accept would have blocked #define BIO_RR_ACCEPT 0x03 -/* BIO_get_retry_reason returns the special I/O operation that needs to be - * retried. The return value is one of the |BIO_RR_*| values. */ +// BIO_get_retry_reason returns the special I/O operation that needs to be +// retried. The return value is one of the |BIO_RR_*| values. OPENSSL_EXPORT int BIO_get_retry_reason(const BIO *bio); -/* BIO_clear_flags ANDs |bio->flags| with the bitwise-complement of |flags|. */ +// BIO_clear_flags ANDs |bio->flags| with the bitwise-complement of |flags|. OPENSSL_EXPORT void BIO_clear_flags(BIO *bio, int flags); -/* BIO_set_retry_read sets the |BIO_FLAGS_READ| and |BIO_FLAGS_SHOULD_RETRY| - * flags on |bio|. */ +// BIO_set_retry_read sets the |BIO_FLAGS_READ| and |BIO_FLAGS_SHOULD_RETRY| +// flags on |bio|. OPENSSL_EXPORT void BIO_set_retry_read(BIO *bio); -/* BIO_set_retry_write sets the |BIO_FLAGS_WRITE| and |BIO_FLAGS_SHOULD_RETRY| - * flags on |bio|. */ +// BIO_set_retry_write sets the |BIO_FLAGS_WRITE| and |BIO_FLAGS_SHOULD_RETRY| +// flags on |bio|. 
OPENSSL_EXPORT void BIO_set_retry_write(BIO *bio); -/* BIO_get_retry_flags gets the |BIO_FLAGS_READ|, |BIO_FLAGS_WRITE|, - * |BIO_FLAGS_IO_SPECIAL| and |BIO_FLAGS_SHOULD_RETRY| flags from |bio|. */ +// BIO_get_retry_flags gets the |BIO_FLAGS_READ|, |BIO_FLAGS_WRITE|, +// |BIO_FLAGS_IO_SPECIAL| and |BIO_FLAGS_SHOULD_RETRY| flags from |bio|. OPENSSL_EXPORT int BIO_get_retry_flags(BIO *bio); -/* BIO_clear_retry_flags clears the |BIO_FLAGS_READ|, |BIO_FLAGS_WRITE|, - * |BIO_FLAGS_IO_SPECIAL| and |BIO_FLAGS_SHOULD_RETRY| flags from |bio|. */ +// BIO_clear_retry_flags clears the |BIO_FLAGS_READ|, |BIO_FLAGS_WRITE|, +// |BIO_FLAGS_IO_SPECIAL| and |BIO_FLAGS_SHOULD_RETRY| flags from |bio|. OPENSSL_EXPORT void BIO_clear_retry_flags(BIO *bio); -/* BIO_method_type returns the type of |bio|, which is one of the |BIO_TYPE_*| - * values. */ +// BIO_method_type returns the type of |bio|, which is one of the |BIO_TYPE_*| +// values. OPENSSL_EXPORT int BIO_method_type(const BIO *bio); -/* bio_info_cb is the type of a callback function that can be called for most - * BIO operations. The |event| argument is one of |BIO_CB_*| and can be ORed - * with |BIO_CB_RETURN| if the callback is being made after the operation in - * question. In that case, |return_value| will contain the return value from - * the operation. */ +// These are passed to the BIO callback +#define BIO_CB_FREE 0x01 +#define BIO_CB_READ 0x02 +#define BIO_CB_WRITE 0x03 +#define BIO_CB_PUTS 0x04 +#define BIO_CB_GETS 0x05 +#define BIO_CB_CTRL 0x06 + +// The callback is called before and after the underling operation, +// The BIO_CB_RETURN flag indicates if it is after the call +#define BIO_CB_RETURN 0x80 + +// bio_info_cb is the type of a callback function that can be called for most +// BIO operations. The |event| argument is one of |BIO_CB_*| and can be ORed +// with |BIO_CB_RETURN| if the callback is being made after the operation in +// question. In that case, |return_value| will contain the return value from +// the operation. typedef long (*bio_info_cb)(BIO *bio, int event, const char *parg, int cmd, long larg, long return_value); -/* BIO_callback_ctrl allows the callback function to be manipulated. The |cmd| - * arg will generally be |BIO_CTRL_SET_CALLBACK| but arbitrary command values - * can be interpreted by the |BIO|. */ +// BIO_callback_ctrl allows the callback function to be manipulated. The |cmd| +// arg will generally be |BIO_CTRL_SET_CALLBACK| but arbitrary command values +// can be interpreted by the |BIO|. OPENSSL_EXPORT long BIO_callback_ctrl(BIO *bio, int cmd, bio_info_cb fp); -/* BIO_pending returns the number of bytes pending to be read. */ +// BIO_pending returns the number of bytes pending to be read. OPENSSL_EXPORT size_t BIO_pending(const BIO *bio); -/* BIO_ctrl_pending calls |BIO_pending| and exists only for compatibility with - * OpenSSL. */ +// BIO_ctrl_pending calls |BIO_pending| and exists only for compatibility with +// OpenSSL. OPENSSL_EXPORT size_t BIO_ctrl_pending(const BIO *bio); -/* BIO_wpending returns the number of bytes pending to be written. */ +// BIO_wpending returns the number of bytes pending to be written. OPENSSL_EXPORT size_t BIO_wpending(const BIO *bio); -/* BIO_set_close sets the close flag for |bio|. The meaning of which depends on - * the type of |bio| but, for example, a memory BIO interprets the close flag - * as meaning that it owns its buffer. It returns one on success and zero - * otherwise. */ +// BIO_set_close sets the close flag for |bio|. 
The meaning of which depends on +// the type of |bio| but, for example, a memory BIO interprets the close flag +// as meaning that it owns its buffer. It returns one on success and zero +// otherwise. OPENSSL_EXPORT int BIO_set_close(BIO *bio, int close_flag); -/* BIO_set_callback sets a callback function that will be called before and - * after most operations. See the comment above |bio_info_cb|. */ -OPENSSL_EXPORT void BIO_set_callback(BIO *bio, bio_info_cb callback_func); - -/* BIO_set_callback_arg sets the opaque pointer value that can be read within a - * callback with |BIO_get_callback_arg|. */ -OPENSSL_EXPORT void BIO_set_callback_arg(BIO *bio, char *arg); - -/* BIO_get_callback_arg returns the last value of the opaque callback pointer - * set by |BIO_set_callback_arg|. */ -OPENSSL_EXPORT char *BIO_get_callback_arg(const BIO *bio); - -/* BIO_number_read returns the number of bytes that have been read from - * |bio|. */ +// BIO_number_read returns the number of bytes that have been read from +// |bio|. OPENSSL_EXPORT size_t BIO_number_read(const BIO *bio); -/* BIO_number_written returns the number of bytes that have been written to - * |bio|. */ +// BIO_number_written returns the number of bytes that have been written to +// |bio|. OPENSSL_EXPORT size_t BIO_number_written(const BIO *bio); -/* Managing chains of BIOs. - * - * BIOs can be put into chains where the output of one is used as the input of - * the next etc. The most common case is a buffering BIO, which accepts and - * buffers writes until flushed into the next BIO in the chain. */ +// Managing chains of BIOs. +// +// BIOs can be put into chains where the output of one is used as the input of +// the next etc. The most common case is a buffering BIO, which accepts and +// buffers writes until flushed into the next BIO in the chain. -/* BIO_push adds |appended_bio| to the end of the chain with |bio| at the head. - * It returns |bio|. Note that |appended_bio| may be the head of a chain itself - * and thus this function can be used to join two chains. - * - * BIO_push takes ownership of the caller's reference to |appended_bio|. */ +// BIO_push adds |appended_bio| to the end of the chain with |bio| at the head. +// It returns |bio|. Note that |appended_bio| may be the head of a chain itself +// and thus this function can be used to join two chains. +// +// BIO_push takes ownership of the caller's reference to |appended_bio|. OPENSSL_EXPORT BIO *BIO_push(BIO *bio, BIO *appended_bio); -/* BIO_pop removes |bio| from the head of a chain and returns the next BIO in - * the chain, or NULL if there is no next BIO. - * - * The caller takes ownership of the chain's reference to |bio|. */ +// BIO_pop removes |bio| from the head of a chain and returns the next BIO in +// the chain, or NULL if there is no next BIO. +// +// The caller takes ownership of the chain's reference to |bio|. OPENSSL_EXPORT BIO *BIO_pop(BIO *bio); -/* BIO_next returns the next BIO in the chain after |bio|, or NULL if there is - * no such BIO. */ +// BIO_next returns the next BIO in the chain after |bio|, or NULL if there is +// no such BIO. OPENSSL_EXPORT BIO *BIO_next(BIO *bio); -/* BIO_free_all calls |BIO_free|. - * - * TODO(fork): update callers and remove. */ +// BIO_free_all calls |BIO_free|. +// +// TODO(fork): update callers and remove. OPENSSL_EXPORT void BIO_free_all(BIO *bio); -/* BIO_find_type walks a chain of BIOs and returns the first that matches - * |type|, which is one of the |BIO_TYPE_*| values. 
*/ +// BIO_find_type walks a chain of BIOs and returns the first that matches +// |type|, which is one of the |BIO_TYPE_*| values. OPENSSL_EXPORT BIO *BIO_find_type(BIO *bio, int type); -/* BIO_copy_next_retry sets the retry flags and |retry_reason| of |bio| from - * the next BIO in the chain. */ +// BIO_copy_next_retry sets the retry flags and |retry_reason| of |bio| from +// the next BIO in the chain. OPENSSL_EXPORT void BIO_copy_next_retry(BIO *bio); -/* Printf functions. */ +// Printf functions. -/* BIO_printf behaves like |printf| but outputs to |bio| rather than a |FILE|. - * It returns the number of bytes written or a negative number on error. */ +// BIO_printf behaves like |printf| but outputs to |bio| rather than a |FILE|. +// It returns the number of bytes written or a negative number on error. OPENSSL_EXPORT int BIO_printf(BIO *bio, const char *format, ...) OPENSSL_PRINTF_FORMAT_FUNC(2, 3); -/* Utility functions. */ +// Utility functions. -/* BIO_indent prints min(|indent|, |max_indent|) spaces. It returns one on - * success and zero otherwise. */ +// BIO_indent prints min(|indent|, |max_indent|) spaces. It returns one on +// success and zero otherwise. OPENSSL_EXPORT int BIO_indent(BIO *bio, unsigned indent, unsigned max_indent); -/* BIO_hexdump writes a hex dump of |data| to |bio|. Each line will be indented - * by |indent| spaces. */ +// BIO_hexdump writes a hex dump of |data| to |bio|. Each line will be indented +// by |indent| spaces. OPENSSL_EXPORT int BIO_hexdump(BIO *bio, const uint8_t *data, size_t len, unsigned indent); -/* ERR_print_errors prints the current contents of the error stack to |bio| - * using human readable strings where possible. */ +// ERR_print_errors prints the current contents of the error stack to |bio| +// using human readable strings where possible. OPENSSL_EXPORT void ERR_print_errors(BIO *bio); -/* BIO_read_asn1 reads a single ASN.1 object from |bio|. If successful it sets - * |*out| to be an allocated buffer (that should be freed with |OPENSSL_free|), - * |*out_size| to the length, in bytes, of that buffer and returns one. - * Otherwise it returns zero. - * - * If the length of the object is greater than |max_len| or 2^32 then the - * function will fail. Long-form tags are not supported. If the length of the - * object is indefinite the full contents of |bio| are read, unless it would be - * greater than |max_len|, in which case the function fails. - * - * If the function fails then some unknown amount of data may have been read - * from |bio|. */ +// BIO_read_asn1 reads a single ASN.1 object from |bio|. If successful it sets +// |*out| to be an allocated buffer (that should be freed with |OPENSSL_free|), +// |*out_size| to the length, in bytes, of that buffer and returns one. +// Otherwise it returns zero. +// +// If the length of the object is greater than |max_len| or 2^32 then the +// function will fail. Long-form tags are not supported. If the length of the +// object is indefinite the full contents of |bio| are read, unless it would be +// greater than |max_len|, in which case the function fails. +// +// If the function fails then some unknown amount of data may have been read +// from |bio|. OPENSSL_EXPORT int BIO_read_asn1(BIO *bio, uint8_t **out, size_t *out_len, size_t max_len); -/* Memory BIOs. - * - * Memory BIOs can be used as a read-only source (with |BIO_new_mem_buf|) or a - * writable sink (with |BIO_new|, |BIO_s_mem| and |BIO_get_mem_buf|). Data - * written to a writable, memory BIO can be recalled by reading from it. 
- * - * Calling |BIO_reset| on a read-only BIO resets it to the original contents. - * On a writable BIO, it clears any data. - * - * If the close flag is set to |BIO_NOCLOSE| (not the default) then the - * underlying |BUF_MEM| will not be freed when the |BIO| is freed. - * - * Memory BIOs support |BIO_gets| and |BIO_puts|. - * - * |BIO_ctrl_pending| returns the number of bytes currently stored. */ +// Memory BIOs. +// +// Memory BIOs can be used as a read-only source (with |BIO_new_mem_buf|) or a +// writable sink (with |BIO_new|, |BIO_s_mem| and |BIO_get_mem_buf|). Data +// written to a writable, memory BIO can be recalled by reading from it. +// +// Calling |BIO_reset| on a read-only BIO resets it to the original contents. +// On a writable BIO, it clears any data. +// +// If the close flag is set to |BIO_NOCLOSE| (not the default) then the +// underlying |BUF_MEM| will not be freed when the |BIO| is freed. +// +// Memory BIOs support |BIO_gets| and |BIO_puts|. +// +// |BIO_ctrl_pending| returns the number of bytes currently stored. + +// BIO_NOCLOSE and |BIO_CLOSE| can be used as symbolic arguments when a "close +// flag" is passed to a BIO function. +#define BIO_NOCLOSE 0 +#define BIO_CLOSE 1 -/* BIO_s_mem returns a |BIO_METHOD| that uses a in-memory buffer. */ +// BIO_s_mem returns a |BIO_METHOD| that uses a in-memory buffer. OPENSSL_EXPORT const BIO_METHOD *BIO_s_mem(void); -/* BIO_new_mem_buf creates read-only BIO that reads from |len| bytes at |buf|. - * It does not take ownership of |buf|. It returns the BIO or NULL on error. - * - * If |len| is negative, then |buf| is treated as a NUL-terminated string, but - * don't depend on this in new code. */ +// BIO_new_mem_buf creates read-only BIO that reads from |len| bytes at |buf|. +// It does not take ownership of |buf|. It returns the BIO or NULL on error. +// +// If |len| is negative, then |buf| is treated as a NUL-terminated string, but +// don't depend on this in new code. OPENSSL_EXPORT BIO *BIO_new_mem_buf(const void *buf, int len); -/* BIO_mem_contents sets |*out_contents| to point to the current contents of - * |bio| and |*out_len| to contain the length of that data. It returns one on - * success and zero otherwise. */ +// BIO_mem_contents sets |*out_contents| to point to the current contents of +// |bio| and |*out_len| to contain the length of that data. It returns one on +// success and zero otherwise. OPENSSL_EXPORT int BIO_mem_contents(const BIO *bio, const uint8_t **out_contents, size_t *out_len); -/* BIO_get_mem_data sets |*contents| to point to the current contents of |bio| - * and returns the length of the data. - * - * WARNING: don't use this, use |BIO_mem_contents|. A return value of zero from - * this function can mean either that it failed or that the memory buffer is - * empty. */ +// BIO_get_mem_data sets |*contents| to point to the current contents of |bio| +// and returns the length of the data. +// +// WARNING: don't use this, use |BIO_mem_contents|. A return value of zero from +// this function can mean either that it failed or that the memory buffer is +// empty. OPENSSL_EXPORT long BIO_get_mem_data(BIO *bio, char **contents); -/* BIO_get_mem_ptr sets |*out| to a BUF_MEM containing the current contents of - * |bio|. It returns one on success or zero on error. */ +// BIO_get_mem_ptr sets |*out| to a BUF_MEM containing the current contents of +// |bio|. It returns one on success or zero on error. 
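A short sketch of the memory-BIO round trip described above, using only calls documented in this header; the demo function itself is editorial and not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <openssl/bio.h>

static void memory_bio_demo(void) {
  BIO *bio = BIO_new(BIO_s_mem());
  if (bio == NULL) {
    return;
  }
  BIO_printf(bio, "hello %d", 42);  /* accumulates in the BIO's buffer */

  const uint8_t *contents;
  size_t len;
  if (BIO_mem_contents(bio, &contents, &len)) {
    fwrite(contents, 1, len, stdout);  /* prints "hello 42" */
  }
  BIO_free(bio);  /* the default close flag frees the underlying BUF_MEM too */
}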
OPENSSL_EXPORT int BIO_get_mem_ptr(BIO *bio, BUF_MEM **out); -/* BIO_set_mem_buf sets |b| as the contents of |bio|. If |take_ownership| is - * non-zero, then |b| will be freed when |bio| is closed. Returns one on - * success or zero otherwise. */ +// BIO_set_mem_buf sets |b| as the contents of |bio|. If |take_ownership| is +// non-zero, then |b| will be freed when |bio| is closed. Returns one on +// success or zero otherwise. OPENSSL_EXPORT int BIO_set_mem_buf(BIO *bio, BUF_MEM *b, int take_ownership); -/* BIO_set_mem_eof_return sets the value that will be returned from reading - * |bio| when empty. If |eof_value| is zero then an empty memory BIO will - * return EOF (that is it will return zero and |BIO_should_retry| will be - * false). If |eof_value| is non zero then it will return |eof_value| when it - * is empty and it will set the read retry flag (that is |BIO_read_retry| is - * true). To avoid ambiguity with a normal positive return value, |eof_value| - * should be set to a negative value, typically -1. - * - * For a read-only BIO, the default is zero (EOF). For a writable BIO, the - * default is -1 so that additional data can be written once exhausted. */ +// BIO_set_mem_eof_return sets the value that will be returned from reading +// |bio| when empty. If |eof_value| is zero then an empty memory BIO will +// return EOF (that is it will return zero and |BIO_should_retry| will be +// false). If |eof_value| is non zero then it will return |eof_value| when it +// is empty and it will set the read retry flag (that is |BIO_read_retry| is +// true). To avoid ambiguity with a normal positive return value, |eof_value| +// should be set to a negative value, typically -1. +// +// For a read-only BIO, the default is zero (EOF). For a writable BIO, the +// default is -1 so that additional data can be written once exhausted. OPENSSL_EXPORT int BIO_set_mem_eof_return(BIO *bio, int eof_value); -/* File descriptor BIOs. - * - * File descriptor BIOs are wrappers around the system's |read| and |write| - * functions. If the close flag is set then then |close| is called on the - * underlying file descriptor when the BIO is freed. - * - * |BIO_reset| attempts to seek the file pointer to the start of file using - * |lseek|. */ +// File descriptor BIOs. +// +// File descriptor BIOs are wrappers around the system's |read| and |write| +// functions. If the close flag is set then then |close| is called on the +// underlying file descriptor when the BIO is freed. +// +// |BIO_reset| attempts to seek the file pointer to the start of file using +// |lseek|. -/* BIO_s_fd returns a |BIO_METHOD| for file descriptor fds. */ +// BIO_s_fd returns a |BIO_METHOD| for file descriptor fds. OPENSSL_EXPORT const BIO_METHOD *BIO_s_fd(void); -/* BIO_new_fd creates a new file descriptor BIO wrapping |fd|. If |close_flag| - * is non-zero, then |fd| will be closed when the BIO is. */ +// BIO_new_fd creates a new file descriptor BIO wrapping |fd|. If |close_flag| +// is non-zero, then |fd| will be closed when the BIO is. OPENSSL_EXPORT BIO *BIO_new_fd(int fd, int close_flag); -/* BIO_set_fd sets the file descriptor of |bio| to |fd|. If |close_flag| is - * non-zero then |fd| will be closed when |bio| is. It returns one on success - * or zero on error. - * - * This function may also be used with socket BIOs (see |BIO_s_socket| and - * |BIO_new_socket|). */ +// BIO_set_fd sets the file descriptor of |bio| to |fd|. If |close_flag| is +// non-zero then |fd| will be closed when |bio| is. It returns one on success +// or zero on error. 
+// +// This function may also be used with socket BIOs (see |BIO_s_socket| and +// |BIO_new_socket|). OPENSSL_EXPORT int BIO_set_fd(BIO *bio, int fd, int close_flag); -/* BIO_get_fd returns the file descriptor currently in use by |bio| or -1 if - * |bio| does not wrap a file descriptor. If there is a file descriptor and - * |out_fd| is not NULL, it also sets |*out_fd| to the file descriptor. - * - * This function may also be used with socket BIOs (see |BIO_s_socket| and - * |BIO_new_socket|). */ +// BIO_get_fd returns the file descriptor currently in use by |bio| or -1 if +// |bio| does not wrap a file descriptor. If there is a file descriptor and +// |out_fd| is not NULL, it also sets |*out_fd| to the file descriptor. +// +// This function may also be used with socket BIOs (see |BIO_s_socket| and +// |BIO_new_socket|). OPENSSL_EXPORT int BIO_get_fd(BIO *bio, int *out_fd); -/* File BIOs. - * - * File BIOs are wrappers around a C |FILE| object. - * - * |BIO_flush| on a file BIO calls |fflush| on the wrapped stream. - * - * |BIO_reset| attempts to seek the file pointer to the start of file using - * |fseek|. - * - * Setting the close flag causes |fclose| to be called on the stream when the - * BIO is freed. */ +// File BIOs. +// +// File BIOs are wrappers around a C |FILE| object. +// +// |BIO_flush| on a file BIO calls |fflush| on the wrapped stream. +// +// |BIO_reset| attempts to seek the file pointer to the start of file using +// |fseek|. +// +// Setting the close flag causes |fclose| to be called on the stream when the +// BIO is freed. -/* BIO_s_file returns a BIO_METHOD that wraps a |FILE|. */ +// BIO_s_file returns a BIO_METHOD that wraps a |FILE|. OPENSSL_EXPORT const BIO_METHOD *BIO_s_file(void); -/* BIO_new_file creates a file BIO by opening |filename| with the given mode. - * See the |fopen| manual page for details of the mode argument. */ +// BIO_new_file creates a file BIO by opening |filename| with the given mode. +// See the |fopen| manual page for details of the mode argument. OPENSSL_EXPORT BIO *BIO_new_file(const char *filename, const char *mode); -/* BIO_new_fp creates a new file BIO that wraps the given |FILE|. If - * |close_flag| is |BIO_CLOSE|, then |fclose| will be called on |stream| when - * the BIO is closed. */ +// BIO_new_fp creates a new file BIO that wraps the given |FILE|. If +// |close_flag| is |BIO_CLOSE|, then |fclose| will be called on |stream| when +// the BIO is closed. OPENSSL_EXPORT BIO *BIO_new_fp(FILE *stream, int close_flag); -/* BIO_get_fp sets |*out_file| to the current |FILE| for |bio|. It returns one - * on success and zero otherwise. */ +// BIO_get_fp sets |*out_file| to the current |FILE| for |bio|. It returns one +// on success and zero otherwise. OPENSSL_EXPORT int BIO_get_fp(BIO *bio, FILE **out_file); -/* BIO_set_fp sets the |FILE| for |bio|. If |close_flag| is |BIO_CLOSE| then - * |fclose| will be called on |file| when |bio| is closed. It returns one on - * success and zero otherwise. */ +// BIO_set_fp sets the |FILE| for |bio|. If |close_flag| is |BIO_CLOSE| then +// |fclose| will be called on |file| when |bio| is closed. It returns one on +// success and zero otherwise. OPENSSL_EXPORT int BIO_set_fp(BIO *bio, FILE *file, int close_flag); -/* BIO_read_filename opens |filename| for reading and sets the result as the - * |FILE| for |bio|. It returns one on success and zero otherwise. The |FILE| - * will be closed when |bio| is freed. */ +// BIO_read_filename opens |filename| for reading and sets the result as the +// |FILE| for |bio|. 
It returns one on success and zero otherwise. The |FILE| +// will be closed when |bio| is freed. OPENSSL_EXPORT int BIO_read_filename(BIO *bio, const char *filename); -/* BIO_write_filename opens |filename| for writing and sets the result as the - * |FILE| for |bio|. It returns one on success and zero otherwise. The |FILE| - * will be closed when |bio| is freed. */ +// BIO_write_filename opens |filename| for writing and sets the result as the +// |FILE| for |bio|. It returns one on success and zero otherwise. The |FILE| +// will be closed when |bio| is freed. OPENSSL_EXPORT int BIO_write_filename(BIO *bio, const char *filename); -/* BIO_append_filename opens |filename| for appending and sets the result as - * the |FILE| for |bio|. It returns one on success and zero otherwise. The - * |FILE| will be closed when |bio| is freed. */ +// BIO_append_filename opens |filename| for appending and sets the result as +// the |FILE| for |bio|. It returns one on success and zero otherwise. The +// |FILE| will be closed when |bio| is freed. OPENSSL_EXPORT int BIO_append_filename(BIO *bio, const char *filename); -/* BIO_rw_filename opens |filename| for reading and writing and sets the result - * as the |FILE| for |bio|. It returns one on success and zero otherwise. The - * |FILE| will be closed when |bio| is freed. */ +// BIO_rw_filename opens |filename| for reading and writing and sets the result +// as the |FILE| for |bio|. It returns one on success and zero otherwise. The +// |FILE| will be closed when |bio| is freed. OPENSSL_EXPORT int BIO_rw_filename(BIO *bio, const char *filename); -/* Socket BIOs. - * - * Socket BIOs behave like file descriptor BIOs but, on Windows systems, wrap - * the system's |recv| and |send| functions instead of |read| and |write|. On - * Windows, file descriptors are provided by C runtime and are not - * interchangeable with sockets. - * - * Socket BIOs may be used with |BIO_set_fd| and |BIO_get_fd|. - * - * TODO(davidben): Add separate APIs and fix the internals to use |SOCKET|s - * around rather than rely on int casts. */ +// Socket BIOs. +// +// Socket BIOs behave like file descriptor BIOs but, on Windows systems, wrap +// the system's |recv| and |send| functions instead of |read| and |write|. On +// Windows, file descriptors are provided by C runtime and are not +// interchangeable with sockets. +// +// Socket BIOs may be used with |BIO_set_fd| and |BIO_get_fd|. +// +// TODO(davidben): Add separate APIs and fix the internals to use |SOCKET|s +// around rather than rely on int casts. OPENSSL_EXPORT const BIO_METHOD *BIO_s_socket(void); -/* BIO_new_socket allocates and initialises a fresh BIO which will read and - * write to the socket |fd|. If |close_flag| is |BIO_CLOSE| then closing the - * BIO will close |fd|. It returns the fresh |BIO| or NULL on error. */ +// BIO_new_socket allocates and initialises a fresh BIO which will read and +// write to the socket |fd|. If |close_flag| is |BIO_CLOSE| then closing the +// BIO will close |fd|. It returns the fresh |BIO| or NULL on error. OPENSSL_EXPORT BIO *BIO_new_socket(int fd, int close_flag); -/* Connect BIOs. - * - * A connection BIO creates a network connection and transfers data over the - * resulting socket. */ +// Connect BIOs. +// +// A connection BIO creates a network connection and transfers data over the +// resulting socket. OPENSSL_EXPORT const BIO_METHOD *BIO_s_connect(void); -/* BIO_new_connect returns a BIO that connects to the given hostname and port. 
- * The |host_and_optional_port| argument should be of the form - * "www.example.com" or "www.example.com:443". If the port is omitted, it must - * be provided with |BIO_set_conn_port|. - * - * It returns the new BIO on success, or NULL on error. */ +// BIO_new_connect returns a BIO that connects to the given hostname and port. +// The |host_and_optional_port| argument should be of the form +// "www.example.com" or "www.example.com:443". If the port is omitted, it must +// be provided with |BIO_set_conn_port|. +// +// It returns the new BIO on success, or NULL on error. OPENSSL_EXPORT BIO *BIO_new_connect(const char *host_and_optional_port); -/* BIO_set_conn_hostname sets |host_and_optional_port| as the hostname and - * optional port that |bio| will connect to. If the port is omitted, it must be - * provided with |BIO_set_conn_port|. - * - * It returns one on success and zero otherwise. */ +// BIO_set_conn_hostname sets |host_and_optional_port| as the hostname and +// optional port that |bio| will connect to. If the port is omitted, it must be +// provided with |BIO_set_conn_port|. +// +// It returns one on success and zero otherwise. OPENSSL_EXPORT int BIO_set_conn_hostname(BIO *bio, const char *host_and_optional_port); -/* BIO_set_conn_port sets |port_str| as the port or service name that |bio| - * will connect to. It returns one on success and zero otherwise. */ +// BIO_set_conn_port sets |port_str| as the port or service name that |bio| +// will connect to. It returns one on success and zero otherwise. OPENSSL_EXPORT int BIO_set_conn_port(BIO *bio, const char *port_str); -/* BIO_set_conn_int_port sets |*port| as the port that |bio| will connect to. - * It returns one on success and zero otherwise. */ +// BIO_set_conn_int_port sets |*port| as the port that |bio| will connect to. +// It returns one on success and zero otherwise. OPENSSL_EXPORT int BIO_set_conn_int_port(BIO *bio, const int *port); -/* BIO_set_nbio sets whether |bio| will use non-blocking I/O operations. It - * returns one on success and zero otherwise. */ +// BIO_set_nbio sets whether |bio| will use non-blocking I/O operations. It +// returns one on success and zero otherwise. OPENSSL_EXPORT int BIO_set_nbio(BIO *bio, int on); -/* BIO_do_connect connects |bio| if it has not been connected yet. It returns - * one on success and <= 0 otherwise. */ +// BIO_do_connect connects |bio| if it has not been connected yet. It returns +// one on success and <= 0 otherwise. OPENSSL_EXPORT int BIO_do_connect(BIO *bio); -/* Datagram BIOs. - * - * TODO(fork): not implemented. */ +// Datagram BIOs. +// +// TODO(fork): not implemented. -#define BIO_CTRL_DGRAM_QUERY_MTU 40 /* as kernel for current MTU */ +#define BIO_CTRL_DGRAM_QUERY_MTU 40 // as kernel for current MTU #define BIO_CTRL_DGRAM_SET_MTU 42 /* set cached value for MTU. want to use this if asking the kernel fails */ @@ -565,101 +571,161 @@ OPENSSL_EXPORT int BIO_do_connect(BIO *bio); #define BIO_CTRL_DGRAM_MTU_EXCEEDED 43 /* check whether the MTU was exceed in the previous write operation. */ -/* BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT sets a read deadline to drive - * retransmits. The |parg| argument to |BIO_ctrl| will be a pointer to a - * |timeval| struct. If the structure is all zeros, it clears the read - * deadline. Otherwise, |BIO_read| must fail with a temporary error - * (e.g. |EAGAIN|) after the deadline. 
*/ -#define BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT 45 +// BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT is unsupported as it is unused by consumers +// and depends on |timeval|, which is not 2038-clean on all platforms. #define BIO_CTRL_DGRAM_GET_PEER 46 #define BIO_CTRL_DGRAM_GET_FALLBACK_MTU 47 -/* BIO Pairs. - * - * BIO pairs provide a "loopback" like system: a pair of BIOs where data - * written to one can be read from the other and vice versa. */ +// BIO Pairs. +// +// BIO pairs provide a "loopback" like system: a pair of BIOs where data +// written to one can be read from the other and vice versa. -/* BIO_new_bio_pair sets |*out1| and |*out2| to two freshly created BIOs where - * data written to one can be read from the other and vice versa. The - * |writebuf1| argument gives the size of the buffer used in |*out1| and - * |writebuf2| for |*out2|. It returns one on success and zero on error. */ +// BIO_new_bio_pair sets |*out1| and |*out2| to two freshly created BIOs where +// data written to one can be read from the other and vice versa. The +// |writebuf1| argument gives the size of the buffer used in |*out1| and +// |writebuf2| for |*out2|. It returns one on success and zero on error. OPENSSL_EXPORT int BIO_new_bio_pair(BIO **out1, size_t writebuf1, BIO **out2, size_t writebuf2); -/* BIO_ctrl_get_read_request returns the number of bytes that the other side of - * |bio| tried (unsuccessfully) to read. */ +// BIO_ctrl_get_read_request returns the number of bytes that the other side of +// |bio| tried (unsuccessfully) to read. OPENSSL_EXPORT size_t BIO_ctrl_get_read_request(BIO *bio); -/* BIO_ctrl_get_write_guarantee returns the number of bytes that |bio| (which - * must have been returned by |BIO_new_bio_pair|) will accept on the next - * |BIO_write| call. */ +// BIO_ctrl_get_write_guarantee returns the number of bytes that |bio| (which +// must have been returned by |BIO_new_bio_pair|) will accept on the next +// |BIO_write| call. OPENSSL_EXPORT size_t BIO_ctrl_get_write_guarantee(BIO *bio); -/* BIO_shutdown_wr marks |bio| as closed, from the point of view of the other - * side of the pair. Future |BIO_write| calls on |bio| will fail. It returns - * one on success and zero otherwise. */ +// BIO_shutdown_wr marks |bio| as closed, from the point of view of the other +// side of the pair. Future |BIO_write| calls on |bio| will fail. It returns +// one on success and zero otherwise. OPENSSL_EXPORT int BIO_shutdown_wr(BIO *bio); -/* BIO_NOCLOSE and |BIO_CLOSE| can be used as symbolic arguments when a "close - * flag" is passed to a BIO function. */ -#define BIO_NOCLOSE 0 -#define BIO_CLOSE 1 - -/* These are passed to the BIO callback */ -#define BIO_CB_FREE 0x01 -#define BIO_CB_READ 0x02 -#define BIO_CB_WRITE 0x03 -#define BIO_CB_PUTS 0x04 -#define BIO_CB_GETS 0x05 -#define BIO_CB_CTRL 0x06 - -/* The callback is called before and after the underling operation, - * The BIO_CB_RETURN flag indicates if it is after the call */ -#define BIO_CB_RETURN 0x80 - -/* These are values of the |cmd| argument to |BIO_ctrl|. */ -#define BIO_CTRL_RESET 1 /* opt - rewind/zero etc */ -#define BIO_CTRL_EOF 2 /* opt - are we at the eof */ -#define BIO_CTRL_INFO 3 /* opt - extra tit-bits */ -#define BIO_CTRL_SET 4 /* man - set the 'IO' type */ -#define BIO_CTRL_GET 5 /* man - get the 'IO' type */ +// Custom BIOs. +// +// Consumers can create custom |BIO|s by filling in a |BIO_METHOD| and using +// low-level control functions to set state. + +// BIO_get_new_index returns a new "type" value for a custom |BIO|. 
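Stepping back to the BIO-pair functions documented just before this custom-BIO section, a minimal loopback sketch (buffer sizes picked arbitrarily for the example, and not part of the patch):

#include <openssl/bio.h>

static void bio_pair_demo(void) {
  BIO *a = NULL, *b = NULL;
  if (!BIO_new_bio_pair(&a, 1024, &b, 1024)) {
    return;
  }
  BIO_write(a, "ping", 4);                 /* buffered inside the pair */

  char buf[4];
  int n = BIO_read(b, buf, sizeof(buf));   /* n == 4, buf now holds "ping" */
  (void)n;

  BIO_free(a);  /* each half of the pair is freed separately */
  BIO_free(b);
}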
+OPENSSL_EXPORT int BIO_get_new_index(void); + +// BIO_meth_new returns a newly-allocated |BIO_METHOD| or NULL on allocation +// error. The |type| specifies the type that will be returned by +// |BIO_method_type|. If this is unnecessary, this value may be zero. The |name| +// parameter is vestigial and may be NULL. +// +// Use the |BIO_meth_set_*| functions below to initialize the |BIO_METHOD|. The +// function implementations may use |BIO_set_data| and |BIO_get_data| to add +// method-specific state to associated |BIO|s. Additionally, |BIO_set_init| must +// be called after an associated |BIO| is fully initialized. State set via +// |BIO_set_data| may be released by configuring a destructor with +// |BIO_meth_set_destroy|. +OPENSSL_EXPORT BIO_METHOD *BIO_meth_new(int type, const char *name); + +// BIO_meth_free releases memory associated with |method|. +OPENSSL_EXPORT void BIO_meth_free(BIO_METHOD *method); + +// BIO_meth_set_create sets a function to be called on |BIO_new| for |method| +// and returns one. The function should return one on success and zero on +// error. +OPENSSL_EXPORT int BIO_meth_set_create(BIO_METHOD *method, + int (*create)(BIO *)); + +// BIO_meth_set_destroy sets a function to release data associated with a |BIO| +// and returns one. The function's return value is ignored. +OPENSSL_EXPORT int BIO_meth_set_destroy(BIO_METHOD *method, + int (*destroy)(BIO *)); + +// BIO_meth_set_write sets the implementation of |BIO_write| for |method| and +// returns one. |BIO_METHOD|s which implement |BIO_write| should also implement +// |BIO_CTRL_FLUSH|. (See |BIO_meth_set_ctrl|.) +OPENSSL_EXPORT int BIO_meth_set_write(BIO_METHOD *method, + int (*write)(BIO *, const char *, int)); + +// BIO_meth_set_read sets the implementation of |BIO_read| for |method| and +// returns one. +OPENSSL_EXPORT int BIO_meth_set_read(BIO_METHOD *method, + int (*read)(BIO *, char *, int)); + +// BIO_meth_set_gets sets the implementation of |BIO_gets| for |method| and +// returns one. +OPENSSL_EXPORT int BIO_meth_set_gets(BIO_METHOD *method, + int (*gets)(BIO *, char *, int)); + +// BIO_meth_set_ctrl sets the implementation of |BIO_ctrl| for |method| and +// returns one. +OPENSSL_EXPORT int BIO_meth_set_ctrl(BIO_METHOD *method, + long (*ctrl)(BIO *, int, long, void *)); + +// BIO_set_data sets custom data on |bio|. It may be retried with +// |BIO_get_data|. +OPENSSL_EXPORT void BIO_set_data(BIO *bio, void *ptr); + +// BIO_get_data returns custom data on |bio| set by |BIO_get_data|. +OPENSSL_EXPORT void *BIO_get_data(BIO *bio); + +// BIO_set_init sets whether |bio| has been fully initialized. Until fully +// initialized, |BIO_read| and |BIO_write| will fail. +OPENSSL_EXPORT void BIO_set_init(BIO *bio, int init); + +// BIO_get_init returns whether |bio| has been fully initialized. +OPENSSL_EXPORT int BIO_get_init(BIO *bio); + +// These are values of the |cmd| argument to |BIO_ctrl|. 
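Before the BIO_ctrl command values below, here is a sketch of how the custom-BIO hooks just documented fit together. The byte-counting sink and every identifier prefixed with counter_ are hypothetical and not from the patch.

#include <stddef.h>
#include <openssl/bio.h>

/* Hypothetical write-only sink that counts the bytes written through it. */
static int counter_write(BIO *bio, const char *data, int len) {
  size_t *total = BIO_get_data(bio);
  (void)data;
  *total += (size_t)len;
  return len;
}

static int counter_create(BIO *bio) {
  BIO_set_init(bio, 1);  /* reads and writes fail until init is set */
  return 1;
}

static long counter_ctrl(BIO *bio, int cmd, long larg, void *parg) {
  (void)bio; (void)larg; (void)parg;
  return cmd == BIO_CTRL_FLUSH;  /* write methods should accept BIO_CTRL_FLUSH */
}

static size_t count_bytes(const void *data, int len) {
  size_t total = 0;
  BIO_METHOD *method = BIO_meth_new(BIO_get_new_index(), "byte counter");
  if (method == NULL) {
    return 0;
  }
  BIO_meth_set_create(method, counter_create);
  BIO_meth_set_write(method, counter_write);
  BIO_meth_set_ctrl(method, counter_ctrl);

  BIO *bio = BIO_new(method);
  if (bio != NULL) {
    BIO_set_data(bio, &total);   /* state read back via BIO_get_data */
    BIO_write(bio, data, len);
    BIO_free(bio);
  }
  BIO_meth_free(method);
  return total;
}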
+#define BIO_CTRL_RESET 1 // opt - rewind/zero etc +#define BIO_CTRL_EOF 2 // opt - are we at the eof +#define BIO_CTRL_INFO 3 // opt - extra tit-bits +#define BIO_CTRL_SET 4 // man - set the 'IO' type +#define BIO_CTRL_GET 5 // man - get the 'IO' type #define BIO_CTRL_PUSH 6 #define BIO_CTRL_POP 7 -#define BIO_CTRL_GET_CLOSE 8 /* man - set the 'close' on free */ -#define BIO_CTRL_SET_CLOSE 9 /* man - set the 'close' on free */ -#define BIO_CTRL_PENDING 10 /* opt - is their more data buffered */ -#define BIO_CTRL_FLUSH 11 /* opt - 'flush' buffered output */ -#define BIO_CTRL_WPENDING 13 /* opt - number of bytes still to write */ -/* callback is int cb(BIO *bio,state,ret); */ -#define BIO_CTRL_SET_CALLBACK 14 /* opt - set callback function */ -#define BIO_CTRL_GET_CALLBACK 15 /* opt - set callback function */ -#define BIO_CTRL_SET_FILENAME 30 /* BIO_s_file special */ - -/* BIO_CTRL_DUP is never used, but exists to allow code to compile more - * easily. */ +#define BIO_CTRL_GET_CLOSE 8 // man - set the 'close' on free +#define BIO_CTRL_SET_CLOSE 9 // man - set the 'close' on free +#define BIO_CTRL_PENDING 10 // opt - is their more data buffered +#define BIO_CTRL_FLUSH 11 // opt - 'flush' buffered output +#define BIO_CTRL_WPENDING 13 // opt - number of bytes still to write +// callback is int cb(BIO *bio,state,ret); +#define BIO_CTRL_SET_CALLBACK 14 // opt - set callback function +#define BIO_CTRL_GET_CALLBACK 15 // opt - set callback function +#define BIO_CTRL_SET_FILENAME 30 // BIO_s_file special + +// BIO_CTRL_DUP is never used, but exists to allow code to compile more +// easily. #define BIO_CTRL_DUP 12 -/* Deprecated functions. */ +// Deprecated functions. -/* BIO_f_base64 returns a filter |BIO| that base64-encodes data written into - * it, and decodes data read from it. |BIO_gets| is not supported. Call - * |BIO_flush| when done writing, to signal that no more data are to be - * encoded. The flag |BIO_FLAGS_BASE64_NO_NL| may be set to encode all the data - * on one line. */ +// BIO_f_base64 returns a filter |BIO| that base64-encodes data written into +// it, and decodes data read from it. |BIO_gets| is not supported. Call +// |BIO_flush| when done writing, to signal that no more data are to be +// encoded. The flag |BIO_FLAGS_BASE64_NO_NL| may be set to encode all the data +// on one line. OPENSSL_EXPORT const BIO_METHOD *BIO_f_base64(void); OPENSSL_EXPORT void BIO_set_retry_special(BIO *bio); -/* BIO_set_write_buffer_size returns zero. */ +// BIO_set_write_buffer_size returns zero. OPENSSL_EXPORT int BIO_set_write_buffer_size(BIO *bio, int buffer_size); +// BIO_set_shutdown sets a method-specific "shutdown" bit on |bio|. +OPENSSL_EXPORT void BIO_set_shutdown(BIO *bio, int shutdown); -/* Private functions */ +// BIO_get_shutdown returns the method-specific "shutdown" bit. +OPENSSL_EXPORT int BIO_get_shutdown(BIO *bio); + +// BIO_meth_set_puts returns one. |BIO_puts| is implemented with |BIO_write| in +// BoringSSL. +OPENSSL_EXPORT int BIO_meth_set_puts(BIO_METHOD *method, + int (*puts)(BIO *, const char *)); + + +// Private functions #define BIO_FLAGS_READ 0x01 #define BIO_FLAGS_WRITE 0x02 @@ -667,11 +733,11 @@ OPENSSL_EXPORT int BIO_set_write_buffer_size(BIO *bio, int buffer_size); #define BIO_FLAGS_RWS (BIO_FLAGS_READ | BIO_FLAGS_WRITE | BIO_FLAGS_IO_SPECIAL) #define BIO_FLAGS_SHOULD_RETRY 0x08 #define BIO_FLAGS_BASE64_NO_NL 0x100 -/* This is used with memory BIOs: it means we shouldn't free up or change the - * data in any way. 
*/ +// This is used with memory BIOs: it means we shouldn't free up or change the +// data in any way. #define BIO_FLAGS_MEM_RDONLY 0x200 -/* These are the 'types' of BIOs */ +// These are the 'types' of BIOs #define BIO_TYPE_NONE 0 #define BIO_TYPE_MEM (1 | 0x0400) #define BIO_TYPE_FILE (2 | 0x0400) @@ -679,33 +745,39 @@ OPENSSL_EXPORT int BIO_set_write_buffer_size(BIO *bio, int buffer_size); #define BIO_TYPE_SOCKET (5 | 0x0400 | 0x0100) #define BIO_TYPE_NULL (6 | 0x0400) #define BIO_TYPE_SSL (7 | 0x0200) -#define BIO_TYPE_MD (8 | 0x0200) /* passive filter */ -#define BIO_TYPE_BUFFER (9 | 0x0200) /* filter */ -#define BIO_TYPE_CIPHER (10 | 0x0200) /* filter */ -#define BIO_TYPE_BASE64 (11 | 0x0200) /* filter */ -#define BIO_TYPE_CONNECT (12 | 0x0400 | 0x0100) /* socket - connect */ -#define BIO_TYPE_ACCEPT (13 | 0x0400 | 0x0100) /* socket for accept */ -#define BIO_TYPE_PROXY_CLIENT (14 | 0x0200) /* client proxy BIO */ -#define BIO_TYPE_PROXY_SERVER (15 | 0x0200) /* server proxy BIO */ -#define BIO_TYPE_NBIO_TEST (16 | 0x0200) /* server proxy BIO */ +#define BIO_TYPE_MD (8 | 0x0200) // passive filter +#define BIO_TYPE_BUFFER (9 | 0x0200) // filter +#define BIO_TYPE_CIPHER (10 | 0x0200) // filter +#define BIO_TYPE_BASE64 (11 | 0x0200) // filter +#define BIO_TYPE_CONNECT (12 | 0x0400 | 0x0100) // socket - connect +#define BIO_TYPE_ACCEPT (13 | 0x0400 | 0x0100) // socket for accept +#define BIO_TYPE_PROXY_CLIENT (14 | 0x0200) // client proxy BIO +#define BIO_TYPE_PROXY_SERVER (15 | 0x0200) // server proxy BIO +#define BIO_TYPE_NBIO_TEST (16 | 0x0200) // server proxy BIO #define BIO_TYPE_NULL_FILTER (17 | 0x0200) -#define BIO_TYPE_BER (18 | 0x0200) /* BER -> bin filter */ -#define BIO_TYPE_BIO (19 | 0x0400) /* (half a) BIO pair */ -#define BIO_TYPE_LINEBUFFER (20 | 0x0200) /* filter */ +#define BIO_TYPE_BER (18 | 0x0200) // BER -> bin filter +#define BIO_TYPE_BIO (19 | 0x0400) // (half a) BIO pair +#define BIO_TYPE_LINEBUFFER (20 | 0x0200) // filter #define BIO_TYPE_DGRAM (21 | 0x0400 | 0x0100) -#define BIO_TYPE_ASN1 (22 | 0x0200) /* filter */ -#define BIO_TYPE_COMP (23 | 0x0200) /* filter */ +#define BIO_TYPE_ASN1 (22 | 0x0200) // filter +#define BIO_TYPE_COMP (23 | 0x0200) // filter -#define BIO_TYPE_DESCRIPTOR 0x0100 /* socket, fd, connect or accept */ +// |BIO_TYPE_DESCRIPTOR| denotes that the |BIO| responds to the |BIO_C_SET_FD| +// (|BIO_set_fd|) and |BIO_C_GET_FD| (|BIO_get_fd|) control hooks. +#define BIO_TYPE_DESCRIPTOR 0x0100 // socket, fd, connect or accept #define BIO_TYPE_FILTER 0x0200 #define BIO_TYPE_SOURCE_SINK 0x0400 +// BIO_TYPE_START is the first user-allocated |BIO| type. No pre-defined type, +// flag bits aside, may exceed this value. +#define BIO_TYPE_START 128 + struct bio_method_st { int type; const char *name; int (*bwrite)(BIO *, const char *, int); int (*bread)(BIO *, char *, int); - /* TODO(fork): remove bputs. */ + // TODO(fork): remove bputs. int (*bputs)(BIO *, const char *); int (*bgets)(BIO *, char *, int); long (*ctrl)(BIO *, int, long, void *); @@ -716,27 +788,24 @@ struct bio_method_st { struct bio_st { const BIO_METHOD *method; - /* bio, mode, argp, argi, argl, ret */ - long (*callback)(BIO *, int, const char *, int, long, long); - char *cb_arg; /* first argument for the callback */ - /* init is non-zero if this |BIO| has been initialised. */ + // init is non-zero if this |BIO| has been initialised. int init; - /* shutdown is often used by specific |BIO_METHOD|s to determine whether - * they own some underlying resource. 
This flag can often by controlled by - * |BIO_set_close|. For example, whether an fd BIO closes the underlying fd - * when it, itself, is closed. */ + // shutdown is often used by specific |BIO_METHOD|s to determine whether + // they own some underlying resource. This flag can often by controlled by + // |BIO_set_close|. For example, whether an fd BIO closes the underlying fd + // when it, itself, is closed. int shutdown; int flags; int retry_reason; - /* num is a BIO-specific value. For example, in fd BIOs it's used to store a - * file descriptor. */ + // num is a BIO-specific value. For example, in fd BIOs it's used to store a + // file descriptor. int num; CRYPTO_refcount_t references; void *ptr; - /* next_bio points to the next |BIO| in a chain. This |BIO| owns a reference - * to |next_bio|. */ - BIO *next_bio; /* used by filter BIOs */ + // next_bio points to the next |BIO| in a chain. This |BIO| owns a reference + // to |next_bio|. + BIO *next_bio; // used by filter BIOs size_t num_read, num_write; }; @@ -762,21 +831,21 @@ struct bio_st { #define BIO_C_SSL_MODE 119 #define BIO_C_GET_MD_CTX 120 #define BIO_C_GET_PROXY_PARAM 121 -#define BIO_C_SET_BUFF_READ_DATA 122 /* data to read first */ +#define BIO_C_SET_BUFF_READ_DATA 122 // data to read first #define BIO_C_GET_ACCEPT 124 #define BIO_C_SET_SSL_RENEGOTIATE_BYTES 125 #define BIO_C_GET_SSL_NUM_RENEGOTIATES 126 #define BIO_C_SET_SSL_RENEGOTIATE_TIMEOUT 127 #define BIO_C_FILE_SEEK 128 #define BIO_C_GET_CIPHER_CTX 129 -#define BIO_C_SET_BUF_MEM_EOF_RETURN 130/*return end of input value*/ +#define BIO_C_SET_BUF_MEM_EOF_RETURN 130 //return end of input value #define BIO_C_SET_BIND_MODE 131 #define BIO_C_GET_BIND_MODE 132 #define BIO_C_FILE_TELL 133 #define BIO_C_GET_SOCKS 134 #define BIO_C_SET_SOCKS 135 -#define BIO_C_SET_WRITE_BUF_SIZE 136/* for BIO_s_bio */ +#define BIO_C_SET_WRITE_BUF_SIZE 136 // for BIO_s_bio #define BIO_C_GET_WRITE_BUF_SIZE 137 #define BIO_C_GET_WRITE_GUARANTEE 140 #define BIO_C_GET_READ_REQUEST 141 @@ -798,7 +867,7 @@ struct bio_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -808,7 +877,7 @@ BORINGSSL_MAKE_DELETER(BIO, BIO_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif @@ -830,4 +899,4 @@ BORINGSSL_MAKE_DELETER(BIO, BIO_free) #define BIO_R_UNSUPPORTED_METHOD 115 #define BIO_R_WRITE_TO_READ_ONLY_BIO 116 -#endif /* OPENSSL_HEADER_BIO_H */ +#endif // OPENSSL_HEADER_BIO_H diff --git a/Sources/BoringSSL/include/openssl/blowfish.h b/Sources/BoringSSL/include/openssl/blowfish.h index fa60d5336..ecf9d4561 100644 --- a/Sources/BoringSSL/include/openssl/blowfish.h +++ b/Sources/BoringSSL/include/openssl/blowfish.h @@ -90,4 +90,4 @@ OPENSSL_EXPORT void BF_cbc_encrypt(const uint8_t *in, uint8_t *out, long length, } #endif -#endif /* OPENSSL_HEADER_BLOWFISH_H */ +#endif // OPENSSL_HEADER_BLOWFISH_H diff --git a/Sources/BoringSSL/include/openssl/bn.h b/Sources/BoringSSL/include/openssl/bn.h index a57c23a96..bb32c2f56 100644 --- a/Sources/BoringSSL/include/openssl/bn.h +++ b/Sources/BoringSSL/include/openssl/bn.h @@ -126,25 +126,25 @@ #include #include -#include /* for PRIu64 and friends */ -#include /* for FILE* */ +#include // for PRIu64 and friends +#include // for FILE* #if defined(__cplusplus) extern "C" { #endif -/* BN provides support for working with arbitrary sized integers. For example, - * although the largest integer supported by the compiler might be 64 bits, BN - * will allow you to work with numbers until you run out of memory. 
*/ +// BN provides support for working with arbitrary sized integers. For example, +// although the largest integer supported by the compiler might be 64 bits, BN +// will allow you to work with numbers until you run out of memory. -/* BN_ULONG is the native word size when working with big integers. - * - * Note: on some platforms, inttypes.h does not define print format macros in - * C++ unless |__STDC_FORMAT_MACROS| defined. As this is a public header, bn.h - * does not define |__STDC_FORMAT_MACROS| itself. C++ source files which use the - * FMT macros must define it externally. */ +// BN_ULONG is the native word size when working with big integers. +// +// Note: on some platforms, inttypes.h does not define print format macros in +// C++ unless |__STDC_FORMAT_MACROS| defined. As this is a public header, bn.h +// does not define |__STDC_FORMAT_MACROS| itself. C++ source files which use the +// FMT macros must define it externally. #if defined(OPENSSL_64_BIT) #define BN_ULONG uint64_t #define BN_BITS2 64 @@ -164,699 +164,688 @@ extern "C" { #endif -/* Allocation and freeing. */ +// Allocation and freeing. -/* BN_new creates a new, allocated BIGNUM and initialises it. */ +// BN_new creates a new, allocated BIGNUM and initialises it. OPENSSL_EXPORT BIGNUM *BN_new(void); -/* BN_init initialises a stack allocated |BIGNUM|. */ +// BN_init initialises a stack allocated |BIGNUM|. OPENSSL_EXPORT void BN_init(BIGNUM *bn); -/* BN_free frees the data referenced by |bn| and, if |bn| was originally - * allocated on the heap, frees |bn| also. */ +// BN_free frees the data referenced by |bn| and, if |bn| was originally +// allocated on the heap, frees |bn| also. OPENSSL_EXPORT void BN_free(BIGNUM *bn); -/* BN_clear_free erases and frees the data referenced by |bn| and, if |bn| was - * originally allocated on the heap, frees |bn| also. */ +// BN_clear_free erases and frees the data referenced by |bn| and, if |bn| was +// originally allocated on the heap, frees |bn| also. OPENSSL_EXPORT void BN_clear_free(BIGNUM *bn); -/* BN_dup allocates a new BIGNUM and sets it equal to |src|. It returns the - * allocated BIGNUM on success or NULL otherwise. */ +// BN_dup allocates a new BIGNUM and sets it equal to |src|. It returns the +// allocated BIGNUM on success or NULL otherwise. OPENSSL_EXPORT BIGNUM *BN_dup(const BIGNUM *src); -/* BN_copy sets |dest| equal to |src| and returns |dest| or NULL on allocation - * failure. */ +// BN_copy sets |dest| equal to |src| and returns |dest| or NULL on allocation +// failure. OPENSSL_EXPORT BIGNUM *BN_copy(BIGNUM *dest, const BIGNUM *src); -/* BN_clear sets |bn| to zero and erases the old data. */ +// BN_clear sets |bn| to zero and erases the old data. OPENSSL_EXPORT void BN_clear(BIGNUM *bn); -/* BN_value_one returns a static BIGNUM with value 1. */ +// BN_value_one returns a static BIGNUM with value 1. OPENSSL_EXPORT const BIGNUM *BN_value_one(void); -/* Basic functions. */ +// Basic functions. -/* BN_num_bits returns the minimum number of bits needed to represent the - * absolute value of |bn|. */ +// BN_num_bits returns the minimum number of bits needed to represent the +// absolute value of |bn|. OPENSSL_EXPORT unsigned BN_num_bits(const BIGNUM *bn); -/* BN_num_bytes returns the minimum number of bytes needed to represent the - * absolute value of |bn|. */ +// BN_num_bytes returns the minimum number of bytes needed to represent the +// absolute value of |bn|. OPENSSL_EXPORT unsigned BN_num_bytes(const BIGNUM *bn); -/* BN_zero sets |bn| to zero. 
*/ +// BN_zero sets |bn| to zero. OPENSSL_EXPORT void BN_zero(BIGNUM *bn); -/* BN_one sets |bn| to one. It returns one on success or zero on allocation - * failure. */ +// BN_one sets |bn| to one. It returns one on success or zero on allocation +// failure. OPENSSL_EXPORT int BN_one(BIGNUM *bn); -/* BN_set_word sets |bn| to |value|. It returns one on success or zero on - * allocation failure. */ +// BN_set_word sets |bn| to |value|. It returns one on success or zero on +// allocation failure. OPENSSL_EXPORT int BN_set_word(BIGNUM *bn, BN_ULONG value); -/* BN_set_u64 sets |bn| to |value|. It returns one on success or zero on - * allocation failure. */ +// BN_set_u64 sets |bn| to |value|. It returns one on success or zero on +// allocation failure. OPENSSL_EXPORT int BN_set_u64(BIGNUM *bn, uint64_t value); -/* BN_set_negative sets the sign of |bn|. */ +// BN_set_negative sets the sign of |bn|. OPENSSL_EXPORT void BN_set_negative(BIGNUM *bn, int sign); -/* BN_is_negative returns one if |bn| is negative and zero otherwise. */ +// BN_is_negative returns one if |bn| is negative and zero otherwise. OPENSSL_EXPORT int BN_is_negative(const BIGNUM *bn); -/* Conversion functions. */ +// Conversion functions. -/* BN_bin2bn sets |*ret| to the value of |len| bytes from |in|, interpreted as - * a big-endian number, and returns |ret|. If |ret| is NULL then a fresh - * |BIGNUM| is allocated and returned. It returns NULL on allocation - * failure. */ +// BN_bin2bn sets |*ret| to the value of |len| bytes from |in|, interpreted as +// a big-endian number, and returns |ret|. If |ret| is NULL then a fresh +// |BIGNUM| is allocated and returned. It returns NULL on allocation +// failure. OPENSSL_EXPORT BIGNUM *BN_bin2bn(const uint8_t *in, size_t len, BIGNUM *ret); -/* BN_bn2bin serialises the absolute value of |in| to |out| as a big-endian - * integer, which must have |BN_num_bytes| of space available. It returns the - * number of bytes written. */ +// BN_bn2bin serialises the absolute value of |in| to |out| as a big-endian +// integer, which must have |BN_num_bytes| of space available. It returns the +// number of bytes written. OPENSSL_EXPORT size_t BN_bn2bin(const BIGNUM *in, uint8_t *out); -/* BN_le2bn sets |*ret| to the value of |len| bytes from |in|, interpreted as - * a little-endian number, and returns |ret|. If |ret| is NULL then a fresh - * |BIGNUM| is allocated and returned. It returns NULL on allocation - * failure. */ +// BN_le2bn sets |*ret| to the value of |len| bytes from |in|, interpreted as +// a little-endian number, and returns |ret|. If |ret| is NULL then a fresh +// |BIGNUM| is allocated and returned. It returns NULL on allocation +// failure. OPENSSL_EXPORT BIGNUM *BN_le2bn(const uint8_t *in, size_t len, BIGNUM *ret); -/* BN_bn2le_padded serialises the absolute value of |in| to |out| as a - * little-endian integer, which must have |len| of space available, padding - * out the remainder of out with zeros. If |len| is smaller than |BN_num_bytes|, - * the function fails and returns 0. Otherwise, it returns 1. */ +// BN_bn2le_padded serialises the absolute value of |in| to |out| as a +// little-endian integer, which must have |len| of space available, padding +// out the remainder of out with zeros. If |len| is smaller than |BN_num_bytes|, +// the function fails and returns 0. Otherwise, it returns 1. OPENSSL_EXPORT int BN_bn2le_padded(uint8_t *out, size_t len, const BIGNUM *in); -/* BN_bn2bin_padded serialises the absolute value of |in| to |out| as a - * big-endian integer. 
The integer is padded with leading zeros up to size - * |len|. If |len| is smaller than |BN_num_bytes|, the function fails and - * returns 0. Otherwise, it returns 1. */ +// BN_bn2bin_padded serialises the absolute value of |in| to |out| as a +// big-endian integer. The integer is padded with leading zeros up to size +// |len|. If |len| is smaller than |BN_num_bytes|, the function fails and +// returns 0. Otherwise, it returns 1. OPENSSL_EXPORT int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in); -/* BN_bn2cbb_padded behaves like |BN_bn2bin_padded| but writes to a |CBB|. */ +// BN_bn2cbb_padded behaves like |BN_bn2bin_padded| but writes to a |CBB|. OPENSSL_EXPORT int BN_bn2cbb_padded(CBB *out, size_t len, const BIGNUM *in); -/* BN_bn2hex returns an allocated string that contains a NUL-terminated, hex - * representation of |bn|. If |bn| is negative, the first char in the resulting - * string will be '-'. Returns NULL on allocation failure. */ +// BN_bn2hex returns an allocated string that contains a NUL-terminated, hex +// representation of |bn|. If |bn| is negative, the first char in the resulting +// string will be '-'. Returns NULL on allocation failure. OPENSSL_EXPORT char *BN_bn2hex(const BIGNUM *bn); -/* BN_hex2bn parses the leading hex number from |in|, which may be proceeded by - * a '-' to indicate a negative number and may contain trailing, non-hex data. - * If |outp| is not NULL, it constructs a BIGNUM equal to the hex number and - * stores it in |*outp|. If |*outp| is NULL then it allocates a new BIGNUM and - * updates |*outp|. It returns the number of bytes of |in| processed or zero on - * error. */ +// BN_hex2bn parses the leading hex number from |in|, which may be proceeded by +// a '-' to indicate a negative number and may contain trailing, non-hex data. +// If |outp| is not NULL, it constructs a BIGNUM equal to the hex number and +// stores it in |*outp|. If |*outp| is NULL then it allocates a new BIGNUM and +// updates |*outp|. It returns the number of bytes of |in| processed or zero on +// error. OPENSSL_EXPORT int BN_hex2bn(BIGNUM **outp, const char *in); -/* BN_bn2dec returns an allocated string that contains a NUL-terminated, - * decimal representation of |bn|. If |bn| is negative, the first char in the - * resulting string will be '-'. Returns NULL on allocation failure. */ +// BN_bn2dec returns an allocated string that contains a NUL-terminated, +// decimal representation of |bn|. If |bn| is negative, the first char in the +// resulting string will be '-'. Returns NULL on allocation failure. OPENSSL_EXPORT char *BN_bn2dec(const BIGNUM *a); -/* BN_dec2bn parses the leading decimal number from |in|, which may be - * proceeded by a '-' to indicate a negative number and may contain trailing, - * non-decimal data. If |outp| is not NULL, it constructs a BIGNUM equal to the - * decimal number and stores it in |*outp|. If |*outp| is NULL then it - * allocates a new BIGNUM and updates |*outp|. It returns the number of bytes - * of |in| processed or zero on error. */ +// BN_dec2bn parses the leading decimal number from |in|, which may be +// proceeded by a '-' to indicate a negative number and may contain trailing, +// non-decimal data. If |outp| is not NULL, it constructs a BIGNUM equal to the +// decimal number and stores it in |*outp|. If |*outp| is NULL then it +// allocates a new BIGNUM and updates |*outp|. It returns the number of bytes +// of |in| processed or zero on error. 
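/* Editor's note (illustrative sketch, not part of the vendored BoringSSL
 * sources): a short sketch of the conversion helpers documented above (hex
 * parsing, padded big-endian serialisation, decimal printing). The constants
 * and |bn_conversion_demo| are invented for the example. */
#include <openssl/bn.h>
#include <openssl/mem.h>
#include <stdint.h>
#include <stdio.h>

static void bn_conversion_demo(void) {
  BIGNUM *n = NULL;
  if (!BN_hex2bn(&n, "-1f4")) {  /* parses the leading hex number; allocates |n| */
    return;
  }

  uint8_t padded[8];
  /* Writes the absolute value, left-padded with zeros to 8 bytes. */
  if (BN_bn2bin_padded(padded, sizeof(padded), n)) {
    char *dec = BN_bn2dec(n);    /* "-500" */
    if (dec != NULL) {
      printf("%s uses %u significant bytes\n", dec, BN_num_bytes(n));
      OPENSSL_free(dec);
    }
  }
  BN_free(n);
}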
OPENSSL_EXPORT int BN_dec2bn(BIGNUM **outp, const char *in); -/* BN_asc2bn acts like |BN_dec2bn| or |BN_hex2bn| depending on whether |in| - * begins with "0X" or "0x" (indicating hex) or not (indicating decimal). A - * leading '-' is still permitted and comes before the optional 0X/0x. It - * returns one on success or zero on error. */ +// BN_asc2bn acts like |BN_dec2bn| or |BN_hex2bn| depending on whether |in| +// begins with "0X" or "0x" (indicating hex) or not (indicating decimal). A +// leading '-' is still permitted and comes before the optional 0X/0x. It +// returns one on success or zero on error. OPENSSL_EXPORT int BN_asc2bn(BIGNUM **outp, const char *in); -/* BN_print writes a hex encoding of |a| to |bio|. It returns one on success - * and zero on error. */ +// BN_print writes a hex encoding of |a| to |bio|. It returns one on success +// and zero on error. OPENSSL_EXPORT int BN_print(BIO *bio, const BIGNUM *a); -/* BN_print_fp acts like |BIO_print|, but wraps |fp| in a |BIO| first. */ +// BN_print_fp acts like |BIO_print|, but wraps |fp| in a |BIO| first. OPENSSL_EXPORT int BN_print_fp(FILE *fp, const BIGNUM *a); -/* BN_get_word returns the absolute value of |bn| as a single word. If |bn| is - * too large to be represented as a single word, the maximum possible value - * will be returned. */ +// BN_get_word returns the absolute value of |bn| as a single word. If |bn| is +// too large to be represented as a single word, the maximum possible value +// will be returned. OPENSSL_EXPORT BN_ULONG BN_get_word(const BIGNUM *bn); -/* BN_get_u64 sets |*out| to the absolute value of |bn| as a |uint64_t| and - * returns one. If |bn| is too large to be represented as a |uint64_t|, it - * returns zero. */ +// BN_get_u64 sets |*out| to the absolute value of |bn| as a |uint64_t| and +// returns one. If |bn| is too large to be represented as a |uint64_t|, it +// returns zero. OPENSSL_EXPORT int BN_get_u64(const BIGNUM *bn, uint64_t *out); -/* ASN.1 functions. */ +// ASN.1 functions. -/* BN_parse_asn1_unsigned parses a non-negative DER INTEGER from |cbs| writes - * the result to |ret|. It returns one on success and zero on failure. */ +// BN_parse_asn1_unsigned parses a non-negative DER INTEGER from |cbs| writes +// the result to |ret|. It returns one on success and zero on failure. OPENSSL_EXPORT int BN_parse_asn1_unsigned(CBS *cbs, BIGNUM *ret); -/* BN_parse_asn1_unsigned_buggy acts like |BN_parse_asn1_unsigned| but tolerates - * some invalid encodings. Do not use this function. */ -OPENSSL_EXPORT int BN_parse_asn1_unsigned_buggy(CBS *cbs, BIGNUM *ret); - -/* BN_marshal_asn1 marshals |bn| as a non-negative DER INTEGER and appends the - * result to |cbb|. It returns one on success and zero on failure. */ +// BN_marshal_asn1 marshals |bn| as a non-negative DER INTEGER and appends the +// result to |cbb|. It returns one on success and zero on failure. OPENSSL_EXPORT int BN_marshal_asn1(CBB *cbb, const BIGNUM *bn); -/* Internal functions. - * - * These functions are useful for code that is doing low-level manipulations of - * BIGNUM values. However, be sure that no other function in this file does - * what you want before turning to these. */ - -/* bn_correct_top decrements |bn->top| until |bn->d[top-1]| is non-zero or - * until |top| is zero. If |bn| is zero, |bn->neg| is set to zero. */ -OPENSSL_EXPORT void bn_correct_top(BIGNUM *bn); - -/* bn_wexpand ensures that |bn| has at least |words| works of space without - * altering its value. It returns |bn| on success or NULL on allocation - * failure. 
*/ -OPENSSL_EXPORT BIGNUM *bn_wexpand(BIGNUM *bn, size_t words); - - -/* BIGNUM pools. - * - * Certain BIGNUM operations need to use many temporary variables and - * allocating and freeing them can be quite slow. Thus such operations typically - * take a |BN_CTX| parameter, which contains a pool of |BIGNUMs|. The |ctx| - * argument to a public function may be NULL, in which case a local |BN_CTX| - * will be created just for the lifetime of that call. - * - * A function must call |BN_CTX_start| first. Then, |BN_CTX_get| may be called - * repeatedly to obtain temporary |BIGNUM|s. All |BN_CTX_get| calls must be made - * before calling any other functions that use the |ctx| as an argument. - * - * Finally, |BN_CTX_end| must be called before returning from the function. - * When |BN_CTX_end| is called, the |BIGNUM| pointers obtained from - * |BN_CTX_get| become invalid. */ - -/* BN_CTX_new returns a new, empty BN_CTX or NULL on allocation failure. */ +// BIGNUM pools. +// +// Certain BIGNUM operations need to use many temporary variables and +// allocating and freeing them can be quite slow. Thus such operations typically +// take a |BN_CTX| parameter, which contains a pool of |BIGNUMs|. The |ctx| +// argument to a public function may be NULL, in which case a local |BN_CTX| +// will be created just for the lifetime of that call. +// +// A function must call |BN_CTX_start| first. Then, |BN_CTX_get| may be called +// repeatedly to obtain temporary |BIGNUM|s. All |BN_CTX_get| calls must be made +// before calling any other functions that use the |ctx| as an argument. +// +// Finally, |BN_CTX_end| must be called before returning from the function. +// When |BN_CTX_end| is called, the |BIGNUM| pointers obtained from +// |BN_CTX_get| become invalid. + +// BN_CTX_new returns a new, empty BN_CTX or NULL on allocation failure. OPENSSL_EXPORT BN_CTX *BN_CTX_new(void); -/* BN_CTX_free frees all BIGNUMs contained in |ctx| and then frees |ctx| - * itself. */ +// BN_CTX_free frees all BIGNUMs contained in |ctx| and then frees |ctx| +// itself. OPENSSL_EXPORT void BN_CTX_free(BN_CTX *ctx); -/* BN_CTX_start "pushes" a new entry onto the |ctx| stack and allows future - * calls to |BN_CTX_get|. */ +// BN_CTX_start "pushes" a new entry onto the |ctx| stack and allows future +// calls to |BN_CTX_get|. OPENSSL_EXPORT void BN_CTX_start(BN_CTX *ctx); -/* BN_CTX_get returns a new |BIGNUM|, or NULL on allocation failure. Once - * |BN_CTX_get| has returned NULL, all future calls will also return NULL until - * |BN_CTX_end| is called. */ +// BN_CTX_get returns a new |BIGNUM|, or NULL on allocation failure. Once +// |BN_CTX_get| has returned NULL, all future calls will also return NULL until +// |BN_CTX_end| is called. OPENSSL_EXPORT BIGNUM *BN_CTX_get(BN_CTX *ctx); -/* BN_CTX_end invalidates all |BIGNUM|s returned from |BN_CTX_get| since the - * matching |BN_CTX_start| call. */ +// BN_CTX_end invalidates all |BIGNUM|s returned from |BN_CTX_get| since the +// matching |BN_CTX_start| call. OPENSSL_EXPORT void BN_CTX_end(BN_CTX *ctx); -/* Simple arithmetic */ +// Simple arithmetic -/* BN_add sets |r| = |a| + |b|, where |r| may be the same pointer as either |a| - * or |b|. It returns one on success and zero on allocation failure. */ +// BN_add sets |r| = |a| + |b|, where |r| may be the same pointer as either |a| +// or |b|. It returns one on success and zero on allocation failure. 
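/* Editor's note (illustrative sketch, not part of the vendored BoringSSL
 * sources): the BN_CTX start/get/end discipline described above, used to
 * compute (a+b)*(a-b) with pooled temporaries. |sum_times_diff| and its
 * parameter names are invented; |ctx| is assumed to be non-NULL. */
#include <openssl/bn.h>

static int sum_times_diff(BIGNUM *out, const BIGNUM *a, const BIGNUM *b,
                          BN_CTX *ctx) {
  int ok = 0;
  BN_CTX_start(ctx);
  BIGNUM *sum = BN_CTX_get(ctx);
  BIGNUM *diff = BN_CTX_get(ctx);  /* all BN_CTX_get calls come first */
  if (diff == NULL ||              /* once one call fails, later ones do too */
      !BN_add(sum, a, b) ||
      !BN_sub(diff, a, b) ||
      !BN_mul(out, sum, diff, ctx)) {
    goto done;
  }
  ok = 1;

done:
  BN_CTX_end(ctx);  /* invalidates |sum| and |diff| */
  return ok;
}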
OPENSSL_EXPORT int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); -/* BN_uadd sets |r| = |a| + |b|, where |a| and |b| are non-negative and |r| may - * be the same pointer as either |a| or |b|. It returns one on success and zero - * on allocation failure. */ +// BN_uadd sets |r| = |a| + |b|, where |a| and |b| are non-negative and |r| may +// be the same pointer as either |a| or |b|. It returns one on success and zero +// on allocation failure. OPENSSL_EXPORT int BN_uadd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); -/* BN_add_word adds |w| to |a|. It returns one on success and zero otherwise. */ +// BN_add_word adds |w| to |a|. It returns one on success and zero otherwise. OPENSSL_EXPORT int BN_add_word(BIGNUM *a, BN_ULONG w); -/* BN_sub sets |r| = |a| - |b|, where |r| may be the same pointer as either |a| - * or |b|. It returns one on success and zero on allocation failure. */ +// BN_sub sets |r| = |a| - |b|, where |r| may be the same pointer as either |a| +// or |b|. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); -/* BN_usub sets |r| = |a| - |b|, where |a| and |b| are non-negative integers, - * |b| < |a| and |r| may be the same pointer as either |a| or |b|. It returns - * one on success and zero on allocation failure. */ +// BN_usub sets |r| = |a| - |b|, where |a| and |b| are non-negative integers, +// |b| < |a| and |r| may be the same pointer as either |a| or |b|. It returns +// one on success and zero on allocation failure. OPENSSL_EXPORT int BN_usub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b); -/* BN_sub_word subtracts |w| from |a|. It returns one on success and zero on - * allocation failure. */ +// BN_sub_word subtracts |w| from |a|. It returns one on success and zero on +// allocation failure. OPENSSL_EXPORT int BN_sub_word(BIGNUM *a, BN_ULONG w); -/* BN_mul sets |r| = |a| * |b|, where |r| may be the same pointer as |a| or - * |b|. Returns one on success and zero otherwise. */ +// BN_mul sets |r| = |a| * |b|, where |r| may be the same pointer as |a| or +// |b|. Returns one on success and zero otherwise. OPENSSL_EXPORT int BN_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); -/* BN_mul_word sets |bn| = |bn| * |w|. It returns one on success or zero on - * allocation failure. */ +// BN_mul_word sets |bn| = |bn| * |w|. It returns one on success or zero on +// allocation failure. OPENSSL_EXPORT int BN_mul_word(BIGNUM *bn, BN_ULONG w); -/* BN_sqr sets |r| = |a|^2 (i.e. squares), where |r| may be the same pointer as - * |a|. Returns one on success and zero otherwise. This is more efficient than - * BN_mul(r, a, a, ctx). */ +// BN_sqr sets |r| = |a|^2 (i.e. squares), where |r| may be the same pointer as +// |a|. Returns one on success and zero otherwise. This is more efficient than +// BN_mul(r, a, a, ctx). OPENSSL_EXPORT int BN_sqr(BIGNUM *r, const BIGNUM *a, BN_CTX *ctx); -/* BN_div divides |numerator| by |divisor| and places the result in |quotient| - * and the remainder in |rem|. Either of |quotient| or |rem| may be NULL, in - * which case the respective value is not returned. The result is rounded - * towards zero; thus if |numerator| is negative, the remainder will be zero or - * negative. It returns one on success or zero on error. */ +// BN_div divides |numerator| by |divisor| and places the result in |quotient| +// and the remainder in |rem|. Either of |quotient| or |rem| may be NULL, in +// which case the respective value is not returned. 
The result is rounded +// towards zero; thus if |numerator| is negative, the remainder will be zero or +// negative. It returns one on success or zero on error. OPENSSL_EXPORT int BN_div(BIGNUM *quotient, BIGNUM *rem, const BIGNUM *numerator, const BIGNUM *divisor, BN_CTX *ctx); -/* BN_div_word sets |numerator| = |numerator|/|divisor| and returns the - * remainder or (BN_ULONG)-1 on error. */ +// BN_div_word sets |numerator| = |numerator|/|divisor| and returns the +// remainder or (BN_ULONG)-1 on error. OPENSSL_EXPORT BN_ULONG BN_div_word(BIGNUM *numerator, BN_ULONG divisor); -/* BN_sqrt sets |*out_sqrt| (which may be the same |BIGNUM| as |in|) to the - * square root of |in|, using |ctx|. It returns one on success or zero on - * error. Negative numbers and non-square numbers will result in an error with - * appropriate errors on the error queue. */ +// BN_sqrt sets |*out_sqrt| (which may be the same |BIGNUM| as |in|) to the +// square root of |in|, using |ctx|. It returns one on success or zero on +// error. Negative numbers and non-square numbers will result in an error with +// appropriate errors on the error queue. OPENSSL_EXPORT int BN_sqrt(BIGNUM *out_sqrt, const BIGNUM *in, BN_CTX *ctx); -/* Comparison functions */ +// Comparison functions -/* BN_cmp returns a value less than, equal to or greater than zero if |a| is - * less than, equal to or greater than |b|, respectively. */ +// BN_cmp returns a value less than, equal to or greater than zero if |a| is +// less than, equal to or greater than |b|, respectively. OPENSSL_EXPORT int BN_cmp(const BIGNUM *a, const BIGNUM *b); -/* BN_cmp_word is like |BN_cmp| except it takes its second argument as a - * |BN_ULONG| instead of a |BIGNUM|. */ +// BN_cmp_word is like |BN_cmp| except it takes its second argument as a +// |BN_ULONG| instead of a |BIGNUM|. OPENSSL_EXPORT int BN_cmp_word(const BIGNUM *a, BN_ULONG b); -/* BN_ucmp returns a value less than, equal to or greater than zero if the - * absolute value of |a| is less than, equal to or greater than the absolute - * value of |b|, respectively. */ +// BN_ucmp returns a value less than, equal to or greater than zero if the +// absolute value of |a| is less than, equal to or greater than the absolute +// value of |b|, respectively. OPENSSL_EXPORT int BN_ucmp(const BIGNUM *a, const BIGNUM *b); -/* BN_equal_consttime returns one if |a| is equal to |b|, and zero otherwise. - * It takes an amount of time dependent on the sizes of |a| and |b|, but - * independent of the contents (including the signs) of |a| and |b|. */ +// BN_equal_consttime returns one if |a| is equal to |b|, and zero otherwise. +// It takes an amount of time dependent on the sizes of |a| and |b|, but +// independent of the contents (including the signs) of |a| and |b|. OPENSSL_EXPORT int BN_equal_consttime(const BIGNUM *a, const BIGNUM *b); -/* BN_abs_is_word returns one if the absolute value of |bn| equals |w| and zero - * otherwise. */ +// BN_abs_is_word returns one if the absolute value of |bn| equals |w| and zero +// otherwise. OPENSSL_EXPORT int BN_abs_is_word(const BIGNUM *bn, BN_ULONG w); -/* BN_is_zero returns one if |bn| is zero and zero otherwise. */ +// BN_is_zero returns one if |bn| is zero and zero otherwise. OPENSSL_EXPORT int BN_is_zero(const BIGNUM *bn); -/* BN_is_one returns one if |bn| equals one and zero otherwise. */ +// BN_is_one returns one if |bn| equals one and zero otherwise. OPENSSL_EXPORT int BN_is_one(const BIGNUM *bn); -/* BN_is_word returns one if |bn| is exactly |w| and zero otherwise. 
*/ +// BN_is_word returns one if |bn| is exactly |w| and zero otherwise. OPENSSL_EXPORT int BN_is_word(const BIGNUM *bn, BN_ULONG w); -/* BN_is_odd returns one if |bn| is odd and zero otherwise. */ +// BN_is_odd returns one if |bn| is odd and zero otherwise. OPENSSL_EXPORT int BN_is_odd(const BIGNUM *bn); -/* BN_is_pow2 returns 1 if |a| is a power of two, and 0 otherwise. */ +// BN_is_pow2 returns 1 if |a| is a power of two, and 0 otherwise. OPENSSL_EXPORT int BN_is_pow2(const BIGNUM *a); -/* Bitwise operations. */ +// Bitwise operations. -/* BN_lshift sets |r| equal to |a| << n. The |a| and |r| arguments may be the - * same |BIGNUM|. It returns one on success and zero on allocation failure. */ +// BN_lshift sets |r| equal to |a| << n. The |a| and |r| arguments may be the +// same |BIGNUM|. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_lshift(BIGNUM *r, const BIGNUM *a, int n); -/* BN_lshift1 sets |r| equal to |a| << 1, where |r| and |a| may be the same - * pointer. It returns one on success and zero on allocation failure. */ +// BN_lshift1 sets |r| equal to |a| << 1, where |r| and |a| may be the same +// pointer. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_lshift1(BIGNUM *r, const BIGNUM *a); -/* BN_rshift sets |r| equal to |a| >> n, where |r| and |a| may be the same - * pointer. It returns one on success and zero on allocation failure. */ +// BN_rshift sets |r| equal to |a| >> n, where |r| and |a| may be the same +// pointer. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_rshift(BIGNUM *r, const BIGNUM *a, int n); -/* BN_rshift1 sets |r| equal to |a| >> 1, where |r| and |a| may be the same - * pointer. It returns one on success and zero on allocation failure. */ +// BN_rshift1 sets |r| equal to |a| >> 1, where |r| and |a| may be the same +// pointer. It returns one on success and zero on allocation failure. OPENSSL_EXPORT int BN_rshift1(BIGNUM *r, const BIGNUM *a); -/* BN_set_bit sets the |n|th, least-significant bit in |a|. For example, if |a| - * is 2 then setting bit zero will make it 3. It returns one on success or zero - * on allocation failure. */ +// BN_set_bit sets the |n|th, least-significant bit in |a|. For example, if |a| +// is 2 then setting bit zero will make it 3. It returns one on success or zero +// on allocation failure. OPENSSL_EXPORT int BN_set_bit(BIGNUM *a, int n); -/* BN_clear_bit clears the |n|th, least-significant bit in |a|. For example, if - * |a| is 3, clearing bit zero will make it two. It returns one on success or - * zero on allocation failure. */ +// BN_clear_bit clears the |n|th, least-significant bit in |a|. For example, if +// |a| is 3, clearing bit zero will make it two. It returns one on success or +// zero on allocation failure. OPENSSL_EXPORT int BN_clear_bit(BIGNUM *a, int n); -/* BN_is_bit_set returns the value of the |n|th, least-significant bit in |a|, - * or zero if the bit doesn't exist. */ +// BN_is_bit_set returns one if the |n|th least-significant bit in |a| exists +// and is set. Otherwise, it returns zero. OPENSSL_EXPORT int BN_is_bit_set(const BIGNUM *a, int n); -/* BN_mask_bits truncates |a| so that it is only |n| bits long. It returns one - * on success or zero if |n| is greater than the length of |a| already. */ +// BN_mask_bits truncates |a| so that it is only |n| bits long. It returns one +// on success or zero if |n| is greater than the length of |a| already. 
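/* Editor's note (illustrative sketch, not part of the vendored BoringSSL
 * sources): exercising the bit operations documented above. The function name
 * |bn_bit_demo| is invented for the example. */
#include <assert.h>
#include <openssl/bn.h>

static void bn_bit_demo(void) {
  BIGNUM *n = BN_new();
  if (n == NULL) {
    return;
  }
  BN_zero(n);
  if (BN_set_bit(n, 64) &&          /* n = 2^64 */
      BN_set_bit(n, 0)) {           /* n = 2^64 + 1 */
    assert(BN_is_bit_set(n, 64));
    assert(!BN_is_bit_set(n, 1));
    assert(BN_num_bits(n) == 65);
    if (BN_mask_bits(n, 64)) {      /* keep only the low 64 bits */
      assert(BN_is_one(n));
    }
  }
  BN_free(n);
}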
OPENSSL_EXPORT int BN_mask_bits(BIGNUM *a, int n); -/* Modulo arithmetic. */ +// Modulo arithmetic. -/* BN_mod_word returns |a| mod |w| or (BN_ULONG)-1 on error. */ +// BN_mod_word returns |a| mod |w| or (BN_ULONG)-1 on error. OPENSSL_EXPORT BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w); -/* BN_mod_pow2 sets |r| = |a| mod 2^|e|. It returns 1 on success and - * 0 on error. */ +// BN_mod_pow2 sets |r| = |a| mod 2^|e|. It returns 1 on success and +// 0 on error. OPENSSL_EXPORT int BN_mod_pow2(BIGNUM *r, const BIGNUM *a, size_t e); -/* BN_nnmod_pow2 sets |r| = |a| mod 2^|e| where |r| is always positive. - * It returns 1 on success and 0 on error. */ +// BN_nnmod_pow2 sets |r| = |a| mod 2^|e| where |r| is always positive. +// It returns 1 on success and 0 on error. OPENSSL_EXPORT int BN_nnmod_pow2(BIGNUM *r, const BIGNUM *a, size_t e); -/* BN_mod is a helper macro that calls |BN_div| and discards the quotient. */ +// BN_mod is a helper macro that calls |BN_div| and discards the quotient. #define BN_mod(rem, numerator, divisor, ctx) \ BN_div(NULL, (rem), (numerator), (divisor), (ctx)) -/* BN_nnmod is a non-negative modulo function. It acts like |BN_mod|, but 0 <= - * |rem| < |divisor| is always true. It returns one on success and zero on - * error. */ +// BN_nnmod is a non-negative modulo function. It acts like |BN_mod|, but 0 <= +// |rem| < |divisor| is always true. It returns one on success and zero on +// error. OPENSSL_EXPORT int BN_nnmod(BIGNUM *rem, const BIGNUM *numerator, const BIGNUM *divisor, BN_CTX *ctx); -/* BN_mod_add sets |r| = |a| + |b| mod |m|. It returns one on success and zero - * on error. */ +// BN_mod_add sets |r| = |a| + |b| mod |m|. It returns one on success and zero +// on error. OPENSSL_EXPORT int BN_mod_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx); -/* BN_mod_add_quick acts like |BN_mod_add| but requires that |a| and |b| be - * non-negative and less than |m|. */ +// BN_mod_add_quick acts like |BN_mod_add| but requires that |a| and |b| be +// non-negative and less than |m|. OPENSSL_EXPORT int BN_mod_add_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m); -/* BN_mod_sub sets |r| = |a| - |b| mod |m|. It returns one on success and zero - * on error. */ +// BN_mod_sub sets |r| = |a| - |b| mod |m|. It returns one on success and zero +// on error. OPENSSL_EXPORT int BN_mod_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx); -/* BN_mod_sub_quick acts like |BN_mod_sub| but requires that |a| and |b| be - * non-negative and less than |m|. */ +// BN_mod_sub_quick acts like |BN_mod_sub| but requires that |a| and |b| be +// non-negative and less than |m|. OPENSSL_EXPORT int BN_mod_sub_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m); -/* BN_mod_mul sets |r| = |a|*|b| mod |m|. It returns one on success and zero - * on error. */ +// BN_mod_mul sets |r| = |a|*|b| mod |m|. It returns one on success and zero +// on error. OPENSSL_EXPORT int BN_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BIGNUM *m, BN_CTX *ctx); -/* BN_mod_sqr sets |r| = |a|^2 mod |m|. It returns one on success and zero - * on error. */ +// BN_mod_sqr sets |r| = |a|^2 mod |m|. It returns one on success and zero +// on error. OPENSSL_EXPORT int BN_mod_sqr(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx); -/* BN_mod_lshift sets |r| = (|a| << n) mod |m|, where |r| and |a| may be the - * same pointer. It returns one on success and zero on error. 
*/ +// BN_mod_lshift sets |r| = (|a| << n) mod |m|, where |r| and |a| may be the +// same pointer. It returns one on success and zero on error. OPENSSL_EXPORT int BN_mod_lshift(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m, BN_CTX *ctx); -/* BN_mod_lshift_quick acts like |BN_mod_lshift| but requires that |a| be - * non-negative and less than |m|. */ +// BN_mod_lshift_quick acts like |BN_mod_lshift| but requires that |a| be +// non-negative and less than |m|. OPENSSL_EXPORT int BN_mod_lshift_quick(BIGNUM *r, const BIGNUM *a, int n, const BIGNUM *m); -/* BN_mod_lshift1 sets |r| = (|a| << 1) mod |m|, where |r| and |a| may be the - * same pointer. It returns one on success and zero on error. */ +// BN_mod_lshift1 sets |r| = (|a| << 1) mod |m|, where |r| and |a| may be the +// same pointer. It returns one on success and zero on error. OPENSSL_EXPORT int BN_mod_lshift1(BIGNUM *r, const BIGNUM *a, const BIGNUM *m, BN_CTX *ctx); -/* BN_mod_lshift1_quick acts like |BN_mod_lshift1| but requires that |a| be - * non-negative and less than |m|. */ +// BN_mod_lshift1_quick acts like |BN_mod_lshift1| but requires that |a| be +// non-negative and less than |m|. OPENSSL_EXPORT int BN_mod_lshift1_quick(BIGNUM *r, const BIGNUM *a, const BIGNUM *m); -/* BN_mod_sqrt returns a newly-allocated |BIGNUM|, r, such that - * r^2 == a (mod p). |p| must be a prime. It returns NULL on error or if |a| is - * not a square mod |p|. In the latter case, it will add |BN_R_NOT_A_SQUARE| to - * the error queue. */ +// BN_mod_sqrt returns a newly-allocated |BIGNUM|, r, such that +// r^2 == a (mod p). |p| must be a prime. It returns NULL on error or if |a| is +// not a square mod |p|. In the latter case, it will add |BN_R_NOT_A_SQUARE| to +// the error queue. OPENSSL_EXPORT BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); -/* Random and prime number generation. */ +// Random and prime number generation. -/* The following are values for the |top| parameter of |BN_rand|. */ +// The following are values for the |top| parameter of |BN_rand|. #define BN_RAND_TOP_ANY (-1) #define BN_RAND_TOP_ONE 0 #define BN_RAND_TOP_TWO 1 -/* The following are values for the |bottom| parameter of |BN_rand|. */ +// The following are values for the |bottom| parameter of |BN_rand|. #define BN_RAND_BOTTOM_ANY 0 #define BN_RAND_BOTTOM_ODD 1 -/* BN_rand sets |rnd| to a random number of length |bits|. It returns one on - * success and zero otherwise. - * - * |top| must be one of the |BN_RAND_TOP_*| values. If |BN_RAND_TOP_ONE|, the - * most-significant bit, if any, will be set. If |BN_RAND_TOP_TWO|, the two - * most significant bits, if any, will be set. If |BN_RAND_TOP_ANY|, no extra - * action will be taken and |BN_num_bits(rnd)| may not equal |bits| if the most - * significant bits randomly ended up as zeros. - * - * |bottom| must be one of the |BN_RAND_BOTTOM_*| values. If - * |BN_RAND_BOTTOM_ODD|, the least-significant bit, if any, will be set. If - * |BN_RAND_BOTTOM_ANY|, no extra action will be taken. */ +// BN_rand sets |rnd| to a random number of length |bits|. It returns one on +// success and zero otherwise. +// +// |top| must be one of the |BN_RAND_TOP_*| values. If |BN_RAND_TOP_ONE|, the +// most-significant bit, if any, will be set. If |BN_RAND_TOP_TWO|, the two +// most significant bits, if any, will be set. If |BN_RAND_TOP_ANY|, no extra +// action will be taken and |BN_num_bits(rnd)| may not equal |bits| if the most +// significant bits randomly ended up as zeros. 
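/* Editor's note (illustrative sketch, not part of the vendored BoringSSL
 * sources): combining the modular-arithmetic helpers documented above to
 * compute a*b + c (mod m). |mod_mul_add| and its parameter names are invented
 * for the example; |ctx| is assumed to be non-NULL. */
#include <openssl/bn.h>

static int mod_mul_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
                       const BIGNUM *c, const BIGNUM *m, BN_CTX *ctx) {
  /* r = a*b mod m, then r = r + c mod m. Each step returns one on success. */
  return BN_mod_mul(r, a, b, m, ctx) &&
         BN_mod_add(r, r, c, m, ctx);
}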
+// +// |bottom| must be one of the |BN_RAND_BOTTOM_*| values. If +// |BN_RAND_BOTTOM_ODD|, the least-significant bit, if any, will be set. If +// |BN_RAND_BOTTOM_ANY|, no extra action will be taken. OPENSSL_EXPORT int BN_rand(BIGNUM *rnd, int bits, int top, int bottom); -/* BN_pseudo_rand is an alias for |BN_rand|. */ +// BN_pseudo_rand is an alias for |BN_rand|. OPENSSL_EXPORT int BN_pseudo_rand(BIGNUM *rnd, int bits, int top, int bottom); -/* BN_rand_range is equivalent to |BN_rand_range_ex| with |min_inclusive| set - * to zero and |max_exclusive| set to |range|. */ +// BN_rand_range is equivalent to |BN_rand_range_ex| with |min_inclusive| set +// to zero and |max_exclusive| set to |range|. OPENSSL_EXPORT int BN_rand_range(BIGNUM *rnd, const BIGNUM *range); -/* BN_rand_range_ex sets |rnd| to a random value in - * [min_inclusive..max_exclusive). It returns one on success and zero - * otherwise. */ +// BN_rand_range_ex sets |rnd| to a random value in +// [min_inclusive..max_exclusive). It returns one on success and zero +// otherwise. OPENSSL_EXPORT int BN_rand_range_ex(BIGNUM *r, BN_ULONG min_inclusive, const BIGNUM *max_exclusive); -/* BN_pseudo_rand_range is an alias for BN_rand_range. */ +// BN_pseudo_rand_range is an alias for BN_rand_range. OPENSSL_EXPORT int BN_pseudo_rand_range(BIGNUM *rnd, const BIGNUM *range); -/* BN_generate_dsa_nonce generates a random number 0 <= out < range. Unlike - * BN_rand_range, it also includes the contents of |priv| and |message| in the - * generation so that an RNG failure isn't fatal as long as |priv| remains - * secret. This is intended for use in DSA and ECDSA where an RNG weakness - * leads directly to private key exposure unless this function is used. - * It returns one on success and zero on error. */ -OPENSSL_EXPORT int BN_generate_dsa_nonce(BIGNUM *out, const BIGNUM *range, - const BIGNUM *priv, - const uint8_t *message, - size_t message_len, BN_CTX *ctx); - -/* BN_GENCB holds a callback function that is used by generation functions that - * can take a very long time to complete. Use |BN_GENCB_set| to initialise a - * |BN_GENCB| structure. - * - * The callback receives the address of that |BN_GENCB| structure as its last - * argument and the user is free to put an arbitrary pointer in |arg|. The other - * arguments are set as follows: - * event=BN_GENCB_GENERATED, n=i: after generating the i'th possible prime - * number. - * event=BN_GENCB_PRIME_TEST, n=-1: when finished trial division primality - * checks. - * event=BN_GENCB_PRIME_TEST, n=i: when the i'th primality test has finished. - * - * The callback can return zero to abort the generation progress or one to - * allow it to continue. - * - * When other code needs to call a BN generation function it will often take a - * BN_GENCB argument and may call the function with other argument values. */ +// BN_GENCB holds a callback function that is used by generation functions that +// can take a very long time to complete. Use |BN_GENCB_set| to initialise a +// |BN_GENCB| structure. +// +// The callback receives the address of that |BN_GENCB| structure as its last +// argument and the user is free to put an arbitrary pointer in |arg|. The other +// arguments are set as follows: +// event=BN_GENCB_GENERATED, n=i: after generating the i'th possible prime +// number. +// event=BN_GENCB_PRIME_TEST, n=-1: when finished trial division primality +// checks. +// event=BN_GENCB_PRIME_TEST, n=i: when the i'th primality test has finished. 
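/* Editor's note (illustrative sketch, not part of the vendored BoringSSL
 * sources): drawing random values with |BN_rand| and |BN_rand_range| as
 * documented above. |bn_random_demo| and the 256-bit size are invented for
 * the example. */
#include <openssl/bn.h>

static int bn_random_demo(const BIGNUM *modulus) {
  int ok = 0;
  BIGNUM *r = BN_new();
  if (r == NULL) {
    return 0;
  }
  /* 256-bit value with the most-significant bit set and forced to be odd. */
  if (!BN_rand(r, 256, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ODD)) {
    goto done;
  }
  /* Uniform value in [0, modulus). */
  if (!BN_rand_range(r, modulus)) {
    goto done;
  }
  ok = 1;

done:
  BN_free(r);
  return ok;
}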
+// +// The callback can return zero to abort the generation progress or one to +// allow it to continue. +// +// When other code needs to call a BN generation function it will often take a +// BN_GENCB argument and may call the function with other argument values. #define BN_GENCB_GENERATED 0 #define BN_GENCB_PRIME_TEST 1 struct bn_gencb_st { - void *arg; /* callback-specific data */ + void *arg; // callback-specific data int (*callback)(int event, int n, struct bn_gencb_st *); }; -/* BN_GENCB_set configures |callback| to call |f| and sets |callout->arg| to - * |arg|. */ +// BN_GENCB_set configures |callback| to call |f| and sets |callout->arg| to +// |arg|. OPENSSL_EXPORT void BN_GENCB_set(BN_GENCB *callback, int (*f)(int event, int n, struct bn_gencb_st *), void *arg); -/* BN_GENCB_call calls |callback|, if not NULL, and returns the return value of - * the callback, or 1 if |callback| is NULL. */ +// BN_GENCB_call calls |callback|, if not NULL, and returns the return value of +// the callback, or 1 if |callback| is NULL. OPENSSL_EXPORT int BN_GENCB_call(BN_GENCB *callback, int event, int n); -/* BN_generate_prime_ex sets |ret| to a prime number of |bits| length. If safe - * is non-zero then the prime will be such that (ret-1)/2 is also a prime. - * (This is needed for Diffie-Hellman groups to ensure that the only subgroups - * are of size 2 and (p-1)/2.). - * - * If |add| is not NULL, the prime will fulfill the condition |ret| % |add| == - * |rem| in order to suit a given generator. (If |rem| is NULL then |ret| % - * |add| == 1.) - * - * If |cb| is not NULL, it will be called during processing to give an - * indication of progress. See the comments for |BN_GENCB|. It returns one on - * success and zero otherwise. */ +// BN_generate_prime_ex sets |ret| to a prime number of |bits| length. If safe +// is non-zero then the prime will be such that (ret-1)/2 is also a prime. +// (This is needed for Diffie-Hellman groups to ensure that the only subgroups +// are of size 2 and (p-1)/2.). +// +// If |add| is not NULL, the prime will fulfill the condition |ret| % |add| == +// |rem| in order to suit a given generator. (If |rem| is NULL then |ret| % +// |add| == 1.) +// +// If |cb| is not NULL, it will be called during processing to give an +// indication of progress. See the comments for |BN_GENCB|. It returns one on +// success and zero otherwise. OPENSSL_EXPORT int BN_generate_prime_ex(BIGNUM *ret, int bits, int safe, const BIGNUM *add, const BIGNUM *rem, BN_GENCB *cb); -/* BN_prime_checks is magic value that can be used as the |checks| argument to - * the primality testing functions in order to automatically select a number of - * Miller-Rabin checks that gives a false positive rate of ~2^{-80}. */ +// BN_prime_checks is magic value that can be used as the |checks| argument to +// the primality testing functions in order to automatically select a number of +// Miller-Rabin checks that gives a false positive rate of ~2^{-80}. #define BN_prime_checks 0 -/* BN_primality_test sets |*is_probably_prime| to one if |candidate| is - * probably a prime number by the Miller-Rabin test or zero if it's certainly - * not. - * - * If |do_trial_division| is non-zero then |candidate| will be tested against a - * list of small primes before Miller-Rabin tests. The probability of this - * function returning a false positive is 2^{2*checks}. If |checks| is - * |BN_prime_checks| then a value that results in approximately 2^{-80} false - * positive probability is used. 
If |cb| is not NULL then it is called during - * the checking process. See the comment above |BN_GENCB|. - * - * The function returns one on success and zero on error. - * - * (If you are unsure whether you want |do_trial_division|, don't set it.) */ +// bn_primality_result_t enumerates the outcomes of primality-testing. +enum bn_primality_result_t { + bn_probably_prime, + bn_composite, + bn_non_prime_power_composite, +}; + +// BN_enhanced_miller_rabin_primality_test tests whether |w| is probably a prime +// number using the Enhanced Miller-Rabin Test (FIPS 186-4 C.3.2) with +// |iterations| iterations and returns the result in |out_result|. Enhanced +// Miller-Rabin tests primality for odd integers greater than 3, returning +// |bn_probably_prime| if the number is probably prime, +// |bn_non_prime_power_composite| if the number is a composite that is not the +// power of a single prime, and |bn_composite| otherwise. If |iterations| is +// |BN_prime_checks|, then a value that results in a false positive rate lower +// than the number-field sieve security level of |w| is used. It returns one on +// success and zero on failure. If |cb| is not NULL, then it is called during +// each iteration of the primality test. +int BN_enhanced_miller_rabin_primality_test( + enum bn_primality_result_t *out_result, const BIGNUM *w, int iterations, + BN_CTX *ctx, BN_GENCB *cb); + +// BN_primality_test sets |*is_probably_prime| to one if |candidate| is +// probably a prime number by the Miller-Rabin test or zero if it's certainly +// not. +// +// If |do_trial_division| is non-zero then |candidate| will be tested against a +// list of small primes before Miller-Rabin tests. The probability of this +// function returning a false positive is 2^{2*checks}. If |checks| is +// |BN_prime_checks| then a value that results in a false positive rate lower +// than the number-field sieve security level of |candidate| is used. If |cb| is +// not NULL then it is called during the checking process. See the comment above +// |BN_GENCB|. +// +// The function returns one on success and zero on error. +// +// (If you are unsure whether you want |do_trial_division|, don't set it.) OPENSSL_EXPORT int BN_primality_test(int *is_probably_prime, const BIGNUM *candidate, int checks, BN_CTX *ctx, int do_trial_division, BN_GENCB *cb); -/* BN_is_prime_fasttest_ex returns one if |candidate| is probably a prime - * number by the Miller-Rabin test, zero if it's certainly not and -1 on error. - * - * If |do_trial_division| is non-zero then |candidate| will be tested against a - * list of small primes before Miller-Rabin tests. The probability of this - * function returning one when |candidate| is composite is 2^{2*checks}. If - * |checks| is |BN_prime_checks| then a value that results in approximately - * 2^{-80} false positive probability is used. If |cb| is not NULL then it is - * called during the checking process. See the comment above |BN_GENCB|. - * - * WARNING: deprecated. Use |BN_primality_test|. */ +// BN_is_prime_fasttest_ex returns one if |candidate| is probably a prime +// number by the Miller-Rabin test, zero if it's certainly not and -1 on error. +// +// If |do_trial_division| is non-zero then |candidate| will be tested against a +// list of small primes before Miller-Rabin tests. The probability of this +// function returning one when |candidate| is composite is 2^{2*checks}. 
If +// |checks| is |BN_prime_checks| then a value that results in a false positive +// rate lower than the number-field sieve security level of |candidate| is used. +// If |cb| is not NULL then it is called during the checking process. See the +// comment above |BN_GENCB|. +// +// WARNING: deprecated. Use |BN_primality_test|. OPENSSL_EXPORT int BN_is_prime_fasttest_ex(const BIGNUM *candidate, int checks, BN_CTX *ctx, int do_trial_division, BN_GENCB *cb); -/* BN_is_prime_ex acts the same as |BN_is_prime_fasttest_ex| with - * |do_trial_division| set to zero. - * - * WARNING: deprecated: Use |BN_primality_test|. */ +// BN_is_prime_ex acts the same as |BN_is_prime_fasttest_ex| with +// |do_trial_division| set to zero. +// +// WARNING: deprecated: Use |BN_primality_test|. OPENSSL_EXPORT int BN_is_prime_ex(const BIGNUM *candidate, int checks, BN_CTX *ctx, BN_GENCB *cb); -/* Number theory functions */ +// Number theory functions -/* BN_gcd sets |r| = gcd(|a|, |b|). It returns one on success and zero - * otherwise. */ +// BN_gcd sets |r| = gcd(|a|, |b|). It returns one on success and zero +// otherwise. OPENSSL_EXPORT int BN_gcd(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); -/* BN_mod_inverse sets |out| equal to |a|^-1, mod |n|. If |out| is NULL, a - * fresh BIGNUM is allocated. It returns the result or NULL on error. - * - * If |n| is even then the operation is performed using an algorithm that avoids - * some branches but which isn't constant-time. This function shouldn't be used - * for secret values; use |BN_mod_inverse_blinded| instead. Or, if |n| is - * guaranteed to be prime, use - * |BN_mod_exp_mont_consttime(out, a, m_minus_2, m, ctx, m_mont)|, taking - * advantage of Fermat's Little Theorem. */ +// BN_mod_inverse sets |out| equal to |a|^-1, mod |n|. If |out| is NULL, a +// fresh BIGNUM is allocated. It returns the result or NULL on error. +// +// If |n| is even then the operation is performed using an algorithm that avoids +// some branches but which isn't constant-time. This function shouldn't be used +// for secret values; use |BN_mod_inverse_blinded| instead. Or, if |n| is +// guaranteed to be prime, use +// |BN_mod_exp_mont_consttime(out, a, m_minus_2, m, ctx, m_mont)|, taking +// advantage of Fermat's Little Theorem. OPENSSL_EXPORT BIGNUM *BN_mod_inverse(BIGNUM *out, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx); -/* BN_mod_inverse_blinded sets |out| equal to |a|^-1, mod |n|, where |n| is the - * Montgomery modulus for |mont|. |a| must be non-negative and must be less - * than |n|. |n| must be greater than 1. |a| is blinded (masked by a random - * value) to protect it against side-channel attacks. On failure, if the failure - * was caused by |a| having no inverse mod |n| then |*out_no_inverse| will be - * set to one; otherwise it will be set to zero. */ +// BN_mod_inverse_blinded sets |out| equal to |a|^-1, mod |n|, where |n| is the +// Montgomery modulus for |mont|. |a| must be non-negative and must be less +// than |n|. |n| must be greater than 1. |a| is blinded (masked by a random +// value) to protect it against side-channel attacks. On failure, if the failure +// was caused by |a| having no inverse mod |n| then |*out_no_inverse| will be +// set to one; otherwise it will be set to zero. int BN_mod_inverse_blinded(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, const BN_MONT_CTX *mont, BN_CTX *ctx); -/* BN_mod_inverse_odd sets |out| equal to |a|^-1, mod |n|. |a| must be - * non-negative and must be less than |n|. |n| must be odd. 
This function - * shouldn't be used for secret values; use |BN_mod_inverse_blinded| instead. - * Or, if |n| is guaranteed to be prime, use - * |BN_mod_exp_mont_consttime(out, a, m_minus_2, m, ctx, m_mont)|, taking - * advantage of Fermat's Little Theorem. It returns one on success or zero on - * failure. On failure, if the failure was caused by |a| having no inverse mod - * |n| then |*out_no_inverse| will be set to one; otherwise it will be set to - * zero. */ +// BN_mod_inverse_odd sets |out| equal to |a|^-1, mod |n|. |a| must be +// non-negative and must be less than |n|. |n| must be odd. This function +// shouldn't be used for secret values; use |BN_mod_inverse_blinded| instead. +// Or, if |n| is guaranteed to be prime, use +// |BN_mod_exp_mont_consttime(out, a, m_minus_2, m, ctx, m_mont)|, taking +// advantage of Fermat's Little Theorem. It returns one on success or zero on +// failure. On failure, if the failure was caused by |a| having no inverse mod +// |n| then |*out_no_inverse| will be set to one; otherwise it will be set to +// zero. int BN_mod_inverse_odd(BIGNUM *out, int *out_no_inverse, const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx); -/* BN_kronecker returns the Kronecker symbol of |a| and |b| (which is -1, 0 or - * 1), or -2 on error. */ -OPENSSL_EXPORT int BN_kronecker(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx); +// Montgomery arithmetic. -/* Montgomery arithmetic. */ +// BN_MONT_CTX contains the precomputed values needed to work in a specific +// Montgomery domain. -/* BN_MONT_CTX contains the precomputed values needed to work in a specific - * Montgomery domain. */ - -/* BN_MONT_CTX_new returns a fresh BN_MONT_CTX or NULL on allocation failure. */ +// BN_MONT_CTX_new returns a fresh BN_MONT_CTX or NULL on allocation failure. OPENSSL_EXPORT BN_MONT_CTX *BN_MONT_CTX_new(void); -/* BN_MONT_CTX_free frees memory associated with |mont|. */ +// BN_MONT_CTX_free frees memory associated with |mont|. OPENSSL_EXPORT void BN_MONT_CTX_free(BN_MONT_CTX *mont); -/* BN_MONT_CTX_copy sets |to| equal to |from|. It returns |to| on success or - * NULL on error. */ +// BN_MONT_CTX_copy sets |to| equal to |from|. It returns |to| on success or +// NULL on error. OPENSSL_EXPORT BN_MONT_CTX *BN_MONT_CTX_copy(BN_MONT_CTX *to, const BN_MONT_CTX *from); -/* BN_MONT_CTX_set sets up a Montgomery context given the modulus, |mod|. It - * returns one on success and zero on error. */ +// BN_MONT_CTX_set sets up a Montgomery context given the modulus, |mod|. It +// returns one on success and zero on error. OPENSSL_EXPORT int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx); -/* BN_MONT_CTX_set_locked takes |lock| and checks whether |*pmont| is NULL. If - * so, it creates a new |BN_MONT_CTX| and sets the modulus for it to |mod|. It - * then stores it as |*pmont|. It returns one on success and zero on error. - * - * If |*pmont| is already non-NULL then it does nothing and returns one. */ +// BN_MONT_CTX_set_locked takes |lock| and checks whether |*pmont| is NULL. If +// so, it creates a new |BN_MONT_CTX| and sets the modulus for it to |mod|. It +// then stores it as |*pmont|. It returns one on success and zero on error. +// +// If |*pmont| is already non-NULL then it does nothing and returns one. int BN_MONT_CTX_set_locked(BN_MONT_CTX **pmont, CRYPTO_MUTEX *lock, const BIGNUM *mod, BN_CTX *bn_ctx); -/* BN_to_montgomery sets |ret| equal to |a| in the Montgomery domain. |a| is - * assumed to be in the range [0, n), where |n| is the Montgomery modulus. 
It - * returns one on success or zero on error. */ +// BN_to_montgomery sets |ret| equal to |a| in the Montgomery domain. |a| is +// assumed to be in the range [0, n), where |n| is the Montgomery modulus. It +// returns one on success or zero on error. OPENSSL_EXPORT int BN_to_montgomery(BIGNUM *ret, const BIGNUM *a, const BN_MONT_CTX *mont, BN_CTX *ctx); -/* BN_from_montgomery sets |ret| equal to |a| * R^-1, i.e. translates values out - * of the Montgomery domain. |a| is assumed to be in the range [0, n), where |n| - * is the Montgomery modulus. It returns one on success or zero on error. */ +// BN_from_montgomery sets |ret| equal to |a| * R^-1, i.e. translates values out +// of the Montgomery domain. |a| is assumed to be in the range [0, n), where |n| +// is the Montgomery modulus. It returns one on success or zero on error. OPENSSL_EXPORT int BN_from_montgomery(BIGNUM *ret, const BIGNUM *a, const BN_MONT_CTX *mont, BN_CTX *ctx); -/* BN_mod_mul_montgomery set |r| equal to |a| * |b|, in the Montgomery domain. - * Both |a| and |b| must already be in the Montgomery domain (by - * |BN_to_montgomery|). In particular, |a| and |b| are assumed to be in the - * range [0, n), where |n| is the Montgomery modulus. It returns one on success - * or zero on error. */ +// BN_mod_mul_montgomery set |r| equal to |a| * |b|, in the Montgomery domain. +// Both |a| and |b| must already be in the Montgomery domain (by +// |BN_to_montgomery|). In particular, |a| and |b| are assumed to be in the +// range [0, n), where |n| is the Montgomery modulus. It returns one on success +// or zero on error. OPENSSL_EXPORT int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b, const BN_MONT_CTX *mont, BN_CTX *ctx); -/* Exponentiation. */ +// Exponentiation. -/* BN_exp sets |r| equal to |a|^{|p|}. It does so with a square-and-multiply - * algorithm that leaks side-channel information. It returns one on success or - * zero otherwise. */ +// BN_exp sets |r| equal to |a|^{|p|}. It does so with a square-and-multiply +// algorithm that leaks side-channel information. It returns one on success or +// zero otherwise. OPENSSL_EXPORT int BN_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx); -/* BN_mod_exp sets |r| equal to |a|^{|p|} mod |m|. It does so with the best - * algorithm for the values provided. It returns one on success or zero - * otherwise. The |BN_mod_exp_mont_consttime| variant must be used if the - * exponent is secret. */ +// BN_mod_exp sets |r| equal to |a|^{|p|} mod |m|. It does so with the best +// algorithm for the values provided. It returns one on success or zero +// otherwise. The |BN_mod_exp_mont_consttime| variant must be used if the +// exponent is secret. OPENSSL_EXPORT int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx); @@ -870,70 +859,71 @@ OPENSSL_EXPORT int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BN_MONT_CTX *mont); -/* Deprecated functions */ +// Deprecated functions -/* BN_bn2mpi serialises the value of |in| to |out|, using a format that consists - * of the number's length in bytes represented as a 4-byte big-endian number, - * and the number itself in big-endian format, where the most significant bit - * signals a negative number. (The representation of numbers with the MSB set is - * prefixed with null byte). |out| must have sufficient space available; to - * find the needed amount of space, call the function with |out| set to NULL. 
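// Illustrative usage sketch (not part of the vendored header): computing
// a*b mod n via the Montgomery routines documented above, using small values
// so the result is easy to check by hand.
#include <openssl/bn.h>

static int montgomery_example(void) {
  BN_CTX *ctx = BN_CTX_new();
  BN_MONT_CTX *mont = BN_MONT_CTX_new();
  BIGNUM *n = BN_new(), *a = BN_new(), *b = BN_new(), *r = BN_new();
  int ok = ctx != NULL && mont != NULL && n != NULL && a != NULL &&
           b != NULL && r != NULL &&
           BN_set_word(n, 101) && BN_set_word(a, 7) && BN_set_word(b, 9) &&
           BN_MONT_CTX_set(mont, n, ctx) &&
           BN_to_montgomery(a, a, mont, ctx) &&   // enter the Montgomery domain
           BN_to_montgomery(b, b, mont, ctx) &&
           BN_mod_mul_montgomery(r, a, b, mont, ctx) &&
           BN_from_montgomery(r, r, mont, ctx);   // r == 7*9 mod 101 == 63
  BN_free(n);
  BN_free(a);
  BN_free(b);
  BN_free(r);
  BN_MONT_CTX_free(mont);
  BN_CTX_free(ctx);
  return ok;
}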
*/ +// BN_bn2mpi serialises the value of |in| to |out|, using a format that consists +// of the number's length in bytes represented as a 4-byte big-endian number, +// and the number itself in big-endian format, where the most significant bit +// signals a negative number. (The representation of numbers with the MSB set is +// prefixed with null byte). |out| must have sufficient space available; to +// find the needed amount of space, call the function with |out| set to NULL. OPENSSL_EXPORT size_t BN_bn2mpi(const BIGNUM *in, uint8_t *out); -/* BN_mpi2bn parses |len| bytes from |in| and returns the resulting value. The - * bytes at |in| are expected to be in the format emitted by |BN_bn2mpi|. - * - * If |out| is NULL then a fresh |BIGNUM| is allocated and returned, otherwise - * |out| is reused and returned. On error, NULL is returned and the error queue - * is updated. */ +// BN_mpi2bn parses |len| bytes from |in| and returns the resulting value. The +// bytes at |in| are expected to be in the format emitted by |BN_bn2mpi|. +// +// If |out| is NULL then a fresh |BIGNUM| is allocated and returned, otherwise +// |out| is reused and returned. On error, NULL is returned and the error queue +// is updated. OPENSSL_EXPORT BIGNUM *BN_mpi2bn(const uint8_t *in, size_t len, BIGNUM *out); -/* BN_mod_exp_mont_word is like |BN_mod_exp_mont| except that the base |a| is - * given as a |BN_ULONG| instead of a |BIGNUM *|. It returns one on success - * or zero otherwise. */ +// BN_mod_exp_mont_word is like |BN_mod_exp_mont| except that the base |a| is +// given as a |BN_ULONG| instead of a |BIGNUM *|. It returns one on success +// or zero otherwise. OPENSSL_EXPORT int BN_mod_exp_mont_word(BIGNUM *r, BN_ULONG a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont); -/* BN_mod_exp2_mont calculates (a1^p1) * (a2^p2) mod m. It returns 1 on success - * or zero otherwise. */ +// BN_mod_exp2_mont calculates (a1^p1) * (a2^p2) mod m. It returns 1 on success +// or zero otherwise. OPENSSL_EXPORT int BN_mod_exp2_mont(BIGNUM *r, const BIGNUM *a1, const BIGNUM *p1, const BIGNUM *a2, const BIGNUM *p2, const BIGNUM *m, BN_CTX *ctx, const BN_MONT_CTX *mont); -/* Private functions */ +// Private functions struct bignum_st { BN_ULONG *d; /* Pointer to an array of 'BN_BITS2' bit chunks in little-endian order. */ - int top; /* Index of last used element in |d|, plus one. */ - int dmax; /* Size of |d|, in words. */ - int neg; /* one if the number is negative */ - int flags; /* bitmask of BN_FLG_* values */ + int top; // Index of last used element in |d|, plus one. + int dmax; // Size of |d|, in words. + int neg; // one if the number is negative + int flags; // bitmask of BN_FLG_* values }; struct bn_mont_ctx_st { - BIGNUM RR; /* used to convert to montgomery form */ - BIGNUM N; /* The modulus */ - BN_ULONG n0[2]; /* least significant words of (R*Ri-1)/N */ + BIGNUM RR; // used to convert to montgomery form + BIGNUM N; // The modulus + BN_ULONG n0[2]; // least significant words of (R*Ri-1)/N }; OPENSSL_EXPORT unsigned BN_num_bits_word(BN_ULONG l); #define BN_FLG_MALLOCED 0x01 #define BN_FLG_STATIC_DATA 0x02 -/* |BN_FLG_CONSTTIME| has been removed and intentionally omitted so code relying - * on it will not compile. Consumers outside BoringSSL should use the - * higher-level cryptographic algorithms exposed by other modules. Consumers - * within the library should call the appropriate timing-sensitive algorithm - * directly. 
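// Illustrative usage sketch (not part of the vendored header): round-tripping
// a value through the deprecated MPI format documented above, sizing the
// buffer with an initial NULL call. |OPENSSL_malloc|/|OPENSSL_free| come from
// openssl/mem.h.
#include <openssl/bn.h>
#include <openssl/mem.h>

static int mpi_roundtrip_example(void) {
  BIGNUM *a = BN_new(), *b = NULL;
  uint8_t *buf = NULL;
  size_t len;
  int ok = a != NULL && BN_set_word(a, 0x1234) &&
           (len = BN_bn2mpi(a, NULL)) > 0 &&        // query the required size
           (buf = OPENSSL_malloc(len)) != NULL &&
           BN_bn2mpi(a, buf) == len &&
           (b = BN_mpi2bn(buf, len, NULL)) != NULL &&
           BN_cmp(a, b) == 0;                       // parsed value matches
  OPENSSL_free(buf);
  BN_free(a);
  BN_free(b);
  return ok;
}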
*/ +// |BN_FLG_CONSTTIME| has been removed and intentionally omitted so code relying +// on it will not compile. Consumers outside BoringSSL should use the +// higher-level cryptographic algorithms exposed by other modules. Consumers +// within the library should call the appropriate timing-sensitive algorithm +// directly. #if defined(__cplusplus) -} /* extern C */ +} // extern C +#if !defined(BORINGSSL_NO_CXX) extern "C++" { namespace bssl { @@ -942,9 +932,22 @@ BORINGSSL_MAKE_DELETER(BIGNUM, BN_free) BORINGSSL_MAKE_DELETER(BN_CTX, BN_CTX_free) BORINGSSL_MAKE_DELETER(BN_MONT_CTX, BN_MONT_CTX_free) +class BN_CTXScope { + public: + BN_CTXScope(BN_CTX *ctx) : ctx_(ctx) { BN_CTX_start(ctx_); } + ~BN_CTXScope() { BN_CTX_end(ctx_); } + + private: + BN_CTX *ctx_; + + BN_CTXScope(BN_CTXScope &) = delete; + BN_CTXScope &operator=(BN_CTXScope &) = delete; +}; + } // namespace bssl -} /* extern C++ */ +} // extern C++ +#endif #endif @@ -967,5 +970,6 @@ BORINGSSL_MAKE_DELETER(BN_MONT_CTX, BN_MONT_CTX_free) #define BN_R_TOO_MANY_TEMPORARY_VARIABLES 116 #define BN_R_BAD_ENCODING 117 #define BN_R_ENCODE_ERROR 118 +#define BN_R_INVALID_INPUT 119 -#endif /* OPENSSL_HEADER_BN_H */ +#endif // OPENSSL_HEADER_BN_H diff --git a/Sources/BoringSSL/include/openssl/buf.h b/Sources/BoringSSL/include/openssl/buf.h index 30f3af790..3f961b87e 100644 --- a/Sources/BoringSSL/include/openssl/buf.h +++ b/Sources/BoringSSL/include/openssl/buf.h @@ -64,59 +64,63 @@ extern "C" { #endif -/* Memory and string functions, see also mem.h. */ +// Memory and string functions, see also mem.h. -/* buf_mem_st (aka |BUF_MEM|) is a generic buffer object used by OpenSSL. */ +// buf_mem_st (aka |BUF_MEM|) is a generic buffer object used by OpenSSL. struct buf_mem_st { - size_t length; /* current number of bytes */ + size_t length; // current number of bytes char *data; - size_t max; /* size of buffer */ + size_t max; // size of buffer }; -/* BUF_MEM_new creates a new BUF_MEM which has no allocated data buffer. */ +// BUF_MEM_new creates a new BUF_MEM which has no allocated data buffer. OPENSSL_EXPORT BUF_MEM *BUF_MEM_new(void); -/* BUF_MEM_free frees |buf->data| if needed and then frees |buf| itself. */ +// BUF_MEM_free frees |buf->data| if needed and then frees |buf| itself. OPENSSL_EXPORT void BUF_MEM_free(BUF_MEM *buf); -/* BUF_MEM_reserve ensures |buf| has capacity |cap| and allocates memory if - * needed. It returns one on success and zero on error. */ +// BUF_MEM_reserve ensures |buf| has capacity |cap| and allocates memory if +// needed. It returns one on success and zero on error. OPENSSL_EXPORT int BUF_MEM_reserve(BUF_MEM *buf, size_t cap); -/* BUF_MEM_grow ensures that |buf| has length |len| and allocates memory if - * needed. If the length of |buf| increased, the new bytes are filled with - * zeros. It returns the length of |buf|, or zero if there's an error. */ +// BUF_MEM_grow ensures that |buf| has length |len| and allocates memory if +// needed. If the length of |buf| increased, the new bytes are filled with +// zeros. It returns the length of |buf|, or zero if there's an error. OPENSSL_EXPORT size_t BUF_MEM_grow(BUF_MEM *buf, size_t len); -/* BUF_MEM_grow_clean acts the same as |BUF_MEM_grow|, but clears the previous - * contents of memory if reallocing. */ -OPENSSL_EXPORT size_t BUF_MEM_grow_clean(BUF_MEM *str, size_t len); +// BUF_MEM_grow_clean calls |BUF_MEM_grow|. BoringSSL always zeros memory +// allocated memory on free. 
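// Illustrative usage sketch (not part of the vendored header): allocating a
// |BUF_MEM|, growing it, and filling it with the functions documented above.
#include <openssl/buf.h>
#include <string.h>

static int buf_mem_example(void) {
  BUF_MEM *buf = BUF_MEM_new();
  int ok = buf != NULL && BUF_MEM_grow(buf, 5) == 5;  // now 5 zero-filled bytes
  if (ok) {
    memcpy(buf->data, "hello", 5);  // buf->length is 5, buf->max is at least 5
  }
  BUF_MEM_free(buf);
  return ok;
}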
+OPENSSL_EXPORT size_t BUF_MEM_grow_clean(BUF_MEM *buf, size_t len); -/* BUF_strdup returns an allocated, duplicate of |str|. */ +// BUF_MEM_append appends |in| to |buf|. It returns one on success and zero on +// error. +OPENSSL_EXPORT int BUF_MEM_append(BUF_MEM *buf, const void *in, size_t len); + +// BUF_strdup returns an allocated, duplicate of |str|. OPENSSL_EXPORT char *BUF_strdup(const char *str); -/* BUF_strnlen returns the number of characters in |str|, excluding the NUL - * byte, but at most |max_len|. This function never reads more than |max_len| - * bytes from |str|. */ +// BUF_strnlen returns the number of characters in |str|, excluding the NUL +// byte, but at most |max_len|. This function never reads more than |max_len| +// bytes from |str|. OPENSSL_EXPORT size_t BUF_strnlen(const char *str, size_t max_len); -/* BUF_strndup returns an allocated, duplicate of |str|, which is, at most, - * |size| bytes. The result is always NUL terminated. */ +// BUF_strndup returns an allocated, duplicate of |str|, which is, at most, +// |size| bytes. The result is always NUL terminated. OPENSSL_EXPORT char *BUF_strndup(const char *str, size_t size); -/* BUF_memdup returns an allocated, duplicate of |size| bytes from |data|. */ +// BUF_memdup returns an allocated, duplicate of |size| bytes from |data|. OPENSSL_EXPORT void *BUF_memdup(const void *data, size_t size); -/* BUF_strlcpy acts like strlcpy(3). */ +// BUF_strlcpy acts like strlcpy(3). OPENSSL_EXPORT size_t BUF_strlcpy(char *dst, const char *src, size_t dst_size); -/* BUF_strlcat acts like strlcat(3). */ -OPENSSL_EXPORT size_t BUF_strlcat(char *dst, const char *src, size_t size); +// BUF_strlcat acts like strlcat(3). +OPENSSL_EXPORT size_t BUF_strlcat(char *dst, const char *src, size_t dst_size); #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -126,8 +130,8 @@ BORINGSSL_MAKE_DELETER(BUF_MEM, BUF_MEM_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif -#endif /* OPENSSL_HEADER_BUFFER_H */ +#endif // OPENSSL_HEADER_BUFFER_H diff --git a/Sources/BoringSSL/include/openssl/bytestring.h b/Sources/BoringSSL/include/openssl/bytestring.h index bab268b5c..309c6a357 100644 --- a/Sources/BoringSSL/include/openssl/bytestring.h +++ b/Sources/BoringSSL/include/openssl/bytestring.h @@ -17,275 +17,290 @@ #include +#include + #if defined(__cplusplus) extern "C" { #endif -/* Bytestrings are used for parsing and building TLS and ASN.1 messages. - * - * A "CBS" (CRYPTO ByteString) represents a string of bytes in memory and - * provides utility functions for safely parsing length-prefixed structures - * like TLS and ASN.1 from it. - * - * A "CBB" (CRYPTO ByteBuilder) is a memory buffer that grows as needed and - * provides utility functions for building length-prefixed messages. */ +// Bytestrings are used for parsing and building TLS and ASN.1 messages. +// +// A "CBS" (CRYPTO ByteString) represents a string of bytes in memory and +// provides utility functions for safely parsing length-prefixed structures +// like TLS and ASN.1 from it. +// +// A "CBB" (CRYPTO ByteBuilder) is a memory buffer that grows as needed and +// provides utility functions for building length-prefixed messages. -/* CRYPTO ByteString */ +// CRYPTO ByteString struct cbs_st { const uint8_t *data; size_t len; + +#if !defined(BORINGSSL_NO_CXX) + // Allow implicit conversions to and from bssl::Span. 
+ cbs_st(bssl::Span span) + : data(span.data()), len(span.size()) {} + operator bssl::Span() const { + return bssl::MakeConstSpan(data, len); + } + + // Defining any constructors requires we explicitly default the others. + cbs_st() = default; + cbs_st(const cbs_st &) = default; +#endif }; -/* CBS_init sets |cbs| to point to |data|. It does not take ownership of - * |data|. */ +// CBS_init sets |cbs| to point to |data|. It does not take ownership of +// |data|. OPENSSL_EXPORT void CBS_init(CBS *cbs, const uint8_t *data, size_t len); -/* CBS_skip advances |cbs| by |len| bytes. It returns one on success and zero - * otherwise. */ +// CBS_skip advances |cbs| by |len| bytes. It returns one on success and zero +// otherwise. OPENSSL_EXPORT int CBS_skip(CBS *cbs, size_t len); -/* CBS_data returns a pointer to the contents of |cbs|. */ +// CBS_data returns a pointer to the contents of |cbs|. OPENSSL_EXPORT const uint8_t *CBS_data(const CBS *cbs); -/* CBS_len returns the number of bytes remaining in |cbs|. */ +// CBS_len returns the number of bytes remaining in |cbs|. OPENSSL_EXPORT size_t CBS_len(const CBS *cbs); -/* CBS_stow copies the current contents of |cbs| into |*out_ptr| and - * |*out_len|. If |*out_ptr| is not NULL, the contents are freed with - * OPENSSL_free. It returns one on success and zero on allocation failure. On - * success, |*out_ptr| should be freed with OPENSSL_free. If |cbs| is empty, - * |*out_ptr| will be NULL. */ +// CBS_stow copies the current contents of |cbs| into |*out_ptr| and +// |*out_len|. If |*out_ptr| is not NULL, the contents are freed with +// OPENSSL_free. It returns one on success and zero on allocation failure. On +// success, |*out_ptr| should be freed with OPENSSL_free. If |cbs| is empty, +// |*out_ptr| will be NULL. OPENSSL_EXPORT int CBS_stow(const CBS *cbs, uint8_t **out_ptr, size_t *out_len); -/* CBS_strdup copies the current contents of |cbs| into |*out_ptr| as a - * NUL-terminated C string. If |*out_ptr| is not NULL, the contents are freed - * with OPENSSL_free. It returns one on success and zero on allocation - * failure. On success, |*out_ptr| should be freed with OPENSSL_free. - * - * NOTE: If |cbs| contains NUL bytes, the string will be truncated. Call - * |CBS_contains_zero_byte(cbs)| to check for NUL bytes. */ +// CBS_strdup copies the current contents of |cbs| into |*out_ptr| as a +// NUL-terminated C string. If |*out_ptr| is not NULL, the contents are freed +// with OPENSSL_free. It returns one on success and zero on allocation +// failure. On success, |*out_ptr| should be freed with OPENSSL_free. +// +// NOTE: If |cbs| contains NUL bytes, the string will be truncated. Call +// |CBS_contains_zero_byte(cbs)| to check for NUL bytes. OPENSSL_EXPORT int CBS_strdup(const CBS *cbs, char **out_ptr); -/* CBS_contains_zero_byte returns one if the current contents of |cbs| contains - * a NUL byte and zero otherwise. */ +// CBS_contains_zero_byte returns one if the current contents of |cbs| contains +// a NUL byte and zero otherwise. OPENSSL_EXPORT int CBS_contains_zero_byte(const CBS *cbs); -/* CBS_mem_equal compares the current contents of |cbs| with the |len| bytes - * starting at |data|. If they're equal, it returns one, otherwise zero. If the - * lengths match, it uses a constant-time comparison. */ +// CBS_mem_equal compares the current contents of |cbs| with the |len| bytes +// starting at |data|. If they're equal, it returns one, otherwise zero. If the +// lengths match, it uses a constant-time comparison. 
OPENSSL_EXPORT int CBS_mem_equal(const CBS *cbs, const uint8_t *data, size_t len); -/* CBS_get_u8 sets |*out| to the next uint8_t from |cbs| and advances |cbs|. It - * returns one on success and zero on error. */ +// CBS_get_u8 sets |*out| to the next uint8_t from |cbs| and advances |cbs|. It +// returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u8(CBS *cbs, uint8_t *out); -/* CBS_get_u16 sets |*out| to the next, big-endian uint16_t from |cbs| and - * advances |cbs|. It returns one on success and zero on error. */ +// CBS_get_u16 sets |*out| to the next, big-endian uint16_t from |cbs| and +// advances |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u16(CBS *cbs, uint16_t *out); -/* CBS_get_u24 sets |*out| to the next, big-endian 24-bit value from |cbs| and - * advances |cbs|. It returns one on success and zero on error. */ +// CBS_get_u24 sets |*out| to the next, big-endian 24-bit value from |cbs| and +// advances |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u24(CBS *cbs, uint32_t *out); -/* CBS_get_u32 sets |*out| to the next, big-endian uint32_t value from |cbs| - * and advances |cbs|. It returns one on success and zero on error. */ +// CBS_get_u32 sets |*out| to the next, big-endian uint32_t value from |cbs| +// and advances |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u32(CBS *cbs, uint32_t *out); -/* CBS_get_last_u8 sets |*out| to the last uint8_t from |cbs| and shortens - * |cbs|. It returns one on success and zero on error. */ +// CBS_get_last_u8 sets |*out| to the last uint8_t from |cbs| and shortens +// |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_last_u8(CBS *cbs, uint8_t *out); -/* CBS_get_bytes sets |*out| to the next |len| bytes from |cbs| and advances - * |cbs|. It returns one on success and zero on error. */ +// CBS_get_bytes sets |*out| to the next |len| bytes from |cbs| and advances +// |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_bytes(CBS *cbs, CBS *out, size_t len); -/* CBS_copy_bytes copies the next |len| bytes from |cbs| to |out| and advances - * |cbs|. It returns one on success and zero on error. */ +// CBS_copy_bytes copies the next |len| bytes from |cbs| to |out| and advances +// |cbs|. It returns one on success and zero on error. OPENSSL_EXPORT int CBS_copy_bytes(CBS *cbs, uint8_t *out, size_t len); -/* CBS_get_u8_length_prefixed sets |*out| to the contents of an 8-bit, - * length-prefixed value from |cbs| and advances |cbs| over it. It returns one - * on success and zero on error. */ +// CBS_get_u8_length_prefixed sets |*out| to the contents of an 8-bit, +// length-prefixed value from |cbs| and advances |cbs| over it. It returns one +// on success and zero on error. OPENSSL_EXPORT int CBS_get_u8_length_prefixed(CBS *cbs, CBS *out); -/* CBS_get_u16_length_prefixed sets |*out| to the contents of a 16-bit, - * big-endian, length-prefixed value from |cbs| and advances |cbs| over it. It - * returns one on success and zero on error. */ +// CBS_get_u16_length_prefixed sets |*out| to the contents of a 16-bit, +// big-endian, length-prefixed value from |cbs| and advances |cbs| over it. It +// returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u16_length_prefixed(CBS *cbs, CBS *out); -/* CBS_get_u24_length_prefixed sets |*out| to the contents of a 24-bit, - * big-endian, length-prefixed value from |cbs| and advances |cbs| over it. 
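// Illustrative usage sketch (not part of the vendored header): parsing a small
// length-prefixed record with the CBS getters documented above. The message
// layout is an arbitrary example.
#include <openssl/bytestring.h>

static int cbs_parse_example(void) {
  // A one-byte version followed by a 16-bit length-prefixed body.
  static const uint8_t kMsg[] = {0x03, 0x00, 0x02, 'h', 'i'};
  CBS cbs, body;
  uint8_t version;
  CBS_init(&cbs, kMsg, sizeof(kMsg));
  return CBS_get_u8(&cbs, &version) &&
         CBS_get_u16_length_prefixed(&cbs, &body) &&
         CBS_len(&body) == 2 &&
         CBS_len(&cbs) == 0;  // the whole input was consumed
}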
It - * returns one on success and zero on error. */ +// CBS_get_u24_length_prefixed sets |*out| to the contents of a 24-bit, +// big-endian, length-prefixed value from |cbs| and advances |cbs| over it. It +// returns one on success and zero on error. OPENSSL_EXPORT int CBS_get_u24_length_prefixed(CBS *cbs, CBS *out); -/* Parsing ASN.1 */ - -/* The following values are tag numbers for UNIVERSAL elements. */ -#define CBS_ASN1_BOOLEAN 0x1 -#define CBS_ASN1_INTEGER 0x2 -#define CBS_ASN1_BITSTRING 0x3 -#define CBS_ASN1_OCTETSTRING 0x4 -#define CBS_ASN1_NULL 0x5 -#define CBS_ASN1_OBJECT 0x6 -#define CBS_ASN1_ENUMERATED 0xa -#define CBS_ASN1_UTF8STRING 0xc -#define CBS_ASN1_SEQUENCE (0x10 | CBS_ASN1_CONSTRUCTED) -#define CBS_ASN1_SET (0x11 | CBS_ASN1_CONSTRUCTED) -#define CBS_ASN1_NUMERICSTRING 0x12 -#define CBS_ASN1_PRINTABLESTRING 0x13 -#define CBS_ASN1_T16STRING 0x14 -#define CBS_ASN1_VIDEOTEXSTRING 0x15 -#define CBS_ASN1_IA5STRING 0x16 -#define CBS_ASN1_UTCTIME 0x17 -#define CBS_ASN1_GENERALIZEDTIME 0x18 -#define CBS_ASN1_GRAPHICSTRING 0x19 -#define CBS_ASN1_VISIBLESTRING 0x1a -#define CBS_ASN1_GENERALSTRING 0x1b -#define CBS_ASN1_UNIVERSALSTRING 0x1c -#define CBS_ASN1_BMPSTRING 0x1e - -/* CBS_ASN1_CONSTRUCTED may be ORed into a tag to toggle the constructed - * bit. |CBS| and |CBB| APIs consider the constructed bit to be part of the - * tag. */ -#define CBS_ASN1_CONSTRUCTED 0x20 - -/* The following values specify the constructed bit or tag class and may be ORed - * into a tag number to produce the final tag. If none is used, the tag will be - * UNIVERSAL. - * - * Note that although they currently match the DER serialization, consumers must - * use these bits rather than make assumptions about the representation. This is - * to allow for tag numbers beyond 31 in the future. */ -#define CBS_ASN1_APPLICATION 0x40 -#define CBS_ASN1_CONTEXT_SPECIFIC 0x80 -#define CBS_ASN1_PRIVATE 0xc0 - -/* CBS_ASN1_CLASS_MASK may be ANDed with a tag to query its class. */ -#define CBS_ASN1_CLASS_MASK 0xc0 - -/* CBS_ASN1_TAG_NUMBER_MASK may be ANDed with a tag to query its number. */ -#define CBS_ASN1_TAG_NUMBER_MASK 0x1f - -/* CBS_get_asn1 sets |*out| to the contents of DER-encoded, ASN.1 element (not - * including tag and length bytes) and advances |cbs| over it. The ASN.1 - * element must match |tag_value|. It returns one on success and zero - * on error. - * - * Tag numbers greater than 30 are not supported (i.e. short form only). */ +// Parsing ASN.1 + +// The following values are tag numbers for UNIVERSAL elements. +#define CBS_ASN1_BOOLEAN 0x1u +#define CBS_ASN1_INTEGER 0x2u +#define CBS_ASN1_BITSTRING 0x3u +#define CBS_ASN1_OCTETSTRING 0x4u +#define CBS_ASN1_NULL 0x5u +#define CBS_ASN1_OBJECT 0x6u +#define CBS_ASN1_ENUMERATED 0xau +#define CBS_ASN1_UTF8STRING 0xcu +#define CBS_ASN1_SEQUENCE (0x10u | CBS_ASN1_CONSTRUCTED) +#define CBS_ASN1_SET (0x11u | CBS_ASN1_CONSTRUCTED) +#define CBS_ASN1_NUMERICSTRING 0x12u +#define CBS_ASN1_PRINTABLESTRING 0x13u +#define CBS_ASN1_T61STRING 0x14u +#define CBS_ASN1_VIDEOTEXSTRING 0x15u +#define CBS_ASN1_IA5STRING 0x16u +#define CBS_ASN1_UTCTIME 0x17u +#define CBS_ASN1_GENERALIZEDTIME 0x18u +#define CBS_ASN1_GRAPHICSTRING 0x19u +#define CBS_ASN1_VISIBLESTRING 0x1au +#define CBS_ASN1_GENERALSTRING 0x1bu +#define CBS_ASN1_UNIVERSALSTRING 0x1cu +#define CBS_ASN1_BMPSTRING 0x1eu + +// CBS_ASN1_CONSTRUCTED may be ORed into a tag to toggle the constructed +// bit. |CBS| and |CBB| APIs consider the constructed bit to be part of the +// tag. 
+#define CBS_ASN1_CONSTRUCTED 0x20u + +// The following values specify the constructed bit or tag class and may be ORed +// into a tag number to produce the final tag. If none is used, the tag will be +// UNIVERSAL. +// +// Note that although they currently match the DER serialization, consumers must +// use these bits rather than make assumptions about the representation. This is +// to allow for tag numbers beyond 31 in the future. +#define CBS_ASN1_APPLICATION 0x40u +#define CBS_ASN1_CONTEXT_SPECIFIC 0x80u +#define CBS_ASN1_PRIVATE 0xc0u + +// CBS_ASN1_CLASS_MASK may be ANDed with a tag to query its class. +#define CBS_ASN1_CLASS_MASK 0xc0u + +// CBS_ASN1_TAG_NUMBER_MASK may be ANDed with a tag to query its number. +#define CBS_ASN1_TAG_NUMBER_MASK 0x1fu + +// CBS_get_asn1 sets |*out| to the contents of DER-encoded, ASN.1 element (not +// including tag and length bytes) and advances |cbs| over it. The ASN.1 +// element must match |tag_value|. It returns one on success and zero +// on error. +// +// Tag numbers greater than 30 are not supported (i.e. short form only). OPENSSL_EXPORT int CBS_get_asn1(CBS *cbs, CBS *out, unsigned tag_value); -/* CBS_get_asn1_element acts like |CBS_get_asn1| but |out| will include the - * ASN.1 header bytes too. */ +// CBS_get_asn1_element acts like |CBS_get_asn1| but |out| will include the +// ASN.1 header bytes too. OPENSSL_EXPORT int CBS_get_asn1_element(CBS *cbs, CBS *out, unsigned tag_value); -/* CBS_peek_asn1_tag looks ahead at the next ASN.1 tag and returns one - * if the next ASN.1 element on |cbs| would have tag |tag_value|. If - * |cbs| is empty or the tag does not match, it returns zero. Note: if - * it returns one, CBS_get_asn1 may still fail if the rest of the - * element is malformed. */ +// CBS_peek_asn1_tag looks ahead at the next ASN.1 tag and returns one +// if the next ASN.1 element on |cbs| would have tag |tag_value|. If +// |cbs| is empty or the tag does not match, it returns zero. Note: if +// it returns one, CBS_get_asn1 may still fail if the rest of the +// element is malformed. OPENSSL_EXPORT int CBS_peek_asn1_tag(const CBS *cbs, unsigned tag_value); -/* CBS_get_any_asn1 sets |*out| to contain the next ASN.1 element from |*cbs| - * (not including tag and length bytes), sets |*out_tag| to the tag number, and - * advances |*cbs|. It returns one on success and zero on error. Either of |out| - * and |out_tag| may be NULL to ignore the value. - * - * Tag numbers greater than 30 are not supported (i.e. short form only). */ +// CBS_get_any_asn1 sets |*out| to contain the next ASN.1 element from |*cbs| +// (not including tag and length bytes), sets |*out_tag| to the tag number, and +// advances |*cbs|. It returns one on success and zero on error. Either of |out| +// and |out_tag| may be NULL to ignore the value. +// +// Tag numbers greater than 30 are not supported (i.e. short form only). OPENSSL_EXPORT int CBS_get_any_asn1(CBS *cbs, CBS *out, unsigned *out_tag); -/* CBS_get_any_asn1_element sets |*out| to contain the next ASN.1 element from - * |*cbs| (including header bytes) and advances |*cbs|. It sets |*out_tag| to - * the tag number and |*out_header_len| to the length of the ASN.1 header. Each - * of |out|, |out_tag|, and |out_header_len| may be NULL to ignore the value. - * - * Tag numbers greater than 30 are not supported (i.e. short form only). */ +// CBS_get_any_asn1_element sets |*out| to contain the next ASN.1 element from +// |*cbs| (including header bytes) and advances |*cbs|. 
It sets |*out_tag| to +// the tag number and |*out_header_len| to the length of the ASN.1 header. Each +// of |out|, |out_tag|, and |out_header_len| may be NULL to ignore the value. +// +// Tag numbers greater than 30 are not supported (i.e. short form only). OPENSSL_EXPORT int CBS_get_any_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, size_t *out_header_len); -/* CBS_get_any_ber_asn1_element acts the same as |CBS_get_any_asn1_element| but - * also allows indefinite-length elements to be returned. In that case, - * |*out_header_len| and |CBS_len(out)| will both be two as only the header is - * returned, otherwise it behaves the same as the previous function. */ +// CBS_get_any_ber_asn1_element acts the same as |CBS_get_any_asn1_element| but +// also allows indefinite-length elements to be returned. In that case, +// |*out_header_len| and |CBS_len(out)| will both be two as only the header is +// returned, otherwise it behaves the same as the previous function. OPENSSL_EXPORT int CBS_get_any_ber_asn1_element(CBS *cbs, CBS *out, unsigned *out_tag, size_t *out_header_len); -/* CBS_get_asn1_uint64 gets an ASN.1 INTEGER from |cbs| using |CBS_get_asn1| - * and sets |*out| to its value. It returns one on success and zero on error, - * where error includes the integer being negative, or too large to represent - * in 64 bits. */ +// CBS_get_asn1_uint64 gets an ASN.1 INTEGER from |cbs| using |CBS_get_asn1| +// and sets |*out| to its value. It returns one on success and zero on error, +// where error includes the integer being negative, or too large to represent +// in 64 bits. OPENSSL_EXPORT int CBS_get_asn1_uint64(CBS *cbs, uint64_t *out); -/* CBS_get_optional_asn1 gets an optional explicitly-tagged element from |cbs| - * tagged with |tag| and sets |*out| to its contents. If present and if - * |out_present| is not NULL, it sets |*out_present| to one, otherwise zero. It - * returns one on success, whether or not the element was present, and zero on - * decode failure. */ +// CBS_get_optional_asn1 gets an optional explicitly-tagged element from |cbs| +// tagged with |tag| and sets |*out| to its contents. If present and if +// |out_present| is not NULL, it sets |*out_present| to one, otherwise zero. It +// returns one on success, whether or not the element was present, and zero on +// decode failure. OPENSSL_EXPORT int CBS_get_optional_asn1(CBS *cbs, CBS *out, int *out_present, unsigned tag); -/* CBS_get_optional_asn1_octet_string gets an optional - * explicitly-tagged OCTET STRING from |cbs|. If present, it sets - * |*out| to the string and |*out_present| to one. Otherwise, it sets - * |*out| to empty and |*out_present| to zero. |out_present| may be - * NULL. It returns one on success, whether or not the element was - * present, and zero on decode failure. */ +// CBS_get_optional_asn1_octet_string gets an optional +// explicitly-tagged OCTET STRING from |cbs|. If present, it sets +// |*out| to the string and |*out_present| to one. Otherwise, it sets +// |*out| to empty and |*out_present| to zero. |out_present| may be +// NULL. It returns one on success, whether or not the element was +// present, and zero on decode failure. OPENSSL_EXPORT int CBS_get_optional_asn1_octet_string(CBS *cbs, CBS *out, int *out_present, unsigned tag); -/* CBS_get_optional_asn1_uint64 gets an optional explicitly-tagged - * INTEGER from |cbs|. If present, it sets |*out| to the - * value. Otherwise, it sets |*out| to |default_value|. 
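// Illustrative usage sketch (not part of the vendored header): reading a DER
// SEQUENCE containing one INTEGER with the ASN.1 helpers documented above.
#include <openssl/bytestring.h>

static int cbs_asn1_example(void) {
  // DER encoding of SEQUENCE { INTEGER 5 }.
  static const uint8_t kDer[] = {0x30, 0x03, 0x02, 0x01, 0x05};
  CBS cbs, seq;
  uint64_t value;
  CBS_init(&cbs, kDer, sizeof(kDer));
  return CBS_get_asn1(&cbs, &seq, CBS_ASN1_SEQUENCE) &&
         CBS_get_asn1_uint64(&seq, &value) &&
         value == 5 &&
         CBS_len(&cbs) == 0;
}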
It returns one - * on success, whether or not the element was present, and zero on - * decode failure. */ +// CBS_get_optional_asn1_uint64 gets an optional explicitly-tagged +// INTEGER from |cbs|. If present, it sets |*out| to the +// value. Otherwise, it sets |*out| to |default_value|. It returns one +// on success, whether or not the element was present, and zero on +// decode failure. OPENSSL_EXPORT int CBS_get_optional_asn1_uint64(CBS *cbs, uint64_t *out, unsigned tag, uint64_t default_value); -/* CBS_get_optional_asn1_bool gets an optional, explicitly-tagged BOOLEAN from - * |cbs|. If present, it sets |*out| to either zero or one, based on the - * boolean. Otherwise, it sets |*out| to |default_value|. It returns one on - * success, whether or not the element was present, and zero on decode - * failure. */ +// CBS_get_optional_asn1_bool gets an optional, explicitly-tagged BOOLEAN from +// |cbs|. If present, it sets |*out| to either zero or one, based on the +// boolean. Otherwise, it sets |*out| to |default_value|. It returns one on +// success, whether or not the element was present, and zero on decode +// failure. OPENSSL_EXPORT int CBS_get_optional_asn1_bool(CBS *cbs, int *out, unsigned tag, int default_value); -/* CBS_is_valid_asn1_bitstring returns one if |cbs| is a valid ASN.1 BIT STRING - * and zero otherwise. */ +// CBS_is_valid_asn1_bitstring returns one if |cbs| is a valid ASN.1 BIT STRING +// and zero otherwise. OPENSSL_EXPORT int CBS_is_valid_asn1_bitstring(const CBS *cbs); -/* CBS_asn1_bitstring_has_bit returns one if |cbs| is a valid ASN.1 BIT STRING - * and the specified bit is present and set. Otherwise, it returns zero. |bit| - * is indexed starting from zero. */ +// CBS_asn1_bitstring_has_bit returns one if |cbs| is a valid ASN.1 BIT STRING +// and the specified bit is present and set. Otherwise, it returns zero. |bit| +// is indexed starting from zero. OPENSSL_EXPORT int CBS_asn1_bitstring_has_bit(const CBS *cbs, unsigned bit); -/* CRYPTO ByteBuilder. - * - * |CBB| objects allow one to build length-prefixed serialisations. A |CBB| - * object is associated with a buffer and new buffers are created with - * |CBB_init|. Several |CBB| objects can point at the same buffer when a - * length-prefix is pending, however only a single |CBB| can be 'current' at - * any one time. For example, if one calls |CBB_add_u8_length_prefixed| then - * the new |CBB| points at the same buffer as the original. But if the original - * |CBB| is used then the length prefix is written out and the new |CBB| must - * not be used again. - * - * If one needs to force a length prefix to be written out because a |CBB| is - * going out of scope, use |CBB_flush|. If an operation on a |CBB| fails, it is - * in an undefined state and must not be used except to call |CBB_cleanup|. */ +// CRYPTO ByteBuilder. +// +// |CBB| objects allow one to build length-prefixed serialisations. A |CBB| +// object is associated with a buffer and new buffers are created with +// |CBB_init|. Several |CBB| objects can point at the same buffer when a +// length-prefix is pending, however only a single |CBB| can be 'current' at +// any one time. For example, if one calls |CBB_add_u8_length_prefixed| then +// the new |CBB| points at the same buffer as the original. But if the original +// |CBB| is used then the length prefix is written out and the new |CBB| must +// not be used again. +// +// If one needs to force a length prefix to be written out because a |CBB| is +// going out of scope, use |CBB_flush|. 
If an operation on a |CBB| fails, it is +// in an undefined state and must not be used except to call |CBB_cleanup|. struct cbb_buffer_st { uint8_t *buf; - size_t len; /* The number of valid bytes. */ - size_t cap; /* The size of buf. */ + size_t len; // The number of valid bytes. + size_t cap; // The size of buf. char can_resize; /* One iff |buf| is owned by this object. If not then |buf| cannot be resized. */ char error; /* One iff there was an error writing to this CBB. All future @@ -294,147 +309,158 @@ struct cbb_buffer_st { struct cbb_st { struct cbb_buffer_st *base; - /* child points to a child CBB if a length-prefix is pending. */ + // child points to a child CBB if a length-prefix is pending. CBB *child; - /* offset is the number of bytes from the start of |base->buf| to this |CBB|'s - * pending length prefix. */ + // offset is the number of bytes from the start of |base->buf| to this |CBB|'s + // pending length prefix. size_t offset; - /* pending_len_len contains the number of bytes in this |CBB|'s pending - * length-prefix, or zero if no length-prefix is pending. */ + // pending_len_len contains the number of bytes in this |CBB|'s pending + // length-prefix, or zero if no length-prefix is pending. uint8_t pending_len_len; char pending_is_asn1; - /* is_top_level is true iff this is a top-level |CBB| (as opposed to a child - * |CBB|). Top-level objects are valid arguments for |CBB_finish|. */ + // is_top_level is true iff this is a top-level |CBB| (as opposed to a child + // |CBB|). Top-level objects are valid arguments for |CBB_finish|. char is_top_level; }; -/* CBB_zero sets an uninitialised |cbb| to the zero state. It must be - * initialised with |CBB_init| or |CBB_init_fixed| before use, but it is safe to - * call |CBB_cleanup| without a successful |CBB_init|. This may be used for more - * uniform cleanup of a |CBB|. */ +// CBB_zero sets an uninitialised |cbb| to the zero state. It must be +// initialised with |CBB_init| or |CBB_init_fixed| before use, but it is safe to +// call |CBB_cleanup| without a successful |CBB_init|. This may be used for more +// uniform cleanup of a |CBB|. OPENSSL_EXPORT void CBB_zero(CBB *cbb); -/* CBB_init initialises |cbb| with |initial_capacity|. Since a |CBB| grows as - * needed, the |initial_capacity| is just a hint. It returns one on success or - * zero on error. */ +// CBB_init initialises |cbb| with |initial_capacity|. Since a |CBB| grows as +// needed, the |initial_capacity| is just a hint. It returns one on success or +// zero on error. OPENSSL_EXPORT int CBB_init(CBB *cbb, size_t initial_capacity); -/* CBB_init_fixed initialises |cbb| to write to |len| bytes at |buf|. Since - * |buf| cannot grow, trying to write more than |len| bytes will cause CBB - * functions to fail. It returns one on success or zero on error. */ +// CBB_init_fixed initialises |cbb| to write to |len| bytes at |buf|. Since +// |buf| cannot grow, trying to write more than |len| bytes will cause CBB +// functions to fail. It returns one on success or zero on error. OPENSSL_EXPORT int CBB_init_fixed(CBB *cbb, uint8_t *buf, size_t len); -/* CBB_cleanup frees all resources owned by |cbb| and other |CBB| objects - * writing to the same buffer. This should be used in an error case where a - * serialisation is abandoned. - * - * This function can only be called on a "top level" |CBB|, i.e. one initialised - * with |CBB_init| or |CBB_init_fixed|, or a |CBB| set to the zero state with - * |CBB_zero|. 
*/ +// CBB_cleanup frees all resources owned by |cbb| and other |CBB| objects +// writing to the same buffer. This should be used in an error case where a +// serialisation is abandoned. +// +// This function can only be called on a "top level" |CBB|, i.e. one initialised +// with |CBB_init| or |CBB_init_fixed|, or a |CBB| set to the zero state with +// |CBB_zero|. OPENSSL_EXPORT void CBB_cleanup(CBB *cbb); -/* CBB_finish completes any pending length prefix and sets |*out_data| to a - * malloced buffer and |*out_len| to the length of that buffer. The caller - * takes ownership of the buffer and, unless the buffer was fixed with - * |CBB_init_fixed|, must call |OPENSSL_free| when done. - * - * It can only be called on a "top level" |CBB|, i.e. one initialised with - * |CBB_init| or |CBB_init_fixed|. It returns one on success and zero on - * error. */ +// CBB_finish completes any pending length prefix and sets |*out_data| to a +// malloced buffer and |*out_len| to the length of that buffer. The caller +// takes ownership of the buffer and, unless the buffer was fixed with +// |CBB_init_fixed|, must call |OPENSSL_free| when done. +// +// It can only be called on a "top level" |CBB|, i.e. one initialised with +// |CBB_init| or |CBB_init_fixed|. It returns one on success and zero on +// error. OPENSSL_EXPORT int CBB_finish(CBB *cbb, uint8_t **out_data, size_t *out_len); -/* CBB_flush causes any pending length prefixes to be written out and any child - * |CBB| objects of |cbb| to be invalidated. This allows |cbb| to continue to be - * used after the children go out of scope, e.g. when local |CBB| objects are - * added as children to a |CBB| that persists after a function returns. This - * function returns one on success or zero on error. */ +// CBB_flush causes any pending length prefixes to be written out and any child +// |CBB| objects of |cbb| to be invalidated. This allows |cbb| to continue to be +// used after the children go out of scope, e.g. when local |CBB| objects are +// added as children to a |CBB| that persists after a function returns. This +// function returns one on success or zero on error. OPENSSL_EXPORT int CBB_flush(CBB *cbb); -/* CBB_data returns a pointer to the bytes written to |cbb|. It does not flush - * |cbb|. The pointer is valid until the next operation to |cbb|. - * - * To avoid unfinalized length prefixes, it is a fatal error to call this on a - * CBB with any active children. */ +// CBB_data returns a pointer to the bytes written to |cbb|. It does not flush +// |cbb|. The pointer is valid until the next operation to |cbb|. +// +// To avoid unfinalized length prefixes, it is a fatal error to call this on a +// CBB with any active children. OPENSSL_EXPORT const uint8_t *CBB_data(const CBB *cbb); -/* CBB_len returns the number of bytes written to |cbb|. It does not flush - * |cbb|. - * - * To avoid unfinalized length prefixes, it is a fatal error to call this on a - * CBB with any active children. */ +// CBB_len returns the number of bytes written to |cbb|. It does not flush +// |cbb|. +// +// To avoid unfinalized length prefixes, it is a fatal error to call this on a +// CBB with any active children. OPENSSL_EXPORT size_t CBB_len(const CBB *cbb); -/* CBB_add_u8_length_prefixed sets |*out_contents| to a new child of |cbb|. The - * data written to |*out_contents| will be prefixed in |cbb| with an 8-bit - * length. It returns one on success or zero on error. */ +// CBB_add_u8_length_prefixed sets |*out_contents| to a new child of |cbb|. 
The +// data written to |*out_contents| will be prefixed in |cbb| with an 8-bit +// length. It returns one on success or zero on error. OPENSSL_EXPORT int CBB_add_u8_length_prefixed(CBB *cbb, CBB *out_contents); -/* CBB_add_u16_length_prefixed sets |*out_contents| to a new child of |cbb|. - * The data written to |*out_contents| will be prefixed in |cbb| with a 16-bit, - * big-endian length. It returns one on success or zero on error. */ +// CBB_add_u16_length_prefixed sets |*out_contents| to a new child of |cbb|. +// The data written to |*out_contents| will be prefixed in |cbb| with a 16-bit, +// big-endian length. It returns one on success or zero on error. OPENSSL_EXPORT int CBB_add_u16_length_prefixed(CBB *cbb, CBB *out_contents); -/* CBB_add_u24_length_prefixed sets |*out_contents| to a new child of |cbb|. - * The data written to |*out_contents| will be prefixed in |cbb| with a 24-bit, - * big-endian length. It returns one on success or zero on error. */ +// CBB_add_u24_length_prefixed sets |*out_contents| to a new child of |cbb|. +// The data written to |*out_contents| will be prefixed in |cbb| with a 24-bit, +// big-endian length. It returns one on success or zero on error. OPENSSL_EXPORT int CBB_add_u24_length_prefixed(CBB *cbb, CBB *out_contents); -/* CBB_add_asn1 sets |*out_contents| to a |CBB| into which the contents of an - * ASN.1 object can be written. The |tag| argument will be used as the tag for - * the object. Passing in |tag| number 31 will return in an error since only - * single octet identifiers are supported. It returns one on success or zero - * on error. */ +// CBB_add_asn1 sets |*out_contents| to a |CBB| into which the contents of an +// ASN.1 object can be written. The |tag| argument will be used as the tag for +// the object. Passing in |tag| number 31 will return in an error since only +// single octet identifiers are supported. It returns one on success or zero +// on error. OPENSSL_EXPORT int CBB_add_asn1(CBB *cbb, CBB *out_contents, unsigned tag); -/* CBB_add_bytes appends |len| bytes from |data| to |cbb|. It returns one on - * success and zero otherwise. */ +// CBB_add_bytes appends |len| bytes from |data| to |cbb|. It returns one on +// success and zero otherwise. OPENSSL_EXPORT int CBB_add_bytes(CBB *cbb, const uint8_t *data, size_t len); -/* CBB_add_space appends |len| bytes to |cbb| and sets |*out_data| to point to - * the beginning of that space. The caller must then write |len| bytes of - * actual contents to |*out_data|. It returns one on success and zero - * otherwise. */ +// CBB_add_space appends |len| bytes to |cbb| and sets |*out_data| to point to +// the beginning of that space. The caller must then write |len| bytes of +// actual contents to |*out_data|. It returns one on success and zero +// otherwise. OPENSSL_EXPORT int CBB_add_space(CBB *cbb, uint8_t **out_data, size_t len); -/* CBB_reserve ensures |cbb| has room for |len| additional bytes and sets - * |*out_data| to point to the beginning of that space. It returns one on - * success and zero otherwise. The caller may write up to |len| bytes to - * |*out_data| and call |CBB_did_write| to complete the write. |*out_data| is - * valid until the next operation on |cbb| or an ancestor |CBB|. */ +// CBB_reserve ensures |cbb| has room for |len| additional bytes and sets +// |*out_data| to point to the beginning of that space. It returns one on +// success and zero otherwise. The caller may write up to |len| bytes to +// |*out_data| and call |CBB_did_write| to complete the write. 
|*out_data| is +// valid until the next operation on |cbb| or an ancestor |CBB|. OPENSSL_EXPORT int CBB_reserve(CBB *cbb, uint8_t **out_data, size_t len); -/* CBB_did_write advances |cbb| by |len| bytes, assuming the space has been - * written to by the caller. It returns one on success and zero on error. */ +// CBB_did_write advances |cbb| by |len| bytes, assuming the space has been +// written to by the caller. It returns one on success and zero on error. OPENSSL_EXPORT int CBB_did_write(CBB *cbb, size_t len); -/* CBB_add_u8 appends an 8-bit number from |value| to |cbb|. It returns one on - * success and zero otherwise. */ +// CBB_add_u8 appends an 8-bit number from |value| to |cbb|. It returns one on +// success and zero otherwise. OPENSSL_EXPORT int CBB_add_u8(CBB *cbb, uint8_t value); -/* CBB_add_u16 appends a 16-bit, big-endian number from |value| to |cbb|. It - * returns one on success and zero otherwise. */ +// CBB_add_u16 appends a 16-bit, big-endian number from |value| to |cbb|. It +// returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u16(CBB *cbb, uint16_t value); -/* CBB_add_u24 appends a 24-bit, big-endian number from |value| to |cbb|. It - * returns one on success and zero otherwise. */ +// CBB_add_u24 appends a 24-bit, big-endian number from |value| to |cbb|. It +// returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u24(CBB *cbb, uint32_t value); -/* CBB_add_u32 appends a 32-bit, big-endian number from |value| to |cbb|. It - * returns one on success and zero otherwise. */ +// CBB_add_u32 appends a 32-bit, big-endian number from |value| to |cbb|. It +// returns one on success and zero otherwise. OPENSSL_EXPORT int CBB_add_u32(CBB *cbb, uint32_t value); -/* CBB_discard_child discards the current unflushed child of |cbb|. Neither the - * child's contents nor the length prefix will be included in the output. */ +// CBB_discard_child discards the current unflushed child of |cbb|. Neither the +// child's contents nor the length prefix will be included in the output. OPENSSL_EXPORT void CBB_discard_child(CBB *cbb); -/* CBB_add_asn1_uint64 writes an ASN.1 INTEGER into |cbb| using |CBB_add_asn1| - * and writes |value| in its contents. It returns one on success and zero on - * error. */ +// CBB_add_asn1_uint64 writes an ASN.1 INTEGER into |cbb| using |CBB_add_asn1| +// and writes |value| in its contents. It returns one on success and zero on +// error. OPENSSL_EXPORT int CBB_add_asn1_uint64(CBB *cbb, uint64_t value); +// CBB_add_asn1_oid_from_text decodes |len| bytes from |text| as an ASCII OID +// representation, e.g. "1.2.840.113554.4.1.72585", and writes the DER-encoded +// contents to |cbb|. It returns one on success and zero on malloc failure or if +// |text| was invalid. It does not include the OBJECT IDENTIFER framing, only +// the element's contents. +// +// This function considers OID strings with components which do not fit in a +// |uint32_t| to be invalid. 
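// Illustrative usage sketch (not part of the vendored header): building the
// DER encoding of SEQUENCE { INTEGER 5 } with the CBB ASN.1 helpers documented
// above, mirroring the parsing example earlier in this header.
#include <openssl/bytestring.h>
#include <openssl/mem.h>

static int cbb_asn1_example(void) {
  CBB cbb, seq;
  uint8_t *der = NULL;
  size_t der_len = 0;
  int ok = CBB_init(&cbb, 16) &&
           CBB_add_asn1(&cbb, &seq, CBS_ASN1_SEQUENCE) &&
           CBB_add_asn1_uint64(&seq, 5) &&
           CBB_finish(&cbb, &der, &der_len);  // der == {0x30, 0x03, 0x02, 0x01, 0x05}
  if (!ok) {
    CBB_cleanup(&cbb);
  }
  OPENSSL_free(der);
  return ok;
}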
+OPENSSL_EXPORT int CBB_add_asn1_oid_from_text(CBB *cbb, const char *text, + size_t len); + #if defined(__cplusplus) -} /* extern C */ +} // extern C #if !defined(BORINGSSL_NO_CXX) @@ -451,4 +477,4 @@ using ScopedCBB = internal::StackAllocated; #endif -#endif /* OPENSSL_HEADER_BYTESTRING_H */ +#endif // OPENSSL_HEADER_BYTESTRING_H diff --git a/Sources/BoringSSL/include/openssl/cast.h b/Sources/BoringSSL/include/openssl/cast.h index 802172394..2978a67e8 100644 --- a/Sources/BoringSSL/include/openssl/cast.h +++ b/Sources/BoringSSL/include/openssl/cast.h @@ -72,7 +72,7 @@ extern "C" { typedef struct cast_key_st { uint32_t data[32]; - int short_key; /* Use reduced rounds for short key */ + int short_key; // Use reduced rounds for short key } CAST_KEY; OPENSSL_EXPORT void CAST_set_key(CAST_KEY *key, size_t len, @@ -93,4 +93,4 @@ OPENSSL_EXPORT void CAST_cfb64_encrypt(const uint8_t *in, uint8_t *out, } #endif -#endif /* OPENSSL_HEADER_CAST_H */ +#endif // OPENSSL_HEADER_CAST_H diff --git a/Sources/BoringSSL/include/openssl/chacha.h b/Sources/BoringSSL/include/openssl/chacha.h index 3d035e69a..684fc5b0b 100644 --- a/Sources/BoringSSL/include/openssl/chacha.h +++ b/Sources/BoringSSL/include/openssl/chacha.h @@ -17,21 +17,25 @@ #include -#ifdef __cplusplus +#if defined(__cplusplus) extern "C" { #endif +// ChaCha20. +// +// ChaCha20 is a stream cipher. See https://tools.ietf.org/html/rfc7539. + -/* CRYPTO_chacha_20 encrypts |in_len| bytes from |in| with the given key and - * nonce and writes the result to |out|. If |in| and |out| alias, they must be - * equal. The initial block counter is specified by |counter|. */ +// CRYPTO_chacha_20 encrypts |in_len| bytes from |in| with the given key and +// nonce and writes the result to |out|. If |in| and |out| alias, they must be +// equal. The initial block counter is specified by |counter|. OPENSSL_EXPORT void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len, const uint8_t key[32], const uint8_t nonce[12], uint32_t counter); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CHACHA_H */ +#endif // OPENSSL_HEADER_CHACHA_H diff --git a/Sources/BoringSSL/include/openssl/cipher.h b/Sources/BoringSSL/include/openssl/cipher.h index 2ee74efcd..643bf040e 100644 --- a/Sources/BoringSSL/include/openssl/cipher.h +++ b/Sources/BoringSSL/include/openssl/cipher.h @@ -64,19 +64,20 @@ extern "C" { #endif -/* Ciphers. */ +// Ciphers. -/* Cipher primitives. - * - * The following functions return |EVP_CIPHER| objects that implement the named - * cipher algorithm. */ +// Cipher primitives. +// +// The following functions return |EVP_CIPHER| objects that implement the named +// cipher algorithm. OPENSSL_EXPORT const EVP_CIPHER *EVP_rc4(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_cbc(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ecb(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede(void); +OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede3(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede_cbc(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_des_ede3_cbc(void); @@ -91,242 +92,246 @@ OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_ctr(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_ofb(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_xts(void); -/* EVP_enc_null returns a 'cipher' that passes plaintext through as - * ciphertext. */ +// EVP_enc_null returns a 'cipher' that passes plaintext through as +// ciphertext. 
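// Illustrative usage sketch (not part of the vendored header): a single call
// to |CRYPTO_chacha_20| from the chacha.h hunk above. The all-zero key and
// nonce are placeholders, not real secrets.
#include <openssl/chacha.h>
#include <string.h>

static void chacha_example(void) {
  uint8_t key[32] = {0}, nonce[12] = {0};
  uint8_t buf[5];
  memcpy(buf, "hello", 5);
  // In-place encryption; the same call with the same counter decrypts.
  CRYPTO_chacha_20(buf, buf, sizeof(buf), key, nonce, /*counter=*/0);
}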
OPENSSL_EXPORT const EVP_CIPHER *EVP_enc_null(void); -/* EVP_rc2_cbc returns a cipher that implements 128-bit RC2 in CBC mode. */ +// EVP_rc2_cbc returns a cipher that implements 128-bit RC2 in CBC mode. OPENSSL_EXPORT const EVP_CIPHER *EVP_rc2_cbc(void); -/* EVP_rc2_40_cbc returns a cipher that implements 40-bit RC2 in CBC mode. This - * is obviously very, very weak and is included only in order to read PKCS#12 - * files, which often encrypt the certificate chain using this cipher. It is - * deliberately not exported. */ +// EVP_rc2_40_cbc returns a cipher that implements 40-bit RC2 in CBC mode. This +// is obviously very, very weak and is included only in order to read PKCS#12 +// files, which often encrypt the certificate chain using this cipher. It is +// deliberately not exported. const EVP_CIPHER *EVP_rc2_40_cbc(void); -/* EVP_get_cipherbynid returns the cipher corresponding to the given NID, or - * NULL if no such cipher is known. */ +// EVP_get_cipherbynid returns the cipher corresponding to the given NID, or +// NULL if no such cipher is known. OPENSSL_EXPORT const EVP_CIPHER *EVP_get_cipherbynid(int nid); -/* Cipher context allocation. - * - * An |EVP_CIPHER_CTX| represents the state of an encryption or decryption in - * progress. */ +// Cipher context allocation. +// +// An |EVP_CIPHER_CTX| represents the state of an encryption or decryption in +// progress. -/* EVP_CIPHER_CTX_init initialises an, already allocated, |EVP_CIPHER_CTX|. */ +// EVP_CIPHER_CTX_init initialises an, already allocated, |EVP_CIPHER_CTX|. OPENSSL_EXPORT void EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_new allocates a fresh |EVP_CIPHER_CTX|, calls - * |EVP_CIPHER_CTX_init| and returns it, or NULL on allocation failure. */ +// EVP_CIPHER_CTX_new allocates a fresh |EVP_CIPHER_CTX|, calls +// |EVP_CIPHER_CTX_init| and returns it, or NULL on allocation failure. OPENSSL_EXPORT EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void); -/* EVP_CIPHER_CTX_cleanup frees any memory referenced by |ctx|. It returns - * one. */ +// EVP_CIPHER_CTX_cleanup frees any memory referenced by |ctx|. It returns +// one. OPENSSL_EXPORT int EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_free calls |EVP_CIPHER_CTX_cleanup| on |ctx| and then frees - * |ctx| itself. */ +// EVP_CIPHER_CTX_free calls |EVP_CIPHER_CTX_cleanup| on |ctx| and then frees +// |ctx| itself. OPENSSL_EXPORT void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_copy sets |out| to be a duplicate of the current state of - * |in|. The |out| argument must have been previously initialised. */ +// EVP_CIPHER_CTX_copy sets |out| to be a duplicate of the current state of +// |in|. The |out| argument must have been previously initialised. OPENSSL_EXPORT int EVP_CIPHER_CTX_copy(EVP_CIPHER_CTX *out, const EVP_CIPHER_CTX *in); +// EVP_CIPHER_CTX_reset calls |EVP_CIPHER_CTX_cleanup| followed by +// |EVP_CIPHER_CTX_init|. +OPENSSL_EXPORT void EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx); + -/* Cipher context configuration. */ +// Cipher context configuration. -/* EVP_CipherInit_ex configures |ctx| for a fresh encryption (or decryption, if - * |enc| is zero) operation using |cipher|. If |ctx| has been previously - * configured with a cipher then |cipher|, |key| and |iv| may be |NULL| and - * |enc| may be -1 to reuse the previous values. The operation will use |key| - * as the key and |iv| as the IV (if any). These should have the correct - * lengths given by |EVP_CIPHER_key_length| and |EVP_CIPHER_iv_length|. 
It - * returns one on success and zero on error. */ +// EVP_CipherInit_ex configures |ctx| for a fresh encryption (or decryption, if +// |enc| is zero) operation using |cipher|. If |ctx| has been previously +// configured with a cipher then |cipher|, |key| and |iv| may be |NULL| and +// |enc| may be -1 to reuse the previous values. The operation will use |key| +// as the key and |iv| as the IV (if any). These should have the correct +// lengths given by |EVP_CIPHER_key_length| and |EVP_CIPHER_iv_length|. It +// returns one on success and zero on error. OPENSSL_EXPORT int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *engine, const uint8_t *key, const uint8_t *iv, int enc); -/* EVP_EncryptInit_ex calls |EVP_CipherInit_ex| with |enc| equal to one. */ +// EVP_EncryptInit_ex calls |EVP_CipherInit_ex| with |enc| equal to one. OPENSSL_EXPORT int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *impl, const uint8_t *key, const uint8_t *iv); -/* EVP_DecryptInit_ex calls |EVP_CipherInit_ex| with |enc| equal to zero. */ +// EVP_DecryptInit_ex calls |EVP_CipherInit_ex| with |enc| equal to zero. OPENSSL_EXPORT int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *impl, const uint8_t *key, const uint8_t *iv); -/* Cipher operations. */ +// Cipher operations. -/* EVP_EncryptUpdate encrypts |in_len| bytes from |in| to |out|. The number - * of output bytes may be up to |in_len| plus the block length minus one and - * |out| must have sufficient space. The number of bytes actually output is - * written to |*out_len|. It returns one on success and zero otherwise. */ +// EVP_EncryptUpdate encrypts |in_len| bytes from |in| to |out|. The number +// of output bytes may be up to |in_len| plus the block length minus one and +// |out| must have sufficient space. The number of bytes actually output is +// written to |*out_len|. It returns one on success and zero otherwise. OPENSSL_EXPORT int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, int in_len); -/* EVP_EncryptFinal_ex writes at most a block of ciphertext to |out| and sets - * |*out_len| to the number of bytes written. If padding is enabled (the - * default) then standard padding is applied to create the final block. If - * padding is disabled (with |EVP_CIPHER_CTX_set_padding|) then any partial - * block remaining will cause an error. The function returns one on success and - * zero otherwise. */ +// EVP_EncryptFinal_ex writes at most a block of ciphertext to |out| and sets +// |*out_len| to the number of bytes written. If padding is enabled (the +// default) then standard padding is applied to create the final block. If +// padding is disabled (with |EVP_CIPHER_CTX_set_padding|) then any partial +// block remaining will cause an error. The function returns one on success and +// zero otherwise. OPENSSL_EXPORT int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len); -/* EVP_DecryptUpdate decrypts |in_len| bytes from |in| to |out|. The number of - * output bytes may be up to |in_len| plus the block length minus one and |out| - * must have sufficient space. The number of bytes actually output is written - * to |*out_len|. It returns one on success and zero otherwise. */ +// EVP_DecryptUpdate decrypts |in_len| bytes from |in| to |out|. The number of +// output bytes may be up to |in_len| plus the block length minus one and |out| +// must have sufficient space. The number of bytes actually output is written +// to |*out_len|. 
It returns one on success and zero otherwise. OPENSSL_EXPORT int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, int in_len); -/* EVP_DecryptFinal_ex writes at most a block of ciphertext to |out| and sets - * |*out_len| to the number of bytes written. If padding is enabled (the - * default) then padding is removed from the final block. - * - * WARNING: it is unsafe to call this function with unauthenticated - * ciphertext if padding is enabled. */ +// EVP_DecryptFinal_ex writes at most a block of ciphertext to |out| and sets +// |*out_len| to the number of bytes written. If padding is enabled (the +// default) then padding is removed from the final block. +// +// WARNING: it is unsafe to call this function with unauthenticated +// ciphertext if padding is enabled. OPENSSL_EXPORT int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *out_len); -/* EVP_Cipher performs a one-shot encryption/decryption operation. No partial - * blocks are maintained between calls. However, any internal cipher state is - * still updated. For CBC-mode ciphers, the IV is updated to the final - * ciphertext block. For stream ciphers, the stream is advanced past the bytes - * used. It returns one on success and zero otherwise, unless |EVP_CIPHER_flags| - * has |EVP_CIPH_FLAG_CUSTOM_CIPHER| set. Then it returns the number of bytes - * written or -1 on error. - * - * WARNING: this differs from the usual return value convention when using - * |EVP_CIPH_FLAG_CUSTOM_CIPHER|. - * - * TODO(davidben): The normal ciphers currently never fail, even if, e.g., - * |in_len| is not a multiple of the block size for CBC-mode decryption. The - * input just gets rounded up while the output gets truncated. This should - * either be officially documented or fail. */ +// EVP_Cipher performs a one-shot encryption/decryption operation. No partial +// blocks are maintained between calls. However, any internal cipher state is +// still updated. For CBC-mode ciphers, the IV is updated to the final +// ciphertext block. For stream ciphers, the stream is advanced past the bytes +// used. It returns one on success and zero otherwise, unless |EVP_CIPHER_flags| +// has |EVP_CIPH_FLAG_CUSTOM_CIPHER| set. Then it returns the number of bytes +// written or -1 on error. +// +// WARNING: this differs from the usual return value convention when using +// |EVP_CIPH_FLAG_CUSTOM_CIPHER|. +// +// TODO(davidben): The normal ciphers currently never fail, even if, e.g., +// |in_len| is not a multiple of the block size for CBC-mode decryption. The +// input just gets rounded up while the output gets truncated. This should +// either be officially documented or fail. OPENSSL_EXPORT int EVP_Cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t in_len); -/* EVP_CipherUpdate calls either |EVP_EncryptUpdate| or |EVP_DecryptUpdate| - * depending on how |ctx| has been setup. */ +// EVP_CipherUpdate calls either |EVP_EncryptUpdate| or |EVP_DecryptUpdate| +// depending on how |ctx| has been setup. OPENSSL_EXPORT int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len, const uint8_t *in, int in_len); -/* EVP_CipherFinal_ex calls either |EVP_EncryptFinal_ex| or - * |EVP_DecryptFinal_ex| depending on how |ctx| has been setup. */ +// EVP_CipherFinal_ex calls either |EVP_EncryptFinal_ex| or +// |EVP_DecryptFinal_ex| depending on how |ctx| has been setup. OPENSSL_EXPORT int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, uint8_t *out, int *out_len); -/* Cipher context accessors. 
*/ +// Cipher context accessors. -/* EVP_CIPHER_CTX_cipher returns the |EVP_CIPHER| underlying |ctx|, or NULL if - * none has been set. */ +// EVP_CIPHER_CTX_cipher returns the |EVP_CIPHER| underlying |ctx|, or NULL if +// none has been set. OPENSSL_EXPORT const EVP_CIPHER *EVP_CIPHER_CTX_cipher( const EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_nid returns a NID identifying the |EVP_CIPHER| underlying - * |ctx| (e.g. |NID_aes_128_gcm|). It will crash if no cipher has been - * configured. */ +// EVP_CIPHER_CTX_nid returns a NID identifying the |EVP_CIPHER| underlying +// |ctx| (e.g. |NID_aes_128_gcm|). It will crash if no cipher has been +// configured. OPENSSL_EXPORT int EVP_CIPHER_CTX_nid(const EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_block_size returns the block size, in bytes, of the cipher - * underlying |ctx|, or one if the cipher is a stream cipher. It will crash if - * no cipher has been configured. */ +// EVP_CIPHER_CTX_block_size returns the block size, in bytes, of the cipher +// underlying |ctx|, or one if the cipher is a stream cipher. It will crash if +// no cipher has been configured. OPENSSL_EXPORT unsigned EVP_CIPHER_CTX_block_size(const EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_key_length returns the key size, in bytes, of the cipher - * underlying |ctx| or zero if no cipher has been configured. */ +// EVP_CIPHER_CTX_key_length returns the key size, in bytes, of the cipher +// underlying |ctx| or zero if no cipher has been configured. OPENSSL_EXPORT unsigned EVP_CIPHER_CTX_key_length(const EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_iv_length returns the IV size, in bytes, of the cipher - * underlying |ctx|. It will crash if no cipher has been configured. */ +// EVP_CIPHER_CTX_iv_length returns the IV size, in bytes, of the cipher +// underlying |ctx|. It will crash if no cipher has been configured. OPENSSL_EXPORT unsigned EVP_CIPHER_CTX_iv_length(const EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_get_app_data returns the opaque, application data pointer for - * |ctx|, or NULL if none has been set. */ +// EVP_CIPHER_CTX_get_app_data returns the opaque, application data pointer for +// |ctx|, or NULL if none has been set. OPENSSL_EXPORT void *EVP_CIPHER_CTX_get_app_data(const EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_set_app_data sets the opaque, application data pointer for - * |ctx| to |data|. */ +// EVP_CIPHER_CTX_set_app_data sets the opaque, application data pointer for +// |ctx| to |data|. OPENSSL_EXPORT void EVP_CIPHER_CTX_set_app_data(EVP_CIPHER_CTX *ctx, void *data); -/* EVP_CIPHER_CTX_flags returns a value which is the OR of zero or more - * |EVP_CIPH_*| flags. It will crash if no cipher has been configured. */ +// EVP_CIPHER_CTX_flags returns a value which is the OR of zero or more +// |EVP_CIPH_*| flags. It will crash if no cipher has been configured. OPENSSL_EXPORT uint32_t EVP_CIPHER_CTX_flags(const EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_mode returns one of the |EVP_CIPH_*| cipher mode values - * enumerated below. It will crash if no cipher has been configured. */ +// EVP_CIPHER_CTX_mode returns one of the |EVP_CIPH_*| cipher mode values +// enumerated below. It will crash if no cipher has been configured. OPENSSL_EXPORT uint32_t EVP_CIPHER_CTX_mode(const EVP_CIPHER_CTX *ctx); -/* EVP_CIPHER_CTX_ctrl is an |ioctl| like function. The |command| argument - * should be one of the |EVP_CTRL_*| values. The |arg| and |ptr| arguments are - * specific to the command in question. */ +// EVP_CIPHER_CTX_ctrl is an |ioctl| like function. 
The |command| argument +// should be one of the |EVP_CTRL_*| values. The |arg| and |ptr| arguments are +// specific to the command in question. OPENSSL_EXPORT int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int command, int arg, void *ptr); -/* EVP_CIPHER_CTX_set_padding sets whether padding is enabled for |ctx| and - * returns one. Pass a non-zero |pad| to enable padding (the default) or zero - * to disable. */ +// EVP_CIPHER_CTX_set_padding sets whether padding is enabled for |ctx| and +// returns one. Pass a non-zero |pad| to enable padding (the default) or zero +// to disable. OPENSSL_EXPORT int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad); -/* EVP_CIPHER_CTX_set_key_length sets the key length for |ctx|. This is only - * valid for ciphers that can take a variable length key. It returns one on - * success and zero on error. */ +// EVP_CIPHER_CTX_set_key_length sets the key length for |ctx|. This is only +// valid for ciphers that can take a variable length key. It returns one on +// success and zero on error. OPENSSL_EXPORT int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *ctx, unsigned key_len); -/* Cipher accessors. */ +// Cipher accessors. -/* EVP_CIPHER_nid returns a NID identifying |cipher|. (For example, - * |NID_aes_128_gcm|.) */ +// EVP_CIPHER_nid returns a NID identifying |cipher|. (For example, +// |NID_aes_128_gcm|.) OPENSSL_EXPORT int EVP_CIPHER_nid(const EVP_CIPHER *cipher); -/* EVP_CIPHER_block_size returns the block size, in bytes, for |cipher|, or one - * if |cipher| is a stream cipher. */ +// EVP_CIPHER_block_size returns the block size, in bytes, for |cipher|, or one +// if |cipher| is a stream cipher. OPENSSL_EXPORT unsigned EVP_CIPHER_block_size(const EVP_CIPHER *cipher); -/* EVP_CIPHER_key_length returns the key size, in bytes, for |cipher|. If - * |cipher| can take a variable key length then this function returns the - * default key length and |EVP_CIPHER_flags| will return a value with - * |EVP_CIPH_VARIABLE_LENGTH| set. */ +// EVP_CIPHER_key_length returns the key size, in bytes, for |cipher|. If +// |cipher| can take a variable key length then this function returns the +// default key length and |EVP_CIPHER_flags| will return a value with +// |EVP_CIPH_VARIABLE_LENGTH| set. OPENSSL_EXPORT unsigned EVP_CIPHER_key_length(const EVP_CIPHER *cipher); -/* EVP_CIPHER_iv_length returns the IV size, in bytes, of |cipher|, or zero if - * |cipher| doesn't take an IV. */ +// EVP_CIPHER_iv_length returns the IV size, in bytes, of |cipher|, or zero if +// |cipher| doesn't take an IV. OPENSSL_EXPORT unsigned EVP_CIPHER_iv_length(const EVP_CIPHER *cipher); -/* EVP_CIPHER_flags returns a value which is the OR of zero or more - * |EVP_CIPH_*| flags. */ +// EVP_CIPHER_flags returns a value which is the OR of zero or more +// |EVP_CIPH_*| flags. OPENSSL_EXPORT uint32_t EVP_CIPHER_flags(const EVP_CIPHER *cipher); -/* EVP_CIPHER_mode returns one of the cipher mode values enumerated below. */ +// EVP_CIPHER_mode returns one of the cipher mode values enumerated below. OPENSSL_EXPORT uint32_t EVP_CIPHER_mode(const EVP_CIPHER *cipher); -/* Key derivation. */ +// Key derivation. -/* EVP_BytesToKey generates a key and IV for the cipher |type| by iterating - * |md| |count| times using |data| and |salt|. On entry, the |key| and |iv| - * buffers must have enough space to hold a key and IV for |type|. It returns - * the length of the key on success or zero on error. 
*/ +// EVP_BytesToKey generates a key and IV for the cipher |type| by iterating +// |md| |count| times using |data| and |salt|. On entry, the |key| and |iv| +// buffers must have enough space to hold a key and IV for |type|. It returns +// the length of the key on success or zero on error. OPENSSL_EXPORT int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md, const uint8_t *salt, const uint8_t *data, size_t data_len, unsigned count, uint8_t *key, uint8_t *iv); -/* Cipher modes (for |EVP_CIPHER_mode|). */ +// Cipher modes (for |EVP_CIPHER_mode|). #define EVP_CIPH_STREAM_CIPHER 0x0 #define EVP_CIPH_ECB_MODE 0x1 @@ -338,84 +343,97 @@ OPENSSL_EXPORT int EVP_BytesToKey(const EVP_CIPHER *type, const EVP_MD *md, #define EVP_CIPH_XTS_MODE 0x7 -/* Cipher flags (for |EVP_CIPHER_flags|). */ +// Cipher flags (for |EVP_CIPHER_flags|). -/* EVP_CIPH_VARIABLE_LENGTH indicates that the cipher takes a variable length - * key. */ +// EVP_CIPH_VARIABLE_LENGTH indicates that the cipher takes a variable length +// key. #define EVP_CIPH_VARIABLE_LENGTH 0x40 -/* EVP_CIPH_ALWAYS_CALL_INIT indicates that the |init| function for the cipher - * should always be called when initialising a new operation, even if the key - * is NULL to indicate that the same key is being used. */ +// EVP_CIPH_ALWAYS_CALL_INIT indicates that the |init| function for the cipher +// should always be called when initialising a new operation, even if the key +// is NULL to indicate that the same key is being used. #define EVP_CIPH_ALWAYS_CALL_INIT 0x80 -/* EVP_CIPH_CUSTOM_IV indicates that the cipher manages the IV itself rather - * than keeping it in the |iv| member of |EVP_CIPHER_CTX|. */ +// EVP_CIPH_CUSTOM_IV indicates that the cipher manages the IV itself rather +// than keeping it in the |iv| member of |EVP_CIPHER_CTX|. #define EVP_CIPH_CUSTOM_IV 0x100 -/* EVP_CIPH_CTRL_INIT indicates that EVP_CTRL_INIT should be used when - * initialising an |EVP_CIPHER_CTX|. */ +// EVP_CIPH_CTRL_INIT indicates that EVP_CTRL_INIT should be used when +// initialising an |EVP_CIPHER_CTX|. #define EVP_CIPH_CTRL_INIT 0x200 -/* EVP_CIPH_FLAG_CUSTOM_CIPHER indicates that the cipher manages blocking - * itself. This causes EVP_(En|De)crypt_ex to be simple wrapper functions. */ +// EVP_CIPH_FLAG_CUSTOM_CIPHER indicates that the cipher manages blocking +// itself. This causes EVP_(En|De)crypt_ex to be simple wrapper functions. #define EVP_CIPH_FLAG_CUSTOM_CIPHER 0x400 -/* EVP_CIPH_FLAG_AEAD_CIPHER specifies that the cipher is an AEAD. This is an - * older version of the proper AEAD interface. See aead.h for the current - * one. */ +// EVP_CIPH_FLAG_AEAD_CIPHER specifies that the cipher is an AEAD. This is an +// older version of the proper AEAD interface. See aead.h for the current +// one. #define EVP_CIPH_FLAG_AEAD_CIPHER 0x800 -/* EVP_CIPH_CUSTOM_COPY indicates that the |ctrl| callback should be called - * with |EVP_CTRL_COPY| at the end of normal |EVP_CIPHER_CTX_copy| - * processing. */ +// EVP_CIPH_CUSTOM_COPY indicates that the |ctrl| callback should be called +// with |EVP_CTRL_COPY| at the end of normal |EVP_CIPHER_CTX_copy| +// processing. #define EVP_CIPH_CUSTOM_COPY 0x1000 -/* Deprecated functions */ +// Deprecated functions -/* EVP_CipherInit acts like EVP_CipherInit_ex except that |EVP_CIPHER_CTX_init| - * is called on |cipher| first, if |cipher| is not NULL. */ +// EVP_CipherInit acts like EVP_CipherInit_ex except that |EVP_CIPHER_CTX_init| +// is called on |cipher| first, if |cipher| is not NULL. 
OPENSSL_EXPORT int EVP_CipherInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, const uint8_t *key, const uint8_t *iv, int enc); -/* EVP_EncryptInit calls |EVP_CipherInit| with |enc| equal to one. */ +// EVP_EncryptInit calls |EVP_CipherInit| with |enc| equal to one. OPENSSL_EXPORT int EVP_EncryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, const uint8_t *key, const uint8_t *iv); -/* EVP_DecryptInit calls |EVP_CipherInit| with |enc| equal to zero. */ +// EVP_DecryptInit calls |EVP_CipherInit| with |enc| equal to zero. OPENSSL_EXPORT int EVP_DecryptInit(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, const uint8_t *key, const uint8_t *iv); -/* EVP_add_cipher_alias does nothing and returns one. */ +// EVP_add_cipher_alias does nothing and returns one. OPENSSL_EXPORT int EVP_add_cipher_alias(const char *a, const char *b); -/* EVP_get_cipherbyname returns an |EVP_CIPHER| given a human readable name in - * |name|, or NULL if the name is unknown. */ +// EVP_get_cipherbyname returns an |EVP_CIPHER| given a human readable name in +// |name|, or NULL if the name is unknown. OPENSSL_EXPORT const EVP_CIPHER *EVP_get_cipherbyname(const char *name); -/* These AEADs are deprecated AES-GCM implementations that set - * |EVP_CIPH_FLAG_CUSTOM_CIPHER|. Use |EVP_aead_aes_128_gcm| and - * |EVP_aead_aes_256_gcm| instead. */ +// These AEADs are deprecated AES-GCM implementations that set +// |EVP_CIPH_FLAG_CUSTOM_CIPHER|. Use |EVP_aead_aes_128_gcm| and +// |EVP_aead_aes_256_gcm| instead. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_gcm(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_256_gcm(void); -/* These are deprecated, 192-bit version of AES. */ +// These are deprecated, 192-bit version of AES. OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_ecb(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_cbc(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_ctr(void); OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_gcm(void); +// EVP_aes_128_cfb128 is only available in decrepit. +OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_128_cfb128(void); + +// The following flags do nothing and are included only to make it easier to +// compile code with BoringSSL. +#define EVP_CIPH_CCM_MODE 0 +#define EVP_CIPH_WRAP_MODE 0 +#define EVP_CIPHER_CTX_FLAG_WRAP_ALLOW 0 + +// EVP_CIPHER_CTX_set_flags does nothing. +OPENSSL_EXPORT void EVP_CIPHER_CTX_set_flags(const EVP_CIPHER_CTX *ctx, + uint32_t flags); + -/* Private functions. */ +// Private functions. -/* EVP_CIPH_NO_PADDING disables padding in block ciphers. */ +// EVP_CIPH_NO_PADDING disables padding in block ciphers. #define EVP_CIPH_NO_PADDING 0x800 -/* EVP_CIPHER_CTX_ctrl commands. */ +// EVP_CIPHER_CTX_ctrl commands. 
#define EVP_CTRL_INIT 0x0 #define EVP_CTRL_SET_KEY_LENGTH 0x1 #define EVP_CTRL_GET_RC2_KEY_BITS 0x2 @@ -431,15 +449,15 @@ OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_gcm(void); #define EVP_CTRL_GCM_SET_IV_FIXED 0x12 #define EVP_CTRL_GCM_IV_GEN 0x13 #define EVP_CTRL_AEAD_SET_MAC_KEY 0x17 -/* Set the GCM invocation field, decrypt only */ +// Set the GCM invocation field, decrypt only #define EVP_CTRL_GCM_SET_IV_INV 0x18 -/* GCM TLS constants */ -/* Length of fixed part of IV derived from PRF */ +// GCM TLS constants +// Length of fixed part of IV derived from PRF #define EVP_GCM_TLS_FIXED_IV_LEN 4 -/* Length of explicit part of IV part of TLS records */ +// Length of explicit part of IV part of TLS records #define EVP_GCM_TLS_EXPLICIT_IV_LEN 8 -/* Length of tag for TLS */ +// Length of tag for TLS #define EVP_GCM_TLS_TAG_LEN 16 #define EVP_MAX_KEY_LENGTH 64 @@ -447,51 +465,51 @@ OPENSSL_EXPORT const EVP_CIPHER *EVP_aes_192_gcm(void); #define EVP_MAX_BLOCK_LENGTH 32 struct evp_cipher_ctx_st { - /* cipher contains the underlying cipher for this context. */ + // cipher contains the underlying cipher for this context. const EVP_CIPHER *cipher; - /* app_data is a pointer to opaque, user data. */ - void *app_data; /* application stuff */ + // app_data is a pointer to opaque, user data. + void *app_data; // application stuff - /* cipher_data points to the |cipher| specific state. */ + // cipher_data points to the |cipher| specific state. void *cipher_data; - /* key_len contains the length of the key, which may differ from - * |cipher->key_len| if the cipher can take a variable key length. */ + // key_len contains the length of the key, which may differ from + // |cipher->key_len| if the cipher can take a variable key length. unsigned key_len; - /* encrypt is one if encrypting and zero if decrypting. */ + // encrypt is one if encrypting and zero if decrypting. int encrypt; - /* flags contains the OR of zero or more |EVP_CIPH_*| flags, above. */ + // flags contains the OR of zero or more |EVP_CIPH_*| flags, above. uint32_t flags; - /* oiv contains the original IV value. */ + // oiv contains the original IV value. uint8_t oiv[EVP_MAX_IV_LENGTH]; - /* iv contains the current IV value, which may have been updated. */ + // iv contains the current IV value, which may have been updated. uint8_t iv[EVP_MAX_IV_LENGTH]; - /* buf contains a partial block which is used by, for example, CTR mode to - * store unused keystream bytes. */ + // buf contains a partial block which is used by, for example, CTR mode to + // store unused keystream bytes. uint8_t buf[EVP_MAX_BLOCK_LENGTH]; - /* buf_len contains the number of bytes of a partial block contained in - * |buf|. */ + // buf_len contains the number of bytes of a partial block contained in + // |buf|. int buf_len; - /* num contains the number of bytes of |iv| which are valid for modes that - * manage partial blocks themselves. */ + // num contains the number of bytes of |iv| which are valid for modes that + // manage partial blocks themselves. unsigned num; - /* final_used is non-zero if the |final| buffer contains plaintext. */ + // final_used is non-zero if the |final| buffer contains plaintext. int final_used; - /* block_mask contains |cipher->block_size| minus one. (The block size - * assumed to be a power of two.) */ + // block_mask contains |cipher->block_size| minus one. (The block size + // assumed to be a power of two.) 
int block_mask; - uint8_t final[EVP_MAX_BLOCK_LENGTH]; /* possible final block */ + uint8_t final[EVP_MAX_BLOCK_LENGTH]; // possible final block } /* EVP_CIPHER_CTX */; typedef struct evp_cipher_info_st { @@ -500,28 +518,28 @@ typedef struct evp_cipher_info_st { } EVP_CIPHER_INFO; struct evp_cipher_st { - /* type contains a NID identifing the cipher. (e.g. NID_aes_128_gcm.) */ + // type contains a NID identifing the cipher. (e.g. NID_aes_128_gcm.) int nid; - /* block_size contains the block size, in bytes, of the cipher, or 1 for a - * stream cipher. */ + // block_size contains the block size, in bytes, of the cipher, or 1 for a + // stream cipher. unsigned block_size; - /* key_len contains the key size, in bytes, for the cipher. If the cipher - * takes a variable key size then this contains the default size. */ + // key_len contains the key size, in bytes, for the cipher. If the cipher + // takes a variable key size then this contains the default size. unsigned key_len; - /* iv_len contains the IV size, in bytes, or zero if inapplicable. */ + // iv_len contains the IV size, in bytes, or zero if inapplicable. unsigned iv_len; - /* ctx_size contains the size, in bytes, of the per-key context for this - * cipher. */ + // ctx_size contains the size, in bytes, of the per-key context for this + // cipher. unsigned ctx_size; - /* flags contains the OR of a number of flags. See |EVP_CIPH_*|. */ + // flags contains the OR of a number of flags. See |EVP_CIPH_*|. uint32_t flags; - /* app_data is a pointer to opaque, user data. */ + // app_data is a pointer to opaque, user data. void *app_data; int (*init)(EVP_CIPHER_CTX *ctx, const uint8_t *key, const uint8_t *iv, @@ -530,9 +548,9 @@ struct evp_cipher_st { int (*cipher)(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in, size_t inl); - /* cleanup, if non-NULL, releases memory associated with the context. It is - * called if |EVP_CTRL_INIT| succeeds. Note that |init| may not have been - * called at this point. */ + // cleanup, if non-NULL, releases memory associated with the context. It is + // called if |EVP_CTRL_INIT| succeeds. Note that |init| may not have been + // called at this point. void (*cleanup)(EVP_CIPHER_CTX *); int (*ctrl)(EVP_CIPHER_CTX *, int type, int arg, void *ptr); @@ -540,7 +558,7 @@ struct evp_cipher_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { @@ -585,5 +603,6 @@ using ScopedEVP_CIPHER_CTX = #define CIPHER_R_UNSUPPORTED_TAG_SIZE 122 #define CIPHER_R_WRONG_FINAL_BLOCK_LENGTH 123 #define CIPHER_R_NO_DIRECTION_SET 124 +#define CIPHER_R_INVALID_NONCE 125 -#endif /* OPENSSL_HEADER_CIPHER_H */ +#endif // OPENSSL_HEADER_CIPHER_H diff --git a/Sources/BoringSSL/include/openssl/cmac.h b/Sources/BoringSSL/include/openssl/cmac.h index 0f05bc935..dfcd37b9f 100644 --- a/Sources/BoringSSL/include/openssl/cmac.h +++ b/Sources/BoringSSL/include/openssl/cmac.h @@ -22,55 +22,55 @@ extern "C" { #endif -/* CMAC. - * - * CMAC is a MAC based on AES-CBC and defined in - * https://tools.ietf.org/html/rfc4493#section-2.3. */ +// CMAC. +// +// CMAC is a MAC based on AES-CBC and defined in +// https://tools.ietf.org/html/rfc4493#section-2.3. -/* One-shot functions. */ +// One-shot functions. -/* AES_CMAC calculates the 16-byte, CMAC authenticator of |in_len| bytes of - * |in| and writes it to |out|. The |key_len| may be 16 or 32 bytes to select - * between AES-128 and AES-256. It returns one on success or zero on error. 
*/ +// AES_CMAC calculates the 16-byte, CMAC authenticator of |in_len| bytes of +// |in| and writes it to |out|. The |key_len| may be 16 or 32 bytes to select +// between AES-128 and AES-256. It returns one on success or zero on error. OPENSSL_EXPORT int AES_CMAC(uint8_t out[16], const uint8_t *key, size_t key_len, const uint8_t *in, size_t in_len); -/* Incremental interface. */ +// Incremental interface. -/* CMAC_CTX_new allocates a fresh |CMAC_CTX| and returns it, or NULL on - * error. */ +// CMAC_CTX_new allocates a fresh |CMAC_CTX| and returns it, or NULL on +// error. OPENSSL_EXPORT CMAC_CTX *CMAC_CTX_new(void); -/* CMAC_CTX_free frees a |CMAC_CTX|. */ +// CMAC_CTX_free frees a |CMAC_CTX|. OPENSSL_EXPORT void CMAC_CTX_free(CMAC_CTX *ctx); -/* CMAC_Init configures |ctx| to use the given |key| and |cipher|. The CMAC RFC - * only specifies the use of AES-128 thus |key_len| should be 16 and |cipher| - * should be |EVP_aes_128_cbc()|. However, this implementation also supports - * AES-256 by setting |key_len| to 32 and |cipher| to |EVP_aes_256_cbc()|. The - * |engine| argument is ignored. - * - * It returns one on success or zero on error. */ +// CMAC_Init configures |ctx| to use the given |key| and |cipher|. The CMAC RFC +// only specifies the use of AES-128 thus |key_len| should be 16 and |cipher| +// should be |EVP_aes_128_cbc()|. However, this implementation also supports +// AES-256 by setting |key_len| to 32 and |cipher| to |EVP_aes_256_cbc()|. The +// |engine| argument is ignored. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int CMAC_Init(CMAC_CTX *ctx, const void *key, size_t key_len, const EVP_CIPHER *cipher, ENGINE *engine); -/* CMAC_Reset resets |ctx| so that a fresh message can be authenticated. */ +// CMAC_Reset resets |ctx| so that a fresh message can be authenticated. OPENSSL_EXPORT int CMAC_Reset(CMAC_CTX *ctx); -/* CMAC_Update processes |in_len| bytes of message from |in|. It returns one on - * success or zero on error. */ +// CMAC_Update processes |in_len| bytes of message from |in|. It returns one on +// success or zero on error. OPENSSL_EXPORT int CMAC_Update(CMAC_CTX *ctx, const uint8_t *in, size_t in_len); -/* CMAC_Final sets |*out_len| to 16 and, if |out| is not NULL, writes 16 bytes - * of authenticator to it. It returns one on success or zero on error. */ +// CMAC_Final sets |*out_len| to 16 and, if |out| is not NULL, writes 16 bytes +// of authenticator to it. It returns one on success or zero on error. OPENSSL_EXPORT int CMAC_Final(CMAC_CTX *ctx, uint8_t *out, size_t *out_len); #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -80,8 +80,8 @@ BORINGSSL_MAKE_DELETER(CMAC_CTX, CMAC_CTX_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif -#endif /* OPENSSL_HEADER_CMAC_H */ +#endif // OPENSSL_HEADER_CMAC_H diff --git a/Sources/BoringSSL/include/openssl/conf.h b/Sources/BoringSSL/include/openssl/conf.h index 8b82fd459..4ffce378a 100644 --- a/Sources/BoringSSL/include/openssl/conf.h +++ b/Sources/BoringSSL/include/openssl/conf.h @@ -67,17 +67,17 @@ extern "C" { #endif -/* Config files look like: - * - * # Comment - * - * # This key is in the default section. - * key=value - * - * [section_name] - * key2=value2 - * - * Config files are represented by a |CONF|. */ +// Config files look like: +// +// # Comment +// +// # This key is in the default section. +// key=value +// +// [section_name] +// key2=value2 +// +// Config files are represented by a |CONF|. 
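A rough usage sketch, illustrative only, assuming the NCONF declarations further below: it loads a file in the format shown above and looks up |key2| in |section_name|. The file name "app.conf" is a hypothetical placeholder.

#include <openssl/conf.h>
#include <stdio.h>

// Hypothetical example: parse "app.conf" and print one value.
static int print_key2(void) {
  long error_line = 0;
  CONF *conf = NCONF_new(NULL);  // |method| must be NULL.
  if (conf == NULL) {
    return 0;
  }
  if (!NCONF_load(conf, "app.conf", &error_line)) {
    fprintf(stderr, "config parse error on line %ld\n", error_line);
    NCONF_free(conf);
    return 0;
  }
  // Passing NULL as the section would query the default section instead.
  const char *value = NCONF_get_string(conf, "section_name", "key2");
  int ok = value != NULL;
  if (ok) {
    printf("key2=%s\n", value);
  }
  NCONF_free(conf);
  return ok;
}
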
struct conf_value_st { char *section; @@ -89,78 +89,76 @@ struct conf_st { LHASH_OF(CONF_VALUE) *data; }; +DEFINE_STACK_OF(CONF_VALUE) + -/* NCONF_new returns a fresh, empty |CONF|, or NULL on error. The |method| - * argument must be NULL. */ +// NCONF_new returns a fresh, empty |CONF|, or NULL on error. The |method| +// argument must be NULL. OPENSSL_EXPORT CONF *NCONF_new(void *method); -/* NCONF_free frees all the data owned by |conf| and then |conf| itself. */ +// NCONF_free frees all the data owned by |conf| and then |conf| itself. OPENSSL_EXPORT void NCONF_free(CONF *conf); -/* NCONF_load parses the file named |filename| and adds the values found to - * |conf|. It returns one on success and zero on error. In the event of an - * error, if |out_error_line| is not NULL, |*out_error_line| is set to the - * number of the line that contained the error. */ +// NCONF_load parses the file named |filename| and adds the values found to +// |conf|. It returns one on success and zero on error. In the event of an +// error, if |out_error_line| is not NULL, |*out_error_line| is set to the +// number of the line that contained the error. int NCONF_load(CONF *conf, const char *filename, long *out_error_line); -/* NCONF_load_bio acts like |NCONF_load| but reads from |bio| rather than from - * a named file. */ +// NCONF_load_bio acts like |NCONF_load| but reads from |bio| rather than from +// a named file. int NCONF_load_bio(CONF *conf, BIO *bio, long *out_error_line); -/* NCONF_get_section returns a stack of values for a given section in |conf|. - * If |section| is NULL, the default section is returned. It returns NULL on - * error. */ +// NCONF_get_section returns a stack of values for a given section in |conf|. +// If |section| is NULL, the default section is returned. It returns NULL on +// error. STACK_OF(CONF_VALUE) *NCONF_get_section(const CONF *conf, const char *section); -/* NCONF_get_string returns the value of the key |name|, in section |section|. - * The |section| argument may be NULL to indicate the default section. It - * returns the value or NULL on error. */ +// NCONF_get_string returns the value of the key |name|, in section |section|. +// The |section| argument may be NULL to indicate the default section. It +// returns the value or NULL on error. const char *NCONF_get_string(const CONF *conf, const char *section, const char *name); -/* Utility functions */ +// Utility functions -/* CONF_parse_list takes a list separated by 'sep' and calls |list_cb| giving - * the start and length of each member, optionally stripping leading and - * trailing whitespace. This can be used to parse comma separated lists for - * example. If |list_cb| returns <= 0, then the iteration is halted and that - * value is returned immediately. Otherwise it returns one. Note that |list_cb| - * may be called on an empty member. */ +// CONF_parse_list takes a list separated by 'sep' and calls |list_cb| giving +// the start and length of each member, optionally stripping leading and +// trailing whitespace. This can be used to parse comma separated lists for +// example. If |list_cb| returns <= 0, then the iteration is halted and that +// value is returned immediately. Otherwise it returns one. Note that |list_cb| +// may be called on an empty member. 
int CONF_parse_list(const char *list, char sep, int remove_whitespace, int (*list_cb)(const char *elem, int len, void *usr), void *arg); -/* Deprecated functions */ +// Deprecated functions -/* These defines do nothing but are provided to make old code easier to - * compile. */ +// These defines do nothing but are provided to make old code easier to +// compile. #define CONF_MFLAGS_DEFAULT_SECTION 0 #define CONF_MFLAGS_IGNORE_MISSING_FILE 0 -typedef struct conf_must_be_null_st CONF_MUST_BE_NULL; - -/* CONF_modules_load_file returns one. |filename| was originally a string, with - * NULL indicating the default. BoringSSL does not support configuration files, - * so this stub emulates the "default" no-op file but intentionally breaks - * compilation of consumers actively attempting to use this subsystem. */ -OPENSSL_EXPORT int CONF_modules_load_file(CONF_MUST_BE_NULL *filename, +// CONF_modules_load_file returns one. BoringSSL is defined to have no config +// file options, thus loading from |filename| always succeeds by doing nothing. +OPENSSL_EXPORT int CONF_modules_load_file(const char *filename, const char *appname, unsigned long flags); -/* CONF_modules_free does nothing. */ +// CONF_modules_free does nothing. OPENSSL_EXPORT void CONF_modules_free(void); -/* OPENSSL_config does nothing. */ -OPENSSL_EXPORT void OPENSSL_config(CONF_MUST_BE_NULL *config_name); +// OPENSSL_config does nothing. +OPENSSL_EXPORT void OPENSSL_config(const char *config_name); -/* OPENSSL_no_config does nothing. */ +// OPENSSL_no_config does nothing. OPENSSL_EXPORT void OPENSSL_no_config(void); #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -170,7 +168,7 @@ BORINGSSL_MAKE_DELETER(CONF, NCONF_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif @@ -180,5 +178,6 @@ BORINGSSL_MAKE_DELETER(CONF, NCONF_free) #define CONF_R_NO_CLOSE_BRACE 103 #define CONF_R_UNABLE_TO_CREATE_NEW_SECTION 104 #define CONF_R_VARIABLE_HAS_NO_VALUE 105 +#define CONF_R_VARIABLE_EXPANSION_TOO_LONG 106 -#endif /* OPENSSL_HEADER_THREAD_H */ +#endif // OPENSSL_HEADER_THREAD_H diff --git a/Sources/BoringSSL/include/openssl/cpu.h b/Sources/BoringSSL/include/openssl/cpu.h index 457a47681..dd95ddc62 100644 --- a/Sources/BoringSSL/include/openssl/cpu.h +++ b/Sources/BoringSSL/include/openssl/cpu.h @@ -68,53 +68,62 @@ extern "C" { #endif -/* Runtime CPU feature support */ +// Runtime CPU feature support #if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) -/* OPENSSL_ia32cap_P contains the Intel CPUID bits when running on an x86 or - * x86-64 system. - * - * Index 0: - * EDX for CPUID where EAX = 1 - * Bit 20 is always zero - * Bit 28 is adjusted to reflect whether the data cache is shared between - * multiple logical cores - * Bit 30 is used to indicate an Intel CPU - * Index 1: - * ECX for CPUID where EAX = 1 - * Bit 11 is used to indicate AMD XOP support, not SDBG - * Index 2: - * EBX for CPUID where EAX = 7 - * Index 3 is set to zero. - * - * Note: the CPUID bits are pre-adjusted for the OSXSAVE bit and the YMM and XMM - * bits in XCR0, so it is not necessary to check those. */ +// OPENSSL_ia32cap_P contains the Intel CPUID bits when running on an x86 or +// x86-64 system. 
+// +// Index 0: +// EDX for CPUID where EAX = 1 +// Bit 20 is always zero +// Bit 28 is adjusted to reflect whether the data cache is shared between +// multiple logical cores +// Bit 30 is used to indicate an Intel CPU +// Index 1: +// ECX for CPUID where EAX = 1 +// Bit 11 is used to indicate AMD XOP support, not SDBG +// Index 2: +// EBX for CPUID where EAX = 7 +// Index 3 is set to zero. +// +// Note: the CPUID bits are pre-adjusted for the OSXSAVE bit and the YMM and XMM +// bits in XCR0, so it is not necessary to check those. extern uint32_t OPENSSL_ia32cap_P[4]; + +#if defined(BORINGSSL_FIPS) +const uint32_t *OPENSSL_ia32cap_get(void); +#else +static inline const uint32_t *OPENSSL_ia32cap_get(void) { + return OPENSSL_ia32cap_P; +} +#endif + #endif #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) #if defined(OPENSSL_APPLE) -/* iOS builds use the static ARM configuration. */ +// iOS builds use the static ARM configuration. #define OPENSSL_STATIC_ARMCAP #endif #if !defined(OPENSSL_STATIC_ARMCAP) -/* CRYPTO_is_NEON_capable_at_runtime returns true if the current CPU has a NEON - * unit. Note that |OPENSSL_armcap_P| also exists and contains the same - * information in a form that's easier for assembly to use. */ +// CRYPTO_is_NEON_capable_at_runtime returns true if the current CPU has a NEON +// unit. Note that |OPENSSL_armcap_P| also exists and contains the same +// information in a form that's easier for assembly to use. OPENSSL_EXPORT char CRYPTO_is_NEON_capable_at_runtime(void); -/* CRYPTO_is_NEON_capable returns true if the current CPU has a NEON unit. If - * this is known statically then it returns one immediately. */ +// CRYPTO_is_NEON_capable returns true if the current CPU has a NEON unit. If +// this is known statically then it returns one immediately. static inline int CRYPTO_is_NEON_capable(void) { - /* Only statically skip the runtime lookup on aarch64. On arm, one CPU is - * known to have a broken NEON unit which is known to fail with on some - * hand-written NEON assembly. For now, continue to apply the workaround even - * when the compiler is instructed to freely emit NEON code. See - * https://crbug.com/341598 and https://crbug.com/606629. */ + // Only statically skip the runtime lookup on aarch64. On arm, one CPU is + // known to have a broken NEON unit which is known to fail with on some + // hand-written NEON assembly. For now, continue to apply the workaround even + // when the compiler is instructed to freely emit NEON code. See + // https://crbug.com/341598 and https://crbug.com/606629. #if defined(__ARM_NEON__) && !defined(OPENSSL_ARM) return 1; #else @@ -123,17 +132,21 @@ static inline int CRYPTO_is_NEON_capable(void) { } #if defined(OPENSSL_ARM) -/* CRYPTO_has_broken_NEON returns one if the current CPU is known to have a - * broken NEON unit. See https://crbug.com/341598. */ +// CRYPTO_has_broken_NEON returns one if the current CPU is known to have a +// broken NEON unit. See https://crbug.com/341598. OPENSSL_EXPORT int CRYPTO_has_broken_NEON(void); + +// CRYPTO_needs_hwcap2_workaround returns one if the ARMv8 AArch32 AT_HWCAP2 +// workaround was needed. See https://crbug.com/boringssl/46. +OPENSSL_EXPORT int CRYPTO_needs_hwcap2_workaround(void); #endif -/* CRYPTO_is_ARMv8_AES_capable returns true if the current CPU supports the - * ARMv8 AES instruction. */ +// CRYPTO_is_ARMv8_AES_capable returns true if the current CPU supports the +// ARMv8 AES instruction. 
int CRYPTO_is_ARMv8_AES_capable(void); -/* CRYPTO_is_ARMv8_PMULL_capable returns true if the current CPU supports the - * ARMv8 PMULL instruction. */ +// CRYPTO_is_ARMv8_PMULL_capable returns true if the current CPU supports the +// ARMv8 PMULL instruction. int CRYPTO_is_ARMv8_PMULL_capable(void); #else @@ -147,7 +160,7 @@ static inline int CRYPTO_is_NEON_capable(void) { } static inline int CRYPTO_is_ARMv8_AES_capable(void) { -#if defined(OPENSSL_STATIC_ARMCAP_AES) +#if defined(OPENSSL_STATIC_ARMCAP_AES) || defined(__ARM_FEATURE_CRYPTO) return 1; #else return 0; @@ -155,27 +168,29 @@ static inline int CRYPTO_is_ARMv8_AES_capable(void) { } static inline int CRYPTO_is_ARMv8_PMULL_capable(void) { -#if defined(OPENSSL_STATIC_ARMCAP_PMULL) +#if defined(OPENSSL_STATIC_ARMCAP_PMULL) || defined(__ARM_FEATURE_CRYPTO) return 1; #else return 0; #endif } -#endif /* OPENSSL_STATIC_ARMCAP */ -#endif /* OPENSSL_ARM || OPENSSL_AARCH64 */ +#endif // OPENSSL_STATIC_ARMCAP +#endif // OPENSSL_ARM || OPENSSL_AARCH64 #if defined(OPENSSL_PPC64LE) -/* CRYPTO_is_PPC64LE_vcrypto_capable returns true iff the current CPU supports - * the Vector.AES category of instructions. */ +// CRYPTO_is_PPC64LE_vcrypto_capable returns true iff the current CPU supports +// the Vector.AES category of instructions. int CRYPTO_is_PPC64LE_vcrypto_capable(void); -#endif /* OPENSSL_PPC64LE */ +extern unsigned long OPENSSL_ppc64le_hwcap2; + +#endif // OPENSSL_PPC64LE #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CPU_H */ +#endif // OPENSSL_HEADER_CPU_H diff --git a/Sources/BoringSSL/include/openssl/crypto.h b/Sources/BoringSSL/include/openssl/crypto.h index 3a7e6b1be..dc87dd2d8 100644 --- a/Sources/BoringSSL/include/openssl/crypto.h +++ b/Sources/BoringSSL/include/openssl/crypto.h @@ -17,12 +17,12 @@ #include -/* Upstream OpenSSL defines |OPENSSL_malloc|, etc., in crypto.h rather than - * mem.h. */ +// Upstream OpenSSL defines |OPENSSL_malloc|, etc., in crypto.h rather than +// mem.h. #include -/* Upstream OpenSSL defines |CRYPTO_LOCK|, etc., in crypto.h rather than - * thread.h. */ +// Upstream OpenSSL defines |CRYPTO_LOCK|, etc., in crypto.h rather than +// thread.h. #include @@ -31,64 +31,88 @@ extern "C" { #endif -/* crypto.h contains functions for initializing the crypto library. */ +// crypto.h contains functions for initializing the crypto library. -/* CRYPTO_library_init initializes the crypto library. It must be called if the - * library is built with BORINGSSL_NO_STATIC_INITIALIZER. Otherwise, it does - * nothing and a static initializer is used instead. It is safe to call this - * function multiple times and concurrently from multiple threads. - * - * On some ARM configurations, this function may require filesystem access and - * should be called before entering a sandbox. */ +// CRYPTO_library_init initializes the crypto library. It must be called if the +// library is built with BORINGSSL_NO_STATIC_INITIALIZER. Otherwise, it does +// nothing and a static initializer is used instead. It is safe to call this +// function multiple times and concurrently from multiple threads. +// +// On some ARM configurations, this function may require filesystem access and +// should be called before entering a sandbox. OPENSSL_EXPORT void CRYPTO_library_init(void); -/* CRYPTO_is_confidential_build returns one if the linked version of BoringSSL - * has been built with the BORINGSSL_CONFIDENTIAL define and zero otherwise. 
- * - * This is used by some consumers to identify whether they are using an - * internal version of BoringSSL. */ +// CRYPTO_is_confidential_build returns one if the linked version of BoringSSL +// has been built with the BORINGSSL_CONFIDENTIAL define and zero otherwise. +// +// This is used by some consumers to identify whether they are using an +// internal version of BoringSSL. OPENSSL_EXPORT int CRYPTO_is_confidential_build(void); -/* CRYPTO_has_asm returns one unless BoringSSL was built with OPENSSL_NO_ASM, - * in which case it returns zero. */ +// CRYPTO_has_asm returns one unless BoringSSL was built with OPENSSL_NO_ASM, +// in which case it returns zero. OPENSSL_EXPORT int CRYPTO_has_asm(void); +// FIPS_mode returns zero unless BoringSSL is built with BORINGSSL_FIPS, in +// which case it returns one. +OPENSSL_EXPORT int FIPS_mode(void); -/* Deprecated functions. */ -/* OPENSSL_VERSION_TEXT contains a string the identifies the version of - * “OpenSSL”. node.js requires a version number in this text. */ -#define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2 (compatible; BoringSSL)" +// Deprecated functions. + +// OPENSSL_VERSION_TEXT contains a string the identifies the version of +// “OpenSSL”. node.js requires a version number in this text. +#define OPENSSL_VERSION_TEXT "OpenSSL 1.1.0 (compatible; BoringSSL)" #define SSLEAY_VERSION 0 -/* SSLeay_version is a compatibility function that returns the string - * "BoringSSL". */ +// SSLeay_version is a compatibility function that returns the string +// "BoringSSL". OPENSSL_EXPORT const char *SSLeay_version(int unused); -/* SSLeay is a compatibility function that returns OPENSSL_VERSION_NUMBER from - * base.h. */ +#define OPENSSL_VERSION 0 + +// OpenSSL_version is a compatibility function that returns the string +// "BoringSSL". +OPENSSL_EXPORT const char *OpenSSL_version(int unused); + +// SSLeay is a compatibility function that returns OPENSSL_VERSION_NUMBER from +// base.h. OPENSSL_EXPORT unsigned long SSLeay(void); -/* CRYPTO_malloc_init returns one. */ +// OpenSSL_version_num is a compatibility function that returns +// OPENSSL_VERSION_NUMBER from base.h. +OPENSSL_EXPORT unsigned long OpenSSL_version_num(void); + +// CRYPTO_malloc_init returns one. OPENSSL_EXPORT int CRYPTO_malloc_init(void); -/* ENGINE_load_builtin_engines does nothing. */ +// ENGINE_load_builtin_engines does nothing. OPENSSL_EXPORT void ENGINE_load_builtin_engines(void); -/* ENGINE_register_all_complete returns one. */ +// ENGINE_register_all_complete returns one. OPENSSL_EXPORT int ENGINE_register_all_complete(void); -/* OPENSSL_load_builtin_modules does nothing. */ +// OPENSSL_load_builtin_modules does nothing. OPENSSL_EXPORT void OPENSSL_load_builtin_modules(void); -/* FIPS_mode returns zero. */ -OPENSSL_EXPORT int FIPS_mode(void); +#define OPENSSL_INIT_NO_LOAD_CRYPTO_STRINGS 0 +#define OPENSSL_INIT_LOAD_CRYPTO_STRINGS 0 +#define OPENSSL_INIT_ADD_ALL_CIPHERS 0 +#define OPENSSL_INIT_ADD_ALL_DIGESTS 0 +#define OPENSSL_INIT_NO_ADD_ALL_CIPHERS 0 +#define OPENSSL_INIT_NO_ADD_ALL_DIGESTS 0 +#define OPENSSL_INIT_LOAD_CONFIG 0 +#define OPENSSL_INIT_NO_LOAD_CONFIG 0 + +// OPENSSL_init_crypto calls |CRYPTO_library_init| and returns one. 
+OPENSSL_EXPORT int OPENSSL_init_crypto(uint64_t opts, + const OPENSSL_INIT_SETTINGS *settings); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_CRYPTO_H */ +#endif // OPENSSL_HEADER_CRYPTO_H diff --git a/Sources/BoringSSL/include/openssl/curve25519.h b/Sources/BoringSSL/include/openssl/curve25519.h index 1bbb69a95..58a181f69 100644 --- a/Sources/BoringSSL/include/openssl/curve25519.h +++ b/Sources/BoringSSL/include/openssl/curve25519.h @@ -22,155 +22,160 @@ extern "C" { #endif -/* Curve25519. - * - * Curve25519 is an elliptic curve. See https://tools.ietf.org/html/rfc7748. */ +// Curve25519. +// +// Curve25519 is an elliptic curve. See https://tools.ietf.org/html/rfc7748. -/* X25519. - * - * X25519 is the Diffie-Hellman primitive built from curve25519. It is - * sometimes referred to as “curve25519”, but “X25519” is a more precise name. - * See http://cr.yp.to/ecdh.html and https://tools.ietf.org/html/rfc7748. */ +// X25519. +// +// X25519 is the Diffie-Hellman primitive built from curve25519. It is +// sometimes referred to as “curve25519”, but “X25519” is a more precise name. +// See http://cr.yp.to/ecdh.html and https://tools.ietf.org/html/rfc7748. #define X25519_PRIVATE_KEY_LEN 32 #define X25519_PUBLIC_VALUE_LEN 32 #define X25519_SHARED_KEY_LEN 32 -/* X25519_keypair sets |out_public_value| and |out_private_key| to a freshly - * generated, public–private key pair. */ +// X25519_keypair sets |out_public_value| and |out_private_key| to a freshly +// generated, public–private key pair. OPENSSL_EXPORT void X25519_keypair(uint8_t out_public_value[32], uint8_t out_private_key[32]); -/* X25519 writes a shared key to |out_shared_key| that is calculated from the - * given private key and the peer's public value. It returns one on success and - * zero on error. - * - * Don't use the shared key directly, rather use a KDF and also include the two - * public values as inputs. */ +// X25519 writes a shared key to |out_shared_key| that is calculated from the +// given private key and the peer's public value. It returns one on success and +// zero on error. +// +// Don't use the shared key directly, rather use a KDF and also include the two +// public values as inputs. OPENSSL_EXPORT int X25519(uint8_t out_shared_key[32], const uint8_t private_key[32], - const uint8_t peers_public_value[32]); + const uint8_t peer_public_value[32]); -/* X25519_public_from_private calculates a Diffie-Hellman public value from the - * given private key and writes it to |out_public_value|. */ +// X25519_public_from_private calculates a Diffie-Hellman public value from the +// given private key and writes it to |out_public_value|. OPENSSL_EXPORT void X25519_public_from_private(uint8_t out_public_value[32], const uint8_t private_key[32]); -/* Ed25519. - * - * Ed25519 is a signature scheme using a twisted-Edwards curve that is - * birationally equivalent to curve25519. */ +// Ed25519. +// +// Ed25519 is a signature scheme using a twisted-Edwards curve that is +// birationally equivalent to curve25519. +// +// Note that, unlike RFC 8032's formulation, our private key representation +// includes a public key suffix to make multiple key signing operations with the +// same key more efficient. The RFC 8032 key private key is referred to in this +// implementation as the "seed" and is the first 32 bytes of our private key. 
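A minimal sketch of the one-shot Ed25519 API declared just below; the message bytes are a placeholder, and the length macros used are the ones defined immediately after this note.

#include <openssl/curve25519.h>

// Hypothetical example: generate a key pair, sign a short message and
// verify the signature. Returns one on success, zero on failure.
static int ed25519_round_trip(void) {
  uint8_t pub[ED25519_PUBLIC_KEY_LEN];
  uint8_t priv[ED25519_PRIVATE_KEY_LEN];  // seed || public key, per the note above.
  uint8_t sig[ED25519_SIGNATURE_LEN];
  static const uint8_t kMsg[] = {'h', 'e', 'l', 'l', 'o'};

  ED25519_keypair(pub, priv);
  if (!ED25519_sign(sig, kMsg, sizeof(kMsg), priv)) {
    return 0;
  }
  return ED25519_verify(kMsg, sizeof(kMsg), sig, pub);
}
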
#define ED25519_PRIVATE_KEY_LEN 64 #define ED25519_PUBLIC_KEY_LEN 32 #define ED25519_SIGNATURE_LEN 64 -/* ED25519_keypair sets |out_public_key| and |out_private_key| to a freshly - * generated, public–private key pair. */ +// ED25519_keypair sets |out_public_key| and |out_private_key| to a freshly +// generated, public–private key pair. OPENSSL_EXPORT void ED25519_keypair(uint8_t out_public_key[32], uint8_t out_private_key[64]); -/* ED25519_sign sets |out_sig| to be a signature of |message_len| bytes from - * |message| using |private_key|. It returns one on success or zero on - * error. */ +// ED25519_sign sets |out_sig| to be a signature of |message_len| bytes from +// |message| using |private_key|. It returns one on success or zero on +// error. OPENSSL_EXPORT int ED25519_sign(uint8_t out_sig[64], const uint8_t *message, size_t message_len, const uint8_t private_key[64]); -/* ED25519_verify returns one iff |signature| is a valid signature, by - * |public_key| of |message_len| bytes from |message|. It returns zero - * otherwise. */ +// ED25519_verify returns one iff |signature| is a valid signature, by +// |public_key| of |message_len| bytes from |message|. It returns zero +// otherwise. OPENSSL_EXPORT int ED25519_verify(const uint8_t *message, size_t message_len, const uint8_t signature[64], const uint8_t public_key[32]); -/* ED25519_keypair_from_seed calculates a public and private key from an - * Ed25519 “seed”. Seed values are not exposed by this API (although they - * happen to be the first 32 bytes of a private key) so this function is for - * interoperating with systems that may store just a seed instead of a full - * private key. */ +// ED25519_keypair_from_seed calculates a public and private key from an +// Ed25519 “seed”. Seed values are not exposed by this API (although they +// happen to be the first 32 bytes of a private key) so this function is for +// interoperating with systems that may store just a seed instead of a full +// private key. OPENSSL_EXPORT void ED25519_keypair_from_seed(uint8_t out_public_key[32], uint8_t out_private_key[64], const uint8_t seed[32]); -/* SPAKE2. - * - * SPAKE2 is a password-authenticated key-exchange. It allows two parties, - * who share a low-entropy secret (i.e. password), to agree on a shared key. - * An attacker can only make one guess of the password per execution of the - * protocol. - * - * See https://tools.ietf.org/html/draft-irtf-cfrg-spake2-02. */ +// SPAKE2. +// +// SPAKE2 is a password-authenticated key-exchange. It allows two parties, +// who share a low-entropy secret (i.e. password), to agree on a shared key. +// An attacker can only make one guess of the password per execution of the +// protocol. +// +// See https://tools.ietf.org/html/draft-irtf-cfrg-spake2-02. -/* spake2_role_t enumerates the different “roles” in SPAKE2. The protocol - * requires that the symmetry of the two parties be broken so one participant - * must be “Alice” and the other be “Bob”. */ +// spake2_role_t enumerates the different “roles” in SPAKE2. The protocol +// requires that the symmetry of the two parties be broken so one participant +// must be “Alice” and the other be “Bob”. enum spake2_role_t { spake2_role_alice, spake2_role_bob, }; -/* SPAKE2_CTX_new creates a new |SPAKE2_CTX| (which can only be used for a - * single execution of the protocol). SPAKE2 requires the symmetry of the two - * parties to be broken which is indicated via |my_role| – each party must pass - * a different value for this argument. 
- * - * The |my_name| and |their_name| arguments allow optional, opaque names to be - * bound into the protocol. For example MAC addresses, hostnames, usernames - * etc. These values are not exposed and can avoid context-confusion attacks - * when a password is shared between several devices. */ +// SPAKE2_CTX_new creates a new |SPAKE2_CTX| (which can only be used for a +// single execution of the protocol). SPAKE2 requires the symmetry of the two +// parties to be broken which is indicated via |my_role| – each party must pass +// a different value for this argument. +// +// The |my_name| and |their_name| arguments allow optional, opaque names to be +// bound into the protocol. For example MAC addresses, hostnames, usernames +// etc. These values are not exposed and can avoid context-confusion attacks +// when a password is shared between several devices. OPENSSL_EXPORT SPAKE2_CTX *SPAKE2_CTX_new( enum spake2_role_t my_role, const uint8_t *my_name, size_t my_name_len, const uint8_t *their_name, size_t their_name_len); -/* SPAKE2_CTX_free frees |ctx| and all the resources that it has allocated. */ +// SPAKE2_CTX_free frees |ctx| and all the resources that it has allocated. OPENSSL_EXPORT void SPAKE2_CTX_free(SPAKE2_CTX *ctx); -/* SPAKE2_MAX_MSG_SIZE is the maximum size of a SPAKE2 message. */ +// SPAKE2_MAX_MSG_SIZE is the maximum size of a SPAKE2 message. #define SPAKE2_MAX_MSG_SIZE 32 -/* SPAKE2_generate_msg generates a SPAKE2 message given |password|, writes - * it to |out| and sets |*out_len| to the number of bytes written. - * - * At most |max_out_len| bytes are written to |out| and, in order to ensure - * success, |max_out_len| should be at least |SPAKE2_MAX_MSG_SIZE| bytes. - * - * This function can only be called once for a given |SPAKE2_CTX|. - * - * It returns one on success and zero on error. */ +// SPAKE2_generate_msg generates a SPAKE2 message given |password|, writes +// it to |out| and sets |*out_len| to the number of bytes written. +// +// At most |max_out_len| bytes are written to |out| and, in order to ensure +// success, |max_out_len| should be at least |SPAKE2_MAX_MSG_SIZE| bytes. +// +// This function can only be called once for a given |SPAKE2_CTX|. +// +// It returns one on success and zero on error. OPENSSL_EXPORT int SPAKE2_generate_msg(SPAKE2_CTX *ctx, uint8_t *out, size_t *out_len, size_t max_out_len, const uint8_t *password, size_t password_len); -/* SPAKE2_MAX_KEY_SIZE is the maximum amount of key material that SPAKE2 will - * produce. */ +// SPAKE2_MAX_KEY_SIZE is the maximum amount of key material that SPAKE2 will +// produce. #define SPAKE2_MAX_KEY_SIZE 64 -/* SPAKE2_process_msg completes the SPAKE2 exchange given the peer's message in - * |their_msg|, writes at most |max_out_key_len| bytes to |out_key| and sets - * |*out_key_len| to the number of bytes written. - * - * The resulting keying material is suitable for: - * a) Using directly in a key-confirmation step: i.e. each side could - * transmit a hash of their role, a channel-binding value and the key - * material to prove to the other side that they know the shared key. - * b) Using as input keying material to HKDF to generate a variety of subkeys - * for encryption etc. - * - * If |max_out_key_key| is smaller than the amount of key material generated - * then the key is silently truncated. If you want to ensure that no truncation - * occurs then |max_out_key| should be at least |SPAKE2_MAX_KEY_SIZE|. - * - * You must call |SPAKE2_generate_msg| on a given |SPAKE2_CTX| before calling - * this function. 
On successful return, |ctx| is complete and calling - * |SPAKE2_CTX_free| is the only acceptable operation on it. - * - * Returns one on success or zero on error. */ +// SPAKE2_process_msg completes the SPAKE2 exchange given the peer's message in +// |their_msg|, writes at most |max_out_key_len| bytes to |out_key| and sets +// |*out_key_len| to the number of bytes written. +// +// The resulting keying material is suitable for: +// a) Using directly in a key-confirmation step: i.e. each side could +// transmit a hash of their role, a channel-binding value and the key +// material to prove to the other side that they know the shared key. +// b) Using as input keying material to HKDF to generate a variety of subkeys +// for encryption etc. +// +// If |max_out_key_key| is smaller than the amount of key material generated +// then the key is silently truncated. If you want to ensure that no truncation +// occurs then |max_out_key| should be at least |SPAKE2_MAX_KEY_SIZE|. +// +// You must call |SPAKE2_generate_msg| on a given |SPAKE2_CTX| before calling +// this function. On successful return, |ctx| is complete and calling +// |SPAKE2_CTX_free| is the only acceptable operation on it. +// +// Returns one on success or zero on error. OPENSSL_EXPORT int SPAKE2_process_msg(SPAKE2_CTX *ctx, uint8_t *out_key, size_t *out_key_len, size_t max_out_key_len, @@ -179,7 +184,7 @@ OPENSSL_EXPORT int SPAKE2_process_msg(SPAKE2_CTX *ctx, uint8_t *out_key, #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -189,8 +194,8 @@ BORINGSSL_MAKE_DELETER(SPAKE2_CTX, SPAKE2_CTX_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif -#endif /* OPENSSL_HEADER_CURVE25519_H */ +#endif // OPENSSL_HEADER_CURVE25519_H diff --git a/Sources/BoringSSL/include/openssl/des.h b/Sources/BoringSSL/include/openssl/des.h index 2b8dd0f62..af1c822dd 100644 --- a/Sources/BoringSSL/include/openssl/des.h +++ b/Sources/BoringSSL/include/openssl/des.h @@ -64,7 +64,7 @@ extern "C" { #endif -/* DES. */ +// DES. typedef struct DES_cblock_st { @@ -85,30 +85,30 @@ typedef struct DES_ks { #define DES_CBC_MODE 0 #define DES_PCBC_MODE 1 -/* DES_set_key performs a key schedule and initialises |schedule| with |key|. */ +// DES_set_key performs a key schedule and initialises |schedule| with |key|. OPENSSL_EXPORT void DES_set_key(const DES_cblock *key, DES_key_schedule *schedule); -/* DES_set_odd_parity sets the parity bits (the least-significant bits in each - * byte) of |key| given the other bits in each byte. */ +// DES_set_odd_parity sets the parity bits (the least-significant bits in each +// byte) of |key| given the other bits in each byte. OPENSSL_EXPORT void DES_set_odd_parity(DES_cblock *key); -/* DES_ecb_encrypt encrypts (or decrypts, if |is_encrypt| is |DES_DECRYPT|) a - * single DES block (8 bytes) from in to out, using the key configured in - * |schedule|. */ +// DES_ecb_encrypt encrypts (or decrypts, if |is_encrypt| is |DES_DECRYPT|) a +// single DES block (8 bytes) from in to out, using the key configured in +// |schedule|. OPENSSL_EXPORT void DES_ecb_encrypt(const DES_cblock *in, DES_cblock *out, const DES_key_schedule *schedule, int is_encrypt); -/* DES_ncbc_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) |len| - * bytes from |in| to |out| with DES in CBC mode. */ +// DES_ncbc_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) |len| +// bytes from |in| to |out| with DES in CBC mode. 
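/* Usage sketch: single-shot DES-CBC encryption of a whole buffer with
 * |DES_set_key| and |DES_ncbc_encrypt| as documented in this header. The
 * helper name and key/IV handling are illustrative only; |len| must be a
 * multiple of the 8-byte DES block size, and DES itself should only appear in
 * legacy protocols. */
#include <string.h>

#include <openssl/des.h>

static void des_cbc_encrypt_example(const uint8_t raw_key[8],
                                    const uint8_t raw_iv[8], const uint8_t *in,
                                    size_t len, uint8_t *out) {
  DES_cblock key, iv;
  DES_key_schedule schedule;
  memcpy(&key, raw_key, sizeof(key));
  memcpy(&iv, raw_iv, sizeof(iv));
  DES_set_key(&key, &schedule);
  /* |iv| is updated in place, so consecutive calls can continue a stream. */
  DES_ncbc_encrypt(in, out, len, &schedule, &iv, DES_ENCRYPT);
}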
OPENSSL_EXPORT void DES_ncbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const DES_key_schedule *schedule, DES_cblock *ivec, int enc); -/* DES_ecb3_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) a single - * block (8 bytes) of data from |input| to |output| using 3DES. */ +// DES_ecb3_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) a single +// block (8 bytes) of data from |input| to |output| using 3DES. OPENSSL_EXPORT void DES_ecb3_encrypt(const DES_cblock *input, DES_cblock *output, const DES_key_schedule *ks1, @@ -116,9 +116,9 @@ OPENSSL_EXPORT void DES_ecb3_encrypt(const DES_cblock *input, const DES_key_schedule *ks3, int enc); -/* DES_ede3_cbc_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) |len| - * bytes from |in| to |out| with 3DES in CBC mode. 3DES uses three keys, thus - * the function takes three different |DES_key_schedule|s. */ +// DES_ede3_cbc_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) |len| +// bytes from |in| to |out| with 3DES in CBC mode. 3DES uses three keys, thus +// the function takes three different |DES_key_schedule|s. OPENSSL_EXPORT void DES_ede3_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const DES_key_schedule *ks1, @@ -126,10 +126,10 @@ OPENSSL_EXPORT void DES_ede3_cbc_encrypt(const uint8_t *in, uint8_t *out, const DES_key_schedule *ks3, DES_cblock *ivec, int enc); -/* DES_ede2_cbc_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) |len| - * bytes from |in| to |out| with 3DES in CBC mode. With this keying option, the - * first and third 3DES keys are identical. Thus, this function takes only two - * different |DES_key_schedule|s. */ +// DES_ede2_cbc_encrypt encrypts (or decrypts, if |enc| is |DES_DECRYPT|) |len| +// bytes from |in| to |out| with 3DES in CBC mode. With this keying option, the +// first and third 3DES keys are identical. Thus, this function takes only two +// different |DES_key_schedule|s. OPENSSL_EXPORT void DES_ede2_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len, const DES_key_schedule *ks1, @@ -137,9 +137,9 @@ OPENSSL_EXPORT void DES_ede2_cbc_encrypt(const uint8_t *in, uint8_t *out, DES_cblock *ivec, int enc); -/* Deprecated functions. */ +// Deprecated functions. -/* DES_set_key_unchecked calls |DES_set_key|. */ +// DES_set_key_unchecked calls |DES_set_key|. OPENSSL_EXPORT void DES_set_key_unchecked(const DES_cblock *key, DES_key_schedule *schedule); @@ -157,9 +157,9 @@ OPENSSL_EXPORT void DES_ede3_cfb_encrypt(const uint8_t *in, uint8_t *out, DES_cblock *ivec, int enc); -/* Private functions. - * - * These functions are only exported for use in |decrepit|. */ +// Private functions. +// +// These functions are only exported for use in |decrepit|. OPENSSL_EXPORT void DES_decrypt3(uint32_t *data, const DES_key_schedule *ks1, const DES_key_schedule *ks2, @@ -171,7 +171,7 @@ OPENSSL_EXPORT void DES_encrypt3(uint32_t *data, const DES_key_schedule *ks1, #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_DES_H */ +#endif // OPENSSL_HEADER_DES_H diff --git a/Sources/BoringSSL/include/openssl/dh.h b/Sources/BoringSSL/include/openssl/dh.h index ed2396d1e..ae24c25d1 100644 --- a/Sources/BoringSSL/include/openssl/dh.h +++ b/Sources/BoringSSL/include/openssl/dh.h @@ -59,7 +59,6 @@ #include -#include #include #include @@ -68,92 +67,94 @@ extern "C" { #endif -/* DH contains functions for performing Diffie-Hellman key agreement in - * multiplicative groups. 
*/ +// DH contains functions for performing Diffie-Hellman key agreement in +// multiplicative groups. -/* Allocation and destruction. */ +// Allocation and destruction. -/* DH_new returns a new, empty DH object or NULL on error. */ +// DH_new returns a new, empty DH object or NULL on error. OPENSSL_EXPORT DH *DH_new(void); -/* DH_free decrements the reference count of |dh| and frees it if the reference - * count drops to zero. */ +// DH_free decrements the reference count of |dh| and frees it if the reference +// count drops to zero. OPENSSL_EXPORT void DH_free(DH *dh); -/* DH_up_ref increments the reference count of |dh| and returns one. */ +// DH_up_ref increments the reference count of |dh| and returns one. OPENSSL_EXPORT int DH_up_ref(DH *dh); -/* Properties. */ +// Properties. -/* DH_get0_key sets |*out_pub_key| and |*out_priv_key|, if non-NULL, to |dh|'s - * public and private key, respectively. If |dh| is a public key, the private - * key will be set to NULL. */ +// DH_get0_key sets |*out_pub_key| and |*out_priv_key|, if non-NULL, to |dh|'s +// public and private key, respectively. If |dh| is a public key, the private +// key will be set to NULL. OPENSSL_EXPORT void DH_get0_key(const DH *dh, const BIGNUM **out_pub_key, const BIGNUM **out_priv_key); -/* DH_get0_pqg sets |*out_p|, |*out_q|, and |*out_g|, if non-NULL, to |dh|'s p, - * q, and g parameters, respectively. */ +// DH_set0_key sets |dh|'s public and private key to the specified values. If +// NULL, the field is left unchanged. On success, it takes ownership of each +// argument and returns one. Otherwise, it returns zero. +OPENSSL_EXPORT int DH_set0_key(DH *dh, BIGNUM *pub_key, BIGNUM *priv_key); + +// DH_get0_pqg sets |*out_p|, |*out_q|, and |*out_g|, if non-NULL, to |dh|'s p, +// q, and g parameters, respectively. OPENSSL_EXPORT void DH_get0_pqg(const DH *dh, const BIGNUM **out_p, const BIGNUM **out_q, const BIGNUM **out_g); +// DH_set0_pqg sets |dh|'s p, q, and g parameters to the specified values. If +// NULL, the field is left unchanged. On success, it takes ownership of each +// argument and returns one. Otherwise, it returns zero. |q| may be NULL, but +// |p| and |g| must either be specified or already configured on |dh|. +OPENSSL_EXPORT int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g); -/* Standard parameters. - * - * These functions return new DH objects with standard parameters. They return - * NULL on allocation failure. The |engine| parameter is ignored. */ - -/* These parameters are taken from RFC 5114. */ -OPENSSL_EXPORT DH *DH_get_1024_160(const ENGINE *engine); -OPENSSL_EXPORT DH *DH_get_2048_224(const ENGINE *engine); -OPENSSL_EXPORT DH *DH_get_2048_256(const ENGINE *engine); +// Standard parameters. -/* BN_get_rfc3526_prime_1536 sets |*ret| to the 1536-bit MODP group from RFC - * 3526 and returns |ret|. If |ret| is NULL then a fresh |BIGNUM| is allocated - * and returned. It returns NULL on allocation failure. */ +// BN_get_rfc3526_prime_1536 sets |*ret| to the 1536-bit MODP group from RFC +// 3526 and returns |ret|. If |ret| is NULL then a fresh |BIGNUM| is allocated +// and returned. It returns NULL on allocation failure. OPENSSL_EXPORT BIGNUM *BN_get_rfc3526_prime_1536(BIGNUM *ret); -/* Parameter generation. */ +// Parameter generation. #define DH_GENERATOR_2 2 #define DH_GENERATOR_5 5 -/* DH_generate_parameters_ex generates a suitable Diffie-Hellman group with a - * prime that is |prime_bits| long and stores it in |dh|. 
The generator of the - * group will be |generator|, which should be |DH_GENERATOR_2| unless there's a - * good reason to use a different value. The |cb| argument contains a callback - * function that will be called during the generation. See the documentation in - * |bn.h| about this. In addition to the callback invocations from |BN|, |cb| - * will also be called with |event| equal to three when the generation is - * complete. */ +// DH_generate_parameters_ex generates a suitable Diffie-Hellman group with a +// prime that is |prime_bits| long and stores it in |dh|. The generator of the +// group will be |generator|, which should be |DH_GENERATOR_2| unless there's a +// good reason to use a different value. The |cb| argument contains a callback +// function that will be called during the generation. See the documentation in +// |bn.h| about this. In addition to the callback invocations from |BN|, |cb| +// will also be called with |event| equal to three when the generation is +// complete. OPENSSL_EXPORT int DH_generate_parameters_ex(DH *dh, int prime_bits, int generator, BN_GENCB *cb); -/* Diffie-Hellman operations. */ +// Diffie-Hellman operations. -/* DH_generate_key generates a new, random, private key and stores it in - * |dh|. It returns one on success and zero on error. */ +// DH_generate_key generates a new, random, private key and stores it in +// |dh|. It returns one on success and zero on error. OPENSSL_EXPORT int DH_generate_key(DH *dh); -/* DH_compute_key calculates the shared key between |dh| and |peers_key| and - * writes it as a big-endian integer into |out|, which must have |DH_size| - * bytes of space. It returns the number of bytes written, or a negative number - * on error. */ +// DH_compute_key calculates the shared key between |dh| and |peers_key| and +// writes it as a big-endian integer into |out|, which must have |DH_size| +// bytes of space. It returns the number of bytes written, or a negative number +// on error. OPENSSL_EXPORT int DH_compute_key(uint8_t *out, const BIGNUM *peers_key, DH *dh); -/* Utility functions. */ +// Utility functions. -/* DH_size returns the number of bytes in the DH group's prime. */ +// DH_size returns the number of bytes in the DH group's prime. OPENSSL_EXPORT int DH_size(const DH *dh); -/* DH_num_bits returns the minimum number of bits needed to represent the - * absolute value of the DH group's prime. */ +// DH_num_bits returns the minimum number of bits needed to represent the +// absolute value of the DH group's prime. OPENSSL_EXPORT unsigned DH_num_bits(const DH *dh); #define DH_CHECK_P_NOT_PRIME 0x01 @@ -164,102 +165,102 @@ OPENSSL_EXPORT unsigned DH_num_bits(const DH *dh); #define DH_CHECK_INVALID_Q_VALUE 0x20 #define DH_CHECK_INVALID_J_VALUE 0x40 -/* These are compatibility defines. */ +// These are compatibility defines. #define DH_NOT_SUITABLE_GENERATOR DH_CHECK_NOT_SUITABLE_GENERATOR #define DH_UNABLE_TO_CHECK_GENERATOR DH_CHECK_UNABLE_TO_CHECK_GENERATOR -/* DH_check checks the suitability of |dh| as a Diffie-Hellman group. and sets - * |DH_CHECK_*| flags in |*out_flags| if it finds any errors. It returns one if - * |*out_flags| was successfully set and zero on error. - * - * Note: these checks may be quite computationally expensive. */ +// DH_check checks the suitability of |dh| as a Diffie-Hellman group. and sets +// |DH_CHECK_*| flags in |*out_flags| if it finds any errors. It returns one if +// |*out_flags| was successfully set and zero on error. +// +// Note: these checks may be quite computationally expensive. 
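/* Usage sketch: ephemeral Diffie-Hellman agreement with the functions
 * documented in this header (|DH_generate_parameters_ex|, |DHparams_dup|,
 * |DH_generate_key|, |DH_compute_key|). The helper name is illustrative, and
 * fresh parameter generation is slow; real deployments usually load a fixed,
 * well-known group instead. */
#include <openssl/dh.h>
#include <openssl/mem.h>

static int dh_agree_example(uint8_t **out_secret, int *out_secret_len) {
  int ret = 0;
  uint8_t *secret = NULL;
  const BIGNUM *bob_pub = NULL;
  DH *bob = NULL;
  DH *alice = DH_new();

  if (alice == NULL ||
      /* Generate a fresh 2048-bit group; this can take a while. */
      !DH_generate_parameters_ex(alice, 2048, DH_GENERATOR_2, NULL)) {
    goto err;
  }
  bob = DHparams_dup(alice); /* Both sides must share p and g. */
  if (bob == NULL || !DH_generate_key(alice) || !DH_generate_key(bob)) {
    goto err;
  }
  DH_get0_key(bob, &bob_pub, NULL);
  secret = OPENSSL_malloc(DH_size(alice));
  if (secret == NULL) {
    goto err;
  }
  *out_secret_len = DH_compute_key(secret, bob_pub, alice);
  if (*out_secret_len < 0) {
    goto err;
  }
  *out_secret = secret; /* Caller releases with |OPENSSL_free|. */
  secret = NULL;
  ret = 1;

err:
  OPENSSL_free(secret);
  DH_free(alice);
  DH_free(bob);
  return ret;
}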
OPENSSL_EXPORT int DH_check(const DH *dh, int *out_flags); #define DH_CHECK_PUBKEY_TOO_SMALL 0x1 #define DH_CHECK_PUBKEY_TOO_LARGE 0x2 #define DH_CHECK_PUBKEY_INVALID 0x4 -/* DH_check_pub_key checks the suitability of |pub_key| as a public key for the - * DH group in |dh| and sets |DH_CHECK_PUBKEY_*| flags in |*out_flags| if it - * finds any errors. It returns one if |*out_flags| was successfully set and - * zero on error. */ +// DH_check_pub_key checks the suitability of |pub_key| as a public key for the +// DH group in |dh| and sets |DH_CHECK_PUBKEY_*| flags in |*out_flags| if it +// finds any errors. It returns one if |*out_flags| was successfully set and +// zero on error. OPENSSL_EXPORT int DH_check_pub_key(const DH *dh, const BIGNUM *pub_key, int *out_flags); -/* DHparams_dup allocates a fresh |DH| and copies the parameters from |dh| into - * it. It returns the new |DH| or NULL on error. */ +// DHparams_dup allocates a fresh |DH| and copies the parameters from |dh| into +// it. It returns the new |DH| or NULL on error. OPENSSL_EXPORT DH *DHparams_dup(const DH *dh); -/* ASN.1 functions. */ +// ASN.1 functions. -/* DH_parse_parameters decodes a DER-encoded DHParameter structure (PKCS #3) - * from |cbs| and advances |cbs|. It returns a newly-allocated |DH| or NULL on - * error. */ +// DH_parse_parameters decodes a DER-encoded DHParameter structure (PKCS #3) +// from |cbs| and advances |cbs|. It returns a newly-allocated |DH| or NULL on +// error. OPENSSL_EXPORT DH *DH_parse_parameters(CBS *cbs); -/* DH_marshal_parameters marshals |dh| as a DER-encoded DHParameter structure - * (PKCS #3) and appends the result to |cbb|. It returns one on success and zero - * on error. */ +// DH_marshal_parameters marshals |dh| as a DER-encoded DHParameter structure +// (PKCS #3) and appends the result to |cbb|. It returns one on success and zero +// on error. OPENSSL_EXPORT int DH_marshal_parameters(CBB *cbb, const DH *dh); -/* ex_data functions. - * - * See |ex_data.h| for details. */ +// ex_data functions. +// +// See |ex_data.h| for details. OPENSSL_EXPORT int DH_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int DH_set_ex_data(DH *d, int idx, void *arg); OPENSSL_EXPORT void *DH_get_ex_data(DH *d, int idx); -/* Deprecated functions. */ +// Deprecated functions. -/* DH_generate_parameters behaves like |DH_generate_parameters_ex|, which is - * what you should use instead. It returns NULL on error, or a newly-allocated - * |DH| on success. This function is provided for compatibility only. */ +// DH_generate_parameters behaves like |DH_generate_parameters_ex|, which is +// what you should use instead. It returns NULL on error, or a newly-allocated +// |DH| on success. This function is provided for compatibility only. OPENSSL_EXPORT DH *DH_generate_parameters(int prime_len, int generator, void (*callback)(int, int, void *), void *cb_arg); -/* d2i_DHparams parses an ASN.1, DER encoded Diffie-Hellman parameters structure - * from |len| bytes at |*inp|. If |ret| is not NULL then, on exit, a pointer to - * the result is in |*ret|. Note that, even if |*ret| is already non-NULL on - * entry, it will not be written to. Rather, a fresh |DH| is allocated and the - * previous one is freed. - * - * On successful exit, |*inp| is advanced past the DER structure. It - * returns the result or NULL on error. - * - * Use |DH_parse_parameters| instead. 
*/ +// d2i_DHparams parses an ASN.1, DER encoded Diffie-Hellman parameters structure +// from |len| bytes at |*inp|. If |ret| is not NULL then, on exit, a pointer to +// the result is in |*ret|. Note that, even if |*ret| is already non-NULL on +// entry, it will not be written to. Rather, a fresh |DH| is allocated and the +// previous one is freed. +// +// On successful exit, |*inp| is advanced past the DER structure. It +// returns the result or NULL on error. +// +// Use |DH_parse_parameters| instead. OPENSSL_EXPORT DH *d2i_DHparams(DH **ret, const unsigned char **inp, long len); -/* i2d_DHparams marshals |in| to an ASN.1, DER structure. If |outp| is not NULL - * then the result is written to |*outp| and |*outp| is advanced just past the - * output. It returns the number of bytes in the result, whether written or - * not, or a negative value on error. - * - * Use |DH_marshal_parameters| instead. */ +// i2d_DHparams marshals |in| to an ASN.1, DER structure. If |outp| is not NULL +// then the result is written to |*outp| and |*outp| is advanced just past the +// output. It returns the number of bytes in the result, whether written or +// not, or a negative value on error. +// +// Use |DH_marshal_parameters| instead. OPENSSL_EXPORT int i2d_DHparams(const DH *in, unsigned char **outp); struct dh_st { BIGNUM *p; BIGNUM *g; - BIGNUM *pub_key; /* g^x mod p */ - BIGNUM *priv_key; /* x */ + BIGNUM *pub_key; // g^x mod p + BIGNUM *priv_key; // x - /* priv_length contains the length, in bits, of the private value. If zero, - * the private value will be the same length as |p|. */ + // priv_length contains the length, in bits, of the private value. If zero, + // the private value will be the same length as |p|. unsigned priv_length; CRYPTO_MUTEX method_mont_p_lock; BN_MONT_CTX *method_mont_p; - /* Place holders if we want to do X9.42 DH */ + // Place holders if we want to do X9.42 DH BIGNUM *q; BIGNUM *j; unsigned char *seed; @@ -273,7 +274,7 @@ struct dh_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -283,7 +284,7 @@ BORINGSSL_MAKE_DELETER(DH, DH_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif @@ -294,4 +295,4 @@ BORINGSSL_MAKE_DELETER(DH, DH_free) #define DH_R_DECODE_ERROR 104 #define DH_R_ENCODE_ERROR 105 -#endif /* OPENSSL_HEADER_DH_H */ +#endif // OPENSSL_HEADER_DH_H diff --git a/Sources/BoringSSL/include/openssl/digest.h b/Sources/BoringSSL/include/openssl/digest.h index 87de3dfe4..81f58925e 100644 --- a/Sources/BoringSSL/include/openssl/digest.h +++ b/Sources/BoringSSL/include/openssl/digest.h @@ -64,17 +64,17 @@ extern "C" { #endif -/* Digest functions. - * - * An EVP_MD abstracts the details of a specific hash function allowing code to - * deal with the concept of a "hash function" without needing to know exactly - * which hash function it is. */ +// Digest functions. +// +// An EVP_MD abstracts the details of a specific hash function allowing code to +// deal with the concept of a "hash function" without needing to know exactly +// which hash function it is. -/* Hash algorithms. - * - * The following functions return |EVP_MD| objects that implement the named hash - * function. */ +// Hash algorithms. +// +// The following functions return |EVP_MD| objects that implement the named hash +// function. 
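/* Usage sketch: streaming SHA-256 with the digest-context API documented
 * further down in this header (|EVP_MD_CTX_new|, |EVP_DigestInit_ex|,
 * |EVP_DigestUpdate|, |EVP_DigestFinal_ex|). For a single buffer, the one-shot
 * |EVP_Digest| function below does the same work in one call. The helper name
 * is illustrative. */
#include <openssl/digest.h>

static int sha256_stream_example(const uint8_t *part1, size_t len1,
                                 const uint8_t *part2, size_t len2,
                                 uint8_t out[EVP_MAX_MD_SIZE],
                                 unsigned *out_len) {
  int ok = 0;
  EVP_MD_CTX *ctx = EVP_MD_CTX_new();
  if (ctx == NULL) {
    return 0;
  }
  if (EVP_DigestInit_ex(ctx, EVP_sha256(), NULL) &&
      EVP_DigestUpdate(ctx, part1, len1) &&
      EVP_DigestUpdate(ctx, part2, len2) &&
      EVP_DigestFinal_ex(ctx, out, out_len)) {
    ok = 1;
  }
  EVP_MD_CTX_free(ctx);
  return ok;
}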
OPENSSL_EXPORT const EVP_MD *EVP_md4(void); OPENSSL_EXPORT const EVP_MD *EVP_md5(void); @@ -84,192 +84,219 @@ OPENSSL_EXPORT const EVP_MD *EVP_sha256(void); OPENSSL_EXPORT const EVP_MD *EVP_sha384(void); OPENSSL_EXPORT const EVP_MD *EVP_sha512(void); -/* EVP_md5_sha1 is a TLS-specific |EVP_MD| which computes the concatenation of - * MD5 and SHA-1, as used in TLS 1.1 and below. */ +// EVP_md5_sha1 is a TLS-specific |EVP_MD| which computes the concatenation of +// MD5 and SHA-1, as used in TLS 1.1 and below. OPENSSL_EXPORT const EVP_MD *EVP_md5_sha1(void); -/* EVP_get_digestbynid returns an |EVP_MD| for the given NID, or NULL if no - * such digest is known. */ +// EVP_get_digestbynid returns an |EVP_MD| for the given NID, or NULL if no +// such digest is known. OPENSSL_EXPORT const EVP_MD *EVP_get_digestbynid(int nid); -/* EVP_get_digestbyobj returns an |EVP_MD| for the given |ASN1_OBJECT|, or NULL - * if no such digest is known. */ +// EVP_get_digestbyobj returns an |EVP_MD| for the given |ASN1_OBJECT|, or NULL +// if no such digest is known. OPENSSL_EXPORT const EVP_MD *EVP_get_digestbyobj(const ASN1_OBJECT *obj); -/* Digest contexts. - * - * An EVP_MD_CTX represents the state of a specific digest operation in - * progress. */ +// Digest contexts. +// +// An EVP_MD_CTX represents the state of a specific digest operation in +// progress. -/* EVP_MD_CTX_init initialises an, already allocated, |EVP_MD_CTX|. This is the - * same as setting the structure to zero. */ +// EVP_MD_CTX_init initialises an, already allocated, |EVP_MD_CTX|. This is the +// same as setting the structure to zero. OPENSSL_EXPORT void EVP_MD_CTX_init(EVP_MD_CTX *ctx); -/* EVP_MD_CTX_create allocates and initialises a fresh |EVP_MD_CTX| and returns - * it, or NULL on allocation failure. */ -OPENSSL_EXPORT EVP_MD_CTX *EVP_MD_CTX_create(void); +// EVP_MD_CTX_new allocates and initialises a fresh |EVP_MD_CTX| and returns +// it, or NULL on allocation failure. The caller must use |EVP_MD_CTX_free| to +// release the resulting object. +OPENSSL_EXPORT EVP_MD_CTX *EVP_MD_CTX_new(void); -/* EVP_MD_CTX_cleanup frees any resources owned by |ctx| and resets it to a - * freshly initialised state. It does not free |ctx| itself. It returns one. */ +// EVP_MD_CTX_cleanup frees any resources owned by |ctx| and resets it to a +// freshly initialised state. It does not free |ctx| itself. It returns one. OPENSSL_EXPORT int EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx); -/* EVP_MD_CTX_destroy calls |EVP_MD_CTX_cleanup| and then frees |ctx| itself. */ -OPENSSL_EXPORT void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx); +// EVP_MD_CTX_free calls |EVP_MD_CTX_cleanup| and then frees |ctx| itself. +OPENSSL_EXPORT void EVP_MD_CTX_free(EVP_MD_CTX *ctx); -/* EVP_MD_CTX_copy_ex sets |out|, which must already be initialised, to be a - * copy of |in|. It returns one on success and zero on error. */ +// EVP_MD_CTX_copy_ex sets |out|, which must already be initialised, to be a +// copy of |in|. It returns one on success and zero on error. OPENSSL_EXPORT int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in); +// EVP_MD_CTX_reset calls |EVP_MD_CTX_cleanup| followed by |EVP_MD_CTX_init|. +OPENSSL_EXPORT void EVP_MD_CTX_reset(EVP_MD_CTX *ctx); + -/* Digest operations. */ +// Digest operations. -/* EVP_DigestInit_ex configures |ctx|, which must already have been - * initialised, for a fresh hashing operation using |type|. It returns one on - * success and zero otherwise. 
*/ +// EVP_DigestInit_ex configures |ctx|, which must already have been +// initialised, for a fresh hashing operation using |type|. It returns one on +// success and zero otherwise. OPENSSL_EXPORT int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *engine); -/* EVP_DigestInit acts like |EVP_DigestInit_ex| except that |ctx| is - * initialised before use. */ +// EVP_DigestInit acts like |EVP_DigestInit_ex| except that |ctx| is +// initialised before use. OPENSSL_EXPORT int EVP_DigestInit(EVP_MD_CTX *ctx, const EVP_MD *type); -/* EVP_DigestUpdate hashes |len| bytes from |data| into the hashing operation - * in |ctx|. It returns one. */ +// EVP_DigestUpdate hashes |len| bytes from |data| into the hashing operation +// in |ctx|. It returns one. OPENSSL_EXPORT int EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); -/* EVP_MAX_MD_SIZE is the largest digest size supported, in bytes. - * Functions that output a digest generally require the buffer have - * at least this much space. */ -#define EVP_MAX_MD_SIZE 64 /* SHA-512 is the longest so far. */ - -/* EVP_MAX_MD_BLOCK_SIZE is the largest digest block size supported, in - * bytes. */ -#define EVP_MAX_MD_BLOCK_SIZE 128 /* SHA-512 is the longest so far. */ - -/* EVP_DigestFinal_ex finishes the digest in |ctx| and writes the output to - * |md_out|. |EVP_MD_CTX_size| bytes are written, which is at most - * |EVP_MAX_MD_SIZE|. If |out_size| is not NULL then |*out_size| is set to the - * number of bytes written. It returns one. After this call, the hash cannot be - * updated or finished again until |EVP_DigestInit_ex| is called to start - * another hashing operation. */ +// EVP_MAX_MD_SIZE is the largest digest size supported, in bytes. +// Functions that output a digest generally require the buffer have +// at least this much space. +#define EVP_MAX_MD_SIZE 64 // SHA-512 is the longest so far. + +// EVP_MAX_MD_BLOCK_SIZE is the largest digest block size supported, in +// bytes. +#define EVP_MAX_MD_BLOCK_SIZE 128 // SHA-512 is the longest so far. + +// EVP_DigestFinal_ex finishes the digest in |ctx| and writes the output to +// |md_out|. |EVP_MD_CTX_size| bytes are written, which is at most +// |EVP_MAX_MD_SIZE|. If |out_size| is not NULL then |*out_size| is set to the +// number of bytes written. It returns one. After this call, the hash cannot be +// updated or finished again until |EVP_DigestInit_ex| is called to start +// another hashing operation. OPENSSL_EXPORT int EVP_DigestFinal_ex(EVP_MD_CTX *ctx, uint8_t *md_out, unsigned int *out_size); -/* EVP_DigestFinal acts like |EVP_DigestFinal_ex| except that - * |EVP_MD_CTX_cleanup| is called on |ctx| before returning. */ +// EVP_DigestFinal acts like |EVP_DigestFinal_ex| except that +// |EVP_MD_CTX_cleanup| is called on |ctx| before returning. OPENSSL_EXPORT int EVP_DigestFinal(EVP_MD_CTX *ctx, uint8_t *md_out, unsigned int *out_size); -/* EVP_Digest performs a complete hashing operation in one call. It hashes |len| - * bytes from |data| and writes the digest to |md_out|. |EVP_MD_CTX_size| bytes - * are written, which is at most |EVP_MAX_MD_SIZE|. If |out_size| is not NULL - * then |*out_size| is set to the number of bytes written. It returns one on - * success and zero otherwise. */ +// EVP_Digest performs a complete hashing operation in one call. It hashes |len| +// bytes from |data| and writes the digest to |md_out|. |EVP_MD_CTX_size| bytes +// are written, which is at most |EVP_MAX_MD_SIZE|. 
If |out_size| is not NULL +// then |*out_size| is set to the number of bytes written. It returns one on +// success and zero otherwise. OPENSSL_EXPORT int EVP_Digest(const void *data, size_t len, uint8_t *md_out, unsigned int *md_out_size, const EVP_MD *type, ENGINE *impl); -/* Digest function accessors. - * - * These functions allow code to learn details about an abstract hash - * function. */ +// Digest function accessors. +// +// These functions allow code to learn details about an abstract hash +// function. -/* EVP_MD_type returns a NID identifying |md|. (For example, |NID_sha256|.) */ +// EVP_MD_type returns a NID identifying |md|. (For example, |NID_sha256|.) OPENSSL_EXPORT int EVP_MD_type(const EVP_MD *md); -/* EVP_MD_flags returns the flags for |md|, which is a set of |EVP_MD_FLAG_*| - * values, ORed together. */ +// EVP_MD_flags returns the flags for |md|, which is a set of |EVP_MD_FLAG_*| +// values, ORed together. OPENSSL_EXPORT uint32_t EVP_MD_flags(const EVP_MD *md); -/* EVP_MD_size returns the digest size of |md|, in bytes. */ +// EVP_MD_size returns the digest size of |md|, in bytes. OPENSSL_EXPORT size_t EVP_MD_size(const EVP_MD *md); -/* EVP_MD_block_size returns the native block-size of |md|, in bytes. */ +// EVP_MD_block_size returns the native block-size of |md|, in bytes. OPENSSL_EXPORT size_t EVP_MD_block_size(const EVP_MD *md); -/* EVP_MD_FLAG_PKEY_DIGEST indicates the the digest function is used with a - * specific public key in order to verify signatures. (For example, - * EVP_dss1.) */ +// EVP_MD_FLAG_PKEY_DIGEST indicates the the digest function is used with a +// specific public key in order to verify signatures. (For example, +// EVP_dss1.) #define EVP_MD_FLAG_PKEY_DIGEST 1 -/* EVP_MD_FLAG_DIGALGID_ABSENT indicates that the parameter type in an X.509 - * DigestAlgorithmIdentifier representing this digest function should be - * undefined rather than NULL. */ +// EVP_MD_FLAG_DIGALGID_ABSENT indicates that the parameter type in an X.509 +// DigestAlgorithmIdentifier representing this digest function should be +// undefined rather than NULL. #define EVP_MD_FLAG_DIGALGID_ABSENT 2 -/* Deprecated functions. */ +// Digest operation accessors. -/* EVP_MD_CTX_copy sets |out|, which must /not/ be initialised, to be a copy of - * |in|. It returns one on success and zero on error. */ -OPENSSL_EXPORT int EVP_MD_CTX_copy(EVP_MD_CTX *out, const EVP_MD_CTX *in); +// EVP_MD_CTX_md returns the underlying digest function, or NULL if one has not +// been set. +OPENSSL_EXPORT const EVP_MD *EVP_MD_CTX_md(const EVP_MD_CTX *ctx); -/* EVP_add_digest does nothing and returns one. It exists only for - * compatibility with OpenSSL. */ -OPENSSL_EXPORT int EVP_add_digest(const EVP_MD *digest); +// EVP_MD_CTX_size returns the digest size of |ctx|, in bytes. It +// will crash if a digest hasn't been set on |ctx|. +OPENSSL_EXPORT size_t EVP_MD_CTX_size(const EVP_MD_CTX *ctx); -/* EVP_get_digestbyname returns an |EVP_MD| given a human readable name in - * |name|, or NULL if the name is unknown. */ -OPENSSL_EXPORT const EVP_MD *EVP_get_digestbyname(const char *); +// EVP_MD_CTX_block_size returns the block size of the digest function used by +// |ctx|, in bytes. It will crash if a digest hasn't been set on |ctx|. +OPENSSL_EXPORT size_t EVP_MD_CTX_block_size(const EVP_MD_CTX *ctx); -/* EVP_dss1 returns the value of EVP_sha1(). This was provided by OpenSSL to - * specifiy the original DSA signatures, which were fixed to use SHA-1. 
Note, - * however, that attempting to sign or verify DSA signatures with the EVP - * interface will always fail. */ -OPENSSL_EXPORT const EVP_MD *EVP_dss1(void); +// EVP_MD_CTX_type returns a NID describing the digest function used by |ctx|. +// (For example, |NID_sha256|.) It will crash if a digest hasn't been set on +// |ctx|. +OPENSSL_EXPORT int EVP_MD_CTX_type(const EVP_MD_CTX *ctx); -/* Digest operation accessors. */ +// ASN.1 functions. +// +// These functions allow code to parse and serialize AlgorithmIdentifiers for +// hash functions. -/* EVP_MD_CTX_md returns the underlying digest function, or NULL if one has not - * been set. */ -OPENSSL_EXPORT const EVP_MD *EVP_MD_CTX_md(const EVP_MD_CTX *ctx); +// EVP_parse_digest_algorithm parses an AlgorithmIdentifier structure containing +// a hash function OID (for example, 2.16.840.1.101.3.4.2.1 is SHA-256) and +// advances |cbs|. The parameters field may either be omitted or a NULL. It +// returns the digest function or NULL on error. +OPENSSL_EXPORT const EVP_MD *EVP_parse_digest_algorithm(CBS *cbs); -/* EVP_MD_CTX_size returns the digest size of |ctx|, in bytes. It - * will crash if a digest hasn't been set on |ctx|. */ -OPENSSL_EXPORT size_t EVP_MD_CTX_size(const EVP_MD_CTX *ctx); +// EVP_marshal_digest_algorithm marshals |md| as an AlgorithmIdentifier +// structure and appends the result to |cbb|. It returns one on success and zero +// on error. +OPENSSL_EXPORT int EVP_marshal_digest_algorithm(CBB *cbb, const EVP_MD *md); -/* EVP_MD_CTX_block_size returns the block size of the digest function used by - * |ctx|, in bytes. It will crash if a digest hasn't been set on |ctx|. */ -OPENSSL_EXPORT size_t EVP_MD_CTX_block_size(const EVP_MD_CTX *ctx); -/* EVP_MD_CTX_type returns a NID describing the digest function used by |ctx|. - * (For example, |NID_sha256|.) It will crash if a digest hasn't been set on - * |ctx|. */ -OPENSSL_EXPORT int EVP_MD_CTX_type(const EVP_MD_CTX *ctx); +// Deprecated functions. + +// EVP_MD_CTX_copy sets |out|, which must /not/ be initialised, to be a copy of +// |in|. It returns one on success and zero on error. +OPENSSL_EXPORT int EVP_MD_CTX_copy(EVP_MD_CTX *out, const EVP_MD_CTX *in); + +// EVP_add_digest does nothing and returns one. It exists only for +// compatibility with OpenSSL. +OPENSSL_EXPORT int EVP_add_digest(const EVP_MD *digest); + +// EVP_get_digestbyname returns an |EVP_MD| given a human readable name in +// |name|, or NULL if the name is unknown. +OPENSSL_EXPORT const EVP_MD *EVP_get_digestbyname(const char *); + +// EVP_dss1 returns the value of EVP_sha1(). This was provided by OpenSSL to +// specifiy the original DSA signatures, which were fixed to use SHA-1. Note, +// however, that attempting to sign or verify DSA signatures with the EVP +// interface will always fail. +OPENSSL_EXPORT const EVP_MD *EVP_dss1(void); + +// EVP_MD_CTX_create calls |EVP_MD_CTX_new|. +OPENSSL_EXPORT EVP_MD_CTX *EVP_MD_CTX_create(void); + +// EVP_MD_CTX_destroy calls |EVP_MD_CTX_free|. +OPENSSL_EXPORT void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx); struct evp_md_pctx_ops; struct env_md_ctx_st { - /* digest is the underlying digest function, or NULL if not set. */ + // digest is the underlying digest function, or NULL if not set. const EVP_MD *digest; - /* md_data points to a block of memory that contains the hash-specific - * context. */ + // md_data points to a block of memory that contains the hash-specific + // context. 
void *md_data; - /* pctx is an opaque (at this layer) pointer to additional context that - * EVP_PKEY functions may store in this object. */ + // pctx is an opaque (at this layer) pointer to additional context that + // EVP_PKEY functions may store in this object. EVP_PKEY_CTX *pctx; - /* pctx_ops, if not NULL, points to a vtable that contains functions to - * manipulate |pctx|. */ + // pctx_ops, if not NULL, points to a vtable that contains functions to + // manipulate |pctx|. const struct evp_md_pctx_ops *pctx_ops; } /* EVP_MD_CTX */; #if defined(__cplusplus) -} /* extern C */ +} // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { namespace bssl { -BORINGSSL_MAKE_DELETER(EVP_MD_CTX, EVP_MD_CTX_destroy) +BORINGSSL_MAKE_DELETER(EVP_MD_CTX, EVP_MD_CTX_free) using ScopedEVP_MD_CTX = internal::StackAllocated #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -366,7 +365,7 @@ BORINGSSL_MAKE_DELETER(EC_GROUP, EC_GROUP_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif @@ -402,5 +401,7 @@ BORINGSSL_MAKE_DELETER(EC_GROUP, EC_GROUP_free) #define EC_R_ENCODE_ERROR 129 #define EC_R_GROUP_MISMATCH 130 #define EC_R_INVALID_COFACTOR 131 +#define EC_R_PUBLIC_KEY_VALIDATION_FAILED 132 +#define EC_R_INVALID_SCALAR 133 -#endif /* OPENSSL_HEADER_EC_H */ +#endif // OPENSSL_HEADER_EC_H diff --git a/Sources/BoringSSL/include/openssl/ec_key.h b/Sources/BoringSSL/include/openssl/ec_key.h index 1dbae62db..7ef1b1418 100644 --- a/Sources/BoringSSL/include/openssl/ec_key.h +++ b/Sources/BoringSSL/include/openssl/ec_key.h @@ -79,157 +79,165 @@ extern "C" { #endif -/* ec_key.h contains functions that handle elliptic-curve points that are - * public/private keys. */ +// ec_key.h contains functions that handle elliptic-curve points that are +// public/private keys. -/* EC key objects. */ +// EC key objects. -/* EC_KEY_new returns a fresh |EC_KEY| object or NULL on error. */ +// EC_KEY_new returns a fresh |EC_KEY| object or NULL on error. OPENSSL_EXPORT EC_KEY *EC_KEY_new(void); -/* EC_KEY_new_method acts the same as |EC_KEY_new|, but takes an explicit - * |ENGINE|. */ +// EC_KEY_new_method acts the same as |EC_KEY_new|, but takes an explicit +// |ENGINE|. OPENSSL_EXPORT EC_KEY *EC_KEY_new_method(const ENGINE *engine); -/* EC_KEY_new_by_curve_name returns a fresh EC_KEY for group specified by |nid| - * or NULL on error. */ +// EC_KEY_new_by_curve_name returns a fresh EC_KEY for group specified by |nid| +// or NULL on error. OPENSSL_EXPORT EC_KEY *EC_KEY_new_by_curve_name(int nid); -/* EC_KEY_free frees all the data owned by |key| and |key| itself. */ +// EC_KEY_free frees all the data owned by |key| and |key| itself. OPENSSL_EXPORT void EC_KEY_free(EC_KEY *key); -/* EC_KEY_copy sets |dst| equal to |src| and returns |dst| or NULL on error. */ +// EC_KEY_copy sets |dst| equal to |src| and returns |dst| or NULL on error. OPENSSL_EXPORT EC_KEY *EC_KEY_copy(EC_KEY *dst, const EC_KEY *src); -/* EC_KEY_dup returns a fresh copy of |src| or NULL on error. */ +// EC_KEY_dup returns a fresh copy of |src| or NULL on error. OPENSSL_EXPORT EC_KEY *EC_KEY_dup(const EC_KEY *src); -/* EC_KEY_up_ref increases the reference count of |key| and returns one. */ +// EC_KEY_up_ref increases the reference count of |key| and returns one. OPENSSL_EXPORT int EC_KEY_up_ref(EC_KEY *key); -/* EC_KEY_is_opaque returns one if |key| is opaque and doesn't expose its key - * material. Otherwise it return zero. */ +// EC_KEY_is_opaque returns one if |key| is opaque and doesn't expose its key +// material. 
Otherwise it return zero. OPENSSL_EXPORT int EC_KEY_is_opaque(const EC_KEY *key); -/* EC_KEY_get0_group returns a pointer to the |EC_GROUP| object inside |key|. */ +// EC_KEY_get0_group returns a pointer to the |EC_GROUP| object inside |key|. OPENSSL_EXPORT const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key); -/* EC_KEY_set_group sets the |EC_GROUP| object that |key| will use to |group|. - * It returns one on success and zero otherwise. */ +// EC_KEY_set_group sets the |EC_GROUP| object that |key| will use to |group|. +// It returns one on success and zero otherwise. OPENSSL_EXPORT int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group); -/* EC_KEY_get0_private_key returns a pointer to the private key inside |key|. */ +// EC_KEY_get0_private_key returns a pointer to the private key inside |key|. OPENSSL_EXPORT const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key); -/* EC_KEY_set_private_key sets the private key of |key| to |priv|. It returns - * one on success and zero otherwise. */ +// EC_KEY_set_private_key sets the private key of |key| to |priv|. It returns +// one on success and zero otherwise. OPENSSL_EXPORT int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *prv); -/* EC_KEY_get0_public_key returns a pointer to the public key point inside - * |key|. */ +// EC_KEY_get0_public_key returns a pointer to the public key point inside +// |key|. OPENSSL_EXPORT const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key); -/* EC_KEY_set_public_key sets the public key of |key| to |pub|, by copying it. - * It returns one on success and zero otherwise. */ +// EC_KEY_set_public_key sets the public key of |key| to |pub|, by copying it. +// It returns one on success and zero otherwise. OPENSSL_EXPORT int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub); #define EC_PKEY_NO_PARAMETERS 0x001 #define EC_PKEY_NO_PUBKEY 0x002 -/* EC_KEY_get_enc_flags returns the encoding flags for |key|, which is a - * bitwise-OR of |EC_PKEY_*| values. */ +// EC_KEY_get_enc_flags returns the encoding flags for |key|, which is a +// bitwise-OR of |EC_PKEY_*| values. OPENSSL_EXPORT unsigned EC_KEY_get_enc_flags(const EC_KEY *key); -/* EC_KEY_set_enc_flags sets the encoding flags for |key|, which is a - * bitwise-OR of |EC_PKEY_*| values. */ +// EC_KEY_set_enc_flags sets the encoding flags for |key|, which is a +// bitwise-OR of |EC_PKEY_*| values. OPENSSL_EXPORT void EC_KEY_set_enc_flags(EC_KEY *key, unsigned flags); -/* EC_KEY_get_conv_form returns the conversation form that will be used by - * |key|. */ +// EC_KEY_get_conv_form returns the conversation form that will be used by +// |key|. OPENSSL_EXPORT point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key); -/* EC_KEY_set_conv_form sets the conversion form to be used by |key|. */ +// EC_KEY_set_conv_form sets the conversion form to be used by |key|. OPENSSL_EXPORT void EC_KEY_set_conv_form(EC_KEY *key, point_conversion_form_t cform); -/* EC_KEY_check_key performs several checks on |key| (possibly including an - * expensive check that the public key is in the primary subgroup). It returns - * one if all checks pass and zero otherwise. If it returns zero then detail - * about the problem can be found on the error stack. */ +// EC_KEY_check_key performs several checks on |key| (possibly including an +// expensive check that the public key is in the primary subgroup). It returns +// one if all checks pass and zero otherwise. If it returns zero then detail +// about the problem can be found on the error stack. 
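/* Usage sketch: generate a fresh P-256 key and validate it with
 * |EC_KEY_check_key| as documented here. The helper name is illustrative;
 * |NID_X9_62_prime256v1| comes from <openssl/nid.h>. */
#include <openssl/ec_key.h>
#include <openssl/nid.h>

static EC_KEY *p256_keygen_example(void) {
  EC_KEY *key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
  if (key == NULL) {
    return NULL;
  }
  if (!EC_KEY_generate_key(key) || !EC_KEY_check_key(key)) {
    EC_KEY_free(key);
    return NULL;
  }
  return key; /* Caller releases with |EC_KEY_free|. */
}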
OPENSSL_EXPORT int EC_KEY_check_key(const EC_KEY *key); -/* EC_KEY_set_public_key_affine_coordinates sets the public key in |key| to - * (|x|, |y|). It returns one on success and zero otherwise. */ +// EC_KEY_check_fips performs a signing pairwise consistency test (FIPS 140-2 +// 4.9.2). It returns one if it passes and zero otherwise. +OPENSSL_EXPORT int EC_KEY_check_fips(const EC_KEY *key); + +// EC_KEY_set_public_key_affine_coordinates sets the public key in |key| to +// (|x|, |y|). It returns one on success and zero otherwise. OPENSSL_EXPORT int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, BIGNUM *y); -/* Key generation. */ +// Key generation. -/* EC_KEY_generate_key generates a random, private key, calculates the - * corresponding public key and stores both in |key|. It returns one on success - * or zero otherwise. */ +// EC_KEY_generate_key generates a random, private key, calculates the +// corresponding public key and stores both in |key|. It returns one on success +// or zero otherwise. OPENSSL_EXPORT int EC_KEY_generate_key(EC_KEY *key); +// EC_KEY_generate_key_fips behaves like |EC_KEY_generate_key| but performs +// additional checks for FIPS compliance. +OPENSSL_EXPORT int EC_KEY_generate_key_fips(EC_KEY *key); + -/* Serialisation. */ +// Serialisation. -/* EC_KEY_parse_private_key parses a DER-encoded ECPrivateKey structure (RFC - * 5915) from |cbs| and advances |cbs|. It returns a newly-allocated |EC_KEY| or - * NULL on error. If |group| is non-null, the parameters field of the - * ECPrivateKey may be omitted (but must match |group| if present). Otherwise, - * the parameters field is required. */ +// EC_KEY_parse_private_key parses a DER-encoded ECPrivateKey structure (RFC +// 5915) from |cbs| and advances |cbs|. It returns a newly-allocated |EC_KEY| or +// NULL on error. If |group| is non-null, the parameters field of the +// ECPrivateKey may be omitted (but must match |group| if present). Otherwise, +// the parameters field is required. OPENSSL_EXPORT EC_KEY *EC_KEY_parse_private_key(CBS *cbs, const EC_GROUP *group); -/* EC_KEY_marshal_private_key marshals |key| as a DER-encoded ECPrivateKey - * structure (RFC 5915) and appends the result to |cbb|. It returns one on - * success and zero on failure. |enc_flags| is a combination of |EC_PKEY_*| - * values and controls whether corresponding fields are omitted. */ +// EC_KEY_marshal_private_key marshals |key| as a DER-encoded ECPrivateKey +// structure (RFC 5915) and appends the result to |cbb|. It returns one on +// success and zero on failure. |enc_flags| is a combination of |EC_PKEY_*| +// values and controls whether corresponding fields are omitted. OPENSSL_EXPORT int EC_KEY_marshal_private_key(CBB *cbb, const EC_KEY *key, unsigned enc_flags); -/* EC_KEY_parse_curve_name parses a DER-encoded OBJECT IDENTIFIER as a curve - * name from |cbs| and advances |cbs|. It returns a newly-allocated |EC_GROUP| - * or NULL on error. */ +// EC_KEY_parse_curve_name parses a DER-encoded OBJECT IDENTIFIER as a curve +// name from |cbs| and advances |cbs|. It returns a newly-allocated |EC_GROUP| +// or NULL on error. OPENSSL_EXPORT EC_GROUP *EC_KEY_parse_curve_name(CBS *cbs); -/* EC_KEY_marshal_curve_name marshals |group| as a DER-encoded OBJECT IDENTIFIER - * and appends the result to |cbb|. It returns one on success and zero on - * failure. */ +// EC_KEY_marshal_curve_name marshals |group| as a DER-encoded OBJECT IDENTIFIER +// and appends the result to |cbb|. It returns one on success and zero on +// failure. 
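/* Usage sketch: DER-encode a private key as an ECPrivateKey (RFC 5915) with
 * |EC_KEY_marshal_private_key| and a |CBB| from <openssl/bytestring.h>. The
 * helper name is illustrative; on success the caller frees |*out_der| with
 * |OPENSSL_free|. */
#include <openssl/bytestring.h>
#include <openssl/ec_key.h>
#include <openssl/mem.h>

static int marshal_ec_private_key_example(const EC_KEY *key, uint8_t **out_der,
                                          size_t *out_der_len) {
  CBB cbb;
  CBB_zero(&cbb);
  if (!CBB_init(&cbb, 0) ||
      !EC_KEY_marshal_private_key(&cbb, key, 0 /* include all fields */) ||
      !CBB_finish(&cbb, out_der, out_der_len)) {
    CBB_cleanup(&cbb);
    return 0;
  }
  return 1;
}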
OPENSSL_EXPORT int EC_KEY_marshal_curve_name(CBB *cbb, const EC_GROUP *group); -/* EC_KEY_parse_parameters parses a DER-encoded ECParameters structure (RFC - * 5480) from |cbs| and advances |cbs|. It returns a newly-allocated |EC_GROUP| - * or NULL on error. It supports the namedCurve and specifiedCurve options, but - * use of specifiedCurve is deprecated. Use |EC_KEY_parse_curve_name| - * instead. */ +// EC_KEY_parse_parameters parses a DER-encoded ECParameters structure (RFC +// 5480) from |cbs| and advances |cbs|. It returns a newly-allocated |EC_GROUP| +// or NULL on error. It supports the namedCurve and specifiedCurve options, but +// use of specifiedCurve is deprecated. Use |EC_KEY_parse_curve_name| +// instead. OPENSSL_EXPORT EC_GROUP *EC_KEY_parse_parameters(CBS *cbs); -/* ex_data functions. - * - * These functions are wrappers. See |ex_data.h| for details. */ +// ex_data functions. +// +// These functions are wrappers. See |ex_data.h| for details. OPENSSL_EXPORT int EC_KEY_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int EC_KEY_set_ex_data(EC_KEY *r, int idx, void *arg); OPENSSL_EXPORT void *EC_KEY_get_ex_data(const EC_KEY *r, int idx); -/* ECDSA method. */ +// ECDSA method. -/* ECDSA_FLAG_OPAQUE specifies that this ECDSA_METHOD does not expose its key - * material. This may be set if, for instance, it is wrapping some other crypto - * API, like a platform key store. */ +// ECDSA_FLAG_OPAQUE specifies that this ECDSA_METHOD does not expose its key +// material. This may be set if, for instance, it is wrapping some other crypto +// API, like a platform key store. #define ECDSA_FLAG_OPAQUE 1 -/* ecdsa_method_st is a structure of function pointers for implementing ECDSA. - * See engine.h. */ +// ecdsa_method_st is a structure of function pointers for implementing ECDSA. +// See engine.h. struct ecdsa_method_st { struct openssl_method_common_st common; @@ -238,89 +246,85 @@ struct ecdsa_method_st { int (*init)(EC_KEY *key); int (*finish)(EC_KEY *key); - /* group_order_size returns the number of bytes needed to represent the order - * of the group. This is used to calculate the maximum size of an ECDSA - * signature in |ECDSA_size|. */ + // group_order_size returns the number of bytes needed to represent the order + // of the group. This is used to calculate the maximum size of an ECDSA + // signature in |ECDSA_size|. size_t (*group_order_size)(const EC_KEY *key); - /* sign matches the arguments and behaviour of |ECDSA_sign|. */ + // sign matches the arguments and behaviour of |ECDSA_sign|. int (*sign)(const uint8_t *digest, size_t digest_len, uint8_t *sig, unsigned int *sig_len, EC_KEY *eckey); - /* Ignored. Set this to NULL. */ - int (*verify)(const uint8_t *digest, size_t digest_len, const uint8_t *sig, - size_t sig_len, EC_KEY *eckey); - int flags; }; -/* Deprecated functions. */ +// Deprecated functions. -/* EC_KEY_set_asn1_flag does nothing. */ +// EC_KEY_set_asn1_flag does nothing. OPENSSL_EXPORT void EC_KEY_set_asn1_flag(EC_KEY *key, int flag); -/* d2i_ECPrivateKey parses an ASN.1, DER-encoded, private key from |len| bytes - * at |*inp|. If |out_key| is not NULL then, on exit, a pointer to the result - * is in |*out_key|. Note that, even if |*out_key| is already non-NULL on entry, - * it * will not be written to. Rather, a fresh |EC_KEY| is allocated and the - * previous * one is freed. On successful exit, |*inp| is advanced past the DER - * structure. 
It returns the result or NULL on error. - * - * On input, if |*out_key| is non-NULL and has a group configured, the - * parameters field may be omitted but must match that group if present. - * - * Use |EC_KEY_parse_private_key| instead. */ +// d2i_ECPrivateKey parses an ASN.1, DER-encoded, private key from |len| bytes +// at |*inp|. If |out_key| is not NULL then, on exit, a pointer to the result +// is in |*out_key|. Note that, even if |*out_key| is already non-NULL on entry, +// it * will not be written to. Rather, a fresh |EC_KEY| is allocated and the +// previous * one is freed. On successful exit, |*inp| is advanced past the DER +// structure. It returns the result or NULL on error. +// +// On input, if |*out_key| is non-NULL and has a group configured, the +// parameters field may be omitted but must match that group if present. +// +// Use |EC_KEY_parse_private_key| instead. OPENSSL_EXPORT EC_KEY *d2i_ECPrivateKey(EC_KEY **out_key, const uint8_t **inp, long len); -/* i2d_ECPrivateKey marshals an EC private key from |key| to an ASN.1, DER - * structure. If |outp| is not NULL then the result is written to |*outp| and - * |*outp| is advanced just past the output. It returns the number of bytes in - * the result, whether written or not, or a negative value on error. - * - * Use |EC_KEY_marshal_private_key| instead. */ +// i2d_ECPrivateKey marshals an EC private key from |key| to an ASN.1, DER +// structure. If |outp| is not NULL then the result is written to |*outp| and +// |*outp| is advanced just past the output. It returns the number of bytes in +// the result, whether written or not, or a negative value on error. +// +// Use |EC_KEY_marshal_private_key| instead. OPENSSL_EXPORT int i2d_ECPrivateKey(const EC_KEY *key, uint8_t **outp); -/* d2i_ECParameters parses an ASN.1, DER-encoded, set of EC parameters from - * |len| bytes at |*inp|. If |out_key| is not NULL then, on exit, a pointer to - * the result is in |*out_key|. Note that, even if |*out_key| is already - * non-NULL on entry, it will not be written to. Rather, a fresh |EC_KEY| is - * allocated and the previous one is freed. On successful exit, |*inp| is - * advanced past the DER structure. It returns the result or NULL on error. - * - * Use |EC_KEY_parse_parameters| or |EC_KEY_parse_curve_name| instead. */ +// d2i_ECParameters parses an ASN.1, DER-encoded, set of EC parameters from +// |len| bytes at |*inp|. If |out_key| is not NULL then, on exit, a pointer to +// the result is in |*out_key|. Note that, even if |*out_key| is already +// non-NULL on entry, it will not be written to. Rather, a fresh |EC_KEY| is +// allocated and the previous one is freed. On successful exit, |*inp| is +// advanced past the DER structure. It returns the result or NULL on error. +// +// Use |EC_KEY_parse_parameters| or |EC_KEY_parse_curve_name| instead. OPENSSL_EXPORT EC_KEY *d2i_ECParameters(EC_KEY **out_key, const uint8_t **inp, long len); -/* i2d_ECParameters marshals EC parameters from |key| to an ASN.1, DER - * structure. If |outp| is not NULL then the result is written to |*outp| and - * |*outp| is advanced just past the output. It returns the number of bytes in - * the result, whether written or not, or a negative value on error. - * - * Use |EC_KEY_marshal_curve_name| instead. */ +// i2d_ECParameters marshals EC parameters from |key| to an ASN.1, DER +// structure. If |outp| is not NULL then the result is written to |*outp| and +// |*outp| is advanced just past the output. 
It returns the number of bytes in +// the result, whether written or not, or a negative value on error. +// +// Use |EC_KEY_marshal_curve_name| instead. OPENSSL_EXPORT int i2d_ECParameters(const EC_KEY *key, uint8_t **outp); -/* o2i_ECPublicKey parses an EC point from |len| bytes at |*inp| into - * |*out_key|. Note that this differs from the d2i format in that |*out_key| - * must be non-NULL with a group set. On successful exit, |*inp| is advanced by - * |len| bytes. It returns |*out_key| or NULL on error. - * - * Use |EC_POINT_oct2point| instead. */ +// o2i_ECPublicKey parses an EC point from |len| bytes at |*inp| into +// |*out_key|. Note that this differs from the d2i format in that |*out_key| +// must be non-NULL with a group set. On successful exit, |*inp| is advanced by +// |len| bytes. It returns |*out_key| or NULL on error. +// +// Use |EC_POINT_oct2point| instead. OPENSSL_EXPORT EC_KEY *o2i_ECPublicKey(EC_KEY **out_key, const uint8_t **inp, long len); -/* i2o_ECPublicKey marshals an EC point from |key|. If |outp| is not NULL then - * the result is written to |*outp| and |*outp| is advanced just past the - * output. It returns the number of bytes in the result, whether written or - * not, or a negative value on error. - * - * Use |EC_POINT_point2cbb| instead. */ +// i2o_ECPublicKey marshals an EC point from |key|. If |outp| is not NULL then +// the result is written to |*outp| and |*outp| is advanced just past the +// output. It returns the number of bytes in the result, whether written or +// not, or a negative value on error. +// +// Use |EC_POINT_point2cbb| instead. OPENSSL_EXPORT int i2o_ECPublicKey(const EC_KEY *key, unsigned char **outp); #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -330,8 +334,8 @@ BORINGSSL_MAKE_DELETER(EC_KEY, EC_KEY_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif -#endif /* OPENSSL_HEADER_EC_KEY_H */ +#endif // OPENSSL_HEADER_EC_KEY_H diff --git a/Sources/BoringSSL/include/openssl/ecdh.h b/Sources/BoringSSL/include/openssl/ecdh.h index c16750309..73e2140e4 100644 --- a/Sources/BoringSSL/include/openssl/ecdh.h +++ b/Sources/BoringSSL/include/openssl/ecdh.h @@ -76,26 +76,26 @@ extern "C" { #endif -/* Elliptic curve Diffie-Hellman. */ +// Elliptic curve Diffie-Hellman. -/* ECDH_compute_key calculates the shared key between |pub_key| and |priv_key|. - * If |kdf| is not NULL, then it is called with the bytes of the shared key and - * the parameter |out|. When |kdf| returns, the value of |*outlen| becomes the - * return value. Otherwise, as many bytes of the shared key as will fit are - * copied directly to, at most, |outlen| bytes at |out|. It returns the number - * of bytes written to |out|, or -1 on error. */ +// ECDH_compute_key calculates the shared key between |pub_key| and |priv_key|. +// If |kdf| is not NULL, then it is called with the bytes of the shared key and +// the parameter |out|. When |kdf| returns, the value of |*outlen| becomes the +// return value. Otherwise, as many bytes of the shared key as will fit are +// copied directly to, at most, |outlen| bytes at |out|. It returns the number +// of bytes written to |out|, or -1 on error. 
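/* Usage sketch: derive a raw ECDH shared secret with the function documented
 * above, passing a NULL |kdf| so the shared value is written directly to
 * |out| (truncated if |out_len| is too small, as described). The helper name
 * is illustrative. */
#include <openssl/ecdh.h>

static int ecdh_raw_secret_example(const EC_KEY *my_key,
                                   const EC_POINT *peer_pub, uint8_t *out,
                                   size_t out_len) {
  /* Returns the number of secret bytes written, or 0 on error. */
  int len = ECDH_compute_key(out, out_len, peer_pub, my_key, NULL);
  return len > 0 ? len : 0;
}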
OPENSSL_EXPORT int ECDH_compute_key( void *out, size_t outlen, const EC_POINT *pub_key, const EC_KEY *priv_key, void *(*kdf)(const void *in, size_t inlen, void *out, size_t *outlen)); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif #define ECDH_R_KDF_FAILED 100 #define ECDH_R_NO_PRIVATE_VALUE 101 #define ECDH_R_POINT_ARITHMETIC_FAILURE 102 -#endif /* OPENSSL_HEADER_ECDH_H */ +#endif // OPENSSL_HEADER_ECDH_H diff --git a/Sources/BoringSSL/include/openssl/ecdsa.h b/Sources/BoringSSL/include/openssl/ecdsa.h index 8a158b87f..42da1c612 100644 --- a/Sources/BoringSSL/include/openssl/ecdsa.h +++ b/Sources/BoringSSL/include/openssl/ecdsa.h @@ -62,138 +62,120 @@ extern "C" { #endif -/* ECDSA contains functions for signing and verifying with the Digital Signature - * Algorithm over elliptic curves. */ +// ECDSA contains functions for signing and verifying with the Digital Signature +// Algorithm over elliptic curves. -/* Signing and verifying. */ +// Signing and verifying. -/* ECDSA_sign signs |digest_len| bytes from |digest| with |key| and writes the - * resulting signature to |sig|, which must have |ECDSA_size(key)| bytes of - * space. On successful exit, |*sig_len| is set to the actual number of bytes - * written. The |type| argument should be zero. It returns one on success and - * zero otherwise. */ +// ECDSA_sign signs |digest_len| bytes from |digest| with |key| and writes the +// resulting signature to |sig|, which must have |ECDSA_size(key)| bytes of +// space. On successful exit, |*sig_len| is set to the actual number of bytes +// written. The |type| argument should be zero. It returns one on success and +// zero otherwise. OPENSSL_EXPORT int ECDSA_sign(int type, const uint8_t *digest, size_t digest_len, uint8_t *sig, unsigned int *sig_len, const EC_KEY *key); -/* ECDSA_verify verifies that |sig_len| bytes from |sig| constitute a valid - * signature by |key| of |digest|. (The |type| argument should be zero.) It - * returns one on success or zero if the signature is invalid or an error - * occurred. */ +// ECDSA_verify verifies that |sig_len| bytes from |sig| constitute a valid +// signature by |key| of |digest|. (The |type| argument should be zero.) It +// returns one on success or zero if the signature is invalid or an error +// occurred. OPENSSL_EXPORT int ECDSA_verify(int type, const uint8_t *digest, size_t digest_len, const uint8_t *sig, size_t sig_len, const EC_KEY *key); -/* ECDSA_size returns the maximum size of an ECDSA signature using |key|. It - * returns zero on error. */ +// ECDSA_size returns the maximum size of an ECDSA signature using |key|. It +// returns zero on error. OPENSSL_EXPORT size_t ECDSA_size(const EC_KEY *key); -/* Low-level signing and verification. - * - * Low-level functions handle signatures as |ECDSA_SIG| structures which allow - * the two values in an ECDSA signature to be handled separately. */ +// Low-level signing and verification. +// +// Low-level functions handle signatures as |ECDSA_SIG| structures which allow +// the two values in an ECDSA signature to be handled separately. struct ecdsa_sig_st { BIGNUM *r; BIGNUM *s; }; -/* ECDSA_SIG_new returns a fresh |ECDSA_SIG| structure or NULL on error. */ +// ECDSA_SIG_new returns a fresh |ECDSA_SIG| structure or NULL on error. OPENSSL_EXPORT ECDSA_SIG *ECDSA_SIG_new(void); -/* ECDSA_SIG_free frees |sig| its member |BIGNUM|s. */ +// ECDSA_SIG_free frees |sig| its member |BIGNUM|s. 
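/* Usage sketch: sign a precomputed digest with |ECDSA_sign| and check it with
 * |ECDSA_verify|, sizing the signature buffer with |ECDSA_size| as documented
 * above. The helper name is illustrative. */
#include <openssl/ecdsa.h>
#include <openssl/mem.h>

static int ecdsa_sign_verify_example(const EC_KEY *key, const uint8_t *digest,
                                     size_t digest_len) {
  int ok = 0;
  unsigned sig_len = 0;
  size_t max_sig = ECDSA_size(key);
  uint8_t *sig = max_sig > 0 ? OPENSSL_malloc(max_sig) : NULL;
  if (sig == NULL) {
    return 0;
  }
  if (ECDSA_sign(0 /* type must be zero */, digest, digest_len, sig, &sig_len,
                 key) &&
      ECDSA_verify(0, digest, digest_len, sig, sig_len, key)) {
    ok = 1;
  }
  OPENSSL_free(sig);
  return ok;
}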
OPENSSL_EXPORT void ECDSA_SIG_free(ECDSA_SIG *sig); -/* ECDSA_do_sign signs |digest_len| bytes from |digest| with |key| and returns - * the resulting signature structure, or NULL on error. */ +// ECDSA_SIG_get0 sets |*out_r| and |*out_s|, if non-NULL, to the two +// components of |sig|. +OPENSSL_EXPORT void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **out_r, + const BIGNUM **out_s); + +// ECDSA_SIG_set0 sets |sig|'s components to |r| and |s|, neither of which may +// be NULL. On success, it takes ownership of each argument and returns one. +// Otherwise, it returns zero. +OPENSSL_EXPORT int ECDSA_SIG_set0(ECDSA_SIG *sig, BIGNUM *r, BIGNUM *s); + +// ECDSA_do_sign signs |digest_len| bytes from |digest| with |key| and returns +// the resulting signature structure, or NULL on error. OPENSSL_EXPORT ECDSA_SIG *ECDSA_do_sign(const uint8_t *digest, size_t digest_len, const EC_KEY *key); -/* ECDSA_do_verify verifies that |sig| constitutes a valid signature by |key| - * of |digest|. It returns one on success or zero if the signature is invalid - * or on error. */ +// ECDSA_do_verify verifies that |sig| constitutes a valid signature by |key| +// of |digest|. It returns one on success or zero if the signature is invalid +// or on error. OPENSSL_EXPORT int ECDSA_do_verify(const uint8_t *digest, size_t digest_len, const ECDSA_SIG *sig, const EC_KEY *key); -/* Signing with precomputation. - * - * Parts of the ECDSA signature can be independent of the message to be signed - * thus it's possible to precompute them and reduce the signing latency. - * - * TODO(fork): remove support for this as it cannot support safe-randomness. */ - -/* ECDSA_sign_setup precomputes parts of an ECDSA signing operation. It sets - * |*kinv| and |*rp| to the precomputed values and uses the |ctx| argument, if - * not NULL. It returns one on success and zero otherwise. */ -OPENSSL_EXPORT int ECDSA_sign_setup(const EC_KEY *eckey, BN_CTX *ctx, - BIGNUM **kinv, BIGNUM **rp); - -/* ECDSA_do_sign_ex is the same as |ECDSA_do_sign| but takes precomputed values - * as generated by |ECDSA_sign_setup|. */ -OPENSSL_EXPORT ECDSA_SIG *ECDSA_do_sign_ex(const uint8_t *digest, - size_t digest_len, - const BIGNUM *kinv, const BIGNUM *rp, - const EC_KEY *eckey); - -/* ECDSA_sign_ex is the same as |ECDSA_sign| but takes precomputed values as - * generated by |ECDSA_sign_setup|. */ -OPENSSL_EXPORT int ECDSA_sign_ex(int type, const uint8_t *digest, - size_t digest_len, uint8_t *sig, - unsigned int *sig_len, const BIGNUM *kinv, - const BIGNUM *rp, const EC_KEY *eckey); - - -/* ASN.1 functions. */ +// ASN.1 functions. -/* ECDSA_SIG_parse parses a DER-encoded ECDSA-Sig-Value structure from |cbs| and - * advances |cbs|. It returns a newly-allocated |ECDSA_SIG| or NULL on error. */ +// ECDSA_SIG_parse parses a DER-encoded ECDSA-Sig-Value structure from |cbs| and +// advances |cbs|. It returns a newly-allocated |ECDSA_SIG| or NULL on error. OPENSSL_EXPORT ECDSA_SIG *ECDSA_SIG_parse(CBS *cbs); -/* ECDSA_SIG_from_bytes parses |in| as a DER-encoded ECDSA-Sig-Value structure. - * It returns a newly-allocated |ECDSA_SIG| structure or NULL on error. */ +// ECDSA_SIG_from_bytes parses |in| as a DER-encoded ECDSA-Sig-Value structure. +// It returns a newly-allocated |ECDSA_SIG| structure or NULL on error. OPENSSL_EXPORT ECDSA_SIG *ECDSA_SIG_from_bytes(const uint8_t *in, size_t in_len); -/* ECDSA_SIG_marshal marshals |sig| as a DER-encoded ECDSA-Sig-Value and appends - * the result to |cbb|. It returns one on success and zero on error. 
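The low-level |ECDSA_do_sign| path and the |ECDSA_SIG_get0| accessor added in this hunk could be exercised along these lines; the helper is hypothetical and assumes |digest| was hashed by the caller.

    #include <stdint.h>
    #include <openssl/bn.h>
    #include <openssl/ec_key.h>
    #include <openssl/ecdsa.h>

    // Signs a pre-hashed |digest| and reads back the r/s components.
    int ecdsa_do_sign_demo(const uint8_t *digest, size_t digest_len,
                           const EC_KEY *key) {
      ECDSA_SIG *sig = ECDSA_do_sign(digest, digest_len, key);
      if (sig == NULL) {
        return 0;
      }
      const BIGNUM *r, *s;
      ECDSA_SIG_get0(sig, &r, &s);
      // |r| and |s| could now be re-encoded, e.g. as fixed-width big-endian
      // integers for formats that do not use the DER ECDSA-Sig-Value.
      int ok = ECDSA_do_verify(digest, digest_len, sig, key);
      ECDSA_SIG_free(sig);
      return ok;
    }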
*/ +// ECDSA_SIG_marshal marshals |sig| as a DER-encoded ECDSA-Sig-Value and appends +// the result to |cbb|. It returns one on success and zero on error. OPENSSL_EXPORT int ECDSA_SIG_marshal(CBB *cbb, const ECDSA_SIG *sig); -/* ECDSA_SIG_to_bytes marshals |sig| as a DER-encoded ECDSA-Sig-Value and, on - * success, sets |*out_bytes| to a newly allocated buffer containing the result - * and returns one. Otherwise, it returns zero. The result should be freed with - * |OPENSSL_free|. */ +// ECDSA_SIG_to_bytes marshals |sig| as a DER-encoded ECDSA-Sig-Value and, on +// success, sets |*out_bytes| to a newly allocated buffer containing the result +// and returns one. Otherwise, it returns zero. The result should be freed with +// |OPENSSL_free|. OPENSSL_EXPORT int ECDSA_SIG_to_bytes(uint8_t **out_bytes, size_t *out_len, const ECDSA_SIG *sig); -/* ECDSA_SIG_max_len returns the maximum length of a DER-encoded ECDSA-Sig-Value - * structure for a group whose order is represented in |order_len| bytes, or - * zero on overflow. */ +// ECDSA_SIG_max_len returns the maximum length of a DER-encoded ECDSA-Sig-Value +// structure for a group whose order is represented in |order_len| bytes, or +// zero on overflow. OPENSSL_EXPORT size_t ECDSA_SIG_max_len(size_t order_len); -/* Deprecated functions. */ +// Deprecated functions. -/* d2i_ECDSA_SIG parses an ASN.1, DER-encoded, signature from |len| bytes at - * |*inp|. If |out| is not NULL then, on exit, a pointer to the result is in - * |*out|. Note that, even if |*out| is already non-NULL on entry, it will not - * be written to. Rather, a fresh |ECDSA_SIG| is allocated and the previous one - * is freed. On successful exit, |*inp| is advanced past the DER structure. It - * returns the result or NULL on error. */ +// d2i_ECDSA_SIG parses an ASN.1, DER-encoded, signature from |len| bytes at +// |*inp|. If |out| is not NULL then, on exit, a pointer to the result is in +// |*out|. Note that, even if |*out| is already non-NULL on entry, it will not +// be written to. Rather, a fresh |ECDSA_SIG| is allocated and the previous one +// is freed. On successful exit, |*inp| is advanced past the DER structure. It +// returns the result or NULL on error. OPENSSL_EXPORT ECDSA_SIG *d2i_ECDSA_SIG(ECDSA_SIG **out, const uint8_t **inp, long len); -/* i2d_ECDSA_SIG marshals a signature from |sig| to an ASN.1, DER - * structure. If |outp| is not NULL then the result is written to |*outp| and - * |*outp| is advanced just past the output. It returns the number of bytes in - * the result, whether written or not, or a negative value on error. */ +// i2d_ECDSA_SIG marshals a signature from |sig| to an ASN.1, DER +// structure. If |outp| is not NULL then the result is written to |*outp| and +// |*outp| is advanced just past the output. It returns the number of bytes in +// the result, whether written or not, or a negative value on error. 
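A round-trip sketch for the DER helpers documented above (|ECDSA_SIG_to_bytes| and |ECDSA_SIG_from_bytes|); the helper name is illustrative and the input signature is assumed to come from |ECDSA_do_sign| or an earlier parse.

    #include <stdint.h>
    #include <openssl/ecdsa.h>
    #include <openssl/mem.h>

    // Encodes |sig| as a DER ECDSA-Sig-Value and parses it back again.
    int ecdsa_sig_der_round_trip(const ECDSA_SIG *sig) {
      uint8_t *der = NULL;
      size_t der_len = 0;
      if (!ECDSA_SIG_to_bytes(&der, &der_len, sig)) {
        return 0;
      }
      ECDSA_SIG *copy = ECDSA_SIG_from_bytes(der, der_len);
      OPENSSL_free(der);  // |der| was allocated by ECDSA_SIG_to_bytes.
      if (copy == NULL) {
        return 0;
      }
      ECDSA_SIG_free(copy);
      return 1;
    }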
OPENSSL_EXPORT int i2d_ECDSA_SIG(const ECDSA_SIG *sig, uint8_t **outp); #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -203,7 +185,7 @@ BORINGSSL_MAKE_DELETER(ECDSA_SIG, ECDSA_SIG_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif @@ -214,4 +196,4 @@ BORINGSSL_MAKE_DELETER(ECDSA_SIG, ECDSA_SIG_free) #define ECDSA_R_RANDOM_NUMBER_GENERATION_FAILED 104 #define ECDSA_R_ENCODE_ERROR 105 -#endif /* OPENSSL_HEADER_ECDSA_H */ +#endif // OPENSSL_HEADER_ECDSA_H diff --git a/Sources/BoringSSL/include/openssl/engine.h b/Sources/BoringSSL/include/openssl/engine.h index b029ef948..595e53c00 100644 --- a/Sources/BoringSSL/include/openssl/engine.h +++ b/Sources/BoringSSL/include/openssl/engine.h @@ -22,36 +22,36 @@ extern "C" { #endif -/* Engines are collections of methods. Methods are tables of function pointers, - * defined for certain algorithms, that allow operations on those algorithms to - * be overridden via a callback. This can be used, for example, to implement an - * RSA* that forwards operations to a hardware module. - * - * Methods are reference counted but |ENGINE|s are not. When creating a method, - * you should zero the whole structure and fill in the function pointers that - * you wish before setting it on an |ENGINE|. Any functions pointers that - * are NULL indicate that the default behaviour should be used. */ +// Engines are collections of methods. Methods are tables of function pointers, +// defined for certain algorithms, that allow operations on those algorithms to +// be overridden via a callback. This can be used, for example, to implement an +// RSA* that forwards operations to a hardware module. +// +// Methods are reference counted but |ENGINE|s are not. When creating a method, +// you should zero the whole structure and fill in the function pointers that +// you wish before setting it on an |ENGINE|. Any functions pointers that +// are NULL indicate that the default behaviour should be used. -/* Allocation and destruction. */ +// Allocation and destruction. -/* ENGINE_new returns an empty ENGINE that uses the default method for all - * algorithms. */ +// ENGINE_new returns an empty ENGINE that uses the default method for all +// algorithms. OPENSSL_EXPORT ENGINE *ENGINE_new(void); -/* ENGINE_free decrements the reference counts for all methods linked from - * |engine| and frees |engine| itself. */ +// ENGINE_free decrements the reference counts for all methods linked from +// |engine| and frees |engine| itself. OPENSSL_EXPORT void ENGINE_free(ENGINE *engine); -/* Method accessors. - * - * Method accessors take a method pointer and the size of the structure. The - * size allows for ABI compatibility in the case that the method structure is - * extended with extra elements at the end. Methods are always copied by the - * set functions. - * - * Set functions return one on success and zero on allocation failure. */ +// Method accessors. +// +// Method accessors take a method pointer and the size of the structure. The +// size allows for ABI compatibility in the case that the method structure is +// extended with extra elements at the end. Methods are always copied by the +// set functions. +// +// Set functions return one on success and zero on allocation failure. OPENSSL_EXPORT int ENGINE_set_RSA_method(ENGINE *engine, const RSA_METHOD *method, @@ -64,33 +64,33 @@ OPENSSL_EXPORT int ENGINE_set_ECDSA_method(ENGINE *engine, OPENSSL_EXPORT ECDSA_METHOD *ENGINE_get_ECDSA_method(const ENGINE *engine); -/* Generic method functions. 
- * - * These functions take a void* type but actually operate on all method - * structures. */ +// Generic method functions. +// +// These functions take a void* type but actually operate on all method +// structures. -/* METHOD_ref increments the reference count of |method|. This is a no-op for - * now because all methods are currently static. */ +// METHOD_ref increments the reference count of |method|. This is a no-op for +// now because all methods are currently static. void METHOD_ref(void *method); -/* METHOD_unref decrements the reference count of |method| and frees it if the - * reference count drops to zero. This is a no-op for now because all methods - * are currently static. */ +// METHOD_unref decrements the reference count of |method| and frees it if the +// reference count drops to zero. This is a no-op for now because all methods +// are currently static. void METHOD_unref(void *method); -/* Private functions. */ +// Private functions. -/* openssl_method_common_st contains the common part of all method structures. - * This must be the first member of all method structures. */ +// openssl_method_common_st contains the common part of all method structures. +// This must be the first member of all method structures. struct openssl_method_common_st { - int references; /* dummy – not used. */ + int references; // dummy – not used. char is_static; }; #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -100,10 +100,10 @@ BORINGSSL_MAKE_DELETER(ENGINE, ENGINE_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif #define ENGINE_R_OPERATION_NOT_SUPPORTED 100 -#endif /* OPENSSL_HEADER_ENGINE_H */ +#endif // OPENSSL_HEADER_ENGINE_H diff --git a/Sources/BoringSSL/include/openssl/err.h b/Sources/BoringSSL/include/openssl/err.h index a747b3037..9a65ffb6f 100644 --- a/Sources/BoringSSL/include/openssl/err.h +++ b/Sources/BoringSSL/include/openssl/err.h @@ -118,73 +118,86 @@ extern "C" { #endif -/* Error queue handling functions. - * - * Errors in OpenSSL are generally signaled by the return value of a function. - * When a function fails it may add an entry to a per-thread error queue, - * which is managed by the functions in this header. - * - * Each error contains: - * 1) The library (i.e. ec, pem, rsa) which created it. - * 2) The file and line number of the call that added the error. - * 3) A pointer to some error specific data, which may be NULL. - * - * The library identifier and reason code are packed in a uint32_t and there - * exist various functions for unpacking it. - * - * The typical behaviour is that an error will occur deep in a call queue and - * that code will push an error onto the error queue. As the error queue - * unwinds, other functions will push their own errors. Thus, the "least - * recent" error is the most specific and the other errors will provide a - * backtrace of sorts. */ - - -/* Startup and shutdown. */ - -/* ERR_load_BIO_strings does nothing. - * - * TODO(fork): remove. libjingle calls this. */ +// Error queue handling functions. +// +// Errors in OpenSSL are generally signaled by the return value of a function. +// When a function fails it may add an entry to a per-thread error queue, +// which is managed by the functions in this header. +// +// Each error contains: +// 1) The library (i.e. ec, pem, rsa) which created it. +// 2) The file and line number of the call that added the error. +// 3) A pointer to some error specific data, which may be NULL. 
+// +// The library identifier and reason code are packed in a uint32_t and there +// exist various functions for unpacking it. +// +// The typical behaviour is that an error will occur deep in a call queue and +// that code will push an error onto the error queue. As the error queue +// unwinds, other functions will push their own errors. Thus, the "least +// recent" error is the most specific and the other errors will provide a +// backtrace of sorts. + + +// Startup and shutdown. + +// ERR_load_BIO_strings does nothing. +// +// TODO(fork): remove. libjingle calls this. OPENSSL_EXPORT void ERR_load_BIO_strings(void); -/* ERR_load_ERR_strings does nothing. */ +// ERR_load_ERR_strings does nothing. OPENSSL_EXPORT void ERR_load_ERR_strings(void); -/* ERR_load_crypto_strings does nothing. */ +// ERR_load_crypto_strings does nothing. OPENSSL_EXPORT void ERR_load_crypto_strings(void); -/* ERR_free_strings does nothing. */ +// ERR_free_strings does nothing. OPENSSL_EXPORT void ERR_free_strings(void); -/* Reading and formatting errors. */ +// Reading and formatting errors. + +// ERR_GET_LIB returns the library code for the error. This is one of +// the |ERR_LIB_*| values. +#define ERR_GET_LIB(packed_error) ((int)(((packed_error) >> 24) & 0xff)) + +// ERR_GET_REASON returns the reason code for the error. This is one of +// library-specific |LIB_R_*| values where |LIB| is the library (see +// |ERR_GET_LIB|). Note that reason codes are specific to the library. +#define ERR_GET_REASON(packed_error) ((int)((packed_error) & 0xfff)) -/* ERR_get_error gets the packed error code for the least recent error and - * removes that error from the queue. If there are no errors in the queue then - * it returns zero. */ +// ERR_get_error gets the packed error code for the least recent error and +// removes that error from the queue. If there are no errors in the queue then +// it returns zero. OPENSSL_EXPORT uint32_t ERR_get_error(void); -/* ERR_get_error_line acts like |ERR_get_error|, except that the file and line - * number of the call that added the error are also returned. */ +// ERR_get_error_line acts like |ERR_get_error|, except that the file and line +// number of the call that added the error are also returned. OPENSSL_EXPORT uint32_t ERR_get_error_line(const char **file, int *line); -/* ERR_get_error_line_data acts like |ERR_get_error_line|, but also returns the - * error-specific data pointer and flags. The flags are a bitwise-OR of - * |ERR_FLAG_*| values. The error-specific data is owned by the error queue - * and the pointer becomes invalid after the next call that affects the same - * thread's error queue. If |*flags| contains |ERR_FLAG_STRING| then |*data| is - * human-readable. */ +// ERR_FLAG_STRING means that the |data| member is a NUL-terminated string that +// can be printed. This is always set if |data| is non-NULL. +#define ERR_FLAG_STRING 1 + +// ERR_get_error_line_data acts like |ERR_get_error_line|, but also returns the +// error-specific data pointer and flags. The flags are a bitwise-OR of +// |ERR_FLAG_*| values. The error-specific data is owned by the error queue +// and the pointer becomes invalid after the next call that affects the same +// thread's error queue. If |*flags| contains |ERR_FLAG_STRING| then |*data| is +// human-readable. OPENSSL_EXPORT uint32_t ERR_get_error_line_data(const char **file, int *line, const char **data, int *flags); -/* The "peek" functions act like the |ERR_get_error| functions, above, but they - * do not remove the error from the queue. 
*/ +// The "peek" functions act like the |ERR_get_error| functions, above, but they +// do not remove the error from the queue. OPENSSL_EXPORT uint32_t ERR_peek_error(void); OPENSSL_EXPORT uint32_t ERR_peek_error_line(const char **file, int *line); OPENSSL_EXPORT uint32_t ERR_peek_error_line_data(const char **file, int *line, const char **data, int *flags); -/* The "peek last" functions act like the "peek" functions, above, except that - * they return the most recent error. */ +// The "peek last" functions act like the "peek" functions, above, except that +// they return the most recent error. OPENSSL_EXPORT uint32_t ERR_peek_last_error(void); OPENSSL_EXPORT uint32_t ERR_peek_last_error_line(const char **file, int *line); OPENSSL_EXPORT uint32_t ERR_peek_last_error_line_data(const char **file, @@ -192,198 +205,93 @@ OPENSSL_EXPORT uint32_t ERR_peek_last_error_line_data(const char **file, const char **data, int *flags); -/* ERR_error_string generates a human-readable string representing - * |packed_error|, places it at |buf| (which must be at least - * ERR_ERROR_STRING_BUF_LEN bytes long) and returns |buf|. If |buf| is NULL, - * the error string is placed in a static buffer which is returned. (The static - * buffer may be overridden by concurrent calls in other threads so this form - * is deprecated.) - * - * The string will have the following format: - * - * error:[error code]:[library name]:OPENSSL_internal:[reason string] - * - * error code is an 8 digit hexadecimal number; library name and reason string - * are ASCII text. - * - * TODO(fork): remove in favour of |ERR_error_string_n|. */ -OPENSSL_EXPORT char *ERR_error_string(uint32_t packed_error, char *buf); -#define ERR_ERROR_STRING_BUF_LEN 256 - -/* ERR_error_string_n is a variant of |ERR_error_string| that writes at most - * len characters (including the terminating NUL) and truncates the string if - * necessary. If |len| is greater than zero then |buf| is always NUL - * terminated. */ +// ERR_error_string_n generates a human-readable string representing +// |packed_error| and places it at |buf|. It writes at most |len| bytes +// (including the terminating NUL) and truncates the string if necessary. If +// |len| is greater than zero then |buf| is always NUL terminated. +// +// The string will have the following format: +// +// error:[error code]:[library name]:OPENSSL_internal:[reason string] +// +// error code is an 8 digit hexadecimal number; library name and reason string +// are ASCII text. OPENSSL_EXPORT void ERR_error_string_n(uint32_t packed_error, char *buf, size_t len); -/* ERR_lib_error_string returns a string representation of the library that - * generated |packed_error|. */ +// ERR_lib_error_string returns a string representation of the library that +// generated |packed_error|. OPENSSL_EXPORT const char *ERR_lib_error_string(uint32_t packed_error); -/* ERR_reason_error_string returns a string representation of the reason for - * |packed_error|. */ +// ERR_reason_error_string returns a string representation of the reason for +// |packed_error|. OPENSSL_EXPORT const char *ERR_reason_error_string(uint32_t packed_error); -/* ERR_print_errors_callback_t is the type of a function used by - * |ERR_print_errors_cb|. It takes a pointer to a human readable string (and - * its length) that describes an entry in the error queue. The |ctx| argument - * is an opaque pointer given to |ERR_print_errors_cb|. - * - * It should return one on success or zero on error, which will stop the - * iteration over the error queue. 
*/ +// ERR_print_errors_callback_t is the type of a function used by +// |ERR_print_errors_cb|. It takes a pointer to a human readable string (and +// its length) that describes an entry in the error queue. The |ctx| argument +// is an opaque pointer given to |ERR_print_errors_cb|. +// +// It should return one on success or zero on error, which will stop the +// iteration over the error queue. typedef int (*ERR_print_errors_callback_t)(const char *str, size_t len, void *ctx); -/* ERR_print_errors_cb calls |callback| with a string representation of each - * error in the current thread's error queue, from the least recent to the most - * recent error. - * - * The string will have the following format (which differs from - * |ERR_error_string|): - * - * [thread id]:error:[error code]:[library name]:OPENSSL_internal: - * [reason string]:[file]:[line number]:[optional string data] - * - * (All in one line.) - * - * The callback can return one to continue the iteration or zero to stop it. - * The |ctx| argument is an opaque value that is passed through to the - * callback. */ +// ERR_print_errors_cb clears the current thread's error queue, calling +// |callback| with a string representation of each error, from the least recent +// to the most recent error. +// +// The string will have the following format (which differs from +// |ERR_error_string|): +// +// [thread id]:error:[error code]:[library name]:OPENSSL_internal:[reason string]:[file]:[line number]:[optional string data] +// +// The callback can return one to continue the iteration or zero to stop it. +// The |ctx| argument is an opaque value that is passed through to the +// callback. OPENSSL_EXPORT void ERR_print_errors_cb(ERR_print_errors_callback_t callback, void *ctx); -/* ERR_print_errors_fp prints the current contents of the error stack to |file| - * using human readable strings where possible. */ +// ERR_print_errors_fp clears the current thread's error queue, printing each +// error to |file|. See |ERR_print_errors_cb| for the format. OPENSSL_EXPORT void ERR_print_errors_fp(FILE *file); -/* Clearing errors. */ +// Clearing errors. -/* ERR_clear_error clears the error queue for the current thread. */ +// ERR_clear_error clears the error queue for the current thread. OPENSSL_EXPORT void ERR_clear_error(void); -/* ERR_remove_thread_state clears the error queue for the current thread if - * |tid| is NULL. Otherwise it calls |assert(0)|, because it's no longer - * possible to delete the error queue for other threads. - * - * Error queues are thread-local data and are deleted automatically. You do not - * need to call this function. Use |ERR_clear_error|. */ +// ERR_remove_thread_state clears the error queue for the current thread if +// |tid| is NULL. Otherwise it calls |assert(0)|, because it's no longer +// possible to delete the error queue for other threads. +// +// Error queues are thread-local data and are deleted automatically. You do not +// need to call this function. Use |ERR_clear_error|. OPENSSL_EXPORT void ERR_remove_thread_state(const CRYPTO_THREADID *tid); - -/* Custom errors. */ - -/* ERR_get_next_error_library returns a value suitable for passing as the - * |library| argument to |ERR_put_error|. This is intended for code that wishes - * to push its own, non-standard errors to the error queue. */ -OPENSSL_EXPORT int ERR_get_next_error_library(void); - - -/* Deprecated functions. */ - -/* ERR_remove_state calls |ERR_clear_error|. 
*/ -OPENSSL_EXPORT void ERR_remove_state(unsigned long pid); - -/* ERR_func_error_string returns the string "OPENSSL_internal". */ -OPENSSL_EXPORT const char *ERR_func_error_string(uint32_t packed_error); - - -/* Private functions. */ - -/* ERR_clear_system_error clears the system's error value (i.e. errno). */ -OPENSSL_EXPORT void ERR_clear_system_error(void); - -/* OPENSSL_PUT_ERROR is used by OpenSSL code to add an error to the error - * queue. */ -#define OPENSSL_PUT_ERROR(library, reason) \ - ERR_put_error(ERR_LIB_##library, 0, reason, __FILE__, __LINE__) - -/* OPENSSL_PUT_SYSTEM_ERROR is used by OpenSSL code to add an error from the - * operating system to the error queue. - * TODO(fork): include errno. */ -#define OPENSSL_PUT_SYSTEM_ERROR() \ - ERR_put_error(ERR_LIB_SYS, 0, 0, __FILE__, __LINE__); - -/* ERR_put_error adds an error to the error queue, dropping the least recent - * error if necessary for space reasons. */ -OPENSSL_EXPORT void ERR_put_error(int library, int unused, int reason, - const char *file, unsigned line); - -/* ERR_add_error_data takes a variable number (|count|) of const char* - * pointers, concatenates them and sets the result as the data on the most - * recent error. */ -OPENSSL_EXPORT void ERR_add_error_data(unsigned count, ...); - -/* ERR_add_error_dataf takes a printf-style format and arguments, and sets the - * result as the data on the most recent error. */ -OPENSSL_EXPORT void ERR_add_error_dataf(const char *format, ...) - OPENSSL_PRINTF_FORMAT_FUNC(1, 2); - -/* ERR_set_mark "marks" the most recent error for use with |ERR_pop_to_mark|. - * It returns one if an error was marked and zero if there are no errors. */ +// ERR_set_mark "marks" the most recent error for use with |ERR_pop_to_mark|. +// It returns one if an error was marked and zero if there are no errors. OPENSSL_EXPORT int ERR_set_mark(void); -/* ERR_pop_to_mark removes errors from the most recent to the least recent - * until (and not including) a "marked" error. It returns zero if no marked - * error was found (and thus all errors were removed) and one otherwise. Errors - * are marked using |ERR_set_mark|. */ +// ERR_pop_to_mark removes errors from the most recent to the least recent +// until (and not including) a "marked" error. It returns zero if no marked +// error was found (and thus all errors were removed) and one otherwise. Errors +// are marked using |ERR_set_mark|. OPENSSL_EXPORT int ERR_pop_to_mark(void); -struct err_error_st { - /* file contains the filename where the error occurred. */ - const char *file; - /* data contains optional data. It must be freed with |OPENSSL_free| if - * |flags&ERR_FLAG_MALLOCED|. */ - char *data; - /* packed contains the error library and reason, as packed by ERR_PACK. */ - uint32_t packed; - /* line contains the line number where the error occurred. */ - uint16_t line; - /* flags contains a bitwise-OR of ERR_FLAG_* values. */ - uint8_t flags; -}; -/* ERR_FLAG_STRING means that the |data| member is a NUL-terminated string that - * can be printed. */ -#define ERR_FLAG_STRING 1 -/* ERR_TXT_STRING is provided for compatibility with code that assumes that - * it's using OpenSSL. */ -#define ERR_TXT_STRING ERR_FLAG_STRING - -/* ERR_FLAG_PUBLIC_MASK is applied to the flags field before it is returned - * from functions like |ERR_get_error_line_data|. */ -#define ERR_FLAG_PUBLIC_MASK 0xf - -/* The following flag values are internal and are masked when flags are - * returned from functions like |ERR_get_error_line_data|. */ +// Custom errors. 
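Before the Custom errors block continues below, a sketch of the |ERR_set_mark|/|ERR_pop_to_mark| pair documented just above; the callback-based helper is purely illustrative.

    #include <openssl/err.h>

    // Runs a best-effort operation without leaving its errors on the queue.
    // |try_op| is a hypothetical caller-supplied callback.
    int try_quietly(int (*try_op)(void *), void *arg) {
      ERR_set_mark();       // Remember the current top of the error queue.
      int ok = try_op(arg);
      if (!ok) {
        ERR_pop_to_mark();  // Drop everything the failed attempt pushed.
      }
      return ok;
    }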
-/* ERR_FLAG_MALLOCED means the the |data| member must be freed when no longer - * needed. */ -#define ERR_FLAG_MALLOCED 16 -/* ERR_FLAG_MARK is used to indicate a reversion point in the queue. See - * |ERR_pop_to_mark|. */ -#define ERR_FLAG_MARK 32 +// ERR_get_next_error_library returns a value suitable for passing as the +// |library| argument to |ERR_put_error|. This is intended for code that wishes +// to push its own, non-standard errors to the error queue. +OPENSSL_EXPORT int ERR_get_next_error_library(void); -/* ERR_NUM_ERRORS is the limit of the number of errors in the queue. */ -#define ERR_NUM_ERRORS 16 -/* err_state_st (aka |ERR_STATE|) contains the per-thread, error queue. */ -typedef struct err_state_st { - /* errors contains the ERR_NUM_ERRORS most recent errors, organised as a ring - * buffer. */ - struct err_error_st errors[ERR_NUM_ERRORS]; - /* top contains the index one past the most recent error. If |top| equals - * |bottom| then the queue is empty. */ - unsigned top; - /* bottom contains the index of the last error in the queue. */ - unsigned bottom; - - /* to_free, if not NULL, contains a pointer owned by this structure that was - * previously a |data| pointer of one of the elements of |errors|. */ - void *to_free; -} ERR_STATE; +// Built-in library and reason codes. +// The following values are built-in library codes. enum { ERR_LIB_NONE = 1, ERR_LIB_SYS, @@ -420,6 +328,8 @@ enum { ERR_NUM_LIBS }; +// The following reason codes used to denote an error occuring in another +// library. They are sometimes used for a stack trace. #define ERR_R_SYS_LIB ERR_LIB_SYS #define ERR_R_BN_LIB ERR_LIB_BN #define ERR_R_RSA_LIB ERR_LIB_RSA @@ -458,7 +368,7 @@ enum { #define ERR_R_CIPHER_LIB ERR_LIB_CIPHER #define ERR_R_HKDF_LIB ERR_LIB_HKDF -/* Global reasons. */ +// The following values are global reason codes. They may occur in any library. #define ERR_R_FATAL 64 #define ERR_R_MALLOC_FAILURE (1 | ERR_R_FATAL) #define ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED (2 | ERR_R_FATAL) @@ -466,23 +376,83 @@ enum { #define ERR_R_INTERNAL_ERROR (4 | ERR_R_FATAL) #define ERR_R_OVERFLOW (5 | ERR_R_FATAL) -#define ERR_PACK(lib, reason) \ - (((((uint32_t)(lib)) & 0xff) << 24) | ((((uint32_t)(reason)) & 0xfff))) -#define ERR_GET_LIB(packed_error) ((int)(((packed_error) >> 24) & 0xff)) +// Deprecated functions. + +// ERR_remove_state calls |ERR_clear_error|. +OPENSSL_EXPORT void ERR_remove_state(unsigned long pid); + +// ERR_func_error_string returns the string "OPENSSL_internal". +OPENSSL_EXPORT const char *ERR_func_error_string(uint32_t packed_error); + +// ERR_error_string behaves like |ERR_error_string_n| but |len| is implicitly +// |ERR_ERROR_STRING_BUF_LEN| and it returns |buf|. If |buf| is NULL, the error +// string is placed in a static buffer which is returned. (The static buffer may +// be overridden by concurrent calls in other threads so this form should not be +// used.) +// +// Use |ERR_error_string_n| instead. +// +// TODO(fork): remove this function. +OPENSSL_EXPORT char *ERR_error_string(uint32_t packed_error, char *buf); +#define ERR_ERROR_STRING_BUF_LEN 256 + +// ERR_GET_FUNC returns zero. BoringSSL errors do not report a function code. #define ERR_GET_FUNC(packed_error) 0 -#define ERR_GET_REASON(packed_error) ((int)((packed_error) & 0xfff)) -/* OPENSSL_DECLARE_ERROR_REASON is used by util/make_errors.h (which generates - * the error defines) to recognise that an additional reason value is needed. 
- * This is needed when the reason value is used outside of an - * |OPENSSL_PUT_ERROR| macro. The resulting define will be - * ${lib}_R_${reason}. */ +// ERR_TXT_STRING is provided for compatibility with code that assumes that +// it's using OpenSSL. +#define ERR_TXT_STRING ERR_FLAG_STRING + + +// Private functions. + +// ERR_clear_system_error clears the system's error value (i.e. errno). +OPENSSL_EXPORT void ERR_clear_system_error(void); + +// OPENSSL_PUT_ERROR is used by OpenSSL code to add an error to the error +// queue. +#define OPENSSL_PUT_ERROR(library, reason) \ + ERR_put_error(ERR_LIB_##library, 0, reason, __FILE__, __LINE__) + +// OPENSSL_PUT_SYSTEM_ERROR is used by OpenSSL code to add an error from the +// operating system to the error queue. +// TODO(fork): include errno. +#define OPENSSL_PUT_SYSTEM_ERROR() \ + ERR_put_error(ERR_LIB_SYS, 0, 0, __FILE__, __LINE__); + +// ERR_put_error adds an error to the error queue, dropping the least recent +// error if necessary for space reasons. +OPENSSL_EXPORT void ERR_put_error(int library, int unused, int reason, + const char *file, unsigned line); + +// ERR_add_error_data takes a variable number (|count|) of const char* +// pointers, concatenates them and sets the result as the data on the most +// recent error. +OPENSSL_EXPORT void ERR_add_error_data(unsigned count, ...); + +// ERR_add_error_dataf takes a printf-style format and arguments, and sets the +// result as the data on the most recent error. +OPENSSL_EXPORT void ERR_add_error_dataf(const char *format, ...) + OPENSSL_PRINTF_FORMAT_FUNC(1, 2); + +// ERR_NUM_ERRORS is one more than the limit of the number of errors in the +// queue. +#define ERR_NUM_ERRORS 16 + +#define ERR_PACK(lib, reason) \ + (((((uint32_t)(lib)) & 0xff) << 24) | ((((uint32_t)(reason)) & 0xfff))) + +// OPENSSL_DECLARE_ERROR_REASON is used by util/make_errors.h (which generates +// the error defines) to recognise that an additional reason value is needed. +// This is needed when the reason value is used outside of an +// |OPENSSL_PUT_ERROR| macro. The resulting define will be +// ${lib}_R_${reason}. #define OPENSSL_DECLARE_ERROR_REASON(lib, reason) #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_ERR_H */ +#endif // OPENSSL_HEADER_ERR_H diff --git a/Sources/BoringSSL/include/openssl/evp.h b/Sources/BoringSSL/include/openssl/evp.h index 7debbc5a3..f5eb20073 100644 --- a/Sources/BoringSSL/include/openssl/evp.h +++ b/Sources/BoringSSL/include/openssl/evp.h @@ -61,10 +61,10 @@ #include -/* OpenSSL included digest and cipher functions in this header so we include - * them for users that still expect that. - * - * TODO(fork): clean up callers so that they include what they use. */ +// OpenSSL included digest and cipher functions in this header so we include +// them for users that still expect that. +// +// TODO(fork): clean up callers so that they include what they use. #include #include #include @@ -76,77 +76,71 @@ extern "C" { #endif -/* EVP abstracts over public/private key algorithms. */ +// EVP abstracts over public/private key algorithms. -/* Public key objects. */ +// Public key objects. -/* EVP_PKEY_new creates a new, empty public-key object and returns it or NULL - * on allocation failure. */ +// EVP_PKEY_new creates a new, empty public-key object and returns it or NULL +// on allocation failure. OPENSSL_EXPORT EVP_PKEY *EVP_PKEY_new(void); -/* EVP_PKEY_free frees all data referenced by |pkey| and then frees |pkey| - * itself. 
*/ +// EVP_PKEY_free frees all data referenced by |pkey| and then frees |pkey| +// itself. OPENSSL_EXPORT void EVP_PKEY_free(EVP_PKEY *pkey); -/* EVP_PKEY_up_ref increments the reference count of |pkey| and returns one. */ +// EVP_PKEY_up_ref increments the reference count of |pkey| and returns one. OPENSSL_EXPORT int EVP_PKEY_up_ref(EVP_PKEY *pkey); -/* EVP_PKEY_is_opaque returns one if |pkey| is opaque. Opaque keys are backed by - * custom implementations which do not expose key material and parameters. It is - * an error to attempt to duplicate, export, or compare an opaque key. */ +// EVP_PKEY_is_opaque returns one if |pkey| is opaque. Opaque keys are backed by +// custom implementations which do not expose key material and parameters. It is +// an error to attempt to duplicate, export, or compare an opaque key. OPENSSL_EXPORT int EVP_PKEY_is_opaque(const EVP_PKEY *pkey); -/* EVP_PKEY_supports_digest returns one if |pkey| supports digests of - * type |md|. This is intended for use with EVP_PKEYs backing custom - * implementations which can't sign all digests. */ -OPENSSL_EXPORT int EVP_PKEY_supports_digest(const EVP_PKEY *pkey, - const EVP_MD *md); - -/* EVP_PKEY_cmp compares |a| and |b| and returns one if they are equal, zero if - * not and a negative number on error. - * - * WARNING: this differs from the traditional return value of a "cmp" - * function. */ +// EVP_PKEY_cmp compares |a| and |b| and returns one if they are equal, zero if +// not and a negative number on error. +// +// WARNING: this differs from the traditional return value of a "cmp" +// function. OPENSSL_EXPORT int EVP_PKEY_cmp(const EVP_PKEY *a, const EVP_PKEY *b); -/* EVP_PKEY_copy_parameters sets the parameters of |to| to equal the parameters - * of |from|. It returns one on success and zero on error. */ +// EVP_PKEY_copy_parameters sets the parameters of |to| to equal the parameters +// of |from|. It returns one on success and zero on error. OPENSSL_EXPORT int EVP_PKEY_copy_parameters(EVP_PKEY *to, const EVP_PKEY *from); -/* EVP_PKEY_missing_parameters returns one if |pkey| is missing needed - * parameters or zero if not, or if the algorithm doesn't take parameters. */ +// EVP_PKEY_missing_parameters returns one if |pkey| is missing needed +// parameters or zero if not, or if the algorithm doesn't take parameters. OPENSSL_EXPORT int EVP_PKEY_missing_parameters(const EVP_PKEY *pkey); -/* EVP_PKEY_size returns the maximum size, in bytes, of a signature signed by - * |pkey|. For an RSA key, this returns the number of bytes needed to represent - * the modulus. For an EC key, this returns the maximum size of a DER-encoded - * ECDSA signature. */ +// EVP_PKEY_size returns the maximum size, in bytes, of a signature signed by +// |pkey|. For an RSA key, this returns the number of bytes needed to represent +// the modulus. For an EC key, this returns the maximum size of a DER-encoded +// ECDSA signature. OPENSSL_EXPORT int EVP_PKEY_size(const EVP_PKEY *pkey); -/* EVP_PKEY_bits returns the "size", in bits, of |pkey|. For an RSA key, this - * returns the bit length of the modulus. For an EC key, this returns the bit - * length of the group order. */ +// EVP_PKEY_bits returns the "size", in bits, of |pkey|. For an RSA key, this +// returns the bit length of the modulus. For an EC key, this returns the bit +// length of the group order. OPENSSL_EXPORT int EVP_PKEY_bits(EVP_PKEY *pkey); -/* EVP_PKEY_id returns the type of |pkey|, which is one of the |EVP_PKEY_*| - * values. 
*/ +// EVP_PKEY_id returns the type of |pkey|, which is one of the |EVP_PKEY_*| +// values. OPENSSL_EXPORT int EVP_PKEY_id(const EVP_PKEY *pkey); -/* EVP_PKEY_type returns |nid| if |nid| is a known key type and |NID_undef| - * otherwise. */ +// EVP_PKEY_type returns |nid| if |nid| is a known key type and |NID_undef| +// otherwise. OPENSSL_EXPORT int EVP_PKEY_type(int nid); -/* Getting and setting concrete public key types. - * - * The following functions get and set the underlying public key in an - * |EVP_PKEY| object. The |set1| functions take an additional reference to the - * underlying key and return one on success or zero on error. The |assign| - * functions adopt the caller's reference. The |get1| functions return a fresh - * reference to the underlying object or NULL if |pkey| is not of the correct - * type. The |get0| functions behave the same but return a non-owning - * pointer. */ +// Getting and setting concrete public key types. +// +// The following functions get and set the underlying public key in an +// |EVP_PKEY| object. The |set1| functions take an additional reference to the +// underlying key and return one on success or zero on error. The |assign| +// functions adopt the caller's reference. The |get1| functions return a fresh +// reference to the underlying object or NULL if |pkey| is not of the correct +// type. The |get0| functions behave the same but return a non-owning +// pointer. OPENSSL_EXPORT int EVP_PKEY_set1_RSA(EVP_PKEY *pkey, RSA *key); OPENSSL_EXPORT int EVP_PKEY_assign_RSA(EVP_PKEY *pkey, RSA *key); @@ -163,511 +157,585 @@ OPENSSL_EXPORT int EVP_PKEY_assign_EC_KEY(EVP_PKEY *pkey, EC_KEY *key); OPENSSL_EXPORT EC_KEY *EVP_PKEY_get0_EC_KEY(EVP_PKEY *pkey); OPENSSL_EXPORT EC_KEY *EVP_PKEY_get1_EC_KEY(EVP_PKEY *pkey); +// EVP_PKEY_new_ed25519_public returns a newly allocated |EVP_PKEY| wrapping an +// Ed25519 public key, or NULL on allocation error. +OPENSSL_EXPORT EVP_PKEY *EVP_PKEY_new_ed25519_public( + const uint8_t public_key[32]); + +// EVP_PKEY_new_ed25519_private returns a newly allocated |EVP_PKEY| wrapping an +// Ed25519 private key, or NULL on allocation error. +OPENSSL_EXPORT EVP_PKEY *EVP_PKEY_new_ed25519_private( + const uint8_t private_key[64]); + #define EVP_PKEY_NONE NID_undef #define EVP_PKEY_RSA NID_rsaEncryption #define EVP_PKEY_DSA NID_dsa #define EVP_PKEY_EC NID_X9_62_id_ecPublicKey +#define EVP_PKEY_ED25519 NID_ED25519 -/* EVP_PKEY_assign sets the underlying key of |pkey| to |key|, which must be of - * the given type. The |type| argument should be one of the |EVP_PKEY_*| - * values. */ +// EVP_PKEY_assign sets the underlying key of |pkey| to |key|, which must be of +// the given type. The |type| argument should be one of the |EVP_PKEY_*| +// values. OPENSSL_EXPORT int EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key); -/* EVP_PKEY_set_type sets the type of |pkey| to |type|, which should be one of - * the |EVP_PKEY_*| values. It returns one if successful or zero otherwise. If - * |pkey| is NULL, it simply reports whether the type is known. */ +// EVP_PKEY_set_type sets the type of |pkey| to |type|, which should be one of +// the |EVP_PKEY_*| values. It returns one if successful or zero otherwise. If +// |pkey| is NULL, it simply reports whether the type is known. OPENSSL_EXPORT int EVP_PKEY_set_type(EVP_PKEY *pkey, int type); -/* EVP_PKEY_cmp_parameters compares the parameters of |a| and |b|. It returns - * one if they match, zero if not, or a negative number of on error. 
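A sketch of wrapping a concrete key with the |set1| accessors listed above, then querying it through the generic |EVP_PKEY| interface; the helper name and the P-256 choice are illustrative.

    #include <assert.h>
    #include <openssl/ec_key.h>
    #include <openssl/evp.h>
    #include <openssl/nid.h>

    // Generates a P-256 key and wraps it in an EVP_PKEY. Returns NULL on error.
    EVP_PKEY *make_p256_evp_pkey(void) {
      EC_KEY *ec = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
      EVP_PKEY *pkey = EVP_PKEY_new();
      if (ec == NULL || pkey == NULL || !EC_KEY_generate_key(ec) ||
          !EVP_PKEY_set1_EC_KEY(pkey, ec)) {  // set1: |pkey| takes its own ref.
        EVP_PKEY_free(pkey);
        pkey = NULL;
      }
      EC_KEY_free(ec);  // Drop the local reference; |pkey| keeps its own.
      if (pkey != NULL) {
        assert(EVP_PKEY_id(pkey) == EVP_PKEY_EC);
        assert(EVP_PKEY_size(pkey) > 0);  // Bounds a DER-encoded ECDSA signature.
      }
      return pkey;
    }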
- * - * WARNING: the return value differs from the usual return value convention. */ +// EVP_PKEY_cmp_parameters compares the parameters of |a| and |b|. It returns +// one if they match, zero if not, or a negative number of on error. +// +// WARNING: the return value differs from the usual return value convention. OPENSSL_EXPORT int EVP_PKEY_cmp_parameters(const EVP_PKEY *a, const EVP_PKEY *b); -/* ASN.1 functions */ +// ASN.1 functions -/* EVP_parse_public_key decodes a DER-encoded SubjectPublicKeyInfo structure - * (RFC 5280) from |cbs| and advances |cbs|. It returns a newly-allocated - * |EVP_PKEY| or NULL on error. - * - * The caller must check the type of the parsed public key to ensure it is - * suitable and validate other desired key properties such as RSA modulus size - * or EC curve. */ +// EVP_parse_public_key decodes a DER-encoded SubjectPublicKeyInfo structure +// (RFC 5280) from |cbs| and advances |cbs|. It returns a newly-allocated +// |EVP_PKEY| or NULL on error. +// +// The caller must check the type of the parsed public key to ensure it is +// suitable and validate other desired key properties such as RSA modulus size +// or EC curve. OPENSSL_EXPORT EVP_PKEY *EVP_parse_public_key(CBS *cbs); -/* EVP_marshal_public_key marshals |key| as a DER-encoded SubjectPublicKeyInfo - * structure (RFC 5280) and appends the result to |cbb|. It returns one on - * success and zero on error. */ +// EVP_marshal_public_key marshals |key| as a DER-encoded SubjectPublicKeyInfo +// structure (RFC 5280) and appends the result to |cbb|. It returns one on +// success and zero on error. OPENSSL_EXPORT int EVP_marshal_public_key(CBB *cbb, const EVP_PKEY *key); -/* EVP_parse_private_key decodes a DER-encoded PrivateKeyInfo structure (RFC - * 5208) from |cbs| and advances |cbs|. It returns a newly-allocated |EVP_PKEY| - * or NULL on error. - * - * The caller must check the type of the parsed private key to ensure it is - * suitable and validate other desired key properties such as RSA modulus size - * or EC curve. - * - * A PrivateKeyInfo ends with an optional set of attributes. These are not - * processed and so this function will silently ignore any trailing data in the - * structure. */ +// EVP_parse_private_key decodes a DER-encoded PrivateKeyInfo structure (RFC +// 5208) from |cbs| and advances |cbs|. It returns a newly-allocated |EVP_PKEY| +// or NULL on error. +// +// The caller must check the type of the parsed private key to ensure it is +// suitable and validate other desired key properties such as RSA modulus size +// or EC curve. +// +// A PrivateKeyInfo ends with an optional set of attributes. These are not +// processed and so this function will silently ignore any trailing data in the +// structure. OPENSSL_EXPORT EVP_PKEY *EVP_parse_private_key(CBS *cbs); -/* EVP_marshal_private_key marshals |key| as a DER-encoded PrivateKeyInfo - * structure (RFC 5208) and appends the result to |cbb|. It returns one on - * success and zero on error. */ +// EVP_marshal_private_key marshals |key| as a DER-encoded PrivateKeyInfo +// structure (RFC 5208) and appends the result to |cbb|. It returns one on +// success and zero on error. OPENSSL_EXPORT int EVP_marshal_private_key(CBB *cbb, const EVP_PKEY *key); -/* Signing */ +// Signing -/* EVP_DigestSignInit sets up |ctx| for a signing operation with |type| and - * |pkey|. The |ctx| argument must have been initialised with - * |EVP_MD_CTX_init|. 
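A round-trip sketch for |EVP_parse_public_key| and |EVP_marshal_public_key| using the CBS/CBB interfaces from <openssl/bytestring.h>; the helper name is illustrative and |der| is assumed to hold a DER SubjectPublicKeyInfo.

    #include <stdint.h>
    #include <openssl/bytestring.h>
    #include <openssl/evp.h>
    #include <openssl/mem.h>

    // Parses an SPKI, then re-marshals it. Returns 1 on success.
    int spki_round_trip(const uint8_t *der, size_t der_len) {
      CBS cbs;
      CBS_init(&cbs, der, der_len);
      EVP_PKEY *pkey = EVP_parse_public_key(&cbs);
      if (pkey == NULL || CBS_len(&cbs) != 0) {  // Reject trailing data.
        EVP_PKEY_free(pkey);
        return 0;
      }
      // Real callers should also check EVP_PKEY_id and key-specific properties.
      CBB cbb;
      uint8_t *out = NULL;
      size_t out_len = 0;
      int ok = 1;
      if (!CBB_init(&cbb, der_len) ||
          !EVP_marshal_public_key(&cbb, pkey) ||
          !CBB_finish(&cbb, &out, &out_len)) {
        CBB_cleanup(&cbb);
        ok = 0;
      }
      OPENSSL_free(out);
      EVP_PKEY_free(pkey);
      return ok;
    }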
If |pctx| is not NULL, the |EVP_PKEY_CTX| of the signing - * operation will be written to |*pctx|; this can be used to set alternative - * signing options. - * - * It returns one on success, or zero on error. */ +// EVP_DigestSignInit sets up |ctx| for a signing operation with |type| and +// |pkey|. The |ctx| argument must have been initialised with +// |EVP_MD_CTX_init|. If |pctx| is not NULL, the |EVP_PKEY_CTX| of the signing +// operation will be written to |*pctx|; this can be used to set alternative +// signing options. +// +// For single-shot signing algorithms which do not use a pre-hash, such as +// Ed25519, |type| should be NULL. The |EVP_MD_CTX| itself is unused but is +// present so the API is uniform. See |EVP_DigestSign|. +// +// It returns one on success, or zero on error. OPENSSL_EXPORT int EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); -/* EVP_DigestSignUpdate appends |len| bytes from |data| to the data which will - * be signed in |EVP_DigestSignFinal|. It returns one. */ +// EVP_DigestSignUpdate appends |len| bytes from |data| to the data which will +// be signed in |EVP_DigestSignFinal|. It returns one. +// +// This function performs a streaming signing operation and will fail for +// signature algorithms which do not support this. Use |EVP_DigestSign| for a +// single-shot operation. OPENSSL_EXPORT int EVP_DigestSignUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); -/* EVP_DigestSignFinal signs the data that has been included by one or more - * calls to |EVP_DigestSignUpdate|. If |out_sig| is NULL then |*out_sig_len| is - * set to the maximum number of output bytes. Otherwise, on entry, - * |*out_sig_len| must contain the length of the |out_sig| buffer. If the call - * is successful, the signature is written to |out_sig| and |*out_sig_len| is - * set to its length. - * - * It returns one on success, or zero on error. */ +// EVP_DigestSignFinal signs the data that has been included by one or more +// calls to |EVP_DigestSignUpdate|. If |out_sig| is NULL then |*out_sig_len| is +// set to the maximum number of output bytes. Otherwise, on entry, +// |*out_sig_len| must contain the length of the |out_sig| buffer. If the call +// is successful, the signature is written to |out_sig| and |*out_sig_len| is +// set to its length. +// +// This function performs a streaming signing operation and will fail for +// signature algorithms which do not support this. Use |EVP_DigestSign| for a +// single-shot operation. +// +// It returns one on success, or zero on error. OPENSSL_EXPORT int EVP_DigestSignFinal(EVP_MD_CTX *ctx, uint8_t *out_sig, size_t *out_sig_len); - -/* Verifying */ - -/* EVP_DigestVerifyInit sets up |ctx| for a signature verification operation - * with |type| and |pkey|. The |ctx| argument must have been initialised with - * |EVP_MD_CTX_init|. If |pctx| is not NULL, the |EVP_PKEY_CTX| of the signing - * operation will be written to |*pctx|; this can be used to set alternative - * signing options. - * - * It returns one on success, or zero on error. */ +// EVP_DigestSign signs |data_len| bytes from |data| using |ctx|. If |out_sig| +// is NULL then |*out_sig_len| is set to the maximum number of output +// bytes. Otherwise, on entry, |*out_sig_len| must contain the length of the +// |out_sig| buffer. If the call is successful, the signature is written to +// |out_sig| and |*out_sig_len| is set to its length. +// +// It returns one on success and zero on error. 
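The streaming |EVP_DigestSignInit|/|EVP_DigestSignUpdate|/|EVP_DigestSignFinal| flow described above might look like this for a pre-hash algorithm such as ECDSA or RSA with SHA-256; the helper name is illustrative.

    #include <stdint.h>
    #include <openssl/digest.h>
    #include <openssl/evp.h>
    #include <openssl/mem.h>

    // Signs |msg| with |pkey| (an EC or RSA key) using SHA-256. On success,
    // |*out| is a malloced signature of |*out_len| bytes owned by the caller.
    int sign_streaming(EVP_PKEY *pkey, const uint8_t *msg, size_t msg_len,
                       uint8_t **out, size_t *out_len) {
      EVP_MD_CTX ctx;
      EVP_MD_CTX_init(&ctx);
      int ok = 0;
      *out = NULL;
      if (!EVP_DigestSignInit(&ctx, NULL, EVP_sha256(), NULL, pkey) ||
          !EVP_DigestSignUpdate(&ctx, msg, msg_len) ||
          // First call with a NULL buffer: query the maximum signature length.
          !EVP_DigestSignFinal(&ctx, NULL, out_len)) {
        goto done;
      }
      *out = OPENSSL_malloc(*out_len);
      if (*out == NULL || !EVP_DigestSignFinal(&ctx, *out, out_len)) {
        OPENSSL_free(*out);
        *out = NULL;
        goto done;
      }
      ok = 1;
    done:
      EVP_MD_CTX_cleanup(&ctx);
      return ok;
    }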
+OPENSSL_EXPORT int EVP_DigestSign(EVP_MD_CTX *ctx, uint8_t *out_sig, + size_t *out_sig_len, const uint8_t *data, + size_t data_len); + + +// Verifying + +// EVP_DigestVerifyInit sets up |ctx| for a signature verification operation +// with |type| and |pkey|. The |ctx| argument must have been initialised with +// |EVP_MD_CTX_init|. If |pctx| is not NULL, the |EVP_PKEY_CTX| of the signing +// operation will be written to |*pctx|; this can be used to set alternative +// signing options. +// +// For single-shot signing algorithms which do not use a pre-hash, such as +// Ed25519, |type| should be NULL. The |EVP_MD_CTX| itself is unused but is +// present so the API is uniform. See |EVP_DigestVerify|. +// +// It returns one on success, or zero on error. OPENSSL_EXPORT int EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); -/* EVP_DigestVerifyUpdate appends |len| bytes from |data| to the data which - * will be verified by |EVP_DigestVerifyFinal|. It returns one. */ +// EVP_DigestVerifyUpdate appends |len| bytes from |data| to the data which +// will be verified by |EVP_DigestVerifyFinal|. It returns one. +// +// This function performs streaming signature verification and will fail for +// signature algorithms which do not support this. Use |EVP_PKEY_verify_message| +// for a single-shot verification. OPENSSL_EXPORT int EVP_DigestVerifyUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); -/* EVP_DigestVerifyFinal verifies that |sig_len| bytes of |sig| are a valid - * signature for the data that has been included by one or more calls to - * |EVP_DigestVerifyUpdate|. It returns one on success and zero otherwise. */ +// EVP_DigestVerifyFinal verifies that |sig_len| bytes of |sig| are a valid +// signature for the data that has been included by one or more calls to +// |EVP_DigestVerifyUpdate|. It returns one on success and zero otherwise. +// +// This function performs streaming signature verification and will fail for +// signature algorithms which do not support this. Use |EVP_PKEY_verify_message| +// for a single-shot verification. OPENSSL_EXPORT int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len); +// EVP_DigestVerify verifies that |sig_len| bytes from |sig| are a valid +// signature for |data|. It returns one on success or zero on error. +OPENSSL_EXPORT int EVP_DigestVerify(EVP_MD_CTX *ctx, const uint8_t *sig, + size_t sig_len, const uint8_t *data, + size_t len); -/* Signing (old functions) */ -/* EVP_SignInit_ex configures |ctx|, which must already have been initialised, - * for a fresh signing operation using the hash function |type|. It returns one - * on success and zero otherwise. - * - * (In order to initialise |ctx|, either obtain it initialised with - * |EVP_MD_CTX_create|, or use |EVP_MD_CTX_init|.) */ +// Signing (old functions) + +// EVP_SignInit_ex configures |ctx|, which must already have been initialised, +// for a fresh signing operation using the hash function |type|. It returns one +// on success and zero otherwise. +// +// (In order to initialise |ctx|, either obtain it initialised with +// |EVP_MD_CTX_create|, or use |EVP_MD_CTX_init|.) OPENSSL_EXPORT int EVP_SignInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl); -/* EVP_SignInit is a deprecated version of |EVP_SignInit_ex|. - * - * TODO(fork): remove. */ +// EVP_SignInit is a deprecated version of |EVP_SignInit_ex|. +// +// TODO(fork): remove. 
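For the single-shot path that this change adds, an Ed25519 verification with |EVP_DigestVerify| could look as follows (|type| is NULL for Ed25519, as noted above); the helper name is illustrative.

    #include <stdint.h>
    #include <openssl/evp.h>

    // Verifies an Ed25519 signature over |msg| given a raw 32-byte public key.
    int ed25519_verify_demo(const uint8_t public_key[32], const uint8_t *msg,
                            size_t msg_len, const uint8_t sig[64]) {
      EVP_PKEY *pkey = EVP_PKEY_new_ed25519_public(public_key);
      if (pkey == NULL) {
        return 0;
      }
      EVP_MD_CTX ctx;
      EVP_MD_CTX_init(&ctx);
      int ok = EVP_DigestVerifyInit(&ctx, NULL, NULL /* no pre-hash */, NULL, pkey) &&
               EVP_DigestVerify(&ctx, sig, 64, msg, msg_len);
      EVP_MD_CTX_cleanup(&ctx);
      EVP_PKEY_free(pkey);
      return ok;
    }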
OPENSSL_EXPORT int EVP_SignInit(EVP_MD_CTX *ctx, const EVP_MD *type); -/* EVP_SignUpdate appends |len| bytes from |data| to the data which will be - * signed in |EVP_SignFinal|. */ +// EVP_SignUpdate appends |len| bytes from |data| to the data which will be +// signed in |EVP_SignFinal|. OPENSSL_EXPORT int EVP_SignUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); -/* EVP_SignFinal signs the data that has been included by one or more calls to - * |EVP_SignUpdate|, using the key |pkey|, and writes it to |sig|. On entry, - * |sig| must point to at least |EVP_PKEY_size(pkey)| bytes of space. The - * actual size of the signature is written to |*out_sig_len|. - * - * It returns one on success and zero otherwise. - * - * It does not modify |ctx|, thus it's possible to continue to use |ctx| in - * order to sign a longer message. */ +// EVP_SignFinal signs the data that has been included by one or more calls to +// |EVP_SignUpdate|, using the key |pkey|, and writes it to |sig|. On entry, +// |sig| must point to at least |EVP_PKEY_size(pkey)| bytes of space. The +// actual size of the signature is written to |*out_sig_len|. +// +// It returns one on success and zero otherwise. +// +// It does not modify |ctx|, thus it's possible to continue to use |ctx| in +// order to sign a longer message. OPENSSL_EXPORT int EVP_SignFinal(const EVP_MD_CTX *ctx, uint8_t *sig, unsigned int *out_sig_len, EVP_PKEY *pkey); -/* Verifying (old functions) */ +// Verifying (old functions) -/* EVP_VerifyInit_ex configures |ctx|, which must already have been - * initialised, for a fresh signature verification operation using the hash - * function |type|. It returns one on success and zero otherwise. - * - * (In order to initialise |ctx|, either obtain it initialised with - * |EVP_MD_CTX_create|, or use |EVP_MD_CTX_init|.) */ +// EVP_VerifyInit_ex configures |ctx|, which must already have been +// initialised, for a fresh signature verification operation using the hash +// function |type|. It returns one on success and zero otherwise. +// +// (In order to initialise |ctx|, either obtain it initialised with +// |EVP_MD_CTX_create|, or use |EVP_MD_CTX_init|.) OPENSSL_EXPORT int EVP_VerifyInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl); -/* EVP_VerifyInit is a deprecated version of |EVP_VerifyInit_ex|. - * - * TODO(fork): remove. */ +// EVP_VerifyInit is a deprecated version of |EVP_VerifyInit_ex|. +// +// TODO(fork): remove. OPENSSL_EXPORT int EVP_VerifyInit(EVP_MD_CTX *ctx, const EVP_MD *type); -/* EVP_VerifyUpdate appends |len| bytes from |data| to the data which will be - * signed in |EVP_VerifyFinal|. */ +// EVP_VerifyUpdate appends |len| bytes from |data| to the data which will be +// signed in |EVP_VerifyFinal|. OPENSSL_EXPORT int EVP_VerifyUpdate(EVP_MD_CTX *ctx, const void *data, size_t len); -/* EVP_VerifyFinal verifies that |sig_len| bytes of |sig| are a valid - * signature, by |pkey|, for the data that has been included by one or more - * calls to |EVP_VerifyUpdate|. - * - * It returns one on success and zero otherwise. - * - * It does not modify |ctx|, thus it's possible to continue to use |ctx| in - * order to sign a longer message. */ +// EVP_VerifyFinal verifies that |sig_len| bytes of |sig| are a valid +// signature, by |pkey|, for the data that has been included by one or more +// calls to |EVP_VerifyUpdate|. +// +// It returns one on success and zero otherwise. +// +// It does not modify |ctx|, thus it's possible to continue to use |ctx| in +// order to sign a longer message. 
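The legacy |EVP_SignInit_ex|/|EVP_SignUpdate|/|EVP_SignFinal| flow above needs a caller-provided buffer of at least |EVP_PKEY_size| bytes; a minimal sketch, with an illustrative helper name:

    #include <stdint.h>
    #include <openssl/digest.h>
    #include <openssl/evp.h>
    #include <openssl/mem.h>

    // Legacy signing; the caller owns the returned buffer. New code should
    // prefer the EVP_DigestSign* family above.
    uint8_t *sign_legacy(EVP_PKEY *pkey, const uint8_t *msg, size_t msg_len,
                         unsigned *out_sig_len) {
      uint8_t *sig = OPENSSL_malloc((size_t)EVP_PKEY_size(pkey));
      if (sig == NULL) {
        return NULL;
      }
      EVP_MD_CTX ctx;
      EVP_MD_CTX_init(&ctx);
      int ok = EVP_SignInit_ex(&ctx, EVP_sha256(), NULL) &&
               EVP_SignUpdate(&ctx, msg, msg_len) &&
               EVP_SignFinal(&ctx, sig, out_sig_len, pkey);
      EVP_MD_CTX_cleanup(&ctx);
      if (!ok) {
        OPENSSL_free(sig);
        return NULL;
      }
      return sig;
    }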
OPENSSL_EXPORT int EVP_VerifyFinal(EVP_MD_CTX *ctx, const uint8_t *sig, size_t sig_len, EVP_PKEY *pkey); -/* Printing */ +// Printing -/* EVP_PKEY_print_public prints a textual representation of the public key in - * |pkey| to |out|. Returns one on success or zero otherwise. */ +// EVP_PKEY_print_public prints a textual representation of the public key in +// |pkey| to |out|. Returns one on success or zero otherwise. OPENSSL_EXPORT int EVP_PKEY_print_public(BIO *out, const EVP_PKEY *pkey, int indent, ASN1_PCTX *pctx); -/* EVP_PKEY_print_private prints a textual representation of the private key in - * |pkey| to |out|. Returns one on success or zero otherwise. */ +// EVP_PKEY_print_private prints a textual representation of the private key in +// |pkey| to |out|. Returns one on success or zero otherwise. OPENSSL_EXPORT int EVP_PKEY_print_private(BIO *out, const EVP_PKEY *pkey, int indent, ASN1_PCTX *pctx); -/* EVP_PKEY_print_params prints a textual representation of the parameters in - * |pkey| to |out|. Returns one on success or zero otherwise. */ +// EVP_PKEY_print_params prints a textual representation of the parameters in +// |pkey| to |out|. Returns one on success or zero otherwise. OPENSSL_EXPORT int EVP_PKEY_print_params(BIO *out, const EVP_PKEY *pkey, int indent, ASN1_PCTX *pctx); -/* Password stretching. - * - * Password stretching functions take a low-entropy password and apply a slow - * function that results in a key suitable for use in symmetric - * cryptography. */ +// Password stretching. +// +// Password stretching functions take a low-entropy password and apply a slow +// function that results in a key suitable for use in symmetric +// cryptography. -/* PKCS5_PBKDF2_HMAC computes |iterations| iterations of PBKDF2 of |password| - * and |salt|, using |digest|, and outputs |key_len| bytes to |out_key|. It - * returns one on success and zero on error. */ +// PKCS5_PBKDF2_HMAC computes |iterations| iterations of PBKDF2 of |password| +// and |salt|, using |digest|, and outputs |key_len| bytes to |out_key|. It +// returns one on success and zero on error. OPENSSL_EXPORT int PKCS5_PBKDF2_HMAC(const char *password, size_t password_len, const uint8_t *salt, size_t salt_len, unsigned iterations, const EVP_MD *digest, size_t key_len, uint8_t *out_key); -/* PKCS5_PBKDF2_HMAC_SHA1 is the same as PKCS5_PBKDF2_HMAC, but with |digest| - * fixed to |EVP_sha1|. */ +// PKCS5_PBKDF2_HMAC_SHA1 is the same as PKCS5_PBKDF2_HMAC, but with |digest| +// fixed to |EVP_sha1|. OPENSSL_EXPORT int PKCS5_PBKDF2_HMAC_SHA1(const char *password, size_t password_len, const uint8_t *salt, size_t salt_len, unsigned iterations, size_t key_len, uint8_t *out_key); - -/* Public key contexts. - * - * |EVP_PKEY_CTX| objects hold the context of an operation (e.g. signing or - * encrypting) that uses a public key. */ - -/* EVP_PKEY_CTX_new allocates a fresh |EVP_PKEY_CTX| for use with |pkey|. It - * returns the context or NULL on error. */ +// EVP_PBE_scrypt expands |password| into a secret key of length |key_len| using +// scrypt, as described in RFC 7914, and writes the result to |out_key|. It +// returns one on success and zero on error. +// +// |N|, |r|, and |p| are as described in RFC 7914 section 6. They determine the +// cost of the operation. If the memory required exceeds |max_mem|, the +// operation will fail instead. If |max_mem| is zero, a defult limit of 32MiB +// will be used. 
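The two password-stretching entry points above (|PKCS5_PBKDF2_HMAC| and the newly added |EVP_PBE_scrypt|) take parameters in this order; a sketch deriving two 32-byte keys, where the iteration count and the N/r/p choices are illustrative rather than recommendations:

    #include <stdint.h>
    #include <openssl/digest.h>
    #include <openssl/evp.h>

    int derive_keys(const char *password, size_t password_len,
                    const uint8_t *salt, size_t salt_len,
                    uint8_t pbkdf2_key[32], uint8_t scrypt_key[32]) {
      if (!PKCS5_PBKDF2_HMAC(password, password_len, salt, salt_len,
                             100000 /* iterations */, EVP_sha256(),
                             32, pbkdf2_key)) {
        return 0;
      }
      // N, r, p as in RFC 7914; max_mem == 0 selects the default 32MiB limit.
      return EVP_PBE_scrypt(password, password_len, salt, salt_len,
                            16384 /* N */, 8 /* r */, 1 /* p */,
                            0 /* max_mem */, scrypt_key, 32);
    }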
+OPENSSL_EXPORT int EVP_PBE_scrypt(const char *password, size_t password_len, + const uint8_t *salt, size_t salt_len, + uint64_t N, uint64_t r, uint64_t p, + size_t max_mem, uint8_t *out_key, + size_t key_len); + + +// Public key contexts. +// +// |EVP_PKEY_CTX| objects hold the context of an operation (e.g. signing or +// encrypting) that uses a public key. + +// EVP_PKEY_CTX_new allocates a fresh |EVP_PKEY_CTX| for use with |pkey|. It +// returns the context or NULL on error. OPENSSL_EXPORT EVP_PKEY_CTX *EVP_PKEY_CTX_new(EVP_PKEY *pkey, ENGINE *e); -/* EVP_PKEY_CTX_new_id allocates a fresh |EVP_PKEY_CTX| for a key of type |id| - * (e.g. |EVP_PKEY_HMAC|). This can be used for key generation where - * |EVP_PKEY_CTX_new| can't be used because there isn't an |EVP_PKEY| to pass - * it. It returns the context or NULL on error. */ +// EVP_PKEY_CTX_new_id allocates a fresh |EVP_PKEY_CTX| for a key of type |id| +// (e.g. |EVP_PKEY_HMAC|). This can be used for key generation where +// |EVP_PKEY_CTX_new| can't be used because there isn't an |EVP_PKEY| to pass +// it. It returns the context or NULL on error. OPENSSL_EXPORT EVP_PKEY_CTX *EVP_PKEY_CTX_new_id(int id, ENGINE *e); -/* EVP_PKEY_CTX_free frees |ctx| and the data it owns. */ +// EVP_PKEY_CTX_free frees |ctx| and the data it owns. OPENSSL_EXPORT void EVP_PKEY_CTX_free(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_CTX_dup allocates a fresh |EVP_PKEY_CTX| and sets it equal to the - * state of |ctx|. It returns the fresh |EVP_PKEY_CTX| or NULL on error. */ +// EVP_PKEY_CTX_dup allocates a fresh |EVP_PKEY_CTX| and sets it equal to the +// state of |ctx|. It returns the fresh |EVP_PKEY_CTX| or NULL on error. OPENSSL_EXPORT EVP_PKEY_CTX *EVP_PKEY_CTX_dup(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_CTX_get0_pkey returns the |EVP_PKEY| associated with |ctx|. */ +// EVP_PKEY_CTX_get0_pkey returns the |EVP_PKEY| associated with |ctx|. OPENSSL_EXPORT EVP_PKEY *EVP_PKEY_CTX_get0_pkey(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_sign_init initialises an |EVP_PKEY_CTX| for a signing operation. It - * should be called before |EVP_PKEY_sign|. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_sign_init initialises an |EVP_PKEY_CTX| for a signing operation. It +// should be called before |EVP_PKEY_sign|. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_sign_init(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_sign signs |data_len| bytes from |data| using |ctx|. If |sig| is - * NULL, the maximum size of the signature is written to - * |out_sig_len|. Otherwise, |*sig_len| must contain the number of bytes of - * space available at |sig|. If sufficient, the signature will be written to - * |sig| and |*sig_len| updated with the true length. - * - * WARNING: Setting |sig| to NULL only gives the maximum size of the - * signature. The actual signature may be smaller. - * - * It returns one on success or zero on error. (Note: this differs from - * OpenSSL, which can also return negative values to indicate an error. ) */ +// EVP_PKEY_sign signs |digest_len| bytes from |digest| using |ctx|. If |sig| is +// NULL, the maximum size of the signature is written to +// |out_sig_len|. Otherwise, |*sig_len| must contain the number of bytes of +// space available at |sig|. If sufficient, the signature will be written to +// |sig| and |*sig_len| updated with the true length. +// +// This function expects a pre-hashed input and will fail for signature +// algorithms which do not support this. Use |EVP_DigestSignInit| to sign an +// unhashed input. 
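As a sketch of the pre-hashed usage described above (the SHA-256 digest, helper name, and error handling are illustrative assumptions; |EVP_PKEY_CTX_set_signature_md| is documented further below):

#include <openssl/evp.h>
#include <openssl/mem.h>
#include <openssl/sha.h>

// sign_digest signs a pre-computed SHA-256 |digest| with |key|. On success it
// returns one and sets |*sig| to a newly allocated signature of |*sig_len|
// bytes, which the caller must release with |OPENSSL_free|.
static int sign_digest(EVP_PKEY *key,
                       const uint8_t digest[SHA256_DIGEST_LENGTH],
                       uint8_t **sig, size_t *sig_len) {
  int ok = 0;
  *sig = NULL;
  EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key, NULL);
  if (ctx == NULL ||
      !EVP_PKEY_sign_init(ctx) ||
      !EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()) ||
      // First call with a NULL buffer to learn the maximum signature size.
      !EVP_PKEY_sign(ctx, NULL, sig_len, digest, SHA256_DIGEST_LENGTH)) {
    goto done;
  }
  *sig = OPENSSL_malloc(*sig_len);
  if (*sig == NULL ||
      !EVP_PKEY_sign(ctx, *sig, sig_len, digest, SHA256_DIGEST_LENGTH)) {
    goto done;
  }
  ok = 1;

done:
  if (!ok) {
    OPENSSL_free(*sig);
    *sig = NULL;
  }
  EVP_PKEY_CTX_free(ctx);
  return ok;
}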
+// +// WARNING: Setting |sig| to NULL only gives the maximum size of the +// signature. The actual signature may be smaller. +// +// It returns one on success or zero on error. (Note: this differs from +// OpenSSL, which can also return negative values to indicate an error. ) OPENSSL_EXPORT int EVP_PKEY_sign(EVP_PKEY_CTX *ctx, uint8_t *sig, - size_t *sig_len, const uint8_t *data, - size_t data_len); + size_t *sig_len, const uint8_t *digest, + size_t digest_len); -/* EVP_PKEY_verify_init initialises an |EVP_PKEY_CTX| for a signature - * verification operation. It should be called before |EVP_PKEY_verify|. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_verify_init initialises an |EVP_PKEY_CTX| for a signature +// verification operation. It should be called before |EVP_PKEY_verify|. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_verify_init(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_verify verifies that |sig_len| bytes from |sig| are a valid - * signature for |data|. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_verify verifies that |sig_len| bytes from |sig| are a valid +// signature for |digest|. +// +// This function expects a pre-hashed input and will fail for signature +// algorithms which do not support this. Use |EVP_DigestVerifyInit| to verify a +// signature given the unhashed input. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_verify(EVP_PKEY_CTX *ctx, const uint8_t *sig, - size_t sig_len, const uint8_t *data, - size_t data_len); + size_t sig_len, const uint8_t *digest, + size_t digest_len); -/* EVP_PKEY_encrypt_init initialises an |EVP_PKEY_CTX| for an encryption - * operation. It should be called before |EVP_PKEY_encrypt|. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_encrypt_init initialises an |EVP_PKEY_CTX| for an encryption +// operation. It should be called before |EVP_PKEY_encrypt|. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_encrypt_init(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_encrypt encrypts |in_len| bytes from |in|. If |out| is NULL, the - * maximum size of the ciphertext is written to |out_len|. Otherwise, |*out_len| - * must contain the number of bytes of space available at |out|. If sufficient, - * the ciphertext will be written to |out| and |*out_len| updated with the true - * length. - * - * WARNING: Setting |out| to NULL only gives the maximum size of the - * ciphertext. The actual ciphertext may be smaller. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_encrypt encrypts |in_len| bytes from |in|. If |out| is NULL, the +// maximum size of the ciphertext is written to |out_len|. Otherwise, |*out_len| +// must contain the number of bytes of space available at |out|. If sufficient, +// the ciphertext will be written to |out| and |*out_len| updated with the true +// length. +// +// WARNING: Setting |out| to NULL only gives the maximum size of the +// ciphertext. The actual ciphertext may be smaller. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_encrypt(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len, const uint8_t *in, size_t in_len); -/* EVP_PKEY_decrypt_init initialises an |EVP_PKEY_CTX| for a decryption - * operation. It should be called before |EVP_PKEY_decrypt|. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_decrypt_init initialises an |EVP_PKEY_CTX| for a decryption +// operation. It should be called before |EVP_PKEY_decrypt|. 
+// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_decrypt_init(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_decrypt decrypts |in_len| bytes from |in|. If |out| is NULL, the - * maximum size of the plaintext is written to |out_len|. Otherwise, |*out_len| - * must contain the number of bytes of space available at |out|. If sufficient, - * the ciphertext will be written to |out| and |*out_len| updated with the true - * length. - * - * WARNING: Setting |out| to NULL only gives the maximum size of the - * plaintext. The actual plaintext may be smaller. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_decrypt decrypts |in_len| bytes from |in|. If |out| is NULL, the +// maximum size of the plaintext is written to |out_len|. Otherwise, |*out_len| +// must contain the number of bytes of space available at |out|. If sufficient, +// the ciphertext will be written to |out| and |*out_len| updated with the true +// length. +// +// WARNING: Setting |out| to NULL only gives the maximum size of the +// plaintext. The actual plaintext may be smaller. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_decrypt(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len, const uint8_t *in, size_t in_len); -/* EVP_PKEY_verify_recover_init initialises an |EVP_PKEY_CTX| for a public-key - * decryption operation. It should be called before |EVP_PKEY_verify_recover|. - * - * Public-key decryption is a very obscure operation that is only implemented - * by RSA keys. It is effectively a signature verification operation that - * returns the signed message directly. It is almost certainly not what you - * want. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_verify_recover_init initialises an |EVP_PKEY_CTX| for a public-key +// decryption operation. It should be called before |EVP_PKEY_verify_recover|. +// +// Public-key decryption is a very obscure operation that is only implemented +// by RSA keys. It is effectively a signature verification operation that +// returns the signed message directly. It is almost certainly not what you +// want. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_verify_recover_init(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_verify_recover decrypts |sig_len| bytes from |sig|. If |out| is - * NULL, the maximum size of the plaintext is written to |out_len|. Otherwise, - * |*out_len| must contain the number of bytes of space available at |out|. If - * sufficient, the ciphertext will be written to |out| and |*out_len| updated - * with the true length. - * - * WARNING: Setting |out| to NULL only gives the maximum size of the - * plaintext. The actual plaintext may be smaller. - * - * See the warning about this operation in |EVP_PKEY_verify_recover_init|. It - * is probably not what you want. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_verify_recover decrypts |sig_len| bytes from |sig|. If |out| is +// NULL, the maximum size of the plaintext is written to |out_len|. Otherwise, +// |*out_len| must contain the number of bytes of space available at |out|. If +// sufficient, the ciphertext will be written to |out| and |*out_len| updated +// with the true length. +// +// WARNING: Setting |out| to NULL only gives the maximum size of the +// plaintext. The actual plaintext may be smaller. +// +// See the warning about this operation in |EVP_PKEY_verify_recover_init|. It +// is probably not what you want. +// +// It returns one on success or zero on error. 
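A sketch of the same sizing pattern for |EVP_PKEY_decrypt| documented above, assuming an RSA private key and OAEP padding; the padding choice and helper name are examples only, and |EVP_PKEY_CTX_set_rsa_padding| is documented further below:

#include <openssl/evp.h>
#include <openssl/mem.h>
#include <openssl/rsa.h>

// decrypt_oaep decrypts |in| with the RSA private key in |key| using OAEP
// padding. On success it returns one and sets |*out| to a freshly allocated
// plaintext of |*out_len| bytes, which must be released with |OPENSSL_free|.
static int decrypt_oaep(EVP_PKEY *key, const uint8_t *in, size_t in_len,
                        uint8_t **out, size_t *out_len) {
  int ok = 0;
  *out = NULL;
  EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(key, NULL);
  if (ctx == NULL ||
      !EVP_PKEY_decrypt_init(ctx) ||
      !EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING) ||
      // Query the maximum plaintext size first.
      !EVP_PKEY_decrypt(ctx, NULL, out_len, in, in_len)) {
    goto done;
  }
  *out = OPENSSL_malloc(*out_len);
  if (*out == NULL ||
      !EVP_PKEY_decrypt(ctx, *out, out_len, in, in_len)) {
    goto done;
  }
  ok = 1;

done:
  if (!ok) {
    OPENSSL_free(*out);
    *out = NULL;
  }
  EVP_PKEY_CTX_free(ctx);
  return ok;
}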
OPENSSL_EXPORT int EVP_PKEY_verify_recover(EVP_PKEY_CTX *ctx, uint8_t *out, size_t *out_len, const uint8_t *sig, size_t siglen); -/* EVP_PKEY_derive_init initialises an |EVP_PKEY_CTX| for a key derivation - * operation. It should be called before |EVP_PKEY_derive_set_peer| and - * |EVP_PKEY_derive|. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_derive_init initialises an |EVP_PKEY_CTX| for a key derivation +// operation. It should be called before |EVP_PKEY_derive_set_peer| and +// |EVP_PKEY_derive|. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_derive_init(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_derive_set_peer sets the peer's key to be used for key derivation - * by |ctx| to |peer|. It should be called after |EVP_PKEY_derive_init|. (For - * example, this is used to set the peer's key in (EC)DH.) It returns one on - * success and zero on error. */ +// EVP_PKEY_derive_set_peer sets the peer's key to be used for key derivation +// by |ctx| to |peer|. It should be called after |EVP_PKEY_derive_init|. (For +// example, this is used to set the peer's key in (EC)DH.) It returns one on +// success and zero on error. OPENSSL_EXPORT int EVP_PKEY_derive_set_peer(EVP_PKEY_CTX *ctx, EVP_PKEY *peer); -/* EVP_PKEY_derive derives a shared key between the two keys configured in - * |ctx|. If |key| is non-NULL then, on entry, |out_key_len| must contain the - * amount of space at |key|. If sufficient then the shared key will be written - * to |key| and |*out_key_len| will be set to the length. If |key| is NULL then - * |out_key_len| will be set to the maximum length. - * - * WARNING: Setting |out| to NULL only gives the maximum size of the key. The - * actual key may be smaller. - * - * It returns one on success and zero on error. */ +// EVP_PKEY_derive derives a shared key between the two keys configured in +// |ctx|. If |key| is non-NULL then, on entry, |out_key_len| must contain the +// amount of space at |key|. If sufficient then the shared key will be written +// to |key| and |*out_key_len| will be set to the length. If |key| is NULL then +// |out_key_len| will be set to the maximum length. +// +// WARNING: Setting |out| to NULL only gives the maximum size of the key. The +// actual key may be smaller. +// +// It returns one on success and zero on error. OPENSSL_EXPORT int EVP_PKEY_derive(EVP_PKEY_CTX *ctx, uint8_t *key, size_t *out_key_len); -/* EVP_PKEY_keygen_init initialises an |EVP_PKEY_CTX| for a key generation - * operation. It should be called before |EVP_PKEY_keygen|. - * - * It returns one on success or zero on error. */ +// EVP_PKEY_keygen_init initialises an |EVP_PKEY_CTX| for a key generation +// operation. It should be called before |EVP_PKEY_keygen|. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_keygen_init(EVP_PKEY_CTX *ctx); -/* EVP_PKEY_keygen performs a key generation operation using the values from - * |ctx| and sets |*ppkey| to a fresh |EVP_PKEY| containing the resulting key. - * It returns one on success or zero on error. */ +// EVP_PKEY_keygen performs a key generation operation using the values from +// |ctx| and sets |*ppkey| to a fresh |EVP_PKEY| containing the resulting key. +// It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_keygen(EVP_PKEY_CTX *ctx, EVP_PKEY **ppkey); -/* Generic control functions. */ +// Generic control functions. -/* EVP_PKEY_CTX_set_signature_md sets |md| as the digest to be used in a - * signature operation. 
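The two-call sizing pattern also applies to key derivation as documented above; a sketch assuming |my_key| and |peer_key| are EC keys on the same curve (names and error handling are illustrative):

#include <openssl/evp.h>
#include <openssl/mem.h>

// derive_shared_secret derives a shared secret between |my_key| and
// |peer_key|. On success it returns one and sets |*out| to a freshly
// allocated buffer of |*out_len| bytes (free with |OPENSSL_free|).
static int derive_shared_secret(EVP_PKEY *my_key, EVP_PKEY *peer_key,
                                uint8_t **out, size_t *out_len) {
  int ok = 0;
  *out = NULL;
  EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new(my_key, NULL);
  if (ctx == NULL ||
      !EVP_PKEY_derive_init(ctx) ||
      !EVP_PKEY_derive_set_peer(ctx, peer_key) ||
      // First call with NULL output to learn the maximum length.
      !EVP_PKEY_derive(ctx, NULL, out_len)) {
    goto done;
  }
  *out = OPENSSL_malloc(*out_len);
  if (*out == NULL || !EVP_PKEY_derive(ctx, *out, out_len)) {
    goto done;
  }
  ok = 1;

done:
  if (!ok) {
    OPENSSL_free(*out);
    *out = NULL;
  }
  EVP_PKEY_CTX_free(ctx);
  return ok;
}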
It returns one on success or zero on error. */ +// EVP_PKEY_CTX_set_signature_md sets |md| as the digest to be used in a +// signature operation. It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_signature_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); -/* EVP_PKEY_CTX_get_signature_md sets |*out_md| to the digest to be used in a - * signature operation. It returns one on success or zero on error. */ +// EVP_PKEY_CTX_get_signature_md sets |*out_md| to the digest to be used in a +// signature operation. It returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_signature_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md); -/* RSA specific control functions. */ +// RSA specific control functions. -/* EVP_PKEY_CTX_set_rsa_padding sets the padding type to use. It should be one - * of the |RSA_*_PADDING| values. Returns one on success or zero on error. */ +// EVP_PKEY_CTX_set_rsa_padding sets the padding type to use. It should be one +// of the |RSA_*_PADDING| values. Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_padding(EVP_PKEY_CTX *ctx, int padding); -/* EVP_PKEY_CTX_get_rsa_padding sets |*out_padding| to the current padding - * value, which is one of the |RSA_*_PADDING| values. Returns one on success or - * zero on error. */ +// EVP_PKEY_CTX_get_rsa_padding sets |*out_padding| to the current padding +// value, which is one of the |RSA_*_PADDING| values. Returns one on success or +// zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_rsa_padding(EVP_PKEY_CTX *ctx, int *out_padding); -/* EVP_PKEY_CTX_set_rsa_pss_saltlen sets the length of the salt in a PSS-padded - * signature. A value of -1 cause the salt to be the same length as the digest - * in the signature. A value of -2 causes the salt to be the maximum length - * that will fit when signing and recovered from the signature when verifying. - * Otherwise the value gives the size of the salt in bytes. - * - * If unsure, use -1. - * - * Returns one on success or zero on error. */ +// EVP_PKEY_CTX_set_rsa_pss_saltlen sets the length of the salt in a PSS-padded +// signature. A value of -1 cause the salt to be the same length as the digest +// in the signature. A value of -2 causes the salt to be the maximum length +// that will fit when signing and recovered from the signature when verifying. +// Otherwise the value gives the size of the salt in bytes. +// +// If unsure, use -1. +// +// Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_pss_saltlen(EVP_PKEY_CTX *ctx, int salt_len); -/* EVP_PKEY_CTX_get_rsa_pss_saltlen sets |*out_salt_len| to the salt length of - * a PSS-padded signature. See the documentation for - * |EVP_PKEY_CTX_set_rsa_pss_saltlen| for details of the special values that it - * can take. - * - * Returns one on success or zero on error. */ +// EVP_PKEY_CTX_get_rsa_pss_saltlen sets |*out_salt_len| to the salt length of +// a PSS-padded signature. See the documentation for +// |EVP_PKEY_CTX_set_rsa_pss_saltlen| for details of the special values that it +// can take. +// +// Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_rsa_pss_saltlen(EVP_PKEY_CTX *ctx, int *out_salt_len); -/* EVP_PKEY_CTX_set_rsa_keygen_bits sets the size of the desired RSA modulus, - * in bits, for key generation. Returns one on success or zero on - * error. */ +// EVP_PKEY_CTX_set_rsa_keygen_bits sets the size of the desired RSA modulus, +// in bits, for key generation. Returns one on success or zero on +// error. 
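A small sketch combining the RSA controls above to select RSA-PSS; |ctx| is assumed to have already been set up with |EVP_PKEY_sign_init| or |EVP_PKEY_verify_init| on an RSA key, and the SHA-256 choice is an example:

#include <openssl/evp.h>
#include <openssl/rsa.h>

// use_pss_sha256 configures |ctx| for RSA-PSS with SHA-256 and a salt length
// equal to the digest length, per the "-1" convention described above.
static int use_pss_sha256(EVP_PKEY_CTX *ctx) {
  return EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_PSS_PADDING) &&
         EVP_PKEY_CTX_set_signature_md(ctx, EVP_sha256()) &&
         EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, -1);
}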
OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_keygen_bits(EVP_PKEY_CTX *ctx, int bits); -/* EVP_PKEY_CTX_set_rsa_keygen_pubexp sets |e| as the public exponent for key - * generation. Returns one on success or zero on error. */ +// EVP_PKEY_CTX_set_rsa_keygen_pubexp sets |e| as the public exponent for key +// generation. Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_keygen_pubexp(EVP_PKEY_CTX *ctx, BIGNUM *e); -/* EVP_PKEY_CTX_set_rsa_oaep_md sets |md| as the digest used in OAEP padding. - * Returns one on success or zero on error. */ +// EVP_PKEY_CTX_set_rsa_oaep_md sets |md| as the digest used in OAEP padding. +// Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_oaep_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); -/* EVP_PKEY_CTX_get_rsa_oaep_md sets |*out_md| to the digest function used in - * OAEP padding. Returns one on success or zero on error. */ +// EVP_PKEY_CTX_get_rsa_oaep_md sets |*out_md| to the digest function used in +// OAEP padding. Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_rsa_oaep_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md); -/* EVP_PKEY_CTX_set_rsa_mgf1_md sets |md| as the digest used in MGF1. Returns - * one on success or zero on error. */ +// EVP_PKEY_CTX_set_rsa_mgf1_md sets |md| as the digest used in MGF1. Returns +// one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set_rsa_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD *md); -/* EVP_PKEY_CTX_get_rsa_mgf1_md sets |*out_md| to the digest function used in - * MGF1. Returns one on success or zero on error. */ +// EVP_PKEY_CTX_get_rsa_mgf1_md sets |*out_md| to the digest function used in +// MGF1. Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_get_rsa_mgf1_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md); -/* EVP_PKEY_CTX_set0_rsa_oaep_label sets |label_len| bytes from |label| as the - * label used in OAEP. DANGER: On success, this call takes ownership of |label| - * and will call |OPENSSL_free| on it when |ctx| is destroyed. - * - * Returns one on success or zero on error. */ +// EVP_PKEY_CTX_set0_rsa_oaep_label sets |label_len| bytes from |label| as the +// label used in OAEP. DANGER: On success, this call takes ownership of |label| +// and will call |OPENSSL_free| on it when |ctx| is destroyed. +// +// Returns one on success or zero on error. OPENSSL_EXPORT int EVP_PKEY_CTX_set0_rsa_oaep_label(EVP_PKEY_CTX *ctx, uint8_t *label, size_t label_len); -/* EVP_PKEY_CTX_get0_rsa_oaep_label sets |*out_label| to point to the internal - * buffer containing the OAEP label (which may be NULL) and returns the length - * of the label or a negative value on error. - * - * WARNING: the return value differs from the usual return value convention. */ +// EVP_PKEY_CTX_get0_rsa_oaep_label sets |*out_label| to point to the internal +// buffer containing the OAEP label (which may be NULL) and returns the length +// of the label or a negative value on error. +// +// WARNING: the return value differs from the usual return value convention. OPENSSL_EXPORT int EVP_PKEY_CTX_get0_rsa_oaep_label(EVP_PKEY_CTX *ctx, const uint8_t **out_label); -/* Deprecated functions. */ +// Deprecated functions. -/* EVP_PKEY_DH is defined for compatibility, but it is impossible to create an - * |EVP_PKEY| of that type. */ +// EVP_PKEY_DH is defined for compatibility, but it is impossible to create an +// |EVP_PKEY| of that type. 
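Tying the key-generation functions above together, a sketch that generates a fresh RSA key; the 2048-bit modulus and helper name are arbitrary example choices:

#include <openssl/evp.h>

// generate_rsa_key returns a newly generated 2048-bit RSA |EVP_PKEY|, or NULL
// on error. The caller takes ownership and must free it with |EVP_PKEY_free|.
static EVP_PKEY *generate_rsa_key(void) {
  EVP_PKEY *pkey = NULL;
  EVP_PKEY_CTX *ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_RSA, NULL);
  if (ctx == NULL ||
      !EVP_PKEY_keygen_init(ctx) ||
      !EVP_PKEY_CTX_set_rsa_keygen_bits(ctx, 2048) ||
      !EVP_PKEY_keygen(ctx, &pkey)) {
    EVP_PKEY_CTX_free(ctx);
    return NULL;
  }
  EVP_PKEY_CTX_free(ctx);
  return pkey;
}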
#define EVP_PKEY_DH NID_dhKeyAgreement -/* EVP_PKEY_RSA2 was historically an alternate form for RSA public keys (OID - * 2.5.8.1.1), but is no longer accepted. */ +// EVP_PKEY_RSA2 was historically an alternate form for RSA public keys (OID +// 2.5.8.1.1), but is no longer accepted. #define EVP_PKEY_RSA2 NID_rsa -/* OpenSSL_add_all_algorithms does nothing. */ +// OpenSSL_add_all_algorithms does nothing. OPENSSL_EXPORT void OpenSSL_add_all_algorithms(void); -/* OPENSSL_add_all_algorithms_conf does nothing. */ +// OPENSSL_add_all_algorithms_conf does nothing. OPENSSL_EXPORT void OPENSSL_add_all_algorithms_conf(void); -/* OpenSSL_add_all_ciphers does nothing. */ +// OpenSSL_add_all_ciphers does nothing. OPENSSL_EXPORT void OpenSSL_add_all_ciphers(void); -/* OpenSSL_add_all_digests does nothing. */ +// OpenSSL_add_all_digests does nothing. OPENSSL_EXPORT void OpenSSL_add_all_digests(void); -/* EVP_cleanup does nothing. */ +// EVP_cleanup does nothing. OPENSSL_EXPORT void EVP_cleanup(void); OPENSSL_EXPORT void EVP_CIPHER_do_all_sorted( @@ -681,79 +749,79 @@ OPENSSL_EXPORT void EVP_MD_do_all_sorted(void (*callback)(const EVP_MD *cipher, void *arg), void *arg); -/* i2d_PrivateKey marshals a private key from |key| to an ASN.1, DER - * structure. If |outp| is not NULL then the result is written to |*outp| and - * |*outp| is advanced just past the output. It returns the number of bytes in - * the result, whether written or not, or a negative value on error. - * - * RSA keys are serialized as a DER-encoded RSAPublicKey (RFC 3447) structure. - * EC keys are serialized as a DER-encoded ECPrivateKey (RFC 5915) structure. - * - * Use |RSA_marshal_private_key| or |EC_marshal_private_key| instead. */ +// i2d_PrivateKey marshals a private key from |key| to an ASN.1, DER +// structure. If |outp| is not NULL then the result is written to |*outp| and +// |*outp| is advanced just past the output. It returns the number of bytes in +// the result, whether written or not, or a negative value on error. +// +// RSA keys are serialized as a DER-encoded RSAPublicKey (RFC 3447) structure. +// EC keys are serialized as a DER-encoded ECPrivateKey (RFC 5915) structure. +// +// Use |RSA_marshal_private_key| or |EC_marshal_private_key| instead. OPENSSL_EXPORT int i2d_PrivateKey(const EVP_PKEY *key, uint8_t **outp); -/* i2d_PublicKey marshals a public key from |key| to a type-specific format. - * If |outp| is not NULL then the result is written to |*outp| and - * |*outp| is advanced just past the output. It returns the number of bytes in - * the result, whether written or not, or a negative value on error. - * - * RSA keys are serialized as a DER-encoded RSAPublicKey (RFC 3447) structure. - * EC keys are serialized as an EC point per SEC 1. - * - * Use |RSA_marshal_public_key| or |EC_POINT_point2cbb| instead. */ +// i2d_PublicKey marshals a public key from |key| to a type-specific format. +// If |outp| is not NULL then the result is written to |*outp| and +// |*outp| is advanced just past the output. It returns the number of bytes in +// the result, whether written or not, or a negative value on error. +// +// RSA keys are serialized as a DER-encoded RSAPublicKey (RFC 3447) structure. +// EC keys are serialized as an EC point per SEC 1. +// +// Use |RSA_marshal_public_key| or |EC_POINT_point2cbb| instead. OPENSSL_EXPORT int i2d_PublicKey(EVP_PKEY *key, uint8_t **outp); -/* d2i_PrivateKey parses an ASN.1, DER-encoded, private key from |len| bytes at - * |*inp|. 
If |out| is not NULL then, on exit, a pointer to the result is in - * |*out|. Note that, even if |*out| is already non-NULL on entry, it will not - * be written to. Rather, a fresh |EVP_PKEY| is allocated and the previous one - * is freed. On successful exit, |*inp| is advanced past the DER structure. It - * returns the result or NULL on error. - * - * This function tries to detect one of several formats. Instead, use - * |EVP_parse_private_key| for a PrivateKeyInfo, |RSA_parse_private_key| for an - * RSAPrivateKey, and |EC_parse_private_key| for an ECPrivateKey. */ +// d2i_PrivateKey parses an ASN.1, DER-encoded, private key from |len| bytes at +// |*inp|. If |out| is not NULL then, on exit, a pointer to the result is in +// |*out|. Note that, even if |*out| is already non-NULL on entry, it will not +// be written to. Rather, a fresh |EVP_PKEY| is allocated and the previous one +// is freed. On successful exit, |*inp| is advanced past the DER structure. It +// returns the result or NULL on error. +// +// This function tries to detect one of several formats. Instead, use +// |EVP_parse_private_key| for a PrivateKeyInfo, |RSA_parse_private_key| for an +// RSAPrivateKey, and |EC_parse_private_key| for an ECPrivateKey. OPENSSL_EXPORT EVP_PKEY *d2i_PrivateKey(int type, EVP_PKEY **out, const uint8_t **inp, long len); -/* d2i_AutoPrivateKey acts the same as |d2i_PrivateKey|, but detects the type - * of the private key. - * - * This function tries to detect one of several formats. Instead, use - * |EVP_parse_private_key| for a PrivateKeyInfo, |RSA_parse_private_key| for an - * RSAPrivateKey, and |EC_parse_private_key| for an ECPrivateKey. */ +// d2i_AutoPrivateKey acts the same as |d2i_PrivateKey|, but detects the type +// of the private key. +// +// This function tries to detect one of several formats. Instead, use +// |EVP_parse_private_key| for a PrivateKeyInfo, |RSA_parse_private_key| for an +// RSAPrivateKey, and |EC_parse_private_key| for an ECPrivateKey. OPENSSL_EXPORT EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **out, const uint8_t **inp, long len); -/* EVP_PKEY_get0_DH returns NULL. */ +// EVP_PKEY_get0_DH returns NULL. OPENSSL_EXPORT DH *EVP_PKEY_get0_DH(EVP_PKEY *pkey); -/* Private structures. */ +// Private structures. struct evp_pkey_st { CRYPTO_refcount_t references; - /* type contains one of the EVP_PKEY_* values or NID_undef and determines - * which element (if any) of the |pkey| union is valid. */ + // type contains one of the EVP_PKEY_* values or NID_undef and determines + // which element (if any) of the |pkey| union is valid. int type; union { - char *ptr; + void *ptr; RSA *rsa; DSA *dsa; DH *dh; EC_KEY *ec; } pkey; - /* ameth contains a pointer to a method table that contains many ASN.1 - * methods for the key type. */ + // ameth contains a pointer to a method table that contains many ASN.1 + // methods for the key type. 
const EVP_PKEY_ASN1_METHOD *ameth; } /* EVP_PKEY */; #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { namespace bssl { @@ -763,7 +831,7 @@ BORINGSSL_MAKE_DELETER(EVP_PKEY_CTX, EVP_PKEY_CTX_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif @@ -797,5 +865,9 @@ BORINGSSL_MAKE_DELETER(EVP_PKEY_CTX, EVP_PKEY_CTX_free) #define EVP_R_UNKNOWN_PUBLIC_KEY_TYPE 127 #define EVP_R_UNSUPPORTED_ALGORITHM 128 #define EVP_R_UNSUPPORTED_PUBLIC_KEY_TYPE 129 +#define EVP_R_NOT_A_PRIVATE_KEY 130 +#define EVP_R_INVALID_SIGNATURE 131 +#define EVP_R_MEMORY_LIMIT_EXCEEDED 132 +#define EVP_R_INVALID_PARAMETERS 133 -#endif /* OPENSSL_HEADER_EVP_H */ +#endif // OPENSSL_HEADER_EVP_H diff --git a/Sources/BoringSSL/include/openssl/ex_data.h b/Sources/BoringSSL/include/openssl/ex_data.h index e78e0703a..102f8a8f6 100644 --- a/Sources/BoringSSL/include/openssl/ex_data.h +++ b/Sources/BoringSSL/include/openssl/ex_data.h @@ -118,87 +118,77 @@ extern "C" { #endif -/* ex_data is a mechanism for associating arbitrary extra data with objects. - * For each type of object that supports ex_data, different users can be - * assigned indexes in which to store their data. Each index has callback - * functions that are called when an object of that type is freed or - * duplicated. */ +// ex_data is a mechanism for associating arbitrary extra data with objects. +// For each type of object that supports ex_data, different users can be +// assigned indexes in which to store their data. Each index has callback +// functions that are called when an object of that type is freed or +// duplicated. typedef struct crypto_ex_data_st CRYPTO_EX_DATA; -/* Type-specific functions. - * - * Each type that supports ex_data provides three functions: */ +// Type-specific functions. +// +// Each type that supports ex_data provides three functions: -#if 0 /* Sample */ +#if 0 // Sample -/* TYPE_get_ex_new_index allocates a new index for |TYPE|. See the - * descriptions of the callback typedefs for details of when they are - * called. Any of the callback arguments may be NULL. The |argl| and |argp| - * arguments are opaque values that are passed to the callbacks. It returns the - * new index or a negative number on error. - * - * TODO(fork): this should follow the standard calling convention. */ +// TYPE_get_ex_new_index allocates a new index for |TYPE|. An optional +// |free_func| argument may be provided which is called when the owning object +// is destroyed. See |CRYPTO_EX_free| for details. The |argl| and |argp| +// arguments are opaque values that are passed to the callback. It returns the +// new index or a negative number on error. OPENSSL_EXPORT int TYPE_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); -/* TYPE_set_ex_data sets an extra data pointer on |t|. The |index| argument - * should have been returned from a previous call to |TYPE_get_ex_new_index|. */ +// TYPE_set_ex_data sets an extra data pointer on |t|. The |index| argument +// should have been returned from a previous call to |TYPE_get_ex_new_index|. OPENSSL_EXPORT int TYPE_set_ex_data(TYPE *t, int index, void *arg); -/* TYPE_get_ex_data returns an extra data pointer for |t|, or NULL if no such - * pointer exists. The |index| argument should have been returned from a - * previous call to |TYPE_get_ex_new_index|. */ +// TYPE_get_ex_data returns an extra data pointer for |t|, or NULL if no such +// pointer exists. 
The |index| argument should have been returned from a +// previous call to |TYPE_get_ex_new_index|. OPENSSL_EXPORT void *TYPE_get_ex_data(const TYPE *t, int index); -#endif /* Sample */ +#endif // Sample -/* Callback types. */ +// Callback types. -/* CRYPTO_EX_free is a callback function that is called when an object of the - * class with extra data pointers is being destroyed. For example, if this - * callback has been passed to |SSL_get_ex_new_index| then it may be called each - * time an |SSL*| is destroyed. - * - * The callback is passed the new object (i.e. the |SSL*|) in |parent|. The - * arguments |argl| and |argp| contain opaque values that were given to - * |CRYPTO_get_ex_new_index|. The callback should return one on success, but - * the value is ignored. - * - * This callback may be called with a NULL value for |ptr| if |parent| has no - * value set for this index. However, the callbacks may also be skipped entirely - * if no extra data pointers are set on |parent| at all. */ +// CRYPTO_EX_free is a callback function that is called when an object of the +// class with extra data pointers is being destroyed. For example, if this +// callback has been passed to |SSL_get_ex_new_index| then it may be called each +// time an |SSL*| is destroyed. +// +// The callback is passed the new object (i.e. the |SSL*|) in |parent|. The +// arguments |argl| and |argp| contain opaque values that were given to +// |CRYPTO_get_ex_new_index|. The callback should return one on success, but +// the value is ignored. +// +// This callback may be called with a NULL value for |ptr| if |parent| has no +// value set for this index. However, the callbacks may also be skipped entirely +// if no extra data pointers are set on |parent| at all. typedef void CRYPTO_EX_free(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int index, long argl, void *argp); -/* CRYPTO_EX_dup is a callback function that is called when an object of the - * class is being copied and thus the ex_data linked to it also needs to be - * copied. On entry, |*from_d| points to the data for this index from the - * original object. When the callback returns, |*from_d| will be set as the - * data for this index in |to|. - * - * This callback may be called with a NULL value for |*from_d| if |from| has no - * value set for this index. However, the callbacks may also be skipped entirely - * if no extra data pointers are set on |from| at all. */ -typedef int CRYPTO_EX_dup(CRYPTO_EX_DATA *to, const CRYPTO_EX_DATA *from, - void **from_d, int index, long argl, void *argp); - -/* Deprecated functions. */ +// Deprecated functions. -/* CRYPTO_cleanup_all_ex_data does nothing. */ +// CRYPTO_cleanup_all_ex_data does nothing. OPENSSL_EXPORT void CRYPTO_cleanup_all_ex_data(void); +// CRYPTO_EX_dup is a legacy callback function type which is ignored. +typedef int CRYPTO_EX_dup(CRYPTO_EX_DATA *to, const CRYPTO_EX_DATA *from, + void **from_d, int index, long argl, void *argp); + -/* Private structures. */ +// Private structures. -/* CRYPTO_EX_unused is a placeholder for an unused callback. It is aliased to - * int to ensure non-NULL callers fail to compile rather than fail silently. */ +// CRYPTO_EX_unused is a placeholder for an unused callback. It is aliased to +// int to ensure non-NULL callers fail to compile rather than fail silently. 
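As a concrete counterpart to the |TYPE_*| sample above, a sketch using the |SSL| ex_data functions mentioned in the |CRYPTO_EX_free| documentation; the state struct, callback, and index variable are hypothetical application code:

#include <openssl/mem.h>
#include <openssl/ssl.h>

// g_conn_state_index holds the ex_data slot reserved for a per-connection
// application struct.
static int g_conn_state_index = -1;

// conn_state_free is a CRYPTO_EX_free callback that releases the stored
// pointer when the owning |SSL| object is destroyed.
static void conn_state_free(void *parent, void *ptr, CRYPTO_EX_DATA *ad,
                            int index, long argl, void *argp) {
  OPENSSL_free(ptr);  // |ptr| may be NULL; |OPENSSL_free| accepts that.
}

// init_conn_state_index reserves the index once at startup. Afterwards,
// callers use SSL_set_ex_data(ssl, g_conn_state_index, state) and
// SSL_get_ex_data(ssl, g_conn_state_index).
static int init_conn_state_index(void) {
  g_conn_state_index =
      SSL_get_ex_new_index(0, NULL, NULL, NULL, conn_state_free);
  return g_conn_state_index >= 0;
}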
typedef int CRYPTO_EX_unused; struct crypto_ex_data_st { @@ -207,7 +197,7 @@ struct crypto_ex_data_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_EX_DATA_H */ +#endif // OPENSSL_HEADER_EX_DATA_H diff --git a/Sources/BoringSSL/include/openssl/hkdf.h b/Sources/BoringSSL/include/openssl/hkdf.h index bffb01ec7..59aaa4936 100644 --- a/Sources/BoringSSL/include/openssl/hkdf.h +++ b/Sources/BoringSSL/include/openssl/hkdf.h @@ -22,33 +22,33 @@ extern "C" { #endif -/* HKDF. */ +// HKDF. -/* HKDF computes HKDF (as specified by RFC 5869) of initial keying material - * |secret| with |salt| and |info| using |digest|, and outputs |out_len| bytes - * to |out_key|. It returns one on success and zero on error. - * - * HKDF is an Extract-and-Expand algorithm. It does not do any key stretching, - * and as such, is not suited to be used alone to generate a key from a - * password. */ +// HKDF computes HKDF (as specified by RFC 5869) of initial keying material +// |secret| with |salt| and |info| using |digest|, and outputs |out_len| bytes +// to |out_key|. It returns one on success and zero on error. +// +// HKDF is an Extract-and-Expand algorithm. It does not do any key stretching, +// and as such, is not suited to be used alone to generate a key from a +// password. OPENSSL_EXPORT int HKDF(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len, const uint8_t *info, size_t info_len); -/* HKDF_extract computes a HKDF PRK (as specified by RFC 5869) from initial - * keying material |secret| and salt |salt| using |digest|, and outputs - * |out_len| bytes to |out_key|. The maximum output size is |EVP_MAX_MD_SIZE|. - * It returns one on success and zero on error. */ +// HKDF_extract computes a HKDF PRK (as specified by RFC 5869) from initial +// keying material |secret| and salt |salt| using |digest|, and outputs +// |out_len| bytes to |out_key|. The maximum output size is |EVP_MAX_MD_SIZE|. +// It returns one on success and zero on error. OPENSSL_EXPORT int HKDF_extract(uint8_t *out_key, size_t *out_len, const EVP_MD *digest, const uint8_t *secret, size_t secret_len, const uint8_t *salt, size_t salt_len); -/* HKDF_expand computes a HKDF OKM (as specified by RFC 5869) of length - * |out_len| from the PRK |prk| and info |info| using |digest|, and outputs - * the result to |out_key|. It returns one on success and zero on error. */ +// HKDF_expand computes a HKDF OKM (as specified by RFC 5869) of length +// |out_len| from the PRK |prk| and info |info| using |digest|, and outputs +// the result to |out_key|. It returns one on success and zero on error. OPENSSL_EXPORT int HKDF_expand(uint8_t *out_key, size_t out_len, const EVP_MD *digest, const uint8_t *prk, size_t prk_len, const uint8_t *info, @@ -56,9 +56,9 @@ OPENSSL_EXPORT int HKDF_expand(uint8_t *out_key, size_t out_len, #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif #define HKDF_R_OUTPUT_TOO_LARGE 100 -#endif /* OPENSSL_HEADER_HKDF_H */ +#endif // OPENSSL_HEADER_HKDF_H diff --git a/Sources/BoringSSL/include/openssl/hmac.h b/Sources/BoringSSL/include/openssl/hmac.h index e4cc04e6e..8491b8d05 100644 --- a/Sources/BoringSSL/include/openssl/hmac.h +++ b/Sources/BoringSSL/include/openssl/hmac.h @@ -66,84 +66,94 @@ extern "C" { #endif -/* HMAC contains functions for constructing PRFs from Merkle–Damgård hash - * functions using HMAC. 
*/ +// HMAC contains functions for constructing PRFs from Merkle–Damgård hash +// functions using HMAC. -/* One-shot operation. */ +// One-shot operation. -/* HMAC calculates the HMAC of |data_len| bytes of |data|, using the given key - * and hash function, and writes the result to |out|. On entry, |out| must - * contain at least |EVP_MD_size| bytes of space. The actual length of the - * result is written to |*out_len|. An output size of |EVP_MAX_MD_SIZE| will - * always be large enough. It returns |out| or NULL on error. */ +// HMAC calculates the HMAC of |data_len| bytes of |data|, using the given key +// and hash function, and writes the result to |out|. On entry, |out| must +// contain at least |EVP_MD_size| bytes of space. The actual length of the +// result is written to |*out_len|. An output size of |EVP_MAX_MD_SIZE| will +// always be large enough. It returns |out| or NULL on error. OPENSSL_EXPORT uint8_t *HMAC(const EVP_MD *evp_md, const void *key, size_t key_len, const uint8_t *data, size_t data_len, uint8_t *out, unsigned int *out_len); -/* Incremental operation. */ +// Incremental operation. -/* HMAC_CTX_init initialises |ctx| for use in an HMAC operation. It's assumed - * that HMAC_CTX objects will be allocated on the stack thus no allocation - * function is provided. If needed, allocate |sizeof(HMAC_CTX)| and call - * |HMAC_CTX_init| on it. */ +// HMAC_CTX_init initialises |ctx| for use in an HMAC operation. It's assumed +// that HMAC_CTX objects will be allocated on the stack thus no allocation +// function is provided. OPENSSL_EXPORT void HMAC_CTX_init(HMAC_CTX *ctx); -/* HMAC_CTX_cleanup frees data owned by |ctx|. */ +// HMAC_CTX_new allocates and initialises a new |HMAC_CTX| and returns it, or +// NULL on allocation failure. The caller must use |HMAC_CTX_free| to release +// the resulting object. +OPENSSL_EXPORT HMAC_CTX *HMAC_CTX_new(void); + +// HMAC_CTX_cleanup frees data owned by |ctx|. It does not free |ctx| itself. OPENSSL_EXPORT void HMAC_CTX_cleanup(HMAC_CTX *ctx); -/* HMAC_Init_ex sets up an initialised |HMAC_CTX| to use |md| as the hash - * function and |key| as the key. For a non-initial call, |md| may be NULL, in - * which case the previous hash function will be used. If the hash function has - * not changed and |key| is NULL, |ctx| reuses the previous key. It returns one - * on success or zero otherwise. - * - * WARNING: NULL and empty keys are ambiguous on non-initial calls. Passing NULL - * |key| but repeating the previous |md| reuses the previous key rather than the - * empty key. */ +// HMAC_CTX_free calls |HMAC_CTX_cleanup| and then frees |ctx| itself. +OPENSSL_EXPORT void HMAC_CTX_free(HMAC_CTX *ctx); + +// HMAC_Init_ex sets up an initialised |HMAC_CTX| to use |md| as the hash +// function and |key| as the key. For a non-initial call, |md| may be NULL, in +// which case the previous hash function will be used. If the hash function has +// not changed and |key| is NULL, |ctx| reuses the previous key. It returns one +// on success or zero otherwise. +// +// WARNING: NULL and empty keys are ambiguous on non-initial calls. Passing NULL +// |key| but repeating the previous |md| reuses the previous key rather than the +// empty key. OPENSSL_EXPORT int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len, const EVP_MD *md, ENGINE *impl); -/* HMAC_Update hashes |data_len| bytes from |data| into the current HMAC - * operation in |ctx|. It returns one. */ +// HMAC_Update hashes |data_len| bytes from |data| into the current HMAC +// operation in |ctx|. 
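A minimal sketch of the one-shot |HMAC| call documented at the top of this section; the SHA-256 choice and helper name are assumptions for the example:

#include <openssl/digest.h>
#include <openssl/hmac.h>

// hmac_sha256 writes the HMAC-SHA256 of |data| under |key| to |out|, which
// must have |EVP_MAX_MD_SIZE| bytes of space, and returns one on success.
static int hmac_sha256(const void *key, size_t key_len,
                       const uint8_t *data, size_t data_len,
                       uint8_t out[EVP_MAX_MD_SIZE], unsigned *out_len) {
  return HMAC(EVP_sha256(), key, key_len, data, data_len, out, out_len) != NULL;
}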
It returns one. OPENSSL_EXPORT int HMAC_Update(HMAC_CTX *ctx, const uint8_t *data, size_t data_len); -/* HMAC_Final completes the HMAC operation in |ctx| and writes the result to - * |out| and the sets |*out_len| to the length of the result. On entry, |out| - * must contain at least |HMAC_size| bytes of space. An output size of - * |EVP_MAX_MD_SIZE| will always be large enough. It returns one on success or - * zero on error. */ +// HMAC_Final completes the HMAC operation in |ctx| and writes the result to +// |out| and the sets |*out_len| to the length of the result. On entry, |out| +// must contain at least |HMAC_size| bytes of space. An output size of +// |EVP_MAX_MD_SIZE| will always be large enough. It returns one on success or +// zero on error. OPENSSL_EXPORT int HMAC_Final(HMAC_CTX *ctx, uint8_t *out, unsigned int *out_len); -/* Utility functions. */ +// Utility functions. -/* HMAC_size returns the size, in bytes, of the HMAC that will be produced by - * |ctx|. On entry, |ctx| must have been setup with |HMAC_Init_ex|. */ +// HMAC_size returns the size, in bytes, of the HMAC that will be produced by +// |ctx|. On entry, |ctx| must have been setup with |HMAC_Init_ex|. OPENSSL_EXPORT size_t HMAC_size(const HMAC_CTX *ctx); -/* HMAC_CTX_copy_ex sets |dest| equal to |src|. On entry, |dest| must have been - * initialised by calling |HMAC_CTX_init|. It returns one on success and zero - * on error. */ +// HMAC_CTX_copy_ex sets |dest| equal to |src|. On entry, |dest| must have been +// initialised by calling |HMAC_CTX_init|. It returns one on success and zero +// on error. OPENSSL_EXPORT int HMAC_CTX_copy_ex(HMAC_CTX *dest, const HMAC_CTX *src); +// HMAC_CTX_reset calls |HMAC_CTX_cleanup| followed by |HMAC_CTX_init|. +OPENSSL_EXPORT void HMAC_CTX_reset(HMAC_CTX *ctx); -/* Deprecated functions. */ + +// Deprecated functions. OPENSSL_EXPORT int HMAC_Init(HMAC_CTX *ctx, const void *key, int key_len, const EVP_MD *md); -/* HMAC_CTX_copy calls |HMAC_CTX_init| on |dest| and then sets it equal to - * |src|. On entry, |dest| must /not/ be initialised for an operation with - * |HMAC_Init_ex|. It returns one on success and zero on error. */ +// HMAC_CTX_copy calls |HMAC_CTX_init| on |dest| and then sets it equal to +// |src|. On entry, |dest| must /not/ be initialised for an operation with +// |HMAC_Init_ex|. It returns one on success and zero on error. OPENSSL_EXPORT int HMAC_CTX_copy(HMAC_CTX *dest, const HMAC_CTX *src); -/* Private functions */ +// Private functions struct hmac_ctx_st { const EVP_MD *md; @@ -154,13 +164,15 @@ struct hmac_ctx_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C #if !defined(BORINGSSL_NO_CXX) extern "C++" { namespace bssl { +BORINGSSL_MAKE_DELETER(HMAC_CTX, HMAC_CTX_free) + using ScopedHMAC_CTX = internal::StackAllocated; @@ -171,4 +183,4 @@ using ScopedHMAC_CTX = #endif -#endif /* OPENSSL_HEADER_HMAC_H */ +#endif // OPENSSL_HEADER_HMAC_H diff --git a/Sources/BoringSSL/include/openssl/is_boringssl.h b/Sources/BoringSSL/include/openssl/is_boringssl.h new file mode 100644 index 000000000..302cbe292 --- /dev/null +++ b/Sources/BoringSSL/include/openssl/is_boringssl.h @@ -0,0 +1,16 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +// This header is provided in order to catch include path errors in consuming +// BoringSSL. diff --git a/Sources/BoringSSL/include/openssl/lhash.h b/Sources/BoringSSL/include/openssl/lhash.h index b95d4f21b..1ceeb6998 100644 --- a/Sources/BoringSSL/include/openssl/lhash.h +++ b/Sources/BoringSSL/include/openssl/lhash.h @@ -65,24 +65,24 @@ extern "C" { #endif -/* lhash is a traditional, chaining hash table that automatically expands and - * contracts as needed. One should not use the lh_* functions directly, rather - * use the type-safe macro wrappers: - * - * A hash table of a specific type of object has type |LHASH_OF(type)|. This - * can be defined (once) with |DEFINE_LHASH_OF(type)| and declared where needed - * with |DECLARE_LHASH_OF(type)|. For example: - * - * struct foo { - * int bar; - * }; - * - * DEFINE_LHASH_OF(struct foo); - * - * Although note that the hash table will contain /pointers/ to |foo|. - * - * A macro will be defined for each of the lh_* functions below. For - * LHASH_OF(foo), the macros would be lh_foo_new, lh_foo_num_items etc. */ +// lhash is a traditional, chaining hash table that automatically expands and +// contracts as needed. One should not use the lh_* functions directly, rather +// use the type-safe macro wrappers: +// +// A hash table of a specific type of object has type |LHASH_OF(type)|. This +// can be defined (once) with |DEFINE_LHASH_OF(type)| and declared where needed +// with |DECLARE_LHASH_OF(type)|. For example: +// +// struct foo { +// int bar; +// }; +// +// DEFINE_LHASH_OF(struct foo); +// +// Although note that the hash table will contain /pointers/ to |foo|. +// +// A macro will be defined for each of the lh_* functions below. For +// LHASH_OF(foo), the macros would be lh_foo_new, lh_foo_num_items etc. #define LHASH_OF(type) struct lhash_st_##type @@ -91,101 +91,84 @@ extern "C" { #define DECLARE_LHASH_OF(type) LHASH_OF(type); -/* The make_macros.sh script in this directory parses the following lines and - * generates the lhash_macros.h file that contains macros for the following - * types of stacks: - * - * LHASH_OF:ASN1_OBJECT - * LHASH_OF:CONF_VALUE - * LHASH_OF:CRYPTO_BUFFER - * LHASH_OF:SSL_SESSION */ +// The make_macros.sh script in this directory parses the following lines and +// generates the lhash_macros.h file that contains macros for the following +// types of stacks: +// +// LHASH_OF:ASN1_OBJECT +// LHASH_OF:CONF_VALUE +// LHASH_OF:CRYPTO_BUFFER +// LHASH_OF:SSL_SESSION #define IN_LHASH_H #include #undef IN_LHASH_H -/* lhash_item_st is an element of a hash chain. It points to the opaque data - * for this element and to the next item in the chain. The linked-list is NULL - * terminated. */ +// lhash_item_st is an element of a hash chain. It points to the opaque data +// for this element and to the next item in the chain. The linked-list is NULL +// terminated. typedef struct lhash_item_st { void *data; struct lhash_item_st *next; - /* hash contains the cached, hash value of |data|. 
*/ + // hash contains the cached, hash value of |data|. uint32_t hash; } LHASH_ITEM; -/* lhash_cmp_func is a comparison function that returns a value equal, or not - * equal, to zero depending on whether |*a| is equal, or not equal to |*b|, - * respectively. Note the difference between this and |stack_cmp_func| in that - * this takes pointers to the objects directly. */ +// lhash_cmp_func is a comparison function that returns a value equal, or not +// equal, to zero depending on whether |*a| is equal, or not equal to |*b|, +// respectively. Note the difference between this and |stack_cmp_func| in that +// this takes pointers to the objects directly. typedef int (*lhash_cmp_func)(const void *a, const void *b); -/* lhash_hash_func is a function that maps an object to a uniformly distributed - * uint32_t. */ +// lhash_hash_func is a function that maps an object to a uniformly distributed +// uint32_t. typedef uint32_t (*lhash_hash_func)(const void *a); -typedef struct lhash_st { - /* num_items contains the total number of items in the hash table. */ - size_t num_items; - /* buckets is an array of |num_buckets| pointers. Each points to the head of - * a chain of LHASH_ITEM objects that have the same hash value, mod - * |num_buckets|. */ - LHASH_ITEM **buckets; - /* num_buckets contains the length of |buckets|. This value is always >= - * kMinNumBuckets. */ - size_t num_buckets; - /* callback_depth contains the current depth of |lh_doall| or |lh_doall_arg| - * calls. If non-zero then this suppresses resizing of the |buckets| array, - * which would otherwise disrupt the iteration. */ - unsigned callback_depth; - - lhash_cmp_func comp; - lhash_hash_func hash; -} _LHASH; - -/* lh_new returns a new, empty hash table or NULL on error. */ +typedef struct lhash_st _LHASH; + +// lh_new returns a new, empty hash table or NULL on error. OPENSSL_EXPORT _LHASH *lh_new(lhash_hash_func hash, lhash_cmp_func comp); -/* lh_free frees the hash table itself but none of the elements. See - * |lh_doall|. */ +// lh_free frees the hash table itself but none of the elements. See +// |lh_doall|. OPENSSL_EXPORT void lh_free(_LHASH *lh); -/* lh_num_items returns the number of items in |lh|. */ +// lh_num_items returns the number of items in |lh|. OPENSSL_EXPORT size_t lh_num_items(const _LHASH *lh); -/* lh_retrieve finds an element equal to |data| in the hash table and returns - * it. If no such element exists, it returns NULL. */ +// lh_retrieve finds an element equal to |data| in the hash table and returns +// it. If no such element exists, it returns NULL. OPENSSL_EXPORT void *lh_retrieve(const _LHASH *lh, const void *data); -/* lh_insert inserts |data| into the hash table. If an existing element is - * equal to |data| (with respect to the comparison function) then |*old_data| - * will be set to that value and it will be replaced. Otherwise, or in the - * event of an error, |*old_data| will be set to NULL. It returns one on - * success or zero in the case of an allocation error. */ +// lh_insert inserts |data| into the hash table. If an existing element is +// equal to |data| (with respect to the comparison function) then |*old_data| +// will be set to that value and it will be replaced. Otherwise, or in the +// event of an error, |*old_data| will be set to NULL. It returns one on +// success or zero in the case of an allocation error. OPENSSL_EXPORT int lh_insert(_LHASH *lh, void **old_data, void *data); -/* lh_delete removes an element equal to |data| from the hash table and returns - * it. 
If no such element is found, it returns NULL. */ +// lh_delete removes an element equal to |data| from the hash table and returns +// it. If no such element is found, it returns NULL. OPENSSL_EXPORT void *lh_delete(_LHASH *lh, const void *data); -/* lh_doall calls |func| on each element of the hash table. - * TODO(fork): rename this */ +// lh_doall calls |func| on each element of the hash table. +// TODO(fork): rename this OPENSSL_EXPORT void lh_doall(_LHASH *lh, void (*func)(void *)); -/* lh_doall_arg calls |func| on each element of the hash table and also passes - * |arg| as the second argument. - * TODO(fork): rename this */ +// lh_doall_arg calls |func| on each element of the hash table and also passes +// |arg| as the second argument. +// TODO(fork): rename this OPENSSL_EXPORT void lh_doall_arg(_LHASH *lh, void (*func)(void *, void *), void *arg); -/* lh_strhash is the default hash function which processes NUL-terminated - * strings. */ +// lh_strhash is the default hash function which processes NUL-terminated +// strings. OPENSSL_EXPORT uint32_t lh_strhash(const char *c); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_LHASH_H */ +#endif // OPENSSL_HEADER_LHASH_H diff --git a/Sources/BoringSSL/include/openssl/lhash_macros.h b/Sources/BoringSSL/include/openssl/lhash_macros.h index ca349a975..378c8391e 100644 --- a/Sources/BoringSSL/include/openssl/lhash_macros.h +++ b/Sources/BoringSSL/include/openssl/lhash_macros.h @@ -16,7 +16,7 @@ #error "Don't include this file directly. Include lhash.h" #endif -/* ASN1_OBJECT */ +// ASN1_OBJECT #define lh_ASN1_OBJECT_new(hash, comp) \ ((LHASH_OF(ASN1_OBJECT) *)lh_new( \ CHECKED_CAST(lhash_hash_func, uint32_t(*)(const ASN1_OBJECT *), hash), \ @@ -56,7 +56,7 @@ arg); -/* CONF_VALUE */ +// CONF_VALUE #define lh_CONF_VALUE_new(hash, comp) \ ((LHASH_OF(CONF_VALUE) *)lh_new( \ CHECKED_CAST(lhash_hash_func, uint32_t(*)(const CONF_VALUE *), hash), \ @@ -94,7 +94,7 @@ arg); -/* CRYPTO_BUFFER */ +// CRYPTO_BUFFER #define lh_CRYPTO_BUFFER_new(hash, comp) \ ((LHASH_OF(CRYPTO_BUFFER) *)lh_new( \ CHECKED_CAST(lhash_hash_func, uint32_t(*)(const CRYPTO_BUFFER *), hash), \ @@ -134,7 +134,7 @@ arg); -/* SSL_SESSION */ +// SSL_SESSION #define lh_SSL_SESSION_new(hash, comp) \ ((LHASH_OF(SSL_SESSION) *)lh_new( \ CHECKED_CAST(lhash_hash_func, uint32_t(*)(const SSL_SESSION *), hash), \ diff --git a/Sources/BoringSSL/include/openssl/md4.h b/Sources/BoringSSL/include/openssl/md4.h index b66fcb0f3..52b88ca31 100644 --- a/Sources/BoringSSL/include/openssl/md4.h +++ b/Sources/BoringSSL/include/openssl/md4.h @@ -64,31 +64,31 @@ extern "C" { #endif -/* MD4. */ +// MD4. -/* MD4_CBLOCK is the block size of MD4. */ +// MD4_CBLOCK is the block size of MD4. #define MD4_CBLOCK 64 -/* MD4_DIGEST_LENGTH is the length of an MD4 digest. */ +// MD4_DIGEST_LENGTH is the length of an MD4 digest. #define MD4_DIGEST_LENGTH 16 -/* MD4_Init initialises |md4| and returns one. */ +// MD4_Init initialises |md4| and returns one. OPENSSL_EXPORT int MD4_Init(MD4_CTX *md4); -/* MD4_Update adds |len| bytes from |data| to |md4| and returns one. */ +// MD4_Update adds |len| bytes from |data| to |md4| and returns one. OPENSSL_EXPORT int MD4_Update(MD4_CTX *md4, const void *data, size_t len); -/* MD4_Final adds the final padding to |md4| and writes the resulting digest to - * |md|, which must have at least |MD4_DIGEST_LENGTH| bytes of space. It - * returns one. 
*/ +// MD4_Final adds the final padding to |md4| and writes the resulting digest to +// |md|, which must have at least |MD4_DIGEST_LENGTH| bytes of space. It +// returns one. OPENSSL_EXPORT int MD4_Final(uint8_t *md, MD4_CTX *md4); -/* MD4 writes the digest of |len| bytes from |data| to |out| and returns |out|. - * There must be at least |MD4_DIGEST_LENGTH| bytes of space in |out|. */ +// MD4 writes the digest of |len| bytes from |data| to |out| and returns |out|. +// There must be at least |MD4_DIGEST_LENGTH| bytes of space in |out|. OPENSSL_EXPORT uint8_t *MD4(const uint8_t *data, size_t len, uint8_t *out); -/* MD4_Transform is a low-level function that performs a single, MD4 block - * transformation using the state from |md4| and 64 bytes from |block|. */ +// MD4_Transform is a low-level function that performs a single, MD4 block +// transformation using the state from |md4| and 64 bytes from |block|. OPENSSL_EXPORT void MD4_Transform(MD4_CTX *md4, const uint8_t *block); struct md4_state_st { @@ -100,7 +100,7 @@ struct md4_state_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_MD4_H */ +#endif // OPENSSL_HEADER_MD4_H diff --git a/Sources/BoringSSL/include/openssl/md5.h b/Sources/BoringSSL/include/openssl/md5.h index 55162f023..de6027f5c 100644 --- a/Sources/BoringSSL/include/openssl/md5.h +++ b/Sources/BoringSSL/include/openssl/md5.h @@ -64,32 +64,32 @@ extern "C" { #endif -/* MD5. */ +// MD5. -/* MD5_CBLOCK is the block size of MD5. */ +// MD5_CBLOCK is the block size of MD5. #define MD5_CBLOCK 64 -/* MD5_DIGEST_LENGTH is the length of an MD5 digest. */ +// MD5_DIGEST_LENGTH is the length of an MD5 digest. #define MD5_DIGEST_LENGTH 16 -/* MD5_Init initialises |md5| and returns one. */ +// MD5_Init initialises |md5| and returns one. OPENSSL_EXPORT int MD5_Init(MD5_CTX *md5); -/* MD5_Update adds |len| bytes from |data| to |md5| and returns one. */ +// MD5_Update adds |len| bytes from |data| to |md5| and returns one. OPENSSL_EXPORT int MD5_Update(MD5_CTX *md5, const void *data, size_t len); -/* MD5_Final adds the final padding to |md5| and writes the resulting digest to - * |md|, which must have at least |MD5_DIGEST_LENGTH| bytes of space. It - * returns one. */ +// MD5_Final adds the final padding to |md5| and writes the resulting digest to +// |md|, which must have at least |MD5_DIGEST_LENGTH| bytes of space. It +// returns one. OPENSSL_EXPORT int MD5_Final(uint8_t *md, MD5_CTX *md5); -/* MD5 writes the digest of |len| bytes from |data| to |out| and returns |out|. - * There must be at least |MD5_DIGEST_LENGTH| bytes of space in |out|. */ +// MD5 writes the digest of |len| bytes from |data| to |out| and returns |out|. +// There must be at least |MD5_DIGEST_LENGTH| bytes of space in |out|. OPENSSL_EXPORT uint8_t *MD5(const uint8_t *data, size_t len, uint8_t *out); -/* MD5_Transform is a low-level function that performs a single, MD5 block - * transformation using the state from |md5| and 64 bytes from |block|. */ +// MD5_Transform is a low-level function that performs a single, MD5 block +// transformation using the state from |md5| and 64 bytes from |block|. 
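For completeness, a sketch of the incremental interface documented above; MD5 is not collision-resistant, and this is included purely to illustrate the Init/Update/Final pattern:

#include <openssl/md5.h>

// md5_two_parts hashes |a| followed by |b|, producing the same digest as the
// one-shot |MD5| over the concatenation of the two buffers.
static void md5_two_parts(const uint8_t *a, size_t a_len,
                          const uint8_t *b, size_t b_len,
                          uint8_t out[MD5_DIGEST_LENGTH]) {
  MD5_CTX ctx;
  MD5_Init(&ctx);
  MD5_Update(&ctx, a, a_len);
  MD5_Update(&ctx, b, b_len);
  MD5_Final(out, &ctx);
}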
OPENSSL_EXPORT void MD5_Transform(MD5_CTX *md5, const uint8_t *block); struct md5_state_st { @@ -101,7 +101,7 @@ struct md5_state_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_MD5_H */ +#endif // OPENSSL_HEADER_MD5_H diff --git a/Sources/BoringSSL/include/openssl/mem.h b/Sources/BoringSSL/include/openssl/mem.h index 5d96a2d72..7d7087e60 100644 --- a/Sources/BoringSSL/include/openssl/mem.h +++ b/Sources/BoringSSL/include/openssl/mem.h @@ -67,64 +67,70 @@ extern "C" { #endif -/* Memory and string functions, see also buf.h. - * - * OpenSSL has, historically, had a complex set of malloc debugging options. - * However, that was written in a time before Valgrind and ASAN. Since we now - * have those tools, the OpenSSL allocation functions are simply macros around - * the standard memory functions. */ +// Memory and string functions, see also buf.h. +// +// BoringSSL has its own set of allocation functions, which keep track of +// allocation lengths and zero them out before freeing. All memory returned by +// BoringSSL API calls must therefore generally be freed using |OPENSSL_free| +// unless stated otherwise. + +// OPENSSL_malloc acts like a regular |malloc|. +OPENSSL_EXPORT void *OPENSSL_malloc(size_t size); -#define OPENSSL_malloc malloc -#define OPENSSL_realloc realloc -#define OPENSSL_free free +// OPENSSL_free does nothing if |ptr| is NULL. Otherwise it zeros out the +// memory allocated at |ptr| and frees it. +OPENSSL_EXPORT void OPENSSL_free(void *ptr); -/* OPENSSL_realloc_clean acts like |realloc|, but clears the previous memory - * buffer. Because this is implemented as a wrapper around |malloc|, it needs - * to be given the size of the buffer pointed to by |ptr|. */ -void *OPENSSL_realloc_clean(void *ptr, size_t old_size, size_t new_size); +// OPENSSL_realloc returns a pointer to a buffer of |new_size| bytes that +// contains the contents of |ptr|. Unlike |realloc|, a new buffer is always +// allocated and the data at |ptr| is always wiped and freed. +OPENSSL_EXPORT void *OPENSSL_realloc(void *ptr, size_t new_size); -/* OPENSSL_cleanse zeros out |len| bytes of memory at |ptr|. This is similar to - * |memset_s| from C11. */ +// OPENSSL_cleanse zeros out |len| bytes of memory at |ptr|. This is similar to +// |memset_s| from C11. OPENSSL_EXPORT void OPENSSL_cleanse(void *ptr, size_t len); -/* CRYPTO_memcmp returns zero iff the |len| bytes at |a| and |b| are equal. It - * takes an amount of time dependent on |len|, but independent of the contents - * of |a| and |b|. Unlike memcmp, it cannot be used to put elements into a - * defined order as the return value when a != b is undefined, other than to be - * non-zero. */ +// CRYPTO_memcmp returns zero iff the |len| bytes at |a| and |b| are equal. It +// takes an amount of time dependent on |len|, but independent of the contents +// of |a| and |b|. Unlike memcmp, it cannot be used to put elements into a +// defined order as the return value when a != b is undefined, other than to be +// non-zero. OPENSSL_EXPORT int CRYPTO_memcmp(const void *a, const void *b, size_t len); -/* OPENSSL_hash32 implements the 32 bit, FNV-1a hash. */ +// OPENSSL_hash32 implements the 32 bit, FNV-1a hash. OPENSSL_EXPORT uint32_t OPENSSL_hash32(const void *ptr, size_t len); -/* OPENSSL_strdup has the same behaviour as strdup(3). */ +// OPENSSL_strdup has the same behaviour as strdup(3). OPENSSL_EXPORT char *OPENSSL_strdup(const char *s); -/* OPENSSL_strnlen has the same behaviour as strnlen(3). 
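// A minimal usage sketch for the allocation functions documented above:
// memory returned by BoringSSL APIs (including OPENSSL_strdup here) is
// released with OPENSSL_free, which wipes the allocation before freeing it.
// Error handling is abbreviated.
#include <string.h>
#include <openssl/mem.h>

static void mem_usage_sketch(void) {
  uint8_t *buf = OPENSSL_malloc(64);
  if (buf == NULL) {
    return;
  }
  memset(buf, 0x42, 64);
  OPENSSL_free(buf);  // zeros the 64 bytes, then frees them

  char *label = OPENSSL_strdup("example");  // free with OPENSSL_free, not free()
  OPENSSL_free(label);
}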
*/ +// OPENSSL_strnlen has the same behaviour as strnlen(3). OPENSSL_EXPORT size_t OPENSSL_strnlen(const char *s, size_t len); -/* OPENSSL_strcasecmp has the same behaviour as strcasecmp(3). */ +// OPENSSL_tolower is a locale-independent version of tolower(3). +OPENSSL_EXPORT int OPENSSL_tolower(int c); + +// OPENSSL_strcasecmp is a locale-independent version of strcasecmp(3). OPENSSL_EXPORT int OPENSSL_strcasecmp(const char *a, const char *b); -/* OPENSSL_strncasecmp has the same behaviour as strncasecmp(3). */ +// OPENSSL_strncasecmp is a locale-independent version of strncasecmp(3). OPENSSL_EXPORT int OPENSSL_strncasecmp(const char *a, const char *b, size_t n); -/* DECIMAL_SIZE returns an upper bound for the length of the decimal - * representation of the given type. */ +// DECIMAL_SIZE returns an upper bound for the length of the decimal +// representation of the given type. #define DECIMAL_SIZE(type) ((sizeof(type)*8+2)/3+1) -/* BIO_snprintf has the same behavior as snprintf(3). */ +// BIO_snprintf has the same behavior as snprintf(3). OPENSSL_EXPORT int BIO_snprintf(char *buf, size_t n, const char *format, ...) OPENSSL_PRINTF_FORMAT_FUNC(3, 4); -/* BIO_vsnprintf has the same behavior as vsnprintf(3). */ +// BIO_vsnprintf has the same behavior as vsnprintf(3). OPENSSL_EXPORT int BIO_vsnprintf(char *buf, size_t n, const char *format, va_list args) OPENSSL_PRINTF_FORMAT_FUNC(3, 0); -/* Deprecated functions. */ +// Deprecated functions. #define CRYPTO_malloc OPENSSL_malloc #define CRYPTO_realloc OPENSSL_realloc @@ -132,7 +138,7 @@ OPENSSL_EXPORT int BIO_vsnprintf(char *buf, size_t n, const char *format, #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -143,8 +149,8 @@ BORINGSSL_MAKE_DELETER(uint8_t, OPENSSL_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif -#endif /* OPENSSL_HEADER_MEM_H */ +#endif // OPENSSL_HEADER_MEM_H diff --git a/Sources/BoringSSL/include/openssl/nid.h b/Sources/BoringSSL/include/openssl/nid.h index 4270dc1b4..afeb2dea4 100644 --- a/Sources/BoringSSL/include/openssl/nid.h +++ b/Sources/BoringSSL/include/openssl/nid.h @@ -65,6 +65,7 @@ extern "C" { #endif + /* The nid library provides numbered values for ASN.1 object identifiers and * other symbols. These values are used by other libraries to identify * cryptographic primitives. @@ -78,6 +79,7 @@ extern "C" { * These values should not be used outside of a single process; they are not * stable identifiers. 
*/ + #define SN_undef "UNDEF" #define LN_undef "undefined" #define NID_undef 0 @@ -4192,6 +4194,47 @@ extern "C" { #define SN_X25519 "X25519" #define NID_X25519 948 +#define SN_ED25519 "ED25519" +#define NID_ED25519 949 +#define OBJ_ED25519 1L, 3L, 101L, 112L + +#define SN_chacha20_poly1305 "ChaCha20-Poly1305" +#define LN_chacha20_poly1305 "chacha20-poly1305" +#define NID_chacha20_poly1305 950 + +#define SN_kx_rsa "KxRSA" +#define LN_kx_rsa "kx-rsa" +#define NID_kx_rsa 951 + +#define SN_kx_ecdhe "KxECDHE" +#define LN_kx_ecdhe "kx-ecdhe" +#define NID_kx_ecdhe 952 + +#define SN_kx_psk "KxPSK" +#define LN_kx_psk "kx-psk" +#define NID_kx_psk 953 + +#define SN_auth_rsa "AuthRSA" +#define LN_auth_rsa "auth-rsa" +#define NID_auth_rsa 954 + +#define SN_auth_ecdsa "AuthECDSA" +#define LN_auth_ecdsa "auth-ecdsa" +#define NID_auth_ecdsa 955 + +#define SN_auth_psk "AuthPSK" +#define LN_auth_psk "auth-psk" +#define NID_auth_psk 956 + +#define SN_kx_any "KxANY" +#define LN_kx_any "kx-any" +#define NID_kx_any 957 + +#define SN_auth_any "AuthANY" +#define LN_auth_any "auth-any" +#define NID_auth_any 958 + + #if defined(__cplusplus) } /* extern C */ #endif diff --git a/Sources/BoringSSL/include/openssl/obj.h b/Sources/BoringSSL/include/openssl/obj.h index 63cf86625..374658ea3 100644 --- a/Sources/BoringSSL/include/openssl/obj.h +++ b/Sources/BoringSSL/include/openssl/obj.h @@ -67,129 +67,135 @@ extern "C" { #endif -/* The objects library deals with the registration and indexing of ASN.1 object - * identifiers. These values are often written as a dotted sequence of numbers, - * e.g. 1.2.840.113549.1.9.16.3.9. - * - * Internally, OpenSSL likes to deal with these values by numbering them with - * numbers called "nids". OpenSSL has a large, built-in database of common - * object identifiers and also has both short and long names for them. - * - * This library provides functions for translating between object identifiers, - * nids, short names and long names. - * - * The nid values should not be used outside of a single process: they are not - * stable identifiers. */ - - -/* Basic operations. */ - -/* OBJ_dup returns a duplicate copy of |obj| or NULL on allocation failure. */ +// The objects library deals with the registration and indexing of ASN.1 object +// identifiers. These values are often written as a dotted sequence of numbers, +// e.g. 1.2.840.113549.1.9.16.3.9. +// +// Internally, OpenSSL likes to deal with these values by numbering them with +// numbers called "nids". OpenSSL has a large, built-in database of common +// object identifiers and also has both short and long names for them. +// +// This library provides functions for translating between object identifiers, +// nids, short names and long names. +// +// The nid values should not be used outside of a single process: they are not +// stable identifiers. + + +// Basic operations. + +// OBJ_dup returns a duplicate copy of |obj| or NULL on allocation failure. OPENSSL_EXPORT ASN1_OBJECT *OBJ_dup(const ASN1_OBJECT *obj); -/* OBJ_cmp returns a value less than, equal to or greater than zero if |a| is - * less than, equal to or greater than |b|, respectively. */ +// OBJ_cmp returns a value less than, equal to or greater than zero if |a| is +// less than, equal to or greater than |b|, respectively. OPENSSL_EXPORT int OBJ_cmp(const ASN1_OBJECT *a, const ASN1_OBJECT *b); +// OBJ_get0_data returns a pointer to the DER representation of |obj|. +OPENSSL_EXPORT const uint8_t *OBJ_get0_data(const ASN1_OBJECT *obj); -/* Looking up nids. 
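// A small sketch tying the new NID constants above to the obj.h lookup
// functions declared further down in this diff: a nid such as NID_ED25519 can
// be mapped back to its short name and to its DER-encoded OID (1.3.101.112).
#include <stdio.h>
#include <openssl/nid.h>
#include <openssl/obj.h>

static void nid_lookup_sketch(void) {
  printf("short name: %s\n", OBJ_nid2sn(NID_ED25519));  // "ED25519"

  const ASN1_OBJECT *obj = OBJ_nid2obj(NID_ED25519);
  if (obj != NULL) {
    // OBJ_get0_data/OBJ_length expose the DER encoding of the OID.
    printf("OID DER length: %zu bytes\n", OBJ_length(obj));
  }
}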
*/ +// OBJ_length returns the length of the DER representation of |obj|. +OPENSSL_EXPORT size_t OBJ_length(const ASN1_OBJECT *obj); -/* OBJ_obj2nid returns the nid corresponding to |obj|, or |NID_undef| if no - * such object is known. */ + +// Looking up nids. + +// OBJ_obj2nid returns the nid corresponding to |obj|, or |NID_undef| if no +// such object is known. OPENSSL_EXPORT int OBJ_obj2nid(const ASN1_OBJECT *obj); -/* OBJ_cbs2nid returns the nid corresponding to the DER data in |cbs|, or - * |NID_undef| if no such object is known. */ +// OBJ_cbs2nid returns the nid corresponding to the DER data in |cbs|, or +// |NID_undef| if no such object is known. OPENSSL_EXPORT int OBJ_cbs2nid(const CBS *cbs); -/* OBJ_sn2nid returns the nid corresponding to |short_name|, or |NID_undef| if - * no such short name is known. */ +// OBJ_sn2nid returns the nid corresponding to |short_name|, or |NID_undef| if +// no such short name is known. OPENSSL_EXPORT int OBJ_sn2nid(const char *short_name); -/* OBJ_ln2nid returns the nid corresponding to |long_name|, or |NID_undef| if - * no such long name is known. */ +// OBJ_ln2nid returns the nid corresponding to |long_name|, or |NID_undef| if +// no such long name is known. OPENSSL_EXPORT int OBJ_ln2nid(const char *long_name); -/* OBJ_txt2nid returns the nid corresponding to |s|, which may be a short name, - * long name, or an ASCII string containing a dotted sequence of numbers. It - * returns the nid or NID_undef if unknown. */ +// OBJ_txt2nid returns the nid corresponding to |s|, which may be a short name, +// long name, or an ASCII string containing a dotted sequence of numbers. It +// returns the nid or NID_undef if unknown. OPENSSL_EXPORT int OBJ_txt2nid(const char *s); -/* Getting information about nids. */ +// Getting information about nids. -/* OBJ_nid2obj returns the ASN1_OBJECT corresponding to |nid|, or NULL if |nid| - * is unknown. */ +// OBJ_nid2obj returns the ASN1_OBJECT corresponding to |nid|, or NULL if |nid| +// is unknown. OPENSSL_EXPORT const ASN1_OBJECT *OBJ_nid2obj(int nid); -/* OBJ_nid2sn returns the short name for |nid|, or NULL if |nid| is unknown. */ +// OBJ_nid2sn returns the short name for |nid|, or NULL if |nid| is unknown. OPENSSL_EXPORT const char *OBJ_nid2sn(int nid); -/* OBJ_nid2ln returns the long name for |nid|, or NULL if |nid| is unknown. */ +// OBJ_nid2ln returns the long name for |nid|, or NULL if |nid| is unknown. OPENSSL_EXPORT const char *OBJ_nid2ln(int nid); -/* OBJ_nid2cbb writes |nid| as an ASN.1 OBJECT IDENTIFIER to |out|. It returns - * one on success or zero otherwise. */ +// OBJ_nid2cbb writes |nid| as an ASN.1 OBJECT IDENTIFIER to |out|. It returns +// one on success or zero otherwise. OPENSSL_EXPORT int OBJ_nid2cbb(CBB *out, int nid); -/* Dealing with textual representations of object identifiers. */ +// Dealing with textual representations of object identifiers. -/* OBJ_txt2obj returns an ASN1_OBJECT for the textual representation in |s|. - * If |dont_search_names| is zero, then |s| will be matched against the long - * and short names of a known objects to find a match. Otherwise |s| must - * contain an ASCII string with a dotted sequence of numbers. The resulting - * object need not be previously known. It returns a freshly allocated - * |ASN1_OBJECT| or NULL on error. */ +// OBJ_txt2obj returns an ASN1_OBJECT for the textual representation in |s|. +// If |dont_search_names| is zero, then |s| will be matched against the long +// and short names of a known objects to find a match. 
Otherwise |s| must +// contain an ASCII string with a dotted sequence of numbers. The resulting +// object need not be previously known. It returns a freshly allocated +// |ASN1_OBJECT| or NULL on error. OPENSSL_EXPORT ASN1_OBJECT *OBJ_txt2obj(const char *s, int dont_search_names); -/* OBJ_obj2txt converts |obj| to a textual representation. If - * |always_return_oid| is zero then |obj| will be matched against known objects - * and the long (preferably) or short name will be used if found. Otherwise - * |obj| will be converted into a dotted sequence of integers. If |out| is not - * NULL, then at most |out_len| bytes of the textual form will be written - * there. If |out_len| is at least one, then string written to |out| will - * always be NUL terminated. It returns the number of characters that could - * have been written, not including the final NUL, or -1 on error. */ +// OBJ_obj2txt converts |obj| to a textual representation. If +// |always_return_oid| is zero then |obj| will be matched against known objects +// and the long (preferably) or short name will be used if found. Otherwise +// |obj| will be converted into a dotted sequence of integers. If |out| is not +// NULL, then at most |out_len| bytes of the textual form will be written +// there. If |out_len| is at least one, then string written to |out| will +// always be NUL terminated. It returns the number of characters that could +// have been written, not including the final NUL, or -1 on error. OPENSSL_EXPORT int OBJ_obj2txt(char *out, int out_len, const ASN1_OBJECT *obj, int always_return_oid); -/* Adding objects at runtime. */ +// Adding objects at runtime. -/* OBJ_create adds a known object and returns the nid of the new object, or - * NID_undef on error. */ +// OBJ_create adds a known object and returns the nid of the new object, or +// NID_undef on error. OPENSSL_EXPORT int OBJ_create(const char *oid, const char *short_name, const char *long_name); -/* Handling signature algorithm identifiers. - * - * Some NIDs (e.g. sha256WithRSAEncryption) specify both a digest algorithm and - * a public key algorithm. The following functions map between pairs of digest - * and public-key algorithms and the NIDs that specify their combination. - * - * Sometimes the combination NID leaves the digest unspecified (e.g. - * rsassaPss). In these cases, the digest NID is |NID_undef|. */ - -/* OBJ_find_sigid_algs finds the digest and public-key NIDs that correspond to - * the signing algorithm |sign_nid|. If successful, it sets |*out_digest_nid| - * and |*out_pkey_nid| and returns one. Otherwise it returns zero. Any of - * |out_digest_nid| or |out_pkey_nid| can be NULL if the caller doesn't need - * that output value. */ +// Handling signature algorithm identifiers. +// +// Some NIDs (e.g. sha256WithRSAEncryption) specify both a digest algorithm and +// a public key algorithm. The following functions map between pairs of digest +// and public-key algorithms and the NIDs that specify their combination. +// +// Sometimes the combination NID leaves the digest unspecified (e.g. +// rsassaPss). In these cases, the digest NID is |NID_undef|. + +// OBJ_find_sigid_algs finds the digest and public-key NIDs that correspond to +// the signing algorithm |sign_nid|. If successful, it sets |*out_digest_nid| +// and |*out_pkey_nid| and returns one. Otherwise it returns zero. Any of +// |out_digest_nid| or |out_pkey_nid| can be NULL if the caller doesn't need +// that output value. 
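// A minimal round-trip sketch for OBJ_txt2obj/OBJ_obj2txt above: parse a
// dotted OID string into a fresh ASN1_OBJECT and convert it back to text.
// ASN1_OBJECT_free is assumed from asn1.h; error handling is abbreviated.
#include <stdio.h>
#include <openssl/asn1.h>
#include <openssl/obj.h>

static void oid_text_sketch(void) {
  // dont_search_names = 1: |s| must be a dotted sequence of numbers.
  ASN1_OBJECT *obj = OBJ_txt2obj("1.2.840.113549.1.9.16.3.9", 1);
  if (obj == NULL) {
    return;
  }

  char text[64];
  // always_return_oid = 1: always emit the dotted form, never a name.
  if (OBJ_obj2txt(text, (int)sizeof(text), obj, 1) > 0) {
    printf("round-tripped OID: %s\n", text);
  }
  ASN1_OBJECT_free(obj);
}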
OPENSSL_EXPORT int OBJ_find_sigid_algs(int sign_nid, int *out_digest_nid, int *out_pkey_nid); -/* OBJ_find_sigid_by_algs finds the signature NID that corresponds to the - * combination of |digest_nid| and |pkey_nid|. If success, it sets - * |*out_sign_nid| and returns one. Otherwise it returns zero. The - * |out_sign_nid| argument can be NULL if the caller only wishes to learn - * whether the combination is valid. */ +// OBJ_find_sigid_by_algs finds the signature NID that corresponds to the +// combination of |digest_nid| and |pkey_nid|. If success, it sets +// |*out_sign_nid| and returns one. Otherwise it returns zero. The +// |out_sign_nid| argument can be NULL if the caller only wishes to learn +// whether the combination is valid. OPENSSL_EXPORT int OBJ_find_sigid_by_algs(int *out_sign_nid, int digest_nid, int pkey_nid); -/* Deprecated functions. */ +// Deprecated functions. typedef struct obj_name_st { int type; @@ -201,26 +207,27 @@ typedef struct obj_name_st { #define OBJ_NAME_TYPE_MD_METH 1 #define OBJ_NAME_TYPE_CIPHER_METH 2 -/* OBJ_NAME_do_all_sorted calls |callback| zero or more times, each time with - * the name of a different primitive. If |type| is |OBJ_NAME_TYPE_MD_METH| then - * the primitives will be hash functions, alternatively if |type| is - * |OBJ_NAME_TYPE_CIPHER_METH| then the primitives will be ciphers or cipher - * modes. - * - * This function is ill-specified and should never be used. */ +// OBJ_NAME_do_all_sorted calls |callback| zero or more times, each time with +// the name of a different primitive. If |type| is |OBJ_NAME_TYPE_MD_METH| then +// the primitives will be hash functions, alternatively if |type| is +// |OBJ_NAME_TYPE_CIPHER_METH| then the primitives will be ciphers or cipher +// modes. +// +// This function is ill-specified and should never be used. OPENSSL_EXPORT void OBJ_NAME_do_all_sorted( int type, void (*callback)(const OBJ_NAME *, void *arg), void *arg); -/* OBJ_NAME_do_all calls |OBJ_NAME_do_all_sorted|. */ +// OBJ_NAME_do_all calls |OBJ_NAME_do_all_sorted|. 
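// A small sketch for the signature-algorithm helpers above: split a combined
// signature nid into its digest and key-type nids, then recombine them.
// NID_sha256WithRSAEncryption, NID_sha256 and NID_rsaEncryption are assumed
// from nid.h.
#include <stdio.h>
#include <openssl/nid.h>
#include <openssl/obj.h>

static void sigid_sketch(void) {
  int digest_nid, pkey_nid;
  if (OBJ_find_sigid_algs(NID_sha256WithRSAEncryption, &digest_nid,
                          &pkey_nid)) {
    printf("digest=%s key=%s\n", OBJ_nid2sn(digest_nid),
           OBJ_nid2sn(pkey_nid));
  }

  int sign_nid;
  if (OBJ_find_sigid_by_algs(&sign_nid, NID_sha256, NID_rsaEncryption)) {
    printf("combined=%s\n", OBJ_nid2sn(sign_nid));
  }
}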
OPENSSL_EXPORT void OBJ_NAME_do_all(int type, void (*callback)(const OBJ_NAME *, void *arg), void *arg); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif #define OBJ_R_UNKNOWN_NID 100 +#define OBJ_R_INVALID_OID_STRING 101 -#endif /* OPENSSL_HEADER_OBJ_H */ +#endif // OPENSSL_HEADER_OBJ_H diff --git a/Sources/BoringSSL/include/openssl/opensslconf.h b/Sources/BoringSSL/include/openssl/opensslconf.h index bf65fc3b2..3c6ffd8be 100644 --- a/Sources/BoringSSL/include/openssl/opensslconf.h +++ b/Sources/BoringSSL/include/openssl/opensslconf.h @@ -19,18 +19,23 @@ #define OPENSSL_HEADER_OPENSSLCONF_H +#define OPENSSL_NO_ASYNC #define OPENSSL_NO_BF +#define OPENSSL_NO_BLAKE2 #define OPENSSL_NO_BUF_FREELISTS #define OPENSSL_NO_CAMELLIA #define OPENSSL_NO_CAPIENG #define OPENSSL_NO_CAST #define OPENSSL_NO_CMS #define OPENSSL_NO_COMP +#define OPENSSL_NO_CT #define OPENSSL_NO_DANE #define OPENSSL_NO_DEPRECATED +#define OPENSSL_NO_DGRAM #define OPENSSL_NO_DYNAMIC_ENGINE #define OPENSSL_NO_EC_NISTP_64_GCC_128 #define OPENSSL_NO_EC2M +#define OPENSSL_NO_EGD #define OPENSSL_NO_ENGINE #define OPENSSL_NO_GMP #define OPENSSL_NO_GOST @@ -52,9 +57,11 @@ #define OPENSSL_NO_SEED #define OPENSSL_NO_SRP #define OPENSSL_NO_SSL2 +#define OPENSSL_NO_SSL3 +#define OPENSSL_NO_SSL3_METHOD #define OPENSSL_NO_STATIC_ENGINE #define OPENSSL_NO_STORE #define OPENSSL_NO_WHIRLPOOL -#endif /* OPENSSL_HEADER_OPENSSLCONF_H */ +#endif // OPENSSL_HEADER_OPENSSLCONF_H diff --git a/Sources/BoringSSL/include/openssl/pem.h b/Sources/BoringSSL/include/openssl/pem.h index 58aecaf50..4868e12fe 100644 --- a/Sources/BoringSSL/include/openssl/pem.h +++ b/Sources/BoringSSL/include/openssl/pem.h @@ -76,41 +76,6 @@ extern "C" { #define PEM_BUFSIZE 1024 -#define PEM_OBJ_UNDEF 0 -#define PEM_OBJ_X509 1 -#define PEM_OBJ_X509_REQ 2 -#define PEM_OBJ_CRL 3 -#define PEM_OBJ_SSL_SESSION 4 -#define PEM_OBJ_PRIV_KEY 10 -#define PEM_OBJ_PRIV_RSA 11 -#define PEM_OBJ_PRIV_DSA 12 -#define PEM_OBJ_PRIV_DH 13 -#define PEM_OBJ_PUB_RSA 14 -#define PEM_OBJ_PUB_DSA 15 -#define PEM_OBJ_PUB_DH 16 -#define PEM_OBJ_DHPARAMS 17 -#define PEM_OBJ_DSAPARAMS 18 -#define PEM_OBJ_PRIV_RSA_PUBLIC 19 -#define PEM_OBJ_PRIV_ECDSA 20 -#define PEM_OBJ_PUB_ECDSA 21 -#define PEM_OBJ_ECPARAMETERS 22 - -#define PEM_ERROR 30 -#define PEM_DEK_DES_CBC 40 -#define PEM_DEK_IDEA_CBC 45 -#define PEM_DEK_DES_EDE 50 -#define PEM_DEK_DES_ECB 60 -#define PEM_DEK_RSA 70 -#define PEM_DEK_RSA_MD2 80 -#define PEM_DEK_RSA_MD5 90 - -#define PEM_MD_MD2 NID_md2 -#define PEM_MD_MD5 NID_md5 -#define PEM_MD_SHA NID_sha -#define PEM_MD_MD2_RSA NID_md2WithRSAEncryption -#define PEM_MD_MD5_RSA NID_md5WithRSAEncryption -#define PEM_MD_SHA_RSA NID_sha1WithRSAEncryption - #define PEM_STRING_X509_OLD "X509 CERTIFICATE" #define PEM_STRING_X509 "CERTIFICATE" #define PEM_STRING_X509_PAIR "CERTIFICATE PAIR" @@ -136,71 +101,12 @@ extern "C" { #define PEM_STRING_ECPRIVATEKEY "EC PRIVATE KEY" #define PEM_STRING_CMS "CMS" - /* Note that this structure is initialised by PEM_SealInit and cleaned up - by PEM_SealFinal (at least for now) */ -typedef struct PEM_Encode_Seal_st - { - EVP_ENCODE_CTX encode; - EVP_MD_CTX md; - EVP_CIPHER_CTX cipher; - } PEM_ENCODE_SEAL_CTX; - /* enc_type is one off */ #define PEM_TYPE_ENCRYPTED 10 #define PEM_TYPE_MIC_ONLY 20 #define PEM_TYPE_MIC_CLEAR 30 #define PEM_TYPE_CLEAR 40 -typedef struct pem_recip_st - { - char *name; - X509_NAME *dn; - - int cipher; - int key_enc; - /* char iv[8]; unused and wrong size */ - } PEM_USER; - -typedef struct pem_ctx_st - { - int type; /* what 
type of object */ - - struct { - int version; - int mode; - } proc_type; - - char *domain; - - struct { - int cipher; - /* unused, and wrong size - unsigned char iv[8]; */ - } DEK_info; - - PEM_USER *originator; - - int num_recipient; - PEM_USER **recipient; - - EVP_MD *md; /* signature type */ - - int md_enc; /* is the md encrypted or not? */ - int md_len; /* length of md_data */ - char *md_data; /* message digest, could be pkey encrypted */ - - EVP_CIPHER *dec; /* date encryption cipher */ - int key_len; /* key length */ - unsigned char *key; /* key */ - /* unused, and wrong size - unsigned char iv[8]; */ - - - int data_enc; /* is the data encrypted */ - int data_len; - unsigned char *data; - } PEM_CTX; - /* These macros make the PEM_read/PEM_write functions easier to maintain and * write. Now they are all implemented with either: * IMPLEMENT_PEM_rw(...) or IMPLEMENT_PEM_rw_cb(...) @@ -219,7 +125,7 @@ typedef struct pem_ctx_st #define IMPLEMENT_PEM_read_fp(name, type, str, asn1) \ OPENSSL_EXPORT type *PEM_read_##name(FILE *fp, type **x, pem_password_cb *cb, void *u)\ { \ -return PEM_ASN1_read((d2i_of_void *)d2i_##asn1, str,fp,(void **)x,cb,u); \ +return (type *)PEM_ASN1_read((d2i_of_void *)d2i_##asn1, str,fp,(void **)x,cb,u); \ } #define IMPLEMENT_PEM_write_fp(name, type, str, asn1) \ @@ -255,7 +161,7 @@ OPENSSL_EXPORT int PEM_write_##name(FILE *fp, type *x, const EVP_CIPHER *enc, \ #define IMPLEMENT_PEM_read_bio(name, type, str, asn1) \ OPENSSL_EXPORT type *PEM_read_bio_##name(BIO *bp, type **x, pem_password_cb *cb, void *u)\ { \ -return PEM_ASN1_read_bio((d2i_of_void *)d2i_##asn1, str,bp,(void **)x,cb,u); \ +return (type *)PEM_ASN1_read_bio((d2i_of_void *)d2i_##asn1, str,bp,(void **)x,cb,u); \ } #define IMPLEMENT_PEM_write_bio(name, type, str, asn1) \ @@ -404,14 +310,6 @@ OPENSSL_EXPORT void * PEM_ASN1_read(d2i_of_void *d2i, const char *name, FILE *f OPENSSL_EXPORT int PEM_ASN1_write(i2d_of_void *i2d,const char *name,FILE *fp, void *x,const EVP_CIPHER *enc,unsigned char *kstr, int klen,pem_password_cb *callback, void *u); OPENSSL_EXPORT STACK_OF(X509_INFO) * PEM_X509_INFO_read(FILE *fp, STACK_OF(X509_INFO) *sk, pem_password_cb *cb, void *u); -OPENSSL_EXPORT int PEM_SealInit(PEM_ENCODE_SEAL_CTX *ctx, EVP_CIPHER *type, EVP_MD *md_type, unsigned char **ek, int *ekl, unsigned char *iv, EVP_PKEY **pubk, int npubk); -OPENSSL_EXPORT void PEM_SealUpdate(PEM_ENCODE_SEAL_CTX *ctx, unsigned char *out, int *outl, unsigned char *in, int inl); -OPENSSL_EXPORT int PEM_SealFinal(PEM_ENCODE_SEAL_CTX *ctx, unsigned char *sig,int *sigl, unsigned char *out, int *outl, EVP_PKEY *priv); - -OPENSSL_EXPORT void PEM_SignInit(EVP_MD_CTX *ctx, EVP_MD *type); -OPENSSL_EXPORT void PEM_SignUpdate(EVP_MD_CTX *ctx,unsigned char *d,unsigned int cnt); -OPENSSL_EXPORT int PEM_SignFinal(EVP_MD_CTX *ctx, unsigned char *sigret, unsigned int *siglen, EVP_PKEY *pkey); - /* PEM_def_callback treats |userdata| as a string and copies it into |buf|, * assuming its |size| is sufficient. Returns the length of the string, or 0 * if there is not enough room. 
If either |buf| or |userdata| is NULL, 0 is @@ -426,17 +324,11 @@ DECLARE_PEM_rw(X509, X509) DECLARE_PEM_rw(X509_AUX, X509) -DECLARE_PEM_rw(X509_CERT_PAIR, X509_CERT_PAIR) - DECLARE_PEM_rw(X509_REQ, X509_REQ) DECLARE_PEM_write(X509_REQ_NEW, X509_REQ) DECLARE_PEM_rw(X509_CRL, X509_CRL) -/* DECLARE_PEM_rw(PKCS7, PKCS7) */ - -DECLARE_PEM_rw(NETSCAPE_CERT_SEQUENCE, NETSCAPE_CERT_SEQUENCE) - DECLARE_PEM_rw(PKCS8, X509_SIG) DECLARE_PEM_rw(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO) @@ -481,18 +373,6 @@ OPENSSL_EXPORT EVP_PKEY *d2i_PKCS8PrivateKey_fp(FILE *fp, EVP_PKEY **x, pem_pass OPENSSL_EXPORT int PEM_write_PKCS8PrivateKey(FILE *fp,EVP_PKEY *x,const EVP_CIPHER *enc, char *kstr,int klen, pem_password_cb *cd, void *u); -OPENSSL_EXPORT EVP_PKEY *b2i_PrivateKey(const unsigned char **in, long length); -OPENSSL_EXPORT EVP_PKEY *b2i_PublicKey(const unsigned char **in, long length); -OPENSSL_EXPORT EVP_PKEY *b2i_PrivateKey_bio(BIO *in); -OPENSSL_EXPORT EVP_PKEY *b2i_PublicKey_bio(BIO *in); -OPENSSL_EXPORT int i2b_PrivateKey_bio(BIO *out, EVP_PKEY *pk); -OPENSSL_EXPORT int i2b_PublicKey_bio(BIO *out, EVP_PKEY *pk); -OPENSSL_EXPORT EVP_PKEY *b2i_PVK_bio(BIO *in, pem_password_cb *cb, void *u); -OPENSSL_EXPORT int i2b_PVK_bio(BIO *out, EVP_PKEY *pk, int enclevel, pem_password_cb *cb, void *u); - - -void ERR_load_PEM_strings(void); - #ifdef __cplusplus } diff --git a/Sources/BoringSSL/include/openssl/pkcs7.h b/Sources/BoringSSL/include/openssl/pkcs7.h index 6e5e43307..d7081413b 100644 --- a/Sources/BoringSSL/include/openssl/pkcs7.h +++ b/Sources/BoringSSL/include/openssl/pkcs7.h @@ -12,5 +12,71 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* This header is provided in order to make compiling against code that expects - OpenSSL easier. */ +#ifndef OPENSSL_HEADER_PKCS7_H +#define OPENSSL_HEADER_PKCS7_H + +#include + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + + +// PKCS#7. +// +// This library contains functions for extracting information from PKCS#7 +// structures (RFC 2315). + +DECLARE_STACK_OF(CRYPTO_BUFFER) +DECLARE_STACK_OF(X509) +DECLARE_STACK_OF(X509_CRL) + +// PKCS7_get_raw_certificates parses a PKCS#7, SignedData structure from |cbs| +// and appends the included certificates to |out_certs|. It returns one on +// success and zero on error. +OPENSSL_EXPORT int PKCS7_get_raw_certificates( + STACK_OF(CRYPTO_BUFFER) *out_certs, CBS *cbs, CRYPTO_BUFFER_POOL *pool); + +// PKCS7_get_certificates behaves like |PKCS7_get_raw_certificates| but parses +// them into |X509| objects. +OPENSSL_EXPORT int PKCS7_get_certificates(STACK_OF(X509) *out_certs, CBS *cbs); + +// PKCS7_bundle_certificates appends a PKCS#7, SignedData structure containing +// |certs| to |out|. It returns one on success and zero on error. +OPENSSL_EXPORT int PKCS7_bundle_certificates( + CBB *out, const STACK_OF(X509) *certs); + +// PKCS7_get_CRLs parses a PKCS#7, SignedData structure from |cbs| and appends +// the included CRLs to |out_crls|. It returns one on success and zero on +// error. +OPENSSL_EXPORT int PKCS7_get_CRLs(STACK_OF(X509_CRL) *out_crls, CBS *cbs); + +// PKCS7_bundle_CRLs appends a PKCS#7, SignedData structure containing +// |crls| to |out|. It returns one on success and zero on error. +OPENSSL_EXPORT int PKCS7_bundle_CRLs(CBB *out, const STACK_OF(X509_CRL) *crls); + +// PKCS7_get_PEM_certificates reads a PEM-encoded, PKCS#7, SignedData structure +// from |pem_bio| and appends the included certificates to |out_certs|. 
It +// returns one on success and zero on error. +OPENSSL_EXPORT int PKCS7_get_PEM_certificates(STACK_OF(X509) *out_certs, + BIO *pem_bio); + +// PKCS7_get_PEM_CRLs reads a PEM-encoded, PKCS#7, SignedData structure from +// |pem_bio| and appends the included CRLs to |out_crls|. It returns one on +// success and zero on error. +OPENSSL_EXPORT int PKCS7_get_PEM_CRLs(STACK_OF(X509_CRL) *out_crls, + BIO *pem_bio); + + +#if defined(__cplusplus) +} // extern C +#endif + +#define PKCS7_R_BAD_PKCS7_VERSION 100 +#define PKCS7_R_NOT_PKCS7_SIGNED_DATA 101 +#define PKCS7_R_NO_CERTIFICATES_INCLUDED 102 +#define PKCS7_R_NO_CRLS_INCLUDED 103 + +#endif // OPENSSL_HEADER_PKCS7_H diff --git a/Sources/BoringSSL/include/openssl/pkcs8.h b/Sources/BoringSSL/include/openssl/pkcs8.h index 70d6f4952..f865c767e 100644 --- a/Sources/BoringSSL/include/openssl/pkcs8.h +++ b/Sources/BoringSSL/include/openssl/pkcs8.h @@ -66,106 +66,121 @@ extern "C" { #endif -/* PKCS8_encrypt serializes and encrypts a PKCS8_PRIV_KEY_INFO with PBES1 or - * PBES2 as defined in PKCS #5. Only pbeWithSHAAnd128BitRC4, - * pbeWithSHAAnd3-KeyTripleDES-CBC and pbeWithSHA1And40BitRC2, defined in PKCS - * #12, and PBES2, are supported. PBES2 is selected by setting |cipher| and - * passing -1 for |pbe_nid|. Otherwise, PBES1 is used and |cipher| is ignored. - * - * |pass| is used as the password. If a PBES1 scheme from PKCS #12 is used, this - * will be converted to a raw byte string as specified in B.1 of PKCS #12. If - * |pass| is NULL, it will be encoded as the empty byte string rather than two - * zero bytes, the PKCS #12 encoding of the empty string. - * - * If |salt| is NULL, a random salt of |salt_len| bytes is generated. If - * |salt_len| is zero, a default salt length is used instead. - * - * The resulting structure is stored in an |X509_SIG| which must be freed by the - * caller. */ +// PKCS8_encrypt serializes and encrypts a PKCS8_PRIV_KEY_INFO with PBES1 or +// PBES2 as defined in PKCS #5. Only pbeWithSHAAnd128BitRC4, +// pbeWithSHAAnd3-KeyTripleDES-CBC and pbeWithSHA1And40BitRC2, defined in PKCS +// #12, and PBES2, are supported. PBES2 is selected by setting |cipher| and +// passing -1 for |pbe_nid|. Otherwise, PBES1 is used and |cipher| is ignored. +// +// |pass| is used as the password. If a PBES1 scheme from PKCS #12 is used, this +// will be converted to a raw byte string as specified in B.1 of PKCS #12. If +// |pass| is NULL, it will be encoded as the empty byte string rather than two +// zero bytes, the PKCS #12 encoding of the empty string. +// +// If |salt| is NULL, a random salt of |salt_len| bytes is generated. If +// |salt_len| is zero, a default salt length is used instead. +// +// The resulting structure is stored in an |X509_SIG| which must be freed by the +// caller. OPENSSL_EXPORT X509_SIG *PKCS8_encrypt(int pbe_nid, const EVP_CIPHER *cipher, const char *pass, int pass_len, const uint8_t *salt, size_t salt_len, int iterations, PKCS8_PRIV_KEY_INFO *p8inf); -/* PKCS8_decrypt decrypts and decodes a PKCS8_PRIV_KEY_INFO with PBES1 or PBES2 - * as defined in PKCS #5. Only pbeWithSHAAnd128BitRC4, - * pbeWithSHAAnd3-KeyTripleDES-CBC and pbeWithSHA1And40BitRC2, and PBES2, - * defined in PKCS #12, are supported. - * - * |pass| is used as the password. If a PBES1 scheme from PKCS #12 is used, this - * will be converted to a raw byte string as specified in B.1 of PKCS #12. If - * |pass| is NULL, it will be encoded as the empty byte string rather than two - * zero bytes, the PKCS #12 encoding of the empty string. 
- * - * The resulting structure must be freed by the caller. */ +// PKCS8_marshal_encrypted_private_key behaves like |PKCS8_encrypt| but encrypts +// an |EVP_PKEY| and writes the serialized EncryptedPrivateKeyInfo to |out|. It +// returns one on success and zero on error. +OPENSSL_EXPORT int PKCS8_marshal_encrypted_private_key( + CBB *out, int pbe_nid, const EVP_CIPHER *cipher, const char *pass, + size_t pass_len, const uint8_t *salt, size_t salt_len, int iterations, + const EVP_PKEY *pkey); + +// PKCS8_decrypt decrypts and decodes a PKCS8_PRIV_KEY_INFO with PBES1 or PBES2 +// as defined in PKCS #5. Only pbeWithSHAAnd128BitRC4, +// pbeWithSHAAnd3-KeyTripleDES-CBC and pbeWithSHA1And40BitRC2, and PBES2, +// defined in PKCS #12, are supported. +// +// |pass| is used as the password. If a PBES1 scheme from PKCS #12 is used, this +// will be converted to a raw byte string as specified in B.1 of PKCS #12. If +// |pass| is NULL, it will be encoded as the empty byte string rather than two +// zero bytes, the PKCS #12 encoding of the empty string. +// +// The resulting structure must be freed by the caller. OPENSSL_EXPORT PKCS8_PRIV_KEY_INFO *PKCS8_decrypt(X509_SIG *pkcs8, const char *pass, int pass_len); -/* PKCS12_get_key_and_certs parses a PKCS#12 structure from |in|, authenticates - * and decrypts it using |password|, sets |*out_key| to the included private - * key and appends the included certificates to |out_certs|. It returns one on - * success and zero on error. The caller takes ownership of the outputs. */ +// PKCS8_parse_encrypted_private_key behaves like |PKCS8_decrypt| but it parses +// the EncryptedPrivateKeyInfo structure from |cbs| and advances |cbs|. It +// returns a newly-allocated |EVP_PKEY| on success and zero on error. +OPENSSL_EXPORT EVP_PKEY *PKCS8_parse_encrypted_private_key(CBS *cbs, + const char *pass, + size_t pass_len); + +// PKCS12_get_key_and_certs parses a PKCS#12 structure from |in|, authenticates +// and decrypts it using |password|, sets |*out_key| to the included private +// key and appends the included certificates to |out_certs|. It returns one on +// success and zero on error. The caller takes ownership of the outputs. OPENSSL_EXPORT int PKCS12_get_key_and_certs(EVP_PKEY **out_key, STACK_OF(X509) *out_certs, CBS *in, const char *password); -/* Deprecated functions. */ +// Deprecated functions. -/* PKCS12_PBE_add does nothing. It exists for compatibility with OpenSSL. */ +// PKCS12_PBE_add does nothing. It exists for compatibility with OpenSSL. OPENSSL_EXPORT void PKCS12_PBE_add(void); -/* d2i_PKCS12 is a dummy function that copies |*ber_bytes| into a - * |PKCS12| structure. The |out_p12| argument should be NULL(✝). On exit, - * |*ber_bytes| will be advanced by |ber_len|. It returns a fresh |PKCS12| - * structure or NULL on error. - * - * Note: unlike other d2i functions, |d2i_PKCS12| will always consume |ber_len| - * bytes. - * - * (✝) If |out_p12| is not NULL and the function is successful, |*out_p12| will - * be freed if not NULL itself and the result will be written to |*out_p12|. - * New code should not depend on this. */ +// d2i_PKCS12 is a dummy function that copies |*ber_bytes| into a +// |PKCS12| structure. The |out_p12| argument should be NULL(✝). On exit, +// |*ber_bytes| will be advanced by |ber_len|. It returns a fresh |PKCS12| +// structure or NULL on error. +// +// Note: unlike other d2i functions, |d2i_PKCS12| will always consume |ber_len| +// bytes. 
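// A round-trip sketch for the two new EncryptedPrivateKeyInfo helpers above:
// serialize an existing EVP_PKEY under a password using PBES2 with
// AES-128-CBC (pbe_nid = -1 selects PBES2, per the PKCS8_encrypt comment),
// then parse it back. CBB/CBS come from bytestring.h and EVP_aes_128_cbc from
// cipher.h; error handling is abbreviated.
#include <string.h>
#include <openssl/bytestring.h>
#include <openssl/cipher.h>
#include <openssl/evp.h>
#include <openssl/mem.h>
#include <openssl/pkcs8.h>

static int pkcs8_roundtrip_sketch(const EVP_PKEY *pkey, const char *pass) {
  CBB cbb;
  if (!CBB_init(&cbb, 0)) {
    return 0;
  }

  uint8_t *der = NULL;
  size_t der_len;
  if (!PKCS8_marshal_encrypted_private_key(
          &cbb, /*pbe_nid=*/-1, EVP_aes_128_cbc(), pass, strlen(pass),
          /*salt=*/NULL, /*salt_len=*/0, /*iterations=*/2048, pkey) ||
      !CBB_finish(&cbb, &der, &der_len)) {
    CBB_cleanup(&cbb);
    return 0;
  }

  CBS cbs;
  CBS_init(&cbs, der, der_len);
  EVP_PKEY *copy = PKCS8_parse_encrypted_private_key(&cbs, pass, strlen(pass));
  int ok = copy != NULL;
  EVP_PKEY_free(copy);
  OPENSSL_free(der);
  return ok;
}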
+// +// (✝) If |out_p12| is not NULL and the function is successful, |*out_p12| will +// be freed if not NULL itself and the result will be written to |*out_p12|. +// New code should not depend on this. OPENSSL_EXPORT PKCS12 *d2i_PKCS12(PKCS12 **out_p12, const uint8_t **ber_bytes, size_t ber_len); -/* d2i_PKCS12_bio acts like |d2i_PKCS12| but reads from a |BIO|. */ +// d2i_PKCS12_bio acts like |d2i_PKCS12| but reads from a |BIO|. OPENSSL_EXPORT PKCS12* d2i_PKCS12_bio(BIO *bio, PKCS12 **out_p12); -/* d2i_PKCS12_fp acts like |d2i_PKCS12| but reads from a |FILE|. */ +// d2i_PKCS12_fp acts like |d2i_PKCS12| but reads from a |FILE|. OPENSSL_EXPORT PKCS12* d2i_PKCS12_fp(FILE *fp, PKCS12 **out_p12); -/* PKCS12_parse calls |PKCS12_get_key_and_certs| on the ASN.1 data stored in - * |p12|. The |out_pkey| and |out_cert| arguments must not be NULL and, on - * successful exit, the private key and first certificate will be stored in - * them. The |out_ca_certs| argument may be NULL but, if not, then any extra - * certificates will be appended to |*out_ca_certs|. If |*out_ca_certs| is NULL - * then it will be set to a freshly allocated stack containing the extra certs. - * - * It returns one on success and zero on error. */ +// PKCS12_parse calls |PKCS12_get_key_and_certs| on the ASN.1 data stored in +// |p12|. The |out_pkey| and |out_cert| arguments must not be NULL and, on +// successful exit, the private key and first certificate will be stored in +// them. The |out_ca_certs| argument may be NULL but, if not, then any extra +// certificates will be appended to |*out_ca_certs|. If |*out_ca_certs| is NULL +// then it will be set to a freshly allocated stack containing the extra certs. +// +// It returns one on success and zero on error. OPENSSL_EXPORT int PKCS12_parse(const PKCS12 *p12, const char *password, EVP_PKEY **out_pkey, X509 **out_cert, STACK_OF(X509) **out_ca_certs); -/* PKCS12_verify_mac returns one if |password| is a valid password for |p12| - * and zero otherwise. Since |PKCS12_parse| doesn't take a length parameter, - * it's not actually possible to use a non-NUL-terminated password to actually - * get anything from a |PKCS12|. Thus |password| and |password_len| may be - * |NULL| and zero, respectively, or else |password_len| may be -1, or else - * |password[password_len]| must be zero and no other NUL bytes may appear in - * |password|. If the |password_len| checks fail, zero is returned - * immediately. */ +// PKCS12_verify_mac returns one if |password| is a valid password for |p12| +// and zero otherwise. Since |PKCS12_parse| doesn't take a length parameter, +// it's not actually possible to use a non-NUL-terminated password to actually +// get anything from a |PKCS12|. Thus |password| and |password_len| may be +// |NULL| and zero, respectively, or else |password_len| may be -1, or else +// |password[password_len]| must be zero and no other NUL bytes may appear in +// |password|. If the |password_len| checks fail, zero is returned +// immediately. OPENSSL_EXPORT int PKCS12_verify_mac(const PKCS12 *p12, const char *password, int password_len); -/* PKCS12_free frees |p12| and its contents. */ +// PKCS12_free frees |p12| and its contents. 
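// A minimal usage sketch for the PKCS#12 functions above: load a .p12 blob
// from a BIO, check the password with PKCS12_verify_mac, then extract the key
// and certificates with PKCS12_parse. BIO_new_file is assumed from bio.h;
// error handling is abbreviated.
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/pkcs8.h>
#include <openssl/x509.h>

static int pkcs12_sketch(const char *path, const char *password) {
  BIO *bio = BIO_new_file(path, "rb");
  if (bio == NULL) {
    return 0;
  }
  PKCS12 *p12 = d2i_PKCS12_bio(bio, /*out_p12=*/NULL);
  BIO_free(bio);
  if (p12 == NULL) {
    return 0;
  }

  int ok = 0;
  if (PKCS12_verify_mac(p12, password, /*password_len=*/-1)) {
    EVP_PKEY *key = NULL;
    X509 *cert = NULL;
    STACK_OF(X509) *ca_certs = NULL;
    if (PKCS12_parse(p12, password, &key, &cert, &ca_certs)) {
      ok = 1;
      EVP_PKEY_free(key);
      X509_free(cert);
      if (ca_certs != NULL) {
        sk_X509_pop_free(ca_certs, X509_free);
      }
    }
  }
  PKCS12_free(p12);
  return ok;
}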
OPENSSL_EXPORT void PKCS12_free(PKCS12 *p12); #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -176,7 +191,7 @@ BORINGSSL_MAKE_DELETER(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif @@ -212,4 +227,4 @@ BORINGSSL_MAKE_DELETER(PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO_free) #define PKCS8_R_BAD_ITERATION_COUNT 129 #define PKCS8_R_UNSUPPORTED_PRF 130 -#endif /* OPENSSL_HEADER_PKCS8_H */ +#endif // OPENSSL_HEADER_PKCS8_H diff --git a/Sources/BoringSSL/include/openssl/poly1305.h b/Sources/BoringSSL/include/openssl/poly1305.h index b4e23e298..cefe2b1c7 100644 --- a/Sources/BoringSSL/include/openssl/poly1305.h +++ b/Sources/BoringSSL/include/openssl/poly1305.h @@ -24,28 +24,28 @@ extern "C" { typedef uint8_t poly1305_state[512]; -/* CRYPTO_poly1305_init sets up |state| so that it can be used to calculate an - * authentication tag with the one-time key |key|. Note that |key| is a - * one-time key and therefore there is no `reset' method because that would - * enable several messages to be authenticated with the same key. */ +// CRYPTO_poly1305_init sets up |state| so that it can be used to calculate an +// authentication tag with the one-time key |key|. Note that |key| is a +// one-time key and therefore there is no `reset' method because that would +// enable several messages to be authenticated with the same key. OPENSSL_EXPORT void CRYPTO_poly1305_init(poly1305_state* state, const uint8_t key[32]); -/* CRYPTO_poly1305_update processes |in_len| bytes from |in|. It can be called - * zero or more times after poly1305_init. */ +// CRYPTO_poly1305_update processes |in_len| bytes from |in|. It can be called +// zero or more times after poly1305_init. OPENSSL_EXPORT void CRYPTO_poly1305_update(poly1305_state* state, const uint8_t* in, size_t in_len); -/* CRYPTO_poly1305_finish completes the poly1305 calculation and writes a 16 - * byte authentication tag to |mac|. The |mac| address must be 16-byte - * aligned. */ +// CRYPTO_poly1305_finish completes the poly1305 calculation and writes a 16 +// byte authentication tag to |mac|. The |mac| address must be 16-byte +// aligned. OPENSSL_EXPORT void CRYPTO_poly1305_finish(poly1305_state* state, uint8_t mac[16]); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_POLY1305_H */ +#endif // OPENSSL_HEADER_POLY1305_H diff --git a/Sources/BoringSSL/include/openssl/pool.h b/Sources/BoringSSL/include/openssl/pool.h index dc5c938eb..373952f53 100644 --- a/Sources/BoringSSL/include/openssl/pool.h +++ b/Sources/BoringSSL/include/openssl/pool.h @@ -17,59 +17,63 @@ #include +#include + #if defined(__cplusplus) extern "C" { #endif -/* Buffers and buffer pools. - * - * |CRYPTO_BUFFER|s are simply reference-counted blobs. A |CRYPTO_BUFFER_POOL| - * is an intern table for |CRYPTO_BUFFER|s. This allows for a single copy of a - * given blob to be kept in memory and referenced from multiple places. */ +// Buffers and buffer pools. +// +// |CRYPTO_BUFFER|s are simply reference-counted blobs. A |CRYPTO_BUFFER_POOL| +// is an intern table for |CRYPTO_BUFFER|s. This allows for a single copy of a +// given blob to be kept in memory and referenced from multiple places. + +DEFINE_STACK_OF(CRYPTO_BUFFER) -/* CRYPTO_BUFFER_POOL_new returns a freshly allocated |CRYPTO_BUFFER_POOL| or - * NULL on error. */ +// CRYPTO_BUFFER_POOL_new returns a freshly allocated |CRYPTO_BUFFER_POOL| or +// NULL on error. 
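// A small usage sketch for the buffer-pool API documented here (the functions
// themselves are declared just below): interning the same bytes twice through
// one CRYPTO_BUFFER_POOL may yield the same reference-counted CRYPTO_BUFFER.
// Error handling is abbreviated; every buffer must be released before the
// pool is freed.
#include <stdio.h>
#include <openssl/pool.h>

static void buffer_pool_sketch(void) {
  static const uint8_t kBlob[] = {1, 2, 3, 4};

  CRYPTO_BUFFER_POOL *pool = CRYPTO_BUFFER_POOL_new();
  CRYPTO_BUFFER *a = CRYPTO_BUFFER_new(kBlob, sizeof(kBlob), pool);
  CRYPTO_BUFFER *b = CRYPTO_BUFFER_new(kBlob, sizeof(kBlob), pool);
  if (a != NULL && b != NULL) {
    printf("interned to the same buffer: %d\n", a == b);
    printf("length: %zu bytes\n", CRYPTO_BUFFER_len(a));
  }

  CRYPTO_BUFFER_free(a);
  CRYPTO_BUFFER_free(b);
  CRYPTO_BUFFER_POOL_free(pool);  // the pool must be empty at this point
}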
OPENSSL_EXPORT CRYPTO_BUFFER_POOL* CRYPTO_BUFFER_POOL_new(void); -/* CRYPTO_BUFFER_POOL_free frees |pool|, which must be empty. */ +// CRYPTO_BUFFER_POOL_free frees |pool|, which must be empty. OPENSSL_EXPORT void CRYPTO_BUFFER_POOL_free(CRYPTO_BUFFER_POOL *pool); -/* CRYPTO_BUFFER_new returns a |CRYPTO_BUFFER| containing a copy of |data|, or - * else NULL on error. If |pool| is not NULL then the returned value may be a - * reference to a previously existing |CRYPTO_BUFFER| that contained the same - * data. Otherwise, the returned, fresh |CRYPTO_BUFFER| will be added to the - * pool. */ +// CRYPTO_BUFFER_new returns a |CRYPTO_BUFFER| containing a copy of |data|, or +// else NULL on error. If |pool| is not NULL then the returned value may be a +// reference to a previously existing |CRYPTO_BUFFER| that contained the same +// data. Otherwise, the returned, fresh |CRYPTO_BUFFER| will be added to the +// pool. OPENSSL_EXPORT CRYPTO_BUFFER *CRYPTO_BUFFER_new(const uint8_t *data, size_t len, CRYPTO_BUFFER_POOL *pool); -/* CRYPTO_BUFFER_new_from_CBS acts the same as |CRYPTO_BUFFER_new|. */ +// CRYPTO_BUFFER_new_from_CBS acts the same as |CRYPTO_BUFFER_new|. OPENSSL_EXPORT CRYPTO_BUFFER *CRYPTO_BUFFER_new_from_CBS( CBS *cbs, CRYPTO_BUFFER_POOL *pool); -/* CRYPTO_BUFFER_free decrements the reference count of |buf|. If there are no - * other references, or if the only remaining reference is from a pool, then - * |buf| will be freed. */ +// CRYPTO_BUFFER_free decrements the reference count of |buf|. If there are no +// other references, or if the only remaining reference is from a pool, then +// |buf| will be freed. OPENSSL_EXPORT void CRYPTO_BUFFER_free(CRYPTO_BUFFER *buf); -/* CRYPTO_BUFFER_up_ref increments the reference count of |buf| and returns - * one. */ +// CRYPTO_BUFFER_up_ref increments the reference count of |buf| and returns +// one. OPENSSL_EXPORT int CRYPTO_BUFFER_up_ref(CRYPTO_BUFFER *buf); -/* CRYPTO_BUFFER_data returns a pointer to the data contained in |buf|. */ +// CRYPTO_BUFFER_data returns a pointer to the data contained in |buf|. OPENSSL_EXPORT const uint8_t *CRYPTO_BUFFER_data(const CRYPTO_BUFFER *buf); -/* CRYPTO_BUFFER_len returns the length, in bytes, of the data contained in - * |buf|. */ +// CRYPTO_BUFFER_len returns the length, in bytes, of the data contained in +// |buf|. OPENSSL_EXPORT size_t CRYPTO_BUFFER_len(const CRYPTO_BUFFER *buf); -/* CRYPTO_BUFFER_init_CBS initialises |out| to point at the data from |buf|. */ +// CRYPTO_BUFFER_init_CBS initialises |out| to point at the data from |buf|. OPENSSL_EXPORT void CRYPTO_BUFFER_init_CBS(const CRYPTO_BUFFER *buf, CBS *out); #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -80,7 +84,7 @@ BORINGSSL_MAKE_DELETER(CRYPTO_BUFFER, CRYPTO_BUFFER_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif diff --git a/Sources/BoringSSL/include/openssl/rand.h b/Sources/BoringSSL/include/openssl/rand.h index 0e9a8cd7c..5d02e12b3 100644 --- a/Sources/BoringSSL/include/openssl/rand.h +++ b/Sources/BoringSSL/include/openssl/rand.h @@ -22,83 +22,83 @@ extern "C" { #endif -/* Random number generation. */ +// Random number generation. -/* RAND_bytes writes |len| bytes of random data to |buf| and returns one. */ +// RAND_bytes writes |len| bytes of random data to |buf| and returns one. OPENSSL_EXPORT int RAND_bytes(uint8_t *buf, size_t len); -/* RAND_cleanup frees any resources used by the RNG. This is not safe if other - * threads might still be calling |RAND_bytes|. 
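// A minimal sketch for RAND_bytes above: fill a buffer with cryptographically
// secure random bytes. The call returns one, but the return value is checked
// anyway for portability with OpenSSL-style callers.
#include <openssl/rand.h>

static int make_nonce(uint8_t nonce[16]) {
  return RAND_bytes(nonce, 16);  // writes 16 random bytes into |nonce|
}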
*/ +// RAND_cleanup frees any resources used by the RNG. This is not safe if other +// threads might still be calling |RAND_bytes|. OPENSSL_EXPORT void RAND_cleanup(void); -/* Obscure functions. */ +// Obscure functions. #if !defined(OPENSSL_WINDOWS) -/* RAND_set_urandom_fd causes the module to use a copy of |fd| for system - * randomness rather opening /dev/urandom internally. The caller retains - * ownership of |fd| and is at liberty to close it at any time. This is useful - * if, due to a sandbox, /dev/urandom isn't available. If used, it must be - * called before the first call to |RAND_bytes|, and it is mutually exclusive - * with |RAND_enable_fork_unsafe_buffering|. - * - * |RAND_set_urandom_fd| does not buffer any entropy, so it is safe to call - * |fork| at any time after calling |RAND_set_urandom_fd|. */ +// RAND_set_urandom_fd causes the module to use a copy of |fd| for system +// randomness rather opening /dev/urandom internally. The caller retains +// ownership of |fd| and is at liberty to close it at any time. This is useful +// if, due to a sandbox, /dev/urandom isn't available. If used, it must be +// called before the first call to |RAND_bytes|, and it is mutually exclusive +// with |RAND_enable_fork_unsafe_buffering|. +// +// |RAND_set_urandom_fd| does not buffer any entropy, so it is safe to call +// |fork| at any time after calling |RAND_set_urandom_fd|. OPENSSL_EXPORT void RAND_set_urandom_fd(int fd); -/* RAND_enable_fork_unsafe_buffering enables efficient buffered reading of - * /dev/urandom. It adds an overhead of a few KB of memory per thread. It must - * be called before the first call to |RAND_bytes| and it is mutually exclusive - * with calls to |RAND_set_urandom_fd|. - * - * If |fd| is non-negative then a copy of |fd| will be used rather than opening - * /dev/urandom internally. Like |RAND_set_urandom_fd|, the caller retains - * ownership of |fd|. If |fd| is negative then /dev/urandom will be opened and - * any error from open(2) crashes the address space. - * - * It has an unusual name because the buffer is unsafe across calls to |fork|. - * Hence, this function should never be called by libraries. */ +// RAND_enable_fork_unsafe_buffering enables efficient buffered reading of +// /dev/urandom. It adds an overhead of a few KB of memory per thread. It must +// be called before the first call to |RAND_bytes| and it is mutually exclusive +// with calls to |RAND_set_urandom_fd|. +// +// If |fd| is non-negative then a copy of |fd| will be used rather than opening +// /dev/urandom internally. Like |RAND_set_urandom_fd|, the caller retains +// ownership of |fd|. If |fd| is negative then /dev/urandom will be opened and +// any error from open(2) crashes the address space. +// +// It has an unusual name because the buffer is unsafe across calls to |fork|. +// Hence, this function should never be called by libraries. OPENSSL_EXPORT void RAND_enable_fork_unsafe_buffering(int fd); #endif #if defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) -/* RAND_reset_for_fuzzing resets the fuzzer-only deterministic RNG. This - * function is only defined in the fuzzer-only build configuration. */ +// RAND_reset_for_fuzzing resets the fuzzer-only deterministic RNG. This +// function is only defined in the fuzzer-only build configuration. OPENSSL_EXPORT void RAND_reset_for_fuzzing(void); #endif -/* Deprecated functions */ +// Deprecated functions -/* RAND_pseudo_bytes is a wrapper around |RAND_bytes|. */ +// RAND_pseudo_bytes is a wrapper around |RAND_bytes|. 
OPENSSL_EXPORT int RAND_pseudo_bytes(uint8_t *buf, size_t len); -/* RAND_seed reads a single byte of random data to ensure that any file - * descriptors etc are opened. */ +// RAND_seed reads a single byte of random data to ensure that any file +// descriptors etc are opened. OPENSSL_EXPORT void RAND_seed(const void *buf, int num); -/* RAND_load_file returns a nonnegative number. */ +// RAND_load_file returns a nonnegative number. OPENSSL_EXPORT int RAND_load_file(const char *path, long num); -/* RAND_file_name returns NULL. */ +// RAND_file_name returns NULL. OPENSSL_EXPORT const char *RAND_file_name(char *buf, size_t num); -/* RAND_add does nothing. */ +// RAND_add does nothing. OPENSSL_EXPORT void RAND_add(const void *buf, int num, double entropy); -/* RAND_egd returns 255. */ +// RAND_egd returns 255. OPENSSL_EXPORT int RAND_egd(const char *); -/* RAND_poll returns one. */ +// RAND_poll returns one. OPENSSL_EXPORT int RAND_poll(void); -/* RAND_status returns one. */ +// RAND_status returns one. OPENSSL_EXPORT int RAND_status(void); -/* rand_meth_st is typedefed to |RAND_METHOD| in base.h. It isn't used; it - * exists only to be the return type of |RAND_SSLeay|. It's - * external so that variables of this type can be initialized. */ +// rand_meth_st is typedefed to |RAND_METHOD| in base.h. It isn't used; it +// exists only to be the return type of |RAND_SSLeay|. It's +// external so that variables of this type can be initialized. struct rand_meth_st { void (*seed) (const void *buf, int num); int (*bytes) (uint8_t *buf, size_t num); @@ -108,15 +108,18 @@ struct rand_meth_st { int (*status) (void); }; -/* RAND_SSLeay returns a pointer to a dummy |RAND_METHOD|. */ +// RAND_SSLeay returns a pointer to a dummy |RAND_METHOD|. OPENSSL_EXPORT RAND_METHOD *RAND_SSLeay(void); -/* RAND_set_rand_method does nothing. */ +// RAND_get_rand_method returns |RAND_SSLeay()|. +OPENSSL_EXPORT const RAND_METHOD *RAND_get_rand_method(void); + +// RAND_set_rand_method does nothing. OPENSSL_EXPORT void RAND_set_rand_method(const RAND_METHOD *); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_RAND_H */ +#endif // OPENSSL_HEADER_RAND_H diff --git a/Sources/BoringSSL/include/openssl/rc4.h b/Sources/BoringSSL/include/openssl/rc4.h index 68af8782e..acf56ae98 100644 --- a/Sources/BoringSSL/include/openssl/rc4.h +++ b/Sources/BoringSSL/include/openssl/rc4.h @@ -64,7 +64,7 @@ extern "C" { #endif -/* RC4. */ +// RC4. struct rc4_key_st { @@ -72,25 +72,25 @@ struct rc4_key_st { uint32_t data[256]; } /* RC4_KEY */; -/* RC4_set_key performs an RC4 key schedule and initialises |rc4key| with |len| - * bytes of key material from |key|. */ +// RC4_set_key performs an RC4 key schedule and initialises |rc4key| with |len| +// bytes of key material from |key|. OPENSSL_EXPORT void RC4_set_key(RC4_KEY *rc4key, unsigned len, const uint8_t *key); -/* RC4 encrypts (or decrypts, it's the same with RC4) |len| bytes from |in| to - * |out|. */ +// RC4 encrypts (or decrypts, it's the same with RC4) |len| bytes from |in| to +// |out|. OPENSSL_EXPORT void RC4(RC4_KEY *key, size_t len, const uint8_t *in, uint8_t *out); -/* Deprecated functions. */ +// Deprecated functions. -/* RC4_options returns the string "rc4(ptr,int)". */ +// RC4_options returns the string "rc4(ptr,int)". 
OPENSSL_EXPORT const char *RC4_options(void); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_RC4_H */ +#endif // OPENSSL_HEADER_RC4_H diff --git a/Sources/BoringSSL/include/openssl/ripemd.h b/Sources/BoringSSL/include/openssl/ripemd.h index cf1e49e2a..fb0b50c12 100644 --- a/Sources/BoringSSL/include/openssl/ripemd.h +++ b/Sources/BoringSSL/include/openssl/ripemd.h @@ -75,33 +75,33 @@ struct RIPEMD160state_st { unsigned num; }; -/* RIPEMD160_Init initialises |ctx| and returns one. */ +// RIPEMD160_Init initialises |ctx| and returns one. OPENSSL_EXPORT int RIPEMD160_Init(RIPEMD160_CTX *ctx); -/* RIPEMD160_Update adds |len| bytes from |data| to |ctx| and returns one. */ +// RIPEMD160_Update adds |len| bytes from |data| to |ctx| and returns one. OPENSSL_EXPORT int RIPEMD160_Update(RIPEMD160_CTX *ctx, const void *data, size_t len); -/* RIPEMD160_Final adds the final padding to |ctx| and writes the resulting - * digest to |md|, which must have at least |RIPEMD160_DIGEST_LENGTH| bytes of - * space. It returns one. */ +// RIPEMD160_Final adds the final padding to |ctx| and writes the resulting +// digest to |md|, which must have at least |RIPEMD160_DIGEST_LENGTH| bytes of +// space. It returns one. OPENSSL_EXPORT int RIPEMD160_Final(uint8_t *md, RIPEMD160_CTX *ctx); -/* RIPEMD160 writes the digest of |len| bytes from |data| to |out| and returns - * |out|. There must be at least |RIPEMD160_DIGEST_LENGTH| bytes of space in - * |out|. */ +// RIPEMD160 writes the digest of |len| bytes from |data| to |out| and returns +// |out|. There must be at least |RIPEMD160_DIGEST_LENGTH| bytes of space in +// |out|. OPENSSL_EXPORT uint8_t *RIPEMD160(const uint8_t *data, size_t len, uint8_t *out); -/* RIPEMD160_Transform is a low-level function that performs a single, - * RIPEMD160 block transformation using the state from |ctx| and 64 bytes from - * |block|. */ +// RIPEMD160_Transform is a low-level function that performs a single, +// RIPEMD160 block transformation using the state from |ctx| and 64 bytes from +// |block|. OPENSSL_EXPORT void RIPEMD160_Transform(RIPEMD160_CTX *ctx, const uint8_t *block); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_RIPEMD_H */ +#endif // OPENSSL_HEADER_RIPEMD_H diff --git a/Sources/BoringSSL/include/openssl/rsa.h b/Sources/BoringSSL/include/openssl/rsa.h index bad3fad81..11aa8e427 100644 --- a/Sources/BoringSSL/include/openssl/rsa.h +++ b/Sources/BoringSSL/include/openssl/rsa.h @@ -68,462 +68,521 @@ extern "C" { #endif -/* rsa.h contains functions for handling encryption and signature using RSA. */ +// rsa.h contains functions for handling encryption and signature using RSA. -/* Allocation and destruction. */ +// Allocation and destruction. -/* RSA_new returns a new, empty RSA object or NULL on error. */ +// RSA_new returns a new, empty RSA object or NULL on error. OPENSSL_EXPORT RSA *RSA_new(void); -/* RSA_new_method acts the same as |RSA_new| but takes an explicit |ENGINE|. */ +// RSA_new_method acts the same as |RSA_new| but takes an explicit |ENGINE|. OPENSSL_EXPORT RSA *RSA_new_method(const ENGINE *engine); -/* RSA_free decrements the reference count of |rsa| and frees it if the - * reference count drops to zero. */ +// RSA_free decrements the reference count of |rsa| and frees it if the +// reference count drops to zero. OPENSSL_EXPORT void RSA_free(RSA *rsa); -/* RSA_up_ref increments the reference count of |rsa| and returns one. 
*/ +// RSA_up_ref increments the reference count of |rsa| and returns one. OPENSSL_EXPORT int RSA_up_ref(RSA *rsa); -/* Properties. */ +// Properties. -/* RSA_get0_key sets |*out_n|, |*out_e|, and |*out_d|, if non-NULL, to |rsa|'s - * modulus, public exponent, and private exponent, respectively. If |rsa| is a - * public key, the private exponent will be set to NULL. */ +// RSA_bits returns the size of |rsa|, in bits. +OPENSSL_EXPORT unsigned RSA_bits(const RSA *rsa); + +// RSA_get0_key sets |*out_n|, |*out_e|, and |*out_d|, if non-NULL, to |rsa|'s +// modulus, public exponent, and private exponent, respectively. If |rsa| is a +// public key, the private exponent will be set to NULL. OPENSSL_EXPORT void RSA_get0_key(const RSA *rsa, const BIGNUM **out_n, const BIGNUM **out_e, const BIGNUM **out_d); -/* RSA_get0_factors sets |*out_p| and |*out_q|, if non-NULL, to |rsa|'s prime - * factors. If |rsa| is a public key, they will be set to NULL. If |rsa| is a - * multi-prime key, only the first two prime factors will be reported. */ +// RSA_get0_factors sets |*out_p| and |*out_q|, if non-NULL, to |rsa|'s prime +// factors. If |rsa| is a public key, they will be set to NULL. OPENSSL_EXPORT void RSA_get0_factors(const RSA *rsa, const BIGNUM **out_p, const BIGNUM **out_q); -/* RSA_get0_crt_params sets |*out_dmp1|, |*out_dmq1|, and |*out_iqmp|, if - * non-NULL, to |rsa|'s CRT parameters. These are d (mod p-1), d (mod q-1) and - * q^-1 (mod p), respectively. If |rsa| is a public key, each parameter will be - * set to NULL. If |rsa| is a multi-prime key, only the CRT parameters for the - * first two primes will be reported. */ +// RSA_get0_crt_params sets |*out_dmp1|, |*out_dmq1|, and |*out_iqmp|, if +// non-NULL, to |rsa|'s CRT parameters. These are d (mod p-1), d (mod q-1) and +// q^-1 (mod p), respectively. If |rsa| is a public key, each parameter will be +// set to NULL. OPENSSL_EXPORT void RSA_get0_crt_params(const RSA *rsa, const BIGNUM **out_dmp1, const BIGNUM **out_dmq1, const BIGNUM **out_iqmp); - -/* Key generation. */ - -/* RSA_generate_key_ex generates a new RSA key where the modulus has size - * |bits| and the public exponent is |e|. If unsure, |RSA_F4| is a good value - * for |e|. If |cb| is not NULL then it is called during the key generation - * process. In addition to the calls documented for |BN_generate_prime_ex|, it - * is called with event=2 when the n'th prime is rejected as unsuitable and - * with event=3 when a suitable value for |p| is found. - * - * It returns one on success or zero on error. */ +// RSA_set0_key sets |rsa|'s modulus, public exponent, and private exponent to +// |n|, |e|, and |d| respectively, if non-NULL. On success, it takes ownership +// of each argument and returns one. Otherwise, it returns zero. +// +// |d| may be NULL, but |n| and |e| must either be non-NULL or already +// configured on |rsa|. +OPENSSL_EXPORT int RSA_set0_key(RSA *rsa, BIGNUM *n, BIGNUM *e, BIGNUM *d); + +// RSA_set0_factors sets |rsa|'s prime factors to |p| and |q|, if non-NULL, and +// takes ownership of them. On success, it takes ownership of each argument and +// returns one. Otherwise, it returns zero. +// +// Each argument must either be non-NULL or already configured on |rsa|. +OPENSSL_EXPORT int RSA_set0_factors(RSA *rsa, BIGNUM *p, BIGNUM *q); + +// RSA_set0_crt_params sets |rsa|'s CRT parameters to |dmp1|, |dmq1|, and +// |iqmp|, if non-NULL, and takes ownership of them. On success, it takes +// ownership of its parameters and returns one. Otherwise, it returns zero. 
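// A minimal sketch for the new RSA_set0_key/RSA_get0_key pair above: build a
// public-only RSA from a raw modulus plus the usual F4 exponent, then read
// the components back. BN_new/BN_bin2bn/BN_set_word are assumed from bn.h and
// |n_bytes| stands in for a real modulus; error handling is abbreviated.
#include <openssl/bn.h>
#include <openssl/rsa.h>

static RSA *rsa_from_raw_public(const uint8_t *n_bytes, size_t n_len) {
  RSA *rsa = RSA_new();
  BIGNUM *n = BN_bin2bn(n_bytes, n_len, NULL);
  BIGNUM *e = BN_new();
  if (rsa == NULL || n == NULL || e == NULL || !BN_set_word(e, RSA_F4) ||
      !RSA_set0_key(rsa, n, e, /*d=*/NULL)) {
    // On failure the caller still owns n and e; on success rsa does.
    BN_free(n);
    BN_free(e);
    RSA_free(rsa);
    return NULL;
  }

  const BIGNUM *out_n, *out_e, *out_d;
  RSA_get0_key(rsa, &out_n, &out_e, &out_d);  // out_d is NULL for public keys
  return rsa;
}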
+// +// Each argument must either be non-NULL or already configured on |rsa|. +OPENSSL_EXPORT int RSA_set0_crt_params(RSA *rsa, BIGNUM *dmp1, BIGNUM *dmq1, + BIGNUM *iqmp); + + +// Key generation. + +// RSA_generate_key_ex generates a new RSA key where the modulus has size +// |bits| and the public exponent is |e|. If unsure, |RSA_F4| is a good value +// for |e|. If |cb| is not NULL then it is called during the key generation +// process. In addition to the calls documented for |BN_generate_prime_ex|, it +// is called with event=2 when the n'th prime is rejected as unsuitable and +// with event=3 when a suitable value for |p| is found. +// +// It returns one on success or zero on error. OPENSSL_EXPORT int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb); -/* RSA_generate_multi_prime_key acts like |RSA_generate_key_ex| but can - * generate an RSA private key with more than two primes. */ -OPENSSL_EXPORT int RSA_generate_multi_prime_key(RSA *rsa, int bits, - int num_primes, BIGNUM *e, - BN_GENCB *cb); +// RSA_generate_key_fips behaves like |RSA_generate_key_ex| but performs +// additional checks for FIPS compliance. The public exponent is always 65537 +// and |bits| must be either 2048 or 3072. +OPENSSL_EXPORT int RSA_generate_key_fips(RSA *rsa, int bits, BN_GENCB *cb); -/* Encryption / Decryption */ +// Encryption / Decryption -/* Padding types for encryption. */ +// Padding types for encryption. #define RSA_PKCS1_PADDING 1 #define RSA_NO_PADDING 3 #define RSA_PKCS1_OAEP_PADDING 4 -/* RSA_PKCS1_PSS_PADDING can only be used via the EVP interface. */ +// RSA_PKCS1_PSS_PADDING can only be used via the EVP interface. #define RSA_PKCS1_PSS_PADDING 6 -/* RSA_encrypt encrypts |in_len| bytes from |in| to the public key from |rsa| - * and writes, at most, |max_out| bytes of encrypted data to |out|. The - * |max_out| argument must be, at least, |RSA_size| in order to ensure success. - * - * It returns 1 on success or zero on error. - * - * The |padding| argument must be one of the |RSA_*_PADDING| values. If in - * doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols but - * |RSA_PKCS1_PADDING| is most common. */ +// RSA_encrypt encrypts |in_len| bytes from |in| to the public key from |rsa| +// and writes, at most, |max_out| bytes of encrypted data to |out|. The +// |max_out| argument must be, at least, |RSA_size| in order to ensure success. +// +// It returns 1 on success or zero on error. +// +// The |padding| argument must be one of the |RSA_*_PADDING| values. If in +// doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols but +// |RSA_PKCS1_PADDING| is most common. OPENSSL_EXPORT int RSA_encrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); -/* RSA_decrypt decrypts |in_len| bytes from |in| with the private key from - * |rsa| and writes, at most, |max_out| bytes of plaintext to |out|. The - * |max_out| argument must be, at least, |RSA_size| in order to ensure success. - * - * It returns 1 on success or zero on error. - * - * The |padding| argument must be one of the |RSA_*_PADDING| values. If in - * doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols. - * - * Passing |RSA_PKCS1_PADDING| into this function is deprecated and insecure. If - * implementing a protocol using RSAES-PKCS1-V1_5, use |RSA_NO_PADDING| and then - * check padding in constant-time combined with a swap to a random session key - * or other mitigation. 
See "Chosen Ciphertext Attacks Against Protocols Based - * on the RSA Encryption Standard PKCS #1", Daniel Bleichenbacher, Advances in - * Cryptology (Crypto '98). */ +// RSA_decrypt decrypts |in_len| bytes from |in| with the private key from +// |rsa| and writes, at most, |max_out| bytes of plaintext to |out|. The +// |max_out| argument must be, at least, |RSA_size| in order to ensure success. +// +// It returns 1 on success or zero on error. +// +// The |padding| argument must be one of the |RSA_*_PADDING| values. If in +// doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols. +// +// Passing |RSA_PKCS1_PADDING| into this function is deprecated and insecure. If +// implementing a protocol using RSAES-PKCS1-V1_5, use |RSA_NO_PADDING| and then +// check padding in constant-time combined with a swap to a random session key +// or other mitigation. See "Chosen Ciphertext Attacks Against Protocols Based +// on the RSA Encryption Standard PKCS #1", Daniel Bleichenbacher, Advances in +// Cryptology (Crypto '98). OPENSSL_EXPORT int RSA_decrypt(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); -/* RSA_public_encrypt encrypts |flen| bytes from |from| to the public key in - * |rsa| and writes the encrypted data to |to|. The |to| buffer must have at - * least |RSA_size| bytes of space. It returns the number of bytes written, or - * -1 on error. The |padding| argument must be one of the |RSA_*_PADDING| - * values. If in doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols but - * |RSA_PKCS1_PADDING| is most common. - * - * WARNING: this function is dangerous because it breaks the usual return value - * convention. Use |RSA_encrypt| instead. */ +// RSA_public_encrypt encrypts |flen| bytes from |from| to the public key in +// |rsa| and writes the encrypted data to |to|. The |to| buffer must have at +// least |RSA_size| bytes of space. It returns the number of bytes written, or +// -1 on error. The |padding| argument must be one of the |RSA_*_PADDING| +// values. If in doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols but +// |RSA_PKCS1_PADDING| is most common. +// +// WARNING: this function is dangerous because it breaks the usual return value +// convention. Use |RSA_encrypt| instead. OPENSSL_EXPORT int RSA_public_encrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa, int padding); -/* RSA_private_decrypt decrypts |flen| bytes from |from| with the public key in - * |rsa| and writes the plaintext to |to|. The |to| buffer must have at least - * |RSA_size| bytes of space. It returns the number of bytes written, or -1 on - * error. The |padding| argument must be one of the |RSA_*_PADDING| values. If - * in doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols. Passing - * |RSA_PKCS1_PADDING| into this function is deprecated and insecure. See - * |RSA_decrypt|. - * - * WARNING: this function is dangerous because it breaks the usual return value - * convention. Use |RSA_decrypt| instead. */ +// RSA_private_decrypt decrypts |flen| bytes from |from| with the public key in +// |rsa| and writes the plaintext to |to|. The |to| buffer must have at least +// |RSA_size| bytes of space. It returns the number of bytes written, or -1 on +// error. The |padding| argument must be one of the |RSA_*_PADDING| values. If +// in doubt, use |RSA_PKCS1_OAEP_PADDING| for new protocols. Passing +// |RSA_PKCS1_PADDING| into this function is deprecated and insecure. See +// |RSA_decrypt|. 
+// +// WARNING: this function is dangerous because it breaks the usual return value +// convention. Use |RSA_decrypt| instead. OPENSSL_EXPORT int RSA_private_decrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa, int padding); -/* Signing / Verification */ +// Signing / Verification -/* RSA_sign signs |in_len| bytes of digest from |in| with |rsa| using - * RSASSA-PKCS1-v1_5. It writes, at most, |RSA_size(rsa)| bytes to |out|. On - * successful return, the actual number of bytes written is written to - * |*out_len|. - * - * The |hash_nid| argument identifies the hash function used to calculate |in| - * and is embedded in the resulting signature. For example, it might be - * |NID_sha256|. - * - * It returns 1 on success and zero on error. */ +// RSA_sign signs |in_len| bytes of digest from |in| with |rsa| using +// RSASSA-PKCS1-v1_5. It writes, at most, |RSA_size(rsa)| bytes to |out|. On +// successful return, the actual number of bytes written is written to +// |*out_len|. +// +// The |hash_nid| argument identifies the hash function used to calculate |in| +// and is embedded in the resulting signature. For example, it might be +// |NID_sha256|. +// +// It returns 1 on success and zero on error. OPENSSL_EXPORT int RSA_sign(int hash_nid, const uint8_t *in, unsigned int in_len, uint8_t *out, unsigned int *out_len, RSA *rsa); -/* RSA_sign_raw signs |in_len| bytes from |in| with the public key from |rsa| - * and writes, at most, |max_out| bytes of signature data to |out|. The - * |max_out| argument must be, at least, |RSA_size| in order to ensure success. - * - * It returns 1 on success or zero on error. - * - * The |padding| argument must be one of the |RSA_*_PADDING| values. If in - * doubt, |RSA_PKCS1_PADDING| is the most common but |RSA_PKCS1_PSS_PADDING| - * (via the |EVP_PKEY| interface) is preferred for new protocols. */ +// RSA_sign_pss_mgf1 signs |in_len| bytes from |in| with the public key from +// |rsa| using RSASSA-PSS with MGF1 as the mask generation function. It writes, +// at most, |max_out| bytes of signature data to |out|. The |max_out| argument +// must be, at least, |RSA_size| in order to ensure success. It returns 1 on +// success or zero on error. +// +// The |md| and |mgf1_md| arguments identify the hash used to calculate |msg| +// and the MGF1 hash, respectively. If |mgf1_md| is NULL, |md| is +// used. +// +// |salt_len| specifies the expected salt length in bytes. If |salt_len| is -1, +// then the salt length is the same as the hash length. If -2, then the salt +// length is maximal given the size of |rsa|. If unsure, use -1. +OPENSSL_EXPORT int RSA_sign_pss_mgf1(RSA *rsa, size_t *out_len, uint8_t *out, + size_t max_out, const uint8_t *in, + size_t in_len, const EVP_MD *md, + const EVP_MD *mgf1_md, int salt_len); + +// RSA_sign_raw signs |in_len| bytes from |in| with the public key from |rsa| +// and writes, at most, |max_out| bytes of signature data to |out|. The +// |max_out| argument must be, at least, |RSA_size| in order to ensure success. +// +// It returns 1 on success or zero on error. +// +// The |padding| argument must be one of the |RSA_*_PADDING| values. If in +// doubt, |RSA_PKCS1_PADDING| is the most common but |RSA_PKCS1_PSS_PADDING| +// (via the |EVP_PKEY| interface) is preferred for new protocols. 
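A minimal sketch of |RSA_sign| (documented above) together with |RSA_verify| (documented just below), using RSASSA-PKCS1-v1_5 over a SHA-256 digest. Note that both functions take the digest rather than the message, and that |hash_nid| binds the hash algorithm into the signature. The helper name and the freshly generated key are illustrative; |SHA256|, |NID_sha256|, and |OPENSSL_malloc| are declared in other headers.

#include <openssl/bn.h>
#include <openssl/mem.h>
#include <openssl/nid.h>
#include <openssl/rsa.h>
#include <openssl/sha.h>

static int sign_and_verify_once(void) {
  int ok = 0;
  static const uint8_t kMsg[] = "hello";
  uint8_t digest[SHA256_DIGEST_LENGTH];
  unsigned sig_len = 0;
  uint8_t *sig = NULL;
  RSA *rsa = RSA_new();
  BIGNUM *e = BN_new();

  /* |RSA_sign| expects a digest, not the message itself. */
  SHA256(kMsg, sizeof(kMsg) - 1, digest);

  if (rsa == NULL || e == NULL || !BN_set_word(e, RSA_F4) ||
      !RSA_generate_key_ex(rsa, 2048, e, NULL)) {
    goto out;
  }
  sig = OPENSSL_malloc(RSA_size(rsa));
  if (sig == NULL ||
      !RSA_sign(NID_sha256, digest, sizeof(digest), sig, &sig_len, rsa) ||
      !RSA_verify(NID_sha256, digest, sizeof(digest), sig, sig_len, rsa)) {
    goto out;
  }
  ok = 1;

out:
  OPENSSL_free(sig);
  BN_free(e);
  RSA_free(rsa);
  return ok;
}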
OPENSSL_EXPORT int RSA_sign_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); -/* RSA_verify verifies that |sig_len| bytes from |sig| are a valid, - * RSASSA-PKCS1-v1_5 signature of |msg_len| bytes at |msg| by |rsa|. - * - * The |hash_nid| argument identifies the hash function used to calculate |in| - * and is embedded in the resulting signature in order to prevent hash - * confusion attacks. For example, it might be |NID_sha256|. - * - * It returns one if the signature is valid and zero otherwise. - * - * WARNING: this differs from the original, OpenSSL function which additionally - * returned -1 on error. */ +// RSA_verify verifies that |sig_len| bytes from |sig| are a valid, +// RSASSA-PKCS1-v1_5 signature of |msg_len| bytes at |msg| by |rsa|. +// +// The |hash_nid| argument identifies the hash function used to calculate |msg| +// and is embedded in the resulting signature in order to prevent hash +// confusion attacks. For example, it might be |NID_sha256|. +// +// It returns one if the signature is valid and zero otherwise. +// +// WARNING: this differs from the original, OpenSSL function which additionally +// returned -1 on error. OPENSSL_EXPORT int RSA_verify(int hash_nid, const uint8_t *msg, size_t msg_len, const uint8_t *sig, size_t sig_len, RSA *rsa); -/* RSA_verify_raw verifies |in_len| bytes of signature from |in| using the - * public key from |rsa| and writes, at most, |max_out| bytes of plaintext to - * |out|. The |max_out| argument must be, at least, |RSA_size| in order to - * ensure success. - * - * It returns 1 on success or zero on error. - * - * The |padding| argument must be one of the |RSA_*_PADDING| values. If in - * doubt, |RSA_PKCS1_PADDING| is the most common but |RSA_PKCS1_PSS_PADDING| - * (via the |EVP_PKEY| interface) is preferred for new protocols. */ +// RSA_verify_pss_mgf1 verifies that |sig_len| bytes from |sig| are a valid, +// RSASSA-PSS signature of |msg_len| bytes at |msg| by |rsa|. It returns one if +// the signature is valid and zero otherwise. MGF1 is used as the mask +// generation function. +// +// The |md| and |mgf1_md| arguments identify the hash used to calculate |msg| +// and the MGF1 hash, respectively. If |mgf1_md| is NULL, |md| is +// used. |salt_len| specifies the expected salt length in bytes. +// +// If |salt_len| is -1, then the salt length is the same as the hash length. If +// -2, then the salt length is recovered and all values accepted. If unsure, use +// -1. +OPENSSL_EXPORT int RSA_verify_pss_mgf1(RSA *rsa, const uint8_t *msg, + size_t msg_len, const EVP_MD *md, + const EVP_MD *mgf1_md, int salt_len, + const uint8_t *sig, size_t sig_len); + +// RSA_verify_raw verifies |in_len| bytes of signature from |in| using the +// public key from |rsa| and writes, at most, |max_out| bytes of plaintext to +// |out|. The |max_out| argument must be, at least, |RSA_size| in order to +// ensure success. +// +// It returns 1 on success or zero on error. +// +// The |padding| argument must be one of the |RSA_*_PADDING| values. If in +// doubt, |RSA_PKCS1_PADDING| is the most common but |RSA_PKCS1_PSS_PADDING| +// (via the |EVP_PKEY| interface) is preferred for new protocols. OPENSSL_EXPORT int RSA_verify_raw(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); -/* RSA_private_encrypt encrypts |flen| bytes from |from| with the private key in - * |rsa| and writes the encrypted data to |to|. 
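The PSS entry points above follow the same shape; a small sketch, assuming |rsa| and a SHA-256 |digest| are prepared as in the previous sketch. Passing NULL for |mgf1_md| reuses the signing hash for MGF1, and a |salt_len| of -1 selects a hash-length salt, as described above. |EVP_sha256| comes from the digest header; the helper name is illustrative.

#include <openssl/digest.h>
#include <openssl/mem.h>
#include <openssl/rsa.h>

static int pss_sign_and_verify(RSA *rsa, const uint8_t *digest,
                               size_t digest_len) {
  int ok = 0;
  size_t sig_len = 0;
  uint8_t *sig = OPENSSL_malloc(RSA_size(rsa));

  if (sig == NULL ||
      !RSA_sign_pss_mgf1(rsa, &sig_len, sig, RSA_size(rsa), digest, digest_len,
                         EVP_sha256(), /*mgf1_md=*/NULL, /*salt_len=*/-1) ||
      !RSA_verify_pss_mgf1(rsa, digest, digest_len, EVP_sha256(),
                           /*mgf1_md=*/NULL, /*salt_len=*/-1, sig, sig_len)) {
    goto out;
  }
  ok = 1;

out:
  OPENSSL_free(sig);
  return ok;
}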
The |to| buffer must have at - * least |RSA_size| bytes of space. It returns the number of bytes written, or - * -1 on error. The |padding| argument must be one of the |RSA_*_PADDING| - * values. If in doubt, |RSA_PKCS1_PADDING| is the most common but - * |RSA_PKCS1_PSS_PADDING| (via the |EVP_PKEY| interface) is preferred for new - * protocols. - * - * WARNING: this function is dangerous because it breaks the usual return value - * convention. Use |RSA_sign_raw| instead. */ +// RSA_private_encrypt encrypts |flen| bytes from |from| with the private key in +// |rsa| and writes the encrypted data to |to|. The |to| buffer must have at +// least |RSA_size| bytes of space. It returns the number of bytes written, or +// -1 on error. The |padding| argument must be one of the |RSA_*_PADDING| +// values. If in doubt, |RSA_PKCS1_PADDING| is the most common but +// |RSA_PKCS1_PSS_PADDING| (via the |EVP_PKEY| interface) is preferred for new +// protocols. +// +// WARNING: this function is dangerous because it breaks the usual return value +// convention. Use |RSA_sign_raw| instead. OPENSSL_EXPORT int RSA_private_encrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa, int padding); -/* RSA_public_decrypt verifies |flen| bytes of signature from |from| using the - * public key in |rsa| and writes the plaintext to |to|. The |to| buffer must - * have at least |RSA_size| bytes of space. It returns the number of bytes - * written, or -1 on error. The |padding| argument must be one of the - * |RSA_*_PADDING| values. If in doubt, |RSA_PKCS1_PADDING| is the most common - * but |RSA_PKCS1_PSS_PADDING| (via the |EVP_PKEY| interface) is preferred for - * new protocols. - * - * WARNING: this function is dangerous because it breaks the usual return value - * convention. Use |RSA_verify_raw| instead. */ +// RSA_public_decrypt verifies |flen| bytes of signature from |from| using the +// public key in |rsa| and writes the plaintext to |to|. The |to| buffer must +// have at least |RSA_size| bytes of space. It returns the number of bytes +// written, or -1 on error. The |padding| argument must be one of the +// |RSA_*_PADDING| values. If in doubt, |RSA_PKCS1_PADDING| is the most common +// but |RSA_PKCS1_PSS_PADDING| (via the |EVP_PKEY| interface) is preferred for +// new protocols. +// +// WARNING: this function is dangerous because it breaks the usual return value +// convention. Use |RSA_verify_raw| instead. OPENSSL_EXPORT int RSA_public_decrypt(size_t flen, const uint8_t *from, uint8_t *to, RSA *rsa, int padding); -/* Utility functions. */ +// Utility functions. -/* RSA_size returns the number of bytes in the modulus, which is also the size - * of a signature or encrypted value using |rsa|. */ +// RSA_size returns the number of bytes in the modulus, which is also the size +// of a signature or encrypted value using |rsa|. OPENSSL_EXPORT unsigned RSA_size(const RSA *rsa); -/* RSA_is_opaque returns one if |rsa| is opaque and doesn't expose its key - * material. Otherwise it returns zero. */ +// RSA_is_opaque returns one if |rsa| is opaque and doesn't expose its key +// material. Otherwise it returns zero. OPENSSL_EXPORT int RSA_is_opaque(const RSA *rsa); -/* RSA_supports_digest returns one if |rsa| supports signing digests - * of type |md|. Otherwise it returns zero. */ -OPENSSL_EXPORT int RSA_supports_digest(const RSA *rsa, const EVP_MD *md); - -/* RSAPublicKey_dup allocates a fresh |RSA| and copies the public key from - * |rsa| into it. It returns the fresh |RSA| object, or NULL on error. 
*/ +// RSAPublicKey_dup allocates a fresh |RSA| and copies the public key from +// |rsa| into it. It returns the fresh |RSA| object, or NULL on error. OPENSSL_EXPORT RSA *RSAPublicKey_dup(const RSA *rsa); -/* RSAPrivateKey_dup allocates a fresh |RSA| and copies the private key from - * |rsa| into it. It returns the fresh |RSA| object, or NULL on error. */ +// RSAPrivateKey_dup allocates a fresh |RSA| and copies the private key from +// |rsa| into it. It returns the fresh |RSA| object, or NULL on error. OPENSSL_EXPORT RSA *RSAPrivateKey_dup(const RSA *rsa); -/* RSA_check_key performs basic validatity tests on |rsa|. It returns one if - * they pass and zero otherwise. Opaque keys and public keys always pass. If it - * returns zero then a more detailed error is available on the error queue. */ +// RSA_check_key performs basic validity tests on |rsa|. It returns one if +// they pass and zero otherwise. Opaque keys and public keys always pass. If it +// returns zero then a more detailed error is available on the error queue. OPENSSL_EXPORT int RSA_check_key(const RSA *rsa); -/* RSA_recover_crt_params uses |rsa->n|, |rsa->d| and |rsa->e| in order to - * calculate the two primes used and thus the precomputed, CRT values. These - * values are set in the |p|, |q|, |dmp1|, |dmq1| and |iqmp| members of |rsa|, - * which must be |NULL| on entry. It returns one on success and zero - * otherwise. */ -OPENSSL_EXPORT int RSA_recover_crt_params(RSA *rsa); - -/* RSA_verify_PKCS1_PSS_mgf1 verifies that |EM| is a correct PSS padding of - * |mHash|, where |mHash| is a digest produced by |Hash|. |EM| must point to - * exactly |RSA_size(rsa)| bytes of data. The |mgf1Hash| argument specifies the - * hash function for generating the mask. If NULL, |Hash| is used. The |sLen| - * argument specifies the expected salt length in bytes. If |sLen| is -1 then - * the salt length is the same as the hash length. If -2, then the salt length - * is recovered and all values accepted. - * - * If unsure, use -1. - * - * It returns one on success or zero on error. */ +// RSA_check_fips performs public key validity tests on |key|. It returns one +// if they pass and zero otherwise. Opaque keys always fail. +OPENSSL_EXPORT int RSA_check_fips(RSA *key); + +// RSA_verify_PKCS1_PSS_mgf1 verifies that |EM| is a correct PSS padding of +// |mHash|, where |mHash| is a digest produced by |Hash|. |EM| must point to +// exactly |RSA_size(rsa)| bytes of data. The |mgf1Hash| argument specifies the +// hash function for generating the mask. If NULL, |Hash| is used. The |sLen| +// argument specifies the expected salt length in bytes. If |sLen| is -1 then +// the salt length is the same as the hash length. If -2, then the salt length +// is recovered and all values accepted. +// +// If unsure, use -1. +// +// It returns one on success or zero on error. +// +// This function implements only the low-level padding logic. Use +// |RSA_verify_pss_mgf1| instead. OPENSSL_EXPORT int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const uint8_t *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash, const uint8_t *EM, int sLen); -/* RSA_padding_add_PKCS1_PSS_mgf1 writes a PSS padding of |mHash| to |EM|, - * where |mHash| is a digest produced by |Hash|. |RSA_size(rsa)| bytes of - * output will be written to |EM|. The |mgf1Hash| argument specifies the hash - * function for generating the mask. If NULL, |Hash| is used. The |sLen| - * argument specifies the expected salt length in bytes. If |sLen| is -1 then - * the salt length is the same as the hash length. 
If -2, then the salt length - * is maximal given the space in |EM|. - * - * It returns one on success or zero on error. */ +// RSA_padding_add_PKCS1_PSS_mgf1 writes a PSS padding of |mHash| to |EM|, +// where |mHash| is a digest produced by |Hash|. |RSA_size(rsa)| bytes of +// output will be written to |EM|. The |mgf1Hash| argument specifies the hash +// function for generating the mask. If NULL, |Hash| is used. The |sLen| +// argument specifies the expected salt length in bytes. If |sLen| is -1 then +// the salt length is the same as the hash length. If -2, then the salt length +// is maximal given the space in |EM|. +// +// It returns one on success or zero on error. +// +// This function implements only the low-level padding logic. Use +// |RSA_sign_pss_mgf1| instead. OPENSSL_EXPORT int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, uint8_t *EM, const uint8_t *mHash, const EVP_MD *Hash, const EVP_MD *mgf1Hash, int sLen); -/* RSA_padding_add_PKCS1_OAEP_mgf1 writes an OAEP padding of |from| to |to| - * with the given parameters and hash functions. If |md| is NULL then SHA-1 is - * used. If |mgf1md| is NULL then the value of |md| is used (which means SHA-1 - * if that, in turn, is NULL). - * - * It returns one on success or zero on error. */ +// RSA_padding_add_PKCS1_OAEP_mgf1 writes an OAEP padding of |from| to |to| +// with the given parameters and hash functions. If |md| is NULL then SHA-1 is +// used. If |mgf1md| is NULL then the value of |md| is used (which means SHA-1 +// if that, in turn, is NULL). +// +// It returns one on success or zero on error. OPENSSL_EXPORT int RSA_padding_add_PKCS1_OAEP_mgf1( - uint8_t *to, unsigned to_len, const uint8_t *from, unsigned from_len, - const uint8_t *param, unsigned param_len, const EVP_MD *md, + uint8_t *to, size_t to_len, const uint8_t *from, size_t from_len, + const uint8_t *param, size_t param_len, const EVP_MD *md, const EVP_MD *mgf1md); -/* RSA_add_pkcs1_prefix builds a version of |msg| prefixed with the DigestInfo - * header for the given hash function and sets |out_msg| to point to it. On - * successful return, |*out_msg| may be allocated memory and, if so, - * |*is_alloced| will be 1. */ +// RSA_add_pkcs1_prefix builds a version of |msg| prefixed with the DigestInfo +// header for the given hash function and sets |out_msg| to point to it. On +// successful return, if |*is_alloced| is one, the caller must release +// |*out_msg| with |OPENSSL_free|. OPENSSL_EXPORT int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len, int *is_alloced, int hash_nid, const uint8_t *msg, size_t msg_len); -/* ASN.1 functions. */ +// ASN.1 functions. -/* RSA_parse_public_key parses a DER-encoded RSAPublicKey structure (RFC 3447) - * from |cbs| and advances |cbs|. It returns a newly-allocated |RSA| or NULL on - * error. */ +// RSA_parse_public_key parses a DER-encoded RSAPublicKey structure (RFC 3447) +// from |cbs| and advances |cbs|. It returns a newly-allocated |RSA| or NULL on +// error. OPENSSL_EXPORT RSA *RSA_parse_public_key(CBS *cbs); -/* RSA_parse_public_key_buggy behaves like |RSA_parse_public_key|, but it - * tolerates some invalid encodings. Do not use this function. */ -OPENSSL_EXPORT RSA *RSA_parse_public_key_buggy(CBS *cbs); - -/* RSA_public_key_from_bytes parses |in| as a DER-encoded RSAPublicKey structure - * (RFC 3447). It returns a newly-allocated |RSA| or NULL on error. */ +// RSA_public_key_from_bytes parses |in| as a DER-encoded RSAPublicKey structure +// (RFC 3447). It returns a newly-allocated |RSA| or NULL on error. 
OPENSSL_EXPORT RSA *RSA_public_key_from_bytes(const uint8_t *in, size_t in_len); -/* RSA_marshal_public_key marshals |rsa| as a DER-encoded RSAPublicKey structure - * (RFC 3447) and appends the result to |cbb|. It returns one on success and - * zero on failure. */ +// RSA_marshal_public_key marshals |rsa| as a DER-encoded RSAPublicKey structure +// (RFC 3447) and appends the result to |cbb|. It returns one on success and +// zero on failure. OPENSSL_EXPORT int RSA_marshal_public_key(CBB *cbb, const RSA *rsa); -/* RSA_public_key_to_bytes marshals |rsa| as a DER-encoded RSAPublicKey - * structure (RFC 3447) and, on success, sets |*out_bytes| to a newly allocated - * buffer containing the result and returns one. Otherwise, it returns zero. The - * result should be freed with |OPENSSL_free|. */ +// RSA_public_key_to_bytes marshals |rsa| as a DER-encoded RSAPublicKey +// structure (RFC 3447) and, on success, sets |*out_bytes| to a newly allocated +// buffer containing the result and returns one. Otherwise, it returns zero. The +// result should be freed with |OPENSSL_free|. OPENSSL_EXPORT int RSA_public_key_to_bytes(uint8_t **out_bytes, size_t *out_len, const RSA *rsa); -/* RSA_parse_private_key parses a DER-encoded RSAPrivateKey structure (RFC 3447) - * from |cbs| and advances |cbs|. It returns a newly-allocated |RSA| or NULL on - * error. */ +// RSA_parse_private_key parses a DER-encoded RSAPrivateKey structure (RFC 3447) +// from |cbs| and advances |cbs|. It returns a newly-allocated |RSA| or NULL on +// error. OPENSSL_EXPORT RSA *RSA_parse_private_key(CBS *cbs); -/* RSA_private_key_from_bytes parses |in| as a DER-encoded RSAPrivateKey - * structure (RFC 3447). It returns a newly-allocated |RSA| or NULL on error. */ +// RSA_private_key_from_bytes parses |in| as a DER-encoded RSAPrivateKey +// structure (RFC 3447). It returns a newly-allocated |RSA| or NULL on error. OPENSSL_EXPORT RSA *RSA_private_key_from_bytes(const uint8_t *in, size_t in_len); -/* RSA_marshal_private_key marshals |rsa| as a DER-encoded RSAPrivateKey - * structure (RFC 3447) and appends the result to |cbb|. It returns one on - * success and zero on failure. */ +// RSA_marshal_private_key marshals |rsa| as a DER-encoded RSAPrivateKey +// structure (RFC 3447) and appends the result to |cbb|. It returns one on +// success and zero on failure. OPENSSL_EXPORT int RSA_marshal_private_key(CBB *cbb, const RSA *rsa); -/* RSA_private_key_to_bytes marshals |rsa| as a DER-encoded RSAPrivateKey - * structure (RFC 3447) and, on success, sets |*out_bytes| to a newly allocated - * buffer containing the result and returns one. Otherwise, it returns zero. The - * result should be freed with |OPENSSL_free|. */ +// RSA_private_key_to_bytes marshals |rsa| as a DER-encoded RSAPrivateKey +// structure (RFC 3447) and, on success, sets |*out_bytes| to a newly allocated +// buffer containing the result and returns one. Otherwise, it returns zero. The +// result should be freed with |OPENSSL_free|. OPENSSL_EXPORT int RSA_private_key_to_bytes(uint8_t **out_bytes, size_t *out_len, const RSA *rsa); -/* ex_data functions. - * - * See |ex_data.h| for details. */ +// ex_data functions. +// +// See |ex_data.h| for details. 
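A short sketch of the byte-oriented serialization helpers above: marshal a public key to a DER RSAPublicKey and parse it back. As noted above, the buffer returned through |out_bytes| must be released with |OPENSSL_free|. The helper name is illustrative; the |CBB|/|CBS| variants work the same way when the caller already manages a serialization buffer.

#include <openssl/mem.h>
#include <openssl/rsa.h>

static int public_key_der_roundtrip(const RSA *rsa) {
  int ok = 0;
  uint8_t *der = NULL;
  size_t der_len = 0;
  RSA *copy = NULL;

  if (!RSA_public_key_to_bytes(&der, &der_len, rsa)) {
    goto out;
  }
  copy = RSA_public_key_from_bytes(der, der_len);
  ok = copy != NULL;

out:
  RSA_free(copy);
  OPENSSL_free(der);
  return ok;
}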
OPENSSL_EXPORT int RSA_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); -OPENSSL_EXPORT int RSA_set_ex_data(RSA *r, int idx, void *arg); -OPENSSL_EXPORT void *RSA_get_ex_data(const RSA *r, int idx); +OPENSSL_EXPORT int RSA_set_ex_data(RSA *rsa, int idx, void *arg); +OPENSSL_EXPORT void *RSA_get_ex_data(const RSA *rsa, int idx); -/* Flags. */ +// Flags. -/* RSA_FLAG_OPAQUE specifies that this RSA_METHOD does not expose its key - * material. This may be set if, for instance, it is wrapping some other crypto - * API, like a platform key store. */ +// RSA_FLAG_OPAQUE specifies that this RSA_METHOD does not expose its key +// material. This may be set if, for instance, it is wrapping some other crypto +// API, like a platform key store. #define RSA_FLAG_OPAQUE 1 -/* Deprecated and ignored. */ +// Deprecated and ignored. #define RSA_FLAG_CACHE_PUBLIC 2 -/* Deprecated and ignored. */ +// Deprecated and ignored. #define RSA_FLAG_CACHE_PRIVATE 4 -/* RSA_FLAG_NO_BLINDING disables blinding of private operations, which is a - * dangerous thing to do. It is deprecated and should not be used. It will - * be ignored whenever possible. - * - * This flag must be used if a key without the public exponent |e| is used for - * private key operations; avoid using such keys whenever possible. */ +// RSA_FLAG_NO_BLINDING disables blinding of private operations, which is a +// dangerous thing to do. It is deprecated and should not be used. It will +// be ignored whenever possible. +// +// This flag must be used if a key without the public exponent |e| is used for +// private key operations; avoid using such keys whenever possible. #define RSA_FLAG_NO_BLINDING 8 -/* RSA_FLAG_EXT_PKEY is deprecated and ignored. */ +// RSA_FLAG_EXT_PKEY is deprecated and ignored. #define RSA_FLAG_EXT_PKEY 0x20 -/* RSA_FLAG_SIGN_VER causes the |sign| and |verify| functions of |rsa_meth_st| - * to be called when set. */ +// RSA_FLAG_SIGN_VER causes the |sign| and |verify| functions of |rsa_meth_st| +// to be called when set. #define RSA_FLAG_SIGN_VER 0x40 -/* RSA public exponent values. */ +// RSA public exponent values. #define RSA_3 0x3 #define RSA_F4 0x10001 -/* Deprecated functions. */ +// Deprecated functions. -/* RSA_blinding_on returns one. */ +// RSA_blinding_on returns one. OPENSSL_EXPORT int RSA_blinding_on(RSA *rsa, BN_CTX *ctx); -/* RSA_generate_key behaves like |RSA_generate_key_ex|, which is what you - * should use instead. It returns NULL on error, or a newly-allocated |RSA| on - * success. This function is provided for compatibility only. The |callback| - * and |cb_arg| parameters must be NULL. */ +// RSA_generate_key behaves like |RSA_generate_key_ex|, which is what you +// should use instead. It returns NULL on error, or a newly-allocated |RSA| on +// success. This function is provided for compatibility only. The |callback| +// and |cb_arg| parameters must be NULL. OPENSSL_EXPORT RSA *RSA_generate_key(int bits, unsigned long e, void *callback, void *cb_arg); -/* d2i_RSAPublicKey parses an ASN.1, DER-encoded, RSA public key from |len| - * bytes at |*inp|. If |out| is not NULL then, on exit, a pointer to the result - * is in |*out|. Note that, even if |*out| is already non-NULL on entry, it - * will not be written to. Rather, a fresh |RSA| is allocated and the previous - * one is freed. On successful exit, |*inp| is advanced past the DER structure. - * It returns the result or NULL on error. 
*/ +// d2i_RSAPublicKey parses an ASN.1, DER-encoded, RSA public key from |len| +// bytes at |*inp|. If |out| is not NULL then, on exit, a pointer to the result +// is in |*out|. Note that, even if |*out| is already non-NULL on entry, it +// will not be written to. Rather, a fresh |RSA| is allocated and the previous +// one is freed. On successful exit, |*inp| is advanced past the DER structure. +// It returns the result or NULL on error. OPENSSL_EXPORT RSA *d2i_RSAPublicKey(RSA **out, const uint8_t **inp, long len); -/* i2d_RSAPublicKey marshals |in| to an ASN.1, DER structure. If |outp| is not - * NULL then the result is written to |*outp| and |*outp| is advanced just past - * the output. It returns the number of bytes in the result, whether written or - * not, or a negative value on error. */ +// i2d_RSAPublicKey marshals |in| to an ASN.1, DER structure. If |outp| is not +// NULL then the result is written to |*outp| and |*outp| is advanced just past +// the output. It returns the number of bytes in the result, whether written or +// not, or a negative value on error. OPENSSL_EXPORT int i2d_RSAPublicKey(const RSA *in, uint8_t **outp); -/* d2i_RSAPrivateKey parses an ASN.1, DER-encoded, RSA private key from |len| - * bytes at |*inp|. If |out| is not NULL then, on exit, a pointer to the result - * is in |*out|. Note that, even if |*out| is already non-NULL on entry, it - * will not be written to. Rather, a fresh |RSA| is allocated and the previous - * one is freed. On successful exit, |*inp| is advanced past the DER structure. - * It returns the result or NULL on error. */ +// d2i_RSAPrivateKey parses an ASN.1, DER-encoded, RSA private key from |len| +// bytes at |*inp|. If |out| is not NULL then, on exit, a pointer to the result +// is in |*out|. Note that, even if |*out| is already non-NULL on entry, it +// will not be written to. Rather, a fresh |RSA| is allocated and the previous +// one is freed. On successful exit, |*inp| is advanced past the DER structure. +// It returns the result or NULL on error. OPENSSL_EXPORT RSA *d2i_RSAPrivateKey(RSA **out, const uint8_t **inp, long len); -/* i2d_RSAPrivateKey marshals |in| to an ASN.1, DER structure. If |outp| is not - * NULL then the result is written to |*outp| and |*outp| is advanced just past - * the output. It returns the number of bytes in the result, whether written or - * not, or a negative value on error. */ +// i2d_RSAPrivateKey marshals |in| to an ASN.1, DER structure. If |outp| is not +// NULL then the result is written to |*outp| and |*outp| is advanced just past +// the output. It returns the number of bytes in the result, whether written or +// not, or a negative value on error. OPENSSL_EXPORT int i2d_RSAPrivateKey(const RSA *in, uint8_t **outp); -/* RSA_padding_add_PKCS1_PSS acts like |RSA_padding_add_PKCS1_PSS_mgf1| but the - * |mgf1Hash| parameter of the latter is implicitly set to |Hash|. */ +// RSA_padding_add_PKCS1_PSS acts like |RSA_padding_add_PKCS1_PSS_mgf1| but the +// |mgf1Hash| parameter of the latter is implicitly set to |Hash|. +// +// This function implements only the low-level padding logic. Use +// |RSA_sign_pss_mgf1| instead. OPENSSL_EXPORT int RSA_padding_add_PKCS1_PSS(RSA *rsa, uint8_t *EM, const uint8_t *mHash, const EVP_MD *Hash, int sLen); -/* RSA_verify_PKCS1_PSS acts like |RSA_verify_PKCS1_PSS_mgf1| but the - * |mgf1Hash| parameter of the latter is implicitly set to |Hash|. 
*/ +// RSA_verify_PKCS1_PSS acts like |RSA_verify_PKCS1_PSS_mgf1| but the +// |mgf1Hash| parameter of the latter is implicitly set to |Hash|. +// +// This function implements only the low-level padding logic. Use +// |RSA_verify_pss_mgf1| instead. OPENSSL_EXPORT int RSA_verify_PKCS1_PSS(RSA *rsa, const uint8_t *mHash, const EVP_MD *Hash, const uint8_t *EM, int sLen); -/* RSA_padding_add_PKCS1_OAEP acts like |RSA_padding_add_PKCS1_OAEP_mgf1| but - * the |md| and |mgf1md| parameters of the latter are implicitly set to NULL, - * which means SHA-1. */ -OPENSSL_EXPORT int RSA_padding_add_PKCS1_OAEP(uint8_t *to, unsigned to_len, +// RSA_padding_add_PKCS1_OAEP acts like |RSA_padding_add_PKCS1_OAEP_mgf1| but +// the |md| and |mgf1md| parameters of the latter are implicitly set to NULL, +// which means SHA-1. +OPENSSL_EXPORT int RSA_padding_add_PKCS1_OAEP(uint8_t *to, size_t to_len, const uint8_t *from, - unsigned from_len, + size_t from_len, const uint8_t *param, - unsigned param_len); + size_t param_len); struct rsa_meth_st { @@ -534,66 +593,38 @@ struct rsa_meth_st { int (*init)(RSA *rsa); int (*finish)(RSA *rsa); - /* size returns the size of the RSA modulus in bytes. */ + // size returns the size of the RSA modulus in bytes. size_t (*size)(const RSA *rsa); int (*sign)(int type, const uint8_t *m, unsigned int m_length, uint8_t *sigret, unsigned int *siglen, const RSA *rsa); - /* Ignored. Set this to NULL. */ - int (*verify)(int dtype, const uint8_t *m, unsigned int m_length, - const uint8_t *sigbuf, unsigned int siglen, const RSA *rsa); - - - /* These functions mirror the |RSA_*| functions of the same name. */ - int (*encrypt)(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, - const uint8_t *in, size_t in_len, int padding); + // These functions mirror the |RSA_*| functions of the same name. int (*sign_raw)(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); - int (*decrypt)(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, const uint8_t *in, size_t in_len, int padding); - /* Ignored. Set this to NULL. */ - int (*verify_raw)(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out, - const uint8_t *in, size_t in_len, int padding); - - /* private_transform takes a big-endian integer from |in|, calculates the - * d'th power of it, modulo the RSA modulus and writes the result as a - * big-endian integer to |out|. Both |in| and |out| are |len| bytes long and - * |len| is always equal to |RSA_size(rsa)|. If the result of the transform - * can be represented in fewer than |len| bytes, then |out| must be zero - * padded on the left. - * - * It returns one on success and zero otherwise. - * - * RSA decrypt and sign operations will call this, thus an ENGINE might wish - * to override it in order to avoid having to implement the padding - * functionality demanded by those, higher level, operations. */ + + // private_transform takes a big-endian integer from |in|, calculates the + // d'th power of it, modulo the RSA modulus and writes the result as a + // big-endian integer to |out|. Both |in| and |out| are |len| bytes long and + // |len| is always equal to |RSA_size(rsa)|. If the result of the transform + // can be represented in fewer than |len| bytes, then |out| must be zero + // padded on the left. + // + // It returns one on success and zero otherwise. 
+ // + // RSA decrypt and sign operations will call this, thus an ENGINE might wish + // to override it in order to avoid having to implement the padding + // functionality demanded by those, higher level, operations. int (*private_transform)(RSA *rsa, uint8_t *out, const uint8_t *in, size_t len); - /* mod_exp is deprecated and ignored. Set it to NULL. */ - int (*mod_exp)(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx); - - /* bn_mod_exp is deprecated and ignored. Set it to NULL. */ - int (*bn_mod_exp)(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, - const BIGNUM *m, BN_CTX *ctx, - const BN_MONT_CTX *mont); - int flags; - - int (*keygen)(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb); - - int (*multi_prime_keygen)(RSA *rsa, int bits, int num_primes, BIGNUM *e, - BN_GENCB *cb); - - /* supports_digest returns one if |rsa| supports digests of type - * |md|. If null, it is assumed that all digests are supported. */ - int (*supports_digest)(const RSA *rsa, const EVP_MD *md); }; -/* Private functions. */ +// Private functions. typedef struct bn_blinding_st BN_BLINDING; @@ -609,35 +640,33 @@ struct rsa_st { BIGNUM *dmq1; BIGNUM *iqmp; - STACK_OF(RSA_additional_prime) *additional_primes; - - /* be careful using this if the RSA structure is shared */ + // be careful using this if the RSA structure is shared CRYPTO_EX_DATA ex_data; CRYPTO_refcount_t references; int flags; CRYPTO_MUTEX lock; - /* Used to cache montgomery values. The creation of these values is protected - * by |lock|. */ + // Used to cache montgomery values. The creation of these values is protected + // by |lock|. BN_MONT_CTX *mont_n; BN_MONT_CTX *mont_p; BN_MONT_CTX *mont_q; - /* num_blindings contains the size of the |blindings| and |blindings_inuse| - * arrays. This member and the |blindings_inuse| array are protected by - * |lock|. */ + // num_blindings contains the size of the |blindings| and |blindings_inuse| + // arrays. This member and the |blindings_inuse| array are protected by + // |lock|. unsigned num_blindings; - /* blindings is an array of BN_BLINDING structures that can be reserved by a - * thread by locking |lock| and changing the corresponding element in - * |blindings_inuse| from 0 to 1. */ + // blindings is an array of BN_BLINDING structures that can be reserved by a + // thread by locking |lock| and changing the corresponding element in + // |blindings_inuse| from 0 to 1. BN_BLINDING **blindings; unsigned char *blindings_inuse; }; #if defined(__cplusplus) -} /* extern C */ +} // extern C extern "C++" { @@ -647,7 +676,7 @@ BORINGSSL_MAKE_DELETER(RSA, RSA_free) } // namespace bssl -} /* extern C++ */ +} // extern C++ #endif @@ -697,5 +726,6 @@ BORINGSSL_MAKE_DELETER(RSA, RSA_free) #define RSA_R_UNKNOWN_PADDING_TYPE 143 #define RSA_R_VALUE_MISSING 144 #define RSA_R_WRONG_SIGNATURE_LENGTH 145 +#define RSA_R_PUBLIC_KEY_VALIDATION_FAILED 146 -#endif /* OPENSSL_HEADER_RSA_H */ +#endif // OPENSSL_HEADER_RSA_H diff --git a/Sources/BoringSSL/include/openssl/sha.h b/Sources/BoringSSL/include/openssl/sha.h index 7c310979c..fc4644bff 100644 --- a/Sources/BoringSSL/include/openssl/sha.h +++ b/Sources/BoringSSL/include/openssl/sha.h @@ -64,42 +64,42 @@ extern "C" { #endif -/* The SHA family of hash functions (SHA-1 and SHA-2). */ +// The SHA family of hash functions (SHA-1 and SHA-2). -/* SHA_CBLOCK is the block size of SHA-1. */ +// SHA_CBLOCK is the block size of SHA-1. #define SHA_CBLOCK 64 -/* SHA_DIGEST_LENGTH is the length of a SHA-1 digest. */ +// SHA_DIGEST_LENGTH is the length of a SHA-1 digest. 
#define SHA_DIGEST_LENGTH 20 -/* SHA1_Init initialises |sha| and returns one. */ +// SHA1_Init initialises |sha| and returns one. OPENSSL_EXPORT int SHA1_Init(SHA_CTX *sha); -/* SHA1_Update adds |len| bytes from |data| to |sha| and returns one. */ +// SHA1_Update adds |len| bytes from |data| to |sha| and returns one. OPENSSL_EXPORT int SHA1_Update(SHA_CTX *sha, const void *data, size_t len); -/* SHA1_Final adds the final padding to |sha| and writes the resulting digest - * to |md|, which must have at least |SHA_DIGEST_LENGTH| bytes of space. It - * returns one. */ +// SHA1_Final adds the final padding to |sha| and writes the resulting digest +// to |md|, which must have at least |SHA_DIGEST_LENGTH| bytes of space. It +// returns one. OPENSSL_EXPORT int SHA1_Final(uint8_t *md, SHA_CTX *sha); -/* SHA1 writes the digest of |len| bytes from |data| to |out| and returns - * |out|. There must be at least |SHA_DIGEST_LENGTH| bytes of space in - * |out|. */ +// SHA1 writes the digest of |len| bytes from |data| to |out| and returns +// |out|. There must be at least |SHA_DIGEST_LENGTH| bytes of space in +// |out|. OPENSSL_EXPORT uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out); -/* SHA1_Transform is a low-level function that performs a single, SHA-1 block - * transformation using the state from |sha| and |SHA_CBLOCK| bytes from - * |block|. */ +// SHA1_Transform is a low-level function that performs a single, SHA-1 block +// transformation using the state from |sha| and |SHA_CBLOCK| bytes from +// |block|. OPENSSL_EXPORT void SHA1_Transform(SHA_CTX *sha, const uint8_t *block); struct sha_state_st { #if defined(OPENSSL_WINDOWS) uint32_t h[5]; #else - /* wpa_supplicant accesses |h0|..|h4| so we must support those names - * for compatibility with it until it can be updated. */ + // wpa_supplicant accesses |h0|..|h4| so we must support those names + // for compatibility with it until it can be updated. union { uint32_t h[5]; struct { @@ -117,58 +117,58 @@ struct sha_state_st { }; -/* SHA-224. */ +// SHA-224. -/* SHA224_CBLOCK is the block size of SHA-224. */ +// SHA224_CBLOCK is the block size of SHA-224. #define SHA224_CBLOCK 64 -/* SHA224_DIGEST_LENGTH is the length of a SHA-224 digest. */ +// SHA224_DIGEST_LENGTH is the length of a SHA-224 digest. #define SHA224_DIGEST_LENGTH 28 -/* SHA224_Init initialises |sha| and returns 1. */ +// SHA224_Init initialises |sha| and returns 1. OPENSSL_EXPORT int SHA224_Init(SHA256_CTX *sha); -/* SHA224_Update adds |len| bytes from |data| to |sha| and returns 1. */ +// SHA224_Update adds |len| bytes from |data| to |sha| and returns 1. OPENSSL_EXPORT int SHA224_Update(SHA256_CTX *sha, const void *data, size_t len); -/* SHA224_Final adds the final padding to |sha| and writes the resulting digest - * to |md|, which must have at least |SHA224_DIGEST_LENGTH| bytes of space. It - * returns one on success and zero on programmer error. */ +// SHA224_Final adds the final padding to |sha| and writes the resulting digest +// to |md|, which must have at least |SHA224_DIGEST_LENGTH| bytes of space. It +// returns one on success and zero on programmer error. OPENSSL_EXPORT int SHA224_Final(uint8_t *md, SHA256_CTX *sha); -/* SHA224 writes the digest of |len| bytes from |data| to |out| and returns - * |out|. There must be at least |SHA224_DIGEST_LENGTH| bytes of space in - * |out|. */ +// SHA224 writes the digest of |len| bytes from |data| to |out| and returns +// |out|. There must be at least |SHA224_DIGEST_LENGTH| bytes of space in +// |out|. 
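The streaming Init/Update/Final pattern above pairs with the one-shot helpers, and the same pattern repeats for each SHA-2 family later in this header. A small sketch using the SHA-1 functions just above, with an illustrative helper name, showing that the two styles agree:

#include <openssl/sha.h>
#include <string.h>

static int sha1_streaming_matches_oneshot(void) {
  static const uint8_t kPart1[] = "hello ";
  static const uint8_t kPart2[] = "world";
  uint8_t msg[sizeof(kPart1) - 1 + sizeof(kPart2) - 1];
  uint8_t streamed[SHA_DIGEST_LENGTH], oneshot[SHA_DIGEST_LENGTH];
  SHA_CTX ctx;

  /* Feed the message in two chunks. */
  SHA1_Init(&ctx);
  SHA1_Update(&ctx, kPart1, sizeof(kPart1) - 1);
  SHA1_Update(&ctx, kPart2, sizeof(kPart2) - 1);
  SHA1_Final(streamed, &ctx);

  /* Hash the concatenation in a single call. */
  memcpy(msg, kPart1, sizeof(kPart1) - 1);
  memcpy(msg + sizeof(kPart1) - 1, kPart2, sizeof(kPart2) - 1);
  SHA1(msg, sizeof(msg), oneshot);

  return memcmp(streamed, oneshot, SHA_DIGEST_LENGTH) == 0;
}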
OPENSSL_EXPORT uint8_t *SHA224(const uint8_t *data, size_t len, uint8_t *out); -/* SHA-256. */ +// SHA-256. -/* SHA256_CBLOCK is the block size of SHA-256. */ +// SHA256_CBLOCK is the block size of SHA-256. #define SHA256_CBLOCK 64 -/* SHA256_DIGEST_LENGTH is the length of a SHA-256 digest. */ +// SHA256_DIGEST_LENGTH is the length of a SHA-256 digest. #define SHA256_DIGEST_LENGTH 32 -/* SHA256_Init initialises |sha| and returns 1. */ +// SHA256_Init initialises |sha| and returns 1. OPENSSL_EXPORT int SHA256_Init(SHA256_CTX *sha); -/* SHA256_Update adds |len| bytes from |data| to |sha| and returns 1. */ +// SHA256_Update adds |len| bytes from |data| to |sha| and returns 1. OPENSSL_EXPORT int SHA256_Update(SHA256_CTX *sha, const void *data, size_t len); -/* SHA256_Final adds the final padding to |sha| and writes the resulting digest - * to |md|, which must have at least |SHA256_DIGEST_LENGTH| bytes of space. It - * returns one on success and zero on programmer error. */ +// SHA256_Final adds the final padding to |sha| and writes the resulting digest +// to |md|, which must have at least |SHA256_DIGEST_LENGTH| bytes of space. It +// returns one on success and zero on programmer error. OPENSSL_EXPORT int SHA256_Final(uint8_t *md, SHA256_CTX *sha); -/* SHA256 writes the digest of |len| bytes from |data| to |out| and returns - * |out|. There must be at least |SHA256_DIGEST_LENGTH| bytes of space in - * |out|. */ +// SHA256 writes the digest of |len| bytes from |data| to |out| and returns +// |out|. There must be at least |SHA256_DIGEST_LENGTH| bytes of space in +// |out|. OPENSSL_EXPORT uint8_t *SHA256(const uint8_t *data, size_t len, uint8_t *out); -/* SHA256_Transform is a low-level function that performs a single, SHA-256 - * block transformation using the state from |sha| and |SHA256_CBLOCK| bytes - * from |block|. */ +// SHA256_Transform is a low-level function that performs a single, SHA-256 +// block transformation using the state from |sha| and |SHA256_CBLOCK| bytes +// from |block|. OPENSSL_EXPORT void SHA256_Transform(SHA256_CTX *sha, const uint8_t *block); struct sha256_state_st { @@ -179,63 +179,63 @@ struct sha256_state_st { }; -/* SHA-384. */ +// SHA-384. -/* SHA384_CBLOCK is the block size of SHA-384. */ +// SHA384_CBLOCK is the block size of SHA-384. #define SHA384_CBLOCK 128 -/* SHA384_DIGEST_LENGTH is the length of a SHA-384 digest. */ +// SHA384_DIGEST_LENGTH is the length of a SHA-384 digest. #define SHA384_DIGEST_LENGTH 48 -/* SHA384_Init initialises |sha| and returns 1. */ +// SHA384_Init initialises |sha| and returns 1. OPENSSL_EXPORT int SHA384_Init(SHA512_CTX *sha); -/* SHA384_Update adds |len| bytes from |data| to |sha| and returns 1. */ +// SHA384_Update adds |len| bytes from |data| to |sha| and returns 1. OPENSSL_EXPORT int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len); -/* SHA384_Final adds the final padding to |sha| and writes the resulting digest - * to |md|, which must have at least |SHA384_DIGEST_LENGTH| bytes of space. It - * returns one on success and zero on programmer error. */ +// SHA384_Final adds the final padding to |sha| and writes the resulting digest +// to |md|, which must have at least |SHA384_DIGEST_LENGTH| bytes of space. It +// returns one on success and zero on programmer error. OPENSSL_EXPORT int SHA384_Final(uint8_t *md, SHA512_CTX *sha); -/* SHA384 writes the digest of |len| bytes from |data| to |out| and returns - * |out|. There must be at least |SHA384_DIGEST_LENGTH| bytes of space in - * |out|. 
*/ +// SHA384 writes the digest of |len| bytes from |data| to |out| and returns +// |out|. There must be at least |SHA384_DIGEST_LENGTH| bytes of space in +// |out|. OPENSSL_EXPORT uint8_t *SHA384(const uint8_t *data, size_t len, uint8_t *out); -/* SHA384_Transform is a low-level function that performs a single, SHA-384 - * block transformation using the state from |sha| and |SHA384_CBLOCK| bytes - * from |block|. */ +// SHA384_Transform is a low-level function that performs a single, SHA-384 +// block transformation using the state from |sha| and |SHA384_CBLOCK| bytes +// from |block|. OPENSSL_EXPORT void SHA384_Transform(SHA512_CTX *sha, const uint8_t *block); -/* SHA-512. */ +// SHA-512. -/* SHA512_CBLOCK is the block size of SHA-512. */ +// SHA512_CBLOCK is the block size of SHA-512. #define SHA512_CBLOCK 128 -/* SHA512_DIGEST_LENGTH is the length of a SHA-512 digest. */ +// SHA512_DIGEST_LENGTH is the length of a SHA-512 digest. #define SHA512_DIGEST_LENGTH 64 -/* SHA512_Init initialises |sha| and returns 1. */ +// SHA512_Init initialises |sha| and returns 1. OPENSSL_EXPORT int SHA512_Init(SHA512_CTX *sha); -/* SHA512_Update adds |len| bytes from |data| to |sha| and returns 1. */ +// SHA512_Update adds |len| bytes from |data| to |sha| and returns 1. OPENSSL_EXPORT int SHA512_Update(SHA512_CTX *sha, const void *data, size_t len); -/* SHA512_Final adds the final padding to |sha| and writes the resulting digest - * to |md|, which must have at least |SHA512_DIGEST_LENGTH| bytes of space. It - * returns one on success and zero on programmer error. */ +// SHA512_Final adds the final padding to |sha| and writes the resulting digest +// to |md|, which must have at least |SHA512_DIGEST_LENGTH| bytes of space. It +// returns one on success and zero on programmer error. OPENSSL_EXPORT int SHA512_Final(uint8_t *md, SHA512_CTX *sha); -/* SHA512 writes the digest of |len| bytes from |data| to |out| and returns - * |out|. There must be at least |SHA512_DIGEST_LENGTH| bytes of space in - * |out|. */ +// SHA512 writes the digest of |len| bytes from |data| to |out| and returns +// |out|. There must be at least |SHA512_DIGEST_LENGTH| bytes of space in +// |out|. OPENSSL_EXPORT uint8_t *SHA512(const uint8_t *data, size_t len, uint8_t *out); -/* SHA512_Transform is a low-level function that performs a single, SHA-512 - * block transformation using the state from |sha| and |SHA512_CBLOCK| bytes - * from |block|. */ +// SHA512_Transform is a low-level function that performs a single, SHA-512 +// block transformation using the state from |sha| and |SHA512_CBLOCK| bytes +// from |block|. OPENSSL_EXPORT void SHA512_Transform(SHA512_CTX *sha, const uint8_t *block); struct sha512_state_st { @@ -250,7 +250,7 @@ struct sha512_state_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_SHA_H */ +#endif // OPENSSL_HEADER_SHA_H diff --git a/Sources/BoringSSL/include/openssl/span.h b/Sources/BoringSSL/include/openssl/span.h new file mode 100644 index 000000000..3a629f794 --- /dev/null +++ b/Sources/BoringSSL/include/openssl/span.h @@ -0,0 +1,191 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#ifndef OPENSSL_HEADER_SSL_SPAN_H +#define OPENSSL_HEADER_SSL_SPAN_H + +#include + +#if !defined(BORINGSSL_NO_CXX) + +extern "C++" { + +#include +#include +#include +#include + +namespace bssl { + +template +class Span; + +namespace internal { +template +class SpanBase { + // Put comparison operator implementations into a base class with const T, so + // they can be used with any type that implicitly converts into a Span. + static_assert(std::is_const::value, + "Span must be derived from SpanBase"); + + friend bool operator==(Span lhs, Span rhs) { + // MSVC issues warning C4996 because std::equal is unsafe. The pragma to + // suppress the warning mysteriously has no effect, hence this + // implementation. See + // https://msdn.microsoft.com/en-us/library/aa985974.aspx. + if (lhs.size() != rhs.size()) { + return false; + } + for (T *l = lhs.begin(), *r = rhs.begin(); l != lhs.end() && r != rhs.end(); + ++l, ++r) { + if (*l != *r) { + return false; + } + } + return true; + } + + friend bool operator!=(Span lhs, Span rhs) { return !(lhs == rhs); } +}; +} // namespace internal + +// A Span is a non-owning reference to a contiguous array of objects of type +// |T|. Conceptually, a Span is a simple a pointer to |T| and a count of +// elements accessible via that pointer. The elements referenced by the Span can +// be mutated if |T| is mutable. +// +// A Span can be constructed from container types implementing |data()| and +// |size()| methods. If |T| is constant, construction from a container type is +// implicit. This allows writing methods that accept data from some unspecified +// container type: +// +// // Foo views data referenced by v. +// void Foo(bssl::Span v) { ... } +// +// std::vector vec; +// Foo(vec); +// +// For mutable Spans, conversion is explicit: +// +// // FooMutate mutates data referenced by v. +// void FooMutate(bssl::Span v) { ... } +// +// FooMutate(bssl::Span(vec)); +// +// You can also use the |MakeSpan| and |MakeConstSpan| factory methods to +// construct Spans in order to deduce the type of the Span automatically. +// +// FooMutate(bssl::MakeSpan(vec)); +// +// Note that Spans have value type sematics. They are cheap to construct and +// copy, and should be passed by value whenever a method would otherwise accept +// a reference or pointer to a container or array. +template +class Span : private internal::SpanBase { + private: + // Heuristically test whether C is a container type that can be converted into + // a Span by checking for data() and size() member functions. + // + // TODO(davidben): Switch everything to std::enable_if_t when we remove + // support for MSVC 2015. Although we could write our own enable_if_t and MSVC + // 2015 has std::enable_if_t anyway, MSVC 2015's SFINAE implementation is + // problematic and does not work below unless we write the ::type at use. 
+ template + using EnableIfContainer = std::enable_if< + std::is_convertible().data()), T *>::value && + std::is_integral().size())>::value>; + + static const size_t npos = static_cast(-1); + + public: + constexpr Span() : Span(nullptr, 0) {} + constexpr Span(T *ptr, size_t len) : data_(ptr), size_(len) {} + + template + constexpr Span(T (&array)[N]) : Span(array, N) {} + + template < + typename C, typename = typename EnableIfContainer::type, + typename = typename std::enable_if::value, C>::type> + Span(const C &container) : data_(container.data()), size_(container.size()) {} + + template < + typename C, typename = typename EnableIfContainer::type, + typename = typename std::enable_if::value, C>::type> + explicit Span(C &container) + : data_(container.data()), size_(container.size()) {} + + T *data() const { return data_; } + size_t size() const { return size_; } + bool empty() const { return size_ == 0; } + + T *begin() const { return data_; } + const T *cbegin() const { return data_; } + T *end() const { return data_ + size_; }; + const T *cend() const { return end(); }; + + T &front() const { + assert(size_ != 0); + return data_[0]; + } + T &back() const { + assert(size_ != 0); + return data_[size_ - 1]; + } + + T &operator[](size_t i) const { return data_[i]; } + T &at(size_t i) const { return data_[i]; } + + Span subspan(size_t pos = 0, size_t len = npos) const { + if (pos > size_) { + abort(); // absl::Span throws an exception here. + } + return Span(data_ + pos, std::min(size_ - pos, len)); + } + + private: + T *data_; + size_t size_; +}; + +template +const size_t Span::npos; + +template +Span MakeSpan(T *ptr, size_t size) { + return Span(ptr, size); +} + +template +auto MakeSpan(C &c) -> decltype(MakeSpan(c.data(), c.size())) { + return MakeSpan(c.data(), c.size()); +} + +template +Span MakeConstSpan(T *ptr, size_t size) { + return Span(ptr, size); +} + +template +auto MakeConstSpan(const C &c) -> decltype(MakeConstSpan(c.data(), c.size())) { + return MakeConstSpan(c.data(), c.size()); +} + +} // namespace bssl + +} // extern C++ + +#endif // !defined(BORINGSSL_NO_CXX) + +#endif // OPENSSL_HEADER_SSL_SPAN_H diff --git a/Sources/BoringSSL/include/openssl/ssl.h b/Sources/BoringSSL/include/openssl/ssl.h index 313153980..53a8eb514 100644 --- a/Sources/BoringSSL/include/openssl/ssl.h +++ b/Sources/BoringSSL/include/openssl/ssl.h @@ -146,9 +146,8 @@ #include #include -#include -#include #include +#include #include #include #include @@ -158,9 +157,14 @@ #include #endif -/* Forward-declare struct timeval. On Windows, it is defined in winsock2.h and - * Windows headers define too many macros to be included in public headers. - * However, only a forward declaration is needed. */ +// NGINX needs this #include. Consider revisiting this after NGINX 1.14.0 has +// been out for a year or so (assuming that they fix it in that release.) See +// https://boringssl-review.googlesource.com/c/boringssl/+/21664. +#include + +// Forward-declare struct timeval. On Windows, it is defined in winsock2.h and +// Windows headers define too many macros to be included in public headers. +// However, only a forward declaration is needed. struct timeval; #if defined(__cplusplus) @@ -168,387 +172,412 @@ extern "C" { #endif -/* SSL implementation. */ +// SSL implementation. -/* SSL contexts. - * - * |SSL_CTX| objects manage shared state and configuration between multiple TLS - * or DTLS connections. Whether the connections are TLS or DTLS is selected by - * an |SSL_METHOD| on creation. 
- * - * |SSL_CTX| are reference-counted and may be shared by connections across - * multiple threads. Once shared, functions which change the |SSL_CTX|'s - * configuration may not be used. */ +// SSL contexts. +// +// |SSL_CTX| objects manage shared state and configuration between multiple TLS +// or DTLS connections. Whether the connections are TLS or DTLS is selected by +// an |SSL_METHOD| on creation. +// +// |SSL_CTX| are reference-counted and may be shared by connections across +// multiple threads. Once shared, functions which change the |SSL_CTX|'s +// configuration may not be used. -/* TLS_method is the |SSL_METHOD| used for TLS (and SSLv3) connections. */ +// TLS_method is the |SSL_METHOD| used for TLS (and SSLv3) connections. OPENSSL_EXPORT const SSL_METHOD *TLS_method(void); -/* DTLS_method is the |SSL_METHOD| used for DTLS connections. */ +// DTLS_method is the |SSL_METHOD| used for DTLS connections. OPENSSL_EXPORT const SSL_METHOD *DTLS_method(void); -/* SSL_CTX_new returns a newly-allocated |SSL_CTX| with default settings or NULL - * on error. */ +// TLS_with_buffers_method is like |TLS_method|, but avoids all use of +// crypto/x509. +OPENSSL_EXPORT const SSL_METHOD *TLS_with_buffers_method(void); + +// DTLS_with_buffers_method is like |DTLS_method|, but avoids all use of +// crypto/x509. +OPENSSL_EXPORT const SSL_METHOD *DTLS_with_buffers_method(void); + +// SSL_CTX_new returns a newly-allocated |SSL_CTX| with default settings or NULL +// on error. OPENSSL_EXPORT SSL_CTX *SSL_CTX_new(const SSL_METHOD *method); -/* SSL_CTX_up_ref increments the reference count of |ctx|. It returns one. */ +// SSL_CTX_up_ref increments the reference count of |ctx|. It returns one. OPENSSL_EXPORT int SSL_CTX_up_ref(SSL_CTX *ctx); -/* SSL_CTX_free releases memory associated with |ctx|. */ +// SSL_CTX_free releases memory associated with |ctx|. OPENSSL_EXPORT void SSL_CTX_free(SSL_CTX *ctx); -/* SSL connections. - * - * An |SSL| object represents a single TLS or DTLS connection. Although the - * shared |SSL_CTX| is thread-safe, an |SSL| is not thread-safe and may only be - * used on one thread at a time. */ +// SSL connections. +// +// An |SSL| object represents a single TLS or DTLS connection. Although the +// shared |SSL_CTX| is thread-safe, an |SSL| is not thread-safe and may only be +// used on one thread at a time. -/* SSL_new returns a newly-allocated |SSL| using |ctx| or NULL on error. The new - * connection inherits settings from |ctx| at the time of creation. Settings may - * also be individually configured on the connection. - * - * On creation, an |SSL| is not configured to be either a client or server. Call - * |SSL_set_connect_state| or |SSL_set_accept_state| to set this. */ +// SSL_new returns a newly-allocated |SSL| using |ctx| or NULL on error. The new +// connection inherits settings from |ctx| at the time of creation. Settings may +// also be individually configured on the connection. +// +// On creation, an |SSL| is not configured to be either a client or server. Call +// |SSL_set_connect_state| or |SSL_set_accept_state| to set this. OPENSSL_EXPORT SSL *SSL_new(SSL_CTX *ctx); -/* SSL_free releases memory associated with |ssl|. */ +// SSL_free releases memory associated with |ssl|. OPENSSL_EXPORT void SSL_free(SSL *ssl); -/* SSL_get_SSL_CTX returns the |SSL_CTX| associated with |ssl|. If - * |SSL_set_SSL_CTX| is called, it returns the new |SSL_CTX|, not the initial - * one. */ +// SSL_get_SSL_CTX returns the |SSL_CTX| associated with |ssl|. 
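As a usage note for the context and connection functions documented above, the sketch below shows the intended ownership pattern: one long-lived, shared SSL_CTX and one SSL per connection. It is a hypothetical fragment, not part of the patch, and omits certificate configuration and error reporting.

    #include <openssl/ssl.h>

    // One shared context for many connections. Returns NULL on error.
    static SSL_CTX *MakeClientContext(void) {
      return SSL_CTX_new(TLS_method());  // TLS (not DTLS) connections
    }

    // Each connection gets its own SSL object; it inherits |ctx|'s settings
    // at the time of creation.
    static SSL *MakeConnection(SSL_CTX *ctx) {
      return SSL_new(ctx);
    }

    static void Cleanup(SSL *ssl, SSL_CTX *ctx) {
      SSL_free(ssl);      // releases the per-connection state
      SSL_CTX_free(ctx);  // drops one reference to the shared context
    }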
If +// |SSL_set_SSL_CTX| is called, it returns the new |SSL_CTX|, not the initial +// one. OPENSSL_EXPORT SSL_CTX *SSL_get_SSL_CTX(const SSL *ssl); -/* SSL_set_connect_state configures |ssl| to be a client. */ +// SSL_set_connect_state configures |ssl| to be a client. OPENSSL_EXPORT void SSL_set_connect_state(SSL *ssl); -/* SSL_set_accept_state configures |ssl| to be a server. */ +// SSL_set_accept_state configures |ssl| to be a server. OPENSSL_EXPORT void SSL_set_accept_state(SSL *ssl); -/* SSL_is_server returns one if |ssl| is configured as a server and zero - * otherwise. */ +// SSL_is_server returns one if |ssl| is configured as a server and zero +// otherwise. OPENSSL_EXPORT int SSL_is_server(const SSL *ssl); -/* SSL_is_dtls returns one if |ssl| is a DTLS connection and zero otherwise. */ +// SSL_is_dtls returns one if |ssl| is a DTLS connection and zero otherwise. OPENSSL_EXPORT int SSL_is_dtls(const SSL *ssl); -/* SSL_set_bio configures |ssl| to read from |rbio| and write to |wbio|. |ssl| - * takes ownership of the two |BIO|s. If |rbio| and |wbio| are the same, |ssl| - * only takes ownership of one reference. - * - * In DTLS, if |rbio| is blocking, it must handle - * |BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT| control requests to set read timeouts. - * - * If |rbio| is the same as the currently configured |BIO| for reading, that - * side is left untouched and is not freed. - * - * If |wbio| is the same as the currently configured |BIO| for writing AND |ssl| - * is not currently configured to read from and write to the same |BIO|, that - * side is left untouched and is not freed. This asymmetry is present for - * historical reasons. - * - * Due to the very complex historical behavior of this function, calling this - * function if |ssl| already has |BIO|s configured is deprecated. Prefer - * |SSL_set0_rbio| and |SSL_set0_wbio| instead. */ +// SSL_set_bio configures |ssl| to read from |rbio| and write to |wbio|. |ssl| +// takes ownership of the two |BIO|s. If |rbio| and |wbio| are the same, |ssl| +// only takes ownership of one reference. +// +// In DTLS, |rbio| must be non-blocking to properly handle timeouts and +// retransmits. +// +// If |rbio| is the same as the currently configured |BIO| for reading, that +// side is left untouched and is not freed. +// +// If |wbio| is the same as the currently configured |BIO| for writing AND |ssl| +// is not currently configured to read from and write to the same |BIO|, that +// side is left untouched and is not freed. This asymmetry is present for +// historical reasons. +// +// Due to the very complex historical behavior of this function, calling this +// function if |ssl| already has |BIO|s configured is deprecated. Prefer +// |SSL_set0_rbio| and |SSL_set0_wbio| instead. OPENSSL_EXPORT void SSL_set_bio(SSL *ssl, BIO *rbio, BIO *wbio); -/* SSL_set0_rbio configures |ssl| to write to |rbio|. It takes ownership of - * |rbio|. - * - * Note that, although this function and |SSL_set0_wbio| may be called on the - * same |BIO|, each call takes a reference. Use |BIO_up_ref| to balance this. */ +// SSL_set0_rbio configures |ssl| to write to |rbio|. It takes ownership of +// |rbio|. +// +// Note that, although this function and |SSL_set0_wbio| may be called on the +// same |BIO|, each call takes a reference. Use |BIO_up_ref| to balance this. OPENSSL_EXPORT void SSL_set0_rbio(SSL *ssl, BIO *rbio); -/* SSL_set0_wbio configures |ssl| to write to |wbio|. It takes ownership of - * |wbio|. 
- * - * Note that, although this function and |SSL_set0_rbio| may be called on the - * same |BIO|, each call takes a reference. Use |BIO_up_ref| to balance this. */ +// SSL_set0_wbio configures |ssl| to write to |wbio|. It takes ownership of +// |wbio|. +// +// Note that, although this function and |SSL_set0_rbio| may be called on the +// same |BIO|, each call takes a reference. Use |BIO_up_ref| to balance this. OPENSSL_EXPORT void SSL_set0_wbio(SSL *ssl, BIO *wbio); -/* SSL_get_rbio returns the |BIO| that |ssl| reads from. */ +// SSL_get_rbio returns the |BIO| that |ssl| reads from. OPENSSL_EXPORT BIO *SSL_get_rbio(const SSL *ssl); -/* SSL_get_wbio returns the |BIO| that |ssl| writes to. */ +// SSL_get_wbio returns the |BIO| that |ssl| writes to. OPENSSL_EXPORT BIO *SSL_get_wbio(const SSL *ssl); -/* SSL_get_fd calls |SSL_get_rfd|. */ +// SSL_get_fd calls |SSL_get_rfd|. OPENSSL_EXPORT int SSL_get_fd(const SSL *ssl); -/* SSL_get_rfd returns the file descriptor that |ssl| is configured to read - * from. If |ssl|'s read |BIO| is not configured or doesn't wrap a file - * descriptor then it returns -1. - * - * Note: On Windows, this may return either a file descriptor or a socket (cast - * to int), depending on whether |ssl| was configured with a file descriptor or - * socket |BIO|. */ +// SSL_get_rfd returns the file descriptor that |ssl| is configured to read +// from. If |ssl|'s read |BIO| is not configured or doesn't wrap a file +// descriptor then it returns -1. +// +// Note: On Windows, this may return either a file descriptor or a socket (cast +// to int), depending on whether |ssl| was configured with a file descriptor or +// socket |BIO|. OPENSSL_EXPORT int SSL_get_rfd(const SSL *ssl); -/* SSL_get_wfd returns the file descriptor that |ssl| is configured to write - * to. If |ssl|'s write |BIO| is not configured or doesn't wrap a file - * descriptor then it returns -1. - * - * Note: On Windows, this may return either a file descriptor or a socket (cast - * to int), depending on whether |ssl| was configured with a file descriptor or - * socket |BIO|. */ +// SSL_get_wfd returns the file descriptor that |ssl| is configured to write +// to. If |ssl|'s write |BIO| is not configured or doesn't wrap a file +// descriptor then it returns -1. +// +// Note: On Windows, this may return either a file descriptor or a socket (cast +// to int), depending on whether |ssl| was configured with a file descriptor or +// socket |BIO|. OPENSSL_EXPORT int SSL_get_wfd(const SSL *ssl); -/* SSL_set_fd configures |ssl| to read from and write to |fd|. It returns one - * on success and zero on allocation error. The caller retains ownership of - * |fd|. - * - * On Windows, |fd| is cast to a |SOCKET| and used with Winsock APIs. */ +// SSL_set_fd configures |ssl| to read from and write to |fd|. It returns one +// on success and zero on allocation error. The caller retains ownership of +// |fd|. +// +// On Windows, |fd| is cast to a |SOCKET| and used with Winsock APIs. OPENSSL_EXPORT int SSL_set_fd(SSL *ssl, int fd); -/* SSL_set_rfd configures |ssl| to read from |fd|. It returns one on success and - * zero on allocation error. The caller retains ownership of |fd|. - * - * On Windows, |fd| is cast to a |SOCKET| and used with Winsock APIs. */ +// SSL_set_rfd configures |ssl| to read from |fd|. It returns one on success and +// zero on allocation error. The caller retains ownership of |fd|. +// +// On Windows, |fd| is cast to a |SOCKET| and used with Winsock APIs. 
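A small sketch tying together the role and transport setters documented above, assuming a POSIX TCP socket that the application has already connected (the |fd| parameter and the AttachClientSocket name are illustrative):

    #include <openssl/ssl.h>

    // Configure |ssl| as a TLS client speaking over an already-connected
    // socket. Returns 1 on success, 0 on error.
    static int AttachClientSocket(SSL *ssl, int fd) {
      SSL_set_connect_state(ssl);  // act as a client
      // SSL_set_fd wraps |fd| in a socket BIO for both reading and writing;
      // the caller keeps ownership of |fd| and must close it afterwards.
      return SSL_set_fd(ssl, fd);
    }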
OPENSSL_EXPORT int SSL_set_rfd(SSL *ssl, int fd); -/* SSL_set_wfd configures |ssl| to write to |fd|. It returns one on success and - * zero on allocation error. The caller retains ownership of |fd|. - * - * On Windows, |fd| is cast to a |SOCKET| and used with Winsock APIs. */ +// SSL_set_wfd configures |ssl| to write to |fd|. It returns one on success and +// zero on allocation error. The caller retains ownership of |fd|. +// +// On Windows, |fd| is cast to a |SOCKET| and used with Winsock APIs. OPENSSL_EXPORT int SSL_set_wfd(SSL *ssl, int fd); -/* SSL_do_handshake continues the current handshake. If there is none or the - * handshake has completed or False Started, it returns one. Otherwise, it - * returns <= 0. The caller should pass the value into |SSL_get_error| to - * determine how to proceed. - * - * In DTLS, if the read |BIO| is non-blocking, the caller must drive - * retransmissions. Whenever |SSL_get_error| signals |SSL_ERROR_WANT_READ|, use - * |DTLSv1_get_timeout| to determine the current timeout. If it expires before - * the next retry, call |DTLSv1_handle_timeout|. Note that DTLS handshake - * retransmissions use fresh sequence numbers, so it is not sufficient to replay - * packets at the transport. - * - * TODO(davidben): Ensure 0 is only returned on transport EOF. - * https://crbug.com/466303. */ +// SSL_do_handshake continues the current handshake. If there is none or the +// handshake has completed or False Started, it returns one. Otherwise, it +// returns <= 0. The caller should pass the value into |SSL_get_error| to +// determine how to proceed. +// +// In DTLS, the caller must drive retransmissions. Whenever |SSL_get_error| +// signals |SSL_ERROR_WANT_READ|, use |DTLSv1_get_timeout| to determine the +// current timeout. If it expires before the next retry, call +// |DTLSv1_handle_timeout|. Note that DTLS handshake retransmissions use fresh +// sequence numbers, so it is not sufficient to replay packets at the transport. +// +// TODO(davidben): Ensure 0 is only returned on transport EOF. +// https://crbug.com/466303. OPENSSL_EXPORT int SSL_do_handshake(SSL *ssl); -/* SSL_connect configures |ssl| as a client, if unconfigured, and calls - * |SSL_do_handshake|. */ +// SSL_connect configures |ssl| as a client, if unconfigured, and calls +// |SSL_do_handshake|. OPENSSL_EXPORT int SSL_connect(SSL *ssl); -/* SSL_accept configures |ssl| as a server, if unconfigured, and calls - * |SSL_do_handshake|. */ +// SSL_accept configures |ssl| as a server, if unconfigured, and calls +// |SSL_do_handshake|. OPENSSL_EXPORT int SSL_accept(SSL *ssl); -/* SSL_read reads up to |num| bytes from |ssl| into |buf|. It implicitly runs - * any pending handshakes, including renegotiations when enabled. On success, it - * returns the number of bytes read. Otherwise, it returns <= 0. The caller - * should pass the value into |SSL_get_error| to determine how to proceed. - * - * TODO(davidben): Ensure 0 is only returned on transport EOF. - * https://crbug.com/466303. */ +// SSL_read reads up to |num| bytes from |ssl| into |buf|. It implicitly runs +// any pending handshakes, including renegotiations when enabled. On success, it +// returns the number of bytes read. Otherwise, it returns <= 0. The caller +// should pass the value into |SSL_get_error| to determine how to proceed. +// +// TODO(davidben): Ensure 0 is only returned on transport EOF. +// https://crbug.com/466303. 
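The handshake functions above return <= 0 whenever the transport would block, so a non-blocking caller typically loops on |SSL_get_error|. A minimal sketch; WaitReadable and WaitWritable stand in for the application's own poll/select wrapper and are assumptions, not library APIs.

    #include <openssl/ssl.h>

    // Hypothetical helpers provided by the application's event loop.
    int WaitReadable(int fd);
    int WaitWritable(int fd);

    // Drive SSL_do_handshake on a non-blocking socket until it completes.
    // Returns 1 on success, <= 0 on fatal error.
    static int RunHandshake(SSL *ssl, int fd) {
      for (;;) {
        int ret = SSL_do_handshake(ssl);
        if (ret == 1) {
          return 1;  // handshake done (or False Started)
        }
        switch (SSL_get_error(ssl, ret)) {
          case SSL_ERROR_WANT_READ:
            if (!WaitReadable(fd)) return -1;
            break;
          case SSL_ERROR_WANT_WRITE:
            if (!WaitWritable(fd)) return -1;
            break;
          default:
            return ret;  // consult the error queue or errno
        }
      }
    }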
OPENSSL_EXPORT int SSL_read(SSL *ssl, void *buf, int num); -/* SSL_peek behaves like |SSL_read| but does not consume any bytes returned. */ +// SSL_peek behaves like |SSL_read| but does not consume any bytes returned. OPENSSL_EXPORT int SSL_peek(SSL *ssl, void *buf, int num); -/* SSL_pending returns the number of bytes available in |ssl|. It does not read - * from the transport. */ +// SSL_pending returns the number of bytes available in |ssl|. It does not read +// from the transport. OPENSSL_EXPORT int SSL_pending(const SSL *ssl); -/* SSL_write writes up to |num| bytes from |buf| into |ssl|. It implicitly runs - * any pending handshakes, including renegotiations when enabled. On success, it - * returns the number of bytes written. Otherwise, it returns <= 0. The caller - * should pass the value into |SSL_get_error| to determine how to proceed. - * - * In TLS, a non-blocking |SSL_write| differs from non-blocking |write| in that - * a failed |SSL_write| still commits to the data passed in. When retrying, the - * caller must supply the original write buffer (or a larger one containing the - * original as a prefix). By default, retries will fail if they also do not - * reuse the same |buf| pointer. This may be relaxed with - * |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER|, but the buffer contents still must be - * unchanged. - * - * By default, in TLS, |SSL_write| will not return success until all |num| bytes - * are written. This may be relaxed with |SSL_MODE_ENABLE_PARTIAL_WRITE|. It - * allows |SSL_write| to complete with a partial result when only part of the - * input was written in a single record. - * - * In DTLS, neither |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER| and - * |SSL_MODE_ENABLE_PARTIAL_WRITE| do anything. The caller may retry with a - * different buffer freely. A single call to |SSL_write| only ever writes a - * single record in a single packet, so |num| must be at most - * |SSL3_RT_MAX_PLAIN_LENGTH|. - * - * TODO(davidben): Ensure 0 is only returned on transport EOF. - * https://crbug.com/466303. */ +// SSL_write writes up to |num| bytes from |buf| into |ssl|. It implicitly runs +// any pending handshakes, including renegotiations when enabled. On success, it +// returns the number of bytes written. Otherwise, it returns <= 0. The caller +// should pass the value into |SSL_get_error| to determine how to proceed. +// +// In TLS, a non-blocking |SSL_write| differs from non-blocking |write| in that +// a failed |SSL_write| still commits to the data passed in. When retrying, the +// caller must supply the original write buffer (or a larger one containing the +// original as a prefix). By default, retries will fail if they also do not +// reuse the same |buf| pointer. This may be relaxed with +// |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER|, but the buffer contents still must be +// unchanged. +// +// By default, in TLS, |SSL_write| will not return success until all |num| bytes +// are written. This may be relaxed with |SSL_MODE_ENABLE_PARTIAL_WRITE|. It +// allows |SSL_write| to complete with a partial result when only part of the +// input was written in a single record. +// +// In DTLS, neither |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER| and +// |SSL_MODE_ENABLE_PARTIAL_WRITE| do anything. The caller may retry with a +// different buffer freely. A single call to |SSL_write| only ever writes a +// single record in a single packet, so |num| must be at most +// |SSL3_RT_MAX_PLAIN_LENGTH|. +// +// TODO(davidben): Ensure 0 is only returned on transport EOF. +// https://crbug.com/466303. 
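The |SSL_write| retry rules described above (same buffer unless |SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER| is enabled, all-or-nothing unless |SSL_MODE_ENABLE_PARTIAL_WRITE| is enabled) can be wrapped as in the sketch below. It assumes a non-blocking TLS connection with neither mode enabled; WaitReadable and WaitWritable are the same hypothetical event-loop helpers as in the handshake sketch.

    #include <openssl/ssl.h>

    int WaitReadable(int fd);
    int WaitWritable(int fd);

    // Write exactly |len| bytes from |buf|. On retry the *same* buffer is
    // passed again, as SSL_write requires by default.
    static int WriteAll(SSL *ssl, int fd, const void *buf, int len) {
      for (;;) {
        int ret = SSL_write(ssl, buf, len);
        if (ret > 0) {
          return 1;  // with partial writes disabled, all |len| bytes went out
        }
        switch (SSL_get_error(ssl, ret)) {
          case SSL_ERROR_WANT_WRITE:
            if (!WaitWritable(fd)) return 0;
            break;
          case SSL_ERROR_WANT_READ:  // e.g. a renegotiation is in progress
            if (!WaitReadable(fd)) return 0;
            break;
          default:
            return 0;
        }
      }
    }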
OPENSSL_EXPORT int SSL_write(SSL *ssl, const void *buf, int num); -/* SSL_shutdown shuts down |ssl|. On success, it completes in two stages. First, - * it returns 0 if |ssl| completed uni-directional shutdown; close_notify has - * been sent, but the peer's close_notify has not been received. Most callers - * may stop at this point. For bi-directional shutdown, call |SSL_shutdown| - * again. It returns 1 if close_notify has been both sent and received. - * - * If the peer's close_notify arrived first, the first stage is skipped. - * |SSL_shutdown| will return 1 once close_notify is sent and skip 0. Callers - * only interested in uni-directional shutdown must therefore allow for the - * first stage returning either 0 or 1. - * - * |SSL_shutdown| returns -1 on failure. The caller should pass the return value - * into |SSL_get_error| to determine how to proceed. If the underlying |BIO| is - * non-blocking, both stages may require retry. */ +// SSL_shutdown shuts down |ssl|. On success, it completes in two stages. First, +// it returns 0 if |ssl| completed uni-directional shutdown; close_notify has +// been sent, but the peer's close_notify has not been received. Most callers +// may stop at this point. For bi-directional shutdown, call |SSL_shutdown| +// again. It returns 1 if close_notify has been both sent and received. +// +// If the peer's close_notify arrived first, the first stage is skipped. +// |SSL_shutdown| will return 1 once close_notify is sent and skip 0. Callers +// only interested in uni-directional shutdown must therefore allow for the +// first stage returning either 0 or 1. +// +// |SSL_shutdown| returns -1 on failure. The caller should pass the return value +// into |SSL_get_error| to determine how to proceed. If the underlying |BIO| is +// non-blocking, both stages may require retry. OPENSSL_EXPORT int SSL_shutdown(SSL *ssl); -/* SSL_CTX_set_quiet_shutdown sets quiet shutdown on |ctx| to |mode|. If - * enabled, |SSL_shutdown| will not send a close_notify alert or wait for one - * from the peer. It will instead synchronously return one. */ +// SSL_CTX_set_quiet_shutdown sets quiet shutdown on |ctx| to |mode|. If +// enabled, |SSL_shutdown| will not send a close_notify alert or wait for one +// from the peer. It will instead synchronously return one. OPENSSL_EXPORT void SSL_CTX_set_quiet_shutdown(SSL_CTX *ctx, int mode); -/* SSL_CTX_get_quiet_shutdown returns whether quiet shutdown is enabled for - * |ctx|. */ +// SSL_CTX_get_quiet_shutdown returns whether quiet shutdown is enabled for +// |ctx|. OPENSSL_EXPORT int SSL_CTX_get_quiet_shutdown(const SSL_CTX *ctx); -/* SSL_set_quiet_shutdown sets quiet shutdown on |ssl| to |mode|. If enabled, - * |SSL_shutdown| will not send a close_notify alert or wait for one from the - * peer. It will instead synchronously return one. */ +// SSL_set_quiet_shutdown sets quiet shutdown on |ssl| to |mode|. If enabled, +// |SSL_shutdown| will not send a close_notify alert or wait for one from the +// peer. It will instead synchronously return one. OPENSSL_EXPORT void SSL_set_quiet_shutdown(SSL *ssl, int mode); -/* SSL_get_quiet_shutdown returns whether quiet shutdown is enabled for - * |ssl|. */ +// SSL_get_quiet_shutdown returns whether quiet shutdown is enabled for +// |ssl|. OPENSSL_EXPORT int SSL_get_quiet_shutdown(const SSL *ssl); -/* SSL_get_error returns a |SSL_ERROR_*| value for the most recent operation on - * |ssl|. It should be called after an operation failed to determine whether the - * error was fatal and, if not, when to retry. 
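|SSL_shutdown|'s two-stage contract documented above (0 once close_notify is sent, 1 once it is also received, -1 to be routed through |SSL_get_error|) is easy to get wrong. The sketch below shows a caller on a blocking transport performing a full bidirectional shutdown; it is illustrative only.

    #include <openssl/ssl.h>

    // Returns 1 once close_notify has been both sent and received,
    // and a negative value on error.
    static int ShutdownBidirectional(SSL *ssl) {
      int ret = SSL_shutdown(ssl);  // stage one: send close_notify
      if (ret == 0) {
        ret = SSL_shutdown(ssl);    // stage two: wait for the peer's close_notify
      }
      return ret;  // 1 on success; pass negative values to SSL_get_error
    }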
*/ +// SSL_get_error returns a |SSL_ERROR_*| value for the most recent operation on +// |ssl|. It should be called after an operation failed to determine whether the +// error was fatal and, if not, when to retry. OPENSSL_EXPORT int SSL_get_error(const SSL *ssl, int ret_code); -/* SSL_ERROR_NONE indicates the operation succeeded. */ +// SSL_ERROR_NONE indicates the operation succeeded. #define SSL_ERROR_NONE 0 -/* SSL_ERROR_SSL indicates the operation failed within the library. The caller - * may inspect the error queue for more information. */ +// SSL_ERROR_SSL indicates the operation failed within the library. The caller +// may inspect the error queue for more information. #define SSL_ERROR_SSL 1 -/* SSL_ERROR_WANT_READ indicates the operation failed attempting to read from - * the transport. The caller may retry the operation when the transport is ready - * for reading. - * - * If signaled by a DTLS handshake, the caller must also call - * |DTLSv1_get_timeout| and |DTLSv1_handle_timeout| as appropriate. See - * |SSL_do_handshake|. */ +// SSL_ERROR_WANT_READ indicates the operation failed attempting to read from +// the transport. The caller may retry the operation when the transport is ready +// for reading. +// +// If signaled by a DTLS handshake, the caller must also call +// |DTLSv1_get_timeout| and |DTLSv1_handle_timeout| as appropriate. See +// |SSL_do_handshake|. #define SSL_ERROR_WANT_READ 2 -/* SSL_ERROR_WANT_WRITE indicates the operation failed attempting to write to - * the transport. The caller may retry the operation when the transport is ready - * for writing. */ +// SSL_ERROR_WANT_WRITE indicates the operation failed attempting to write to +// the transport. The caller may retry the operation when the transport is ready +// for writing. #define SSL_ERROR_WANT_WRITE 3 -/* SSL_ERROR_WANT_X509_LOOKUP indicates the operation failed in calling the - * |cert_cb| or |client_cert_cb|. The caller may retry the operation when the - * callback is ready to return a certificate or one has been configured - * externally. - * - * See also |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb|. */ +// SSL_ERROR_WANT_X509_LOOKUP indicates the operation failed in calling the +// |cert_cb| or |client_cert_cb|. The caller may retry the operation when the +// callback is ready to return a certificate or one has been configured +// externally. +// +// See also |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb|. #define SSL_ERROR_WANT_X509_LOOKUP 4 -/* SSL_ERROR_SYSCALL indicates the operation failed externally to the library. - * The caller should consult the system-specific error mechanism. This is - * typically |errno| but may be something custom if using a custom |BIO|. It - * may also be signaled if the transport returned EOF, in which case the - * operation's return value will be zero. */ +// SSL_ERROR_SYSCALL indicates the operation failed externally to the library. +// The caller should consult the system-specific error mechanism. This is +// typically |errno| but may be something custom if using a custom |BIO|. It +// may also be signaled if the transport returned EOF, in which case the +// operation's return value will be zero. #define SSL_ERROR_SYSCALL 5 -/* SSL_ERROR_ZERO_RETURN indicates the operation failed because the connection - * was cleanly shut down with a close_notify alert. */ +// SSL_ERROR_ZERO_RETURN indicates the operation failed because the connection +// was cleanly shut down with a close_notify alert. 
#define SSL_ERROR_ZERO_RETURN 6 -/* SSL_ERROR_WANT_CONNECT indicates the operation failed attempting to connect - * the transport (the |BIO| signaled |BIO_RR_CONNECT|). The caller may retry the - * operation when the transport is ready. */ +// SSL_ERROR_WANT_CONNECT indicates the operation failed attempting to connect +// the transport (the |BIO| signaled |BIO_RR_CONNECT|). The caller may retry the +// operation when the transport is ready. #define SSL_ERROR_WANT_CONNECT 7 -/* SSL_ERROR_WANT_ACCEPT indicates the operation failed attempting to accept a - * connection from the transport (the |BIO| signaled |BIO_RR_ACCEPT|). The - * caller may retry the operation when the transport is ready. - * - * TODO(davidben): Remove this. It's used by accept BIOs which are bizarre. */ +// SSL_ERROR_WANT_ACCEPT indicates the operation failed attempting to accept a +// connection from the transport (the |BIO| signaled |BIO_RR_ACCEPT|). The +// caller may retry the operation when the transport is ready. +// +// TODO(davidben): Remove this. It's used by accept BIOs which are bizarre. #define SSL_ERROR_WANT_ACCEPT 8 -/* SSL_ERROR_WANT_CHANNEL_ID_LOOKUP indicates the operation failed looking up - * the Channel ID key. The caller may retry the operation when |channel_id_cb| - * is ready to return a key or one has been configured with - * |SSL_set1_tls_channel_id|. - * - * See also |SSL_CTX_set_channel_id_cb|. */ +// SSL_ERROR_WANT_CHANNEL_ID_LOOKUP indicates the operation failed looking up +// the Channel ID key. The caller may retry the operation when |channel_id_cb| +// is ready to return a key or one has been configured with +// |SSL_set1_tls_channel_id|. +// +// See also |SSL_CTX_set_channel_id_cb|. #define SSL_ERROR_WANT_CHANNEL_ID_LOOKUP 9 -/* SSL_ERROR_PENDING_SESSION indicates the operation failed because the session - * lookup callback indicated the session was unavailable. The caller may retry - * the operation when lookup has completed. - * - * See also |SSL_CTX_sess_set_get_cb| and |SSL_magic_pending_session_ptr|. */ +// SSL_ERROR_PENDING_SESSION indicates the operation failed because the session +// lookup callback indicated the session was unavailable. The caller may retry +// the operation when lookup has completed. +// +// See also |SSL_CTX_sess_set_get_cb| and |SSL_magic_pending_session_ptr|. #define SSL_ERROR_PENDING_SESSION 11 -/* SSL_ERROR_PENDING_CERTIFICATE indicates the operation failed because the - * early callback indicated certificate lookup was incomplete. The caller may - * retry the operation when lookup has completed. Note: when the operation is - * retried, the early callback will not be called a second time. - * - * See also |SSL_CTX_set_select_certificate_cb|. */ +// SSL_ERROR_PENDING_CERTIFICATE indicates the operation failed because the +// early callback indicated certificate lookup was incomplete. The caller may +// retry the operation when lookup has completed. +// +// See also |SSL_CTX_set_select_certificate_cb|. #define SSL_ERROR_PENDING_CERTIFICATE 12 -/* SSL_ERROR_WANT_PRIVATE_KEY_OPERATION indicates the operation failed because - * a private key operation was unfinished. The caller may retry the operation - * when the private key operation is complete. - * - * See also |SSL_set_private_key_method| and - * |SSL_CTX_set_private_key_method|. */ +// SSL_ERROR_WANT_PRIVATE_KEY_OPERATION indicates the operation failed because +// a private key operation was unfinished. The caller may retry the operation +// when the private key operation is complete. 
+// +// See also |SSL_set_private_key_method| and +// |SSL_CTX_set_private_key_method|. #define SSL_ERROR_WANT_PRIVATE_KEY_OPERATION 13 -/* SSL_set_mtu sets the |ssl|'s MTU in DTLS to |mtu|. It returns one on success - * and zero on failure. */ +// SSL_ERROR_PENDING_TICKET indicates that a ticket decryption is pending. The +// caller may retry the operation when the decryption is ready. +// +// See also |SSL_CTX_set_ticket_aead_method|. +#define SSL_ERROR_PENDING_TICKET 14 + +// SSL_ERROR_EARLY_DATA_REJECTED indicates that early data was rejected. The +// caller should treat this as a connection failure and retry any operations +// associated with the rejected early data. |SSL_reset_early_data_reject| may be +// used to reuse the underlying connection for the retry. +#define SSL_ERROR_EARLY_DATA_REJECTED 15 + +// SSL_ERROR_WANT_CERTIFICATE_VERIFY indicates the operation failed because +// certificate verification was incomplete. The caller may retry the operation +// when certificate verification is complete. +// +// See also |SSL_CTX_set_custom_verify|. +#define SSL_ERROR_WANT_CERTIFICATE_VERIFY 16 + +// SSL_set_mtu sets the |ssl|'s MTU in DTLS to |mtu|. It returns one on success +// and zero on failure. OPENSSL_EXPORT int SSL_set_mtu(SSL *ssl, unsigned mtu); -/* DTLSv1_set_initial_timeout_duration sets the initial duration for a DTLS - * handshake timeout. - * - * This duration overrides the default of 1 second, which is the strong - * recommendation of RFC 6347 (see section 4.2.4.1). However, there may exist - * situations where a shorter timeout would be beneficial, such as for - * time-sensitive applications. */ +// DTLSv1_set_initial_timeout_duration sets the initial duration for a DTLS +// handshake timeout. +// +// This duration overrides the default of 1 second, which is the strong +// recommendation of RFC 6347 (see section 4.2.4.1). However, there may exist +// situations where a shorter timeout would be beneficial, such as for +// time-sensitive applications. OPENSSL_EXPORT void DTLSv1_set_initial_timeout_duration(SSL *ssl, unsigned duration_ms); -/* DTLSv1_get_timeout queries the next DTLS handshake timeout. If there is a - * timeout in progress, it sets |*out| to the time remaining and returns one. - * Otherwise, it returns zero. - * - * When the timeout expires, call |DTLSv1_handle_timeout| to handle the - * retransmit behavior. - * - * NOTE: This function must be queried again whenever the handshake state - * machine changes, including when |DTLSv1_handle_timeout| is called. */ +// DTLSv1_get_timeout queries the next DTLS handshake timeout. If there is a +// timeout in progress, it sets |*out| to the time remaining and returns one. +// Otherwise, it returns zero. +// +// When the timeout expires, call |DTLSv1_handle_timeout| to handle the +// retransmit behavior. +// +// NOTE: This function must be queried again whenever the handshake state +// machine changes, including when |DTLSv1_handle_timeout| is called. OPENSSL_EXPORT int DTLSv1_get_timeout(const SSL *ssl, struct timeval *out); -/* DTLSv1_handle_timeout is called when a DTLS handshake timeout expires. If no - * timeout had expired, it returns 0. Otherwise, it retransmits the previous - * flight of handshake messages and returns 1. If too many timeouts had expired - * without progress or an error occurs, it returns -1. - * - * The caller's external timer should be compatible with the one |ssl| queries - * within some fudge factor. 
Otherwise, the call will be a no-op, but - * |DTLSv1_get_timeout| will return an updated timeout. - * - * If the function returns -1, checking if |SSL_get_error| returns - * |SSL_ERROR_WANT_WRITE| may be used to determine if the retransmit failed due - * to a non-fatal error at the write |BIO|. However, the operation may not be - * retried until the next timeout fires. - * - * WARNING: This function breaks the usual return value convention. - * - * TODO(davidben): This |SSL_ERROR_WANT_WRITE| behavior is kind of bizarre. */ +// DTLSv1_handle_timeout is called when a DTLS handshake timeout expires. If no +// timeout had expired, it returns 0. Otherwise, it retransmits the previous +// flight of handshake messages and returns 1. If too many timeouts had expired +// without progress or an error occurs, it returns -1. +// +// The caller's external timer should be compatible with the one |ssl| queries +// within some fudge factor. Otherwise, the call will be a no-op, but +// |DTLSv1_get_timeout| will return an updated timeout. +// +// If the function returns -1, checking if |SSL_get_error| returns +// |SSL_ERROR_WANT_WRITE| may be used to determine if the retransmit failed due +// to a non-fatal error at the write |BIO|. However, the operation may not be +// retried until the next timeout fires. +// +// WARNING: This function breaks the usual return value convention. +// +// TODO(davidben): This |SSL_ERROR_WANT_WRITE| behavior is kind of bizarre. OPENSSL_EXPORT int DTLSv1_handle_timeout(SSL *ssl); -/* Protocol versions. */ +// Protocol versions. #define DTLS1_VERSION_MAJOR 0xfe #define SSL3_VERSION_MAJOR 0x03 @@ -563,54 +592,59 @@ OPENSSL_EXPORT int DTLSv1_handle_timeout(SSL *ssl); #define DTLS1_2_VERSION 0xfefd #define TLS1_3_DRAFT_VERSION 0x7f12 - -/* SSL_CTX_set_min_proto_version sets the minimum protocol version for |ctx| to - * |version|. If |version| is zero, the default minimum version is used. It - * returns one on success and zero if |version| is invalid. */ +#define TLS1_3_DRAFT21_VERSION 0x7f15 +#define TLS1_3_DRAFT22_VERSION 0x7e04 +#define TLS1_3_EXPERIMENT_VERSION 0x7e01 +#define TLS1_3_EXPERIMENT2_VERSION 0x7e02 +#define TLS1_3_EXPERIMENT3_VERSION 0x7e03 + +// SSL_CTX_set_min_proto_version sets the minimum protocol version for |ctx| to +// |version|. If |version| is zero, the default minimum version is used. It +// returns one on success and zero if |version| is invalid. OPENSSL_EXPORT int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, uint16_t version); -/* SSL_CTX_set_max_proto_version sets the maximum protocol version for |ctx| to - * |version|. If |version| is zero, the default maximum version is used. It - * returns one on success and zero if |version| is invalid. */ +// SSL_CTX_set_max_proto_version sets the maximum protocol version for |ctx| to +// |version|. If |version| is zero, the default maximum version is used. It +// returns one on success and zero if |version| is invalid. OPENSSL_EXPORT int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, uint16_t version); -/* SSL_set_min_proto_version sets the minimum protocol version for |ssl| to - * |version|. If |version| is zero, the default minimum version is used. It - * returns one on success and zero if |version| is invalid. */ +// SSL_set_min_proto_version sets the minimum protocol version for |ssl| to +// |version|. If |version| is zero, the default minimum version is used. It +// returns one on success and zero if |version| is invalid. 
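Putting the DTLS timer functions above together: the caller owns the timer, re-queries it after every handshake step, and calls |DTLSv1_handle_timeout| when it fires. The sketch below uses select(2) on POSIX; the socket |fd| and the surrounding handshake loop are assumed to exist, and the WaitOrRetransmit name is illustrative.

    #include <openssl/ssl.h>

    #include <sys/select.h>

    // Wait for either readability or the current DTLS retransmit timeout.
    // Returns 0 on select() failure or fatal retransmit error, 1 otherwise.
    static int WaitOrRetransmit(SSL *ssl, int fd) {
      struct timeval timeout;
      struct timeval *timeout_ptr = NULL;
      if (DTLSv1_get_timeout(ssl, &timeout)) {
        timeout_ptr = &timeout;  // a retransmit timer is currently running
      }
      fd_set read_fds;
      FD_ZERO(&read_fds);
      FD_SET(fd, &read_fds);
      int ret = select(fd + 1, &read_fds, NULL, NULL, timeout_ptr);
      if (ret < 0) {
        return 0;
      }
      if (ret == 0) {
        // Timer fired: retransmit the last flight. -1 means give up.
        return DTLSv1_handle_timeout(ssl) >= 0;
      }
      return 1;  // socket is readable; caller retries SSL_do_handshake
    }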
OPENSSL_EXPORT int SSL_set_min_proto_version(SSL *ssl, uint16_t version); -/* SSL_set_max_proto_version sets the maximum protocol version for |ssl| to - * |version|. If |version| is zero, the default maximum version is used. It - * returns one on success and zero if |version| is invalid. */ +// SSL_set_max_proto_version sets the maximum protocol version for |ssl| to +// |version|. If |version| is zero, the default maximum version is used. It +// returns one on success and zero if |version| is invalid. OPENSSL_EXPORT int SSL_set_max_proto_version(SSL *ssl, uint16_t version); -/* SSL_version returns the TLS or DTLS protocol version used by |ssl|, which is - * one of the |*_VERSION| values. (E.g. |TLS1_2_VERSION|.) Before the version - * is negotiated, the result is undefined. */ +// SSL_version returns the TLS or DTLS protocol version used by |ssl|, which is +// one of the |*_VERSION| values. (E.g. |TLS1_2_VERSION|.) Before the version +// is negotiated, the result is undefined. OPENSSL_EXPORT int SSL_version(const SSL *ssl); -/* Options. - * - * Options configure protocol behavior. */ +// Options. +// +// Options configure protocol behavior. -/* SSL_OP_NO_QUERY_MTU, in DTLS, disables querying the MTU from the underlying - * |BIO|. Instead, the MTU is configured with |SSL_set_mtu|. */ +// SSL_OP_NO_QUERY_MTU, in DTLS, disables querying the MTU from the underlying +// |BIO|. Instead, the MTU is configured with |SSL_set_mtu|. #define SSL_OP_NO_QUERY_MTU 0x00001000L -/* SSL_OP_NO_TICKET disables session ticket support (RFC 5077). */ +// SSL_OP_NO_TICKET disables session ticket support (RFC 5077). #define SSL_OP_NO_TICKET 0x00004000L -/* SSL_OP_CIPHER_SERVER_PREFERENCE configures servers to select ciphers and - * ECDHE curves according to the server's preferences instead of the - * client's. */ +// SSL_OP_CIPHER_SERVER_PREFERENCE configures servers to select ciphers and +// ECDHE curves according to the server's preferences instead of the +// client's. #define SSL_OP_CIPHER_SERVER_PREFERENCE 0x00400000L -/* The following flags toggle individual protocol versions. This is deprecated. - * Use |SSL_CTX_set_min_proto_version| and |SSL_CTX_set_max_proto_version| - * instead. */ +// The following flags toggle individual protocol versions. This is deprecated. +// Use |SSL_CTX_set_min_proto_version| and |SSL_CTX_set_max_proto_version| +// instead. #define SSL_OP_NO_SSLv3 0x02000000L #define SSL_OP_NO_TLSv1 0x04000000L #define SSL_OP_NO_TLSv1_2 0x08000000L @@ -619,314 +653,314 @@ OPENSSL_EXPORT int SSL_version(const SSL *ssl); #define SSL_OP_NO_DTLSv1 SSL_OP_NO_TLSv1 #define SSL_OP_NO_DTLSv1_2 SSL_OP_NO_TLSv1_2 -/* SSL_CTX_set_options enables all options set in |options| (which should be one - * or more of the |SSL_OP_*| values, ORed together) in |ctx|. It returns a - * bitmask representing the resulting enabled options. */ +// SSL_CTX_set_options enables all options set in |options| (which should be one +// or more of the |SSL_OP_*| values, ORed together) in |ctx|. It returns a +// bitmask representing the resulting enabled options. OPENSSL_EXPORT uint32_t SSL_CTX_set_options(SSL_CTX *ctx, uint32_t options); -/* SSL_CTX_clear_options disables all options set in |options| (which should be - * one or more of the |SSL_OP_*| values, ORed together) in |ctx|. It returns a - * bitmask representing the resulting enabled options. */ +// SSL_CTX_clear_options disables all options set in |options| (which should be +// one or more of the |SSL_OP_*| values, ORed together) in |ctx|. 
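A short sketch of the version-pinning setters documented above, requiring TLS 1.2 or newer on a context. TLS1_2_VERSION is the standard constant from this header; passing zero for the maximum keeps the library default, as described above.

    #include <openssl/ssl.h>

    // Returns 1 on success, 0 if a version value is rejected.
    static int RequireTLS12OrLater(SSL_CTX *ctx) {
      if (!SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION)) {
        return 0;
      }
      return SSL_CTX_set_max_proto_version(ctx, 0);  // 0 = default maximum
    }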
It returns a +// bitmask representing the resulting enabled options. OPENSSL_EXPORT uint32_t SSL_CTX_clear_options(SSL_CTX *ctx, uint32_t options); -/* SSL_CTX_get_options returns a bitmask of |SSL_OP_*| values that represent all - * the options enabled for |ctx|. */ +// SSL_CTX_get_options returns a bitmask of |SSL_OP_*| values that represent all +// the options enabled for |ctx|. OPENSSL_EXPORT uint32_t SSL_CTX_get_options(const SSL_CTX *ctx); -/* SSL_set_options enables all options set in |options| (which should be one or - * more of the |SSL_OP_*| values, ORed together) in |ssl|. It returns a bitmask - * representing the resulting enabled options. */ +// SSL_set_options enables all options set in |options| (which should be one or +// more of the |SSL_OP_*| values, ORed together) in |ssl|. It returns a bitmask +// representing the resulting enabled options. OPENSSL_EXPORT uint32_t SSL_set_options(SSL *ssl, uint32_t options); -/* SSL_clear_options disables all options set in |options| (which should be one - * or more of the |SSL_OP_*| values, ORed together) in |ssl|. It returns a - * bitmask representing the resulting enabled options. */ +// SSL_clear_options disables all options set in |options| (which should be one +// or more of the |SSL_OP_*| values, ORed together) in |ssl|. It returns a +// bitmask representing the resulting enabled options. OPENSSL_EXPORT uint32_t SSL_clear_options(SSL *ssl, uint32_t options); -/* SSL_get_options returns a bitmask of |SSL_OP_*| values that represent all the - * options enabled for |ssl|. */ +// SSL_get_options returns a bitmask of |SSL_OP_*| values that represent all the +// options enabled for |ssl|. OPENSSL_EXPORT uint32_t SSL_get_options(const SSL *ssl); -/* Modes. - * - * Modes configure API behavior. */ +// Modes. +// +// Modes configure API behavior. -/* SSL_MODE_ENABLE_PARTIAL_WRITE, in TLS, allows |SSL_write| to complete with a - * partial result when the only part of the input was written in a single - * record. In DTLS, it does nothing. */ +// SSL_MODE_ENABLE_PARTIAL_WRITE, in TLS, allows |SSL_write| to complete with a +// partial result when the only part of the input was written in a single +// record. In DTLS, it does nothing. #define SSL_MODE_ENABLE_PARTIAL_WRITE 0x00000001L -/* SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, in TLS, allows retrying an incomplete - * |SSL_write| with a different buffer. However, |SSL_write| still assumes the - * buffer contents are unchanged. This is not the default to avoid the - * misconception that non-blocking |SSL_write| behaves like non-blocking - * |write|. In DTLS, it does nothing. */ +// SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, in TLS, allows retrying an incomplete +// |SSL_write| with a different buffer. However, |SSL_write| still assumes the +// buffer contents are unchanged. This is not the default to avoid the +// misconception that non-blocking |SSL_write| behaves like non-blocking +// |write|. In DTLS, it does nothing. #define SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER 0x00000002L -/* SSL_MODE_NO_AUTO_CHAIN disables automatically building a certificate chain - * before sending certificates to the peer. This flag is set (and the feature - * disabled) by default. - * TODO(davidben): Remove this behavior. https://crbug.com/boringssl/42. */ +// SSL_MODE_NO_AUTO_CHAIN disables automatically building a certificate chain +// before sending certificates to the peer. This flag is set (and the feature +// disabled) by default. +// TODO(davidben): Remove this behavior. https://crbug.com/boringssl/42. 
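The option setters above all take an ORed |SSL_OP_*| bitmask and return the resulting mask, so enabling and checking options looks like the sketch below (illustrative only):

    #include <openssl/ssl.h>

    #include <cstdint>

    static void ConfigureServerOptions(SSL_CTX *ctx) {
      // Prefer the server's cipher order and disable session tickets.
      SSL_CTX_set_options(ctx,
                          SSL_OP_CIPHER_SERVER_PREFERENCE | SSL_OP_NO_TICKET);
      // The getter returns the full mask of currently enabled options.
      uint32_t opts = SSL_CTX_get_options(ctx);
      (void)(opts & SSL_OP_NO_TICKET);  // non-zero once the option is enabled
    }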
#define SSL_MODE_NO_AUTO_CHAIN 0x00000008L -/* SSL_MODE_ENABLE_FALSE_START allows clients to send application data before - * receipt of ChangeCipherSpec and Finished. This mode enables full handshakes - * to 'complete' in one RTT. See RFC 7918. - * - * When False Start is enabled, |SSL_do_handshake| may succeed before the - * handshake has completely finished. |SSL_write| will function at this point, - * and |SSL_read| will transparently wait for the final handshake leg before - * returning application data. To determine if False Start occurred or when the - * handshake is completely finished, see |SSL_in_false_start|, |SSL_in_init|, - * and |SSL_CB_HANDSHAKE_DONE| from |SSL_CTX_set_info_callback|. */ +// SSL_MODE_ENABLE_FALSE_START allows clients to send application data before +// receipt of ChangeCipherSpec and Finished. This mode enables full handshakes +// to 'complete' in one RTT. See RFC 7918. +// +// When False Start is enabled, |SSL_do_handshake| may succeed before the +// handshake has completely finished. |SSL_write| will function at this point, +// and |SSL_read| will transparently wait for the final handshake leg before +// returning application data. To determine if False Start occurred or when the +// handshake is completely finished, see |SSL_in_false_start|, |SSL_in_init|, +// and |SSL_CB_HANDSHAKE_DONE| from |SSL_CTX_set_info_callback|. #define SSL_MODE_ENABLE_FALSE_START 0x00000080L -/* SSL_MODE_CBC_RECORD_SPLITTING causes multi-byte CBC records in SSL 3.0 and - * TLS 1.0 to be split in two: the first record will contain a single byte and - * the second will contain the remainder. This effectively randomises the IV and - * prevents BEAST attacks. */ +// SSL_MODE_CBC_RECORD_SPLITTING causes multi-byte CBC records in SSL 3.0 and +// TLS 1.0 to be split in two: the first record will contain a single byte and +// the second will contain the remainder. This effectively randomises the IV and +// prevents BEAST attacks. #define SSL_MODE_CBC_RECORD_SPLITTING 0x00000100L -/* SSL_MODE_NO_SESSION_CREATION will cause any attempts to create a session to - * fail with SSL_R_SESSION_MAY_NOT_BE_CREATED. This can be used to enforce that - * session resumption is used for a given SSL*. */ +// SSL_MODE_NO_SESSION_CREATION will cause any attempts to create a session to +// fail with SSL_R_SESSION_MAY_NOT_BE_CREATED. This can be used to enforce that +// session resumption is used for a given SSL*. #define SSL_MODE_NO_SESSION_CREATION 0x00000200L -/* SSL_MODE_SEND_FALLBACK_SCSV sends TLS_FALLBACK_SCSV in the ClientHello. - * To be set only by applications that reconnect with a downgraded protocol - * version; see RFC 7507 for details. - * - * DO NOT ENABLE THIS if your application attempts a normal handshake. Only use - * this in explicit fallback retries, following the guidance in RFC 7507. */ +// SSL_MODE_SEND_FALLBACK_SCSV sends TLS_FALLBACK_SCSV in the ClientHello. +// To be set only by applications that reconnect with a downgraded protocol +// version; see RFC 7507 for details. +// +// DO NOT ENABLE THIS if your application attempts a normal handshake. Only use +// this in explicit fallback retries, following the guidance in RFC 7507. #define SSL_MODE_SEND_FALLBACK_SCSV 0x00000400L -/* SSL_CTX_set_mode enables all modes set in |mode| (which should be one or more - * of the |SSL_MODE_*| values, ORed together) in |ctx|. It returns a bitmask - * representing the resulting enabled modes. 
*/ +// SSL_CTX_set_mode enables all modes set in |mode| (which should be one or more +// of the |SSL_MODE_*| values, ORed together) in |ctx|. It returns a bitmask +// representing the resulting enabled modes. OPENSSL_EXPORT uint32_t SSL_CTX_set_mode(SSL_CTX *ctx, uint32_t mode); -/* SSL_CTX_clear_mode disables all modes set in |mode| (which should be one or - * more of the |SSL_MODE_*| values, ORed together) in |ctx|. It returns a - * bitmask representing the resulting enabled modes. */ +// SSL_CTX_clear_mode disables all modes set in |mode| (which should be one or +// more of the |SSL_MODE_*| values, ORed together) in |ctx|. It returns a +// bitmask representing the resulting enabled modes. OPENSSL_EXPORT uint32_t SSL_CTX_clear_mode(SSL_CTX *ctx, uint32_t mode); -/* SSL_CTX_get_mode returns a bitmask of |SSL_MODE_*| values that represent all - * the modes enabled for |ssl|. */ +// SSL_CTX_get_mode returns a bitmask of |SSL_MODE_*| values that represent all +// the modes enabled for |ssl|. OPENSSL_EXPORT uint32_t SSL_CTX_get_mode(const SSL_CTX *ctx); -/* SSL_set_mode enables all modes set in |mode| (which should be one or more of - * the |SSL_MODE_*| values, ORed together) in |ssl|. It returns a bitmask - * representing the resulting enabled modes. */ +// SSL_set_mode enables all modes set in |mode| (which should be one or more of +// the |SSL_MODE_*| values, ORed together) in |ssl|. It returns a bitmask +// representing the resulting enabled modes. OPENSSL_EXPORT uint32_t SSL_set_mode(SSL *ssl, uint32_t mode); -/* SSL_clear_mode disables all modes set in |mode| (which should be one or more - * of the |SSL_MODE_*| values, ORed together) in |ssl|. It returns a bitmask - * representing the resulting enabled modes. */ +// SSL_clear_mode disables all modes set in |mode| (which should be one or more +// of the |SSL_MODE_*| values, ORed together) in |ssl|. It returns a bitmask +// representing the resulting enabled modes. OPENSSL_EXPORT uint32_t SSL_clear_mode(SSL *ssl, uint32_t mode); -/* SSL_get_mode returns a bitmask of |SSL_MODE_*| values that represent all the - * modes enabled for |ssl|. */ +// SSL_get_mode returns a bitmask of |SSL_MODE_*| values that represent all the +// modes enabled for |ssl|. OPENSSL_EXPORT uint32_t SSL_get_mode(const SSL *ssl); -/* SSL_CTX_set0_buffer_pool sets a |CRYPTO_BUFFER_POOL| that will be used to - * store certificates. This can allow multiple connections to share - * certificates and thus save memory. - * - * The SSL_CTX does not take ownership of |pool| and the caller must ensure - * that |pool| outlives |ctx| and all objects linked to it, including |SSL|, - * |X509| and |SSL_SESSION| objects. Basically, don't ever free |pool|. */ +// SSL_CTX_set0_buffer_pool sets a |CRYPTO_BUFFER_POOL| that will be used to +// store certificates. This can allow multiple connections to share +// certificates and thus save memory. +// +// The SSL_CTX does not take ownership of |pool| and the caller must ensure +// that |pool| outlives |ctx| and all objects linked to it, including |SSL|, +// |X509| and |SSL_SESSION| objects. Basically, don't ever free |pool|. OPENSSL_EXPORT void SSL_CTX_set0_buffer_pool(SSL_CTX *ctx, CRYPTO_BUFFER_POOL *pool); -/* Configuring certificates and private keys. - * - * These functions configure the connection's leaf certificate, private key, and - * certificate chain. The certificate chain is ordered leaf to root (as sent on - * the wire) but does not include the leaf. Both client and server certificates - * use these functions. 
- * - * Certificates and keys may be configured before the handshake or dynamically - * in the early callback and certificate callback. */ +// Configuring certificates and private keys. +// +// These functions configure the connection's leaf certificate, private key, and +// certificate chain. The certificate chain is ordered leaf to root (as sent on +// the wire) but does not include the leaf. Both client and server certificates +// use these functions. +// +// Certificates and keys may be configured before the handshake or dynamically +// in the early callback and certificate callback. -/* SSL_CTX_use_certificate sets |ctx|'s leaf certificate to |x509|. It returns - * one on success and zero on failure. */ +// SSL_CTX_use_certificate sets |ctx|'s leaf certificate to |x509|. It returns +// one on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_use_certificate(SSL_CTX *ctx, X509 *x509); -/* SSL_use_certificate sets |ssl|'s leaf certificate to |x509|. It returns one - * on success and zero on failure. */ +// SSL_use_certificate sets |ssl|'s leaf certificate to |x509|. It returns one +// on success and zero on failure. OPENSSL_EXPORT int SSL_use_certificate(SSL *ssl, X509 *x509); -/* SSL_CTX_use_PrivateKey sets |ctx|'s private key to |pkey|. It returns one on - * success and zero on failure. */ +// SSL_CTX_use_PrivateKey sets |ctx|'s private key to |pkey|. It returns one on +// success and zero on failure. OPENSSL_EXPORT int SSL_CTX_use_PrivateKey(SSL_CTX *ctx, EVP_PKEY *pkey); -/* SSL_use_PrivateKey sets |ssl|'s private key to |pkey|. It returns one on - * success and zero on failure. */ +// SSL_use_PrivateKey sets |ssl|'s private key to |pkey|. It returns one on +// success and zero on failure. OPENSSL_EXPORT int SSL_use_PrivateKey(SSL *ssl, EVP_PKEY *pkey); -/* SSL_CTX_set0_chain sets |ctx|'s certificate chain, excluding the leaf, to - * |chain|. On success, it returns one and takes ownership of |chain|. - * Otherwise, it returns zero. */ +// SSL_CTX_set0_chain sets |ctx|'s certificate chain, excluding the leaf, to +// |chain|. On success, it returns one and takes ownership of |chain|. +// Otherwise, it returns zero. OPENSSL_EXPORT int SSL_CTX_set0_chain(SSL_CTX *ctx, STACK_OF(X509) *chain); -/* SSL_CTX_set1_chain sets |ctx|'s certificate chain, excluding the leaf, to - * |chain|. It returns one on success and zero on failure. The caller retains - * ownership of |chain| and may release it freely. */ +// SSL_CTX_set1_chain sets |ctx|'s certificate chain, excluding the leaf, to +// |chain|. It returns one on success and zero on failure. The caller retains +// ownership of |chain| and may release it freely. OPENSSL_EXPORT int SSL_CTX_set1_chain(SSL_CTX *ctx, STACK_OF(X509) *chain); -/* SSL_set0_chain sets |ssl|'s certificate chain, excluding the leaf, to - * |chain|. On success, it returns one and takes ownership of |chain|. - * Otherwise, it returns zero. */ +// SSL_set0_chain sets |ssl|'s certificate chain, excluding the leaf, to +// |chain|. On success, it returns one and takes ownership of |chain|. +// Otherwise, it returns zero. OPENSSL_EXPORT int SSL_set0_chain(SSL *ssl, STACK_OF(X509) *chain); -/* SSL_set1_chain sets |ssl|'s certificate chain, excluding the leaf, to - * |chain|. It returns one on success and zero on failure. The caller retains - * ownership of |chain| and may release it freely. */ +// SSL_set1_chain sets |ssl|'s certificate chain, excluding the leaf, to +// |chain|. It returns one on success and zero on failure. 
The caller retains +// ownership of |chain| and may release it freely. OPENSSL_EXPORT int SSL_set1_chain(SSL *ssl, STACK_OF(X509) *chain); -/* SSL_CTX_add0_chain_cert appends |x509| to |ctx|'s certificate chain. On - * success, it returns one and takes ownership of |x509|. Otherwise, it returns - * zero. */ +// SSL_CTX_add0_chain_cert appends |x509| to |ctx|'s certificate chain. On +// success, it returns one and takes ownership of |x509|. Otherwise, it returns +// zero. OPENSSL_EXPORT int SSL_CTX_add0_chain_cert(SSL_CTX *ctx, X509 *x509); -/* SSL_CTX_add1_chain_cert appends |x509| to |ctx|'s certificate chain. It - * returns one on success and zero on failure. The caller retains ownership of - * |x509| and may release it freely. */ +// SSL_CTX_add1_chain_cert appends |x509| to |ctx|'s certificate chain. It +// returns one on success and zero on failure. The caller retains ownership of +// |x509| and may release it freely. OPENSSL_EXPORT int SSL_CTX_add1_chain_cert(SSL_CTX *ctx, X509 *x509); -/* SSL_add0_chain_cert appends |x509| to |ctx|'s certificate chain. On success, - * it returns one and takes ownership of |x509|. Otherwise, it returns zero. */ +// SSL_add0_chain_cert appends |x509| to |ctx|'s certificate chain. On success, +// it returns one and takes ownership of |x509|. Otherwise, it returns zero. OPENSSL_EXPORT int SSL_add0_chain_cert(SSL *ssl, X509 *x509); -/* SSL_CTX_add_extra_chain_cert calls |SSL_CTX_add0_chain_cert|. */ +// SSL_CTX_add_extra_chain_cert calls |SSL_CTX_add0_chain_cert|. OPENSSL_EXPORT int SSL_CTX_add_extra_chain_cert(SSL_CTX *ctx, X509 *x509); -/* SSL_add1_chain_cert appends |x509| to |ctx|'s certificate chain. It returns - * one on success and zero on failure. The caller retains ownership of |x509| - * and may release it freely. */ +// SSL_add1_chain_cert appends |x509| to |ctx|'s certificate chain. It returns +// one on success and zero on failure. The caller retains ownership of |x509| +// and may release it freely. OPENSSL_EXPORT int SSL_add1_chain_cert(SSL *ssl, X509 *x509); -/* SSL_CTX_clear_chain_certs clears |ctx|'s certificate chain and returns - * one. */ +// SSL_CTX_clear_chain_certs clears |ctx|'s certificate chain and returns +// one. OPENSSL_EXPORT int SSL_CTX_clear_chain_certs(SSL_CTX *ctx); -/* SSL_CTX_clear_extra_chain_certs calls |SSL_CTX_clear_chain_certs|. */ +// SSL_CTX_clear_extra_chain_certs calls |SSL_CTX_clear_chain_certs|. OPENSSL_EXPORT int SSL_CTX_clear_extra_chain_certs(SSL_CTX *ctx); -/* SSL_clear_chain_certs clears |ssl|'s certificate chain and returns one. */ +// SSL_clear_chain_certs clears |ssl|'s certificate chain and returns one. OPENSSL_EXPORT int SSL_clear_chain_certs(SSL *ssl); -/* SSL_CTX_set_cert_cb sets a callback that is called to select a certificate. - * The callback returns one on success, zero on internal error, and a negative - * number on failure or to pause the handshake. If the handshake is paused, - * |SSL_get_error| will return |SSL_ERROR_WANT_X509_LOOKUP|. - * - * On the client, the callback may call |SSL_get0_certificate_types| and - * |SSL_get_client_CA_list| for information on the server's certificate - * request. - * - * On the server, the callback will be called on non-resumption handshakes, - * after extensions have been processed. */ +// SSL_CTX_set_cert_cb sets a callback that is called to select a certificate. +// The callback returns one on success, zero on internal error, and a negative +// number on failure or to pause the handshake. 
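Tying together the certificate, key, and chain setters documented above: the sketch below configures a context from objects the caller has already parsed (how |leaf|, |key|, and |intermediate| were loaded is out of scope here), using the "1" variants so the caller keeps ownership, and finishing with SSL_CTX_check_private_key, documented just below.

    #include <openssl/ssl.h>

    // Returns 1 on success, 0 on failure. The caller retains ownership of
    // |leaf|, |key|, and |intermediate| and may free them afterwards.
    static int ConfigureServerCert(SSL_CTX *ctx, X509 *leaf, EVP_PKEY *key,
                                   X509 *intermediate) {
      if (!SSL_CTX_use_certificate(ctx, leaf) ||
          !SSL_CTX_use_PrivateKey(ctx, key) ||
          !SSL_CTX_add1_chain_cert(ctx, intermediate)) {
        return 0;
      }
      // Sanity-check that the key matches the configured leaf certificate.
      return SSL_CTX_check_private_key(ctx);
    }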
If the handshake is paused, +// |SSL_get_error| will return |SSL_ERROR_WANT_X509_LOOKUP|. +// +// On the client, the callback may call |SSL_get0_certificate_types| and +// |SSL_get_client_CA_list| for information on the server's certificate +// request. +// +// On the server, the callback will be called on non-resumption handshakes, +// after extensions have been processed. OPENSSL_EXPORT void SSL_CTX_set_cert_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, void *arg), void *arg); -/* SSL_set_cert_cb sets a callback that is called to select a certificate. The - * callback returns one on success, zero on internal error, and a negative - * number on failure or to pause the handshake. If the handshake is paused, - * |SSL_get_error| will return |SSL_ERROR_WANT_X509_LOOKUP|. - * - * On the client, the callback may call |SSL_get0_certificate_types| and - * |SSL_get_client_CA_list| for information on the server's certificate - * request. */ +// SSL_set_cert_cb sets a callback that is called to select a certificate. The +// callback returns one on success, zero on internal error, and a negative +// number on failure or to pause the handshake. If the handshake is paused, +// |SSL_get_error| will return |SSL_ERROR_WANT_X509_LOOKUP|. +// +// On the client, the callback may call |SSL_get0_certificate_types| and +// |SSL_get_client_CA_list| for information on the server's certificate +// request. OPENSSL_EXPORT void SSL_set_cert_cb(SSL *ssl, int (*cb)(SSL *ssl, void *arg), void *arg); -/* SSL_get0_certificate_types, for a client, sets |*out_types| to an array - * containing the client certificate types requested by a server. It returns the - * length of the array. - * - * The behavior of this function is undefined except during the callbacks set by - * by |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb| or when the - * handshake is paused because of them. */ +// SSL_get0_certificate_types, for a client, sets |*out_types| to an array +// containing the client certificate types requested by a server. It returns the +// length of the array. +// +// The behavior of this function is undefined except during the callbacks set by +// by |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb| or when the +// handshake is paused because of them. OPENSSL_EXPORT size_t SSL_get0_certificate_types(SSL *ssl, const uint8_t **out_types); -/* SSL_certs_clear resets the private key, leaf certificate, and certificate - * chain of |ssl|. */ +// SSL_certs_clear resets the private key, leaf certificate, and certificate +// chain of |ssl|. OPENSSL_EXPORT void SSL_certs_clear(SSL *ssl); -/* SSL_CTX_check_private_key returns one if the certificate and private key - * configured in |ctx| are consistent and zero otherwise. */ +// SSL_CTX_check_private_key returns one if the certificate and private key +// configured in |ctx| are consistent and zero otherwise. OPENSSL_EXPORT int SSL_CTX_check_private_key(const SSL_CTX *ctx); -/* SSL_check_private_key returns one if the certificate and private key - * configured in |ssl| are consistent and zero otherwise. */ +// SSL_check_private_key returns one if the certificate and private key +// configured in |ssl| are consistent and zero otherwise. OPENSSL_EXPORT int SSL_check_private_key(const SSL *ssl); -/* SSL_CTX_get0_certificate returns |ctx|'s leaf certificate. */ +// SSL_CTX_get0_certificate returns |ctx|'s leaf certificate. OPENSSL_EXPORT X509 *SSL_CTX_get0_certificate(const SSL_CTX *ctx); -/* SSL_get_certificate returns |ssl|'s leaf certificate. 
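A sketch of the certificate-callback flow described above: returning a negative value pauses the handshake, the caller then sees |SSL_ERROR_WANT_X509_LOOKUP| from |SSL_get_error|, and the handshake is retried once a certificate is available. The CertState struct, its still_loading flag, and the SelectCert name are illustrative, not part of the library.

    #include <openssl/ssl.h>

    struct CertState {
      int still_loading;  // set by some asynchronous loader (illustrative)
    };

    // Called during the handshake to pick a certificate.
    static int SelectCert(SSL *ssl, void *arg) {
      struct CertState *state = (struct CertState *)arg;
      if (state->still_loading) {
        return -1;  // pause; SSL_get_error reports SSL_ERROR_WANT_X509_LOOKUP
      }
      // ... configure the certificate on |ssl| here, e.g. SSL_use_certificate ...
      return 1;  // success
    }

    static void InstallCertCallback(SSL_CTX *ctx, struct CertState *state) {
      SSL_CTX_set_cert_cb(ctx, SelectCert, state);
    }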
*/ +// SSL_get_certificate returns |ssl|'s leaf certificate. OPENSSL_EXPORT X509 *SSL_get_certificate(const SSL *ssl); -/* SSL_CTX_get0_privatekey returns |ctx|'s private key. */ +// SSL_CTX_get0_privatekey returns |ctx|'s private key. OPENSSL_EXPORT EVP_PKEY *SSL_CTX_get0_privatekey(const SSL_CTX *ctx); -/* SSL_get_privatekey returns |ssl|'s private key. */ +// SSL_get_privatekey returns |ssl|'s private key. OPENSSL_EXPORT EVP_PKEY *SSL_get_privatekey(const SSL *ssl); -/* SSL_CTX_get0_chain_certs sets |*out_chain| to |ctx|'s certificate chain and - * returns one. */ +// SSL_CTX_get0_chain_certs sets |*out_chain| to |ctx|'s certificate chain and +// returns one. OPENSSL_EXPORT int SSL_CTX_get0_chain_certs(const SSL_CTX *ctx, STACK_OF(X509) **out_chain); -/* SSL_CTX_get_extra_chain_certs calls |SSL_CTX_get0_chain_certs|. */ +// SSL_CTX_get_extra_chain_certs calls |SSL_CTX_get0_chain_certs|. OPENSSL_EXPORT int SSL_CTX_get_extra_chain_certs(const SSL_CTX *ctx, STACK_OF(X509) **out_chain); -/* SSL_get0_chain_certs sets |*out_chain| to |ssl|'s certificate chain and - * returns one. */ +// SSL_get0_chain_certs sets |*out_chain| to |ssl|'s certificate chain and +// returns one. OPENSSL_EXPORT int SSL_get0_chain_certs(const SSL *ssl, STACK_OF(X509) **out_chain); -/* SSL_CTX_set_signed_cert_timestamp_list sets the list of signed certificate - * timestamps that is sent to clients that request it. The |list| argument must - * contain one or more SCT structures serialised as a SignedCertificateTimestamp - * List (see https://tools.ietf.org/html/rfc6962#section-3.3) – i.e. each SCT - * is prefixed by a big-endian, uint16 length and the concatenation of one or - * more such prefixed SCTs are themselves also prefixed by a uint16 length. It - * returns one on success and zero on error. The caller retains ownership of - * |list|. */ +// SSL_CTX_set_signed_cert_timestamp_list sets the list of signed certificate +// timestamps that is sent to clients that request it. The |list| argument must +// contain one or more SCT structures serialised as a SignedCertificateTimestamp +// List (see https://tools.ietf.org/html/rfc6962#section-3.3) – i.e. each SCT +// is prefixed by a big-endian, uint16 length and the concatenation of one or +// more such prefixed SCTs are themselves also prefixed by a uint16 length. It +// returns one on success and zero on error. The caller retains ownership of +// |list|. OPENSSL_EXPORT int SSL_CTX_set_signed_cert_timestamp_list(SSL_CTX *ctx, const uint8_t *list, size_t list_len); -/* SSL_set_signed_cert_timestamp_list sets the list of signed certificate - * timestamps that is sent to clients that request is. The same format as the - * one used for |SSL_CTX_set_signed_cert_timestamp_list| applies. The caller - * retains ownership of |list|. */ +// SSL_set_signed_cert_timestamp_list sets the list of signed certificate +// timestamps that is sent to clients that request is. The same format as the +// one used for |SSL_CTX_set_signed_cert_timestamp_list| applies. The caller +// retains ownership of |list|. OPENSSL_EXPORT int SSL_set_signed_cert_timestamp_list(SSL *ctx, const uint8_t *list, size_t list_len); -/* SSL_CTX_set_ocsp_response sets the OCSP response that is sent to clients - * which request it. It returns one on success and zero on error. The caller - * retains ownership of |response|. */ +// SSL_CTX_set_ocsp_response sets the OCSP response that is sent to clients +// which request it. It returns one on success and zero on error. The caller +// retains ownership of |response|. 
OPENSSL_EXPORT int SSL_CTX_set_ocsp_response(SSL_CTX *ctx, const uint8_t *response, size_t response_len); -/* SSL_set_ocsp_response sets the OCSP response that is sent to clients which - * request it. It returns one on success and zero on error. The caller retains - * ownership of |response|. */ +// SSL_set_ocsp_response sets the OCSP response that is sent to clients which +// request it. It returns one on success and zero on error. The caller retains +// ownership of |response|. OPENSSL_EXPORT int SSL_set_ocsp_response(SSL *ssl, const uint8_t *response, size_t response_len); -/* SSL_SIGN_* are signature algorithm values as defined in TLS 1.3. */ +// SSL_SIGN_* are signature algorithm values as defined in TLS 1.3. #define SSL_SIGN_RSA_PKCS1_SHA1 0x0201 #define SSL_SIGN_RSA_PKCS1_SHA256 0x0401 #define SSL_SIGN_RSA_PKCS1_SHA384 0x0501 @@ -938,42 +972,78 @@ OPENSSL_EXPORT int SSL_set_ocsp_response(SSL *ssl, #define SSL_SIGN_RSA_PSS_SHA256 0x0804 #define SSL_SIGN_RSA_PSS_SHA384 0x0805 #define SSL_SIGN_RSA_PSS_SHA512 0x0806 +#define SSL_SIGN_ED25519 0x0807 -/* SSL_SIGN_RSA_PKCS1_MD5_SHA1 is an internal signature algorithm used to - * specify raw RSASSA-PKCS1-v1_5 with an MD5/SHA-1 concatenation, as used in TLS - * before TLS 1.2. */ +// SSL_SIGN_RSA_PKCS1_MD5_SHA1 is an internal signature algorithm used to +// specify raw RSASSA-PKCS1-v1_5 with an MD5/SHA-1 concatenation, as used in TLS +// before TLS 1.2. #define SSL_SIGN_RSA_PKCS1_MD5_SHA1 0xff01 -/* SSL_CTX_set_signing_algorithm_prefs configures |ctx| to use |prefs| as the - * preference list when signing with |ctx|'s private key. It returns one on - * success and zero on error. |prefs| should not include the internal-only value - * |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. */ +// SSL_get_signature_algorithm_name returns a human-readable name for |sigalg|, +// or NULL if unknown. If |include_curve| is one, the curve for ECDSA algorithms +// is included as in TLS 1.3. Otherwise, it is excluded as in TLS 1.2. +OPENSSL_EXPORT const char *SSL_get_signature_algorithm_name(uint16_t sigalg, + int include_curve); + +// SSL_get_signature_algorithm_key_type returns the key type associated with +// |sigalg| as an |EVP_PKEY_*| constant or |EVP_PKEY_NONE| if unknown. +OPENSSL_EXPORT int SSL_get_signature_algorithm_key_type(uint16_t sigalg); + +// SSL_get_signature_algorithm_digest returns the digest function associated +// with |sigalg| or |NULL| if |sigalg| has no prehash (Ed25519) or is unknown. +OPENSSL_EXPORT const EVP_MD *SSL_get_signature_algorithm_digest( + uint16_t sigalg); + +// SSL_is_signature_algorithm_rsa_pss returns one if |sigalg| is an RSA-PSS +// signature algorithm and zero otherwise. +OPENSSL_EXPORT int SSL_is_signature_algorithm_rsa_pss(uint16_t sigalg); + +// SSL_CTX_set_signing_algorithm_prefs configures |ctx| to use |prefs| as the +// preference list when signing with |ctx|'s private key. It returns one on +// success and zero on error. |prefs| should not include the internal-only value +// |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. OPENSSL_EXPORT int SSL_CTX_set_signing_algorithm_prefs(SSL_CTX *ctx, const uint16_t *prefs, size_t num_prefs); -/* SSL_set_signing_algorithm_prefs configures |ssl| to use |prefs| as the - * preference list when signing with |ssl|'s private key. It returns one on - * success and zero on error. |prefs| should not include the internal-only value - * |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. */ +// SSL_set_signing_algorithm_prefs configures |ssl| to use |prefs| as the +// preference list when signing with |ssl|'s private key. 
It returns one on +// success and zero on error. |prefs| should not include the internal-only value +// |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. OPENSSL_EXPORT int SSL_set_signing_algorithm_prefs(SSL *ssl, const uint16_t *prefs, size_t num_prefs); -/* Certificate and private key convenience functions. */ +// Certificate and private key convenience functions. -/* SSL_CTX_use_RSAPrivateKey sets |ctx|'s private key to |rsa|. It returns one - * on success and zero on failure. */ +// SSL_CTX_set_chain_and_key sets the certificate chain and private key for a +// TLS client or server. References to the given |CRYPTO_BUFFER| and |EVP_PKEY| +// objects are added as needed. Exactly one of |privkey| or |privkey_method| +// may be non-NULL. Returns one on success and zero on error. +OPENSSL_EXPORT int SSL_CTX_set_chain_and_key( + SSL_CTX *ctx, CRYPTO_BUFFER *const *certs, size_t num_certs, + EVP_PKEY *privkey, const SSL_PRIVATE_KEY_METHOD *privkey_method); + +// SSL_set_chain_and_key sets the certificate chain and private key for a TLS +// client or server. References to the given |CRYPTO_BUFFER| and |EVP_PKEY| +// objects are added as needed. Exactly one of |privkey| or |privkey_method| +// may be non-NULL. Returns one on success and zero on error. +OPENSSL_EXPORT int SSL_set_chain_and_key( + SSL *ssl, CRYPTO_BUFFER *const *certs, size_t num_certs, EVP_PKEY *privkey, + const SSL_PRIVATE_KEY_METHOD *privkey_method); + +// SSL_CTX_use_RSAPrivateKey sets |ctx|'s private key to |rsa|. It returns one +// on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_use_RSAPrivateKey(SSL_CTX *ctx, RSA *rsa); -/* SSL_use_RSAPrivateKey sets |ctx|'s private key to |rsa|. It returns one on - * success and zero on failure. */ +// SSL_use_RSAPrivateKey sets |ctx|'s private key to |rsa|. It returns one on +// success and zero on failure. OPENSSL_EXPORT int SSL_use_RSAPrivateKey(SSL *ssl, RSA *rsa); -/* The following functions configure certificates or private keys but take as - * input DER-encoded structures. They return one on success and zero on - * failure. */ +// The following functions configure certificates or private keys but take as +// input DER-encoded structures. They return one on success and zero on +// failure. OPENSSL_EXPORT int SSL_CTX_use_certificate_ASN1(SSL_CTX *ctx, size_t der_len, const uint8_t *der); @@ -992,13 +1062,13 @@ OPENSSL_EXPORT int SSL_CTX_use_RSAPrivateKey_ASN1(SSL_CTX *ctx, OPENSSL_EXPORT int SSL_use_RSAPrivateKey_ASN1(SSL *ssl, const uint8_t *der, size_t der_len); -/* The following functions configure certificates or private keys but take as - * input files to read from. They return one on success and zero on failure. The - * |type| parameter is one of the |SSL_FILETYPE_*| values and determines whether - * the file's contents are read as PEM or DER. */ +// The following functions configure certificates or private keys but take as +// input files to read from. They return one on success and zero on failure. The +// |type| parameter is one of the |SSL_FILETYPE_*| values and determines whether +// the file's contents are read as PEM or DER. 
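A minimal usage sketch, not part of the vendored header: configuring the signing preferences documented above on an existing SSL_CTX, putting the newly added SSL_SIGN_ED25519 value first. Error handling beyond the return value is omitted.

#include <openssl/ssl.h>

static int configure_signing_prefs(SSL_CTX *ctx) {
  static const uint16_t kPrefs[] = {
      SSL_SIGN_ED25519,
      SSL_SIGN_RSA_PSS_SHA256,
      SSL_SIGN_RSA_PKCS1_SHA256,
  };
  // Returns one on success and zero on error, as documented above.
  return SSL_CTX_set_signing_algorithm_prefs(
      ctx, kPrefs, sizeof(kPrefs) / sizeof(kPrefs[0]));
}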
-#define SSL_FILETYPE_ASN1 X509_FILETYPE_ASN1 -#define SSL_FILETYPE_PEM X509_FILETYPE_PEM +#define SSL_FILETYPE_PEM 1 +#define SSL_FILETYPE_ASN1 2 OPENSSL_EXPORT int SSL_CTX_use_RSAPrivateKey_file(SSL_CTX *ctx, const char *file, @@ -1016,25 +1086,34 @@ OPENSSL_EXPORT int SSL_CTX_use_PrivateKey_file(SSL_CTX *ctx, const char *file, OPENSSL_EXPORT int SSL_use_PrivateKey_file(SSL *ssl, const char *file, int type); -/* SSL_CTX_use_certificate_chain_file configures certificates for |ctx|. It - * reads the contents of |file| as a PEM-encoded leaf certificate followed - * optionally by the certificate chain to send to the peer. It returns one on - * success and zero on failure. */ +// SSL_CTX_use_certificate_chain_file configures certificates for |ctx|. It +// reads the contents of |file| as a PEM-encoded leaf certificate followed +// optionally by the certificate chain to send to the peer. It returns one on +// success and zero on failure. OPENSSL_EXPORT int SSL_CTX_use_certificate_chain_file(SSL_CTX *ctx, const char *file); -/* SSL_CTX_set_default_passwd_cb sets the password callback for PEM-based - * convenience functions called on |ctx|. */ +// SSL_CTX_set_default_passwd_cb sets the password callback for PEM-based +// convenience functions called on |ctx|. OPENSSL_EXPORT void SSL_CTX_set_default_passwd_cb(SSL_CTX *ctx, pem_password_cb *cb); -/* SSL_CTX_set_default_passwd_cb_userdata sets the userdata parameter for - * |ctx|'s password callback. */ +// SSL_CTX_get_default_passwd_cb returns the callback set by +// |SSL_CTX_set_default_passwd_cb|. +OPENSSL_EXPORT pem_password_cb *SSL_CTX_get_default_passwd_cb( + const SSL_CTX *ctx); + +// SSL_CTX_set_default_passwd_cb_userdata sets the userdata parameter for +// |ctx|'s password callback. OPENSSL_EXPORT void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX *ctx, void *data); +// SSL_CTX_get_default_passwd_cb_userdata returns the userdata parameter set by +// |SSL_CTX_set_default_passwd_cb_userdata|. +OPENSSL_EXPORT void *SSL_CTX_get_default_passwd_cb_userdata(const SSL_CTX *ctx); -/* Custom private keys. */ + +// Custom private keys. enum ssl_private_key_result_t { ssl_private_key_success, @@ -1042,1574 +1121,1682 @@ enum ssl_private_key_result_t { ssl_private_key_failure, }; -/* SSL_PRIVATE_KEY_METHOD describes private key hooks. This is used to off-load - * signing operations to a custom, potentially asynchronous, backend. */ -typedef struct ssl_private_key_method_st { - /* type returns the type of the key used by |ssl|. For RSA keys, return - * |NID_rsaEncryption|. For ECDSA keys, return |NID_X9_62_prime256v1|, - * |NID_secp384r1|, or |NID_secp521r1|, depending on the curve. - * - * Returning |EVP_PKEY_EC| for ECDSA keys is deprecated and may result in - * connection failures in TLS 1.3. */ - int (*type)(SSL *ssl); - - /* max_signature_len returns the maximum length of a signature signed by the - * key used by |ssl|. This must be a constant value for a given |ssl|. */ - size_t (*max_signature_len)(SSL *ssl); - - /* sign signs the message |in| in using the specified signature algorithm. On - * success, it returns |ssl_private_key_success| and writes at most |max_out| - * bytes of signature data to |out| and sets |*out_len| to the number of bytes - * written. On failure, it returns |ssl_private_key_failure|. If the operation - * has not completed, it returns |ssl_private_key_retry|. |sign| should - * arrange for the high-level operation on |ssl| to be retried when the - * operation is completed. This will result in a call to |complete|. 
- * - * |signature_algorithm| is one of the |SSL_SIGN_*| values, as defined in TLS - * 1.3. Note that, in TLS 1.2, ECDSA algorithms do not require that curve - * sizes match hash sizes, so the curve portion of |SSL_SIGN_ECDSA_*| values - * must be ignored. BoringSSL will internally handle the curve matching logic - * where appropriate. - * - * It is an error to call |sign| while another private key operation is in - * progress on |ssl|. */ +// ssl_private_key_method_st (aka |SSL_PRIVATE_KEY_METHOD|) describes private +// key hooks. This is used to off-load signing operations to a custom, +// potentially asynchronous, backend. Metadata about the key such as the type +// and size are parsed out of the certificate. +struct ssl_private_key_method_st { + // sign signs the message |in| in using the specified signature algorithm. On + // success, it returns |ssl_private_key_success| and writes at most |max_out| + // bytes of signature data to |out| and sets |*out_len| to the number of bytes + // written. On failure, it returns |ssl_private_key_failure|. If the operation + // has not completed, it returns |ssl_private_key_retry|. |sign| should + // arrange for the high-level operation on |ssl| to be retried when the + // operation is completed. This will result in a call to |complete|. + // + // |signature_algorithm| is one of the |SSL_SIGN_*| values, as defined in TLS + // 1.3. Note that, in TLS 1.2, ECDSA algorithms do not require that curve + // sizes match hash sizes, so the curve portion of |SSL_SIGN_ECDSA_*| values + // must be ignored. BoringSSL will internally handle the curve matching logic + // where appropriate. + // + // It is an error to call |sign| while another private key operation is in + // progress on |ssl|. enum ssl_private_key_result_t (*sign)(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, uint16_t signature_algorithm, const uint8_t *in, size_t in_len); - /* sign_digest signs |in_len| bytes of digest from |in|. |md| is the hash - * function used to calculate |in|. On success, it returns - * |ssl_private_key_success| and writes at most |max_out| bytes of signature - * data to |out|. On failure, it returns |ssl_private_key_failure|. If the - * operation has not completed, it returns |ssl_private_key_retry|. |sign| - * should arrange for the high-level operation on |ssl| to be retried when the - * operation is completed. This will result in a call to |complete|. - * - * If the key is an RSA key, implementations must use PKCS#1 padding. |in| is - * the digest itself, so the DigestInfo prefix, if any, must be prepended by - * |sign|. If |md| is |EVP_md5_sha1|, there is no prefix. - * - * It is an error to call |sign_digest| while another private key operation is - * in progress on |ssl|. - * - * This function is deprecated. Implement |sign| instead. - * - * TODO(davidben): Remove this function. */ - enum ssl_private_key_result_t (*sign_digest)(SSL *ssl, uint8_t *out, - size_t *out_len, size_t max_out, - const EVP_MD *md, - const uint8_t *in, - size_t in_len); - - /* decrypt decrypts |in_len| bytes of encrypted data from |in|. On success it - * returns |ssl_private_key_success|, writes at most |max_out| bytes of - * decrypted data to |out| and sets |*out_len| to the actual number of bytes - * written. On failure it returns |ssl_private_key_failure|. If the operation - * has not completed, it returns |ssl_private_key_retry|. 
The caller should - * arrange for the high-level operation on |ssl| to be retried when the - * operation is completed, which will result in a call to |complete|. This - * function only works with RSA keys and should perform a raw RSA decryption - * operation with no padding. - * - * It is an error to call |decrypt| while another private key operation is in - * progress on |ssl|. */ + // decrypt decrypts |in_len| bytes of encrypted data from |in|. On success it + // returns |ssl_private_key_success|, writes at most |max_out| bytes of + // decrypted data to |out| and sets |*out_len| to the actual number of bytes + // written. On failure it returns |ssl_private_key_failure|. If the operation + // has not completed, it returns |ssl_private_key_retry|. The caller should + // arrange for the high-level operation on |ssl| to be retried when the + // operation is completed, which will result in a call to |complete|. This + // function only works with RSA keys and should perform a raw RSA decryption + // operation with no padding. + // + // It is an error to call |decrypt| while another private key operation is in + // progress on |ssl|. enum ssl_private_key_result_t (*decrypt)(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, const uint8_t *in, size_t in_len); - /* complete completes a pending operation. If the operation has completed, it - * returns |ssl_private_key_success| and writes the result to |out| as in - * |sign|. Otherwise, it returns |ssl_private_key_failure| on failure and - * |ssl_private_key_retry| if the operation is still in progress. - * - * |complete| may be called arbitrarily many times before completion, but it - * is an error to call |complete| if there is no pending operation in progress - * on |ssl|. */ + // complete completes a pending operation. If the operation has completed, it + // returns |ssl_private_key_success| and writes the result to |out| as in + // |sign|. Otherwise, it returns |ssl_private_key_failure| on failure and + // |ssl_private_key_retry| if the operation is still in progress. + // + // |complete| may be called arbitrarily many times before completion, but it + // is an error to call |complete| if there is no pending operation in progress + // on |ssl|. enum ssl_private_key_result_t (*complete)(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out); -} SSL_PRIVATE_KEY_METHOD; +}; -/* SSL_set_private_key_method configures a custom private key on |ssl|. - * |key_method| must remain valid for the lifetime of |ssl|. */ +// SSL_set_private_key_method configures a custom private key on |ssl|. +// |key_method| must remain valid for the lifetime of |ssl|. OPENSSL_EXPORT void SSL_set_private_key_method( SSL *ssl, const SSL_PRIVATE_KEY_METHOD *key_method); -/* SSL_CTX_set_private_key_method configures a custom private key on |ctx|. - * |key_method| must remain valid for the lifetime of |ctx|. */ +// SSL_CTX_set_private_key_method configures a custom private key on |ctx|. +// |key_method| must remain valid for the lifetime of |ctx|. OPENSSL_EXPORT void SSL_CTX_set_private_key_method( SSL_CTX *ctx, const SSL_PRIVATE_KEY_METHOD *key_method); -/* Cipher suites. - * - * |SSL_CIPHER| objects represent cipher suites. */ +// Cipher suites. +// +// |SSL_CIPHER| objects represent cipher suites. -DECLARE_STACK_OF(SSL_CIPHER) +DEFINE_CONST_STACK_OF(SSL_CIPHER) -/* SSL_get_cipher_by_value returns the structure representing a TLS cipher - * suite based on its assigned number, or NULL if unknown. 
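A skeletal sketch, not part of the vendored header, of the pared-down SSL_PRIVATE_KEY_METHOD above. The my_token_* helpers stand in for a hypothetical asynchronous signing backend and are declared here purely for illustration; they are not BoringSSL APIs.

#include <openssl/ssl.h>

// Hypothetical asynchronous signing backend; not part of BoringSSL.
extern int my_token_start_sign(SSL *ssl, uint16_t sigalg, const uint8_t *in,
                               size_t in_len);
extern enum ssl_private_key_result_t my_token_finish_sign(SSL *ssl,
                                                          uint8_t *out,
                                                          size_t *out_len,
                                                          size_t max_out);

static enum ssl_private_key_result_t my_sign(SSL *ssl, uint8_t *out,
                                             size_t *out_len, size_t max_out,
                                             uint16_t signature_algorithm,
                                             const uint8_t *in, size_t in_len) {
  // Start the operation and ask BoringSSL to retry; |complete| finishes it.
  if (!my_token_start_sign(ssl, signature_algorithm, in, in_len)) {
    return ssl_private_key_failure;
  }
  return ssl_private_key_retry;
}

static enum ssl_private_key_result_t my_decrypt(SSL *ssl, uint8_t *out,
                                                size_t *out_len, size_t max_out,
                                                const uint8_t *in,
                                                size_t in_len) {
  return ssl_private_key_failure;  // Raw RSA decryption is not supported here.
}

static enum ssl_private_key_result_t my_complete(SSL *ssl, uint8_t *out,
                                                 size_t *out_len,
                                                 size_t max_out) {
  return my_token_finish_sign(ssl, out, out_len, max_out);
}

static const SSL_PRIVATE_KEY_METHOD kMyKeyMethod = {
    .sign = my_sign,
    .decrypt = my_decrypt,
    .complete = my_complete,
};
// Installed with SSL_set_private_key_method(ssl, &kMyKeyMethod) or the
// SSL_CTX variant.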
See - * https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4. */ +// SSL_get_cipher_by_value returns the structure representing a TLS cipher +// suite based on its assigned number, or NULL if unknown. See +// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4. OPENSSL_EXPORT const SSL_CIPHER *SSL_get_cipher_by_value(uint16_t value); -/* SSL_CIPHER_get_id returns |cipher|'s id. It may be cast to a |uint16_t| to - * get the cipher suite value. */ +// SSL_CIPHER_get_id returns |cipher|'s id. It may be cast to a |uint16_t| to +// get the cipher suite value. OPENSSL_EXPORT uint32_t SSL_CIPHER_get_id(const SSL_CIPHER *cipher); -/* SSL_CIPHER_is_AES returns one if |cipher| uses AES (either GCM or CBC - * mode). */ -OPENSSL_EXPORT int SSL_CIPHER_is_AES(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_has_SHA1_HMAC returns one if |cipher| uses HMAC-SHA1. */ -OPENSSL_EXPORT int SSL_CIPHER_has_SHA1_HMAC(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_has_SHA256_HMAC returns one if |cipher| uses HMAC-SHA256. */ -OPENSSL_EXPORT int SSL_CIPHER_has_SHA256_HMAC(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_AEAD returns one if |cipher| uses an AEAD cipher. */ -OPENSSL_EXPORT int SSL_CIPHER_is_AEAD(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_AESGCM returns one if |cipher| uses AES-GCM. */ -OPENSSL_EXPORT int SSL_CIPHER_is_AESGCM(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_AES128GCM returns one if |cipher| uses 128-bit AES-GCM. */ -OPENSSL_EXPORT int SSL_CIPHER_is_AES128GCM(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_AES128CBC returns one if |cipher| uses 128-bit AES in CBC - * mode. */ -OPENSSL_EXPORT int SSL_CIPHER_is_AES128CBC(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_AES256CBC returns one if |cipher| uses 256-bit AES in CBC - * mode. */ -OPENSSL_EXPORT int SSL_CIPHER_is_AES256CBC(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_CHACHA20POLY1305 returns one if |cipher| uses - * CHACHA20_POLY1305. Note this includes both the RFC 7905 and - * draft-agl-tls-chacha20poly1305-04 versions. */ -OPENSSL_EXPORT int SSL_CIPHER_is_CHACHA20POLY1305(const SSL_CIPHER *cipher); +// SSL_CIPHER_is_aead returns one if |cipher| uses an AEAD cipher. +OPENSSL_EXPORT int SSL_CIPHER_is_aead(const SSL_CIPHER *cipher); -/* SSL_CIPHER_is_NULL returns one if |cipher| does not encrypt. */ -OPENSSL_EXPORT int SSL_CIPHER_is_NULL(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_block_cipher returns one if |cipher| is a block cipher. */ +// SSL_CIPHER_is_block_cipher returns one if |cipher| is a block cipher. OPENSSL_EXPORT int SSL_CIPHER_is_block_cipher(const SSL_CIPHER *cipher); -/* SSL_CIPHER_is_ECDSA returns one if |cipher| uses ECDSA. */ -OPENSSL_EXPORT int SSL_CIPHER_is_ECDSA(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_DHE returns one if |cipher| uses DHE. */ -OPENSSL_EXPORT int SSL_CIPHER_is_DHE(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_ECDHE returns one if |cipher| uses ECDHE. */ -OPENSSL_EXPORT int SSL_CIPHER_is_ECDHE(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_is_static_RSA returns one if |cipher| uses the static RSA key - * exchange. */ -OPENSSL_EXPORT int SSL_CIPHER_is_static_RSA(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_get_min_version returns the minimum protocol version required - * for |cipher|. */ +// SSL_CIPHER_get_cipher_nid returns the NID for |cipher|'s bulk +// cipher. Possible values are |NID_aes_128_gcm|, |NID_aes_256_gcm|, +// |NID_chacha20_poly1305|, |NID_aes_128_cbc|, |NID_aes_256_cbc|, and +// |NID_des_ede3_cbc|. 
+OPENSSL_EXPORT int SSL_CIPHER_get_cipher_nid(const SSL_CIPHER *cipher); + +// SSL_CIPHER_get_digest_nid returns the NID for |cipher|'s HMAC if it is a +// legacy cipher suite. For modern AEAD-based ciphers (see +// |SSL_CIPHER_is_aead|), it returns |NID_undef|. +// +// Note this function only returns the legacy HMAC digest, not the PRF hash. +OPENSSL_EXPORT int SSL_CIPHER_get_digest_nid(const SSL_CIPHER *cipher); + +// SSL_CIPHER_get_kx_nid returns the NID for |cipher|'s key exchange. This may +// be |NID_kx_rsa|, |NID_kx_ecdhe|, or |NID_kx_psk| for TLS 1.2. In TLS 1.3, +// cipher suites do not specify the key exchange, so this function returns +// |NID_kx_any|. +OPENSSL_EXPORT int SSL_CIPHER_get_kx_nid(const SSL_CIPHER *cipher); + +// SSL_CIPHER_get_auth_nid returns the NID for |cipher|'s authentication +// type. This may be |NID_auth_rsa|, |NID_auth_ecdsa|, or |NID_auth_psk| for TLS +// 1.2. In TLS 1.3, cipher suites do not specify authentication, so this +// function returns |NID_auth_any|. +OPENSSL_EXPORT int SSL_CIPHER_get_auth_nid(const SSL_CIPHER *cipher); + +// SSL_CIPHER_get_prf_nid retuns the NID for |cipher|'s PRF hash. If |cipher| is +// a pre-TLS-1.2 cipher, it returns |NID_md5_sha1| but note these ciphers use +// SHA-256 in TLS 1.2. Other return values may be treated uniformly in all +// applicable versions. +OPENSSL_EXPORT int SSL_CIPHER_get_prf_nid(const SSL_CIPHER *cipher); + +// SSL_CIPHER_get_min_version returns the minimum protocol version required +// for |cipher|. OPENSSL_EXPORT uint16_t SSL_CIPHER_get_min_version(const SSL_CIPHER *cipher); -/* SSL_CIPHER_get_max_version returns the maximum protocol version that - * supports |cipher|. */ +// SSL_CIPHER_get_max_version returns the maximum protocol version that +// supports |cipher|. OPENSSL_EXPORT uint16_t SSL_CIPHER_get_max_version(const SSL_CIPHER *cipher); -/* SSL_CIPHER_get_name returns the OpenSSL name of |cipher|. */ +// SSL_CIPHER_standard_name returns the standard IETF name for |cipher|. For +// example, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256". +OPENSSL_EXPORT const char *SSL_CIPHER_standard_name(const SSL_CIPHER *cipher); + +// SSL_CIPHER_get_name returns the OpenSSL name of |cipher|. For example, +// "ECDHE-RSA-AES128-GCM-SHA256". OPENSSL_EXPORT const char *SSL_CIPHER_get_name(const SSL_CIPHER *cipher); -/* SSL_CIPHER_get_kx_name returns a string that describes the key-exchange - * method used by |cipher|. For example, "ECDHE_ECDSA". TLS 1.3 AEAD-only - * ciphers return the string "GENERIC". */ +// SSL_CIPHER_get_kx_name returns a string that describes the key-exchange +// method used by |cipher|. For example, "ECDHE_ECDSA". TLS 1.3 AEAD-only +// ciphers return the string "GENERIC". OPENSSL_EXPORT const char *SSL_CIPHER_get_kx_name(const SSL_CIPHER *cipher); -/* SSL_CIPHER_get_rfc_name returns a newly-allocated string with the standard - * name for |cipher| or NULL on error. For example, - * "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256". The caller is responsible for - * calling |OPENSSL_free| on the result. */ -OPENSSL_EXPORT char *SSL_CIPHER_get_rfc_name(const SSL_CIPHER *cipher); - -/* SSL_CIPHER_get_bits returns the strength, in bits, of |cipher|. If - * |out_alg_bits| is not NULL, it writes the number of bits consumed by the - * symmetric algorithm to |*out_alg_bits|. */ +// SSL_CIPHER_get_bits returns the strength, in bits, of |cipher|. If +// |out_alg_bits| is not NULL, it writes the number of bits consumed by the +// symmetric algorithm to |*out_alg_bits|. 
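A small sketch, not part of the vendored header, showing the NID-based cipher getters above together with SSL_CIPHER_standard_name; |ssl| is assumed to be an established connection.

#include <stdio.h>

#include <openssl/nid.h>
#include <openssl/ssl.h>

static void describe_current_cipher(const SSL *ssl) {
  const SSL_CIPHER *cipher = SSL_get_current_cipher(ssl);
  if (cipher == NULL) {
    return;  // The null cipher is still active.
  }
  printf("cipher: %s (a.k.a. %s)\n", SSL_CIPHER_standard_name(cipher),
         SSL_CIPHER_get_name(cipher));
  printf("AEAD: %d, ECDHE: %d\n", SSL_CIPHER_is_aead(cipher),
         SSL_CIPHER_get_kx_nid(cipher) == NID_kx_ecdhe);
}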
OPENSSL_EXPORT int SSL_CIPHER_get_bits(const SSL_CIPHER *cipher, int *out_alg_bits); -/* Cipher suite configuration. - * - * OpenSSL uses a mini-language to configure cipher suites. The language - * maintains an ordered list of enabled ciphers, along with an ordered list of - * disabled but available ciphers. Initially, all ciphers are disabled with a - * default ordering. The cipher string is then interpreted as a sequence of - * directives, separated by colons, each of which modifies this state. - * - * Most directives consist of a one character or empty opcode followed by a - * selector which matches a subset of available ciphers. - * - * Available opcodes are: - * - * The empty opcode enables and appends all matching disabled ciphers to the - * end of the enabled list. The newly appended ciphers are ordered relative to - * each other matching their order in the disabled list. - * - * |-| disables all matching enabled ciphers and prepends them to the disabled - * list, with relative order from the enabled list preserved. This means the - * most recently disabled ciphers get highest preference relative to other - * disabled ciphers if re-enabled. - * - * |+| moves all matching enabled ciphers to the end of the enabled list, with - * relative order preserved. - * - * |!| deletes all matching ciphers, enabled or not, from either list. Deleted - * ciphers will not matched by future operations. - * - * A selector may be a specific cipher (using the OpenSSL name for the cipher) - * or one or more rules separated by |+|. The final selector matches the - * intersection of each rule. For instance, |AESGCM+aECDSA| matches - * ECDSA-authenticated AES-GCM ciphers. - * - * Available cipher rules are: - * - * |ALL| matches all ciphers. - * - * |kRSA|, |kDHE|, |kECDHE|, and |kPSK| match ciphers using plain RSA, DHE, - * ECDHE, and plain PSK key exchanges, respectively. Note that ECDHE_PSK is - * matched by |kECDHE| and not |kPSK|. - * - * |aRSA|, |aECDSA|, and |aPSK| match ciphers authenticated by RSA, ECDSA, and - * a pre-shared key, respectively. - * - * |RSA|, |DHE|, |ECDHE|, |PSK|, |ECDSA|, and |PSK| are aliases for the - * corresponding |k*| or |a*| cipher rule. |RSA| is an alias for |kRSA|, not - * |aRSA|. - * - * |3DES|, |AES128|, |AES256|, |AES|, |AESGCM|, |CHACHA20| match ciphers - * whose bulk cipher use the corresponding encryption scheme. Note that - * |AES|, |AES128|, and |AES256| match both CBC and GCM ciphers. - * - * |SHA1|, |SHA256|, and |SHA384| match legacy cipher suites using the - * corresponding hash function in their MAC. AEADs are matched by none of - * these. - * - * |SHA| is an alias for |SHA1|. - * - * Although implemented, authentication-only ciphers match no rules and must be - * explicitly selected by name. - * - * Deprecated cipher rules: - * - * |kEDH|, |EDH|, |kEECDH|, and |EECDH| are legacy aliases for |kDHE|, |DHE|, - * |kECDHE|, and |ECDHE|, respectively. - * - * |HIGH| is an alias for |ALL|. - * - * |FIPS| is an alias for |HIGH|. - * - * |SSLv3| and |TLSv1| match ciphers available in TLS 1.1 or earlier. - * |TLSv1_2| matches ciphers new in TLS 1.2. This is confusing and should not - * be used. - * - * Unknown rules are silently ignored by legacy APIs, and rejected by APIs with - * "strict" in the name, which should be preferred. Cipher lists can be long and - * it's easy to commit typos. - * - * The special |@STRENGTH| directive will sort all enabled ciphers by strength. 
- * - * The |DEFAULT| directive, when appearing at the front of the string, expands - * to the default ordering of available ciphers. - * - * If configuring a server, one may also configure equal-preference groups to - * partially respect the client's preferences when - * |SSL_OP_CIPHER_SERVER_PREFERENCE| is enabled. Ciphers in an equal-preference - * group have equal priority and use the client order. This may be used to - * enforce that AEADs are preferred but select AES-GCM vs. ChaCha20-Poly1305 - * based on client preferences. An equal-preference is specified with square - * brackets, combining multiple selectors separated by |. For example: - * - * [ECDHE-ECDSA-CHACHA20-POLY1305|ECDHE-ECDSA-AES128-GCM-SHA256] - * - * Once an equal-preference group is used, future directives must be - * opcode-less. - * - * TLS 1.3 ciphers do not participate in this mechanism and instead have a - * built-in preference order. Functions to set cipher lists do not affect TLS - * 1.3, and functions to query the cipher list do not include TLS 1.3 - * ciphers. */ - -/* SSL_DEFAULT_CIPHER_LIST is the default cipher suite configuration. It is - * substituted when a cipher string starts with 'DEFAULT'. */ +// Cipher suite configuration. +// +// OpenSSL uses a mini-language to configure cipher suites. The language +// maintains an ordered list of enabled ciphers, along with an ordered list of +// disabled but available ciphers. Initially, all ciphers are disabled with a +// default ordering. The cipher string is then interpreted as a sequence of +// directives, separated by colons, each of which modifies this state. +// +// Most directives consist of a one character or empty opcode followed by a +// selector which matches a subset of available ciphers. +// +// Available opcodes are: +// +// The empty opcode enables and appends all matching disabled ciphers to the +// end of the enabled list. The newly appended ciphers are ordered relative to +// each other matching their order in the disabled list. +// +// |-| disables all matching enabled ciphers and prepends them to the disabled +// list, with relative order from the enabled list preserved. This means the +// most recently disabled ciphers get highest preference relative to other +// disabled ciphers if re-enabled. +// +// |+| moves all matching enabled ciphers to the end of the enabled list, with +// relative order preserved. +// +// |!| deletes all matching ciphers, enabled or not, from either list. Deleted +// ciphers will not matched by future operations. +// +// A selector may be a specific cipher (using either the standard or OpenSSL +// name for the cipher) or one or more rules separated by |+|. The final +// selector matches the intersection of each rule. For instance, |AESGCM+aECDSA| +// matches ECDSA-authenticated AES-GCM ciphers. +// +// Available cipher rules are: +// +// |ALL| matches all ciphers. +// +// |kRSA|, |kDHE|, |kECDHE|, and |kPSK| match ciphers using plain RSA, DHE, +// ECDHE, and plain PSK key exchanges, respectively. Note that ECDHE_PSK is +// matched by |kECDHE| and not |kPSK|. +// +// |aRSA|, |aECDSA|, and |aPSK| match ciphers authenticated by RSA, ECDSA, and +// a pre-shared key, respectively. +// +// |RSA|, |DHE|, |ECDHE|, |PSK|, |ECDSA|, and |PSK| are aliases for the +// corresponding |k*| or |a*| cipher rule. |RSA| is an alias for |kRSA|, not +// |aRSA|. +// +// |3DES|, |AES128|, |AES256|, |AES|, |AESGCM|, |CHACHA20| match ciphers +// whose bulk cipher use the corresponding encryption scheme. 
Note that +// |AES|, |AES128|, and |AES256| match both CBC and GCM ciphers. +// +// |SHA1|, |SHA256|, and |SHA384| match legacy cipher suites using the +// corresponding hash function in their MAC. AEADs are matched by none of +// these. +// +// |SHA| is an alias for |SHA1|. +// +// Although implemented, authentication-only ciphers match no rules and must be +// explicitly selected by name. +// +// Deprecated cipher rules: +// +// |kEDH|, |EDH|, |kEECDH|, and |EECDH| are legacy aliases for |kDHE|, |DHE|, +// |kECDHE|, and |ECDHE|, respectively. +// +// |HIGH| is an alias for |ALL|. +// +// |FIPS| is an alias for |HIGH|. +// +// |SSLv3| and |TLSv1| match ciphers available in TLS 1.1 or earlier. +// |TLSv1_2| matches ciphers new in TLS 1.2. This is confusing and should not +// be used. +// +// Unknown rules are silently ignored by legacy APIs, and rejected by APIs with +// "strict" in the name, which should be preferred. Cipher lists can be long +// and it's easy to commit typos. Strict functions will also reject the use of +// spaces, semi-colons and commas as alternative separators. +// +// The special |@STRENGTH| directive will sort all enabled ciphers by strength. +// +// The |DEFAULT| directive, when appearing at the front of the string, expands +// to the default ordering of available ciphers. +// +// If configuring a server, one may also configure equal-preference groups to +// partially respect the client's preferences when +// |SSL_OP_CIPHER_SERVER_PREFERENCE| is enabled. Ciphers in an equal-preference +// group have equal priority and use the client order. This may be used to +// enforce that AEADs are preferred but select AES-GCM vs. ChaCha20-Poly1305 +// based on client preferences. An equal-preference is specified with square +// brackets, combining multiple selectors separated by |. For example: +// +// [ECDHE-ECDSA-CHACHA20-POLY1305|ECDHE-ECDSA-AES128-GCM-SHA256] +// +// Once an equal-preference group is used, future directives must be +// opcode-less. Inside an equal-preference group, spaces are not allowed. +// +// TLS 1.3 ciphers do not participate in this mechanism and instead have a +// built-in preference order. Functions to set cipher lists do not affect TLS +// 1.3, and functions to query the cipher list do not include TLS 1.3 +// ciphers. + +// SSL_DEFAULT_CIPHER_LIST is the default cipher suite configuration. It is +// substituted when a cipher string starts with 'DEFAULT'. #define SSL_DEFAULT_CIPHER_LIST "ALL" -/* SSL_CTX_set_strict_cipher_list configures the cipher list for |ctx|, - * evaluating |str| as a cipher string and returning error if |str| contains - * anything meaningless. It returns one on success and zero on failure. */ +// SSL_CTX_set_strict_cipher_list configures the cipher list for |ctx|, +// evaluating |str| as a cipher string and returning error if |str| contains +// anything meaningless. It returns one on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_set_strict_cipher_list(SSL_CTX *ctx, const char *str); -/* SSL_CTX_set_cipher_list configures the cipher list for |ctx|, evaluating - * |str| as a cipher string. It returns one on success and zero on failure. - * - * Prefer to use |SSL_CTX_set_strict_cipher_list|. This function tolerates - * garbage inputs, unless an empty cipher list results. */ +// SSL_CTX_set_cipher_list configures the cipher list for |ctx|, evaluating +// |str| as a cipher string. It returns one on success and zero on failure. +// +// Prefer to use |SSL_CTX_set_strict_cipher_list|. 
This function tolerates +// garbage inputs, unless an empty cipher list results. OPENSSL_EXPORT int SSL_CTX_set_cipher_list(SSL_CTX *ctx, const char *str); -/* SSL_set_strict_cipher_list configures the cipher list for |ssl|, evaluating - * |str| as a cipher string and returning error if |str| contains anything - * meaningless. It returns one on success and zero on failure. */ +// SSL_set_strict_cipher_list configures the cipher list for |ssl|, evaluating +// |str| as a cipher string and returning error if |str| contains anything +// meaningless. It returns one on success and zero on failure. OPENSSL_EXPORT int SSL_set_strict_cipher_list(SSL *ssl, const char *str); -/* SSL_set_cipher_list configures the cipher list for |ssl|, evaluating |str| as - * a cipher string. It returns one on success and zero on failure. - * - * Prefer to use |SSL_set_strict_cipher_list|. This function tolerates garbage - * inputs, unless an empty cipher list results. */ +// SSL_set_cipher_list configures the cipher list for |ssl|, evaluating |str| as +// a cipher string. It returns one on success and zero on failure. +// +// Prefer to use |SSL_set_strict_cipher_list|. This function tolerates garbage +// inputs, unless an empty cipher list results. OPENSSL_EXPORT int SSL_set_cipher_list(SSL *ssl, const char *str); -/* SSL_get_ciphers returns the cipher list for |ssl|, in order of preference. */ +// SSL_CTX_get_ciphers returns the cipher list for |ctx|, in order of +// preference. +OPENSSL_EXPORT STACK_OF(SSL_CIPHER) *SSL_CTX_get_ciphers(const SSL_CTX *ctx); + +// SSL_CTX_cipher_in_group returns one if the |i|th cipher (see +// |SSL_CTX_get_ciphers|) is in the same equipreference group as the one +// following it and zero otherwise. +OPENSSL_EXPORT int SSL_CTX_cipher_in_group(const SSL_CTX *ctx, size_t i); + +// SSL_get_ciphers returns the cipher list for |ssl|, in order of preference. OPENSSL_EXPORT STACK_OF(SSL_CIPHER) *SSL_get_ciphers(const SSL *ssl); -/* Connection information. */ +// Connection information. -/* SSL_is_init_finished returns one if |ssl| has completed its initial handshake - * and has no pending handshake. It returns zero otherwise. */ +// SSL_is_init_finished returns one if |ssl| has completed its initial handshake +// and has no pending handshake. It returns zero otherwise. OPENSSL_EXPORT int SSL_is_init_finished(const SSL *ssl); -/* SSL_in_init returns one if |ssl| has a pending handshake and zero - * otherwise. */ +// SSL_in_init returns one if |ssl| has a pending handshake and zero +// otherwise. OPENSSL_EXPORT int SSL_in_init(const SSL *ssl); -/* SSL_in_false_start returns one if |ssl| has a pending handshake that is in - * False Start. |SSL_write| may be called at this point without waiting for the - * peer, but |SSL_read| will complete the handshake before accepting application - * data. - * - * See also |SSL_MODE_ENABLE_FALSE_START|. */ +// SSL_in_false_start returns one if |ssl| has a pending handshake that is in +// False Start. |SSL_write| may be called at this point without waiting for the +// peer, but |SSL_read| will complete the handshake before accepting application +// data. +// +// See also |SSL_MODE_ENABLE_FALSE_START|. OPENSSL_EXPORT int SSL_in_false_start(const SSL *ssl); -/* SSL_get_peer_certificate returns the peer's leaf certificate or NULL if the - * peer did not use certificates. The caller must call |X509_free| on the - * result to release it. */ +// SSL_get_peer_certificate returns the peer's leaf certificate or NULL if the +// peer did not use certificates. 
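A usage sketch, not part of the vendored header, of the cipher-suite mini-language described above via the strict setter. It combines an equal-preference group (so the client chooses between the two ECDSA AEADs when SSL_OP_CIPHER_SERVER_PREFERENCE is enabled) with a further opcode-less selector, as the grouping rules require.

#include <openssl/ssl.h>

static int configure_cipher_list(SSL_CTX *ctx) {
  // The strict variant rejects unknown rules instead of silently ignoring
  // them; it returns one on success and zero on failure.
  return SSL_CTX_set_strict_cipher_list(
      ctx,
      "[ECDHE-ECDSA-CHACHA20-POLY1305|ECDHE-ECDSA-AES128-GCM-SHA256]:"
      "ECDHE-RSA-AES128-GCM-SHA256");
}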
The caller must call |X509_free| on the +// result to release it. OPENSSL_EXPORT X509 *SSL_get_peer_certificate(const SSL *ssl); -/* SSL_get_peer_cert_chain returns the peer's certificate chain or NULL if - * unavailable or the peer did not use certificates. This is the unverified - * list of certificates as sent by the peer, not the final chain built during - * verification. For historical reasons, this value may not be available if - * resuming a serialized |SSL_SESSION|. The caller does not take ownership of - * the result. - * - * WARNING: This function behaves differently between client and server. If - * |ssl| is a server, the returned chain does not include the leaf certificate. - * If a client, it does. */ +// SSL_get_peer_cert_chain returns the peer's certificate chain or NULL if +// unavailable or the peer did not use certificates. This is the unverified list +// of certificates as sent by the peer, not the final chain built during +// verification. The caller does not take ownership of the result. +// +// WARNING: This function behaves differently between client and server. If +// |ssl| is a server, the returned chain does not include the leaf certificate. +// If a client, it does. OPENSSL_EXPORT STACK_OF(X509) *SSL_get_peer_cert_chain(const SSL *ssl); -/* SSL_get_peer_full_cert_chain returns the peer's certificate chain, or NULL if - * unavailable or the peer did not use certificates. This is the unverified - * list of certificates as sent by the peer, not the final chain built during - * verification. For historical reasons, this value may not be available if - * resuming a serialized |SSL_SESSION|. The caller does not take ownership of - * the result. - * - * This is the same as |SSL_get_peer_cert_chain| except that this function - * always returns the full chain, i.e. the first element of the return value - * (if any) will be the leaf certificate. In constrast, - * |SSL_get_peer_cert_chain| returns only the intermediate certificates if the - * |ssl| is a server. */ +// SSL_get_peer_full_cert_chain returns the peer's certificate chain, or NULL if +// unavailable or the peer did not use certificates. This is the unverified list +// of certificates as sent by the peer, not the final chain built during +// verification. The caller does not take ownership of the result. +// +// This is the same as |SSL_get_peer_cert_chain| except that this function +// always returns the full chain, i.e. the first element of the return value +// (if any) will be the leaf certificate. In constrast, +// |SSL_get_peer_cert_chain| returns only the intermediate certificates if the +// |ssl| is a server. OPENSSL_EXPORT STACK_OF(X509) *SSL_get_peer_full_cert_chain(const SSL *ssl); -/* SSL_get0_signed_cert_timestamp_list sets |*out| and |*out_len| to point to - * |*out_len| bytes of SCT information from the server. This is only valid if - * |ssl| is a client. The SCT information is a SignedCertificateTimestampList - * (including the two leading length bytes). - * See https://tools.ietf.org/html/rfc6962#section-3.3 - * If no SCT was received then |*out_len| will be zero on return. - * - * WARNING: the returned data is not guaranteed to be well formed. */ +// SSL_get0_peer_certificates returns the peer's certificate chain, or NULL if +// unavailable or the peer did not use certificates. This is the unverified list +// of certificates as sent by the peer, not the final chain built during +// verification. The caller does not take ownership of the result. 
+// +// This is the |CRYPTO_BUFFER| variant of |SSL_get_peer_full_cert_chain|. +OPENSSL_EXPORT STACK_OF(CRYPTO_BUFFER) * + SSL_get0_peer_certificates(const SSL *ssl); + +// SSL_get0_signed_cert_timestamp_list sets |*out| and |*out_len| to point to +// |*out_len| bytes of SCT information from the server. This is only valid if +// |ssl| is a client. The SCT information is a SignedCertificateTimestampList +// (including the two leading length bytes). +// See https://tools.ietf.org/html/rfc6962#section-3.3 +// If no SCT was received then |*out_len| will be zero on return. +// +// WARNING: the returned data is not guaranteed to be well formed. OPENSSL_EXPORT void SSL_get0_signed_cert_timestamp_list(const SSL *ssl, const uint8_t **out, size_t *out_len); -/* SSL_get0_ocsp_response sets |*out| and |*out_len| to point to |*out_len| - * bytes of an OCSP response from the server. This is the DER encoding of an - * OCSPResponse type as defined in RFC 2560. - * - * WARNING: the returned data is not guaranteed to be well formed. */ +// SSL_get0_ocsp_response sets |*out| and |*out_len| to point to |*out_len| +// bytes of an OCSP response from the server. This is the DER encoding of an +// OCSPResponse type as defined in RFC 2560. +// +// WARNING: the returned data is not guaranteed to be well formed. OPENSSL_EXPORT void SSL_get0_ocsp_response(const SSL *ssl, const uint8_t **out, size_t *out_len); -/* SSL_get_tls_unique writes at most |max_out| bytes of the tls-unique value - * for |ssl| to |out| and sets |*out_len| to the number of bytes written. It - * returns one on success or zero on error. In general |max_out| should be at - * least 12. - * - * This function will always fail if the initial handshake has not completed. - * The tls-unique value will change after a renegotiation but, since - * renegotiations can be initiated by the server at any point, the higher-level - * protocol must either leave them disabled or define states in which the - * tls-unique value can be read. - * - * The tls-unique value is defined by - * https://tools.ietf.org/html/rfc5929#section-3.1. Due to a weakness in the - * TLS protocol, tls-unique is broken for resumed connections unless the - * Extended Master Secret extension is negotiated. Thus this function will - * return zero if |ssl| performed session resumption unless EMS was used when - * negotiating the original session. */ +// SSL_get_tls_unique writes at most |max_out| bytes of the tls-unique value +// for |ssl| to |out| and sets |*out_len| to the number of bytes written. It +// returns one on success or zero on error. In general |max_out| should be at +// least 12. +// +// This function will always fail if the initial handshake has not completed. +// The tls-unique value will change after a renegotiation but, since +// renegotiations can be initiated by the server at any point, the higher-level +// protocol must either leave them disabled or define states in which the +// tls-unique value can be read. +// +// The tls-unique value is defined by +// https://tools.ietf.org/html/rfc5929#section-3.1. Due to a weakness in the +// TLS protocol, tls-unique is broken for resumed connections unless the +// Extended Master Secret extension is negotiated. Thus this function will +// return zero if |ssl| performed session resumption unless EMS was used when +// negotiating the original session. 
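A brief sketch, not part of the vendored header: checking on the client side whether the server stapled an OCSP response, per SSL_get0_ocsp_response above. BoringSSL only exposes the raw DER here; validating it is the caller's job.

#include <openssl/ssl.h>

static int server_stapled_ocsp(const SSL *ssl) {
  const uint8_t *resp;
  size_t resp_len;
  // On return, |resp|/|resp_len| describe the DER OCSPResponse, or
  // |resp_len| is zero if none was sent.
  SSL_get0_ocsp_response(ssl, &resp, &resp_len);
  return resp_len > 0;
}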
OPENSSL_EXPORT int SSL_get_tls_unique(const SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out); -/* SSL_get_extms_support returns one if the Extended Master Secret extension or - * TLS 1.3 was negotiated. Otherwise, it returns zero. */ +// SSL_get_extms_support returns one if the Extended Master Secret extension or +// TLS 1.3 was negotiated. Otherwise, it returns zero. OPENSSL_EXPORT int SSL_get_extms_support(const SSL *ssl); -/* SSL_get_current_cipher returns the cipher used in the current outgoing - * connection state, or NULL if the null cipher is active. */ +// SSL_get_current_cipher returns the cipher used in the current outgoing +// connection state, or NULL if the null cipher is active. OPENSSL_EXPORT const SSL_CIPHER *SSL_get_current_cipher(const SSL *ssl); -/* SSL_session_reused returns one if |ssl| performed an abbreviated handshake - * and zero otherwise. - * - * TODO(davidben): Hammer down the semantics of this API while a handshake, - * initial or renego, is in progress. */ +// SSL_session_reused returns one if |ssl| performed an abbreviated handshake +// and zero otherwise. +// +// TODO(davidben): Hammer down the semantics of this API while a handshake, +// initial or renego, is in progress. OPENSSL_EXPORT int SSL_session_reused(const SSL *ssl); -/* SSL_get_secure_renegotiation_support returns one if the peer supports secure - * renegotiation (RFC 5746) or TLS 1.3. Otherwise, it returns zero. */ +// SSL_get_secure_renegotiation_support returns one if the peer supports secure +// renegotiation (RFC 5746) or TLS 1.3. Otherwise, it returns zero. OPENSSL_EXPORT int SSL_get_secure_renegotiation_support(const SSL *ssl); -/* SSL_export_keying_material exports a value derived from the master secret, as - * specified in RFC 5705. It writes |out_len| bytes to |out| given a label and - * optional context. (Since a zero length context is allowed, the |use_context| - * flag controls whether a context is included.) - * - * It returns one on success and zero otherwise. */ +// SSL_export_keying_material exports a value derived from the master secret, as +// specified in RFC 5705. It writes |out_len| bytes to |out| given a label and +// optional context. (Since a zero length context is allowed, the |use_context| +// flag controls whether a context is included.) +// +// It returns one on success and zero otherwise. OPENSSL_EXPORT int SSL_export_keying_material( SSL *ssl, uint8_t *out, size_t out_len, const char *label, size_t label_len, const uint8_t *context, size_t context_len, int use_context); -/* Custom extensions. - * - * The custom extension functions allow TLS extensions to be added to - * ClientHello and ServerHello messages. */ - -/* SSL_custom_ext_add_cb is a callback function that is called when the - * ClientHello (for clients) or ServerHello (for servers) is constructed. In - * the case of a server, this callback will only be called for a given - * extension if the ClientHello contained that extension – it's not possible to - * inject extensions into a ServerHello that the client didn't request. - * - * When called, |extension_value| will contain the extension number that is - * being considered for addition (so that a single callback can handle multiple - * extensions). If the callback wishes to include the extension, it must set - * |*out| to point to |*out_len| bytes of extension contents and return one. In - * this case, the corresponding |SSL_custom_ext_free_cb| callback will later be - * called with the value of |*out| once that data has been copied. 
- * - * If the callback does not wish to add an extension it must return zero. - * - * Alternatively, the callback can abort the connection by setting - * |*out_alert_value| to a TLS alert number and returning -1. */ +// Custom extensions. +// +// The custom extension functions allow TLS extensions to be added to +// ClientHello and ServerHello messages. + +// SSL_custom_ext_add_cb is a callback function that is called when the +// ClientHello (for clients) or ServerHello (for servers) is constructed. In +// the case of a server, this callback will only be called for a given +// extension if the ClientHello contained that extension – it's not possible to +// inject extensions into a ServerHello that the client didn't request. +// +// When called, |extension_value| will contain the extension number that is +// being considered for addition (so that a single callback can handle multiple +// extensions). If the callback wishes to include the extension, it must set +// |*out| to point to |*out_len| bytes of extension contents and return one. In +// this case, the corresponding |SSL_custom_ext_free_cb| callback will later be +// called with the value of |*out| once that data has been copied. +// +// If the callback does not wish to add an extension it must return zero. +// +// Alternatively, the callback can abort the connection by setting +// |*out_alert_value| to a TLS alert number and returning -1. typedef int (*SSL_custom_ext_add_cb)(SSL *ssl, unsigned extension_value, const uint8_t **out, size_t *out_len, int *out_alert_value, void *add_arg); -/* SSL_custom_ext_free_cb is a callback function that is called by OpenSSL iff - * an |SSL_custom_ext_add_cb| callback previously returned one. In that case, - * this callback is called and passed the |out| pointer that was returned by - * the add callback. This is to free any dynamically allocated data created by - * the add callback. */ +// SSL_custom_ext_free_cb is a callback function that is called by OpenSSL iff +// an |SSL_custom_ext_add_cb| callback previously returned one. In that case, +// this callback is called and passed the |out| pointer that was returned by +// the add callback. This is to free any dynamically allocated data created by +// the add callback. typedef void (*SSL_custom_ext_free_cb)(SSL *ssl, unsigned extension_value, const uint8_t *out, void *add_arg); -/* SSL_custom_ext_parse_cb is a callback function that is called by OpenSSL to - * parse an extension from the peer: that is from the ServerHello for a client - * and from the ClientHello for a server. - * - * When called, |extension_value| will contain the extension number and the - * contents of the extension are |contents_len| bytes at |contents|. - * - * The callback must return one to continue the handshake. Otherwise, if it - * returns zero, a fatal alert with value |*out_alert_value| is sent and the - * handshake is aborted. */ +// SSL_custom_ext_parse_cb is a callback function that is called by OpenSSL to +// parse an extension from the peer: that is from the ServerHello for a client +// and from the ClientHello for a server. +// +// When called, |extension_value| will contain the extension number and the +// contents of the extension are |contents_len| bytes at |contents|. +// +// The callback must return one to continue the handshake. Otherwise, if it +// returns zero, a fatal alert with value |*out_alert_value| is sent and the +// handshake is aborted. 
typedef int (*SSL_custom_ext_parse_cb)(SSL *ssl, unsigned extension_value, const uint8_t *contents, size_t contents_len, int *out_alert_value, void *parse_arg); -/* SSL_extension_supported returns one iff OpenSSL internally handles - * extensions of type |extension_value|. This can be used to avoid registering - * custom extension handlers for extensions that a future version of OpenSSL - * may handle internally. */ +// SSL_extension_supported returns one iff OpenSSL internally handles +// extensions of type |extension_value|. This can be used to avoid registering +// custom extension handlers for extensions that a future version of OpenSSL +// may handle internally. OPENSSL_EXPORT int SSL_extension_supported(unsigned extension_value); -/* SSL_CTX_add_client_custom_ext registers callback functions for handling - * custom TLS extensions for client connections. - * - * If |add_cb| is NULL then an empty extension will be added in each - * ClientHello. Otherwise, see the comment for |SSL_custom_ext_add_cb| about - * this callback. - * - * The |free_cb| may be NULL if |add_cb| doesn't dynamically allocate data that - * needs to be freed. - * - * It returns one on success or zero on error. It's always an error to register - * callbacks for the same extension twice, or to register callbacks for an - * extension that OpenSSL handles internally. See |SSL_extension_supported| to - * discover, at runtime, which extensions OpenSSL handles internally. */ +// SSL_CTX_add_client_custom_ext registers callback functions for handling +// custom TLS extensions for client connections. +// +// If |add_cb| is NULL then an empty extension will be added in each +// ClientHello. Otherwise, see the comment for |SSL_custom_ext_add_cb| about +// this callback. +// +// The |free_cb| may be NULL if |add_cb| doesn't dynamically allocate data that +// needs to be freed. +// +// It returns one on success or zero on error. It's always an error to register +// callbacks for the same extension twice, or to register callbacks for an +// extension that OpenSSL handles internally. See |SSL_extension_supported| to +// discover, at runtime, which extensions OpenSSL handles internally. OPENSSL_EXPORT int SSL_CTX_add_client_custom_ext( SSL_CTX *ctx, unsigned extension_value, SSL_custom_ext_add_cb add_cb, SSL_custom_ext_free_cb free_cb, void *add_arg, SSL_custom_ext_parse_cb parse_cb, void *parse_arg); -/* SSL_CTX_add_server_custom_ext is the same as - * |SSL_CTX_add_client_custom_ext|, but for server connections. - * - * Unlike on the client side, if |add_cb| is NULL no extension will be added. - * The |add_cb|, if any, will only be called if the ClientHello contained a - * matching extension. */ +// SSL_CTX_add_server_custom_ext is the same as +// |SSL_CTX_add_client_custom_ext|, but for server connections. +// +// Unlike on the client side, if |add_cb| is NULL no extension will be added. +// The |add_cb|, if any, will only be called if the ClientHello contained a +// matching extension. OPENSSL_EXPORT int SSL_CTX_add_server_custom_ext( SSL_CTX *ctx, unsigned extension_value, SSL_custom_ext_add_cb add_cb, SSL_custom_ext_free_cb free_cb, void *add_arg, SSL_custom_ext_parse_cb parse_cb, void *parse_arg); -/* Sessions. - * - * An |SSL_SESSION| represents an SSL session that may be resumed in an - * abbreviated handshake. It is reference-counted and immutable. Once - * established, an |SSL_SESSION| may be shared by multiple |SSL| objects on - * different threads and must not be modified. */ +// Sessions. 
+// +// An |SSL_SESSION| represents an SSL session that may be resumed in an +// abbreviated handshake. It is reference-counted and immutable. Once +// established, an |SSL_SESSION| may be shared by multiple |SSL| objects on +// different threads and must not be modified. -DECLARE_LHASH_OF(SSL_SESSION) DECLARE_PEM_rw(SSL_SESSION, SSL_SESSION) -/* SSL_SESSION_new returns a newly-allocated blank |SSL_SESSION| or NULL on - * error. This may be useful when writing tests but should otherwise not be - * used. */ +// SSL_SESSION_new returns a newly-allocated blank |SSL_SESSION| or NULL on +// error. This may be useful when writing tests but should otherwise not be +// used. OPENSSL_EXPORT SSL_SESSION *SSL_SESSION_new(const SSL_CTX *ctx); -/* SSL_SESSION_up_ref increments the reference count of |session| and returns - * one. */ +// SSL_SESSION_up_ref increments the reference count of |session| and returns +// one. OPENSSL_EXPORT int SSL_SESSION_up_ref(SSL_SESSION *session); -/* SSL_SESSION_free decrements the reference count of |session|. If it reaches - * zero, all data referenced by |session| and |session| itself are released. */ +// SSL_SESSION_free decrements the reference count of |session|. If it reaches +// zero, all data referenced by |session| and |session| itself are released. OPENSSL_EXPORT void SSL_SESSION_free(SSL_SESSION *session); -/* SSL_SESSION_to_bytes serializes |in| into a newly allocated buffer and sets - * |*out_data| to that buffer and |*out_len| to its length. The caller takes - * ownership of the buffer and must call |OPENSSL_free| when done. It returns - * one on success and zero on error. */ +// SSL_SESSION_to_bytes serializes |in| into a newly allocated buffer and sets +// |*out_data| to that buffer and |*out_len| to its length. The caller takes +// ownership of the buffer and must call |OPENSSL_free| when done. It returns +// one on success and zero on error. OPENSSL_EXPORT int SSL_SESSION_to_bytes(const SSL_SESSION *in, uint8_t **out_data, size_t *out_len); -/* SSL_SESSION_to_bytes_for_ticket serializes |in|, but excludes the session - * identification information, namely the session ID and ticket. */ +// SSL_SESSION_to_bytes_for_ticket serializes |in|, but excludes the session +// identification information, namely the session ID and ticket. OPENSSL_EXPORT int SSL_SESSION_to_bytes_for_ticket(const SSL_SESSION *in, uint8_t **out_data, size_t *out_len); -/* SSL_SESSION_from_bytes parses |in_len| bytes from |in| as an SSL_SESSION. It - * returns a newly-allocated |SSL_SESSION| on success or NULL on error. */ +// SSL_SESSION_from_bytes parses |in_len| bytes from |in| as an SSL_SESSION. It +// returns a newly-allocated |SSL_SESSION| on success or NULL on error. OPENSSL_EXPORT SSL_SESSION *SSL_SESSION_from_bytes( const uint8_t *in, size_t in_len, const SSL_CTX *ctx); -/* SSL_SESSION_get_version returns a string describing the TLS version |session| - * was established at. For example, "TLSv1.2" or "SSLv3". */ +// SSL_SESSION_get_version returns a string describing the TLS or DTLS version +// |session| was established at. For example, "TLSv1.2" or "SSLv3". OPENSSL_EXPORT const char *SSL_SESSION_get_version(const SSL_SESSION *session); -/* SSL_SESSION_get_id returns a pointer to a buffer containing |session|'s - * session ID and sets |*out_len| to its length. */ +// SSL_SESSION_get_protocol_version returns the TLS or DTLS version |session| +// was established at. 
+OPENSSL_EXPORT uint16_t +SSL_SESSION_get_protocol_version(const SSL_SESSION *session); + +// SSL_SESSION_set_protocol_version sets |session|'s TLS or DTLS version to +// |version|. This may be useful when writing tests but should otherwise not be +// used. It returns one on success and zero on error. +OPENSSL_EXPORT int SSL_SESSION_set_protocol_version(SSL_SESSION *session, + uint16_t version); + +// SSL_SESSION_get_id returns a pointer to a buffer containing |session|'s +// session ID and sets |*out_len| to its length. OPENSSL_EXPORT const uint8_t *SSL_SESSION_get_id(const SSL_SESSION *session, unsigned *out_len); -/* SSL_SESSION_get_time returns the time at which |session| was established in - * seconds since the UNIX epoch. */ -OPENSSL_EXPORT long SSL_SESSION_get_time(const SSL_SESSION *session); +// SSL_SESSION_get_time returns the time at which |session| was established in +// seconds since the UNIX epoch. +OPENSSL_EXPORT uint64_t SSL_SESSION_get_time(const SSL_SESSION *session); -/* SSL_SESSION_get_timeout returns the lifetime of |session| in seconds. */ -OPENSSL_EXPORT long SSL_SESSION_get_timeout(const SSL_SESSION *session); +// SSL_SESSION_get_timeout returns the lifetime of |session| in seconds. +OPENSSL_EXPORT uint32_t SSL_SESSION_get_timeout(const SSL_SESSION *session); -/* SSL_SESSION_get0_peer returns the peer leaf certificate stored in - * |session|. - * - * TODO(davidben): This should return a const X509 *. */ +// SSL_SESSION_get0_peer returns the peer leaf certificate stored in +// |session|. +// +// TODO(davidben): This should return a const X509 *. OPENSSL_EXPORT X509 *SSL_SESSION_get0_peer(const SSL_SESSION *session); -/* SSL_SESSION_get_master_key writes up to |max_out| bytes of |session|'s master - * secret to |out| and returns the number of bytes written. If |max_out| is - * zero, it returns the size of the master secret. */ +// SSL_SESSION_get_master_key writes up to |max_out| bytes of |session|'s master +// secret to |out| and returns the number of bytes written. If |max_out| is +// zero, it returns the size of the master secret. OPENSSL_EXPORT size_t SSL_SESSION_get_master_key(const SSL_SESSION *session, uint8_t *out, size_t max_out); -/* SSL_SESSION_set_time sets |session|'s creation time to |time| and returns - * |time|. This function may be useful in writing tests but otherwise should not - * be used. */ -OPENSSL_EXPORT long SSL_SESSION_set_time(SSL_SESSION *session, long time); - -/* SSL_SESSION_set_timeout sets |session|'s timeout to |timeout| and returns - * one. This function may be useful in writing tests but otherwise should not - * be used. */ -OPENSSL_EXPORT long SSL_SESSION_set_timeout(SSL_SESSION *session, long timeout); - -/* SSL_SESSION_set1_id_context sets |session|'s session ID context (see - * |SSL_CTX_set_session_id_context|) to |sid_ctx|. It returns one on success and - * zero on error. This function may be useful in writing tests but otherwise - * should not be used. */ +// SSL_SESSION_set_time sets |session|'s creation time to |time| and returns +// |time|. This function may be useful in writing tests but otherwise should not +// be used. +OPENSSL_EXPORT uint64_t SSL_SESSION_set_time(SSL_SESSION *session, + uint64_t time); + +// SSL_SESSION_set_timeout sets |session|'s timeout to |timeout| and returns +// one. This function may be useful in writing tests but otherwise should not +// be used. 
+OPENSSL_EXPORT uint32_t SSL_SESSION_set_timeout(SSL_SESSION *session, + uint32_t timeout); + +// SSL_SESSION_set1_id_context sets |session|'s session ID context (see +// |SSL_CTX_set_session_id_context|) to |sid_ctx|. It returns one on success and +// zero on error. This function may be useful in writing tests but otherwise +// should not be used. OPENSSL_EXPORT int SSL_SESSION_set1_id_context(SSL_SESSION *session, const uint8_t *sid_ctx, size_t sid_ctx_len); - -/* Session caching. - * - * Session caching allows clients to reconnect to a server based on saved - * parameters from a previous connection. - * - * For a server, the library implements a built-in internal session cache as an - * in-memory hash table. One may also register callbacks to implement a custom - * external session cache. An external cache may be used in addition to or - * instead of the internal one. Use |SSL_CTX_set_session_cache_mode| to toggle - * the internal cache. - * - * For a client, the only option is an external session cache. Prior to - * handshaking, the consumer should look up a session externally (keyed, for - * instance, by hostname) and use |SSL_set_session| to configure which session - * to offer. The callbacks may be used to determine when new sessions are - * available. - * - * Note that offering or accepting a session short-circuits most parameter - * negotiation. Resuming sessions across different configurations may result in - * surprising behavior. So, for instance, a client implementing a version - * fallback should shard its session cache by maximum protocol version. */ - -/* SSL_SESS_CACHE_OFF disables all session caching. */ +// SSL_SESSION_should_be_single_use returns one if |session| should be +// single-use (TLS 1.3 and later) and zero otherwise. +// +// If this function returns one, clients retain multiple sessions and use each +// only once. This prevents passive observers from correlating connections with +// tickets. See draft-ietf-tls-tls13-18, appendix B.5. If it returns zero, +// |session| cannot be used without leaking a correlator. +OPENSSL_EXPORT int SSL_SESSION_should_be_single_use(const SSL_SESSION *session); + +// SSL_SESSION_is_resumable returns one if |session| is resumable and zero +// otherwise. +OPENSSL_EXPORT int SSL_SESSION_is_resumable(const SSL_SESSION *session); + +// SSL_SESSION_has_ticket returns one if |session| has a ticket and zero +// otherwise. +OPENSSL_EXPORT int SSL_SESSION_has_ticket(const SSL_SESSION *session); + +// SSL_SESSION_get0_ticket sets |*out_ticket| and |*out_len| to |session|'s +// ticket, or NULL and zero if it does not have one. |out_ticket| may be NULL +// if only the ticket length is needed. +OPENSSL_EXPORT void SSL_SESSION_get0_ticket(const SSL_SESSION *session, + const uint8_t **out_ticket, + size_t *out_len); + +// SSL_SESSION_get_ticket_lifetime_hint returns ticket lifetime hint of +// |session| in seconds or zero if none was set. +OPENSSL_EXPORT uint32_t +SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *session); + + +// Session caching. +// +// Session caching allows connections to be established more efficiently based +// on saved parameters from a previous connection, called a session (see +// |SSL_SESSION|). The client offers a saved session, using an opaque identifier +// from a previous connection. The server may accept the session, if it has the +// parameters available. Otherwise, it will decline and continue with a full +// handshake. +// +// This requires both the client and the server to retain session state. 
A +// client does so with a stateful session cache. A server may do the same or, if +// supported by both sides, statelessly using session tickets. For more +// information on the latter, see the next section. +// +// For a server, the library implements a built-in internal session cache as an +// in-memory hash table. Servers may also use |SSL_CTX_sess_set_get_cb| and +// |SSL_CTX_sess_set_new_cb| to implement a custom external session cache. In +// particular, this may be used to share a session cache between multiple +// servers in a large deployment. An external cache may be used in addition to +// or instead of the internal one. Use |SSL_CTX_set_session_cache_mode| to +// toggle the internal cache. +// +// For a client, the only option is an external session cache. Clients may use +// |SSL_CTX_sess_set_new_cb| to register a callback for when new sessions are +// available. These may be cached and, in subsequent compatible connections, +// configured with |SSL_set_session|. +// +// Note that offering or accepting a session short-circuits certificate +// verification and most parameter negotiation. Resuming sessions across +// different contexts may result in security failures and surprising +// behavior. For a typical client, this means sessions for different hosts must +// be cached under different keys. A client that connects to the same host with, +// e.g., different cipher suite settings or client certificates should also use +// separate session caches between those contexts. Servers should also partition +// session caches between SNI hosts with |SSL_CTX_set_session_id_context|. +// +// Note also, in TLS 1.2 and earlier, offering sessions allows passive observers +// to correlate different client connections. TLS 1.3 and later fix this, +// provided clients use sessions at most once. Session caches are managed by the +// caller in BoringSSL, so this must be implemented externally. See +// |SSL_SESSION_should_be_single_use| for details. + +// SSL_SESS_CACHE_OFF disables all session caching. #define SSL_SESS_CACHE_OFF 0x0000 -/* SSL_SESS_CACHE_CLIENT enables session caching for a client. The internal - * cache is never used on a client, so this only enables the callbacks. */ +// SSL_SESS_CACHE_CLIENT enables session caching for a client. The internal +// cache is never used on a client, so this only enables the callbacks. #define SSL_SESS_CACHE_CLIENT 0x0001 -/* SSL_SESS_CACHE_SERVER enables session caching for a server. */ +// SSL_SESS_CACHE_SERVER enables session caching for a server. #define SSL_SESS_CACHE_SERVER 0x0002 -/* SSL_SESS_CACHE_BOTH enables session caching for both client and server. */ +// SSL_SESS_CACHE_BOTH enables session caching for both client and server. #define SSL_SESS_CACHE_BOTH (SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_SERVER) -/* SSL_SESS_CACHE_NO_AUTO_CLEAR disables automatically calling - * |SSL_CTX_flush_sessions| every 255 connections. */ +// SSL_SESS_CACHE_NO_AUTO_CLEAR disables automatically calling +// |SSL_CTX_flush_sessions| every 255 connections. #define SSL_SESS_CACHE_NO_AUTO_CLEAR 0x0080 -/* SSL_SESS_CACHE_NO_INTERNAL_LOOKUP, on a server, disables looking up a session - * from the internal session cache. */ +// SSL_SESS_CACHE_NO_INTERNAL_LOOKUP, on a server, disables looking up a session +// from the internal session cache. #define SSL_SESS_CACHE_NO_INTERNAL_LOOKUP 0x0100 -/* SSL_SESS_CACHE_NO_INTERNAL_STORE, on a server, disables storing sessions in - * the internal session cache. 
*/ +// SSL_SESS_CACHE_NO_INTERNAL_STORE, on a server, disables storing sessions in +// the internal session cache. #define SSL_SESS_CACHE_NO_INTERNAL_STORE 0x0200 -/* SSL_SESS_CACHE_NO_INTERNAL, on a server, disables the internal session - * cache. */ +// SSL_SESS_CACHE_NO_INTERNAL, on a server, disables the internal session +// cache. #define SSL_SESS_CACHE_NO_INTERNAL \ (SSL_SESS_CACHE_NO_INTERNAL_LOOKUP | SSL_SESS_CACHE_NO_INTERNAL_STORE) -/* SSL_CTX_set_session_cache_mode sets the session cache mode bits for |ctx| to - * |mode|. It returns the previous value. */ +// SSL_CTX_set_session_cache_mode sets the session cache mode bits for |ctx| to +// |mode|. It returns the previous value. OPENSSL_EXPORT int SSL_CTX_set_session_cache_mode(SSL_CTX *ctx, int mode); -/* SSL_CTX_get_session_cache_mode returns the session cache mode bits for - * |ctx| */ +// SSL_CTX_get_session_cache_mode returns the session cache mode bits for +// |ctx| OPENSSL_EXPORT int SSL_CTX_get_session_cache_mode(const SSL_CTX *ctx); -/* SSL_set_session, for a client, configures |ssl| to offer to resume |session| - * in the initial handshake and returns one. The caller retains ownership of - * |session|. - * - * It is an error to call this function after the handshake has begun. */ +// SSL_set_session, for a client, configures |ssl| to offer to resume |session| +// in the initial handshake and returns one. The caller retains ownership of +// |session|. +// +// It is an error to call this function after the handshake has begun. OPENSSL_EXPORT int SSL_set_session(SSL *ssl, SSL_SESSION *session); -/* SSL_get_session returns a non-owning pointer to |ssl|'s session. For - * historical reasons, which session it returns depends on |ssl|'s state. - * - * Prior to the start of the initial handshake, it returns the session the - * caller set with |SSL_set_session|. After the initial handshake has finished - * and if no additional handshakes are in progress, it returns the currently - * active session. Its behavior is undefined while a handshake is in progress. - * - * Using this function to add new sessions to an external session cache is - * deprecated. Use |SSL_CTX_sess_set_new_cb| instead. */ -OPENSSL_EXPORT SSL_SESSION *SSL_get_session(const SSL *ssl); - -/* SSL_get0_session is an alias for |SSL_get_session|. */ -#define SSL_get0_session SSL_get_session - -/* SSL_get1_session acts like |SSL_get_session| but returns a new reference to - * the session. */ -OPENSSL_EXPORT SSL_SESSION *SSL_get1_session(SSL *ssl); - -/* SSL_DEFAULT_SESSION_TIMEOUT is the default lifetime, in seconds, of a - * session in TLS 1.2 or earlier. This is how long we are willing to use the - * secret to encrypt traffic without fresh key material. */ +// SSL_DEFAULT_SESSION_TIMEOUT is the default lifetime, in seconds, of a +// session in TLS 1.2 or earlier. This is how long we are willing to use the +// secret to encrypt traffic without fresh key material. #define SSL_DEFAULT_SESSION_TIMEOUT (2 * 60 * 60) -/* SSL_DEFAULT_SESSION_PSK_DHE_TIMEOUT is the default lifetime, in seconds, of a - * session for TLS 1.3 psk_dhe_ke. This is how long we are willing to use the - * secret as an authenticator. */ +// SSL_DEFAULT_SESSION_PSK_DHE_TIMEOUT is the default lifetime, in seconds, of a +// session for TLS 1.3 psk_dhe_ke. This is how long we are willing to use the +// secret as an authenticator. 
#define SSL_DEFAULT_SESSION_PSK_DHE_TIMEOUT (2 * 24 * 60 * 60) -/* SSL_DEFAULT_SESSION_AUTH_TIMEOUT is the default non-renewable lifetime, in - * seconds, of a TLS 1.3 session. This is how long we are willing to trust the - * signature in the initial handshake. */ +// SSL_DEFAULT_SESSION_AUTH_TIMEOUT is the default non-renewable lifetime, in +// seconds, of a TLS 1.3 session. This is how long we are willing to trust the +// signature in the initial handshake. #define SSL_DEFAULT_SESSION_AUTH_TIMEOUT (7 * 24 * 60 * 60) -/* SSL_CTX_set_timeout sets the lifetime, in seconds, of TLS 1.2 (or earlier) - * sessions created in |ctx| to |timeout|. */ -OPENSSL_EXPORT long SSL_CTX_set_timeout(SSL_CTX *ctx, long timeout); +// SSL_CTX_set_timeout sets the lifetime, in seconds, of TLS 1.2 (or earlier) +// sessions created in |ctx| to |timeout|. +OPENSSL_EXPORT uint32_t SSL_CTX_set_timeout(SSL_CTX *ctx, uint32_t timeout); -/* SSL_CTX_set_session_psk_dhe_timeout sets the lifetime, in seconds, of TLS 1.3 - * sessions created in |ctx| to |timeout|. */ +// SSL_CTX_set_session_psk_dhe_timeout sets the lifetime, in seconds, of TLS 1.3 +// sessions created in |ctx| to |timeout|. OPENSSL_EXPORT void SSL_CTX_set_session_psk_dhe_timeout(SSL_CTX *ctx, - long timeout); - -/* SSL_CTX_get_timeout returns the lifetime, in seconds, of TLS 1.2 (or earlier) - * sessions created in |ctx|. */ -OPENSSL_EXPORT long SSL_CTX_get_timeout(const SSL_CTX *ctx); - -/* SSL_CTX_set_session_id_context sets |ctx|'s session ID context to |sid_ctx|. - * It returns one on success and zero on error. The session ID context is an - * application-defined opaque byte string. A session will not be used in a - * connection without a matching session ID context. - * - * For a server, if |SSL_VERIFY_PEER| is enabled, it is an error to not set a - * session ID context. - * - * TODO(davidben): Is that check needed? That seems a special case of taking - * care not to cross-resume across configuration changes, and this is only - * relevant if a server requires client auth. */ + uint32_t timeout); + +// SSL_CTX_get_timeout returns the lifetime, in seconds, of TLS 1.2 (or earlier) +// sessions created in |ctx|. +OPENSSL_EXPORT uint32_t SSL_CTX_get_timeout(const SSL_CTX *ctx); + +// SSL_CTX_set_session_id_context sets |ctx|'s session ID context to |sid_ctx|. +// It returns one on success and zero on error. The session ID context is an +// application-defined opaque byte string. A session will not be used in a +// connection without a matching session ID context. +// +// For a server, if |SSL_VERIFY_PEER| is enabled, it is an error to not set a +// session ID context. OPENSSL_EXPORT int SSL_CTX_set_session_id_context(SSL_CTX *ctx, const uint8_t *sid_ctx, size_t sid_ctx_len); -/* SSL_set_session_id_context sets |ssl|'s session ID context to |sid_ctx|. It - * returns one on success and zero on error. See also - * |SSL_CTX_set_session_id_context|. */ +// SSL_set_session_id_context sets |ssl|'s session ID context to |sid_ctx|. It +// returns one on success and zero on error. See also +// |SSL_CTX_set_session_id_context|. OPENSSL_EXPORT int SSL_set_session_id_context(SSL *ssl, const uint8_t *sid_ctx, size_t sid_ctx_len); -/* SSL_get0_session_id_context returns a pointer to |ssl|'s session ID context - * and sets |*out_len| to its length. */ +// SSL_get0_session_id_context returns a pointer to |ssl|'s session ID context +// and sets |*out_len| to its length. 
OPENSSL_EXPORT const uint8_t *SSL_get0_session_id_context(const SSL *ssl, size_t *out_len); -/* SSL_SESSION_CACHE_MAX_SIZE_DEFAULT is the default maximum size of a session - * cache. */ +// SSL_SESSION_CACHE_MAX_SIZE_DEFAULT is the default maximum size of a session +// cache. #define SSL_SESSION_CACHE_MAX_SIZE_DEFAULT (1024 * 20) -/* SSL_CTX_sess_set_cache_size sets the maximum size of |ctx|'s internal session - * cache to |size|. It returns the previous value. */ +// SSL_CTX_sess_set_cache_size sets the maximum size of |ctx|'s internal session +// cache to |size|. It returns the previous value. OPENSSL_EXPORT unsigned long SSL_CTX_sess_set_cache_size(SSL_CTX *ctx, unsigned long size); -/* SSL_CTX_sess_get_cache_size returns the maximum size of |ctx|'s internal - * session cache. */ +// SSL_CTX_sess_get_cache_size returns the maximum size of |ctx|'s internal +// session cache. OPENSSL_EXPORT unsigned long SSL_CTX_sess_get_cache_size(const SSL_CTX *ctx); -/* SSL_CTX_sessions returns |ctx|'s internal session cache. */ -OPENSSL_EXPORT LHASH_OF(SSL_SESSION) *SSL_CTX_sessions(SSL_CTX *ctx); - -/* SSL_CTX_sess_number returns the number of sessions in |ctx|'s internal - * session cache. */ +// SSL_CTX_sess_number returns the number of sessions in |ctx|'s internal +// session cache. OPENSSL_EXPORT size_t SSL_CTX_sess_number(const SSL_CTX *ctx); -/* SSL_CTX_add_session inserts |session| into |ctx|'s internal session cache. It - * returns one on success and zero on error or if |session| is already in the - * cache. The caller retains its reference to |session|. */ +// SSL_CTX_add_session inserts |session| into |ctx|'s internal session cache. It +// returns one on success and zero on error or if |session| is already in the +// cache. The caller retains its reference to |session|. OPENSSL_EXPORT int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *session); -/* SSL_CTX_remove_session removes |session| from |ctx|'s internal session cache. - * It returns one on success and zero if |session| was not in the cache. */ +// SSL_CTX_remove_session removes |session| from |ctx|'s internal session cache. +// It returns one on success and zero if |session| was not in the cache. OPENSSL_EXPORT int SSL_CTX_remove_session(SSL_CTX *ctx, SSL_SESSION *session); -/* SSL_CTX_flush_sessions removes all sessions from |ctx| which have expired as - * of time |time|. If |time| is zero, all sessions are removed. */ -OPENSSL_EXPORT void SSL_CTX_flush_sessions(SSL_CTX *ctx, long time); - -/* SSL_CTX_sess_set_new_cb sets the callback to be called when a new session is - * established and ready to be cached. If the session cache is disabled (the - * appropriate one of |SSL_SESS_CACHE_CLIENT| or |SSL_SESS_CACHE_SERVER| is - * unset), the callback is not called. - * - * The callback is passed a reference to |session|. It returns one if it takes - * ownership and zero otherwise. - * - * Note: For a client, the callback may be called on abbreviated handshakes if a - * ticket is renewed. Further, it may not be called until some time after - * |SSL_do_handshake| or |SSL_connect| completes if False Start is enabled. Thus - * it's recommended to use this callback over checking |SSL_session_reused| on - * handshake completion. - * - * TODO(davidben): Conditioning callbacks on |SSL_SESS_CACHE_CLIENT| or - * |SSL_SESS_CACHE_SERVER| doesn't make any sense when one could just as easily - * not supply the callbacks. Removing that condition and the client internal - * cache would simplify things. 
*/ +// SSL_CTX_flush_sessions removes all sessions from |ctx| which have expired as +// of time |time|. If |time| is zero, all sessions are removed. +OPENSSL_EXPORT void SSL_CTX_flush_sessions(SSL_CTX *ctx, uint64_t time); + +// SSL_CTX_sess_set_new_cb sets the callback to be called when a new session is +// established and ready to be cached. If the session cache is disabled (the +// appropriate one of |SSL_SESS_CACHE_CLIENT| or |SSL_SESS_CACHE_SERVER| is +// unset), the callback is not called. +// +// The callback is passed a reference to |session|. It returns one if it takes +// ownership (and then calls |SSL_SESSION_free| when done) and zero otherwise. A +// consumer which places |session| into an in-memory cache will likely return +// one, with the cache calling |SSL_SESSION_free|. A consumer which serializes +// |session| with |SSL_SESSION_to_bytes| may not need to retain |session| and +// will likely return zero. Returning one is equivalent to calling +// |SSL_SESSION_up_ref| and then returning zero. +// +// Note: For a client, the callback may be called on abbreviated handshakes if a +// ticket is renewed. Further, it may not be called until some time after +// |SSL_do_handshake| or |SSL_connect| completes if False Start is enabled. Thus +// it's recommended to use this callback over calling |SSL_get_session| on +// handshake completion. OPENSSL_EXPORT void SSL_CTX_sess_set_new_cb( SSL_CTX *ctx, int (*new_session_cb)(SSL *ssl, SSL_SESSION *session)); -/* SSL_CTX_sess_get_new_cb returns the callback set by - * |SSL_CTX_sess_set_new_cb|. */ +// SSL_CTX_sess_get_new_cb returns the callback set by +// |SSL_CTX_sess_set_new_cb|. OPENSSL_EXPORT int (*SSL_CTX_sess_get_new_cb(SSL_CTX *ctx))( SSL *ssl, SSL_SESSION *session); -/* SSL_CTX_sess_set_remove_cb sets a callback which is called when a session is - * removed from the internal session cache. - * - * TODO(davidben): What is the point of this callback? It seems useless since it - * only fires on sessions in the internal cache. */ +// SSL_CTX_sess_set_remove_cb sets a callback which is called when a session is +// removed from the internal session cache. +// +// TODO(davidben): What is the point of this callback? It seems useless since it +// only fires on sessions in the internal cache. OPENSSL_EXPORT void SSL_CTX_sess_set_remove_cb( SSL_CTX *ctx, void (*remove_session_cb)(SSL_CTX *ctx, SSL_SESSION *session)); -/* SSL_CTX_sess_get_remove_cb returns the callback set by - * |SSL_CTX_sess_set_remove_cb|. */ +// SSL_CTX_sess_get_remove_cb returns the callback set by +// |SSL_CTX_sess_set_remove_cb|. OPENSSL_EXPORT void (*SSL_CTX_sess_get_remove_cb(SSL_CTX *ctx))( SSL_CTX *ctx, SSL_SESSION *session); -/* SSL_CTX_sess_set_get_cb sets a callback to look up a session by ID for a - * server. The callback is passed the session ID and should return a matching - * |SSL_SESSION| or NULL if not found. It should set |*out_copy| to zero and - * return a new reference to the session. This callback is not used for a - * client. - * - * For historical reasons, if |*out_copy| is set to one (default), the SSL - * library will take a new reference to the returned |SSL_SESSION|, expecting - * the callback to return a non-owning pointer. This is not recommended. If - * |ctx| and thus the callback is used on multiple threads, the session may be - * removed and invalidated before the SSL library calls |SSL_SESSION_up_ref|, - * whereas the callback may synchronize internally. 
- * - * To look up a session asynchronously, the callback may return - * |SSL_magic_pending_session_ptr|. See the documentation for that function and - * |SSL_ERROR_PENDING_SESSION|. - * - * If the internal session cache is enabled, the callback is only consulted if - * the internal cache does not return a match. - * - * The callback's |id| parameter is not const for historical reasons, but the - * contents may not be modified. */ +// SSL_CTX_sess_set_get_cb sets a callback to look up a session by ID for a +// server. The callback is passed the session ID and should return a matching +// |SSL_SESSION| or NULL if not found. It should set |*out_copy| to zero and +// return a new reference to the session. This callback is not used for a +// client. +// +// For historical reasons, if |*out_copy| is set to one (default), the SSL +// library will take a new reference to the returned |SSL_SESSION|, expecting +// the callback to return a non-owning pointer. This is not recommended. If +// |ctx| and thus the callback is used on multiple threads, the session may be +// removed and invalidated before the SSL library calls |SSL_SESSION_up_ref|, +// whereas the callback may synchronize internally. +// +// To look up a session asynchronously, the callback may return +// |SSL_magic_pending_session_ptr|. See the documentation for that function and +// |SSL_ERROR_PENDING_SESSION|. +// +// If the internal session cache is enabled, the callback is only consulted if +// the internal cache does not return a match. OPENSSL_EXPORT void SSL_CTX_sess_set_get_cb( - SSL_CTX *ctx, - SSL_SESSION *(*get_session_cb)(SSL *ssl, uint8_t *id, int id_len, - int *out_copy)); + SSL_CTX *ctx, SSL_SESSION *(*get_session_cb)(SSL *ssl, const uint8_t *id, + int id_len, int *out_copy)); -/* SSL_CTX_sess_get_get_cb returns the callback set by - * |SSL_CTX_sess_set_get_cb|. */ +// SSL_CTX_sess_get_get_cb returns the callback set by +// |SSL_CTX_sess_set_get_cb|. OPENSSL_EXPORT SSL_SESSION *(*SSL_CTX_sess_get_get_cb(SSL_CTX *ctx))( - SSL *ssl, uint8_t *id, int id_len, int *out_copy); + SSL *ssl, const uint8_t *id, int id_len, int *out_copy); -/* SSL_magic_pending_session_ptr returns a magic |SSL_SESSION|* which indicates - * that the session isn't currently unavailable. |SSL_get_error| will then - * return |SSL_ERROR_PENDING_SESSION| and the handshake can be retried later - * when the lookup has completed. */ +// SSL_magic_pending_session_ptr returns a magic |SSL_SESSION|* which indicates +// that the session isn't currently available. |SSL_get_error| will then +// return |SSL_ERROR_PENDING_SESSION| and the handshake can be retried later +// when the lookup has completed. OPENSSL_EXPORT SSL_SESSION *SSL_magic_pending_session_ptr(void); -/* Session tickets. - * - * Session tickets, from RFC 5077, allow session resumption without server-side - * state. Session tickets are supported in by default but may be disabled with - * |SSL_OP_NO_TICKET|. - * - * On the client, ticket-based sessions use the same APIs as ID-based tickets. - * Callers do not need to handle them differently. - * - * On the server, tickets are encrypted and authenticated with a secret key. By - * default, an |SSL_CTX| generates a key on creation. Tickets are minted and - * processed transparently. The following functions may be used to configure a - * persistent key or implement more custom behavior. */ - -/* SSL_CTX_get_tlsext_ticket_keys writes |ctx|'s session ticket key material to - * |len| bytes of |out|. It returns one on success and zero if |len| is not - * 48.
If |out| is NULL, it returns 48 instead. */ +// Session tickets. +// +// Session tickets, from RFC 5077, allow session resumption without server-side +// state. The server maintains a secret ticket key and sends the client opaque +// encrypted session parameters, called a ticket. When offering the session, the +// client sends the ticket which the server decrypts to recover session state. +// Session tickets are enabled by default but may be disabled with +// |SSL_OP_NO_TICKET|. +// +// On the client, ticket-based sessions use the same APIs as ID-based tickets. +// Callers do not need to handle them differently. +// +// On the server, tickets are encrypted and authenticated with a secret key. By +// default, an |SSL_CTX| generates a key on creation and uses it for the +// lifetime of the |SSL_CTX|. Tickets are minted and processed +// transparently. The following functions may be used to configure a persistent +// key or implement more custom behavior, including key rotation and sharing +// keys between multiple servers in a large deployment. There are three levels +// of customisation possible: +// +// 1) One can simply set the keys with |SSL_CTX_set_tlsext_ticket_keys|. +// 2) One can configure an |EVP_CIPHER_CTX| and |HMAC_CTX| directly for +// encryption and authentication. +// 3) One can configure an |SSL_TICKET_AEAD_METHOD| to have more control +// and the option of asynchronous decryption. +// +// An attacker that compromises a server's session ticket key can impersonate +// the server and, prior to TLS 1.3, retroactively decrypt all application +// traffic from sessions using that ticket key. Thus ticket keys must be +// regularly rotated for forward secrecy. Note the default key is rotated +// automatically once every 48 hours but manually configured keys are not. + +// SSL_DEFAULT_TICKET_KEY_ROTATION_INTERVAL is the interval with which the +// default session ticket encryption key is rotated, if in use. If any +// non-default ticket encryption mechanism is configured, automatic rotation is +// disabled. +#define SSL_DEFAULT_TICKET_KEY_ROTATION_INTERVAL (2 * 24 * 60 * 60) + +// SSL_CTX_get_tlsext_ticket_keys writes |ctx|'s session ticket key material to +// |len| bytes of |out|. It returns one on success and zero if |len| is not +// 48. If |out| is NULL, it returns 48 instead. OPENSSL_EXPORT int SSL_CTX_get_tlsext_ticket_keys(SSL_CTX *ctx, void *out, size_t len); -/* SSL_CTX_set_tlsext_ticket_keys sets |ctx|'s session ticket key material to - * |len| bytes of |in|. It returns one on success and zero if |len| is not - * 48. If |in| is NULL, it returns 48 instead. */ +// SSL_CTX_set_tlsext_ticket_keys sets |ctx|'s session ticket key material to +// |len| bytes of |in|. It returns one on success and zero if |len| is not +// 48. If |in| is NULL, it returns 48 instead. OPENSSL_EXPORT int SSL_CTX_set_tlsext_ticket_keys(SSL_CTX *ctx, const void *in, size_t len); -/* SSL_TICKET_KEY_NAME_LEN is the length of the key name prefix of a session - * ticket. */ +// SSL_TICKET_KEY_NAME_LEN is the length of the key name prefix of a session +// ticket. #define SSL_TICKET_KEY_NAME_LEN 16 -/* SSL_CTX_set_tlsext_ticket_key_cb sets the ticket callback to |callback| and - * returns one. |callback| will be called when encrypting a new ticket and when - * decrypting a ticket from the client. - * - * In both modes, |ctx| and |hmac_ctx| will already have been initialized with - * |EVP_CIPHER_CTX_init| and |HMAC_CTX_init|, respectively. 
|callback| - * configures |hmac_ctx| with an HMAC digest and key, and configures |ctx| - * for encryption or decryption, based on the mode. - * - * When encrypting a new ticket, |encrypt| will be one. It writes a public - * 16-byte key name to |key_name| and a fresh IV to |iv|. The output IV length - * must match |EVP_CIPHER_CTX_iv_length| of the cipher selected. In this mode, - * |callback| returns 1 on success and -1 on error. - * - * When decrypting a ticket, |encrypt| will be zero. |key_name| will point to a - * 16-byte key name and |iv| points to an IV. The length of the IV consumed must - * match |EVP_CIPHER_CTX_iv_length| of the cipher selected. In this mode, - * |callback| returns -1 to abort the handshake, 0 if decrypting the ticket - * failed, and 1 or 2 on success. If it returns 2, the ticket will be renewed. - * This may be used to re-key the ticket. - * - * WARNING: |callback| wildly breaks the usual return value convention and is - * called in two different modes. */ +// SSL_CTX_set_tlsext_ticket_key_cb sets the ticket callback to |callback| and +// returns one. |callback| will be called when encrypting a new ticket and when +// decrypting a ticket from the client. +// +// In both modes, |ctx| and |hmac_ctx| will already have been initialized with +// |EVP_CIPHER_CTX_init| and |HMAC_CTX_init|, respectively. |callback| +// configures |hmac_ctx| with an HMAC digest and key, and configures |ctx| +// for encryption or decryption, based on the mode. +// +// When encrypting a new ticket, |encrypt| will be one. It writes a public +// 16-byte key name to |key_name| and a fresh IV to |iv|. The output IV length +// must match |EVP_CIPHER_CTX_iv_length| of the cipher selected. In this mode, +// |callback| returns 1 on success and -1 on error. +// +// When decrypting a ticket, |encrypt| will be zero. |key_name| will point to a +// 16-byte key name and |iv| points to an IV. The length of the IV consumed must +// match |EVP_CIPHER_CTX_iv_length| of the cipher selected. In this mode, +// |callback| returns -1 to abort the handshake, 0 if decrypting the ticket +// failed, and 1 or 2 on success. If it returns 2, the ticket will be renewed. +// This may be used to re-key the ticket. +// +// WARNING: |callback| wildly breaks the usual return value convention and is +// called in two different modes. OPENSSL_EXPORT int SSL_CTX_set_tlsext_ticket_key_cb( SSL_CTX *ctx, int (*callback)(SSL *ssl, uint8_t *key_name, uint8_t *iv, EVP_CIPHER_CTX *ctx, HMAC_CTX *hmac_ctx, int encrypt)); +// ssl_ticket_aead_result_t enumerates the possible results from decrypting a +// ticket with an |SSL_TICKET_AEAD_METHOD|. +enum ssl_ticket_aead_result_t { + // ssl_ticket_aead_success indicates that the ticket was successfully + // decrypted. + ssl_ticket_aead_success, + // ssl_ticket_aead_retry indicates that the operation could not be + // immediately completed and must be reattempted, via |open|, at a later + // point. + ssl_ticket_aead_retry, + // ssl_ticket_aead_ignore_ticket indicates that the ticket should be ignored + // (i.e. is corrupt or otherwise undecryptable). + ssl_ticket_aead_ignore_ticket, + // ssl_ticket_aead_error indicates that a fatal error occurred and the + // handshake should be terminated. + ssl_ticket_aead_error, +}; -/* Elliptic curve Diffie-Hellman. - * - * Cipher suites using an ECDHE key exchange perform Diffie-Hellman over an - * elliptic curve negotiated by both endpoints. See RFC 4492. Only named curves - * are supported.
ECDHE is always enabled, but the curve preferences may be - * configured with these functions. - * - * Note that TLS 1.3 renames these from curves to groups. For consistency, we - * currently use the TLS 1.2 name in the API. */ +// ssl_ticket_aead_method_st (aka |SSL_TICKET_AEAD_METHOD|) contains methods +// for encrypting and decrypting session tickets. +struct ssl_ticket_aead_method_st { + // max_overhead returns the maximum number of bytes of overhead that |seal| + // may add. + size_t (*max_overhead)(SSL *ssl); + + // seal encrypts and authenticates |in_len| bytes from |in|, writes, at most, + // |max_out_len| bytes to |out|, and puts the number of bytes written in + // |*out_len|. The |in| and |out| buffers may be equal but will not otherwise + // alias. It returns one on success or zero on error. + int (*seal)(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out_len, + const uint8_t *in, size_t in_len); + + // open authenticates and decrypts |in_len| bytes from |in|, writes, at most, + // |max_out_len| bytes of plaintext to |out|, and puts the number of bytes + // written in |*out_len|. The |in| and |out| buffers may be equal but will + // not otherwise alias. See |ssl_ticket_aead_result_t| for details of the + // return values. In the case that a retry is indicated, the caller should + // arrange for the high-level operation on |ssl| to be retried when the + // operation is completed, which will result in another call to |open|. + enum ssl_ticket_aead_result_t (*open)(SSL *ssl, uint8_t *out, size_t *out_len, + size_t max_out_len, const uint8_t *in, + size_t in_len); +}; -/* SSL_CTX_set1_curves sets the preferred curves for |ctx| to be |curves|. Each - * element of |curves| should be a curve nid. It returns one on success and - * zero on failure. - * - * Note that this API uses nid values from nid.h and not the |SSL_CURVE_*| - * values defined below. */ +// SSL_CTX_set_ticket_aead_method configures a custom ticket AEAD method table +// on |ctx|. |aead_method| must remain valid for the lifetime of |ctx|. +OPENSSL_EXPORT void SSL_CTX_set_ticket_aead_method( + SSL_CTX *ctx, const SSL_TICKET_AEAD_METHOD *aead_method); + + +// Elliptic curve Diffie-Hellman. +// +// Cipher suites using an ECDHE key exchange perform Diffie-Hellman over an +// elliptic curve negotiated by both endpoints. See RFC 4492. Only named curves +// are supported. ECDHE is always enabled, but the curve preferences may be +// configured with these functions. +// +// Note that TLS 1.3 renames these from curves to groups. For consistency, we +// currently use the TLS 1.2 name in the API. + +// SSL_CTX_set1_curves sets the preferred curves for |ctx| to be |curves|. Each +// element of |curves| should be a curve nid. It returns one on success and +// zero on failure. +// +// Note that this API uses nid values from nid.h and not the |SSL_CURVE_*| +// values defined below. OPENSSL_EXPORT int SSL_CTX_set1_curves(SSL_CTX *ctx, const int *curves, size_t curves_len); -/* SSL_set1_curves sets the preferred curves for |ssl| to be |curves|. Each - * element of |curves| should be a curve nid. It returns one on success and - * zero on failure. - * - * Note that this API uses nid values from nid.h and not the |SSL_CURVE_*| - * values defined below. */ +// SSL_set1_curves sets the preferred curves for |ssl| to be |curves|. Each +// element of |curves| should be a curve nid. It returns one on success and +// zero on failure. +// +// Note that this API uses nid values from nid.h and not the |SSL_CURVE_*| +// values defined below. 
OPENSSL_EXPORT int SSL_set1_curves(SSL *ssl, const int *curves, size_t curves_len); -/* SSL_CTX_set1_curves_list sets the preferred curves for |ctx| to be the - * colon-separated list |curves|. Each element of |curves| should be a curve - * name (e.g. P-256, X25519, ...). It returns one on success and zero on - * failure. */ +// SSL_CTX_set1_curves_list sets the preferred curves for |ctx| to be the +// colon-separated list |curves|. Each element of |curves| should be a curve +// name (e.g. P-256, X25519, ...). It returns one on success and zero on +// failure. OPENSSL_EXPORT int SSL_CTX_set1_curves_list(SSL_CTX *ctx, const char *curves); -/* SSL_set1_curves_list sets the preferred curves for |ssl| to be the - * colon-separated list |curves|. Each element of |curves| should be a curve - * name (e.g. P-256, X25519, ...). It returns one on success and zero on - * failure. */ +// SSL_set1_curves_list sets the preferred curves for |ssl| to be the +// colon-separated list |curves|. Each element of |curves| should be a curve +// name (e.g. P-256, X25519, ...). It returns one on success and zero on +// failure. OPENSSL_EXPORT int SSL_set1_curves_list(SSL *ssl, const char *curves); -/* SSL_CURVE_* define TLS curve IDs. */ +// SSL_CURVE_* define TLS curve IDs. +#define SSL_CURVE_SECP224R1 21 #define SSL_CURVE_SECP256R1 23 #define SSL_CURVE_SECP384R1 24 #define SSL_CURVE_SECP521R1 25 #define SSL_CURVE_X25519 29 -/* SSL_get_curve_id returns the ID of the curve used by |ssl|'s most recently - * completed handshake or 0 if not applicable. - * - * TODO(davidben): This API currently does not work correctly if there is a - * renegotiation in progress. Fix this. */ +// SSL_get_curve_id returns the ID of the curve used by |ssl|'s most recently +// completed handshake or 0 if not applicable. +// +// TODO(davidben): This API currently does not work correctly if there is a +// renegotiation in progress. Fix this. OPENSSL_EXPORT uint16_t SSL_get_curve_id(const SSL *ssl); -/* SSL_get_curve_name returns a human-readable name for the curve specified by - * the given TLS curve id, or NULL if the curve is unknown. */ +// SSL_get_curve_name returns a human-readable name for the curve specified by +// the given TLS curve id, or NULL if the curve is unknown. OPENSSL_EXPORT const char *SSL_get_curve_name(uint16_t curve_id); -/* Multiplicative Diffie-Hellman. - * - * Cipher suites using a DHE key exchange perform Diffie-Hellman over a - * multiplicative group selected by the server. These ciphers are disabled for a - * server unless a group is chosen with one of these functions. */ - -/* SSL_CTX_set_tmp_dh configures |ctx| to use the group from |dh| as the group - * for DHE. Only the group is used, so |dh| needn't have a keypair. It returns - * one on success and zero on error. */ -OPENSSL_EXPORT int SSL_CTX_set_tmp_dh(SSL_CTX *ctx, const DH *dh); - -/* SSL_set_tmp_dh configures |ssl| to use the group from |dh| as the group for - * DHE. Only the group is used, so |dh| needn't have a keypair. It returns one - * on success and zero on error. */ -OPENSSL_EXPORT int SSL_set_tmp_dh(SSL *ssl, const DH *dh); - -/* SSL_CTX_set_tmp_dh_callback configures |ctx| to use |callback| to determine - * the group for DHE ciphers. |callback| should ignore |is_export| and - * |keylength| and return a |DH| of the selected group or NULL on error. Only - * the parameters are used, so the |DH| needn't have a generated keypair. 
- * - * WARNING: The caller does not take ownership of the resulting |DH|, so - * |callback| must save and release the object elsewhere. */ -OPENSSL_EXPORT void SSL_CTX_set_tmp_dh_callback( - SSL_CTX *ctx, DH *(*callback)(SSL *ssl, int is_export, int keylength)); - -/* SSL_set_tmp_dh_callback configures |ssl| to use |callback| to determine the - * group for DHE ciphers. |callback| should ignore |is_export| and |keylength| - * and return a |DH| of the selected group or NULL on error. Only the - * parameters are used, so the |DH| needn't have a generated keypair. - * - * WARNING: The caller does not take ownership of the resulting |DH|, so - * |callback| must save and release the object elsewhere. */ -OPENSSL_EXPORT void SSL_set_tmp_dh_callback(SSL *ssl, - DH *(*dh)(SSL *ssl, int is_export, - int keylength)); - - -/* Certificate verification. - * - * SSL may authenticate either endpoint with an X.509 certificate. Typically - * this is used to authenticate the server to the client. These functions - * configure certificate verification. - * - * WARNING: By default, certificate verification errors on a client are not - * fatal. See |SSL_VERIFY_NONE| This may be configured with - * |SSL_CTX_set_verify|. - * - * By default clients are anonymous but a server may request a certificate from - * the client by setting |SSL_VERIFY_PEER|. - * - * Many of these functions use OpenSSL's legacy X.509 stack which is - * underdocumented and deprecated, but the replacement isn't ready yet. For - * now, consumers may use the existing stack or bypass it by performing - * certificate verification externally. This may be done with - * |SSL_CTX_set_cert_verify_callback| or by extracting the chain with - * |SSL_get_peer_cert_chain| after the handshake. In the future, functions will - * be added to use the SSL stack without dependency on any part of the legacy - * X.509 and ASN.1 stack. - * - * To augment certificate verification, a client may also enable OCSP stapling - * (RFC 6066) and Certificate Transparency (RFC 6962) extensions. */ - -/* SSL_VERIFY_NONE, on a client, verifies the server certificate but does not - * make errors fatal. The result may be checked with |SSL_get_verify_result|. On - * a server it does not request a client certificate. This is the default. */ +// Certificate verification. +// +// SSL may authenticate either endpoint with an X.509 certificate. Typically +// this is used to authenticate the server to the client. These functions +// configure certificate verification. +// +// WARNING: By default, certificate verification errors on a client are not +// fatal. See |SSL_VERIFY_NONE| This may be configured with +// |SSL_CTX_set_verify|. +// +// By default clients are anonymous but a server may request a certificate from +// the client by setting |SSL_VERIFY_PEER|. +// +// Many of these functions use OpenSSL's legacy X.509 stack which is +// underdocumented and deprecated, but the replacement isn't ready yet. For +// now, consumers may use the existing stack or bypass it by performing +// certificate verification externally. This may be done with +// |SSL_CTX_set_cert_verify_callback| or by extracting the chain with +// |SSL_get_peer_cert_chain| after the handshake. In the future, functions will +// be added to use the SSL stack without dependency on any part of the legacy +// X.509 and ASN.1 stack. +// +// To augment certificate verification, a client may also enable OCSP stapling +// (RFC 6066) and Certificate Transparency (RFC 6962) extensions. 
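As a rough illustration of the verification modes described above, here is a minimal client-side sketch. It assumes an already-connected socket descriptor |fd| (a placeholder, not something defined in this header) and omits error handling and cleanup:

  // Make verification failures fatal and rely on the built-in verifier with
  // the system's default trust anchors.
  SSL_CTX *ctx = SSL_CTX_new(TLS_method());
  SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, NULL /* no extra callback */);
  SSL_CTX_set_default_verify_paths(ctx);

  SSL *ssl = SSL_new(ctx);
  SSL_set_fd(ssl, fd);
  if (SSL_connect(ssl) == 1 && SSL_get_verify_result(ssl) == X509_V_OK) {
    // The peer's certificate chained to a trusted root. Note that hostname
    // checking is configured separately, e.g. via the |X509_VERIFY_PARAM|
    // returned by |SSL_get0_param|.
  }

With |SSL_VERIFY_PEER| set, a failed verification aborts the handshake, so the |SSL_get_verify_result| check above is belt-and-braces; under the default |SSL_VERIFY_NONE| it would be the only signal of a verification error.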
+ +// SSL_VERIFY_NONE, on a client, verifies the server certificate but does not +// make errors fatal. The result may be checked with |SSL_get_verify_result|. On +// a server it does not request a client certificate. This is the default. #define SSL_VERIFY_NONE 0x00 -/* SSL_VERIFY_PEER, on a client, makes server certificate errors fatal. On a - * server it requests a client certificate and makes errors fatal. However, - * anonymous clients are still allowed. See - * |SSL_VERIFY_FAIL_IF_NO_PEER_CERT|. */ +// SSL_VERIFY_PEER, on a client, makes server certificate errors fatal. On a +// server it requests a client certificate and makes errors fatal. However, +// anonymous clients are still allowed. See +// |SSL_VERIFY_FAIL_IF_NO_PEER_CERT|. #define SSL_VERIFY_PEER 0x01 -/* SSL_VERIFY_FAIL_IF_NO_PEER_CERT configures a server to reject connections if - * the client declines to send a certificate. Otherwise |SSL_VERIFY_PEER| still - * allows anonymous clients. */ +// SSL_VERIFY_FAIL_IF_NO_PEER_CERT configures a server to reject connections if +// the client declines to send a certificate. This flag must be used together +// with |SSL_VERIFY_PEER|, otherwise it won't work. #define SSL_VERIFY_FAIL_IF_NO_PEER_CERT 0x02 -/* SSL_VERIFY_PEER_IF_NO_OBC configures a server to request a client certificate - * if and only if Channel ID is not negotiated. */ +// SSL_VERIFY_PEER_IF_NO_OBC configures a server to request a client certificate +// if and only if Channel ID is not negotiated. #define SSL_VERIFY_PEER_IF_NO_OBC 0x04 -/* SSL_CTX_set_verify configures certificate verification behavior. |mode| is - * one of the |SSL_VERIFY_*| values defined above. |callback|, if not NULL, is - * used to customize certificate verification. See the behavior of - * |X509_STORE_CTX_set_verify_cb|. - * - * The callback may use |SSL_get_ex_data_X509_STORE_CTX_idx| with - * |X509_STORE_CTX_get_ex_data| to look up the |SSL| from |store_ctx|. */ +// SSL_CTX_set_verify configures certificate verification behavior. |mode| is +// one of the |SSL_VERIFY_*| values defined above. |callback|, if not NULL, is +// used to customize certificate verification. See the behavior of +// |X509_STORE_CTX_set_verify_cb|. +// +// The callback may use |SSL_get_ex_data_X509_STORE_CTX_idx| with +// |X509_STORE_CTX_get_ex_data| to look up the |SSL| from |store_ctx|. OPENSSL_EXPORT void SSL_CTX_set_verify( SSL_CTX *ctx, int mode, int (*callback)(int ok, X509_STORE_CTX *store_ctx)); -/* SSL_set_verify configures certificate verification behavior. |mode| is one of - * the |SSL_VERIFY_*| values defined above. |callback|, if not NULL, is used to - * customize certificate verification. See the behavior of - * |X509_STORE_CTX_set_verify_cb|. - * - * The callback may use |SSL_get_ex_data_X509_STORE_CTX_idx| with - * |X509_STORE_CTX_get_ex_data| to look up the |SSL| from |store_ctx|. */ +// SSL_set_verify configures certificate verification behavior. |mode| is one of +// the |SSL_VERIFY_*| values defined above. |callback|, if not NULL, is used to +// customize certificate verification. See the behavior of +// |X509_STORE_CTX_set_verify_cb|. +// +// The callback may use |SSL_get_ex_data_X509_STORE_CTX_idx| with +// |X509_STORE_CTX_get_ex_data| to look up the |SSL| from |store_ctx|. OPENSSL_EXPORT void SSL_set_verify(SSL *ssl, int mode, int (*callback)(int ok, X509_STORE_CTX *store_ctx)); -/* SSL_CTX_get_verify_mode returns |ctx|'s verify mode, set by - * |SSL_CTX_set_verify|. 
*/ +enum ssl_verify_result_t { + ssl_verify_ok, + ssl_verify_invalid, + ssl_verify_retry, +}; + +// SSL_CTX_set_custom_verify configures certificate verification. |mode| is one +// of the |SSL_VERIFY_*| values defined above. |callback| performs the +// certificate verification. +// +// The callback may call |SSL_get0_peer_certificates| for the certificate chain +// to validate. The callback should return |ssl_verify_ok| if the certificate is +// valid. If the certificate is invalid, the callback should return +// |ssl_verify_invalid| and optionally set |*out_alert| to an alert to send to +// the peer. Some useful alerts include |SSL_AD_CERTIFICATE_EXPIRED|, +// |SSL_AD_CERTIFICATE_REVOKED|, |SSL_AD_UNKNOWN_CA|, |SSL_AD_BAD_CERTIFICATE|, +// |SSL_AD_CERTIFICATE_UNKNOWN|, and |SSL_AD_INTERNAL_ERROR|. See RFC 5246 +// section 7.2.2 for their precise meanings. If unspecified, +// |SSL_AD_CERTIFICATE_UNKNOWN| will be sent by default. +// +// To verify a certificate asynchronously, the callback may return +// |ssl_verify_retry|. The handshake will then pause with |SSL_get_error| +// returning |SSL_ERROR_WANT_CERTIFICATE_VERIFY|. +OPENSSL_EXPORT void SSL_CTX_set_custom_verify( + SSL_CTX *ctx, int mode, + enum ssl_verify_result_t (*callback)(SSL *ssl, uint8_t *out_alert)); + +// SSL_set_custom_verify behaves like |SSL_CTX_set_custom_verify| but configures +// an individual |SSL|. +OPENSSL_EXPORT void SSL_set_custom_verify( + SSL *ssl, int mode, + enum ssl_verify_result_t (*callback)(SSL *ssl, uint8_t *out_alert)); + +// SSL_CTX_get_verify_mode returns |ctx|'s verify mode, set by +// |SSL_CTX_set_verify|. OPENSSL_EXPORT int SSL_CTX_get_verify_mode(const SSL_CTX *ctx); -/* SSL_get_verify_mode returns |ssl|'s verify mode, set by |SSL_CTX_set_verify| - * or |SSL_set_verify|. */ +// SSL_get_verify_mode returns |ssl|'s verify mode, set by |SSL_CTX_set_verify| +// or |SSL_set_verify|. OPENSSL_EXPORT int SSL_get_verify_mode(const SSL *ssl); -/* SSL_CTX_get_verify_callback returns the callback set by - * |SSL_CTX_set_verify|. */ +// SSL_CTX_get_verify_callback returns the callback set by +// |SSL_CTX_set_verify|. OPENSSL_EXPORT int (*SSL_CTX_get_verify_callback(const SSL_CTX *ctx))( int ok, X509_STORE_CTX *store_ctx); -/* SSL_get_verify_callback returns the callback set by |SSL_CTX_set_verify| or - * |SSL_set_verify|. */ +// SSL_get_verify_callback returns the callback set by |SSL_CTX_set_verify| or +// |SSL_set_verify|. OPENSSL_EXPORT int (*SSL_get_verify_callback(const SSL *ssl))( int ok, X509_STORE_CTX *store_ctx); -/* SSL_CTX_set_verify_depth sets the maximum depth of a certificate chain - * accepted in verification. This number does not include the leaf, so a depth - * of 1 allows the leaf and one CA certificate. */ +// SSL_CTX_set_verify_depth sets the maximum depth of a certificate chain +// accepted in verification. This number does not include the leaf, so a depth +// of 1 allows the leaf and one CA certificate. OPENSSL_EXPORT void SSL_CTX_set_verify_depth(SSL_CTX *ctx, int depth); -/* SSL_set_verify_depth sets the maximum depth of a certificate chain accepted - * in verification. This number does not include the leaf, so a depth of 1 - * allows the leaf and one CA certificate. */ +// SSL_set_verify_depth sets the maximum depth of a certificate chain accepted +// in verification. This number does not include the leaf, so a depth of 1 +// allows the leaf and one CA certificate. 
OPENSSL_EXPORT void SSL_set_verify_depth(SSL *ssl, int depth); -/* SSL_CTX_get_verify_depth returns the maximum depth of a certificate accepted - * in verification. */ +// SSL_CTX_get_verify_depth returns the maximum depth of a certificate accepted +// in verification. OPENSSL_EXPORT int SSL_CTX_get_verify_depth(const SSL_CTX *ctx); -/* SSL_get_verify_depth returns the maximum depth of a certificate accepted in - * verification. */ +// SSL_get_verify_depth returns the maximum depth of a certificate accepted in +// verification. OPENSSL_EXPORT int SSL_get_verify_depth(const SSL *ssl); -/* SSL_CTX_set1_param sets verification parameters from |param|. It returns one - * on success and zero on failure. The caller retains ownership of |param|. */ +// SSL_CTX_set1_param sets verification parameters from |param|. It returns one +// on success and zero on failure. The caller retains ownership of |param|. OPENSSL_EXPORT int SSL_CTX_set1_param(SSL_CTX *ctx, const X509_VERIFY_PARAM *param); -/* SSL_set1_param sets verification parameters from |param|. It returns one on - * success and zero on failure. The caller retains ownership of |param|. */ +// SSL_set1_param sets verification parameters from |param|. It returns one on +// success and zero on failure. The caller retains ownership of |param|. OPENSSL_EXPORT int SSL_set1_param(SSL *ssl, const X509_VERIFY_PARAM *param); -/* SSL_CTX_get0_param returns |ctx|'s |X509_VERIFY_PARAM| for certificate - * verification. The caller must not release the returned pointer but may call - * functions on it to configure it. */ +// SSL_CTX_get0_param returns |ctx|'s |X509_VERIFY_PARAM| for certificate +// verification. The caller must not release the returned pointer but may call +// functions on it to configure it. OPENSSL_EXPORT X509_VERIFY_PARAM *SSL_CTX_get0_param(SSL_CTX *ctx); -/* SSL_get0_param returns |ssl|'s |X509_VERIFY_PARAM| for certificate - * verification. The caller must not release the returned pointer but may call - * functions on it to configure it. */ +// SSL_get0_param returns |ssl|'s |X509_VERIFY_PARAM| for certificate +// verification. The caller must not release the returned pointer but may call +// functions on it to configure it. OPENSSL_EXPORT X509_VERIFY_PARAM *SSL_get0_param(SSL *ssl); -/* SSL_CTX_set_purpose sets |ctx|'s |X509_VERIFY_PARAM|'s 'purpose' parameter to - * |purpose|. It returns one on success and zero on error. */ +// SSL_CTX_set_purpose sets |ctx|'s |X509_VERIFY_PARAM|'s 'purpose' parameter to +// |purpose|. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_CTX_set_purpose(SSL_CTX *ctx, int purpose); -/* SSL_set_purpose sets |ssl|'s |X509_VERIFY_PARAM|'s 'purpose' parameter to - * |purpose|. It returns one on success and zero on error. */ +// SSL_set_purpose sets |ssl|'s |X509_VERIFY_PARAM|'s 'purpose' parameter to +// |purpose|. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_set_purpose(SSL *ssl, int purpose); -/* SSL_CTX_set_trust sets |ctx|'s |X509_VERIFY_PARAM|'s 'trust' parameter to - * |trust|. It returns one on success and zero on error. */ +// SSL_CTX_set_trust sets |ctx|'s |X509_VERIFY_PARAM|'s 'trust' parameter to +// |trust|. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_CTX_set_trust(SSL_CTX *ctx, int trust); -/* SSL_set_trust sets |ssl|'s |X509_VERIFY_PARAM|'s 'trust' parameter to - * |trust|. It returns one on success and zero on error. */ +// SSL_set_trust sets |ssl|'s |X509_VERIFY_PARAM|'s 'trust' parameter to +// |trust|. 
It returns one on success and zero on error. OPENSSL_EXPORT int SSL_set_trust(SSL *ssl, int trust); -/* SSL_CTX_set_cert_store sets |ctx|'s certificate store to |store|. It takes - * ownership of |store|. The store is used for certificate verification. - * - * The store is also used for the auto-chaining feature, but this is deprecated. - * See also |SSL_MODE_NO_AUTO_CHAIN|. */ +// SSL_CTX_set_cert_store sets |ctx|'s certificate store to |store|. It takes +// ownership of |store|. The store is used for certificate verification. +// +// The store is also used for the auto-chaining feature, but this is deprecated. +// See also |SSL_MODE_NO_AUTO_CHAIN|. OPENSSL_EXPORT void SSL_CTX_set_cert_store(SSL_CTX *ctx, X509_STORE *store); -/* SSL_CTX_get_cert_store returns |ctx|'s certificate store. */ +// SSL_CTX_get_cert_store returns |ctx|'s certificate store. OPENSSL_EXPORT X509_STORE *SSL_CTX_get_cert_store(const SSL_CTX *ctx); -/* SSL_CTX_set_default_verify_paths loads the OpenSSL system-default trust - * anchors into |ctx|'s store. It returns one on success and zero on failure. */ +// SSL_CTX_set_default_verify_paths loads the OpenSSL system-default trust +// anchors into |ctx|'s store. It returns one on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_set_default_verify_paths(SSL_CTX *ctx); -/* SSL_CTX_load_verify_locations loads trust anchors into |ctx|'s store from - * |ca_file| and |ca_dir|, either of which may be NULL. If |ca_file| is passed, - * it is opened and PEM-encoded CA certificates are read. If |ca_dir| is passed, - * it is treated as a directory in OpenSSL's hashed directory format. It returns - * one on success and zero on failure. - * - * See - * https://www.openssl.org/docs/manmaster/ssl/SSL_CTX_load_verify_locations.html - * for documentation on the directory format. */ +// SSL_CTX_load_verify_locations loads trust anchors into |ctx|'s store from +// |ca_file| and |ca_dir|, either of which may be NULL. If |ca_file| is passed, +// it is opened and PEM-encoded CA certificates are read. If |ca_dir| is passed, +// it is treated as a directory in OpenSSL's hashed directory format. It returns +// one on success and zero on failure. +// +// See +// https://www.openssl.org/docs/manmaster/ssl/SSL_CTX_load_verify_locations.html +// for documentation on the directory format. OPENSSL_EXPORT int SSL_CTX_load_verify_locations(SSL_CTX *ctx, const char *ca_file, const char *ca_dir); -/* SSL_get_verify_result returns the result of certificate verification. It is - * either |X509_V_OK| or a |X509_V_ERR_*| value. */ +// SSL_get_verify_result returns the result of certificate verification. It is +// either |X509_V_OK| or a |X509_V_ERR_*| value. OPENSSL_EXPORT long SSL_get_verify_result(const SSL *ssl); -/* SSL_get_ex_data_X509_STORE_CTX_idx returns the ex_data index used to look up - * the |SSL| associated with an |X509_STORE_CTX| in the verify callback. */ +// SSL_get_ex_data_X509_STORE_CTX_idx returns the ex_data index used to look up +// the |SSL| associated with an |X509_STORE_CTX| in the verify callback. OPENSSL_EXPORT int SSL_get_ex_data_X509_STORE_CTX_idx(void); -/* SSL_CTX_set_cert_verify_callback sets a custom callback to be called on - * certificate verification rather than |X509_verify_cert|. |store_ctx| contains - * the verification parameters. The callback should return one on success and - * zero on fatal error. It may use |X509_STORE_CTX_set_error| to set a - * verification result. 
- * - * The callback may use |SSL_get_ex_data_X509_STORE_CTX_idx| to recover the - * |SSL| object from |store_ctx|. */ +// SSL_CTX_set_cert_verify_callback sets a custom callback to be called on +// certificate verification rather than |X509_verify_cert|. |store_ctx| contains +// the verification parameters. The callback should return one on success and +// zero on fatal error. It may use |X509_STORE_CTX_set_error| to set a +// verification result. +// +// The callback may use |SSL_get_ex_data_X509_STORE_CTX_idx| to recover the +// |SSL| object from |store_ctx|. OPENSSL_EXPORT void SSL_CTX_set_cert_verify_callback( SSL_CTX *ctx, int (*callback)(X509_STORE_CTX *store_ctx, void *arg), void *arg); -/* SSL_enable_signed_cert_timestamps causes |ssl| (which must be the client end - * of a connection) to request SCTs from the server. See - * https://tools.ietf.org/html/rfc6962. - * - * Call |SSL_get0_signed_cert_timestamp_list| to recover the SCT after the - * handshake. */ +// SSL_enable_signed_cert_timestamps causes |ssl| (which must be the client end +// of a connection) to request SCTs from the server. See +// https://tools.ietf.org/html/rfc6962. +// +// Call |SSL_get0_signed_cert_timestamp_list| to recover the SCT after the +// handshake. OPENSSL_EXPORT void SSL_enable_signed_cert_timestamps(SSL *ssl); -/* SSL_CTX_enable_signed_cert_timestamps enables SCT requests on all client SSL - * objects created from |ctx|. - * - * Call |SSL_get0_signed_cert_timestamp_list| to recover the SCT after the - * handshake. */ +// SSL_CTX_enable_signed_cert_timestamps enables SCT requests on all client SSL +// objects created from |ctx|. +// +// Call |SSL_get0_signed_cert_timestamp_list| to recover the SCT after the +// handshake. OPENSSL_EXPORT void SSL_CTX_enable_signed_cert_timestamps(SSL_CTX *ctx); -/* SSL_enable_ocsp_stapling causes |ssl| (which must be the client end of a - * connection) to request a stapled OCSP response from the server. - * - * Call |SSL_get0_ocsp_response| to recover the OCSP response after the - * handshake. */ +// SSL_enable_ocsp_stapling causes |ssl| (which must be the client end of a +// connection) to request a stapled OCSP response from the server. +// +// Call |SSL_get0_ocsp_response| to recover the OCSP response after the +// handshake. OPENSSL_EXPORT void SSL_enable_ocsp_stapling(SSL *ssl); -/* SSL_CTX_enable_ocsp_stapling enables OCSP stapling on all client SSL objects - * created from |ctx|. - * - * Call |SSL_get0_ocsp_response| to recover the OCSP response after the - * handshake. */ +// SSL_CTX_enable_ocsp_stapling enables OCSP stapling on all client SSL objects +// created from |ctx|. +// +// Call |SSL_get0_ocsp_response| to recover the OCSP response after the +// handshake. OPENSSL_EXPORT void SSL_CTX_enable_ocsp_stapling(SSL_CTX *ctx); -/* SSL_CTX_set0_verify_cert_store sets an |X509_STORE| that will be used - * exclusively for certificate verification and returns one. Ownership of - * |store| is transferred to the |SSL_CTX|. */ +// SSL_CTX_set0_verify_cert_store sets an |X509_STORE| that will be used +// exclusively for certificate verification and returns one. Ownership of +// |store| is transferred to the |SSL_CTX|. OPENSSL_EXPORT int SSL_CTX_set0_verify_cert_store(SSL_CTX *ctx, X509_STORE *store); -/* SSL_CTX_set1_verify_cert_store sets an |X509_STORE| that will be used - * exclusively for certificate verification and returns one. An additional - * reference to |store| will be taken. 
*/ +// SSL_CTX_set1_verify_cert_store sets an |X509_STORE| that will be used +// exclusively for certificate verification and returns one. An additional +// reference to |store| will be taken. OPENSSL_EXPORT int SSL_CTX_set1_verify_cert_store(SSL_CTX *ctx, X509_STORE *store); -/* SSL_set0_verify_cert_store sets an |X509_STORE| that will be used - * exclusively for certificate verification and returns one. Ownership of - * |store| is transferred to the |SSL|. */ +// SSL_set0_verify_cert_store sets an |X509_STORE| that will be used +// exclusively for certificate verification and returns one. Ownership of +// |store| is transferred to the |SSL|. OPENSSL_EXPORT int SSL_set0_verify_cert_store(SSL *ssl, X509_STORE *store); -/* SSL_set1_verify_cert_store sets an |X509_STORE| that will be used - * exclusively for certificate verification and returns one. An additional - * reference to |store| will be taken. */ +// SSL_set1_verify_cert_store sets an |X509_STORE| that will be used +// exclusively for certificate verification and returns one. An additional +// reference to |store| will be taken. OPENSSL_EXPORT int SSL_set1_verify_cert_store(SSL *ssl, X509_STORE *store); +// SSL_CTX_set_ed25519_enabled configures whether |ctx| advertises support for +// the Ed25519 signature algorithm when using the default preference list. +OPENSSL_EXPORT void SSL_CTX_set_ed25519_enabled(SSL_CTX *ctx, int enabled); + +// SSL_CTX_set_verify_algorithm_prefs configures |ctx| to use |prefs| as the +// preference list when verifying signatures from the peer's long-term key. It +// returns one on success and zero on error. |prefs| should not include the internal-only +// value |SSL_SIGN_RSA_PKCS1_MD5_SHA1|. +OPENSSL_EXPORT int SSL_CTX_set_verify_algorithm_prefs(SSL_CTX *ctx, + const uint16_t *prefs, + size_t num_prefs); -/* Client certificate CA list. - * - * When requesting a client certificate, a server may advertise a list of - * certificate authorities which are accepted. These functions may be used to - * configure this list. */ -/* SSL_set_client_CA_list sets |ssl|'s client certificate CA list to - * |name_list|. It takes ownership of |name_list|. */ +// Client certificate CA list. +// +// When requesting a client certificate, a server may advertise a list of +// certificate authorities which are accepted. These functions may be used to +// configure this list. + +// SSL_set_client_CA_list sets |ssl|'s client certificate CA list to +// |name_list|. It takes ownership of |name_list|. OPENSSL_EXPORT void SSL_set_client_CA_list(SSL *ssl, STACK_OF(X509_NAME) *name_list); -/* SSL_CTX_set_client_CA_list sets |ctx|'s client certificate CA list to - * |name_list|. It takes ownership of |name_list|. */ +// SSL_CTX_set_client_CA_list sets |ctx|'s client certificate CA list to +// |name_list|. It takes ownership of |name_list|. OPENSSL_EXPORT void SSL_CTX_set_client_CA_list(SSL_CTX *ctx, STACK_OF(X509_NAME) *name_list); -/* SSL_get_client_CA_list returns |ssl|'s client certificate CA list. If |ssl| - * has not been configured as a client, this is the list configured by - * |SSL_CTX_set_client_CA_list|. - * - * If configured as a client, it returns the client certificate CA list sent by - * the server. In this mode, the behavior is undefined except during the - * callbacks set by |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb| or - * when the handshake is paused because of them. */ +// SSL_set0_client_CAs sets |ssl|'s client certificate CA list to |name_list|, +// which should contain DER-encoded distinguished names (RFC 5280).
It takes +// ownership of |name_list|. +OPENSSL_EXPORT void SSL_set0_client_CAs(SSL *ssl, + STACK_OF(CRYPTO_BUFFER) *name_list); + +// SSL_CTX_set0_client_CAs sets |ctx|'s client certificate CA list to +// |name_list|, which should contain DER-encoded distinguished names (RFC 5280). +// It takes ownership of |name_list|. +OPENSSL_EXPORT void SSL_CTX_set0_client_CAs(SSL_CTX *ctx, + STACK_OF(CRYPTO_BUFFER) *name_list); + +// SSL_get_client_CA_list returns |ssl|'s client certificate CA list. If |ssl| +// has not been configured as a client, this is the list configured by +// |SSL_CTX_set_client_CA_list|. +// +// If configured as a client, it returns the client certificate CA list sent by +// the server. In this mode, the behavior is undefined except during the +// callbacks set by |SSL_CTX_set_cert_cb| and |SSL_CTX_set_client_cert_cb| or +// when the handshake is paused because of them. OPENSSL_EXPORT STACK_OF(X509_NAME) *SSL_get_client_CA_list(const SSL *ssl); -/* SSL_CTX_get_client_CA_list returns |ctx|'s client certificate CA list. */ +// SSL_get0_server_requested_CAs returns the CAs sent by a server to guide a +// client in certificate selection. They are a series of DER-encoded X.509 +// names. This function may only be called during a callback set by +// |SSL_CTX_set_cert_cb| or when the handshake is paused because of it. +// +// The returned stack is owned by |ssl|, as are its contents. It should not be +// used past the point where the handshake is restarted after the callback. +OPENSSL_EXPORT STACK_OF(CRYPTO_BUFFER) *SSL_get0_server_requested_CAs( + const SSL *ssl); + +// SSL_CTX_get_client_CA_list returns |ctx|'s client certificate CA list. OPENSSL_EXPORT STACK_OF(X509_NAME) * SSL_CTX_get_client_CA_list(const SSL_CTX *ctx); -/* SSL_add_client_CA appends |x509|'s subject to the client certificate CA list. - * It returns one on success or zero on error. The caller retains ownership of - * |x509|. */ +// SSL_add_client_CA appends |x509|'s subject to the client certificate CA list. +// It returns one on success or zero on error. The caller retains ownership of +// |x509|. OPENSSL_EXPORT int SSL_add_client_CA(SSL *ssl, X509 *x509); -/* SSL_CTX_add_client_CA appends |x509|'s subject to the client certificate CA - * list. It returns one on success or zero on error. The caller retains - * ownership of |x509|. */ +// SSL_CTX_add_client_CA appends |x509|'s subject to the client certificate CA +// list. It returns one on success or zero on error. The caller retains +// ownership of |x509|. OPENSSL_EXPORT int SSL_CTX_add_client_CA(SSL_CTX *ctx, X509 *x509); -/* SSL_load_client_CA_file opens |file| and reads PEM-encoded certificates from - * it. It returns a newly-allocated stack of the certificate subjects or NULL - * on error. */ +// SSL_load_client_CA_file opens |file| and reads PEM-encoded certificates from +// it. It returns a newly-allocated stack of the certificate subjects or NULL +// on error. OPENSSL_EXPORT STACK_OF(X509_NAME) *SSL_load_client_CA_file(const char *file); -/* SSL_dup_CA_list makes a deep copy of |list|. It returns the new list on - * success or NULL on allocation error. */ +// SSL_dup_CA_list makes a deep copy of |list|. It returns the new list on +// success or NULL on allocation error. OPENSSL_EXPORT STACK_OF(X509_NAME) *SSL_dup_CA_list(STACK_OF(X509_NAME) *list); -/* SSL_add_file_cert_subjects_to_stack behaves like |SSL_load_client_CA_file| - * but appends the result to |out|. It returns one on success or zero on - * error. 
*/ +// SSL_add_file_cert_subjects_to_stack behaves like |SSL_load_client_CA_file| +// but appends the result to |out|. It returns one on success or zero on +// error. OPENSSL_EXPORT int SSL_add_file_cert_subjects_to_stack(STACK_OF(X509_NAME) *out, const char *file); -/* Server name indication. - * - * The server_name extension (RFC 3546) allows the client to advertise the name - * of the server it is connecting to. This is used in virtual hosting - * deployments to select one of a several certificates on a single IP. Only the - * host_name name type is supported. */ +// Server name indication. +// +// The server_name extension (RFC 3546) allows the client to advertise the name +// of the server it is connecting to. This is used in virtual hosting +// deployments to select one of a several certificates on a single IP. Only the +// host_name name type is supported. #define TLSEXT_NAMETYPE_host_name 0 -/* SSL_set_tlsext_host_name, for a client, configures |ssl| to advertise |name| - * in the server_name extension. It returns one on success and zero on error. */ +// SSL_set_tlsext_host_name, for a client, configures |ssl| to advertise |name| +// in the server_name extension. It returns one on success and zero on error. OPENSSL_EXPORT int SSL_set_tlsext_host_name(SSL *ssl, const char *name); -/* SSL_get_servername, for a server, returns the hostname supplied by the - * client or NULL if there was none. The |type| argument must be - * |TLSEXT_NAMETYPE_host_name|. */ +// SSL_get_servername, for a server, returns the hostname supplied by the +// client or NULL if there was none. The |type| argument must be +// |TLSEXT_NAMETYPE_host_name|. OPENSSL_EXPORT const char *SSL_get_servername(const SSL *ssl, const int type); -/* SSL_get_servername_type, for a server, returns |TLSEXT_NAMETYPE_host_name| - * if the client sent a hostname and -1 otherwise. */ +// SSL_get_servername_type, for a server, returns |TLSEXT_NAMETYPE_host_name| +// if the client sent a hostname and -1 otherwise. OPENSSL_EXPORT int SSL_get_servername_type(const SSL *ssl); -/* SSL_CTX_set_tlsext_servername_callback configures |callback| to be called on - * the server after ClientHello extensions have been parsed and returns one. - * The callback may use |SSL_get_servername| to examine the server_name - * extension and returns a |SSL_TLSEXT_ERR_*| value. The value of |arg| may be - * set by calling |SSL_CTX_set_tlsext_servername_arg|. - * - * If the callback returns |SSL_TLSEXT_ERR_NOACK|, the server_name extension is - * not acknowledged in the ServerHello. If the return value is - * |SSL_TLSEXT_ERR_ALERT_FATAL|, then |*out_alert| is the alert to send, - * defaulting to |SSL_AD_UNRECOGNIZED_NAME|. |SSL_TLSEXT_ERR_ALERT_WARNING| is - * ignored and treated as |SSL_TLSEXT_ERR_OK|. */ +// SSL_CTX_set_tlsext_servername_callback configures |callback| to be called on +// the server after ClientHello extensions have been parsed and returns one. +// The callback may use |SSL_get_servername| to examine the server_name +// extension and returns a |SSL_TLSEXT_ERR_*| value. The value of |arg| may be +// set by calling |SSL_CTX_set_tlsext_servername_arg|. +// +// If the callback returns |SSL_TLSEXT_ERR_NOACK|, the server_name extension is +// not acknowledged in the ServerHello. If the return value is +// |SSL_TLSEXT_ERR_ALERT_FATAL|, then |*out_alert| is the alert to send, +// defaulting to |SSL_AD_UNRECOGNIZED_NAME|. |SSL_TLSEXT_ERR_ALERT_WARNING| is +// ignored and treated as |SSL_TLSEXT_ERR_OK|. 
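The server_name callback documented above is typically used to switch certificates per host. A sketch, assuming a hypothetical pre-built |SSL_CTX| for example.com and using |SSL_set_SSL_CTX| (documented below):

#include <openssl/ssl.h>
#include <string.h>

// Hypothetical context holding the certificate chain for example.com.
extern SSL_CTX *g_example_com_ctx;

static int servername_cb(SSL *ssl, int *out_alert, void *arg) {
  (void)out_alert;
  (void)arg;
  const char *host = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
  if (host == NULL) {
    return SSL_TLSEXT_ERR_NOACK;  // no server_name extension was offered
  }
  if (strcmp(host, "example.com") == 0) {
    SSL_set_SSL_CTX(ssl, g_example_com_ctx);  // switch certificate settings
  }
  return SSL_TLSEXT_ERR_OK;
}

void configure_sni(SSL_CTX *default_ctx) {
  SSL_CTX_set_tlsext_servername_callback(default_ctx, servername_cb);
}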
OPENSSL_EXPORT int SSL_CTX_set_tlsext_servername_callback( SSL_CTX *ctx, int (*callback)(SSL *ssl, int *out_alert, void *arg)); -/* SSL_CTX_set_tlsext_servername_arg sets the argument to the servername - * callback and returns one. See |SSL_CTX_set_tlsext_servername_callback|. */ +// SSL_CTX_set_tlsext_servername_arg sets the argument to the servername +// callback and returns one. See |SSL_CTX_set_tlsext_servername_callback|. OPENSSL_EXPORT int SSL_CTX_set_tlsext_servername_arg(SSL_CTX *ctx, void *arg); -/* SSL_TLSEXT_ERR_* are values returned by some extension-related callbacks. */ +// SSL_TLSEXT_ERR_* are values returned by some extension-related callbacks. #define SSL_TLSEXT_ERR_OK 0 #define SSL_TLSEXT_ERR_ALERT_WARNING 1 #define SSL_TLSEXT_ERR_ALERT_FATAL 2 #define SSL_TLSEXT_ERR_NOACK 3 +// SSL_set_SSL_CTX changes |ssl|'s |SSL_CTX|. |ssl| will use the +// certificate-related settings from |ctx|, and |SSL_get_SSL_CTX| will report +// |ctx|. This function may be used during the callbacks registered by +// |SSL_CTX_set_select_certificate_cb|, +// |SSL_CTX_set_tlsext_servername_callback|, and |SSL_CTX_set_cert_cb| or when +// the handshake is paused from them. It is typically used to switch +// certificates based on SNI. +// +// Note the session cache and related settings will continue to use the initial +// |SSL_CTX|. Callers should use |SSL_CTX_set_session_id_context| to partition +// the session cache between different domains. +// +// TODO(davidben): Should other settings change after this call? +OPENSSL_EXPORT SSL_CTX *SSL_set_SSL_CTX(SSL *ssl, SSL_CTX *ctx); -/* Application-layer protocol negotiation. - * - * The ALPN extension (RFC 7301) allows negotiating different application-layer - * protocols over a single port. This is used, for example, to negotiate - * HTTP/2. */ - -/* SSL_CTX_set_alpn_protos sets the client ALPN protocol list on |ctx| to - * |protos|. |protos| must be in wire-format (i.e. a series of non-empty, 8-bit - * length-prefixed strings). It returns zero on success and one on failure. - * Configuring this list enables ALPN on a client. - * - * WARNING: this function is dangerous because it breaks the usual return value - * convention. */ + +// Application-layer protocol negotiation. +// +// The ALPN extension (RFC 7301) allows negotiating different application-layer +// protocols over a single port. This is used, for example, to negotiate +// HTTP/2. + +// SSL_CTX_set_alpn_protos sets the client ALPN protocol list on |ctx| to +// |protos|. |protos| must be in wire-format (i.e. a series of non-empty, 8-bit +// length-prefixed strings). It returns zero on success and one on failure. +// Configuring this list enables ALPN on a client. +// +// WARNING: this function is dangerous because it breaks the usual return value +// convention. OPENSSL_EXPORT int SSL_CTX_set_alpn_protos(SSL_CTX *ctx, const uint8_t *protos, unsigned protos_len); -/* SSL_set_alpn_protos sets the client ALPN protocol list on |ssl| to |protos|. - * |protos| must be in wire-format (i.e. a series of non-empty, 8-bit - * length-prefixed strings). It returns zero on success and one on failure. - * Configuring this list enables ALPN on a client. - * - * WARNING: this function is dangerous because it breaks the usual return value - * convention. */ +// SSL_set_alpn_protos sets the client ALPN protocol list on |ssl| to |protos|. +// |protos| must be in wire-format (i.e. a series of non-empty, 8-bit +// length-prefixed strings). It returns zero on success and one on failure. 
+// Configuring this list enables ALPN on a client. +// +// WARNING: this function is dangerous because it breaks the usual return value +// convention. OPENSSL_EXPORT int SSL_set_alpn_protos(SSL *ssl, const uint8_t *protos, unsigned protos_len); -/* SSL_CTX_set_alpn_select_cb sets a callback function on |ctx| that is called - * during ClientHello processing in order to select an ALPN protocol from the - * client's list of offered protocols. Configuring this callback enables ALPN on - * a server. - * - * The callback is passed a wire-format (i.e. a series of non-empty, 8-bit - * length-prefixed strings) ALPN protocol list in |in|. It should set |*out| and - * |*out_len| to the selected protocol and return |SSL_TLSEXT_ERR_OK| on - * success. It does not pass ownership of the buffer. Otherwise, it should - * return |SSL_TLSEXT_ERR_NOACK|. Other |SSL_TLSEXT_ERR_*| values are - * unimplemented and will be treated as |SSL_TLSEXT_ERR_NOACK|. - * - * The cipher suite is selected before negotiating ALPN. The callback may use - * |SSL_get_pending_cipher| to query the cipher suite. */ +// SSL_CTX_set_alpn_select_cb sets a callback function on |ctx| that is called +// during ClientHello processing in order to select an ALPN protocol from the +// client's list of offered protocols. Configuring this callback enables ALPN on +// a server. +// +// The callback is passed a wire-format (i.e. a series of non-empty, 8-bit +// length-prefixed strings) ALPN protocol list in |in|. It should set |*out| and +// |*out_len| to the selected protocol and return |SSL_TLSEXT_ERR_OK| on +// success. It does not pass ownership of the buffer. Otherwise, it should +// return |SSL_TLSEXT_ERR_NOACK|. Other |SSL_TLSEXT_ERR_*| values are +// unimplemented and will be treated as |SSL_TLSEXT_ERR_NOACK|. +// +// The cipher suite is selected before negotiating ALPN. The callback may use +// |SSL_get_pending_cipher| to query the cipher suite. OPENSSL_EXPORT void SSL_CTX_set_alpn_select_cb( SSL_CTX *ctx, int (*cb)(SSL *ssl, const uint8_t **out, uint8_t *out_len, const uint8_t *in, unsigned in_len, void *arg), void *arg); -/* SSL_get0_alpn_selected gets the selected ALPN protocol (if any) from |ssl|. - * On return it sets |*out_data| to point to |*out_len| bytes of protocol name - * (not including the leading length-prefix byte). If the server didn't respond - * with a negotiated protocol then |*out_len| will be zero. */ +// SSL_get0_alpn_selected gets the selected ALPN protocol (if any) from |ssl|. +// On return it sets |*out_data| to point to |*out_len| bytes of protocol name +// (not including the leading length-prefix byte). If the server didn't respond +// with a negotiated protocol then |*out_len| will be zero. OPENSSL_EXPORT void SSL_get0_alpn_selected(const SSL *ssl, const uint8_t **out_data, unsigned *out_len); - -/* Next protocol negotiation. - * - * The NPN extension (draft-agl-tls-nextprotoneg-03) is the predecessor to ALPN - * and deprecated in favor of it. */ - -/* SSL_CTX_set_next_protos_advertised_cb sets a callback that is called when a - * TLS server needs a list of supported protocols for Next Protocol - * Negotiation. The returned list must be in wire format. The list is returned - * by setting |*out| to point to it and |*out_len| to its length. This memory - * will not be modified, but one should assume that |ssl| keeps a reference to - * it. - * - * The callback should return |SSL_TLSEXT_ERR_OK| if it wishes to advertise. - * Otherwise, no such extension will be included in the ServerHello. 
*/ +// SSL_CTX_set_allow_unknown_alpn_protos configures client connections on |ctx| +// to allow unknown ALPN protocols from the server. Otherwise, by default, the +// client will require that the protocol be advertised in +// |SSL_CTX_set_alpn_protos|. +OPENSSL_EXPORT void SSL_CTX_set_allow_unknown_alpn_protos(SSL_CTX *ctx, + int enabled); + + +// Next protocol negotiation. +// +// The NPN extension (draft-agl-tls-nextprotoneg-03) is the predecessor to ALPN +// and deprecated in favor of it. + +// SSL_CTX_set_next_protos_advertised_cb sets a callback that is called when a +// TLS server needs a list of supported protocols for Next Protocol +// Negotiation. The returned list must be in wire format. The list is returned +// by setting |*out| to point to it and |*out_len| to its length. This memory +// will not be modified, but one should assume that |ssl| keeps a reference to +// it. +// +// The callback should return |SSL_TLSEXT_ERR_OK| if it wishes to advertise. +// Otherwise, no such extension will be included in the ServerHello. OPENSSL_EXPORT void SSL_CTX_set_next_protos_advertised_cb( SSL_CTX *ctx, int (*cb)(SSL *ssl, const uint8_t **out, unsigned *out_len, void *arg), void *arg); -/* SSL_CTX_set_next_proto_select_cb sets a callback that is called when a client - * needs to select a protocol from the server's provided list. |*out| must be - * set to point to the selected protocol (which may be within |in|). The length - * of the protocol name must be written into |*out_len|. The server's advertised - * protocols are provided in |in| and |in_len|. The callback can assume that - * |in| is syntactically valid. - * - * The client must select a protocol. It is fatal to the connection if this - * callback returns a value other than |SSL_TLSEXT_ERR_OK|. - * - * Configuring this callback enables NPN on a client. */ +// SSL_CTX_set_next_proto_select_cb sets a callback that is called when a client +// needs to select a protocol from the server's provided list. |*out| must be +// set to point to the selected protocol (which may be within |in|). The length +// of the protocol name must be written into |*out_len|. The server's advertised +// protocols are provided in |in| and |in_len|. The callback can assume that +// |in| is syntactically valid. +// +// The client must select a protocol. It is fatal to the connection if this +// callback returns a value other than |SSL_TLSEXT_ERR_OK|. +// +// Configuring this callback enables NPN on a client. OPENSSL_EXPORT void SSL_CTX_set_next_proto_select_cb( SSL_CTX *ctx, int (*cb)(SSL *ssl, uint8_t **out, uint8_t *out_len, const uint8_t *in, unsigned in_len, void *arg), void *arg); -/* SSL_get0_next_proto_negotiated sets |*out_data| and |*out_len| to point to - * the client's requested protocol for this connection. If the client didn't - * request any protocol, then |*out_data| is set to NULL. - * - * Note that the client can request any protocol it chooses. The value returned - * from this function need not be a member of the list of supported protocols - * provided by the server. */ +// SSL_get0_next_proto_negotiated sets |*out_data| and |*out_len| to point to +// the client's requested protocol for this connection. If the client didn't +// request any protocol, then |*out_data| is set to NULL. +// +// Note that the client can request any protocol it chooses. The value returned +// from this function need not be a member of the list of supported protocols +// provided by the server. 
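A sketch of both sides of the ALPN functions documented above. The wire-format list offers the hypothetical protocols "h2" and "http/1.1", and the server callback reuses |SSL_select_next_proto| (documented below) to pick the first client protocol it also supports:

#include <openssl/ssl.h>

// Wire format: each protocol is prefixed with its one-byte length.
static const uint8_t kAlpnProtos[] = "\x02h2\x08http/1.1";

static int alpn_select_cb(SSL *ssl, const uint8_t **out, uint8_t *out_len,
                          const uint8_t *in, unsigned in_len, void *arg) {
  (void)ssl;
  (void)arg;
  uint8_t *selected;
  if (SSL_select_next_proto(&selected, out_len, in, in_len, kAlpnProtos,
                            sizeof(kAlpnProtos) - 1) == OPENSSL_NPN_NEGOTIATED) {
    *out = selected;  // points into |in|; ownership is not passed
    return SSL_TLSEXT_ERR_OK;
  }
  return SSL_TLSEXT_ERR_NOACK;  // no overlap; ALPN is not negotiated
}

int configure_alpn(SSL_CTX *client_ctx, SSL_CTX *server_ctx) {
  // Client: offering a list enables ALPN. Note the inverted return value
  // (zero on success) called out in the warning above.
  if (SSL_CTX_set_alpn_protos(client_ctx, kAlpnProtos,
                              sizeof(kAlpnProtos) - 1) != 0) {
    return 0;
  }
  // Server: configuring the callback enables ALPN.
  SSL_CTX_set_alpn_select_cb(server_ctx, alpn_select_cb, NULL);
  return 1;
}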
OPENSSL_EXPORT void SSL_get0_next_proto_negotiated(const SSL *ssl, const uint8_t **out_data, unsigned *out_len); -/* SSL_select_next_proto implements the standard protocol selection. It is - * expected that this function is called from the callback set by - * |SSL_CTX_set_next_proto_select_cb|. - * - * The protocol data is assumed to be a vector of 8-bit, length prefixed byte - * strings. The length byte itself is not included in the length. A byte - * string of length 0 is invalid. No byte string may be truncated. - * - * The current, but experimental algorithm for selecting the protocol is: - * - * 1) If the server doesn't support NPN then this is indicated to the - * callback. In this case, the client application has to abort the connection - * or have a default application level protocol. - * - * 2) If the server supports NPN, but advertises an empty list then the - * client selects the first protocol in its list, but indicates via the - * API that this fallback case was enacted. - * - * 3) Otherwise, the client finds the first protocol in the server's list - * that it supports and selects this protocol. This is because it's - * assumed that the server has better information about which protocol - * a client should use. - * - * 4) If the client doesn't support any of the server's advertised - * protocols, then this is treated the same as case 2. - * - * It returns either |OPENSSL_NPN_NEGOTIATED| if a common protocol was found, or - * |OPENSSL_NPN_NO_OVERLAP| if the fallback case was reached. */ +// SSL_select_next_proto implements the standard protocol selection. It is +// expected that this function is called from the callback set by +// |SSL_CTX_set_next_proto_select_cb|. +// +// |peer| and |supported| must be vectors of 8-bit, length-prefixed byte strings +// containing the peer and locally-configured protocols, respectively. The +// length byte itself is not included in the length. A byte string of length 0 +// is invalid. No byte string may be truncated. |supported| is assumed to be +// non-empty. +// +// This function finds the first protocol in |peer| which is also in +// |supported|. If one was found, it sets |*out| and |*out_len| to point to it +// and returns |OPENSSL_NPN_NEGOTIATED|. Otherwise, it returns +// |OPENSSL_NPN_NO_OVERLAP| and sets |*out| and |*out_len| to the first +// supported protocol. OPENSSL_EXPORT int SSL_select_next_proto(uint8_t **out, uint8_t *out_len, - const uint8_t *server, - unsigned server_len, - const uint8_t *client, - unsigned client_len); + const uint8_t *peer, unsigned peer_len, + const uint8_t *supported, + unsigned supported_len); #define OPENSSL_NPN_UNSUPPORTED 0 #define OPENSSL_NPN_NEGOTIATED 1 #define OPENSSL_NPN_NO_OVERLAP 2 -/* Channel ID. - * - * See draft-balfanz-tls-channelid-01. */ +// Channel ID. +// +// See draft-balfanz-tls-channelid-01. -/* SSL_CTX_set_tls_channel_id_enabled configures whether connections associated - * with |ctx| should enable Channel ID. */ +// SSL_CTX_set_tls_channel_id_enabled configures whether connections associated +// with |ctx| should enable Channel ID. OPENSSL_EXPORT void SSL_CTX_set_tls_channel_id_enabled(SSL_CTX *ctx, int enabled); -/* SSL_set_tls_channel_id_enabled configures whether |ssl| should enable Channel - * ID. */ +// SSL_set_tls_channel_id_enabled configures whether |ssl| should enable Channel +// ID. OPENSSL_EXPORT void SSL_set_tls_channel_id_enabled(SSL *ssl, int enabled); -/* SSL_CTX_set1_tls_channel_id configures a TLS client to send a TLS Channel ID - * to compatible servers. 
|private_key| must be a P-256 EC key. It returns one - * on success and zero on error. */ +// SSL_CTX_set1_tls_channel_id configures a TLS client to send a TLS Channel ID +// to compatible servers. |private_key| must be a P-256 EC key. It returns one +// on success and zero on error. OPENSSL_EXPORT int SSL_CTX_set1_tls_channel_id(SSL_CTX *ctx, EVP_PKEY *private_key); -/* SSL_set1_tls_channel_id configures a TLS client to send a TLS Channel ID to - * compatible servers. |private_key| must be a P-256 EC key. It returns one on - * success and zero on error. */ +// SSL_set1_tls_channel_id configures a TLS client to send a TLS Channel ID to +// compatible servers. |private_key| must be a P-256 EC key. It returns one on +// success and zero on error. OPENSSL_EXPORT int SSL_set1_tls_channel_id(SSL *ssl, EVP_PKEY *private_key); -/* SSL_get_tls_channel_id gets the client's TLS Channel ID from a server |SSL*| - * and copies up to the first |max_out| bytes into |out|. The Channel ID - * consists of the client's P-256 public key as an (x,y) pair where each is a - * 32-byte, big-endian field element. It returns 0 if the client didn't offer a - * Channel ID and the length of the complete Channel ID otherwise. */ +// SSL_get_tls_channel_id gets the client's TLS Channel ID from a server |SSL*| +// and copies up to the first |max_out| bytes into |out|. The Channel ID +// consists of the client's P-256 public key as an (x,y) pair where each is a +// 32-byte, big-endian field element. It returns 0 if the client didn't offer a +// Channel ID and the length of the complete Channel ID otherwise. OPENSSL_EXPORT size_t SSL_get_tls_channel_id(SSL *ssl, uint8_t *out, size_t max_out); -/* SSL_CTX_set_channel_id_cb sets a callback to be called when a TLS Channel ID - * is requested. The callback may set |*out_pkey| to a key, passing a reference - * to the caller. If none is returned, the handshake will pause and - * |SSL_get_error| will return |SSL_ERROR_WANT_CHANNEL_ID_LOOKUP|. - * - * See also |SSL_ERROR_WANT_CHANNEL_ID_LOOKUP|. */ +// SSL_CTX_set_channel_id_cb sets a callback to be called when a TLS Channel ID +// is requested. The callback may set |*out_pkey| to a key, passing a reference +// to the caller. If none is returned, the handshake will pause and +// |SSL_get_error| will return |SSL_ERROR_WANT_CHANNEL_ID_LOOKUP|. +// +// See also |SSL_ERROR_WANT_CHANNEL_ID_LOOKUP|. OPENSSL_EXPORT void SSL_CTX_set_channel_id_cb( SSL_CTX *ctx, void (*channel_id_cb)(SSL *ssl, EVP_PKEY **out_pkey)); -/* SSL_CTX_get_channel_id_cb returns the callback set by - * |SSL_CTX_set_channel_id_cb|. */ +// SSL_CTX_get_channel_id_cb returns the callback set by +// |SSL_CTX_set_channel_id_cb|. OPENSSL_EXPORT void (*SSL_CTX_get_channel_id_cb(SSL_CTX *ctx))( SSL *ssl, EVP_PKEY **out_pkey); -/* DTLS-SRTP. - * - * See RFC 5764. */ +// DTLS-SRTP. +// +// See RFC 5764. -/* srtp_protection_profile_st (aka |SRTP_PROTECTION_PROFILE|) is an SRTP - * profile for use with the use_srtp extension. */ +// srtp_protection_profile_st (aka |SRTP_PROTECTION_PROFILE|) is an SRTP +// profile for use with the use_srtp extension. struct srtp_protection_profile_st { const char *name; unsigned long id; } /* SRTP_PROTECTION_PROFILE */; -DECLARE_STACK_OF(SRTP_PROTECTION_PROFILE) +DEFINE_CONST_STACK_OF(SRTP_PROTECTION_PROFILE) -/* SRTP_* define constants for SRTP profiles. */ +// SRTP_* define constants for SRTP profiles. 
#define SRTP_AES128_CM_SHA1_80 0x0001 #define SRTP_AES128_CM_SHA1_32 0x0002 #define SRTP_AES128_F8_SHA1_80 0x0003 @@ -2619,132 +2806,207 @@ DECLARE_STACK_OF(SRTP_PROTECTION_PROFILE) #define SRTP_AEAD_AES_128_GCM 0x0007 #define SRTP_AEAD_AES_256_GCM 0x0008 -/* SSL_CTX_set_srtp_profiles enables SRTP for all SSL objects created from - * |ctx|. |profile| contains a colon-separated list of profile names. It returns - * one on success and zero on failure. */ +// SSL_CTX_set_srtp_profiles enables SRTP for all SSL objects created from +// |ctx|. |profile| contains a colon-separated list of profile names. It returns +// one on success and zero on failure. OPENSSL_EXPORT int SSL_CTX_set_srtp_profiles(SSL_CTX *ctx, const char *profiles); -/* SSL_set_srtp_profiles enables SRTP for |ssl|. |profile| contains a - * colon-separated list of profile names. It returns one on success and zero on - * failure. */ +// SSL_set_srtp_profiles enables SRTP for |ssl|. |profile| contains a +// colon-separated list of profile names. It returns one on success and zero on +// failure. OPENSSL_EXPORT int SSL_set_srtp_profiles(SSL *ssl, const char *profiles); -/* SSL_get_srtp_profiles returns the SRTP profiles supported by |ssl|. */ +// SSL_get_srtp_profiles returns the SRTP profiles supported by |ssl|. OPENSSL_EXPORT STACK_OF(SRTP_PROTECTION_PROFILE) *SSL_get_srtp_profiles( SSL *ssl); -/* SSL_get_selected_srtp_profile returns the selected SRTP profile, or NULL if - * SRTP was not negotiated. */ +// SSL_get_selected_srtp_profile returns the selected SRTP profile, or NULL if +// SRTP was not negotiated. OPENSSL_EXPORT const SRTP_PROTECTION_PROFILE *SSL_get_selected_srtp_profile( SSL *ssl); -/* Pre-shared keys. - * - * Connections may be configured with PSK (Pre-Shared Key) cipher suites. These - * authenticate using out-of-band pre-shared keys rather than certificates. See - * RFC 4279. - * - * This implementation uses NUL-terminated C strings for identities and identity - * hints, so values with a NUL character are not supported. (RFC 4279 does not - * specify the format of an identity.) */ +// Pre-shared keys. +// +// Connections may be configured with PSK (Pre-Shared Key) cipher suites. These +// authenticate using out-of-band pre-shared keys rather than certificates. See +// RFC 4279. +// +// This implementation uses NUL-terminated C strings for identities and identity +// hints, so values with a NUL character are not supported. (RFC 4279 does not +// specify the format of an identity.) -/* PSK_MAX_IDENTITY_LEN is the maximum supported length of a PSK identity, - * excluding the NUL terminator. */ +// PSK_MAX_IDENTITY_LEN is the maximum supported length of a PSK identity, +// excluding the NUL terminator. #define PSK_MAX_IDENTITY_LEN 128 -/* PSK_MAX_PSK_LEN is the maximum supported length of a pre-shared key. */ +// PSK_MAX_PSK_LEN is the maximum supported length of a pre-shared key. #define PSK_MAX_PSK_LEN 256 -/* SSL_CTX_set_psk_client_callback sets the callback to be called when PSK is - * negotiated on the client. This callback must be set to enable PSK cipher - * suites on the client. - * - * The callback is passed the identity hint in |hint| or NULL if none was - * provided. It should select a PSK identity and write the identity and the - * corresponding PSK to |identity| and |psk|, respectively. The identity is - * written as a NUL-terminated C string of length (excluding the NUL terminator) - * at most |max_identity_len|. The PSK's length must be at most |max_psk_len|. 
- * The callback returns the length of the PSK or 0 if no suitable identity was - * found. */ +// SSL_CTX_set_psk_client_callback sets the callback to be called when PSK is +// negotiated on the client. This callback must be set to enable PSK cipher +// suites on the client. +// +// The callback is passed the identity hint in |hint| or NULL if none was +// provided. It should select a PSK identity and write the identity and the +// corresponding PSK to |identity| and |psk|, respectively. The identity is +// written as a NUL-terminated C string of length (excluding the NUL terminator) +// at most |max_identity_len|. The PSK's length must be at most |max_psk_len|. +// The callback returns the length of the PSK or 0 if no suitable identity was +// found. OPENSSL_EXPORT void SSL_CTX_set_psk_client_callback( - SSL_CTX *ctx, - unsigned (*psk_client_callback)( - SSL *ssl, const char *hint, char *identity, - unsigned max_identity_len, uint8_t *psk, unsigned max_psk_len)); + SSL_CTX *ctx, unsigned (*cb)(SSL *ssl, const char *hint, char *identity, + unsigned max_identity_len, uint8_t *psk, + unsigned max_psk_len)); -/* SSL_set_psk_client_callback sets the callback to be called when PSK is - * negotiated on the client. This callback must be set to enable PSK cipher - * suites on the client. See also |SSL_CTX_set_psk_client_callback|. */ +// SSL_set_psk_client_callback sets the callback to be called when PSK is +// negotiated on the client. This callback must be set to enable PSK cipher +// suites on the client. See also |SSL_CTX_set_psk_client_callback|. OPENSSL_EXPORT void SSL_set_psk_client_callback( - SSL *ssl, unsigned (*psk_client_callback)(SSL *ssl, const char *hint, - char *identity, - unsigned max_identity_len, - uint8_t *psk, - unsigned max_psk_len)); - -/* SSL_CTX_set_psk_server_callback sets the callback to be called when PSK is - * negotiated on the server. This callback must be set to enable PSK cipher - * suites on the server. - * - * The callback is passed the identity in |identity|. It should write a PSK of - * length at most |max_psk_len| to |psk| and return the number of bytes written - * or zero if the PSK identity is unknown. */ + SSL *ssl, unsigned (*cb)(SSL *ssl, const char *hint, char *identity, + unsigned max_identity_len, uint8_t *psk, + unsigned max_psk_len)); + +// SSL_CTX_set_psk_server_callback sets the callback to be called when PSK is +// negotiated on the server. This callback must be set to enable PSK cipher +// suites on the server. +// +// The callback is passed the identity in |identity|. It should write a PSK of +// length at most |max_psk_len| to |psk| and return the number of bytes written +// or zero if the PSK identity is unknown. OPENSSL_EXPORT void SSL_CTX_set_psk_server_callback( - SSL_CTX *ctx, - unsigned (*psk_server_callback)(SSL *ssl, const char *identity, - uint8_t *psk, - unsigned max_psk_len)); + SSL_CTX *ctx, unsigned (*cb)(SSL *ssl, const char *identity, uint8_t *psk, + unsigned max_psk_len)); -/* SSL_set_psk_server_callback sets the callback to be called when PSK is - * negotiated on the server. This callback must be set to enable PSK cipher - * suites on the server. See also |SSL_CTX_set_psk_server_callback|. */ +// SSL_set_psk_server_callback sets the callback to be called when PSK is +// negotiated on the server. This callback must be set to enable PSK cipher +// suites on the server. See also |SSL_CTX_set_psk_server_callback|. 
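A sketch of the PSK callbacks documented above, using a single hard-coded identity and key purely for illustration; a real application would look these up (for example, keyed by the server's identity hint):

#include <openssl/ssl.h>
#include <string.h>

// Hypothetical fixed credentials, for illustration only.
static const char kIdentity[] = "client1";
static const uint8_t kKey[] = {0x01, 0x02, 0x03, 0x04};

static unsigned psk_client_cb(SSL *ssl, const char *hint, char *identity,
                              unsigned max_identity_len, uint8_t *psk,
                              unsigned max_psk_len) {
  (void)ssl;
  (void)hint;  // the hint could be used to select among several keys
  if (sizeof(kIdentity) > max_identity_len || sizeof(kKey) > max_psk_len) {
    return 0;  // no suitable identity
  }
  memcpy(identity, kIdentity, sizeof(kIdentity));  // NUL-terminated
  memcpy(psk, kKey, sizeof(kKey));
  return sizeof(kKey);
}

static unsigned psk_server_cb(SSL *ssl, const char *identity, uint8_t *psk,
                              unsigned max_psk_len) {
  (void)ssl;
  if (strcmp(identity, kIdentity) != 0 || sizeof(kKey) > max_psk_len) {
    return 0;  // unknown identity
  }
  memcpy(psk, kKey, sizeof(kKey));
  return sizeof(kKey);
}

void configure_psk(SSL_CTX *client_ctx, SSL_CTX *server_ctx) {
  SSL_CTX_set_psk_client_callback(client_ctx, psk_client_cb);
  SSL_CTX_set_psk_server_callback(server_ctx, psk_server_cb);
  SSL_CTX_use_psk_identity_hint(server_ctx, "example-hint");
}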
OPENSSL_EXPORT void SSL_set_psk_server_callback( - SSL *ssl, - unsigned (*psk_server_callback)(SSL *ssl, const char *identity, - uint8_t *psk, - unsigned max_psk_len)); - -/* SSL_CTX_use_psk_identity_hint configures server connections to advertise an - * identity hint of |identity_hint|. It returns one on success and zero on - * error. */ + SSL *ssl, unsigned (*cb)(SSL *ssl, const char *identity, uint8_t *psk, + unsigned max_psk_len)); + +// SSL_CTX_use_psk_identity_hint configures server connections to advertise an +// identity hint of |identity_hint|. It returns one on success and zero on +// error. OPENSSL_EXPORT int SSL_CTX_use_psk_identity_hint(SSL_CTX *ctx, const char *identity_hint); -/* SSL_use_psk_identity_hint configures server connections to advertise an - * identity hint of |identity_hint|. It returns one on success and zero on - * error. */ +// SSL_use_psk_identity_hint configures server connections to advertise an +// identity hint of |identity_hint|. It returns one on success and zero on +// error. OPENSSL_EXPORT int SSL_use_psk_identity_hint(SSL *ssl, const char *identity_hint); -/* SSL_get_psk_identity_hint returns the PSK identity hint advertised for |ssl| - * or NULL if there is none. */ +// SSL_get_psk_identity_hint returns the PSK identity hint advertised for |ssl| +// or NULL if there is none. OPENSSL_EXPORT const char *SSL_get_psk_identity_hint(const SSL *ssl); -/* SSL_get_psk_identity, after the handshake completes, returns the PSK identity - * that was negotiated by |ssl| or NULL if PSK was not used. */ +// SSL_get_psk_identity, after the handshake completes, returns the PSK identity +// that was negotiated by |ssl| or NULL if PSK was not used. OPENSSL_EXPORT const char *SSL_get_psk_identity(const SSL *ssl); -/* Alerts. - * - * TLS and SSL 3.0 use alerts to signal error conditions. Alerts have a type - * (warning or fatal) and description. OpenSSL internally handles fatal alerts - * with dedicated error codes (see |SSL_AD_REASON_OFFSET|). Except for - * close_notify, warning alerts are silently ignored and may only be surfaced - * with |SSL_CTX_set_info_callback|. */ - -/* SSL_AD_REASON_OFFSET is the offset between error reasons and |SSL_AD_*| - * values. Any error code under |ERR_LIB_SSL| with an error reason above this - * value corresponds to an alert description. Consumers may add or subtract - * |SSL_AD_REASON_OFFSET| to convert between them. - * - * make_errors.go reserves error codes above 1000 for manually-assigned errors. - * This value must be kept in sync with reservedReasonCode in make_errors.h */ +// Early data. +// +// WARNING: 0-RTT support in BoringSSL is currently experimental and not fully +// implemented. It may cause interoperability or security failures when used. +// +// Early data, or 0-RTT, is a feature in TLS 1.3 which allows clients to send +// data on the first flight during a resumption handshake. This can save a +// round-trip in some application protocols. +// +// WARNING: A 0-RTT handshake has different security properties from normal +// handshake, so it is off by default unless opted in. In particular, early data +// is replayable by a network attacker. Callers must account for this when +// sending or processing data before the handshake is confirmed. See +// draft-ietf-tls-tls13-18 for more information. +// +// As a server, if early data is accepted, |SSL_do_handshake| will complete as +// soon as the ClientHello is processed and server flight sent. |SSL_write| may +// be used to send half-RTT data. 
|SSL_read| will consume early data and +// transition to 1-RTT data as appropriate. Prior to the transition, +// |SSL_in_init| will report the handshake is still in progress. Callers may use +// it or |SSL_in_early_data| to defer or reject requests as needed. +// +// Early data as a client is more complex. If the offered session (see +// |SSL_set_session|) is 0-RTT-capable, the handshake will return after sending +// the ClientHello. The predicted peer certificates and ALPN protocol will be +// available via the usual APIs. |SSL_write| will write early data, up to the +// session's limit. Writes past this limit and |SSL_read| will complete the +// handshake before continuing. Callers may also call |SSL_do_handshake| again +// to complete the handshake sooner. +// +// If the server accepts early data, the handshake will succeed. |SSL_read| and +// |SSL_write| will then act as in a 1-RTT handshake. The peer certificates and +// ALPN protocol will be as predicted and need not be re-queried. +// +// If the server rejects early data, |SSL_do_handshake| (and thus |SSL_read| and +// |SSL_write|) will then fail with |SSL_get_error| returning +// |SSL_ERROR_EARLY_DATA_REJECTED|. The caller should treat this as a connection +// error and most likely perform a high-level retry. Note the server may still +// have processed the early data due to attacker replays. +// +// To then continue the handshake on the original connection, use +// |SSL_reset_early_data_reject|. The connection will then behave as one which +// had not yet completed the handshake. This allows a faster retry than making a +// fresh connection. |SSL_do_handshake| will complete the full handshake, +// possibly resulting in different peer certificates, ALPN protocol, and other +// properties. The caller must disregard any values from before the reset and +// query again. +// +// Finally, to implement the fallback described in draft-ietf-tls-tls13-18 +// appendix C.3, retry on a fresh connection without 0-RTT if the handshake +// fails with |SSL_R_WRONG_VERSION_ON_EARLY_DATA|. + +// SSL_CTX_set_early_data_enabled sets whether early data is allowed to be used +// with resumptions using |ctx|. +OPENSSL_EXPORT void SSL_CTX_set_early_data_enabled(SSL_CTX *ctx, int enabled); + +// SSL_set_early_data_enabled sets whether early data is allowed to be used +// with resumptions using |ssl|. See |SSL_CTX_set_early_data_enabled| for more +// information. +OPENSSL_EXPORT void SSL_set_early_data_enabled(SSL *ssl, int enabled); + +// SSL_in_early_data returns one if |ssl| has a pending handshake that has +// progressed enough to send or receive early data. Clients may call |SSL_write| +// to send early data, but |SSL_read| will complete the handshake before +// accepting application data. Servers may call |SSL_read| to read early data +// and |SSL_write| to send half-RTT data. +OPENSSL_EXPORT int SSL_in_early_data(const SSL *ssl); + +// SSL_early_data_accepted returns whether early data was accepted on the +// handshake performed by |ssl|. +OPENSSL_EXPORT int SSL_early_data_accepted(const SSL *ssl); + +// SSL_reset_early_data_reject resets |ssl| after an early data reject. All +// 0-RTT state is discarded, including any pending |SSL_write| calls. The caller +// should treat |ssl| as a logically fresh connection, usually by driving the +// handshake to completion using |SSL_do_handshake|. +// +// It is an error to call this function on an |SSL| object that is not signaling +// |SSL_ERROR_EARLY_DATA_REJECTED|. 
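A sketch of the client-side 0-RTT retry path described above, assuming a blocking transport, early data enabled, and a 0-RTT-capable session already configured via |SSL_set_session|; error handling other than the reject case is elided:

#include <openssl/ssl.h>

int write_request_with_0rtt(SSL *ssl, const uint8_t *req, size_t req_len) {
  // With 0-RTT available, this SSL_write may send early data before the
  // handshake has completed.
  int ret = SSL_write(ssl, req, (int)req_len);
  if (ret > 0) {
    return 1;
  }
  if (SSL_get_error(ssl, ret) == SSL_ERROR_EARLY_DATA_REJECTED) {
    // The server rejected 0-RTT (an attacker may still have replayed the
    // early data). Discard the 0-RTT state and retry the request on the same
    // connection as a full 1-RTT handshake.
    SSL_reset_early_data_reject(ssl);
    ret = SSL_write(ssl, req, (int)req_len);
  }
  return ret > 0;
}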
+OPENSSL_EXPORT void SSL_reset_early_data_reject(SSL *ssl); + + +// Alerts. +// +// TLS and SSL 3.0 use alerts to signal error conditions. Alerts have a type +// (warning or fatal) and description. OpenSSL internally handles fatal alerts +// with dedicated error codes (see |SSL_AD_REASON_OFFSET|). Except for +// close_notify, warning alerts are silently ignored and may only be surfaced +// with |SSL_CTX_set_info_callback|. + +// SSL_AD_REASON_OFFSET is the offset between error reasons and |SSL_AD_*| +// values. Any error code under |ERR_LIB_SSL| with an error reason above this +// value corresponds to an alert description. Consumers may add or subtract +// |SSL_AD_REASON_OFFSET| to convert between them. +// +// make_errors.go reserves error codes above 1000 for manually-assigned errors. +// This value must be kept in sync with reservedReasonCode in make_errors.h #define SSL_AD_REASON_OFFSET 1000 -/* SSL_AD_* are alert descriptions for SSL 3.0 and TLS. */ +// SSL_AD_* are alert descriptions for SSL 3.0 and TLS. #define SSL_AD_CLOSE_NOTIFY SSL3_AD_CLOSE_NOTIFY #define SSL_AD_UNEXPECTED_MESSAGE SSL3_AD_UNEXPECTED_MESSAGE #define SSL_AD_BAD_RECORD_MAC SSL3_AD_BAD_RECORD_MAC @@ -2752,7 +3014,7 @@ OPENSSL_EXPORT const char *SSL_get_psk_identity(const SSL *ssl); #define SSL_AD_RECORD_OVERFLOW TLS1_AD_RECORD_OVERFLOW #define SSL_AD_DECOMPRESSION_FAILURE SSL3_AD_DECOMPRESSION_FAILURE #define SSL_AD_HANDSHAKE_FAILURE SSL3_AD_HANDSHAKE_FAILURE -#define SSL_AD_NO_CERTIFICATE SSL3_AD_NO_CERTIFICATE /* Not used in TLS */ +#define SSL_AD_NO_CERTIFICATE SSL3_AD_NO_CERTIFICATE // Not used in TLS #define SSL_AD_BAD_CERTIFICATE SSL3_AD_BAD_CERTIFICATE #define SSL_AD_UNSUPPORTED_CERTIFICATE SSL3_AD_UNSUPPORTED_CERTIFICATE #define SSL_AD_CERTIFICATE_REVOKED SSL3_AD_CERTIFICATE_REVOKED @@ -2780,34 +3042,34 @@ OPENSSL_EXPORT const char *SSL_get_psk_identity(const SSL *ssl); #define SSL_AD_UNKNOWN_PSK_IDENTITY TLS1_AD_UNKNOWN_PSK_IDENTITY #define SSL_AD_CERTIFICATE_REQUIRED TLS1_AD_CERTIFICATE_REQUIRED -/* SSL_alert_type_string_long returns a string description of |value| as an - * alert type (warning or fatal). */ +// SSL_alert_type_string_long returns a string description of |value| as an +// alert type (warning or fatal). OPENSSL_EXPORT const char *SSL_alert_type_string_long(int value); -/* SSL_alert_desc_string_long returns a string description of |value| as an - * alert description or "unknown" if unknown. */ +// SSL_alert_desc_string_long returns a string description of |value| as an +// alert description or "unknown" if unknown. OPENSSL_EXPORT const char *SSL_alert_desc_string_long(int value); -/* SSL_send_fatal_alert sends a fatal alert over |ssl| of the specified type, - * which should be one of the |SSL_AD_*| constants. It returns one on success - * and <= 0 on error. The caller should pass the return value into - * |SSL_get_error| to determine how to proceed. Once this function has been - * called, future calls to |SSL_write| will fail. - * - * If retrying a failed operation due to |SSL_ERROR_WANT_WRITE|, subsequent - * calls must use the same |alert| parameter. */ +// SSL_send_fatal_alert sends a fatal alert over |ssl| of the specified type, +// which should be one of the |SSL_AD_*| constants. It returns one on success +// and <= 0 on error. The caller should pass the return value into +// |SSL_get_error| to determine how to proceed. Once this function has been +// called, future calls to |SSL_write| will fail. 
+// +// If retrying a failed operation due to |SSL_ERROR_WANT_WRITE|, subsequent +// calls must use the same |alert| parameter. OPENSSL_EXPORT int SSL_send_fatal_alert(SSL *ssl, uint8_t alert); -/* ex_data functions. - * - * See |ex_data.h| for details. */ +// ex_data functions. +// +// See |ex_data.h| for details. OPENSSL_EXPORT int SSL_set_ex_data(SSL *ssl, int idx, void *data); OPENSSL_EXPORT void *SSL_get_ex_data(const SSL *ssl, int idx); OPENSSL_EXPORT int SSL_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int SSL_SESSION_set_ex_data(SSL_SESSION *session, int idx, @@ -2816,111 +3078,111 @@ OPENSSL_EXPORT void *SSL_SESSION_get_ex_data(const SSL_SESSION *session, int idx); OPENSSL_EXPORT int SSL_SESSION_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int SSL_CTX_set_ex_data(SSL_CTX *ctx, int idx, void *data); OPENSSL_EXPORT void *SSL_CTX_get_ex_data(const SSL_CTX *ctx, int idx); OPENSSL_EXPORT int SSL_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); -/* Low-level record-layer state. */ +// Low-level record-layer state. -/* SSL_get_ivs sets |*out_iv_len| to the length of the IVs for the ciphers - * underlying |ssl| and sets |*out_read_iv| and |*out_write_iv| to point to the - * current IVs for the read and write directions. This is only meaningful for - * connections with implicit IVs (i.e. CBC mode with SSLv3 or TLS 1.0). - * - * It returns one on success or zero on error. */ +// SSL_get_ivs sets |*out_iv_len| to the length of the IVs for the ciphers +// underlying |ssl| and sets |*out_read_iv| and |*out_write_iv| to point to the +// current IVs for the read and write directions. This is only meaningful for +// connections with implicit IVs (i.e. CBC mode with SSLv3 or TLS 1.0). +// +// It returns one on success or zero on error. OPENSSL_EXPORT int SSL_get_ivs(const SSL *ssl, const uint8_t **out_read_iv, const uint8_t **out_write_iv, size_t *out_iv_len); -/* SSL_get_key_block_len returns the length of |ssl|'s key block. */ +// SSL_get_key_block_len returns the length of |ssl|'s key block. OPENSSL_EXPORT size_t SSL_get_key_block_len(const SSL *ssl); -/* SSL_generate_key_block generates |out_len| bytes of key material for |ssl|'s - * current connection state. */ +// SSL_generate_key_block generates |out_len| bytes of key material for |ssl|'s +// current connection state. OPENSSL_EXPORT int SSL_generate_key_block(const SSL *ssl, uint8_t *out, size_t out_len); -/* SSL_get_read_sequence returns, in TLS, the expected sequence number of the - * next incoming record in the current epoch. In DTLS, it returns the maximum - * sequence number received in the current epoch and includes the epoch number - * in the two most significant bytes. */ +// SSL_get_read_sequence returns, in TLS, the expected sequence number of the +// next incoming record in the current epoch. In DTLS, it returns the maximum +// sequence number received in the current epoch and includes the epoch number +// in the two most significant bytes. OPENSSL_EXPORT uint64_t SSL_get_read_sequence(const SSL *ssl); -/* SSL_get_write_sequence returns the sequence number of the next outgoing - * record in the current epoch. In DTLS, it includes the epoch number in the - * two most significant bytes. 
*/ +// SSL_get_write_sequence returns the sequence number of the next outgoing +// record in the current epoch. In DTLS, it includes the epoch number in the +// two most significant bytes. OPENSSL_EXPORT uint64_t SSL_get_write_sequence(const SSL *ssl); -/* Obscure functions. */ +// Obscure functions. -/* SSL_get_structure_sizes returns the sizes of the SSL, SSL_CTX and - * SSL_SESSION structures so that a test can ensure that outside code agrees on - * these values. */ +// SSL_get_structure_sizes returns the sizes of the SSL, SSL_CTX and +// SSL_SESSION structures so that a test can ensure that outside code agrees on +// these values. OPENSSL_EXPORT void SSL_get_structure_sizes(size_t *ssl_size, size_t *ssl_ctx_size, size_t *ssl_session_size); -/* SSL_CTX_set_msg_callback installs |cb| as the message callback for |ctx|. - * This callback will be called when sending or receiving low-level record - * headers, complete handshake messages, ChangeCipherSpec, and alerts. - * |write_p| is one for outgoing messages and zero for incoming messages. - * - * For each record header, |cb| is called with |version| = 0 and |content_type| - * = |SSL3_RT_HEADER|. The |len| bytes from |buf| contain the header. Note that - * this does not include the record body. If the record is sealed, the length - * in the header is the length of the ciphertext. - * - * For each handshake message, ChangeCipherSpec, and alert, |version| is the - * protocol version and |content_type| is the corresponding record type. The - * |len| bytes from |buf| contain the handshake message, one-byte - * ChangeCipherSpec body, and two-byte alert, respectively. - * - * For a V2ClientHello, |version| is |SSL2_VERSION|, |content_type| is zero, and - * the |len| bytes from |buf| contain the V2ClientHello structure. */ +// SSL_CTX_set_msg_callback installs |cb| as the message callback for |ctx|. +// This callback will be called when sending or receiving low-level record +// headers, complete handshake messages, ChangeCipherSpec, and alerts. +// |write_p| is one for outgoing messages and zero for incoming messages. +// +// For each record header, |cb| is called with |version| = 0 and |content_type| +// = |SSL3_RT_HEADER|. The |len| bytes from |buf| contain the header. Note that +// this does not include the record body. If the record is sealed, the length +// in the header is the length of the ciphertext. +// +// For each handshake message, ChangeCipherSpec, and alert, |version| is the +// protocol version and |content_type| is the corresponding record type. The +// |len| bytes from |buf| contain the handshake message, one-byte +// ChangeCipherSpec body, and two-byte alert, respectively. +// +// For a V2ClientHello, |version| is |SSL2_VERSION|, |content_type| is zero, and +// the |len| bytes from |buf| contain the V2ClientHello structure. OPENSSL_EXPORT void SSL_CTX_set_msg_callback( SSL_CTX *ctx, void (*cb)(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)); -/* SSL_CTX_set_msg_callback_arg sets the |arg| parameter of the message - * callback. */ +// SSL_CTX_set_msg_callback_arg sets the |arg| parameter of the message +// callback. OPENSSL_EXPORT void SSL_CTX_set_msg_callback_arg(SSL_CTX *ctx, void *arg); -/* SSL_set_msg_callback installs |cb| as the message callback of |ssl|. See - * |SSL_CTX_set_msg_callback| for when this callback is called. */ +// SSL_set_msg_callback installs |cb| as the message callback of |ssl|. See +// |SSL_CTX_set_msg_callback| for when this callback is called. 
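A sketch of a logging message callback wired up as described above; it prints one summary line per record header, handshake message, ChangeCipherSpec, or alert, and ignores the raw bytes in |buf|:

#include <openssl/ssl.h>
#include <stdio.h>

static void msg_cb(int write_p, int version, int content_type, const void *buf,
                   size_t len, SSL *ssl, void *arg) {
  (void)ssl;
  (void)buf;  // the raw bytes could be hex-dumped here
  FILE *log = (FILE *)arg;
  fprintf(log, "%s version=0x%x type=%d len=%zu\n",
          write_p ? "send" : "recv", version, content_type, len);
}

void enable_msg_logging(SSL_CTX *ctx, FILE *log) {
  SSL_CTX_set_msg_callback(ctx, msg_cb);
  SSL_CTX_set_msg_callback_arg(ctx, log);
}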
OPENSSL_EXPORT void SSL_set_msg_callback( SSL *ssl, void (*cb)(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)); -/* SSL_set_msg_callback_arg sets the |arg| parameter of the message callback. */ +// SSL_set_msg_callback_arg sets the |arg| parameter of the message callback. OPENSSL_EXPORT void SSL_set_msg_callback_arg(SSL *ssl, void *arg); -/* SSL_CTX_set_keylog_callback configures a callback to log key material. This - * is intended for debugging use with tools like Wireshark. The |cb| function - * should log |line| followed by a newline, synchronizing with any concurrent - * access to the log. - * - * The format is described in - * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format. */ +// SSL_CTX_set_keylog_callback configures a callback to log key material. This +// is intended for debugging use with tools like Wireshark. The |cb| function +// should log |line| followed by a newline, synchronizing with any concurrent +// access to the log. +// +// The format is described in +// https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format. OPENSSL_EXPORT void SSL_CTX_set_keylog_callback( SSL_CTX *ctx, void (*cb)(const SSL *ssl, const char *line)); -/* SSL_CTX_get_keylog_callback returns the callback configured by - * |SSL_CTX_set_keylog_callback|. */ +// SSL_CTX_get_keylog_callback returns the callback configured by +// |SSL_CTX_set_keylog_callback|. OPENSSL_EXPORT void (*SSL_CTX_get_keylog_callback(const SSL_CTX *ctx))( const SSL *ssl, const char *line); -/* SSL_CTX_set_current_time_cb configures a callback to retrieve the current - * time, which should be set in |*out_clock|. This can be used for testing - * purposes; for example, a callback can be configured that returns a time - * set explicitly by the test. */ +// SSL_CTX_set_current_time_cb configures a callback to retrieve the current +// time, which should be set in |*out_clock|. This can be used for testing +// purposes; for example, a callback can be configured that returns a time +// set explicitly by the test. The |ssl| pointer passed to |cb| is always null. OPENSSL_EXPORT void SSL_CTX_set_current_time_cb( SSL_CTX *ctx, void (*cb)(const SSL *ssl, struct timeval *out_clock)); @@ -2931,80 +3193,92 @@ enum ssl_renegotiate_mode_t { ssl_renegotiate_ignore, }; -/* SSL_set_renegotiate_mode configures how |ssl|, a client, reacts to - * renegotiation attempts by a server. If |ssl| is a server, peer-initiated - * renegotiations are *always* rejected and this function does nothing. - * - * The renegotiation mode defaults to |ssl_renegotiate_never|, but may be set - * at any point in a connection's lifetime. Set it to |ssl_renegotiate_once| to - * allow one renegotiation, |ssl_renegotiate_freely| to allow all - * renegotiations or |ssl_renegotiate_ignore| to ignore HelloRequest messages. - * Note that ignoring HelloRequest messages may cause the connection to stall - * if the server waits for the renegotiation to complete. - * - * There is no support in BoringSSL for initiating renegotiations as a client - * or server. */ +// SSL_set_renegotiate_mode configures how |ssl|, a client, reacts to +// renegotiation attempts by a server. If |ssl| is a server, peer-initiated +// renegotiations are *always* rejected and this function does nothing. +// +// The renegotiation mode defaults to |ssl_renegotiate_never|, but may be set +// at any point in a connection's lifetime. 
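A minimal sketch of the keylog callback documented above; the output path is a placeholder and the file handling is illustrative:

static void keylog_cb(const SSL *ssl, const char *line) {
  FILE *keylog = fopen("/tmp/sslkeylog.txt", "a");  // hypothetical path
  if (keylog != NULL) {
    fprintf(keylog, "%s\n", line);  // log |line| followed by a newline
    fclose(keylog);
  }
}

SSL_CTX_set_keylog_callback(ctx, keylog_cb);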
Set it to |ssl_renegotiate_once| to +// allow one renegotiation, |ssl_renegotiate_freely| to allow all +// renegotiations or |ssl_renegotiate_ignore| to ignore HelloRequest messages. +// Note that ignoring HelloRequest messages may cause the connection to stall +// if the server waits for the renegotiation to complete. +// +// There is no support in BoringSSL for initiating renegotiations as a client +// or server. OPENSSL_EXPORT void SSL_set_renegotiate_mode(SSL *ssl, enum ssl_renegotiate_mode_t mode); -/* SSL_renegotiate_pending returns one if |ssl| is in the middle of a - * renegotiation. */ +// SSL_renegotiate_pending returns one if |ssl| is in the middle of a +// renegotiation. OPENSSL_EXPORT int SSL_renegotiate_pending(SSL *ssl); -/* SSL_total_renegotiations returns the total number of renegotiation handshakes - * performed by |ssl|. This includes the pending renegotiation, if any. */ +// SSL_total_renegotiations returns the total number of renegotiation handshakes +// performed by |ssl|. This includes the pending renegotiation, if any. OPENSSL_EXPORT int SSL_total_renegotiations(const SSL *ssl); -/* SSL_CTX_set_early_data_enabled sets whether early data is allowed to be used - * with resumptions using |ctx|. WARNING: This is experimental and may cause - * interoperability failures until fully implemented. */ -OPENSSL_EXPORT void SSL_CTX_set_early_data_enabled(SSL_CTX *ctx, int enabled); +enum tls13_variant_t { + tls13_default = 0, + tls13_experiment = 1, + tls13_experiment2 = 2, + tls13_experiment3 = 3, + tls13_draft21 = 4, + tls13_draft22 = 5, +}; + +// SSL_CTX_set_tls13_variant sets which variant of TLS 1.3 we negotiate. On the +// server, if |variant| is not |tls13_default|, all variants are enabled. On the +// client, only the configured variant is enabled. +OPENSSL_EXPORT void SSL_CTX_set_tls13_variant(SSL_CTX *ctx, + enum tls13_variant_t variant); + +// SSL_set_tls13_variant sets which variant of TLS 1.3 we negotiate. On the +// server, if |variant| is not |tls13_default|, all variants are enabled. On the +// client, only the configured variant is enabled. +OPENSSL_EXPORT void SSL_set_tls13_variant(SSL *ssl, + enum tls13_variant_t variant); -/* SSL_MAX_CERT_LIST_DEFAULT is the default maximum length, in bytes, of a peer - * certificate chain. */ +// SSL_MAX_CERT_LIST_DEFAULT is the default maximum length, in bytes, of a peer +// certificate chain. #define SSL_MAX_CERT_LIST_DEFAULT (1024 * 100) -/* SSL_CTX_get_max_cert_list returns the maximum length, in bytes, of a peer - * certificate chain accepted by |ctx|. */ +// SSL_CTX_get_max_cert_list returns the maximum length, in bytes, of a peer +// certificate chain accepted by |ctx|. OPENSSL_EXPORT size_t SSL_CTX_get_max_cert_list(const SSL_CTX *ctx); -/* SSL_CTX_set_max_cert_list sets the maximum length, in bytes, of a peer - * certificate chain to |max_cert_list|. This affects how much memory may be - * consumed during the handshake. */ +// SSL_CTX_set_max_cert_list sets the maximum length, in bytes, of a peer +// certificate chain to |max_cert_list|. This affects how much memory may be +// consumed during the handshake. OPENSSL_EXPORT void SSL_CTX_set_max_cert_list(SSL_CTX *ctx, size_t max_cert_list); -/* SSL_get_max_cert_list returns the maximum length, in bytes, of a peer - * certificate chain accepted by |ssl|. */ +// SSL_get_max_cert_list returns the maximum length, in bytes, of a peer +// certificate chain accepted by |ssl|. 
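A short configuration sketch for the renegotiation mode, TLS 1.3 variant, and certificate-chain limit described above; the chosen values are illustrative:

SSL_set_renegotiate_mode(ssl, ssl_renegotiate_once);        // allow one server-initiated renegotiation
SSL_CTX_set_tls13_variant(ctx, tls13_default);              // negotiate only the default TLS 1.3 variant
SSL_CTX_set_max_cert_list(ctx, SSL_MAX_CERT_LIST_DEFAULT);  // cap the peer chain at the default limit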
OPENSSL_EXPORT size_t SSL_get_max_cert_list(const SSL *ssl); -/* SSL_set_max_cert_list sets the maximum length, in bytes, of a peer - * certificate chain to |max_cert_list|. This affects how much memory may be - * consumed during the handshake. */ +// SSL_set_max_cert_list sets the maximum length, in bytes, of a peer +// certificate chain to |max_cert_list|. This affects how much memory may be +// consumed during the handshake. OPENSSL_EXPORT void SSL_set_max_cert_list(SSL *ssl, size_t max_cert_list); -/* SSL_CTX_set_max_send_fragment sets the maximum length, in bytes, of records - * sent by |ctx|. Beyond this length, handshake messages and application data - * will be split into multiple records. It returns one on success or zero on - * error. */ +// SSL_CTX_set_max_send_fragment sets the maximum length, in bytes, of records +// sent by |ctx|. Beyond this length, handshake messages and application data +// will be split into multiple records. It returns one on success or zero on +// error. OPENSSL_EXPORT int SSL_CTX_set_max_send_fragment(SSL_CTX *ctx, size_t max_send_fragment); -/* SSL_set_max_send_fragment sets the maximum length, in bytes, of records sent - * by |ssl|. Beyond this length, handshake messages and application data will - * be split into multiple records. It returns one on success or zero on - * error. */ +// SSL_set_max_send_fragment sets the maximum length, in bytes, of records sent +// by |ssl|. Beyond this length, handshake messages and application data will +// be split into multiple records. It returns one on success or zero on +// error. OPENSSL_EXPORT int SSL_set_max_send_fragment(SSL *ssl, size_t max_send_fragment); -/* SSL_get_v2clienthello_count returns the total number of V2ClientHellos that - * are accepted. */ -OPENSSL_EXPORT uint64_t SSL_get_v2clienthello_count(void); - -/* ssl_early_callback_ctx (aka |SSL_CLIENT_HELLO|) is passed to certain - * callbacks that are called very early on during the server handshake. At this - * point, much of the SSL* hasn't been filled out and only the ClientHello can - * be depended on. */ +// ssl_early_callback_ctx (aka |SSL_CLIENT_HELLO|) is passed to certain +// callbacks that are called very early on during the server handshake. At this +// point, much of the SSL* hasn't been filled out and only the ClientHello can +// be depended on. typedef struct ssl_early_callback_ctx { SSL *ssl; const uint8_t *client_hello; @@ -3022,45 +3296,62 @@ typedef struct ssl_early_callback_ctx { size_t extensions_len; } SSL_CLIENT_HELLO; -/* SSL_early_callback_ctx_extension_get searches the extensions in - * |client_hello| for an extension of the given type. If not found, it returns - * zero. Otherwise it sets |out_data| to point to the extension contents (not - * including the type and length bytes), sets |out_len| to the length of the - * extension contents and returns one. */ +// ssl_select_cert_result_t enumerates the possible results from selecting a +// certificate with |select_certificate_cb|. +enum ssl_select_cert_result_t { + // ssl_select_cert_success indicates that the certificate selection was + // successful. + ssl_select_cert_success = 1, + // ssl_select_cert_retry indicates that the operation could not be + // immediately completed and must be reattempted at a later point. + ssl_select_cert_retry = 0, + // ssl_select_cert_error indicates that a fatal error occured and the + // handshake should be terminated. 
+ ssl_select_cert_error = -1, +}; + +// SSL_early_callback_ctx_extension_get searches the extensions in +// |client_hello| for an extension of the given type. If not found, it returns +// zero. Otherwise it sets |out_data| to point to the extension contents (not +// including the type and length bytes), sets |out_len| to the length of the +// extension contents and returns one. OPENSSL_EXPORT int SSL_early_callback_ctx_extension_get( const SSL_CLIENT_HELLO *client_hello, uint16_t extension_type, const uint8_t **out_data, size_t *out_len); -/* SSL_CTX_set_select_certificate_cb sets a callback that is called before most - * ClientHello processing and before the decision whether to resume a session - * is made. The callback may inspect the ClientHello and configure the - * connection. It may then return one to continue the handshake or zero to - * pause the handshake to perform an asynchronous operation. If paused, - * |SSL_get_error| will return |SSL_ERROR_PENDING_CERTIFICATE|. - * - * Note: The |SSL_CLIENT_HELLO| is only valid for the duration of the callback - * and is not valid while the handshake is paused. */ +// SSL_CTX_set_select_certificate_cb sets a callback that is called before most +// ClientHello processing and before the decision whether to resume a session +// is made. The callback may inspect the ClientHello and configure the +// connection. See |ssl_select_cert_result_t| for details of the return values. +// +// In the case that a retry is indicated, |SSL_get_error| will return +// |SSL_ERROR_PENDING_CERTIFICATE| and the caller should arrange for the +// high-level operation on |ssl| to be retried at a later time, which will +// result in another call to |cb|. +// +// Note: The |SSL_CLIENT_HELLO| is only valid for the duration of the callback +// and is not valid while the handshake is paused. OPENSSL_EXPORT void SSL_CTX_set_select_certificate_cb( - SSL_CTX *ctx, int (*cb)(const SSL_CLIENT_HELLO *)); + SSL_CTX *ctx, + enum ssl_select_cert_result_t (*cb)(const SSL_CLIENT_HELLO *)); -/* SSL_CTX_set_dos_protection_cb sets a callback that is called once the - * resumption decision for a ClientHello has been made. It can return one to - * allow the handshake to continue or zero to cause the handshake to abort. */ +// SSL_CTX_set_dos_protection_cb sets a callback that is called once the +// resumption decision for a ClientHello has been made. It can return one to +// allow the handshake to continue or zero to cause the handshake to abort. OPENSSL_EXPORT void SSL_CTX_set_dos_protection_cb( SSL_CTX *ctx, int (*cb)(const SSL_CLIENT_HELLO *)); -/* SSL_ST_* are possible values for |SSL_state| and the bitmasks that make them - * up. */ +// SSL_ST_* are possible values for |SSL_state| and the bitmasks that make them +// up. #define SSL_ST_CONNECT 0x1000 #define SSL_ST_ACCEPT 0x2000 #define SSL_ST_MASK 0x0FFF #define SSL_ST_INIT (SSL_ST_CONNECT | SSL_ST_ACCEPT) #define SSL_ST_OK 0x03 #define SSL_ST_RENEGOTIATE (0x04 | SSL_ST_INIT) -#define SSL_ST_TLS13 (0x05 | SSL_ST_INIT) -/* SSL_CB_* are possible values for the |type| parameter in the info - * callback and the bitmasks that make them up. */ +// SSL_CB_* are possible values for the |type| parameter in the info +// callback and the bitmasks that make them up. 
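A minimal sketch of a |select_certificate_cb| built from the pieces documented above; the extension lookup and the decision to always continue are illustrative:

static enum ssl_select_cert_result_t select_cert_cb(
    const SSL_CLIENT_HELLO *client_hello) {
  const uint8_t *data;
  size_t len;
  // 0 is the server_name extension number (RFC 6066).
  if (SSL_early_callback_ctx_extension_get(client_hello, 0, &data, &len)) {
    // Inspect |data|/|len| and reconfigure the connection here if desired.
  }
  return ssl_select_cert_success;  // continue the handshake
}

SSL_CTX_set_select_certificate_cb(ctx, select_cert_cb);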
#define SSL_CB_LOOP 0x01 #define SSL_CB_EXIT 0x02 #define SSL_CB_READ 0x04 @@ -3075,181 +3366,176 @@ OPENSSL_EXPORT void SSL_CTX_set_dos_protection_cb( #define SSL_CB_HANDSHAKE_START 0x10 #define SSL_CB_HANDSHAKE_DONE 0x20 -/* SSL_CTX_set_info_callback configures a callback to be run when various - * events occur during a connection's lifetime. The |type| argument determines - * the type of event and the meaning of the |value| argument. Callbacks must - * ignore unexpected |type| values. - * - * |SSL_CB_READ_ALERT| is signaled for each alert received, warning or fatal. - * The |value| argument is a 16-bit value where the alert level (either - * |SSL3_AL_WARNING| or |SSL3_AL_FATAL|) is in the most-significant eight bits - * and the alert type (one of |SSL_AD_*|) is in the least-significant eight. - * - * |SSL_CB_WRITE_ALERT| is signaled for each alert sent. The |value| argument - * is constructed as with |SSL_CB_READ_ALERT|. - * - * |SSL_CB_HANDSHAKE_START| is signaled when a handshake begins. The |value| - * argument is always one. - * - * |SSL_CB_HANDSHAKE_DONE| is signaled when a handshake completes successfully. - * The |value| argument is always one. If a handshake False Starts, this event - * may be used to determine when the Finished message is received. - * - * The following event types expose implementation details of the handshake - * state machine. Consuming them is deprecated. - * - * |SSL_CB_ACCEPT_LOOP| (respectively, |SSL_CB_CONNECT_LOOP|) is signaled when - * a server (respectively, client) handshake progresses. The |value| argument - * is always one. - * - * |SSL_CB_ACCEPT_EXIT| (respectively, |SSL_CB_CONNECT_EXIT|) is signaled when - * a server (respectively, client) handshake completes, fails, or is paused. - * The |value| argument is one if the handshake succeeded and <= 0 - * otherwise. */ +// SSL_CTX_set_info_callback configures a callback to be run when various +// events occur during a connection's lifetime. The |type| argument determines +// the type of event and the meaning of the |value| argument. Callbacks must +// ignore unexpected |type| values. +// +// |SSL_CB_READ_ALERT| is signaled for each alert received, warning or fatal. +// The |value| argument is a 16-bit value where the alert level (either +// |SSL3_AL_WARNING| or |SSL3_AL_FATAL|) is in the most-significant eight bits +// and the alert type (one of |SSL_AD_*|) is in the least-significant eight. +// +// |SSL_CB_WRITE_ALERT| is signaled for each alert sent. The |value| argument +// is constructed as with |SSL_CB_READ_ALERT|. +// +// |SSL_CB_HANDSHAKE_START| is signaled when a handshake begins. The |value| +// argument is always one. +// +// |SSL_CB_HANDSHAKE_DONE| is signaled when a handshake completes successfully. +// The |value| argument is always one. If a handshake False Starts, this event +// may be used to determine when the Finished message is received. +// +// The following event types expose implementation details of the handshake +// state machine. Consuming them is deprecated. +// +// |SSL_CB_ACCEPT_LOOP| (respectively, |SSL_CB_CONNECT_LOOP|) is signaled when +// a server (respectively, client) handshake progresses. The |value| argument +// is always one. +// +// |SSL_CB_ACCEPT_EXIT| (respectively, |SSL_CB_CONNECT_EXIT|) is signaled when +// a server (respectively, client) handshake completes, fails, or is paused. +// The |value| argument is one if the handshake succeeded and <= 0 +// otherwise. 
OPENSSL_EXPORT void SSL_CTX_set_info_callback( SSL_CTX *ctx, void (*cb)(const SSL *ssl, int type, int value)); -/* SSL_CTX_get_info_callback returns the callback set by - * |SSL_CTX_set_info_callback|. */ +// SSL_CTX_get_info_callback returns the callback set by +// |SSL_CTX_set_info_callback|. OPENSSL_EXPORT void (*SSL_CTX_get_info_callback(SSL_CTX *ctx))(const SSL *ssl, int type, int value); -/* SSL_set_info_callback configures a callback to be run at various events - * during a connection's lifetime. See |SSL_CTX_set_info_callback|. */ +// SSL_set_info_callback configures a callback to be run at various events +// during a connection's lifetime. See |SSL_CTX_set_info_callback|. OPENSSL_EXPORT void SSL_set_info_callback( SSL *ssl, void (*cb)(const SSL *ssl, int type, int value)); -/* SSL_get_info_callback returns the callback set by |SSL_set_info_callback|. */ +// SSL_get_info_callback returns the callback set by |SSL_set_info_callback|. OPENSSL_EXPORT void (*SSL_get_info_callback(const SSL *ssl))(const SSL *ssl, int type, int value); -/* SSL_state_string_long returns the current state of the handshake state - * machine as a string. This may be useful for debugging and logging. */ +// SSL_state_string_long returns the current state of the handshake state +// machine as a string. This may be useful for debugging and logging. OPENSSL_EXPORT const char *SSL_state_string_long(const SSL *ssl); -/* SSL_set_SSL_CTX partially changes |ssl|'s |SSL_CTX|. |ssl| will use the - * certificate and session_id_context from |ctx|, and |SSL_get_SSL_CTX| will - * report |ctx|. However most settings and the session cache itself will - * continue to use the initial |SSL_CTX|. It is often used as part of SNI. - * - * TODO(davidben): Make a better story here and get rid of this API. Also - * determine if there's anything else affected by |SSL_set_SSL_CTX| that - * matters. Not as many values are affected as one might initially think. The - * session cache explicitly selects the initial |SSL_CTX|. Most settings are - * copied at |SSL_new| so |ctx|'s versions don't apply. This, notably, has some - * consequences for any plans to make |SSL| copy-on-write most of its - * configuration. */ -OPENSSL_EXPORT SSL_CTX *SSL_set_SSL_CTX(SSL *ssl, SSL_CTX *ctx); - #define SSL_SENT_SHUTDOWN 1 #define SSL_RECEIVED_SHUTDOWN 2 -/* SSL_get_shutdown returns a bitmask with a subset of |SSL_SENT_SHUTDOWN| and - * |SSL_RECEIVED_SHUTDOWN| to query whether close_notify was sent or received, - * respectively. */ +// SSL_get_shutdown returns a bitmask with a subset of |SSL_SENT_SHUTDOWN| and +// |SSL_RECEIVED_SHUTDOWN| to query whether close_notify was sent or received, +// respectively. OPENSSL_EXPORT int SSL_get_shutdown(const SSL *ssl); -/* SSL_get_peer_signature_algorithm returns the signature algorithm used by the - * peer. If not applicable, it returns zero. */ +// SSL_get_peer_signature_algorithm returns the signature algorithm used by the +// peer. If not applicable, it returns zero. OPENSSL_EXPORT uint16_t SSL_get_peer_signature_algorithm(const SSL *ssl); -/* SSL_get_client_random writes up to |max_out| bytes of the most recent - * handshake's client_random to |out| and returns the number of bytes written. - * If |max_out| is zero, it returns the size of the client_random. */ +// SSL_get_client_random writes up to |max_out| bytes of the most recent +// handshake's client_random to |out| and returns the number of bytes written. +// If |max_out| is zero, it returns the size of the client_random. 
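A minimal sketch of an info callback wired to the events documented above, assuming <stdio.h>; which events to report is an illustrative choice:

static void info_cb(const SSL *ssl, int type, int value) {
  if (type & SSL_CB_READ_ALERT) {
    // The alert level is in the high byte and the description in the low byte.
    fprintf(stderr, "alert: %s/%s\n", SSL_alert_type_string_long(value),
            SSL_alert_desc_string_long(value));
  } else if (type == SSL_CB_HANDSHAKE_DONE) {
    fprintf(stderr, "handshake complete\n");
  }
}

SSL_CTX_set_info_callback(ctx, info_cb);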
OPENSSL_EXPORT size_t SSL_get_client_random(const SSL *ssl, uint8_t *out, size_t max_out); -/* SSL_get_server_random writes up to |max_out| bytes of the most recent - * handshake's server_random to |out| and returns the number of bytes written. - * If |max_out| is zero, it returns the size of the server_random. */ +// SSL_get_server_random writes up to |max_out| bytes of the most recent +// handshake's server_random to |out| and returns the number of bytes written. +// If |max_out| is zero, it returns the size of the server_random. OPENSSL_EXPORT size_t SSL_get_server_random(const SSL *ssl, uint8_t *out, size_t max_out); -/* SSL_get_pending_cipher returns the cipher suite for the current handshake or - * NULL if one has not been negotiated yet or there is no pending handshake. */ +// SSL_get_pending_cipher returns the cipher suite for the current handshake or +// NULL if one has not been negotiated yet or there is no pending handshake. OPENSSL_EXPORT const SSL_CIPHER *SSL_get_pending_cipher(const SSL *ssl); -/* SSL_set_retain_only_sha256_of_client_certs, on a server, sets whether only - * the SHA-256 hash of peer's certificate should be saved in memory and in the - * session. This can save memory, ticket size and session cache space. If - * enabled, |SSL_get_peer_certificate| will return NULL after the handshake - * completes. See the |peer_sha256| field of |SSL_SESSION| for the hash. */ +// SSL_set_retain_only_sha256_of_client_certs, on a server, sets whether only +// the SHA-256 hash of peer's certificate should be saved in memory and in the +// session. This can save memory, ticket size and session cache space. If +// enabled, |SSL_get_peer_certificate| will return NULL after the handshake +// completes. See the |peer_sha256| field of |SSL_SESSION| for the hash. OPENSSL_EXPORT void SSL_set_retain_only_sha256_of_client_certs(SSL *ssl, int enable); -/* SSL_CTX_set_retain_only_sha256_of_client_certs, on a server, sets whether - * only the SHA-256 hash of peer's certificate should be saved in memory and in - * the session. This can save memory, ticket size and session cache space. If - * enabled, |SSL_get_peer_certificate| will return NULL after the handshake - * completes. See the |peer_sha256| field of |SSL_SESSION| for the hash. */ +// SSL_CTX_set_retain_only_sha256_of_client_certs, on a server, sets whether +// only the SHA-256 hash of peer's certificate should be saved in memory and in +// the session. This can save memory, ticket size and session cache space. If +// enabled, |SSL_get_peer_certificate| will return NULL after the handshake +// completes. See the |peer_sha256| field of |SSL_SESSION| for the hash. OPENSSL_EXPORT void SSL_CTX_set_retain_only_sha256_of_client_certs(SSL_CTX *ctx, int enable); -/* SSL_CTX_set_grease_enabled configures whether sockets on |ctx| should enable - * GREASE. See draft-davidben-tls-grease-01. */ +// SSL_CTX_set_grease_enabled configures whether sockets on |ctx| should enable +// GREASE. See draft-davidben-tls-grease-01. OPENSSL_EXPORT void SSL_CTX_set_grease_enabled(SSL_CTX *ctx, int enabled); -/* SSL_max_seal_overhead returns the maximum overhead, in bytes, of sealing a - * record with |ssl|. */ +// SSL_max_seal_overhead returns the maximum overhead, in bytes, of sealing a +// record with |ssl|. OPENSSL_EXPORT size_t SSL_max_seal_overhead(const SSL *ssl); -/* SSL_CTX_set_short_header_enabled configures whether a short record header in - * TLS 1.3 may be negotiated. 
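A short sketch of reading the handshake randoms documented above; the buffer size matches the TLS client/server random length:

uint8_t client_random[32], server_random[32];
size_t client_len =
    SSL_get_client_random(ssl, client_random, sizeof(client_random));
size_t server_len =
    SSL_get_server_random(ssl, server_random, sizeof(server_random));
// Passing |max_out| == 0 instead returns the size of each value.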
This allows client and server to negotiate - * https://github.com/tlswg/tls13-spec/pull/762 for testing. */ -OPENSSL_EXPORT void SSL_CTX_set_short_header_enabled(SSL_CTX *ctx, int enabled); +// SSL_get_ticket_age_skew returns the difference, in seconds, between the +// client-sent ticket age and the server-computed value in TLS 1.3 server +// connections which resumed a session. +OPENSSL_EXPORT int32_t SSL_get_ticket_age_skew(const SSL *ssl); -/* Deprecated functions. */ +// Deprecated functions. -/* SSL_library_init calls |CRYPTO_library_init| and returns one. */ +// SSL_library_init calls |CRYPTO_library_init| and returns one. OPENSSL_EXPORT int SSL_library_init(void); -/* SSL_CIPHER_description writes a description of |cipher| into |buf| and - * returns |buf|. If |buf| is NULL, it returns a newly allocated string, to be - * freed with |OPENSSL_free|, or NULL on error. - * - * The description includes a trailing newline and has the form: - * AES128-SHA Kx=RSA Au=RSA Enc=AES(128) Mac=SHA1 - * - * Consider |SSL_CIPHER_get_name| or |SSL_CIPHER_get_rfc_name| instead. */ +// SSL_CIPHER_description writes a description of |cipher| into |buf| and +// returns |buf|. If |buf| is NULL, it returns a newly allocated string, to be +// freed with |OPENSSL_free|, or NULL on error. +// +// The description includes a trailing newline and has the form: +// AES128-SHA Kx=RSA Au=RSA Enc=AES(128) Mac=SHA1 +// +// Consider |SSL_CIPHER_standard_name| or |SSL_CIPHER_get_name| instead. OPENSSL_EXPORT const char *SSL_CIPHER_description(const SSL_CIPHER *cipher, char *buf, int len); -/* SSL_CIPHER_get_version returns the string "TLSv1/SSLv3". */ +// SSL_CIPHER_get_version returns the string "TLSv1/SSLv3". OPENSSL_EXPORT const char *SSL_CIPHER_get_version(const SSL_CIPHER *cipher); +// SSL_CIPHER_get_rfc_name returns a newly-allocated string containing the +// result of |SSL_CIPHER_standard_name| or NULL on error. The caller is +// responsible for calling |OPENSSL_free| on the result. +// +// Use |SSL_CIPHER_standard_name| instead. +OPENSSL_EXPORT char *SSL_CIPHER_get_rfc_name(const SSL_CIPHER *cipher); + typedef void COMP_METHOD; -/* SSL_COMP_get_compression_methods returns NULL. */ -OPENSSL_EXPORT COMP_METHOD *SSL_COMP_get_compression_methods(void); +// SSL_COMP_get_compression_methods returns NULL. +OPENSSL_EXPORT STACK_OF(SSL_COMP) *SSL_COMP_get_compression_methods(void); -/* SSL_COMP_add_compression_method returns one. */ +// SSL_COMP_add_compression_method returns one. OPENSSL_EXPORT int SSL_COMP_add_compression_method(int id, COMP_METHOD *cm); -/* SSL_COMP_get_name returns NULL. */ +// SSL_COMP_get_name returns NULL. OPENSSL_EXPORT const char *SSL_COMP_get_name(const COMP_METHOD *comp); -/* SSL_COMP_free_compression_methods does nothing. */ +// SSL_COMP_free_compression_methods does nothing. OPENSSL_EXPORT void SSL_COMP_free_compression_methods(void); -/* SSLv23_method calls |TLS_method|. */ +// SSLv23_method calls |TLS_method|. OPENSSL_EXPORT const SSL_METHOD *SSLv23_method(void); -/* These version-specific methods behave exactly like |TLS_method| and - * |DTLS_method| except they also call |SSL_CTX_set_min_proto_version| and - * |SSL_CTX_set_max_proto_version| to lock connections to that protocol - * version. */ -OPENSSL_EXPORT const SSL_METHOD *SSLv3_method(void); +// These version-specific methods behave exactly like |TLS_method| and +// |DTLS_method| except they also call |SSL_CTX_set_min_proto_version| and +// |SSL_CTX_set_max_proto_version| to lock connections to that protocol +// version. 
OPENSSL_EXPORT const SSL_METHOD *TLSv1_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_1_method(void); OPENSSL_EXPORT const SSL_METHOD *TLSv1_2_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_2_method(void); -/* These client- and server-specific methods call their corresponding generic - * methods. */ +// SSLv3_method returns an |SSL_METHOD| with no versions enabled. +OPENSSL_EXPORT const SSL_METHOD *SSLv3_method(void); + +// These client- and server-specific methods call their corresponding generic +// methods. OPENSSL_EXPORT const SSL_METHOD *TLS_server_method(void); OPENSSL_EXPORT const SSL_METHOD *TLS_client_method(void); OPENSSL_EXPORT const SSL_METHOD *SSLv23_server_method(void); @@ -3269,156 +3555,172 @@ OPENSSL_EXPORT const SSL_METHOD *DTLSv1_client_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_2_server_method(void); OPENSSL_EXPORT const SSL_METHOD *DTLSv1_2_client_method(void); -/* SSL_clear resets |ssl| to allow another connection and returns one on success - * or zero on failure. It returns most configuration state but releases memory - * associated with the current connection. - * - * Free |ssl| and create a new one instead. */ +// SSL_clear resets |ssl| to allow another connection and returns one on success +// or zero on failure. It returns most configuration state but releases memory +// associated with the current connection. +// +// Free |ssl| and create a new one instead. OPENSSL_EXPORT int SSL_clear(SSL *ssl); -/* SSL_CTX_set_tmp_rsa_callback does nothing. */ +// SSL_CTX_set_tmp_rsa_callback does nothing. OPENSSL_EXPORT void SSL_CTX_set_tmp_rsa_callback( SSL_CTX *ctx, RSA *(*cb)(SSL *ssl, int is_export, int keylength)); -/* SSL_set_tmp_rsa_callback does nothing. */ +// SSL_set_tmp_rsa_callback does nothing. OPENSSL_EXPORT void SSL_set_tmp_rsa_callback(SSL *ssl, RSA *(*cb)(SSL *ssl, int is_export, int keylength)); -/* SSL_CTX_sess_connect returns zero. */ +// SSL_CTX_sess_connect returns zero. OPENSSL_EXPORT int SSL_CTX_sess_connect(const SSL_CTX *ctx); -/* SSL_CTX_sess_connect_good returns zero. */ +// SSL_CTX_sess_connect_good returns zero. OPENSSL_EXPORT int SSL_CTX_sess_connect_good(const SSL_CTX *ctx); -/* SSL_CTX_sess_connect_renegotiate returns zero. */ +// SSL_CTX_sess_connect_renegotiate returns zero. OPENSSL_EXPORT int SSL_CTX_sess_connect_renegotiate(const SSL_CTX *ctx); -/* SSL_CTX_sess_accept returns zero. */ +// SSL_CTX_sess_accept returns zero. OPENSSL_EXPORT int SSL_CTX_sess_accept(const SSL_CTX *ctx); -/* SSL_CTX_sess_accept_renegotiate returns zero. */ +// SSL_CTX_sess_accept_renegotiate returns zero. OPENSSL_EXPORT int SSL_CTX_sess_accept_renegotiate(const SSL_CTX *ctx); -/* SSL_CTX_sess_accept_good returns zero. */ +// SSL_CTX_sess_accept_good returns zero. OPENSSL_EXPORT int SSL_CTX_sess_accept_good(const SSL_CTX *ctx); -/* SSL_CTX_sess_hits returns zero. */ +// SSL_CTX_sess_hits returns zero. OPENSSL_EXPORT int SSL_CTX_sess_hits(const SSL_CTX *ctx); -/* SSL_CTX_sess_cb_hits returns zero. */ +// SSL_CTX_sess_cb_hits returns zero. OPENSSL_EXPORT int SSL_CTX_sess_cb_hits(const SSL_CTX *ctx); -/* SSL_CTX_sess_misses returns zero. */ +// SSL_CTX_sess_misses returns zero. OPENSSL_EXPORT int SSL_CTX_sess_misses(const SSL_CTX *ctx); -/* SSL_CTX_sess_timeouts returns zero. */ +// SSL_CTX_sess_timeouts returns zero. OPENSSL_EXPORT int SSL_CTX_sess_timeouts(const SSL_CTX *ctx); -/* SSL_CTX_sess_cache_full returns zero. */ +// SSL_CTX_sess_cache_full returns zero. 
OPENSSL_EXPORT int SSL_CTX_sess_cache_full(const SSL_CTX *ctx); -/* SSL_cutthrough_complete calls |SSL_in_false_start|. */ -OPENSSL_EXPORT int SSL_cutthrough_complete(const SSL *s); +// SSL_cutthrough_complete calls |SSL_in_false_start|. +OPENSSL_EXPORT int SSL_cutthrough_complete(const SSL *ssl); -/* SSL_num_renegotiations calls |SSL_total_renegotiations|. */ +// SSL_num_renegotiations calls |SSL_total_renegotiations|. OPENSSL_EXPORT int SSL_num_renegotiations(const SSL *ssl); -/* SSL_CTX_need_tmp_RSA returns zero. */ +// SSL_CTX_need_tmp_RSA returns zero. OPENSSL_EXPORT int SSL_CTX_need_tmp_RSA(const SSL_CTX *ctx); -/* SSL_need_tmp_RSA returns zero. */ +// SSL_need_tmp_RSA returns zero. OPENSSL_EXPORT int SSL_need_tmp_RSA(const SSL *ssl); -/* SSL_CTX_set_tmp_rsa returns one. */ +// SSL_CTX_set_tmp_rsa returns one. OPENSSL_EXPORT int SSL_CTX_set_tmp_rsa(SSL_CTX *ctx, const RSA *rsa); -/* SSL_set_tmp_rsa returns one. */ +// SSL_set_tmp_rsa returns one. OPENSSL_EXPORT int SSL_set_tmp_rsa(SSL *ssl, const RSA *rsa); -/* SSL_CTX_get_read_ahead returns zero. */ +// SSL_CTX_get_read_ahead returns zero. OPENSSL_EXPORT int SSL_CTX_get_read_ahead(const SSL_CTX *ctx); -/* SSL_CTX_set_read_ahead does nothing. */ +// SSL_CTX_set_read_ahead does nothing. OPENSSL_EXPORT void SSL_CTX_set_read_ahead(SSL_CTX *ctx, int yes); -/* SSL_get_read_ahead returns zero. */ -OPENSSL_EXPORT int SSL_get_read_ahead(const SSL *s); +// SSL_get_read_ahead returns zero. +OPENSSL_EXPORT int SSL_get_read_ahead(const SSL *ssl); -/* SSL_set_read_ahead does nothing. */ -OPENSSL_EXPORT void SSL_set_read_ahead(SSL *s, int yes); +// SSL_set_read_ahead does nothing. +OPENSSL_EXPORT void SSL_set_read_ahead(SSL *ssl, int yes); -/* SSL_renegotiate put an error on the error queue and returns zero. */ +// SSL_renegotiate put an error on the error queue and returns zero. OPENSSL_EXPORT int SSL_renegotiate(SSL *ssl); -/* SSL_set_state does nothing. */ +// SSL_set_state does nothing. OPENSSL_EXPORT void SSL_set_state(SSL *ssl, int state); -/* SSL_get_shared_ciphers writes an empty string to |buf| and returns a - * pointer to |buf|, or NULL if |len| is less than or equal to zero. */ +// SSL_get_shared_ciphers writes an empty string to |buf| and returns a +// pointer to |buf|, or NULL if |len| is less than or equal to zero. OPENSSL_EXPORT char *SSL_get_shared_ciphers(const SSL *ssl, char *buf, int len); -/* SSL_MODE_HANDSHAKE_CUTTHROUGH is the same as SSL_MODE_ENABLE_FALSE_START. */ +// SSL_MODE_HANDSHAKE_CUTTHROUGH is the same as SSL_MODE_ENABLE_FALSE_START. #define SSL_MODE_HANDSHAKE_CUTTHROUGH SSL_MODE_ENABLE_FALSE_START -/* i2d_SSL_SESSION serializes |in| to the bytes pointed to by |*pp|. On success, - * it returns the number of bytes written and advances |*pp| by that many bytes. - * On failure, it returns -1. If |pp| is NULL, no bytes are written and only the - * length is returned. - * - * Use |SSL_SESSION_to_bytes| instead. */ +// i2d_SSL_SESSION serializes |in| to the bytes pointed to by |*pp|. On success, +// it returns the number of bytes written and advances |*pp| by that many bytes. +// On failure, it returns -1. If |pp| is NULL, no bytes are written and only the +// length is returned. +// +// Use |SSL_SESSION_to_bytes| instead. OPENSSL_EXPORT int i2d_SSL_SESSION(SSL_SESSION *in, uint8_t **pp); -/* d2i_SSL_SESSION parses a serialized session from the |length| bytes pointed - * to by |*pp|. It returns the new |SSL_SESSION| and advances |*pp| by the - * number of bytes consumed on success and NULL on failure. 
The caller takes - * ownership of the new session and must call |SSL_SESSION_free| when done. - * - * If |a| is non-NULL, |*a| is released and set the new |SSL_SESSION|. - * - * Use |SSL_SESSION_from_bytes| instead. */ +// d2i_SSL_SESSION parses a serialized session from the |length| bytes pointed +// to by |*pp|. It returns the new |SSL_SESSION| and advances |*pp| by the +// number of bytes consumed on success and NULL on failure. The caller takes +// ownership of the new session and must call |SSL_SESSION_free| when done. +// +// If |a| is non-NULL, |*a| is released and set the new |SSL_SESSION|. +// +// Use |SSL_SESSION_from_bytes| instead. OPENSSL_EXPORT SSL_SESSION *d2i_SSL_SESSION(SSL_SESSION **a, const uint8_t **pp, long length); -/* i2d_SSL_SESSION_bio serializes |session| and writes the result to |bio|. It - * returns the number of bytes written on success and <= 0 on error. */ +// i2d_SSL_SESSION_bio serializes |session| and writes the result to |bio|. It +// returns the number of bytes written on success and <= 0 on error. OPENSSL_EXPORT int i2d_SSL_SESSION_bio(BIO *bio, const SSL_SESSION *session); -/* d2i_SSL_SESSION_bio reads a serialized |SSL_SESSION| from |bio| and returns a - * newly-allocated |SSL_SESSION| or NULL on error. If |out| is not NULL, it also - * frees |*out| and sets |*out| to the new |SSL_SESSION|. */ +// d2i_SSL_SESSION_bio reads a serialized |SSL_SESSION| from |bio| and returns a +// newly-allocated |SSL_SESSION| or NULL on error. If |out| is not NULL, it also +// frees |*out| and sets |*out| to the new |SSL_SESSION|. OPENSSL_EXPORT SSL_SESSION *d2i_SSL_SESSION_bio(BIO *bio, SSL_SESSION **out); -/* ERR_load_SSL_strings does nothing. */ +// ERR_load_SSL_strings does nothing. OPENSSL_EXPORT void ERR_load_SSL_strings(void); -/* SSL_load_error_strings does nothing. */ +// SSL_load_error_strings does nothing. OPENSSL_EXPORT void SSL_load_error_strings(void); -/* SSL_CTX_set_tlsext_use_srtp calls |SSL_CTX_set_srtp_profiles|. It returns - * zero on success and one on failure. - * - * WARNING: this function is dangerous because it breaks the usual return value - * convention. Use |SSL_CTX_set_srtp_profiles| instead. */ +// SSL_CTX_set_tlsext_use_srtp calls |SSL_CTX_set_srtp_profiles|. It returns +// zero on success and one on failure. +// +// WARNING: this function is dangerous because it breaks the usual return value +// convention. Use |SSL_CTX_set_srtp_profiles| instead. OPENSSL_EXPORT int SSL_CTX_set_tlsext_use_srtp(SSL_CTX *ctx, const char *profiles); -/* SSL_set_tlsext_use_srtp calls |SSL_set_srtp_profiles|. It returns zero on - * success and one on failure. - * - * WARNING: this function is dangerous because it breaks the usual return value - * convention. Use |SSL_set_srtp_profiles| instead. */ +// SSL_set_tlsext_use_srtp calls |SSL_set_srtp_profiles|. It returns zero on +// success and one on failure. +// +// WARNING: this function is dangerous because it breaks the usual return value +// convention. Use |SSL_set_srtp_profiles| instead. OPENSSL_EXPORT int SSL_set_tlsext_use_srtp(SSL *ssl, const char *profiles); -/* SSL_get_current_compression returns NULL. */ -OPENSSL_EXPORT const COMP_METHOD *SSL_get_current_compression(SSL *s); +// SSL_get_current_compression returns NULL. +OPENSSL_EXPORT const COMP_METHOD *SSL_get_current_compression(SSL *ssl); -/* SSL_get_current_expansion returns NULL. */ -OPENSSL_EXPORT const COMP_METHOD *SSL_get_current_expansion(SSL *s); +// SSL_get_current_expansion returns NULL. 
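A short sketch of the deprecated |i2d_SSL_SESSION| pattern described above (the comments recommend |SSL_SESSION_to_bytes| for new code); the allocation handling is illustrative:

int len = i2d_SSL_SESSION(session, NULL);  // NULL |pp|: only the length is returned
if (len > 0) {
  uint8_t *buf = OPENSSL_malloc((size_t)len);
  uint8_t *p = buf;
  if (buf != NULL && i2d_SSL_SESSION(session, &p) == len) {
    // |buf| holds the serialized session; |p| was advanced past the output.
  }
  OPENSSL_free(buf);
}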
+OPENSSL_EXPORT const COMP_METHOD *SSL_get_current_expansion(SSL *ssl); -/* SSL_get_server_tmp_key returns zero. */ +// SSL_get_server_tmp_key returns zero. OPENSSL_EXPORT int *SSL_get_server_tmp_key(SSL *ssl, EVP_PKEY **out_key); +// SSL_CTX_set_tmp_dh returns 1. +OPENSSL_EXPORT int SSL_CTX_set_tmp_dh(SSL_CTX *ctx, const DH *dh); + +// SSL_set_tmp_dh returns 1. +OPENSSL_EXPORT int SSL_set_tmp_dh(SSL *ssl, const DH *dh); + +// SSL_CTX_set_tmp_dh_callback does nothing. +OPENSSL_EXPORT void SSL_CTX_set_tmp_dh_callback( + SSL_CTX *ctx, DH *(*cb)(SSL *ssl, int is_export, int keylength)); + +// SSL_set_tmp_dh_callback does nothing. +OPENSSL_EXPORT void SSL_set_tmp_dh_callback(SSL *ssl, + DH *(*cb)(SSL *ssl, int is_export, + int keylength)); + + #define SSL_set_app_data(s, arg) (SSL_set_ex_data(s, 0, (char *)(arg))) #define SSL_get_app_data(s) (SSL_get_ex_data(s, 0)) #define SSL_SESSION_set_app_data(s, a) \ @@ -3452,10 +3754,10 @@ struct ssl_comp_st { char *method; }; -DECLARE_STACK_OF(SSL_COMP) +DEFINE_STACK_OF(SSL_COMP) -/* The following flags do nothing and are included only to make it easier to - * compile code with BoringSSL. */ +// The following flags do nothing and are included only to make it easier to +// compile code with BoringSSL. #define SSL_MODE_AUTO_RETRY 0 #define SSL_MODE_RELEASE_BUFFERS 0 #define SSL_MODE_SEND_CLIENTHELLO_TIME 0 @@ -3486,37 +3788,36 @@ DECLARE_STACK_OF(SSL_COMP) #define SSL_OP_TLS_ROLLBACK_BUG 0 #define SSL_VERIFY_CLIENT_ONCE 0 -/* SSL_cache_hit calls |SSL_session_reused|. */ +// SSL_cache_hit calls |SSL_session_reused|. OPENSSL_EXPORT int SSL_cache_hit(SSL *ssl); -/* SSL_get_default_timeout returns |SSL_DEFAULT_SESSION_TIMEOUT|. */ +// SSL_get_default_timeout returns |SSL_DEFAULT_SESSION_TIMEOUT|. OPENSSL_EXPORT long SSL_get_default_timeout(const SSL *ssl); -/* SSL_get_version returns a string describing the TLS version used by |ssl|. - * For example, "TLSv1.2" or "SSLv3". */ +// SSL_get_version returns a string describing the TLS version used by |ssl|. +// For example, "TLSv1.2" or "SSLv3". OPENSSL_EXPORT const char *SSL_get_version(const SSL *ssl); -/* SSL_get_cipher_list returns the name of the |n|th cipher in the output of - * |SSL_get_ciphers| or NULL if out of range. Use |SSL_get_ciphers| instead. */ +// SSL_get_cipher_list returns the name of the |n|th cipher in the output of +// |SSL_get_ciphers| or NULL if out of range. Use |SSL_get_ciphers| instead. OPENSSL_EXPORT const char *SSL_get_cipher_list(const SSL *ssl, int n); -/* SSL_CTX_set_client_cert_cb sets a callback which is called on the client if - * the server requests a client certificate and none is configured. On success, - * the callback should return one and set |*out_x509| to |*out_pkey| to a leaf - * certificate and private key, respectively, passing ownership. It should - * return zero to send no certificate and -1 to fail or pause the handshake. If - * the handshake is paused, |SSL_get_error| will return - * |SSL_ERROR_WANT_X509_LOOKUP|. - * - * The callback may call |SSL_get0_certificate_types| and - * |SSL_get_client_CA_list| for information on the server's certificate request. - * - * Use |SSL_CTX_set_cert_cb| instead. Configuring intermediate certificates with - * this function is confusing. This callback may not be registered concurrently - * with |SSL_CTX_set_cert_cb| or |SSL_set_cert_cb|. */ +// SSL_CTX_set_client_cert_cb sets a callback which is called on the client if +// the server requests a client certificate and none is configured. 
On success, +// the callback should return one and set |*out_x509| to |*out_pkey| to a leaf +// certificate and private key, respectively, passing ownership. It should +// return zero to send no certificate and -1 to fail or pause the handshake. If +// the handshake is paused, |SSL_get_error| will return +// |SSL_ERROR_WANT_X509_LOOKUP|. +// +// The callback may call |SSL_get0_certificate_types| and +// |SSL_get_client_CA_list| for information on the server's certificate request. +// +// Use |SSL_CTX_set_cert_cb| instead. Configuring intermediate certificates with +// this function is confusing. This callback may not be registered concurrently +// with |SSL_CTX_set_cert_cb| or |SSL_set_cert_cb|. OPENSSL_EXPORT void SSL_CTX_set_client_cert_cb( - SSL_CTX *ctx, - int (*client_cert_cb)(SSL *ssl, X509 **out_x509, EVP_PKEY **out_pkey)); + SSL_CTX *ctx, int (*cb)(SSL *ssl, X509 **out_x509, EVP_PKEY **out_pkey)); #define SSL_NOTHING 1 #define SSL_WRITING 2 @@ -3526,39 +3827,46 @@ OPENSSL_EXPORT void SSL_CTX_set_client_cert_cb( #define SSL_PENDING_SESSION 7 #define SSL_CERTIFICATE_SELECTION_PENDING 8 #define SSL_PRIVATE_KEY_OPERATION 9 +#define SSL_PENDING_TICKET 10 +#define SSL_EARLY_DATA_REJECTED 11 +#define SSL_CERTIFICATE_VERIFY 12 -/* SSL_want returns one of the above values to determine what the most recent - * operation on |ssl| was blocked on. Use |SSL_get_error| instead. */ +// SSL_want returns one of the above values to determine what the most recent +// operation on |ssl| was blocked on. Use |SSL_get_error| instead. OPENSSL_EXPORT int SSL_want(const SSL *ssl); #define SSL_want_read(ssl) (SSL_want(ssl) == SSL_READING) #define SSL_want_write(ssl) (SSL_want(ssl) == SSL_WRITING) - /* SSL_get_finished writes up to |count| bytes of the Finished message sent by - * |ssl| to |buf|. It returns the total untruncated length or zero if none has - * been sent yet. At SSL 3.0 or TLS 1.3 and later, it returns zero. - * - * Use |SSL_get_tls_unique| instead. */ + // SSL_get_finished writes up to |count| bytes of the Finished message sent by + // |ssl| to |buf|. It returns the total untruncated length or zero if none has + // been sent yet. At SSL 3.0 or TLS 1.3 and later, it returns zero. + // + // Use |SSL_get_tls_unique| instead. OPENSSL_EXPORT size_t SSL_get_finished(const SSL *ssl, void *buf, size_t count); - /* SSL_get_peer_finished writes up to |count| bytes of the Finished message - * received from |ssl|'s peer to |buf|. It returns the total untruncated length - * or zero if none has been received yet. At SSL 3.0 or TLS 1.3 and later, it - * returns zero. - * - * Use |SSL_get_tls_unique| instead. */ + // SSL_get_peer_finished writes up to |count| bytes of the Finished message + // received from |ssl|'s peer to |buf|. It returns the total untruncated length + // or zero if none has been received yet. At SSL 3.0 or TLS 1.3 and later, it + // returns zero. + // + // Use |SSL_get_tls_unique| instead. OPENSSL_EXPORT size_t SSL_get_peer_finished(const SSL *ssl, void *buf, size_t count); -/* SSL_alert_type_string returns "!". Use |SSL_alert_type_string_long| - * instead. */ +// SSL_alert_type_string returns "!". Use |SSL_alert_type_string_long| +// instead. OPENSSL_EXPORT const char *SSL_alert_type_string(int value); -/* SSL_alert_desc_string returns "!!". Use |SSL_alert_desc_string_long| - * instead. */ +// SSL_alert_desc_string returns "!!". Use |SSL_alert_desc_string_long| +// instead. OPENSSL_EXPORT const char *SSL_alert_desc_string(int value); -/* SSL_TXT_* expand to strings. 
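A minimal sketch of the client-certificate callback documented above; declining to send a certificate is an illustrative choice:

static int client_cert_cb(SSL *ssl, X509 **out_x509, EVP_PKEY **out_pkey) {
  // Returning zero sends no certificate. Returning one after setting
  // |*out_x509| and |*out_pkey| would pass ownership of a leaf certificate
  // and key instead.
  return 0;
}

SSL_CTX_set_client_cert_cb(ctx, client_cert_cb);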
*/ +// SSL_state_string returns "!!!!!!". Use |SSL_state_string_long| for a more +// intelligible string. +OPENSSL_EXPORT const char *SSL_state_string(const SSL *ssl); + +// SSL_TXT_* expand to strings. #define SSL_TXT_MEDIUM "MEDIUM" #define SSL_TXT_HIGH "HIGH" #define SSL_TXT_FIPS "FIPS" @@ -3602,117 +3910,121 @@ OPENSSL_EXPORT const char *SSL_alert_desc_string(int value); typedef struct ssl_conf_ctx_st SSL_CONF_CTX; -/* SSL_state returns |SSL_ST_INIT| if a handshake is in progress and |SSL_ST_OK| - * otherwise. - * - * Use |SSL_is_init| instead. */ +// SSL_state returns |SSL_ST_INIT| if a handshake is in progress and |SSL_ST_OK| +// otherwise. +// +// Use |SSL_is_init| instead. OPENSSL_EXPORT int SSL_state(const SSL *ssl); #define SSL_get_state(ssl) SSL_state(ssl) -/* SSL_state_string returns the current state of the handshake state machine as - * a six-letter string. Use |SSL_state_string_long| for a more intelligible - * string. */ -OPENSSL_EXPORT const char *SSL_state_string(const SSL *ssl); - -/* SSL_set_shutdown causes |ssl| to behave as if the shutdown bitmask (see - * |SSL_get_shutdown|) were |mode|. This may be used to skip sending or - * receiving close_notify in |SSL_shutdown| by causing the implementation to - * believe the events already happened. - * - * It is an error to use |SSL_set_shutdown| to unset a bit that has already been - * set. Doing so will trigger an |assert| in debug builds and otherwise be - * ignored. - * - * Use |SSL_CTX_set_quiet_shutdown| instead. */ +// SSL_set_shutdown causes |ssl| to behave as if the shutdown bitmask (see +// |SSL_get_shutdown|) were |mode|. This may be used to skip sending or +// receiving close_notify in |SSL_shutdown| by causing the implementation to +// believe the events already happened. +// +// It is an error to use |SSL_set_shutdown| to unset a bit that has already been +// set. Doing so will trigger an |assert| in debug builds and otherwise be +// ignored. +// +// Use |SSL_CTX_set_quiet_shutdown| instead. OPENSSL_EXPORT void SSL_set_shutdown(SSL *ssl, int mode); -/* SSL_CTX_set_tmp_ecdh calls |SSL_CTX_set1_curves| with a one-element list - * containing |ec_key|'s curve. */ +// SSL_CTX_set_tmp_ecdh calls |SSL_CTX_set1_curves| with a one-element list +// containing |ec_key|'s curve. OPENSSL_EXPORT int SSL_CTX_set_tmp_ecdh(SSL_CTX *ctx, const EC_KEY *ec_key); -/* SSL_set_tmp_ecdh calls |SSL_set1_curves| with a one-element list containing - * |ec_key|'s curve. */ +// SSL_set_tmp_ecdh calls |SSL_set1_curves| with a one-element list containing +// |ec_key|'s curve. OPENSSL_EXPORT int SSL_set_tmp_ecdh(SSL *ssl, const EC_KEY *ec_key); -/* SSL_add_dir_cert_subjects_to_stack lists files in directory |dir|. It calls - * |SSL_add_file_cert_subjects_to_stack| on each file and returns one on success - * or zero on error. This function is only available from the libdecrepit - * library. */ +// SSL_add_dir_cert_subjects_to_stack lists files in directory |dir|. It calls +// |SSL_add_file_cert_subjects_to_stack| on each file and returns one on success +// or zero on error. This function is only available from the libdecrepit +// library. OPENSSL_EXPORT int SSL_add_dir_cert_subjects_to_stack(STACK_OF(X509_NAME) *out, const char *dir); -/* SSL_set_private_key_digest_prefs copies |num_digests| NIDs from |digest_nids| - * into |ssl|. These digests will be used, in decreasing order of preference, - * when signing with |ssl|'s private key. It returns one on success and zero on - * error. - * - * Use |SSL_set_signing_algorithm_prefs| instead. 
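A short sketch of the |SSL_CTX_set_tmp_ecdh| shim documented above, assuming the usual EC_KEY helpers; per the comment, this amounts to configuring a one-element curve list:

EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);  // P-256
if (ec_key != NULL) {
  SSL_CTX_set_tmp_ecdh(ctx, ec_key);  // forwards to |SSL_CTX_set1_curves|
  EC_KEY_free(ec_key);
}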
- * - * TODO(davidben): Remove this API when callers have been updated. */ -OPENSSL_EXPORT int SSL_set_private_key_digest_prefs(SSL *ssl, - const int *digest_nids, - size_t num_digests); - -/* SSL_set_verify_result calls |abort| unless |result| is |X509_V_OK|. - * - * TODO(davidben): Remove this function once it has been removed from - * netty-tcnative. */ +// SSL_set_verify_result calls |abort| unless |result| is |X509_V_OK|. +// +// TODO(davidben): Remove this function once it has been removed from +// netty-tcnative. OPENSSL_EXPORT void SSL_set_verify_result(SSL *ssl, long result); -/* SSL_CTX_set_min_version calls |SSL_CTX_set_min_proto_version|. */ -OPENSSL_EXPORT int SSL_CTX_set_min_version(SSL_CTX *ctx, uint16_t version); - -/* SSL_CTX_set_max_version calls |SSL_CTX_set_max_proto_version|. */ -OPENSSL_EXPORT int SSL_CTX_set_max_version(SSL_CTX *ctx, uint16_t version); - -/* SSL_set_min_version calls |SSL_set_min_proto_version|. */ -OPENSSL_EXPORT int SSL_set_min_version(SSL *ssl, uint16_t version); - -/* SSL_set_max_version calls |SSL_set_max_proto_version|. */ -OPENSSL_EXPORT int SSL_set_max_version(SSL *ssl, uint16_t version); - -/* SSL_CTX_enable_tls_channel_id calls |SSL_CTX_set_tls_channel_id_enabled|. */ +// SSL_CTX_enable_tls_channel_id calls |SSL_CTX_set_tls_channel_id_enabled|. OPENSSL_EXPORT int SSL_CTX_enable_tls_channel_id(SSL_CTX *ctx); -/* SSL_enable_tls_channel_id calls |SSL_set_tls_channel_id_enabled|. */ +// SSL_enable_tls_channel_id calls |SSL_set_tls_channel_id_enabled|. OPENSSL_EXPORT int SSL_enable_tls_channel_id(SSL *ssl); -/* BIO_f_ssl returns a |BIO_METHOD| that can wrap an |SSL*| in a |BIO*|. Note - * that this has quite different behaviour from the version in OpenSSL (notably - * that it doesn't try to auto renegotiate). - * - * IMPORTANT: if you are not curl, don't use this. */ +// BIO_f_ssl returns a |BIO_METHOD| that can wrap an |SSL*| in a |BIO*|. Note +// that this has quite different behaviour from the version in OpenSSL (notably +// that it doesn't try to auto renegotiate). +// +// IMPORTANT: if you are not curl, don't use this. OPENSSL_EXPORT const BIO_METHOD *BIO_f_ssl(void); -/* BIO_set_ssl sets |ssl| as the underlying connection for |bio|, which must - * have been created using |BIO_f_ssl|. If |take_owership| is true, |bio| will - * call |SSL_free| on |ssl| when closed. It returns one on success or something - * other than one on error. */ +// BIO_set_ssl sets |ssl| as the underlying connection for |bio|, which must +// have been created using |BIO_f_ssl|. If |take_owership| is true, |bio| will +// call |SSL_free| on |ssl| when closed. It returns one on success or something +// other than one on error. OPENSSL_EXPORT long BIO_set_ssl(BIO *bio, SSL *ssl, int take_owership); +// SSL_CTX_set_ecdh_auto returns one. +#define SSL_CTX_set_ecdh_auto(ctx, onoff) 1 + +// SSL_set_ecdh_auto returns one. +#define SSL_set_ecdh_auto(ssl, onoff) 1 + +// SSL_get_session returns a non-owning pointer to |ssl|'s session. For +// historical reasons, which session it returns depends on |ssl|'s state. +// +// Prior to the start of the initial handshake, it returns the session the +// caller set with |SSL_set_session|. After the initial handshake has finished +// and if no additional handshakes are in progress, it returns the currently +// active session. Its behavior is undefined while a handshake is in progress. +// +// If trying to add new sessions to an external session cache, use +// |SSL_CTX_sess_set_new_cb| instead. 
In particular, using the callback is +// required as of TLS 1.3. For compatibility, this function will return an +// unresumable session which may be cached, but will never be resumed. +// +// If querying properties of the connection, use APIs on the |SSL| object. +OPENSSL_EXPORT SSL_SESSION *SSL_get_session(const SSL *ssl); -/* Private structures. - * - * This structures are exposed for historical reasons, but access to them is - * deprecated. */ +// SSL_get0_session is an alias for |SSL_get_session|. +#define SSL_get0_session SSL_get_session -typedef struct ssl_protocol_method_st SSL_PROTOCOL_METHOD; -typedef struct ssl_x509_method_st SSL_X509_METHOD; +// SSL_get1_session acts like |SSL_get_session| but returns a new reference to +// the session. +OPENSSL_EXPORT SSL_SESSION *SSL_get1_session(SSL *ssl); -struct ssl_cipher_st { - /* name is the OpenSSL name for the cipher. */ - const char *name; - /* id is the cipher suite value bitwise OR-d with 0x03000000. */ - uint32_t id; - - /* algorithm_* are internal fields. See ssl/internal.h for their values. */ - uint32_t algorithm_mkey; - uint32_t algorithm_auth; - uint32_t algorithm_enc; - uint32_t algorithm_mac; - uint32_t algorithm_prf; -}; +#define OPENSSL_INIT_NO_LOAD_SSL_STRINGS 0 +#define OPENSSL_INIT_LOAD_SSL_STRINGS 0 +#define OPENSSL_INIT_SSL_DEFAULT 0 + +// OPENSSL_init_ssl calls |CRYPTO_library_init| and returns one. +OPENSSL_EXPORT int OPENSSL_init_ssl(uint64_t opts, + const OPENSSL_INIT_SETTINGS *settings); + +#if !defined(BORINGSSL_NO_CXX) +// SSL_CTX_sess_set_get_cb is a legacy C++ overload of |SSL_CTX_sess_set_get_cb| +// which supports the old callback signature. +// +// TODO(davidben): Remove this once Node is compatible with OpenSSL 1.1.0. +extern "C++" OPENSSL_EXPORT void SSL_CTX_sess_set_get_cb( + SSL_CTX *ctx, SSL_SESSION *(*get_session_cb)(SSL *ssl, uint8_t *id, + int id_len, int *out_copy)); +#endif + + +// Private structures. +// +// This structures are exposed for historical reasons, but access to them is +// deprecated. + +// TODO(davidben): Remove this forward declaration when |SSL_SESSION| is opaque. +typedef struct ssl_x509_method_st SSL_X509_METHOD; #define SSL_MAX_SSL_SESSION_ID_LENGTH 32 #define SSL_MAX_SID_CTX_LENGTH 32 @@ -3720,432 +4032,150 @@ struct ssl_cipher_st { struct ssl_session_st { CRYPTO_refcount_t references; - int ssl_version; /* what ssl version session info is being kept in here? */ + uint16_t ssl_version; // what ssl version session info is being kept in here? - /* group_id is the ID of the ECDH group used to establish this session or zero - * if not applicable or unknown. */ + // group_id is the ID of the ECDH group used to establish this session or zero + // if not applicable or unknown. uint16_t group_id; - /* peer_signature_algorithm is the signature algorithm used to authenticate - * the peer, or zero if not applicable or unknown. */ + // peer_signature_algorithm is the signature algorithm used to authenticate + // the peer, or zero if not applicable or unknown. uint16_t peer_signature_algorithm; - /* master_key, in TLS 1.2 and below, is the master secret associated with the - * session. In TLS 1.3 and up, it is the resumption secret. */ + // master_key, in TLS 1.2 and below, is the master secret associated with the + // session. In TLS 1.3 and up, it is the resumption secret. int master_key_length; uint8_t master_key[SSL_MAX_MASTER_KEY_LENGTH]; - /* session_id - valid? */ + // session_id - valid? 
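A minimal sketch of taking a session reference via |SSL_get1_session| as documented above; caching it for a later |SSL_set_session| is the illustrative use:

SSL_SESSION *sess = SSL_get1_session(ssl);  // new reference to the session
if (sess != NULL) {
  // ... stash |sess| for a later SSL_set_session() ...
  SSL_SESSION_free(sess);  // release the reference when done
}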
unsigned int session_id_length; uint8_t session_id[SSL_MAX_SSL_SESSION_ID_LENGTH]; - /* this is used to determine whether the session is being reused in - * the appropriate context. It is up to the application to set this, - * via SSL_new */ + // this is used to determine whether the session is being reused in + // the appropriate context. It is up to the application to set this, + // via SSL_new uint8_t sid_ctx_length; uint8_t sid_ctx[SSL_MAX_SID_CTX_LENGTH]; char *psk_identity; - /* certs contains the certificate chain from the peer, starting with the leaf - * certificate. */ + // certs contains the certificate chain from the peer, starting with the leaf + // certificate. STACK_OF(CRYPTO_BUFFER) *certs; const SSL_X509_METHOD *x509_method; - /* x509_peer is the peer's certificate. */ + // x509_peer is the peer's certificate. X509 *x509_peer; - /* x509_chain is the certificate chain sent by the peer. NOTE: for historical - * reasons, when a client (so the peer is a server), the chain includes - * |peer|, but when a server it does not. */ + // x509_chain is the certificate chain sent by the peer. NOTE: for historical + // reasons, when a client (so the peer is a server), the chain includes + // |peer|, but when a server it does not. STACK_OF(X509) *x509_chain; - /* x509_chain_without_leaf is a lazily constructed copy of |x509_chain| that - * omits the leaf certificate. This exists because OpenSSL, historically, - * didn't include the leaf certificate in the chain for a server, but did for - * a client. The |x509_chain| always includes it and, if an API call requires - * a chain without, it is stored here. */ + // x509_chain_without_leaf is a lazily constructed copy of |x509_chain| that + // omits the leaf certificate. This exists because OpenSSL, historically, + // didn't include the leaf certificate in the chain for a server, but did for + // a client. The |x509_chain| always includes it and, if an API call requires + // a chain without, it is stored here. STACK_OF(X509) *x509_chain_without_leaf; - /* verify_result is the result of certificate verification in the case of - * non-fatal certificate errors. */ + // verify_result is the result of certificate verification in the case of + // non-fatal certificate errors. long verify_result; - /* timeout is the lifetime of the session in seconds, measured from |time|. - * This is renewable up to |auth_timeout|. */ - long timeout; + // timeout is the lifetime of the session in seconds, measured from |time|. + // This is renewable up to |auth_timeout|. + uint32_t timeout; - /* auth_timeout is the non-renewable lifetime of the session in seconds, - * measured from |time|. */ - long auth_timeout; + // auth_timeout is the non-renewable lifetime of the session in seconds, + // measured from |time|. + uint32_t auth_timeout; - /* time is the time the session was issued, measured in seconds from the UNIX - * epoch. */ - long time; + // time is the time the session was issued, measured in seconds from the UNIX + // epoch. + uint64_t time; const SSL_CIPHER *cipher; - CRYPTO_EX_DATA ex_data; /* application specific data */ + CRYPTO_EX_DATA ex_data; // application specific data - /* These are used to make removal of session-ids more efficient and to - * implement a maximum cache size. */ + // These are used to make removal of session-ids more efficient and to + // implement a maximum cache size. 
SSL_SESSION *prev, *next; - char *tlsext_hostname; - /* RFC4507 info */ - uint8_t *tlsext_tick; /* Session ticket */ - size_t tlsext_ticklen; /* Session ticket length */ + // RFC4507 info + uint8_t *tlsext_tick; // Session ticket + size_t tlsext_ticklen; // Session ticket length - size_t tlsext_signed_cert_timestamp_list_length; - uint8_t *tlsext_signed_cert_timestamp_list; /* Server's list. */ + CRYPTO_BUFFER *signed_cert_timestamp_list; - /* The OCSP response that came with the session. */ - size_t ocsp_response_length; - uint8_t *ocsp_response; + // The OCSP response that came with the session. + CRYPTO_BUFFER *ocsp_response; - /* peer_sha256 contains the SHA-256 hash of the peer's certificate if - * |peer_sha256_valid| is true. */ + // peer_sha256 contains the SHA-256 hash of the peer's certificate if + // |peer_sha256_valid| is true. uint8_t peer_sha256[SHA256_DIGEST_LENGTH]; - /* original_handshake_hash contains the handshake hash (either SHA-1+MD5 or - * SHA-2, depending on TLS version) for the original, full handshake that - * created a session. This is used by Channel IDs during resumption. */ + // original_handshake_hash contains the handshake hash (either SHA-1+MD5 or + // SHA-2, depending on TLS version) for the original, full handshake that + // created a session. This is used by Channel IDs during resumption. uint8_t original_handshake_hash[EVP_MAX_MD_SIZE]; uint8_t original_handshake_hash_len; - uint32_t tlsext_tick_lifetime_hint; /* Session lifetime hint in seconds */ + uint32_t tlsext_tick_lifetime_hint; // Session lifetime hint in seconds uint32_t ticket_age_add; - /* ticket_max_early_data is the maximum amount of data allowed to be sent as - * early data. If zero, 0-RTT is disallowed. */ + // ticket_max_early_data is the maximum amount of data allowed to be sent as + // early data. If zero, 0-RTT is disallowed. uint32_t ticket_max_early_data; - /* early_alpn is the ALPN protocol from the initial handshake. This is only - * stored for TLS 1.3 and above in order to enforce ALPN matching for 0-RTT - * resumptions. */ + // early_alpn is the ALPN protocol from the initial handshake. This is only + // stored for TLS 1.3 and above in order to enforce ALPN matching for 0-RTT + // resumptions. uint8_t *early_alpn; size_t early_alpn_len; - /* extended_master_secret is true if the master secret in this session was - * generated using EMS and thus isn't vulnerable to the Triple Handshake - * attack. */ + // extended_master_secret is true if the master secret in this session was + // generated using EMS and thus isn't vulnerable to the Triple Handshake + // attack. unsigned extended_master_secret:1; - /* peer_sha256_valid is non-zero if |peer_sha256| is valid. */ - unsigned peer_sha256_valid:1; /* Non-zero if peer_sha256 is valid */ + // peer_sha256_valid is non-zero if |peer_sha256| is valid. + unsigned peer_sha256_valid:1; // Non-zero if peer_sha256 is valid - /* not_resumable is used to indicate that session resumption is disallowed. */ + // not_resumable is used to indicate that session resumption is disallowed. unsigned not_resumable:1; - /* ticket_age_add_valid is non-zero if |ticket_age_add| is valid. */ + // ticket_age_add_valid is non-zero if |ticket_age_add| is valid. unsigned ticket_age_add_valid:1; - /* is_server is true if this session was created by a server. */ + // is_server is true if this session was created by a server. unsigned is_server:1; }; -/* ssl_cipher_preference_list_st contains a list of SSL_CIPHERs with - * equal-preference groups. 
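The documentation added above steers callers away from the |SSL_SESSION| fields in this struct and toward |SSL_get_session|/|SSL_get1_session| plus accessors. A minimal sketch (not part of this patch) of holding a session reference under that model, assuming the usual BoringSSL accessors |SSL_SESSION_get_timeout| and |SSL_SESSION_free|:

    #include <stdio.h>
    #include <openssl/ssl.h>

    // Take a caller-owned reference to the current session, if any.
    static SSL_SESSION *retain_session(SSL *ssl) {
      SSL_SESSION *session = SSL_get1_session(ssl);  // new reference or NULL
      if (session != NULL) {
        // Inspect it only through accessors; the struct fields are private.
        printf("session timeout: %u seconds\n",
               (unsigned)SSL_SESSION_get_timeout(session));
      }
      return session;  // caller must eventually call SSL_SESSION_free().
    }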
For TLS clients, the groups are moot because the - * server picks the cipher and groups cannot be expressed on the wire. However, - * for servers, the equal-preference groups allow the client's preferences to - * be partially respected. (This only has an effect with - * SSL_OP_CIPHER_SERVER_PREFERENCE). - * - * The equal-preference groups are expressed by grouping SSL_CIPHERs together. - * All elements of a group have the same priority: no ordering is expressed - * within a group. - * - * The values in |ciphers| are in one-to-one correspondence with - * |in_group_flags|. (That is, sk_SSL_CIPHER_num(ciphers) is the number of - * bytes in |in_group_flags|.) The bytes in |in_group_flags| are either 1, to - * indicate that the corresponding SSL_CIPHER is not the last element of a - * group, or 0 to indicate that it is. - * - * For example, if |in_group_flags| contains all zeros then that indicates a - * traditional, fully-ordered preference. Every SSL_CIPHER is the last element - * of the group (i.e. they are all in a one-element group). - * - * For a more complex example, consider: - * ciphers: A B C D E F - * in_group_flags: 1 1 0 0 1 0 - * - * That would express the following, order: - * - * A E - * B -> D -> F - * C - */ -struct ssl_cipher_preference_list_st { - STACK_OF(SSL_CIPHER) *ciphers; - uint8_t *in_group_flags; -}; - -/* ssl_ctx_st (aka |SSL_CTX|) contains configuration common to several SSL - * connections. */ -struct ssl_ctx_st { - const SSL_PROTOCOL_METHOD *method; - const SSL_X509_METHOD *x509_method; - /* lock is used to protect various operations on this object. */ - CRYPTO_MUTEX lock; - - /* max_version is the maximum acceptable protocol version. Note this version - * is normalized in DTLS. */ - uint16_t max_version; - - /* min_version is the minimum acceptable protocol version. Note this version - * is normalized in DTLS. */ - uint16_t min_version; - - struct ssl_cipher_preference_list_st *cipher_list; - - X509_STORE *cert_store; - LHASH_OF(SSL_SESSION) *sessions; - /* Most session-ids that will be cached, default is - * SSL_SESSION_CACHE_MAX_SIZE_DEFAULT. 0 is unlimited. */ - unsigned long session_cache_size; - SSL_SESSION *session_cache_head; - SSL_SESSION *session_cache_tail; - - /* handshakes_since_cache_flush is the number of successful handshakes since - * the last cache flush. */ - int handshakes_since_cache_flush; - - /* This can have one of 2 values, ored together, - * SSL_SESS_CACHE_CLIENT, - * SSL_SESS_CACHE_SERVER, - * Default is SSL_SESSION_CACHE_SERVER, which means only - * SSL_accept which cache SSL_SESSIONS. */ - int session_cache_mode; - - /* session_timeout is the default lifetime for new sessions in TLS 1.2 and - * earlier, in seconds. */ - long session_timeout; - - /* session_psk_dhe_timeout is the default lifetime for new sessions in TLS - * 1.3, in seconds. */ - long session_psk_dhe_timeout; - - /* If this callback is not null, it will be called each time a session id is - * added to the cache. If this function returns 1, it means that the - * callback will do a SSL_SESSION_free() when it has finished using it. - * Otherwise, on 0, it means the callback has finished with it. If - * remove_session_cb is not null, it will be called when a session-id is - * removed from the cache. After the call, OpenSSL will SSL_SESSION_free() - * it. 
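The removed comment above spells out the ownership contract for |new_session_cb|: returning 1 means the callback keeps the reference and frees it itself, returning 0 leaves ownership with the library. A small illustrative callback (not part of the patch), registered through the public |SSL_CTX_sess_set_new_cb| setter:

    #include <stdio.h>
    #include <openssl/ssl.h>

    static int on_new_session(SSL *ssl, SSL_SESSION *session) {
      (void)ssl;
      unsigned id_len = 0;
      SSL_SESSION_get_id(session, &id_len);
      printf("new session cached, %u-byte ID\n", id_len);
      // Return 0: we did not keep a reference, so the library retains
      // ownership. Returning 1 would mean this callback keeps the reference
      // and later calls SSL_SESSION_free() itself.
      return 0;
    }

    // Typically installed with:
    //   SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER);
    //   SSL_CTX_sess_set_new_cb(ctx, on_new_session);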
*/ - int (*new_session_cb)(SSL *ssl, SSL_SESSION *sess); - void (*remove_session_cb)(SSL_CTX *ctx, SSL_SESSION *sess); - SSL_SESSION *(*get_session_cb)(SSL *ssl, uint8_t *data, int len, - int *copy); - - CRYPTO_refcount_t references; - - /* if defined, these override the X509_verify_cert() calls */ - int (*app_verify_callback)(X509_STORE_CTX *store_ctx, void *arg); - void *app_verify_arg; - - /* Default password callback. */ - pem_password_cb *default_passwd_callback; - - /* Default password callback user data. */ - void *default_passwd_callback_userdata; - - /* get client cert callback */ - int (*client_cert_cb)(SSL *ssl, X509 **out_x509, EVP_PKEY **out_pkey); - - /* get channel id callback */ - void (*channel_id_cb)(SSL *ssl, EVP_PKEY **out_pkey); - - CRYPTO_EX_DATA ex_data; - - /* custom_*_extensions stores any callback sets for custom extensions. Note - * that these pointers will be NULL if the stack would otherwise be empty. */ - STACK_OF(SSL_CUSTOM_EXTENSION) *client_custom_extensions; - STACK_OF(SSL_CUSTOM_EXTENSION) *server_custom_extensions; - - /* Default values used when no per-SSL value is defined follow */ - - void (*info_callback)(const SSL *ssl, int type, int value); - - /* what we put in client cert requests */ - STACK_OF(X509_NAME) *client_CA; - - - /* Default values to use in SSL structures follow (these are copied by - * SSL_new) */ - - uint32_t options; - uint32_t mode; - uint32_t max_cert_list; - - struct cert_st /* CERT */ *cert; - - /* callback that allows applications to peek at protocol messages */ - void (*msg_callback)(int write_p, int version, int content_type, - const void *buf, size_t len, SSL *ssl, void *arg); - void *msg_callback_arg; - - int verify_mode; - int (*default_verify_callback)( - int ok, X509_STORE_CTX *ctx); /* called 'verify_callback' in the SSL */ - - X509_VERIFY_PARAM *param; - - /* select_certificate_cb is called before most ClientHello processing and - * before the decision whether to resume a session is made. It may return one - * to continue the handshake or zero to cause the handshake loop to return - * with an error and cause SSL_get_error to return - * SSL_ERROR_PENDING_CERTIFICATE. Note: when the handshake loop is resumed, it - * will not call the callback a second time. */ - int (*select_certificate_cb)(const SSL_CLIENT_HELLO *); - - /* dos_protection_cb is called once the resumption decision for a ClientHello - * has been made. It returns one to continue the handshake or zero to - * abort. */ - int (*dos_protection_cb) (const SSL_CLIENT_HELLO *); - - /* Maximum amount of data to send in one fragment. actual record size can be - * more than this due to padding and MAC overheads. */ - uint16_t max_send_fragment; - - /* TLS extensions servername callback */ - int (*tlsext_servername_callback)(SSL *, int *, void *); - void *tlsext_servername_arg; - /* RFC 4507 session ticket keys */ - uint8_t tlsext_tick_key_name[SSL_TICKET_KEY_NAME_LEN]; - uint8_t tlsext_tick_hmac_key[16]; - uint8_t tlsext_tick_aes_key[16]; - /* Callback to support customisation of ticket key setting */ - int (*tlsext_ticket_key_cb)(SSL *ssl, uint8_t *name, uint8_t *iv, - EVP_CIPHER_CTX *ectx, HMAC_CTX *hctx, int enc); - - /* Server-only: psk_identity_hint is the default identity hint to send in - * PSK-based key exchanges. 
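The |psk_identity_hint| field described above backs the public PSK configuration API. A hedged sketch of the server-side setup (not part of the patch), assuming the OpenSSL-style PSK callback signature; the identity string and key bytes are placeholders:

    #include <string.h>
    #include <openssl/ssl.h>

    static unsigned psk_server_cb(SSL *ssl, const char *identity,
                                  uint8_t *psk, unsigned max_psk_len) {
      (void)ssl;
      static const uint8_t kKey[] = {0x01, 0x02, 0x03, 0x04};
      if (strcmp(identity, "client-1") != 0 || max_psk_len < sizeof(kKey)) {
        return 0;  // unknown identity or buffer too small: fail the handshake
      }
      memcpy(psk, kKey, sizeof(kKey));
      return (unsigned)sizeof(kKey);
    }

    static void configure_psk(SSL_CTX *ctx) {
      SSL_CTX_use_psk_identity_hint(ctx, "example-hint");  // the default hint
      SSL_CTX_set_psk_server_callback(ctx, psk_server_cb);
    }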
*/ - char *psk_identity_hint; - - unsigned int (*psk_client_callback)(SSL *ssl, const char *hint, - char *identity, - unsigned int max_identity_len, - uint8_t *psk, unsigned int max_psk_len); - unsigned int (*psk_server_callback)(SSL *ssl, const char *identity, - uint8_t *psk, unsigned int max_psk_len); - - - /* retain_only_sha256_of_client_certs is true if we should compute the SHA256 - * hash of the peer's certificate and then discard it to save memory and - * session space. Only effective on the server side. */ - char retain_only_sha256_of_client_certs; - - /* Next protocol negotiation information */ - /* (for experimental NPN extension). */ - - /* For a server, this contains a callback function by which the set of - * advertised protocols can be provided. */ - int (*next_protos_advertised_cb)(SSL *ssl, const uint8_t **out, - unsigned *out_len, void *arg); - void *next_protos_advertised_cb_arg; - /* For a client, this contains a callback function that selects the - * next protocol from the list provided by the server. */ - int (*next_proto_select_cb)(SSL *ssl, uint8_t **out, uint8_t *out_len, - const uint8_t *in, unsigned in_len, void *arg); - void *next_proto_select_cb_arg; - - /* ALPN information - * (we are in the process of transitioning from NPN to ALPN.) */ - - /* For a server, this contains a callback function that allows the - * server to select the protocol for the connection. - * out: on successful return, this must point to the raw protocol - * name (without the length prefix). - * outlen: on successful return, this contains the length of |*out|. - * in: points to the client's list of supported protocols in - * wire-format. - * inlen: the length of |in|. */ - int (*alpn_select_cb)(SSL *s, const uint8_t **out, uint8_t *out_len, - const uint8_t *in, unsigned in_len, void *arg); - void *alpn_select_cb_arg; - - /* For a client, this contains the list of supported protocols in wire - * format. */ - uint8_t *alpn_client_proto_list; - unsigned alpn_client_proto_list_len; - - /* SRTP profiles we are willing to do from RFC 5764 */ - STACK_OF(SRTP_PROTECTION_PROFILE) *srtp_profiles; - - /* Supported group values inherited by SSL structure */ - size_t supported_group_list_len; - uint16_t *supported_group_list; - - /* The client's Channel ID private key. */ - EVP_PKEY *tlsext_channel_id_private; - - /* keylog_callback, if not NULL, is the key logging callback. See - * |SSL_CTX_set_keylog_callback|. */ - void (*keylog_callback)(const SSL *ssl, const char *line); - - /* current_time_cb, if not NULL, is the function to use to get the current - * time. It sets |*out_clock| to the current time. See - * |SSL_CTX_set_current_time_cb|. */ - void (*current_time_cb)(const SSL *ssl, struct timeval *out_clock); - - /* pool is used for all |CRYPTO_BUFFER|s in case we wish to share certificate - * memory. */ - CRYPTO_BUFFER_POOL *pool; - - /* quiet_shutdown is true if the connection should not send a close_notify on - * shutdown. */ - unsigned quiet_shutdown:1; - - /* If enable_early_data is non-zero, early data can be sent and accepted over - * new connections. */ - unsigned enable_early_data:1; - - /* ocsp_stapling_enabled is only used by client connections and indicates - * whether OCSP stapling will be requested. */ - unsigned ocsp_stapling_enabled:1; - - /* If true, a client will request certificate timestamps. */ - unsigned signed_cert_timestamps_enabled:1; - - /* tlsext_channel_id_enabled is one if Channel ID is enabled and zero - * otherwise. 
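The |alpn_select_cb| field removed above documents the exact out/out_len/in/in_len contract that |SSL_CTX_set_alpn_select_cb| exposes publicly. An illustrative server callback (not part of the patch) that offers only "http/1.1", using |SSL_select_next_proto| to intersect the lists:

    #include <stddef.h>
    #include <openssl/ssl.h>

    static const uint8_t kProtos[] = {8, 'h', 't', 't', 'p', '/', '1', '.', '1'};

    static int alpn_select_cb(SSL *ssl, const uint8_t **out, uint8_t *out_len,
                              const uint8_t *in, unsigned in_len, void *arg) {
      (void)ssl;
      (void)arg;
      // |*out| must point into |in| or static storage; it is not copied.
      if (SSL_select_next_proto((uint8_t **)out, out_len, kProtos,
                                sizeof(kProtos), in, in_len) !=
          OPENSSL_NPN_NEGOTIATED) {
        return SSL_TLSEXT_ERR_NOACK;
      }
      return SSL_TLSEXT_ERR_OK;
    }

    // Installed with: SSL_CTX_set_alpn_select_cb(ctx, alpn_select_cb, NULL);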
For a server, means that we'll accept Channel IDs from clients. - * For a client, means that we'll advertise support. */ - unsigned tlsext_channel_id_enabled:1; - - /* grease_enabled is one if draft-davidben-tls-grease-01 is enabled and zero - * otherwise. */ - unsigned grease_enabled:1; - - /* short_header_enabled is one if a short record header in TLS 1.3 may - * be negotiated and zero otherwise. */ - unsigned short_header_enabled:1; -}; - - -/* Nodejs compatibility section (hidden). - * - * These defines exist for node.js, with the hope that we can eliminate the - * need for them over time. */ +// Nodejs compatibility section (hidden). +// +// These defines exist for node.js, with the hope that we can eliminate the +// need for them over time. #define SSLerr(function, reason) \ ERR_put_error(ERR_LIB_SSL, 0, reason, __FILE__, __LINE__) -/* Preprocessor compatibility section (hidden). - * - * Historically, a number of APIs were implemented in OpenSSL as macros and - * constants to 'ctrl' functions. To avoid breaking #ifdefs in consumers, this - * section defines a number of legacy macros. - * - * Although using either the CTRL values or their wrapper macros in #ifdefs is - * still supported, the CTRL values may not be passed to |SSL_ctrl| and - * |SSL_CTX_ctrl|. Call the functions (previously wrapper macros) instead. - * - * See PORTING.md in the BoringSSL source tree for a table of corresponding - * functions. - * https://boringssl.googlesource.com/boringssl/+/master/PORTING.md#Replacements-for-values - */ +// Preprocessor compatibility section (hidden). +// +// Historically, a number of APIs were implemented in OpenSSL as macros and +// constants to 'ctrl' functions. To avoid breaking #ifdefs in consumers, this +// section defines a number of legacy macros. +// +// Although using either the CTRL values or their wrapper macros in #ifdefs is +// still supported, the CTRL values may not be passed to |SSL_ctrl| and +// |SSL_CTX_ctrl|. Call the functions (previously wrapper macros) instead. +// +// See PORTING.md in the BoringSSL source tree for a table of corresponding +// functions. +// https://boringssl.googlesource.com/boringssl/+/master/PORTING.md#Replacements-for-values #define DTLS_CTRL_GET_TIMEOUT doesnt_exist #define DTLS_CTRL_HANDLE_TIMEOUT doesnt_exist @@ -4175,6 +4205,7 @@ struct ssl_ctx_st { #define SSL_CTRL_SESS_NUMBER doesnt_exist #define SSL_CTRL_SET_CURVES doesnt_exist #define SSL_CTRL_SET_CURVES_LIST doesnt_exist +#define SSL_CTRL_SET_ECDH_AUTO doesnt_exist #define SSL_CTRL_SET_MAX_CERT_LIST doesnt_exist #define SSL_CTRL_SET_MAX_SEND_FRAGMENT doesnt_exist #define SSL_CTRL_SET_MSG_CALLBACK doesnt_exist @@ -4266,7 +4297,9 @@ struct ssl_ctx_st { #if defined(__cplusplus) -} /* extern C */ +} // extern C + +#if !defined(BORINGSSL_NO_CXX) extern "C++" { @@ -4276,9 +4309,67 @@ BORINGSSL_MAKE_DELETER(SSL, SSL_free) BORINGSSL_MAKE_DELETER(SSL_CTX, SSL_CTX_free) BORINGSSL_MAKE_DELETER(SSL_SESSION, SSL_SESSION_free) +enum class OpenRecordResult { + kOK, + kDiscard, + kIncompleteRecord, + kAlertCloseNotify, + kError, +}; + +// *** EXPERIMENTAL -- DO NOT USE *** +// +// OpenRecord decrypts the first complete SSL record from |in| in-place, sets +// |out| to the decrypted application data, and |out_record_len| to the length +// of the encrypted record. Returns: +// - kOK if an application-data record was successfully decrypted and verified. +// - kDiscard if a record was sucessfully processed, but should be discarded. +// - kIncompleteRecord if |in| did not contain a complete record. 
+// - kAlertCloseNotify if a record was successfully processed but is a +// close_notify alert. +// - kError if an error occurred or the record is invalid. |*out_alert| will be +// set to an alert to emit, or zero if no alert should be emitted. +OPENSSL_EXPORT OpenRecordResult OpenRecord(SSL *ssl, Span *out, + size_t *out_record_len, + uint8_t *out_alert, + Span in); + +OPENSSL_EXPORT size_t SealRecordPrefixLen(const SSL *ssl, size_t plaintext_len); + +// SealRecordSuffixLen returns the length of the suffix written by |SealRecord|. +// +// |plaintext_len| must be equal to the size of the plaintext passed to +// |SealRecord|. +// +// |plaintext_len| must not exceed |SSL3_RT_MAX_PLAINTEXT_LENGTH|. The returned +// suffix length will not exceed |SSL3_RT_MAX_ENCRYPTED_OVERHEAD|. +OPENSSL_EXPORT size_t SealRecordSuffixLen(const SSL *ssl, size_t plaintext_len); + +// *** EXPERIMENTAL -- DO NOT USE *** +// +// SealRecord encrypts the cleartext of |in| and scatters the resulting TLS +// application data record between |out_prefix|, |out|, and |out_suffix|. It +// returns true on success or false if an error occurred. +// +// The length of |out_prefix| must equal |SealRecordPrefixLen|. The length of +// |out| must equal the length of |in|, which must not exceed +// |SSL3_RT_MAX_PLAINTEXT_LENGTH|. The length of |out_suffix| must equal +// |SealRecordSuffixLen|. +// +// If enabled, |SealRecord| may perform TLS 1.0 CBC 1/n-1 record splitting. +// |SealRecordPrefixLen| accounts for the required overhead if that is the case. +// +// |out| may equal |in| to encrypt in-place but may not otherwise alias. +// |out_prefix| and |out_suffix| may not alias anything. +OPENSSL_EXPORT bool SealRecord(SSL *ssl, Span out_prefix, + Span out, Span out_suffix, + Span in); + } // namespace bssl -} /* extern C++ */ +} // extern C++ + +#endif // !defined(BORINGSSL_NO_CXX) #endif @@ -4345,7 +4436,6 @@ BORINGSSL_MAKE_DELETER(SSL_SESSION, SSL_SESSION_free) #define SSL_R_INVALID_SSL_SESSION 160 #define SSL_R_INVALID_TICKET_KEYS_LENGTH 161 #define SSL_R_LENGTH_MISMATCH 162 -#define SSL_R_LIBRARY_HAS_NO_CIPHERS 163 #define SSL_R_MISSING_EXTENSION 164 #define SSL_R_MISSING_RSA_CERTIFICATE 165 #define SSL_R_MISSING_TMP_DH_KEY 166 @@ -4455,6 +4545,16 @@ BORINGSSL_MAKE_DELETER(SSL_SESSION, SSL_SESSION_free) #define SSL_R_TOO_MUCH_SKIPPED_EARLY_DATA 270 #define SSL_R_PSK_IDENTITY_BINDER_COUNT_MISMATCH 271 #define SSL_R_CANNOT_PARSE_LEAF_CERT 272 +#define SSL_R_SERVER_CERT_CHANGED 273 +#define SSL_R_CERTIFICATE_AND_PRIVATE_KEY_MISMATCH 274 +#define SSL_R_CANNOT_HAVE_BOTH_PRIVKEY_AND_METHOD 275 +#define SSL_R_TICKET_ENCRYPTION_FAILED 276 +#define SSL_R_ALPN_MISMATCH_ON_EARLY_DATA 277 +#define SSL_R_WRONG_VERSION_ON_EARLY_DATA 278 +#define SSL_R_UNEXPECTED_EXTENSION_ON_EARLY_DATA 279 +#define SSL_R_NO_SUPPORTED_VERSIONS_ENABLED 280 +#define SSL_R_APPLICATION_DATA_INSTEAD_OF_HANDSHAKE 281 +#define SSL_R_EMPTY_HELLO_RETRY_REQUEST 282 #define SSL_R_SSLV3_ALERT_CLOSE_NOTIFY 1000 #define SSL_R_SSLV3_ALERT_UNEXPECTED_MESSAGE 1010 #define SSL_R_SSLV3_ALERT_BAD_RECORD_MAC 1020 @@ -4487,5 +4587,6 @@ BORINGSSL_MAKE_DELETER(SSL_SESSION, SSL_SESSION_free) #define SSL_R_TLSV1_BAD_CERTIFICATE_HASH_VALUE 1114 #define SSL_R_TLSV1_UNKNOWN_PSK_IDENTITY 1115 #define SSL_R_TLSV1_CERTIFICATE_REQUIRED 1116 +#define SSL_R_TOO_MUCH_READ_EARLY_DATA 1117 -#endif /* OPENSSL_HEADER_SSL_H */ +#endif // OPENSSL_HEADER_SSL_H diff --git a/Sources/BoringSSL/include/openssl/ssl3.h b/Sources/BoringSSL/include/openssl/ssl3.h index fcaeb2df9..ae0be88d5 100644 --- 
a/Sources/BoringSSL/include/openssl/ssl3.h +++ b/Sources/BoringSSL/include/openssl/ssl3.h @@ -125,14 +125,14 @@ extern "C" { #endif -/* These are kept to support clients that negotiates higher protocol versions - * using SSLv2 client hello records. */ +// These are kept to support clients that negotiates higher protocol versions +// using SSLv2 client hello records. #define SSL2_MT_CLIENT_HELLO 1 #define SSL2_VERSION 0x0002 -/* Signalling cipher suite value from RFC 5746. */ +// Signalling cipher suite value from RFC 5746. #define SSL3_CK_SCSV 0x030000FF -/* Fallback signalling cipher suite value from RFC 7507. */ +// Fallback signalling cipher suite value from RFC 7507. #define SSL3_CK_FALLBACK_SCSV 0x03005600 #define SSL3_CK_RSA_NULL_MD5 0x03000001 @@ -208,11 +208,11 @@ extern "C" { #define SSL3_HM_HEADER_LENGTH 4 #ifndef SSL3_ALIGN_PAYLOAD -/* Some will argue that this increases memory footprint, but it's not actually - * true. Point is that malloc has to return at least 64-bit aligned pointers, - * meaning that allocating 5 bytes wastes 3 bytes in either case. Suggested - * pre-gaping simply moves these wasted bytes from the end of allocated region - * to its front, but makes data payload aligned, which improves performance. */ +// Some will argue that this increases memory footprint, but it's not actually +// true. Point is that malloc has to return at least 64-bit aligned pointers, +// meaning that allocating 5 bytes wastes 3 bytes in either case. Suggested +// pre-gaping simply moves these wasted bytes from the end of allocated region +// to its front, but makes data payload aligned, which improves performance. #define SSL3_ALIGN_PAYLOAD 8 #else #if (SSL3_ALIGN_PAYLOAD & (SSL3_ALIGN_PAYLOAD - 1)) != 0 @@ -221,33 +221,33 @@ extern "C" { #endif #endif -/* This is the maximum MAC (digest) size used by the SSL library. Currently - * maximum of 20 is used by SHA1, but we reserve for future extension for - * 512-bit hashes. */ +// This is the maximum MAC (digest) size used by the SSL library. Currently +// maximum of 20 is used by SHA1, but we reserve for future extension for +// 512-bit hashes. #define SSL3_RT_MAX_MD_SIZE 64 -/* Maximum block size used in all ciphersuites. Currently 16 for AES. */ +// Maximum block size used in all ciphersuites. Currently 16 for AES. #define SSL_RT_MAX_CIPHER_BLOCK_SIZE 16 -/* Maximum plaintext length: defined by SSL/TLS standards */ +// Maximum plaintext length: defined by SSL/TLS standards #define SSL3_RT_MAX_PLAIN_LENGTH 16384 -/* Maximum compression overhead: defined by SSL/TLS standards */ +// Maximum compression overhead: defined by SSL/TLS standards #define SSL3_RT_MAX_COMPRESSED_OVERHEAD 1024 -/* The standards give a maximum encryption overhead of 1024 bytes. In practice - * the value is lower than this. The overhead is the maximum number of padding - * bytes (256) plus the mac size. - * - * TODO(davidben): This derivation doesn't take AEADs into account, or TLS 1.1 - * explicit nonces. It happens to work because |SSL3_RT_MAX_MD_SIZE| is larger - * than necessary and no true AEAD has variable overhead in TLS 1.2. */ +// The standards give a maximum encryption overhead of 1024 bytes. In practice +// the value is lower than this. The overhead is the maximum number of padding +// bytes (256) plus the mac size. +// +// TODO(davidben): This derivation doesn't take AEADs into account, or TLS 1.1 +// explicit nonces. It happens to work because |SSL3_RT_MAX_MD_SIZE| is larger +// than necessary and no true AEAD has variable overhead in TLS 1.2. 
#define SSL3_RT_MAX_ENCRYPTED_OVERHEAD (256 + SSL3_RT_MAX_MD_SIZE) -/* SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD is the maximum overhead in encrypting a - * record. This does not include the record header. Some ciphers use explicit - * nonces, so it includes both the AEAD overhead as well as the nonce. */ +// SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD is the maximum overhead in encrypting a +// record. This does not include the record header. Some ciphers use explicit +// nonces, so it includes both the AEAD overhead as well as the nonce. #define SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \ (EVP_AEAD_MAX_OVERHEAD + EVP_AEAD_MAX_NONCE_LENGTH) @@ -255,9 +255,9 @@ OPENSSL_COMPILE_ASSERT( SSL3_RT_MAX_ENCRYPTED_OVERHEAD >= SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD, max_overheads_are_consistent); -/* SSL3_RT_MAX_COMPRESSED_LENGTH is an alias for - * |SSL3_RT_MAX_PLAIN_LENGTH|. Compression is gone, so don't include the - * compression overhead. */ +// SSL3_RT_MAX_COMPRESSED_LENGTH is an alias for +// |SSL3_RT_MAX_PLAIN_LENGTH|. Compression is gone, so don't include the +// compression overhead. #define SSL3_RT_MAX_COMPRESSED_LENGTH SSL3_RT_MAX_PLAIN_LENGTH #define SSL3_RT_MAX_ENCRYPTED_LENGTH \ @@ -273,105 +273,33 @@ OPENSSL_COMPILE_ASSERT( #define SSL3_RT_HANDSHAKE 22 #define SSL3_RT_APPLICATION_DATA 23 -/* Pseudo content type for SSL/TLS header info */ +// Pseudo content type for SSL/TLS header info #define SSL3_RT_HEADER 0x100 #define SSL3_AL_WARNING 1 #define SSL3_AL_FATAL 2 #define SSL3_AD_CLOSE_NOTIFY 0 -#define SSL3_AD_UNEXPECTED_MESSAGE 10 /* fatal */ -#define SSL3_AD_BAD_RECORD_MAC 20 /* fatal */ -#define SSL3_AD_DECOMPRESSION_FAILURE 30 /* fatal */ -#define SSL3_AD_HANDSHAKE_FAILURE 40 /* fatal */ +#define SSL3_AD_UNEXPECTED_MESSAGE 10 // fatal +#define SSL3_AD_BAD_RECORD_MAC 20 // fatal +#define SSL3_AD_DECOMPRESSION_FAILURE 30 // fatal +#define SSL3_AD_HANDSHAKE_FAILURE 40 // fatal #define SSL3_AD_NO_CERTIFICATE 41 #define SSL3_AD_BAD_CERTIFICATE 42 #define SSL3_AD_UNSUPPORTED_CERTIFICATE 43 #define SSL3_AD_CERTIFICATE_REVOKED 44 #define SSL3_AD_CERTIFICATE_EXPIRED 45 #define SSL3_AD_CERTIFICATE_UNKNOWN 46 -#define SSL3_AD_ILLEGAL_PARAMETER 47 /* fatal */ -#define SSL3_AD_INAPPROPRIATE_FALLBACK 86 /* fatal */ +#define SSL3_AD_ILLEGAL_PARAMETER 47 // fatal +#define SSL3_AD_INAPPROPRIATE_FALLBACK 86 // fatal #define SSL3_CT_RSA_SIGN 1 -#define SSL3_CT_DSS_SIGN 2 -#define SSL3_CT_RSA_FIXED_DH 3 -#define SSL3_CT_DSS_FIXED_DH 4 -#define SSL3_CT_RSA_EPHEMERAL_DH 5 -#define SSL3_CT_DSS_EPHEMERAL_DH 6 -#define SSL3_CT_FORTEZZA_DMS 20 - -/* SSLv3 */ -/* client */ -/* extra state */ -#define SSL3_ST_CW_FLUSH (0x100 | SSL_ST_CONNECT) -#define SSL3_ST_FALSE_START (0x101 | SSL_ST_CONNECT) -#define SSL3_ST_VERIFY_SERVER_CERT (0x102 | SSL_ST_CONNECT) -#define SSL3_ST_FINISH_CLIENT_HANDSHAKE (0x103 | SSL_ST_CONNECT) -/* write to server */ -#define SSL3_ST_CW_CLNT_HELLO_A (0x110 | SSL_ST_CONNECT) -/* read from server */ -#define SSL3_ST_CR_SRVR_HELLO_A (0x120 | SSL_ST_CONNECT) -#define DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A (0x126 | SSL_ST_CONNECT) -#define SSL3_ST_CR_CERT_A (0x130 | SSL_ST_CONNECT) -#define SSL3_ST_CR_KEY_EXCH_A (0x140 | SSL_ST_CONNECT) -#define SSL3_ST_CR_KEY_EXCH_B (0x141 | SSL_ST_CONNECT) -#define SSL3_ST_CR_CERT_REQ_A (0x150 | SSL_ST_CONNECT) -#define SSL3_ST_CR_SRVR_DONE_A (0x160 | SSL_ST_CONNECT) -/* write to server */ -#define SSL3_ST_CW_CERT_A (0x170 | SSL_ST_CONNECT) -#define SSL3_ST_CW_KEY_EXCH_A (0x180 | SSL_ST_CONNECT) -#define SSL3_ST_CW_CERT_VRFY_A (0x190 | SSL_ST_CONNECT) -#define 
SSL3_ST_CW_CERT_VRFY_B (0x191 | SSL_ST_CONNECT) -#define SSL3_ST_CW_CHANGE (0x1A0 | SSL_ST_CONNECT) -#define SSL3_ST_CW_NEXT_PROTO_A (0x200 | SSL_ST_CONNECT) -#define SSL3_ST_CW_CHANNEL_ID_A (0x220 | SSL_ST_CONNECT) -#define SSL3_ST_CW_FINISHED_A (0x1B0 | SSL_ST_CONNECT) -/* read from server */ -#define SSL3_ST_CR_CHANGE (0x1C0 | SSL_ST_CONNECT) -#define SSL3_ST_CR_FINISHED_A (0x1D0 | SSL_ST_CONNECT) -#define SSL3_ST_CR_SESSION_TICKET_A (0x1E0 | SSL_ST_CONNECT) -#define SSL3_ST_CR_CERT_STATUS_A (0x1F0 | SSL_ST_CONNECT) - -/* SSL3_ST_CR_SRVR_HELLO_B is a legacy alias for |SSL3_ST_CR_SRVR_HELLO_A| used - * by some consumers which check |SSL_state|. */ -#define SSL3_ST_CR_SRVR_HELLO_B SSL3_ST_CR_SRVR_HELLO_A - -/* server */ -/* extra state */ -#define SSL3_ST_SW_FLUSH (0x100 | SSL_ST_ACCEPT) -/* read from client */ -#define SSL3_ST_SR_CLNT_HELLO_A (0x110 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_CLNT_HELLO_B (0x111 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_CLNT_HELLO_C (0x112 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_CLNT_HELLO_D (0x113 | SSL_ST_ACCEPT) -/* write to client */ -#define SSL3_ST_SW_SRVR_HELLO_A (0x130 | SSL_ST_ACCEPT) -#define SSL3_ST_SW_CERT_A (0x140 | SSL_ST_ACCEPT) -#define SSL3_ST_SW_KEY_EXCH_A (0x150 | SSL_ST_ACCEPT) -#define SSL3_ST_SW_KEY_EXCH_B (0x151 | SSL_ST_ACCEPT) -#define SSL3_ST_SW_CERT_REQ_A (0x160 | SSL_ST_ACCEPT) -#define SSL3_ST_SW_SRVR_DONE_A (0x170 | SSL_ST_ACCEPT) -/* read from client */ -#define SSL3_ST_SR_CERT_A (0x180 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_KEY_EXCH_A (0x190 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_KEY_EXCH_B (0x191 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_CERT_VRFY_A (0x1A0 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_CHANGE (0x1B0 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_NEXT_PROTO_A (0x210 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_CHANNEL_ID_A (0x230 | SSL_ST_ACCEPT) -#define SSL3_ST_SR_FINISHED_A (0x1C0 | SSL_ST_ACCEPT) - -/* write to client */ -#define SSL3_ST_SW_CHANGE (0x1D0 | SSL_ST_ACCEPT) -#define SSL3_ST_SW_FINISHED_A (0x1E0 | SSL_ST_ACCEPT) -#define SSL3_ST_SW_SESSION_TICKET_A (0x1F0 | SSL_ST_ACCEPT) -#define SSL3_ST_SW_CERT_STATUS_A (0x200 | SSL_ST_ACCEPT) #define SSL3_MT_HELLO_REQUEST 0 #define SSL3_MT_CLIENT_HELLO 1 #define SSL3_MT_SERVER_HELLO 2 #define SSL3_MT_NEW_SESSION_TICKET 4 +#define SSL3_MT_END_OF_EARLY_DATA 5 #define SSL3_MT_HELLO_RETRY_REQUEST 6 #define SSL3_MT_ENCRYPTED_EXTENSIONS 8 #define SSL3_MT_CERTIFICATE 11 @@ -386,29 +314,20 @@ OPENSSL_COMPILE_ASSERT( #define SSL3_MT_KEY_UPDATE 24 #define SSL3_MT_NEXT_PROTO 67 #define SSL3_MT_CHANNEL_ID 203 +#define SSL3_MT_MESSAGE_HASH 254 #define DTLS1_MT_HELLO_VERIFY_REQUEST 3 -/* The following are legacy aliases for consumers which use - * |SSL_CTX_set_msg_callback|. */ +// The following are legacy aliases for consumers which use +// |SSL_CTX_set_msg_callback|. 
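The aliases introduced just below exist for consumers of |SSL_CTX_set_msg_callback|, whose signature appears verbatim in the |ssl_ctx_st| hunk earlier in this patch. A small sketch (not part of the patch) of such a callback that reports handshake message types using the SSL3_MT_* constants:

    #include <stdio.h>
    #include <openssl/ssl.h>

    static void msg_cb(int write_p, int version, int content_type,
                       const void *buf, size_t len, SSL *ssl, void *arg) {
      (void)version;
      (void)ssl;
      (void)arg;
      if (content_type == SSL3_RT_HANDSHAKE && len > 0) {
        // The first byte of the handshake message passed to the callback is
        // its type, e.g. SSL3_MT_CLIENT_HELLO.
        const uint8_t *p = (const uint8_t *)buf;
        printf("%s handshake message type %u (%zu bytes)\n",
               write_p ? "wrote" : "read", p[0], len);
      }
    }

    // Installed with: SSL_CTX_set_msg_callback(ctx, msg_cb);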
#define SSL3_MT_SERVER_DONE SSL3_MT_SERVER_HELLO_DONE #define SSL3_MT_NEWSESSION_TICKET SSL3_MT_NEW_SESSION_TICKET #define SSL3_MT_CCS 1 -/* These are used when changing over to a new cipher */ -#define SSL3_CC_READ 0x01 -#define SSL3_CC_WRITE 0x02 -#define SSL3_CC_CLIENT 0x10 -#define SSL3_CC_SERVER 0x20 -#define SSL3_CHANGE_CIPHER_CLIENT_WRITE (SSL3_CC_CLIENT | SSL3_CC_WRITE) -#define SSL3_CHANGE_CIPHER_SERVER_READ (SSL3_CC_SERVER | SSL3_CC_READ) -#define SSL3_CHANGE_CIPHER_CLIENT_READ (SSL3_CC_CLIENT | SSL3_CC_READ) -#define SSL3_CHANGE_CIPHER_SERVER_WRITE (SSL3_CC_SERVER | SSL3_CC_WRITE) - #ifdef __cplusplus -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_SSL3_H */ +#endif // OPENSSL_HEADER_SSL3_H diff --git a/Sources/BoringSSL/include/openssl/stack.h b/Sources/BoringSSL/include/openssl/stack.h index c0cd0f6f5..46f57a34c 100644 --- a/Sources/BoringSSL/include/openssl/stack.h +++ b/Sources/BoringSSL/include/openssl/stack.h @@ -66,45 +66,45 @@ extern "C" { #endif -/* A stack, in OpenSSL, is an array of pointers. They are the most commonly - * used collection object. - * - * This file defines macros for type safe use of the stack functions. A stack - * of a specific type of object has type |STACK_OF(type)|. This can be defined - * (once) with |DEFINE_STACK_OF(type)| and declared where needed with - * |DECLARE_STACK_OF(type)|. For example: - * - * struct foo { - * int bar; - * }; - * - * DEFINE_STACK_OF(struct foo); - * - * Although note that the stack will contain /pointers/ to |foo|. - * - * A macro will be defined for each of the sk_* functions below. For - * STACK_OF(foo), the macros would be sk_foo_new, sk_foo_pop etc. */ - - -/* stack_cmp_func is a comparison function that returns a value < 0, 0 or > 0 - * if |*a| is less than, equal to or greater than |*b|, respectively. Note the - * extra indirection - the function is given a pointer to a pointer to the - * element. This differs from the usual qsort/bsearch comparison function. */ +// A stack, in OpenSSL, is an array of pointers. They are the most commonly +// used collection object. +// +// This file defines macros for type safe use of the stack functions. A stack +// of a specific type of object has type |STACK_OF(type)|. This can be defined +// (once) with |DEFINE_STACK_OF(type)| and declared where needed with +// |DECLARE_STACK_OF(type)|. For example: +// +// typedef struct foo_st { +// int bar; +// } FOO; +// +// DEFINE_STACK_OF(FOO); +// +// Although note that the stack will contain /pointers/ to |FOO|. +// +// A macro will be defined for each of the sk_* functions below. For +// STACK_OF(FOO), the macros would be sk_FOO_new, sk_FOO_pop etc. + + +// stack_cmp_func is a comparison function that returns a value < 0, 0 or > 0 +// if |*a| is less than, equal to or greater than |*b|, respectively. Note the +// extra indirection - the function is given a pointer to a pointer to the +// element. This differs from the usual qsort/bsearch comparison function. typedef int (*stack_cmp_func)(const void **a, const void **b); -/* stack_st contains an array of pointers. It is not designed to be used - * directly, rather the wrapper macros should be used. */ +// stack_st contains an array of pointers. It is not designed to be used +// directly, rather the wrapper macros should be used. typedef struct stack_st { - /* num contains the number of valid pointers in |data|. */ + // num contains the number of valid pointers in |data|. 
size_t num; void **data; - /* sorted is non-zero if the values pointed to by |data| are in ascending - * order, based on |comp|. */ + // sorted is non-zero if the values pointed to by |data| are in ascending + // order, based on |comp|. int sorted; - /* num_alloc contains the number of pointers allocated in the buffer pointed - * to by |data|, which may be larger than |num|. */ + // num_alloc contains the number of pointers allocated in the buffer pointed + // to by |data|, which may be larger than |num|. size_t num_alloc; - /* comp is an optional comparison function. */ + // comp is an optional comparison function. stack_cmp_func comp; } _STACK; @@ -113,182 +113,373 @@ typedef struct stack_st { #define DECLARE_STACK_OF(type) STACK_OF(type); -/* The make_macros.sh script in this directory parses the following lines and - * generates the stack_macros.h file that contains macros for the following - * types of stacks: - * - * STACK_OF:ACCESS_DESCRIPTION - * STACK_OF:ASN1_ADB_TABLE - * STACK_OF:ASN1_GENERALSTRING - * STACK_OF:ASN1_INTEGER - * STACK_OF:ASN1_OBJECT - * STACK_OF:ASN1_STRING_TABLE - * STACK_OF:ASN1_TYPE - * STACK_OF:ASN1_VALUE - * STACK_OF:BIO - * STACK_OF:BY_DIR_ENTRY - * STACK_OF:BY_DIR_HASH - * STACK_OF:CONF_VALUE - * STACK_OF:CRYPTO_BUFFER - * STACK_OF:CRYPTO_EX_DATA_FUNCS - * STACK_OF:DIST_POINT - * STACK_OF:GENERAL_NAME - * STACK_OF:GENERAL_NAMES - * STACK_OF:GENERAL_SUBTREE - * STACK_OF:POLICYINFO - * STACK_OF:POLICYQUALINFO - * STACK_OF:POLICY_MAPPING - * STACK_OF:RSA_additional_prime - * STACK_OF:SSL_COMP - * STACK_OF:SSL_CUSTOM_EXTENSION - * STACK_OF:STACK_OF_X509_NAME_ENTRY - * STACK_OF:SXNETID - * STACK_OF:X509 - * STACK_OF:X509V3_EXT_METHOD - * STACK_OF:X509_ALGOR - * STACK_OF:X509_ATTRIBUTE - * STACK_OF:X509_CRL - * STACK_OF:X509_EXTENSION - * STACK_OF:X509_INFO - * STACK_OF:X509_LOOKUP - * STACK_OF:X509_NAME - * STACK_OF:X509_NAME_ENTRY - * STACK_OF:X509_OBJECT - * STACK_OF:X509_POLICY_DATA - * STACK_OF:X509_POLICY_NODE - * STACK_OF:X509_PURPOSE - * STACK_OF:X509_REVOKED - * STACK_OF:X509_TRUST - * STACK_OF:X509_VERIFY_PARAM - * STACK_OF:void - * - * Some stacks contain only const structures, so the stack should return const - * pointers to retain type-checking. - * - * CONST_STACK_OF:SRTP_PROTECTION_PROFILE - * CONST_STACK_OF:SSL_CIPHER */ - - -/* Some stacks are special because, although we would like STACK_OF(char *), - * that would actually be a stack of pointers to char*, but we just want to - * point to the string directly. In this case we call them "special" and use - * |DEFINE_SPECIAL_STACK_OF(type)| */ -#define DEFINE_SPECIAL_STACK_OF(type, inner) \ - STACK_OF(type) { _STACK special_stack; }; \ - OPENSSL_COMPILE_ASSERT(sizeof(type) == sizeof(void *), \ - special_stack_of_non_pointer_##type); - -typedef char *OPENSSL_STRING; - -DEFINE_SPECIAL_STACK_OF(OPENSSL_STRING, char) +// These are the raw stack functions, you shouldn't be using them. Rather you +// should be using the type stack macros implemented above. -/* The make_macros.sh script in this directory parses the following lines and - * generates the stack_macros.h file that contains macros for the following - * types of stacks: - * - * SPECIAL_STACK_OF:OPENSSL_STRING */ - -#define IN_STACK_H -#include -#undef IN_STACK_H - - -/* These are the raw stack functions, you shouldn't be using them. Rather you - * should be using the type stack macros implemented above. */ - -/* sk_new creates a new, empty stack with the given comparison function, which - * may be zero. 
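Expanding slightly on the FOO example in the header comment above (not part of the patch): once |DEFINE_STACK_OF(FOO)| has been emitted, the generated sk_FOO_* wrappers are used as below; |FOO_free| is a local helper and error handling is elided:

    #include <stdio.h>
    #include <openssl/mem.h>
    #include <openssl/stack.h>

    typedef struct foo_st { int bar; } FOO;
    DEFINE_STACK_OF(FOO)

    static void FOO_free(FOO *foo) { OPENSSL_free(foo); }

    static void demo(void) {
      STACK_OF(FOO) *sk = sk_FOO_new_null();
      FOO *elem = (FOO *)OPENSSL_malloc(sizeof(FOO));
      if (sk == NULL || elem == NULL) {
        return;  // allocation-failure handling elided in this sketch
      }
      elem->bar = 42;
      sk_FOO_push(sk, elem);            // the stack stores pointers to FOO
      printf("stack holds %zu element(s)\n", sk_FOO_num(sk));
      sk_FOO_pop_free(sk, FOO_free);    // frees each element, then the stack
    }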
It returns the new stack or NULL on allocation failure. */ +// sk_new creates a new, empty stack with the given comparison function, which +// may be zero. It returns the new stack or NULL on allocation failure. OPENSSL_EXPORT _STACK *sk_new(stack_cmp_func comp); -/* sk_new_null creates a new, empty stack. It returns the new stack or NULL on - * allocation failure. */ +// sk_new_null creates a new, empty stack. It returns the new stack or NULL on +// allocation failure. OPENSSL_EXPORT _STACK *sk_new_null(void); -/* sk_num returns the number of elements in |s|. */ +// sk_num returns the number of elements in |s|. OPENSSL_EXPORT size_t sk_num(const _STACK *sk); -/* sk_zero resets |sk| to the empty state but does nothing to free the - * individual elements themselves. */ +// sk_zero resets |sk| to the empty state but does nothing to free the +// individual elements themselves. OPENSSL_EXPORT void sk_zero(_STACK *sk); -/* sk_value returns the |i|th pointer in |sk|, or NULL if |i| is out of - * range. */ +// sk_value returns the |i|th pointer in |sk|, or NULL if |i| is out of +// range. OPENSSL_EXPORT void *sk_value(const _STACK *sk, size_t i); -/* sk_set sets the |i|th pointer in |sk| to |p| and returns |p|. If |i| is out - * of range, it returns NULL. */ +// sk_set sets the |i|th pointer in |sk| to |p| and returns |p|. If |i| is out +// of range, it returns NULL. OPENSSL_EXPORT void *sk_set(_STACK *sk, size_t i, void *p); -/* sk_free frees the given stack and array of pointers, but does nothing to - * free the individual elements. Also see |sk_pop_free|. */ +// sk_free frees the given stack and array of pointers, but does nothing to +// free the individual elements. Also see |sk_pop_free|. OPENSSL_EXPORT void sk_free(_STACK *sk); -/* sk_pop_free calls |free_func| on each element in the stack and then frees - * the stack itself. */ +// sk_pop_free calls |free_func| on each element in the stack and then frees +// the stack itself. OPENSSL_EXPORT void sk_pop_free(_STACK *sk, void (*free_func)(void *)); -/* sk_insert inserts |p| into the stack at index |where|, moving existing - * elements if needed. It returns the length of the new stack, or zero on - * error. */ +// sk_insert inserts |p| into the stack at index |where|, moving existing +// elements if needed. It returns the length of the new stack, or zero on +// error. OPENSSL_EXPORT size_t sk_insert(_STACK *sk, void *p, size_t where); -/* sk_delete removes the pointer at index |where|, moving other elements down - * if needed. It returns the removed pointer, or NULL if |where| is out of - * range. */ +// sk_delete removes the pointer at index |where|, moving other elements down +// if needed. It returns the removed pointer, or NULL if |where| is out of +// range. OPENSSL_EXPORT void *sk_delete(_STACK *sk, size_t where); -/* sk_delete_ptr removes, at most, one instance of |p| from the stack based on - * pointer equality. If an instance of |p| is found then |p| is returned, - * otherwise it returns NULL. */ +// sk_delete_ptr removes, at most, one instance of |p| from the stack based on +// pointer equality. If an instance of |p| is found then |p| is returned, +// otherwise it returns NULL. OPENSSL_EXPORT void *sk_delete_ptr(_STACK *sk, void *p); -/* sk_find returns the first value in the stack equal to |p|. If a comparison - * function has been set on the stack, then equality is defined by it and the - * stack will be sorted if need be so that a binary search can be used. - * Otherwise pointer equality is used. 
If a matching element is found, its - * index is written to |*out_index| (if |out_index| is not NULL) and one is - * returned. Otherwise zero is returned. */ +// sk_find returns the first value in the stack equal to |p|. If a comparison +// function has been set on the stack, then equality is defined by it and the +// stack will be sorted if need be so that a binary search can be used. +// Otherwise pointer equality is used. If a matching element is found, its +// index is written to |*out_index| (if |out_index| is not NULL) and one is +// returned. Otherwise zero is returned. OPENSSL_EXPORT int sk_find(_STACK *sk, size_t *out_index, void *p); -/* sk_shift removes and returns the first element in the stack, or returns NULL - * if the stack is empty. */ +// sk_shift removes and returns the first element in the stack, or returns NULL +// if the stack is empty. OPENSSL_EXPORT void *sk_shift(_STACK *sk); -/* sk_push appends |p| to the stack and returns the length of the new stack, or - * 0 on allocation failure. */ +// sk_push appends |p| to the stack and returns the length of the new stack, or +// 0 on allocation failure. OPENSSL_EXPORT size_t sk_push(_STACK *sk, void *p); -/* sk_pop returns and removes the last element on the stack, or NULL if the - * stack is empty. */ +// sk_pop returns and removes the last element on the stack, or NULL if the +// stack is empty. OPENSSL_EXPORT void *sk_pop(_STACK *sk); -/* sk_dup performs a shallow copy of a stack and returns the new stack, or NULL - * on error. */ +// sk_dup performs a shallow copy of a stack and returns the new stack, or NULL +// on error. OPENSSL_EXPORT _STACK *sk_dup(const _STACK *sk); -/* sk_sort sorts the elements of |sk| into ascending order based on the - * comparison function. The stack maintains a |sorted| flag and sorting an - * already sorted stack is a no-op. */ +// sk_sort sorts the elements of |sk| into ascending order based on the +// comparison function. The stack maintains a |sorted| flag and sorting an +// already sorted stack is a no-op. OPENSSL_EXPORT void sk_sort(_STACK *sk); -/* sk_is_sorted returns one if |sk| is known to be sorted and zero - * otherwise. */ +// sk_is_sorted returns one if |sk| is known to be sorted and zero +// otherwise. OPENSSL_EXPORT int sk_is_sorted(const _STACK *sk); -/* sk_set_cmp_func sets the comparison function to be used by |sk| and returns - * the previous one. */ +// sk_set_cmp_func sets the comparison function to be used by |sk| and returns +// the previous one. OPENSSL_EXPORT stack_cmp_func sk_set_cmp_func(_STACK *sk, stack_cmp_func comp); -/* sk_deep_copy performs a copy of |sk| and of each of the non-NULL elements in - * |sk| by using |copy_func|. If an error occurs, |free_func| is used to free - * any copies already made and NULL is returned. */ +// sk_deep_copy performs a copy of |sk| and of each of the non-NULL elements in +// |sk| by using |copy_func|. If an error occurs, |free_func| is used to free +// any copies already made and NULL is returned. OPENSSL_EXPORT _STACK *sk_deep_copy(const _STACK *sk, void *(*copy_func)(void *), void (*free_func)(void *)); +// Defining stack types. +// +// This set of macros is used to emit the typed functions that act on a +// |STACK_OF(T)|. 
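The |sk_sort|/|sk_find|/|sk_set_cmp_func| documentation above (and the earlier |stack_cmp_func| note) hinges on the extra level of indirection: the comparator receives pointers to the stack slots, not to the elements. A short sketch (not part of the patch) using the typed wrappers that the macros below generate, with a made-up VAL type:

    #include <stdio.h>
    #include <openssl/stack.h>

    typedef struct val_st { int v; } VAL;
    DEFINE_STACK_OF(VAL)

    // Note the VAL ** parameters: the comparator gets pointers into the
    // stack's pointer array.
    static int val_cmp(const VAL **a, const VAL **b) {
      if ((*a)->v < (*b)->v) {
        return -1;
      }
      return (*a)->v > (*b)->v;
    }

    static void sort_and_search(STACK_OF(VAL) *sk, VAL *needle) {
      sk_VAL_set_cmp_func(sk, val_cmp);
      sk_VAL_sort(sk);                        // sets the |sorted| flag
      size_t index;
      if (sk_VAL_find(sk, &index, needle)) {  // binary search via |val_cmp|
        printf("found at index %zu\n", index);
      }
    }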
+ +#if !defined(BORINGSSL_NO_CXX) +extern "C++" { +namespace bssl { +namespace internal { +template +struct StackTraits {}; +} +} +} + +#define BORINGSSL_DEFINE_STACK_TRAITS(name, type, is_const) \ + extern "C++" { \ + namespace bssl { \ + namespace internal { \ + template <> \ + struct StackTraits { \ + static constexpr bool kIsStack = true; \ + using Type = type; \ + static constexpr bool kIsConst = is_const; \ + }; \ + } \ + } \ + } + +#else +#define BORINGSSL_DEFINE_STACK_TRAITS(name, type, is_const) +#endif + +// Stack functions must be tagged unused to support file-local stack types. +// Clang's -Wunused-function only allows unused static inline functions if they +// are defined in a header. + +#define BORINGSSL_DEFINE_STACK_OF_IMPL(name, ptrtype, constptrtype) \ + DECLARE_STACK_OF(name) \ + \ + typedef int (*stack_##name##_cmp_func)(constptrtype *a, constptrtype *b); \ + \ + static inline OPENSSL_UNUSED STACK_OF(name) * \ + sk_##name##_new(stack_##name##_cmp_func comp) { \ + return (STACK_OF(name) *)sk_new((stack_cmp_func)comp); \ + } \ + \ + static inline OPENSSL_UNUSED STACK_OF(name) *sk_##name##_new_null(void) { \ + return (STACK_OF(name) *)sk_new_null(); \ + } \ + \ + static inline OPENSSL_UNUSED size_t sk_##name##_num( \ + const STACK_OF(name) *sk) { \ + return sk_num((const _STACK *)sk); \ + } \ + \ + static inline OPENSSL_UNUSED void sk_##name##_zero(STACK_OF(name) *sk) { \ + sk_zero((_STACK *)sk); \ + } \ + \ + static inline OPENSSL_UNUSED ptrtype sk_##name##_value( \ + const STACK_OF(name) *sk, size_t i) { \ + return (ptrtype)sk_value((const _STACK *)sk, i); \ + } \ + \ + static inline OPENSSL_UNUSED ptrtype sk_##name##_set(STACK_OF(name) *sk, \ + size_t i, ptrtype p) { \ + return (ptrtype)sk_set((_STACK *)sk, i, (void *)p); \ + } \ + \ + static inline OPENSSL_UNUSED void sk_##name##_free(STACK_OF(name) *sk) { \ + sk_free((_STACK *)sk); \ + } \ + \ + static inline OPENSSL_UNUSED void sk_##name##_pop_free( \ + STACK_OF(name) *sk, void (*free_func)(ptrtype p)) { \ + sk_pop_free((_STACK *)sk, (void (*)(void *))free_func); \ + } \ + \ + static inline OPENSSL_UNUSED size_t sk_##name##_insert( \ + STACK_OF(name) *sk, ptrtype p, size_t where) { \ + return sk_insert((_STACK *)sk, (void *)p, where); \ + } \ + \ + static inline OPENSSL_UNUSED ptrtype sk_##name##_delete(STACK_OF(name) *sk, \ + size_t where) { \ + return (ptrtype)sk_delete((_STACK *)sk, where); \ + } \ + \ + static inline OPENSSL_UNUSED ptrtype sk_##name##_delete_ptr( \ + STACK_OF(name) *sk, ptrtype p) { \ + return (ptrtype)sk_delete_ptr((_STACK *)sk, (void *)p); \ + } \ + \ + static inline OPENSSL_UNUSED int sk_##name##_find( \ + STACK_OF(name) *sk, size_t *out_index, ptrtype p) { \ + return sk_find((_STACK *)sk, out_index, (void *)p); \ + } \ + \ + static inline OPENSSL_UNUSED ptrtype sk_##name##_shift(STACK_OF(name) *sk) { \ + return (ptrtype)sk_shift((_STACK *)sk); \ + } \ + \ + static inline OPENSSL_UNUSED size_t sk_##name##_push(STACK_OF(name) *sk, \ + ptrtype p) { \ + return sk_push((_STACK *)sk, (void *)p); \ + } \ + \ + static inline OPENSSL_UNUSED ptrtype sk_##name##_pop(STACK_OF(name) *sk) { \ + return (ptrtype)sk_pop((_STACK *)sk); \ + } \ + \ + static inline OPENSSL_UNUSED STACK_OF(name) * \ + sk_##name##_dup(const STACK_OF(name) *sk) { \ + return (STACK_OF(name) *)sk_dup((const _STACK *)sk); \ + } \ + \ + static inline OPENSSL_UNUSED void sk_##name##_sort(STACK_OF(name) *sk) { \ + sk_sort((_STACK *)sk); \ + } \ + \ + static inline OPENSSL_UNUSED int sk_##name##_is_sorted( \ + const STACK_OF(name) *sk) { \ 
+ return sk_is_sorted((const _STACK *)sk); \ + } \ + \ + static inline OPENSSL_UNUSED stack_##name##_cmp_func \ + sk_##name##_set_cmp_func(STACK_OF(name) *sk, \ + stack_##name##_cmp_func comp) { \ + return (stack_##name##_cmp_func)sk_set_cmp_func((_STACK *)sk, \ + (stack_cmp_func)comp); \ + } \ + \ + static inline OPENSSL_UNUSED STACK_OF(name) * \ + sk_##name##_deep_copy(const STACK_OF(name) *sk, \ + ptrtype(*copy_func)(ptrtype), \ + void (*free_func)(ptrtype)) { \ + return (STACK_OF(name) *)sk_deep_copy((_STACK *)sk, \ + (void *(*)(void *))copy_func, \ + (void (*)(void *))free_func); \ + } + +// DEFINE_STACK_OF defines |STACK_OF(type)| to be a stack whose elements are +// |type| *. +#define DEFINE_STACK_OF(type) \ + BORINGSSL_DEFINE_STACK_OF_IMPL(type, type *, const type *) \ + BORINGSSL_DEFINE_STACK_TRAITS(type, type, false) + +// DEFINE_CONST_STACK_OF defines |STACK_OF(type)| to be a stack whose elements +// are const |type| *. +#define DEFINE_CONST_STACK_OF(type) \ + BORINGSSL_DEFINE_STACK_OF_IMPL(type, const type *, const type *) \ + BORINGSSL_DEFINE_STACK_TRAITS(type, const type, true) + +// DEFINE_SPECIAL_STACK_OF defines |STACK_OF(type)| to be a stack whose elements +// are |type|, where |type| must be a typedef for a pointer. +#define DEFINE_SPECIAL_STACK_OF(type) \ + OPENSSL_COMPILE_ASSERT(sizeof(type) == sizeof(void *), \ + special_stack_of_non_pointer_##type); \ + BORINGSSL_DEFINE_STACK_OF_IMPL(type, type, const type) + + +typedef char *OPENSSL_STRING; + +DEFINE_STACK_OF(void) +DEFINE_SPECIAL_STACK_OF(OPENSSL_STRING) + + #if defined(__cplusplus) -} /* extern C */ +} // extern C +#endif + +#if !defined(BORINGSSL_NO_CXX) +extern "C++" { + +#include + +namespace bssl { + +namespace internal { + +// Stacks defined with |DEFINE_CONST_STACK_OF| are freed with |sk_free|. +template +struct DeleterImpl< + Stack, typename std::enable_if::kIsConst>::type> { + static void Free(Stack *sk) { sk_free(reinterpret_cast<_STACK *>(sk)); } +}; + +// Stacks defined with |DEFINE_STACK_OF| are freed with |sk_pop_free| and the +// corresponding type's deleter. +template +struct DeleterImpl< + Stack, typename std::enable_if::kIsConst>::type> { + static void Free(Stack *sk) { + sk_pop_free( + reinterpret_cast<_STACK *>(sk), + reinterpret_cast( + DeleterImpl::Type>::Free)); + } +}; + +template +class StackIteratorImpl { + public: + using Type = typename StackTraits::Type; + // Iterators must be default-constructable. + StackIteratorImpl() : sk_(nullptr), idx_(0) {} + StackIteratorImpl(const Stack *sk, size_t idx) : sk_(sk), idx_(idx) {} + + bool operator==(StackIteratorImpl other) const { + return sk_ == other.sk_ && idx_ == other.idx_; + } + bool operator!=(StackIteratorImpl other) const { + return !(*this == other); + } + + Type *operator*() const { + return reinterpret_cast( + sk_value(reinterpret_cast(sk_), idx_)); + } + + StackIteratorImpl &operator++(/* prefix */) { + idx_++; + return *this; + } + + StackIteratorImpl operator++(int /* postfix */) { + StackIteratorImpl copy(*this); + ++(*this); + return copy; + } + + private: + const Stack *sk_; + size_t idx_; +}; + +template +using StackIterator = typename std::enable_if::kIsStack, + StackIteratorImpl>::type; + +} // namespace internal + +// PushToStack pushes |elem| to |sk|. It returns true on success and false on +// allocation failure. 
+template +static inline + typename std::enable_if::kIsConst, bool>::type + PushToStack(Stack *sk, + UniquePtr::Type> elem) { + if (!sk_push(reinterpret_cast<_STACK *>(sk), elem.get())) { + return false; + } + // sk_push takes ownership on success. + elem.release(); + return true; +} + +} // namespace bssl + +// Define begin() and end() for stack types so C++ range for loops work. +template +static inline bssl::internal::StackIterator begin(const Stack *sk) { + return bssl::internal::StackIterator(sk, 0); +} + +template +static inline bssl::internal::StackIterator end(const Stack *sk) { + return bssl::internal::StackIterator( + sk, sk_num(reinterpret_cast(sk))); +} + +} // extern C++ #endif -#endif /* OPENSSL_HEADER_STACK_H */ +#endif // OPENSSL_HEADER_STACK_H diff --git a/Sources/BoringSSL/include/openssl/stack_macros.h b/Sources/BoringSSL/include/openssl/stack_macros.h deleted file mode 100644 index a5f36fb64..000000000 --- a/Sources/BoringSSL/include/openssl/stack_macros.h +++ /dev/null @@ -1,3987 +0,0 @@ -/* Copyright (c) 2014, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#if !defined(IN_STACK_H) -#error "Don't include this file directly. Include stack.h." 
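Before the bulk deletion of the pre-generated stack_macros.h continues below, here is an illustration (not part of the patch) of the two C++ conveniences added in the stack.h hunk above: range-based for over a STACK_OF(...) via the global begin()/end() overloads, and |bssl::PushToStack| transferring ownership from a |bssl::UniquePtr|. X509 is used only as a familiar element type; it is assumed to be set up with DEFINE_STACK_OF and BORINGSSL_MAKE_DELETER elsewhere (as in x509.h).

    #include <stdio.h>
    #include <utility>
    #include <openssl/ssl.h>
    #include <openssl/x509.h>

    static void dump_subjects(const STACK_OF(X509) *chain) {
      // The begin()/end() templates above make this loop work; each element
      // comes back as an X509 *.
      for (X509 *cert : chain) {
        X509_NAME_print_ex_fp(stdout, X509_get_subject_name(cert), 0, 0);
        fputc('\n', stdout);
      }
    }

    static bool append_cert(STACK_OF(X509) *chain, bssl::UniquePtr<X509> cert) {
      // On success PushToStack releases |cert| into the stack; on failure the
      // UniquePtr still owns the certificate and frees it on destruction.
      return bssl::PushToStack(chain, std::move(cert));
    }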
-#endif - -/* ACCESS_DESCRIPTION */ -#define sk_ACCESS_DESCRIPTION_new(comp) \ - ((STACK_OF(ACCESS_DESCRIPTION) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const ACCESS_DESCRIPTION **a, const ACCESS_DESCRIPTION **b), \ - comp))) - -#define sk_ACCESS_DESCRIPTION_new_null() \ - ((STACK_OF(ACCESS_DESCRIPTION) *)sk_new_null()) - -#define sk_ACCESS_DESCRIPTION_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(ACCESS_DESCRIPTION) *, sk)) - -#define sk_ACCESS_DESCRIPTION_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk)); - -#define sk_ACCESS_DESCRIPTION_value(sk, i) \ - ((ACCESS_DESCRIPTION *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ACCESS_DESCRIPTION) *, sk), \ - (i))) - -#define sk_ACCESS_DESCRIPTION_set(sk, i, p) \ - ((ACCESS_DESCRIPTION *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk), (i), \ - CHECKED_CAST(void *, ACCESS_DESCRIPTION *, p))) - -#define sk_ACCESS_DESCRIPTION_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk)) - -#define sk_ACCESS_DESCRIPTION_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(ACCESS_DESCRIPTION *), \ - free_func)) - -#define sk_ACCESS_DESCRIPTION_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk), \ - CHECKED_CAST(void *, ACCESS_DESCRIPTION *, p), (where)) - -#define sk_ACCESS_DESCRIPTION_delete(sk, where) \ - ((ACCESS_DESCRIPTION *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk), (where))) - -#define sk_ACCESS_DESCRIPTION_delete_ptr(sk, p) \ - ((ACCESS_DESCRIPTION *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk), \ - CHECKED_CAST(void *, ACCESS_DESCRIPTION *, p))) - -#define sk_ACCESS_DESCRIPTION_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk), \ - (out_index), CHECKED_CAST(void *, ACCESS_DESCRIPTION *, p)) - -#define sk_ACCESS_DESCRIPTION_shift(sk) \ - ((ACCESS_DESCRIPTION *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk))) - -#define sk_ACCESS_DESCRIPTION_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk), \ - CHECKED_CAST(void *, ACCESS_DESCRIPTION *, p)) - -#define sk_ACCESS_DESCRIPTION_pop(sk) \ - ((ACCESS_DESCRIPTION *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk))) - -#define sk_ACCESS_DESCRIPTION_dup(sk) \ - ((STACK_OF(ACCESS_DESCRIPTION) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ACCESS_DESCRIPTION) *, sk))) - -#define sk_ACCESS_DESCRIPTION_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk)) - -#define sk_ACCESS_DESCRIPTION_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ACCESS_DESCRIPTION) *, sk)) - -#define sk_ACCESS_DESCRIPTION_set_cmp_func(sk, comp) \ - ((int (*)(const ACCESS_DESCRIPTION **a, const ACCESS_DESCRIPTION **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(ACCESS_DESCRIPTION) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const ACCESS_DESCRIPTION **a, \ - const ACCESS_DESCRIPTION **b), \ - comp))) - -#define sk_ACCESS_DESCRIPTION_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(ACCESS_DESCRIPTION) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ACCESS_DESCRIPTION) *, sk), \ - CHECKED_CAST(void *(*)(void *), \ - ACCESS_DESCRIPTION *(*)(ACCESS_DESCRIPTION *), copy_func), \ - CHECKED_CAST(void 
(*)(void *), void (*)(ACCESS_DESCRIPTION *), \ - free_func))) - -/* ASN1_ADB_TABLE */ -#define sk_ASN1_ADB_TABLE_new(comp) \ - ((STACK_OF(ASN1_ADB_TABLE) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const ASN1_ADB_TABLE **a, const ASN1_ADB_TABLE **b), comp))) - -#define sk_ASN1_ADB_TABLE_new_null() ((STACK_OF(ASN1_ADB_TABLE) *)sk_new_null()) - -#define sk_ASN1_ADB_TABLE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_ADB_TABLE) *, sk)) - -#define sk_ASN1_ADB_TABLE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk)); - -#define sk_ASN1_ADB_TABLE_value(sk, i) \ - ((ASN1_ADB_TABLE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_ADB_TABLE) *, sk), \ - (i))) - -#define sk_ASN1_ADB_TABLE_set(sk, i, p) \ - ((ASN1_ADB_TABLE *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk), (i), \ - CHECKED_CAST(void *, ASN1_ADB_TABLE *, p))) - -#define sk_ASN1_ADB_TABLE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk)) - -#define sk_ASN1_ADB_TABLE_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_ADB_TABLE *), free_func)) - -#define sk_ASN1_ADB_TABLE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk), \ - CHECKED_CAST(void *, ASN1_ADB_TABLE *, p), (where)) - -#define sk_ASN1_ADB_TABLE_delete(sk, where) \ - ((ASN1_ADB_TABLE *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk), (where))) - -#define sk_ASN1_ADB_TABLE_delete_ptr(sk, p) \ - ((ASN1_ADB_TABLE *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk), \ - CHECKED_CAST(void *, ASN1_ADB_TABLE *, p))) - -#define sk_ASN1_ADB_TABLE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk), (out_index), \ - CHECKED_CAST(void *, ASN1_ADB_TABLE *, p)) - -#define sk_ASN1_ADB_TABLE_shift(sk) \ - ((ASN1_ADB_TABLE *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk))) - -#define sk_ASN1_ADB_TABLE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk), \ - CHECKED_CAST(void *, ASN1_ADB_TABLE *, p)) - -#define sk_ASN1_ADB_TABLE_pop(sk) \ - ((ASN1_ADB_TABLE *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk))) - -#define sk_ASN1_ADB_TABLE_dup(sk) \ - ((STACK_OF(ASN1_ADB_TABLE) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_ADB_TABLE) *, sk))) - -#define sk_ASN1_ADB_TABLE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk)) - -#define sk_ASN1_ADB_TABLE_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_ADB_TABLE) *, sk)) - -#define sk_ASN1_ADB_TABLE_set_cmp_func(sk, comp) \ - ((int (*)(const ASN1_ADB_TABLE **a, const ASN1_ADB_TABLE **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_ADB_TABLE) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const ASN1_ADB_TABLE **a, \ - const ASN1_ADB_TABLE **b), \ - comp))) - -#define sk_ASN1_ADB_TABLE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(ASN1_ADB_TABLE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_ADB_TABLE) *, sk), \ - CHECKED_CAST(void *(*)(void *), ASN1_ADB_TABLE *(*)(ASN1_ADB_TABLE *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_ADB_TABLE *), free_func))) - -/* ASN1_GENERALSTRING */ -#define sk_ASN1_GENERALSTRING_new(comp) \ - ((STACK_OF(ASN1_GENERALSTRING) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - 
int (*)(const ASN1_GENERALSTRING **a, const ASN1_GENERALSTRING **b), \ - comp))) - -#define sk_ASN1_GENERALSTRING_new_null() \ - ((STACK_OF(ASN1_GENERALSTRING) *)sk_new_null()) - -#define sk_ASN1_GENERALSTRING_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_GENERALSTRING) *, sk)) - -#define sk_ASN1_GENERALSTRING_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk)); - -#define sk_ASN1_GENERALSTRING_value(sk, i) \ - ((ASN1_GENERALSTRING *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_GENERALSTRING) *, sk), \ - (i))) - -#define sk_ASN1_GENERALSTRING_set(sk, i, p) \ - ((ASN1_GENERALSTRING *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk), (i), \ - CHECKED_CAST(void *, ASN1_GENERALSTRING *, p))) - -#define sk_ASN1_GENERALSTRING_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk)) - -#define sk_ASN1_GENERALSTRING_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_GENERALSTRING *), \ - free_func)) - -#define sk_ASN1_GENERALSTRING_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk), \ - CHECKED_CAST(void *, ASN1_GENERALSTRING *, p), (where)) - -#define sk_ASN1_GENERALSTRING_delete(sk, where) \ - ((ASN1_GENERALSTRING *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk), (where))) - -#define sk_ASN1_GENERALSTRING_delete_ptr(sk, p) \ - ((ASN1_GENERALSTRING *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk), \ - CHECKED_CAST(void *, ASN1_GENERALSTRING *, p))) - -#define sk_ASN1_GENERALSTRING_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk), \ - (out_index), CHECKED_CAST(void *, ASN1_GENERALSTRING *, p)) - -#define sk_ASN1_GENERALSTRING_shift(sk) \ - ((ASN1_GENERALSTRING *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk))) - -#define sk_ASN1_GENERALSTRING_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk), \ - CHECKED_CAST(void *, ASN1_GENERALSTRING *, p)) - -#define sk_ASN1_GENERALSTRING_pop(sk) \ - ((ASN1_GENERALSTRING *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk))) - -#define sk_ASN1_GENERALSTRING_dup(sk) \ - ((STACK_OF(ASN1_GENERALSTRING) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_GENERALSTRING) *, sk))) - -#define sk_ASN1_GENERALSTRING_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk)) - -#define sk_ASN1_GENERALSTRING_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_GENERALSTRING) *, sk)) - -#define sk_ASN1_GENERALSTRING_set_cmp_func(sk, comp) \ - ((int (*)(const ASN1_GENERALSTRING **a, const ASN1_GENERALSTRING **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_GENERALSTRING) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const ASN1_GENERALSTRING **a, \ - const ASN1_GENERALSTRING **b), \ - comp))) - -#define sk_ASN1_GENERALSTRING_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(ASN1_GENERALSTRING) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_GENERALSTRING) *, sk), \ - CHECKED_CAST(void *(*)(void *), \ - ASN1_GENERALSTRING *(*)(ASN1_GENERALSTRING *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_GENERALSTRING *), \ - free_func))) - -/* ASN1_INTEGER */ -#define sk_ASN1_INTEGER_new(comp) \ - ((STACK_OF(ASN1_INTEGER) 
*)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const ASN1_INTEGER **a, const ASN1_INTEGER **b), \ - comp))) - -#define sk_ASN1_INTEGER_new_null() ((STACK_OF(ASN1_INTEGER) *)sk_new_null()) - -#define sk_ASN1_INTEGER_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_INTEGER) *, sk)) - -#define sk_ASN1_INTEGER_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk)); - -#define sk_ASN1_INTEGER_value(sk, i) \ - ((ASN1_INTEGER *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_INTEGER) *, sk), (i))) - -#define sk_ASN1_INTEGER_set(sk, i, p) \ - ((ASN1_INTEGER *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk), (i), \ - CHECKED_CAST(void *, ASN1_INTEGER *, p))) - -#define sk_ASN1_INTEGER_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk)) - -#define sk_ASN1_INTEGER_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_INTEGER *), free_func)) - -#define sk_ASN1_INTEGER_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk), \ - CHECKED_CAST(void *, ASN1_INTEGER *, p), (where)) - -#define sk_ASN1_INTEGER_delete(sk, where) \ - ((ASN1_INTEGER *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk), (where))) - -#define sk_ASN1_INTEGER_delete_ptr(sk, p) \ - ((ASN1_INTEGER *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk), \ - CHECKED_CAST(void *, ASN1_INTEGER *, p))) - -#define sk_ASN1_INTEGER_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk), (out_index), \ - CHECKED_CAST(void *, ASN1_INTEGER *, p)) - -#define sk_ASN1_INTEGER_shift(sk) \ - ((ASN1_INTEGER *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk))) - -#define sk_ASN1_INTEGER_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk), \ - CHECKED_CAST(void *, ASN1_INTEGER *, p)) - -#define sk_ASN1_INTEGER_pop(sk) \ - ((ASN1_INTEGER *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk))) - -#define sk_ASN1_INTEGER_dup(sk) \ - ((STACK_OF(ASN1_INTEGER) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_INTEGER) *, sk))) - -#define sk_ASN1_INTEGER_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk)) - -#define sk_ASN1_INTEGER_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_INTEGER) *, sk)) - -#define sk_ASN1_INTEGER_set_cmp_func(sk, comp) \ - ((int (*)(const ASN1_INTEGER **a, const ASN1_INTEGER **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_INTEGER) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const ASN1_INTEGER **a, const ASN1_INTEGER **b), \ - comp))) - -#define sk_ASN1_INTEGER_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(ASN1_INTEGER) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_INTEGER) *, sk), \ - CHECKED_CAST(void *(*)(void *), ASN1_INTEGER *(*)(ASN1_INTEGER *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_INTEGER *), free_func))) - -/* ASN1_OBJECT */ -#define sk_ASN1_OBJECT_new(comp) \ - ((STACK_OF(ASN1_OBJECT) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const ASN1_OBJECT **a, const ASN1_OBJECT **b), \ - comp))) - -#define sk_ASN1_OBJECT_new_null() ((STACK_OF(ASN1_OBJECT) *)sk_new_null()) - -#define sk_ASN1_OBJECT_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_OBJECT) *, sk)) - -#define sk_ASN1_OBJECT_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK 
*, STACK_OF(ASN1_OBJECT) *, sk)); - -#define sk_ASN1_OBJECT_value(sk, i) \ - ((ASN1_OBJECT *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_OBJECT) *, sk), (i))) - -#define sk_ASN1_OBJECT_set(sk, i, p) \ - ((ASN1_OBJECT *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk), \ - (i), CHECKED_CAST(void *, ASN1_OBJECT *, p))) - -#define sk_ASN1_OBJECT_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk)) - -#define sk_ASN1_OBJECT_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_OBJECT *), free_func)) - -#define sk_ASN1_OBJECT_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk), \ - CHECKED_CAST(void *, ASN1_OBJECT *, p), (where)) - -#define sk_ASN1_OBJECT_delete(sk, where) \ - ((ASN1_OBJECT *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk), (where))) - -#define sk_ASN1_OBJECT_delete_ptr(sk, p) \ - ((ASN1_OBJECT *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk), \ - CHECKED_CAST(void *, ASN1_OBJECT *, p))) - -#define sk_ASN1_OBJECT_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk), (out_index), \ - CHECKED_CAST(void *, ASN1_OBJECT *, p)) - -#define sk_ASN1_OBJECT_shift(sk) \ - ((ASN1_OBJECT *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk))) - -#define sk_ASN1_OBJECT_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk), \ - CHECKED_CAST(void *, ASN1_OBJECT *, p)) - -#define sk_ASN1_OBJECT_pop(sk) \ - ((ASN1_OBJECT *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk))) - -#define sk_ASN1_OBJECT_dup(sk) \ - ((STACK_OF(ASN1_OBJECT) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_OBJECT) *, sk))) - -#define sk_ASN1_OBJECT_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk)) - -#define sk_ASN1_OBJECT_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_OBJECT) *, sk)) - -#define sk_ASN1_OBJECT_set_cmp_func(sk, comp) \ - ((int (*)(const ASN1_OBJECT **a, const ASN1_OBJECT **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_OBJECT) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const ASN1_OBJECT **a, const ASN1_OBJECT **b), \ - comp))) - -#define sk_ASN1_OBJECT_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(ASN1_OBJECT) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_OBJECT) *, sk), \ - CHECKED_CAST(void *(*)(void *), ASN1_OBJECT *(*)(ASN1_OBJECT *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_OBJECT *), free_func))) - -/* ASN1_STRING_TABLE */ -#define sk_ASN1_STRING_TABLE_new(comp) \ - ((STACK_OF(ASN1_STRING_TABLE) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const ASN1_STRING_TABLE **a, const ASN1_STRING_TABLE **b), \ - comp))) - -#define sk_ASN1_STRING_TABLE_new_null() \ - ((STACK_OF(ASN1_STRING_TABLE) *)sk_new_null()) - -#define sk_ASN1_STRING_TABLE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_STRING_TABLE) *, sk)) - -#define sk_ASN1_STRING_TABLE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk)); - -#define sk_ASN1_STRING_TABLE_value(sk, i) \ - ((ASN1_STRING_TABLE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_STRING_TABLE) *, sk), \ - (i))) - -#define sk_ASN1_STRING_TABLE_set(sk, i, p) \ - ((ASN1_STRING_TABLE *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk), (i), \ - 
CHECKED_CAST(void *, ASN1_STRING_TABLE *, p))) - -#define sk_ASN1_STRING_TABLE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk)) - -#define sk_ASN1_STRING_TABLE_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_STRING_TABLE *), \ - free_func)) - -#define sk_ASN1_STRING_TABLE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk), \ - CHECKED_CAST(void *, ASN1_STRING_TABLE *, p), (where)) - -#define sk_ASN1_STRING_TABLE_delete(sk, where) \ - ((ASN1_STRING_TABLE *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk), (where))) - -#define sk_ASN1_STRING_TABLE_delete_ptr(sk, p) \ - ((ASN1_STRING_TABLE *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk), \ - CHECKED_CAST(void *, ASN1_STRING_TABLE *, p))) - -#define sk_ASN1_STRING_TABLE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk), \ - (out_index), CHECKED_CAST(void *, ASN1_STRING_TABLE *, p)) - -#define sk_ASN1_STRING_TABLE_shift(sk) \ - ((ASN1_STRING_TABLE *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk))) - -#define sk_ASN1_STRING_TABLE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk), \ - CHECKED_CAST(void *, ASN1_STRING_TABLE *, p)) - -#define sk_ASN1_STRING_TABLE_pop(sk) \ - ((ASN1_STRING_TABLE *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk))) - -#define sk_ASN1_STRING_TABLE_dup(sk) \ - ((STACK_OF(ASN1_STRING_TABLE) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_STRING_TABLE) *, sk))) - -#define sk_ASN1_STRING_TABLE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk)) - -#define sk_ASN1_STRING_TABLE_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_STRING_TABLE) *, sk)) - -#define sk_ASN1_STRING_TABLE_set_cmp_func(sk, comp) \ - ((int (*)(const ASN1_STRING_TABLE **a, const ASN1_STRING_TABLE **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_STRING_TABLE) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const ASN1_STRING_TABLE **a, \ - const ASN1_STRING_TABLE **b), \ - comp))) - -#define sk_ASN1_STRING_TABLE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(ASN1_STRING_TABLE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_STRING_TABLE) *, sk), \ - CHECKED_CAST(void *(*)(void *), \ - ASN1_STRING_TABLE *(*)(ASN1_STRING_TABLE *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_STRING_TABLE *), \ - free_func))) - -/* ASN1_TYPE */ -#define sk_ASN1_TYPE_new(comp) \ - ((STACK_OF(ASN1_TYPE) *)sk_new( \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const ASN1_TYPE **a, const ASN1_TYPE **b), comp))) - -#define sk_ASN1_TYPE_new_null() ((STACK_OF(ASN1_TYPE) *)sk_new_null()) - -#define sk_ASN1_TYPE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_TYPE) *, sk)) - -#define sk_ASN1_TYPE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk)); - -#define sk_ASN1_TYPE_value(sk, i) \ - ((ASN1_TYPE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_TYPE) *, sk), (i))) - -#define sk_ASN1_TYPE_set(sk, i, p) \ - ((ASN1_TYPE *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk), (i), \ - CHECKED_CAST(void *, ASN1_TYPE *, p))) - -#define sk_ASN1_TYPE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk)) - -#define 
sk_ASN1_TYPE_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_TYPE *), free_func)) - -#define sk_ASN1_TYPE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk), \ - CHECKED_CAST(void *, ASN1_TYPE *, p), (where)) - -#define sk_ASN1_TYPE_delete(sk, where) \ - ((ASN1_TYPE *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk), \ - (where))) - -#define sk_ASN1_TYPE_delete_ptr(sk, p) \ - ((ASN1_TYPE *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk), \ - CHECKED_CAST(void *, ASN1_TYPE *, p))) - -#define sk_ASN1_TYPE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk), (out_index), \ - CHECKED_CAST(void *, ASN1_TYPE *, p)) - -#define sk_ASN1_TYPE_shift(sk) \ - ((ASN1_TYPE *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk))) - -#define sk_ASN1_TYPE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk), \ - CHECKED_CAST(void *, ASN1_TYPE *, p)) - -#define sk_ASN1_TYPE_pop(sk) \ - ((ASN1_TYPE *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk))) - -#define sk_ASN1_TYPE_dup(sk) \ - ((STACK_OF(ASN1_TYPE) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_TYPE) *, sk))) - -#define sk_ASN1_TYPE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk)) - -#define sk_ASN1_TYPE_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_TYPE) *, sk)) - -#define sk_ASN1_TYPE_set_cmp_func(sk, comp) \ - ((int (*)(const ASN1_TYPE **a, const ASN1_TYPE **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_TYPE) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const ASN1_TYPE **a, const ASN1_TYPE **b), comp))) - -#define sk_ASN1_TYPE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(ASN1_TYPE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_TYPE) *, sk), \ - CHECKED_CAST(void *(*)(void *), ASN1_TYPE *(*)(ASN1_TYPE *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_TYPE *), free_func))) - -/* ASN1_VALUE */ -#define sk_ASN1_VALUE_new(comp) \ - ((STACK_OF(ASN1_VALUE) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const ASN1_VALUE **a, const ASN1_VALUE **b), \ - comp))) - -#define sk_ASN1_VALUE_new_null() ((STACK_OF(ASN1_VALUE) *)sk_new_null()) - -#define sk_ASN1_VALUE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_VALUE) *, sk)) - -#define sk_ASN1_VALUE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk)); - -#define sk_ASN1_VALUE_value(sk, i) \ - ((ASN1_VALUE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_VALUE) *, sk), (i))) - -#define sk_ASN1_VALUE_set(sk, i, p) \ - ((ASN1_VALUE *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk), \ - (i), CHECKED_CAST(void *, ASN1_VALUE *, p))) - -#define sk_ASN1_VALUE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk)) - -#define sk_ASN1_VALUE_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_VALUE *), free_func)) - -#define sk_ASN1_VALUE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk), \ - CHECKED_CAST(void *, ASN1_VALUE *, p), (where)) - -#define sk_ASN1_VALUE_delete(sk, where) \ - ((ASN1_VALUE *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk), \ - (where))) - -#define sk_ASN1_VALUE_delete_ptr(sk, p) \ - ((ASN1_VALUE 
*)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk), \ - CHECKED_CAST(void *, ASN1_VALUE *, p))) - -#define sk_ASN1_VALUE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk), (out_index), \ - CHECKED_CAST(void *, ASN1_VALUE *, p)) - -#define sk_ASN1_VALUE_shift(sk) \ - ((ASN1_VALUE *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk))) - -#define sk_ASN1_VALUE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk), \ - CHECKED_CAST(void *, ASN1_VALUE *, p)) - -#define sk_ASN1_VALUE_pop(sk) \ - ((ASN1_VALUE *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk))) - -#define sk_ASN1_VALUE_dup(sk) \ - ((STACK_OF(ASN1_VALUE) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_VALUE) *, sk))) - -#define sk_ASN1_VALUE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk)) - -#define sk_ASN1_VALUE_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_VALUE) *, sk)) - -#define sk_ASN1_VALUE_set_cmp_func(sk, comp) \ - ((int (*)(const ASN1_VALUE **a, const ASN1_VALUE **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(ASN1_VALUE) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const ASN1_VALUE **a, const ASN1_VALUE **b), \ - comp))) - -#define sk_ASN1_VALUE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(ASN1_VALUE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(ASN1_VALUE) *, sk), \ - CHECKED_CAST(void *(*)(void *), ASN1_VALUE *(*)(ASN1_VALUE *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(ASN1_VALUE *), free_func))) - -/* BIO */ -#define sk_BIO_new(comp) \ - ((STACK_OF(BIO) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const BIO **a, const BIO **b), comp))) - -#define sk_BIO_new_null() ((STACK_OF(BIO) *)sk_new_null()) - -#define sk_BIO_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(BIO) *, sk)) - -#define sk_BIO_zero(sk) sk_zero(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk)); - -#define sk_BIO_value(sk, i) \ - ((BIO *)sk_value(CHECKED_CAST(const _STACK *, const STACK_OF(BIO) *, sk), \ - (i))) - -#define sk_BIO_set(sk, i, p) \ - ((BIO *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk), (i), \ - CHECKED_CAST(void *, BIO *, p))) - -#define sk_BIO_free(sk) sk_free(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk)) - -#define sk_BIO_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(BIO *), free_func)) - -#define sk_BIO_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk), \ - CHECKED_CAST(void *, BIO *, p), (where)) - -#define sk_BIO_delete(sk, where) \ - ((BIO *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk), (where))) - -#define sk_BIO_delete_ptr(sk, p) \ - ((BIO *)sk_delete_ptr(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk), \ - CHECKED_CAST(void *, BIO *, p))) - -#define sk_BIO_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk), (out_index), \ - CHECKED_CAST(void *, BIO *, p)) - -#define sk_BIO_shift(sk) \ - ((BIO *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk))) - -#define sk_BIO_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk), \ - CHECKED_CAST(void *, BIO *, p)) - -#define sk_BIO_pop(sk) \ - ((BIO *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk))) - -#define sk_BIO_dup(sk) \ - ((STACK_OF(BIO) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(BIO) *, sk))) - -#define sk_BIO_sort(sk) sk_sort(CHECKED_CAST(_STACK *, 
STACK_OF(BIO) *, sk)) - -#define sk_BIO_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(BIO) *, sk)) - -#define sk_BIO_set_cmp_func(sk, comp) \ - ((int (*)(const BIO **a, const BIO **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(BIO) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const BIO **a, const BIO **b), \ - comp))) - -#define sk_BIO_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(BIO) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(BIO) *, sk), \ - CHECKED_CAST(void *(*)(void *), BIO *(*)(BIO *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(BIO *), free_func))) - -/* BY_DIR_ENTRY */ -#define sk_BY_DIR_ENTRY_new(comp) \ - ((STACK_OF(BY_DIR_ENTRY) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const BY_DIR_ENTRY **a, const BY_DIR_ENTRY **b), \ - comp))) - -#define sk_BY_DIR_ENTRY_new_null() ((STACK_OF(BY_DIR_ENTRY) *)sk_new_null()) - -#define sk_BY_DIR_ENTRY_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_ENTRY) *, sk)) - -#define sk_BY_DIR_ENTRY_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk)); - -#define sk_BY_DIR_ENTRY_value(sk, i) \ - ((BY_DIR_ENTRY *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_ENTRY) *, sk), (i))) - -#define sk_BY_DIR_ENTRY_set(sk, i, p) \ - ((BY_DIR_ENTRY *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk), (i), \ - CHECKED_CAST(void *, BY_DIR_ENTRY *, p))) - -#define sk_BY_DIR_ENTRY_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk)) - -#define sk_BY_DIR_ENTRY_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(BY_DIR_ENTRY *), free_func)) - -#define sk_BY_DIR_ENTRY_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk), \ - CHECKED_CAST(void *, BY_DIR_ENTRY *, p), (where)) - -#define sk_BY_DIR_ENTRY_delete(sk, where) \ - ((BY_DIR_ENTRY *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk), (where))) - -#define sk_BY_DIR_ENTRY_delete_ptr(sk, p) \ - ((BY_DIR_ENTRY *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk), \ - CHECKED_CAST(void *, BY_DIR_ENTRY *, p))) - -#define sk_BY_DIR_ENTRY_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk), (out_index), \ - CHECKED_CAST(void *, BY_DIR_ENTRY *, p)) - -#define sk_BY_DIR_ENTRY_shift(sk) \ - ((BY_DIR_ENTRY *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk))) - -#define sk_BY_DIR_ENTRY_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk), \ - CHECKED_CAST(void *, BY_DIR_ENTRY *, p)) - -#define sk_BY_DIR_ENTRY_pop(sk) \ - ((BY_DIR_ENTRY *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk))) - -#define sk_BY_DIR_ENTRY_dup(sk) \ - ((STACK_OF(BY_DIR_ENTRY) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_ENTRY) *, sk))) - -#define sk_BY_DIR_ENTRY_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk)) - -#define sk_BY_DIR_ENTRY_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_ENTRY) *, sk)) - -#define sk_BY_DIR_ENTRY_set_cmp_func(sk, comp) \ - ((int (*)(const BY_DIR_ENTRY **a, const BY_DIR_ENTRY **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_ENTRY) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const BY_DIR_ENTRY **a, const BY_DIR_ENTRY **b), \ - comp))) - -#define sk_BY_DIR_ENTRY_deep_copy(sk, copy_func, 
free_func) \ - ((STACK_OF(BY_DIR_ENTRY) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_ENTRY) *, sk), \ - CHECKED_CAST(void *(*)(void *), BY_DIR_ENTRY *(*)(BY_DIR_ENTRY *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(BY_DIR_ENTRY *), free_func))) - -/* BY_DIR_HASH */ -#define sk_BY_DIR_HASH_new(comp) \ - ((STACK_OF(BY_DIR_HASH) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const BY_DIR_HASH **a, const BY_DIR_HASH **b), \ - comp))) - -#define sk_BY_DIR_HASH_new_null() ((STACK_OF(BY_DIR_HASH) *)sk_new_null()) - -#define sk_BY_DIR_HASH_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_HASH) *, sk)) - -#define sk_BY_DIR_HASH_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk)); - -#define sk_BY_DIR_HASH_value(sk, i) \ - ((BY_DIR_HASH *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_HASH) *, sk), (i))) - -#define sk_BY_DIR_HASH_set(sk, i, p) \ - ((BY_DIR_HASH *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk), \ - (i), CHECKED_CAST(void *, BY_DIR_HASH *, p))) - -#define sk_BY_DIR_HASH_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk)) - -#define sk_BY_DIR_HASH_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(BY_DIR_HASH *), free_func)) - -#define sk_BY_DIR_HASH_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk), \ - CHECKED_CAST(void *, BY_DIR_HASH *, p), (where)) - -#define sk_BY_DIR_HASH_delete(sk, where) \ - ((BY_DIR_HASH *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk), (where))) - -#define sk_BY_DIR_HASH_delete_ptr(sk, p) \ - ((BY_DIR_HASH *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk), \ - CHECKED_CAST(void *, BY_DIR_HASH *, p))) - -#define sk_BY_DIR_HASH_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk), (out_index), \ - CHECKED_CAST(void *, BY_DIR_HASH *, p)) - -#define sk_BY_DIR_HASH_shift(sk) \ - ((BY_DIR_HASH *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk))) - -#define sk_BY_DIR_HASH_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk), \ - CHECKED_CAST(void *, BY_DIR_HASH *, p)) - -#define sk_BY_DIR_HASH_pop(sk) \ - ((BY_DIR_HASH *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk))) - -#define sk_BY_DIR_HASH_dup(sk) \ - ((STACK_OF(BY_DIR_HASH) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_HASH) *, sk))) - -#define sk_BY_DIR_HASH_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk)) - -#define sk_BY_DIR_HASH_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_HASH) *, sk)) - -#define sk_BY_DIR_HASH_set_cmp_func(sk, comp) \ - ((int (*)(const BY_DIR_HASH **a, const BY_DIR_HASH **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(BY_DIR_HASH) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const BY_DIR_HASH **a, const BY_DIR_HASH **b), \ - comp))) - -#define sk_BY_DIR_HASH_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(BY_DIR_HASH) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(BY_DIR_HASH) *, sk), \ - CHECKED_CAST(void *(*)(void *), BY_DIR_HASH *(*)(BY_DIR_HASH *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(BY_DIR_HASH *), free_func))) - -/* CONF_VALUE */ -#define sk_CONF_VALUE_new(comp) \ - ((STACK_OF(CONF_VALUE) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const CONF_VALUE 
**a, const CONF_VALUE **b), \ - comp))) - -#define sk_CONF_VALUE_new_null() ((STACK_OF(CONF_VALUE) *)sk_new_null()) - -#define sk_CONF_VALUE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(CONF_VALUE) *, sk)) - -#define sk_CONF_VALUE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk)); - -#define sk_CONF_VALUE_value(sk, i) \ - ((CONF_VALUE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(CONF_VALUE) *, sk), (i))) - -#define sk_CONF_VALUE_set(sk, i, p) \ - ((CONF_VALUE *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk), \ - (i), CHECKED_CAST(void *, CONF_VALUE *, p))) - -#define sk_CONF_VALUE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk)) - -#define sk_CONF_VALUE_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(CONF_VALUE *), free_func)) - -#define sk_CONF_VALUE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk), \ - CHECKED_CAST(void *, CONF_VALUE *, p), (where)) - -#define sk_CONF_VALUE_delete(sk, where) \ - ((CONF_VALUE *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk), \ - (where))) - -#define sk_CONF_VALUE_delete_ptr(sk, p) \ - ((CONF_VALUE *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk), \ - CHECKED_CAST(void *, CONF_VALUE *, p))) - -#define sk_CONF_VALUE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk), (out_index), \ - CHECKED_CAST(void *, CONF_VALUE *, p)) - -#define sk_CONF_VALUE_shift(sk) \ - ((CONF_VALUE *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk))) - -#define sk_CONF_VALUE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk), \ - CHECKED_CAST(void *, CONF_VALUE *, p)) - -#define sk_CONF_VALUE_pop(sk) \ - ((CONF_VALUE *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk))) - -#define sk_CONF_VALUE_dup(sk) \ - ((STACK_OF(CONF_VALUE) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(CONF_VALUE) *, sk))) - -#define sk_CONF_VALUE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk)) - -#define sk_CONF_VALUE_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(CONF_VALUE) *, sk)) - -#define sk_CONF_VALUE_set_cmp_func(sk, comp) \ - ((int (*)(const CONF_VALUE **a, const CONF_VALUE **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(CONF_VALUE) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const CONF_VALUE **a, const CONF_VALUE **b), \ - comp))) - -#define sk_CONF_VALUE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(CONF_VALUE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(CONF_VALUE) *, sk), \ - CHECKED_CAST(void *(*)(void *), CONF_VALUE *(*)(CONF_VALUE *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(CONF_VALUE *), free_func))) - -/* CRYPTO_BUFFER */ -#define sk_CRYPTO_BUFFER_new(comp) \ - ((STACK_OF(CRYPTO_BUFFER) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const CRYPTO_BUFFER **a, const CRYPTO_BUFFER **b), comp))) - -#define sk_CRYPTO_BUFFER_new_null() ((STACK_OF(CRYPTO_BUFFER) *)sk_new_null()) - -#define sk_CRYPTO_BUFFER_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(CRYPTO_BUFFER) *, sk)) - -#define sk_CRYPTO_BUFFER_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk)); - -#define sk_CRYPTO_BUFFER_value(sk, i) \ - ((CRYPTO_BUFFER *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(CRYPTO_BUFFER) *, sk), 
(i))) - -#define sk_CRYPTO_BUFFER_set(sk, i, p) \ - ((CRYPTO_BUFFER *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk), (i), \ - CHECKED_CAST(void *, CRYPTO_BUFFER *, p))) - -#define sk_CRYPTO_BUFFER_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk)) - -#define sk_CRYPTO_BUFFER_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(CRYPTO_BUFFER *), free_func)) - -#define sk_CRYPTO_BUFFER_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk), \ - CHECKED_CAST(void *, CRYPTO_BUFFER *, p), (where)) - -#define sk_CRYPTO_BUFFER_delete(sk, where) \ - ((CRYPTO_BUFFER *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk), (where))) - -#define sk_CRYPTO_BUFFER_delete_ptr(sk, p) \ - ((CRYPTO_BUFFER *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk), \ - CHECKED_CAST(void *, CRYPTO_BUFFER *, p))) - -#define sk_CRYPTO_BUFFER_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk), (out_index), \ - CHECKED_CAST(void *, CRYPTO_BUFFER *, p)) - -#define sk_CRYPTO_BUFFER_shift(sk) \ - ((CRYPTO_BUFFER *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk))) - -#define sk_CRYPTO_BUFFER_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk), \ - CHECKED_CAST(void *, CRYPTO_BUFFER *, p)) - -#define sk_CRYPTO_BUFFER_pop(sk) \ - ((CRYPTO_BUFFER *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk))) - -#define sk_CRYPTO_BUFFER_dup(sk) \ - ((STACK_OF(CRYPTO_BUFFER) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(CRYPTO_BUFFER) *, sk))) - -#define sk_CRYPTO_BUFFER_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk)) - -#define sk_CRYPTO_BUFFER_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(CRYPTO_BUFFER) *, sk)) - -#define sk_CRYPTO_BUFFER_set_cmp_func(sk, comp) \ - ((int (*)(const CRYPTO_BUFFER **a, const CRYPTO_BUFFER **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_BUFFER) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const CRYPTO_BUFFER **a, const CRYPTO_BUFFER **b), \ - comp))) - -#define sk_CRYPTO_BUFFER_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(CRYPTO_BUFFER) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(CRYPTO_BUFFER) *, sk), \ - CHECKED_CAST(void *(*)(void *), CRYPTO_BUFFER *(*)(CRYPTO_BUFFER *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(CRYPTO_BUFFER *), free_func))) - -/* CRYPTO_EX_DATA_FUNCS */ -#define sk_CRYPTO_EX_DATA_FUNCS_new(comp) \ - ((STACK_OF(CRYPTO_EX_DATA_FUNCS) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const CRYPTO_EX_DATA_FUNCS **a, const CRYPTO_EX_DATA_FUNCS **b), \ - comp))) - -#define sk_CRYPTO_EX_DATA_FUNCS_new_null() \ - ((STACK_OF(CRYPTO_EX_DATA_FUNCS) *)sk_new_null()) - -#define sk_CRYPTO_EX_DATA_FUNCS_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(CRYPTO_EX_DATA_FUNCS) *, \ - sk)) - -#define sk_CRYPTO_EX_DATA_FUNCS_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk)); - -#define sk_CRYPTO_EX_DATA_FUNCS_value(sk, i) \ - ((CRYPTO_EX_DATA_FUNCS *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(CRYPTO_EX_DATA_FUNCS) *, \ - sk), \ - (i))) - -#define sk_CRYPTO_EX_DATA_FUNCS_set(sk, i, p) \ - ((CRYPTO_EX_DATA_FUNCS *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk), (i), \ - 
CHECKED_CAST(void *, CRYPTO_EX_DATA_FUNCS *, p))) - -#define sk_CRYPTO_EX_DATA_FUNCS_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk)) - -#define sk_CRYPTO_EX_DATA_FUNCS_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(CRYPTO_EX_DATA_FUNCS *), \ - free_func)) - -#define sk_CRYPTO_EX_DATA_FUNCS_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk), \ - CHECKED_CAST(void *, CRYPTO_EX_DATA_FUNCS *, p), (where)) - -#define sk_CRYPTO_EX_DATA_FUNCS_delete(sk, where) \ - ((CRYPTO_EX_DATA_FUNCS *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk), (where))) - -#define sk_CRYPTO_EX_DATA_FUNCS_delete_ptr(sk, p) \ - ((CRYPTO_EX_DATA_FUNCS *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk), \ - CHECKED_CAST(void *, CRYPTO_EX_DATA_FUNCS *, p))) - -#define sk_CRYPTO_EX_DATA_FUNCS_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk), \ - (out_index), CHECKED_CAST(void *, CRYPTO_EX_DATA_FUNCS *, p)) - -#define sk_CRYPTO_EX_DATA_FUNCS_shift(sk) \ - ((CRYPTO_EX_DATA_FUNCS *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk))) - -#define sk_CRYPTO_EX_DATA_FUNCS_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk), \ - CHECKED_CAST(void *, CRYPTO_EX_DATA_FUNCS *, p)) - -#define sk_CRYPTO_EX_DATA_FUNCS_pop(sk) \ - ((CRYPTO_EX_DATA_FUNCS *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk))) - -#define sk_CRYPTO_EX_DATA_FUNCS_dup(sk) \ - ((STACK_OF(CRYPTO_EX_DATA_FUNCS) *)sk_dup(CHECKED_CAST( \ - const _STACK *, const STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk))) - -#define sk_CRYPTO_EX_DATA_FUNCS_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk)) - -#define sk_CRYPTO_EX_DATA_FUNCS_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, \ - const STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk)) - -#define sk_CRYPTO_EX_DATA_FUNCS_set_cmp_func(sk, comp) \ - ((int (*)(const CRYPTO_EX_DATA_FUNCS **a, const CRYPTO_EX_DATA_FUNCS **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(CRYPTO_EX_DATA_FUNCS) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const CRYPTO_EX_DATA_FUNCS **a, \ - const CRYPTO_EX_DATA_FUNCS **b), \ - comp))) - -#define sk_CRYPTO_EX_DATA_FUNCS_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(CRYPTO_EX_DATA_FUNCS) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(CRYPTO_EX_DATA_FUNCS) *, \ - sk), \ - CHECKED_CAST(void *(*)(void *), \ - CRYPTO_EX_DATA_FUNCS *(*)(CRYPTO_EX_DATA_FUNCS *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(CRYPTO_EX_DATA_FUNCS *), \ - free_func))) - -/* DIST_POINT */ -#define sk_DIST_POINT_new(comp) \ - ((STACK_OF(DIST_POINT) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const DIST_POINT **a, const DIST_POINT **b), \ - comp))) - -#define sk_DIST_POINT_new_null() ((STACK_OF(DIST_POINT) *)sk_new_null()) - -#define sk_DIST_POINT_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(DIST_POINT) *, sk)) - -#define sk_DIST_POINT_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk)); - -#define sk_DIST_POINT_value(sk, i) \ - ((DIST_POINT *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(DIST_POINT) *, sk), (i))) - -#define sk_DIST_POINT_set(sk, i, p) \ - ((DIST_POINT *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk), \ - 
(i), CHECKED_CAST(void *, DIST_POINT *, p))) - -#define sk_DIST_POINT_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk)) - -#define sk_DIST_POINT_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(DIST_POINT *), free_func)) - -#define sk_DIST_POINT_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk), \ - CHECKED_CAST(void *, DIST_POINT *, p), (where)) - -#define sk_DIST_POINT_delete(sk, where) \ - ((DIST_POINT *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk), \ - (where))) - -#define sk_DIST_POINT_delete_ptr(sk, p) \ - ((DIST_POINT *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk), \ - CHECKED_CAST(void *, DIST_POINT *, p))) - -#define sk_DIST_POINT_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk), (out_index), \ - CHECKED_CAST(void *, DIST_POINT *, p)) - -#define sk_DIST_POINT_shift(sk) \ - ((DIST_POINT *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk))) - -#define sk_DIST_POINT_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk), \ - CHECKED_CAST(void *, DIST_POINT *, p)) - -#define sk_DIST_POINT_pop(sk) \ - ((DIST_POINT *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk))) - -#define sk_DIST_POINT_dup(sk) \ - ((STACK_OF(DIST_POINT) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(DIST_POINT) *, sk))) - -#define sk_DIST_POINT_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk)) - -#define sk_DIST_POINT_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(DIST_POINT) *, sk)) - -#define sk_DIST_POINT_set_cmp_func(sk, comp) \ - ((int (*)(const DIST_POINT **a, const DIST_POINT **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(DIST_POINT) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const DIST_POINT **a, const DIST_POINT **b), \ - comp))) - -#define sk_DIST_POINT_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(DIST_POINT) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(DIST_POINT) *, sk), \ - CHECKED_CAST(void *(*)(void *), DIST_POINT *(*)(DIST_POINT *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(DIST_POINT *), free_func))) - -/* GENERAL_NAME */ -#define sk_GENERAL_NAME_new(comp) \ - ((STACK_OF(GENERAL_NAME) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const GENERAL_NAME **a, const GENERAL_NAME **b), \ - comp))) - -#define sk_GENERAL_NAME_new_null() ((STACK_OF(GENERAL_NAME) *)sk_new_null()) - -#define sk_GENERAL_NAME_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAME) *, sk)) - -#define sk_GENERAL_NAME_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk)); - -#define sk_GENERAL_NAME_value(sk, i) \ - ((GENERAL_NAME *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAME) *, sk), (i))) - -#define sk_GENERAL_NAME_set(sk, i, p) \ - ((GENERAL_NAME *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk), (i), \ - CHECKED_CAST(void *, GENERAL_NAME *, p))) - -#define sk_GENERAL_NAME_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk)) - -#define sk_GENERAL_NAME_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(GENERAL_NAME *), free_func)) - -#define sk_GENERAL_NAME_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, 
sk), \ - CHECKED_CAST(void *, GENERAL_NAME *, p), (where)) - -#define sk_GENERAL_NAME_delete(sk, where) \ - ((GENERAL_NAME *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk), (where))) - -#define sk_GENERAL_NAME_delete_ptr(sk, p) \ - ((GENERAL_NAME *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk), \ - CHECKED_CAST(void *, GENERAL_NAME *, p))) - -#define sk_GENERAL_NAME_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk), (out_index), \ - CHECKED_CAST(void *, GENERAL_NAME *, p)) - -#define sk_GENERAL_NAME_shift(sk) \ - ((GENERAL_NAME *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk))) - -#define sk_GENERAL_NAME_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk), \ - CHECKED_CAST(void *, GENERAL_NAME *, p)) - -#define sk_GENERAL_NAME_pop(sk) \ - ((GENERAL_NAME *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk))) - -#define sk_GENERAL_NAME_dup(sk) \ - ((STACK_OF(GENERAL_NAME) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAME) *, sk))) - -#define sk_GENERAL_NAME_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk)) - -#define sk_GENERAL_NAME_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAME) *, sk)) - -#define sk_GENERAL_NAME_set_cmp_func(sk, comp) \ - ((int (*)(const GENERAL_NAME **a, const GENERAL_NAME **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAME) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const GENERAL_NAME **a, const GENERAL_NAME **b), \ - comp))) - -#define sk_GENERAL_NAME_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(GENERAL_NAME) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAME) *, sk), \ - CHECKED_CAST(void *(*)(void *), GENERAL_NAME *(*)(GENERAL_NAME *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(GENERAL_NAME *), free_func))) - -/* GENERAL_NAMES */ -#define sk_GENERAL_NAMES_new(comp) \ - ((STACK_OF(GENERAL_NAMES) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const GENERAL_NAMES **a, const GENERAL_NAMES **b), comp))) - -#define sk_GENERAL_NAMES_new_null() ((STACK_OF(GENERAL_NAMES) *)sk_new_null()) - -#define sk_GENERAL_NAMES_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAMES) *, sk)) - -#define sk_GENERAL_NAMES_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk)); - -#define sk_GENERAL_NAMES_value(sk, i) \ - ((GENERAL_NAMES *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAMES) *, sk), (i))) - -#define sk_GENERAL_NAMES_set(sk, i, p) \ - ((GENERAL_NAMES *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk), (i), \ - CHECKED_CAST(void *, GENERAL_NAMES *, p))) - -#define sk_GENERAL_NAMES_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk)) - -#define sk_GENERAL_NAMES_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(GENERAL_NAMES *), free_func)) - -#define sk_GENERAL_NAMES_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk), \ - CHECKED_CAST(void *, GENERAL_NAMES *, p), (where)) - -#define sk_GENERAL_NAMES_delete(sk, where) \ - ((GENERAL_NAMES *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk), (where))) - -#define sk_GENERAL_NAMES_delete_ptr(sk, p) \ - ((GENERAL_NAMES *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, 
STACK_OF(GENERAL_NAMES) *, sk), \ - CHECKED_CAST(void *, GENERAL_NAMES *, p))) - -#define sk_GENERAL_NAMES_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk), (out_index), \ - CHECKED_CAST(void *, GENERAL_NAMES *, p)) - -#define sk_GENERAL_NAMES_shift(sk) \ - ((GENERAL_NAMES *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk))) - -#define sk_GENERAL_NAMES_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk), \ - CHECKED_CAST(void *, GENERAL_NAMES *, p)) - -#define sk_GENERAL_NAMES_pop(sk) \ - ((GENERAL_NAMES *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk))) - -#define sk_GENERAL_NAMES_dup(sk) \ - ((STACK_OF(GENERAL_NAMES) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAMES) *, sk))) - -#define sk_GENERAL_NAMES_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk)) - -#define sk_GENERAL_NAMES_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAMES) *, sk)) - -#define sk_GENERAL_NAMES_set_cmp_func(sk, comp) \ - ((int (*)(const GENERAL_NAMES **a, const GENERAL_NAMES **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_NAMES) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const GENERAL_NAMES **a, const GENERAL_NAMES **b), \ - comp))) - -#define sk_GENERAL_NAMES_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(GENERAL_NAMES) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_NAMES) *, sk), \ - CHECKED_CAST(void *(*)(void *), GENERAL_NAMES *(*)(GENERAL_NAMES *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(GENERAL_NAMES *), free_func))) - -/* GENERAL_SUBTREE */ -#define sk_GENERAL_SUBTREE_new(comp) \ - ((STACK_OF(GENERAL_SUBTREE) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const GENERAL_SUBTREE **a, const GENERAL_SUBTREE **b), comp))) - -#define sk_GENERAL_SUBTREE_new_null() \ - ((STACK_OF(GENERAL_SUBTREE) *)sk_new_null()) - -#define sk_GENERAL_SUBTREE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_SUBTREE) *, sk)) - -#define sk_GENERAL_SUBTREE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk)); - -#define sk_GENERAL_SUBTREE_value(sk, i) \ - ((GENERAL_SUBTREE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_SUBTREE) *, sk), \ - (i))) - -#define sk_GENERAL_SUBTREE_set(sk, i, p) \ - ((GENERAL_SUBTREE *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk), (i), \ - CHECKED_CAST(void *, GENERAL_SUBTREE *, p))) - -#define sk_GENERAL_SUBTREE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk)) - -#define sk_GENERAL_SUBTREE_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(GENERAL_SUBTREE *), free_func)) - -#define sk_GENERAL_SUBTREE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk), \ - CHECKED_CAST(void *, GENERAL_SUBTREE *, p), (where)) - -#define sk_GENERAL_SUBTREE_delete(sk, where) \ - ((GENERAL_SUBTREE *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk), (where))) - -#define sk_GENERAL_SUBTREE_delete_ptr(sk, p) \ - ((GENERAL_SUBTREE *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk), \ - CHECKED_CAST(void *, GENERAL_SUBTREE *, p))) - -#define sk_GENERAL_SUBTREE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk), \ - 
(out_index), CHECKED_CAST(void *, GENERAL_SUBTREE *, p)) - -#define sk_GENERAL_SUBTREE_shift(sk) \ - ((GENERAL_SUBTREE *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk))) - -#define sk_GENERAL_SUBTREE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk), \ - CHECKED_CAST(void *, GENERAL_SUBTREE *, p)) - -#define sk_GENERAL_SUBTREE_pop(sk) \ - ((GENERAL_SUBTREE *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk))) - -#define sk_GENERAL_SUBTREE_dup(sk) \ - ((STACK_OF(GENERAL_SUBTREE) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_SUBTREE) *, sk))) - -#define sk_GENERAL_SUBTREE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk)) - -#define sk_GENERAL_SUBTREE_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_SUBTREE) *, sk)) - -#define sk_GENERAL_SUBTREE_set_cmp_func(sk, comp) \ - ((int (*)(const GENERAL_SUBTREE **a, const GENERAL_SUBTREE **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(GENERAL_SUBTREE) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const GENERAL_SUBTREE **a, \ - const GENERAL_SUBTREE **b), \ - comp))) - -#define sk_GENERAL_SUBTREE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(GENERAL_SUBTREE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(GENERAL_SUBTREE) *, sk), \ - CHECKED_CAST(void *(*)(void *), GENERAL_SUBTREE *(*)(GENERAL_SUBTREE *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(GENERAL_SUBTREE *), free_func))) - -/* POLICYINFO */ -#define sk_POLICYINFO_new(comp) \ - ((STACK_OF(POLICYINFO) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const POLICYINFO **a, const POLICYINFO **b), \ - comp))) - -#define sk_POLICYINFO_new_null() ((STACK_OF(POLICYINFO) *)sk_new_null()) - -#define sk_POLICYINFO_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(POLICYINFO) *, sk)) - -#define sk_POLICYINFO_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk)); - -#define sk_POLICYINFO_value(sk, i) \ - ((POLICYINFO *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICYINFO) *, sk), (i))) - -#define sk_POLICYINFO_set(sk, i, p) \ - ((POLICYINFO *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk), \ - (i), CHECKED_CAST(void *, POLICYINFO *, p))) - -#define sk_POLICYINFO_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk)) - -#define sk_POLICYINFO_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(POLICYINFO *), free_func)) - -#define sk_POLICYINFO_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk), \ - CHECKED_CAST(void *, POLICYINFO *, p), (where)) - -#define sk_POLICYINFO_delete(sk, where) \ - ((POLICYINFO *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk), \ - (where))) - -#define sk_POLICYINFO_delete_ptr(sk, p) \ - ((POLICYINFO *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk), \ - CHECKED_CAST(void *, POLICYINFO *, p))) - -#define sk_POLICYINFO_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk), (out_index), \ - CHECKED_CAST(void *, POLICYINFO *, p)) - -#define sk_POLICYINFO_shift(sk) \ - ((POLICYINFO *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk))) - -#define sk_POLICYINFO_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk), \ - CHECKED_CAST(void *, POLICYINFO *, p)) - -#define 
sk_POLICYINFO_pop(sk) \ - ((POLICYINFO *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk))) - -#define sk_POLICYINFO_dup(sk) \ - ((STACK_OF(POLICYINFO) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICYINFO) *, sk))) - -#define sk_POLICYINFO_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk)) - -#define sk_POLICYINFO_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(POLICYINFO) *, sk)) - -#define sk_POLICYINFO_set_cmp_func(sk, comp) \ - ((int (*)(const POLICYINFO **a, const POLICYINFO **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYINFO) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const POLICYINFO **a, const POLICYINFO **b), \ - comp))) - -#define sk_POLICYINFO_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(POLICYINFO) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICYINFO) *, sk), \ - CHECKED_CAST(void *(*)(void *), POLICYINFO *(*)(POLICYINFO *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(POLICYINFO *), free_func))) - -/* POLICYQUALINFO */ -#define sk_POLICYQUALINFO_new(comp) \ - ((STACK_OF(POLICYQUALINFO) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const POLICYQUALINFO **a, const POLICYQUALINFO **b), comp))) - -#define sk_POLICYQUALINFO_new_null() ((STACK_OF(POLICYQUALINFO) *)sk_new_null()) - -#define sk_POLICYQUALINFO_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(POLICYQUALINFO) *, sk)) - -#define sk_POLICYQUALINFO_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk)); - -#define sk_POLICYQUALINFO_value(sk, i) \ - ((POLICYQUALINFO *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICYQUALINFO) *, sk), \ - (i))) - -#define sk_POLICYQUALINFO_set(sk, i, p) \ - ((POLICYQUALINFO *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk), (i), \ - CHECKED_CAST(void *, POLICYQUALINFO *, p))) - -#define sk_POLICYQUALINFO_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk)) - -#define sk_POLICYQUALINFO_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(POLICYQUALINFO *), free_func)) - -#define sk_POLICYQUALINFO_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk), \ - CHECKED_CAST(void *, POLICYQUALINFO *, p), (where)) - -#define sk_POLICYQUALINFO_delete(sk, where) \ - ((POLICYQUALINFO *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk), (where))) - -#define sk_POLICYQUALINFO_delete_ptr(sk, p) \ - ((POLICYQUALINFO *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk), \ - CHECKED_CAST(void *, POLICYQUALINFO *, p))) - -#define sk_POLICYQUALINFO_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk), (out_index), \ - CHECKED_CAST(void *, POLICYQUALINFO *, p)) - -#define sk_POLICYQUALINFO_shift(sk) \ - ((POLICYQUALINFO *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk))) - -#define sk_POLICYQUALINFO_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk), \ - CHECKED_CAST(void *, POLICYQUALINFO *, p)) - -#define sk_POLICYQUALINFO_pop(sk) \ - ((POLICYQUALINFO *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk))) - -#define sk_POLICYQUALINFO_dup(sk) \ - ((STACK_OF(POLICYQUALINFO) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICYQUALINFO) *, sk))) - -#define sk_POLICYQUALINFO_sort(sk) \ - 
sk_sort(CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk)) - -#define sk_POLICYQUALINFO_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICYQUALINFO) *, sk)) - -#define sk_POLICYQUALINFO_set_cmp_func(sk, comp) \ - ((int (*)(const POLICYQUALINFO **a, const POLICYQUALINFO **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICYQUALINFO) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const POLICYQUALINFO **a, \ - const POLICYQUALINFO **b), \ - comp))) - -#define sk_POLICYQUALINFO_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(POLICYQUALINFO) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICYQUALINFO) *, sk), \ - CHECKED_CAST(void *(*)(void *), POLICYQUALINFO *(*)(POLICYQUALINFO *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(POLICYQUALINFO *), free_func))) - -/* POLICY_MAPPING */ -#define sk_POLICY_MAPPING_new(comp) \ - ((STACK_OF(POLICY_MAPPING) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const POLICY_MAPPING **a, const POLICY_MAPPING **b), comp))) - -#define sk_POLICY_MAPPING_new_null() ((STACK_OF(POLICY_MAPPING) *)sk_new_null()) - -#define sk_POLICY_MAPPING_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(POLICY_MAPPING) *, sk)) - -#define sk_POLICY_MAPPING_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk)); - -#define sk_POLICY_MAPPING_value(sk, i) \ - ((POLICY_MAPPING *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICY_MAPPING) *, sk), \ - (i))) - -#define sk_POLICY_MAPPING_set(sk, i, p) \ - ((POLICY_MAPPING *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk), (i), \ - CHECKED_CAST(void *, POLICY_MAPPING *, p))) - -#define sk_POLICY_MAPPING_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk)) - -#define sk_POLICY_MAPPING_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(POLICY_MAPPING *), free_func)) - -#define sk_POLICY_MAPPING_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk), \ - CHECKED_CAST(void *, POLICY_MAPPING *, p), (where)) - -#define sk_POLICY_MAPPING_delete(sk, where) \ - ((POLICY_MAPPING *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk), (where))) - -#define sk_POLICY_MAPPING_delete_ptr(sk, p) \ - ((POLICY_MAPPING *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk), \ - CHECKED_CAST(void *, POLICY_MAPPING *, p))) - -#define sk_POLICY_MAPPING_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk), (out_index), \ - CHECKED_CAST(void *, POLICY_MAPPING *, p)) - -#define sk_POLICY_MAPPING_shift(sk) \ - ((POLICY_MAPPING *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk))) - -#define sk_POLICY_MAPPING_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk), \ - CHECKED_CAST(void *, POLICY_MAPPING *, p)) - -#define sk_POLICY_MAPPING_pop(sk) \ - ((POLICY_MAPPING *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk))) - -#define sk_POLICY_MAPPING_dup(sk) \ - ((STACK_OF(POLICY_MAPPING) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICY_MAPPING) *, sk))) - -#define sk_POLICY_MAPPING_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk)) - -#define sk_POLICY_MAPPING_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICY_MAPPING) *, sk)) - -#define 
sk_POLICY_MAPPING_set_cmp_func(sk, comp) \ - ((int (*)(const POLICY_MAPPING **a, const POLICY_MAPPING **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(POLICY_MAPPING) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const POLICY_MAPPING **a, \ - const POLICY_MAPPING **b), \ - comp))) - -#define sk_POLICY_MAPPING_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(POLICY_MAPPING) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(POLICY_MAPPING) *, sk), \ - CHECKED_CAST(void *(*)(void *), POLICY_MAPPING *(*)(POLICY_MAPPING *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(POLICY_MAPPING *), free_func))) - -/* RSA_additional_prime */ -#define sk_RSA_additional_prime_new(comp) \ - ((STACK_OF(RSA_additional_prime) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const RSA_additional_prime **a, const RSA_additional_prime **b), \ - comp))) - -#define sk_RSA_additional_prime_new_null() \ - ((STACK_OF(RSA_additional_prime) *)sk_new_null()) - -#define sk_RSA_additional_prime_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(RSA_additional_prime) *, \ - sk)) - -#define sk_RSA_additional_prime_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk)); - -#define sk_RSA_additional_prime_value(sk, i) \ - ((RSA_additional_prime *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(RSA_additional_prime) *, \ - sk), \ - (i))) - -#define sk_RSA_additional_prime_set(sk, i, p) \ - ((RSA_additional_prime *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk), (i), \ - CHECKED_CAST(void *, RSA_additional_prime *, p))) - -#define sk_RSA_additional_prime_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk)) - -#define sk_RSA_additional_prime_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(RSA_additional_prime *), \ - free_func)) - -#define sk_RSA_additional_prime_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk), \ - CHECKED_CAST(void *, RSA_additional_prime *, p), (where)) - -#define sk_RSA_additional_prime_delete(sk, where) \ - ((RSA_additional_prime *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk), (where))) - -#define sk_RSA_additional_prime_delete_ptr(sk, p) \ - ((RSA_additional_prime *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk), \ - CHECKED_CAST(void *, RSA_additional_prime *, p))) - -#define sk_RSA_additional_prime_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk), \ - (out_index), CHECKED_CAST(void *, RSA_additional_prime *, p)) - -#define sk_RSA_additional_prime_shift(sk) \ - ((RSA_additional_prime *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk))) - -#define sk_RSA_additional_prime_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk), \ - CHECKED_CAST(void *, RSA_additional_prime *, p)) - -#define sk_RSA_additional_prime_pop(sk) \ - ((RSA_additional_prime *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk))) - -#define sk_RSA_additional_prime_dup(sk) \ - ((STACK_OF(RSA_additional_prime) *)sk_dup(CHECKED_CAST( \ - const _STACK *, const STACK_OF(RSA_additional_prime) *, sk))) - -#define sk_RSA_additional_prime_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk)) - -#define 
sk_RSA_additional_prime_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, \ - const STACK_OF(RSA_additional_prime) *, sk)) - -#define sk_RSA_additional_prime_set_cmp_func(sk, comp) \ - ((int (*)(const RSA_additional_prime **a, const RSA_additional_prime **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(RSA_additional_prime) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const RSA_additional_prime **a, \ - const RSA_additional_prime **b), \ - comp))) - -#define sk_RSA_additional_prime_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(RSA_additional_prime) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(RSA_additional_prime) *, \ - sk), \ - CHECKED_CAST(void *(*)(void *), \ - RSA_additional_prime *(*)(RSA_additional_prime *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(RSA_additional_prime *), \ - free_func))) - -/* SSL_COMP */ -#define sk_SSL_COMP_new(comp) \ - ((STACK_OF(SSL_COMP) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const SSL_COMP **a, const SSL_COMP **b), comp))) - -#define sk_SSL_COMP_new_null() ((STACK_OF(SSL_COMP) *)sk_new_null()) - -#define sk_SSL_COMP_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(SSL_COMP) *, sk)) - -#define sk_SSL_COMP_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk)); - -#define sk_SSL_COMP_value(sk, i) \ - ((SSL_COMP *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SSL_COMP) *, sk), (i))) - -#define sk_SSL_COMP_set(sk, i, p) \ - ((SSL_COMP *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk), (i), \ - CHECKED_CAST(void *, SSL_COMP *, p))) - -#define sk_SSL_COMP_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk)) - -#define sk_SSL_COMP_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(SSL_COMP *), free_func)) - -#define sk_SSL_COMP_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk), \ - CHECKED_CAST(void *, SSL_COMP *, p), (where)) - -#define sk_SSL_COMP_delete(sk, where) \ - ((SSL_COMP *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk), \ - (where))) - -#define sk_SSL_COMP_delete_ptr(sk, p) \ - ((SSL_COMP *)sk_delete_ptr(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk), \ - CHECKED_CAST(void *, SSL_COMP *, p))) - -#define sk_SSL_COMP_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk), (out_index), \ - CHECKED_CAST(void *, SSL_COMP *, p)) - -#define sk_SSL_COMP_shift(sk) \ - ((SSL_COMP *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk))) - -#define sk_SSL_COMP_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk), \ - CHECKED_CAST(void *, SSL_COMP *, p)) - -#define sk_SSL_COMP_pop(sk) \ - ((SSL_COMP *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk))) - -#define sk_SSL_COMP_dup(sk) \ - ((STACK_OF(SSL_COMP) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SSL_COMP) *, sk))) - -#define sk_SSL_COMP_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk)) - -#define sk_SSL_COMP_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(SSL_COMP) *, sk)) - -#define sk_SSL_COMP_set_cmp_func(sk, comp) \ - ((int (*)(const SSL_COMP **a, const SSL_COMP **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_COMP) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const SSL_COMP **a, const SSL_COMP **b), comp))) - -#define sk_SSL_COMP_deep_copy(sk, copy_func, free_func) \ - 
((STACK_OF(SSL_COMP) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SSL_COMP) *, sk), \ - CHECKED_CAST(void *(*)(void *), SSL_COMP *(*)(SSL_COMP *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(SSL_COMP *), free_func))) - -/* SSL_CUSTOM_EXTENSION */ -#define sk_SSL_CUSTOM_EXTENSION_new(comp) \ - ((STACK_OF(SSL_CUSTOM_EXTENSION) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const SSL_CUSTOM_EXTENSION **a, const SSL_CUSTOM_EXTENSION **b), \ - comp))) - -#define sk_SSL_CUSTOM_EXTENSION_new_null() \ - ((STACK_OF(SSL_CUSTOM_EXTENSION) *)sk_new_null()) - -#define sk_SSL_CUSTOM_EXTENSION_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(SSL_CUSTOM_EXTENSION) *, \ - sk)) - -#define sk_SSL_CUSTOM_EXTENSION_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk)); - -#define sk_SSL_CUSTOM_EXTENSION_value(sk, i) \ - ((SSL_CUSTOM_EXTENSION *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SSL_CUSTOM_EXTENSION) *, \ - sk), \ - (i))) - -#define sk_SSL_CUSTOM_EXTENSION_set(sk, i, p) \ - ((SSL_CUSTOM_EXTENSION *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk), (i), \ - CHECKED_CAST(void *, SSL_CUSTOM_EXTENSION *, p))) - -#define sk_SSL_CUSTOM_EXTENSION_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk)) - -#define sk_SSL_CUSTOM_EXTENSION_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(SSL_CUSTOM_EXTENSION *), \ - free_func)) - -#define sk_SSL_CUSTOM_EXTENSION_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk), \ - CHECKED_CAST(void *, SSL_CUSTOM_EXTENSION *, p), (where)) - -#define sk_SSL_CUSTOM_EXTENSION_delete(sk, where) \ - ((SSL_CUSTOM_EXTENSION *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk), (where))) - -#define sk_SSL_CUSTOM_EXTENSION_delete_ptr(sk, p) \ - ((SSL_CUSTOM_EXTENSION *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk), \ - CHECKED_CAST(void *, SSL_CUSTOM_EXTENSION *, p))) - -#define sk_SSL_CUSTOM_EXTENSION_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk), \ - (out_index), CHECKED_CAST(void *, SSL_CUSTOM_EXTENSION *, p)) - -#define sk_SSL_CUSTOM_EXTENSION_shift(sk) \ - ((SSL_CUSTOM_EXTENSION *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk))) - -#define sk_SSL_CUSTOM_EXTENSION_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk), \ - CHECKED_CAST(void *, SSL_CUSTOM_EXTENSION *, p)) - -#define sk_SSL_CUSTOM_EXTENSION_pop(sk) \ - ((SSL_CUSTOM_EXTENSION *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk))) - -#define sk_SSL_CUSTOM_EXTENSION_dup(sk) \ - ((STACK_OF(SSL_CUSTOM_EXTENSION) *)sk_dup(CHECKED_CAST( \ - const _STACK *, const STACK_OF(SSL_CUSTOM_EXTENSION) *, sk))) - -#define sk_SSL_CUSTOM_EXTENSION_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk)) - -#define sk_SSL_CUSTOM_EXTENSION_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, \ - const STACK_OF(SSL_CUSTOM_EXTENSION) *, sk)) - -#define sk_SSL_CUSTOM_EXTENSION_set_cmp_func(sk, comp) \ - ((int (*)(const SSL_CUSTOM_EXTENSION **a, const SSL_CUSTOM_EXTENSION **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CUSTOM_EXTENSION) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const SSL_CUSTOM_EXTENSION **a, \ 
- const SSL_CUSTOM_EXTENSION **b), \ - comp))) - -#define sk_SSL_CUSTOM_EXTENSION_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(SSL_CUSTOM_EXTENSION) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SSL_CUSTOM_EXTENSION) *, \ - sk), \ - CHECKED_CAST(void *(*)(void *), \ - SSL_CUSTOM_EXTENSION *(*)(SSL_CUSTOM_EXTENSION *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(SSL_CUSTOM_EXTENSION *), \ - free_func))) - -/* STACK_OF_X509_NAME_ENTRY */ -#define sk_STACK_OF_X509_NAME_ENTRY_new(comp) \ - ((STACK_OF(STACK_OF_X509_NAME_ENTRY) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const STACK_OF_X509_NAME_ENTRY **a, \ - const STACK_OF_X509_NAME_ENTRY **b), \ - comp))) - -#define sk_STACK_OF_X509_NAME_ENTRY_new_null() \ - ((STACK_OF(STACK_OF_X509_NAME_ENTRY) *)sk_new_null()) - -#define sk_STACK_OF_X509_NAME_ENTRY_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, \ - const STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk)) - -#define sk_STACK_OF_X509_NAME_ENTRY_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk)); - -#define sk_STACK_OF_X509_NAME_ENTRY_value(sk, i) \ - ((STACK_OF_X509_NAME_ENTRY *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(STACK_OF_X509_NAME_ENTRY) *, \ - sk), \ - (i))) - -#define sk_STACK_OF_X509_NAME_ENTRY_set(sk, i, p) \ - ((STACK_OF_X509_NAME_ENTRY *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk), (i), \ - CHECKED_CAST(void *, STACK_OF_X509_NAME_ENTRY *, p))) - -#define sk_STACK_OF_X509_NAME_ENTRY_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk)) - -#define sk_STACK_OF_X509_NAME_ENTRY_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(STACK_OF_X509_NAME_ENTRY *), \ - free_func)) - -#define sk_STACK_OF_X509_NAME_ENTRY_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(void *, STACK_OF_X509_NAME_ENTRY *, p), (where)) - -#define sk_STACK_OF_X509_NAME_ENTRY_delete(sk, where) \ - ((STACK_OF_X509_NAME_ENTRY *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk), \ - (where))) - -#define sk_STACK_OF_X509_NAME_ENTRY_delete_ptr(sk, p) \ - ((STACK_OF_X509_NAME_ENTRY *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(void *, STACK_OF_X509_NAME_ENTRY *, p))) - -#define sk_STACK_OF_X509_NAME_ENTRY_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk), \ - (out_index), CHECKED_CAST(void *, STACK_OF_X509_NAME_ENTRY *, p)) - -#define sk_STACK_OF_X509_NAME_ENTRY_shift(sk) \ - ((STACK_OF_X509_NAME_ENTRY *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk))) - -#define sk_STACK_OF_X509_NAME_ENTRY_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(void *, STACK_OF_X509_NAME_ENTRY *, p)) - -#define sk_STACK_OF_X509_NAME_ENTRY_pop(sk) \ - ((STACK_OF_X509_NAME_ENTRY *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk))) - -#define sk_STACK_OF_X509_NAME_ENTRY_dup(sk) \ - ((STACK_OF(STACK_OF_X509_NAME_ENTRY) *)sk_dup(CHECKED_CAST( \ - const _STACK *, const STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk))) - -#define sk_STACK_OF_X509_NAME_ENTRY_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk)) - -#define 
sk_STACK_OF_X509_NAME_ENTRY_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, \ - const STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk)) - -#define sk_STACK_OF_X509_NAME_ENTRY_set_cmp_func(sk, comp) \ - ((int (*)(const STACK_OF_X509_NAME_ENTRY **a, \ - const STACK_OF_X509_NAME_ENTRY **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(STACK_OF_X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const STACK_OF_X509_NAME_ENTRY **a, \ - const STACK_OF_X509_NAME_ENTRY **b), \ - comp))) - -#define sk_STACK_OF_X509_NAME_ENTRY_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(STACK_OF_X509_NAME_ENTRY) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(STACK_OF_X509_NAME_ENTRY) *, \ - sk), \ - CHECKED_CAST(void *(*)(void *), \ - STACK_OF_X509_NAME_ENTRY *(*)(STACK_OF_X509_NAME_ENTRY *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(STACK_OF_X509_NAME_ENTRY *), \ - free_func))) - -/* SXNETID */ -#define sk_SXNETID_new(comp) \ - ((STACK_OF(SXNETID) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const SXNETID **a, const SXNETID **b), comp))) - -#define sk_SXNETID_new_null() ((STACK_OF(SXNETID) *)sk_new_null()) - -#define sk_SXNETID_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(SXNETID) *, sk)) - -#define sk_SXNETID_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk)); - -#define sk_SXNETID_value(sk, i) \ - ((SXNETID *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SXNETID) *, sk), (i))) - -#define sk_SXNETID_set(sk, i, p) \ - ((SXNETID *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk), (i), \ - CHECKED_CAST(void *, SXNETID *, p))) - -#define sk_SXNETID_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk)) - -#define sk_SXNETID_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(SXNETID *), free_func)) - -#define sk_SXNETID_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk), \ - CHECKED_CAST(void *, SXNETID *, p), (where)) - -#define sk_SXNETID_delete(sk, where) \ - ((SXNETID *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk), \ - (where))) - -#define sk_SXNETID_delete_ptr(sk, p) \ - ((SXNETID *)sk_delete_ptr(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk), \ - CHECKED_CAST(void *, SXNETID *, p))) - -#define sk_SXNETID_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk), (out_index), \ - CHECKED_CAST(void *, SXNETID *, p)) - -#define sk_SXNETID_shift(sk) \ - ((SXNETID *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk))) - -#define sk_SXNETID_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk), \ - CHECKED_CAST(void *, SXNETID *, p)) - -#define sk_SXNETID_pop(sk) \ - ((SXNETID *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk))) - -#define sk_SXNETID_dup(sk) \ - ((STACK_OF(SXNETID) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SXNETID) *, sk))) - -#define sk_SXNETID_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk)) - -#define sk_SXNETID_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(SXNETID) *, sk)) - -#define sk_SXNETID_set_cmp_func(sk, comp) \ - ((int (*)(const SXNETID **a, const SXNETID **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(SXNETID) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const SXNETID **a, const SXNETID **b), comp))) - -#define sk_SXNETID_deep_copy(sk, copy_func, free_func) \ - 
((STACK_OF(SXNETID) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SXNETID) *, sk), \ - CHECKED_CAST(void *(*)(void *), SXNETID *(*)(SXNETID *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(SXNETID *), free_func))) - -/* X509 */ -#define sk_X509_new(comp) \ - ((STACK_OF(X509) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const X509 **a, const X509 **b), comp))) - -#define sk_X509_new_null() ((STACK_OF(X509) *)sk_new_null()) - -#define sk_X509_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509) *, sk)) - -#define sk_X509_zero(sk) sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk)); - -#define sk_X509_value(sk, i) \ - ((X509 *)sk_value(CHECKED_CAST(const _STACK *, const STACK_OF(X509) *, sk), \ - (i))) - -#define sk_X509_set(sk, i, p) \ - ((X509 *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk), (i), \ - CHECKED_CAST(void *, X509 *, p))) - -#define sk_X509_free(sk) sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk)) - -#define sk_X509_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509 *), free_func)) - -#define sk_X509_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk), \ - CHECKED_CAST(void *, X509 *, p), (where)) - -#define sk_X509_delete(sk, where) \ - ((X509 *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk), (where))) - -#define sk_X509_delete_ptr(sk, p) \ - ((X509 *)sk_delete_ptr(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk), \ - CHECKED_CAST(void *, X509 *, p))) - -#define sk_X509_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk), (out_index), \ - CHECKED_CAST(void *, X509 *, p)) - -#define sk_X509_shift(sk) \ - ((X509 *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk))) - -#define sk_X509_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk), \ - CHECKED_CAST(void *, X509 *, p)) - -#define sk_X509_pop(sk) \ - ((X509 *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk))) - -#define sk_X509_dup(sk) \ - ((STACK_OF(X509) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509) *, sk))) - -#define sk_X509_sort(sk) sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk)) - -#define sk_X509_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509) *, sk)) - -#define sk_X509_set_cmp_func(sk, comp) \ - ((int (*)(const X509 **a, const X509 **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const X509 **a, const X509 **b), \ - comp))) - -#define sk_X509_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509 *(*)(X509 *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509 *), free_func))) - -/* X509V3_EXT_METHOD */ -#define sk_X509V3_EXT_METHOD_new(comp) \ - ((STACK_OF(X509V3_EXT_METHOD) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const X509V3_EXT_METHOD **a, const X509V3_EXT_METHOD **b), \ - comp))) - -#define sk_X509V3_EXT_METHOD_new_null() \ - ((STACK_OF(X509V3_EXT_METHOD) *)sk_new_null()) - -#define sk_X509V3_EXT_METHOD_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509V3_EXT_METHOD) *, sk)) - -#define sk_X509V3_EXT_METHOD_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk)); - -#define sk_X509V3_EXT_METHOD_value(sk, i) \ - ((X509V3_EXT_METHOD *)sk_value( \ - CHECKED_CAST(const _STACK *, const 
STACK_OF(X509V3_EXT_METHOD) *, sk), \ - (i))) - -#define sk_X509V3_EXT_METHOD_set(sk, i, p) \ - ((X509V3_EXT_METHOD *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk), (i), \ - CHECKED_CAST(void *, X509V3_EXT_METHOD *, p))) - -#define sk_X509V3_EXT_METHOD_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk)) - -#define sk_X509V3_EXT_METHOD_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509V3_EXT_METHOD *), \ - free_func)) - -#define sk_X509V3_EXT_METHOD_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk), \ - CHECKED_CAST(void *, X509V3_EXT_METHOD *, p), (where)) - -#define sk_X509V3_EXT_METHOD_delete(sk, where) \ - ((X509V3_EXT_METHOD *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk), (where))) - -#define sk_X509V3_EXT_METHOD_delete_ptr(sk, p) \ - ((X509V3_EXT_METHOD *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk), \ - CHECKED_CAST(void *, X509V3_EXT_METHOD *, p))) - -#define sk_X509V3_EXT_METHOD_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk), \ - (out_index), CHECKED_CAST(void *, X509V3_EXT_METHOD *, p)) - -#define sk_X509V3_EXT_METHOD_shift(sk) \ - ((X509V3_EXT_METHOD *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk))) - -#define sk_X509V3_EXT_METHOD_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk), \ - CHECKED_CAST(void *, X509V3_EXT_METHOD *, p)) - -#define sk_X509V3_EXT_METHOD_pop(sk) \ - ((X509V3_EXT_METHOD *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk))) - -#define sk_X509V3_EXT_METHOD_dup(sk) \ - ((STACK_OF(X509V3_EXT_METHOD) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509V3_EXT_METHOD) *, sk))) - -#define sk_X509V3_EXT_METHOD_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk)) - -#define sk_X509V3_EXT_METHOD_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509V3_EXT_METHOD) *, sk)) - -#define sk_X509V3_EXT_METHOD_set_cmp_func(sk, comp) \ - ((int (*)(const X509V3_EXT_METHOD **a, const X509V3_EXT_METHOD **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509V3_EXT_METHOD) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const X509V3_EXT_METHOD **a, \ - const X509V3_EXT_METHOD **b), \ - comp))) - -#define sk_X509V3_EXT_METHOD_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509V3_EXT_METHOD) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509V3_EXT_METHOD) *, sk), \ - CHECKED_CAST(void *(*)(void *), \ - X509V3_EXT_METHOD *(*)(X509V3_EXT_METHOD *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509V3_EXT_METHOD *), \ - free_func))) - -/* X509_ALGOR */ -#define sk_X509_ALGOR_new(comp) \ - ((STACK_OF(X509_ALGOR) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const X509_ALGOR **a, const X509_ALGOR **b), \ - comp))) - -#define sk_X509_ALGOR_new_null() ((STACK_OF(X509_ALGOR) *)sk_new_null()) - -#define sk_X509_ALGOR_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_ALGOR) *, sk)) - -#define sk_X509_ALGOR_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk)); - -#define sk_X509_ALGOR_value(sk, i) \ - ((X509_ALGOR *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_ALGOR) *, sk), (i))) - -#define sk_X509_ALGOR_set(sk, i, p) \ - ((X509_ALGOR 
*)sk_set(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk), \ - (i), CHECKED_CAST(void *, X509_ALGOR *, p))) - -#define sk_X509_ALGOR_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk)) - -#define sk_X509_ALGOR_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_ALGOR *), free_func)) - -#define sk_X509_ALGOR_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk), \ - CHECKED_CAST(void *, X509_ALGOR *, p), (where)) - -#define sk_X509_ALGOR_delete(sk, where) \ - ((X509_ALGOR *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk), \ - (where))) - -#define sk_X509_ALGOR_delete_ptr(sk, p) \ - ((X509_ALGOR *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk), \ - CHECKED_CAST(void *, X509_ALGOR *, p))) - -#define sk_X509_ALGOR_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_ALGOR *, p)) - -#define sk_X509_ALGOR_shift(sk) \ - ((X509_ALGOR *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk))) - -#define sk_X509_ALGOR_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk), \ - CHECKED_CAST(void *, X509_ALGOR *, p)) - -#define sk_X509_ALGOR_pop(sk) \ - ((X509_ALGOR *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk))) - -#define sk_X509_ALGOR_dup(sk) \ - ((STACK_OF(X509_ALGOR) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_ALGOR) *, sk))) - -#define sk_X509_ALGOR_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk)) - -#define sk_X509_ALGOR_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509_ALGOR) *, sk)) - -#define sk_X509_ALGOR_set_cmp_func(sk, comp) \ - ((int (*)(const X509_ALGOR **a, const X509_ALGOR **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ALGOR) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_ALGOR **a, const X509_ALGOR **b), \ - comp))) - -#define sk_X509_ALGOR_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_ALGOR) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_ALGOR) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_ALGOR *(*)(X509_ALGOR *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_ALGOR *), free_func))) - -/* X509_ATTRIBUTE */ -#define sk_X509_ATTRIBUTE_new(comp) \ - ((STACK_OF(X509_ATTRIBUTE) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const X509_ATTRIBUTE **a, const X509_ATTRIBUTE **b), comp))) - -#define sk_X509_ATTRIBUTE_new_null() ((STACK_OF(X509_ATTRIBUTE) *)sk_new_null()) - -#define sk_X509_ATTRIBUTE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_ATTRIBUTE) *, sk)) - -#define sk_X509_ATTRIBUTE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk)); - -#define sk_X509_ATTRIBUTE_value(sk, i) \ - ((X509_ATTRIBUTE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_ATTRIBUTE) *, sk), \ - (i))) - -#define sk_X509_ATTRIBUTE_set(sk, i, p) \ - ((X509_ATTRIBUTE *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk), (i), \ - CHECKED_CAST(void *, X509_ATTRIBUTE *, p))) - -#define sk_X509_ATTRIBUTE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk)) - -#define sk_X509_ATTRIBUTE_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_ATTRIBUTE *), free_func)) 
- -#define sk_X509_ATTRIBUTE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk), \ - CHECKED_CAST(void *, X509_ATTRIBUTE *, p), (where)) - -#define sk_X509_ATTRIBUTE_delete(sk, where) \ - ((X509_ATTRIBUTE *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk), (where))) - -#define sk_X509_ATTRIBUTE_delete_ptr(sk, p) \ - ((X509_ATTRIBUTE *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk), \ - CHECKED_CAST(void *, X509_ATTRIBUTE *, p))) - -#define sk_X509_ATTRIBUTE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_ATTRIBUTE *, p)) - -#define sk_X509_ATTRIBUTE_shift(sk) \ - ((X509_ATTRIBUTE *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk))) - -#define sk_X509_ATTRIBUTE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk), \ - CHECKED_CAST(void *, X509_ATTRIBUTE *, p)) - -#define sk_X509_ATTRIBUTE_pop(sk) \ - ((X509_ATTRIBUTE *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk))) - -#define sk_X509_ATTRIBUTE_dup(sk) \ - ((STACK_OF(X509_ATTRIBUTE) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_ATTRIBUTE) *, sk))) - -#define sk_X509_ATTRIBUTE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk)) - -#define sk_X509_ATTRIBUTE_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_ATTRIBUTE) *, sk)) - -#define sk_X509_ATTRIBUTE_set_cmp_func(sk, comp) \ - ((int (*)(const X509_ATTRIBUTE **a, const X509_ATTRIBUTE **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_ATTRIBUTE) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const X509_ATTRIBUTE **a, \ - const X509_ATTRIBUTE **b), \ - comp))) - -#define sk_X509_ATTRIBUTE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_ATTRIBUTE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_ATTRIBUTE) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_ATTRIBUTE *(*)(X509_ATTRIBUTE *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_ATTRIBUTE *), free_func))) - -/* X509_CRL */ -#define sk_X509_CRL_new(comp) \ - ((STACK_OF(X509_CRL) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const X509_CRL **a, const X509_CRL **b), comp))) - -#define sk_X509_CRL_new_null() ((STACK_OF(X509_CRL) *)sk_new_null()) - -#define sk_X509_CRL_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_CRL) *, sk)) - -#define sk_X509_CRL_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk)); - -#define sk_X509_CRL_value(sk, i) \ - ((X509_CRL *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_CRL) *, sk), (i))) - -#define sk_X509_CRL_set(sk, i, p) \ - ((X509_CRL *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk), (i), \ - CHECKED_CAST(void *, X509_CRL *, p))) - -#define sk_X509_CRL_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk)) - -#define sk_X509_CRL_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_CRL *), free_func)) - -#define sk_X509_CRL_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk), \ - CHECKED_CAST(void *, X509_CRL *, p), (where)) - -#define sk_X509_CRL_delete(sk, where) \ - ((X509_CRL *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk), \ - (where))) - -#define sk_X509_CRL_delete_ptr(sk, p) \ - ((X509_CRL 
*)sk_delete_ptr(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk), \ - CHECKED_CAST(void *, X509_CRL *, p))) - -#define sk_X509_CRL_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_CRL *, p)) - -#define sk_X509_CRL_shift(sk) \ - ((X509_CRL *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk))) - -#define sk_X509_CRL_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk), \ - CHECKED_CAST(void *, X509_CRL *, p)) - -#define sk_X509_CRL_pop(sk) \ - ((X509_CRL *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk))) - -#define sk_X509_CRL_dup(sk) \ - ((STACK_OF(X509_CRL) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_CRL) *, sk))) - -#define sk_X509_CRL_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk)) - -#define sk_X509_CRL_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509_CRL) *, sk)) - -#define sk_X509_CRL_set_cmp_func(sk, comp) \ - ((int (*)(const X509_CRL **a, const X509_CRL **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_CRL) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_CRL **a, const X509_CRL **b), comp))) - -#define sk_X509_CRL_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_CRL) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_CRL) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_CRL *(*)(X509_CRL *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_CRL *), free_func))) - -/* X509_EXTENSION */ -#define sk_X509_EXTENSION_new(comp) \ - ((STACK_OF(X509_EXTENSION) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const X509_EXTENSION **a, const X509_EXTENSION **b), comp))) - -#define sk_X509_EXTENSION_new_null() ((STACK_OF(X509_EXTENSION) *)sk_new_null()) - -#define sk_X509_EXTENSION_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_EXTENSION) *, sk)) - -#define sk_X509_EXTENSION_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk)); - -#define sk_X509_EXTENSION_value(sk, i) \ - ((X509_EXTENSION *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_EXTENSION) *, sk), \ - (i))) - -#define sk_X509_EXTENSION_set(sk, i, p) \ - ((X509_EXTENSION *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk), (i), \ - CHECKED_CAST(void *, X509_EXTENSION *, p))) - -#define sk_X509_EXTENSION_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk)) - -#define sk_X509_EXTENSION_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_EXTENSION *), free_func)) - -#define sk_X509_EXTENSION_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk), \ - CHECKED_CAST(void *, X509_EXTENSION *, p), (where)) - -#define sk_X509_EXTENSION_delete(sk, where) \ - ((X509_EXTENSION *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk), (where))) - -#define sk_X509_EXTENSION_delete_ptr(sk, p) \ - ((X509_EXTENSION *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk), \ - CHECKED_CAST(void *, X509_EXTENSION *, p))) - -#define sk_X509_EXTENSION_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_EXTENSION *, p)) - -#define sk_X509_EXTENSION_shift(sk) \ - ((X509_EXTENSION *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk))) - 
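For context on the file being deleted above: these sk_FOO_* macros wrap BoringSSL's generic _STACK container with per-type CHECKED_CASTs so callers get compile-time type checking. Below is a minimal, illustrative sketch (not part of the patch) of how the X509_EXTENSION family shown in this hunk was used before this update; it assumes the pre-update headers (<openssl/stack.h>, <openssl/x509.h>) are on the include path and uses only macros that appear in the deleted text.

#include <openssl/stack.h>
#include <openssl/x509.h>

static void extension_stack_sketch(X509_EXTENSION *ext) {
  /* Allocate an empty stack of X509_EXTENSION pointers (no comparator). */
  STACK_OF(X509_EXTENSION) *exts = sk_X509_EXTENSION_new_null();
  if (exts == NULL) {
    return;
  }
  /* sk_X509_EXTENSION_push expands to the generic sk_push plus CHECKED_CASTs,
   * so passing anything other than an X509_EXTENSION * fails to compile. */
  if (!sk_X509_EXTENSION_push(exts, ext)) {
    sk_X509_EXTENSION_free(exts);
    return;
  }
  /* Indexed access goes through sk_X509_EXTENSION_num / _value. */
  for (size_t i = 0; i < sk_X509_EXTENSION_num(exts); i++) {
    X509_EXTENSION *e = sk_X509_EXTENSION_value(exts, i);
    (void)e;
  }
  /* Free only the stack; the caller still owns |ext|, so _pop_free is not
   * used here. */
  sk_X509_EXTENSION_free(exts);
}
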
-#define sk_X509_EXTENSION_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk), \ - CHECKED_CAST(void *, X509_EXTENSION *, p)) - -#define sk_X509_EXTENSION_pop(sk) \ - ((X509_EXTENSION *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk))) - -#define sk_X509_EXTENSION_dup(sk) \ - ((STACK_OF(X509_EXTENSION) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_EXTENSION) *, sk))) - -#define sk_X509_EXTENSION_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk)) - -#define sk_X509_EXTENSION_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_EXTENSION) *, sk)) - -#define sk_X509_EXTENSION_set_cmp_func(sk, comp) \ - ((int (*)(const X509_EXTENSION **a, const X509_EXTENSION **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_EXTENSION) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const X509_EXTENSION **a, \ - const X509_EXTENSION **b), \ - comp))) - -#define sk_X509_EXTENSION_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_EXTENSION) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_EXTENSION) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_EXTENSION *(*)(X509_EXTENSION *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_EXTENSION *), free_func))) - -/* X509_INFO */ -#define sk_X509_INFO_new(comp) \ - ((STACK_OF(X509_INFO) *)sk_new( \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_INFO **a, const X509_INFO **b), comp))) - -#define sk_X509_INFO_new_null() ((STACK_OF(X509_INFO) *)sk_new_null()) - -#define sk_X509_INFO_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_INFO) *, sk)) - -#define sk_X509_INFO_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk)); - -#define sk_X509_INFO_value(sk, i) \ - ((X509_INFO *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_INFO) *, sk), (i))) - -#define sk_X509_INFO_set(sk, i, p) \ - ((X509_INFO *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk), (i), \ - CHECKED_CAST(void *, X509_INFO *, p))) - -#define sk_X509_INFO_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk)) - -#define sk_X509_INFO_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_INFO *), free_func)) - -#define sk_X509_INFO_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk), \ - CHECKED_CAST(void *, X509_INFO *, p), (where)) - -#define sk_X509_INFO_delete(sk, where) \ - ((X509_INFO *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk), \ - (where))) - -#define sk_X509_INFO_delete_ptr(sk, p) \ - ((X509_INFO *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk), \ - CHECKED_CAST(void *, X509_INFO *, p))) - -#define sk_X509_INFO_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_INFO *, p)) - -#define sk_X509_INFO_shift(sk) \ - ((X509_INFO *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk))) - -#define sk_X509_INFO_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk), \ - CHECKED_CAST(void *, X509_INFO *, p)) - -#define sk_X509_INFO_pop(sk) \ - ((X509_INFO *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk))) - -#define sk_X509_INFO_dup(sk) \ - ((STACK_OF(X509_INFO) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_INFO) *, sk))) - -#define sk_X509_INFO_sort(sk) \ - 
sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk)) - -#define sk_X509_INFO_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509_INFO) *, sk)) - -#define sk_X509_INFO_set_cmp_func(sk, comp) \ - ((int (*)(const X509_INFO **a, const X509_INFO **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_INFO) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_INFO **a, const X509_INFO **b), comp))) - -#define sk_X509_INFO_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_INFO) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_INFO) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_INFO *(*)(X509_INFO *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_INFO *), free_func))) - -/* X509_LOOKUP */ -#define sk_X509_LOOKUP_new(comp) \ - ((STACK_OF(X509_LOOKUP) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const X509_LOOKUP **a, const X509_LOOKUP **b), \ - comp))) - -#define sk_X509_LOOKUP_new_null() ((STACK_OF(X509_LOOKUP) *)sk_new_null()) - -#define sk_X509_LOOKUP_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_LOOKUP) *, sk)) - -#define sk_X509_LOOKUP_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk)); - -#define sk_X509_LOOKUP_value(sk, i) \ - ((X509_LOOKUP *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_LOOKUP) *, sk), (i))) - -#define sk_X509_LOOKUP_set(sk, i, p) \ - ((X509_LOOKUP *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk), \ - (i), CHECKED_CAST(void *, X509_LOOKUP *, p))) - -#define sk_X509_LOOKUP_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk)) - -#define sk_X509_LOOKUP_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_LOOKUP *), free_func)) - -#define sk_X509_LOOKUP_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk), \ - CHECKED_CAST(void *, X509_LOOKUP *, p), (where)) - -#define sk_X509_LOOKUP_delete(sk, where) \ - ((X509_LOOKUP *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk), (where))) - -#define sk_X509_LOOKUP_delete_ptr(sk, p) \ - ((X509_LOOKUP *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk), \ - CHECKED_CAST(void *, X509_LOOKUP *, p))) - -#define sk_X509_LOOKUP_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_LOOKUP *, p)) - -#define sk_X509_LOOKUP_shift(sk) \ - ((X509_LOOKUP *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk))) - -#define sk_X509_LOOKUP_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk), \ - CHECKED_CAST(void *, X509_LOOKUP *, p)) - -#define sk_X509_LOOKUP_pop(sk) \ - ((X509_LOOKUP *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk))) - -#define sk_X509_LOOKUP_dup(sk) \ - ((STACK_OF(X509_LOOKUP) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_LOOKUP) *, sk))) - -#define sk_X509_LOOKUP_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk)) - -#define sk_X509_LOOKUP_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509_LOOKUP) *, sk)) - -#define sk_X509_LOOKUP_set_cmp_func(sk, comp) \ - ((int (*)(const X509_LOOKUP **a, const X509_LOOKUP **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_LOOKUP) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_LOOKUP **a, const X509_LOOKUP **b), \ - comp))) - 
-#define sk_X509_LOOKUP_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_LOOKUP) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_LOOKUP) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_LOOKUP *(*)(X509_LOOKUP *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_LOOKUP *), free_func))) - -/* X509_NAME */ -#define sk_X509_NAME_new(comp) \ - ((STACK_OF(X509_NAME) *)sk_new( \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_NAME **a, const X509_NAME **b), comp))) - -#define sk_X509_NAME_new_null() ((STACK_OF(X509_NAME) *)sk_new_null()) - -#define sk_X509_NAME_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME) *, sk)) - -#define sk_X509_NAME_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk)); - -#define sk_X509_NAME_value(sk, i) \ - ((X509_NAME *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME) *, sk), (i))) - -#define sk_X509_NAME_set(sk, i, p) \ - ((X509_NAME *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk), (i), \ - CHECKED_CAST(void *, X509_NAME *, p))) - -#define sk_X509_NAME_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk)) - -#define sk_X509_NAME_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_NAME *), free_func)) - -#define sk_X509_NAME_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk), \ - CHECKED_CAST(void *, X509_NAME *, p), (where)) - -#define sk_X509_NAME_delete(sk, where) \ - ((X509_NAME *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk), \ - (where))) - -#define sk_X509_NAME_delete_ptr(sk, p) \ - ((X509_NAME *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk), \ - CHECKED_CAST(void *, X509_NAME *, p))) - -#define sk_X509_NAME_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_NAME *, p)) - -#define sk_X509_NAME_shift(sk) \ - ((X509_NAME *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk))) - -#define sk_X509_NAME_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk), \ - CHECKED_CAST(void *, X509_NAME *, p)) - -#define sk_X509_NAME_pop(sk) \ - ((X509_NAME *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk))) - -#define sk_X509_NAME_dup(sk) \ - ((STACK_OF(X509_NAME) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME) *, sk))) - -#define sk_X509_NAME_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk)) - -#define sk_X509_NAME_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME) *, sk)) - -#define sk_X509_NAME_set_cmp_func(sk, comp) \ - ((int (*)(const X509_NAME **a, const X509_NAME **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_NAME **a, const X509_NAME **b), comp))) - -#define sk_X509_NAME_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_NAME) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_NAME *(*)(X509_NAME *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_NAME *), free_func))) - -/* X509_NAME_ENTRY */ -#define sk_X509_NAME_ENTRY_new(comp) \ - ((STACK_OF(X509_NAME_ENTRY) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const X509_NAME_ENTRY **a, const X509_NAME_ENTRY **b), comp))) - -#define 
sk_X509_NAME_ENTRY_new_null() \ - ((STACK_OF(X509_NAME_ENTRY) *)sk_new_null()) - -#define sk_X509_NAME_ENTRY_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME_ENTRY) *, sk)) - -#define sk_X509_NAME_ENTRY_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk)); - -#define sk_X509_NAME_ENTRY_value(sk, i) \ - ((X509_NAME_ENTRY *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME_ENTRY) *, sk), \ - (i))) - -#define sk_X509_NAME_ENTRY_set(sk, i, p) \ - ((X509_NAME_ENTRY *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk), (i), \ - CHECKED_CAST(void *, X509_NAME_ENTRY *, p))) - -#define sk_X509_NAME_ENTRY_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk)) - -#define sk_X509_NAME_ENTRY_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_NAME_ENTRY *), free_func)) - -#define sk_X509_NAME_ENTRY_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(void *, X509_NAME_ENTRY *, p), (where)) - -#define sk_X509_NAME_ENTRY_delete(sk, where) \ - ((X509_NAME_ENTRY *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk), (where))) - -#define sk_X509_NAME_ENTRY_delete_ptr(sk, p) \ - ((X509_NAME_ENTRY *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(void *, X509_NAME_ENTRY *, p))) - -#define sk_X509_NAME_ENTRY_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk), \ - (out_index), CHECKED_CAST(void *, X509_NAME_ENTRY *, p)) - -#define sk_X509_NAME_ENTRY_shift(sk) \ - ((X509_NAME_ENTRY *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk))) - -#define sk_X509_NAME_ENTRY_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(void *, X509_NAME_ENTRY *, p)) - -#define sk_X509_NAME_ENTRY_pop(sk) \ - ((X509_NAME_ENTRY *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk))) - -#define sk_X509_NAME_ENTRY_dup(sk) \ - ((STACK_OF(X509_NAME_ENTRY) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME_ENTRY) *, sk))) - -#define sk_X509_NAME_ENTRY_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk)) - -#define sk_X509_NAME_ENTRY_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME_ENTRY) *, sk)) - -#define sk_X509_NAME_ENTRY_set_cmp_func(sk, comp) \ - ((int (*)(const X509_NAME_ENTRY **a, const X509_NAME_ENTRY **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const X509_NAME_ENTRY **a, \ - const X509_NAME_ENTRY **b), \ - comp))) - -#define sk_X509_NAME_ENTRY_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_NAME_ENTRY) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_NAME_ENTRY) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_NAME_ENTRY *(*)(X509_NAME_ENTRY *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_NAME_ENTRY *), free_func))) - -/* X509_OBJECT */ -#define sk_X509_OBJECT_new(comp) \ - ((STACK_OF(X509_OBJECT) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const X509_OBJECT **a, const X509_OBJECT **b), \ - comp))) - -#define sk_X509_OBJECT_new_null() ((STACK_OF(X509_OBJECT) *)sk_new_null()) - -#define sk_X509_OBJECT_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const 
STACK_OF(X509_OBJECT) *, sk)) - -#define sk_X509_OBJECT_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk)); - -#define sk_X509_OBJECT_value(sk, i) \ - ((X509_OBJECT *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_OBJECT) *, sk), (i))) - -#define sk_X509_OBJECT_set(sk, i, p) \ - ((X509_OBJECT *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk), \ - (i), CHECKED_CAST(void *, X509_OBJECT *, p))) - -#define sk_X509_OBJECT_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk)) - -#define sk_X509_OBJECT_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_OBJECT *), free_func)) - -#define sk_X509_OBJECT_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk), \ - CHECKED_CAST(void *, X509_OBJECT *, p), (where)) - -#define sk_X509_OBJECT_delete(sk, where) \ - ((X509_OBJECT *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk), (where))) - -#define sk_X509_OBJECT_delete_ptr(sk, p) \ - ((X509_OBJECT *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk), \ - CHECKED_CAST(void *, X509_OBJECT *, p))) - -#define sk_X509_OBJECT_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_OBJECT *, p)) - -#define sk_X509_OBJECT_shift(sk) \ - ((X509_OBJECT *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk))) - -#define sk_X509_OBJECT_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk), \ - CHECKED_CAST(void *, X509_OBJECT *, p)) - -#define sk_X509_OBJECT_pop(sk) \ - ((X509_OBJECT *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk))) - -#define sk_X509_OBJECT_dup(sk) \ - ((STACK_OF(X509_OBJECT) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_OBJECT) *, sk))) - -#define sk_X509_OBJECT_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk)) - -#define sk_X509_OBJECT_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509_OBJECT) *, sk)) - -#define sk_X509_OBJECT_set_cmp_func(sk, comp) \ - ((int (*)(const X509_OBJECT **a, const X509_OBJECT **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_OBJECT) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_OBJECT **a, const X509_OBJECT **b), \ - comp))) - -#define sk_X509_OBJECT_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_OBJECT) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_OBJECT) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_OBJECT *(*)(X509_OBJECT *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_OBJECT *), free_func))) - -/* X509_POLICY_DATA */ -#define sk_X509_POLICY_DATA_new(comp) \ - ((STACK_OF(X509_POLICY_DATA) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const X509_POLICY_DATA **a, const X509_POLICY_DATA **b), comp))) - -#define sk_X509_POLICY_DATA_new_null() \ - ((STACK_OF(X509_POLICY_DATA) *)sk_new_null()) - -#define sk_X509_POLICY_DATA_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_DATA) *, sk)) - -#define sk_X509_POLICY_DATA_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk)); - -#define sk_X509_POLICY_DATA_value(sk, i) \ - ((X509_POLICY_DATA *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_DATA) *, sk), \ - (i))) - -#define sk_X509_POLICY_DATA_set(sk, i, p) \ - ((X509_POLICY_DATA *)sk_set( \ - 
CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk), (i), \ - CHECKED_CAST(void *, X509_POLICY_DATA *, p))) - -#define sk_X509_POLICY_DATA_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk)) - -#define sk_X509_POLICY_DATA_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_POLICY_DATA *), free_func)) - -#define sk_X509_POLICY_DATA_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk), \ - CHECKED_CAST(void *, X509_POLICY_DATA *, p), (where)) - -#define sk_X509_POLICY_DATA_delete(sk, where) \ - ((X509_POLICY_DATA *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk), (where))) - -#define sk_X509_POLICY_DATA_delete_ptr(sk, p) \ - ((X509_POLICY_DATA *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk), \ - CHECKED_CAST(void *, X509_POLICY_DATA *, p))) - -#define sk_X509_POLICY_DATA_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk), \ - (out_index), CHECKED_CAST(void *, X509_POLICY_DATA *, p)) - -#define sk_X509_POLICY_DATA_shift(sk) \ - ((X509_POLICY_DATA *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk))) - -#define sk_X509_POLICY_DATA_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk), \ - CHECKED_CAST(void *, X509_POLICY_DATA *, p)) - -#define sk_X509_POLICY_DATA_pop(sk) \ - ((X509_POLICY_DATA *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk))) - -#define sk_X509_POLICY_DATA_dup(sk) \ - ((STACK_OF(X509_POLICY_DATA) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_DATA) *, sk))) - -#define sk_X509_POLICY_DATA_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk)) - -#define sk_X509_POLICY_DATA_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_DATA) *, sk)) - -#define sk_X509_POLICY_DATA_set_cmp_func(sk, comp) \ - ((int (*)(const X509_POLICY_DATA **a, const X509_POLICY_DATA **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_DATA) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const X509_POLICY_DATA **a, \ - const X509_POLICY_DATA **b), \ - comp))) - -#define sk_X509_POLICY_DATA_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_POLICY_DATA) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_DATA) *, sk), \ - CHECKED_CAST(void *(*)(void *), \ - X509_POLICY_DATA *(*)(X509_POLICY_DATA *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_POLICY_DATA *), \ - free_func))) - -/* X509_POLICY_NODE */ -#define sk_X509_POLICY_NODE_new(comp) \ - ((STACK_OF(X509_POLICY_NODE) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const X509_POLICY_NODE **a, const X509_POLICY_NODE **b), comp))) - -#define sk_X509_POLICY_NODE_new_null() \ - ((STACK_OF(X509_POLICY_NODE) *)sk_new_null()) - -#define sk_X509_POLICY_NODE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_NODE) *, sk)) - -#define sk_X509_POLICY_NODE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk)); - -#define sk_X509_POLICY_NODE_value(sk, i) \ - ((X509_POLICY_NODE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_NODE) *, sk), \ - (i))) - -#define sk_X509_POLICY_NODE_set(sk, i, p) \ - ((X509_POLICY_NODE *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk), (i), \ - 
CHECKED_CAST(void *, X509_POLICY_NODE *, p))) - -#define sk_X509_POLICY_NODE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk)) - -#define sk_X509_POLICY_NODE_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_POLICY_NODE *), free_func)) - -#define sk_X509_POLICY_NODE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk), \ - CHECKED_CAST(void *, X509_POLICY_NODE *, p), (where)) - -#define sk_X509_POLICY_NODE_delete(sk, where) \ - ((X509_POLICY_NODE *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk), (where))) - -#define sk_X509_POLICY_NODE_delete_ptr(sk, p) \ - ((X509_POLICY_NODE *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk), \ - CHECKED_CAST(void *, X509_POLICY_NODE *, p))) - -#define sk_X509_POLICY_NODE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk), \ - (out_index), CHECKED_CAST(void *, X509_POLICY_NODE *, p)) - -#define sk_X509_POLICY_NODE_shift(sk) \ - ((X509_POLICY_NODE *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk))) - -#define sk_X509_POLICY_NODE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk), \ - CHECKED_CAST(void *, X509_POLICY_NODE *, p)) - -#define sk_X509_POLICY_NODE_pop(sk) \ - ((X509_POLICY_NODE *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk))) - -#define sk_X509_POLICY_NODE_dup(sk) \ - ((STACK_OF(X509_POLICY_NODE) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_NODE) *, sk))) - -#define sk_X509_POLICY_NODE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk)) - -#define sk_X509_POLICY_NODE_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_NODE) *, sk)) - -#define sk_X509_POLICY_NODE_set_cmp_func(sk, comp) \ - ((int (*)(const X509_POLICY_NODE **a, const X509_POLICY_NODE **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_POLICY_NODE) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const X509_POLICY_NODE **a, \ - const X509_POLICY_NODE **b), \ - comp))) - -#define sk_X509_POLICY_NODE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_POLICY_NODE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_POLICY_NODE) *, sk), \ - CHECKED_CAST(void *(*)(void *), \ - X509_POLICY_NODE *(*)(X509_POLICY_NODE *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_POLICY_NODE *), \ - free_func))) - -/* X509_PURPOSE */ -#define sk_X509_PURPOSE_new(comp) \ - ((STACK_OF(X509_PURPOSE) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const X509_PURPOSE **a, const X509_PURPOSE **b), \ - comp))) - -#define sk_X509_PURPOSE_new_null() ((STACK_OF(X509_PURPOSE) *)sk_new_null()) - -#define sk_X509_PURPOSE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_PURPOSE) *, sk)) - -#define sk_X509_PURPOSE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk)); - -#define sk_X509_PURPOSE_value(sk, i) \ - ((X509_PURPOSE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_PURPOSE) *, sk), (i))) - -#define sk_X509_PURPOSE_set(sk, i, p) \ - ((X509_PURPOSE *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk), (i), \ - CHECKED_CAST(void *, X509_PURPOSE *, p))) - -#define sk_X509_PURPOSE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk)) - -#define 
sk_X509_PURPOSE_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_PURPOSE *), free_func)) - -#define sk_X509_PURPOSE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk), \ - CHECKED_CAST(void *, X509_PURPOSE *, p), (where)) - -#define sk_X509_PURPOSE_delete(sk, where) \ - ((X509_PURPOSE *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk), (where))) - -#define sk_X509_PURPOSE_delete_ptr(sk, p) \ - ((X509_PURPOSE *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk), \ - CHECKED_CAST(void *, X509_PURPOSE *, p))) - -#define sk_X509_PURPOSE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_PURPOSE *, p)) - -#define sk_X509_PURPOSE_shift(sk) \ - ((X509_PURPOSE *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk))) - -#define sk_X509_PURPOSE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk), \ - CHECKED_CAST(void *, X509_PURPOSE *, p)) - -#define sk_X509_PURPOSE_pop(sk) \ - ((X509_PURPOSE *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk))) - -#define sk_X509_PURPOSE_dup(sk) \ - ((STACK_OF(X509_PURPOSE) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_PURPOSE) *, sk))) - -#define sk_X509_PURPOSE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk)) - -#define sk_X509_PURPOSE_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509_PURPOSE) *, sk)) - -#define sk_X509_PURPOSE_set_cmp_func(sk, comp) \ - ((int (*)(const X509_PURPOSE **a, const X509_PURPOSE **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_PURPOSE) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_PURPOSE **a, const X509_PURPOSE **b), \ - comp))) - -#define sk_X509_PURPOSE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_PURPOSE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_PURPOSE) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_PURPOSE *(*)(X509_PURPOSE *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_PURPOSE *), free_func))) - -/* X509_REVOKED */ -#define sk_X509_REVOKED_new(comp) \ - ((STACK_OF(X509_REVOKED) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const X509_REVOKED **a, const X509_REVOKED **b), \ - comp))) - -#define sk_X509_REVOKED_new_null() ((STACK_OF(X509_REVOKED) *)sk_new_null()) - -#define sk_X509_REVOKED_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_REVOKED) *, sk)) - -#define sk_X509_REVOKED_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk)); - -#define sk_X509_REVOKED_value(sk, i) \ - ((X509_REVOKED *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_REVOKED) *, sk), (i))) - -#define sk_X509_REVOKED_set(sk, i, p) \ - ((X509_REVOKED *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk), (i), \ - CHECKED_CAST(void *, X509_REVOKED *, p))) - -#define sk_X509_REVOKED_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk)) - -#define sk_X509_REVOKED_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_REVOKED *), free_func)) - -#define sk_X509_REVOKED_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk), \ - CHECKED_CAST(void *, X509_REVOKED *, p), (where)) - 
-#define sk_X509_REVOKED_delete(sk, where) \ - ((X509_REVOKED *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk), (where))) - -#define sk_X509_REVOKED_delete_ptr(sk, p) \ - ((X509_REVOKED *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk), \ - CHECKED_CAST(void *, X509_REVOKED *, p))) - -#define sk_X509_REVOKED_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk), (out_index), \ - CHECKED_CAST(void *, X509_REVOKED *, p)) - -#define sk_X509_REVOKED_shift(sk) \ - ((X509_REVOKED *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk))) - -#define sk_X509_REVOKED_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk), \ - CHECKED_CAST(void *, X509_REVOKED *, p)) - -#define sk_X509_REVOKED_pop(sk) \ - ((X509_REVOKED *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk))) - -#define sk_X509_REVOKED_dup(sk) \ - ((STACK_OF(X509_REVOKED) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_REVOKED) *, sk))) - -#define sk_X509_REVOKED_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk)) - -#define sk_X509_REVOKED_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509_REVOKED) *, sk)) - -#define sk_X509_REVOKED_set_cmp_func(sk, comp) \ - ((int (*)(const X509_REVOKED **a, const X509_REVOKED **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_REVOKED) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_REVOKED **a, const X509_REVOKED **b), \ - comp))) - -#define sk_X509_REVOKED_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_REVOKED) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_REVOKED) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_REVOKED *(*)(X509_REVOKED *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_REVOKED *), free_func))) - -/* X509_TRUST */ -#define sk_X509_TRUST_new(comp) \ - ((STACK_OF(X509_TRUST) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const X509_TRUST **a, const X509_TRUST **b), \ - comp))) - -#define sk_X509_TRUST_new_null() ((STACK_OF(X509_TRUST) *)sk_new_null()) - -#define sk_X509_TRUST_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_TRUST) *, sk)) - -#define sk_X509_TRUST_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk)); - -#define sk_X509_TRUST_value(sk, i) \ - ((X509_TRUST *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_TRUST) *, sk), (i))) - -#define sk_X509_TRUST_set(sk, i, p) \ - ((X509_TRUST *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk), \ - (i), CHECKED_CAST(void *, X509_TRUST *, p))) - -#define sk_X509_TRUST_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk)) - -#define sk_X509_TRUST_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_TRUST *), free_func)) - -#define sk_X509_TRUST_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk), \ - CHECKED_CAST(void *, X509_TRUST *, p), (where)) - -#define sk_X509_TRUST_delete(sk, where) \ - ((X509_TRUST *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk), \ - (where))) - -#define sk_X509_TRUST_delete_ptr(sk, p) \ - ((X509_TRUST *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk), \ - CHECKED_CAST(void *, X509_TRUST *, p))) - -#define sk_X509_TRUST_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, 
sk), (out_index), \ - CHECKED_CAST(void *, X509_TRUST *, p)) - -#define sk_X509_TRUST_shift(sk) \ - ((X509_TRUST *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk))) - -#define sk_X509_TRUST_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk), \ - CHECKED_CAST(void *, X509_TRUST *, p)) - -#define sk_X509_TRUST_pop(sk) \ - ((X509_TRUST *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk))) - -#define sk_X509_TRUST_dup(sk) \ - ((STACK_OF(X509_TRUST) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_TRUST) *, sk))) - -#define sk_X509_TRUST_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk)) - -#define sk_X509_TRUST_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(X509_TRUST) *, sk)) - -#define sk_X509_TRUST_set_cmp_func(sk, comp) \ - ((int (*)(const X509_TRUST **a, const X509_TRUST **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_TRUST) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const X509_TRUST **a, const X509_TRUST **b), \ - comp))) - -#define sk_X509_TRUST_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_TRUST) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_TRUST) *, sk), \ - CHECKED_CAST(void *(*)(void *), X509_TRUST *(*)(X509_TRUST *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_TRUST *), free_func))) - -/* X509_VERIFY_PARAM */ -#define sk_X509_VERIFY_PARAM_new(comp) \ - ((STACK_OF(X509_VERIFY_PARAM) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const X509_VERIFY_PARAM **a, const X509_VERIFY_PARAM **b), \ - comp))) - -#define sk_X509_VERIFY_PARAM_new_null() \ - ((STACK_OF(X509_VERIFY_PARAM) *)sk_new_null()) - -#define sk_X509_VERIFY_PARAM_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(X509_VERIFY_PARAM) *, sk)) - -#define sk_X509_VERIFY_PARAM_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk)); - -#define sk_X509_VERIFY_PARAM_value(sk, i) \ - ((X509_VERIFY_PARAM *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_VERIFY_PARAM) *, sk), \ - (i))) - -#define sk_X509_VERIFY_PARAM_set(sk, i, p) \ - ((X509_VERIFY_PARAM *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk), (i), \ - CHECKED_CAST(void *, X509_VERIFY_PARAM *, p))) - -#define sk_X509_VERIFY_PARAM_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk)) - -#define sk_X509_VERIFY_PARAM_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_VERIFY_PARAM *), \ - free_func)) - -#define sk_X509_VERIFY_PARAM_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk), \ - CHECKED_CAST(void *, X509_VERIFY_PARAM *, p), (where)) - -#define sk_X509_VERIFY_PARAM_delete(sk, where) \ - ((X509_VERIFY_PARAM *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk), (where))) - -#define sk_X509_VERIFY_PARAM_delete_ptr(sk, p) \ - ((X509_VERIFY_PARAM *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk), \ - CHECKED_CAST(void *, X509_VERIFY_PARAM *, p))) - -#define sk_X509_VERIFY_PARAM_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk), \ - (out_index), CHECKED_CAST(void *, X509_VERIFY_PARAM *, p)) - -#define sk_X509_VERIFY_PARAM_shift(sk) \ - ((X509_VERIFY_PARAM *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk))) - -#define 
sk_X509_VERIFY_PARAM_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk), \ - CHECKED_CAST(void *, X509_VERIFY_PARAM *, p)) - -#define sk_X509_VERIFY_PARAM_pop(sk) \ - ((X509_VERIFY_PARAM *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk))) - -#define sk_X509_VERIFY_PARAM_dup(sk) \ - ((STACK_OF(X509_VERIFY_PARAM) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_VERIFY_PARAM) *, sk))) - -#define sk_X509_VERIFY_PARAM_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk)) - -#define sk_X509_VERIFY_PARAM_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_VERIFY_PARAM) *, sk)) - -#define sk_X509_VERIFY_PARAM_set_cmp_func(sk, comp) \ - ((int (*)(const X509_VERIFY_PARAM **a, const X509_VERIFY_PARAM **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(X509_VERIFY_PARAM) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const X509_VERIFY_PARAM **a, \ - const X509_VERIFY_PARAM **b), \ - comp))) - -#define sk_X509_VERIFY_PARAM_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(X509_VERIFY_PARAM) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(X509_VERIFY_PARAM) *, sk), \ - CHECKED_CAST(void *(*)(void *), \ - X509_VERIFY_PARAM *(*)(X509_VERIFY_PARAM *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(X509_VERIFY_PARAM *), \ - free_func))) - -/* void */ -#define sk_void_new(comp) \ - ((STACK_OF(void) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const void **a, const void **b), comp))) - -#define sk_void_new_null() ((STACK_OF(void) *)sk_new_null()) - -#define sk_void_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(void) *, sk)) - -#define sk_void_zero(sk) sk_zero(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk)); - -#define sk_void_value(sk, i) \ - ((void *)sk_value(CHECKED_CAST(const _STACK *, const STACK_OF(void) *, sk), \ - (i))) - -#define sk_void_set(sk, i, p) \ - ((void *)sk_set(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk), (i), \ - CHECKED_CAST(void *, void *, p))) - -#define sk_void_free(sk) sk_free(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk)) - -#define sk_void_pop_free(sk, free_func) \ - sk_pop_free(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(void *), free_func)) - -#define sk_void_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk), \ - CHECKED_CAST(void *, void *, p), (where)) - -#define sk_void_delete(sk, where) \ - ((void *)sk_delete(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk), (where))) - -#define sk_void_delete_ptr(sk, p) \ - ((void *)sk_delete_ptr(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk), \ - CHECKED_CAST(void *, void *, p))) - -#define sk_void_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk), (out_index), \ - CHECKED_CAST(void *, void *, p)) - -#define sk_void_shift(sk) \ - ((void *)sk_shift(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk))) - -#define sk_void_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk), \ - CHECKED_CAST(void *, void *, p)) - -#define sk_void_pop(sk) \ - ((void *)sk_pop(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk))) - -#define sk_void_dup(sk) \ - ((STACK_OF(void) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(void) *, sk))) - -#define sk_void_sort(sk) sk_sort(CHECKED_CAST(_STACK *, STACK_OF(void) *, sk)) - -#define sk_void_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(void) *, sk)) - -#define sk_void_set_cmp_func(sk, comp) \ - 
((int (*)(const void **a, const void **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(void) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const void **a, const void **b), \ - comp))) - -#define sk_void_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(void) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(void) *, sk), \ - CHECKED_CAST(void *(*)(void *), void *(*)(void *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(void *), free_func))) - -/* SRTP_PROTECTION_PROFILE */ -#define sk_SRTP_PROTECTION_PROFILE_new(comp) \ - ((STACK_OF(SRTP_PROTECTION_PROFILE) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, int (*)(const const SRTP_PROTECTION_PROFILE **a, \ - const const SRTP_PROTECTION_PROFILE **b), \ - comp))) - -#define sk_SRTP_PROTECTION_PROFILE_new_null() \ - ((STACK_OF(SRTP_PROTECTION_PROFILE) *)sk_new_null()) - -#define sk_SRTP_PROTECTION_PROFILE_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, \ - const STACK_OF(SRTP_PROTECTION_PROFILE) *, sk)) - -#define sk_SRTP_PROTECTION_PROFILE_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk)); - -#define sk_SRTP_PROTECTION_PROFILE_value(sk, i) \ - ((const SRTP_PROTECTION_PROFILE *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SRTP_PROTECTION_PROFILE) *, \ - sk), \ - (i))) - -#define sk_SRTP_PROTECTION_PROFILE_set(sk, i, p) \ - ((const SRTP_PROTECTION_PROFILE *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk), (i), \ - CHECKED_CAST(void *, const SRTP_PROTECTION_PROFILE *, p))) - -#define sk_SRTP_PROTECTION_PROFILE_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk)) - -#define sk_SRTP_PROTECTION_PROFILE_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk), \ - CHECKED_CAST(void (*)(void *), \ - void (*)(const SRTP_PROTECTION_PROFILE *), free_func)) - -#define sk_SRTP_PROTECTION_PROFILE_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk), \ - CHECKED_CAST(void *, const SRTP_PROTECTION_PROFILE *, p), (where)) - -#define sk_SRTP_PROTECTION_PROFILE_delete(sk, where) \ - ((const SRTP_PROTECTION_PROFILE *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk), \ - (where))) - -#define sk_SRTP_PROTECTION_PROFILE_delete_ptr(sk, p) \ - ((const SRTP_PROTECTION_PROFILE *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk), \ - CHECKED_CAST(void *, const SRTP_PROTECTION_PROFILE *, p))) - -#define sk_SRTP_PROTECTION_PROFILE_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk), \ - (out_index), \ - CHECKED_CAST(void *, const SRTP_PROTECTION_PROFILE *, p)) - -#define sk_SRTP_PROTECTION_PROFILE_shift(sk) \ - ((const SRTP_PROTECTION_PROFILE *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk))) - -#define sk_SRTP_PROTECTION_PROFILE_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk), \ - CHECKED_CAST(void *, const SRTP_PROTECTION_PROFILE *, p)) - -#define sk_SRTP_PROTECTION_PROFILE_pop(sk) \ - ((const SRTP_PROTECTION_PROFILE *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk))) - -#define sk_SRTP_PROTECTION_PROFILE_dup(sk) \ - ((STACK_OF(SRTP_PROTECTION_PROFILE) *)sk_dup(CHECKED_CAST( \ - const _STACK *, const STACK_OF(SRTP_PROTECTION_PROFILE) *, sk))) - -#define sk_SRTP_PROTECTION_PROFILE_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, 
STACK_OF(SRTP_PROTECTION_PROFILE) *, sk)) - -#define sk_SRTP_PROTECTION_PROFILE_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, \ - const STACK_OF(SRTP_PROTECTION_PROFILE) *, sk)) - -#define sk_SRTP_PROTECTION_PROFILE_set_cmp_func(sk, comp) \ - ((int (*)(const SRTP_PROTECTION_PROFILE **a, \ - const SRTP_PROTECTION_PROFILE **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(SRTP_PROTECTION_PROFILE) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const SRTP_PROTECTION_PROFILE **a, \ - const SRTP_PROTECTION_PROFILE **b), \ - comp))) - -#define sk_SRTP_PROTECTION_PROFILE_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(SRTP_PROTECTION_PROFILE) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SRTP_PROTECTION_PROFILE) *, \ - sk), \ - CHECKED_CAST( \ - void *(*)(void *), \ - const SRTP_PROTECTION_PROFILE *(*)(const SRTP_PROTECTION_PROFILE *), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), \ - void (*)(const SRTP_PROTECTION_PROFILE *), free_func))) - -/* SSL_CIPHER */ -#define sk_SSL_CIPHER_new(comp) \ - ((STACK_OF(SSL_CIPHER) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const const SSL_CIPHER **a, const const SSL_CIPHER **b), comp))) - -#define sk_SSL_CIPHER_new_null() ((STACK_OF(SSL_CIPHER) *)sk_new_null()) - -#define sk_SSL_CIPHER_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(SSL_CIPHER) *, sk)) - -#define sk_SSL_CIPHER_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk)); - -#define sk_SSL_CIPHER_value(sk, i) \ - ((const SSL_CIPHER *)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SSL_CIPHER) *, sk), (i))) - -#define sk_SSL_CIPHER_set(sk, i, p) \ - ((const SSL_CIPHER *)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk), (i), \ - CHECKED_CAST(void *, const SSL_CIPHER *, p))) - -#define sk_SSL_CIPHER_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk)) - -#define sk_SSL_CIPHER_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(const SSL_CIPHER *), free_func)) - -#define sk_SSL_CIPHER_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk), \ - CHECKED_CAST(void *, const SSL_CIPHER *, p), (where)) - -#define sk_SSL_CIPHER_delete(sk, where) \ - ((const SSL_CIPHER *)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk), (where))) - -#define sk_SSL_CIPHER_delete_ptr(sk, p) \ - ((const SSL_CIPHER *)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk), \ - CHECKED_CAST(void *, const SSL_CIPHER *, p))) - -#define sk_SSL_CIPHER_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk), (out_index), \ - CHECKED_CAST(void *, const SSL_CIPHER *, p)) - -#define sk_SSL_CIPHER_shift(sk) \ - ((const SSL_CIPHER *)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk))) - -#define sk_SSL_CIPHER_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk), \ - CHECKED_CAST(void *, const SSL_CIPHER *, p)) - -#define sk_SSL_CIPHER_pop(sk) \ - ((const SSL_CIPHER *)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk))) - -#define sk_SSL_CIPHER_dup(sk) \ - ((STACK_OF(SSL_CIPHER) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SSL_CIPHER) *, sk))) - -#define sk_SSL_CIPHER_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk)) - -#define sk_SSL_CIPHER_is_sorted(sk) \ - sk_is_sorted(CHECKED_CAST(const _STACK *, const STACK_OF(SSL_CIPHER) *, sk)) - 
-#define sk_SSL_CIPHER_set_cmp_func(sk, comp) \ - ((int (*)(const SSL_CIPHER **a, const SSL_CIPHER **b))sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(SSL_CIPHER) *, sk), \ - CHECKED_CAST(stack_cmp_func, \ - int (*)(const SSL_CIPHER **a, const SSL_CIPHER **b), \ - comp))) - -#define sk_SSL_CIPHER_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(SSL_CIPHER) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(SSL_CIPHER) *, sk), \ - CHECKED_CAST(void *(*)(void *), \ - const SSL_CIPHER *(*)(const SSL_CIPHER *), copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(const SSL_CIPHER *), \ - free_func))) - -/* OPENSSL_STRING */ -#define sk_OPENSSL_STRING_new(comp) \ - ((STACK_OF(OPENSSL_STRING) *)sk_new(CHECKED_CAST( \ - stack_cmp_func, \ - int (*)(const OPENSSL_STRING *a, const OPENSSL_STRING *b), comp))) - -#define sk_OPENSSL_STRING_new_null() ((STACK_OF(OPENSSL_STRING) *)sk_new_null()) - -#define sk_OPENSSL_STRING_num(sk) \ - sk_num(CHECKED_CAST(const _STACK *, const STACK_OF(OPENSSL_STRING) *, sk)) - -#define sk_OPENSSL_STRING_zero(sk) \ - sk_zero(CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk)); - -#define sk_OPENSSL_STRING_value(sk, i) \ - ((OPENSSL_STRING)sk_value( \ - CHECKED_CAST(const _STACK *, const STACK_OF(OPENSSL_STRING) *, sk), \ - (i))) - -#define sk_OPENSSL_STRING_set(sk, i, p) \ - ((OPENSSL_STRING)sk_set( \ - CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk), (i), \ - CHECKED_CAST(void *, OPENSSL_STRING, p))) - -#define sk_OPENSSL_STRING_free(sk) \ - sk_free(CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk)) - -#define sk_OPENSSL_STRING_pop_free(sk, free_func) \ - sk_pop_free( \ - CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk), \ - CHECKED_CAST(void (*)(void *), void (*)(OPENSSL_STRING), free_func)) - -#define sk_OPENSSL_STRING_insert(sk, p, where) \ - sk_insert(CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk), \ - CHECKED_CAST(void *, OPENSSL_STRING, p), (where)) - -#define sk_OPENSSL_STRING_delete(sk, where) \ - ((OPENSSL_STRING)sk_delete( \ - CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk), (where))) - -#define sk_OPENSSL_STRING_delete_ptr(sk, p) \ - ((OPENSSL_STRING)sk_delete_ptr( \ - CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk), \ - CHECKED_CAST(void *, OPENSSL_STRING, p))) - -#define sk_OPENSSL_STRING_find(sk, out_index, p) \ - sk_find(CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk), (out_index), \ - CHECKED_CAST(void *, OPENSSL_STRING, p)) - -#define sk_OPENSSL_STRING_shift(sk) \ - ((OPENSSL_STRING)sk_shift( \ - CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk))) - -#define sk_OPENSSL_STRING_push(sk, p) \ - sk_push(CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk), \ - CHECKED_CAST(void *, OPENSSL_STRING, p)) - -#define sk_OPENSSL_STRING_pop(sk) \ - ((OPENSSL_STRING)sk_pop( \ - CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk))) - -#define sk_OPENSSL_STRING_dup(sk) \ - ((STACK_OF(OPENSSL_STRING) *)sk_dup( \ - CHECKED_CAST(const _STACK *, const STACK_OF(OPENSSL_STRING) *, sk))) - -#define sk_OPENSSL_STRING_sort(sk) \ - sk_sort(CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk)) - -#define sk_OPENSSL_STRING_is_sorted(sk) \ - sk_is_sorted( \ - CHECKED_CAST(const _STACK *, const STACK_OF(OPENSSL_STRING) *, sk)) - -#define sk_OPENSSL_STRING_set_cmp_func(sk, comp) \ - ((int (*)(const OPENSSL_STRING **a, const OPENSSL_STRING **b)) \ - sk_set_cmp_func( \ - CHECKED_CAST(_STACK *, STACK_OF(OPENSSL_STRING) *, sk), \ - CHECKED_CAST(stack_cmp_func, int (*)(const OPENSSL_STRING **a, \ - 
const OPENSSL_STRING **b), \ - comp))) - -#define sk_OPENSSL_STRING_deep_copy(sk, copy_func, free_func) \ - ((STACK_OF(OPENSSL_STRING) *)sk_deep_copy( \ - CHECKED_CAST(const _STACK *, const STACK_OF(OPENSSL_STRING) *, sk), \ - CHECKED_CAST(void *(*)(void *), OPENSSL_STRING (*)(OPENSSL_STRING), \ - copy_func), \ - CHECKED_CAST(void (*)(void *), void (*)(OPENSSL_STRING), free_func))) diff --git a/Sources/BoringSSL/include/openssl/thread.h b/Sources/BoringSSL/include/openssl/thread.h index 815148477..98073b078 100644 --- a/Sources/BoringSSL/include/openssl/thread.h +++ b/Sources/BoringSSL/include/openssl/thread.h @@ -68,88 +68,88 @@ extern "C" { #if defined(OPENSSL_NO_THREADS) typedef struct crypto_mutex_st { - char padding; /* Empty structs have different sizes in C and C++. */ + char padding; // Empty structs have different sizes in C and C++. } CRYPTO_MUTEX; #elif defined(OPENSSL_WINDOWS) -/* CRYPTO_MUTEX can appear in public header files so we really don't want to - * pull in windows.h. It's statically asserted that this structure is large - * enough to contain a Windows SRWLOCK by thread_win.c. */ +// CRYPTO_MUTEX can appear in public header files so we really don't want to +// pull in windows.h. It's statically asserted that this structure is large +// enough to contain a Windows SRWLOCK by thread_win.c. typedef union crypto_mutex_st { void *handle; } CRYPTO_MUTEX; #elif defined(__MACH__) && defined(__APPLE__) typedef pthread_rwlock_t CRYPTO_MUTEX; #else -/* It is reasonable to include pthread.h on non-Windows systems, however the - * |pthread_rwlock_t| that we need is hidden under feature flags, and we can't - * ensure that we'll be able to get it. It's statically asserted that this - * structure is large enough to contain a |pthread_rwlock_t| by - * thread_pthread.c. */ +// It is reasonable to include pthread.h on non-Windows systems, however the +// |pthread_rwlock_t| that we need is hidden under feature flags, and we can't +// ensure that we'll be able to get it. It's statically asserted that this +// structure is large enough to contain a |pthread_rwlock_t| by +// thread_pthread.c. typedef union crypto_mutex_st { double alignment; uint8_t padding[3*sizeof(int) + 5*sizeof(unsigned) + 16 + 8]; } CRYPTO_MUTEX; #endif -/* CRYPTO_refcount_t is the type of a reference count. - * - * Since some platforms use C11 atomics to access this, it should have the - * _Atomic qualifier. However, this header is included by C++ programs as well - * as C code that might not set -std=c11. So, in practice, it's not possible to - * do that. Instead we statically assert that the size and native alignment of - * a plain uint32_t and an _Atomic uint32_t are equal in refcount_c11.c. */ +// CRYPTO_refcount_t is the type of a reference count. +// +// Since some platforms use C11 atomics to access this, it should have the +// _Atomic qualifier. However, this header is included by C++ programs as well +// as C code that might not set -std=c11. So, in practice, it's not possible to +// do that. Instead we statically assert that the size and native alignment of +// a plain uint32_t and an _Atomic uint32_t are equal in refcount_c11.c. typedef uint32_t CRYPTO_refcount_t; -/* Deprecated functions. - * - * Historically, OpenSSL required callers to provide locking callbacks. - * BoringSSL is thread-safe by default, but some old code calls these functions - * and so no-op implementations are provided. */ +// Deprecated functions. +// +// Historically, OpenSSL required callers to provide locking callbacks. 
+// BoringSSL is thread-safe by default, but some old code calls these functions +// and so no-op implementations are provided. -/* These defines do nothing but are provided to make old code easier to - * compile. */ +// These defines do nothing but are provided to make old code easier to +// compile. #define CRYPTO_LOCK 1 #define CRYPTO_UNLOCK 2 #define CRYPTO_READ 4 #define CRYPTO_WRITE 8 -/* CRYPTO_num_locks returns one. (This is non-zero that callers who allocate - * sizeof(lock) times this value don't get zero and then fail because malloc(0) - * returned NULL.) */ +// CRYPTO_num_locks returns one. (This is non-zero that callers who allocate +// sizeof(lock) times this value don't get zero and then fail because malloc(0) +// returned NULL.) OPENSSL_EXPORT int CRYPTO_num_locks(void); -/* CRYPTO_set_locking_callback does nothing. */ +// CRYPTO_set_locking_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_locking_callback( void (*func)(int mode, int lock_num, const char *file, int line)); -/* CRYPTO_set_add_lock_callback does nothing. */ +// CRYPTO_set_add_lock_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_add_lock_callback(int (*func)( int *num, int amount, int lock_num, const char *file, int line)); -/* CRYPTO_get_locking_callback returns NULL. */ +// CRYPTO_get_locking_callback returns NULL. OPENSSL_EXPORT void (*CRYPTO_get_locking_callback(void))(int mode, int lock_num, const char *file, int line); -/* CRYPTO_get_lock_name returns a fixed, dummy string. */ +// CRYPTO_get_lock_name returns a fixed, dummy string. OPENSSL_EXPORT const char *CRYPTO_get_lock_name(int lock_num); -/* CRYPTO_THREADID_set_callback returns one. */ +// CRYPTO_THREADID_set_callback returns one. OPENSSL_EXPORT int CRYPTO_THREADID_set_callback( void (*threadid_func)(CRYPTO_THREADID *threadid)); -/* CRYPTO_THREADID_set_numeric does nothing. */ +// CRYPTO_THREADID_set_numeric does nothing. OPENSSL_EXPORT void CRYPTO_THREADID_set_numeric(CRYPTO_THREADID *id, unsigned long val); -/* CRYPTO_THREADID_set_pointer does nothing. */ +// CRYPTO_THREADID_set_pointer does nothing. OPENSSL_EXPORT void CRYPTO_THREADID_set_pointer(CRYPTO_THREADID *id, void *ptr); -/* CRYPTO_THREADID_current does nothing. */ +// CRYPTO_THREADID_current does nothing. OPENSSL_EXPORT void CRYPTO_THREADID_current(CRYPTO_THREADID *id); -/* CRYPTO_set_id_callback does nothing. */ +// CRYPTO_set_id_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_id_callback(unsigned long (*func)(void)); typedef struct { @@ -157,35 +157,35 @@ typedef struct { struct CRYPTO_dynlock_value *data; } CRYPTO_dynlock; -/* CRYPTO_set_dynlock_create_callback does nothing. */ +// CRYPTO_set_dynlock_create_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_dynlock_create_callback( struct CRYPTO_dynlock_value *(*dyn_create_function)(const char *file, int line)); -/* CRYPTO_set_dynlock_lock_callback does nothing. */ +// CRYPTO_set_dynlock_lock_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_dynlock_lock_callback(void (*dyn_lock_function)( int mode, struct CRYPTO_dynlock_value *l, const char *file, int line)); -/* CRYPTO_set_dynlock_destroy_callback does nothing. */ +// CRYPTO_set_dynlock_destroy_callback does nothing. OPENSSL_EXPORT void CRYPTO_set_dynlock_destroy_callback( void (*dyn_destroy_function)(struct CRYPTO_dynlock_value *l, const char *file, int line)); -/* CRYPTO_get_dynlock_create_callback returns NULL. */ +// CRYPTO_get_dynlock_create_callback returns NULL. 
OPENSSL_EXPORT struct CRYPTO_dynlock_value *( *CRYPTO_get_dynlock_create_callback(void))(const char *file, int line); -/* CRYPTO_get_dynlock_lock_callback returns NULL. */ +// CRYPTO_get_dynlock_lock_callback returns NULL. OPENSSL_EXPORT void (*CRYPTO_get_dynlock_lock_callback(void))( int mode, struct CRYPTO_dynlock_value *l, const char *file, int line); -/* CRYPTO_get_dynlock_destroy_callback returns NULL. */ +// CRYPTO_get_dynlock_destroy_callback returns NULL. OPENSSL_EXPORT void (*CRYPTO_get_dynlock_destroy_callback(void))( struct CRYPTO_dynlock_value *l, const char *file, int line); #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_THREAD_H */ +#endif // OPENSSL_HEADER_THREAD_H diff --git a/Sources/BoringSSL/include/openssl/tls1.h b/Sources/BoringSSL/include/openssl/tls1.h index 260197420..f62ee1fb5 100644 --- a/Sources/BoringSSL/include/openssl/tls1.h +++ b/Sources/BoringSSL/include/openssl/tls1.h @@ -171,7 +171,7 @@ extern "C" { #define TLS1_AD_USER_CANCELLED 90 #define TLS1_AD_NO_RENEGOTIATION 100 #define TLS1_AD_MISSING_EXTENSION 109 -/* codes 110-114 are from RFC3546 */ +// codes 110-114 are from RFC3546 #define TLS1_AD_UNSUPPORTED_EXTENSION 110 #define TLS1_AD_CERTIFICATE_UNOBTAINABLE 111 #define TLS1_AD_UNRECOGNIZED_NAME 112 @@ -180,32 +180,32 @@ extern "C" { #define TLS1_AD_UNKNOWN_PSK_IDENTITY 115 #define TLS1_AD_CERTIFICATE_REQUIRED 116 -/* ExtensionType values from RFC6066 */ +// ExtensionType values from RFC6066 #define TLSEXT_TYPE_server_name 0 #define TLSEXT_TYPE_status_request 5 -/* ExtensionType values from RFC4492 */ +// ExtensionType values from RFC4492 #define TLSEXT_TYPE_ec_point_formats 11 -/* ExtensionType values from RFC5246 */ +// ExtensionType values from RFC5246 #define TLSEXT_TYPE_signature_algorithms 13 -/* ExtensionType value from RFC5764 */ +// ExtensionType value from RFC5764 #define TLSEXT_TYPE_srtp 14 -/* ExtensionType value from RFC7301 */ +// ExtensionType value from RFC7301 #define TLSEXT_TYPE_application_layer_protocol_negotiation 16 -/* ExtensionType value from RFC7685 */ +// ExtensionType value from RFC7685 #define TLSEXT_TYPE_padding 21 -/* ExtensionType value from RFC7627 */ +// ExtensionType value from RFC7627 #define TLSEXT_TYPE_extended_master_secret 23 -/* ExtensionType value from RFC4507 */ +// ExtensionType value from RFC4507 #define TLSEXT_TYPE_session_ticket 35 -/* ExtensionType values from draft-ietf-tls-tls13-18 */ +// ExtensionType values from draft-ietf-tls-tls13-18 #define TLSEXT_TYPE_supported_groups 10 #define TLSEXT_TYPE_key_share 40 #define TLSEXT_TYPE_pre_shared_key 41 @@ -214,30 +214,28 @@ extern "C" { #define TLSEXT_TYPE_cookie 44 #define TLSEXT_TYPE_psk_key_exchange_modes 45 #define TLSEXT_TYPE_ticket_early_data_info 46 +#define TLSEXT_TYPE_certificate_authorities 47 -/* ExtensionType value from RFC5746 */ +// ExtensionType value from RFC5746 #define TLSEXT_TYPE_renegotiate 0xff01 -/* ExtensionType value from RFC6962 */ +// ExtensionType value from RFC6962 #define TLSEXT_TYPE_certificate_timestamp 18 -/* This is not an IANA defined extension number */ +// This is not an IANA defined extension number #define TLSEXT_TYPE_next_proto_neg 13172 -/* This is not an IANA defined extension number */ +// This is not an IANA defined extension number #define TLSEXT_TYPE_channel_id 30032 -/* This is not an IANA defined extension number */ -#define TLSEXT_TYPE_short_header 27463 - -/* status request value from RFC 3546 */ +// status request value from RFC 3546 #define TLSEXT_STATUSTYPE_ocsp 1 -/* 
ECPointFormat values from RFC 4492 */ +// ECPointFormat values from RFC 4492 #define TLSEXT_ECPOINTFORMAT_uncompressed 0 #define TLSEXT_ECPOINTFORMAT_ansiX962_compressed_prime 1 -/* Signature and hash algorithms from RFC 5246 */ +// Signature and hash algorithms from RFC 5246 #define TLSEXT_signature_anonymous 0 #define TLSEXT_signature_rsa 1 @@ -254,30 +252,30 @@ extern "C" { #define TLSEXT_MAXLEN_host_name 255 -/* PSK ciphersuites from 4279 */ +// PSK ciphersuites from 4279 #define TLS1_CK_PSK_WITH_RC4_128_SHA 0x0300008A #define TLS1_CK_PSK_WITH_3DES_EDE_CBC_SHA 0x0300008B #define TLS1_CK_PSK_WITH_AES_128_CBC_SHA 0x0300008C #define TLS1_CK_PSK_WITH_AES_256_CBC_SHA 0x0300008D -/* PSK ciphersuites from RFC 5489 */ +// PSK ciphersuites from RFC 5489 #define TLS1_CK_ECDHE_PSK_WITH_AES_128_CBC_SHA 0x0300C035 #define TLS1_CK_ECDHE_PSK_WITH_AES_256_CBC_SHA 0x0300C036 -/* Additional TLS ciphersuites from expired Internet Draft - * draft-ietf-tls-56-bit-ciphersuites-01.txt - * (available if TLS1_ALLOW_EXPERIMENTAL_CIPHERSUITES is defined, see - * s3_lib.c). We actually treat them like SSL 3.0 ciphers, which we probably - * shouldn't. Note that the first two are actually not in the IDs. */ -#define TLS1_CK_RSA_EXPORT1024_WITH_RC4_56_MD5 0x03000060 /* not in ID */ -#define TLS1_CK_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 0x03000061 /* not in ID */ +// Additional TLS ciphersuites from expired Internet Draft +// draft-ietf-tls-56-bit-ciphersuites-01.txt +// (available if TLS1_ALLOW_EXPERIMENTAL_CIPHERSUITES is defined, see +// s3_lib.c). We actually treat them like SSL 3.0 ciphers, which we probably +// shouldn't. Note that the first two are actually not in the IDs. +#define TLS1_CK_RSA_EXPORT1024_WITH_RC4_56_MD5 0x03000060 // not in ID +#define TLS1_CK_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 0x03000061 // not in ID #define TLS1_CK_RSA_EXPORT1024_WITH_DES_CBC_SHA 0x03000062 #define TLS1_CK_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA 0x03000063 #define TLS1_CK_RSA_EXPORT1024_WITH_RC4_56_SHA 0x03000064 #define TLS1_CK_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA 0x03000065 #define TLS1_CK_DHE_DSS_WITH_RC4_128_SHA 0x03000066 -/* AES ciphersuites from RFC3268 */ +// AES ciphersuites from RFC3268 #define TLS1_CK_RSA_WITH_AES_128_SHA 0x0300002F #define TLS1_CK_DH_DSS_WITH_AES_128_SHA 0x03000030 @@ -293,7 +291,7 @@ extern "C" { #define TLS1_CK_DHE_RSA_WITH_AES_256_SHA 0x03000039 #define TLS1_CK_ADH_WITH_AES_256_SHA 0x0300003A -/* TLS v1.2 ciphersuites */ +// TLS v1.2 ciphersuites #define TLS1_CK_RSA_WITH_NULL_SHA256 0x0300003B #define TLS1_CK_RSA_WITH_AES_128_SHA256 0x0300003C #define TLS1_CK_RSA_WITH_AES_256_SHA256 0x0300003D @@ -301,7 +299,7 @@ extern "C" { #define TLS1_CK_DH_RSA_WITH_AES_128_SHA256 0x0300003F #define TLS1_CK_DHE_DSS_WITH_AES_128_SHA256 0x03000040 -/* Camellia ciphersuites from RFC4132 */ +// Camellia ciphersuites from RFC4132 #define TLS1_CK_RSA_WITH_CAMELLIA_128_CBC_SHA 0x03000041 #define TLS1_CK_DH_DSS_WITH_CAMELLIA_128_CBC_SHA 0x03000042 #define TLS1_CK_DH_RSA_WITH_CAMELLIA_128_CBC_SHA 0x03000043 @@ -309,7 +307,7 @@ extern "C" { #define TLS1_CK_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA 0x03000045 #define TLS1_CK_ADH_WITH_CAMELLIA_128_CBC_SHA 0x03000046 -/* TLS v1.2 ciphersuites */ +// TLS v1.2 ciphersuites #define TLS1_CK_DHE_RSA_WITH_AES_128_SHA256 0x03000067 #define TLS1_CK_DH_DSS_WITH_AES_256_SHA256 0x03000068 #define TLS1_CK_DH_RSA_WITH_AES_256_SHA256 0x03000069 @@ -318,7 +316,7 @@ extern "C" { #define TLS1_CK_ADH_WITH_AES_128_SHA256 0x0300006C #define TLS1_CK_ADH_WITH_AES_256_SHA256 0x0300006D -/* Camellia 
ciphersuites from RFC4132 */ +// Camellia ciphersuites from RFC4132 #define TLS1_CK_RSA_WITH_CAMELLIA_256_CBC_SHA 0x03000084 #define TLS1_CK_DH_DSS_WITH_CAMELLIA_256_CBC_SHA 0x03000085 #define TLS1_CK_DH_RSA_WITH_CAMELLIA_256_CBC_SHA 0x03000086 @@ -326,7 +324,7 @@ extern "C" { #define TLS1_CK_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA 0x03000088 #define TLS1_CK_ADH_WITH_CAMELLIA_256_CBC_SHA 0x03000089 -/* SEED ciphersuites from RFC4162 */ +// SEED ciphersuites from RFC4162 #define TLS1_CK_RSA_WITH_SEED_SHA 0x03000096 #define TLS1_CK_DH_DSS_WITH_SEED_SHA 0x03000097 #define TLS1_CK_DH_RSA_WITH_SEED_SHA 0x03000098 @@ -334,7 +332,7 @@ extern "C" { #define TLS1_CK_DHE_RSA_WITH_SEED_SHA 0x0300009A #define TLS1_CK_ADH_WITH_SEED_SHA 0x0300009B -/* TLS v1.2 GCM ciphersuites from RFC5288 */ +// TLS v1.2 GCM ciphersuites from RFC5288 #define TLS1_CK_RSA_WITH_AES_128_GCM_SHA256 0x0300009C #define TLS1_CK_RSA_WITH_AES_256_GCM_SHA384 0x0300009D #define TLS1_CK_DHE_RSA_WITH_AES_128_GCM_SHA256 0x0300009E @@ -348,7 +346,7 @@ extern "C" { #define TLS1_CK_ADH_WITH_AES_128_GCM_SHA256 0x030000A6 #define TLS1_CK_ADH_WITH_AES_256_GCM_SHA384 0x030000A7 -/* ECC ciphersuites from RFC4492 */ +// ECC ciphersuites from RFC4492 #define TLS1_CK_ECDH_ECDSA_WITH_NULL_SHA 0x0300C001 #define TLS1_CK_ECDH_ECDSA_WITH_RC4_128_SHA 0x0300C002 #define TLS1_CK_ECDH_ECDSA_WITH_DES_192_CBC3_SHA 0x0300C003 @@ -379,7 +377,7 @@ extern "C" { #define TLS1_CK_ECDH_anon_WITH_AES_128_CBC_SHA 0x0300C018 #define TLS1_CK_ECDH_anon_WITH_AES_256_CBC_SHA 0x0300C019 -/* SRP ciphersuites from RFC 5054 */ +// SRP ciphersuites from RFC 5054 #define TLS1_CK_SRP_SHA_WITH_3DES_EDE_CBC_SHA 0x0300C01A #define TLS1_CK_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA 0x0300C01B #define TLS1_CK_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA 0x0300C01C @@ -390,7 +388,7 @@ extern "C" { #define TLS1_CK_SRP_SHA_RSA_WITH_AES_256_CBC_SHA 0x0300C021 #define TLS1_CK_SRP_SHA_DSS_WITH_AES_256_CBC_SHA 0x0300C022 -/* ECDH HMAC based ciphersuites from RFC5289 */ +// ECDH HMAC based ciphersuites from RFC5289 #define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_SHA256 0x0300C023 #define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_SHA384 0x0300C024 @@ -401,7 +399,7 @@ extern "C" { #define TLS1_CK_ECDH_RSA_WITH_AES_128_SHA256 0x0300C029 #define TLS1_CK_ECDH_RSA_WITH_AES_256_SHA384 0x0300C02A -/* ECDH GCM based ciphersuites from RFC5289 */ +// ECDH GCM based ciphersuites from RFC5289 #define TLS1_CK_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 0x0300C02B #define TLS1_CK_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 0x0300C02C #define TLS1_CK_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 0x0300C02D @@ -411,23 +409,23 @@ extern "C" { #define TLS1_CK_ECDH_RSA_WITH_AES_128_GCM_SHA256 0x0300C031 #define TLS1_CK_ECDH_RSA_WITH_AES_256_GCM_SHA384 0x0300C032 -/* ChaCha20-Poly1305 cipher suites from RFC 7905. */ +// ChaCha20-Poly1305 cipher suites from RFC 7905. #define TLS1_CK_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 0x0300CCA8 #define TLS1_CK_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 0x0300CCA9 #define TLS1_CK_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 0x0300CCAC -/* TLS 1.3 ciphersuites from draft-ietf-tls-tls13-16 */ +// TLS 1.3 ciphersuites from draft-ietf-tls-tls13-16 #define TLS1_CK_AES_128_GCM_SHA256 0x03001301 #define TLS1_CK_AES_256_GCM_SHA384 0x03001302 #define TLS1_CK_CHACHA20_POLY1305_SHA256 0x03001303 -/* XXX - * Inconsistency alert: - * The OpenSSL names of ciphers with ephemeral DH here include the string - * "DHE", while elsewhere it has always been "EDH". - * (The alias for the list of all such ciphers also is "EDH".) 
- * The specifications speak of "EDH"; maybe we should allow both forms - * for everything. */ +// XXX +// Inconsistency alert: +// The OpenSSL names of ciphers with ephemeral DH here include the string +// "DHE", while elsewhere it has always been "EDH". +// (The alias for the list of all such ciphers also is "EDH".) +// The specifications speak of "EDH"; maybe we should allow both forms +// for everything. #define TLS1_TXT_RSA_EXPORT1024_WITH_RC4_56_MD5 "EXP1024-RC4-MD5" #define TLS1_TXT_RSA_EXPORT1024_WITH_RC2_CBC_56_MD5 "EXP1024-RC2-CBC-MD5" #define TLS1_TXT_RSA_EXPORT1024_WITH_DES_CBC_SHA "EXP1024-DES-CBC-SHA" @@ -437,7 +435,7 @@ extern "C" { #define TLS1_TXT_DHE_DSS_EXPORT1024_WITH_RC4_56_SHA "EXP1024-DHE-DSS-RC4-SHA" #define TLS1_TXT_DHE_DSS_WITH_RC4_128_SHA "DHE-DSS-RC4-SHA" -/* AES ciphersuites from RFC3268 */ +// AES ciphersuites from RFC3268 #define TLS1_TXT_RSA_WITH_AES_128_SHA "AES128-SHA" #define TLS1_TXT_DH_DSS_WITH_AES_128_SHA "DH-DSS-AES128-SHA" #define TLS1_TXT_DH_RSA_WITH_AES_128_SHA "DH-RSA-AES128-SHA" @@ -452,7 +450,7 @@ extern "C" { #define TLS1_TXT_DHE_RSA_WITH_AES_256_SHA "DHE-RSA-AES256-SHA" #define TLS1_TXT_ADH_WITH_AES_256_SHA "ADH-AES256-SHA" -/* ECC ciphersuites from RFC4492 */ +// ECC ciphersuites from RFC4492 #define TLS1_TXT_ECDH_ECDSA_WITH_NULL_SHA "ECDH-ECDSA-NULL-SHA" #define TLS1_TXT_ECDH_ECDSA_WITH_RC4_128_SHA "ECDH-ECDSA-RC4-SHA" #define TLS1_TXT_ECDH_ECDSA_WITH_DES_192_CBC3_SHA "ECDH-ECDSA-DES-CBC3-SHA" @@ -483,17 +481,17 @@ extern "C" { #define TLS1_TXT_ECDH_anon_WITH_AES_128_CBC_SHA "AECDH-AES128-SHA" #define TLS1_TXT_ECDH_anon_WITH_AES_256_CBC_SHA "AECDH-AES256-SHA" -/* PSK ciphersuites from RFC 4279 */ +// PSK ciphersuites from RFC 4279 #define TLS1_TXT_PSK_WITH_RC4_128_SHA "PSK-RC4-SHA" #define TLS1_TXT_PSK_WITH_3DES_EDE_CBC_SHA "PSK-3DES-EDE-CBC-SHA" #define TLS1_TXT_PSK_WITH_AES_128_CBC_SHA "PSK-AES128-CBC-SHA" #define TLS1_TXT_PSK_WITH_AES_256_CBC_SHA "PSK-AES256-CBC-SHA" -/* PSK ciphersuites from RFC 5489 */ +// PSK ciphersuites from RFC 5489 #define TLS1_TXT_ECDHE_PSK_WITH_AES_128_CBC_SHA "ECDHE-PSK-AES128-CBC-SHA" #define TLS1_TXT_ECDHE_PSK_WITH_AES_256_CBC_SHA "ECDHE-PSK-AES256-CBC-SHA" -/* SRP ciphersuite from RFC 5054 */ +// SRP ciphersuite from RFC 5054 #define TLS1_TXT_SRP_SHA_WITH_3DES_EDE_CBC_SHA "SRP-3DES-EDE-CBC-SHA" #define TLS1_TXT_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA "SRP-RSA-3DES-EDE-CBC-SHA" #define TLS1_TXT_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA "SRP-DSS-3DES-EDE-CBC-SHA" @@ -504,7 +502,7 @@ extern "C" { #define TLS1_TXT_SRP_SHA_RSA_WITH_AES_256_CBC_SHA "SRP-RSA-AES-256-CBC-SHA" #define TLS1_TXT_SRP_SHA_DSS_WITH_AES_256_CBC_SHA "SRP-DSS-AES-256-CBC-SHA" -/* Camellia ciphersuites from RFC4132 */ +// Camellia ciphersuites from RFC4132 #define TLS1_TXT_RSA_WITH_CAMELLIA_128_CBC_SHA "CAMELLIA128-SHA" #define TLS1_TXT_DH_DSS_WITH_CAMELLIA_128_CBC_SHA "DH-DSS-CAMELLIA128-SHA" #define TLS1_TXT_DH_RSA_WITH_CAMELLIA_128_CBC_SHA "DH-RSA-CAMELLIA128-SHA" @@ -519,7 +517,7 @@ extern "C" { #define TLS1_TXT_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA "DHE-RSA-CAMELLIA256-SHA" #define TLS1_TXT_ADH_WITH_CAMELLIA_256_CBC_SHA "ADH-CAMELLIA256-SHA" -/* SEED ciphersuites from RFC4162 */ +// SEED ciphersuites from RFC4162 #define TLS1_TXT_RSA_WITH_SEED_SHA "SEED-SHA" #define TLS1_TXT_DH_DSS_WITH_SEED_SHA "DH-DSS-SEED-SHA" #define TLS1_TXT_DH_RSA_WITH_SEED_SHA "DH-RSA-SEED-SHA" @@ -527,7 +525,7 @@ extern "C" { #define TLS1_TXT_DHE_RSA_WITH_SEED_SHA "DHE-RSA-SEED-SHA" #define TLS1_TXT_ADH_WITH_SEED_SHA "ADH-SEED-SHA" -/* TLS v1.2 ciphersuites */ +// TLS v1.2 
ciphersuites #define TLS1_TXT_RSA_WITH_NULL_SHA256 "NULL-SHA256" #define TLS1_TXT_RSA_WITH_AES_128_SHA256 "AES128-SHA256" #define TLS1_TXT_RSA_WITH_AES_256_SHA256 "AES256-SHA256" @@ -542,7 +540,7 @@ extern "C" { #define TLS1_TXT_ADH_WITH_AES_128_SHA256 "ADH-AES128-SHA256" #define TLS1_TXT_ADH_WITH_AES_256_SHA256 "ADH-AES256-SHA256" -/* TLS v1.2 GCM ciphersuites from RFC5288 */ +// TLS v1.2 GCM ciphersuites from RFC5288 #define TLS1_TXT_RSA_WITH_AES_128_GCM_SHA256 "AES128-GCM-SHA256" #define TLS1_TXT_RSA_WITH_AES_256_GCM_SHA384 "AES256-GCM-SHA384" #define TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256 "DHE-RSA-AES128-GCM-SHA256" @@ -556,7 +554,7 @@ extern "C" { #define TLS1_TXT_ADH_WITH_AES_128_GCM_SHA256 "ADH-AES128-GCM-SHA256" #define TLS1_TXT_ADH_WITH_AES_256_GCM_SHA384 "ADH-AES256-GCM-SHA384" -/* ECDH HMAC based ciphersuites from RFC5289 */ +// ECDH HMAC based ciphersuites from RFC5289 #define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_SHA256 "ECDHE-ECDSA-AES128-SHA256" #define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_SHA384 "ECDHE-ECDSA-AES256-SHA384" @@ -567,7 +565,7 @@ extern "C" { #define TLS1_TXT_ECDH_RSA_WITH_AES_128_SHA256 "ECDH-RSA-AES128-SHA256" #define TLS1_TXT_ECDH_RSA_WITH_AES_256_SHA384 "ECDH-RSA-AES256-SHA384" -/* ECDH GCM based ciphersuites from RFC5289 */ +// ECDH GCM based ciphersuites from RFC5289 #define TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 \ "ECDHE-ECDSA-AES128-GCM-SHA256" #define TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 \ @@ -588,7 +586,7 @@ extern "C" { #define TLS1_TXT_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 \ "ECDHE-PSK-CHACHA20-POLY1305" -/* TLS 1.3 ciphersuites from draft-ietf-tls-tls13-16 */ +// TLS 1.3 ciphersuites from draft-ietf-tls-tls13-16 #define TLS1_TXT_AES_128_GCM_SHA256 "AEAD-AES128-GCM-SHA256" #define TLS1_TXT_AES_256_GCM_SHA384 "AEAD-AES256-GCM-SHA384" #define TLS1_TXT_CHACHA20_POLY1305_SHA256 "AEAD-CHACHA20-POLY1305-SHA256" @@ -603,26 +601,10 @@ extern "C" { #define TLS_CT_ECDSA_FIXED_ECDH 66 #define TLS_MD_MAX_CONST_SIZE 20 -#define TLS_MD_CLIENT_FINISH_CONST "client finished" -#define TLS_MD_CLIENT_FINISH_CONST_SIZE 15 -#define TLS_MD_SERVER_FINISH_CONST "server finished" -#define TLS_MD_SERVER_FINISH_CONST_SIZE 15 -#define TLS_MD_KEY_EXPANSION_CONST "key expansion" -#define TLS_MD_KEY_EXPANSION_CONST_SIZE 13 -#define TLS_MD_CLIENT_WRITE_KEY_CONST "client write key" -#define TLS_MD_CLIENT_WRITE_KEY_CONST_SIZE 16 -#define TLS_MD_SERVER_WRITE_KEY_CONST "server write key" -#define TLS_MD_SERVER_WRITE_KEY_CONST_SIZE 16 -#define TLS_MD_IV_BLOCK_CONST "IV block" -#define TLS_MD_IV_BLOCK_CONST_SIZE 8 -#define TLS_MD_MASTER_SECRET_CONST "master secret" -#define TLS_MD_MASTER_SECRET_CONST_SIZE 13 -#define TLS_MD_EXTENDED_MASTER_SECRET_CONST "extended master secret" -#define TLS_MD_EXTENDED_MASTER_SECRET_CONST_SIZE 22 #ifdef __cplusplus -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_TLS1_H */ +#endif // OPENSSL_HEADER_TLS1_H diff --git a/Sources/BoringSSL/include/openssl/type_check.h b/Sources/BoringSSL/include/openssl/type_check.h index 7e70918b3..da78d70c1 100644 --- a/Sources/BoringSSL/include/openssl/type_check.h +++ b/Sources/BoringSSL/include/openssl/type_check.h @@ -64,32 +64,28 @@ extern "C" { #endif -/* This header file contains some common macros for enforcing type checking. - * Several, common OpenSSL structures (i.e. stack and lhash) operate on void - * pointers, but we wish to have type checking when they are used with a - * specific type. */ +// This header file contains some common macros for enforcing type checking. 
+// Several, common OpenSSL structures (i.e. stack and lhash) operate on void +// pointers, but we wish to have type checking when they are used with a +// specific type. -/* CHECKED_CAST casts |p| from type |from| to type |to|. */ +// CHECKED_CAST casts |p| from type |from| to type |to|. #define CHECKED_CAST(to, from, p) ((to) (1 ? (p) : (from)0)) -/* CHECKED_PTR_OF casts a given pointer to void* and statically checks that it - * was a pointer to |type|. */ +// CHECKED_PTR_OF casts a given pointer to void* and statically checks that it +// was a pointer to |type|. #define CHECKED_PTR_OF(type, p) CHECKED_CAST(void*, type*, (p)) #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L #define OPENSSL_COMPILE_ASSERT(cond, msg) _Static_assert(cond, #msg) -#elif defined(__GNUC__) -#define OPENSSL_COMPILE_ASSERT(cond, msg) \ - typedef char OPENSSL_COMPILE_ASSERT_##msg[((cond) ? 1 : -1)] \ - __attribute__((unused)) #else #define OPENSSL_COMPILE_ASSERT(cond, msg) \ - typedef char OPENSSL_COMPILE_ASSERT_##msg[((cond) ? 1 : -1)] + typedef char OPENSSL_COMPILE_ASSERT_##msg[((cond) ? 1 : -1)] OPENSSL_UNUSED #endif #if defined(__cplusplus) -} /* extern C */ +} // extern C #endif -#endif /* OPENSSL_HEADER_TYPE_CHECK_H */ +#endif // OPENSSL_HEADER_TYPE_CHECK_H diff --git a/Sources/BoringSSL/include/openssl/x509.h b/Sources/BoringSSL/include/openssl/x509.h index 88455ddb4..430ffc076 100644 --- a/Sources/BoringSSL/include/openssl/x509.h +++ b/Sources/BoringSSL/include/openssl/x509.h @@ -77,6 +77,7 @@ #include #include #include +#include #include #include #include @@ -88,6 +89,14 @@ extern "C" { #endif +/* Legacy X.509 library. + * + * This header is part of OpenSSL's X.509 implementation. It is retained for + * compatibility but otherwise underdocumented and not actively maintained. In + * the future, a replacement library will be available. Meanwhile, minimize + * dependencies on this header where possible. */ + + #define X509_FILETYPE_PEM 1 #define X509_FILETYPE_ASN1 2 #define X509_FILETYPE_DEFAULT 3 @@ -103,13 +112,7 @@ extern "C" { #define X509v3_KU_DECIPHER_ONLY 0x8000 #define X509v3_KU_UNDEF 0xffff -struct X509_objects_st - { - int nid; - int (*a2i)(void); - int (*i2a)(void); - } /* X509_OBJECTS */; - +DEFINE_STACK_OF(X509_ALGOR) DECLARE_ASN1_SET_OF(X509_ALGOR) typedef STACK_OF(X509_ALGOR) X509_ALGORS; @@ -141,7 +144,7 @@ struct X509_name_entry_st int size; /* temp variable */ } /* X509_NAME_ENTRY */; -DECLARE_STACK_OF(X509_NAME_ENTRY) +DEFINE_STACK_OF(X509_NAME_ENTRY) DECLARE_ASN1_SET_OF(X509_NAME_ENTRY) /* we always keep X509_NAMEs in 2 forms. 
*/ @@ -149,20 +152,14 @@ struct X509_name_st { STACK_OF(X509_NAME_ENTRY) *entries; int modified; /* true if 'bytes' needs to be built */ -#ifndef OPENSSL_NO_BUFFER BUF_MEM *bytes; -#else - char *bytes; -#endif /* unsigned long hash; Keep the hash around for lookups */ unsigned char *canon_enc; int canon_enclen; } /* X509_NAME */; -DECLARE_STACK_OF(X509_NAME) +DEFINE_STACK_OF(X509_NAME) -#define X509_EX_V_NETSCAPE_HACK 0x8000 -#define X509_EX_V_INIT 0x0001 struct X509_extension_st { ASN1_OBJECT *object; @@ -172,7 +169,7 @@ struct X509_extension_st typedef STACK_OF(X509_EXTENSION) X509_EXTENSIONS; -DECLARE_STACK_OF(X509_EXTENSION) +DEFINE_STACK_OF(X509_EXTENSION) DECLARE_ASN1_SET_OF(X509_EXTENSION) /* a sequence of these are used */ @@ -187,7 +184,7 @@ struct x509_attributes_st } value; } /* X509_ATTRIBUTE */; -DECLARE_STACK_OF(X509_ATTRIBUTE) +DEFINE_STACK_OF(X509_ATTRIBUTE) DECLARE_ASN1_SET_OF(X509_ATTRIBUTE) @@ -239,6 +236,9 @@ struct x509_cert_aux_st STACK_OF(X509_ALGOR) *other; /* other unspecified info */ } /* X509_CERT_AUX */; +DECLARE_STACK_OF(DIST_POINT) +DECLARE_STACK_OF(GENERAL_NAME) + struct x509_st { X509_CINF *cert_info; @@ -266,7 +266,7 @@ struct x509_st CRYPTO_MUTEX lock; } /* X509 */; -DECLARE_STACK_OF(X509) +DEFINE_STACK_OF(X509) DECLARE_ASN1_SET_OF(X509) /* This is used for a table of trust checking functions */ @@ -280,12 +280,7 @@ struct x509_trust_st { void *arg2; } /* X509_TRUST */; -DECLARE_STACK_OF(X509_TRUST) - -struct x509_cert_pair_st { - X509 *forward; - X509 *reverse; -} /* X509_CERT_PAIR */; +DEFINE_STACK_OF(X509_TRUST) /* standard trust ids */ @@ -402,7 +397,7 @@ struct x509_revoked_st int sequence; /* load sequence */ }; -DECLARE_STACK_OF(X509_REVOKED) +DEFINE_STACK_OF(X509_REVOKED) DECLARE_ASN1_SET_OF(X509_REVOKED) struct X509_crl_info_st @@ -417,6 +412,8 @@ struct X509_crl_info_st ASN1_ENCODING enc; } /* X509_CRL_INFO */; +DECLARE_STACK_OF(GENERAL_NAMES) + struct X509_crl_st { /* actual signature */ @@ -440,7 +437,7 @@ struct X509_crl_st void *meth_data; } /* X509_CRL */; -DECLARE_STACK_OF(X509_CRL) +DEFINE_STACK_OF(X509_CRL) DECLARE_ASN1_SET_OF(X509_CRL) struct private_key_st @@ -475,7 +472,7 @@ struct X509_info_st } /* X509_INFO */; -DECLARE_STACK_OF(X509_INFO) +DEFINE_STACK_OF(X509_INFO) #endif /* The next 2 structures and their 8 routines were sent to me by @@ -495,20 +492,6 @@ struct Netscape_spki_st ASN1_BIT_STRING *signature; } /* NETSCAPE_SPKI */; -/* Netscape certificate sequence structure */ -struct Netscape_certificate_sequence - { - ASN1_OBJECT *type; - STACK_OF(X509) *certs; - } /* NETSCAPE_CERT_SEQUENCE */; - -/* Unused (and iv length is wrong) -typedef struct CBCParameter_st - { - unsigned char iv[8]; - } CBC_PARAM; -*/ - /* PKCS#8 private key info structure */ struct pkcs8_priv_key_info_st @@ -535,9 +518,6 @@ struct pkcs8_priv_key_info_st extern "C" { #endif -#define X509_EXT_PACK_UNKNOWN 1 -#define X509_EXT_PACK_STRING 2 - #define X509_get_version(x) ASN1_INTEGER_get((x)->cert_info->version) /* #define X509_get_serialNumber(x) ((x)->cert_info->serialNumber) */ #define X509_get_notBefore(x) ((x)->cert_info->validity->notBefore) @@ -595,7 +575,8 @@ OPENSSL_EXPORT int NETSCAPE_SPKI_set_pubkey(NETSCAPE_SPKI *x, EVP_PKEY *pkey); OPENSSL_EXPORT int NETSCAPE_SPKI_print(BIO *out, NETSCAPE_SPKI *spki); OPENSSL_EXPORT int X509_signature_dump(BIO *bp,const ASN1_STRING *sig, int indent); -OPENSSL_EXPORT int X509_signature_print(BIO *bp,X509_ALGOR *alg, ASN1_STRING *sig); +OPENSSL_EXPORT int X509_signature_print(BIO *bp, const X509_ALGOR *alg, + const 
ASN1_STRING *sig); OPENSSL_EXPORT int X509_sign(X509 *x, EVP_PKEY *pkey, const EVP_MD *md); OPENSSL_EXPORT int X509_sign_ctx(X509 *x, EVP_MD_CTX *ctx); @@ -699,13 +680,15 @@ OPENSSL_EXPORT X509_REVOKED *X509_REVOKED_dup(X509_REVOKED *rev); OPENSSL_EXPORT X509_REQ *X509_REQ_dup(X509_REQ *req); OPENSSL_EXPORT X509_ALGOR *X509_ALGOR_dup(X509_ALGOR *xn); OPENSSL_EXPORT int X509_ALGOR_set0(X509_ALGOR *alg, const ASN1_OBJECT *aobj, int ptype, void *pval); -OPENSSL_EXPORT void X509_ALGOR_get0(ASN1_OBJECT **paobj, int *pptype, void **ppval, - X509_ALGOR *algor); +OPENSSL_EXPORT void X509_ALGOR_get0(const ASN1_OBJECT **paobj, int *pptype, + const void **ppval, + const X509_ALGOR *algor); OPENSSL_EXPORT void X509_ALGOR_set_md(X509_ALGOR *alg, const EVP_MD *md); OPENSSL_EXPORT int X509_ALGOR_cmp(const X509_ALGOR *a, const X509_ALGOR *b); OPENSSL_EXPORT X509_NAME *X509_NAME_dup(X509_NAME *xn); OPENSSL_EXPORT X509_NAME_ENTRY *X509_NAME_ENTRY_dup(X509_NAME_ENTRY *ne); +OPENSSL_EXPORT int X509_NAME_ENTRY_set(const X509_NAME_ENTRY *ne); OPENSSL_EXPORT int X509_cmp_time(const ASN1_TIME *s, time_t *t); OPENSSL_EXPORT int X509_cmp_current_time(const ASN1_TIME *s); @@ -766,20 +749,18 @@ DECLARE_ASN1_FUNCTIONS(X509_CINF) DECLARE_ASN1_FUNCTIONS(X509) DECLARE_ASN1_FUNCTIONS(X509_CERT_AUX) -DECLARE_ASN1_FUNCTIONS(X509_CERT_PAIR) - /* X509_up_ref adds one to the reference count of |x| and returns one. */ OPENSSL_EXPORT int X509_up_ref(X509 *x); OPENSSL_EXPORT int X509_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func); + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int X509_set_ex_data(X509 *r, int idx, void *arg); OPENSSL_EXPORT void *X509_get_ex_data(X509 *r, int idx); OPENSSL_EXPORT int i2d_X509_AUX(X509 *a,unsigned char **pp); OPENSSL_EXPORT X509 * d2i_X509_AUX(X509 **a,const unsigned char **pp,long length); -OPENSSL_EXPORT void X509_get0_signature(ASN1_BIT_STRING **psig, X509_ALGOR **palg, - const X509 *x); +OPENSSL_EXPORT void X509_get0_signature(const ASN1_BIT_STRING **psig, + const X509_ALGOR **palg, const X509 *x); OPENSSL_EXPORT int X509_get_signature_nid(const X509 *x); OPENSSL_EXPORT int X509_alias_set1(X509 *x, unsigned char *name, int len); @@ -807,7 +788,6 @@ OPENSSL_EXPORT void X509_PKEY_free(X509_PKEY *a); DECLARE_ASN1_FUNCTIONS(NETSCAPE_SPKI) DECLARE_ASN1_FUNCTIONS(NETSCAPE_SPKAC) -DECLARE_ASN1_FUNCTIONS(NETSCAPE_CERT_SEQUENCE) #ifndef OPENSSL_NO_EVP OPENSSL_EXPORT X509_INFO * X509_INFO_new(void); @@ -839,11 +819,12 @@ OPENSSL_EXPORT X509_NAME * X509_get_issuer_name(X509 *a); OPENSSL_EXPORT int X509_set_subject_name(X509 *x, X509_NAME *name); OPENSSL_EXPORT X509_NAME * X509_get_subject_name(X509 *a); OPENSSL_EXPORT int X509_set_notBefore(X509 *x, const ASN1_TIME *tm); +OPENSSL_EXPORT const ASN1_TIME *X509_get0_notBefore(const X509 *x); OPENSSL_EXPORT int X509_set_notAfter(X509 *x, const ASN1_TIME *tm); +OPENSSL_EXPORT const ASN1_TIME *X509_get0_notAfter(const X509 *x); OPENSSL_EXPORT int X509_set_pubkey(X509 *x, EVP_PKEY *pkey); OPENSSL_EXPORT EVP_PKEY * X509_get_pubkey(X509 *x); OPENSSL_EXPORT ASN1_BIT_STRING * X509_get0_pubkey_bitstr(const X509 *x); -OPENSSL_EXPORT int X509_certificate_type(X509 *x,EVP_PKEY *pubkey /* optional */); OPENSSL_EXPORT STACK_OF(X509_EXTENSION) *X509_get0_extensions(const X509 *x); OPENSSL_EXPORT int X509_REQ_set_version(X509_REQ *x,long version); @@ -1115,76 +1096,16 @@ typedef struct rsa_pss_params_st { DECLARE_ASN1_FUNCTIONS(RSA_PSS_PARAMS) -/* PKCS7_get_certificates 
parses a PKCS#7, SignedData structure from |cbs| and - * appends the included certificates to |out_certs|. It returns one on success - * and zero on error. */ -OPENSSL_EXPORT int PKCS7_get_certificates(STACK_OF(X509) *out_certs, CBS *cbs); - -/* PKCS7_bundle_certificates appends a PKCS#7, SignedData structure containing - * |certs| to |out|. It returns one on success and zero on error. */ -OPENSSL_EXPORT int PKCS7_bundle_certificates( - CBB *out, const STACK_OF(X509) *certs); - -/* PKCS7_get_CRLs parses a PKCS#7, SignedData structure from |cbs| and appends - * the included CRLs to |out_crls|. It returns one on success and zero on - * error. */ -OPENSSL_EXPORT int PKCS7_get_CRLs(STACK_OF(X509_CRL) *out_crls, CBS *cbs); - -/* PKCS7_bundle_CRLs appends a PKCS#7, SignedData structure containing - * |crls| to |out|. It returns one on success and zero on error. */ -OPENSSL_EXPORT int PKCS7_bundle_CRLs(CBB *out, const STACK_OF(X509_CRL) *crls); - -/* PKCS7_get_PEM_certificates reads a PEM-encoded, PKCS#7, SignedData structure - * from |pem_bio| and appends the included certificates to |out_certs|. It - * returns one on success and zero on error. */ -OPENSSL_EXPORT int PKCS7_get_PEM_certificates(STACK_OF(X509) *out_certs, - BIO *pem_bio); - -/* PKCS7_get_PEM_CRLs reads a PEM-encoded, PKCS#7, SignedData structure from - * |pem_bio| and appends the included CRLs to |out_crls|. It returns one on - * success and zero on error. */ -OPENSSL_EXPORT int PKCS7_get_PEM_CRLs(STACK_OF(X509_CRL) *out_crls, - BIO *pem_bio); - -/* EVP_PK values indicate the algorithm of the public key in a certificate. */ - -#define EVP_PK_RSA 0x0001 -#define EVP_PK_DSA 0x0002 -#define EVP_PK_DH 0x0004 -#define EVP_PK_EC 0x0008 - -/* EVP_PKS values indicate the algorithm used to sign a certificate. */ - -#define EVP_PKS_RSA 0x0100 -#define EVP_PKS_DSA 0x0200 -#define EVP_PKS_EC 0x0400 - -/* EVP_PKT values are flags that define what public-key operations can be - * performed with the public key from a certificate. */ - -/* EVP_PKT_SIGN indicates that the public key can be used for signing. */ -#define EVP_PKT_SIGN 0x0010 -/* EVP_PKT_ENC indicates that a session key can be encrypted to the public - * key. */ -#define EVP_PKT_ENC 0x0020 -/* EVP_PKT_EXCH indicates that key-agreement can be performed. */ -#define EVP_PKT_EXCH 0x0040 -/* EVP_PKT_EXP indicates that key is weak (i.e. "export"). 
*/ -#define EVP_PKT_EXP 0x1000 - #ifdef __cplusplus } +#endif +#if !defined(BORINGSSL_NO_CXX) extern "C++" { namespace bssl { -BORINGSSL_MAKE_STACK_DELETER(X509, X509_free) -BORINGSSL_MAKE_STACK_DELETER(X509_CRL, X509_CRL_free) -BORINGSSL_MAKE_STACK_DELETER(X509_EXTENSION, X509_EXTENSION_free) -BORINGSSL_MAKE_STACK_DELETER(X509_NAME, X509_NAME_free) - BORINGSSL_MAKE_DELETER(NETSCAPE_SPKI, NETSCAPE_SPKI_free) BORINGSSL_MAKE_DELETER(X509, X509_free) BORINGSSL_MAKE_DELETER(X509_ALGOR, X509_ALGOR_free) @@ -1197,6 +1118,7 @@ BORINGSSL_MAKE_DELETER(X509_NAME, X509_NAME_free) BORINGSSL_MAKE_DELETER(X509_NAME_ENTRY, X509_NAME_ENTRY_free) BORINGSSL_MAKE_DELETER(X509_PKEY, X509_PKEY_free) BORINGSSL_MAKE_DELETER(X509_POLICY_TREE, X509_policy_tree_free) +BORINGSSL_MAKE_DELETER(X509_PUBKEY, X509_PUBKEY_free) BORINGSSL_MAKE_DELETER(X509_REQ, X509_REQ_free) BORINGSSL_MAKE_DELETER(X509_REVOKED, X509_REVOKED_free) BORINGSSL_MAKE_DELETER(X509_SIG, X509_SIG_free) @@ -1204,11 +1126,14 @@ BORINGSSL_MAKE_DELETER(X509_STORE, X509_STORE_free) BORINGSSL_MAKE_DELETER(X509_STORE_CTX, X509_STORE_CTX_free) BORINGSSL_MAKE_DELETER(X509_VERIFY_PARAM, X509_VERIFY_PARAM_free) +using ScopedX509_STORE_CTX = + internal::StackAllocated; + } // namespace bssl } /* extern C++ */ - -#endif +#endif /* !BORINGSSL_NO_CXX */ #define X509_R_AKID_MISMATCH 100 #define X509_R_BAD_PKCS7_VERSION 101 @@ -1246,5 +1171,6 @@ BORINGSSL_MAKE_DELETER(X509_VERIFY_PARAM, X509_VERIFY_PARAM_free) #define X509_R_WRONG_LOOKUP_TYPE 133 #define X509_R_WRONG_TYPE 134 #define X509_R_NAME_TOO_LONG 135 +#define X509_R_INVALID_PARAMETER 136 #endif diff --git a/Sources/BoringSSL/include/openssl/x509_vfy.h b/Sources/BoringSSL/include/openssl/x509_vfy.h index f069cb2ce..208a3807f 100644 --- a/Sources/BoringSSL/include/openssl/x509_vfy.h +++ b/Sources/BoringSSL/include/openssl/x509_vfy.h @@ -64,32 +64,19 @@ #ifndef HEADER_X509_VFY_H #define HEADER_X509_VFY_H -#include -#include #include #ifdef __cplusplus extern "C" { #endif -#if 0 -/* Outer object */ -typedef struct x509_hash_dir_st - { - int num_dirs; - char **dirs; - int *dirs_type; - int num_dirs_alloced; - } X509_HASH_DIR_CTX; -#endif +/* Legacy X.509 library. + * + * This header is part of OpenSSL's X.509 implementation. It is retained for + * compatibility but otherwise underdocumented and not actively maintained. In + * the future, a replacement library will be available. Meanwhile, minimize + * dependencies on this header where possible. */ -typedef struct x509_file_st - { - int num_paths; /* number of paths to files or directories */ - int num_alloced; - char **paths; /* the list of paths or directories */ - int *path_type; - } X509_CERT_FILE_CTX; /*******************************/ /* @@ -109,10 +96,6 @@ The X509_STORE then calls a function to actually verify the certificate chain. */ -/* The following are legacy constants that should not be used. */ -#define X509_LU_RETRY (-1) -#define X509_LU_FAIL 0 - #define X509_LU_X509 1 #define X509_LU_CRL 2 #define X509_LU_PKEY 3 @@ -129,8 +112,8 @@ typedef struct x509_object_st } data; } X509_OBJECT; -DECLARE_STACK_OF(X509_LOOKUP) -DECLARE_STACK_OF(X509_OBJECT) +DEFINE_STACK_OF(X509_LOOKUP) +DEFINE_STACK_OF(X509_OBJECT) /* This is a static that defines the function interface */ typedef struct x509_lookup_method_st @@ -173,7 +156,7 @@ struct X509_VERIFY_PARAM_st X509_VERIFY_PARAM_ID *id; /* opaque ID data */ }; -DECLARE_STACK_OF(X509_VERIFY_PARAM) +DEFINE_STACK_OF(X509_VERIFY_PARAM) /* This is used to hold everything. It is used for all certificate * validation. 
Once we have a certificate chain, the 'verify' @@ -438,16 +421,20 @@ OPENSSL_EXPORT X509_OBJECT *X509_OBJECT_retrieve_by_subject(STACK_OF(X509_OBJECT OPENSSL_EXPORT X509_OBJECT *X509_OBJECT_retrieve_match(STACK_OF(X509_OBJECT) *h, X509_OBJECT *x); OPENSSL_EXPORT int X509_OBJECT_up_ref_count(X509_OBJECT *a); OPENSSL_EXPORT void X509_OBJECT_free_contents(X509_OBJECT *a); +OPENSSL_EXPORT int X509_OBJECT_get_type(const X509_OBJECT *a); +OPENSSL_EXPORT X509 *X509_OBJECT_get0_X509(const X509_OBJECT *a); OPENSSL_EXPORT X509_STORE *X509_STORE_new(void ); OPENSSL_EXPORT int X509_STORE_up_ref(X509_STORE *store); OPENSSL_EXPORT void X509_STORE_free(X509_STORE *v); +OPENSSL_EXPORT STACK_OF(X509_OBJECT) *X509_STORE_get0_objects(X509_STORE *st); OPENSSL_EXPORT STACK_OF(X509)* X509_STORE_get1_certs(X509_STORE_CTX *st, X509_NAME *nm); OPENSSL_EXPORT STACK_OF(X509_CRL)* X509_STORE_get1_crls(X509_STORE_CTX *st, X509_NAME *nm); OPENSSL_EXPORT int X509_STORE_set_flags(X509_STORE *ctx, unsigned long flags); OPENSSL_EXPORT int X509_STORE_set_purpose(X509_STORE *ctx, int purpose); OPENSSL_EXPORT int X509_STORE_set_trust(X509_STORE *ctx, int trust); OPENSSL_EXPORT int X509_STORE_set1_param(X509_STORE *ctx, X509_VERIFY_PARAM *pm); +OPENSSL_EXPORT X509_VERIFY_PARAM *X509_STORE_get0_param(X509_STORE *ctx); /* X509_STORE_set0_additional_untrusted sets a stack of additional, untrusted * certificates that are available for chain building. This function does not * take ownership of the stack. */ @@ -464,6 +451,7 @@ OPENSSL_EXPORT X509_STORE_CTX *X509_STORE_CTX_new(void); OPENSSL_EXPORT int X509_STORE_CTX_get1_issuer(X509 **issuer, X509_STORE_CTX *ctx, X509 *x); +OPENSSL_EXPORT void X509_STORE_CTX_zero(X509_STORE_CTX *ctx); OPENSSL_EXPORT void X509_STORE_CTX_free(X509_STORE_CTX *ctx); OPENSSL_EXPORT int X509_STORE_CTX_init(X509_STORE_CTX *ctx, X509_STORE *store, X509 *x509, STACK_OF(X509) *chain); @@ -513,7 +501,7 @@ OPENSSL_EXPORT int X509_STORE_set_default_paths(X509_STORE *ctx); #endif OPENSSL_EXPORT int X509_STORE_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func); + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func); OPENSSL_EXPORT int X509_STORE_CTX_set_ex_data(X509_STORE_CTX *ctx,int idx,void *data); OPENSSL_EXPORT void * X509_STORE_CTX_get_ex_data(X509_STORE_CTX *ctx,int idx); OPENSSL_EXPORT int X509_STORE_CTX_get_error(X509_STORE_CTX *ctx); @@ -527,6 +515,8 @@ OPENSSL_EXPORT STACK_OF(X509) *X509_STORE_CTX_get_chain(X509_STORE_CTX *ctx); OPENSSL_EXPORT STACK_OF(X509) *X509_STORE_CTX_get1_chain(X509_STORE_CTX *ctx); OPENSSL_EXPORT void X509_STORE_CTX_set_cert(X509_STORE_CTX *c,X509 *x); OPENSSL_EXPORT void X509_STORE_CTX_set_chain(X509_STORE_CTX *c,STACK_OF(X509) *sk); +OPENSSL_EXPORT STACK_OF(X509) * + X509_STORE_CTX_get0_untrusted(X509_STORE_CTX *ctx); OPENSSL_EXPORT void X509_STORE_CTX_set0_crls(X509_STORE_CTX *c,STACK_OF(X509_CRL) *sk); OPENSSL_EXPORT int X509_STORE_CTX_set_purpose(X509_STORE_CTX *ctx, int purpose); OPENSSL_EXPORT int X509_STORE_CTX_set_trust(X509_STORE_CTX *ctx, int trust); diff --git a/Sources/BoringSSL/include/openssl/x509v3.h b/Sources/BoringSSL/include/openssl/x509v3.h index 4754f71bf..abd52c0b1 100644 --- a/Sources/BoringSSL/include/openssl/x509v3.h +++ b/Sources/BoringSSL/include/openssl/x509v3.h @@ -57,12 +57,22 @@ #include #include +#include #include #ifdef __cplusplus extern "C" { #endif + +/* Legacy X.509 library. + * + * This header is part of OpenSSL's X.509 implementation. 
It is retained for + * compatibility but otherwise underdocumented and not actively maintained. In + * the future, a replacement library will be available. Meanwhile, minimize + * dependencies on this header where possible. */ + + /* Forward reference */ struct v3_ext_method; struct v3_ext_ctx; @@ -137,7 +147,7 @@ void *db; typedef struct v3_ext_method X509V3_EXT_METHOD; -DECLARE_STACK_OF(X509V3_EXT_METHOD) +DEFINE_STACK_OF(X509V3_EXT_METHOD) /* ext_flags values */ #define X509V3_EXT_DYNAMIC 0x1 @@ -201,23 +211,25 @@ union { } d; } GENERAL_NAME; +DEFINE_STACK_OF(GENERAL_NAME) +DECLARE_ASN1_SET_OF(GENERAL_NAME) + typedef STACK_OF(GENERAL_NAME) GENERAL_NAMES; +DEFINE_STACK_OF(GENERAL_NAMES) + typedef struct ACCESS_DESCRIPTION_st { ASN1_OBJECT *method; GENERAL_NAME *location; } ACCESS_DESCRIPTION; +DEFINE_STACK_OF(ACCESS_DESCRIPTION) +DECLARE_ASN1_SET_OF(ACCESS_DESCRIPTION) + typedef STACK_OF(ACCESS_DESCRIPTION) AUTHORITY_INFO_ACCESS; typedef STACK_OF(ASN1_OBJECT) EXTENDED_KEY_USAGE; -DECLARE_STACK_OF(GENERAL_NAME) -DECLARE_ASN1_SET_OF(GENERAL_NAME) - -DECLARE_STACK_OF(ACCESS_DESCRIPTION) -DECLARE_ASN1_SET_OF(ACCESS_DESCRIPTION) - typedef struct DIST_POINT_NAME_st { int type; union { @@ -251,7 +263,7 @@ int dp_reasons; typedef STACK_OF(DIST_POINT) CRL_DIST_POINTS; -DECLARE_STACK_OF(DIST_POINT) +DEFINE_STACK_OF(DIST_POINT) DECLARE_ASN1_SET_OF(DIST_POINT) struct AUTHORITY_KEYID_st { @@ -267,7 +279,7 @@ typedef struct SXNET_ID_st { ASN1_OCTET_STRING *user; } SXNETID; -DECLARE_STACK_OF(SXNETID) +DEFINE_STACK_OF(SXNETID) DECLARE_ASN1_SET_OF(SXNETID) typedef struct SXNET_st { @@ -294,7 +306,7 @@ typedef struct POLICYQUALINFO_st { } d; } POLICYQUALINFO; -DECLARE_STACK_OF(POLICYQUALINFO) +DEFINE_STACK_OF(POLICYQUALINFO) DECLARE_ASN1_SET_OF(POLICYQUALINFO) typedef struct POLICYINFO_st { @@ -304,7 +316,7 @@ typedef struct POLICYINFO_st { typedef STACK_OF(POLICYINFO) CERTIFICATEPOLICIES; -DECLARE_STACK_OF(POLICYINFO) +DEFINE_STACK_OF(POLICYINFO) DECLARE_ASN1_SET_OF(POLICYINFO) typedef struct POLICY_MAPPING_st { @@ -312,7 +324,7 @@ typedef struct POLICY_MAPPING_st { ASN1_OBJECT *subjectDomainPolicy; } POLICY_MAPPING; -DECLARE_STACK_OF(POLICY_MAPPING) +DEFINE_STACK_OF(POLICY_MAPPING) typedef STACK_OF(POLICY_MAPPING) POLICY_MAPPINGS; @@ -322,7 +334,7 @@ typedef struct GENERAL_SUBTREE_st { ASN1_INTEGER *maximum; } GENERAL_SUBTREE; -DECLARE_STACK_OF(GENERAL_SUBTREE) +DEFINE_STACK_OF(GENERAL_SUBTREE) struct NAME_CONSTRAINTS_st { STACK_OF(GENERAL_SUBTREE) *permittedSubtrees; @@ -501,7 +513,7 @@ typedef struct x509_purpose_st { #define X509V3_ADD_DELETE 5L #define X509V3_ADD_SILENT 0x10 -DECLARE_STACK_OF(X509_PURPOSE) +DEFINE_STACK_OF(X509_PURPOSE) DECLARE_ASN1_FUNCTIONS(BASIC_CONSTRAINTS) @@ -721,13 +733,12 @@ OPENSSL_EXPORT int X509V3_NAME_from_section(X509_NAME *nm, STACK_OF(CONF_VALUE)* unsigned long chtype); OPENSSL_EXPORT void X509_POLICY_NODE_print(BIO *out, X509_POLICY_NODE *node, int indent); -DECLARE_STACK_OF(X509_POLICY_NODE) +DEFINE_STACK_OF(X509_POLICY_NODE) /* BEGIN ERROR CODES */ /* The following lines are auto generated by the script mkerr.pl. Any changes * made after this point may be overwritten when the script is next run. */ -void ERR_load_X509V3_strings(void); #ifdef __cplusplus @@ -737,15 +748,11 @@ extern "C++" { namespace bssl { -BORINGSSL_MAKE_STACK_DELETER(DIST_POINT, DIST_POINT_free) -BORINGSSL_MAKE_STACK_DELETER(GENERAL_NAME, GENERAL_NAME_free) -// A STACK_OF(POLICYINFO) is also known as a CERTIFICATEPOLICIES. 
-BORINGSSL_MAKE_STACK_DELETER(POLICYINFO, POLICYINFO_free) - BORINGSSL_MAKE_DELETER(AUTHORITY_KEYID, AUTHORITY_KEYID_free) BORINGSSL_MAKE_DELETER(BASIC_CONSTRAINTS, BASIC_CONSTRAINTS_free) BORINGSSL_MAKE_DELETER(DIST_POINT, DIST_POINT_free) BORINGSSL_MAKE_DELETER(GENERAL_NAME, GENERAL_NAME_free) +BORINGSSL_MAKE_DELETER(POLICYINFO, POLICYINFO_free) } // namespace bssl diff --git a/Sources/BoringSSL/ssl/bio_ssl.c b/Sources/BoringSSL/ssl/bio_ssl.cc similarity index 94% rename from Sources/BoringSSL/ssl/bio_ssl.c rename to Sources/BoringSSL/ssl/bio_ssl.cc index ad8f5d8ff..61afee566 100644 --- a/Sources/BoringSSL/ssl/bio_ssl.c +++ b/Sources/BoringSSL/ssl/bio_ssl.cc @@ -12,8 +12,12 @@ #include +static SSL *get_ssl(BIO *bio) { + return reinterpret_cast(bio->ptr); +} + static int ssl_read(BIO *bio, char *out, int outl) { - SSL *ssl = bio->ptr; + SSL *ssl = get_ssl(bio); if (ssl == NULL) { return 0; } @@ -53,7 +57,7 @@ static int ssl_read(BIO *bio, char *out, int outl) { } static int ssl_write(BIO *bio, const char *out, int outl) { - SSL *ssl = bio->ptr; + SSL *ssl = get_ssl(bio); if (ssl == NULL) { return 0; } @@ -87,7 +91,7 @@ static int ssl_write(BIO *bio, const char *out, int outl) { } static long ssl_ctrl(BIO *bio, int cmd, long num, void *ptr) { - SSL *ssl = bio->ptr; + SSL *ssl = get_ssl(bio); if (ssl == NULL && cmd != BIO_C_SET_SSL) { return 0; } @@ -134,7 +138,7 @@ static int ssl_new(BIO *bio) { } static int ssl_free(BIO *bio) { - SSL *ssl = bio->ptr; + SSL *ssl = get_ssl(bio); if (ssl == NULL) { return 1; @@ -149,7 +153,7 @@ static int ssl_free(BIO *bio) { } static long ssl_callback_ctrl(BIO *bio, int cmd, bio_info_cb fp) { - SSL *ssl = bio->ptr; + SSL *ssl = get_ssl(bio); if (ssl == NULL) { return 0; } diff --git a/Sources/BoringSSL/ssl/custom_extensions.c b/Sources/BoringSSL/ssl/custom_extensions.cc similarity index 90% rename from Sources/BoringSSL/ssl/custom_extensions.c rename to Sources/BoringSSL/ssl/custom_extensions.cc index 10fbfc8f0..85b8a33d7 100644 --- a/Sources/BoringSSL/ssl/custom_extensions.c +++ b/Sources/BoringSSL/ssl/custom_extensions.cc @@ -25,6 +25,8 @@ #include "internal.h" +namespace bssl { + void SSL_CUSTOM_EXTENSION_free(SSL_CUSTOM_EXTENSION *custom_extension) { OPENSSL_free(custom_extension); } @@ -45,9 +47,9 @@ static const SSL_CUSTOM_EXTENSION *custom_ext_find( return NULL; } -/* default_add_callback is used as the |add_callback| when the user doesn't - * provide one. For servers, it does nothing while, for clients, it causes an - * empty extension to be included. */ +// default_add_callback is used as the |add_callback| when the user doesn't +// provide one. For servers, it does nothing while, for clients, it causes an +// empty extension to be included. static int default_add_callback(SSL *ssl, unsigned extension_value, const uint8_t **out, size_t *out_len, int *out_alert_value, void *add_arg) { @@ -74,7 +76,7 @@ static int custom_ext_add_hello(SSL_HANDSHAKE *hs, CBB *extensions) { if (ssl->server && !(hs->custom_extensions.received & (1u << i))) { - /* Servers cannot echo extensions that the client didn't send. */ + // Servers cannot echo extensions that the client didn't send. 
continue; } @@ -112,7 +114,7 @@ static int custom_ext_add_hello(SSL_HANDSHAKE *hs, CBB *extensions) { break; default: - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); OPENSSL_PUT_ERROR(SSL, SSL_R_CUSTOM_EXTENSION_ERROR); ERR_add_error_dataf("extension %u", (unsigned) ext->value); return 0; @@ -133,9 +135,9 @@ int custom_ext_parse_serverhello(SSL_HANDSHAKE *hs, int *out_alert, const SSL_CUSTOM_EXTENSION *ext = custom_ext_find(ssl->ctx->client_custom_extensions, &index, value); - if (/* Unknown extensions are not allowed in a ServerHello. */ + if (// Unknown extensions are not allowed in a ServerHello. ext == NULL || - /* Also, if we didn't send the extension, that's also unacceptable. */ + // Also, if we didn't send the extension, that's also unacceptable. !(hs->custom_extensions.sent & (1u << index))) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)value); @@ -183,9 +185,9 @@ int custom_ext_add_serverhello(SSL_HANDSHAKE *hs, CBB *extensions) { return custom_ext_add_hello(hs, extensions); } -/* MAX_NUM_CUSTOM_EXTENSIONS is the maximum number of custom extensions that - * can be set on an |SSL_CTX|. It's determined by the size of the bitset used - * to track when an extension has been sent. */ +// MAX_NUM_CUSTOM_EXTENSIONS is the maximum number of custom extensions that +// can be set on an |SSL_CTX|. It's determined by the size of the bitset used +// to track when an extension has been sent. #define MAX_NUM_CUSTOM_EXTENSIONS \ (sizeof(((SSL_HANDSHAKE *)NULL)->custom_extensions.sent) * 8) @@ -198,15 +200,16 @@ static int custom_ext_append(STACK_OF(SSL_CUSTOM_EXTENSION) **stack, if (add_cb == NULL || 0xffff < extension_value || SSL_extension_supported(extension_value) || - /* Specifying a free callback without an add callback is nonsensical - * and an error. */ + // Specifying a free callback without an add callback is nonsensical + // and an error. (*stack != NULL && (MAX_NUM_CUSTOM_EXTENSIONS <= sk_SSL_CUSTOM_EXTENSION_num(*stack) || custom_ext_find(*stack, NULL, extension_value) != NULL))) { return 0; } - SSL_CUSTOM_EXTENSION *ext = OPENSSL_malloc(sizeof(SSL_CUSTOM_EXTENSION)); + SSL_CUSTOM_EXTENSION *ext = + (SSL_CUSTOM_EXTENSION *)OPENSSL_malloc(sizeof(SSL_CUSTOM_EXTENSION)); if (ext == NULL) { return 0; } @@ -237,6 +240,10 @@ static int custom_ext_append(STACK_OF(SSL_CUSTOM_EXTENSION) **stack, return 1; } +} // namespace bssl + +using namespace bssl; + int SSL_CTX_add_client_custom_ext(SSL_CTX *ctx, unsigned extension_value, SSL_custom_ext_add_cb add_cb, SSL_custom_ext_free_cb free_cb, void *add_arg, diff --git a/Sources/BoringSSL/ssl/d1_both.c b/Sources/BoringSSL/ssl/d1_both.cc similarity index 57% rename from Sources/BoringSSL/ssl/d1_both.c rename to Sources/BoringSSL/ssl/d1_both.cc index b864e426f..c219f5aed 100644 --- a/Sources/BoringSSL/ssl/d1_both.c +++ b/Sources/BoringSSL/ssl/d1_both.cc @@ -122,101 +122,92 @@ #include #include #include -#include -#include #include "../crypto/internal.h" #include "internal.h" -/* TODO(davidben): 28 comes from the size of IP + UDP header. Is this reasonable - * for these values? Notably, why is kMinMTU a function of the transport - * protocol's overhead rather than, say, what's needed to hold a minimally-sized - * handshake fragment plus protocol overhead. */ +namespace bssl { -/* kMinMTU is the minimum acceptable MTU value. */ +// TODO(davidben): 28 comes from the size of IP + UDP header. Is this reasonable +// for these values? 
Notably, why is kMinMTU a function of the transport +// protocol's overhead rather than, say, what's needed to hold a minimally-sized +// handshake fragment plus protocol overhead. + +// kMinMTU is the minimum acceptable MTU value. static const unsigned int kMinMTU = 256 - 28; -/* kDefaultMTU is the default MTU value to use if neither the user nor - * the underlying BIO supplies one. */ +// kDefaultMTU is the default MTU value to use if neither the user nor +// the underlying BIO supplies one. static const unsigned int kDefaultMTU = 1500 - 28; -/* Receiving handshake messages. */ +// Receiving handshake messages. -static void dtls1_hm_fragment_free(hm_fragment *frag) { - if (frag == NULL) { - return; - } - OPENSSL_free(frag->data); - OPENSSL_free(frag->reassembly); - OPENSSL_free(frag); +hm_fragment::~hm_fragment() { + OPENSSL_free(data); + OPENSSL_free(reassembly); } -static hm_fragment *dtls1_hm_fragment_new(const struct hm_header_st *msg_hdr) { - hm_fragment *frag = OPENSSL_malloc(sizeof(hm_fragment)); - if (frag == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return NULL; +static UniquePtr dtls1_hm_fragment_new( + const struct hm_header_st *msg_hdr) { + ScopedCBB cbb; + UniquePtr frag = MakeUnique(); + if (!frag) { + return nullptr; } - OPENSSL_memset(frag, 0, sizeof(hm_fragment)); frag->type = msg_hdr->type; frag->seq = msg_hdr->seq; frag->msg_len = msg_hdr->msg_len; - /* Allocate space for the reassembled message and fill in the header. */ - frag->data = OPENSSL_malloc(DTLS1_HM_HEADER_LENGTH + msg_hdr->msg_len); + // Allocate space for the reassembled message and fill in the header. + frag->data = + (uint8_t *)OPENSSL_malloc(DTLS1_HM_HEADER_LENGTH + msg_hdr->msg_len); if (frag->data == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return nullptr; } - CBB cbb; - if (!CBB_init_fixed(&cbb, frag->data, DTLS1_HM_HEADER_LENGTH) || - !CBB_add_u8(&cbb, msg_hdr->type) || - !CBB_add_u24(&cbb, msg_hdr->msg_len) || - !CBB_add_u16(&cbb, msg_hdr->seq) || - !CBB_add_u24(&cbb, 0 /* frag_off */) || - !CBB_add_u24(&cbb, msg_hdr->msg_len) || - !CBB_finish(&cbb, NULL, NULL)) { - CBB_cleanup(&cbb); + if (!CBB_init_fixed(cbb.get(), frag->data, DTLS1_HM_HEADER_LENGTH) || + !CBB_add_u8(cbb.get(), msg_hdr->type) || + !CBB_add_u24(cbb.get(), msg_hdr->msg_len) || + !CBB_add_u16(cbb.get(), msg_hdr->seq) || + !CBB_add_u24(cbb.get(), 0 /* frag_off */) || + !CBB_add_u24(cbb.get(), msg_hdr->msg_len) || + !CBB_finish(cbb.get(), NULL, NULL)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return nullptr; } - /* If the handshake message is empty, |frag->reassembly| is NULL. */ + // If the handshake message is empty, |frag->reassembly| is NULL. if (msg_hdr->msg_len > 0) { - /* Initialize reassembly bitmask. */ + // Initialize reassembly bitmask. if (msg_hdr->msg_len + 7 < msg_hdr->msg_len) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - goto err; + return nullptr; } size_t bitmask_len = (msg_hdr->msg_len + 7) / 8; - frag->reassembly = OPENSSL_malloc(bitmask_len); + frag->reassembly = (uint8_t *)OPENSSL_malloc(bitmask_len); if (frag->reassembly == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return nullptr; } OPENSSL_memset(frag->reassembly, 0, bitmask_len); } return frag; - -err: - dtls1_hm_fragment_free(frag); - return NULL; } -/* bit_range returns a |uint8_t| with bits |start|, inclusive, to |end|, - * exclusive, set. */ +// bit_range returns a |uint8_t| with bits |start|, inclusive, to |end|, +// exclusive, set. 
static uint8_t bit_range(size_t start, size_t end) { return (uint8_t)(~((1u << start) - 1) & ((1u << end) - 1)); } -/* dtls1_hm_fragment_mark marks bytes |start|, inclusive, to |end|, exclusive, - * as received in |frag|. If |frag| becomes complete, it clears - * |frag->reassembly|. The range must be within the bounds of |frag|'s message - * and |frag->reassembly| must not be NULL. */ +// dtls1_hm_fragment_mark marks bytes |start|, inclusive, to |end|, exclusive, +// as received in |frag|. If |frag| becomes complete, it clears +// |frag->reassembly|. The range must be within the bounds of |frag|'s message +// and |frag->reassembly| must not be NULL. static void dtls1_hm_fragment_mark(hm_fragment *frag, size_t start, size_t end) { size_t msg_len = frag->msg_len; @@ -225,9 +216,13 @@ static void dtls1_hm_fragment_mark(hm_fragment *frag, size_t start, assert(0); return; } - /* A zero-length message will never have a pending reassembly. */ + // A zero-length message will never have a pending reassembly. assert(msg_len > 0); + if (start == end) { + return; + } + if ((start >> 3) == (end >> 3)) { frag->reassembly[start >> 3] |= bit_range(start & 7, end & 7); } else { @@ -240,7 +235,7 @@ static void dtls1_hm_fragment_mark(hm_fragment *frag, size_t start, } } - /* Check if the fragment is complete. */ + // Check if the fragment is complete. for (size_t i = 0; i < (msg_len >> 3); i++) { if (frag->reassembly[i] != 0xff) { return; @@ -255,96 +250,113 @@ static void dtls1_hm_fragment_mark(hm_fragment *frag, size_t start, frag->reassembly = NULL; } -/* dtls1_is_current_message_complete returns one if the current handshake - * message is complete and zero otherwise. */ -static int dtls1_is_current_message_complete(const SSL *ssl) { - hm_fragment *frag = ssl->d1->incoming_messages[ssl->d1->handshake_read_seq % - SSL_MAX_HANDSHAKE_FLIGHT]; +// dtls1_is_current_message_complete returns whether the current handshake +// message is complete. +static bool dtls1_is_current_message_complete(const SSL *ssl) { + size_t idx = ssl->d1->handshake_read_seq % SSL_MAX_HANDSHAKE_FLIGHT; + hm_fragment *frag = ssl->d1->incoming_messages[idx].get(); return frag != NULL && frag->reassembly == NULL; } -/* dtls1_get_incoming_message returns the incoming message corresponding to - * |msg_hdr|. If none exists, it creates a new one and inserts it in the - * queue. Otherwise, it checks |msg_hdr| is consistent with the existing one. It - * returns NULL on failure. The caller does not take ownership of the result. */ +// dtls1_get_incoming_message returns the incoming message corresponding to +// |msg_hdr|. If none exists, it creates a new one and inserts it in the +// queue. Otherwise, it checks |msg_hdr| is consistent with the existing one. It +// returns NULL on failure. The caller does not take ownership of the result. static hm_fragment *dtls1_get_incoming_message( - SSL *ssl, const struct hm_header_st *msg_hdr) { + SSL *ssl, uint8_t *out_alert, const struct hm_header_st *msg_hdr) { if (msg_hdr->seq < ssl->d1->handshake_read_seq || msg_hdr->seq - ssl->d1->handshake_read_seq >= SSL_MAX_HANDSHAKE_FLIGHT) { + *out_alert = SSL_AD_INTERNAL_ERROR; return NULL; } size_t idx = msg_hdr->seq % SSL_MAX_HANDSHAKE_FLIGHT; - hm_fragment *frag = ssl->d1->incoming_messages[idx]; + hm_fragment *frag = ssl->d1->incoming_messages[idx].get(); if (frag != NULL) { assert(frag->seq == msg_hdr->seq); - /* The new fragment must be compatible with the previous fragments from this - * message. 
*/ + // The new fragment must be compatible with the previous fragments from this + // message. if (frag->type != msg_hdr->type || frag->msg_len != msg_hdr->msg_len) { OPENSSL_PUT_ERROR(SSL, SSL_R_FRAGMENT_MISMATCH); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + *out_alert = SSL_AD_ILLEGAL_PARAMETER; return NULL; } return frag; } - /* This is the first fragment from this message. */ - frag = dtls1_hm_fragment_new(msg_hdr); - if (frag == NULL) { + // This is the first fragment from this message. + ssl->d1->incoming_messages[idx] = dtls1_hm_fragment_new(msg_hdr); + if (!ssl->d1->incoming_messages[idx]) { + *out_alert = SSL_AD_INTERNAL_ERROR; return NULL; } - ssl->d1->incoming_messages[idx] = frag; - return frag; + return ssl->d1->incoming_messages[idx].get(); } -/* dtls1_process_handshake_record reads a handshake record and processes it. It - * returns one if the record was successfully processed and 0 or -1 on error. */ -static int dtls1_process_handshake_record(SSL *ssl) { - SSL3_RECORD *rr = &ssl->s3->rrec; - -start: - if (rr->length == 0) { - int ret = dtls1_get_record(ssl); - if (ret <= 0) { - return ret; - } - } - - /* Cross-epoch records are discarded, but we may receive out-of-order - * application data between ChangeCipherSpec and Finished or a - * ChangeCipherSpec before the appropriate point in the handshake. Those must - * be silently discarded. - * - * However, only allow the out-of-order records in the correct epoch. - * Application data must come in the encrypted epoch, and ChangeCipherSpec in - * the unencrypted epoch (we never renegotiate). Other cases fall through and - * fail with a fatal error. */ - if ((rr->type == SSL3_RT_APPLICATION_DATA && - ssl->s3->aead_read_ctx != NULL) || - (rr->type == SSL3_RT_CHANGE_CIPHER_SPEC && - ssl->s3->aead_read_ctx == NULL)) { - rr->length = 0; - goto start; - } - - if (rr->type != SSL3_RT_HANDSHAKE) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); - return -1; +ssl_open_record_t dtls1_open_handshake(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, Span in) { + uint8_t type; + Span record; + auto ret = dtls_open_record(ssl, &type, &record, out_consumed, out_alert, in); + if (ret != ssl_open_record_success) { + return ret; + } + + switch (type) { + case SSL3_RT_APPLICATION_DATA: + // Unencrypted application data records are always illegal. + if (ssl->s3->aead_read_ctx->is_null_cipher()) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } + + // Out-of-order application data may be received between ChangeCipherSpec + // and finished. Discard it. + return ssl_open_record_discard; + + case SSL3_RT_CHANGE_CIPHER_SPEC: + // We do not support renegotiation, so encrypted ChangeCipherSpec records + // are illegal. + if (!ssl->s3->aead_read_ctx->is_null_cipher()) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } + + if (record.size() != 1u || record[0] != SSL3_MT_CCS) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_CHANGE_CIPHER_SPEC); + *out_alert = SSL_AD_ILLEGAL_PARAMETER; + return ssl_open_record_error; + } + + // Flag the ChangeCipherSpec for later. + ssl->d1->has_change_cipher_spec = true; + ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_CHANGE_CIPHER_SPEC, + record); + return ssl_open_record_success; + + case SSL3_RT_HANDSHAKE: + // Break out to main processing. 
+ break; + + default: + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; } CBS cbs; - CBS_init(&cbs, rr->data, rr->length); - + CBS_init(&cbs, record.data(), record.size()); while (CBS_len(&cbs) > 0) { - /* Read a handshake fragment. */ + // Read a handshake fragment. struct hm_header_st msg_hdr; CBS body; if (!dtls1_parse_fragment(&cbs, &msg_hdr, &body)) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_HANDSHAKE_RECORD); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return -1; + *out_alert = SSL_AD_DECODE_ERROR; + return ssl_open_record_error; } const size_t frag_off = msg_hdr.frag_off; @@ -354,128 +366,98 @@ static int dtls1_process_handshake_record(SSL *ssl) { frag_off + frag_len > msg_len || msg_len > ssl_max_handshake_message_len(ssl)) { OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESSIVE_MESSAGE_SIZE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return -1; + *out_alert = SSL_AD_ILLEGAL_PARAMETER; + return ssl_open_record_error; } - /* The encrypted epoch in DTLS has only one handshake message. */ + // The encrypted epoch in DTLS has only one handshake message. if (ssl->d1->r_epoch == 1 && msg_hdr.seq != ssl->d1->handshake_read_seq) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - return -1; + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; } if (msg_hdr.seq < ssl->d1->handshake_read_seq || msg_hdr.seq > (unsigned)ssl->d1->handshake_read_seq + SSL_MAX_HANDSHAKE_FLIGHT) { - /* Ignore fragments from the past, or ones too far in the future. */ + // Ignore fragments from the past, or ones too far in the future. continue; } - hm_fragment *frag = dtls1_get_incoming_message(ssl, &msg_hdr); + hm_fragment *frag = dtls1_get_incoming_message(ssl, out_alert, &msg_hdr); if (frag == NULL) { - return -1; + return ssl_open_record_error; } assert(frag->msg_len == msg_len); if (frag->reassembly == NULL) { - /* The message is already assembled. */ + // The message is already assembled. continue; } assert(msg_len > 0); - /* Copy the body into the fragment. */ + // Copy the body into the fragment. OPENSSL_memcpy(frag->data + DTLS1_HM_HEADER_LENGTH + frag_off, CBS_data(&body), CBS_len(&body)); dtls1_hm_fragment_mark(frag, frag_off, frag_off + frag_len); } - rr->length = 0; - ssl_read_buffer_discard(ssl); - return 1; + return ssl_open_record_success; } -int dtls1_get_message(SSL *ssl) { - if (ssl->s3->tmp.reuse_message) { - /* There must be a current message. */ - assert(ssl->init_msg != NULL); - ssl->s3->tmp.reuse_message = 0; - } else { - dtls1_release_current_message(ssl, 0 /* don't free buffer */); +bool dtls1_get_message(SSL *ssl, SSLMessage *out) { + if (!dtls1_is_current_message_complete(ssl)) { + return false; } - /* Process handshake records until the current message is ready. 
*/ - while (!dtls1_is_current_message_complete(ssl)) { - int ret = dtls1_process_handshake_record(ssl); - if (ret <= 0) { - return ret; - } + size_t idx = ssl->d1->handshake_read_seq % SSL_MAX_HANDSHAKE_FLIGHT; + hm_fragment *frag = ssl->d1->incoming_messages[idx].get(); + out->type = frag->type; + CBS_init(&out->body, frag->data + DTLS1_HM_HEADER_LENGTH, frag->msg_len); + CBS_init(&out->raw, frag->data, DTLS1_HM_HEADER_LENGTH + frag->msg_len); + out->is_v2_hello = false; + if (!ssl->s3->has_message) { + ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HANDSHAKE, out->raw); + ssl->s3->has_message = true; } - - hm_fragment *frag = ssl->d1->incoming_messages[ssl->d1->handshake_read_seq % - SSL_MAX_HANDSHAKE_FLIGHT]; - assert(frag != NULL); - assert(frag->reassembly == NULL); - assert(ssl->d1->handshake_read_seq == frag->seq); - - /* TODO(davidben): This function has a lot of implicit outputs. Simplify the - * |ssl_get_message| API. */ - ssl->s3->tmp.message_type = frag->type; - ssl->init_msg = frag->data + DTLS1_HM_HEADER_LENGTH; - ssl->init_num = frag->msg_len; - - ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HANDSHAKE, frag->data, - ssl->init_num + DTLS1_HM_HEADER_LENGTH); - return 1; + return true; } -void dtls1_get_current_message(const SSL *ssl, CBS *out) { - assert(dtls1_is_current_message_complete(ssl)); - - hm_fragment *frag = ssl->d1->incoming_messages[ssl->d1->handshake_read_seq % - SSL_MAX_HANDSHAKE_FLIGHT]; - CBS_init(out, frag->data, DTLS1_HM_HEADER_LENGTH + frag->msg_len); -} - -void dtls1_release_current_message(SSL *ssl, int free_buffer) { - if (ssl->init_msg == NULL) { - return; - } - +void dtls1_next_message(SSL *ssl) { + assert(ssl->s3->has_message); assert(dtls1_is_current_message_complete(ssl)); size_t index = ssl->d1->handshake_read_seq % SSL_MAX_HANDSHAKE_FLIGHT; - dtls1_hm_fragment_free(ssl->d1->incoming_messages[index]); - ssl->d1->incoming_messages[index] = NULL; + ssl->d1->incoming_messages[index].reset(); ssl->d1->handshake_read_seq++; - - ssl->init_msg = NULL; - ssl->init_num = 0; + ssl->s3->has_message = false; + // If we previously sent a flight, mark it as having a reply, so + // |on_handshake_complete| can manage post-handshake retransmission. + if (ssl->d1->outgoing_messages_complete) { + ssl->d1->flight_has_reply = true; + } } -void dtls_clear_incoming_messages(SSL *ssl) { - for (size_t i = 0; i < SSL_MAX_HANDSHAKE_FLIGHT; i++) { - dtls1_hm_fragment_free(ssl->d1->incoming_messages[i]); - ssl->d1->incoming_messages[i] = NULL; +bool dtls_has_unprocessed_handshake_data(const SSL *ssl) { + if (ssl->d1->has_change_cipher_spec) { + return true; } -} -int dtls_has_incoming_messages(const SSL *ssl) { size_t current = ssl->d1->handshake_read_seq % SSL_MAX_HANDSHAKE_FLIGHT; for (size_t i = 0; i < SSL_MAX_HANDSHAKE_FLIGHT; i++) { - /* Skip the current message. */ - if (ssl->init_msg != NULL && i == current) { + // Skip the current message. 
+ if (ssl->s3->has_message && i == current) { assert(dtls1_is_current_message_complete(ssl)); continue; } - if (ssl->d1->incoming_messages[i] != NULL) { - return 1; + if (ssl->d1->incoming_messages[i] != nullptr) { + return true; } } - return 0; + return false; } -int dtls1_parse_fragment(CBS *cbs, struct hm_header_st *out_hdr, - CBS *out_body) { +bool dtls1_parse_fragment(CBS *cbs, struct hm_header_st *out_hdr, + CBS *out_body) { OPENSSL_memset(out_hdr, 0x00, sizeof(struct hm_header_st)); if (!CBS_get_u8(cbs, &out_hdr->type) || @@ -484,116 +466,142 @@ int dtls1_parse_fragment(CBS *cbs, struct hm_header_st *out_hdr, !CBS_get_u24(cbs, &out_hdr->frag_off) || !CBS_get_u24(cbs, &out_hdr->frag_len) || !CBS_get_bytes(cbs, out_body, out_hdr->frag_len)) { - return 0; + return false; } - return 1; + return true; +} + +ssl_open_record_t dtls1_open_change_cipher_spec(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, + Span in) { + if (!ssl->d1->has_change_cipher_spec) { + // dtls1_open_handshake processes both handshake and ChangeCipherSpec. + auto ret = dtls1_open_handshake(ssl, out_consumed, out_alert, in); + if (ret != ssl_open_record_success) { + return ret; + } + } + if (ssl->d1->has_change_cipher_spec) { + ssl->d1->has_change_cipher_spec = false; + return ssl_open_record_success; + } + return ssl_open_record_discard; } -/* Sending handshake messages. */ +// Sending handshake messages. + +void DTLS_OUTGOING_MESSAGE::Clear() { + OPENSSL_free(data); + data = nullptr; +} void dtls_clear_outgoing_messages(SSL *ssl) { for (size_t i = 0; i < ssl->d1->outgoing_messages_len; i++) { - OPENSSL_free(ssl->d1->outgoing_messages[i].data); - ssl->d1->outgoing_messages[i].data = NULL; + ssl->d1->outgoing_messages[i].Clear(); } ssl->d1->outgoing_messages_len = 0; ssl->d1->outgoing_written = 0; ssl->d1->outgoing_offset = 0; + ssl->d1->outgoing_messages_complete = false; + ssl->d1->flight_has_reply = false; } -int dtls1_init_message(SSL *ssl, CBB *cbb, CBB *body, uint8_t type) { - /* Pick a modest size hint to save most of the |realloc| calls. */ +bool dtls1_init_message(SSL *ssl, CBB *cbb, CBB *body, uint8_t type) { + // Pick a modest size hint to save most of the |realloc| calls. if (!CBB_init(cbb, 64) || !CBB_add_u8(cbb, type) || !CBB_add_u24(cbb, 0 /* length (filled in later) */) || !CBB_add_u16(cbb, ssl->d1->handshake_write_seq) || !CBB_add_u24(cbb, 0 /* offset */) || !CBB_add_u24_length_prefixed(cbb, body)) { - return 0; + return false; } - return 1; + return true; } -int dtls1_finish_message(SSL *ssl, CBB *cbb, uint8_t **out_msg, - size_t *out_len) { - *out_msg = NULL; - if (!CBB_finish(cbb, out_msg, out_len) || - *out_len < DTLS1_HM_HEADER_LENGTH) { +bool dtls1_finish_message(SSL *ssl, CBB *cbb, Array *out_msg) { + if (!CBBFinishArray(cbb, out_msg) || + out_msg->size() < DTLS1_HM_HEADER_LENGTH) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - OPENSSL_free(*out_msg); - return 0; + return false; } - /* Fix up the header. Copy the fragment length into the total message - * length. */ - OPENSSL_memcpy(*out_msg + 1, *out_msg + DTLS1_HM_HEADER_LENGTH - 3, 3); - return 1; + // Fix up the header. Copy the fragment length into the total message + // length. + OPENSSL_memcpy(out_msg->data() + 1, + out_msg->data() + DTLS1_HM_HEADER_LENGTH - 3, 3); + return true; } -/* add_outgoing adds a new handshake message or ChangeCipherSpec to the current - * outgoing flight. It returns one on success and zero on error. In both cases, - * it takes ownership of |data| and releases it with |OPENSSL_free| when - * done. 
*/ -static int add_outgoing(SSL *ssl, int is_ccs, uint8_t *data, size_t len) { - OPENSSL_COMPILE_ASSERT(SSL_MAX_HANDSHAKE_FLIGHT < - (1 << 8 * sizeof(ssl->d1->outgoing_messages_len)), - outgoing_messages_len_is_too_small); - if (ssl->d1->outgoing_messages_len >= SSL_MAX_HANDSHAKE_FLIGHT) { - assert(0); +// add_outgoing adds a new handshake message or ChangeCipherSpec to the current +// outgoing flight. It returns true on success and false on error. +static bool add_outgoing(SSL *ssl, bool is_ccs, Array data) { + if (ssl->d1->outgoing_messages_complete) { + // If we've begun writing a new flight, we received the peer flight. Discard + // the timer and the our flight. + dtls1_stop_timer(ssl); + dtls_clear_outgoing_messages(ssl); + } + + static_assert(SSL_MAX_HANDSHAKE_FLIGHT < + (1 << 8 * sizeof(ssl->d1->outgoing_messages_len)), + "outgoing_messages_len is too small"); + if (ssl->d1->outgoing_messages_len >= SSL_MAX_HANDSHAKE_FLIGHT || + data.size() > 0xffffffff) { + assert(false); OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - OPENSSL_free(data); - return 0; + return false; } if (!is_ccs) { - /* TODO(svaldez): Move this up a layer to fix abstraction for SSL_TRANSCRIPT - * on hs. */ + // TODO(svaldez): Move this up a layer to fix abstraction for SSLTranscript + // on hs. if (ssl->s3->hs != NULL && - !SSL_TRANSCRIPT_update(&ssl->s3->hs->transcript, data, len)) { + !ssl->s3->hs->transcript.Update(data)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - OPENSSL_free(data); - return 0; + return false; } ssl->d1->handshake_write_seq++; } DTLS_OUTGOING_MESSAGE *msg = &ssl->d1->outgoing_messages[ssl->d1->outgoing_messages_len]; - msg->data = data; + size_t len; + data.Release(&msg->data, &len); msg->len = len; msg->epoch = ssl->d1->w_epoch; msg->is_ccs = is_ccs; ssl->d1->outgoing_messages_len++; - return 1; + return true; } -int dtls1_add_message(SSL *ssl, uint8_t *data, size_t len) { - return add_outgoing(ssl, 0 /* handshake */, data, len); +bool dtls1_add_message(SSL *ssl, Array data) { + return add_outgoing(ssl, false /* handshake */, std::move(data)); } -int dtls1_add_change_cipher_spec(SSL *ssl) { - return add_outgoing(ssl, 1 /* ChangeCipherSpec */, NULL, 0); +bool dtls1_add_change_cipher_spec(SSL *ssl) { + return add_outgoing(ssl, true /* ChangeCipherSpec */, Array()); } -int dtls1_add_alert(SSL *ssl, uint8_t level, uint8_t desc) { - /* The |add_alert| path is only used for warning alerts for now, which DTLS - * never sends. This will be implemented later once closure alerts are - * converted. */ - assert(0); +bool dtls1_add_alert(SSL *ssl, uint8_t level, uint8_t desc) { + // The |add_alert| path is only used for warning alerts for now, which DTLS + // never sends. This will be implemented later once closure alerts are + // converted. + assert(false); OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; + return false; } -/* dtls1_update_mtu updates the current MTU from the BIO, ensuring it is above - * the minimum. */ +// dtls1_update_mtu updates the current MTU from the BIO, ensuring it is above +// the minimum. static void dtls1_update_mtu(SSL *ssl) { - /* TODO(davidben): No consumer implements |BIO_CTRL_DGRAM_SET_MTU| and the - * only |BIO_CTRL_DGRAM_QUERY_MTU| implementation could use - * |SSL_set_mtu|. Does this need to be so complex? */ + // TODO(davidben): No consumer implements |BIO_CTRL_DGRAM_SET_MTU| and the + // only |BIO_CTRL_DGRAM_QUERY_MTU| implementation could use + // |SSL_set_mtu|. Does this need to be so complex? 
if (ssl->d1->mtu < dtls1_min_mtu() && !(SSL_get_options(ssl) & SSL_OP_NO_QUERY_MTU)) { long mtu = BIO_ctrl(ssl->wbio, BIO_CTRL_DGRAM_QUERY_MTU, 0, NULL); @@ -605,7 +613,7 @@ static void dtls1_update_mtu(SSL *ssl) { } } - /* The MTU should be above the minimum now. */ + // The MTU should be above the minimum now. assert(ssl->d1->mtu >= dtls1_min_mtu()); } @@ -616,28 +624,28 @@ enum seal_result_t { seal_success, }; -/* seal_next_message seals |msg|, which must be the next message, to |out|. If - * progress was made, it returns |seal_partial| or |seal_success| and sets - * |*out_len| to the number of bytes written. */ +// seal_next_message seals |msg|, which must be the next message, to |out|. If +// progress was made, it returns |seal_partial| or |seal_success| and sets +// |*out_len| to the number of bytes written. static enum seal_result_t seal_next_message(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, const DTLS_OUTGOING_MESSAGE *msg) { assert(ssl->d1->outgoing_written < ssl->d1->outgoing_messages_len); assert(msg == &ssl->d1->outgoing_messages[ssl->d1->outgoing_written]); - /* DTLS renegotiation is unsupported, so only epochs 0 (NULL cipher) and 1 - * (negotiated cipher) exist. */ - assert(ssl->d1->w_epoch == 0 || ssl->d1->w_epoch == 1); - assert(msg->epoch <= ssl->d1->w_epoch); enum dtls1_use_epoch_t use_epoch = dtls1_use_current_epoch; - if (ssl->d1->w_epoch == 1 && msg->epoch == 0) { + if (ssl->d1->w_epoch >= 1 && msg->epoch == ssl->d1->w_epoch - 1) { use_epoch = dtls1_use_previous_epoch; + } else if (msg->epoch != ssl->d1->w_epoch) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return seal_error; } + size_t overhead = dtls_max_seal_overhead(ssl, use_epoch); size_t prefix = dtls_seal_prefix_len(ssl, use_epoch); if (msg->is_ccs) { - /* Check there is room for the ChangeCipherSpec. */ + // Check there is room for the ChangeCipherSpec. static const uint8_t kChangeCipherSpec[1] = {SSL3_MT_CCS}; if (max_out < sizeof(kChangeCipherSpec) + overhead) { return seal_no_progress; @@ -650,11 +658,11 @@ static enum seal_result_t seal_next_message(SSL *ssl, uint8_t *out, } ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_CHANGE_CIPHER_SPEC, - kChangeCipherSpec, sizeof(kChangeCipherSpec)); + kChangeCipherSpec); return seal_success; } - /* DTLS messages are serialized as a single fragment in |msg|. */ + // DTLS messages are serialized as a single fragment in |msg|. CBS cbs, body; struct hm_header_st hdr; CBS_init(&cbs, msg->data, msg->len); @@ -668,7 +676,7 @@ static enum seal_result_t seal_next_message(SSL *ssl, uint8_t *out, return seal_error; } - /* Determine how much progress can be made. */ + // Determine how much progress can be made. if (max_out < DTLS1_HM_HEADER_LENGTH + 1 + overhead || max_out < prefix) { return seal_no_progress; } @@ -677,24 +685,24 @@ static enum seal_result_t seal_next_message(SSL *ssl, uint8_t *out, todo = max_out - DTLS1_HM_HEADER_LENGTH - overhead; } - /* Assemble a fragment, to be sealed in-place. */ - CBB cbb; + // Assemble a fragment, to be sealed in-place. 
+ ScopedCBB cbb; uint8_t *frag = out + prefix; size_t max_frag = max_out - prefix, frag_len; - if (!CBB_init_fixed(&cbb, frag, max_frag) || - !CBB_add_u8(&cbb, hdr.type) || - !CBB_add_u24(&cbb, hdr.msg_len) || - !CBB_add_u16(&cbb, hdr.seq) || - !CBB_add_u24(&cbb, ssl->d1->outgoing_offset) || - !CBB_add_u24(&cbb, todo) || - !CBB_add_bytes(&cbb, CBS_data(&body), todo) || - !CBB_finish(&cbb, NULL, &frag_len)) { - CBB_cleanup(&cbb); + if (!CBB_init_fixed(cbb.get(), frag, max_frag) || + !CBB_add_u8(cbb.get(), hdr.type) || + !CBB_add_u24(cbb.get(), hdr.msg_len) || + !CBB_add_u16(cbb.get(), hdr.seq) || + !CBB_add_u24(cbb.get(), ssl->d1->outgoing_offset) || + !CBB_add_u24(cbb.get(), todo) || + !CBB_add_bytes(cbb.get(), CBS_data(&body), todo) || + !CBB_finish(cbb.get(), NULL, &frag_len)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return seal_error; } - ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HANDSHAKE, frag, frag_len); + ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HANDSHAKE, + MakeSpan(frag, frag_len)); if (!dtls_seal_record(ssl, out, out_len, max_out, SSL3_RT_HANDSHAKE, out + prefix, frag_len, use_epoch)) { @@ -702,7 +710,7 @@ static enum seal_result_t seal_next_message(SSL *ssl, uint8_t *out, } if (todo == CBS_len(&body)) { - /* The next message is complete. */ + // The next message is complete. ssl->d1->outgoing_offset = 0; return seal_success; } @@ -711,12 +719,12 @@ static enum seal_result_t seal_next_message(SSL *ssl, uint8_t *out, return seal_partial; } -/* seal_next_packet writes as much of the next flight as possible to |out| and - * advances |ssl->d1->outgoing_written| and |ssl->d1->outgoing_offset| as - * appropriate. */ -static int seal_next_packet(SSL *ssl, uint8_t *out, size_t *out_len, - size_t max_out) { - int made_progress = 0; +// seal_next_packet writes as much of the next flight as possible to |out| and +// advances |ssl->d1->outgoing_written| and |ssl->d1->outgoing_offset| as +// appropriate. +static bool seal_next_packet(SSL *ssl, uint8_t *out, size_t *out_len, + size_t max_out) { + bool made_progress = false; size_t total = 0; assert(ssl->d1->outgoing_written < ssl->d1->outgoing_messages_len); for (; ssl->d1->outgoing_written < ssl->d1->outgoing_messages_len; @@ -727,7 +735,7 @@ static int seal_next_packet(SSL *ssl, uint8_t *out, size_t *out_len, enum seal_result_t ret = seal_next_message(ssl, out, &len, max_out, msg); switch (ret) { case seal_error: - return 0; + return false; case seal_no_progress: goto packet_full; @@ -737,7 +745,7 @@ static int seal_next_packet(SSL *ssl, uint8_t *out, size_t *out_len, out += len; max_out -= len; total += len; - made_progress = 1; + made_progress = true; if (ret == seal_partial) { goto packet_full; @@ -747,21 +755,26 @@ static int seal_next_packet(SSL *ssl, uint8_t *out, size_t *out_len, } packet_full: - /* The MTU was too small to make any progress. */ + // The MTU was too small to make any progress. 
if (!made_progress) { OPENSSL_PUT_ERROR(SSL, SSL_R_MTU_TOO_SMALL); - return 0; + return false; } *out_len = total; - return 1; + return true; } -int dtls1_flush_flight(SSL *ssl) { +static int send_flight(SSL *ssl) { + if (ssl->s3->write_shutdown != ssl_shutdown_none) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); + return -1; + } + dtls1_update_mtu(ssl); int ret = -1; - uint8_t *packet = OPENSSL_malloc(ssl->d1->mtu); + uint8_t *packet = (uint8_t *)OPENSSL_malloc(ssl->d1->mtu); if (packet == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); goto err; @@ -778,17 +791,17 @@ int dtls1_flush_flight(SSL *ssl) { int bio_ret = BIO_write(ssl->wbio, packet, packet_len); if (bio_ret <= 0) { - /* Retry this packet the next time around. */ + // Retry this packet the next time around. ssl->d1->outgoing_written = old_written; ssl->d1->outgoing_offset = old_offset; - ssl->rwstate = SSL_WRITING; + ssl->s3->rwstate = SSL_WRITING; ret = bio_ret; goto err; } } if (BIO_flush(ssl->wbio) <= 0) { - ssl->rwstate = SSL_WRITING; + ssl->s3->rwstate = SSL_WRITING; goto err; } @@ -799,17 +812,26 @@ int dtls1_flush_flight(SSL *ssl) { return ret; } +int dtls1_flush_flight(SSL *ssl) { + ssl->d1->outgoing_messages_complete = true; + // Start the retransmission timer for the next flight (if any). + dtls1_start_timer(ssl); + return send_flight(ssl); +} + int dtls1_retransmit_outgoing_messages(SSL *ssl) { - /* Rewind to the start of the flight and write it again. - * - * TODO(davidben): This does not allow retransmits to be resumed on - * non-blocking write. */ + // Rewind to the start of the flight and write it again. + // + // TODO(davidben): This does not allow retransmits to be resumed on + // non-blocking write. ssl->d1->outgoing_written = 0; ssl->d1->outgoing_offset = 0; - return dtls1_flush_flight(ssl); + return send_flight(ssl); } unsigned int dtls1_min_mtu(void) { return kMinMTU; } + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/d1_lib.c b/Sources/BoringSSL/ssl/d1_lib.cc similarity index 67% rename from Sources/BoringSSL/ssl/d1_lib.c rename to Sources/BoringSSL/ssl/d1_lib.cc index 258e9ab29..eff06eec7 100644 --- a/Sources/BoringSSL/ssl/d1_lib.c +++ b/Sources/BoringSSL/ssl/d1_lib.cc @@ -68,156 +68,107 @@ #include "internal.h" +namespace bssl { -/* DTLS1_MTU_TIMEOUTS is the maximum number of timeouts to expire - * before starting to decrease the MTU. */ +// DTLS1_MTU_TIMEOUTS is the maximum number of timeouts to expire +// before starting to decrease the MTU. #define DTLS1_MTU_TIMEOUTS 2 -/* DTLS1_MAX_TIMEOUTS is the maximum number of timeouts to expire - * before failing the DTLS handshake. */ +// DTLS1_MAX_TIMEOUTS is the maximum number of timeouts to expire +// before failing the DTLS handshake. #define DTLS1_MAX_TIMEOUTS 12 -int dtls1_new(SSL *ssl) { - DTLS1_STATE *d1; +DTLS1_STATE::DTLS1_STATE() + : has_change_cipher_spec(false), + outgoing_messages_complete(false), + flight_has_reply(false) {} +DTLS1_STATE::~DTLS1_STATE() {} + +bool dtls1_new(SSL *ssl) { if (!ssl3_new(ssl)) { - return 0; + return false; } - d1 = OPENSSL_malloc(sizeof *d1); - if (d1 == NULL) { + UniquePtr d1 = MakeUnique(); + if (!d1) { ssl3_free(ssl); - return 0; + return false; } - OPENSSL_memset(d1, 0, sizeof *d1); - ssl->d1 = d1; + ssl->d1 = d1.release(); - /* Set the version to the highest supported version. - * - * TODO(davidben): Move this field into |s3|, have it store the normalized - * protocol version, and implement this pre-negotiation quirk in |SSL_version| - * at the API boundary rather than in internal state. 
*/ + // Set the version to the highest supported version. + // + // TODO(davidben): Move this field into |s3|, have it store the normalized + // protocol version, and implement this pre-negotiation quirk in |SSL_version| + // at the API boundary rather than in internal state. ssl->version = DTLS1_2_VERSION; - return 1; + return true; } void dtls1_free(SSL *ssl) { ssl3_free(ssl); - if (ssl == NULL || ssl->d1 == NULL) { + if (ssl == NULL) { return; } - dtls_clear_incoming_messages(ssl); - dtls_clear_outgoing_messages(ssl); - - OPENSSL_free(ssl->d1); + Delete(ssl->d1); ssl->d1 = NULL; } -void DTLSv1_set_initial_timeout_duration(SSL *ssl, unsigned int duration_ms) { - ssl->initial_timeout_duration_ms = duration_ms; -} - void dtls1_start_timer(SSL *ssl) { - /* If timer is not set, initialize duration (by default, 1 second) */ + // If timer is not set, initialize duration (by default, 1 second) if (ssl->d1->next_timeout.tv_sec == 0 && ssl->d1->next_timeout.tv_usec == 0) { ssl->d1->timeout_duration_ms = ssl->initial_timeout_duration_ms; } - /* Set timeout to current time */ + // Set timeout to current time ssl_get_current_time(ssl, &ssl->d1->next_timeout); - /* Add duration to current time */ + // Add duration to current time ssl->d1->next_timeout.tv_sec += ssl->d1->timeout_duration_ms / 1000; ssl->d1->next_timeout.tv_usec += (ssl->d1->timeout_duration_ms % 1000) * 1000; if (ssl->d1->next_timeout.tv_usec >= 1000000) { ssl->d1->next_timeout.tv_sec++; ssl->d1->next_timeout.tv_usec -= 1000000; } - BIO_ctrl(ssl->rbio, BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT, 0, - &ssl->d1->next_timeout); } -int DTLSv1_get_timeout(const SSL *ssl, struct timeval *out) { - if (!SSL_is_dtls(ssl)) { - return 0; - } - - /* If no timeout is set, just return NULL */ - if (ssl->d1->next_timeout.tv_sec == 0 && ssl->d1->next_timeout.tv_usec == 0) { - return 0; - } - - struct timeval timenow; - ssl_get_current_time(ssl, &timenow); - - /* If timer already expired, set remaining time to 0 */ - if (ssl->d1->next_timeout.tv_sec < timenow.tv_sec || - (ssl->d1->next_timeout.tv_sec == timenow.tv_sec && - ssl->d1->next_timeout.tv_usec <= timenow.tv_usec)) { - OPENSSL_memset(out, 0, sizeof(struct timeval)); - return 1; - } - - /* Calculate time left until timer expires */ - OPENSSL_memcpy(out, &ssl->d1->next_timeout, sizeof(struct timeval)); - out->tv_sec -= timenow.tv_sec; - out->tv_usec -= timenow.tv_usec; - if (out->tv_usec < 0) { - out->tv_sec--; - out->tv_usec += 1000000; - } - - /* If remaining time is less than 15 ms, set it to 0 to prevent issues - * because of small devergences with socket timeouts. 
*/ - if (out->tv_sec == 0 && out->tv_usec < 15000) { - OPENSSL_memset(out, 0, sizeof(struct timeval)); - } - - return 1; -} - -int dtls1_is_timer_expired(SSL *ssl) { +bool dtls1_is_timer_expired(SSL *ssl) { struct timeval timeleft; - /* Get time left until timeout, return false if no timer running */ + // Get time left until timeout, return false if no timer running if (!DTLSv1_get_timeout(ssl, &timeleft)) { - return 0; + return false; } - /* Return false if timer is not expired yet */ + // Return false if timer is not expired yet if (timeleft.tv_sec > 0 || timeleft.tv_usec > 0) { - return 0; + return false; } - /* Timer expired, so return true */ - return 1; + // Timer expired, so return true + return true; } -void dtls1_double_timeout(SSL *ssl) { +static void dtls1_double_timeout(SSL *ssl) { ssl->d1->timeout_duration_ms *= 2; if (ssl->d1->timeout_duration_ms > 60000) { ssl->d1->timeout_duration_ms = 60000; } - dtls1_start_timer(ssl); } void dtls1_stop_timer(SSL *ssl) { - /* Reset everything */ ssl->d1->num_timeouts = 0; - OPENSSL_memset(&ssl->d1->next_timeout, 0, sizeof(struct timeval)); + OPENSSL_memset(&ssl->d1->next_timeout, 0, sizeof(ssl->d1->next_timeout)); ssl->d1->timeout_duration_ms = ssl->initial_timeout_duration_ms; - BIO_ctrl(ssl->rbio, BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT, 0, - &ssl->d1->next_timeout); - /* Clear retransmission buffer */ - dtls_clear_outgoing_messages(ssl); } -int dtls1_check_timeout_num(SSL *ssl) { +bool dtls1_check_timeout_num(SSL *ssl) { ssl->d1->num_timeouts++; - /* Reduce MTU after 2 unsuccessful retransmissions */ + // Reduce MTU after 2 unsuccessful retransmissions if (ssl->d1->num_timeouts > DTLS1_MTU_TIMEOUTS && !(SSL_get_options(ssl) & SSL_OP_NO_QUERY_MTU)) { long mtu = BIO_ctrl(ssl->wbio, BIO_CTRL_DGRAM_GET_FALLBACK_MTU, 0, NULL); @@ -227,32 +178,90 @@ int dtls1_check_timeout_num(SSL *ssl) { } if (ssl->d1->num_timeouts > DTLS1_MAX_TIMEOUTS) { - /* fail the connection, enough alerts have been sent */ + // fail the connection, enough alerts have been sent OPENSSL_PUT_ERROR(SSL, SSL_R_READ_TIMEOUT_EXPIRED); - return -1; + return false; + } + + return true; +} + +} // namespace bssl + +using namespace bssl; + +void DTLSv1_set_initial_timeout_duration(SSL *ssl, unsigned int duration_ms) { + ssl->initial_timeout_duration_ms = duration_ms; +} + +int DTLSv1_get_timeout(const SSL *ssl, struct timeval *out) { + if (!SSL_is_dtls(ssl)) { + return 0; + } + + // If no timeout is set, just return 0. + if (ssl->d1->next_timeout.tv_sec == 0 && ssl->d1->next_timeout.tv_usec == 0) { + return 0; } - return 0; + struct OPENSSL_timeval timenow; + ssl_get_current_time(ssl, &timenow); + + // If timer already expired, set remaining time to 0. + if (ssl->d1->next_timeout.tv_sec < timenow.tv_sec || + (ssl->d1->next_timeout.tv_sec == timenow.tv_sec && + ssl->d1->next_timeout.tv_usec <= timenow.tv_usec)) { + OPENSSL_memset(out, 0, sizeof(*out)); + return 1; + } + + // Calculate time left until timer expires. + struct OPENSSL_timeval ret; + OPENSSL_memcpy(&ret, &ssl->d1->next_timeout, sizeof(ret)); + ret.tv_sec -= timenow.tv_sec; + if (ret.tv_usec >= timenow.tv_usec) { + ret.tv_usec -= timenow.tv_usec; + } else { + ret.tv_usec = 1000000 + ret.tv_usec - timenow.tv_usec; + ret.tv_sec--; + } + + // If remaining time is less than 15 ms, set it to 0 to prevent issues + // because of small divergences with socket timeouts. + if (ret.tv_sec == 0 && ret.tv_usec < 15000) { + OPENSSL_memset(&ret, 0, sizeof(ret)); + } + + // Clamp the result in case of overflow. 
+ if (ret.tv_sec > INT_MAX) { + assert(0); + out->tv_sec = INT_MAX; + } else { + out->tv_sec = ret.tv_sec; + } + + out->tv_usec = ret.tv_usec; + return 1; } int DTLSv1_handle_timeout(SSL *ssl) { ssl_reset_error_state(ssl); if (!SSL_is_dtls(ssl)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return -1; } - /* if no timer is expired, don't do anything */ + // If no timer is expired, don't do anything. if (!dtls1_is_timer_expired(ssl)) { return 0; } - dtls1_double_timeout(ssl); - - if (dtls1_check_timeout_num(ssl) < 0) { + if (!dtls1_check_timeout_num(ssl)) { return -1; } + dtls1_double_timeout(ssl); dtls1_start_timer(ssl); return dtls1_retransmit_outgoing_messages(ssl); } diff --git a/Sources/BoringSSL/ssl/d1_pkt.c b/Sources/BoringSSL/ssl/d1_pkt.cc similarity index 57% rename from Sources/BoringSSL/ssl/d1_pkt.c rename to Sources/BoringSSL/ssl/d1_pkt.cc index 27b2763ac..d29a5c284 100644 --- a/Sources/BoringSSL/ssl/d1_pkt.c +++ b/Sources/BoringSSL/ssl/d1_pkt.cc @@ -126,214 +126,77 @@ #include "internal.h" -int dtls1_get_record(SSL *ssl) { -again: - switch (ssl->s3->recv_shutdown) { - case ssl_shutdown_none: - break; - case ssl_shutdown_fatal_alert: - OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); - return -1; - case ssl_shutdown_close_notify: - return 0; - } - - /* Read a new packet if there is no unconsumed one. */ - if (ssl_read_buffer_len(ssl) == 0) { - int read_ret = ssl_read_buffer_extend_to(ssl, 0 /* unused */); - if (read_ret < 0 && dtls1_is_timer_expired(ssl)) { - /* For blocking BIOs, retransmits must be handled internally. */ - int timeout_ret = DTLSv1_handle_timeout(ssl); - if (timeout_ret <= 0) { - return timeout_ret; - } - goto again; - } - if (read_ret <= 0) { - return read_ret; - } - } - assert(ssl_read_buffer_len(ssl) > 0); - - CBS body; - uint8_t type, alert; - size_t consumed; - enum ssl_open_record_t open_ret = - dtls_open_record(ssl, &type, &body, &consumed, &alert, - ssl_read_buffer(ssl), ssl_read_buffer_len(ssl)); - ssl_read_buffer_consume(ssl, consumed); - switch (open_ret) { - case ssl_open_record_partial: - /* Impossible in DTLS. */ - break; - - case ssl_open_record_success: - if (CBS_len(&body) > 0xffff) { - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return -1; - } - - SSL3_RECORD *rr = &ssl->s3->rrec; - rr->type = type; - rr->length = (uint16_t)CBS_len(&body); - rr->data = (uint8_t *)CBS_data(&body); - return 1; - - case ssl_open_record_discard: - goto again; - - case ssl_open_record_close_notify: - return 0; - - case ssl_open_record_fatal_alert: - return -1; +namespace bssl { - case ssl_open_record_error: - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return -1; - } - - assert(0); - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; -} - -int dtls1_read_app_data(SSL *ssl, int *out_got_handshake, uint8_t *buf, int len, - int peek) { +ssl_open_record_t dtls1_open_app_data(SSL *ssl, Span *out, + size_t *out_consumed, uint8_t *out_alert, + Span in) { assert(!SSL_in_init(ssl)); - *out_got_handshake = 0; - SSL3_RECORD *rr = &ssl->s3->rrec; - -again: - if (rr->length == 0) { - int ret = dtls1_get_record(ssl); - if (ret <= 0) { - return ret; - } + uint8_t type; + Span record; + auto ret = dtls_open_record(ssl, &type, &record, out_consumed, out_alert, in); + if (ret != ssl_open_record_success) { + return ret; } - if (rr->type == SSL3_RT_HANDSHAKE) { - /* Parse the first fragment header to determine if this is a pre-CCS or - * post-CCS handshake record. 
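// Illustrative sketch only, not part of the vendored BoringSSL sources: one
// way a blocking application could drive the retransmission timer defined
// above. |fd|, |buf| and the select() loop are assumptions of this example
// (they need <sys/select.h>); DTLSv1_get_timeout, DTLSv1_handle_timeout,
// SSL_read and SSL_get_error are the APIs shown in this patch.
static int dtls_read_with_timeouts(SSL *ssl, int fd, uint8_t *buf, int len) {
  for (;;) {
    int ret = SSL_read(ssl, buf, len);
    if (ret > 0 || SSL_get_error(ssl, ret) != SSL_ERROR_WANT_READ) {
      return ret;  // Data, clean shutdown, or a fatal error.
    }
    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(fd, &fds);
    struct timeval timeout;
    struct timeval *timeout_ptr =
        DTLSv1_get_timeout(ssl, &timeout) ? &timeout : NULL;
    // select() returning zero means the retransmission deadline passed.
    if (select(fd + 1, &fds, NULL, NULL, timeout_ptr) == 0 &&
        DTLSv1_handle_timeout(ssl) < 0) {
      return -1;
    }
  }
}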
DTLS resets handshake message numbers on each - * handshake, so renegotiations and retransmissions are ambiguous. */ + if (type == SSL3_RT_HANDSHAKE) { + // Parse the first fragment header to determine if this is a pre-CCS or + // post-CCS handshake record. DTLS resets handshake message numbers on each + // handshake, so renegotiations and retransmissions are ambiguous. CBS cbs, body; struct hm_header_st msg_hdr; - CBS_init(&cbs, rr->data, rr->length); + CBS_init(&cbs, record.data(), record.size()); if (!dtls1_parse_fragment(&cbs, &msg_hdr, &body)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_HANDSHAKE_RECORD); - return -1; + *out_alert = SSL_AD_DECODE_ERROR; + return ssl_open_record_error; } if (msg_hdr.type == SSL3_MT_FINISHED && msg_hdr.seq == ssl->d1->handshake_read_seq - 1) { if (msg_hdr.frag_off == 0) { - /* Retransmit our last flight of messages. If the peer sends the second - * Finished, they may not have received ours. Only do this for the - * first fragment, in case the Finished was fragmented. */ - if (dtls1_check_timeout_num(ssl) < 0) { - return -1; + // Retransmit our last flight of messages. If the peer sends the second + // Finished, they may not have received ours. Only do this for the + // first fragment, in case the Finished was fragmented. + if (!dtls1_check_timeout_num(ssl)) { + *out_alert = 0; // TODO(davidben): Send an alert? + return ssl_open_record_error; } dtls1_retransmit_outgoing_messages(ssl); } - - rr->length = 0; - goto again; + return ssl_open_record_discard; } - /* Otherwise, this is a pre-CCS handshake message from an unsupported - * renegotiation attempt. Fall through to the error path. */ + // Otherwise, this is a pre-CCS handshake message from an unsupported + // renegotiation attempt. Fall through to the error path. } - if (rr->type != SSL3_RT_APPLICATION_DATA) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + if (type != SSL3_RT_APPLICATION_DATA) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); - return -1; - } - - /* Discard empty records. */ - if (rr->length == 0) { - goto again; - } - - if (len <= 0) { - return len; - } - - if ((unsigned)len > rr->length) { - len = rr->length; + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; } - OPENSSL_memcpy(buf, rr->data, len); - if (!peek) { - /* TODO(davidben): Should the record be truncated instead? This is a - * datagram transport. See https://crbug.com/boringssl/65. */ - rr->length -= len; - rr->data += len; - if (rr->length == 0) { - /* The record has been consumed, so we may now clear the buffer. */ - ssl_read_buffer_discard(ssl); - } + if (record.empty()) { + return ssl_open_record_discard; } - return len; + *out = record; + return ssl_open_record_success; } -int dtls1_read_change_cipher_spec(SSL *ssl) { - SSL3_RECORD *rr = &ssl->s3->rrec; - -again: - if (rr->length == 0) { - int ret = dtls1_get_record(ssl); - if (ret <= 0) { - return ret; - } - } - - /* Drop handshake records silently. The epochs match, so this must be a - * retransmit of a message we already received. */ - if (rr->type == SSL3_RT_HANDSHAKE) { - rr->length = 0; - goto again; - } - - /* Other record types are illegal in this epoch. Note all application data - * records come in the encrypted epoch. 
*/ - if (rr->type != SSL3_RT_CHANGE_CIPHER_SPEC) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); - return -1; - } +int dtls1_write_app_data(SSL *ssl, bool *out_needs_handshake, const uint8_t *in, + int len) { + assert(!SSL_in_init(ssl)); + *out_needs_handshake = false; - if (rr->length != 1 || rr->data[0] != SSL3_MT_CCS) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_CHANGE_CIPHER_SPEC); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + if (ssl->s3->write_shutdown != ssl_shutdown_none) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); return -1; } - ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_CHANGE_CIPHER_SPEC, rr->data, - rr->length); - - rr->length = 0; - ssl_read_buffer_discard(ssl); - return 1; -} - -void dtls1_read_close_notify(SSL *ssl) { - /* Bidirectional shutdown doesn't make sense for an unordered transport. DTLS - * alerts also aren't delivered reliably, so we may even time out because the - * peer never received our close_notify. Report to the caller that the channel - * has fully shut down. */ - if (ssl->s3->recv_shutdown == ssl_shutdown_none) { - ssl->s3->recv_shutdown = ssl_shutdown_close_notify; - } -} - -int dtls1_write_app_data(SSL *ssl, const uint8_t *buf, int len) { - assert(!SSL_in_init(ssl)); - if (len > SSL3_RT_MAX_PLAIN_LENGTH) { OPENSSL_PUT_ERROR(SSL, SSL_R_DTLS_MESSAGE_TOO_BIG); return -1; @@ -348,7 +211,7 @@ int dtls1_write_app_data(SSL *ssl, const uint8_t *buf, int len) { return 0; } - int ret = dtls1_write_record(ssl, SSL3_RT_APPLICATION_DATA, buf, (size_t)len, + int ret = dtls1_write_record(ssl, SSL3_RT_APPLICATION_DATA, in, (size_t)len, dtls1_use_current_epoch); if (ret <= 0) { return ret; @@ -356,29 +219,29 @@ int dtls1_write_app_data(SSL *ssl, const uint8_t *buf, int len) { return len; } -int dtls1_write_record(SSL *ssl, int type, const uint8_t *buf, size_t len, +int dtls1_write_record(SSL *ssl, int type, const uint8_t *in, size_t len, enum dtls1_use_epoch_t use_epoch) { + SSLBuffer *buf = &ssl->s3->write_buffer; assert(len <= SSL3_RT_MAX_PLAIN_LENGTH); - /* There should never be a pending write buffer in DTLS. One can't write half - * a datagram, so the write buffer is always dropped in - * |ssl_write_buffer_flush|. */ - assert(!ssl_write_buffer_is_pending(ssl)); + // There should never be a pending write buffer in DTLS. One can't write half + // a datagram, so the write buffer is always dropped in + // |ssl_write_buffer_flush|. + assert(buf->empty()); if (len > SSL3_RT_MAX_PLAIN_LENGTH) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return -1; } - size_t max_out = len + SSL_max_seal_overhead(ssl); - uint8_t *out; size_t ciphertext_len; - if (!ssl_write_buffer_init(ssl, &out, max_out) || - !dtls_seal_record(ssl, out, &ciphertext_len, max_out, type, buf, len, - use_epoch)) { - ssl_write_buffer_clear(ssl); + if (!buf->EnsureCap(ssl_seal_align_prefix_len(ssl), + len + SSL_max_seal_overhead(ssl)) || + !dtls_seal_record(ssl, buf->remaining().data(), &ciphertext_len, + buf->remaining().size(), type, in, len, use_epoch)) { + buf->Clear(); return -1; } - ssl_write_buffer_set_len(ssl, ciphertext_len); + buf->DidWrite(ciphertext_len); int ret = ssl_write_buffer_flush(ssl); if (ret <= 0) { @@ -395,16 +258,17 @@ int dtls1_dispatch_alert(SSL *ssl) { } ssl->s3->alert_dispatch = 0; - /* If the alert is fatal, flush the BIO now. */ + // If the alert is fatal, flush the BIO now. 
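// Illustrative note, not from the sources: because dtls1_write_app_data above
// never fragments, one SSL_write produces at most one record. For example,
// SSL_write(ssl, buf, 1400) sends a single datagram-sized record, while
// SSL_write(ssl, buf, 20000) exceeds SSL3_RT_MAX_PLAIN_LENGTH (16384 bytes)
// and fails with SSL_R_DTLS_MESSAGE_TOO_BIG instead of being split across
// datagrams.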
if (ssl->s3->send_alert[0] == SSL3_AL_FATAL) { BIO_flush(ssl->wbio); } - ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_ALERT, ssl->s3->send_alert, - 2); + ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_ALERT, ssl->s3->send_alert); int alert = (ssl->s3->send_alert[0] << 8) | ssl->s3->send_alert[1]; ssl_do_info_callback(ssl, SSL_CB_WRITE_ALERT, alert); return 1; } + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/d1_srtp.c b/Sources/BoringSSL/ssl/d1_srtp.cc similarity index 94% rename from Sources/BoringSSL/ssl/d1_srtp.c rename to Sources/BoringSSL/ssl/d1_srtp.cc index 108537777..1a8e08487 100644 --- a/Sources/BoringSSL/ssl/d1_srtp.c +++ b/Sources/BoringSSL/ssl/d1_srtp.cc @@ -124,6 +124,8 @@ #include "internal.h" +using namespace bssl; + static const SRTP_PROTECTION_PROFILE kSRTPProfiles[] = { { "SRTP_AES128_CM_SHA1_80", SRTP_AES128_CM_SHA1_80, @@ -143,9 +145,7 @@ static const SRTP_PROTECTION_PROFILE kSRTPProfiles[] = { static int find_profile_by_name(const char *profile_name, const SRTP_PROTECTION_PROFILE **pptr, size_t len) { - const SRTP_PROTECTION_PROFILE *p; - - p = kSRTPProfiles; + const SRTP_PROTECTION_PROFILE *p = kSRTPProfiles; while (p->name) { if (len == strlen(p->name) && !strncmp(p->name, profile_name, len)) { *pptr = p; @@ -160,9 +160,9 @@ static int find_profile_by_name(const char *profile_name, static int ssl_ctx_make_profiles(const char *profiles_string, STACK_OF(SRTP_PROTECTION_PROFILE) **out) { - STACK_OF(SRTP_PROTECTION_PROFILE) *profiles = - sk_SRTP_PROTECTION_PROFILE_new_null(); - if (profiles == NULL) { + UniquePtr profiles( + sk_SRTP_PROTECTION_PROFILE_new_null()); + if (profiles == nullptr) { OPENSSL_PUT_ERROR(SSL, SSL_R_SRTP_COULD_NOT_ALLOCATE_PROFILES); return 0; } @@ -176,11 +176,11 @@ static int ssl_ctx_make_profiles(const char *profiles_string, if (!find_profile_by_name(ptr, &profile, col ? (size_t)(col - ptr) : strlen(ptr))) { OPENSSL_PUT_ERROR(SSL, SSL_R_SRTP_UNKNOWN_PROTECTION_PROFILE); - goto err; + return 0; } - if (!sk_SRTP_PROTECTION_PROFILE_push(profiles, profile)) { - goto err; + if (!sk_SRTP_PROTECTION_PROFILE_push(profiles.get(), profile)) { + return 0; } if (col) { @@ -189,12 +189,8 @@ static int ssl_ctx_make_profiles(const char *profiles_string, } while (col); sk_SRTP_PROTECTION_PROFILE_free(*out); - *out = profiles; + *out = profiles.release(); return 1; - -err: - sk_SRTP_PROTECTION_PROFILE_free(profiles); - return 0; } int SSL_CTX_set_srtp_profiles(SSL_CTX *ctx, const char *profiles) { @@ -226,11 +222,11 @@ const SRTP_PROTECTION_PROFILE *SSL_get_selected_srtp_profile(SSL *ssl) { } int SSL_CTX_set_tlsext_use_srtp(SSL_CTX *ctx, const char *profiles) { - /* This API inverts its return value. */ + // This API inverts its return value. return !SSL_CTX_set_srtp_profiles(ctx, profiles); } int SSL_set_tlsext_use_srtp(SSL *ssl, const char *profiles) { - /* This API inverts its return value. */ + // This API inverts its return value. return !SSL_set_srtp_profiles(ssl, profiles); } diff --git a/Sources/BoringSSL/ssl/dtls_method.c b/Sources/BoringSSL/ssl/dtls_method.cc similarity index 71% rename from Sources/BoringSSL/ssl/dtls_method.c rename to Sources/BoringSSL/ssl/dtls_method.cc index 60847895a..d0416adae 100644 --- a/Sources/BoringSSL/ssl/dtls_method.c +++ b/Sources/BoringSSL/ssl/dtls_method.cc @@ -66,97 +66,64 @@ #include "internal.h" -static int dtls1_version_from_wire(uint16_t *out_version, - uint16_t wire_version) { - switch (wire_version) { - case DTLS1_VERSION: - /* DTLS 1.0 maps to TLS 1.1, not TLS 1.0. 
*/ - *out_version = TLS1_1_VERSION; - return 1; - case DTLS1_2_VERSION: - *out_version = TLS1_2_VERSION; - return 1; +using namespace bssl; + +static void dtls1_on_handshake_complete(SSL *ssl) { + // Stop the reply timer left by the last flight we sent. + dtls1_stop_timer(ssl); + // If the final flight had a reply, we know the peer has received it. If not, + // we must leave the flight around for post-handshake retransmission. + if (ssl->d1->flight_has_reply) { + dtls_clear_outgoing_messages(ssl); } - - return 0; -} - -static uint16_t dtls1_version_to_wire(uint16_t version) { - switch (version) { - case TLS1_1_VERSION: - /* DTLS 1.0 maps to TLS 1.1, not TLS 1.0. */ - return DTLS1_VERSION; - case TLS1_2_VERSION: - return DTLS1_2_VERSION; - } - - /* It is an error to use this function with an invalid version. */ - assert(0); - return 0; -} - -static int dtls1_supports_cipher(const SSL_CIPHER *cipher) { - return cipher->algorithm_enc != SSL_eNULL; } -static void dtls1_expect_flight(SSL *ssl) { dtls1_start_timer(ssl); } - -static void dtls1_received_flight(SSL *ssl) { dtls1_stop_timer(ssl); } - -static int dtls1_set_read_state(SSL *ssl, SSL_AEAD_CTX *aead_ctx) { - /* Cipher changes are illegal when there are buffered incoming messages. */ - if (dtls_has_incoming_messages(ssl)) { +static bool dtls1_set_read_state(SSL *ssl, UniquePtr aead_ctx) { + // Cipher changes are forbidden if the current epoch has leftover data. + if (dtls_has_unprocessed_handshake_data(ssl)) { OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFERED_MESSAGES_ON_CIPHER_CHANGE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - SSL_AEAD_CTX_free(aead_ctx); - return 0; + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + return false; } ssl->d1->r_epoch++; OPENSSL_memset(&ssl->d1->bitmap, 0, sizeof(ssl->d1->bitmap)); OPENSSL_memset(ssl->s3->read_sequence, 0, sizeof(ssl->s3->read_sequence)); - SSL_AEAD_CTX_free(ssl->s3->aead_read_ctx); - ssl->s3->aead_read_ctx = aead_ctx; - return 1; + ssl->s3->aead_read_ctx = std::move(aead_ctx); + return true; } -static int dtls1_set_write_state(SSL *ssl, SSL_AEAD_CTX *aead_ctx) { +static bool dtls1_set_write_state(SSL *ssl, + UniquePtr aead_ctx) { ssl->d1->w_epoch++; OPENSSL_memcpy(ssl->d1->last_write_sequence, ssl->s3->write_sequence, sizeof(ssl->s3->write_sequence)); OPENSSL_memset(ssl->s3->write_sequence, 0, sizeof(ssl->s3->write_sequence)); - SSL_AEAD_CTX_free(ssl->s3->aead_write_ctx); - ssl->s3->aead_write_ctx = aead_ctx; - return 1; + ssl->d1->last_aead_write_ctx = std::move(ssl->s3->aead_write_ctx); + ssl->s3->aead_write_ctx = std::move(aead_ctx); + return true; } static const SSL_PROTOCOL_METHOD kDTLSProtocolMethod = { - 1 /* is_dtls */, - TLS1_1_VERSION, - TLS1_2_VERSION, - dtls1_version_from_wire, - dtls1_version_to_wire, + true /* is_dtls */, dtls1_new, dtls1_free, dtls1_get_message, - dtls1_get_current_message, - dtls1_release_current_message, - dtls1_read_app_data, - dtls1_read_change_cipher_spec, - dtls1_read_close_notify, + dtls1_next_message, + dtls1_open_handshake, + dtls1_open_change_cipher_spec, + dtls1_open_app_data, dtls1_write_app_data, dtls1_dispatch_alert, - dtls1_supports_cipher, dtls1_init_message, dtls1_finish_message, dtls1_add_message, dtls1_add_change_cipher_spec, dtls1_add_alert, dtls1_flush_flight, - dtls1_expect_flight, - dtls1_received_flight, + dtls1_on_handshake_complete, dtls1_set_read_state, dtls1_set_write_state, }; @@ -170,7 +137,16 @@ const SSL_METHOD *DTLS_method(void) { return &kMethod; } -/* Legacy version-locked methods. 
*/ +const SSL_METHOD *DTLS_with_buffers_method(void) { + static const SSL_METHOD kMethod = { + 0, + &kDTLSProtocolMethod, + &ssl_noop_x509_method, + }; + return &kMethod; +} + +// Legacy version-locked methods. const SSL_METHOD *DTLSv1_2_method(void) { static const SSL_METHOD kMethod = { @@ -190,7 +166,7 @@ const SSL_METHOD *DTLSv1_method(void) { return &kMethod; } -/* Legacy side-specific methods. */ +// Legacy side-specific methods. const SSL_METHOD *DTLSv1_2_server_method(void) { return DTLSv1_2_method(); diff --git a/Sources/BoringSSL/ssl/dtls_record.c b/Sources/BoringSSL/ssl/dtls_record.cc similarity index 73% rename from Sources/BoringSSL/ssl/dtls_record.c rename to Sources/BoringSSL/ssl/dtls_record.cc index 879706dfc..5e795fa37 100644 --- a/Sources/BoringSSL/ssl/dtls_record.c +++ b/Sources/BoringSSL/ssl/dtls_record.cc @@ -121,8 +121,10 @@ #include "../crypto/internal.h" -/* to_u64_be treats |in| as a 8-byte big-endian integer and returns the value as - * a |uint64_t|. */ +namespace bssl { + +// to_u64_be treats |in| as a 8-byte big-endian integer and returns the value as +// a |uint64_t|. static uint64_t to_u64_be(const uint8_t in[8]) { uint64_t ret = 0; unsigned i; @@ -133,8 +135,8 @@ static uint64_t to_u64_be(const uint8_t in[8]) { return ret; } -/* dtls1_bitmap_should_discard returns one if |seq_num| has been seen in |bitmap| - * or is stale. Otherwise it returns zero. */ +// dtls1_bitmap_should_discard returns one if |seq_num| has been seen in +// |bitmap| or is stale. Otherwise it returns zero. static int dtls1_bitmap_should_discard(DTLS1_BITMAP *bitmap, const uint8_t seq_num[8]) { const unsigned kWindowSize = sizeof(bitmap->map) * 8; @@ -147,15 +149,15 @@ static int dtls1_bitmap_should_discard(DTLS1_BITMAP *bitmap, return idx >= kWindowSize || (bitmap->map & (((uint64_t)1) << idx)); } -/* dtls1_bitmap_record updates |bitmap| to record receipt of sequence number - * |seq_num|. It slides the window forward if needed. It is an error to call - * this function on a stale sequence number. */ +// dtls1_bitmap_record updates |bitmap| to record receipt of sequence number +// |seq_num|. It slides the window forward if needed. It is an error to call +// this function on a stale sequence number. static void dtls1_bitmap_record(DTLS1_BITMAP *bitmap, const uint8_t seq_num[8]) { const unsigned kWindowSize = sizeof(bitmap->map) * 8; uint64_t seq_num_u = to_u64_be(seq_num); - /* Shift the window if necessary. */ + // Shift the window if necessary. if (seq_num_u > bitmap->max_seq_num) { uint64_t shift = seq_num_u - bitmap->max_seq_num; if (shift >= kWindowSize) { @@ -172,16 +174,22 @@ static void dtls1_bitmap_record(DTLS1_BITMAP *bitmap, } } -enum ssl_open_record_t dtls_open_record(SSL *ssl, uint8_t *out_type, CBS *out, +enum ssl_open_record_t dtls_open_record(SSL *ssl, uint8_t *out_type, + Span *out, size_t *out_consumed, - uint8_t *out_alert, uint8_t *in, - size_t in_len) { + uint8_t *out_alert, Span in) { *out_consumed = 0; + if (ssl->s3->read_shutdown == ssl_shutdown_close_notify) { + return ssl_open_record_close_notify; + } + + if (in.empty()) { + return ssl_open_record_partial; + } - CBS cbs; - CBS_init(&cbs, in, in_len); + CBS cbs = CBS(in); - /* Decode the record. */ + // Decode the record. 
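// Worked example for the replay window above (illustrative): the map is a
// uint64_t, so kWindowSize is 64. With bitmap->max_seq_num = 1000, an incoming
// sequence number 990 checks bit 10 of the map; 936 or anything older is 64 or
// more behind and is discarded as stale; 1003 slides the window forward by 3
// (map <<= 3) and then records itself in bit 0.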
uint8_t type; uint16_t version; uint8_t sequence[8]; @@ -190,45 +198,59 @@ enum ssl_open_record_t dtls_open_record(SSL *ssl, uint8_t *out_type, CBS *out, !CBS_get_u16(&cbs, &version) || !CBS_copy_bytes(&cbs, sequence, 8) || !CBS_get_u16_length_prefixed(&cbs, &body) || - (ssl->s3->have_version && version != ssl->version) || - (version >> 8) != DTLS1_VERSION_MAJOR || CBS_len(&body) > SSL3_RT_MAX_ENCRYPTED_LENGTH) { - /* The record header was incomplete or malformed. Drop the entire packet. */ - *out_consumed = in_len; + // The record header was incomplete or malformed. Drop the entire packet. + *out_consumed = in.size(); return ssl_open_record_discard; } - ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HEADER, in, - DTLS1_RT_HEADER_LENGTH); + bool version_ok; + if (ssl->s3->aead_read_ctx->is_null_cipher()) { + // Only check the first byte. Enforcing beyond that can prevent decoding + // version negotiation failure alerts. + version_ok = (version >> 8) == DTLS1_VERSION_MAJOR; + } else { + version_ok = version == ssl->s3->aead_read_ctx->RecordVersion(); + } + + if (!version_ok) { + // The record header was incomplete or malformed. Drop the entire packet. + *out_consumed = in.size(); + return ssl_open_record_discard; + } + + ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HEADER, + in.subspan(0, DTLS1_RT_HEADER_LENGTH)); uint16_t epoch = (((uint16_t)sequence[0]) << 8) | sequence[1]; if (epoch != ssl->d1->r_epoch || dtls1_bitmap_should_discard(&ssl->d1->bitmap, sequence)) { - /* Drop this record. It's from the wrong epoch or is a replay. Note that if - * |epoch| is the next epoch, the record could be buffered for later. For - * simplicity, drop it and expect retransmit to handle it later; DTLS must - * handle packet loss anyway. */ - *out_consumed = in_len - CBS_len(&cbs); + // Drop this record. It's from the wrong epoch or is a replay. Note that if + // |epoch| is the next epoch, the record could be buffered for later. For + // simplicity, drop it and expect retransmit to handle it later; DTLS must + // handle packet loss anyway. + *out_consumed = in.size() - CBS_len(&cbs); return ssl_open_record_discard; } - /* Decrypt the body in-place. */ - if (!SSL_AEAD_CTX_open(ssl->s3->aead_read_ctx, out, type, version, sequence, - (uint8_t *)CBS_data(&body), CBS_len(&body))) { - /* Bad packets are silently dropped in DTLS. See section 4.2.1 of RFC 6347. - * Clear the error queue of any errors decryption may have added. Drop the - * entire packet as it must not have come from the peer. - * - * TODO(davidben): This doesn't distinguish malloc failures from encryption - * failures. */ + // Decrypt the body in-place. + if (!ssl->s3->aead_read_ctx->Open( + out, type, version, sequence, + MakeSpan(const_cast<uint8_t *>(CBS_data(&body)), CBS_len(&body)))) { + // Bad packets are silently dropped in DTLS. See section 4.2.1 of RFC 6347. + // Clear the error queue of any errors decryption may have added. Drop the + // entire packet as it must not have come from the peer. + // + // TODO(davidben): This doesn't distinguish malloc failures from encryption + // failures. ERR_clear_error(); - *out_consumed = in_len - CBS_len(&cbs); + *out_consumed = in.size() - CBS_len(&cbs); return ssl_open_record_discard; } - *out_consumed = in_len - CBS_len(&cbs); + *out_consumed = in.size() - CBS_len(&cbs); - /* Check the plaintext length. */ + // Check the plaintext length. 
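// Illustrative note, not from the sources: the header parsed above and
// rebuilt by dtls_seal_record below is the standard 13-byte DTLS record
// header: type (1 byte), protocol version (2), epoch (2), sequence number (6)
// and ciphertext length (2), which is what DTLS1_RT_HEADER_LENGTH covers
// (1 + 2 + 2 + 6 + 2 = 13).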
+ if (out->size() > SSL3_RT_MAX_PLAIN_LENGTH) { OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); *out_alert = SSL_AD_RECORD_OVERFLOW; return ssl_open_record_error; @@ -236,11 +258,11 @@ enum ssl_open_record_t dtls_open_record(SSL *ssl, uint8_t *out_type, CBS *out, dtls1_bitmap_record(&ssl->d1->bitmap, sequence); - /* TODO(davidben): Limit the number of empty records as in TLS? This is only - * useful if we also limit discarded packets. */ + // TODO(davidben): Limit the number of empty records as in TLS? This is only + // useful if we also limit discarded packets. if (type == SSL3_RT_ALERT) { - return ssl_process_alert(ssl, out_alert, CBS_data(out), CBS_len(out)); + return ssl_process_alert(ssl, out_alert, *out); } ssl->s3->warning_alert_count = 0; @@ -249,27 +271,24 @@ enum ssl_open_record_t dtls_open_record(SSL *ssl, uint8_t *out_type, CBS *out, return ssl_open_record_success; } -static const SSL_AEAD_CTX *get_write_aead(const SSL *ssl, - enum dtls1_use_epoch_t use_epoch) { +static const SSLAEADContext *get_write_aead(const SSL *ssl, + enum dtls1_use_epoch_t use_epoch) { if (use_epoch == dtls1_use_previous_epoch) { - /* DTLS renegotiation is unsupported, so only epochs 0 (NULL cipher) and 1 - * (negotiated cipher) exist. */ - assert(ssl->d1->w_epoch == 1); - return NULL; + assert(ssl->d1->w_epoch >= 1); + return ssl->d1->last_aead_write_ctx.get(); } - return ssl->s3->aead_write_ctx; + return ssl->s3->aead_write_ctx.get(); } size_t dtls_max_seal_overhead(const SSL *ssl, enum dtls1_use_epoch_t use_epoch) { - return DTLS1_RT_HEADER_LENGTH + - SSL_AEAD_CTX_max_overhead(get_write_aead(ssl, use_epoch)); + return DTLS1_RT_HEADER_LENGTH + get_write_aead(ssl, use_epoch)->MaxOverhead(); } size_t dtls_seal_prefix_len(const SSL *ssl, enum dtls1_use_epoch_t use_epoch) { return DTLS1_RT_HEADER_LENGTH + - SSL_AEAD_CTX_explicit_nonce_len(get_write_aead(ssl, use_epoch)); + get_write_aead(ssl, use_epoch)->ExplicitNonceLen(); } int dtls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, @@ -282,16 +301,14 @@ int dtls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, return 0; } - /* Determine the parameters for the current epoch. */ + // Determine the parameters for the current epoch. uint16_t epoch = ssl->d1->w_epoch; - SSL_AEAD_CTX *aead = ssl->s3->aead_write_ctx; + SSLAEADContext *aead = ssl->s3->aead_write_ctx.get(); uint8_t *seq = ssl->s3->write_sequence; if (use_epoch == dtls1_use_previous_epoch) { - /* DTLS renegotiation is unsupported, so only epochs 0 (NULL cipher) and 1 - * (negotiated cipher) exist. */ - assert(ssl->d1->w_epoch == 1); + assert(ssl->d1->w_epoch >= 1); epoch = ssl->d1->w_epoch - 1; - aead = NULL; + aead = ssl->d1->last_aead_write_ctx.get(); seq = ssl->d1->last_write_sequence; } @@ -302,18 +319,18 @@ int dtls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, out[0] = type; - uint16_t wire_version = ssl->s3->have_version ? 
ssl->version : DTLS1_VERSION; - out[1] = wire_version >> 8; - out[2] = wire_version & 0xff; + uint16_t record_version = ssl->s3->aead_write_ctx->RecordVersion(); + out[1] = record_version >> 8; + out[2] = record_version & 0xff; out[3] = epoch >> 8; out[4] = epoch & 0xff; OPENSSL_memcpy(&out[5], &seq[2], 6); size_t ciphertext_len; - if (!SSL_AEAD_CTX_seal(aead, out + DTLS1_RT_HEADER_LENGTH, &ciphertext_len, - max_out - DTLS1_RT_HEADER_LENGTH, type, wire_version, - &out[3] /* seq */, in, in_len) || + if (!aead->Seal(out + DTLS1_RT_HEADER_LENGTH, &ciphertext_len, + max_out - DTLS1_RT_HEADER_LENGTH, type, record_version, + &out[3] /* seq */, in, in_len) || !ssl_record_sequence_update(&seq[2], 6)) { return 0; } @@ -327,8 +344,10 @@ int dtls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, *out_len = DTLS1_RT_HEADER_LENGTH + ciphertext_len; - ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HEADER, out, - DTLS1_RT_HEADER_LENGTH); + ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HEADER, + MakeSpan(out, DTLS1_RT_HEADER_LENGTH)); return 1; } + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/handshake.cc b/Sources/BoringSSL/ssl/handshake.cc new file mode 100644 index 000000000..3b446a8cb --- /dev/null +++ b/Sources/BoringSSL/ssl/handshake.cc @@ -0,0 +1,616 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. 
If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ +/* ==================================================================== + * Copyright (c) 1998-2002 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). */ +/* ==================================================================== + * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. + * ECC cipher suite support in OpenSSL originally developed by + * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project. */ + +#include + +#include + +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +namespace bssl { + +SSL_HANDSHAKE::SSL_HANDSHAKE(SSL *ssl_arg) + : ssl(ssl_arg), + scts_requested(false), + needs_psk_binder(false), + received_hello_retry_request(false), + sent_hello_retry_request(false), + received_custom_extension(false), + handshake_finalized(false), + accept_psk_mode(false), + cert_request(false), + certificate_status_expected(false), + ocsp_stapling_requested(false), + should_ack_sni(false), + in_false_start(false), + in_early_data(false), + early_data_offered(false), + can_early_read(false), + can_early_write(false), + next_proto_neg_seen(false), + ticket_expected(false), + extended_master_secret(false), + pending_private_key_op(false) { +} + +SSL_HANDSHAKE::~SSL_HANDSHAKE() { + ssl->ctx->x509_method->hs_flush_cached_ca_names(this); +} + +UniquePtr ssl_handshake_new(SSL *ssl) { + UniquePtr hs = MakeUnique(ssl); + if (!hs || + !hs->transcript.Init()) { + return nullptr; + } + return hs; +} + +bool ssl_check_message_type(SSL *ssl, const SSLMessage &msg, int type) { + if (msg.type != type) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); + ERR_add_error_dataf("got type %d, wanted type %d", msg.type, type); + return false; + } + + return true; +} + +bool ssl_add_message_cbb(SSL *ssl, CBB *cbb) { + Array msg; + if (!ssl->method->finish_message(ssl, cbb, &msg) || + !ssl->method->add_message(ssl, std::move(msg))) { + return false; + } + + return true; +} + +size_t ssl_max_handshake_message_len(const SSL *ssl) { + // kMaxMessageLen is the default maximum message size for handshakes which do + // not accept peer certificate chains. + static const size_t kMaxMessageLen = 16384; + + if (SSL_in_init(ssl)) { + if ((!ssl->server || (ssl->verify_mode & SSL_VERIFY_PEER)) && + kMaxMessageLen < ssl->max_cert_list) { + return ssl->max_cert_list; + } + return kMaxMessageLen; + } + + if (ssl_protocol_version(ssl) < TLS1_3_VERSION) { + // In TLS 1.2 and below, the largest acceptable post-handshake message is + // a HelloRequest. + return 0; + } + + if (ssl->server) { + // The largest acceptable post-handshake message for a server is a + // KeyUpdate. We will never initiate post-handshake auth. + return 1; + } + + // Clients must accept NewSessionTicket, so allow the default size. 
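// Illustrative note, not from the sources: the certificate-chain ceiling used
// above is the |max_cert_list| knob, so a client that expects unusually large
// chains can raise it before the handshake, e.g.
//
//   SSL_CTX_set_max_cert_list(ctx, 512 * 1024);  // accept chains up to 512 KiB
//
// where |ctx| is the application's SSL_CTX.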
+ return kMaxMessageLen; +} + +bool ssl_hash_message(SSL_HANDSHAKE *hs, const SSLMessage &msg) { + // V2ClientHello messages are pre-hashed. + if (msg.is_v2_hello) { + return true; + } + + return hs->transcript.Update(msg.raw); +} + +int ssl_parse_extensions(const CBS *cbs, uint8_t *out_alert, + const SSL_EXTENSION_TYPE *ext_types, + size_t num_ext_types, int ignore_unknown) { + // Reset everything. + for (size_t i = 0; i < num_ext_types; i++) { + *ext_types[i].out_present = 0; + CBS_init(ext_types[i].out_data, NULL, 0); + } + + CBS copy = *cbs; + while (CBS_len(©) != 0) { + uint16_t type; + CBS data; + if (!CBS_get_u16(©, &type) || + !CBS_get_u16_length_prefixed(©, &data)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); + *out_alert = SSL_AD_DECODE_ERROR; + return 0; + } + + const SSL_EXTENSION_TYPE *ext_type = NULL; + for (size_t i = 0; i < num_ext_types; i++) { + if (type == ext_types[i].type) { + ext_type = &ext_types[i]; + break; + } + } + + if (ext_type == NULL) { + if (ignore_unknown) { + continue; + } + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); + *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; + return 0; + } + + // Duplicate ext_types are forbidden. + if (*ext_type->out_present) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DUPLICATE_EXTENSION); + *out_alert = SSL_AD_ILLEGAL_PARAMETER; + return 0; + } + + *ext_type->out_present = 1; + *ext_type->out_data = data; + } + + return 1; +} + +static void set_crypto_buffer(CRYPTO_BUFFER **dest, CRYPTO_BUFFER *src) { + // TODO(davidben): Remove this helper once |SSL_SESSION| can use |UniquePtr| + // and |UniquePtr| has up_ref helpers. + CRYPTO_BUFFER_free(*dest); + *dest = src; + if (src != nullptr) { + CRYPTO_BUFFER_up_ref(src); + } +} + +enum ssl_verify_result_t ssl_verify_peer_cert(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + const SSL_SESSION *prev_session = ssl->s3->established_session.get(); + if (prev_session != NULL) { + // If renegotiating, the server must not change the server certificate. See + // https://mitls.org/pages/attacks/3SHAKE. We never resume on renegotiation, + // so this check is sufficient to ensure the reported peer certificate never + // changes on renegotiation. + assert(!ssl->server); + if (sk_CRYPTO_BUFFER_num(prev_session->certs) != + sk_CRYPTO_BUFFER_num(hs->new_session->certs)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_SERVER_CERT_CHANGED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_verify_invalid; + } + + for (size_t i = 0; i < sk_CRYPTO_BUFFER_num(hs->new_session->certs); i++) { + const CRYPTO_BUFFER *old_cert = + sk_CRYPTO_BUFFER_value(prev_session->certs, i); + const CRYPTO_BUFFER *new_cert = + sk_CRYPTO_BUFFER_value(hs->new_session->certs, i); + if (CRYPTO_BUFFER_len(old_cert) != CRYPTO_BUFFER_len(new_cert) || + OPENSSL_memcmp(CRYPTO_BUFFER_data(old_cert), + CRYPTO_BUFFER_data(new_cert), + CRYPTO_BUFFER_len(old_cert)) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_SERVER_CERT_CHANGED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_verify_invalid; + } + } + + // The certificate is identical, so we may skip re-verifying the + // certificate. Since we only authenticated the previous one, copy other + // authentication from the established session and ignore what was newly + // received. 
+ set_crypto_buffer(&hs->new_session->ocsp_response, + prev_session->ocsp_response); + set_crypto_buffer(&hs->new_session->signed_cert_timestamp_list, + prev_session->signed_cert_timestamp_list); + hs->new_session->verify_result = prev_session->verify_result; + return ssl_verify_ok; + } + + uint8_t alert = SSL_AD_CERTIFICATE_UNKNOWN; + enum ssl_verify_result_t ret; + if (ssl->custom_verify_callback != nullptr) { + ret = ssl->custom_verify_callback(ssl, &alert); + switch (ret) { + case ssl_verify_ok: + hs->new_session->verify_result = X509_V_OK; + break; + case ssl_verify_invalid: + hs->new_session->verify_result = X509_V_ERR_APPLICATION_VERIFICATION; + break; + case ssl_verify_retry: + break; + } + } else { + ret = ssl->ctx->x509_method->session_verify_cert_chain( + hs->new_session.get(), ssl, &alert) + ? ssl_verify_ok + : ssl_verify_invalid; + } + + if (ret == ssl_verify_invalid) { + OPENSSL_PUT_ERROR(SSL, SSL_R_CERTIFICATE_VERIFY_FAILED); + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + } + + return ret; +} + +uint16_t ssl_get_grease_value(const SSL *ssl, enum ssl_grease_index_t index) { + // Use the client_random or server_random for entropy. This both avoids + // calling |RAND_bytes| on a single byte repeatedly and ensures the values are + // deterministic. This allows the same ClientHello be sent twice for a + // HelloRetryRequest or the same group be advertised in both supported_groups + // and key_shares. + uint16_t ret = ssl->server ? ssl->s3->server_random[index] + : ssl->s3->client_random[index]; + // The first four bytes of server_random are a timestamp prior to TLS 1.3, but + // servers have no fields to GREASE until TLS 1.3. + assert(!ssl->server || ssl_protocol_version(ssl) >= TLS1_3_VERSION); + // This generates a random value of the form 0xωaωa, for all 0 ≤ ω < 16. + ret = (ret & 0xf0) | 0x0a; + ret |= ret << 8; + return ret; +} + +enum ssl_hs_wait_t ssl_get_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_FINISHED)) { + return ssl_hs_error; + } + + // Snapshot the finished hash before incorporating the new message. + uint8_t finished[EVP_MAX_MD_SIZE]; + size_t finished_len; + if (!hs->transcript.GetFinishedMAC(finished, &finished_len, + SSL_get_session(ssl), !ssl->server) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + int finished_ok = CBS_mem_equal(&msg.body, finished, finished_len); +#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) + finished_ok = 1; +#endif + if (!finished_ok) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); + return ssl_hs_error; + } + + // Copy the Finished so we can use it for renegotiation checks. 
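// Worked example for ssl_get_grease_value above (illustrative): if the chosen
// random byte is 0x7c, then (0x7c & 0xf0) | 0x0a = 0x7a and ret |= ret << 8
// yields 0x7a7a, one of the sixteen reserved GREASE code points of the
// 0xωaωa form noted above.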
+ if (ssl->version != SSL3_VERSION) { + if (finished_len > sizeof(ssl->s3->previous_client_finished) || + finished_len > sizeof(ssl->s3->previous_server_finished)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + + if (ssl->server) { + OPENSSL_memcpy(ssl->s3->previous_client_finished, finished, finished_len); + ssl->s3->previous_client_finished_len = finished_len; + } else { + OPENSSL_memcpy(ssl->s3->previous_server_finished, finished, finished_len); + ssl->s3->previous_server_finished_len = finished_len; + } + } + + ssl->method->next_message(ssl); + return ssl_hs_ok; +} + +bool ssl_send_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + const SSL_SESSION *session = SSL_get_session(ssl); + + uint8_t finished[EVP_MAX_MD_SIZE]; + size_t finished_len; + if (!hs->transcript.GetFinishedMAC(finished, &finished_len, session, + ssl->server)) { + return 0; + } + + // Log the master secret, if logging is enabled. + if (!ssl_log_secret(ssl, "CLIENT_RANDOM", + session->master_key, + session->master_key_length)) { + return 0; + } + + // Copy the Finished so we can use it for renegotiation checks. + if (ssl->version != SSL3_VERSION) { + if (finished_len > sizeof(ssl->s3->previous_client_finished) || + finished_len > sizeof(ssl->s3->previous_server_finished)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + + if (ssl->server) { + OPENSSL_memcpy(ssl->s3->previous_server_finished, finished, finished_len); + ssl->s3->previous_server_finished_len = finished_len; + } else { + OPENSSL_memcpy(ssl->s3->previous_client_finished, finished, finished_len); + ssl->s3->previous_client_finished_len = finished_len; + } + } + + ScopedCBB cbb; + CBB body; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_FINISHED) || + !CBB_add_bytes(&body, finished, finished_len) || + !ssl_add_message_cbb(ssl, cbb.get())) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + + return 1; +} + +bool ssl_output_cert_chain(SSL *ssl) { + ScopedCBB cbb; + CBB body; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CERTIFICATE) || + !ssl_add_cert_chain(ssl, &body) || + !ssl_add_message_cbb(ssl, cbb.get())) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return false; + } + + return true; +} + +int ssl_run_handshake(SSL_HANDSHAKE *hs, bool *out_early_return) { + SSL *const ssl = hs->ssl; + for (;;) { + // Resolve the operation the handshake was waiting on. + switch (hs->wait) { + case ssl_hs_error: + ERR_restore_state(hs->error.get()); + return -1; + + case ssl_hs_flush: { + int ret = ssl->method->flush_flight(ssl); + if (ret <= 0) { + return ret; + } + break; + } + + case ssl_hs_read_server_hello: + case ssl_hs_read_message: + case ssl_hs_read_change_cipher_spec: { + uint8_t alert = SSL_AD_DECODE_ERROR; + size_t consumed = 0; + ssl_open_record_t ret; + if (hs->wait == ssl_hs_read_change_cipher_spec) { + ret = ssl_open_change_cipher_spec(ssl, &consumed, &alert, + ssl->s3->read_buffer.span()); + } else { + ret = ssl_open_handshake(ssl, &consumed, &alert, + ssl->s3->read_buffer.span()); + } + if (ret == ssl_open_record_error && + hs->wait == ssl_hs_read_server_hello) { + uint32_t err = ERR_peek_error(); + if (ERR_GET_LIB(err) == ERR_LIB_SSL && + ERR_GET_REASON(err) == SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE) { + // Add a dedicated error code to the queue for a handshake_failure + // alert in response to ClientHello. This matches NSS's client + // behavior and gives a better error on a (probable) failure to + // negotiate initial parameters. 
Note: this error code comes after + // the original one. + // + // See https://crbug.com/446505. + OPENSSL_PUT_ERROR(SSL, SSL_R_HANDSHAKE_FAILURE_ON_CLIENT_HELLO); + } + } + bool retry; + int bio_ret = ssl_handle_open_record(ssl, &retry, ret, consumed, alert); + if (bio_ret <= 0) { + return bio_ret; + } + if (retry) { + continue; + } + ssl->s3->read_buffer.DiscardConsumed(); + break; + } + + case ssl_hs_read_end_of_early_data: { + if (ssl->s3->hs->can_early_read) { + // While we are processing early data, the handshake returns early. + *out_early_return = true; + return 1; + } + hs->wait = ssl_hs_ok; + break; + } + + case ssl_hs_certificate_selection_pending: + ssl->s3->rwstate = SSL_CERTIFICATE_SELECTION_PENDING; + hs->wait = ssl_hs_ok; + return -1; + + case ssl_hs_x509_lookup: + ssl->s3->rwstate = SSL_X509_LOOKUP; + hs->wait = ssl_hs_ok; + return -1; + + case ssl_hs_channel_id_lookup: + ssl->s3->rwstate = SSL_CHANNEL_ID_LOOKUP; + hs->wait = ssl_hs_ok; + return -1; + + case ssl_hs_private_key_operation: + ssl->s3->rwstate = SSL_PRIVATE_KEY_OPERATION; + hs->wait = ssl_hs_ok; + return -1; + + case ssl_hs_pending_session: + ssl->s3->rwstate = SSL_PENDING_SESSION; + hs->wait = ssl_hs_ok; + return -1; + + case ssl_hs_pending_ticket: + ssl->s3->rwstate = SSL_PENDING_TICKET; + hs->wait = ssl_hs_ok; + return -1; + + case ssl_hs_certificate_verify: + ssl->s3->rwstate = SSL_CERTIFICATE_VERIFY; + hs->wait = ssl_hs_ok; + return -1; + + case ssl_hs_early_data_rejected: + ssl->s3->rwstate = SSL_EARLY_DATA_REJECTED; + // Cause |SSL_write| to start failing immediately. + hs->can_early_write = false; + return -1; + + case ssl_hs_early_return: + *out_early_return = true; + hs->wait = ssl_hs_ok; + return 1; + + case ssl_hs_ok: + break; + } + + // Run the state machine again. + hs->wait = ssl->do_handshake(hs); + if (hs->wait == ssl_hs_error) { + hs->error.reset(ERR_save_state()); + return -1; + } + if (hs->wait == ssl_hs_ok) { + // The handshake has completed. + *out_early_return = false; + return 1; + } + + // Otherwise, loop to the beginning and resolve what was blocking the + // handshake. + } +} + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/handshake_client.c b/Sources/BoringSSL/ssl/handshake_client.c deleted file mode 100644 index c4f5e8e9f..000000000 --- a/Sources/BoringSSL/ssl/handshake_client.c +++ /dev/null @@ -1,1883 +0,0 @@ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ -/* ==================================================================== - * Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. 
Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ -/* ==================================================================== - * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. - * - * Portions of the attached software ("Contribution") are developed by - * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. - * - * The Contribution is licensed pursuant to the OpenSSL open source - * license provided above. - * - * ECC cipher suite support in OpenSSL originally written by - * Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories. - * - */ -/* ==================================================================== - * Copyright 2005 Nokia. All rights reserved. - * - * The portions of the attached software ("Contribution") is developed by - * Nokia Corporation and is licensed pursuant to the OpenSSL open source - * license. - * - * The Contribution, originally written by Mika Kousa and Pasi Eronen of - * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites - * support (see RFC 4279) to OpenSSL. - * - * No patent licenses or other rights except those expressly stated in - * the OpenSSL open source license shall be deemed granted or received - * expressly, by implication, estoppel, or otherwise. - * - * No assurances are provided by Nokia that the Contribution does not - * infringe the patent or other intellectual property rights of any third - * party or that the license provides you with all the necessary rights - * to make use of the Contribution. - * - * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN - * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA - * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY - * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR - * OTHERWISE. 
- */ - -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../crypto/internal.h" -#include "internal.h" - - -static int ssl3_send_client_hello(SSL_HANDSHAKE *hs); -static int dtls1_get_hello_verify(SSL_HANDSHAKE *hs); -static int ssl3_get_server_hello(SSL_HANDSHAKE *hs); -static int ssl3_get_server_certificate(SSL_HANDSHAKE *hs); -static int ssl3_get_cert_status(SSL_HANDSHAKE *hs); -static int ssl3_verify_server_cert(SSL_HANDSHAKE *hs); -static int ssl3_get_server_key_exchange(SSL_HANDSHAKE *hs); -static int ssl3_get_certificate_request(SSL_HANDSHAKE *hs); -static int ssl3_get_server_hello_done(SSL_HANDSHAKE *hs); -static int ssl3_send_client_certificate(SSL_HANDSHAKE *hs); -static int ssl3_send_client_key_exchange(SSL_HANDSHAKE *hs); -static int ssl3_send_cert_verify(SSL_HANDSHAKE *hs); -static int ssl3_send_next_proto(SSL_HANDSHAKE *hs); -static int ssl3_send_channel_id(SSL_HANDSHAKE *hs); -static int ssl3_get_new_session_ticket(SSL_HANDSHAKE *hs); - -int ssl3_connect(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int ret = -1; - - assert(ssl->handshake_func == ssl3_connect); - assert(!ssl->server); - - for (;;) { - int state = hs->state; - - switch (hs->state) { - case SSL_ST_INIT: - ssl_do_info_callback(ssl, SSL_CB_HANDSHAKE_START, 1); - hs->state = SSL3_ST_CW_CLNT_HELLO_A; - break; - - case SSL3_ST_CW_CLNT_HELLO_A: - ret = ssl3_send_client_hello(hs); - if (ret <= 0) { - goto end; - } - - if (!SSL_is_dtls(ssl) || ssl->d1->send_cookie) { - hs->next_state = SSL3_ST_CR_SRVR_HELLO_A; - } else { - hs->next_state = DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A; - } - hs->state = SSL3_ST_CW_FLUSH; - break; - - case DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A: - assert(SSL_is_dtls(ssl)); - ret = dtls1_get_hello_verify(hs); - if (ret <= 0) { - goto end; - } - if (ssl->d1->send_cookie) { - ssl->method->received_flight(ssl); - hs->state = SSL3_ST_CW_CLNT_HELLO_A; - } else { - hs->state = SSL3_ST_CR_SRVR_HELLO_A; - } - break; - - case SSL3_ST_CR_SRVR_HELLO_A: - ret = ssl3_get_server_hello(hs); - if (hs->state == SSL_ST_TLS13) { - break; - } - if (ret <= 0) { - goto end; - } - - if (ssl->session != NULL) { - hs->state = SSL3_ST_CR_SESSION_TICKET_A; - } else { - hs->state = SSL3_ST_CR_CERT_A; - } - break; - - case SSL3_ST_CR_CERT_A: - if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - ret = ssl3_get_server_certificate(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_CR_CERT_STATUS_A; - break; - - case SSL3_ST_CR_CERT_STATUS_A: - if (hs->certificate_status_expected) { - ret = ssl3_get_cert_status(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_VERIFY_SERVER_CERT; - break; - - case SSL3_ST_VERIFY_SERVER_CERT: - if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - ret = ssl3_verify_server_cert(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_CR_KEY_EXCH_A; - break; - - case SSL3_ST_CR_KEY_EXCH_A: - ret = ssl3_get_server_key_exchange(hs); - if (ret <= 0) { - goto end; - } - hs->state = SSL3_ST_CR_CERT_REQ_A; - break; - - case SSL3_ST_CR_CERT_REQ_A: - if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - ret = ssl3_get_certificate_request(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_CR_SRVR_DONE_A; - break; - - case SSL3_ST_CR_SRVR_DONE_A: - ret = ssl3_get_server_hello_done(hs); - if (ret <= 0) { - goto end; - } - ssl->method->received_flight(ssl); - hs->state = SSL3_ST_CW_CERT_A; - break; - - case 
SSL3_ST_CW_CERT_A: - if (hs->cert_request) { - ret = ssl3_send_client_certificate(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_CW_KEY_EXCH_A; - break; - - case SSL3_ST_CW_KEY_EXCH_A: - ret = ssl3_send_client_key_exchange(hs); - if (ret <= 0) { - goto end; - } - hs->state = SSL3_ST_CW_CERT_VRFY_A; - break; - - case SSL3_ST_CW_CERT_VRFY_A: - case SSL3_ST_CW_CERT_VRFY_B: - if (hs->cert_request && ssl_has_certificate(ssl)) { - ret = ssl3_send_cert_verify(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_CW_CHANGE; - break; - - case SSL3_ST_CW_CHANGE: - if (!ssl->method->add_change_cipher_spec(ssl) || - !tls1_change_cipher_state(hs, SSL3_CHANGE_CIPHER_CLIENT_WRITE)) { - ret = -1; - goto end; - } - - hs->state = SSL3_ST_CW_NEXT_PROTO_A; - break; - - case SSL3_ST_CW_NEXT_PROTO_A: - if (hs->next_proto_neg_seen) { - ret = ssl3_send_next_proto(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_CW_CHANNEL_ID_A; - break; - - case SSL3_ST_CW_CHANNEL_ID_A: - if (ssl->s3->tlsext_channel_id_valid) { - ret = ssl3_send_channel_id(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_CW_FINISHED_A; - break; - - case SSL3_ST_CW_FINISHED_A: - ret = ssl3_send_finished(hs); - if (ret <= 0) { - goto end; - } - hs->state = SSL3_ST_CW_FLUSH; - - if (ssl->session != NULL) { - hs->next_state = SSL3_ST_FINISH_CLIENT_HANDSHAKE; - } else { - /* This is a non-resumption handshake. If it involves ChannelID, then - * record the handshake hashes at this point in the session so that - * any resumption of this session with ChannelID can sign those - * hashes. */ - ret = tls1_record_handshake_hashes_for_channel_id(hs); - if (ret <= 0) { - goto end; - } - if ((SSL_get_mode(ssl) & SSL_MODE_ENABLE_FALSE_START) && - ssl3_can_false_start(ssl) && - /* No False Start on renegotiation (would complicate the state - * machine). 
*/ - !ssl->s3->initial_handshake_complete) { - hs->next_state = SSL3_ST_FALSE_START; - } else { - hs->next_state = SSL3_ST_CR_SESSION_TICKET_A; - } - } - break; - - case SSL3_ST_FALSE_START: - hs->state = SSL3_ST_CR_SESSION_TICKET_A; - hs->in_false_start = 1; - ret = 1; - goto end; - - case SSL3_ST_CR_SESSION_TICKET_A: - if (hs->ticket_expected) { - ret = ssl3_get_new_session_ticket(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_CR_CHANGE; - break; - - case SSL3_ST_CR_CHANGE: - ret = ssl->method->read_change_cipher_spec(ssl); - if (ret <= 0) { - goto end; - } - - if (!tls1_change_cipher_state(hs, SSL3_CHANGE_CIPHER_CLIENT_READ)) { - ret = -1; - goto end; - } - hs->state = SSL3_ST_CR_FINISHED_A; - break; - - case SSL3_ST_CR_FINISHED_A: - ret = ssl3_get_finished(hs); - if (ret <= 0) { - goto end; - } - ssl->method->received_flight(ssl); - - if (ssl->session != NULL) { - hs->state = SSL3_ST_CW_CHANGE; - } else { - hs->state = SSL3_ST_FINISH_CLIENT_HANDSHAKE; - } - break; - - case SSL3_ST_CW_FLUSH: - ret = ssl->method->flush_flight(ssl); - if (ret <= 0) { - goto end; - } - hs->state = hs->next_state; - if (hs->state != SSL3_ST_FINISH_CLIENT_HANDSHAKE) { - ssl->method->expect_flight(ssl); - } - break; - - case SSL_ST_TLS13: - ret = tls13_handshake(hs); - if (ret <= 0) { - goto end; - } - hs->state = SSL3_ST_FINISH_CLIENT_HANDSHAKE; - break; - - case SSL3_ST_FINISH_CLIENT_HANDSHAKE: - ssl->method->release_current_message(ssl, 1 /* free_buffer */); - - SSL_SESSION_free(ssl->s3->established_session); - if (ssl->session != NULL) { - SSL_SESSION_up_ref(ssl->session); - ssl->s3->established_session = ssl->session; - } else { - /* We make a copy of the session in order to maintain the immutability - * of the new established_session due to False Start. The caller may - * have taken a reference to the temporary session. */ - ssl->s3->established_session = - SSL_SESSION_dup(hs->new_session, SSL_SESSION_DUP_ALL); - if (ssl->s3->established_session == NULL) { - ret = -1; - goto end; - } - ssl->s3->established_session->not_resumable = 0; - - SSL_SESSION_free(hs->new_session); - hs->new_session = NULL; - } - - hs->state = SSL_ST_OK; - break; - - case SSL_ST_OK: { - const int is_initial_handshake = !ssl->s3->initial_handshake_complete; - ssl->s3->initial_handshake_complete = 1; - if (is_initial_handshake) { - /* Renegotiations do not participate in session resumption. */ - ssl_update_cache(hs, SSL_SESS_CACHE_CLIENT); - } - - ret = 1; - ssl_do_info_callback(ssl, SSL_CB_HANDSHAKE_DONE, 1); - goto end; - } - - default: - OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_STATE); - ret = -1; - goto end; - } - - if (hs->state != state) { - ssl_do_info_callback(ssl, SSL_CB_CONNECT_LOOP, 1); - } - } - -end: - ssl_do_info_callback(ssl, SSL_CB_CONNECT_EXIT, ret); - return ret; -} - -uint16_t ssl_get_grease_value(const SSL *ssl, enum ssl_grease_index_t index) { - /* Use the client_random for entropy. This both avoids calling |RAND_bytes| on - * a single byte repeatedly and ensures the values are deterministic. This - * allows the same ClientHello be sent twice for a HelloRetryRequest or the - * same group be advertised in both supported_groups and key_shares. */ - uint16_t ret = ssl->s3->client_random[index]; - /* This generates a random value of the form 0xωaωa, for all 0 ≤ ω < 16. */ - ret = (ret & 0xf0) | 0x0a; - ret |= ret << 8; - return ret; -} - -/* ssl_get_client_disabled sets |*out_mask_a| and |*out_mask_k| to masks of - * disabled algorithms. 
*/ -static void ssl_get_client_disabled(SSL *ssl, uint32_t *out_mask_a, - uint32_t *out_mask_k) { - int have_rsa = 0, have_ecdsa = 0; - *out_mask_a = 0; - *out_mask_k = 0; - - /* Now go through all signature algorithms seeing if we support any for RSA or - * ECDSA. Do this for all versions not just TLS 1.2. */ - const uint16_t *sigalgs; - size_t num_sigalgs = tls12_get_verify_sigalgs(ssl, &sigalgs); - for (size_t i = 0; i < num_sigalgs; i++) { - switch (sigalgs[i]) { - case SSL_SIGN_RSA_PSS_SHA512: - case SSL_SIGN_RSA_PSS_SHA384: - case SSL_SIGN_RSA_PSS_SHA256: - case SSL_SIGN_RSA_PKCS1_SHA512: - case SSL_SIGN_RSA_PKCS1_SHA384: - case SSL_SIGN_RSA_PKCS1_SHA256: - case SSL_SIGN_RSA_PKCS1_SHA1: - have_rsa = 1; - break; - - case SSL_SIGN_ECDSA_SECP521R1_SHA512: - case SSL_SIGN_ECDSA_SECP384R1_SHA384: - case SSL_SIGN_ECDSA_SECP256R1_SHA256: - case SSL_SIGN_ECDSA_SHA1: - have_ecdsa = 1; - break; - } - } - - /* Disable auth if we don't include any appropriate signature algorithms. */ - if (!have_rsa) { - *out_mask_a |= SSL_aRSA; - } - if (!have_ecdsa) { - *out_mask_a |= SSL_aECDSA; - } - - /* PSK requires a client callback. */ - if (ssl->psk_client_callback == NULL) { - *out_mask_a |= SSL_aPSK; - *out_mask_k |= SSL_kPSK; - } -} - -static int ssl_write_client_cipher_list(SSL *ssl, CBB *out, - uint16_t min_version, - uint16_t max_version) { - uint32_t mask_a, mask_k; - ssl_get_client_disabled(ssl, &mask_a, &mask_k); - - CBB child; - if (!CBB_add_u16_length_prefixed(out, &child)) { - return 0; - } - - /* Add a fake cipher suite. See draft-davidben-tls-grease-01. */ - if (ssl->ctx->grease_enabled && - !CBB_add_u16(&child, ssl_get_grease_value(ssl, ssl_grease_cipher))) { - return 0; - } - - /* Add TLS 1.3 ciphers. Order ChaCha20-Poly1305 relative to AES-GCM based on - * hardware support. */ - if (max_version >= TLS1_3_VERSION) { - if (!EVP_has_aes_hardware() && - !CBB_add_u16(&child, TLS1_CK_CHACHA20_POLY1305_SHA256 & 0xffff)) { - return 0; - } - if (!CBB_add_u16(&child, TLS1_CK_AES_128_GCM_SHA256 & 0xffff) || - !CBB_add_u16(&child, TLS1_CK_AES_256_GCM_SHA384 & 0xffff)) { - return 0; - } - if (EVP_has_aes_hardware() && - !CBB_add_u16(&child, TLS1_CK_CHACHA20_POLY1305_SHA256 & 0xffff)) { - return 0; - } - } - - if (min_version < TLS1_3_VERSION) { - STACK_OF(SSL_CIPHER) *ciphers = SSL_get_ciphers(ssl); - int any_enabled = 0; - for (size_t i = 0; i < sk_SSL_CIPHER_num(ciphers); i++) { - const SSL_CIPHER *cipher = sk_SSL_CIPHER_value(ciphers, i); - /* Skip disabled ciphers */ - if ((cipher->algorithm_mkey & mask_k) || - (cipher->algorithm_auth & mask_a)) { - continue; - } - if (SSL_CIPHER_get_min_version(cipher) > max_version || - SSL_CIPHER_get_max_version(cipher) < min_version) { - continue; - } - any_enabled = 1; - if (!CBB_add_u16(&child, ssl_cipher_get_value(cipher))) { - return 0; - } - } - - /* If all ciphers were disabled, return the error to the caller. */ - if (!any_enabled && max_version < TLS1_3_VERSION) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CIPHERS_AVAILABLE); - return 0; - } - } - - /* For SSLv3, the SCSV is added. Otherwise the renegotiation extension is - * added. 
*/ - if (max_version == SSL3_VERSION && - !ssl->s3->initial_handshake_complete) { - if (!CBB_add_u16(&child, SSL3_CK_SCSV & 0xffff)) { - return 0; - } - } - - if (ssl->mode & SSL_MODE_SEND_FALLBACK_SCSV) { - if (!CBB_add_u16(&child, SSL3_CK_FALLBACK_SCSV & 0xffff)) { - return 0; - } - } - - return CBB_flush(out); -} - -int ssl_write_client_hello(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; - } - - CBB cbb, body; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_CLIENT_HELLO)) { - goto err; - } - - /* Renegotiations do not participate in session resumption. */ - int has_session = ssl->session != NULL && - !ssl->s3->initial_handshake_complete; - - CBB child; - if (!CBB_add_u16(&body, hs->client_version) || - !CBB_add_bytes(&body, ssl->s3->client_random, SSL3_RANDOM_SIZE) || - !CBB_add_u8_length_prefixed(&body, &child) || - (has_session && - !CBB_add_bytes(&child, ssl->session->session_id, - ssl->session->session_id_length))) { - goto err; - } - - if (SSL_is_dtls(ssl)) { - if (!CBB_add_u8_length_prefixed(&body, &child) || - !CBB_add_bytes(&child, ssl->d1->cookie, ssl->d1->cookie_len)) { - goto err; - } - } - - size_t header_len = - SSL_is_dtls(ssl) ? DTLS1_HM_HEADER_LENGTH : SSL3_HM_HEADER_LENGTH; - if (!ssl_write_client_cipher_list(ssl, &body, min_version, max_version) || - !CBB_add_u8(&body, 1 /* one compression method */) || - !CBB_add_u8(&body, 0 /* null compression */) || - !ssl_add_clienthello_tlsext(hs, &body, header_len + CBB_len(&body))) { - goto err; - } - - uint8_t *msg = NULL; - size_t len; - if (!ssl->method->finish_message(ssl, &cbb, &msg, &len)) { - goto err; - } - - /* Now that the length prefixes have been computed, fill in the placeholder - * PSK binder. */ - if (hs->needs_psk_binder && - !tls13_write_psk_binder(hs, msg, len)) { - OPENSSL_free(msg); - goto err; - } - - return ssl->method->add_message(ssl, msg, len); - - err: - CBB_cleanup(&cbb); - return 0; -} - -static int ssl3_send_client_hello(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - /* The handshake buffer is reset on every ClientHello. Notably, in DTLS, we - * may send multiple ClientHellos if we receive HelloVerifyRequest. */ - if (!SSL_TRANSCRIPT_init(&hs->transcript)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } - - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return -1; - } - - uint16_t max_wire_version = ssl->method->version_to_wire(max_version); - assert(hs->state == SSL3_ST_CW_CLNT_HELLO_A); - if (!ssl->s3->have_version) { - ssl->version = max_wire_version; - } - - /* Always advertise the ClientHello version from the original maximum version, - * even on renegotiation. The static RSA key exchange uses this field, and - * some servers fail when it changes across handshakes. */ - hs->client_version = max_wire_version; - if (max_version >= TLS1_3_VERSION) { - hs->client_version = ssl->method->version_to_wire(TLS1_2_VERSION); - } - - /* If the configured session has expired or was created at a disabled - * version, drop it. 
*/ - if (ssl->session != NULL) { - uint16_t session_version; - if (ssl->session->is_server || - !ssl->method->version_from_wire(&session_version, - ssl->session->ssl_version) || - (session_version < TLS1_3_VERSION && - ssl->session->session_id_length == 0) || - ssl->session->not_resumable || - !ssl_session_is_time_valid(ssl, ssl->session) || - session_version < min_version || session_version > max_version) { - ssl_set_session(ssl, NULL); - } - } - - /* If resending the ClientHello in DTLS after a HelloVerifyRequest, don't - * renegerate the client_random. The random must be reused. */ - if ((!SSL_is_dtls(ssl) || !ssl->d1->send_cookie) && - !RAND_bytes(ssl->s3->client_random, sizeof(ssl->s3->client_random))) { - return -1; - } - - if (!ssl_write_client_hello(hs)) { - return -1; - } - - return 1; -} - -static int dtls1_get_hello_verify(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int al; - CBS hello_verify_request, cookie; - uint16_t server_version; - - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - - if (ssl->s3->tmp.message_type != DTLS1_MT_HELLO_VERIFY_REQUEST) { - ssl->d1->send_cookie = 0; - ssl->s3->tmp.reuse_message = 1; - return 1; - } - - CBS_init(&hello_verify_request, ssl->init_msg, ssl->init_num); - if (!CBS_get_u16(&hello_verify_request, &server_version) || - !CBS_get_u8_length_prefixed(&hello_verify_request, &cookie) || - CBS_len(&hello_verify_request) != 0) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - if (CBS_len(&cookie) > sizeof(ssl->d1->cookie)) { - al = SSL_AD_ILLEGAL_PARAMETER; - goto f_err; - } - - OPENSSL_memcpy(ssl->d1->cookie, CBS_data(&cookie), CBS_len(&cookie)); - ssl->d1->cookie_len = CBS_len(&cookie); - - ssl->d1->send_cookie = 1; - return 1; - -f_err: - ssl3_send_alert(ssl, SSL3_AL_FATAL, al); - return -1; -} - -static int ssl3_get_server_hello(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int al = SSL_AD_INTERNAL_ERROR; - CBS server_hello, server_random, session_id; - uint16_t server_wire_version, cipher_suite; - uint8_t compression_method; - - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - uint32_t err = ERR_peek_error(); - if (ERR_GET_LIB(err) == ERR_LIB_SSL && - ERR_GET_REASON(err) == SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE) { - /* Add a dedicated error code to the queue for a handshake_failure alert - * in response to ClientHello. This matches NSS's client behavior and - * gives a better error on a (probable) failure to negotiate initial - * parameters. Note: this error code comes after the original one. - * - * See https://crbug.com/446505. 
*/ - OPENSSL_PUT_ERROR(SSL, SSL_R_HANDSHAKE_FAILURE_ON_CLIENT_HELLO); - } - return ret; - } - - if (ssl->s3->tmp.message_type != SSL3_MT_SERVER_HELLO && - ssl->s3->tmp.message_type != SSL3_MT_HELLO_RETRY_REQUEST) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); - return -1; - } - - CBS_init(&server_hello, ssl->init_msg, ssl->init_num); - - if (!CBS_get_u16(&server_hello, &server_wire_version)) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - uint16_t min_version, max_version, server_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version) || - !ssl->method->version_from_wire(&server_version, server_wire_version) || - server_version < min_version || server_version > max_version) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_PROTOCOL); - al = SSL_AD_PROTOCOL_VERSION; - goto f_err; - } - - assert(ssl->s3->have_version == ssl->s3->initial_handshake_complete); - if (!ssl->s3->have_version) { - ssl->version = server_wire_version; - /* At this point, the connection's version is known and ssl->version is - * fixed. Begin enforcing the record-layer version. */ - ssl->s3->have_version = 1; - } else if (server_wire_version != ssl->version) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SSL_VERSION); - al = SSL_AD_PROTOCOL_VERSION; - goto f_err; - } - - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - hs->state = SSL_ST_TLS13; - hs->do_tls13_handshake = tls13_client_handshake; - return 1; - } - - ssl_clear_tls13_state(hs); - - if (!ssl_check_message_type(ssl, SSL3_MT_SERVER_HELLO)) { - return -1; - } - - if (!CBS_get_bytes(&server_hello, &server_random, SSL3_RANDOM_SIZE) || - !CBS_get_u8_length_prefixed(&server_hello, &session_id) || - CBS_len(&session_id) > SSL3_SESSION_ID_SIZE || - !CBS_get_u16(&server_hello, &cipher_suite) || - !CBS_get_u8(&server_hello, &compression_method)) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - /* Copy over the server random. */ - OPENSSL_memcpy(ssl->s3->server_random, CBS_data(&server_random), SSL3_RANDOM_SIZE); - - /* TODO(davidben): Implement the TLS 1.1 and 1.2 downgrade sentinels once TLS - * 1.3 is finalized and we are not implementing a draft version. */ - - if (!ssl->s3->initial_handshake_complete && ssl->session != NULL && - ssl->session->session_id_length != 0 && - CBS_mem_equal(&session_id, ssl->session->session_id, - ssl->session->session_id_length)) { - ssl->s3->session_reused = 1; - } else { - /* The session wasn't resumed. Create a fresh SSL_SESSION to - * fill out. */ - ssl_set_session(ssl, NULL); - if (!ssl_get_new_session(hs, 0 /* client */)) { - goto f_err; - } - /* Note: session_id could be empty. */ - hs->new_session->session_id_length = CBS_len(&session_id); - OPENSSL_memcpy(hs->new_session->session_id, CBS_data(&session_id), - CBS_len(&session_id)); - } - - const SSL_CIPHER *c = SSL_get_cipher_by_value(cipher_suite); - if (c == NULL) { - /* unknown cipher */ - al = SSL_AD_ILLEGAL_PARAMETER; - OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CIPHER_RETURNED); - goto f_err; - } - - /* The cipher must be allowed in the selected version and enabled. 
*/ - uint32_t mask_a, mask_k; - ssl_get_client_disabled(ssl, &mask_a, &mask_k); - if ((c->algorithm_mkey & mask_k) || (c->algorithm_auth & mask_a) || - SSL_CIPHER_get_min_version(c) > ssl3_protocol_version(ssl) || - SSL_CIPHER_get_max_version(c) < ssl3_protocol_version(ssl) || - !sk_SSL_CIPHER_find(SSL_get_ciphers(ssl), NULL, c)) { - al = SSL_AD_ILLEGAL_PARAMETER; - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CIPHER_RETURNED); - goto f_err; - } - - if (ssl->session != NULL) { - if (ssl->session->ssl_version != ssl->version) { - al = SSL_AD_ILLEGAL_PARAMETER; - OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_VERSION_NOT_RETURNED); - goto f_err; - } - if (ssl->session->cipher != c) { - al = SSL_AD_ILLEGAL_PARAMETER; - OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED); - goto f_err; - } - if (!ssl_session_is_context_valid(ssl, ssl->session)) { - /* This is actually a client application bug. */ - al = SSL_AD_ILLEGAL_PARAMETER; - OPENSSL_PUT_ERROR(SSL, - SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT); - goto f_err; - } - } else { - hs->new_session->cipher = c; - } - hs->new_cipher = c; - - /* Now that the cipher is known, initialize the handshake hash and hash the - * ServerHello. */ - if (!SSL_TRANSCRIPT_init_hash(&hs->transcript, ssl3_protocol_version(ssl), - c->algorithm_prf) || - !ssl_hash_current_message(hs)) { - goto f_err; - } - - /* If doing a full handshake, the server may request a client certificate - * which requires hashing the handshake transcript. Otherwise, the handshake - * buffer may be released. */ - if (ssl->session != NULL || - !ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - SSL_TRANSCRIPT_free_buffer(&hs->transcript); - } - - /* Only the NULL compression algorithm is supported. */ - if (compression_method != 0) { - al = SSL_AD_ILLEGAL_PARAMETER; - OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM); - goto f_err; - } - - /* TLS extensions */ - if (!ssl_parse_serverhello_tlsext(hs, &server_hello)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); - goto err; - } - - /* There should be nothing left over in the record. 
*/ - if (CBS_len(&server_hello) != 0) { - /* wrong packet length */ - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - if (ssl->session != NULL && - hs->extended_master_secret != ssl->session->extended_master_secret) { - al = SSL_AD_HANDSHAKE_FAILURE; - if (ssl->session->extended_master_secret) { - OPENSSL_PUT_ERROR(SSL, SSL_R_RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION); - } else { - OPENSSL_PUT_ERROR(SSL, SSL_R_RESUMED_NON_EMS_SESSION_WITH_EMS_EXTENSION); - } - goto f_err; - } - - return 1; - -f_err: - ssl3_send_alert(ssl, SSL3_AL_FATAL, al); -err: - return -1; -} - -static int ssl3_get_server_certificate(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_CERTIFICATE) || - !ssl_hash_current_message(hs)) { - return -1; - } - - CBS cbs; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - - uint8_t alert = SSL_AD_DECODE_ERROR; - sk_CRYPTO_BUFFER_pop_free(hs->new_session->certs, CRYPTO_BUFFER_free); - EVP_PKEY_free(hs->peer_pubkey); - hs->peer_pubkey = NULL; - hs->new_session->certs = ssl_parse_cert_chain(&alert, &hs->peer_pubkey, NULL, - &cbs, ssl->ctx->pool); - if (hs->new_session->certs == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return -1; - } - - if (sk_CRYPTO_BUFFER_num(hs->new_session->certs) == 0 || - CBS_len(&cbs) != 0 || - !ssl->ctx->x509_method->session_cache_objects(hs->new_session)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return -1; - } - - if (!ssl_check_leaf_certificate( - hs, hs->peer_pubkey, - sk_CRYPTO_BUFFER_value(hs->new_session->certs, 0))) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return -1; - } - - return 1; -} - -static int ssl3_get_cert_status(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int al; - CBS certificate_status, ocsp_response; - uint8_t status_type; - - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - - if (ssl->s3->tmp.message_type != SSL3_MT_CERTIFICATE_STATUS) { - /* A server may send status_request in ServerHello and then change - * its mind about sending CertificateStatus. 
*/ - ssl->s3->tmp.reuse_message = 1; - return 1; - } - - if (!ssl_hash_current_message(hs)) { - return -1; - } - - CBS_init(&certificate_status, ssl->init_msg, ssl->init_num); - if (!CBS_get_u8(&certificate_status, &status_type) || - status_type != TLSEXT_STATUSTYPE_ocsp || - !CBS_get_u24_length_prefixed(&certificate_status, &ocsp_response) || - CBS_len(&ocsp_response) == 0 || - CBS_len(&certificate_status) != 0) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - if (!CBS_stow(&ocsp_response, &hs->new_session->ocsp_response, - &hs->new_session->ocsp_response_length)) { - al = SSL_AD_INTERNAL_ERROR; - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto f_err; - } - return 1; - -f_err: - ssl3_send_alert(ssl, SSL3_AL_FATAL, al); - return -1; -} - -static int ssl3_verify_server_cert(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_verify_cert_chain(ssl, &hs->new_session->verify_result, - hs->new_session->x509_chain)) { - return -1; - } - - return 1; -} - -static int ssl3_get_server_key_exchange(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int al; - DH *dh = NULL; - EC_KEY *ecdh = NULL; - EC_POINT *srvr_ecpoint = NULL; - - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - - if (ssl->s3->tmp.message_type != SSL3_MT_SERVER_KEY_EXCHANGE) { - /* Some ciphers (pure PSK) have an optional ServerKeyExchange message. */ - if (ssl_cipher_requires_server_key_exchange(hs->new_cipher)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - return -1; - } - - ssl->s3->tmp.reuse_message = 1; - return 1; - } - - if (!ssl_hash_current_message(hs)) { - return -1; - } - - /* Retain a copy of the original CBS to compute the signature over. */ - CBS server_key_exchange; - CBS_init(&server_key_exchange, ssl->init_msg, ssl->init_num); - CBS server_key_exchange_orig = server_key_exchange; - - uint32_t alg_k = hs->new_cipher->algorithm_mkey; - uint32_t alg_a = hs->new_cipher->algorithm_auth; - - if (alg_a & SSL_aPSK) { - CBS psk_identity_hint; - - /* Each of the PSK key exchanges begins with a psk_identity_hint. */ - if (!CBS_get_u16_length_prefixed(&server_key_exchange, - &psk_identity_hint)) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - /* Store PSK identity hint for later use, hint is used in - * ssl3_send_client_key_exchange. Assume that the maximum length of a PSK - * identity hint can be as long as the maximum length of a PSK identity. - * Also do not allow NULL characters; identities are saved as C strings. - * - * TODO(davidben): Should invalid hints be ignored? It's a hint rather than - * a specific identity. */ - if (CBS_len(&psk_identity_hint) > PSK_MAX_IDENTITY_LEN || - CBS_contains_zero_byte(&psk_identity_hint)) { - al = SSL_AD_HANDSHAKE_FAILURE; - OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); - goto f_err; - } - - /* Save non-empty identity hints as a C string. Empty identity hints we - * treat as missing. Plain PSK makes it possible to send either no hint - * (omit ServerKeyExchange) or an empty hint, while ECDHE_PSK can only spell - * empty hint. Having different capabilities is odd, so we interpret empty - * and missing as identical. 
*/ - if (CBS_len(&psk_identity_hint) != 0 && - !CBS_strdup(&psk_identity_hint, &hs->peer_psk_identity_hint)) { - al = SSL_AD_INTERNAL_ERROR; - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto f_err; - } - } - - if (alg_k & SSL_kDHE) { - CBS dh_p, dh_g, dh_Ys; - if (!CBS_get_u16_length_prefixed(&server_key_exchange, &dh_p) || - CBS_len(&dh_p) == 0 || - !CBS_get_u16_length_prefixed(&server_key_exchange, &dh_g) || - CBS_len(&dh_g) == 0 || - !CBS_get_u16_length_prefixed(&server_key_exchange, &dh_Ys) || - CBS_len(&dh_Ys) == 0) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - dh = DH_new(); - if (dh == NULL) { - goto err; - } - - dh->p = BN_bin2bn(CBS_data(&dh_p), CBS_len(&dh_p), NULL); - dh->g = BN_bin2bn(CBS_data(&dh_g), CBS_len(&dh_g), NULL); - if (dh->p == NULL || dh->g == NULL) { - goto err; - } - - unsigned bits = DH_num_bits(dh); - if (bits < 1024) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_DH_P_LENGTH); - goto err; - } else if (bits > 4096) { - /* Overly large DHE groups are prohibitively expensive, so enforce a limit - * to prevent a server from causing us to perform too expensive of a - * computation. */ - OPENSSL_PUT_ERROR(SSL, SSL_R_DH_P_TOO_LONG); - goto err; - } - - SSL_ECDH_CTX_init_for_dhe(&hs->ecdh_ctx, dh); - dh = NULL; - - /* Save the peer public key for later. */ - if (!CBS_stow(&dh_Ys, &hs->peer_key, &hs->peer_key_len)) { - goto err; - } - } else if (alg_k & SSL_kECDHE) { - /* Parse the server parameters. */ - uint8_t group_type; - uint16_t group_id; - CBS point; - if (!CBS_get_u8(&server_key_exchange, &group_type) || - group_type != NAMED_CURVE_TYPE || - !CBS_get_u16(&server_key_exchange, &group_id) || - !CBS_get_u8_length_prefixed(&server_key_exchange, &point)) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - hs->new_session->group_id = group_id; - - /* Ensure the group is consistent with preferences. */ - if (!tls1_check_group_id(ssl, group_id)) { - al = SSL_AD_ILLEGAL_PARAMETER; - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); - goto f_err; - } - - /* Initialize ECDH and save the peer public key for later. */ - if (!SSL_ECDH_CTX_init(&hs->ecdh_ctx, group_id) || - !CBS_stow(&point, &hs->peer_key, &hs->peer_key_len)) { - goto err; - } - } else if (!(alg_k & SSL_kPSK)) { - al = SSL_AD_UNEXPECTED_MESSAGE; - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); - goto f_err; - } - - /* At this point, |server_key_exchange| contains the signature, if any, while - * |server_key_exchange_orig| contains the entire message. From that, derive - * a CBS containing just the parameter. */ - CBS parameter; - CBS_init(¶meter, CBS_data(&server_key_exchange_orig), - CBS_len(&server_key_exchange_orig) - CBS_len(&server_key_exchange)); - - /* ServerKeyExchange should be signed by the server's public key. 
*/ - if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - uint16_t signature_algorithm = 0; - if (ssl3_protocol_version(ssl) >= TLS1_2_VERSION) { - if (!CBS_get_u16(&server_key_exchange, &signature_algorithm)) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - if (!tls12_check_peer_sigalg(ssl, &al, signature_algorithm)) { - goto f_err; - } - hs->new_session->peer_signature_algorithm = signature_algorithm; - } else if (hs->peer_pubkey->type == EVP_PKEY_RSA) { - signature_algorithm = SSL_SIGN_RSA_PKCS1_MD5_SHA1; - } else if (hs->peer_pubkey->type == EVP_PKEY_EC) { - signature_algorithm = SSL_SIGN_ECDSA_SHA1; - } else { - al = SSL_AD_UNSUPPORTED_CERTIFICATE; - OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE); - goto f_err; - } - - /* The last field in |server_key_exchange| is the signature. */ - CBS signature; - if (!CBS_get_u16_length_prefixed(&server_key_exchange, &signature) || - CBS_len(&server_key_exchange) != 0) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - CBB transcript; - uint8_t *transcript_data; - size_t transcript_len; - if (!CBB_init(&transcript, 2*SSL3_RANDOM_SIZE + CBS_len(¶meter)) || - !CBB_add_bytes(&transcript, ssl->s3->client_random, SSL3_RANDOM_SIZE) || - !CBB_add_bytes(&transcript, ssl->s3->server_random, SSL3_RANDOM_SIZE) || - !CBB_add_bytes(&transcript, CBS_data(¶meter), CBS_len(¶meter)) || - !CBB_finish(&transcript, &transcript_data, &transcript_len)) { - CBB_cleanup(&transcript); - al = SSL_AD_INTERNAL_ERROR; - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto f_err; - } - - int sig_ok = ssl_public_key_verify( - ssl, CBS_data(&signature), CBS_len(&signature), signature_algorithm, - hs->peer_pubkey, transcript_data, transcript_len); - OPENSSL_free(transcript_data); - -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - sig_ok = 1; - ERR_clear_error(); -#endif - if (!sig_ok) { - /* bad signature */ - al = SSL_AD_DECRYPT_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SIGNATURE); - goto f_err; - } - } else { - /* PSK ciphers are the only supported certificate-less ciphers. */ - assert(alg_a == SSL_aPSK); - - if (CBS_len(&server_key_exchange) > 0) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_EXTRA_DATA_IN_MESSAGE); - goto f_err; - } - } - return 1; - -f_err: - ssl3_send_alert(ssl, SSL3_AL_FATAL, al); -err: - DH_free(dh); - EC_POINT_free(srvr_ecpoint); - EC_KEY_free(ecdh); - return -1; -} - -static int ssl3_get_certificate_request(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int msg_ret = ssl->method->ssl_get_message(ssl); - if (msg_ret <= 0) { - return msg_ret; - } - - if (ssl->s3->tmp.message_type == SSL3_MT_SERVER_HELLO_DONE) { - ssl->s3->tmp.reuse_message = 1; - /* If we get here we don't need the handshake buffer as we won't be doing - * client auth. */ - SSL_TRANSCRIPT_free_buffer(&hs->transcript); - return 1; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_CERTIFICATE_REQUEST) || - !ssl_hash_current_message(hs)) { - return -1; - } - - CBS cbs; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - - /* Get the certificate types. 
*/ - CBS certificate_types; - if (!CBS_get_u8_length_prefixed(&cbs, &certificate_types)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return -1; - } - - if (!CBS_stow(&certificate_types, &hs->certificate_types, - &hs->num_certificate_types)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return -1; - } - - if (ssl3_protocol_version(ssl) >= TLS1_2_VERSION) { - CBS supported_signature_algorithms; - if (!CBS_get_u16_length_prefixed(&cbs, &supported_signature_algorithms) || - !tls1_parse_peer_sigalgs(hs, &supported_signature_algorithms)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return -1; - } - } - - uint8_t alert = SSL_AD_DECODE_ERROR; - STACK_OF(X509_NAME) *ca_sk = ssl_parse_client_CA_list(ssl, &alert, &cbs); - if (ca_sk == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return -1; - } - - if (CBS_len(&cbs) != 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - sk_X509_NAME_pop_free(ca_sk, X509_NAME_free); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return -1; - } - - hs->cert_request = 1; - sk_X509_NAME_pop_free(hs->ca_names, X509_NAME_free); - hs->ca_names = ca_sk; - return 1; -} - -static int ssl3_get_server_hello_done(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_SERVER_HELLO_DONE) || - !ssl_hash_current_message(hs)) { - return -1; - } - - /* ServerHelloDone is empty. */ - if (ssl->init_num > 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return -1; - } - - return 1; -} - -static int ssl3_send_client_certificate(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - /* Call cert_cb to update the certificate. */ - if (ssl->cert->cert_cb) { - int ret = ssl->cert->cert_cb(ssl, ssl->cert->cert_cb_arg); - if (ret < 0) { - ssl->rwstate = SSL_X509_LOOKUP; - return -1; - } - if (ret == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_CB_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return -1; - } - } - - if (!ssl_has_certificate(ssl)) { - /* Without a client certificate, the handshake buffer may be released. */ - SSL_TRANSCRIPT_free_buffer(&hs->transcript); - - /* In SSL 3.0, the Certificate message is replaced with a warning alert. */ - if (ssl->version == SSL3_VERSION) { - if (!ssl->method->add_alert(ssl, SSL3_AL_WARNING, - SSL_AD_NO_CERTIFICATE)) { - return -1; - } - return 1; - } - } - - if (!ssl_auto_chain_if_needed(ssl) || - !ssl3_output_cert_chain(ssl)) { - return -1; - } - return 1; -} - -OPENSSL_COMPILE_ASSERT(sizeof(size_t) >= sizeof(unsigned), - SIZE_T_IS_SMALLER_THAN_UNSIGNED); - -static int ssl3_send_client_key_exchange(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - uint8_t *pms = NULL; - size_t pms_len = 0; - CBB cbb, body; - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_CLIENT_KEY_EXCHANGE)) { - goto err; - } - - uint32_t alg_k = hs->new_cipher->algorithm_mkey; - uint32_t alg_a = hs->new_cipher->algorithm_auth; - - /* If using a PSK key exchange, prepare the pre-shared key. 
*/ - unsigned psk_len = 0; - uint8_t psk[PSK_MAX_PSK_LEN]; - if (alg_a & SSL_aPSK) { - if (ssl->psk_client_callback == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_NO_CLIENT_CB); - goto err; - } - - char identity[PSK_MAX_IDENTITY_LEN + 1]; - OPENSSL_memset(identity, 0, sizeof(identity)); - psk_len = - ssl->psk_client_callback(ssl, hs->peer_psk_identity_hint, identity, - sizeof(identity), psk, sizeof(psk)); - if (psk_len == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_NOT_FOUND); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - goto err; - } - assert(psk_len <= PSK_MAX_PSK_LEN); - - OPENSSL_free(hs->new_session->psk_identity); - hs->new_session->psk_identity = BUF_strdup(identity); - if (hs->new_session->psk_identity == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - - /* Write out psk_identity. */ - CBB child; - if (!CBB_add_u16_length_prefixed(&body, &child) || - !CBB_add_bytes(&child, (const uint8_t *)identity, - OPENSSL_strnlen(identity, sizeof(identity))) || - !CBB_flush(&body)) { - goto err; - } - } - - /* Depending on the key exchange method, compute |pms| and |pms_len|. */ - if (alg_k & SSL_kRSA) { - pms_len = SSL_MAX_MASTER_KEY_LENGTH; - pms = OPENSSL_malloc(pms_len); - if (pms == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - - RSA *rsa = EVP_PKEY_get0_RSA(hs->peer_pubkey); - if (rsa == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - - pms[0] = hs->client_version >> 8; - pms[1] = hs->client_version & 0xff; - if (!RAND_bytes(&pms[2], SSL_MAX_MASTER_KEY_LENGTH - 2)) { - goto err; - } - - CBB child, *enc_pms = &body; - size_t enc_pms_len; - /* In TLS, there is a length prefix. */ - if (ssl->version > SSL3_VERSION) { - if (!CBB_add_u16_length_prefixed(&body, &child)) { - goto err; - } - enc_pms = &child; - } - - uint8_t *ptr; - if (!CBB_reserve(enc_pms, &ptr, RSA_size(rsa)) || - !RSA_encrypt(rsa, &enc_pms_len, ptr, RSA_size(rsa), pms, pms_len, - RSA_PKCS1_PADDING) || - !CBB_did_write(enc_pms, enc_pms_len) || - !CBB_flush(&body)) { - goto err; - } - } else if (alg_k & (SSL_kECDHE|SSL_kDHE)) { - /* Generate a keypair and serialize the public half. */ - CBB child; - if (!SSL_ECDH_CTX_add_key(&hs->ecdh_ctx, &body, &child)) { - goto err; - } - - /* Compute the premaster. */ - uint8_t alert = SSL_AD_DECODE_ERROR; - if (!SSL_ECDH_CTX_accept(&hs->ecdh_ctx, &child, &pms, &pms_len, &alert, - hs->peer_key, hs->peer_key_len)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - goto err; - } - if (!CBB_flush(&body)) { - goto err; - } - - /* The key exchange state may now be discarded. */ - SSL_ECDH_CTX_cleanup(&hs->ecdh_ctx); - OPENSSL_free(hs->peer_key); - hs->peer_key = NULL; - hs->peer_key_len = 0; - } else if (alg_k & SSL_kPSK) { - /* For plain PSK, other_secret is a block of 0s with the same length as - * the pre-shared key. */ - pms_len = psk_len; - pms = OPENSSL_malloc(pms_len); - if (pms == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - OPENSSL_memset(pms, 0, pms_len); - } else { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - - /* For a PSK cipher suite, other_secret is combined with the pre-shared - * key. 
*/ - if (alg_a & SSL_aPSK) { - CBB pms_cbb, child; - uint8_t *new_pms; - size_t new_pms_len; - - CBB_zero(&pms_cbb); - if (!CBB_init(&pms_cbb, 2 + psk_len + 2 + pms_len) || - !CBB_add_u16_length_prefixed(&pms_cbb, &child) || - !CBB_add_bytes(&child, pms, pms_len) || - !CBB_add_u16_length_prefixed(&pms_cbb, &child) || - !CBB_add_bytes(&child, psk, psk_len) || - !CBB_finish(&pms_cbb, &new_pms, &new_pms_len)) { - CBB_cleanup(&pms_cbb); - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - OPENSSL_cleanse(pms, pms_len); - OPENSSL_free(pms); - pms = new_pms; - pms_len = new_pms_len; - } - - /* The message must be added to the finished hash before calculating the - * master secret. */ - if (!ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - - hs->new_session->master_key_length = tls1_generate_master_secret( - hs, hs->new_session->master_key, pms, pms_len); - if (hs->new_session->master_key_length == 0) { - goto err; - } - hs->new_session->extended_master_secret = hs->extended_master_secret; - OPENSSL_cleanse(pms, pms_len); - OPENSSL_free(pms); - - return 1; - -err: - CBB_cleanup(&cbb); - if (pms != NULL) { - OPENSSL_cleanse(pms, pms_len); - OPENSSL_free(pms); - } - return -1; -} - -static int ssl3_send_cert_verify(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - assert(ssl_has_private_key(ssl)); - - CBB cbb, body, child; - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_CERTIFICATE_VERIFY)) { - goto err; - } - - uint16_t signature_algorithm; - if (!tls1_choose_signature_algorithm(hs, &signature_algorithm)) { - goto err; - } - if (ssl3_protocol_version(ssl) >= TLS1_2_VERSION) { - /* Write out the digest type in TLS 1.2. */ - if (!CBB_add_u16(&body, signature_algorithm)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - } - - /* Set aside space for the signature. */ - const size_t max_sig_len = ssl_private_key_max_signature_len(ssl); - uint8_t *ptr; - if (!CBB_add_u16_length_prefixed(&body, &child) || - !CBB_reserve(&child, &ptr, max_sig_len)) { - goto err; - } - - size_t sig_len = max_sig_len; - enum ssl_private_key_result_t sign_result; - if (hs->state == SSL3_ST_CW_CERT_VRFY_A) { - /* The SSL3 construction for CertificateVerify does not decompose into a - * single final digest and signature, and must be special-cased. */ - if (ssl3_protocol_version(ssl) == SSL3_VERSION) { - if (ssl->cert->key_method != NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_PROTOCOL_FOR_CUSTOM_KEY); - goto err; - } - - uint8_t digest[EVP_MAX_MD_SIZE]; - size_t digest_len; - if (!SSL_TRANSCRIPT_ssl3_cert_verify_hash(&hs->transcript, digest, - &digest_len, hs->new_session, - signature_algorithm)) { - goto err; - } - - sign_result = ssl_private_key_success; - - EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(ssl->cert->privatekey, NULL); - if (pctx == NULL || - !EVP_PKEY_sign_init(pctx) || - !EVP_PKEY_sign(pctx, ptr, &sig_len, digest, digest_len)) { - EVP_PKEY_CTX_free(pctx); - sign_result = ssl_private_key_failure; - goto err; - } - EVP_PKEY_CTX_free(pctx); - } else { - sign_result = ssl_private_key_sign( - ssl, ptr, &sig_len, max_sig_len, signature_algorithm, - (const uint8_t *)hs->transcript.buffer->data, - hs->transcript.buffer->length); - } - - /* The handshake buffer is no longer necessary. 
*/ - SSL_TRANSCRIPT_free_buffer(&hs->transcript); - } else { - assert(hs->state == SSL3_ST_CW_CERT_VRFY_B); - sign_result = ssl_private_key_complete(ssl, ptr, &sig_len, max_sig_len); - } - - switch (sign_result) { - case ssl_private_key_success: - break; - case ssl_private_key_failure: - goto err; - case ssl_private_key_retry: - ssl->rwstate = SSL_PRIVATE_KEY_OPERATION; - hs->state = SSL3_ST_CW_CERT_VRFY_B; - goto err; - } - - if (!CBB_did_write(&child, sig_len) || - !ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - - return 1; - -err: - CBB_cleanup(&cbb); - return -1; -} - -static int ssl3_send_next_proto(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - static const uint8_t kZero[32] = {0}; - size_t padding_len = 32 - ((ssl->s3->next_proto_negotiated_len + 2) % 32); - - CBB cbb, body, child; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_NEXT_PROTO) || - !CBB_add_u8_length_prefixed(&body, &child) || - !CBB_add_bytes(&child, ssl->s3->next_proto_negotiated, - ssl->s3->next_proto_negotiated_len) || - !CBB_add_u8_length_prefixed(&body, &child) || - !CBB_add_bytes(&child, kZero, padding_len) || - !ssl_add_message_cbb(ssl, &cbb)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - CBB_cleanup(&cbb); - return -1; - } - - return 1; -} - -static int ssl3_send_channel_id(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_do_channel_id_callback(ssl)) { - return -1; - } - - if (ssl->tlsext_channel_id_private == NULL) { - ssl->rwstate = SSL_CHANNEL_ID_LOOKUP; - return -1; - } - - CBB cbb, body; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_CHANNEL_ID) || - !tls1_write_channel_id(hs, &body) || - !ssl_add_message_cbb(ssl, &cbb)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - CBB_cleanup(&cbb); - return -1; - } - - return 1; -} - -static int ssl3_get_new_session_ticket(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_NEW_SESSION_TICKET) || - !ssl_hash_current_message(hs)) { - return -1; - } - - CBS new_session_ticket, ticket; - uint32_t tlsext_tick_lifetime_hint; - CBS_init(&new_session_ticket, ssl->init_msg, ssl->init_num); - if (!CBS_get_u32(&new_session_ticket, &tlsext_tick_lifetime_hint) || - !CBS_get_u16_length_prefixed(&new_session_ticket, &ticket) || - CBS_len(&new_session_ticket) != 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return -1; - } - - if (CBS_len(&ticket) == 0) { - /* RFC 5077 allows a server to change its mind and send no ticket after - * negotiating the extension. The value of |ticket_expected| is checked in - * |ssl_update_cache| so is cleared here to avoid an unnecessary update. */ - hs->ticket_expected = 0; - return 1; - } - - int session_renewed = ssl->session != NULL; - SSL_SESSION *session = hs->new_session; - if (session_renewed) { - /* The server is sending a new ticket for an existing session. Sessions are - * immutable once established, so duplicate all but the ticket of the - * existing session. */ - session = SSL_SESSION_dup(ssl->session, SSL_SESSION_INCLUDE_NONAUTH); - if (session == NULL) { - /* This should never happen. */ - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - } - - /* |tlsext_tick_lifetime_hint| is measured from when the ticket was issued. 
*/ - ssl_session_rebase_time(ssl, session); - - if (!CBS_stow(&ticket, &session->tlsext_tick, &session->tlsext_ticklen)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - session->tlsext_tick_lifetime_hint = tlsext_tick_lifetime_hint; - - /* Generate a session ID for this session based on the session ticket. We use - * the session ID mechanism for detecting ticket resumption. This also fits in - * with assumptions elsewhere in OpenSSL.*/ - if (!EVP_Digest(CBS_data(&ticket), CBS_len(&ticket), - session->session_id, &session->session_id_length, - EVP_sha256(), NULL)) { - goto err; - } - - if (session_renewed) { - session->not_resumable = 0; - SSL_SESSION_free(ssl->session); - ssl->session = session; - } - - return 1; - -err: - if (session_renewed) { - SSL_SESSION_free(session); - } - return -1; -} diff --git a/Sources/BoringSSL/ssl/handshake_client.cc b/Sources/BoringSSL/ssl/handshake_client.cc new file mode 100644 index 000000000..583acebbe --- /dev/null +++ b/Sources/BoringSSL/ssl/handshake_client.cc @@ -0,0 +1,1836 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ +/* ==================================================================== + * Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ +/* ==================================================================== + * Copyright 2002 Sun Microsystems, Inc. 
ALL RIGHTS RESERVED. + * + * Portions of the attached software ("Contribution") are developed by + * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. + * + * The Contribution is licensed pursuant to the OpenSSL open source + * license provided above. + * + * ECC cipher suite support in OpenSSL originally written by + * Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories. + * + */ +/* ==================================================================== + * Copyright 2005 Nokia. All rights reserved. + * + * The portions of the attached software ("Contribution") is developed by + * Nokia Corporation and is licensed pursuant to the OpenSSL open source + * license. + * + * The Contribution, originally written by Mika Kousa and Pasi Eronen of + * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites + * support (see RFC 4279) to OpenSSL. + * + * No patent licenses or other rights except those expressly stated in + * the OpenSSL open source license shall be deemed granted or received + * expressly, by implication, estoppel, or otherwise. + * + * No assurances are provided by Nokia that the Contribution does not + * infringe the patent or other intellectual property rights of any third + * party or that the license provides you with all the necessary rights + * to make use of the Contribution. + * + * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN + * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA + * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY + * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR + * OTHERWISE. + */ + +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +namespace bssl { + +enum ssl_client_hs_state_t { + state_start_connect = 0, + state_enter_early_data, + state_read_hello_verify_request, + state_read_server_hello, + state_tls13, + state_read_server_certificate, + state_read_certificate_status, + state_verify_server_certificate, + state_read_server_key_exchange, + state_read_certificate_request, + state_read_server_hello_done, + state_send_client_certificate, + state_send_client_key_exchange, + state_send_client_certificate_verify, + state_send_client_finished, + state_finish_flight, + state_read_session_ticket, + state_process_change_cipher_spec, + state_read_server_finished, + state_finish_client_handshake, + state_done, +}; + +// ssl_get_client_disabled sets |*out_mask_a| and |*out_mask_k| to masks of +// disabled algorithms. +static void ssl_get_client_disabled(SSL *ssl, uint32_t *out_mask_a, + uint32_t *out_mask_k) { + *out_mask_a = 0; + *out_mask_k = 0; + + // PSK requires a client callback. + if (ssl->psk_client_callback == NULL) { + *out_mask_a |= SSL_aPSK; + *out_mask_k |= SSL_kPSK; + } +} + +static int ssl_write_client_cipher_list(SSL_HANDSHAKE *hs, CBB *out) { + SSL *const ssl = hs->ssl; + uint32_t mask_a, mask_k; + ssl_get_client_disabled(ssl, &mask_a, &mask_k); + + CBB child; + if (!CBB_add_u16_length_prefixed(out, &child)) { + return 0; + } + + // Add a fake cipher suite. See draft-davidben-tls-grease-01. + if (ssl->ctx->grease_enabled && + !CBB_add_u16(&child, ssl_get_grease_value(ssl, ssl_grease_cipher))) { + return 0; + } + + // Add TLS 1.3 ciphers. Order ChaCha20-Poly1305 relative to AES-GCM based on + // hardware support. 
+ if (hs->max_version >= TLS1_3_VERSION) { + if (!EVP_has_aes_hardware() && + !CBB_add_u16(&child, TLS1_CK_CHACHA20_POLY1305_SHA256 & 0xffff)) { + return 0; + } + if (!CBB_add_u16(&child, TLS1_CK_AES_128_GCM_SHA256 & 0xffff) || + !CBB_add_u16(&child, TLS1_CK_AES_256_GCM_SHA384 & 0xffff)) { + return 0; + } + if (EVP_has_aes_hardware() && + !CBB_add_u16(&child, TLS1_CK_CHACHA20_POLY1305_SHA256 & 0xffff)) { + return 0; + } + } + + if (hs->min_version < TLS1_3_VERSION) { + int any_enabled = 0; + for (const SSL_CIPHER *cipher : SSL_get_ciphers(ssl)) { + // Skip disabled ciphers + if ((cipher->algorithm_mkey & mask_k) || + (cipher->algorithm_auth & mask_a)) { + continue; + } + if (SSL_CIPHER_get_min_version(cipher) > hs->max_version || + SSL_CIPHER_get_max_version(cipher) < hs->min_version) { + continue; + } + any_enabled = 1; + if (!CBB_add_u16(&child, ssl_cipher_get_value(cipher))) { + return 0; + } + } + + // If all ciphers were disabled, return the error to the caller. + if (!any_enabled && hs->max_version < TLS1_3_VERSION) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CIPHERS_AVAILABLE); + return 0; + } + } + + // For SSLv3, the SCSV is added. Otherwise the renegotiation extension is + // added. + if (hs->max_version == SSL3_VERSION && + !ssl->s3->initial_handshake_complete) { + if (!CBB_add_u16(&child, SSL3_CK_SCSV & 0xffff)) { + return 0; + } + } + + if (ssl->mode & SSL_MODE_SEND_FALLBACK_SCSV) { + if (!CBB_add_u16(&child, SSL3_CK_FALLBACK_SCSV & 0xffff)) { + return 0; + } + } + + return CBB_flush(out); +} + +int ssl_write_client_hello(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + ScopedCBB cbb; + CBB body; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CLIENT_HELLO)) { + return 0; + } + + // Renegotiations do not participate in session resumption. + int has_session_id = ssl->session != NULL && + !ssl->s3->initial_handshake_complete && + ssl->session->session_id_length > 0; + + CBB child; + if (!CBB_add_u16(&body, hs->client_version) || + !CBB_add_bytes(&body, ssl->s3->client_random, SSL3_RANDOM_SIZE) || + !CBB_add_u8_length_prefixed(&body, &child)) { + return 0; + } + + if (has_session_id) { + if (!CBB_add_bytes(&child, ssl->session->session_id, + ssl->session->session_id_length)) { + return 0; + } + } else { + // In TLS 1.3 experimental encodings, send a fake placeholder session ID + // when we do not otherwise have one to send. + if (hs->max_version >= TLS1_3_VERSION && + ssl_is_resumption_variant(ssl->tls13_variant) && + !CBB_add_bytes(&child, hs->session_id, hs->session_id_len)) { + return 0; + } + } + + if (SSL_is_dtls(ssl)) { + if (!CBB_add_u8_length_prefixed(&body, &child) || + !CBB_add_bytes(&child, ssl->d1->cookie, ssl->d1->cookie_len)) { + return 0; + } + } + + size_t header_len = + SSL_is_dtls(ssl) ? DTLS1_HM_HEADER_LENGTH : SSL3_HM_HEADER_LENGTH; + if (!ssl_write_client_cipher_list(hs, &body) || + !CBB_add_u8(&body, 1 /* one compression method */) || + !CBB_add_u8(&body, 0 /* null compression */) || + !ssl_add_clienthello_tlsext(hs, &body, header_len + CBB_len(&body))) { + return 0; + } + + Array msg; + if (!ssl->method->finish_message(ssl, cbb.get(), &msg)) { + return 0; + } + + // Now that the length prefixes have been computed, fill in the placeholder + // PSK binder. 
+ if (hs->needs_psk_binder && + !tls13_write_psk_binder(hs, msg.data(), msg.size())) { + return 0; + } + + return ssl->method->add_message(ssl, std::move(msg)); +} + +static int parse_server_version(SSL_HANDSHAKE *hs, uint16_t *out, + const SSLMessage &msg) { + SSL *const ssl = hs->ssl; + if (msg.type != SSL3_MT_SERVER_HELLO && + msg.type != SSL3_MT_HELLO_RETRY_REQUEST) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); + return 0; + } + + CBS server_hello = msg.body; + if (!CBS_get_u16(&server_hello, out)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return 0; + } + + // The server version may also be in the supported_versions extension if + // applicable. + if (msg.type != SSL3_MT_SERVER_HELLO || *out != TLS1_2_VERSION) { + return 1; + } + + uint8_t sid_length; + if (!CBS_skip(&server_hello, SSL3_RANDOM_SIZE) || + !CBS_get_u8(&server_hello, &sid_length) || + !CBS_skip(&server_hello, sid_length + 2 /* cipher_suite */ + + 1 /* compression_method */)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return 0; + } + + // The extensions block may not be present. + if (CBS_len(&server_hello) == 0) { + return 1; + } + + CBS extensions; + if (!CBS_get_u16_length_prefixed(&server_hello, &extensions) || + CBS_len(&server_hello) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return 0; + } + + bool have_supported_versions; + CBS supported_versions; + const SSL_EXTENSION_TYPE ext_types[] = { + {TLSEXT_TYPE_supported_versions, &have_supported_versions, + &supported_versions}, + }; + + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!ssl_parse_extensions(&extensions, &alert, ext_types, + OPENSSL_ARRAY_SIZE(ext_types), + 1 /* ignore unknown */)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return 0; + } + + if (have_supported_versions && + (!CBS_get_u16(&supported_versions, out) || + CBS_len(&supported_versions) != 0)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return 0; + } + + return 1; +} + +static enum ssl_hs_wait_t do_start_connect(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + ssl_do_info_callback(ssl, SSL_CB_HANDSHAKE_START, 1); + // |session_reused| must be reset in case this is a renegotiation. + ssl->s3->session_reused = false; + + // Freeze the version range. + if (!ssl_get_version_range(ssl, &hs->min_version, &hs->max_version)) { + return ssl_hs_error; + } + + // SSL 3.0 ClientHellos should use SSL 3.0 not TLS 1.0, for the record-layer + // version. + if (hs->max_version == SSL3_VERSION) { + ssl->s3->aead_write_ctx->SetVersionIfNullCipher(SSL3_VERSION); + } + + // Always advertise the ClientHello version from the original maximum version, + // even on renegotiation. The static RSA key exchange uses this field, and + // some servers fail when it changes across handshakes. + if (SSL_is_dtls(hs->ssl)) { + hs->client_version = + hs->max_version >= TLS1_2_VERSION ? DTLS1_2_VERSION : DTLS1_VERSION; + } else { + hs->client_version = + hs->max_version >= TLS1_2_VERSION ? TLS1_2_VERSION : hs->max_version; + } + + // If the configured session has expired or was created at a disabled + // version, drop it. 
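A minimal sketch of the freshness part of the session check that follows, under the assumption that session creation time and timeout are tracked in seconds; SessionIsTimeValid is an illustrative name, not the BoringSSL helper.

#include <cstdint>

// A session created at |session_time| with lifetime |session_timeout|
// (both in seconds) is only offered for resumption while still fresh.
bool SessionIsTimeValid(uint64_t now, uint64_t session_time,
                        uint64_t session_timeout) {
  if (session_time > now) {
    return false;  // Clock went backwards; treat the session as expired.
  }
  return now - session_time < session_timeout;
}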
+ if (ssl->session != NULL) { + if (ssl->session->is_server || + !ssl_supports_version(hs, ssl->session->ssl_version) || + (ssl->session->session_id_length == 0 && + ssl->session->tlsext_ticklen == 0) || + ssl->session->not_resumable || + !ssl_session_is_time_valid(ssl, ssl->session)) { + ssl_set_session(ssl, NULL); + } + } + + if (!RAND_bytes(ssl->s3->client_random, sizeof(ssl->s3->client_random))) { + return ssl_hs_error; + } + + // Initialize a random session ID for the experimental TLS 1.3 variant + // requiring a session id. + if (ssl_is_resumption_variant(ssl->tls13_variant)) { + hs->session_id_len = sizeof(hs->session_id); + if (!RAND_bytes(hs->session_id, hs->session_id_len)) { + return ssl_hs_error; + } + } + + if (!ssl_write_client_hello(hs)) { + return ssl_hs_error; + } + + hs->state = state_enter_early_data; + return ssl_hs_flush; +} + +static enum ssl_hs_wait_t do_enter_early_data(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (SSL_is_dtls(ssl)) { + hs->state = state_read_hello_verify_request; + return ssl_hs_ok; + } + + if (!hs->early_data_offered) { + hs->state = state_read_server_hello; + return ssl_hs_ok; + } + + ssl->s3->aead_write_ctx->SetVersionIfNullCipher(ssl->session->ssl_version); + if (ssl_is_draft22(ssl->session->ssl_version) && + !ssl->method->add_change_cipher_spec(ssl)) { + return ssl_hs_error; + } + + if (!tls13_init_early_key_schedule(hs, ssl->session->master_key, + ssl->session->master_key_length) || + !tls13_derive_early_secrets(hs) || + !tls13_set_traffic_key(ssl, evp_aead_seal, hs->early_traffic_secret, + hs->hash_len)) { + return ssl_hs_error; + } + + // Stash the early data session, so connection properties may be queried out + // of it. + hs->in_early_data = true; + SSL_SESSION_up_ref(ssl->session); + hs->early_session.reset(ssl->session); + hs->can_early_write = true; + + hs->state = state_read_server_hello; + return ssl_hs_early_return; +} + +static enum ssl_hs_wait_t do_read_hello_verify_request(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + assert(SSL_is_dtls(ssl)); + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (msg.type != DTLS1_MT_HELLO_VERIFY_REQUEST) { + hs->state = state_read_server_hello; + return ssl_hs_ok; + } + + CBS hello_verify_request = msg.body, cookie; + uint16_t server_version; + if (!CBS_get_u16(&hello_verify_request, &server_version) || + !CBS_get_u8_length_prefixed(&hello_verify_request, &cookie) || + CBS_len(&cookie) > sizeof(ssl->d1->cookie) || + CBS_len(&hello_verify_request) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + OPENSSL_memcpy(ssl->d1->cookie, CBS_data(&cookie), CBS_len(&cookie)); + ssl->d1->cookie_len = CBS_len(&cookie); + + ssl->method->next_message(ssl); + + // DTLS resets the handshake buffer after HelloVerifyRequest. 
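Sketch of the HelloVerifyRequest cookie handling above (illustrative only): the client rejects a cookie that would overflow its fixed-size buffer and otherwise stores it so it can be echoed in the retried ClientHello. kMaxCookieLen and StoreCookie are assumed names; the bound is not the BoringSSL constant.

#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr size_t kMaxCookieLen = 255;  // Stand-in bound for the cookie buffer.

struct DtlsCookie {
  uint8_t data[kMaxCookieLen];
  size_t len = 0;
};

// Copies the server-supplied cookie, rejecting anything that does not fit.
bool StoreCookie(DtlsCookie *out, const uint8_t *cookie, size_t cookie_len) {
  if (cookie_len > sizeof(out->data)) {
    return false;  // Oversized cookie; abort the handshake.
  }
  std::memcpy(out->data, cookie, cookie_len);
  out->len = cookie_len;
  return true;
}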
+ if (!hs->transcript.Init()) { + return ssl_hs_error; + } + + if (!ssl_write_client_hello(hs)) { + return ssl_hs_error; + } + + hs->state = state_read_server_hello; + return ssl_hs_flush; +} + +static enum ssl_hs_wait_t do_read_server_hello(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_server_hello; + } + + uint16_t server_version; + if (!parse_server_version(hs, &server_version, msg)) { + return ssl_hs_error; + } + + if (!ssl_supports_version(hs, server_version)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_PROTOCOL); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_PROTOCOL_VERSION); + return ssl_hs_error; + } + + assert(ssl->s3->have_version == ssl->s3->initial_handshake_complete); + if (!ssl->s3->have_version) { + ssl->version = server_version; + // At this point, the connection's version is known and ssl->version is + // fixed. Begin enforcing the record-layer version. + ssl->s3->have_version = true; + ssl->s3->aead_write_ctx->SetVersionIfNullCipher(ssl->version); + } else if (server_version != ssl->version) { + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SSL_VERSION); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_PROTOCOL_VERSION); + return ssl_hs_error; + } + + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + hs->state = state_tls13; + return ssl_hs_ok; + } + + // Clear some TLS 1.3 state that no longer needs to be retained. + hs->key_share.reset(); + hs->key_share_bytes.Reset(); + + // A TLS 1.2 server would not know to skip the early data we offered. Report + // an error code sooner. The caller may use this error code to implement the + // fallback described in draft-ietf-tls-tls13-18 appendix C.3. + if (hs->early_data_offered) { + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_VERSION_ON_EARLY_DATA); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_PROTOCOL_VERSION); + return ssl_hs_error; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_SERVER_HELLO)) { + return ssl_hs_error; + } + + CBS server_hello = msg.body, server_random, session_id; + uint16_t cipher_suite; + uint8_t compression_method; + if (!CBS_skip(&server_hello, 2 /* version */) || + !CBS_get_bytes(&server_hello, &server_random, SSL3_RANDOM_SIZE) || + !CBS_get_u8_length_prefixed(&server_hello, &session_id) || + CBS_len(&session_id) > SSL3_SESSION_ID_SIZE || + !CBS_get_u16(&server_hello, &cipher_suite) || + !CBS_get_u8(&server_hello, &compression_method)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + // Copy over the server random. + OPENSSL_memcpy(ssl->s3->server_random, CBS_data(&server_random), + SSL3_RANDOM_SIZE); + + // TODO(davidben): Implement the TLS 1.1 and 1.2 downgrade sentinels once TLS + // 1.3 is finalized and we are not implementing a draft version. + + if (!ssl->s3->initial_handshake_complete && ssl->session != NULL && + ssl->session->session_id_length != 0 && + CBS_mem_equal(&session_id, ssl->session->session_id, + ssl->session->session_id_length)) { + ssl->s3->session_reused = true; + } else { + // The session wasn't resumed. Create a fresh SSL_SESSION to + // fill out. + ssl_set_session(ssl, NULL); + if (!ssl_get_new_session(hs, 0 /* client */)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + // Note: session_id could be empty. 
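Sketch of the resumption test above (illustrative, not the BoringSSL API): the session is treated as resumed only when this is an initial handshake, a non-empty session ID was offered, and the server echoed exactly that ID.

#include <cstddef>
#include <cstdint>
#include <cstring>

// True when the server's echoed session ID indicates resumption of the
// session the client offered. Session IDs are not secret, so memcmp is fine.
bool SessionWasResumed(const uint8_t *offered, size_t offered_len,
                       const uint8_t *echoed, size_t echoed_len,
                       bool initial_handshake) {
  return initial_handshake && offered_len != 0 && offered_len == echoed_len &&
         std::memcmp(offered, echoed, offered_len) == 0;
}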
+ hs->new_session->session_id_length = CBS_len(&session_id); + OPENSSL_memcpy(hs->new_session->session_id, CBS_data(&session_id), + CBS_len(&session_id)); + } + + const SSL_CIPHER *cipher = SSL_get_cipher_by_value(cipher_suite); + if (cipher == NULL) { + // unknown cipher + OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CIPHER_RETURNED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + // The cipher must be allowed in the selected version and enabled. + uint32_t mask_a, mask_k; + ssl_get_client_disabled(ssl, &mask_a, &mask_k); + if ((cipher->algorithm_mkey & mask_k) || (cipher->algorithm_auth & mask_a) || + SSL_CIPHER_get_min_version(cipher) > ssl_protocol_version(ssl) || + SSL_CIPHER_get_max_version(cipher) < ssl_protocol_version(ssl) || + !sk_SSL_CIPHER_find(SSL_get_ciphers(ssl), NULL, cipher)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CIPHER_RETURNED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + if (ssl->session != NULL) { + if (ssl->session->ssl_version != ssl->version) { + OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_VERSION_NOT_RETURNED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + if (ssl->session->cipher != cipher) { + OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + if (!ssl_session_is_context_valid(ssl, ssl->session)) { + // This is actually a client application bug. + OPENSSL_PUT_ERROR(SSL, + SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + } else { + hs->new_session->cipher = cipher; + } + hs->new_cipher = cipher; + + // Now that the cipher is known, initialize the handshake hash and hash the + // ServerHello. + if (!hs->transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher) || + !ssl_hash_message(hs, msg)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + // If doing a full handshake, the server may request a client certificate + // which requires hashing the handshake transcript. Otherwise, the handshake + // buffer may be released. + if (ssl->session != NULL || + !ssl_cipher_uses_certificate_auth(hs->new_cipher)) { + hs->transcript.FreeBuffer(); + } + + // Only the NULL compression algorithm is supported. + if (compression_method != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + // TLS extensions + if (!ssl_parse_serverhello_tlsext(hs, &server_hello)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); + return ssl_hs_error; + } + + // There should be nothing left over in the record. 
+ if (CBS_len(&server_hello) != 0) { + // wrong packet length + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + if (ssl->session != NULL && + hs->extended_master_secret != ssl->session->extended_master_secret) { + if (ssl->session->extended_master_secret) { + OPENSSL_PUT_ERROR(SSL, SSL_R_RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION); + } else { + OPENSSL_PUT_ERROR(SSL, SSL_R_RESUMED_NON_EMS_SESSION_WITH_EMS_EXTENSION); + } + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + + if (ssl->session != NULL) { + hs->state = state_read_session_ticket; + return ssl_hs_ok; + } + + hs->state = state_read_server_certificate; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_tls13(SSL_HANDSHAKE *hs) { + enum ssl_hs_wait_t wait = tls13_client_handshake(hs); + if (wait == ssl_hs_ok) { + hs->state = state_finish_client_handshake; + return ssl_hs_ok; + } + + return wait; +} + +static enum ssl_hs_wait_t do_read_server_certificate(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (!ssl_cipher_uses_certificate_auth(hs->new_cipher)) { + hs->state = state_read_certificate_status; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + CBS body = msg.body; + uint8_t alert = SSL_AD_DECODE_ERROR; + UniquePtr chain; + if (!ssl_parse_cert_chain(&alert, &chain, &hs->peer_pubkey, NULL, &body, + ssl->ctx->pool)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + sk_CRYPTO_BUFFER_pop_free(hs->new_session->certs, CRYPTO_BUFFER_free); + hs->new_session->certs = chain.release(); + + if (sk_CRYPTO_BUFFER_num(hs->new_session->certs) == 0 || + CBS_len(&body) != 0 || + !ssl->ctx->x509_method->session_cache_objects(hs->new_session.get())) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + if (!ssl_check_leaf_certificate( + hs, hs->peer_pubkey.get(), + sk_CRYPTO_BUFFER_value(hs->new_session->certs, 0))) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + + hs->state = state_read_certificate_status; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_certificate_status(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (!hs->certificate_status_expected) { + hs->state = state_verify_server_certificate; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (msg.type != SSL3_MT_CERTIFICATE_STATUS) { + // A server may send status_request in ServerHello and then change its mind + // about sending CertificateStatus. 
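The CertificateStatus body parsed just below is a one-byte status_type (1 = ocsp) followed by a 24-bit length-prefixed OCSP response with nothing trailing. A standalone sketch of that wire format, using assumed names rather than the CBS API:

#include <cstddef>
#include <cstdint>
#include <vector>

// Parses a TLS CertificateStatus body: u8 status_type (1 = ocsp), then a
// u24 length-prefixed OCSP response, then nothing else.
bool ParseCertificateStatus(const uint8_t *in, size_t in_len,
                            std::vector<uint8_t> *out_ocsp) {
  if (in_len < 4 || in[0] != 1 /* status_type: ocsp */) {
    return false;
  }
  size_t ocsp_len = (size_t{in[1]} << 16) | (size_t{in[2]} << 8) | in[3];
  if (ocsp_len == 0 || in_len - 4 != ocsp_len) {
    return false;  // Empty response or trailing garbage.
  }
  out_ocsp->assign(in + 4, in + 4 + ocsp_len);
  return true;
}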
+ hs->state = state_verify_server_certificate; + return ssl_hs_ok; + } + + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + CBS certificate_status = msg.body, ocsp_response; + uint8_t status_type; + if (!CBS_get_u8(&certificate_status, &status_type) || + status_type != TLSEXT_STATUSTYPE_ocsp || + !CBS_get_u24_length_prefixed(&certificate_status, &ocsp_response) || + CBS_len(&ocsp_response) == 0 || + CBS_len(&certificate_status) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + CRYPTO_BUFFER_free(hs->new_session->ocsp_response); + hs->new_session->ocsp_response = + CRYPTO_BUFFER_new_from_CBS(&ocsp_response, ssl->ctx->pool); + if (hs->new_session->ocsp_response == nullptr) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + + hs->state = state_verify_server_certificate; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_verify_server_certificate(SSL_HANDSHAKE *hs) { + if (!ssl_cipher_uses_certificate_auth(hs->new_cipher)) { + hs->state = state_read_server_key_exchange; + return ssl_hs_ok; + } + + switch (ssl_verify_peer_cert(hs)) { + case ssl_verify_ok: + break; + case ssl_verify_invalid: + return ssl_hs_error; + case ssl_verify_retry: + hs->state = state_verify_server_certificate; + return ssl_hs_certificate_verify; + } + + hs->state = state_read_server_key_exchange; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_server_key_exchange(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (msg.type != SSL3_MT_SERVER_KEY_EXCHANGE) { + // Some ciphers (pure PSK) have an optional ServerKeyExchange message. + if (ssl_cipher_requires_server_key_exchange(hs->new_cipher)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + return ssl_hs_error; + } + + hs->state = state_read_certificate_request; + return ssl_hs_ok; + } + + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + uint32_t alg_k = hs->new_cipher->algorithm_mkey; + uint32_t alg_a = hs->new_cipher->algorithm_auth; + CBS server_key_exchange = msg.body; + if (alg_a & SSL_aPSK) { + CBS psk_identity_hint; + + // Each of the PSK key exchanges begins with a psk_identity_hint. + if (!CBS_get_u16_length_prefixed(&server_key_exchange, + &psk_identity_hint)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + // Store the PSK identity hint for the ClientKeyExchange. Assume that the + // maximum length of a PSK identity hint can be as long as the maximum + // length of a PSK identity. Also do not allow NULL characters; identities + // are saved as C strings. + // + // TODO(davidben): Should invalid hints be ignored? It's a hint rather than + // a specific identity. + if (CBS_len(&psk_identity_hint) > PSK_MAX_IDENTITY_LEN || + CBS_contains_zero_byte(&psk_identity_hint)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + + // Save non-empty identity hints as a C string. Empty identity hints we + // treat as missing. Plain PSK makes it possible to send either no hint + // (omit ServerKeyExchange) or an empty hint, while ECDHE_PSK can only spell + // empty hint. 
Having different capabilities is odd, so we interpret empty + // and missing as identical. + char *raw = nullptr; + if (CBS_len(&psk_identity_hint) != 0 && + !CBS_strdup(&psk_identity_hint, &raw)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + hs->peer_psk_identity_hint.reset(raw); + } + + if (alg_k & SSL_kECDHE) { + // Parse the server parameters. + uint8_t group_type; + uint16_t group_id; + CBS point; + if (!CBS_get_u8(&server_key_exchange, &group_type) || + group_type != NAMED_CURVE_TYPE || + !CBS_get_u16(&server_key_exchange, &group_id) || + !CBS_get_u8_length_prefixed(&server_key_exchange, &point)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + hs->new_session->group_id = group_id; + + // Ensure the group is consistent with preferences. + if (!tls1_check_group_id(ssl, group_id)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + // Initialize ECDH and save the peer public key for later. + hs->key_share = SSLKeyShare::Create(group_id); + if (!hs->key_share || + !hs->peer_key.CopyFrom(point)) { + return ssl_hs_error; + } + } else if (!(alg_k & SSL_kPSK)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + return ssl_hs_error; + } + + // At this point, |server_key_exchange| contains the signature, if any, while + // |msg.body| contains the entire message. From that, derive a CBS containing + // just the parameter. + CBS parameter; + CBS_init(¶meter, CBS_data(&msg.body), + CBS_len(&msg.body) - CBS_len(&server_key_exchange)); + + // ServerKeyExchange should be signed by the server's public key. + if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { + uint16_t signature_algorithm = 0; + if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) { + if (!CBS_get_u16(&server_key_exchange, &signature_algorithm)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!tls12_check_peer_sigalg(ssl, &alert, signature_algorithm)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + hs->new_session->peer_signature_algorithm = signature_algorithm; + } else if (!tls1_get_legacy_signature_algorithm(&signature_algorithm, + hs->peer_pubkey.get())) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_CERTIFICATE); + return ssl_hs_error; + } + + // The last field in |server_key_exchange| is the signature. 
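The signature check that follows is computed over client_random || server_random || the ServerKeyExchange parameters (everything before the signature field). A minimal sketch of that layout, assuming 32-byte randoms as in TLS; SignedTranscript is an illustrative name.

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kRandomSize = 32;  // Corresponds to SSL3_RANDOM_SIZE.

// Builds the byte string the server signs in ServerKeyExchange:
// client_random || server_random || params.
std::vector<uint8_t> SignedTranscript(
    const uint8_t (&client_random)[kRandomSize],
    const uint8_t (&server_random)[kRandomSize],
    const uint8_t *params, size_t params_len) {
  std::vector<uint8_t> out;
  out.reserve(2 * kRandomSize + params_len);
  out.insert(out.end(), client_random, client_random + kRandomSize);
  out.insert(out.end(), server_random, server_random + kRandomSize);
  out.insert(out.end(), params, params + params_len);
  return out;
}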
+ CBS signature; + if (!CBS_get_u16_length_prefixed(&server_key_exchange, &signature) || + CBS_len(&server_key_exchange) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + ScopedCBB transcript; + Array transcript_data; + if (!CBB_init(transcript.get(), + 2 * SSL3_RANDOM_SIZE + CBS_len(¶meter)) || + !CBB_add_bytes(transcript.get(), ssl->s3->client_random, + SSL3_RANDOM_SIZE) || + !CBB_add_bytes(transcript.get(), ssl->s3->server_random, + SSL3_RANDOM_SIZE) || + !CBB_add_bytes(transcript.get(), CBS_data(¶meter), + CBS_len(¶meter)) || + !CBBFinishArray(transcript.get(), &transcript_data)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + bool sig_ok = ssl_public_key_verify(ssl, signature, signature_algorithm, + hs->peer_pubkey.get(), transcript_data); +#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) + sig_ok = true; + ERR_clear_error(); +#endif + if (!sig_ok) { + // bad signature + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SIGNATURE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); + return ssl_hs_error; + } + } else { + // PSK ciphers are the only supported certificate-less ciphers. + assert(alg_a == SSL_aPSK); + + if (CBS_len(&server_key_exchange) > 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_EXTRA_DATA_IN_MESSAGE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + } + + ssl->method->next_message(ssl); + hs->state = state_read_certificate_request; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_certificate_request(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (!ssl_cipher_uses_certificate_auth(hs->new_cipher)) { + hs->state = state_read_server_hello_done; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (msg.type == SSL3_MT_SERVER_HELLO_DONE) { + // If we get here we don't need the handshake buffer as we won't be doing + // client auth. + hs->transcript.FreeBuffer(); + hs->state = state_read_server_hello_done; + return ssl_hs_ok; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE_REQUEST) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + // Get the certificate types. 
+ CBS body = msg.body, certificate_types; + if (!CBS_get_u8_length_prefixed(&body, &certificate_types)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + + if (!hs->certificate_types.CopyFrom(certificate_types)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) { + CBS supported_signature_algorithms; + if (!CBS_get_u16_length_prefixed(&body, &supported_signature_algorithms) || + !tls1_parse_peer_sigalgs(hs, &supported_signature_algorithms)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + } + + uint8_t alert = SSL_AD_DECODE_ERROR; + UniquePtr ca_names = + ssl_parse_client_CA_list(ssl, &alert, &body); + if (!ca_names) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + if (CBS_len(&body) != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + + hs->cert_request = true; + hs->ca_names = std::move(ca_names); + ssl->ctx->x509_method->hs_flush_cached_ca_names(hs); + + ssl->method->next_message(ssl); + hs->state = state_read_server_hello_done; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_server_hello_done(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_SERVER_HELLO_DONE) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + // ServerHelloDone is empty. + if (CBS_len(&msg.body) != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->state = state_send_client_certificate; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_client_certificate(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + // The peer didn't request a certificate. + if (!hs->cert_request) { + hs->state = state_send_client_key_exchange; + return ssl_hs_ok; + } + + // Call cert_cb to update the certificate. + if (ssl->cert->cert_cb != NULL) { + int rv = ssl->cert->cert_cb(ssl, ssl->cert->cert_cb_arg); + if (rv == 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_CB_ERROR); + return ssl_hs_error; + } + if (rv < 0) { + hs->state = state_send_client_certificate; + return ssl_hs_x509_lookup; + } + } + + if (!ssl_has_certificate(ssl)) { + // Without a client certificate, the handshake buffer may be released. + hs->transcript.FreeBuffer(); + + // In SSL 3.0, the Certificate message is replaced with a warning alert. 
+ if (ssl->version == SSL3_VERSION) { + if (!ssl->method->add_alert(ssl, SSL3_AL_WARNING, + SSL_AD_NO_CERTIFICATE)) { + return ssl_hs_error; + } + hs->state = state_send_client_key_exchange; + return ssl_hs_ok; + } + } + + if (!ssl_on_certificate_selected(hs) || + !ssl_output_cert_chain(ssl)) { + return ssl_hs_error; + } + + + hs->state = state_send_client_key_exchange; + return ssl_hs_ok; +} + +static_assert(sizeof(size_t) >= sizeof(unsigned), + "size_t is smaller than unsigned"); + +static enum ssl_hs_wait_t do_send_client_key_exchange(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + ScopedCBB cbb; + CBB body; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_CLIENT_KEY_EXCHANGE)) { + return ssl_hs_error; + } + + Array pms; + uint32_t alg_k = hs->new_cipher->algorithm_mkey; + uint32_t alg_a = hs->new_cipher->algorithm_auth; + + // If using a PSK key exchange, prepare the pre-shared key. + unsigned psk_len = 0; + uint8_t psk[PSK_MAX_PSK_LEN]; + if (alg_a & SSL_aPSK) { + if (ssl->psk_client_callback == NULL) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_NO_CLIENT_CB); + return ssl_hs_error; + } + + char identity[PSK_MAX_IDENTITY_LEN + 1]; + OPENSSL_memset(identity, 0, sizeof(identity)); + psk_len = + ssl->psk_client_callback(ssl, hs->peer_psk_identity_hint.get(), + identity, sizeof(identity), psk, sizeof(psk)); + if (psk_len == 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_NOT_FOUND); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + assert(psk_len <= PSK_MAX_PSK_LEN); + + OPENSSL_free(hs->new_session->psk_identity); + hs->new_session->psk_identity = BUF_strdup(identity); + if (hs->new_session->psk_identity == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return ssl_hs_error; + } + + // Write out psk_identity. + CBB child; + if (!CBB_add_u16_length_prefixed(&body, &child) || + !CBB_add_bytes(&child, (const uint8_t *)identity, + OPENSSL_strnlen(identity, sizeof(identity))) || + !CBB_flush(&body)) { + return ssl_hs_error; + } + } + + // Depending on the key exchange method, compute |pms|. + if (alg_k & SSL_kRSA) { + if (!pms.Init(SSL_MAX_MASTER_KEY_LENGTH)) { + return ssl_hs_error; + } + + RSA *rsa = EVP_PKEY_get0_RSA(hs->peer_pubkey.get()); + if (rsa == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + + pms[0] = hs->client_version >> 8; + pms[1] = hs->client_version & 0xff; + if (!RAND_bytes(&pms[2], SSL_MAX_MASTER_KEY_LENGTH - 2)) { + return ssl_hs_error; + } + + CBB child, *enc_pms = &body; + size_t enc_pms_len; + // In TLS, there is a length prefix. + if (ssl->version > SSL3_VERSION) { + if (!CBB_add_u16_length_prefixed(&body, &child)) { + return ssl_hs_error; + } + enc_pms = &child; + } + + uint8_t *ptr; + if (!CBB_reserve(enc_pms, &ptr, RSA_size(rsa)) || + !RSA_encrypt(rsa, &enc_pms_len, ptr, RSA_size(rsa), pms.data(), + pms.size(), RSA_PKCS1_PADDING) || + !CBB_did_write(enc_pms, enc_pms_len) || + !CBB_flush(&body)) { + return ssl_hs_error; + } + } else if (alg_k & SSL_kECDHE) { + // Generate a keypair and serialize the public half. + CBB child; + if (!CBB_add_u8_length_prefixed(&body, &child)) { + return ssl_hs_error; + } + + // Compute the premaster. + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!hs->key_share->Accept(&child, &pms, &alert, hs->peer_key)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + if (!CBB_flush(&body)) { + return ssl_hs_error; + } + + // The key exchange state may now be discarded. 
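As an aside on the RSA key exchange above: the premaster secret is 48 bytes, with the first two bytes fixed to the offered ClientHello version and the remaining 46 filled with fresh randomness. A sketch of that layout only; std::random_device here is merely a placeholder for a real CSPRNG such as RAND_bytes, and MakeRsaPremaster is an assumed name.

#include <array>
#include <cstddef>
#include <cstdint>
#include <random>

// Lays out the 48-byte RSA premaster secret: 2 version bytes + 46 random
// bytes. Not cryptographically secure as written; illustration only.
std::array<uint8_t, 48> MakeRsaPremaster(uint16_t client_version) {
  std::array<uint8_t, 48> pms;
  pms[0] = static_cast<uint8_t>(client_version >> 8);
  pms[1] = static_cast<uint8_t>(client_version & 0xff);
  std::random_device rd;
  for (size_t i = 2; i < pms.size(); i++) {
    pms[i] = static_cast<uint8_t>(rd());
  }
  return pms;
}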
+ hs->key_share.reset(); + hs->peer_key.Reset(); + } else if (alg_k & SSL_kPSK) { + // For plain PSK, other_secret is a block of 0s with the same length as + // the pre-shared key. + if (!pms.Init(psk_len)) { + return ssl_hs_error; + } + OPENSSL_memset(pms.data(), 0, pms.size()); + } else { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + + // For a PSK cipher suite, other_secret is combined with the pre-shared + // key. + if (alg_a & SSL_aPSK) { + ScopedCBB pms_cbb; + CBB child; + if (!CBB_init(pms_cbb.get(), 2 + psk_len + 2 + pms.size()) || + !CBB_add_u16_length_prefixed(pms_cbb.get(), &child) || + !CBB_add_bytes(&child, pms.data(), pms.size()) || + !CBB_add_u16_length_prefixed(pms_cbb.get(), &child) || + !CBB_add_bytes(&child, psk, psk_len) || + !CBBFinishArray(pms_cbb.get(), &pms)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return ssl_hs_error; + } + } + + // The message must be added to the finished hash before calculating the + // master secret. + if (!ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + + hs->new_session->master_key_length = + tls1_generate_master_secret(hs, hs->new_session->master_key, pms); + if (hs->new_session->master_key_length == 0) { + return ssl_hs_error; + } + hs->new_session->extended_master_secret = hs->extended_master_secret; + + hs->state = state_send_client_certificate_verify; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_client_certificate_verify(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (!hs->cert_request || !ssl_has_certificate(ssl)) { + hs->state = state_send_client_finished; + return ssl_hs_ok; + } + + assert(ssl_has_private_key(ssl)); + ScopedCBB cbb; + CBB body, child; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_CERTIFICATE_VERIFY)) { + return ssl_hs_error; + } + + uint16_t signature_algorithm; + if (!tls1_choose_signature_algorithm(hs, &signature_algorithm)) { + return ssl_hs_error; + } + if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) { + // Write out the digest type in TLS 1.2. + if (!CBB_add_u16(&body, signature_algorithm)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + } + + // Set aside space for the signature. + const size_t max_sig_len = EVP_PKEY_size(hs->local_pubkey.get()); + uint8_t *ptr; + if (!CBB_add_u16_length_prefixed(&body, &child) || + !CBB_reserve(&child, &ptr, max_sig_len)) { + return ssl_hs_error; + } + + size_t sig_len = max_sig_len; + // The SSL3 construction for CertificateVerify does not decompose into a + // single final digest and signature, and must be special-cased. 
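The PSK combination above follows the RFC 4279 premaster layout: a length-prefixed other_secret followed by a length-prefixed PSK, where plain PSK uses a zero block of the PSK's length as other_secret. A standalone sketch with assumed names:

#include <cstddef>
#include <cstdint>
#include <vector>

static void AppendU16Prefixed(std::vector<uint8_t> *out,
                              const std::vector<uint8_t> &data) {
  // Assumes data.size() fits in 16 bits, as the TLS encoding requires.
  out->push_back(static_cast<uint8_t>(data.size() >> 8));
  out->push_back(static_cast<uint8_t>(data.size() & 0xff));
  out->insert(out->end(), data.begin(), data.end());
}

// RFC 4279 premaster secret: len-prefixed other_secret, then len-prefixed
// PSK. For plain PSK, other_secret is a block of zeros the same length as
// the PSK; for ECDHE_PSK it is the ECDH output.
std::vector<uint8_t> PskPremaster(const std::vector<uint8_t> &other_secret,
                                  const std::vector<uint8_t> &psk) {
  std::vector<uint8_t> pms;
  AppendU16Prefixed(&pms, other_secret);
  AppendU16Prefixed(&pms, psk);
  return pms;
}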
+ if (ssl_protocol_version(ssl) == SSL3_VERSION) { + if (ssl->cert->key_method != NULL) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_PROTOCOL_FOR_CUSTOM_KEY); + return ssl_hs_error; + } + + uint8_t digest[EVP_MAX_MD_SIZE]; + size_t digest_len; + if (!hs->transcript.GetSSL3CertVerifyHash( + digest, &digest_len, hs->new_session.get(), signature_algorithm)) { + return ssl_hs_error; + } + + UniquePtr pctx(EVP_PKEY_CTX_new(ssl->cert->privatekey, NULL)); + if (!pctx || + !EVP_PKEY_sign_init(pctx.get()) || + !EVP_PKEY_sign(pctx.get(), ptr, &sig_len, digest, digest_len)) { + return ssl_hs_error; + } + } else { + switch (ssl_private_key_sign(hs, ptr, &sig_len, max_sig_len, + signature_algorithm, + hs->transcript.buffer())) { + case ssl_private_key_success: + break; + case ssl_private_key_failure: + return ssl_hs_error; + case ssl_private_key_retry: + hs->state = state_send_client_certificate_verify; + return ssl_hs_private_key_operation; + } + } + + if (!CBB_did_write(&child, sig_len) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + + // The handshake buffer is no longer necessary. + hs->transcript.FreeBuffer(); + + hs->state = state_send_client_finished; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_client_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + // Resolve Channel ID first, before any non-idempotent operations. + if (ssl->s3->tlsext_channel_id_valid) { + if (!ssl_do_channel_id_callback(ssl)) { + return ssl_hs_error; + } + + if (ssl->tlsext_channel_id_private == NULL) { + hs->state = state_send_client_finished; + return ssl_hs_channel_id_lookup; + } + } + + if (!ssl->method->add_change_cipher_spec(ssl) || + !tls1_change_cipher_state(hs, evp_aead_seal)) { + return ssl_hs_error; + } + + if (hs->next_proto_neg_seen) { + static const uint8_t kZero[32] = {0}; + size_t padding_len = + 32 - ((ssl->s3->next_proto_negotiated.size() + 2) % 32); + + ScopedCBB cbb; + CBB body, child; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_NEXT_PROTO) || + !CBB_add_u8_length_prefixed(&body, &child) || + !CBB_add_bytes(&child, ssl->s3->next_proto_negotiated.data(), + ssl->s3->next_proto_negotiated.size()) || + !CBB_add_u8_length_prefixed(&body, &child) || + !CBB_add_bytes(&child, kZero, padding_len) || + !ssl_add_message_cbb(ssl, cbb.get())) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + } + + if (ssl->s3->tlsext_channel_id_valid) { + ScopedCBB cbb; + CBB body; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CHANNEL_ID) || + !tls1_write_channel_id(hs, &body) || + !ssl_add_message_cbb(ssl, cbb.get())) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + } + + if (!ssl_send_finished(hs)) { + return ssl_hs_error; + } + + hs->state = state_finish_flight; + return ssl_hs_flush; +} + +static bool can_false_start(const SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + // False Start only for TLS 1.2 with an ECDHE+AEAD cipher and ALPN or NPN. + return !SSL_is_dtls(ssl) && + SSL_version(ssl) == TLS1_2_VERSION && + (!ssl->s3->alpn_selected.empty() || + !ssl->s3->next_proto_negotiated.empty()) && + hs->new_cipher->algorithm_mkey == SSL_kECDHE && + hs->new_cipher->algorithm_mac == SSL_AEAD; +} + +static enum ssl_hs_wait_t do_finish_flight(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + if (ssl->session != NULL) { + hs->state = state_finish_client_handshake; + return ssl_hs_ok; + } + + // This is a full handshake. 
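The NextProtocol padding above pads the message body (two one-byte length prefixes plus the protocol name plus padding) to a multiple of 32 bytes. A small sketch of the arithmetic with a worked example; NpnPaddingLen is an illustrative name.

#include <cstddef>

// Mirrors the padding rule above: the body is 1 + proto_len + 1 + padding
// bytes, and the padding brings the total up to a multiple of 32.
size_t NpnPaddingLen(size_t proto_len) {
  return 32 - ((proto_len + 2) % 32);
}

// Example: a 5-byte protocol name gives padding = 32 - ((5 + 2) % 32) = 25,
// so the body is 1 + 5 + 1 + 25 = 32 bytes.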
If it involves ChannelID, then record the + // handshake hashes at this point in the session so that any resumption of + // this session with ChannelID can sign those hashes. + if (!tls1_record_handshake_hashes_for_channel_id(hs)) { + return ssl_hs_error; + } + + hs->state = state_read_session_ticket; + + if ((SSL_get_mode(ssl) & SSL_MODE_ENABLE_FALSE_START) && + can_false_start(hs) && + // No False Start on renegotiation (would complicate the state machine). + !ssl->s3->initial_handshake_complete) { + hs->in_false_start = true; + hs->can_early_write = true; + return ssl_hs_early_return; + } + + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_session_ticket(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (!hs->ticket_expected) { + hs->state = state_process_change_cipher_spec; + return ssl_hs_read_change_cipher_spec; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_NEW_SESSION_TICKET) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + CBS new_session_ticket = msg.body, ticket; + uint32_t tlsext_tick_lifetime_hint; + if (!CBS_get_u32(&new_session_ticket, &tlsext_tick_lifetime_hint) || + !CBS_get_u16_length_prefixed(&new_session_ticket, &ticket) || + CBS_len(&new_session_ticket) != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + + if (CBS_len(&ticket) == 0) { + // RFC 5077 allows a server to change its mind and send no ticket after + // negotiating the extension. The value of |ticket_expected| is checked in + // |ssl_update_cache| so is cleared here to avoid an unnecessary update. + hs->ticket_expected = false; + ssl->method->next_message(ssl); + hs->state = state_process_change_cipher_spec; + return ssl_hs_read_change_cipher_spec; + } + + SSL_SESSION *session = hs->new_session.get(); + UniquePtr renewed_session; + if (ssl->session != NULL) { + // The server is sending a new ticket for an existing session. Sessions are + // immutable once established, so duplicate all but the ticket of the + // existing session. + renewed_session = + SSL_SESSION_dup(ssl->session, SSL_SESSION_INCLUDE_NONAUTH); + if (!renewed_session) { + // This should never happen. + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + session = renewed_session.get(); + } + + // |tlsext_tick_lifetime_hint| is measured from when the ticket was issued. + ssl_session_rebase_time(ssl, session); + + if (!CBS_stow(&ticket, &session->tlsext_tick, &session->tlsext_ticklen)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return ssl_hs_error; + } + session->tlsext_tick_lifetime_hint = tlsext_tick_lifetime_hint; + + // Generate a session ID for this session based on the session ticket. We use + // the session ID mechanism for detecting ticket resumption. This also fits in + // with assumptions elsewhere in OpenSSL. 
+ if (!EVP_Digest(CBS_data(&ticket), CBS_len(&ticket), + session->session_id, &session->session_id_length, + EVP_sha256(), NULL)) { + return ssl_hs_error; + } + + if (renewed_session) { + session->not_resumable = 0; + SSL_SESSION_free(ssl->session); + ssl->session = renewed_session.release(); + } + + ssl->method->next_message(ssl); + hs->state = state_process_change_cipher_spec; + return ssl_hs_read_change_cipher_spec; +} + +static enum ssl_hs_wait_t do_process_change_cipher_spec(SSL_HANDSHAKE *hs) { + if (!tls1_change_cipher_state(hs, evp_aead_open)) { + return ssl_hs_error; + } + + hs->state = state_read_server_finished; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_server_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + enum ssl_hs_wait_t wait = ssl_get_finished(hs); + if (wait != ssl_hs_ok) { + return wait; + } + + if (ssl->session != NULL) { + hs->state = state_send_client_finished; + return ssl_hs_ok; + } + + hs->state = state_finish_client_handshake; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_finish_client_handshake(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + ssl->method->on_handshake_complete(ssl); + + if (ssl->session != NULL) { + SSL_SESSION_up_ref(ssl->session); + ssl->s3->established_session.reset(ssl->session); + } else { + // We make a copy of the session in order to maintain the immutability + // of the new established_session due to False Start. The caller may + // have taken a reference to the temporary session. + ssl->s3->established_session = + SSL_SESSION_dup(hs->new_session.get(), SSL_SESSION_DUP_ALL); + if (!ssl->s3->established_session) { + return ssl_hs_error; + } + // Renegotiations do not participate in session resumption. + if (!ssl->s3->initial_handshake_complete) { + ssl->s3->established_session->not_resumable = 0; + } + + hs->new_session.reset(); + } + + hs->handshake_finalized = true; + ssl->s3->initial_handshake_complete = true; + ssl_update_cache(hs, SSL_SESS_CACHE_CLIENT); + + hs->state = state_done; + return ssl_hs_ok; +} + +enum ssl_hs_wait_t ssl_client_handshake(SSL_HANDSHAKE *hs) { + while (hs->state != state_done) { + enum ssl_hs_wait_t ret = ssl_hs_error; + enum ssl_client_hs_state_t state = + static_cast(hs->state); + switch (state) { + case state_start_connect: + ret = do_start_connect(hs); + break; + case state_enter_early_data: + ret = do_enter_early_data(hs); + break; + case state_read_hello_verify_request: + ret = do_read_hello_verify_request(hs); + break; + case state_read_server_hello: + ret = do_read_server_hello(hs); + break; + case state_tls13: + ret = do_tls13(hs); + break; + case state_read_server_certificate: + ret = do_read_server_certificate(hs); + break; + case state_read_certificate_status: + ret = do_read_certificate_status(hs); + break; + case state_verify_server_certificate: + ret = do_verify_server_certificate(hs); + break; + case state_read_server_key_exchange: + ret = do_read_server_key_exchange(hs); + break; + case state_read_certificate_request: + ret = do_read_certificate_request(hs); + break; + case state_read_server_hello_done: + ret = do_read_server_hello_done(hs); + break; + case state_send_client_certificate: + ret = do_send_client_certificate(hs); + break; + case state_send_client_key_exchange: + ret = do_send_client_key_exchange(hs); + break; + case state_send_client_certificate_verify: + ret = do_send_client_certificate_verify(hs); + break; + case state_send_client_finished: + ret = do_send_client_finished(hs); + break; + case state_finish_flight: + ret = 
do_finish_flight(hs); + break; + case state_read_session_ticket: + ret = do_read_session_ticket(hs); + break; + case state_process_change_cipher_spec: + ret = do_process_change_cipher_spec(hs); + break; + case state_read_server_finished: + ret = do_read_server_finished(hs); + break; + case state_finish_client_handshake: + ret = do_finish_client_handshake(hs); + break; + case state_done: + ret = ssl_hs_ok; + break; + } + + if (hs->state != state) { + ssl_do_info_callback(hs->ssl, SSL_CB_CONNECT_LOOP, 1); + } + + if (ret != ssl_hs_ok) { + return ret; + } + } + + ssl_do_info_callback(hs->ssl, SSL_CB_HANDSHAKE_DONE, 1); + return ssl_hs_ok; +} + +const char *ssl_client_handshake_state(SSL_HANDSHAKE *hs) { + enum ssl_client_hs_state_t state = + static_cast(hs->state); + switch (state) { + case state_start_connect: + return "TLS client start_connect"; + case state_enter_early_data: + return "TLS client enter_early_data"; + case state_read_hello_verify_request: + return "TLS client read_hello_verify_request"; + case state_read_server_hello: + return "TLS client read_server_hello"; + case state_tls13: + return tls13_client_handshake_state(hs); + case state_read_server_certificate: + return "TLS client read_server_certificate"; + case state_read_certificate_status: + return "TLS client read_certificate_status"; + case state_verify_server_certificate: + return "TLS client verify_server_certificate"; + case state_read_server_key_exchange: + return "TLS client read_server_key_exchange"; + case state_read_certificate_request: + return "TLS client read_certificate_request"; + case state_read_server_hello_done: + return "TLS client read_server_hello_done"; + case state_send_client_certificate: + return "TLS client send_client_certificate"; + case state_send_client_key_exchange: + return "TLS client send_client_key_exchange"; + case state_send_client_certificate_verify: + return "TLS client send_client_certificate_verify"; + case state_send_client_finished: + return "TLS client send_client_finished"; + case state_finish_flight: + return "TLS client finish_flight"; + case state_read_session_ticket: + return "TLS client read_session_ticket"; + case state_process_change_cipher_spec: + return "TLS client process_change_cipher_spec"; + case state_read_server_finished: + return "TLS client read_server_finished"; + case state_finish_client_handshake: + return "TLS client finish_client_handshake"; + case state_done: + return "TLS client done"; + } + + return "TLS client unknown"; +} + +} diff --git a/Sources/BoringSSL/ssl/handshake_server.c b/Sources/BoringSSL/ssl/handshake_server.c deleted file mode 100644 index 51338e224..000000000 --- a/Sources/BoringSSL/ssl/handshake_server.c +++ /dev/null @@ -1,1950 +0,0 @@ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. 
- * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ -/* ==================================================================== - * Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. 
For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ -/* ==================================================================== - * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. - * - * Portions of the attached software ("Contribution") are developed by - * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. - * - * The Contribution is licensed pursuant to the OpenSSL open source - * license provided above. - * - * ECC cipher suite support in OpenSSL originally written by - * Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories. - * - */ -/* ==================================================================== - * Copyright 2005 Nokia. All rights reserved. - * - * The portions of the attached software ("Contribution") is developed by - * Nokia Corporation and is licensed pursuant to the OpenSSL open source - * license. - * - * The Contribution, originally written by Mika Kousa and Pasi Eronen of - * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites - * support (see RFC 4279) to OpenSSL. - * - * No patent licenses or other rights except those expressly stated in - * the OpenSSL open source license shall be deemed granted or received - * expressly, by implication, estoppel, or otherwise. - * - * No assurances are provided by Nokia that the Contribution does not - * infringe the patent or other intellectual property rights of any third - * party or that the license provides you with all the necessary rights - * to make use of the Contribution. - * - * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN - * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA - * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY - * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR - * OTHERWISE. 
*/ - -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "internal.h" -#include "../crypto/internal.h" - - -static int ssl3_process_client_hello(SSL_HANDSHAKE *hs); -static int ssl3_select_certificate(SSL_HANDSHAKE *hs); -static int ssl3_select_parameters(SSL_HANDSHAKE *hs); -static int ssl3_send_server_hello(SSL_HANDSHAKE *hs); -static int ssl3_send_server_certificate(SSL_HANDSHAKE *hs); -static int ssl3_send_certificate_status(SSL_HANDSHAKE *hs); -static int ssl3_send_server_key_exchange(SSL_HANDSHAKE *hs); -static int ssl3_send_certificate_request(SSL_HANDSHAKE *hs); -static int ssl3_send_server_hello_done(SSL_HANDSHAKE *hs); -static int ssl3_get_client_certificate(SSL_HANDSHAKE *hs); -static int ssl3_get_client_key_exchange(SSL_HANDSHAKE *hs); -static int ssl3_get_cert_verify(SSL_HANDSHAKE *hs); -static int ssl3_get_next_proto(SSL_HANDSHAKE *hs); -static int ssl3_get_channel_id(SSL_HANDSHAKE *hs); -static int ssl3_send_new_session_ticket(SSL_HANDSHAKE *hs); - -static struct CRYPTO_STATIC_MUTEX g_v2clienthello_lock = - CRYPTO_STATIC_MUTEX_INIT; -static uint64_t g_v2clienthello_count = 0; - -uint64_t SSL_get_v2clienthello_count(void) { - CRYPTO_STATIC_MUTEX_lock_read(&g_v2clienthello_lock); - uint64_t ret = g_v2clienthello_count; - CRYPTO_STATIC_MUTEX_unlock_read(&g_v2clienthello_lock); - return ret; -} - -int ssl3_accept(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - uint32_t alg_a; - int ret = -1; - - assert(ssl->handshake_func == ssl3_accept); - assert(ssl->server); - - for (;;) { - int state = hs->state; - - switch (hs->state) { - case SSL_ST_INIT: - ssl_do_info_callback(ssl, SSL_CB_HANDSHAKE_START, 1); - hs->state = SSL3_ST_SR_CLNT_HELLO_A; - break; - - case SSL3_ST_SR_CLNT_HELLO_A: - ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - goto end; - } - hs->state = SSL3_ST_SR_CLNT_HELLO_B; - break; - - case SSL3_ST_SR_CLNT_HELLO_B: - ret = ssl3_process_client_hello(hs); - if (ret <= 0) { - goto end; - } - hs->state = SSL3_ST_SR_CLNT_HELLO_C; - break; - - case SSL3_ST_SR_CLNT_HELLO_C: - ret = ssl3_select_certificate(hs); - if (ret <= 0) { - goto end; - } - if (hs->state != SSL_ST_TLS13) { - hs->state = SSL3_ST_SR_CLNT_HELLO_D; - } - break; - - case SSL3_ST_SR_CLNT_HELLO_D: - ret = ssl3_select_parameters(hs); - if (ret <= 0) { - goto end; - } - ssl->method->received_flight(ssl); - hs->state = SSL3_ST_SW_SRVR_HELLO_A; - break; - - case SSL3_ST_SW_SRVR_HELLO_A: - ret = ssl3_send_server_hello(hs); - if (ret <= 0) { - goto end; - } - if (ssl->session != NULL) { - hs->state = SSL3_ST_SW_SESSION_TICKET_A; - } else { - hs->state = SSL3_ST_SW_CERT_A; - } - break; - - case SSL3_ST_SW_CERT_A: - if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - ret = ssl3_send_server_certificate(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_SW_CERT_STATUS_A; - break; - - case SSL3_ST_SW_CERT_STATUS_A: - if (hs->certificate_status_expected) { - ret = ssl3_send_certificate_status(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_SW_KEY_EXCH_A; - break; - - case SSL3_ST_SW_KEY_EXCH_A: - case SSL3_ST_SW_KEY_EXCH_B: - alg_a = hs->new_cipher->algorithm_auth; - - /* PSK ciphers send ServerKeyExchange if there is an identity hint. 
*/ - if (ssl_cipher_requires_server_key_exchange(hs->new_cipher) || - ((alg_a & SSL_aPSK) && ssl->psk_identity_hint)) { - ret = ssl3_send_server_key_exchange(hs); - if (ret <= 0) { - goto end; - } - } - - hs->state = SSL3_ST_SW_CERT_REQ_A; - break; - - case SSL3_ST_SW_CERT_REQ_A: - if (hs->cert_request) { - ret = ssl3_send_certificate_request(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_SW_SRVR_DONE_A; - break; - - case SSL3_ST_SW_SRVR_DONE_A: - ret = ssl3_send_server_hello_done(hs); - if (ret <= 0) { - goto end; - } - hs->next_state = SSL3_ST_SR_CERT_A; - hs->state = SSL3_ST_SW_FLUSH; - break; - - case SSL3_ST_SR_CERT_A: - if (hs->cert_request) { - ret = ssl3_get_client_certificate(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_SR_KEY_EXCH_A; - break; - - case SSL3_ST_SR_KEY_EXCH_A: - case SSL3_ST_SR_KEY_EXCH_B: - ret = ssl3_get_client_key_exchange(hs); - if (ret <= 0) { - goto end; - } - hs->state = SSL3_ST_SR_CERT_VRFY_A; - break; - - case SSL3_ST_SR_CERT_VRFY_A: - ret = ssl3_get_cert_verify(hs); - if (ret <= 0) { - goto end; - } - - hs->state = SSL3_ST_SR_CHANGE; - break; - - case SSL3_ST_SR_CHANGE: - ret = ssl->method->read_change_cipher_spec(ssl); - if (ret <= 0) { - goto end; - } - - if (!tls1_change_cipher_state(hs, SSL3_CHANGE_CIPHER_SERVER_READ)) { - ret = -1; - goto end; - } - - hs->state = SSL3_ST_SR_NEXT_PROTO_A; - break; - - case SSL3_ST_SR_NEXT_PROTO_A: - if (hs->next_proto_neg_seen) { - ret = ssl3_get_next_proto(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_SR_CHANNEL_ID_A; - break; - - case SSL3_ST_SR_CHANNEL_ID_A: - if (ssl->s3->tlsext_channel_id_valid) { - ret = ssl3_get_channel_id(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_SR_FINISHED_A; - break; - - case SSL3_ST_SR_FINISHED_A: - ret = ssl3_get_finished(hs); - if (ret <= 0) { - goto end; - } - - ssl->method->received_flight(ssl); - if (ssl->session != NULL) { - hs->state = SSL_ST_OK; - } else { - hs->state = SSL3_ST_SW_SESSION_TICKET_A; - } - - /* If this is a full handshake with ChannelID then record the handshake - * hashes in |hs->new_session| in case we need them to verify a - * ChannelID signature on a resumption of this session in the future. 
*/ - if (ssl->session == NULL && ssl->s3->tlsext_channel_id_valid) { - ret = tls1_record_handshake_hashes_for_channel_id(hs); - if (ret <= 0) { - goto end; - } - } - break; - - case SSL3_ST_SW_SESSION_TICKET_A: - if (hs->ticket_expected) { - ret = ssl3_send_new_session_ticket(hs); - if (ret <= 0) { - goto end; - } - } - hs->state = SSL3_ST_SW_CHANGE; - break; - - case SSL3_ST_SW_CHANGE: - if (!ssl->method->add_change_cipher_spec(ssl) || - !tls1_change_cipher_state(hs, SSL3_CHANGE_CIPHER_SERVER_WRITE)) { - ret = -1; - goto end; - } - - hs->state = SSL3_ST_SW_FINISHED_A; - break; - - case SSL3_ST_SW_FINISHED_A: - ret = ssl3_send_finished(hs); - if (ret <= 0) { - goto end; - } - hs->state = SSL3_ST_SW_FLUSH; - if (ssl->session != NULL) { - hs->next_state = SSL3_ST_SR_CHANGE; - } else { - hs->next_state = SSL_ST_OK; - } - break; - - case SSL3_ST_SW_FLUSH: - ret = ssl->method->flush_flight(ssl); - if (ret <= 0) { - goto end; - } - - hs->state = hs->next_state; - if (hs->state != SSL_ST_OK) { - ssl->method->expect_flight(ssl); - } - break; - - case SSL_ST_TLS13: - ret = tls13_handshake(hs); - if (ret <= 0) { - goto end; - } - hs->state = SSL_ST_OK; - break; - - case SSL_ST_OK: - ssl->method->release_current_message(ssl, 1 /* free_buffer */); - - /* If we aren't retaining peer certificates then we can discard it - * now. */ - if (hs->new_session != NULL && - ssl->retain_only_sha256_of_client_certs) { - sk_CRYPTO_BUFFER_pop_free(hs->new_session->certs, CRYPTO_BUFFER_free); - hs->new_session->certs = NULL; - ssl->ctx->x509_method->session_clear(hs->new_session); - } - - SSL_SESSION_free(ssl->s3->established_session); - if (ssl->session != NULL) { - SSL_SESSION_up_ref(ssl->session); - ssl->s3->established_session = ssl->session; - } else { - ssl->s3->established_session = hs->new_session; - ssl->s3->established_session->not_resumable = 0; - hs->new_session = NULL; - } - - if (hs->v2_clienthello) { - CRYPTO_STATIC_MUTEX_lock_write(&g_v2clienthello_lock); - g_v2clienthello_count++; - CRYPTO_STATIC_MUTEX_unlock_write(&g_v2clienthello_lock); - } - - ssl->s3->initial_handshake_complete = 1; - ssl_update_cache(hs, SSL_SESS_CACHE_SERVER); - - ssl_do_info_callback(ssl, SSL_CB_HANDSHAKE_DONE, 1); - ret = 1; - goto end; - - default: - OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_STATE); - ret = -1; - goto end; - } - - if (hs->state != state) { - ssl_do_info_callback(ssl, SSL_CB_ACCEPT_LOOP, 1); - } - } - -end: - ssl_do_info_callback(ssl, SSL_CB_ACCEPT_EXIT, ret); - return ret; -} - -int ssl_client_cipher_list_contains_cipher(const SSL_CLIENT_HELLO *client_hello, - uint16_t id) { - CBS cipher_suites; - CBS_init(&cipher_suites, client_hello->cipher_suites, - client_hello->cipher_suites_len); - - while (CBS_len(&cipher_suites) > 0) { - uint16_t got_id; - if (!CBS_get_u16(&cipher_suites, &got_id)) { - return 0; - } - - if (got_id == id) { - return 1; - } - } - - return 0; -} - -static int negotiate_version(SSL_HANDSHAKE *hs, uint8_t *out_alert, - const SSL_CLIENT_HELLO *client_hello) { - SSL *const ssl = hs->ssl; - assert(!ssl->s3->have_version); - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - *out_alert = SSL_AD_PROTOCOL_VERSION; - return 0; - } - - uint16_t version = 0; - /* Check supported_versions extension if it is present. 
*/ - CBS supported_versions; - if (ssl_client_hello_get_extension(client_hello, &supported_versions, - TLSEXT_TYPE_supported_versions)) { - CBS versions; - if (!CBS_get_u8_length_prefixed(&supported_versions, &versions) || - CBS_len(&supported_versions) != 0 || - CBS_len(&versions) == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - *out_alert = SSL_AD_DECODE_ERROR; - return 0; - } - - /* Choose the newest commonly-supported version advertised by the client. - * The client orders the versions according to its preferences, but we're - * not required to honor the client's preferences. */ - int found_version = 0; - while (CBS_len(&versions) != 0) { - uint16_t ext_version; - if (!CBS_get_u16(&versions, &ext_version)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - *out_alert = SSL_AD_DECODE_ERROR; - return 0; - } - if (!ssl->method->version_from_wire(&ext_version, ext_version)) { - continue; - } - if (min_version <= ext_version && - ext_version <= max_version && - (!found_version || version < ext_version)) { - version = ext_version; - found_version = 1; - } - } - - if (!found_version) { - goto unsupported_protocol; - } - } else { - /* Process ClientHello.version instead. Note that versions beyond (D)TLS 1.2 - * do not use this mechanism. */ - if (SSL_is_dtls(ssl)) { - if (client_hello->version <= DTLS1_2_VERSION) { - version = TLS1_2_VERSION; - } else if (client_hello->version <= DTLS1_VERSION) { - version = TLS1_1_VERSION; - } else { - goto unsupported_protocol; - } - } else { - if (client_hello->version >= TLS1_2_VERSION) { - version = TLS1_2_VERSION; - } else if (client_hello->version >= TLS1_1_VERSION) { - version = TLS1_1_VERSION; - } else if (client_hello->version >= TLS1_VERSION) { - version = TLS1_VERSION; - } else if (client_hello->version >= SSL3_VERSION) { - version = SSL3_VERSION; - } else { - goto unsupported_protocol; - } - } - - /* Apply our minimum and maximum version. */ - if (version > max_version) { - version = max_version; - } - - if (version < min_version) { - goto unsupported_protocol; - } - } - - /* Handle FALLBACK_SCSV. */ - if (ssl_client_cipher_list_contains_cipher(client_hello, - SSL3_CK_FALLBACK_SCSV & 0xffff) && - version < max_version) { - OPENSSL_PUT_ERROR(SSL, SSL_R_INAPPROPRIATE_FALLBACK); - *out_alert = SSL3_AD_INAPPROPRIATE_FALLBACK; - return 0; - } - - hs->client_version = client_hello->version; - ssl->version = ssl->method->version_to_wire(version); - - /* At this point, the connection's version is known and |ssl->version| is - * fixed. Begin enforcing the record-layer version. 
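The supported_versions branch of negotiate_version above picks the newest version that falls inside the server's [min_version, max_version] range, regardless of the order in which the client listed its versions. Below is a minimal standalone sketch of that selection step, not taken from the vendored sources; choose_version and the plain array in place of CBS parsing are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch: pick the newest version both sides support, ignoring the
 * client's ordering. All names here are illustrative, not BoringSSL API. */
static int choose_version(const uint16_t *client_versions, size_t num_versions,
                          uint16_t min_version, uint16_t max_version,
                          uint16_t *out_version) {
  int found = 0;
  uint16_t best = 0;
  for (size_t i = 0; i < num_versions; i++) {
    uint16_t v = client_versions[i];
    if (v < min_version || v > max_version) {
      continue;  /* outside the locally enabled range */
    }
    if (!found || v > best) {
      best = v;  /* keep the newest acceptable version seen so far */
      found = 1;
    }
  }
  if (found) {
    *out_version = best;
  }
  return found;
}

int main(void) {
  /* Client offers TLS 1.0..1.2 (wire values 0x0301..0x0303), newest last. */
  const uint16_t offered[] = {0x0301, 0x0302, 0x0303};
  uint16_t chosen;
  if (choose_version(offered, 3, 0x0301, 0x0303, &chosen)) {
    printf("negotiated 0x%04x\n", (unsigned)chosen);  /* prints 0x0303 */
  }
  return 0;
}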
*/ - ssl->s3->have_version = 1; - - return 1; - -unsupported_protocol: - OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_PROTOCOL); - *out_alert = SSL_AD_PROTOCOL_VERSION; - return 0; -} - -static STACK_OF(SSL_CIPHER) * - ssl_parse_client_cipher_list(const SSL_CLIENT_HELLO *client_hello) { - CBS cipher_suites; - CBS_init(&cipher_suites, client_hello->cipher_suites, - client_hello->cipher_suites_len); - - STACK_OF(SSL_CIPHER) *sk = sk_SSL_CIPHER_new_null(); - if (sk == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - - while (CBS_len(&cipher_suites) > 0) { - uint16_t cipher_suite; - - if (!CBS_get_u16(&cipher_suites, &cipher_suite)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST); - goto err; - } - - const SSL_CIPHER *c = SSL_get_cipher_by_value(cipher_suite); - if (c != NULL && !sk_SSL_CIPHER_push(sk, c)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - } - - return sk; - -err: - sk_SSL_CIPHER_free(sk); - return NULL; -} - -/* ssl_get_compatible_server_ciphers determines the key exchange and - * authentication cipher suite masks compatible with the server configuration - * and current ClientHello parameters of |hs|. It sets |*out_mask_k| to the key - * exchange mask and |*out_mask_a| to the authentication mask. */ -static void ssl_get_compatible_server_ciphers(SSL_HANDSHAKE *hs, - uint32_t *out_mask_k, - uint32_t *out_mask_a) { - SSL *const ssl = hs->ssl; - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - *out_mask_k = SSL_kGENERIC; - *out_mask_a = SSL_aGENERIC; - return; - } - - uint32_t mask_k = 0; - uint32_t mask_a = 0; - - if (ssl_has_certificate(ssl)) { - int type = ssl_private_key_type(ssl); - if (type == NID_rsaEncryption) { - mask_k |= SSL_kRSA; - mask_a |= SSL_aRSA; - } else if (ssl_is_ecdsa_key_type(type)) { - mask_a |= SSL_aECDSA; - } - } - - if (ssl->cert->dh_tmp != NULL || ssl->cert->dh_tmp_cb != NULL) { - mask_k |= SSL_kDHE; - } - - /* Check for a shared group to consider ECDHE ciphers. */ - uint16_t unused; - if (tls1_get_shared_group(hs, &unused)) { - mask_k |= SSL_kECDHE; - } - - /* PSK requires a server callback. */ - if (ssl->psk_server_callback != NULL) { - mask_k |= SSL_kPSK; - mask_a |= SSL_aPSK; - } - - *out_mask_k = mask_k; - *out_mask_a = mask_a; -} - -static const SSL_CIPHER *ssl3_choose_cipher( - SSL_HANDSHAKE *hs, const SSL_CLIENT_HELLO *client_hello, - const struct ssl_cipher_preference_list_st *server_pref) { - SSL *const ssl = hs->ssl; - const SSL_CIPHER *c, *ret = NULL; - STACK_OF(SSL_CIPHER) *srvr = server_pref->ciphers, *prio, *allow; - int ok; - size_t cipher_index; - uint32_t alg_k, alg_a, mask_k, mask_a; - /* in_group_flags will either be NULL, or will point to an array of bytes - * which indicate equal-preference groups in the |prio| stack. See the - * comment about |in_group_flags| in the |ssl_cipher_preference_list_st| - * struct. */ - const uint8_t *in_group_flags; - /* group_min contains the minimal index so far found in a group, or -1 if no - * such value exists yet. 
*/ - int group_min = -1; - - STACK_OF(SSL_CIPHER) *clnt = ssl_parse_client_cipher_list(client_hello); - if (clnt == NULL) { - return NULL; - } - - if (ssl->options & SSL_OP_CIPHER_SERVER_PREFERENCE) { - prio = srvr; - in_group_flags = server_pref->in_group_flags; - allow = clnt; - } else { - prio = clnt; - in_group_flags = NULL; - allow = srvr; - } - - ssl_get_compatible_server_ciphers(hs, &mask_k, &mask_a); - - for (size_t i = 0; i < sk_SSL_CIPHER_num(prio); i++) { - c = sk_SSL_CIPHER_value(prio, i); - - ok = 1; - - /* Check the TLS version. */ - if (SSL_CIPHER_get_min_version(c) > ssl3_protocol_version(ssl) || - SSL_CIPHER_get_max_version(c) < ssl3_protocol_version(ssl)) { - ok = 0; - } - - alg_k = c->algorithm_mkey; - alg_a = c->algorithm_auth; - - ok = ok && (alg_k & mask_k) && (alg_a & mask_a); - - if (ok && sk_SSL_CIPHER_find(allow, &cipher_index, c)) { - if (in_group_flags != NULL && in_group_flags[i] == 1) { - /* This element of |prio| is in a group. Update the minimum index found - * so far and continue looking. */ - if (group_min == -1 || (size_t)group_min > cipher_index) { - group_min = cipher_index; - } - } else { - if (group_min != -1 && (size_t)group_min < cipher_index) { - cipher_index = group_min; - } - ret = sk_SSL_CIPHER_value(allow, cipher_index); - break; - } - } - - if (in_group_flags != NULL && in_group_flags[i] == 0 && group_min != -1) { - /* We are about to leave a group, but we found a match in it, so that's - * our answer. */ - ret = sk_SSL_CIPHER_value(allow, group_min); - break; - } - } - - sk_SSL_CIPHER_free(clnt); - return ret; -} - -static int ssl3_process_client_hello(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_check_message_type(ssl, SSL3_MT_CLIENT_HELLO)) { - return -1; - } - - SSL_CLIENT_HELLO client_hello; - if (!ssl_client_hello_init(ssl, &client_hello, ssl->init_msg, - ssl->init_num)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return -1; - } - - /* Run the early callback. */ - if (ssl->ctx->select_certificate_cb != NULL) { - switch (ssl->ctx->select_certificate_cb(&client_hello)) { - case 0: - ssl->rwstate = SSL_CERTIFICATE_SELECTION_PENDING; - return -1; - - case -1: - /* Connection rejected. */ - OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_REJECTED); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - return -1; - - default: - /* fallthrough */; - } - } - - uint8_t alert = SSL_AD_DECODE_ERROR; - if (!negotiate_version(hs, &alert, &client_hello)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return -1; - } - - /* Load the client random. */ - if (client_hello.random_len != SSL3_RANDOM_SIZE) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } - OPENSSL_memcpy(ssl->s3->client_random, client_hello.random, - client_hello.random_len); - - /* Only null compression is supported. TLS 1.3 further requires the peer - * advertise no other compression. */ - if (OPENSSL_memchr(client_hello.compression_methods, 0, - client_hello.compression_methods_len) == NULL || - (ssl3_protocol_version(ssl) >= TLS1_3_VERSION && - client_hello.compression_methods_len != 1)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMPRESSION_LIST); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return -1; - } - - /* TLS extensions. 
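The ssl3_choose_cipher loop above implements server-preference selection with equal-preference groups: in_group_flags marks runs of server-list entries that are tied, and within a tied run the client's own ranking breaks the tie. Here is a small self-contained sketch of the same idea using integer cipher IDs; choose_cipher and client_rank are made-up names, not BoringSSL functions, and a group flag of 1 means "this entry and the next are equally preferred".

#include <stddef.h>
#include <stdio.h>

/* Returns the client's preference index for |id|, or -1 if not offered. */
static int client_rank(const int *client, size_t n, int id) {
  for (size_t i = 0; i < n; i++) {
    if (client[i] == id) {
      return (int)i;  /* lower index = client prefers it more */
    }
  }
  return -1;
}

static int choose_cipher(const int *server, const unsigned char *in_group,
                         size_t server_len, const int *client,
                         size_t client_len) {
  int group_best = -1;  /* best client rank seen inside the current group */
  for (size_t i = 0; i < server_len; i++) {
    int rank = client_rank(client, client_len, server[i]);
    if (rank >= 0) {
      if (in_group[i]) {
        /* Still inside an equal-preference group: remember the entry the
         * client likes best and keep scanning the rest of the group. */
        if (group_best == -1 || rank < group_best) {
          group_best = rank;
        }
        continue;
      }
      /* Last entry of a group (or a standalone entry): settle the tie. */
      if (group_best != -1 && group_best < rank) {
        rank = group_best;
      }
      return client[rank];
    }
    if (!in_group[i] && group_best != -1) {
      /* Leaving a group in which something already matched; that match wins. */
      return client[group_best];
    }
  }
  return -1;  /* no shared cipher */
}

int main(void) {
  const int server[] = {10, 11, 12, 20};       /* server preference order */
  const unsigned char groups[] = {1, 1, 0, 0}; /* 10, 11, 12 form one group */
  const int client[] = {12, 10, 20};           /* client preference order */
  printf("chosen %d\n",
         choose_cipher(server, groups, 4, client, 3));  /* prints 12 */
  return 0;
}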
*/ - if (!ssl_parse_clienthello_tlsext(hs, &client_hello)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); - return -1; - } - - return 1; -} - -static int ssl3_select_certificate(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - /* Call |cert_cb| to update server certificates if required. */ - if (ssl->cert->cert_cb != NULL) { - int rv = ssl->cert->cert_cb(ssl, ssl->cert->cert_cb_arg); - if (rv == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_CB_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return -1; - } - if (rv < 0) { - ssl->rwstate = SSL_X509_LOOKUP; - return -1; - } - } - - if (!ssl_auto_chain_if_needed(ssl)) { - return -1; - } - - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - /* Jump to the TLS 1.3 state machine. */ - hs->state = SSL_ST_TLS13; - hs->do_tls13_handshake = tls13_server_handshake; - return 1; - } - - SSL_CLIENT_HELLO client_hello; - if (!ssl_client_hello_init(ssl, &client_hello, ssl->init_msg, - ssl->init_num)) { - return -1; - } - - /* Negotiate the cipher suite. This must be done after |cert_cb| so the - * certificate is finalized. */ - hs->new_cipher = - ssl3_choose_cipher(hs, &client_hello, ssl_get_cipher_preferences(ssl)); - if (hs->new_cipher == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SHARED_CIPHER); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - return -1; - } - - return 1; -} - -static int ssl3_select_parameters(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - uint8_t al = SSL_AD_INTERNAL_ERROR; - int ret = -1; - SSL_SESSION *session = NULL; - - SSL_CLIENT_HELLO client_hello; - if (!ssl_client_hello_init(ssl, &client_hello, ssl->init_msg, - ssl->init_num)) { - return -1; - } - - /* Determine whether we are doing session resumption. */ - int tickets_supported = 0, renew_ticket = 0; - switch (ssl_get_prev_session(ssl, &session, &tickets_supported, &renew_ticket, - &client_hello)) { - case ssl_session_success: - break; - case ssl_session_error: - goto err; - case ssl_session_retry: - ssl->rwstate = SSL_PENDING_SESSION; - goto err; - } - - if (session != NULL) { - if (session->extended_master_secret && !hs->extended_master_secret) { - /* A ClientHello without EMS that attempts to resume a session with EMS - * is fatal to the connection. */ - al = SSL_AD_HANDSHAKE_FAILURE; - OPENSSL_PUT_ERROR(SSL, SSL_R_RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION); - goto f_err; - } - - if (!ssl_session_is_resumable(hs, session) || - /* If the client offers the EMS extension, but the previous session - * didn't use it, then negotiate a new session. */ - hs->extended_master_secret != session->extended_master_secret) { - SSL_SESSION_free(session); - session = NULL; - } - } - - if (session != NULL) { - /* Use the old session. */ - hs->ticket_expected = renew_ticket; - ssl->session = session; - session = NULL; - ssl->s3->session_reused = 1; - } else { - hs->ticket_expected = tickets_supported; - ssl_set_session(ssl, NULL); - if (!ssl_get_new_session(hs, 1 /* server */)) { - goto err; - } - - /* Clear the session ID if we want the session to be single-use. */ - if (!(ssl->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER)) { - hs->new_session->session_id_length = 0; - } - } - - if (ssl->ctx->dos_protection_cb != NULL && - ssl->ctx->dos_protection_cb(&client_hello) == 0) { - /* Connection rejected for DOS reasons. 
*/ - al = SSL_AD_INTERNAL_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_REJECTED); - goto f_err; - } - - if (ssl->session == NULL) { - hs->new_session->cipher = hs->new_cipher; - - /* On new sessions, stash the SNI value in the session. */ - if (hs->hostname != NULL) { - OPENSSL_free(hs->new_session->tlsext_hostname); - hs->new_session->tlsext_hostname = BUF_strdup(hs->hostname); - if (hs->new_session->tlsext_hostname == NULL) { - al = SSL_AD_INTERNAL_ERROR; - goto f_err; - } - } - - /* Determine whether to request a client certificate. */ - hs->cert_request = !!(ssl->verify_mode & SSL_VERIFY_PEER); - /* Only request a certificate if Channel ID isn't negotiated. */ - if ((ssl->verify_mode & SSL_VERIFY_PEER_IF_NO_OBC) && - ssl->s3->tlsext_channel_id_valid) { - hs->cert_request = 0; - } - /* CertificateRequest may only be sent in certificate-based ciphers. */ - if (!ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - hs->cert_request = 0; - } - - if (!hs->cert_request) { - /* OpenSSL returns X509_V_OK when no certificates are requested. This is - * classed by them as a bug, but it's assumed by at least NGINX. */ - hs->new_session->verify_result = X509_V_OK; - } - } - - /* HTTP/2 negotiation depends on the cipher suite, so ALPN negotiation was - * deferred. Complete it now. */ - if (!ssl_negotiate_alpn(hs, &al, &client_hello)) { - goto f_err; - } - - /* Now that all parameters are known, initialize the handshake hash and hash - * the ClientHello. */ - if (!SSL_TRANSCRIPT_init_hash(&hs->transcript, ssl3_protocol_version(ssl), - hs->new_cipher->algorithm_prf) || - !ssl_hash_current_message(hs)) { - goto f_err; - } - - /* Release the handshake buffer if client authentication isn't required. */ - if (!hs->cert_request) { - SSL_TRANSCRIPT_free_buffer(&hs->transcript); - } - - ret = 1; - - if (0) { - f_err: - ssl3_send_alert(ssl, SSL3_AL_FATAL, al); - } - -err: - SSL_SESSION_free(session); - return ret; -} - -static int ssl3_send_server_hello(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - - /* We only accept ChannelIDs on connections with ECDHE in order to avoid a - * known attack while we fix ChannelID itself. */ - if (ssl->s3->tlsext_channel_id_valid && - (hs->new_cipher->algorithm_mkey & SSL_kECDHE) == 0) { - ssl->s3->tlsext_channel_id_valid = 0; - } - - /* If this is a resumption and the original handshake didn't support - * ChannelID then we didn't record the original handshake hashes in the - * session and so cannot resume with ChannelIDs. */ - if (ssl->session != NULL && - ssl->session->original_handshake_hash_len == 0) { - ssl->s3->tlsext_channel_id_valid = 0; - } - - struct timeval now; - ssl_get_current_time(ssl, &now); - ssl->s3->server_random[0] = now.tv_sec >> 24; - ssl->s3->server_random[1] = now.tv_sec >> 16; - ssl->s3->server_random[2] = now.tv_sec >> 8; - ssl->s3->server_random[3] = now.tv_sec; - if (!RAND_bytes(ssl->s3->server_random + 4, SSL3_RANDOM_SIZE - 4)) { - return -1; - } - - /* TODO(davidben): Implement the TLS 1.1 and 1.2 downgrade sentinels once TLS - * 1.3 is finalized and we are not implementing a draft version. 
*/ - - const SSL_SESSION *session = hs->new_session; - if (ssl->session != NULL) { - session = ssl->session; - } - - CBB cbb, body, session_id; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_SERVER_HELLO) || - !CBB_add_u16(&body, ssl->version) || - !CBB_add_bytes(&body, ssl->s3->server_random, SSL3_RANDOM_SIZE) || - !CBB_add_u8_length_prefixed(&body, &session_id) || - !CBB_add_bytes(&session_id, session->session_id, - session->session_id_length) || - !CBB_add_u16(&body, ssl_cipher_get_value(hs->new_cipher)) || - !CBB_add_u8(&body, 0 /* no compression */) || - !ssl_add_serverhello_tlsext(hs, &body) || - !ssl_add_message_cbb(ssl, &cbb)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - CBB_cleanup(&cbb); - return -1; - } - - return 1; -} - -static int ssl3_send_server_certificate(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_has_certificate(ssl)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CERTIFICATE_SET); - return -1; - } - - if (!ssl3_output_cert_chain(ssl)) { - return -1; - } - return 1; -} - -static int ssl3_send_certificate_status(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - CBB cbb, body, ocsp_response; - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_CERTIFICATE_STATUS) || - !CBB_add_u8(&body, TLSEXT_STATUSTYPE_ocsp) || - !CBB_add_u24_length_prefixed(&body, &ocsp_response) || - !CBB_add_bytes(&ocsp_response, - CRYPTO_BUFFER_data(ssl->cert->ocsp_response), - CRYPTO_BUFFER_len(ssl->cert->ocsp_response)) || - !ssl_add_message_cbb(ssl, &cbb)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - CBB_cleanup(&cbb); - return -1; - } - - return 1; -} - -static int ssl3_send_server_key_exchange(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - CBB cbb, child; - CBB_zero(&cbb); - - /* Put together the parameters. */ - if (hs->state == SSL3_ST_SW_KEY_EXCH_A) { - uint32_t alg_k = hs->new_cipher->algorithm_mkey; - uint32_t alg_a = hs->new_cipher->algorithm_auth; - - /* Pre-allocate enough room to comfortably fit an ECDHE public key. */ - if (!CBB_init(&cbb, 128)) { - goto err; - } - - /* PSK ciphers begin with an identity hint. */ - if (alg_a & SSL_aPSK) { - size_t len = - (ssl->psk_identity_hint == NULL) ? 0 : strlen(ssl->psk_identity_hint); - if (!CBB_add_u16_length_prefixed(&cbb, &child) || - !CBB_add_bytes(&child, (const uint8_t *)ssl->psk_identity_hint, - len)) { - goto err; - } - } - - if (alg_k & SSL_kDHE) { - /* Determine the group to use. */ - DH *params = ssl->cert->dh_tmp; - if (params == NULL && ssl->cert->dh_tmp_cb != NULL) { - params = ssl->cert->dh_tmp_cb(ssl, 0, 1024); - } - if (params == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_TMP_DH_KEY); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - goto err; - } - - /* Set up DH, generate a key, and emit the public half. */ - DH *dh = DHparams_dup(params); - if (dh == NULL) { - goto err; - } - - SSL_ECDH_CTX_init_for_dhe(&hs->ecdh_ctx, dh); - if (!CBB_add_u16_length_prefixed(&cbb, &child) || - !BN_bn2cbb_padded(&child, BN_num_bytes(params->p), params->p) || - !CBB_add_u16_length_prefixed(&cbb, &child) || - !BN_bn2cbb_padded(&child, BN_num_bytes(params->g), params->g) || - !CBB_add_u16_length_prefixed(&cbb, &child) || - !SSL_ECDH_CTX_offer(&hs->ecdh_ctx, &child)) { - goto err; - } - } else if (alg_k & SSL_kECDHE) { - /* Determine the group to use. 
*/ - uint16_t group_id; - if (!tls1_get_shared_group(hs, &group_id)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_TMP_ECDH_KEY); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - goto err; - } - hs->new_session->group_id = group_id; - - /* Set up ECDH, generate a key, and emit the public half. */ - if (!SSL_ECDH_CTX_init(&hs->ecdh_ctx, group_id) || - !CBB_add_u8(&cbb, NAMED_CURVE_TYPE) || - !CBB_add_u16(&cbb, group_id) || - !CBB_add_u8_length_prefixed(&cbb, &child) || - !SSL_ECDH_CTX_offer(&hs->ecdh_ctx, &child)) { - goto err; - } - } else { - assert(alg_k & SSL_kPSK); - } - - if (!CBB_finish(&cbb, &hs->server_params, &hs->server_params_len)) { - goto err; - } - } - - /* Assemble the message. */ - CBB body; - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_SERVER_KEY_EXCHANGE) || - !CBB_add_bytes(&body, hs->server_params, hs->server_params_len)) { - goto err; - } - - /* Add a signature. */ - if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - if (!ssl_has_private_key(ssl)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - goto err; - } - - /* Determine the signature algorithm. */ - uint16_t signature_algorithm; - if (!tls1_choose_signature_algorithm(hs, &signature_algorithm)) { - goto err; - } - if (ssl3_protocol_version(ssl) >= TLS1_2_VERSION) { - if (!CBB_add_u16(&body, signature_algorithm)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - goto err; - } - } - - /* Add space for the signature. */ - const size_t max_sig_len = ssl_private_key_max_signature_len(ssl); - uint8_t *ptr; - if (!CBB_add_u16_length_prefixed(&body, &child) || - !CBB_reserve(&child, &ptr, max_sig_len)) { - goto err; - } - - size_t sig_len; - enum ssl_private_key_result_t sign_result; - if (hs->state == SSL3_ST_SW_KEY_EXCH_A) { - CBB transcript; - uint8_t *transcript_data; - size_t transcript_len; - if (!CBB_init(&transcript, - 2 * SSL3_RANDOM_SIZE + hs->server_params_len) || - !CBB_add_bytes(&transcript, ssl->s3->client_random, - SSL3_RANDOM_SIZE) || - !CBB_add_bytes(&transcript, ssl->s3->server_random, - SSL3_RANDOM_SIZE) || - !CBB_add_bytes(&transcript, hs->server_params, - hs->server_params_len) || - !CBB_finish(&transcript, &transcript_data, &transcript_len)) { - CBB_cleanup(&transcript); - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - goto err; - } - - sign_result = ssl_private_key_sign(ssl, ptr, &sig_len, max_sig_len, - signature_algorithm, transcript_data, - transcript_len); - OPENSSL_free(transcript_data); - } else { - assert(hs->state == SSL3_ST_SW_KEY_EXCH_B); - sign_result = ssl_private_key_complete(ssl, ptr, &sig_len, max_sig_len); - } - - switch (sign_result) { - case ssl_private_key_success: - if (!CBB_did_write(&child, sig_len)) { - goto err; - } - break; - case ssl_private_key_failure: - goto err; - case ssl_private_key_retry: - ssl->rwstate = SSL_PRIVATE_KEY_OPERATION; - hs->state = SSL3_ST_SW_KEY_EXCH_B; - goto err; - } - } - - if (!ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - - OPENSSL_free(hs->server_params); - hs->server_params = NULL; - hs->server_params_len = 0; - - return 1; - -err: - CBB_cleanup(&cbb); - return -1; -} - -static int add_cert_types(SSL *ssl, CBB *cbb) { - /* Get configured signature algorithms. 
*/ - int have_rsa_sign = 0; - int have_ecdsa_sign = 0; - const uint16_t *sig_algs; - size_t num_sig_algs = tls12_get_verify_sigalgs(ssl, &sig_algs); - for (size_t i = 0; i < num_sig_algs; i++) { - switch (sig_algs[i]) { - case SSL_SIGN_RSA_PKCS1_SHA512: - case SSL_SIGN_RSA_PKCS1_SHA384: - case SSL_SIGN_RSA_PKCS1_SHA256: - case SSL_SIGN_RSA_PKCS1_SHA1: - have_rsa_sign = 1; - break; - - case SSL_SIGN_ECDSA_SECP521R1_SHA512: - case SSL_SIGN_ECDSA_SECP384R1_SHA384: - case SSL_SIGN_ECDSA_SECP256R1_SHA256: - case SSL_SIGN_ECDSA_SHA1: - have_ecdsa_sign = 1; - break; - } - } - - if (have_rsa_sign && !CBB_add_u8(cbb, SSL3_CT_RSA_SIGN)) { - return 0; - } - - /* ECDSA certs can be used with RSA cipher suites as well so we don't need to - * check for SSL_kECDH or SSL_kECDHE. */ - if (ssl->version >= TLS1_VERSION && have_ecdsa_sign && - !CBB_add_u8(cbb, TLS_CT_ECDSA_SIGN)) { - return 0; - } - - return 1; -} - -static int ssl3_send_certificate_request(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - CBB cbb, body, cert_types, sigalgs_cbb; - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_CERTIFICATE_REQUEST) || - !CBB_add_u8_length_prefixed(&body, &cert_types) || - !add_cert_types(ssl, &cert_types)) { - goto err; - } - - if (ssl3_protocol_version(ssl) >= TLS1_2_VERSION) { - const uint16_t *sigalgs; - size_t num_sigalgs = tls12_get_verify_sigalgs(ssl, &sigalgs); - if (!CBB_add_u16_length_prefixed(&body, &sigalgs_cbb)) { - goto err; - } - - for (size_t i = 0; i < num_sigalgs; i++) { - if (!CBB_add_u16(&sigalgs_cbb, sigalgs[i])) { - goto err; - } - } - } - - if (!ssl_add_client_CA_list(ssl, &body) || - !ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - - return 1; - -err: - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - CBB_cleanup(&cbb); - return -1; -} - -static int ssl3_send_server_hello_done(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - CBB cbb, body; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_SERVER_HELLO_DONE) || - !ssl_add_message_cbb(ssl, &cbb)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - CBB_cleanup(&cbb); - return -1; - } - - return 1; -} - -static int ssl3_get_client_certificate(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - assert(hs->cert_request); - - int msg_ret = ssl->method->ssl_get_message(ssl); - if (msg_ret <= 0) { - return msg_ret; - } - - if (ssl->s3->tmp.message_type != SSL3_MT_CERTIFICATE) { - if (ssl->version == SSL3_VERSION && - ssl->s3->tmp.message_type == SSL3_MT_CLIENT_KEY_EXCHANGE) { - /* In SSL 3.0, the Certificate message is omitted to signal no - * certificate. */ - if (ssl->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - return -1; - } - - /* OpenSSL returns X509_V_OK when no certificates are received. This is - * classed by them as a bug, but it's assumed by at least NGINX. 
*/ - hs->new_session->verify_result = X509_V_OK; - ssl->s3->tmp.reuse_message = 1; - return 1; - } - - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - return -1; - } - - if (!ssl_hash_current_message(hs)) { - return -1; - } - - CBS certificate_msg; - CBS_init(&certificate_msg, ssl->init_msg, ssl->init_num); - - sk_CRYPTO_BUFFER_pop_free(hs->new_session->certs, CRYPTO_BUFFER_free); - EVP_PKEY_free(hs->peer_pubkey); - hs->peer_pubkey = NULL; - uint8_t alert = SSL_AD_DECODE_ERROR; - hs->new_session->certs = ssl_parse_cert_chain( - &alert, &hs->peer_pubkey, - ssl->retain_only_sha256_of_client_certs ? hs->new_session->peer_sha256 - : NULL, - &certificate_msg, ssl->ctx->pool); - if (hs->new_session->certs == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return -1; - } - - if (CBS_len(&certificate_msg) != 0 || - !ssl->ctx->x509_method->session_cache_objects(hs->new_session)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return -1; - } - - if (sk_CRYPTO_BUFFER_num(hs->new_session->certs) == 0) { - /* No client certificate so the handshake buffer may be discarded. */ - SSL_TRANSCRIPT_free_buffer(&hs->transcript); - - /* In SSL 3.0, sending no certificate is signaled by omitting the - * Certificate message. */ - if (ssl->version == SSL3_VERSION) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CERTIFICATES_RETURNED); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - return -1; - } - - if (ssl->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) { - /* Fail for TLS only if we required a certificate */ - OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - return -1; - } - - /* OpenSSL returns X509_V_OK when no certificates are received. This is - * classed by them as a bug, but it's assumed by at least NGINX. */ - hs->new_session->verify_result = X509_V_OK; - return 1; - } - - /* The hash will have been filled in. */ - if (ssl->retain_only_sha256_of_client_certs) { - hs->new_session->peer_sha256_valid = 1; - } - - if (!ssl_verify_cert_chain(ssl, &hs->new_session->verify_result, - hs->new_session->x509_chain)) { - return -1; - } - return 1; -} - -static int ssl3_get_client_key_exchange(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int al; - CBS client_key_exchange; - uint32_t alg_k; - uint32_t alg_a; - uint8_t *premaster_secret = NULL; - size_t premaster_secret_len = 0; - uint8_t *decrypt_buf = NULL; - - unsigned psk_len = 0; - uint8_t psk[PSK_MAX_PSK_LEN]; - - if (hs->state == SSL3_ST_SR_KEY_EXCH_A) { - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_CLIENT_KEY_EXCHANGE) || - !ssl_hash_current_message(hs)) { - return -1; - } - } - - CBS_init(&client_key_exchange, ssl->init_msg, ssl->init_num); - alg_k = hs->new_cipher->algorithm_mkey; - alg_a = hs->new_cipher->algorithm_auth; - - /* If using a PSK key exchange, prepare the pre-shared key. */ - if (alg_a & SSL_aPSK) { - CBS psk_identity; - - /* If using PSK, the ClientKeyExchange contains a psk_identity. If PSK, - * then this is the only field in the message. 
*/ - if (!CBS_get_u16_length_prefixed(&client_key_exchange, &psk_identity) || - ((alg_k & SSL_kPSK) && CBS_len(&client_key_exchange) != 0)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - al = SSL_AD_DECODE_ERROR; - goto f_err; - } - - if (ssl->psk_server_callback == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_NO_SERVER_CB); - al = SSL_AD_INTERNAL_ERROR; - goto f_err; - } - - if (CBS_len(&psk_identity) > PSK_MAX_IDENTITY_LEN || - CBS_contains_zero_byte(&psk_identity)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); - al = SSL_AD_ILLEGAL_PARAMETER; - goto f_err; - } - - if (!CBS_strdup(&psk_identity, &hs->new_session->psk_identity)) { - al = SSL_AD_INTERNAL_ERROR; - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto f_err; - } - - /* Look up the key for the identity. */ - psk_len = ssl->psk_server_callback(ssl, hs->new_session->psk_identity, psk, - sizeof(psk)); - if (psk_len > PSK_MAX_PSK_LEN) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - al = SSL_AD_INTERNAL_ERROR; - goto f_err; - } else if (psk_len == 0) { - /* PSK related to the given identity not found */ - OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_NOT_FOUND); - al = SSL_AD_UNKNOWN_PSK_IDENTITY; - goto f_err; - } - } - - /* Depending on the key exchange method, compute |premaster_secret| and - * |premaster_secret_len|. */ - if (alg_k & SSL_kRSA) { - /* Allocate a buffer large enough for an RSA decryption. */ - const size_t rsa_size = ssl_private_key_max_signature_len(ssl); - decrypt_buf = OPENSSL_malloc(rsa_size); - if (decrypt_buf == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - - enum ssl_private_key_result_t decrypt_result; - size_t decrypt_len; - if (hs->state == SSL3_ST_SR_KEY_EXCH_A) { - if (!ssl_has_private_key(ssl) || - ssl_private_key_type(ssl) != NID_rsaEncryption) { - al = SSL_AD_HANDSHAKE_FAILURE; - OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_RSA_CERTIFICATE); - goto f_err; - } - CBS encrypted_premaster_secret; - if (ssl->version > SSL3_VERSION) { - if (!CBS_get_u16_length_prefixed(&client_key_exchange, - &encrypted_premaster_secret) || - CBS_len(&client_key_exchange) != 0) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, - SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG); - goto f_err; - } - } else { - encrypted_premaster_secret = client_key_exchange; - } - - /* Decrypt with no padding. PKCS#1 padding will be removed as part of the - * timing-sensitive code below. */ - decrypt_result = ssl_private_key_decrypt( - ssl, decrypt_buf, &decrypt_len, rsa_size, - CBS_data(&encrypted_premaster_secret), - CBS_len(&encrypted_premaster_secret)); - } else { - assert(hs->state == SSL3_ST_SR_KEY_EXCH_B); - /* Complete async decrypt. */ - decrypt_result = - ssl_private_key_complete(ssl, decrypt_buf, &decrypt_len, rsa_size); - } - - switch (decrypt_result) { - case ssl_private_key_success: - break; - case ssl_private_key_failure: - goto err; - case ssl_private_key_retry: - ssl->rwstate = SSL_PRIVATE_KEY_OPERATION; - hs->state = SSL3_ST_SR_KEY_EXCH_B; - goto err; - } - - if (decrypt_len != rsa_size) { - al = SSL_AD_DECRYPT_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED); - goto f_err; - } - - /* Prepare a random premaster, to be used on invalid padding. See RFC 5246, - * section 7.4.7.1. 
*/ - premaster_secret_len = SSL_MAX_MASTER_KEY_LENGTH; - premaster_secret = OPENSSL_malloc(premaster_secret_len); - if (premaster_secret == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - if (!RAND_bytes(premaster_secret, premaster_secret_len)) { - goto err; - } - - /* The smallest padded premaster is 11 bytes of overhead. Small keys are - * publicly invalid. */ - if (decrypt_len < 11 + premaster_secret_len) { - al = SSL_AD_DECRYPT_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED); - goto f_err; - } - - /* Check the padding. See RFC 3447, section 7.2.2. */ - size_t padding_len = decrypt_len - premaster_secret_len; - uint8_t good = constant_time_eq_int_8(decrypt_buf[0], 0) & - constant_time_eq_int_8(decrypt_buf[1], 2); - for (size_t i = 2; i < padding_len - 1; i++) { - good &= ~constant_time_is_zero_8(decrypt_buf[i]); - } - good &= constant_time_is_zero_8(decrypt_buf[padding_len - 1]); - - /* The premaster secret must begin with |client_version|. This too must be - * checked in constant time (http://eprint.iacr.org/2003/052/). */ - good &= constant_time_eq_8(decrypt_buf[padding_len], - (unsigned)(hs->client_version >> 8)); - good &= constant_time_eq_8(decrypt_buf[padding_len + 1], - (unsigned)(hs->client_version & 0xff)); - - /* Select, in constant time, either the decrypted premaster or the random - * premaster based on |good|. */ - for (size_t i = 0; i < premaster_secret_len; i++) { - premaster_secret[i] = constant_time_select_8( - good, decrypt_buf[padding_len + i], premaster_secret[i]); - } - - OPENSSL_free(decrypt_buf); - decrypt_buf = NULL; - } else if (alg_k & (SSL_kECDHE|SSL_kDHE)) { - /* Parse the ClientKeyExchange. */ - CBS peer_key; - if (!SSL_ECDH_CTX_get_key(&hs->ecdh_ctx, &client_key_exchange, &peer_key) || - CBS_len(&client_key_exchange) != 0) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - /* Compute the premaster. */ - uint8_t alert = SSL_AD_DECODE_ERROR; - if (!SSL_ECDH_CTX_finish(&hs->ecdh_ctx, &premaster_secret, - &premaster_secret_len, &alert, CBS_data(&peer_key), - CBS_len(&peer_key))) { - al = alert; - goto f_err; - } - - /* The key exchange state may now be discarded. */ - SSL_ECDH_CTX_cleanup(&hs->ecdh_ctx); - } else if (alg_k & SSL_kPSK) { - /* For plain PSK, other_secret is a block of 0s with the same length as the - * pre-shared key. */ - premaster_secret_len = psk_len; - premaster_secret = OPENSSL_malloc(premaster_secret_len); - if (premaster_secret == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - OPENSSL_memset(premaster_secret, 0, premaster_secret_len); - } else { - al = SSL_AD_HANDSHAKE_FAILURE; - OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CIPHER_TYPE); - goto f_err; - } - - /* For a PSK cipher suite, the actual pre-master secret is combined with the - * pre-shared key. 
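The RSA branch above decrypts with no padding removal and then strips the PKCS#1 v1.5 padding in constant time, falling back to a random premaster when the padding is bad, which is the countermeasure from RFC 5246, section 7.4.7.1. The compilable sketch below shows that selection step with hand-rolled stand-ins for the constant-time helpers; ct_is_zero_8, ct_eq_8, ct_select_8 and ct_rsa_premaster are hypothetical names, not the library's API.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Constant-time byte predicates: return 0xff for "true", 0x00 for "false". */
static uint8_t ct_is_zero_8(uint8_t v) {
  uint32_t x = v;
  /* (x | -x) has bit 31 set iff x != 0; turn that into a byte mask. */
  return (uint8_t)((((x | (0u - x)) >> 31) - 1) & 0xff);
}
static uint8_t ct_eq_8(uint8_t a, uint8_t b) { return ct_is_zero_8(a ^ b); }
static uint8_t ct_select_8(uint8_t mask, uint8_t a, uint8_t b) {
  return (uint8_t)((mask & a) | (~mask & b));
}

/* Checks PKCS#1 v1.5 padding of the raw decryption |dec| and copies either the
 * embedded premaster or |fallback| into |out| without branching on secret
 * data. Returns 0xff if the padding (and embedded client_version) was good. */
static uint8_t ct_rsa_premaster(const uint8_t *dec, size_t dec_len,
                                const uint8_t *fallback, uint8_t *out,
                                size_t pm_len, uint16_t client_version) {
  if (dec_len < pm_len + 11) {
    /* Publicly invalid size; nothing secret is revealed by rejecting it. */
    memcpy(out, fallback, pm_len);
    return 0;
  }
  size_t pad_len = dec_len - pm_len;
  uint8_t good = ct_eq_8(dec[0], 0x00) & ct_eq_8(dec[1], 0x02);
  for (size_t i = 2; i < pad_len - 1; i++) {
    good &= (uint8_t)~ct_is_zero_8(dec[i]);  /* padding bytes must be nonzero */
  }
  good &= ct_is_zero_8(dec[pad_len - 1]);    /* then a zero separator */
  good &= ct_eq_8(dec[pad_len], (uint8_t)(client_version >> 8));
  good &= ct_eq_8(dec[pad_len + 1], (uint8_t)(client_version & 0xff));
  for (size_t i = 0; i < pm_len; i++) {
    out[i] = ct_select_8(good, dec[pad_len + i], fallback[i]);
  }
  return good;
}

int main(void) {
  uint8_t dec[128], fallback[48], out[48];
  /* Well-formed block: 00 02 <77 nonzero bytes> 00 <48-byte premaster>, where
   * the premaster starts with client_version 0x0303. */
  memset(dec, 0xaa, sizeof(dec));
  dec[0] = 0x00; dec[1] = 0x02; dec[79] = 0x00;
  dec[80] = 0x03; dec[81] = 0x03;
  memset(fallback, 0x11, sizeof(fallback));
  uint8_t good = ct_rsa_premaster(dec, sizeof(dec), fallback, out,
                                  sizeof(out), 0x0303);
  printf("padding ok: %d, first premaster byte: 0x%02x\n",
         good == 0xff, (unsigned)out[0]);  /* prints 1 and 0x03 */
  return 0;
}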
*/ - if (alg_a & SSL_aPSK) { - CBB new_premaster, child; - uint8_t *new_data; - size_t new_len; - - CBB_zero(&new_premaster); - if (!CBB_init(&new_premaster, 2 + psk_len + 2 + premaster_secret_len) || - !CBB_add_u16_length_prefixed(&new_premaster, &child) || - !CBB_add_bytes(&child, premaster_secret, premaster_secret_len) || - !CBB_add_u16_length_prefixed(&new_premaster, &child) || - !CBB_add_bytes(&child, psk, psk_len) || - !CBB_finish(&new_premaster, &new_data, &new_len)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - CBB_cleanup(&new_premaster); - goto err; - } - - OPENSSL_cleanse(premaster_secret, premaster_secret_len); - OPENSSL_free(premaster_secret); - premaster_secret = new_data; - premaster_secret_len = new_len; - } - - /* Compute the master secret */ - hs->new_session->master_key_length = tls1_generate_master_secret( - hs, hs->new_session->master_key, premaster_secret, premaster_secret_len); - if (hs->new_session->master_key_length == 0) { - goto err; - } - hs->new_session->extended_master_secret = hs->extended_master_secret; - - OPENSSL_cleanse(premaster_secret, premaster_secret_len); - OPENSSL_free(premaster_secret); - return 1; - -f_err: - ssl3_send_alert(ssl, SSL3_AL_FATAL, al); -err: - if (premaster_secret != NULL) { - OPENSSL_cleanse(premaster_secret, premaster_secret_len); - OPENSSL_free(premaster_secret); - } - OPENSSL_free(decrypt_buf); - - return -1; -} - -static int ssl3_get_cert_verify(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int al; - CBS certificate_verify, signature; - - /* Only RSA and ECDSA client certificates are supported, so a - * CertificateVerify is required if and only if there's a client certificate. - * */ - if (hs->peer_pubkey == NULL) { - SSL_TRANSCRIPT_free_buffer(&hs->transcript); - return 1; - } - - int msg_ret = ssl->method->ssl_get_message(ssl); - if (msg_ret <= 0) { - return msg_ret; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_CERTIFICATE_VERIFY)) { - return -1; - } - - CBS_init(&certificate_verify, ssl->init_msg, ssl->init_num); - - /* Determine the digest type if needbe. */ - uint16_t signature_algorithm = 0; - if (ssl3_protocol_version(ssl) >= TLS1_2_VERSION) { - if (!CBS_get_u16(&certificate_verify, &signature_algorithm)) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - if (!tls12_check_peer_sigalg(ssl, &al, signature_algorithm)) { - goto f_err; - } - hs->new_session->peer_signature_algorithm = signature_algorithm; - } else if (hs->peer_pubkey->type == EVP_PKEY_RSA) { - signature_algorithm = SSL_SIGN_RSA_PKCS1_MD5_SHA1; - } else if (hs->peer_pubkey->type == EVP_PKEY_EC) { - signature_algorithm = SSL_SIGN_ECDSA_SHA1; - } else { - al = SSL_AD_UNSUPPORTED_CERTIFICATE; - OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE); - goto f_err; - } - - /* Parse and verify the signature. */ - if (!CBS_get_u16_length_prefixed(&certificate_verify, &signature) || - CBS_len(&certificate_verify) != 0) { - al = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto f_err; - } - - int sig_ok; - /* The SSL3 construction for CertificateVerify does not decompose into a - * single final digest and signature, and must be special-cased. 
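The block above rebuilds the premaster secret for PSK-based key exchanges as two 16-bit-length-prefixed fields, other_secret followed by the PSK, per RFC 4279 (for plain PSK, other_secret is a zero block of the PSK's length). A minimal sketch of that layout without CBB follows; build_psk_premaster is a hypothetical helper, not part of the vendored code.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Lays out  len(other_secret) || other_secret || len(psk) || psk  with
 * big-endian 16-bit length prefixes. Returns the total length, or 0 on
 * overflow or insufficient output space. */
static size_t build_psk_premaster(const uint8_t *other, size_t other_len,
                                  const uint8_t *psk, size_t psk_len,
                                  uint8_t *out, size_t out_cap) {
  size_t need = 2 + other_len + 2 + psk_len;
  if (other_len > 0xffff || psk_len > 0xffff || out_cap < need) {
    return 0;
  }
  uint8_t *p = out;
  *p++ = (uint8_t)(other_len >> 8);
  *p++ = (uint8_t)(other_len & 0xff);
  memcpy(p, other, other_len);
  p += other_len;
  *p++ = (uint8_t)(psk_len >> 8);
  *p++ = (uint8_t)(psk_len & 0xff);
  memcpy(p, psk, psk_len);
  return need;
}

int main(void) {
  /* Plain PSK: other_secret is all zeros and has the same length as the PSK. */
  const uint8_t psk[16] = {1, 2, 3};
  uint8_t zeros[16] = {0};
  uint8_t premaster[2 + 16 + 2 + 16];
  size_t len = build_psk_premaster(zeros, sizeof(zeros), psk, sizeof(psk),
                                   premaster, sizeof(premaster));
  printf("premaster length: %zu\n", len);  /* prints 36 */
  return 0;
}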
*/ - if (ssl3_protocol_version(ssl) == SSL3_VERSION) { - uint8_t digest[EVP_MAX_MD_SIZE]; - size_t digest_len; - if (!SSL_TRANSCRIPT_ssl3_cert_verify_hash(&hs->transcript, digest, - &digest_len, hs->new_session, - signature_algorithm)) { - goto err; - } - - EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(hs->peer_pubkey, NULL); - sig_ok = pctx != NULL && - EVP_PKEY_verify_init(pctx) && - EVP_PKEY_verify(pctx, CBS_data(&signature), CBS_len(&signature), - digest, digest_len); - EVP_PKEY_CTX_free(pctx); - } else { - sig_ok = ssl_public_key_verify( - ssl, CBS_data(&signature), CBS_len(&signature), signature_algorithm, - hs->peer_pubkey, (const uint8_t *)hs->transcript.buffer->data, - hs->transcript.buffer->length); - } - -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - sig_ok = 1; - ERR_clear_error(); -#endif - if (!sig_ok) { - al = SSL_AD_DECRYPT_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SIGNATURE); - goto f_err; - } - - /* The handshake buffer is no longer necessary, and we may hash the current - * message.*/ - SSL_TRANSCRIPT_free_buffer(&hs->transcript); - if (!ssl_hash_current_message(hs)) { - goto err; - } - - return 1; - -f_err: - ssl3_send_alert(ssl, SSL3_AL_FATAL, al); -err: - return 0; -} - -/* ssl3_get_next_proto reads a Next Protocol Negotiation handshake message. It - * sets the next_proto member in s if found */ -static int ssl3_get_next_proto(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_NEXT_PROTO) || - !ssl_hash_current_message(hs)) { - return -1; - } - - CBS next_protocol, selected_protocol, padding; - CBS_init(&next_protocol, ssl->init_msg, ssl->init_num); - if (!CBS_get_u8_length_prefixed(&next_protocol, &selected_protocol) || - !CBS_get_u8_length_prefixed(&next_protocol, &padding) || - CBS_len(&next_protocol) != 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return 0; - } - - if (!CBS_stow(&selected_protocol, &ssl->s3->next_proto_negotiated, - &ssl->s3->next_proto_negotiated_len)) { - return 0; - } - - return 1; -} - -/* ssl3_get_channel_id reads and verifies a ClientID handshake message. */ -static int ssl3_get_channel_id(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int msg_ret = ssl->method->ssl_get_message(ssl); - if (msg_ret <= 0) { - return msg_ret; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_CHANNEL_ID) || - !tls1_verify_channel_id(hs) || - !ssl_hash_current_message(hs)) { - return -1; - } - return 1; -} - -static int ssl3_send_new_session_ticket(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - const SSL_SESSION *session; - SSL_SESSION *session_copy = NULL; - if (ssl->session == NULL) { - /* Fix the timeout to measure from the ticket issuance time. */ - ssl_session_rebase_time(ssl, hs->new_session); - session = hs->new_session; - } else { - /* We are renewing an existing session. Duplicate the session to adjust the - * timeout. 
*/ - session_copy = SSL_SESSION_dup(ssl->session, SSL_SESSION_INCLUDE_NONAUTH); - if (session_copy == NULL) { - return -1; - } - - ssl_session_rebase_time(ssl, session_copy); - session = session_copy; - } - - CBB cbb, body, ticket; - int ok = - ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_NEW_SESSION_TICKET) && - CBB_add_u32(&body, session->timeout) && - CBB_add_u16_length_prefixed(&body, &ticket) && - ssl_encrypt_ticket(ssl, &ticket, session) && - ssl_add_message_cbb(ssl, &cbb); - - SSL_SESSION_free(session_copy); - CBB_cleanup(&cbb); - - if (!ok) { - return -1; - } - - return 1; -} diff --git a/Sources/BoringSSL/ssl/handshake_server.cc b/Sources/BoringSSL/ssl/handshake_server.cc new file mode 100644 index 000000000..bb565e96a --- /dev/null +++ b/Sources/BoringSSL/ssl/handshake_server.cc @@ -0,0 +1,1662 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ +/* ==================================================================== + * Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ +/* ==================================================================== + * Copyright 2002 Sun Microsystems, Inc. 
ALL RIGHTS RESERVED. + * + * Portions of the attached software ("Contribution") are developed by + * SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project. + * + * The Contribution is licensed pursuant to the OpenSSL open source + * license provided above. + * + * ECC cipher suite support in OpenSSL originally written by + * Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories. + * + */ +/* ==================================================================== + * Copyright 2005 Nokia. All rights reserved. + * + * The portions of the attached software ("Contribution") is developed by + * Nokia Corporation and is licensed pursuant to the OpenSSL open source + * license. + * + * The Contribution, originally written by Mika Kousa and Pasi Eronen of + * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites + * support (see RFC 4279) to OpenSSL. + * + * No patent licenses or other rights except those expressly stated in + * the OpenSSL open source license shall be deemed granted or received + * expressly, by implication, estoppel, or otherwise. + * + * No assurances are provided by Nokia that the Contribution does not + * infringe the patent or other intellectual property rights of any third + * party or that the license provides you with all the necessary rights + * to make use of the Contribution. + * + * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN + * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA + * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY + * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR + * OTHERWISE. */ + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" +#include "../crypto/internal.h" + + +namespace bssl { + +enum ssl_server_hs_state_t { + state_start_accept = 0, + state_read_client_hello, + state_select_certificate, + state_tls13, + state_select_parameters, + state_send_server_hello, + state_send_server_certificate, + state_send_server_key_exchange, + state_send_server_hello_done, + state_read_client_certificate, + state_verify_client_certificate, + state_read_client_key_exchange, + state_read_client_certificate_verify, + state_read_change_cipher_spec, + state_process_change_cipher_spec, + state_read_next_proto, + state_read_channel_id, + state_read_client_finished, + state_send_server_finished, + state_finish_server_handshake, + state_done, +}; + +int ssl_client_cipher_list_contains_cipher(const SSL_CLIENT_HELLO *client_hello, + uint16_t id) { + CBS cipher_suites; + CBS_init(&cipher_suites, client_hello->cipher_suites, + client_hello->cipher_suites_len); + + while (CBS_len(&cipher_suites) > 0) { + uint16_t got_id; + if (!CBS_get_u16(&cipher_suites, &got_id)) { + return 0; + } + + if (got_id == id) { + return 1; + } + } + + return 0; +} + +static int negotiate_version(SSL_HANDSHAKE *hs, uint8_t *out_alert, + const SSL_CLIENT_HELLO *client_hello) { + SSL *const ssl = hs->ssl; + assert(!ssl->s3->have_version); + CBS supported_versions, versions; + if (ssl_client_hello_get_extension(client_hello, &supported_versions, + TLSEXT_TYPE_supported_versions)) { + if (!CBS_get_u8_length_prefixed(&supported_versions, &versions) || + CBS_len(&supported_versions) != 0 || + CBS_len(&versions) == 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + *out_alert = SSL_AD_DECODE_ERROR; + return 0; + } + } else { + // Convert the 
ClientHello version to an equivalent supported_versions + // extension. + static const uint8_t kTLSVersions[] = { + 0x03, 0x03, // TLS 1.2 + 0x03, 0x02, // TLS 1.1 + 0x03, 0x01, // TLS 1 + 0x03, 0x00, // SSL 3 + }; + + static const uint8_t kDTLSVersions[] = { + 0xfe, 0xfd, // DTLS 1.2 + 0xfe, 0xff, // DTLS 1.0 + }; + + size_t versions_len = 0; + if (SSL_is_dtls(ssl)) { + if (client_hello->version <= DTLS1_2_VERSION) { + versions_len = 4; + } else if (client_hello->version <= DTLS1_VERSION) { + versions_len = 2; + } + CBS_init(&versions, kDTLSVersions + sizeof(kDTLSVersions) - versions_len, + versions_len); + } else { + if (client_hello->version >= TLS1_2_VERSION) { + versions_len = 8; + } else if (client_hello->version >= TLS1_1_VERSION) { + versions_len = 6; + } else if (client_hello->version >= TLS1_VERSION) { + versions_len = 4; + } else if (client_hello->version >= SSL3_VERSION) { + versions_len = 2; + } + CBS_init(&versions, kTLSVersions + sizeof(kTLSVersions) - versions_len, + versions_len); + } + } + + if (!ssl_negotiate_version(hs, out_alert, &ssl->version, &versions)) { + return 0; + } + + // At this point, the connection's version is known and |ssl->version| is + // fixed. Begin enforcing the record-layer version. + ssl->s3->have_version = true; + ssl->s3->aead_write_ctx->SetVersionIfNullCipher(ssl->version); + + // Handle FALLBACK_SCSV. + if (ssl_client_cipher_list_contains_cipher(client_hello, + SSL3_CK_FALLBACK_SCSV & 0xffff) && + ssl_protocol_version(ssl) < hs->max_version) { + OPENSSL_PUT_ERROR(SSL, SSL_R_INAPPROPRIATE_FALLBACK); + *out_alert = SSL3_AD_INAPPROPRIATE_FALLBACK; + return 0; + } + + return 1; +} + +static UniquePtr ssl_parse_client_cipher_list( + const SSL_CLIENT_HELLO *client_hello) { + CBS cipher_suites; + CBS_init(&cipher_suites, client_hello->cipher_suites, + client_hello->cipher_suites_len); + + UniquePtr sk(sk_SSL_CIPHER_new_null()); + if (!sk) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return nullptr; + } + + while (CBS_len(&cipher_suites) > 0) { + uint16_t cipher_suite; + + if (!CBS_get_u16(&cipher_suites, &cipher_suite)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST); + return nullptr; + } + + const SSL_CIPHER *c = SSL_get_cipher_by_value(cipher_suite); + if (c != NULL && !sk_SSL_CIPHER_push(sk.get(), c)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return nullptr; + } + } + + return sk; +} + +// ssl_get_compatible_server_ciphers determines the key exchange and +// authentication cipher suite masks compatible with the server configuration +// and current ClientHello parameters of |hs|. It sets |*out_mask_k| to the key +// exchange mask and |*out_mask_a| to the authentication mask. +static void ssl_get_compatible_server_ciphers(SSL_HANDSHAKE *hs, + uint32_t *out_mask_k, + uint32_t *out_mask_a) { + SSL *const ssl = hs->ssl; + uint32_t mask_k = 0; + uint32_t mask_a = 0; + + if (ssl_has_certificate(ssl)) { + mask_a |= ssl_cipher_auth_mask_for_key(hs->local_pubkey.get()); + if (EVP_PKEY_id(hs->local_pubkey.get()) == EVP_PKEY_RSA) { + mask_k |= SSL_kRSA; + } + } + + // Check for a shared group to consider ECDHE ciphers. + uint16_t unused; + if (tls1_get_shared_group(hs, &unused)) { + mask_k |= SSL_kECDHE; + } + + // PSK requires a server callback. 
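The rewritten negotiate_version a little earlier in this new handshake_server.cc converts a legacy ClientHello.version into a synthetic supported_versions list by taking a suffix of a constant table of descending wire versions (kTLSVersions / kDTLSVersions), so one code path handles both old and new clients. Below is a standalone sketch of the same suffix trick using 16-bit values; legacy_version_to_list and the exact table are illustrative, not BoringSSL API.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static const uint16_t kTLSWireVersions[] = {
    0x0303,  /* TLS 1.2 */
    0x0302,  /* TLS 1.1 */
    0x0301,  /* TLS 1.0 */
    0x0300,  /* SSL 3.0 */
};

/* Returns the suffix of versions a client with the given legacy
 * ClientHello.version implicitly offers, and stores its length. */
static const uint16_t *legacy_version_to_list(uint16_t client_hello_version,
                                              size_t *out_len) {
  const size_t total = sizeof(kTLSWireVersions) / sizeof(kTLSWireVersions[0]);
  size_t skip;
  if (client_hello_version >= 0x0303) {
    skip = 0;  /* offers TLS 1.2 and everything below */
  } else if (client_hello_version >= 0x0302) {
    skip = 1;
  } else if (client_hello_version >= 0x0301) {
    skip = 2;
  } else if (client_hello_version >= 0x0300) {
    skip = 3;
  } else {
    *out_len = 0;
    return NULL;  /* nothing we can speak */
  }
  *out_len = total - skip;
  return kTLSWireVersions + skip;
}

int main(void) {
  size_t n;
  const uint16_t *list = legacy_version_to_list(0x0302, &n);  /* TLS 1.1 client */
  for (size_t i = 0; i < n; i++) {
    printf("offered 0x%04x\n", (unsigned)list[i]);  /* 0x0302, 0x0301, 0x0300 */
  }
  return 0;
}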
+ if (ssl->psk_server_callback != NULL) { + mask_k |= SSL_kPSK; + mask_a |= SSL_aPSK; + } + + *out_mask_k = mask_k; + *out_mask_a = mask_a; +} + +static const SSL_CIPHER *ssl3_choose_cipher( + SSL_HANDSHAKE *hs, const SSL_CLIENT_HELLO *client_hello, + const struct ssl_cipher_preference_list_st *server_pref) { + SSL *const ssl = hs->ssl; + STACK_OF(SSL_CIPHER) *prio, *allow; + // in_group_flags will either be NULL, or will point to an array of bytes + // which indicate equal-preference groups in the |prio| stack. See the + // comment about |in_group_flags| in the |ssl_cipher_preference_list_st| + // struct. + const uint8_t *in_group_flags; + // group_min contains the minimal index so far found in a group, or -1 if no + // such value exists yet. + int group_min = -1; + + UniquePtr client_pref = + ssl_parse_client_cipher_list(client_hello); + if (!client_pref) { + return nullptr; + } + + if (ssl->options & SSL_OP_CIPHER_SERVER_PREFERENCE) { + prio = server_pref->ciphers; + in_group_flags = server_pref->in_group_flags; + allow = client_pref.get(); + } else { + prio = client_pref.get(); + in_group_flags = NULL; + allow = server_pref->ciphers; + } + + uint32_t mask_k, mask_a; + ssl_get_compatible_server_ciphers(hs, &mask_k, &mask_a); + + for (size_t i = 0; i < sk_SSL_CIPHER_num(prio); i++) { + const SSL_CIPHER *c = sk_SSL_CIPHER_value(prio, i); + + size_t cipher_index; + if (// Check if the cipher is supported for the current version. + SSL_CIPHER_get_min_version(c) <= ssl_protocol_version(ssl) && + ssl_protocol_version(ssl) <= SSL_CIPHER_get_max_version(c) && + // Check the cipher is supported for the server configuration. + (c->algorithm_mkey & mask_k) && + (c->algorithm_auth & mask_a) && + // Check the cipher is in the |allow| list. + sk_SSL_CIPHER_find(allow, &cipher_index, c)) { + if (in_group_flags != NULL && in_group_flags[i] == 1) { + // This element of |prio| is in a group. Update the minimum index found + // so far and continue looking. + if (group_min == -1 || (size_t)group_min > cipher_index) { + group_min = cipher_index; + } + } else { + if (group_min != -1 && (size_t)group_min < cipher_index) { + cipher_index = group_min; + } + return sk_SSL_CIPHER_value(allow, cipher_index); + } + } + + if (in_group_flags != NULL && in_group_flags[i] == 0 && group_min != -1) { + // We are about to leave a group, but we found a match in it, so that's + // our answer. + return sk_SSL_CIPHER_value(allow, group_min); + } + } + + return nullptr; +} + +static enum ssl_hs_wait_t do_start_accept(SSL_HANDSHAKE *hs) { + ssl_do_info_callback(hs->ssl, SSL_CB_HANDSHAKE_START, 1); + hs->state = state_read_client_hello; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_client_hello(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CLIENT_HELLO)) { + return ssl_hs_error; + } + + SSL_CLIENT_HELLO client_hello; + if (!ssl_client_hello_init(ssl, &client_hello, msg)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + // Run the early callback. + if (ssl->ctx->select_certificate_cb != NULL) { + switch (ssl->ctx->select_certificate_cb(&client_hello)) { + case ssl_select_cert_retry: + return ssl_hs_certificate_selection_pending; + + case ssl_select_cert_error: + // Connection rejected. 
+ OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_REJECTED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + + default: + /* fallthrough */; + } + } + + // Freeze the version range after the early callback. + if (!ssl_get_version_range(ssl, &hs->min_version, &hs->max_version)) { + return ssl_hs_error; + } + + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!negotiate_version(hs, &alert, &client_hello)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + hs->client_version = client_hello.version; + if (client_hello.random_len != SSL3_RANDOM_SIZE) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + OPENSSL_memcpy(ssl->s3->client_random, client_hello.random, + client_hello.random_len); + + // Only null compression is supported. TLS 1.3 further requires the peer + // advertise no other compression. + if (OPENSSL_memchr(client_hello.compression_methods, 0, + client_hello.compression_methods_len) == NULL || + (ssl_protocol_version(ssl) >= TLS1_3_VERSION && + client_hello.compression_methods_len != 1)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMPRESSION_LIST); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + // TLS extensions. + if (!ssl_parse_clienthello_tlsext(hs, &client_hello)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); + return ssl_hs_error; + } + + hs->state = state_select_certificate; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_select_certificate(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + // Call |cert_cb| to update server certificates if required. + if (ssl->cert->cert_cb != NULL) { + int rv = ssl->cert->cert_cb(ssl, ssl->cert->cert_cb_arg); + if (rv == 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_CB_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + if (rv < 0) { + return ssl_hs_x509_lookup; + } + } + + if (!ssl_on_certificate_selected(hs)) { + return ssl_hs_error; + } + + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + // Jump to the TLS 1.3 state machine. + hs->state = state_tls13; + return ssl_hs_ok; + } + + SSL_CLIENT_HELLO client_hello; + if (!ssl_client_hello_init(ssl, &client_hello, msg)) { + return ssl_hs_error; + } + + // Negotiate the cipher suite. This must be done after |cert_cb| so the + // certificate is finalized. + hs->new_cipher = + ssl3_choose_cipher(hs, &client_hello, ssl_get_cipher_preferences(ssl)); + if (hs->new_cipher == NULL) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SHARED_CIPHER); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + + hs->state = state_select_parameters; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_tls13(SSL_HANDSHAKE *hs) { + enum ssl_hs_wait_t wait = tls13_server_handshake(hs); + if (wait == ssl_hs_ok) { + hs->state = state_finish_server_handshake; + return ssl_hs_ok; + } + + return wait; +} + +static enum ssl_hs_wait_t do_select_parameters(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + SSL_CLIENT_HELLO client_hello; + if (!ssl_client_hello_init(ssl, &client_hello, msg)) { + return ssl_hs_error; + } + + // Determine whether we are doing session resumption. 
+ UniquePtr session; + bool tickets_supported = false, renew_ticket = false; + enum ssl_hs_wait_t wait = ssl_get_prev_session( + ssl, &session, &tickets_supported, &renew_ticket, &client_hello); + if (wait != ssl_hs_ok) { + return wait; + } + + if (session) { + if (session->extended_master_secret && !hs->extended_master_secret) { + // A ClientHello without EMS that attempts to resume a session with EMS + // is fatal to the connection. + OPENSSL_PUT_ERROR(SSL, SSL_R_RESUMED_EMS_SESSION_WITHOUT_EMS_EXTENSION); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + + if (!ssl_session_is_resumable(hs, session.get()) || + // If the client offers the EMS extension, but the previous session + // didn't use it, then negotiate a new session. + hs->extended_master_secret != session->extended_master_secret) { + session.reset(); + } + } + + if (session) { + // Use the old session. + hs->ticket_expected = renew_ticket; + ssl->session = session.release(); + ssl->s3->session_reused = true; + } else { + hs->ticket_expected = tickets_supported; + ssl_set_session(ssl, NULL); + if (!ssl_get_new_session(hs, 1 /* server */)) { + return ssl_hs_error; + } + + // Clear the session ID if we want the session to be single-use. + if (!(ssl->ctx->session_cache_mode & SSL_SESS_CACHE_SERVER)) { + hs->new_session->session_id_length = 0; + } + } + + if (ssl->ctx->dos_protection_cb != NULL && + ssl->ctx->dos_protection_cb(&client_hello) == 0) { + // Connection rejected for DOS reasons. + OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_REJECTED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + if (ssl->session == NULL) { + hs->new_session->cipher = hs->new_cipher; + + // Determine whether to request a client certificate. + hs->cert_request = !!(ssl->verify_mode & SSL_VERIFY_PEER); + // Only request a certificate if Channel ID isn't negotiated. + if ((ssl->verify_mode & SSL_VERIFY_PEER_IF_NO_OBC) && + ssl->s3->tlsext_channel_id_valid) { + hs->cert_request = false; + } + // CertificateRequest may only be sent in certificate-based ciphers. + if (!ssl_cipher_uses_certificate_auth(hs->new_cipher)) { + hs->cert_request = false; + } + + if (!hs->cert_request) { + // OpenSSL returns X509_V_OK when no certificates are requested. This is + // classed by them as a bug, but it's assumed by at least NGINX. + hs->new_session->verify_result = X509_V_OK; + } + } + + // HTTP/2 negotiation depends on the cipher suite, so ALPN negotiation was + // deferred. Complete it now. + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!ssl_negotiate_alpn(hs, &alert, &client_hello)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + // Now that all parameters are known, initialize the handshake hash and hash + // the ClientHello. + if (!hs->transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher) || + !ssl_hash_message(hs, msg)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + // Release the handshake buffer if client authentication isn't required. + if (!hs->cert_request) { + hs->transcript.FreeBuffer(); + } + + ssl->method->next_message(ssl); + + hs->state = state_send_server_hello; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_server_hello(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + // We only accept ChannelIDs on connections with ECDHE in order to avoid a + // known attack while we fix ChannelID itself. 
+ if (ssl->s3->tlsext_channel_id_valid && + (hs->new_cipher->algorithm_mkey & SSL_kECDHE) == 0) { + ssl->s3->tlsext_channel_id_valid = false; + } + + // If this is a resumption and the original handshake didn't support + // ChannelID then we didn't record the original handshake hashes in the + // session and so cannot resume with ChannelIDs. + if (ssl->session != NULL && + ssl->session->original_handshake_hash_len == 0) { + ssl->s3->tlsext_channel_id_valid = false; + } + + struct OPENSSL_timeval now; + ssl_get_current_time(ssl, &now); + ssl->s3->server_random[0] = now.tv_sec >> 24; + ssl->s3->server_random[1] = now.tv_sec >> 16; + ssl->s3->server_random[2] = now.tv_sec >> 8; + ssl->s3->server_random[3] = now.tv_sec; + if (!RAND_bytes(ssl->s3->server_random + 4, SSL3_RANDOM_SIZE - 4)) { + return ssl_hs_error; + } + + // TODO(davidben): Implement the TLS 1.1 and 1.2 downgrade sentinels once TLS + // 1.3 is finalized and we are not implementing a draft version. + + const SSL_SESSION *session = hs->new_session.get(); + if (ssl->session != NULL) { + session = ssl->session; + } + + ScopedCBB cbb; + CBB body, session_id; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_SERVER_HELLO) || + !CBB_add_u16(&body, ssl->version) || + !CBB_add_bytes(&body, ssl->s3->server_random, SSL3_RANDOM_SIZE) || + !CBB_add_u8_length_prefixed(&body, &session_id) || + !CBB_add_bytes(&session_id, session->session_id, + session->session_id_length) || + !CBB_add_u16(&body, ssl_cipher_get_value(hs->new_cipher)) || + !CBB_add_u8(&body, 0 /* no compression */) || + !ssl_add_serverhello_tlsext(hs, &body) || + !ssl_add_message_cbb(ssl, cbb.get())) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + + if (ssl->session != NULL) { + hs->state = state_send_server_finished; + } else { + hs->state = state_send_server_certificate; + } + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_server_certificate(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + ScopedCBB cbb; + + if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { + if (!ssl_has_certificate(ssl)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CERTIFICATE_SET); + return ssl_hs_error; + } + + if (!ssl_output_cert_chain(ssl)) { + return ssl_hs_error; + } + + if (hs->certificate_status_expected) { + CBB body, ocsp_response; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_CERTIFICATE_STATUS) || + !CBB_add_u8(&body, TLSEXT_STATUSTYPE_ocsp) || + !CBB_add_u24_length_prefixed(&body, &ocsp_response) || + !CBB_add_bytes(&ocsp_response, + CRYPTO_BUFFER_data(ssl->cert->ocsp_response), + CRYPTO_BUFFER_len(ssl->cert->ocsp_response)) || + !ssl_add_message_cbb(ssl, cbb.get())) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + } + } + + // Assemble ServerKeyExchange parameters if needed. + uint32_t alg_k = hs->new_cipher->algorithm_mkey; + uint32_t alg_a = hs->new_cipher->algorithm_auth; + if (ssl_cipher_requires_server_key_exchange(hs->new_cipher) || + ((alg_a & SSL_aPSK) && ssl->psk_identity_hint)) { + + // Pre-allocate enough room to comfortably fit an ECDHE public key. Prepend + // the client and server randoms for the signing transcript. + CBB child; + if (!CBB_init(cbb.get(), SSL3_RANDOM_SIZE * 2 + 128) || + !CBB_add_bytes(cbb.get(), ssl->s3->client_random, SSL3_RANDOM_SIZE) || + !CBB_add_bytes(cbb.get(), ssl->s3->server_random, SSL3_RANDOM_SIZE)) { + return ssl_hs_error; + } + + // PSK ciphers begin with an identity hint. 
+ if (alg_a & SSL_aPSK) { + size_t len = + (ssl->psk_identity_hint == NULL) ? 0 : strlen(ssl->psk_identity_hint); + if (!CBB_add_u16_length_prefixed(cbb.get(), &child) || + !CBB_add_bytes(&child, (const uint8_t *)ssl->psk_identity_hint, + len)) { + return ssl_hs_error; + } + } + + if (alg_k & SSL_kECDHE) { + // Determine the group to use. + uint16_t group_id; + if (!tls1_get_shared_group(hs, &group_id)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + hs->new_session->group_id = group_id; + + // Set up ECDH, generate a key, and emit the public half. + hs->key_share = SSLKeyShare::Create(group_id); + if (!hs->key_share || + !CBB_add_u8(cbb.get(), NAMED_CURVE_TYPE) || + !CBB_add_u16(cbb.get(), group_id) || + !CBB_add_u8_length_prefixed(cbb.get(), &child) || + !hs->key_share->Offer(&child)) { + return ssl_hs_error; + } + } else { + assert(alg_k & SSL_kPSK); + } + + if (!CBBFinishArray(cbb.get(), &hs->server_params)) { + return ssl_hs_error; + } + } + + hs->state = state_send_server_key_exchange; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_server_key_exchange(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (hs->server_params.size() == 0) { + hs->state = state_send_server_hello_done; + return ssl_hs_ok; + } + + ScopedCBB cbb; + CBB body, child; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_SERVER_KEY_EXCHANGE) || + // |hs->server_params| contains a prefix for signing. + hs->server_params.size() < 2 * SSL3_RANDOM_SIZE || + !CBB_add_bytes(&body, hs->server_params.data() + 2 * SSL3_RANDOM_SIZE, + hs->server_params.size() - 2 * SSL3_RANDOM_SIZE)) { + return ssl_hs_error; + } + + // Add a signature. + if (ssl_cipher_uses_certificate_auth(hs->new_cipher)) { + if (!ssl_has_private_key(ssl)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + // Determine the signature algorithm. + uint16_t signature_algorithm; + if (!tls1_choose_signature_algorithm(hs, &signature_algorithm)) { + return ssl_hs_error; + } + if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) { + if (!CBB_add_u16(&body, signature_algorithm)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + } + + // Add space for the signature. 
+ const size_t max_sig_len = EVP_PKEY_size(hs->local_pubkey.get()); + uint8_t *ptr; + if (!CBB_add_u16_length_prefixed(&body, &child) || + !CBB_reserve(&child, &ptr, max_sig_len)) { + return ssl_hs_error; + } + + size_t sig_len; + switch (ssl_private_key_sign(hs, ptr, &sig_len, max_sig_len, + signature_algorithm, hs->server_params)) { + case ssl_private_key_success: + if (!CBB_did_write(&child, sig_len)) { + return ssl_hs_error; + } + break; + case ssl_private_key_failure: + return ssl_hs_error; + case ssl_private_key_retry: + return ssl_hs_private_key_operation; + } + } + + if (!ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + + hs->server_params.Reset(); + + hs->state = state_send_server_hello_done; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_server_hello_done(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + ScopedCBB cbb; + CBB body; + + if (hs->cert_request) { + CBB cert_types, sigalgs_cbb; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_CERTIFICATE_REQUEST) || + !CBB_add_u8_length_prefixed(&body, &cert_types) || + !CBB_add_u8(&cert_types, SSL3_CT_RSA_SIGN) || + (ssl_protocol_version(ssl) >= TLS1_VERSION && + !CBB_add_u8(&cert_types, TLS_CT_ECDSA_SIGN)) || + (ssl_protocol_version(ssl) >= TLS1_2_VERSION && + (!CBB_add_u16_length_prefixed(&body, &sigalgs_cbb) || + !tls12_add_verify_sigalgs(ssl, &sigalgs_cbb))) || + !ssl_add_client_CA_list(ssl, &body) || + !ssl_add_message_cbb(ssl, cbb.get())) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + } + + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_SERVER_HELLO_DONE) || + !ssl_add_message_cbb(ssl, cbb.get())) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + + hs->state = state_read_client_certificate; + return ssl_hs_flush; +} + +static enum ssl_hs_wait_t do_read_client_certificate(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (!hs->cert_request) { + hs->state = state_verify_client_certificate; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (msg.type != SSL3_MT_CERTIFICATE) { + if (ssl->version == SSL3_VERSION && + msg.type == SSL3_MT_CLIENT_KEY_EXCHANGE) { + // In SSL 3.0, the Certificate message is omitted to signal no + // certificate. + if (ssl->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + + // OpenSSL returns X509_V_OK when no certificates are received. This is + // classed by them as a bug, but it's assumed by at least NGINX. + hs->new_session->verify_result = X509_V_OK; + hs->state = state_verify_client_certificate; + return ssl_hs_ok; + } + + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + return ssl_hs_error; + } + + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + CBS certificate_msg = msg.body; + uint8_t alert = SSL_AD_DECODE_ERROR; + UniquePtr chain; + if (!ssl_parse_cert_chain(&alert, &chain, &hs->peer_pubkey, + ssl->retain_only_sha256_of_client_certs + ? 
hs->new_session->peer_sha256 + : NULL, + &certificate_msg, ssl->ctx->pool)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + sk_CRYPTO_BUFFER_pop_free(hs->new_session->certs, CRYPTO_BUFFER_free); + hs->new_session->certs = chain.release(); + + if (CBS_len(&certificate_msg) != 0 || + !ssl->ctx->x509_method->session_cache_objects(hs->new_session.get())) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + if (sk_CRYPTO_BUFFER_num(hs->new_session->certs) == 0) { + // No client certificate so the handshake buffer may be discarded. + hs->transcript.FreeBuffer(); + + // In SSL 3.0, sending no certificate is signaled by omitting the + // Certificate message. + if (ssl->version == SSL3_VERSION) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CERTIFICATES_RETURNED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + + if (ssl->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) { + // Fail for TLS only if we required a certificate + OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + + // OpenSSL returns X509_V_OK when no certificates are received. This is + // classed by them as a bug, but it's assumed by at least NGINX. + hs->new_session->verify_result = X509_V_OK; + } else if (ssl->retain_only_sha256_of_client_certs) { + // The hash will have been filled in. + hs->new_session->peer_sha256_valid = 1; + } + + ssl->method->next_message(ssl); + hs->state = state_verify_client_certificate; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_verify_client_certificate(SSL_HANDSHAKE *hs) { + if (sk_CRYPTO_BUFFER_num(hs->new_session->certs) > 0) { + switch (ssl_verify_peer_cert(hs)) { + case ssl_verify_ok: + break; + case ssl_verify_invalid: + return ssl_hs_error; + case ssl_verify_retry: + return ssl_hs_certificate_verify; + } + } + + hs->state = state_read_client_key_exchange; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_client_key_exchange(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CLIENT_KEY_EXCHANGE)) { + return ssl_hs_error; + } + + CBS client_key_exchange = msg.body; + uint32_t alg_k = hs->new_cipher->algorithm_mkey; + uint32_t alg_a = hs->new_cipher->algorithm_auth; + + // If using a PSK key exchange, parse the PSK identity. + if (alg_a & SSL_aPSK) { + CBS psk_identity; + + // If using PSK, the ClientKeyExchange contains a psk_identity. If PSK, + // then this is the only field in the message. + if (!CBS_get_u16_length_prefixed(&client_key_exchange, &psk_identity) || + ((alg_k & SSL_kPSK) && CBS_len(&client_key_exchange) != 0)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + if (CBS_len(&psk_identity) > PSK_MAX_IDENTITY_LEN || + CBS_contains_zero_byte(&psk_identity)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + if (!CBS_strdup(&psk_identity, &hs->new_session->psk_identity)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + } + + // Depending on the key exchange method, compute |premaster_secret|. 
+ Array<uint8_t> premaster_secret; + if (alg_k & SSL_kRSA) { + CBS encrypted_premaster_secret; + if (ssl->version > SSL3_VERSION) { + if (!CBS_get_u16_length_prefixed(&client_key_exchange, + &encrypted_premaster_secret) || + CBS_len(&client_key_exchange) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + } else { + encrypted_premaster_secret = client_key_exchange; + } + + // Allocate a buffer large enough for an RSA decryption. + Array<uint8_t> decrypt_buf; + if (!decrypt_buf.Init(EVP_PKEY_size(hs->local_pubkey.get()))) { + return ssl_hs_error; + } + + // Decrypt with no padding. PKCS#1 padding will be removed as part of the + // timing-sensitive code below. + size_t decrypt_len; + switch (ssl_private_key_decrypt(hs, decrypt_buf.data(), &decrypt_len, + decrypt_buf.size(), + encrypted_premaster_secret)) { + case ssl_private_key_success: + break; + case ssl_private_key_failure: + return ssl_hs_error; + case ssl_private_key_retry: + return ssl_hs_private_key_operation; + } + + if (decrypt_len != decrypt_buf.size()) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); + return ssl_hs_error; + } + + // Prepare a random premaster, to be used on invalid padding. See RFC 5246, + // section 7.4.7.1. + if (!premaster_secret.Init(SSL_MAX_MASTER_KEY_LENGTH) || + !RAND_bytes(premaster_secret.data(), premaster_secret.size())) { + return ssl_hs_error; + } + + // The smallest padded premaster is 11 bytes of overhead. Small keys are + // publicly invalid. + if (decrypt_len < 11 + premaster_secret.size()) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); + return ssl_hs_error; + } + + // Check the padding. See RFC 3447, section 7.2.2. + size_t padding_len = decrypt_len - premaster_secret.size(); + uint8_t good = constant_time_eq_int_8(decrypt_buf[0], 0) & + constant_time_eq_int_8(decrypt_buf[1], 2); + for (size_t i = 2; i < padding_len - 1; i++) { + good &= ~constant_time_is_zero_8(decrypt_buf[i]); + } + good &= constant_time_is_zero_8(decrypt_buf[padding_len - 1]); + + // The premaster secret must begin with |client_version|. This too must be + // checked in constant time (http://eprint.iacr.org/2003/052/). + good &= constant_time_eq_8(decrypt_buf[padding_len], + (unsigned)(hs->client_version >> 8)); + good &= constant_time_eq_8(decrypt_buf[padding_len + 1], + (unsigned)(hs->client_version & 0xff)); + + // Select, in constant time, either the decrypted premaster or the random + // premaster based on |good|. + for (size_t i = 0; i < premaster_secret.size(); i++) { + premaster_secret[i] = constant_time_select_8( + good, decrypt_buf[padding_len + i], premaster_secret[i]); + } + } else if (alg_k & SSL_kECDHE) { + // Parse the ClientKeyExchange. + CBS peer_key; + if (!CBS_get_u8_length_prefixed(&client_key_exchange, &peer_key) || + CBS_len(&client_key_exchange) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + // Compute the premaster. + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!hs->key_share->Finish(&premaster_secret, &alert, peer_key)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + // The key exchange state may now be discarded. 
+ hs->key_share.reset(); + } else if (!(alg_k & SSL_kPSK)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + + // For a PSK cipher suite, the actual pre-master secret is combined with the + // pre-shared key. + if (alg_a & SSL_aPSK) { + if (ssl->psk_server_callback == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + // Look up the key for the identity. + uint8_t psk[PSK_MAX_PSK_LEN]; + unsigned psk_len = ssl->psk_server_callback( + ssl, hs->new_session->psk_identity, psk, sizeof(psk)); + if (psk_len > PSK_MAX_PSK_LEN) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } else if (psk_len == 0) { + // PSK related to the given identity not found. + OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_NOT_FOUND); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNKNOWN_PSK_IDENTITY); + return ssl_hs_error; + } + + if (alg_k & SSL_kPSK) { + // In plain PSK, other_secret is a block of 0s with the same length as the + // pre-shared key. + if (!premaster_secret.Init(psk_len)) { + return ssl_hs_error; + } + OPENSSL_memset(premaster_secret.data(), 0, premaster_secret.size()); + } + + ScopedCBB new_premaster; + CBB child; + if (!CBB_init(new_premaster.get(), + 2 + psk_len + 2 + premaster_secret.size()) || + !CBB_add_u16_length_prefixed(new_premaster.get(), &child) || + !CBB_add_bytes(&child, premaster_secret.data(), + premaster_secret.size()) || + !CBB_add_u16_length_prefixed(new_premaster.get(), &child) || + !CBB_add_bytes(&child, psk, psk_len) || + !CBBFinishArray(new_premaster.get(), &premaster_secret)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return ssl_hs_error; + } + } + + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + // Compute the master secret. + hs->new_session->master_key_length = tls1_generate_master_secret( + hs, hs->new_session->master_key, premaster_secret); + if (hs->new_session->master_key_length == 0) { + return ssl_hs_error; + } + hs->new_session->extended_master_secret = hs->extended_master_secret; + + ssl->method->next_message(ssl); + hs->state = state_read_client_certificate_verify; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_client_certificate_verify(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + // Only RSA and ECDSA client certificates are supported, so a + // CertificateVerify is required if and only if there's a client certificate. + if (!hs->peer_pubkey) { + hs->transcript.FreeBuffer(); + hs->state = state_read_change_cipher_spec; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE_VERIFY)) { + return ssl_hs_error; + } + + CBS certificate_verify = msg.body, signature; + + // Determine the signature algorithm. 
+ uint16_t signature_algorithm = 0; + if (ssl_protocol_version(ssl) >= TLS1_2_VERSION) { + if (!CBS_get_u16(&certificate_verify, &signature_algorithm)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!tls12_check_peer_sigalg(ssl, &alert, signature_algorithm)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + hs->new_session->peer_signature_algorithm = signature_algorithm; + } else if (!tls1_get_legacy_signature_algorithm(&signature_algorithm, + hs->peer_pubkey.get())) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_ERROR_UNSUPPORTED_CERTIFICATE_TYPE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_CERTIFICATE); + return ssl_hs_error; + } + + // Parse and verify the signature. + if (!CBS_get_u16_length_prefixed(&certificate_verify, &signature) || + CBS_len(&certificate_verify) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + bool sig_ok; + // The SSL3 construction for CertificateVerify does not decompose into a + // single final digest and signature, and must be special-cased. + if (ssl_protocol_version(ssl) == SSL3_VERSION) { + uint8_t digest[EVP_MAX_MD_SIZE]; + size_t digest_len; + if (!hs->transcript.GetSSL3CertVerifyHash( + digest, &digest_len, hs->new_session.get(), signature_algorithm)) { + return ssl_hs_error; + } + + UniquePtr pctx( + EVP_PKEY_CTX_new(hs->peer_pubkey.get(), nullptr)); + sig_ok = pctx && + EVP_PKEY_verify_init(pctx.get()) && + EVP_PKEY_verify(pctx.get(), CBS_data(&signature), + CBS_len(&signature), digest, digest_len); + } else { + sig_ok = + ssl_public_key_verify(ssl, signature, signature_algorithm, + hs->peer_pubkey.get(), hs->transcript.buffer()); + } + +#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) + sig_ok = true; + ERR_clear_error(); +#endif + if (!sig_ok) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SIGNATURE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); + return ssl_hs_error; + } + + // The handshake buffer is no longer necessary, and we may hash the current + // message. 
+ hs->transcript.FreeBuffer(); + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->state = state_read_change_cipher_spec; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_change_cipher_spec(SSL_HANDSHAKE *hs) { + hs->state = state_process_change_cipher_spec; + return ssl_hs_read_change_cipher_spec; +} + +static enum ssl_hs_wait_t do_process_change_cipher_spec(SSL_HANDSHAKE *hs) { + if (!tls1_change_cipher_state(hs, evp_aead_open)) { + return ssl_hs_error; + } + + hs->state = state_read_next_proto; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_next_proto(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (!hs->next_proto_neg_seen) { + hs->state = state_read_channel_id; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_NEXT_PROTO) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + CBS next_protocol = msg.body, selected_protocol, padding; + if (!CBS_get_u8_length_prefixed(&next_protocol, &selected_protocol) || + !CBS_get_u8_length_prefixed(&next_protocol, &padding) || + CBS_len(&next_protocol) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + if (!ssl->s3->next_proto_negotiated.CopyFrom(selected_protocol)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->state = state_read_channel_id; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_channel_id(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (!ssl->s3->tlsext_channel_id_valid) { + hs->state = state_read_client_finished; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CHANNEL_ID) || + !tls1_verify_channel_id(hs, msg) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->state = state_read_client_finished; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_client_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + enum ssl_hs_wait_t wait = ssl_get_finished(hs); + if (wait != ssl_hs_ok) { + return wait; + } + + if (ssl->session != NULL) { + hs->state = state_finish_server_handshake; + } else { + hs->state = state_send_server_finished; + } + + // If this is a full handshake with ChannelID then record the handshake + // hashes in |hs->new_session| in case we need them to verify a + // ChannelID signature on a resumption of this session in the future. + if (ssl->session == NULL && ssl->s3->tlsext_channel_id_valid && + !tls1_record_handshake_hashes_for_channel_id(hs)) { + return ssl_hs_error; + } + + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_server_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (hs->ticket_expected) { + const SSL_SESSION *session; + UniquePtr session_copy; + if (ssl->session == NULL) { + // Fix the timeout to measure from the ticket issuance time. + ssl_session_rebase_time(ssl, hs->new_session.get()); + session = hs->new_session.get(); + } else { + // We are renewing an existing session. Duplicate the session to adjust + // the timeout. 
+ session_copy = SSL_SESSION_dup(ssl->session, SSL_SESSION_INCLUDE_NONAUTH); + if (!session_copy) { + return ssl_hs_error; + } + + ssl_session_rebase_time(ssl, session_copy.get()); + session = session_copy.get(); + } + + ScopedCBB cbb; + CBB body, ticket; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_NEW_SESSION_TICKET) || + !CBB_add_u32(&body, session->timeout) || + !CBB_add_u16_length_prefixed(&body, &ticket) || + !ssl_encrypt_ticket(ssl, &ticket, session) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + } + + if (!ssl->method->add_change_cipher_spec(ssl) || + !tls1_change_cipher_state(hs, evp_aead_seal) || + !ssl_send_finished(hs)) { + return ssl_hs_error; + } + + if (ssl->session != NULL) { + hs->state = state_read_change_cipher_spec; + } else { + hs->state = state_finish_server_handshake; + } + return ssl_hs_flush; +} + +static enum ssl_hs_wait_t do_finish_server_handshake(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + ssl->method->on_handshake_complete(ssl); + + // If we aren't retaining peer certificates then we can discard it now. + if (hs->new_session != NULL && ssl->retain_only_sha256_of_client_certs) { + sk_CRYPTO_BUFFER_pop_free(hs->new_session->certs, CRYPTO_BUFFER_free); + hs->new_session->certs = NULL; + ssl->ctx->x509_method->session_clear(hs->new_session.get()); + } + + if (ssl->session != NULL) { + SSL_SESSION_up_ref(ssl->session); + ssl->s3->established_session.reset(ssl->session); + } else { + ssl->s3->established_session = std::move(hs->new_session); + ssl->s3->established_session->not_resumable = 0; + } + + hs->handshake_finalized = true; + ssl->s3->initial_handshake_complete = true; + ssl_update_cache(hs, SSL_SESS_CACHE_SERVER); + + hs->state = state_done; + return ssl_hs_ok; +} + +enum ssl_hs_wait_t ssl_server_handshake(SSL_HANDSHAKE *hs) { + while (hs->state != state_done) { + enum ssl_hs_wait_t ret = ssl_hs_error; + enum ssl_server_hs_state_t state = + static_cast(hs->state); + switch (state) { + case state_start_accept: + ret = do_start_accept(hs); + break; + case state_read_client_hello: + ret = do_read_client_hello(hs); + break; + case state_select_certificate: + ret = do_select_certificate(hs); + break; + case state_tls13: + ret = do_tls13(hs); + break; + case state_select_parameters: + ret = do_select_parameters(hs); + break; + case state_send_server_hello: + ret = do_send_server_hello(hs); + break; + case state_send_server_certificate: + ret = do_send_server_certificate(hs); + break; + case state_send_server_key_exchange: + ret = do_send_server_key_exchange(hs); + break; + case state_send_server_hello_done: + ret = do_send_server_hello_done(hs); + break; + case state_read_client_certificate: + ret = do_read_client_certificate(hs); + break; + case state_verify_client_certificate: + ret = do_verify_client_certificate(hs); + break; + case state_read_client_key_exchange: + ret = do_read_client_key_exchange(hs); + break; + case state_read_client_certificate_verify: + ret = do_read_client_certificate_verify(hs); + break; + case state_read_change_cipher_spec: + ret = do_read_change_cipher_spec(hs); + break; + case state_process_change_cipher_spec: + ret = do_process_change_cipher_spec(hs); + break; + case state_read_next_proto: + ret = do_read_next_proto(hs); + break; + case state_read_channel_id: + ret = do_read_channel_id(hs); + break; + case state_read_client_finished: + ret = do_read_client_finished(hs); + break; + case state_send_server_finished: + ret = do_send_server_finished(hs); + break; + case 
state_finish_server_handshake: + ret = do_finish_server_handshake(hs); + break; + case state_done: + ret = ssl_hs_ok; + break; + } + + if (hs->state != state) { + ssl_do_info_callback(hs->ssl, SSL_CB_ACCEPT_LOOP, 1); + } + + if (ret != ssl_hs_ok) { + return ret; + } + } + + ssl_do_info_callback(hs->ssl, SSL_CB_HANDSHAKE_DONE, 1); + return ssl_hs_ok; +} + +const char *ssl_server_handshake_state(SSL_HANDSHAKE *hs) { + enum ssl_server_hs_state_t state = + static_cast(hs->state); + switch (state) { + case state_start_accept: + return "TLS server start_accept"; + case state_read_client_hello: + return "TLS server read_client_hello"; + case state_select_certificate: + return "TLS server select_certificate"; + case state_tls13: + return tls13_server_handshake_state(hs); + case state_select_parameters: + return "TLS server select_parameters"; + case state_send_server_hello: + return "TLS server send_server_hello"; + case state_send_server_certificate: + return "TLS server send_server_certificate"; + case state_send_server_key_exchange: + return "TLS server send_server_key_exchange"; + case state_send_server_hello_done: + return "TLS server send_server_hello_done"; + case state_read_client_certificate: + return "TLS server read_client_certificate"; + case state_verify_client_certificate: + return "TLS server verify_client_certificate"; + case state_read_client_key_exchange: + return "TLS server read_client_key_exchange"; + case state_read_client_certificate_verify: + return "TLS server read_client_certificate_verify"; + case state_read_change_cipher_spec: + return "TLS server read_change_cipher_spec"; + case state_process_change_cipher_spec: + return "TLS server process_change_cipher_spec"; + case state_read_next_proto: + return "TLS server read_next_proto"; + case state_read_channel_id: + return "TLS server read_channel_id"; + case state_read_client_finished: + return "TLS server read_client_finished"; + case state_send_server_finished: + return "TLS server send_server_finished"; + case state_finish_server_handshake: + return "TLS server finish_server_handshake"; + case state_done: + return "TLS server done"; + } + + return "TLS server unknown"; +} + +} diff --git a/Sources/BoringSSL/ssl/internal.h b/Sources/BoringSSL/ssl/internal.h index a6dfad51d..6c1438399 100644 --- a/Sources/BoringSSL/ssl/internal.h +++ b/Sources/BoringSSL/ssl/internal.h @@ -144,13 +144,27 @@ #include +#include + +#include +#include +#include +#include + #include +#include +#include +#include #include +#include #include +#include "../crypto/err/internal.h" +#include "../crypto/internal.h" + #if defined(OPENSSL_WINDOWS) -/* Windows defines struct timeval in winsock2.h. */ +// Windows defines struct timeval in winsock2.h. OPENSSL_MSVC_PRAGMA(warning(push, 3)) #include OPENSSL_MSVC_PRAGMA(warning(pop)) @@ -158,286 +172,612 @@ OPENSSL_MSVC_PRAGMA(warning(pop)) #include #endif -#if defined(__cplusplus) -extern "C" { + +namespace bssl { + +struct SSL_HANDSHAKE; +struct SSL_PROTOCOL_METHOD; + +// C++ utilities. + +// New behaves like |new| but uses |OPENSSL_malloc| for memory allocation. It +// returns nullptr on allocation error. It only implements single-object +// allocation and not new T[n]. +// +// Note: unlike |new|, this does not support non-public constructors. +template +T *New(Args &&... 
args) { + void *t = OPENSSL_malloc(sizeof(T)); + if (t == nullptr) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return nullptr; + } + return new (t) T(std::forward<Args>(args)...); +} + +// Delete behaves like |delete| but uses |OPENSSL_free| to release memory. +// +// Note: unlike |delete| this does not support non-public destructors. +template <typename T> +void Delete(T *t) { + if (t != nullptr) { + t->~T(); + OPENSSL_free(t); + } +} + +// All types with kAllowUniquePtr set may be used with UniquePtr. Other types +// may be C structs which require a |BORINGSSL_MAKE_DELETER| registration. +namespace internal { +template <typename T> +struct DeleterImpl<T, typename std::enable_if<T::kAllowUniquePtr>::type> { + static void Free(T *t) { Delete(t); } +}; +} + +// MakeUnique behaves like |std::make_unique| but returns nullptr on allocation +// error. +template <typename T, typename... Args> +UniquePtr<T> MakeUnique(Args &&... args) { + return UniquePtr<T>(New<T>(std::forward<Args>(args)...)); +} + +#if defined(BORINGSSL_ALLOW_CXX_RUNTIME) +#define HAS_VIRTUAL_DESTRUCTOR +#define PURE_VIRTUAL = 0 +#else +// HAS_VIRTUAL_DESTRUCTOR should be declared in any base class which defines a +// virtual destructor. This avoids a dependency on |_ZdlPv| and prevents the +// class from being used with |delete|. +#define HAS_VIRTUAL_DESTRUCTOR \ + void operator delete(void *) { abort(); } + +// PURE_VIRTUAL should be used instead of = 0 when defining pure-virtual +// functions. This avoids a dependency on |__cxa_pure_virtual| but loses +// compile-time checking. +#define PURE_VIRTUAL { abort(); } #endif +// CONSTEXPR_ARRAY works around a VS 2015 bug where ranged for loops don't work +// on constexpr arrays. +#if defined(_MSC_VER) && !defined(__clang__) && _MSC_VER < 1910 +#define CONSTEXPR_ARRAY const +#else +#define CONSTEXPR_ARRAY constexpr +#endif -/* Cipher suites. */ +// Array is an owning array of elements of |T|. +template <typename T> +class Array { + public: + // Array's default constructor creates an empty array. + Array() {} + Array(const Array &) = delete; + Array(Array &&other) { *this = std::move(other); } + + ~Array() { Reset(); } + + Array &operator=(const Array &) = delete; + Array &operator=(Array &&other) { + Reset(); + other.Release(&data_, &size_); + return *this; + } + + const T *data() const { return data_; } + T *data() { return data_; } + size_t size() const { return size_; } + bool empty() const { return size_ == 0; } + + const T &operator[](size_t i) const { return data_[i]; } + T &operator[](size_t i) { return data_[i]; } + + T *begin() { return data_; } + const T *cbegin() const { return data_; } + T *end() { return data_ + size_; } + const T *cend() const { return data_ + size_; } + + void Reset() { Reset(nullptr, 0); } + + // Reset releases the current contents of the array and takes ownership of the + // raw pointer supplied by the caller. + void Reset(T *new_data, size_t new_size) { + for (size_t i = 0; i < size_; i++) { + data_[i].~T(); + } + OPENSSL_free(data_); + data_ = new_data; + size_ = new_size; + } + + // Release releases ownership of the array to a raw pointer supplied by the + // caller. + void Release(T **out, size_t *out_size) { + *out = data_; + *out_size = size_; + data_ = nullptr; + size_ = 0; + } + + // Init replaces the array with a newly-allocated array of |new_size| + // default-constructed copies of |T|. It returns true on success and false on + // error. + // + // Note that if |T| is a primitive type like |uint8_t|, it is uninitialized. 
+ bool Init(size_t new_size) { + Reset(); + if (new_size == 0) { + return true; + } + + if (new_size > std::numeric_limits<size_t>::max() / sizeof(T)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); + return false; + } + data_ = reinterpret_cast<T *>(OPENSSL_malloc(new_size * sizeof(T))); + if (data_ == nullptr) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return false; + } + size_ = new_size; + for (size_t i = 0; i < size_; i++) { + new (&data_[i]) T; + } + return true; + } + + // CopyFrom replaces the array with a newly-allocated copy of |in|. It returns + // true on success and false on error. + bool CopyFrom(Span<const T> in) { + if (!Init(in.size())) { + return false; + } + OPENSSL_memcpy(data_, in.data(), in.size()); + return true; + } + + private: + T *data_ = nullptr; + size_t size_ = 0; +}; + +// CBBFinishArray behaves like |CBB_finish| but stores the result in an Array. +bool CBBFinishArray(CBB *cbb, Array<uint8_t> *out); + + +// Protocol versions. +// +// Due to DTLS's historical wire version differences and to support multiple +// variants of the same protocol during development, we maintain two notions of +// version. +// +// The "version" or "wire version" is the actual 16-bit value that appears on +// the wire. It uniquely identifies a version and is also used at API +// boundaries. The set of supported versions differs between TLS and DTLS. Wire +// versions are opaque values and may not be compared numerically. +// +// The "protocol version" identifies the high-level handshake variant being +// used. DTLS versions map to the corresponding TLS versions. Draft TLS 1.3 +// variants all map to TLS 1.3. Protocol versions are sequential and may be +// compared numerically. + +// ssl_protocol_version_from_wire sets |*out| to the protocol version +// corresponding to wire version |version| and returns true. If |version| is not +// a valid TLS or DTLS version, it returns false. +// +// Note this simultaneously handles both DTLS and TLS. Use one of the +// higher-level functions below for most operations. +bool ssl_protocol_version_from_wire(uint16_t *out, uint16_t version); + +// ssl_get_version_range sets |*out_min_version| and |*out_max_version| to the +// minimum and maximum enabled protocol versions, respectively. +bool ssl_get_version_range(const SSL *ssl, uint16_t *out_min_version, + uint16_t *out_max_version); + +// ssl_supports_version returns whether |hs| supports |version|. +bool ssl_supports_version(SSL_HANDSHAKE *hs, uint16_t version); + +// ssl_add_supported_versions writes the supported versions of |hs| to |cbb|, in +// decreasing preference order. +bool ssl_add_supported_versions(SSL_HANDSHAKE *hs, CBB *cbb); + +// ssl_negotiate_version negotiates a common version based on |hs|'s preferences +// and the peer preference list in |peer_versions|. On success, it returns true +// and sets |*out_version| to the selected version. Otherwise, it returns false +// and sets |*out_alert| to an alert to send. +bool ssl_negotiate_version(SSL_HANDSHAKE *hs, uint8_t *out_alert, + uint16_t *out_version, const CBS *peer_versions); + +// ssl_protocol_version returns |ssl|'s protocol version. It is an error to +// call this function before the version is determined. +uint16_t ssl_protocol_version(const SSL *ssl); + +// ssl_is_draft21 returns whether the version corresponds to a draft21 TLS 1.3 +// variant. +bool ssl_is_draft21(uint16_t version); + +// ssl_is_draft22 returns whether the version corresponds to a draft22 TLS 1.3 +// variant. 
+bool ssl_is_draft22(uint16_t version); + +// ssl_is_resumption_experiment returns whether the version corresponds to a +// TLS 1.3 resumption experiment. +bool ssl_is_resumption_experiment(uint16_t version); + +// ssl_is_resumption_variant returns whether the version corresponds to a +// TLS 1.3 resumption experiment. +bool ssl_is_resumption_variant(enum tls13_variant_t variant); + +// ssl_is_resumption_client_ccs_experiment returns whether the version +// corresponds to a TLS 1.3 resumption experiment that sends a client CCS. +bool ssl_is_resumption_client_ccs_experiment(uint16_t version); + +// ssl_is_resumption_record_version_experiment returns whether the version +// corresponds to a TLS 1.3 resumption experiment that modifies the record +// version. +bool ssl_is_resumption_record_version_experiment(uint16_t version); + + +// Cipher suites. + +} // namespace bssl + +struct ssl_cipher_st { + // name is the OpenSSL name for the cipher. + const char *name; + // standard_name is the IETF name for the cipher. + const char *standard_name; + // id is the cipher suite value bitwise OR-d with 0x03000000. + uint32_t id; + + // algorithm_* determine the cipher suite. See constants below for the values. + uint32_t algorithm_mkey; + uint32_t algorithm_auth; + uint32_t algorithm_enc; + uint32_t algorithm_mac; + uint32_t algorithm_prf; +}; + +namespace bssl { -/* Bits for |algorithm_mkey| (key exchange algorithm). */ -#define SSL_kRSA 0x00000001L -#define SSL_kDHE 0x00000002L -#define SSL_kECDHE 0x00000004L -/* SSL_kPSK is only set for plain PSK, not ECDHE_PSK. */ -#define SSL_kPSK 0x00000008L -#define SSL_kGENERIC 0x00000010L +// Bits for |algorithm_mkey| (key exchange algorithm). +#define SSL_kRSA 0x00000001u +#define SSL_kECDHE 0x00000002u +// SSL_kPSK is only set for plain PSK, not ECDHE_PSK. +#define SSL_kPSK 0x00000004u +#define SSL_kGENERIC 0x00000008u -/* Bits for |algorithm_auth| (server authentication). */ -#define SSL_aRSA 0x00000001L -#define SSL_aECDSA 0x00000002L -/* SSL_aPSK is set for both PSK and ECDHE_PSK. */ -#define SSL_aPSK 0x00000004L -#define SSL_aGENERIC 0x00000008L +// Bits for |algorithm_auth| (server authentication). +#define SSL_aRSA 0x00000001u +#define SSL_aECDSA 0x00000002u +// SSL_aPSK is set for both PSK and ECDHE_PSK. +#define SSL_aPSK 0x00000004u +#define SSL_aGENERIC 0x00000008u #define SSL_aCERT (SSL_aRSA | SSL_aECDSA) -/* Bits for |algorithm_enc| (symmetric encryption). */ -#define SSL_3DES 0x00000001L -#define SSL_AES128 0x00000002L -#define SSL_AES256 0x00000004L -#define SSL_AES128GCM 0x00000008L -#define SSL_AES256GCM 0x00000010L -#define SSL_eNULL 0x00000020L -#define SSL_CHACHA20POLY1305 0x00000040L +// Bits for |algorithm_enc| (symmetric encryption). +#define SSL_3DES 0x00000001u +#define SSL_AES128 0x00000002u +#define SSL_AES256 0x00000004u +#define SSL_AES128GCM 0x00000008u +#define SSL_AES256GCM 0x00000010u +#define SSL_eNULL 0x00000020u +#define SSL_CHACHA20POLY1305 0x00000040u #define SSL_AES (SSL_AES128 | SSL_AES256 | SSL_AES128GCM | SSL_AES256GCM) -/* Bits for |algorithm_mac| (symmetric authentication). */ -#define SSL_SHA1 0x00000001L -#define SSL_SHA256 0x00000002L -#define SSL_SHA384 0x00000004L -/* SSL_AEAD is set for all AEADs. */ -#define SSL_AEAD 0x00000008L +// Bits for |algorithm_mac| (symmetric authentication). +#define SSL_SHA1 0x00000001u +#define SSL_SHA256 0x00000002u +#define SSL_SHA384 0x00000004u +// SSL_AEAD is set for all AEADs. +#define SSL_AEAD 0x00000008u -/* Bits for |algorithm_prf| (handshake digest). 
*/ +// Bits for |algorithm_prf| (handshake digest). #define SSL_HANDSHAKE_MAC_DEFAULT 0x1 #define SSL_HANDSHAKE_MAC_SHA256 0x2 #define SSL_HANDSHAKE_MAC_SHA384 0x4 -/* SSL_MAX_DIGEST is the number of digest types which exist. When adding a new - * one, update the table in ssl_cipher.c. */ +// SSL_MAX_DIGEST is the number of digest types which exist. When adding a new +// one, update the table in ssl_cipher.c. #define SSL_MAX_DIGEST 4 -/* ssl_cipher_get_evp_aead sets |*out_aead| to point to the correct EVP_AEAD - * object for |cipher| protocol version |version|. It sets |*out_mac_secret_len| - * and |*out_fixed_iv_len| to the MAC key length and fixed IV length, - * respectively. The MAC key length is zero except for legacy block and stream - * ciphers. It returns 1 on success and 0 on error. */ -int ssl_cipher_get_evp_aead(const EVP_AEAD **out_aead, - size_t *out_mac_secret_len, - size_t *out_fixed_iv_len, - const SSL_CIPHER *cipher, uint16_t version); - -/* ssl_get_handshake_digest returns the |EVP_MD| corresponding to - * |algorithm_prf| and the |version|. */ -const EVP_MD *ssl_get_handshake_digest(uint32_t algorithm_prf, - uint16_t version); - -/* ssl_create_cipher_list evaluates |rule_str| according to the ciphers in - * |ssl_method|. It sets |*out_cipher_list| to a newly-allocated - * |ssl_cipher_preference_list_st| containing the result. It returns - * |(*out_cipher_list)->ciphers| on success and NULL on failure. If |strict| is - * true, nonsense will be rejected. If false, nonsense will be silently - * ignored. */ -STACK_OF(SSL_CIPHER) * -ssl_create_cipher_list(const SSL_PROTOCOL_METHOD *ssl_method, - struct ssl_cipher_preference_list_st **out_cipher_list, - const char *rule_str, int strict); - -/* ssl_cipher_get_value returns the cipher suite id of |cipher|. */ +// ssl_cipher_get_evp_aead sets |*out_aead| to point to the correct EVP_AEAD +// object for |cipher| protocol version |version|. It sets |*out_mac_secret_len| +// and |*out_fixed_iv_len| to the MAC key length and fixed IV length, +// respectively. The MAC key length is zero except for legacy block and stream +// ciphers. It returns true on success and false on error. +bool ssl_cipher_get_evp_aead(const EVP_AEAD **out_aead, + size_t *out_mac_secret_len, + size_t *out_fixed_iv_len, const SSL_CIPHER *cipher, + uint16_t version, int is_dtls); + +// ssl_get_handshake_digest returns the |EVP_MD| corresponding to |version| and +// |cipher|. +const EVP_MD *ssl_get_handshake_digest(uint16_t version, + const SSL_CIPHER *cipher); + +// ssl_create_cipher_list evaluates |rule_str|. It sets |*out_cipher_list| to a +// newly-allocated |ssl_cipher_preference_list_st| containing the result. It +// returns true on success and false on failure. If |strict| is true, nonsense +// will be rejected. If false, nonsense will be silently ignored. An empty +// result is considered an error regardless of |strict|. +bool ssl_create_cipher_list( + struct ssl_cipher_preference_list_st **out_cipher_list, + const char *rule_str, bool strict); + +// ssl_cipher_get_value returns the cipher suite id of |cipher|. uint16_t ssl_cipher_get_value(const SSL_CIPHER *cipher); -/* ssl_cipher_get_key_type returns the |EVP_PKEY_*| value corresponding to the - * server key used in |cipher| or |EVP_PKEY_NONE| if there is none. */ -int ssl_cipher_get_key_type(const SSL_CIPHER *cipher); +// ssl_cipher_auth_mask_for_key returns the mask of cipher |algorithm_auth| +// values suitable for use with |key| in TLS 1.2 and below. 
+uint32_t ssl_cipher_auth_mask_for_key(const EVP_PKEY *key); -/* ssl_cipher_uses_certificate_auth returns one if |cipher| authenticates the - * server and, optionally, the client with a certificate. Otherwise it returns - * zero. */ -int ssl_cipher_uses_certificate_auth(const SSL_CIPHER *cipher); +// ssl_cipher_uses_certificate_auth returns whether |cipher| authenticates the +// server and, optionally, the client with a certificate. +bool ssl_cipher_uses_certificate_auth(const SSL_CIPHER *cipher); -/* ssl_cipher_requires_server_key_exchange returns 1 if |cipher| requires a - * ServerKeyExchange message. Otherwise it returns 0. - * - * This function may return zero while still allowing |cipher| an optional - * ServerKeyExchange. This is the case for plain PSK ciphers. */ -int ssl_cipher_requires_server_key_exchange(const SSL_CIPHER *cipher); +// ssl_cipher_requires_server_key_exchange returns whether |cipher| requires a +// ServerKeyExchange message. +// +// This function may return false while still allowing |cipher| an optional +// ServerKeyExchange. This is the case for plain PSK ciphers. +bool ssl_cipher_requires_server_key_exchange(const SSL_CIPHER *cipher); -/* ssl_cipher_get_record_split_len, for TLS 1.0 CBC mode ciphers, returns the - * length of an encrypted 1-byte record, for use in record-splitting. Otherwise - * it returns zero. */ +// ssl_cipher_get_record_split_len, for TLS 1.0 CBC mode ciphers, returns the +// length of an encrypted 1-byte record, for use in record-splitting. Otherwise +// it returns zero. size_t ssl_cipher_get_record_split_len(const SSL_CIPHER *cipher); -/* Transcript layer. */ - -/* SSL_TRANSCRIPT maintains the handshake transcript as a combination of a - * buffer and running hash. */ -typedef struct ssl_transcript_st { - /* buffer, if non-NULL, contains the handshake transcript. */ - BUF_MEM *buffer; - /* hash, if initialized with an |EVP_MD|, maintains the handshake hash. For - * TLS 1.1 and below, it is the SHA-1 half. */ - EVP_MD_CTX hash; - /* md5, if initialized with an |EVP_MD|, maintains the MD5 half of the - * handshake hash for TLS 1.1 and below. */ - EVP_MD_CTX md5; -} SSL_TRANSCRIPT; - -/* SSL_TRANSCRIPT_init initializes the handshake transcript. If called on an - * existing transcript, it resets the transcript and hash. It returns one on - * success and zero on failure. */ -int SSL_TRANSCRIPT_init(SSL_TRANSCRIPT *transcript); - -/* SSL_TRANSCRIPT_init_hash initializes the handshake hash based on the PRF and - * contents of the handshake transcript. Subsequent calls to - * |SSL_TRANSCRIPT_update| will update the rolling hash. It returns one on - * success and zero on failure. It is an error to call this function after the - * handshake buffer is released. */ -int SSL_TRANSCRIPT_init_hash(SSL_TRANSCRIPT *transcript, uint16_t version, - int algorithm_prf); - -/* SSL_TRANSCRIPT_cleanup cleans up the hash and transcript. */ -void SSL_TRANSCRIPT_cleanup(SSL_TRANSCRIPT *transcript); - -/* SSL_TRANSCRIPT_free_buffer releases the handshake buffer. Subsequent calls to - * |SSL_TRANSCRIPT_update| will not update the handshake buffer. */ -void SSL_TRANSCRIPT_free_buffer(SSL_TRANSCRIPT *transcript); - -/* SSL_TRANSCRIPT_digest_len returns the length of the PRF hash. */ -size_t SSL_TRANSCRIPT_digest_len(const SSL_TRANSCRIPT *transcript); - -/* SSL_TRANSCRIPT_md returns the PRF hash. For TLS 1.1 and below, this is - * |EVP_md5_sha1|. 
*/ -const EVP_MD *SSL_TRANSCRIPT_md(const SSL_TRANSCRIPT *transcript); - -/* SSL_TRANSCRIPT_update adds |in| to the handshake buffer and handshake hash, - * whichever is enabled. It returns one on success and zero on failure. */ -int SSL_TRANSCRIPT_update(SSL_TRANSCRIPT *transcript, const uint8_t *in, - size_t in_len); - -/* SSL_TRANSCRIPT_get_hash writes the handshake hash to |out| which must have - * room for at least |SSL_TRANSCRIPT_digest_len| bytes. On success, it returns - * one and sets |*out_len| to the number of bytes written. Otherwise, it returns - * zero. */ -int SSL_TRANSCRIPT_get_hash(const SSL_TRANSCRIPT *transcript, uint8_t *out, - size_t *out_len); - -/* SSL_TRANSCRIPT_ssl3_cert_verify_hash writes the SSL 3.0 CertificateVerify - * hash into the bytes pointed to by |out| and writes the number of bytes to - * |*out_len|. |out| must have room for |EVP_MAX_MD_SIZE| bytes. It returns one - * on success and zero on failure. */ -int SSL_TRANSCRIPT_ssl3_cert_verify_hash(SSL_TRANSCRIPT *transcript, - uint8_t *out, size_t *out_len, - const SSL_SESSION *session, - int signature_algorithm); - -/* SSL_TRANSCRIPT_finish_mac computes the MAC for the Finished message into the - * bytes pointed by |out| and writes the number of bytes to |*out_len|. |out| - * must have room for |EVP_MAX_MD_SIZE| bytes. It returns one on success and - * zero on failure. */ -int SSL_TRANSCRIPT_finish_mac(SSL_TRANSCRIPT *transcript, uint8_t *out, - size_t *out_len, const SSL_SESSION *session, - int from_server, uint16_t version); - -/* tls1_prf computes the PRF function for |ssl|. It writes |out_len| bytes to - * |out|, using |secret| as the secret and |label| as the label. |seed1| and - * |seed2| are concatenated to form the seed parameter. It returns one on - * success and zero on failure. */ -int tls1_prf(const EVP_MD *digest, uint8_t *out, size_t out_len, - const uint8_t *secret, size_t secret_len, const char *label, - size_t label_len, const uint8_t *seed1, size_t seed1_len, - const uint8_t *seed2, size_t seed2_len); - - -/* Encryption layer. */ - -/* SSL_AEAD_CTX contains information about an AEAD that is being used to encrypt - * an SSL connection. */ -typedef struct ssl_aead_ctx_st { - const SSL_CIPHER *cipher; - EVP_AEAD_CTX ctx; - /* fixed_nonce contains any bytes of the nonce that are fixed for all - * records. */ - uint8_t fixed_nonce[12]; - uint8_t fixed_nonce_len, variable_nonce_len; - /* variable_nonce_included_in_record is non-zero if the variable nonce - * for a record is included as a prefix before the ciphertext. */ - unsigned variable_nonce_included_in_record : 1; - /* random_variable_nonce is non-zero if the variable nonce is - * randomly generated, rather than derived from the sequence - * number. */ - unsigned random_variable_nonce : 1; - /* omit_length_in_ad is non-zero if the length should be omitted in the - * AEAD's ad parameter. */ - unsigned omit_length_in_ad : 1; - /* omit_version_in_ad is non-zero if the version should be omitted - * in the AEAD's ad parameter. */ - unsigned omit_version_in_ad : 1; - /* omit_ad is non-zero if the AEAD's ad parameter should be omitted. */ - unsigned omit_ad : 1; - /* xor_fixed_nonce is non-zero if the fixed nonce should be XOR'd into the - * variable nonce rather than prepended. */ - unsigned xor_fixed_nonce : 1; -} SSL_AEAD_CTX; - -/* SSL_AEAD_CTX_new creates a newly-allocated |SSL_AEAD_CTX| using the supplied - * key material. It returns NULL on error. 
Only one of |SSL_AEAD_CTX_open| or - * |SSL_AEAD_CTX_seal| may be used with the resulting object, depending on - * |direction|. |version| is the normalized protocol version, so DTLS 1.0 is - * represented as 0x0301, not 0xffef. */ -SSL_AEAD_CTX *SSL_AEAD_CTX_new(enum evp_aead_direction_t direction, - uint16_t version, const SSL_CIPHER *cipher, - const uint8_t *enc_key, size_t enc_key_len, - const uint8_t *mac_key, size_t mac_key_len, - const uint8_t *fixed_iv, size_t fixed_iv_len); - -/* SSL_AEAD_CTX_free frees |ctx|. */ -void SSL_AEAD_CTX_free(SSL_AEAD_CTX *ctx); - -/* SSL_AEAD_CTX_explicit_nonce_len returns the length of the explicit nonce for - * |ctx|, if any. |ctx| may be NULL to denote the null cipher. */ -size_t SSL_AEAD_CTX_explicit_nonce_len(const SSL_AEAD_CTX *ctx); - -/* SSL_AEAD_CTX_max_overhead returns the maximum overhead of calling - * |SSL_AEAD_CTX_seal|. |ctx| may be NULL to denote the null cipher. */ -size_t SSL_AEAD_CTX_max_overhead(const SSL_AEAD_CTX *ctx); - -/* SSL_AEAD_CTX_open authenticates and decrypts |in_len| bytes from |in| - * in-place. On success, it sets |*out| to the plaintext in |in| and returns - * one. Otherwise, it returns zero. |ctx| may be NULL to denote the null cipher. - * The output will always be |explicit_nonce_len| bytes ahead of |in|. */ -int SSL_AEAD_CTX_open(SSL_AEAD_CTX *ctx, CBS *out, uint8_t type, - uint16_t wire_version, const uint8_t seqnum[8], - uint8_t *in, size_t in_len); - -/* SSL_AEAD_CTX_seal encrypts and authenticates |in_len| bytes from |in| and - * writes the result to |out|. It returns one on success and zero on - * error. |ctx| may be NULL to denote the null cipher. - * - * If |in| and |out| alias then |out| + |explicit_nonce_len| must be == |in|. */ -int SSL_AEAD_CTX_seal(SSL_AEAD_CTX *ctx, uint8_t *out, size_t *out_len, - size_t max_out, uint8_t type, uint16_t wire_version, - const uint8_t seqnum[8], const uint8_t *in, - size_t in_len); +// Transcript layer. + +// SSLTranscript maintains the handshake transcript as a combination of a +// buffer and running hash. +class SSLTranscript { + public: + SSLTranscript(); + ~SSLTranscript(); + + // Init initializes the handshake transcript. If called on an existing + // transcript, it resets the transcript and hash. It returns true on success + // and false on failure. + bool Init(); + + // InitHash initializes the handshake hash based on the PRF and contents of + // the handshake transcript. Subsequent calls to |Update| will update the + // rolling hash. It returns one on success and zero on failure. It is an error + // to call this function after the handshake buffer is released. + bool InitHash(uint16_t version, const SSL_CIPHER *cipher); + + // UpdateForHelloRetryRequest resets the rolling hash with the + // HelloRetryRequest construction. It returns true on success and false on + // failure. It is an error to call this function before the handshake buffer + // is released. + bool UpdateForHelloRetryRequest(); + + // CopyHashContext copies the hash context into |ctx| and returns true on + // success. + bool CopyHashContext(EVP_MD_CTX *ctx); + + Span buffer() { + return MakeConstSpan(reinterpret_cast(buffer_->data), + buffer_->length); + } + + // FreeBuffer releases the handshake buffer. Subsequent calls to + // |Update| will not update the handshake buffer. + void FreeBuffer(); + + // DigestLen returns the length of the PRF hash. + size_t DigestLen() const; + + // Digest returns the PRF hash. For TLS 1.1 and below, this is + // |EVP_md5_sha1|. 
+ const EVP_MD *Digest() const; + + // Update adds |in| to the handshake buffer and handshake hash, whichever is + // enabled. It returns true on success and false on failure. + bool Update(Span<const uint8_t> in); + + // GetHash writes the handshake hash to |out| which must have room for at + // least |DigestLen| bytes. On success, it returns true and sets |*out_len| to + // the number of bytes written. Otherwise, it returns false. + bool GetHash(uint8_t *out, size_t *out_len); + + // GetSSL3CertVerifyHash writes the SSL 3.0 CertificateVerify hash into the + // bytes pointed to by |out| and writes the number of bytes to + // |*out_len|. |out| must have room for |EVP_MAX_MD_SIZE| bytes. It returns + // one on success and zero on failure. + bool GetSSL3CertVerifyHash(uint8_t *out, size_t *out_len, + const SSL_SESSION *session, + uint16_t signature_algorithm); + + // GetFinishedMAC computes the MAC for the Finished message into the bytes + // pointed by |out| and writes the number of bytes to |*out_len|. |out| must + // have room for |EVP_MAX_MD_SIZE| bytes. It returns true on success and false + // on failure. + bool GetFinishedMAC(uint8_t *out, size_t *out_len, const SSL_SESSION *session, + bool from_server); + + private: + // buffer_, if non-null, contains the handshake transcript. + UniquePtr<BUF_MEM> buffer_; + // hash, if initialized with an |EVP_MD|, maintains the handshake hash. For + // TLS 1.1 and below, it is the SHA-1 half. + ScopedEVP_MD_CTX hash_; + // md5, if initialized with an |EVP_MD|, maintains the MD5 half of the + // handshake hash for TLS 1.1 and below. + ScopedEVP_MD_CTX md5_; +}; + +// tls1_prf computes the PRF function for |ssl|. It fills |out|, using |secret| +// as the secret and |label| as the label. |seed1| and |seed2| are concatenated +// to form the seed parameter. It returns true on success and false on failure. +bool tls1_prf(const EVP_MD *digest, Span<uint8_t> out, + Span<const uint8_t> secret, Span<const char> label, + Span<const uint8_t> seed1, Span<const uint8_t> seed2); + + +// Encryption layer. + +// SSLAEADContext contains information about an AEAD that is being used to +// encrypt an SSL connection. +class SSLAEADContext { + public: + SSLAEADContext(uint16_t version, bool is_dtls, const SSL_CIPHER *cipher); + ~SSLAEADContext(); + static constexpr bool kAllowUniquePtr = true; + + SSLAEADContext(const SSLAEADContext &&) = delete; + SSLAEADContext &operator=(const SSLAEADContext &&) = delete; + + // CreateNullCipher creates an |SSLAEADContext| for the null cipher. + static UniquePtr<SSLAEADContext> CreateNullCipher(bool is_dtls); + + // Create creates an |SSLAEADContext| using the supplied key material. It + // returns nullptr on error. Only one of |Open| or |Seal| may be used with the + // resulting object, depending on |direction|. |version| is the normalized + // protocol version, so DTLS 1.0 is represented as 0x0301, not 0xffef. + static UniquePtr<SSLAEADContext> Create(enum evp_aead_direction_t direction, + uint16_t version, int is_dtls, + const SSL_CIPHER *cipher, + Span<const uint8_t> enc_key, + Span<const uint8_t> mac_key, + Span<const uint8_t> fixed_iv); + + // SetVersionIfNullCipher sets the version the SSLAEADContext for the null + // cipher, to make version-specific determinations in the record layer prior + // to a cipher being selected. + void SetVersionIfNullCipher(uint16_t version); + + // ProtocolVersion returns the protocol version associated with this + // SSLAEADContext. It can only be called once |version_| has been set to a + // valid value.
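For orientation, a minimal sketch of the SSLTranscript lifecycle declared above (illustrative only, not part of the upstream change; |version|, |cipher| and |msg| are placeholders):

static bool transcript_digest(uint16_t version, const SSL_CIPHER *cipher,
                              Span<const uint8_t> msg,
                              uint8_t out[EVP_MAX_MD_SIZE], size_t *out_len) {
  SSLTranscript transcript;
  // Buffer first, then switch on the rolling hash once the cipher is known.
  if (!transcript.Init() ||
      !transcript.InitHash(version, cipher) ||
      !transcript.Update(msg)) {
    return false;
  }
  return transcript.GetHash(out, out_len);
}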
+ uint16_t ProtocolVersion() const; + + // RecordVersion returns the record version that should be used with this + // SSLAEADContext for record construction and crypto. + uint16_t RecordVersion() const; + + const SSL_CIPHER *cipher() const { return cipher_; } + + // is_null_cipher returns true if this is the null cipher. + bool is_null_cipher() const { return !cipher_; } + + // ExplicitNonceLen returns the length of the explicit nonce. + size_t ExplicitNonceLen() const; + + // MaxOverhead returns the maximum overhead of calling |Seal|. + size_t MaxOverhead() const; + + // SuffixLen calculates the suffix length written by |SealScatter| and writes + // it to |*out_suffix_len|. It returns true on success and false on error. + // |in_len| and |extra_in_len| should equal the argument of the same names + // passed to |SealScatter|. + bool SuffixLen(size_t *out_suffix_len, size_t in_len, + size_t extra_in_len) const; + + // Open authenticates and decrypts |in| in-place. On success, it sets |*out| + // to the plaintext in |in| and returns true. Otherwise, it returns + // false. The output will always be |ExplicitNonceLen| bytes ahead of |in|. + bool Open(Span *out, uint8_t type, uint16_t record_version, + const uint8_t seqnum[8], Span in); + + // Seal encrypts and authenticates |in_len| bytes from |in| and writes the + // result to |out|. It returns true on success and false on error. + // + // If |in| and |out| alias then |out| + |ExplicitNonceLen| must be == |in|. + bool Seal(uint8_t *out, size_t *out_len, size_t max_out, uint8_t type, + uint16_t record_version, const uint8_t seqnum[8], const uint8_t *in, + size_t in_len); + + // SealScatter encrypts and authenticates |in_len| bytes from |in| and splits + // the result between |out_prefix|, |out| and |out_suffix|. It returns one on + // success and zero on error. + // + // On successful return, exactly |ExplicitNonceLen| bytes are written to + // |out_prefix|, |in_len| bytes to |out|, and |SuffixLen| bytes to + // |out_suffix|. + // + // |extra_in| may point to an additional plaintext buffer. If present, + // |extra_in_len| additional bytes are encrypted and authenticated, and the + // ciphertext is written to the beginning of |out_suffix|. |SuffixLen| should + // be used to size |out_suffix| accordingly. + // + // If |in| and |out| alias then |out| must be == |in|. Other arguments may not + // alias anything. + bool SealScatter(uint8_t *out_prefix, uint8_t *out, uint8_t *out_suffix, + uint8_t type, uint16_t record_version, + const uint8_t seqnum[8], const uint8_t *in, size_t in_len, + const uint8_t *extra_in, size_t extra_in_len); + + bool GetIV(const uint8_t **out_iv, size_t *out_iv_len) const; + + private: + // GetAdditionalData writes the additional data into |out| and returns the + // number of bytes written. + size_t GetAdditionalData(uint8_t out[13], uint8_t type, + uint16_t record_version, const uint8_t seqnum[8], + size_t plaintext_len); + + const SSL_CIPHER *cipher_; + ScopedEVP_AEAD_CTX ctx_; + // fixed_nonce_ contains any bytes of the nonce that are fixed for all + // records. + uint8_t fixed_nonce_[12]; + uint8_t fixed_nonce_len_ = 0, variable_nonce_len_ = 0; + // version_ is the wire version that should be used with this AEAD. + uint16_t version_; + // is_dtls_ is whether DTLS is being used with this AEAD. + bool is_dtls_; + // variable_nonce_included_in_record_ is true if the variable nonce + // for a record is included as a prefix before the ciphertext. 
+ bool variable_nonce_included_in_record_ : 1; + // random_variable_nonce_ is true if the variable nonce is + // randomly generated, rather than derived from the sequence + // number. + bool random_variable_nonce_ : 1; + // omit_length_in_ad_ is true if the length should be omitted in the + // AEAD's ad parameter. + bool omit_length_in_ad_ : 1; + // omit_version_in_ad_ is true if the version should be omitted + // in the AEAD's ad parameter. + bool omit_version_in_ad_ : 1; + // omit_ad_ is true if the AEAD's ad parameter should be omitted. + bool omit_ad_ : 1; + // xor_fixed_nonce_ is true if the fixed nonce should be XOR'd into the + // variable nonce rather than prepended. + bool xor_fixed_nonce_ : 1; +}; -/* DTLS replay bitmap. */ +// DTLS replay bitmap. -/* DTLS1_BITMAP maintains a sliding window of 64 sequence numbers to detect - * replayed packets. It should be initialized by zeroing every field. */ -typedef struct dtls1_bitmap_st { - /* map is a bit mask of the last 64 sequence numbers. Bit - * |1< *out, size_t *out_consumed, + uint8_t *out_alert, Span in); + +// dtls_open_record implements |tls_open_record| for DTLS. It only returns +// |ssl_open_record_partial| if |in| was empty and sets |*out_consumed| to +// zero. The caller should read one packet and try again. +enum ssl_open_record_t dtls_open_record(SSL *ssl, uint8_t *out_type, + Span *out, size_t *out_consumed, - uint8_t *out_alert, uint8_t *in, - size_t in_len); - -/* ssl_seal_align_prefix_len returns the length of the prefix before the start - * of the bulk of the ciphertext when sealing a record with |ssl|. Callers may - * use this to align buffers. - * - * Note when TLS 1.0 CBC record-splitting is enabled, this includes the one byte - * record and is the offset into second record's ciphertext. Thus sealing a - * small record may result in a smaller output than this value. - * - * TODO(davidben): Is this alignment valuable? Record-splitting makes this a - * mess. */ + uint8_t *out_alert, Span in); + +// ssl_seal_align_prefix_len returns the length of the prefix before the start +// of the bulk of the ciphertext when sealing a record with |ssl|. Callers may +// use this to align buffers. +// +// Note when TLS 1.0 CBC record-splitting is enabled, this includes the one byte +// record and is the offset into second record's ciphertext. Thus sealing a +// small record may result in a smaller output than this value. +// +// TODO(davidben): Is this alignment valuable? Record-splitting makes this a +// mess. size_t ssl_seal_align_prefix_len(const SSL *ssl); -/* tls_seal_record seals a new record of type |type| and body |in| and writes it - * to |out|. At most |max_out| bytes will be written. It returns one on success - * and zero on error. If enabled, |tls_seal_record| implements TLS 1.0 CBC 1/n-1 - * record splitting and may write two records concatenated. - * - * For a large record, the bulk of the ciphertext will begin - * |ssl_seal_align_prefix_len| bytes into out. Aligning |out| appropriately may - * improve performance. It writes at most |in_len| + |SSL_max_seal_overhead| - * bytes to |out|. - * - * |in| and |out| may not alias. */ +// tls_seal_record seals a new record of type |type| and body |in| and writes it +// to |out|. At most |max_out| bytes will be written. It returns one on success +// and zero on error. If enabled, |tls_seal_record| implements TLS 1.0 CBC 1/n-1 +// record splitting and may write two records concatenated. 
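A small sketch of how the null-cipher context above can size a record buffer before any keys are installed; |plaintext_len| is hypothetical and TLS1_2_VERSION comes from the public headers:

static bool max_sealed_len(size_t plaintext_len, size_t *out_len) {
  UniquePtr<SSLAEADContext> aead =
      SSLAEADContext::CreateNullCipher(/*is_dtls=*/false);
  if (!aead) {
    return false;
  }
  // Record-layer decisions can be made before a cipher is negotiated.
  aead->SetVersionIfNullCipher(TLS1_2_VERSION);
  *out_len = plaintext_len + aead->ExplicitNonceLen() + aead->MaxOverhead();
  return true;
}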
+// +// For a large record, the bulk of the ciphertext will begin +// |ssl_seal_align_prefix_len| bytes into out. Aligning |out| appropriately may +// improve performance. It writes at most |in_len| + |SSL_max_seal_overhead| +// bytes to |out|. +// +// |in| and |out| may not alias. int tls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, uint8_t type, const uint8_t *in, size_t in_len); @@ -514,87 +854,83 @@ enum dtls1_use_epoch_t { dtls1_use_current_epoch, }; -/* dtls_max_seal_overhead returns the maximum overhead, in bytes, of sealing a - * record. */ +// dtls_max_seal_overhead returns the maximum overhead, in bytes, of sealing a +// record. size_t dtls_max_seal_overhead(const SSL *ssl, enum dtls1_use_epoch_t use_epoch); -/* dtls_seal_prefix_len returns the number of bytes of prefix to reserve in - * front of the plaintext when sealing a record in-place. */ +// dtls_seal_prefix_len returns the number of bytes of prefix to reserve in +// front of the plaintext when sealing a record in-place. size_t dtls_seal_prefix_len(const SSL *ssl, enum dtls1_use_epoch_t use_epoch); -/* dtls_seal_record implements |tls_seal_record| for DTLS. |use_epoch| selects - * which epoch's cipher state to use. Unlike |tls_seal_record|, |in| and |out| - * may alias but, if they do, |in| must be exactly |dtls_seal_prefix_len| bytes - * ahead of |out|. */ +// dtls_seal_record implements |tls_seal_record| for DTLS. |use_epoch| selects +// which epoch's cipher state to use. Unlike |tls_seal_record|, |in| and |out| +// may alias but, if they do, |in| must be exactly |dtls_seal_prefix_len| bytes +// ahead of |out|. int dtls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, uint8_t type, const uint8_t *in, size_t in_len, enum dtls1_use_epoch_t use_epoch); -/* ssl_process_alert processes |in| as an alert and updates |ssl|'s shutdown - * state. It returns one of |ssl_open_record_discard|, |ssl_open_record_error|, - * |ssl_open_record_close_notify|, or |ssl_open_record_fatal_alert| as - * appropriate. */ +// ssl_process_alert processes |in| as an alert and updates |ssl|'s shutdown +// state. It returns one of |ssl_open_record_discard|, |ssl_open_record_error|, +// |ssl_open_record_close_notify|, or |ssl_open_record_fatal_alert| as +// appropriate. enum ssl_open_record_t ssl_process_alert(SSL *ssl, uint8_t *out_alert, - const uint8_t *in, size_t in_len); + Span in); -/* Private key operations. */ +// Private key operations. -/* ssl_has_private_key returns one if |ssl| has a private key - * configured and zero otherwise. */ +// ssl_has_private_key returns one if |ssl| has a private key +// configured and zero otherwise. int ssl_has_private_key(const SSL *ssl); -/* ssl_is_ecdsa_key_type returns one if |type| is an ECDSA key type and zero - * otherwise. */ -int ssl_is_ecdsa_key_type(int type); - -/* ssl_private_key_* call the corresponding function on the - * |SSL_PRIVATE_KEY_METHOD| for |ssl|, if configured. Otherwise, they implement - * the operation with |EVP_PKEY|. */ - -int ssl_private_key_type(SSL *ssl); - -size_t ssl_private_key_max_signature_len(SSL *ssl); +// ssl_private_key_* perform the corresponding operation on +// |SSL_PRIVATE_KEY_METHOD|. If there is a custom private key configured, they +// call the corresponding function or |complete| depending on whether there is a +// pending operation. Otherwise, they implement the operation with +// |EVP_PKEY|. 
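The ssl_private_key_* helpers declared just below are typically driven with a retry pattern along these lines (a hedged sketch only; the ssl_private_key_result_t constants are assumed from the public ssl.h, and |hs|, |sigalg| and |in| are placeholders):

static enum ssl_hs_wait_t sign_step(SSL_HANDSHAKE *hs, uint16_t sigalg,
                                    Span<const uint8_t> in, uint8_t *out,
                                    size_t *out_len, size_t max_out) {
  switch (ssl_private_key_sign(hs, out, out_len, max_out, sigalg, in)) {
    case ssl_private_key_success:
      return ssl_hs_ok;  // |out| now holds the signature.
    case ssl_private_key_retry:
      // Re-enter this state later; the helper then routes to |complete|.
      return ssl_hs_private_key_operation;
    case ssl_private_key_failure:
    default:
      return ssl_hs_error;
  }
}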
enum ssl_private_key_result_t ssl_private_key_sign( - SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, - uint16_t signature_algorithm, const uint8_t *in, size_t in_len); - -enum ssl_private_key_result_t ssl_private_key_decrypt( - SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, - const uint8_t *in, size_t in_len); + SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len, size_t max_out, + uint16_t sigalg, Span in); -enum ssl_private_key_result_t ssl_private_key_complete(SSL *ssl, uint8_t *out, - size_t *out_len, - size_t max_out); +enum ssl_private_key_result_t ssl_private_key_decrypt(SSL_HANDSHAKE *hs, + uint8_t *out, + size_t *out_len, + size_t max_out, + Span in); -/* ssl_private_key_supports_signature_algorithm returns one if |ssl|'s private - * key supports |signature_algorithm| and zero otherwise. */ -int ssl_private_key_supports_signature_algorithm(SSL *ssl, - uint16_t signature_algorithm); +// ssl_private_key_supports_signature_algorithm returns whether |hs|'s private +// key supports |sigalg|. +bool ssl_private_key_supports_signature_algorithm(SSL_HANDSHAKE *hs, + uint16_t sigalg); -/* ssl_public_key_verify verifies that the |signature| is valid for the public - * key |pkey| and input |in|, using the |signature_algorithm| specified. */ -int ssl_public_key_verify( - SSL *ssl, const uint8_t *signature, size_t signature_len, - uint16_t signature_algorithm, EVP_PKEY *pkey, - const uint8_t *in, size_t in_len); +// ssl_public_key_verify verifies that the |signature| is valid for the public +// key |pkey| and input |in|, using the signature algorithm |sigalg|. +bool ssl_public_key_verify(SSL *ssl, Span signature, + uint16_t sigalg, EVP_PKEY *pkey, + Span in); -/* Custom extensions */ +// Custom extensions -typedef struct ssl_handshake_st SSL_HANDSHAKE; +} // namespace bssl -/* ssl_custom_extension (a.k.a. SSL_CUSTOM_EXTENSION) is a structure that - * contains information about custom-extension callbacks. */ -struct ssl_custom_extension { +// |SSL_CUSTOM_EXTENSION| is a structure that contains information about +// custom-extension callbacks. It is defined unnamespaced for compatibility with +// |STACK_OF(SSL_CUSTOM_EXTENSION)|. +typedef struct ssl_custom_extension { SSL_custom_ext_add_cb add_callback; void *add_arg; SSL_custom_ext_free_cb free_callback; SSL_custom_ext_parse_cb parse_callback; void *parse_arg; uint16_t value; -}; +} SSL_CUSTOM_EXTENSION; + +DEFINE_STACK_OF(SSL_CUSTOM_EXTENSION) + +namespace bssl { void SSL_CUSTOM_EXTENSION_free(SSL_CUSTOM_EXTENSION *custom_extension); @@ -606,624 +942,711 @@ int custom_ext_parse_clienthello(SSL_HANDSHAKE *hs, int *out_alert, int custom_ext_add_serverhello(SSL_HANDSHAKE *hs, CBB *extensions); -/* ECDH groups. */ - -typedef struct ssl_ecdh_ctx_st SSL_ECDH_CTX; - -/* An SSL_ECDH_METHOD is an implementation of ECDH-like key exchanges for - * TLS. */ -typedef struct ssl_ecdh_method_st { - int nid; - uint16_t group_id; - const char name[8]; - - /* cleanup releases state in |ctx|. */ - void (*cleanup)(SSL_ECDH_CTX *ctx); - - /* offer generates a keypair and writes the public value to - * |out_public_key|. It returns one on success and zero on error. */ - int (*offer)(SSL_ECDH_CTX *ctx, CBB *out_public_key); - - /* accept performs a key exchange against the |peer_key| generated by |offer|. - * On success, it returns one, writes the public value to |out_public_key|, - * and sets |*out_secret| and |*out_secret_len| to a newly-allocated buffer - * containing the shared secret. The caller must release this buffer with - * |OPENSSL_free|. 
On failure, it returns zero and sets |*out_alert| to an - * alert to send to the peer. */ - int (*accept)(SSL_ECDH_CTX *ctx, CBB *out_public_key, uint8_t **out_secret, - size_t *out_secret_len, uint8_t *out_alert, - const uint8_t *peer_key, size_t peer_key_len); - - /* finish performs a key exchange against the |peer_key| generated by - * |accept|. On success, it returns one and sets |*out_secret| and - * |*out_secret_len| to a newly-allocated buffer containing the shared - * secret. The caller must release this buffer with |OPENSSL_free|. On - * failure, it returns zero and sets |*out_alert| to an alert to send to the - * peer. */ - int (*finish)(SSL_ECDH_CTX *ctx, uint8_t **out_secret, size_t *out_secret_len, - uint8_t *out_alert, const uint8_t *peer_key, - size_t peer_key_len); - - /* get_key initializes |out| with a length-prefixed key from |cbs|. It returns - * one on success and zero on error. */ - int (*get_key)(CBS *cbs, CBS *out); - - /* add_key initializes |out_contents| to receive a key. Typically it will then - * be passed to |offer| or |accept|. It returns one on success and zero on - * error. */ - int (*add_key)(CBB *cbb, CBB *out_contents); -} SSL_ECDH_METHOD; - -struct ssl_ecdh_ctx_st { - const SSL_ECDH_METHOD *method; - void *data; +// Key shares. + +// SSLKeyShare abstracts over Diffie-Hellman-like key exchanges. +class SSLKeyShare { + public: + virtual ~SSLKeyShare() {} + static constexpr bool kAllowUniquePtr = true; + HAS_VIRTUAL_DESTRUCTOR + + // Create returns a SSLKeyShare instance for use with group |group_id| or + // nullptr on error. + static UniquePtr Create(uint16_t group_id); + + // GroupID returns the group ID. + virtual uint16_t GroupID() const PURE_VIRTUAL; + + // Offer generates a keypair and writes the public value to + // |out_public_key|. It returns true on success and false on error. + virtual bool Offer(CBB *out_public_key) PURE_VIRTUAL; + + // Accept performs a key exchange against the |peer_key| generated by |offer|. + // On success, it returns true, writes the public value to |out_public_key|, + // and sets |*out_secret| the shared secret. On failure, it returns false and + // sets |*out_alert| to an alert to send to the peer. + // + // The default implementation calls |Offer| and then |Finish|, assuming a key + // exchange protocol where the peers are symmetric. + virtual bool Accept(CBB *out_public_key, Array *out_secret, + uint8_t *out_alert, Span peer_key); + + // Finish performs a key exchange against the |peer_key| generated by + // |Accept|. On success, it returns true and sets |*out_secret| to the shared + // secret. On failure, it returns zero and sets |*out_alert| to an alert to + // send to the peer. + virtual bool Finish(Array *out_secret, uint8_t *out_alert, + Span peer_key) PURE_VIRTUAL; }; -/* ssl_nid_to_group_id looks up the group corresponding to |nid|. On success, it - * sets |*out_group_id| to the group ID and returns one. Otherwise, it returns - * zero. */ +// ssl_nid_to_group_id looks up the group corresponding to |nid|. On success, it +// sets |*out_group_id| to the group ID and returns one. Otherwise, it returns +// zero. int ssl_nid_to_group_id(uint16_t *out_group_id, int nid); -/* ssl_name_to_group_id looks up the group corresponding to the |name| string - * of length |len|. On success, it sets |*out_group_id| to the group ID and - * returns one. Otherwise, it returns zero. */ +// ssl_name_to_group_id looks up the group corresponding to the |name| string +// of length |len|. 
On success, it sets |*out_group_id| to the group ID and +// returns one. Otherwise, it returns zero. int ssl_name_to_group_id(uint16_t *out_group_id, const char *name, size_t len); -/* SSL_ECDH_CTX_init sets up |ctx| for use with curve |group_id|. It returns one - * on success and zero on error. */ -int SSL_ECDH_CTX_init(SSL_ECDH_CTX *ctx, uint16_t group_id); - -/* SSL_ECDH_CTX_init_for_dhe sets up |ctx| for use with legacy DHE-based ciphers - * where the server specifies a group. It takes ownership of |params|. */ -void SSL_ECDH_CTX_init_for_dhe(SSL_ECDH_CTX *ctx, DH *params); -/* SSL_ECDH_CTX_cleanup releases memory associated with |ctx|. It is legal to - * call it in the zero state. */ -void SSL_ECDH_CTX_cleanup(SSL_ECDH_CTX *ctx); +// Handshake messages. -/* SSL_ECDH_CTX_get_id returns the group ID for |ctx|. */ -uint16_t SSL_ECDH_CTX_get_id(const SSL_ECDH_CTX *ctx); - -/* SSL_ECDH_CTX_get_key calls the |get_key| method of |SSL_ECDH_METHOD|. */ -int SSL_ECDH_CTX_get_key(SSL_ECDH_CTX *ctx, CBS *cbs, CBS *out); - -/* SSL_ECDH_CTX_add_key calls the |add_key| method of |SSL_ECDH_METHOD|. */ -int SSL_ECDH_CTX_add_key(SSL_ECDH_CTX *ctx, CBB *cbb, CBB *out_contents); +struct SSLMessage { + bool is_v2_hello; + uint8_t type; + CBS body; + // raw is the entire serialized handshake message, including the TLS or DTLS + // message header. + CBS raw; +}; -/* SSL_ECDH_CTX_offer calls the |offer| method of |SSL_ECDH_METHOD|. */ -int SSL_ECDH_CTX_offer(SSL_ECDH_CTX *ctx, CBB *out_public_key); +// SSL_MAX_HANDSHAKE_FLIGHT is the number of messages, including +// ChangeCipherSpec, in the longest handshake flight. Currently this is the +// client's second leg in a full handshake when client certificates, NPN, and +// Channel ID, are all enabled. +#define SSL_MAX_HANDSHAKE_FLIGHT 7 -/* SSL_ECDH_CTX_accept calls the |accept| method of |SSL_ECDH_METHOD|. */ -int SSL_ECDH_CTX_accept(SSL_ECDH_CTX *ctx, CBB *out_public_key, - uint8_t **out_secret, size_t *out_secret_len, - uint8_t *out_alert, const uint8_t *peer_key, - size_t peer_key_len); +extern const uint8_t kHelloRetryRequest[SSL3_RANDOM_SIZE]; -/* SSL_ECDH_CTX_finish the |finish| method of |SSL_ECDH_METHOD|. */ -int SSL_ECDH_CTX_finish(SSL_ECDH_CTX *ctx, uint8_t **out_secret, - size_t *out_secret_len, uint8_t *out_alert, - const uint8_t *peer_key, size_t peer_key_len); +// ssl_max_handshake_message_len returns the maximum number of bytes permitted +// in a handshake message for |ssl|. +size_t ssl_max_handshake_message_len(const SSL *ssl); -/* Handshake messages. */ +// tls_can_accept_handshake_data returns whether |ssl| is able to accept more +// data into handshake buffer. +bool tls_can_accept_handshake_data(const SSL *ssl, uint8_t *out_alert); -/* SSL_MAX_HANDSHAKE_FLIGHT is the number of messages, including - * ChangeCipherSpec, in the longest handshake flight. Currently this is the - * client's second leg in a full handshake when client certificates, NPN, and - * Channel ID, are all enabled. */ -#define SSL_MAX_HANDSHAKE_FLIGHT 7 +// tls_has_unprocessed_handshake_data returns whether there is buffered +// handshake data that has not been consumed by |get_message|. +bool tls_has_unprocessed_handshake_data(const SSL *ssl); -/* ssl_max_handshake_message_len returns the maximum number of bytes permitted - * in a handshake message for |ssl|. */ -size_t ssl_max_handshake_message_len(const SSL *ssl); +// dtls_has_unprocessed_handshake_data behaves like +// |tls_has_unprocessed_handshake_data| for DTLS. 
+bool dtls_has_unprocessed_handshake_data(const SSL *ssl); -/* dtls_clear_incoming_messages releases all buffered incoming messages. */ -void dtls_clear_incoming_messages(SSL *ssl); +struct DTLS_OUTGOING_MESSAGE { + DTLS_OUTGOING_MESSAGE() {} + DTLS_OUTGOING_MESSAGE(const DTLS_OUTGOING_MESSAGE &) = delete; + DTLS_OUTGOING_MESSAGE &operator=(const DTLS_OUTGOING_MESSAGE &) = delete; + ~DTLS_OUTGOING_MESSAGE() { Clear(); } -/* dtls_has_incoming_messages returns one if there are buffered incoming - * messages ahead of the current message and zero otherwise. */ -int dtls_has_incoming_messages(const SSL *ssl); + void Clear(); -typedef struct dtls_outgoing_message_st { - uint8_t *data; - uint32_t len; - uint16_t epoch; - char is_ccs; -} DTLS_OUTGOING_MESSAGE; + uint8_t *data = nullptr; + uint32_t len = 0; + uint16_t epoch = 0; + bool is_ccs = false; +}; -/* dtls_clear_outgoing_messages releases all buffered outgoing messages. */ +// dtls_clear_outgoing_messages releases all buffered outgoing messages. void dtls_clear_outgoing_messages(SSL *ssl); -/* Callbacks. */ +// Callbacks. -/* ssl_do_info_callback calls |ssl|'s info callback, if set. */ +// ssl_do_info_callback calls |ssl|'s info callback, if set. void ssl_do_info_callback(const SSL *ssl, int type, int value); -/* ssl_do_msg_callback calls |ssl|'s message callback, if set. */ +// ssl_do_msg_callback calls |ssl|'s message callback, if set. void ssl_do_msg_callback(SSL *ssl, int is_write, int content_type, - const void *buf, size_t len); + Span in); -/* Transport buffers. */ +// Transport buffers. -/* ssl_read_buffer returns a pointer to contents of the read buffer. */ -uint8_t *ssl_read_buffer(SSL *ssl); +class SSLBuffer { + public: + SSLBuffer() {} + ~SSLBuffer() { Clear(); } -/* ssl_read_buffer_len returns the length of the read buffer. */ -size_t ssl_read_buffer_len(const SSL *ssl); + SSLBuffer(const SSLBuffer &) = delete; + SSLBuffer &operator=(const SSLBuffer &) = delete; -/* ssl_read_buffer_extend_to extends the read buffer to the desired length. For - * TLS, it reads to the end of the buffer until the buffer is |len| bytes - * long. For DTLS, it reads a new packet and ignores |len|. It returns one on - * success, zero on EOF, and a negative number on error. - * - * It is an error to call |ssl_read_buffer_extend_to| in DTLS when the buffer is - * non-empty. */ + uint8_t *data() { return buf_ + offset_; } + size_t size() const { return size_; } + bool empty() const { return size_ == 0; } + size_t cap() const { return cap_; } + + Span span() { return MakeSpan(data(), size()); } + + Span remaining() { + return MakeSpan(data() + size(), cap() - size()); + } + + // Clear releases the buffer. + void Clear(); + + // EnsureCap ensures the buffer has capacity at least |new_cap|, aligned such + // that data written after |header_len| is aligned to a + // |SSL3_ALIGN_PAYLOAD|-byte boundary. It returns true on success and false + // on error. + bool EnsureCap(size_t header_len, size_t new_cap); + + // DidWrite extends the buffer by |len|. The caller must have filled in to + // this point. + void DidWrite(size_t len); + + // Consume consumes |len| bytes from the front of the buffer. The memory + // consumed will remain valid until the next call to |DiscardConsumed| or + // |Clear|. + void Consume(size_t len); + + // DiscardConsumed discards the consumed bytes from the buffer. If the buffer + // is now empty, it releases memory used by it. + void DiscardConsumed(); + + private: + // buf_ is the memory allocated for this buffer. 
+ uint8_t *buf_ = nullptr; + // offset_ is the offset into |buf_| which the buffer contents start at. + uint16_t offset_ = 0; + // size_ is the size of the buffer contents from |buf_| + |offset_|. + uint16_t size_ = 0; + // cap_ is how much memory beyond |buf_| + |offset_| is available. + uint16_t cap_ = 0; +}; + +// ssl_read_buffer_extend_to extends the read buffer to the desired length. For +// TLS, it reads to the end of the buffer until the buffer is |len| bytes +// long. For DTLS, it reads a new packet and ignores |len|. It returns one on +// success, zero on EOF, and a negative number on error. +// +// It is an error to call |ssl_read_buffer_extend_to| in DTLS when the buffer is +// non-empty. int ssl_read_buffer_extend_to(SSL *ssl, size_t len); -/* ssl_read_buffer_consume consumes |len| bytes from the read buffer. It - * advances the data pointer and decrements the length. The memory consumed will - * remain valid until the next call to |ssl_read_buffer_extend| or it is - * discarded with |ssl_read_buffer_discard|. */ -void ssl_read_buffer_consume(SSL *ssl, size_t len); - -/* ssl_read_buffer_discard discards the consumed bytes from the read buffer. If - * the buffer is now empty, it releases memory used by it. */ -void ssl_read_buffer_discard(SSL *ssl); - -/* ssl_read_buffer_clear releases all memory associated with the read buffer and - * zero-initializes it. */ -void ssl_read_buffer_clear(SSL *ssl); - -/* ssl_write_buffer_is_pending returns one if the write buffer has pending data - * and zero if is empty. */ -int ssl_write_buffer_is_pending(const SSL *ssl); - -/* ssl_write_buffer_init initializes the write buffer. On success, it sets - * |*out_ptr| to the start of the write buffer with space for up to |max_len| - * bytes. It returns one on success and zero on failure. Call - * |ssl_write_buffer_set_len| to complete initialization. */ -int ssl_write_buffer_init(SSL *ssl, uint8_t **out_ptr, size_t max_len); - -/* ssl_write_buffer_set_len is called after |ssl_write_buffer_init| to complete - * initialization after |len| bytes are written to the buffer. */ -void ssl_write_buffer_set_len(SSL *ssl, size_t len); - -/* ssl_write_buffer_flush flushes the write buffer to the transport. It returns - * one on success and <= 0 on error. For DTLS, whether or not the write - * succeeds, the write buffer will be cleared. */ -int ssl_write_buffer_flush(SSL *ssl); +// ssl_handle_open_record handles the result of passing |ssl->s3->read_buffer| +// to a record-processing function. If |ret| is a success or if the caller +// should retry, it returns one and sets |*out_retry|. Otherwise, it returns <= +// 0. +int ssl_handle_open_record(SSL *ssl, bool *out_retry, ssl_open_record_t ret, + size_t consumed, uint8_t alert); -/* ssl_write_buffer_clear releases all memory associated with the write buffer - * and zero-initializes it. */ -void ssl_write_buffer_clear(SSL *ssl); +// ssl_write_buffer_flush flushes the write buffer to the transport. It returns +// one on success and <= 0 on error. For DTLS, whether or not the write +// succeeds, the write buffer will be cleared. +int ssl_write_buffer_flush(SSL *ssl); -/* Certificate functions. */ +// Certificate functions. -/* ssl_has_certificate returns one if a certificate and private key are - * configured and zero otherwise. */ +// ssl_has_certificate returns one if a certificate and private key are +// configured and zero otherwise. 
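A hypothetical fill-and-drain cycle for the SSLBuffer class above (illustrative only; SSL3_RT_HEADER_LENGTH and OPENSSL_memcpy are assumed from the existing headers):

static bool buffer_round_trip(Span<const uint8_t> record) {
  SSLBuffer buf;
  if (!buf.EnsureCap(SSL3_RT_HEADER_LENGTH, record.size())) {
    return false;
  }
  OPENSSL_memcpy(buf.remaining().data(), record.data(), record.size());
  buf.DidWrite(record.size());
  // ... hand buf.span() to a record-processing function here ...
  buf.Consume(buf.size());  // The consumed span stays valid until below.
  buf.DiscardConsumed();    // Now the memory may be released.
  return true;
}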
int ssl_has_certificate(const SSL *ssl); -/* ssl_parse_cert_chain parses a certificate list from |cbs| in the format used - * by a TLS Certificate message. On success, it returns a newly-allocated - * |CRYPTO_BUFFER| list and advances |cbs|. Otherwise, it returns NULL and sets - * |*out_alert| to an alert to send to the peer. - * - * If the list is non-empty then |*out_pubkey| will be set to a freshly - * allocated public-key from the leaf certificate. - * - * If the list is non-empty and |out_leaf_sha256| is non-NULL, it writes the - * SHA-256 hash of the leaf to |out_leaf_sha256|. */ -STACK_OF(CRYPTO_BUFFER) *ssl_parse_cert_chain(uint8_t *out_alert, - EVP_PKEY **out_pubkey, - uint8_t *out_leaf_sha256, - CBS *cbs, - CRYPTO_BUFFER_POOL *pool); - -/* ssl_add_cert_chain adds |ssl|'s certificate chain to |cbb| in the format used - * by a TLS Certificate message. If there is no certificate chain, it emits an - * empty certificate list. It returns one on success and zero on error. */ +// ssl_parse_cert_chain parses a certificate list from |cbs| in the format used +// by a TLS Certificate message. On success, it advances |cbs| and returns +// true. Otherwise, it returns false and sets |*out_alert| to an alert to send +// to the peer. +// +// If the list is non-empty then |*out_chain| and |*out_pubkey| will be set to +// the certificate chain and the leaf certificate's public key +// respectively. Otherwise, both will be set to nullptr. +// +// If the list is non-empty and |out_leaf_sha256| is non-NULL, it writes the +// SHA-256 hash of the leaf to |out_leaf_sha256|. +bool ssl_parse_cert_chain(uint8_t *out_alert, + UniquePtr *out_chain, + UniquePtr *out_pubkey, + uint8_t *out_leaf_sha256, CBS *cbs, + CRYPTO_BUFFER_POOL *pool); + +// ssl_add_cert_chain adds |ssl|'s certificate chain to |cbb| in the format used +// by a TLS Certificate message. If there is no certificate chain, it emits an +// empty certificate list. It returns one on success and zero on error. int ssl_add_cert_chain(SSL *ssl, CBB *cbb); -/* ssl_auto_chain_if_needed runs the deprecated auto-chaining logic if - * necessary. On success, it updates |ssl|'s certificate configuration as needed - * and returns one. Otherwise, it returns zero. */ -int ssl_auto_chain_if_needed(SSL *ssl); - -/* ssl_cert_check_digital_signature_key_usage parses the DER-encoded, X.509 - * certificate in |in| and returns one if doesn't specify a key usage or, if it - * does, if it includes digitalSignature. Otherwise it pushes to the error - * queue and returns zero. */ +// ssl_cert_check_digital_signature_key_usage parses the DER-encoded, X.509 +// certificate in |in| and returns one if doesn't specify a key usage or, if it +// does, if it includes digitalSignature. Otherwise it pushes to the error +// queue and returns zero. int ssl_cert_check_digital_signature_key_usage(const CBS *in); -/* ssl_cert_parse_pubkey extracts the public key from the DER-encoded, X.509 - * certificate in |in|. It returns an allocated |EVP_PKEY| or else returns NULL - * and pushes to the error queue. */ -EVP_PKEY *ssl_cert_parse_pubkey(const CBS *in); - -/* ssl_parse_client_CA_list parses a CA list from |cbs| in the format used by a - * TLS CertificateRequest message. On success, it returns a newly-allocated - * |X509_NAME| list and advances |cbs|. Otherwise, it returns NULL and sets - * |*out_alert| to an alert to send to the peer. 
*/ -STACK_OF(X509_NAME) * - ssl_parse_client_CA_list(SSL *ssl, uint8_t *out_alert, CBS *cbs); - -/* ssl_add_client_CA_list adds the configured CA list to |cbb| in the format - * used by a TLS CertificateRequest message. It returns one on success and zero - * on error. */ +// ssl_cert_parse_pubkey extracts the public key from the DER-encoded, X.509 +// certificate in |in|. It returns an allocated |EVP_PKEY| or else returns +// nullptr and pushes to the error queue. +UniquePtr ssl_cert_parse_pubkey(const CBS *in); + +// ssl_parse_client_CA_list parses a CA list from |cbs| in the format used by a +// TLS CertificateRequest message. On success, it returns a newly-allocated +// |CRYPTO_BUFFER| list and advances |cbs|. Otherwise, it returns nullptr and +// sets |*out_alert| to an alert to send to the peer. +UniquePtr ssl_parse_client_CA_list(SSL *ssl, + uint8_t *out_alert, + CBS *cbs); + +// ssl_has_client_CAs returns there are configured CAs. +bool ssl_has_client_CAs(SSL *ssl); + +// ssl_add_client_CA_list adds the configured CA list to |cbb| in the format +// used by a TLS CertificateRequest message. It returns one on success and zero +// on error. int ssl_add_client_CA_list(SSL *ssl, CBB *cbb); -/* ssl_check_leaf_certificate returns one if |pkey| and |leaf| are suitable as - * a server's leaf certificate for |hs|. Otherwise, it returns zero and pushes - * an error on the error queue. */ +// ssl_check_leaf_certificate returns one if |pkey| and |leaf| are suitable as +// a server's leaf certificate for |hs|. Otherwise, it returns zero and pushes +// an error on the error queue. int ssl_check_leaf_certificate(SSL_HANDSHAKE *hs, EVP_PKEY *pkey, const CRYPTO_BUFFER *leaf); +// ssl_on_certificate_selected is called once the certificate has been selected. +// It finalizes the certificate and initializes |hs->local_pubkey|. It returns +// one on success and zero on error. +int ssl_on_certificate_selected(SSL_HANDSHAKE *hs); + -/* TLS 1.3 key derivation. */ +// TLS 1.3 key derivation. -/* tls13_init_key_schedule initializes the handshake hash and key derivation - * state. The cipher suite and PRF hash must have been selected at this point. - * It returns one on success and zero on error. */ -int tls13_init_key_schedule(SSL_HANDSHAKE *hs); +// tls13_init_key_schedule initializes the handshake hash and key derivation +// state, and incorporates the PSK. The cipher suite and PRF hash must have been +// selected at this point. It returns one on success and zero on error. +int tls13_init_key_schedule(SSL_HANDSHAKE *hs, const uint8_t *psk, + size_t psk_len); -/* tls13_advance_key_schedule incorporates |in| into the key schedule with - * HKDF-Extract. It returns one on success and zero on error. */ +// tls13_init_early_key_schedule initializes the handshake hash and key +// derivation state from the resumption secret and incorporates the PSK to +// derive the early secrets. It returns one on success and zero on error. +int tls13_init_early_key_schedule(SSL_HANDSHAKE *hs, const uint8_t *psk, + size_t psk_len); + +// tls13_advance_key_schedule incorporates |in| into the key schedule with +// HKDF-Extract. It returns one on success and zero on error. int tls13_advance_key_schedule(SSL_HANDSHAKE *hs, const uint8_t *in, size_t len); -/* tls13_set_traffic_key sets the read or write traffic keys to - * |traffic_secret|. It returns one on success and zero on error. */ +// tls13_set_traffic_key sets the read or write traffic keys to +// |traffic_secret|. It returns one on success and zero on error. 
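A hedged sketch of the new ssl_parse_cert_chain calling convention; the UniquePtr template arguments, |ssl->ctx->pool| and the ssl3_send_alert helper are assumptions based on the surrounding header rather than part of this diff:

static bool parse_peer_chain(SSL *ssl, CBS *cbs,
                             UniquePtr<EVP_PKEY> *out_pubkey) {
  uint8_t alert = SSL_AD_DECODE_ERROR;
  UniquePtr<STACK_OF(CRYPTO_BUFFER)> chain;
  if (!ssl_parse_cert_chain(&alert, &chain, out_pubkey,
                            /*out_leaf_sha256=*/nullptr, cbs, ssl->ctx->pool)) {
    ssl3_send_alert(ssl, SSL3_AL_FATAL, alert);
    return false;
  }
  // An empty certificate list leaves |chain| and |*out_pubkey| as nullptr.
  return true;
}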
int tls13_set_traffic_key(SSL *ssl, enum evp_aead_direction_t direction, const uint8_t *traffic_secret, size_t traffic_secret_len); -/* tls13_derive_handshake_secrets derives the handshake traffic secret. It - * returns one on success and zero on error. */ +// tls13_derive_early_secrets derives the early traffic secret. It returns one +// on success and zero on error. +int tls13_derive_early_secrets(SSL_HANDSHAKE *hs); + +// tls13_derive_handshake_secrets derives the handshake traffic secret. It +// returns one on success and zero on error. int tls13_derive_handshake_secrets(SSL_HANDSHAKE *hs); -/* tls13_rotate_traffic_key derives the next read or write traffic secret. It - * returns one on success and zero on error. */ +// tls13_rotate_traffic_key derives the next read or write traffic secret. It +// returns one on success and zero on error. int tls13_rotate_traffic_key(SSL *ssl, enum evp_aead_direction_t direction); -/* tls13_derive_application_secrets derives the initial application data traffic - * and exporter secrets based on the handshake transcripts and |master_secret|. - * It returns one on success and zero on error. */ +// tls13_derive_application_secrets derives the initial application data traffic +// and exporter secrets based on the handshake transcripts and |master_secret|. +// It returns one on success and zero on error. int tls13_derive_application_secrets(SSL_HANDSHAKE *hs); -/* tls13_derive_resumption_secret derives the |resumption_secret|. */ +// tls13_derive_resumption_secret derives the |resumption_secret|. int tls13_derive_resumption_secret(SSL_HANDSHAKE *hs); -/* tls13_export_keying_material provides an exporter interface to use the - * |exporter_secret|. */ +// tls13_export_keying_material provides an exporter interface to use the +// |exporter_secret|. int tls13_export_keying_material(SSL *ssl, uint8_t *out, size_t out_len, const char *label, size_t label_len, const uint8_t *context, size_t context_len, int use_context); -/* tls13_finished_mac calculates the MAC of the handshake transcript to verify - * the integrity of the Finished message, and stores the result in |out| and - * length in |out_len|. |is_server| is 1 if this is for the Server Finished and - * 0 for the Client Finished. */ +// tls13_finished_mac calculates the MAC of the handshake transcript to verify +// the integrity of the Finished message, and stores the result in |out| and +// length in |out_len|. |is_server| is 1 if this is for the Server Finished and +// 0 for the Client Finished. int tls13_finished_mac(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len, int is_server); -/* tls13_write_psk_binder calculates the PSK binder value and replaces the last - * bytes of |msg| with the resulting value. It returns 1 on success, and 0 on - * failure. */ +// tls13_derive_session_psk calculates the PSK for this session based on the +// resumption master secret and |nonce|. It returns true on success, and false +// on failure. +bool tls13_derive_session_psk(SSL_SESSION *session, Span nonce); + +// tls13_write_psk_binder calculates the PSK binder value and replaces the last +// bytes of |msg| with the resulting value. It returns 1 on success, and 0 on +// failure. int tls13_write_psk_binder(SSL_HANDSHAKE *hs, uint8_t *msg, size_t len); -/* tls13_verify_psk_binder verifies that the handshake transcript, truncated - * up to the binders has a valid signature using the value of |session|'s - * resumption secret. It returns 1 on success, and 0 on failure. 
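Roughly, a client-side derivation step using the functions above might look like the sketch below; whether a zero-filled placeholder PSK is what tls13_init_key_schedule expects in a non-PSK flow is an assumption here, and |dhe_secret| stands for the ECDHE output from the key share:

static bool derive_handshake_keys(SSL_HANDSHAKE *hs,
                                  Span<const uint8_t> dhe_secret) {
  SSL *ssl = hs->ssl;
  size_t hash_len = hs->transcript.DigestLen();
  uint8_t zeros[EVP_MAX_MD_SIZE] = {0};  // Stand-in PSK for a non-PSK flow.
  if (!tls13_init_key_schedule(hs, zeros, hash_len) ||
      !tls13_advance_key_schedule(hs, dhe_secret.data(), dhe_secret.size()) ||
      !tls13_derive_handshake_secrets(hs)) {
    return false;
  }
  // On the client, reads use the server secret and writes use the client one.
  return tls13_set_traffic_key(ssl, evp_aead_open, hs->server_handshake_secret,
                               hash_len) &&
         tls13_set_traffic_key(ssl, evp_aead_seal, hs->client_handshake_secret,
                               hash_len);
}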
*/ +// tls13_verify_psk_binder verifies that the handshake transcript, truncated +// up to the binders has a valid signature using the value of |session|'s +// resumption secret. It returns 1 on success, and 0 on failure. int tls13_verify_psk_binder(SSL_HANDSHAKE *hs, SSL_SESSION *session, - CBS *binders); + const SSLMessage &msg, CBS *binders); -/* Handshake functions. */ +// Handshake functions. enum ssl_hs_wait_t { ssl_hs_error, ssl_hs_ok, + ssl_hs_read_server_hello, ssl_hs_read_message, ssl_hs_flush, - ssl_hs_flush_and_read_message, + ssl_hs_certificate_selection_pending, ssl_hs_x509_lookup, ssl_hs_channel_id_lookup, ssl_hs_private_key_operation, + ssl_hs_pending_session, + ssl_hs_pending_ticket, + ssl_hs_early_return, + ssl_hs_early_data_rejected, + ssl_hs_read_end_of_early_data, + ssl_hs_read_change_cipher_spec, + ssl_hs_certificate_verify, }; -struct ssl_handshake_st { - /* ssl is a non-owning pointer to the parent |SSL| object. */ +struct SSL_HANDSHAKE { + explicit SSL_HANDSHAKE(SSL *ssl); + ~SSL_HANDSHAKE(); + static constexpr bool kAllowUniquePtr = true; + + // ssl is a non-owning pointer to the parent |SSL| object. SSL *ssl; - /* do_tls13_handshake runs the TLS 1.3 handshake. On completion, it returns - * |ssl_hs_ok|. Otherwise, it returns a value corresponding to what operation - * is needed to progress. */ - enum ssl_hs_wait_t (*do_tls13_handshake)(SSL_HANDSHAKE *hs); + // wait contains the operation the handshake is currently blocking on or + // |ssl_hs_ok| if none. + enum ssl_hs_wait_t wait = ssl_hs_ok; - /* wait contains the operation |do_tls13_handshake| is currently blocking on - * or |ssl_hs_ok| if none. */ - enum ssl_hs_wait_t wait; + // state is the internal state for the TLS 1.2 and below handshake. Its + // values depend on |do_handshake| but the starting state is always zero. + int state = 0; - /* state contains one of the SSL3_ST_* values. */ - int state; + // tls13_state is the internal state for the TLS 1.3 handshake. Its values + // depend on |do_handshake| but the starting state is always zero. + int tls13_state = 0; - /* next_state is used when SSL_ST_FLUSH_DATA is entered */ - int next_state; + // min_version is the minimum accepted protocol version, taking account both + // |SSL_OP_NO_*| and |SSL_CTX_set_min_proto_version| APIs. + uint16_t min_version = 0; - /* tls13_state is the internal state for the TLS 1.3 handshake. Its values - * depend on |do_tls13_handshake| but the starting state is always zero. */ - int tls13_state; + // max_version is the maximum accepted protocol version, taking account both + // |SSL_OP_NO_*| and |SSL_CTX_set_max_proto_version| APIs. + uint16_t max_version = 0; - size_t hash_len; - uint8_t secret[EVP_MAX_MD_SIZE]; - uint8_t client_handshake_secret[EVP_MAX_MD_SIZE]; - uint8_t server_handshake_secret[EVP_MAX_MD_SIZE]; - uint8_t client_traffic_secret_0[EVP_MAX_MD_SIZE]; - uint8_t server_traffic_secret_0[EVP_MAX_MD_SIZE]; + // session_id is the session ID in the ClientHello, used for the experimental + // TLS 1.3 variant. 
+ uint8_t session_id[SSL_MAX_SSL_SESSION_ID_LENGTH] = {0}; + uint8_t session_id_len = 0; + + size_t hash_len = 0; + uint8_t secret[EVP_MAX_MD_SIZE] = {0}; + uint8_t early_traffic_secret[EVP_MAX_MD_SIZE] = {0}; + uint8_t client_handshake_secret[EVP_MAX_MD_SIZE] = {0}; + uint8_t server_handshake_secret[EVP_MAX_MD_SIZE] = {0}; + uint8_t client_traffic_secret_0[EVP_MAX_MD_SIZE] = {0}; + uint8_t server_traffic_secret_0[EVP_MAX_MD_SIZE] = {0}; + uint8_t expected_client_finished[EVP_MAX_MD_SIZE] = {0}; union { - /* sent is a bitset where the bits correspond to elements of kExtensions - * in t1_lib.c. Each bit is set if that extension was sent in a - * ClientHello. It's not used by servers. */ - uint32_t sent; - /* received is a bitset, like |sent|, but is used by servers to record - * which extensions were received from a client. */ + // sent is a bitset where the bits correspond to elements of kExtensions + // in t1_lib.c. Each bit is set if that extension was sent in a + // ClientHello. It's not used by servers. + uint32_t sent = 0; + // received is a bitset, like |sent|, but is used by servers to record + // which extensions were received from a client. uint32_t received; } extensions; union { - /* sent is a bitset where the bits correspond to elements of - * |client_custom_extensions| in the |SSL_CTX|. Each bit is set if that - * extension was sent in a ClientHello. It's not used by servers. */ - uint16_t sent; - /* received is a bitset, like |sent|, but is used by servers to record - * which custom extensions were received from a client. The bits here - * correspond to |server_custom_extensions|. */ + // sent is a bitset where the bits correspond to elements of + // |client_custom_extensions| in the |SSL_CTX|. Each bit is set if that + // extension was sent in a ClientHello. It's not used by servers. + uint16_t sent = 0; + // received is a bitset, like |sent|, but is used by servers to record + // which custom extensions were received from a client. The bits here + // correspond to |server_custom_extensions|. uint16_t received; } custom_extensions; - /* retry_group is the group ID selected by the server in HelloRetryRequest in - * TLS 1.3. */ - uint16_t retry_group; + // retry_group is the group ID selected by the server in HelloRetryRequest in + // TLS 1.3. + uint16_t retry_group = 0; + + // error, if |wait| is |ssl_hs_error|, is the error the handshake failed on. + UniquePtr<ERR_SAVE_STATE> error; + + // key_share is the current key exchange instance. + UniquePtr<SSLKeyShare> key_share; + + // transcript is the current handshake transcript. + SSLTranscript transcript; + + // cookie is the value of the cookie received from the server, if any. + Array<uint8_t> cookie; + + // key_share_bytes is the value of the previously sent KeyShare extension by + // the client in TLS 1.3. + Array<uint8_t> key_share_bytes; + + // ecdh_public_key, for servers, is the key share to be sent to the client in + // TLS 1.3. + Array<uint8_t> ecdh_public_key; - /* ecdh_ctx is the current ECDH instance. */ - SSL_ECDH_CTX ecdh_ctx; + // peer_sigalgs are the signature algorithms that the peer supports. These are + // taken from the contents of the signature algorithms extension for a server + // or from the CertificateRequest for a client. + Array<uint16_t> peer_sigalgs; - /* transcript is the current handshake transcript. */ - SSL_TRANSCRIPT transcript; + // peer_supported_group_list contains the supported group IDs advertised by + // the peer. This is only set on the server's end. The server does not + // advertise this extension to the client.
+ Array<uint16_t> peer_supported_group_list; - /* cookie is the value of the cookie received from the server, if any. */ - uint8_t *cookie; - size_t cookie_len; + // peer_key is the peer's ECDH key for a TLS 1.2 client. + Array<uint8_t> peer_key; - /* key_share_bytes is the value of the previously sent KeyShare extension by - * the client in TLS 1.3. */ - uint8_t *key_share_bytes; - size_t key_share_bytes_len; + // server_params, in a TLS 1.2 server, stores the ServerKeyExchange + // parameters. It has client and server randoms prepended for signing + // convenience. + Array<uint8_t> server_params; - /* public_key, for servers, is the key share to be sent to the client in TLS - * 1.3. */ - uint8_t *public_key; - size_t public_key_len; + // peer_psk_identity_hint, on the client, is the psk_identity_hint sent by the + // server when using a TLS 1.2 PSK key exchange. + UniquePtr<char> peer_psk_identity_hint; - /* peer_sigalgs are the signature algorithms that the peer supports. These are - * taken from the contents of the signature algorithms extension for a server - * or from the CertificateRequest for a client. */ - uint16_t *peer_sigalgs; - /* num_peer_sigalgs is the number of entries in |peer_sigalgs|. */ - size_t num_peer_sigalgs; + // ca_names, on the client, contains the list of CAs received in a + // CertificateRequest message. + UniquePtr<STACK_OF(CRYPTO_BUFFER)> ca_names; - /* peer_supported_group_list contains the supported group IDs advertised by - * the peer. This is only set on the server's end. The server does not - * advertise this extension to the client. */ - uint16_t *peer_supported_group_list; - size_t peer_supported_group_list_len; + // cached_x509_ca_names contains a cache of parsed versions of the elements of + // |ca_names|. This pointer is left non-owning so only + // |ssl_crypto_x509_method| needs to link against crypto/x509. + STACK_OF(X509_NAME) *cached_x509_ca_names = nullptr; - /* peer_key is the peer's ECDH key for a TLS 1.2 client. */ - uint8_t *peer_key; - size_t peer_key_len; + // certificate_types, on the client, contains the set of certificate types + // received in a CertificateRequest message. + Array<uint8_t> certificate_types; - /* server_params, in TLS 1.2, stores the ServerKeyExchange parameters to be - * signed while the signature is being computed. */ - uint8_t *server_params; - size_t server_params_len; + // local_pubkey is the public key we are authenticating as. + UniquePtr<EVP_PKEY> local_pubkey; - /* peer_psk_identity_hint, on the client, is the psk_identity_hint sent by the - * server when using a TLS 1.2 PSK key exchange. */ - char *peer_psk_identity_hint; + // peer_pubkey is the public key parsed from the peer's leaf certificate. + UniquePtr<EVP_PKEY> peer_pubkey; - /* ca_names, on the client, contains the list of CAs received in a - * CertificateRequest message. */ - STACK_OF(X509_NAME) *ca_names; + // new_session is the new mutable session being established by the current + // handshake. It should not be cached. + UniquePtr<SSL_SESSION> new_session; - /* certificate_types, on the client, contains the set of certificate types - * received in a CertificateRequest message. */ - uint8_t *certificate_types; - size_t num_certificate_types; + // early_session is the session corresponding to the current 0-RTT state on + // the client if |in_early_data| is true. + UniquePtr<SSL_SESSION> early_session; - /* hostname, on the server, is the value of the SNI extension. */ - char *hostname; + // new_cipher is the cipher being negotiated in this handshake.
+ const SSL_CIPHER *new_cipher = nullptr; - /* peer_pubkey is the public key parsed from the peer's leaf certificate. */ - EVP_PKEY *peer_pubkey; + // key_block is the record-layer key block for TLS 1.2 and earlier. + Array key_block; - /* new_session is the new mutable session being established by the current - * handshake. It should not be cached. */ - SSL_SESSION *new_session; + // scts_requested is true if the SCT extension is in the ClientHello. + bool scts_requested:1; - /* new_cipher is the cipher being negotiated in this handshake. */ - const SSL_CIPHER *new_cipher; + // needs_psk_binder is true if the ClientHello has a placeholder PSK binder to + // be filled in. + bool needs_psk_binder:1; - /* key_block is the record-layer key block for TLS 1.2 and earlier. */ - uint8_t *key_block; - uint8_t key_block_len; + bool received_hello_retry_request:1; + bool sent_hello_retry_request:1; - /* session_tickets_sent, in TLS 1.3, is the number of tickets the server has - * sent. */ - uint8_t session_tickets_sent; + bool received_custom_extension:1; - /* scts_requested is one if the SCT extension is in the ClientHello. */ - unsigned scts_requested:1; + // handshake_finalized is true once the handshake has completed, at which + // point accessors should use the established state. + bool handshake_finalized:1; - /* needs_psk_binder if the ClientHello has a placeholder PSK binder to be - * filled in. */ - unsigned needs_psk_binder:1; + // accept_psk_mode stores whether the client's PSK mode is compatible with our + // preferences. + bool accept_psk_mode:1; - unsigned received_hello_retry_request:1; + // cert_request is true if a client certificate was requested. + bool cert_request:1; - /* accept_psk_mode stores whether the client's PSK mode is compatible with our - * preferences. */ - unsigned accept_psk_mode:1; + // certificate_status_expected is true if OCSP stapling was negotiated and the + // server is expected to send a CertificateStatus message. (This is used on + // both the client and server sides.) + bool certificate_status_expected:1; - /* cert_request is one if a client certificate was requested and zero - * otherwise. */ - unsigned cert_request:1; + // ocsp_stapling_requested is true if a client requested OCSP stapling. + bool ocsp_stapling_requested:1; - /* certificate_status_expected is one if OCSP stapling was negotiated and the - * server is expected to send a CertificateStatus message. (This is used on - * both the client and server sides.) */ - unsigned certificate_status_expected:1; + // should_ack_sni is used by a server and indicates that the SNI extension + // should be echoed in the ServerHello. + bool should_ack_sni:1; - /* ocsp_stapling_requested is one if a client requested OCSP stapling. */ - unsigned ocsp_stapling_requested:1; + // in_false_start is true if there is a pending client handshake in False + // Start. The client may write data at this point. + bool in_false_start:1; - /* should_ack_sni is used by a server and indicates that the SNI extension - * should be echoed in the ServerHello. */ - unsigned should_ack_sni:1; + // in_early_data is true if there is a pending handshake that has progressed + // enough to send and receive early data. + bool in_early_data:1; - /* in_false_start is one if there is a pending client handshake in False - * Start. The client may write data at this point. */ - unsigned in_false_start:1; + // early_data_offered is true if the client sent the early_data extension. 
+ bool early_data_offered:1; - /* next_proto_neg_seen is one of NPN was negotiated. */ - unsigned next_proto_neg_seen:1; + // can_early_read is true if application data may be read at this point in the + // handshake. + bool can_early_read:1; - /* ticket_expected is one if a TLS 1.2 NewSessionTicket message is to be sent - * or received. */ - unsigned ticket_expected:1; + // can_early_write is true if application data may be written at this point in + // the handshake. + bool can_early_write:1; - /* v2_clienthello is one if we received a V2ClientHello. */ - unsigned v2_clienthello:1; + // next_proto_neg_seen is one of NPN was negotiated. + bool next_proto_neg_seen:1; - /* extended_master_secret is one if the extended master secret extension is - * negotiated in this handshake. */ - unsigned extended_master_secret:1; + // ticket_expected is true if a TLS 1.2 NewSessionTicket message is to be sent + // or received. + bool ticket_expected:1; - /* client_version is the value sent or received in the ClientHello version. */ - uint16_t client_version; -} /* SSL_HANDSHAKE */; + // extended_master_secret is true if the extended master secret extension is + // negotiated in this handshake. + bool extended_master_secret:1; -SSL_HANDSHAKE *ssl_handshake_new(SSL *ssl); + // pending_private_key_op is true if there is a pending private key operation + // in progress. + bool pending_private_key_op:1; -/* ssl_handshake_free releases all memory associated with |hs|. */ -void ssl_handshake_free(SSL_HANDSHAKE *hs); + // client_version is the value sent or received in the ClientHello version. + uint16_t client_version = 0; -/* ssl_check_message_type checks if the current message has type |type|. If so - * it returns one. Otherwise, it sends an alert and returns zero. */ -int ssl_check_message_type(SSL *ssl, int type); + // early_data_read is the amount of early data that has been read by the + // record layer. + uint16_t early_data_read = 0; -/* tls13_handshake runs the TLS 1.3 handshake. It returns one on success and <= - * 0 on error. */ -int tls13_handshake(SSL_HANDSHAKE *hs); + // early_data_written is the amount of early data that has been written by the + // record layer. + uint16_t early_data_written = 0; +}; + +UniquePtr ssl_handshake_new(SSL *ssl); + +// ssl_check_message_type checks if |msg| has type |type|. If so it returns +// one. Otherwise, it sends an alert and returns zero. +bool ssl_check_message_type(SSL *ssl, const SSLMessage &msg, int type); + +// ssl_run_handshake runs the TLS handshake. It returns one on success and <= 0 +// on error. It sets |out_early_return| to one if we've completed the handshake +// early. +int ssl_run_handshake(SSL_HANDSHAKE *hs, bool *out_early_return); -/* The following are implementations of |do_tls13_handshake| for the client and - * server. */ +// The following are implementations of |do_handshake| for the client and +// server. +enum ssl_hs_wait_t ssl_client_handshake(SSL_HANDSHAKE *hs); +enum ssl_hs_wait_t ssl_server_handshake(SSL_HANDSHAKE *hs); enum ssl_hs_wait_t tls13_client_handshake(SSL_HANDSHAKE *hs); enum ssl_hs_wait_t tls13_server_handshake(SSL_HANDSHAKE *hs); -/* tls13_post_handshake processes a post-handshake message. It returns one on - * success and zero on failure. */ -int tls13_post_handshake(SSL *ssl); +// The following functions return human-readable representations of the TLS +// handshake states for debugging. 
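// [Editor's illustrative sketch -- not part of this patch.] The |do_handshake|
// implementations declared below return an ssl_hs_wait_t telling the driver
// why they stopped (done, error, or blocked on some event), and the driver
// maps that onto success / retry-later / failure, loosely like
// ssl_run_handshake's "one on success, <= 0 on error" contract. A toy version
// of that pattern; the enum values and names here are hypothetical, not the
// real BoringSSL ones.
enum class ToyWait { kDone, kError, kWantRead, kWantWrite };

// One state-machine step; in the real code this role is played by functions
// such as ssl_client_handshake or tls13_server_handshake.
typedef ToyWait (*ToyHandshakeStep)(void *state);

static int RunToyHandshake(ToyHandshakeStep step, void *state) {
  switch (step(state)) {
    case ToyWait::kDone:
      return 1;   // handshake finished
    case ToyWait::kError:
      return -1;  // fatal error
    case ToyWait::kWantRead:
    case ToyWait::kWantWrite:
      return 0;   // caller retries once the transport is ready
  }
  return -1;
}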
+const char *ssl_client_handshake_state(SSL_HANDSHAKE *hs); +const char *ssl_server_handshake_state(SSL_HANDSHAKE *hs); +const char *tls13_client_handshake_state(SSL_HANDSHAKE *hs); +const char *tls13_server_handshake_state(SSL_HANDSHAKE *hs); -int tls13_process_certificate(SSL_HANDSHAKE *hs, int allow_anonymous); -int tls13_process_certificate_verify(SSL_HANDSHAKE *hs); -int tls13_process_finished(SSL_HANDSHAKE *hs); +// tls13_post_handshake processes a post-handshake message. It returns one on +// success and zero on failure. +int tls13_post_handshake(SSL *ssl, const SSLMessage &msg); + +int tls13_process_certificate(SSL_HANDSHAKE *hs, const SSLMessage &msg, + int allow_anonymous); +int tls13_process_certificate_verify(SSL_HANDSHAKE *hs, const SSLMessage &msg); + +// tls13_process_finished processes |msg| as a Finished message from the +// peer. If |use_saved_value| is one, the verify_data is compared against +// |hs->expected_client_finished| rather than computed fresh. +int tls13_process_finished(SSL_HANDSHAKE *hs, const SSLMessage &msg, + int use_saved_value); int tls13_add_certificate(SSL_HANDSHAKE *hs); -enum ssl_private_key_result_t tls13_add_certificate_verify(SSL_HANDSHAKE *hs, - int is_first_run); + +// tls13_add_certificate_verify adds a TLS 1.3 CertificateVerify message to the +// handshake. If it returns |ssl_private_key_retry|, it should be called again +// to retry when the signing operation is completed. +enum ssl_private_key_result_t tls13_add_certificate_verify(SSL_HANDSHAKE *hs); + int tls13_add_finished(SSL_HANDSHAKE *hs); -int tls13_process_new_session_ticket(SSL *ssl); - -int ssl_ext_key_share_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t **out_secret, - size_t *out_secret_len, - uint8_t *out_alert, CBS *contents); -int ssl_ext_key_share_parse_clienthello(SSL_HANDSHAKE *hs, int *out_found, - uint8_t **out_secret, - size_t *out_secret_len, - uint8_t *out_alert, CBS *contents); -int ssl_ext_key_share_add_serverhello(SSL_HANDSHAKE *hs, CBB *out); - -int ssl_ext_pre_shared_key_parse_serverhello(SSL_HANDSHAKE *hs, - uint8_t *out_alert, CBS *contents); -int ssl_ext_pre_shared_key_parse_clienthello(SSL_HANDSHAKE *hs, - SSL_SESSION **out_session, - CBS *out_binders, - uint8_t *out_alert, CBS *contents); -int ssl_ext_pre_shared_key_add_serverhello(SSL_HANDSHAKE *hs, CBB *out); - -/* ssl_is_sct_list_valid does a shallow parse of the SCT list in |contents| and - * returns one iff it's valid. */ +int tls13_process_new_session_ticket(SSL *ssl, const SSLMessage &msg); + +bool ssl_ext_key_share_parse_serverhello(SSL_HANDSHAKE *hs, + Array *out_secret, + uint8_t *out_alert, CBS *contents); +bool ssl_ext_key_share_parse_clienthello(SSL_HANDSHAKE *hs, bool *out_found, + Array *out_secret, + uint8_t *out_alert, CBS *contents); +bool ssl_ext_key_share_add_serverhello(SSL_HANDSHAKE *hs, CBB *out); + +bool ssl_ext_pre_shared_key_parse_serverhello(SSL_HANDSHAKE *hs, + uint8_t *out_alert, + CBS *contents); +bool ssl_ext_pre_shared_key_parse_clienthello( + SSL_HANDSHAKE *hs, CBS *out_ticket, CBS *out_binders, + uint32_t *out_obfuscated_ticket_age, uint8_t *out_alert, CBS *contents); +bool ssl_ext_pre_shared_key_add_serverhello(SSL_HANDSHAKE *hs, CBB *out); + +// ssl_is_sct_list_valid does a shallow parse of the SCT list in |contents| and +// returns one iff it's valid. int ssl_is_sct_list_valid(const CBS *contents); int ssl_write_client_hello(SSL_HANDSHAKE *hs); -/* ssl_clear_tls13_state releases client state only needed for TLS 1.3. 
It - * should be called once the version is known to be TLS 1.2 or earlier. */ -void ssl_clear_tls13_state(SSL_HANDSHAKE *hs); - enum ssl_cert_verify_context_t { ssl_cert_verify_server, ssl_cert_verify_client, ssl_cert_verify_channel_id, }; -/* tls13_get_cert_verify_signature_input generates the message to be signed for - * TLS 1.3's CertificateVerify message. |cert_verify_context| determines the - * type of signature. It sets |*out| and |*out_len| to a newly allocated buffer - * containing the result. The caller must free it with |OPENSSL_free| to release - * it. This function returns one on success and zero on failure. */ -int tls13_get_cert_verify_signature_input( - SSL_HANDSHAKE *hs, uint8_t **out, size_t *out_len, +// tls13_get_cert_verify_signature_input generates the message to be signed for +// TLS 1.3's CertificateVerify message. |cert_verify_context| determines the +// type of signature. It sets |*out| to a newly allocated buffer containing the +// result. This function returns true on success and false on failure. +bool tls13_get_cert_verify_signature_input( + SSL_HANDSHAKE *hs, Array *out, enum ssl_cert_verify_context_t cert_verify_context); -/* ssl_negotiate_alpn negotiates the ALPN extension, if applicable. It returns - * one on successful negotiation or if nothing was negotiated. It returns zero - * and sets |*out_alert| to an alert on error. */ -int ssl_negotiate_alpn(SSL_HANDSHAKE *hs, uint8_t *out_alert, - const SSL_CLIENT_HELLO *client_hello); +// ssl_is_alpn_protocol_allowed returns whether |protocol| is a valid server +// selection for |ssl|'s client preferences. +bool ssl_is_alpn_protocol_allowed(const SSL *ssl, Span protocol); + +// ssl_negotiate_alpn negotiates the ALPN extension, if applicable. It returns +// true on successful negotiation or if nothing was negotiated. It returns false +// and sets |*out_alert| to an alert on error. +bool ssl_negotiate_alpn(SSL_HANDSHAKE *hs, uint8_t *out_alert, + const SSL_CLIENT_HELLO *client_hello); -typedef struct { +struct SSL_EXTENSION_TYPE { uint16_t type; - int *out_present; + bool *out_present; CBS *out_data; -} SSL_EXTENSION_TYPE; +}; -/* ssl_parse_extensions parses a TLS extensions block out of |cbs| and advances - * it. It writes the parsed extensions to pointers denoted by |ext_types|. On - * success, it fills in the |out_present| and |out_data| fields and returns one. - * Otherwise, it sets |*out_alert| to an alert to send and returns zero. Unknown - * extensions are rejected unless |ignore_unknown| is 1. */ +// ssl_parse_extensions parses a TLS extensions block out of |cbs| and advances +// it. It writes the parsed extensions to pointers denoted by |ext_types|. On +// success, it fills in the |out_present| and |out_data| fields and returns one. +// Otherwise, it sets |*out_alert| to an alert to send and returns zero. Unknown +// extensions are rejected unless |ignore_unknown| is 1. int ssl_parse_extensions(const CBS *cbs, uint8_t *out_alert, const SSL_EXTENSION_TYPE *ext_types, size_t num_ext_types, int ignore_unknown); +// ssl_verify_peer_cert verifies the peer certificate for |hs|. +enum ssl_verify_result_t ssl_verify_peer_cert(SSL_HANDSHAKE *hs); + +enum ssl_hs_wait_t ssl_get_finished(SSL_HANDSHAKE *hs); +bool ssl_send_finished(SSL_HANDSHAKE *hs); +bool ssl_output_cert_chain(SSL *ssl); -/* SSLKEYLOGFILE functions. */ -/* ssl_log_secret logs |secret| with label |label|, if logging is enabled for - * |ssl|. It returns one on success and zero on failure. */ +// SSLKEYLOGFILE functions. 
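// [Editor's illustrative sketch -- not part of this patch.] ssl_log_secret
// (below) emits one labelled secret when key logging is enabled. For
// reference, the de-facto SSLKEYLOGFILE ("NSS key log") line format that such
// logging typically produces is "<label> <hex client_random> <hex secret>".
// A standalone formatter for that line, assuming that format.
#include <cstddef>
#include <cstdint>
#include <string>

static std::string ToyKeylogLine(const std::string &label,
                                 const uint8_t *client_random,
                                 size_t client_random_len,
                                 const uint8_t *secret, size_t secret_len) {
  static const char kHex[] = "0123456789abcdef";
  std::string line = label;
  line.push_back(' ');
  for (size_t i = 0; i < client_random_len; i++) {
    line.push_back(kHex[client_random[i] >> 4]);
    line.push_back(kHex[client_random[i] & 0x0f]);
  }
  line.push_back(' ');
  for (size_t i = 0; i < secret_len; i++) {
    line.push_back(kHex[secret[i] >> 4]);
    line.push_back(kHex[secret[i] & 0x0f]);
  }
  return line;  // e.g. "CLIENT_RANDOM <64 hex chars> <hex master secret>"
}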
+ +// ssl_log_secret logs |secret| with label |label|, if logging is enabled for +// |ssl|. It returns one on success and zero on failure. int ssl_log_secret(const SSL *ssl, const char *label, const uint8_t *secret, size_t secret_len); -/* ClientHello functions. */ +// ClientHello functions. -int ssl_client_hello_init(SSL *ssl, SSL_CLIENT_HELLO *out, const uint8_t *in, - size_t in_len); +int ssl_client_hello_init(SSL *ssl, SSL_CLIENT_HELLO *out, + const SSLMessage &msg); int ssl_client_hello_get_extension(const SSL_CLIENT_HELLO *client_hello, CBS *out, uint16_t extension_type); @@ -1232,7 +1655,7 @@ int ssl_client_cipher_list_contains_cipher(const SSL_CLIENT_HELLO *client_hello, uint16_t id); -/* GREASE. */ +// GREASE. enum ssl_grease_index_t { ssl_grease_cipher = 0, @@ -1243,420 +1666,709 @@ enum ssl_grease_index_t { ssl_grease_ticket_extension, }; -/* ssl_get_grease_value returns a GREASE value for |ssl|. For a given - * connection, the values for each index will be deterministic. This allows the - * same ClientHello be sent twice for a HelloRetryRequest or the same group be - * advertised in both supported_groups and key_shares. */ +// ssl_get_grease_value returns a GREASE value for |ssl|. For a given +// connection, the values for each index will be deterministic. This allows the +// same ClientHello be sent twice for a HelloRetryRequest or the same group be +// advertised in both supported_groups and key_shares. uint16_t ssl_get_grease_value(const SSL *ssl, enum ssl_grease_index_t index); -/* Signature algorithms. */ +// Signature algorithms. -/* tls1_parse_peer_sigalgs parses |sigalgs| as the list of peer signature - * algorithms and saves them on |hs|. It returns one on success and zero on - * error. */ +// tls1_parse_peer_sigalgs parses |sigalgs| as the list of peer signature +// algorithms and saves them on |hs|. It returns one on success and zero on +// error. int tls1_parse_peer_sigalgs(SSL_HANDSHAKE *hs, const CBS *sigalgs); -/* tls1_choose_signature_algorithm sets |*out| to a signature algorithm for use - * with |hs|'s private key based on the peer's preferences and the algorithms - * supported. It returns one on success and zero on error. */ +// tls1_get_legacy_signature_algorithm sets |*out| to the signature algorithm +// that should be used with |pkey| in TLS 1.1 and earlier. It returns one on +// success and zero if |pkey| may not be used at those versions. +int tls1_get_legacy_signature_algorithm(uint16_t *out, const EVP_PKEY *pkey); + +// tls1_choose_signature_algorithm sets |*out| to a signature algorithm for use +// with |hs|'s private key based on the peer's preferences and the algorithms +// supported. It returns one on success and zero on error. int tls1_choose_signature_algorithm(SSL_HANDSHAKE *hs, uint16_t *out); -/* tls12_get_verify_sigalgs sets |*out| to the signature algorithms acceptable - * for the peer signature and returns the length of the list. */ -size_t tls12_get_verify_sigalgs(const SSL *ssl, const uint16_t **out); +// tls12_add_verify_sigalgs adds the signature algorithms acceptable for the +// peer signature to |out|. It returns one on success and zero on error. +int tls12_add_verify_sigalgs(const SSL *ssl, CBB *out); -/* tls12_check_peer_sigalg checks if |sigalg| is acceptable for the peer - * signature. It returns one on success and zero on error, setting |*out_alert| - * to an alert to send. 
*/ -int tls12_check_peer_sigalg(SSL *ssl, int *out_alert, uint16_t sigalg); +// tls12_check_peer_sigalg checks if |sigalg| is acceptable for the peer +// signature. It returns one on success and zero on error, setting |*out_alert| +// to an alert to send. +int tls12_check_peer_sigalg(SSL *ssl, uint8_t *out_alert, uint16_t sigalg); -/* Underdocumented functions. - * - * Functions below here haven't been touched up and may be underdocumented. */ +// Underdocumented functions. +// +// Functions below here haven't been touched up and may be underdocumented. #define TLSEXT_CHANNEL_ID_SIZE 128 -/* From RFC4492, used in encoding the curve type in ECParameters */ +// From RFC4492, used in encoding the curve type in ECParameters #define NAMED_CURVE_TYPE 3 -typedef struct cert_st { +struct CERT { EVP_PKEY *privatekey; - /* chain contains the certificate chain, with the leaf at the beginning. The - * first element of |chain| may be NULL to indicate that the leaf certificate - * has not yet been set. - * If |chain| != NULL -> len(chain) >= 1 - * If |chain[0]| == NULL -> len(chain) >= 2. - * |chain[1..]| != NULL */ + // chain contains the certificate chain, with the leaf at the beginning. The + // first element of |chain| may be NULL to indicate that the leaf certificate + // has not yet been set. + // If |chain| != NULL -> len(chain) >= 1 + // If |chain[0]| == NULL -> len(chain) >= 2. + // |chain[1..]| != NULL STACK_OF(CRYPTO_BUFFER) *chain; - /* x509_chain may contain a parsed copy of |chain[1..]|. This is only used as - * a cache in order to implement “get0” functions that return a non-owning - * pointer to the certificate chain. */ + // x509_chain may contain a parsed copy of |chain[1..]|. This is only used as + // a cache in order to implement “get0” functions that return a non-owning + // pointer to the certificate chain. STACK_OF(X509) *x509_chain; - /* x509_leaf may contain a parsed copy of the first element of |chain|. This - * is only used as a cache in order to implement “get0” functions that return - * a non-owning pointer to the certificate chain. */ + // x509_leaf may contain a parsed copy of the first element of |chain|. This + // is only used as a cache in order to implement “get0” functions that return + // a non-owning pointer to the certificate chain. X509 *x509_leaf; - /* x509_stash contains the last |X509| object append to the chain. This is a - * workaround for some third-party code that continue to use an |X509| object - * even after passing ownership with an “add0” function. */ + // x509_stash contains the last |X509| object append to the chain. This is a + // workaround for some third-party code that continue to use an |X509| object + // even after passing ownership with an “add0” function. X509 *x509_stash; - /* key_method, if non-NULL, is a set of callbacks to call for private key - * operations. */ + // key_method, if non-NULL, is a set of callbacks to call for private key + // operations. const SSL_PRIVATE_KEY_METHOD *key_method; - /* x509_method contains pointers to functions that might deal with |X509| - * compatibility, or might be a no-op, depending on the application. */ + // x509_method contains pointers to functions that might deal with |X509| + // compatibility, or might be a no-op, depending on the application. const SSL_X509_METHOD *x509_method; - DH *dh_tmp; - DH *(*dh_tmp_cb)(SSL *ssl, int is_export, int keysize); - - /* sigalgs, if non-NULL, is the set of signature algorithms supported by - * |privatekey| in decreasing order of preference. 
*/ + // sigalgs, if non-NULL, is the set of signature algorithms supported by + // |privatekey| in decreasing order of preference. uint16_t *sigalgs; size_t num_sigalgs; - /* Certificate setup callback: if set is called whenever a - * certificate may be required (client or server). the callback - * can then examine any appropriate parameters and setup any - * certificates required. This allows advanced applications - * to select certificates on the fly: for example based on - * supported signature algorithms or curves. */ + // Certificate setup callback: if set is called whenever a + // certificate may be required (client or server). the callback + // can then examine any appropriate parameters and setup any + // certificates required. This allows advanced applications + // to select certificates on the fly: for example based on + // supported signature algorithms or curves. int (*cert_cb)(SSL *ssl, void *arg); void *cert_cb_arg; - /* Optional X509_STORE for certificate validation. If NULL the parent SSL_CTX - * store is used instead. */ + // Optional X509_STORE for certificate validation. If NULL the parent SSL_CTX + // store is used instead. X509_STORE *verify_store; - /* Signed certificate timestamp list to be sent to the client, if requested */ + // Signed certificate timestamp list to be sent to the client, if requested CRYPTO_BUFFER *signed_cert_timestamp_list; - /* OCSP response to be sent to the client, if requested. */ + // OCSP response to be sent to the client, if requested. CRYPTO_BUFFER *ocsp_response; - /* sid_ctx partitions the session space within a shared session cache or - * ticket key. Only sessions with a matching value will be accepted. */ + // sid_ctx partitions the session space within a shared session cache or + // ticket key. Only sessions with a matching value will be accepted. uint8_t sid_ctx_length; uint8_t sid_ctx[SSL_MAX_SID_CTX_LENGTH]; -} CERT; -/* SSL_METHOD is a compatibility structure to support the legacy version-locked - * methods. */ -struct ssl_method_st { - /* version, if non-zero, is the only protocol version acceptable to an - * SSL_CTX initialized from this method. */ - uint16_t version; - /* method is the underlying SSL_PROTOCOL_METHOD that initializes the - * SSL_CTX. */ - const SSL_PROTOCOL_METHOD *method; - /* x509_method contains pointers to functions that might deal with |X509| - * compatibility, or might be a no-op, depending on the application. */ - const SSL_X509_METHOD *x509_method; + // If enable_early_data is true, early data can be sent and accepted. + bool enable_early_data:1; }; -/* Used to hold functions for SSLv2 or SSLv3/TLSv1 functions */ -struct ssl_protocol_method_st { - /* is_dtls is one if the protocol is DTLS and zero otherwise. */ - char is_dtls; - /* min_version is the minimum implemented version. */ - uint16_t min_version; - /* max_version is the maximum implemented version. */ - uint16_t max_version; - /* version_from_wire maps |wire_version| to a protocol version. On success, it - * sets |*out_version| to the result and returns one. If the version is - * unknown, it returns zero. */ - int (*version_from_wire)(uint16_t *out_version, uint16_t wire_version); - /* version_to_wire maps |version| to the wire representation. It is an error - * to call it with an invalid version. */ - uint16_t (*version_to_wire)(uint16_t version); - int (*ssl_new)(SSL *ssl); +// |SSL_PROTOCOL_METHOD| abstracts between TLS and DTLS. 
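// [Editor's illustrative sketch -- not part of this patch.] The |CERT::chain|
// comment above states an invariant: if the chain exists it is non-empty, a
// NULL leaf slot implies at least one issuer entry follows, and every entry
// after the leaf is non-NULL. A standalone checker for that invariant, with
// std::vector standing in for STACK_OF(CRYPTO_BUFFER).
#include <cstddef>
#include <vector>

static bool ToyChainInvariantHolds(const std::vector<const void *> *chain) {
  if (chain == nullptr) {
    return true;  // no chain configured at all is fine
  }
  if (chain->empty()) {
    return false;  // |chain| != NULL -> len(chain) >= 1
  }
  if ((*chain)[0] == nullptr && chain->size() < 2) {
    return false;  // |chain[0]| == NULL -> len(chain) >= 2
  }
  for (size_t i = 1; i < chain->size(); i++) {
    if ((*chain)[i] == nullptr) {
      return false;  // |chain[1..]| != NULL
    }
  }
  return true;
}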
+struct SSL_PROTOCOL_METHOD { + bool is_dtls; + bool (*ssl_new)(SSL *ssl); void (*ssl_free)(SSL *ssl); - /* ssl_get_message reads the next handshake message. On success, it returns - * one and sets |ssl->s3->tmp.message_type|, |ssl->init_msg|, and - * |ssl->init_num|. Otherwise, it returns <= 0. */ - int (*ssl_get_message)(SSL *ssl); - /* get_current_message sets |*out| to the current handshake message. This - * includes the protocol-specific message header. */ - void (*get_current_message)(const SSL *ssl, CBS *out); - /* release_current_message is called to release the current handshake message. - * If |free_buffer| is one, buffers will also be released. */ - void (*release_current_message)(SSL *ssl, int free_buffer); - /* read_app_data reads up to |len| bytes of application data into |buf|. On - * success, it returns the number of bytes read. Otherwise, it returns <= 0 - * and sets |*out_got_handshake| to whether the failure was due to a - * post-handshake handshake message. If so, it fills in the current message as - * in |ssl_get_message|. */ - int (*read_app_data)(SSL *ssl, int *out_got_handshake, uint8_t *buf, int len, - int peek); - int (*read_change_cipher_spec)(SSL *ssl); - void (*read_close_notify)(SSL *ssl); - int (*write_app_data)(SSL *ssl, const uint8_t *buf, int len); + // get_message sets |*out| to the current handshake message and returns true + // if one has been received. It returns false if more input is needed. + bool (*get_message)(SSL *ssl, SSLMessage *out); + // next_message is called to release the current handshake message. + void (*next_message)(SSL *ssl); + // Use the |ssl_open_handshake| wrapper. + ssl_open_record_t (*open_handshake)(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, Span in); + // Use the |ssl_open_change_cipher_spec| wrapper. + ssl_open_record_t (*open_change_cipher_spec)(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, + Span in); + // Use the |ssl_open_app_data| wrapper. + ssl_open_record_t (*open_app_data)(SSL *ssl, Span *out, + size_t *out_consumed, uint8_t *out_alert, + Span in); + int (*write_app_data)(SSL *ssl, bool *out_needs_handshake, const uint8_t *buf, + int len); int (*dispatch_alert)(SSL *ssl); - /* supports_cipher returns one if |cipher| is supported by this protocol and - * zero otherwise. */ - int (*supports_cipher)(const SSL_CIPHER *cipher); - /* init_message begins a new handshake message of type |type|. |cbb| is the - * root CBB to be passed into |finish_message|. |*body| is set to a child CBB - * the caller should write to. It returns one on success and zero on error. */ - int (*init_message)(SSL *ssl, CBB *cbb, CBB *body, uint8_t type); - /* finish_message finishes a handshake message. It sets |*out_msg| to a - * newly-allocated buffer with the serialized message. The caller must - * release it with |OPENSSL_free| when done. It returns one on success and - * zero on error. */ - int (*finish_message)(SSL *ssl, CBB *cbb, uint8_t **out_msg, size_t *out_len); - /* add_message adds a handshake message to the pending flight. It returns one - * on success and zero on error. In either case, it takes ownership of |msg| - * and releases it with |OPENSSL_free| when done. */ - int (*add_message)(SSL *ssl, uint8_t *msg, size_t len); - /* add_change_cipher_spec adds a ChangeCipherSpec record to the pending - * flight. It returns one on success and zero on error. */ - int (*add_change_cipher_spec)(SSL *ssl); - /* add_alert adds an alert to the pending flight. It returns one on success - * and zero on error. 
*/ - int (*add_alert)(SSL *ssl, uint8_t level, uint8_t desc); - /* flush_flight flushes the pending flight to the transport. It returns one on - * success and <= 0 on error. */ + // init_message begins a new handshake message of type |type|. |cbb| is the + // root CBB to be passed into |finish_message|. |*body| is set to a child CBB + // the caller should write to. It returns true on success and false on error. + bool (*init_message)(SSL *ssl, CBB *cbb, CBB *body, uint8_t type); + // finish_message finishes a handshake message. It sets |*out_msg| to the + // serialized message. It returns true on success and false on error. + bool (*finish_message)(SSL *ssl, CBB *cbb, bssl::Array *out_msg); + // add_message adds a handshake message to the pending flight. It returns + // true on success and false on error. + bool (*add_message)(SSL *ssl, bssl::Array msg); + // add_change_cipher_spec adds a ChangeCipherSpec record to the pending + // flight. It returns true on success and false on error. + bool (*add_change_cipher_spec)(SSL *ssl); + // add_alert adds an alert to the pending flight. It returns true on success + // and false on error. + bool (*add_alert)(SSL *ssl, uint8_t level, uint8_t desc); + // flush_flight flushes the pending flight to the transport. It returns one on + // success and <= 0 on error. int (*flush_flight)(SSL *ssl); - /* expect_flight is called when the handshake expects a flight of messages from - * the peer. */ - void (*expect_flight)(SSL *ssl); - /* received_flight is called when the handshake has received a flight of - * messages from the peer. */ - void (*received_flight)(SSL *ssl); - /* set_read_state sets |ssl|'s read cipher state to |aead_ctx|. It takes - * ownership of |aead_ctx|. It returns one on success and zero if changing the - * read state is forbidden at this point. */ - int (*set_read_state)(SSL *ssl, SSL_AEAD_CTX *aead_ctx); - /* set_write_state sets |ssl|'s write cipher state to |aead_ctx|. It takes - * ownership of |aead_ctx|. It returns one on success and zero if changing the - * write state is forbidden at this point. */ - int (*set_write_state)(SSL *ssl, SSL_AEAD_CTX *aead_ctx); + // on_handshake_complete is called when the handshake is complete. + void (*on_handshake_complete)(SSL *ssl); + // set_read_state sets |ssl|'s read cipher state to |aead_ctx|. It returns + // true on success and false if changing the read state is forbidden at this + // point. + bool (*set_read_state)(SSL *ssl, UniquePtr aead_ctx); + // set_write_state sets |ssl|'s write cipher state to |aead_ctx|. It returns + // true on success and false if changing the write state is forbidden at this + // point. + bool (*set_write_state)(SSL *ssl, UniquePtr aead_ctx); }; -struct ssl_x509_method_st { - /* cert_clear frees and NULLs all X509-related state. */ - void (*cert_clear)(CERT *cert); - /* cert_flush_cached_chain drops any cached |X509|-based certificate chain - * from |cert|. */ - void (*cert_flush_cached_chain)(CERT *cert); - /* cert_flush_cached_chain drops any cached |X509|-based leaf certificate - * from |cert|. */ - void (*cert_flush_cached_leaf)(CERT *cert); - - /* session_cache_objects fills out |sess->x509_peer| and |sess->x509_chain| - * from |sess->certs| and erases |sess->x509_chain_without_leaf|. It returns - * one on success or zero on error. */ - int (*session_cache_objects)(SSL_SESSION *session); - /* session_dup duplicates any needed fields from |session| to |new_session|. - * It returns one on success or zero on error. 
*/ - int (*session_dup)(SSL_SESSION *new_session, const SSL_SESSION *session); - /* session_clear frees any X509-related state from |session|. */ - void (*session_clear)(SSL_SESSION *session); +// The following wrappers call |open_*| but handle |read_shutdown| correctly. + +// ssl_open_handshake processes a record from |in| for reading a handshake +// message. +ssl_open_record_t ssl_open_handshake(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, Span in); + +// ssl_open_change_cipher_spec processes a record from |in| for reading a +// ChangeCipherSpec. +ssl_open_record_t ssl_open_change_cipher_spec(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, + Span in); + +// ssl_open_app_data processes a record from |in| for reading application data. +// On success, it returns |ssl_open_record_success| and sets |*out| to the +// input. If it encounters a post-handshake message, it returns +// |ssl_open_record_discard|. The caller should then retry, after processing any +// messages received with |get_message|. +ssl_open_record_t ssl_open_app_data(SSL *ssl, Span *out, + size_t *out_consumed, uint8_t *out_alert, + Span in); + +// ssl_crypto_x509_method provides the |SSL_X509_METHOD| functions using +// crypto/x509. +extern const SSL_X509_METHOD ssl_crypto_x509_method; + +// ssl_noop_x509_method provides the |SSL_X509_METHOD| functions that avoid +// crypto/x509. +extern const SSL_X509_METHOD ssl_noop_x509_method; + +// ssl_cipher_preference_list_st contains a list of SSL_CIPHERs with +// equal-preference groups. For TLS clients, the groups are moot because the +// server picks the cipher and groups cannot be expressed on the wire. However, +// for servers, the equal-preference groups allow the client's preferences to +// be partially respected. (This only has an effect with +// SSL_OP_CIPHER_SERVER_PREFERENCE). +// +// The equal-preference groups are expressed by grouping SSL_CIPHERs together. +// All elements of a group have the same priority: no ordering is expressed +// within a group. +// +// The values in |ciphers| are in one-to-one correspondence with +// |in_group_flags|. (That is, sk_SSL_CIPHER_num(ciphers) is the number of +// bytes in |in_group_flags|.) The bytes in |in_group_flags| are either 1, to +// indicate that the corresponding SSL_CIPHER is not the last element of a +// group, or 0 to indicate that it is. +// +// For example, if |in_group_flags| contains all zeros then that indicates a +// traditional, fully-ordered preference. Every SSL_CIPHER is the last element +// of the group (i.e. they are all in a one-element group). +// +// For a more complex example, consider: +// ciphers: A B C D E F +// in_group_flags: 1 1 0 0 1 0 +// +// That would express the following, order: +// +// A E +// B -> D -> F +// C +struct ssl_cipher_preference_list_st { + STACK_OF(SSL_CIPHER) *ciphers; + uint8_t *in_group_flags; }; -/* ssl_noop_x509_method is implements the |ssl_x509_method_st| functions by - * doing nothing. */ -extern const struct ssl_x509_method_st ssl_noop_x509_method; +struct tlsext_ticket_key { + static constexpr bool kAllowUniquePtr = true; + + uint8_t name[SSL_TICKET_KEY_NAME_LEN]; + uint8_t hmac_key[16]; + uint8_t aes_key[16]; + // next_rotation_tv_sec is the time (in seconds from the epoch) when the + // current key should be superseded by a new key, or the time when a previous + // key should be dropped. If zero, then the key should not be automatically + // rotated. 
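// [Editor's illustrative sketch -- not part of this patch.] The
// ssl_cipher_preference_list_st comment above encodes equal-preference groups
// by marking each cipher's |in_group_flags| byte as 1 ("not the last element
// of its group") or 0 ("last element of its group"). A standalone decoder of
// that encoding, using strings in place of SSL_CIPHER pointers.
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

static std::vector<std::vector<std::string>> ToyDecodeGroups(
    const std::vector<std::string> &ciphers,
    const std::vector<uint8_t> &in_group_flags) {
  std::vector<std::vector<std::string>> groups;
  std::vector<std::string> current;
  for (size_t i = 0; i < ciphers.size() && i < in_group_flags.size(); i++) {
    current.push_back(ciphers[i]);
    if (in_group_flags[i] == 0) {  // 0 closes the current group
      groups.push_back(current);
      current.clear();
    }
  }
  return groups;
}

// Using the example documented above -- ciphers A B C D E F with flags
// 1 1 0 0 1 0 -- this yields the groups {A, B, C}, {D}, {E, F}.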
+ uint64_t next_rotation_tv_sec; +}; -/* ssl_crypto_x509_method provides the |ssl_x509_method_st| functions using - * crypto/x509. */ -extern const struct ssl_x509_method_st ssl_crypto_x509_method; +} // namespace bssl -typedef struct ssl3_record_st { - /* type is the record type. */ - uint8_t type; - /* length is the number of unconsumed bytes in the record. */ - uint16_t length; - /* data is a non-owning pointer to the first unconsumed byte of the record. */ - uint8_t *data; -} SSL3_RECORD; - -typedef struct ssl3_buffer_st { - /* buf is the memory allocated for this buffer. */ - uint8_t *buf; - /* offset is the offset into |buf| which the buffer contents start at. */ - uint16_t offset; - /* len is the length of the buffer contents from |buf| + |offset|. */ - uint16_t len; - /* cap is how much memory beyond |buf| + |offset| is available. */ - uint16_t cap; -} SSL3_BUFFER; - -/* An ssl_shutdown_t describes the shutdown state of one end of the connection, - * whether it is alive or has been shutdown via close_notify or fatal alert. */ +DECLARE_LHASH_OF(SSL_SESSION) + +namespace bssl { + +// SSLContext backs the public |SSL_CTX| type. Due to compatibility constraints, +// it is a base class for |ssl_ctx_st|. +struct SSLContext { + const SSL_PROTOCOL_METHOD *method; + const SSL_X509_METHOD *x509_method; + + // lock is used to protect various operations on this object. + CRYPTO_MUTEX lock; + + // conf_max_version is the maximum acceptable protocol version configured by + // |SSL_CTX_set_max_proto_version|. Note this version is normalized in DTLS + // and is further constrainted by |SSL_OP_NO_*|. + uint16_t conf_max_version; + + // conf_min_version is the minimum acceptable protocol version configured by + // |SSL_CTX_set_min_proto_version|. Note this version is normalized in DTLS + // and is further constrainted by |SSL_OP_NO_*|. + uint16_t conf_min_version; + + // tls13_variant is the variant of TLS 1.3 we are using for this + // configuration. + enum tls13_variant_t tls13_variant; + + struct ssl_cipher_preference_list_st *cipher_list; + + X509_STORE *cert_store; + LHASH_OF(SSL_SESSION) *sessions; + // Most session-ids that will be cached, default is + // SSL_SESSION_CACHE_MAX_SIZE_DEFAULT. 0 is unlimited. + unsigned long session_cache_size; + SSL_SESSION *session_cache_head; + SSL_SESSION *session_cache_tail; + + // handshakes_since_cache_flush is the number of successful handshakes since + // the last cache flush. + int handshakes_since_cache_flush; + + // This can have one of 2 values, ored together, + // SSL_SESS_CACHE_CLIENT, + // SSL_SESS_CACHE_SERVER, + // Default is SSL_SESSION_CACHE_SERVER, which means only + // SSL_accept which cache SSL_SESSIONS. + int session_cache_mode; + + // session_timeout is the default lifetime for new sessions in TLS 1.2 and + // earlier, in seconds. + uint32_t session_timeout; + + // session_psk_dhe_timeout is the default lifetime for new sessions in TLS + // 1.3, in seconds. + uint32_t session_psk_dhe_timeout; + + // If this callback is not null, it will be called each time a session id is + // added to the cache. If this function returns 1, it means that the + // callback will do a SSL_SESSION_free() when it has finished using it. + // Otherwise, on 0, it means the callback has finished with it. If + // remove_session_cb is not null, it will be called when a session-id is + // removed from the cache. After the call, OpenSSL will SSL_SESSION_free() + // it. 
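// [Editor's illustrative sketch -- not part of this patch.] Per the comment
// above, |new_session_cb| returns 1 when the application keeps the SSL_SESSION
// reference (and will call SSL_SESSION_free() itself later) and 0 when it is
// already done with it. A minimal callback following that convention; the
// one-slot external cache here is a placeholder for illustration only.
#include <openssl/ssl.h>

static SSL_SESSION *g_toy_cached_session = nullptr;  // toy one-slot cache

static int ToyNewSessionCallback(SSL *ssl, SSL_SESSION *session) {
  (void)ssl;
  if (g_toy_cached_session != nullptr) {
    SSL_SESSION_free(g_toy_cached_session);
  }
  g_toy_cached_session = session;
  return 1;  // we keep the reference and free it ourselves later
}

// Such a callback is typically registered with
// SSL_CTX_sess_set_new_cb(ctx, ToyNewSessionCallback) after enabling
// client-side caching via SSL_CTX_set_session_cache_mode.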
+ int (*new_session_cb)(SSL *ssl, SSL_SESSION *sess); + void (*remove_session_cb)(SSL_CTX *ctx, SSL_SESSION *sess); + SSL_SESSION *(*get_session_cb)(SSL *ssl, const uint8_t *data, int len, + int *copy); + SSL_SESSION *(*get_session_cb_legacy)(SSL *ssl, uint8_t *data, int len, + int *copy); + + CRYPTO_refcount_t references; + + // if defined, these override the X509_verify_cert() calls + int (*app_verify_callback)(X509_STORE_CTX *store_ctx, void *arg); + void *app_verify_arg; + + enum ssl_verify_result_t (*custom_verify_callback)(SSL *ssl, + uint8_t *out_alert); + + // Default password callback. + pem_password_cb *default_passwd_callback; + + // Default password callback user data. + void *default_passwd_callback_userdata; + + // get client cert callback + int (*client_cert_cb)(SSL *ssl, X509 **out_x509, EVP_PKEY **out_pkey); + + // get channel id callback + void (*channel_id_cb)(SSL *ssl, EVP_PKEY **out_pkey); + + CRYPTO_EX_DATA ex_data; + + // custom_*_extensions stores any callback sets for custom extensions. Note + // that these pointers will be NULL if the stack would otherwise be empty. + STACK_OF(SSL_CUSTOM_EXTENSION) *client_custom_extensions; + STACK_OF(SSL_CUSTOM_EXTENSION) *server_custom_extensions; + + // Default values used when no per-SSL value is defined follow + + void (*info_callback)(const SSL *ssl, int type, int value); + + // what we put in client cert requests + STACK_OF(CRYPTO_BUFFER) *client_CA; + + // cached_x509_client_CA is a cache of parsed versions of the elements of + // |client_CA|. + STACK_OF(X509_NAME) *cached_x509_client_CA; + + + // Default values to use in SSL structures follow (these are copied by + // SSL_new) + + uint32_t options; + uint32_t mode; + uint32_t max_cert_list; + + CERT *cert; + + // callback that allows applications to peek at protocol messages + void (*msg_callback)(int write_p, int version, int content_type, + const void *buf, size_t len, SSL *ssl, void *arg); + void *msg_callback_arg; + + int verify_mode; + int (*default_verify_callback)( + int ok, X509_STORE_CTX *ctx); // called 'verify_callback' in the SSL + + X509_VERIFY_PARAM *param; + + // select_certificate_cb is called before most ClientHello processing and + // before the decision whether to resume a session is made. See + // |ssl_select_cert_result_t| for details of the return values. + enum ssl_select_cert_result_t (*select_certificate_cb)( + const SSL_CLIENT_HELLO *); + + // dos_protection_cb is called once the resumption decision for a ClientHello + // has been made. It returns one to continue the handshake or zero to + // abort. + int (*dos_protection_cb) (const SSL_CLIENT_HELLO *); + + // Maximum amount of data to send in one fragment. actual record size can be + // more than this due to padding and MAC overheads. + uint16_t max_send_fragment; + + // TLS extensions servername callback + int (*tlsext_servername_callback)(SSL *, int *, void *); + void *tlsext_servername_arg; + + // RFC 4507 session ticket keys. |tlsext_ticket_key_current| may be NULL + // before the first handshake and |tlsext_ticket_key_prev| may be NULL at any + // time. Automatically generated ticket keys are rotated as needed at + // handshake time. Hence, all access must be synchronized through |lock|. 
+ struct tlsext_ticket_key *tlsext_ticket_key_current; + struct tlsext_ticket_key *tlsext_ticket_key_prev; + + // Callback to support customisation of ticket key setting + int (*tlsext_ticket_key_cb)(SSL *ssl, uint8_t *name, uint8_t *iv, + EVP_CIPHER_CTX *ectx, HMAC_CTX *hctx, int enc); + + // Server-only: psk_identity_hint is the default identity hint to send in + // PSK-based key exchanges. + char *psk_identity_hint; + + unsigned int (*psk_client_callback)(SSL *ssl, const char *hint, + char *identity, + unsigned int max_identity_len, + uint8_t *psk, unsigned int max_psk_len); + unsigned int (*psk_server_callback)(SSL *ssl, const char *identity, + uint8_t *psk, unsigned int max_psk_len); + + + // Next protocol negotiation information + // (for experimental NPN extension). + + // For a server, this contains a callback function by which the set of + // advertised protocols can be provided. + int (*next_protos_advertised_cb)(SSL *ssl, const uint8_t **out, + unsigned *out_len, void *arg); + void *next_protos_advertised_cb_arg; + // For a client, this contains a callback function that selects the + // next protocol from the list provided by the server. + int (*next_proto_select_cb)(SSL *ssl, uint8_t **out, uint8_t *out_len, + const uint8_t *in, unsigned in_len, void *arg); + void *next_proto_select_cb_arg; + + // ALPN information + // (we are in the process of transitioning from NPN to ALPN.) + + // For a server, this contains a callback function that allows the + // server to select the protocol for the connection. + // out: on successful return, this must point to the raw protocol + // name (without the length prefix). + // outlen: on successful return, this contains the length of |*out|. + // in: points to the client's list of supported protocols in + // wire-format. + // inlen: the length of |in|. + int (*alpn_select_cb)(SSL *ssl, const uint8_t **out, uint8_t *out_len, + const uint8_t *in, unsigned in_len, void *arg); + void *alpn_select_cb_arg; + + // For a client, this contains the list of supported protocols in wire + // format. + uint8_t *alpn_client_proto_list; + unsigned alpn_client_proto_list_len; + + // SRTP profiles we are willing to do from RFC 5764 + STACK_OF(SRTP_PROTECTION_PROFILE) *srtp_profiles; + + // Supported group values inherited by SSL structure + size_t supported_group_list_len; + uint16_t *supported_group_list; + + // The client's Channel ID private key. + EVP_PKEY *tlsext_channel_id_private; + + // keylog_callback, if not NULL, is the key logging callback. See + // |SSL_CTX_set_keylog_callback|. + void (*keylog_callback)(const SSL *ssl, const char *line); + + // current_time_cb, if not NULL, is the function to use to get the current + // time. It sets |*out_clock| to the current time. The |ssl| argument is + // always NULL. See |SSL_CTX_set_current_time_cb|. + void (*current_time_cb)(const SSL *ssl, struct timeval *out_clock); + + // pool is used for all |CRYPTO_BUFFER|s in case we wish to share certificate + // memory. + CRYPTO_BUFFER_POOL *pool; + + // ticket_aead_method contains function pointers for opening and sealing + // session tickets. + const SSL_TICKET_AEAD_METHOD *ticket_aead_method; + + // verify_sigalgs, if not empty, is the set of signature algorithms + // accepted from the peer in decreasing order of preference. + uint16_t *verify_sigalgs; + size_t num_verify_sigalgs; + + // retain_only_sha256_of_client_certs is true if we should compute the SHA256 + // hash of the peer's certificate and then discard it to save memory and + // session space. 
Only effective on the server side. + bool retain_only_sha256_of_client_certs:1; + + // quiet_shutdown is true if the connection should not send a close_notify on + // shutdown. + bool quiet_shutdown:1; + + // ocsp_stapling_enabled is only used by client connections and indicates + // whether OCSP stapling will be requested. + bool ocsp_stapling_enabled:1; + + // If true, a client will request certificate timestamps. + bool signed_cert_timestamps_enabled:1; + + // tlsext_channel_id_enabled is one if Channel ID is enabled and zero + // otherwise. For a server, means that we'll accept Channel IDs from clients. + // For a client, means that we'll advertise support. + bool tlsext_channel_id_enabled:1; + + // grease_enabled is one if draft-davidben-tls-grease-01 is enabled and zero + // otherwise. + bool grease_enabled:1; + + // allow_unknown_alpn_protos is one if the client allows unsolicited ALPN + // protocols from the peer. + bool allow_unknown_alpn_protos:1; + + // ed25519_enabled is one if Ed25519 is advertised in the handshake. + bool ed25519_enabled:1; +}; + +// An ssl_shutdown_t describes the shutdown state of one end of the connection, +// whether it is alive or has been shutdown via close_notify or fatal alert. enum ssl_shutdown_t { ssl_shutdown_none = 0, ssl_shutdown_close_notify = 1, - ssl_shutdown_fatal_alert = 2, + ssl_shutdown_error = 2, }; -typedef struct ssl3_state_st { - uint8_t read_sequence[8]; - uint8_t write_sequence[8]; +struct SSL3_STATE { + static constexpr bool kAllowUniquePtr = true; + + SSL3_STATE(); + ~SSL3_STATE(); + + uint8_t read_sequence[8] = {0}; + uint8_t write_sequence[8] = {0}; + + uint8_t server_random[SSL3_RANDOM_SIZE] = {0}; + uint8_t client_random[SSL3_RANDOM_SIZE] = {0}; + + // read_buffer holds data from the transport to be processed. + SSLBuffer read_buffer; + // write_buffer holds data to be written to the transport. + SSLBuffer write_buffer; + + // pending_app_data is the unconsumed application data. It points into + // |read_buffer|. + Span pending_app_data; - uint8_t server_random[SSL3_RANDOM_SIZE]; - uint8_t client_random[SSL3_RANDOM_SIZE]; + // partial write - check the numbers match + unsigned int wnum = 0; // number of bytes sent so far + int wpend_tot = 0; // number bytes written + int wpend_type = 0; + int wpend_ret = 0; // number of bytes submitted + const uint8_t *wpend_buf = nullptr; - /* read_buffer holds data from the transport to be processed. */ - SSL3_BUFFER read_buffer; - /* write_buffer holds data to be written to the transport. */ - SSL3_BUFFER write_buffer; + // read_shutdown is the shutdown state for the read half of the connection. + enum ssl_shutdown_t read_shutdown = ssl_shutdown_none; - SSL3_RECORD rrec; /* each decoded record goes in here */ + // write_shutdown is the shutdown state for the write half of the connection. + enum ssl_shutdown_t write_shutdown = ssl_shutdown_none; - /* partial write - check the numbers match */ - unsigned int wnum; /* number of bytes sent so far */ - int wpend_tot; /* number bytes written */ - int wpend_type; - int wpend_ret; /* number of bytes submitted */ - const uint8_t *wpend_buf; + // read_error, if |read_shutdown| is |ssl_shutdown_error|, is the error for + // the receive half of the connection. + UniquePtr read_error; - /* recv_shutdown is the shutdown state for the receive half of the - * connection. */ - enum ssl_shutdown_t recv_shutdown; + int alert_dispatch = 0; - /* recv_shutdown is the shutdown state for the send half of the connection. 
*/ - enum ssl_shutdown_t send_shutdown; + int total_renegotiations = 0; - int alert_dispatch; + // This holds a variable that indicates what we were doing when a 0 or -1 is + // returned. This is needed for non-blocking IO so we know what request + // needs re-doing when in SSL_accept or SSL_connect + int rwstate = SSL_NOTHING; - int total_renegotiations; + // early_data_skipped is the amount of early data that has been skipped by the + // record layer. + uint16_t early_data_skipped = 0; - /* early_data_skipped is the amount of early data that has been skipped by the - * record layer. */ - uint16_t early_data_skipped; + // empty_record_count is the number of consecutive empty records received. + uint8_t empty_record_count = 0; - /* empty_record_count is the number of consecutive empty records received. */ - uint8_t empty_record_count; + // warning_alert_count is the number of consecutive warning alerts + // received. + uint8_t warning_alert_count = 0; - /* warning_alert_count is the number of consecutive warning alerts - * received. */ - uint8_t warning_alert_count; + // key_update_count is the number of consecutive KeyUpdates received. + uint8_t key_update_count = 0; - /* key_update_count is the number of consecutive KeyUpdates received. */ - uint8_t key_update_count; + // skip_early_data instructs the record layer to skip unexpected early data + // messages when 0RTT is rejected. + bool skip_early_data:1; - /* skip_early_data instructs the record layer to skip unexpected early data - * messages when 0RTT is rejected. */ - unsigned skip_early_data:1; + // have_version is true if the connection's final version is known. Otherwise + // the version has not been negotiated yet. + bool have_version:1; - /* have_version is true if the connection's final version is known. Otherwise - * the version has not been negotiated yet. */ - unsigned have_version:1; + // v2_hello_done is true if the peer's V2ClientHello, if any, has been handled + // and future messages should use the record layer. + bool v2_hello_done:1; - /* v2_hello_done is true if the peer's V2ClientHello, if any, has been handled - * and future messages should use the record layer. */ - unsigned v2_hello_done:1; + // is_v2_hello is true if the current handshake message was derived from a + // V2ClientHello rather than received from the peer directly. + bool is_v2_hello:1; - /* is_v2_hello is true if the current handshake message was derived from a - * V2ClientHello rather than received from the peer directly. */ - unsigned is_v2_hello:1; - - /* initial_handshake_complete is true if the initial handshake has - * completed. */ - unsigned initial_handshake_complete:1; - - /* session_reused indicates whether a session was resumed. */ - unsigned session_reused:1; - - unsigned send_connection_binding:1; - - /* In a client, this means that the server supported Channel ID and that a - * Channel ID was sent. In a server it means that we echoed support for - * Channel IDs and that tlsext_channel_id will be valid after the - * handshake. */ - unsigned tlsext_channel_id_valid:1; - - /* short_header is one if https://github.com/tlswg/tls13-spec/pull/762 has - * been negotiated. */ - unsigned short_header:1; - - uint8_t send_alert[2]; - - /* pending_flight is the pending outgoing flight. This is used to flush each - * handshake flight in a single write. */ - BUF_MEM *pending_flight; - - /* pending_flight_offset is the number of bytes of |pending_flight| which have - * been successfully written. 
*/ - uint32_t pending_flight_offset; - - /* aead_read_ctx is the current read cipher state. */ - SSL_AEAD_CTX *aead_read_ctx; - - /* aead_write_ctx is the current write cipher state. */ - SSL_AEAD_CTX *aead_write_ctx; - - /* hs is the handshake state for the current handshake or NULL if there isn't - * one. */ - SSL_HANDSHAKE *hs; - - uint8_t write_traffic_secret[EVP_MAX_MD_SIZE]; - uint8_t read_traffic_secret[EVP_MAX_MD_SIZE]; - uint8_t exporter_secret[EVP_MAX_MD_SIZE]; - uint8_t write_traffic_secret_len; - uint8_t read_traffic_secret_len; - uint8_t exporter_secret_len; - - /* Connection binding to prevent renegotiation attacks */ - uint8_t previous_client_finished[12]; - uint8_t previous_client_finished_len; - uint8_t previous_server_finished_len; - uint8_t previous_server_finished[12]; - - /* State pertaining to the pending handshake. - * - * TODO(davidben): Move everything not needed after the handshake completes to - * |hs| and remove this. */ - struct { - int message_type; + // has_message is true if the current handshake message has been returned + // at least once by |get_message| and false otherwise. + bool has_message:1; - int reuse_message; - - uint8_t new_mac_secret_len; - uint8_t new_key_len; - uint8_t new_fixed_iv_len; - } tmp; - - /* established_session is the session established by the connection. This - * session is only filled upon the completion of the handshake and is - * immutable. */ - SSL_SESSION *established_session; - - /* Next protocol negotiation. For the client, this is the protocol that we - * sent in NextProtocol and is set when handling ServerHello extensions. - * - * For a server, this is the client's selected_protocol from NextProtocol and - * is set when handling the NextProtocol message, before the Finished - * message. */ - uint8_t *next_proto_negotiated; - size_t next_proto_negotiated_len; - - /* ALPN information - * (we are in the process of transitioning from NPN to ALPN.) */ - - /* In a server these point to the selected ALPN protocol after the - * ClientHello has been processed. In a client these contain the protocol - * that the server selected once the ServerHello has been processed. */ - uint8_t *alpn_selected; - size_t alpn_selected_len; - - /* For a server: - * If |tlsext_channel_id_valid| is true, then this contains the - * verified Channel ID from the client: a P256 point, (x,y), where - * each are big-endian values. */ - uint8_t tlsext_channel_id[64]; -} SSL3_STATE; - -/* lengths of messages */ + // initial_handshake_complete is true if the initial handshake has + // completed. + bool initial_handshake_complete:1; + + // session_reused indicates whether a session was resumed. + bool session_reused:1; + + bool send_connection_binding:1; + + // In a client, this means that the server supported Channel ID and that a + // Channel ID was sent. In a server it means that we echoed support for + // Channel IDs and that tlsext_channel_id will be valid after the + // handshake. + bool tlsext_channel_id_valid:1; + + // key_update_pending is true if we have a KeyUpdate acknowledgment + // outstanding. + bool key_update_pending:1; + + // wpend_pending is true if we have a pending write outstanding. + bool wpend_pending:1; + + uint8_t send_alert[2] = {0}; + + // hs_buf is the buffer of handshake data to process. + UniquePtr hs_buf; + + // pending_flight is the pending outgoing flight. This is used to flush each + // handshake flight in a single write. |write_buffer| must be written out + // before this data. 
+ UniquePtr pending_flight; + + // pending_flight_offset is the number of bytes of |pending_flight| which have + // been successfully written. + uint32_t pending_flight_offset = 0; + + // aead_read_ctx is the current read cipher state. + UniquePtr aead_read_ctx; + + // aead_write_ctx is the current write cipher state. + UniquePtr aead_write_ctx; + + // hs is the handshake state for the current handshake or NULL if there isn't + // one. + UniquePtr hs; + + uint8_t write_traffic_secret[EVP_MAX_MD_SIZE] = {0}; + uint8_t read_traffic_secret[EVP_MAX_MD_SIZE] = {0}; + uint8_t exporter_secret[EVP_MAX_MD_SIZE] = {0}; + uint8_t early_exporter_secret[EVP_MAX_MD_SIZE] = {0}; + uint8_t write_traffic_secret_len = 0; + uint8_t read_traffic_secret_len = 0; + uint8_t exporter_secret_len = 0; + uint8_t early_exporter_secret_len = 0; + + // Connection binding to prevent renegotiation attacks + uint8_t previous_client_finished[12] = {0}; + uint8_t previous_client_finished_len = 0; + uint8_t previous_server_finished_len = 0; + uint8_t previous_server_finished[12] = {0}; + + // established_session is the session established by the connection. This + // session is only filled upon the completion of the handshake and is + // immutable. + UniquePtr established_session; + + // Next protocol negotiation. For the client, this is the protocol that we + // sent in NextProtocol and is set when handling ServerHello extensions. + // + // For a server, this is the client's selected_protocol from NextProtocol and + // is set when handling the NextProtocol message, before the Finished + // message. + Array next_proto_negotiated; + + // ALPN information + // (we are in the process of transitioning from NPN to ALPN.) + + // In a server these point to the selected ALPN protocol after the + // ClientHello has been processed. In a client these contain the protocol + // that the server selected once the ServerHello has been processed. + Array alpn_selected; + + // hostname, on the server, is the value of the SNI extension. + UniquePtr hostname; + + // For a server: + // If |tlsext_channel_id_valid| is true, then this contains the + // verified Channel ID from the client: a P256 point, (x,y), where + // each are big-endian values. + uint8_t tlsext_channel_id[64] = {0}; + + // ticket_age_skew is the difference, in seconds, between the client-sent + // ticket age and the server-computed value in TLS 1.3 server connections + // which resumed a session. + int32_t ticket_age_skew = 0; +}; + +// lengths of messages #define DTLS1_COOKIE_LENGTH 256 #define DTLS1_RT_HEADER_LENGTH 13 @@ -1675,148 +2387,179 @@ struct hm_header_st { uint32_t frag_len; }; -/* An hm_fragment is an incoming DTLS message, possibly not yet assembled. */ -typedef struct hm_fragment_st { - /* type is the type of the message. */ - uint8_t type; - /* seq is the sequence number of this message. */ - uint16_t seq; - /* msg_len is the length of the message body. */ - uint32_t msg_len; - /* data is a pointer to the message, including message header. It has length - * |DTLS1_HM_HEADER_LENGTH| + |msg_len|. */ - uint8_t *data; - /* reassembly is a bitmask of |msg_len| bits corresponding to which parts of - * the message have been received. It is NULL if the message is complete. */ - uint8_t *reassembly; -} hm_fragment; - -typedef struct dtls1_state_st { - /* send_cookie is true if we are resending the ClientHello - * with a cookie from a HelloVerifyRequest. 
*/ - unsigned int send_cookie; - - uint8_t cookie[DTLS1_COOKIE_LENGTH]; - size_t cookie_len; - - /* The current data and handshake epoch. This is initially undefined, and - * starts at zero once the initial handshake is completed. */ - uint16_t r_epoch; - uint16_t w_epoch; - - /* records being received in the current epoch */ +// An hm_fragment is an incoming DTLS message, possibly not yet assembled. +struct hm_fragment { + static constexpr bool kAllowUniquePtr = true; + + hm_fragment() {} + hm_fragment(const hm_fragment &) = delete; + hm_fragment &operator=(const hm_fragment &) = delete; + + ~hm_fragment(); + + // type is the type of the message. + uint8_t type = 0; + // seq is the sequence number of this message. + uint16_t seq = 0; + // msg_len is the length of the message body. + uint32_t msg_len = 0; + // data is a pointer to the message, including message header. It has length + // |DTLS1_HM_HEADER_LENGTH| + |msg_len|. + uint8_t *data = nullptr; + // reassembly is a bitmask of |msg_len| bits corresponding to which parts of + // the message have been received. It is NULL if the message is complete. + uint8_t *reassembly = nullptr; +}; + +struct OPENSSL_timeval { + uint64_t tv_sec; + uint32_t tv_usec; +}; + +struct DTLS1_STATE { + static constexpr bool kAllowUniquePtr = true; + + DTLS1_STATE(); + ~DTLS1_STATE(); + + // has_change_cipher_spec is true if we have received a ChangeCipherSpec from + // the peer in this epoch. + bool has_change_cipher_spec:1; + + // outgoing_messages_complete is true if |outgoing_messages| has been + // completed by an attempt to flush it. Future calls to |add_message| and + // |add_change_cipher_spec| will start a new flight. + bool outgoing_messages_complete:1; + + // flight_has_reply is true if the current outgoing flight is complete and has + // processed at least one message. This is used to detect whether we or the + // peer sent the final flight. + bool flight_has_reply:1; + + uint8_t cookie[DTLS1_COOKIE_LENGTH] = {0}; + size_t cookie_len = 0; + + // The current data and handshake epoch. This is initially undefined, and + // starts at zero once the initial handshake is completed. + uint16_t r_epoch = 0; + uint16_t w_epoch = 0; + + // records being received in the current epoch DTLS1_BITMAP bitmap; - uint16_t handshake_write_seq; - uint16_t handshake_read_seq; + uint16_t handshake_write_seq = 0; + uint16_t handshake_read_seq = 0; - /* save last sequence number for retransmissions */ - uint8_t last_write_sequence[8]; + // save last sequence number for retransmissions + uint8_t last_write_sequence[8] = {0}; + UniquePtr last_aead_write_ctx; - /* incoming_messages is a ring buffer of incoming handshake messages that have - * yet to be processed. The front of the ring buffer is message number - * |handshake_read_seq|, at position |handshake_read_seq| % - * |SSL_MAX_HANDSHAKE_FLIGHT|. */ - hm_fragment *incoming_messages[SSL_MAX_HANDSHAKE_FLIGHT]; + // incoming_messages is a ring buffer of incoming handshake messages that have + // yet to be processed. The front of the ring buffer is message number + // |handshake_read_seq|, at position |handshake_read_seq| % + // |SSL_MAX_HANDSHAKE_FLIGHT|. + UniquePtr incoming_messages[SSL_MAX_HANDSHAKE_FLIGHT]; - /* outgoing_messages is the queue of outgoing messages from the last handshake - * flight. */ + // outgoing_messages is the queue of outgoing messages from the last handshake + // flight. 
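
The comment on |incoming_messages| above describes a fixed-size ring buffer indexed by the handshake sequence number modulo the flight size. Below is a minimal, self-contained sketch of that indexing scheme; kMaxFlight, Fragment and IncomingQueue are invented stand-ins, not the patch's SSL_MAX_HANDSHAKE_FLIGHT or hm_fragment.

    #include <array>
    #include <cstdint>
    #include <memory>

    constexpr uint16_t kMaxFlight = 7;  // stand-in for SSL_MAX_HANDSHAKE_FLIGHT

    struct Fragment {
      uint16_t seq = 0;  // handshake message sequence number
    };

    struct IncomingQueue {
      // Slot i holds the buffered message whose sequence number is congruent
      // to i modulo kMaxFlight; the front of the ring is read_seq % kMaxFlight.
      std::array<std::unique_ptr<Fragment>, kMaxFlight> slots;
      uint16_t read_seq = 0;  // next message number to hand to the handshake

      bool Buffer(std::unique_ptr<Fragment> frag) {
        uint16_t seq = frag->seq;
        // Only messages inside the window of kMaxFlight messages starting at
        // read_seq may be buffered; anything else is too old or too far ahead.
        if (seq < read_seq || seq >= read_seq + kMaxFlight) {
          return false;
        }
        slots[seq % kMaxFlight] = std::move(frag);
        return true;
      }

      Fragment *Front() { return slots[read_seq % kMaxFlight].get(); }
    };

    int main() {
      IncomingQueue queue;
      auto frag = std::make_unique<Fragment>();
      frag->seq = 0;
      return queue.Buffer(std::move(frag)) && queue.Front() != nullptr ? 0 : 1;
    }
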
DTLS_OUTGOING_MESSAGE outgoing_messages[SSL_MAX_HANDSHAKE_FLIGHT]; - uint8_t outgoing_messages_len; + uint8_t outgoing_messages_len = 0; - /* outgoing_written is the number of outgoing messages that have been - * written. */ - uint8_t outgoing_written; - /* outgoing_offset is the number of bytes of the next outgoing message have - * been written. */ - uint32_t outgoing_offset; + // outgoing_written is the number of outgoing messages that have been + // written. + uint8_t outgoing_written = 0; + // outgoing_offset is the number of bytes of the next outgoing message have + // been written. + uint32_t outgoing_offset = 0; - unsigned int mtu; /* max DTLS packet size */ + unsigned mtu = 0; // max DTLS packet size - /* num_timeouts is the number of times the retransmit timer has fired since - * the last time it was reset. */ - unsigned int num_timeouts; + // num_timeouts is the number of times the retransmit timer has fired since + // the last time it was reset. + unsigned num_timeouts = 0; - /* Indicates when the last handshake msg or heartbeat sent will - * timeout. */ - struct timeval next_timeout; + // Indicates when the last handshake msg or heartbeat sent will + // timeout. + struct OPENSSL_timeval next_timeout = {0, 0}; - /* timeout_duration_ms is the timeout duration in milliseconds. */ - unsigned timeout_duration_ms; -} DTLS1_STATE; + // timeout_duration_ms is the timeout duration in milliseconds. + unsigned timeout_duration_ms = 0; +}; -struct ssl_st { - /* method is the method table corresponding to the current protocol (DTLS or - * TLS). */ +// SSLConnection backs the public |SSL| type. Due to compatibility constraints, +// it is a base class for |ssl_st|. +struct SSLConnection { + // method is the method table corresponding to the current protocol (DTLS or + // TLS). const SSL_PROTOCOL_METHOD *method; - /* version is the protocol version. */ - int version; - - /* max_version is the maximum acceptable protocol version. Note this version - * is normalized in DTLS. */ - uint16_t max_version; + // version is the protocol version. + uint16_t version; - /* min_version is the minimum acceptable protocol version. Note this version - * is normalized in DTLS. */ - uint16_t min_version; + // conf_max_version is the maximum acceptable protocol version configured by + // |SSL_set_max_proto_version|. Note this version is normalized in DTLS and is + // further constrainted by |SSL_OP_NO_*|. + uint16_t conf_max_version; - uint16_t max_send_fragment; + // conf_min_version is the minimum acceptable protocol version configured by + // |SSL_set_min_proto_version|. Note this version is normalized in DTLS and is + // further constrainted by |SSL_OP_NO_*|. + uint16_t conf_min_version; - /* There are 2 BIO's even though they are normally both the same. This is so - * data can be read and written to different handlers */ + // tls13_variant is the variant of TLS 1.3 we are using for this + // configuration. + enum tls13_variant_t tls13_variant; - BIO *rbio; /* used by SSL_read */ - BIO *wbio; /* used by SSL_write */ + uint16_t max_send_fragment; - int (*handshake_func)(SSL_HANDSHAKE *hs); + // There are 2 BIO's even though they are normally both the same. This is so + // data can be read and written to different handlers - BUF_MEM *init_buf; /* buffer used during init */ + BIO *rbio; // used by SSL_read + BIO *wbio; // used by SSL_write - /* init_msg is a pointer to the current handshake message body. */ - const uint8_t *init_msg; - /* init_num is the length of the current handshake message body. 
*/ - uint32_t init_num; + // do_handshake runs the handshake. On completion, it returns |ssl_hs_ok|. + // Otherwise, it returns a value corresponding to what operation is needed to + // progress. + enum ssl_hs_wait_t (*do_handshake)(SSL_HANDSHAKE *hs); - struct ssl3_state_st *s3; /* SSLv3 variables */ - struct dtls1_state_st *d1; /* DTLSv1 variables */ + SSL3_STATE *s3; // SSLv3 variables + DTLS1_STATE *d1; // DTLSv1 variables - /* callback that allows applications to peek at protocol messages */ + // callback that allows applications to peek at protocol messages void (*msg_callback)(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg); void *msg_callback_arg; X509_VERIFY_PARAM *param; - /* crypto */ + // crypto struct ssl_cipher_preference_list_st *cipher_list; - /* session info */ - - /* client cert? */ - /* This is used to hold the server certificate used */ - struct cert_st /* CERT */ *cert; + // session info - /* This holds a variable that indicates what we were doing when a 0 or -1 is - * returned. This is needed for non-blocking IO so we know what request - * needs re-doing when in SSL_accept or SSL_connect */ - int rwstate; + // client cert? + // This is used to hold the server certificate used + CERT *cert; - /* initial_timeout_duration_ms is the default DTLS timeout duration in - * milliseconds. It's used to initialize the timer any time it's restarted. */ + // initial_timeout_duration_ms is the default DTLS timeout duration in + // milliseconds. It's used to initialize the timer any time it's restarted. unsigned initial_timeout_duration_ms; - /* session is the configured session to be offered by the client. This session - * is immutable. */ + // session is the configured session to be offered by the client. This session + // is immutable. SSL_SESSION *session; int (*verify_callback)(int ok, - X509_STORE_CTX *ctx); /* fail if callback returns 0 */ + X509_STORE_CTX *ctx); // fail if callback returns 0 + + enum ssl_verify_result_t (*custom_verify_callback)(SSL *ssl, + uint8_t *out_alert); void (*info_callback)(const SSL *ssl, int type, int value); - /* Server-only: psk_identity_hint is the identity hint to send in - * PSK-based key exchanges. */ + // Server-only: psk_identity_hint is the identity hint to send in + // PSK-based key exchanges. char *psk_identity_hint; unsigned int (*psk_client_callback)(SSL *ssl, const char *hint, @@ -1828,295 +2571,295 @@ struct ssl_st { SSL_CTX *ctx; - /* extra application data */ + // extra application data CRYPTO_EX_DATA ex_data; - /* for server side, keep the list of CA_dn we can use */ - STACK_OF(X509_NAME) *client_CA; + // for server side, keep the list of CA_dn we can use + STACK_OF(CRYPTO_BUFFER) *client_CA; - uint32_t options; /* protocol behaviour */ - uint32_t mode; /* API behaviour */ + // cached_x509_client_CA is a cache of parsed versions of the elements of + // |client_CA|. + STACK_OF(X509_NAME) *cached_x509_client_CA; + + uint32_t options; // protocol behaviour + uint32_t mode; // API behaviour uint32_t max_cert_list; char *tlsext_hostname; size_t supported_group_list_len; - uint16_t *supported_group_list; /* our list */ + uint16_t *supported_group_list; // our list - SSL_CTX *initial_ctx; /* initial ctx, used to store sessions */ + // session_ctx is the |SSL_CTX| used for the session cache and related + // settings. + SSL_CTX *session_ctx; - /* srtp_profiles is the list of configured SRTP protection profiles for - * DTLS-SRTP. 
*/ + // srtp_profiles is the list of configured SRTP protection profiles for + // DTLS-SRTP. STACK_OF(SRTP_PROTECTION_PROFILE) *srtp_profiles; - /* srtp_profile is the selected SRTP protection profile for - * DTLS-SRTP. */ + // srtp_profile is the selected SRTP protection profile for + // DTLS-SRTP. const SRTP_PROTECTION_PROFILE *srtp_profile; - /* The client's Channel ID private key. */ + // The client's Channel ID private key. EVP_PKEY *tlsext_channel_id_private; - /* For a client, this contains the list of supported protocols in wire - * format. */ + // For a client, this contains the list of supported protocols in wire + // format. uint8_t *alpn_client_proto_list; unsigned alpn_client_proto_list_len; - /* renegotiate_mode controls how peer renegotiation attempts are handled. */ + // renegotiate_mode controls how peer renegotiation attempts are handled. enum ssl_renegotiate_mode_t renegotiate_mode; - /* verify_mode is a bitmask of |SSL_VERIFY_*| values. */ + // verify_mode is a bitmask of |SSL_VERIFY_*| values. uint8_t verify_mode; - /* server is true iff the this SSL* is the server half. Note: before the SSL* - * is initialized by either SSL_set_accept_state or SSL_set_connect_state, - * the side is not determined. In this state, server is always false. */ - unsigned server:1; + // server is true iff the this SSL* is the server half. Note: before the SSL* + // is initialized by either SSL_set_accept_state or SSL_set_connect_state, + // the side is not determined. In this state, server is always false. + bool server:1; + + // quiet_shutdown is true if the connection should not send a close_notify on + // shutdown. + bool quiet_shutdown:1; - /* quiet_shutdown is true if the connection should not send a close_notify on - * shutdown. */ - unsigned quiet_shutdown:1; + // Enable signed certificate time stamps. Currently client only. + bool signed_cert_timestamps_enabled:1; - /* Enable signed certificate time stamps. Currently client only. */ - unsigned signed_cert_timestamps_enabled:1; + // ocsp_stapling_enabled is only used by client connections and indicates + // whether OCSP stapling will be requested. + bool ocsp_stapling_enabled:1; - /* ocsp_stapling_enabled is only used by client connections and indicates - * whether OCSP stapling will be requested. */ - unsigned ocsp_stapling_enabled:1; + // tlsext_channel_id_enabled is copied from the |SSL_CTX|. For a server, + // means that we'll accept Channel IDs from clients. For a client, means that + // we'll advertise support. + bool tlsext_channel_id_enabled:1; - /* tlsext_channel_id_enabled is copied from the |SSL_CTX|. For a server, - * means that we'll accept Channel IDs from clients. For a client, means that - * we'll advertise support. */ - unsigned tlsext_channel_id_enabled:1; + // retain_only_sha256_of_client_certs is true if we should compute the SHA256 + // hash of the peer's certificate and then discard it to save memory and + // session space. Only effective on the server side. + bool retain_only_sha256_of_client_certs:1; - /* retain_only_sha256_of_client_certs is true if we should compute the SHA256 - * hash of the peer's certificate and then discard it to save memory and - * session space. Only effective on the server side. */ - unsigned retain_only_sha256_of_client_certs:1; + // early_data_accepted is true if early data was accepted by the server. + bool early_data_accepted:1; }; -/* From draft-ietf-tls-tls13-18, used in determining PSK modes. 
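
In the hunk above, client_CA now stores raw CRYPTO_BUFFERs while cached_x509_client_CA keeps lazily parsed X509_NAME copies. A rough sketch of that raw-bytes-plus-parsed-cache pattern follows; RawName, ParsedName and NameList are invented stand-ins, and the explicit flush mirrors the *_flush_cached_* hooks introduced elsewhere in the diff.

    #include <memory>
    #include <string>
    #include <vector>

    // RawName plays the role of a CRYPTO_BUFFER holding an encoded X.509 name,
    // ParsedName the role of an X509_NAME.
    using RawName = std::vector<unsigned char>;
    struct ParsedName { std::string text; };

    struct NameList {
      // Canonical raw encodings configured by the application.
      std::vector<RawName> raw;
      // Parsed copies, built on demand; FlushCache() must be called whenever
      // |raw| changes.
      std::vector<std::unique_ptr<ParsedName>> parsed_cache;

      void FlushCache() { parsed_cache.clear(); }

      const std::vector<std::unique_ptr<ParsedName>> &Parsed() {
        if (parsed_cache.empty() && !raw.empty()) {
          for (const RawName &name : raw) {
            auto parsed = std::make_unique<ParsedName>();
            // A real implementation would DER-parse the name here.
            parsed->text.assign(name.begin(), name.end());
            parsed_cache.push_back(std::move(parsed));
          }
        }
        return parsed_cache;
      }
    };

    int main() {
      NameList list;
      list.raw.push_back({'C', 'A'});
      return list.Parsed().size() == 1 ? 0 : 1;
    }
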
*/ -#define SSL_PSK_KE 0x0 +// From draft-ietf-tls-tls13-18, used in determining PSK modes. #define SSL_PSK_DHE_KE 0x1 -/* From draft-ietf-tls-tls13-16, used in determining whether to respond with a - * KeyUpdate. */ +// From draft-ietf-tls-tls13-16, used in determining whether to respond with a +// KeyUpdate. #define SSL_KEY_UPDATE_NOT_REQUESTED 0 #define SSL_KEY_UPDATE_REQUESTED 1 +// kMaxEarlyDataAccepted is the advertised number of plaintext bytes of early +// data that will be accepted. This value should be slightly below +// kMaxEarlyDataSkipped in tls_record.c, which is measured in ciphertext. +static const size_t kMaxEarlyDataAccepted = 14336; + CERT *ssl_cert_new(const SSL_X509_METHOD *x509_method); CERT *ssl_cert_dup(CERT *cert); -void ssl_cert_clear_certs(CERT *c); -void ssl_cert_free(CERT *c); -int ssl_set_cert(CERT *cert, CRYPTO_BUFFER *buffer); +void ssl_cert_clear_certs(CERT *cert); +void ssl_cert_free(CERT *cert); +int ssl_set_cert(CERT *cert, UniquePtr buffer); int ssl_is_key_type_supported(int key_type); -/* ssl_compare_public_and_private_key returns one if |pubkey| is the public - * counterpart to |privkey|. Otherwise it returns zero and pushes a helpful - * message on the error queue. */ +// ssl_compare_public_and_private_key returns one if |pubkey| is the public +// counterpart to |privkey|. Otherwise it returns zero and pushes a helpful +// message on the error queue. int ssl_compare_public_and_private_key(const EVP_PKEY *pubkey, const EVP_PKEY *privkey); int ssl_cert_check_private_key(const CERT *cert, const EVP_PKEY *privkey); int ssl_get_new_session(SSL_HANDSHAKE *hs, int is_server); int ssl_encrypt_ticket(SSL *ssl, CBB *out, const SSL_SESSION *session); +int ssl_ctx_rotate_ticket_encryption_key(SSL_CTX *ctx); -/* ssl_session_new returns a newly-allocated blank |SSL_SESSION| or NULL on - * error. */ -SSL_SESSION *ssl_session_new(const SSL_X509_METHOD *x509_method); +// ssl_session_new returns a newly-allocated blank |SSL_SESSION| or nullptr on +// error. +UniquePtr ssl_session_new(const SSL_X509_METHOD *x509_method); -/* SSL_SESSION_parse parses an |SSL_SESSION| from |cbs| and advances |cbs| over - * the parsed data. */ -SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, - CRYPTO_BUFFER_POOL *pool); +// SSL_SESSION_parse parses an |SSL_SESSION| from |cbs| and advances |cbs| over +// the parsed data. +UniquePtr SSL_SESSION_parse(CBS *cbs, + const SSL_X509_METHOD *x509_method, + CRYPTO_BUFFER_POOL *pool); -/* ssl_session_is_context_valid returns one if |session|'s session ID context - * matches the one set on |ssl| and zero otherwise. */ +// ssl_session_is_context_valid returns one if |session|'s session ID context +// matches the one set on |ssl| and zero otherwise. int ssl_session_is_context_valid(const SSL *ssl, const SSL_SESSION *session); -/* ssl_session_is_time_valid returns one if |session| is still valid and zero if - * it has expired. */ +// ssl_session_is_time_valid returns one if |session| is still valid and zero if +// it has expired. int ssl_session_is_time_valid(const SSL *ssl, const SSL_SESSION *session); -/* ssl_session_is_resumable returns one if |session| is resumable for |hs| and - * zero otherwise. */ +// ssl_session_is_resumable returns one if |session| is resumable for |hs| and +// zero otherwise. int ssl_session_is_resumable(const SSL_HANDSHAKE *hs, const SSL_SESSION *session); -/* SSL_SESSION_get_digest returns the digest used in |session|. If the digest is - * invalid, it returns NULL. 
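
Several prototypes above move from raw owning pointers to scoped pointers (ssl_session_new and SSL_SESSION_parse now return UniquePtr, ssl_set_cert takes one). A small sketch of what that convention means for callers, using std::unique_ptr and an invented Session type rather than the real SSL_SESSION:

    #include <memory>
    #include <new>

    struct Session {
      bool resumable = false;
    };

    // Before: "Session *SessionNew();" forced every caller to pair the call
    // with an explicit free on every early-return path. Returning a scoped
    // pointer makes the cleanup automatic.
    std::unique_ptr<Session> SessionNew() {
      return std::unique_ptr<Session>(new (std::nothrow) Session);
    }

    int main() {
      std::unique_ptr<Session> session = SessionNew();
      if (session == nullptr) {
        return 1;  // allocation failure: nothing to release by hand
      }
      session->resumable = true;
      return 0;  // released automatically when |session| goes out of scope
    }
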
*/ -const EVP_MD *SSL_SESSION_get_digest(const SSL_SESSION *session, - const SSL *ssl); - -void ssl_set_session(SSL *ssl, SSL_SESSION *session); +// ssl_session_protocol_version returns the protocol version associated with +// |session|. Note that despite the name, this is not the same as +// |SSL_SESSION_get_protocol_version|. The latter is based on upstream's name. +uint16_t ssl_session_protocol_version(const SSL_SESSION *session); -enum ssl_session_result_t { - ssl_session_success, - ssl_session_error, - ssl_session_retry, -}; +// ssl_session_get_digest returns the digest used in |session|. +const EVP_MD *ssl_session_get_digest(const SSL_SESSION *session); -/* ssl_get_prev_session looks up the previous session based on |client_hello|. - * On success, it sets |*out_session| to the session or NULL if none was found. - * If the session could not be looked up synchronously, it returns - * |ssl_session_retry| and should be called again. Otherwise, it returns - * |ssl_session_error|. */ -enum ssl_session_result_t ssl_get_prev_session( - SSL *ssl, SSL_SESSION **out_session, int *out_tickets_supported, - int *out_renew_ticket, const SSL_CLIENT_HELLO *client_hello); +void ssl_set_session(SSL *ssl, SSL_SESSION *session); -/* The following flags determine which parts of the session are duplicated. */ +// ssl_get_prev_session looks up the previous session based on |client_hello|. +// On success, it sets |*out_session| to the session or nullptr if none was +// found. If the session could not be looked up synchronously, it returns +// |ssl_hs_pending_session| and should be called again. If a ticket could not be +// decrypted immediately it returns |ssl_hs_pending_ticket| and should also +// be called again. Otherwise, it returns |ssl_hs_error|. +enum ssl_hs_wait_t ssl_get_prev_session(SSL *ssl, + UniquePtr *out_session, + bool *out_tickets_supported, + bool *out_renew_ticket, + const SSL_CLIENT_HELLO *client_hello); + +// The following flags determine which parts of the session are duplicated. #define SSL_SESSION_DUP_AUTH_ONLY 0x0 #define SSL_SESSION_INCLUDE_TICKET 0x1 #define SSL_SESSION_INCLUDE_NONAUTH 0x2 #define SSL_SESSION_DUP_ALL \ (SSL_SESSION_INCLUDE_TICKET | SSL_SESSION_INCLUDE_NONAUTH) -/* SSL_SESSION_dup returns a newly-allocated |SSL_SESSION| with a copy of the - * fields in |session| or NULL on error. The new session is non-resumable and - * must be explicitly marked resumable once it has been filled in. */ -OPENSSL_EXPORT SSL_SESSION *SSL_SESSION_dup(SSL_SESSION *session, - int dup_flags); +// SSL_SESSION_dup returns a newly-allocated |SSL_SESSION| with a copy of the +// fields in |session| or nullptr on error. The new session is non-resumable and +// must be explicitly marked resumable once it has been filled in. +OPENSSL_EXPORT UniquePtr SSL_SESSION_dup(SSL_SESSION *session, + int dup_flags); -/* ssl_session_rebase_time updates |session|'s start time to the current time, - * adjusting the timeout so the expiration time is unchanged. */ +// ssl_session_rebase_time updates |session|'s start time to the current time, +// adjusting the timeout so the expiration time is unchanged. void ssl_session_rebase_time(SSL *ssl, SSL_SESSION *session); -/* ssl_session_renew_timeout calls |ssl_session_rebase_time| and renews - * |session|'s timeout to |timeout| (measured from the current time). The - * renewal is clamped to the session's auth_timeout. 
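
The new contract of ssl_get_prev_session above distinguishes two retryable outcomes (ssl_hs_pending_session and ssl_hs_pending_ticket) from success and fatal error. Below is a hedged sketch of the caller-side retry pattern that contract implies; LookupResult, LookUpSession and HandleClientHello are invented stand-ins, not BoringSSL API.

    // Local stand-in mirroring the documented return values of
    // ssl_get_prev_session; not the library's enum.
    enum class LookupResult {
      kOk,              // lookup finished: a session was found, or definitively not
      kError,           // fatal error; abort the handshake
      kPendingSession,  // external session-cache lookup still in flight
      kPendingTicket,   // ticket decryption still in flight
    };

    // Stand-in lookup; a real implementation would consult the session cache
    // or a ticket-decryption callback.
    LookupResult LookUpSession(bool *out_found) {
      *out_found = false;
      return LookupResult::kOk;
    }

    // The caller surfaces the pending states to the application and retries
    // the same lookup later; only kOk or kError make forward progress.
    int HandleClientHello() {
      bool found = false;
      switch (LookUpSession(&found)) {
        case LookupResult::kOk:
          return found ? 2 /* try resumption */ : 1 /* full handshake */;
        case LookupResult::kPendingSession:
        case LookupResult::kPendingTicket:
          return 0;  // come back once the asynchronous work completes
        case LookupResult::kError:
          return -1;
      }
      return -1;
    }

    int main() { return HandleClientHello() >= 0 ? 0 : 1; }
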
*/ -void ssl_session_renew_timeout(SSL *ssl, SSL_SESSION *session, long timeout); +// ssl_session_renew_timeout calls |ssl_session_rebase_time| and renews +// |session|'s timeout to |timeout| (measured from the current time). The +// renewal is clamped to the session's auth_timeout. +void ssl_session_renew_timeout(SSL *ssl, SSL_SESSION *session, + uint32_t timeout); void ssl_cipher_preference_list_free( struct ssl_cipher_preference_list_st *cipher_list); -/* ssl_get_cipher_preferences returns the cipher preference list for TLS 1.2 and - * below. */ +// ssl_get_cipher_preferences returns the cipher preference list for TLS 1.2 and +// below. const struct ssl_cipher_preference_list_st *ssl_get_cipher_preferences( const SSL *ssl); -int ssl_verify_cert_chain(SSL *ssl, long *out_verify_result, - STACK_OF(X509) *cert_chain); void ssl_update_cache(SSL_HANDSHAKE *hs, int mode); -int ssl_verify_alarm_type(long type); - -int ssl3_get_finished(SSL_HANDSHAKE *hs); -int ssl3_send_alert(SSL *ssl, int level, int desc); -int ssl3_get_message(SSL *ssl); -void ssl3_get_current_message(const SSL *ssl, CBS *out); -void ssl3_release_current_message(SSL *ssl, int free_buffer); +int ssl_send_alert(SSL *ssl, int level, int desc); +bool ssl3_get_message(SSL *ssl, SSLMessage *out); +ssl_open_record_t ssl3_open_handshake(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, Span in); +void ssl3_next_message(SSL *ssl); -int ssl3_send_finished(SSL_HANDSHAKE *hs); int ssl3_dispatch_alert(SSL *ssl); -int ssl3_read_app_data(SSL *ssl, int *out_got_handshake, uint8_t *buf, int len, - int peek); -int ssl3_read_change_cipher_spec(SSL *ssl); -void ssl3_read_close_notify(SSL *ssl); -int ssl3_read_handshake_bytes(SSL *ssl, uint8_t *buf, int len); -int ssl3_write_app_data(SSL *ssl, const uint8_t *buf, int len); -int ssl3_output_cert_chain(SSL *ssl); - -int ssl3_new(SSL *ssl); +ssl_open_record_t ssl3_open_app_data(SSL *ssl, Span *out, + size_t *out_consumed, uint8_t *out_alert, + Span in); +ssl_open_record_t ssl3_open_change_cipher_spec(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, + Span in); +int ssl3_write_app_data(SSL *ssl, bool *out_needs_handshake, const uint8_t *buf, + int len); + +bool ssl3_new(SSL *ssl); void ssl3_free(SSL *ssl); -int ssl3_accept(SSL_HANDSHAKE *hs); -int ssl3_connect(SSL_HANDSHAKE *hs); - -int ssl3_init_message(SSL *ssl, CBB *cbb, CBB *body, uint8_t type); -int ssl3_finish_message(SSL *ssl, CBB *cbb, uint8_t **out_msg, size_t *out_len); -int ssl3_add_message(SSL *ssl, uint8_t *msg, size_t len); -int ssl3_add_change_cipher_spec(SSL *ssl); -int ssl3_add_alert(SSL *ssl, uint8_t level, uint8_t desc); + +bool ssl3_init_message(SSL *ssl, CBB *cbb, CBB *body, uint8_t type); +bool ssl3_finish_message(SSL *ssl, CBB *cbb, Array *out_msg); +bool ssl3_add_message(SSL *ssl, Array msg); +bool ssl3_add_change_cipher_spec(SSL *ssl); +bool ssl3_add_alert(SSL *ssl, uint8_t level, uint8_t desc); int ssl3_flush_flight(SSL *ssl); -int dtls1_init_message(SSL *ssl, CBB *cbb, CBB *body, uint8_t type); -int dtls1_finish_message(SSL *ssl, CBB *cbb, uint8_t **out_msg, - size_t *out_len); -int dtls1_add_message(SSL *ssl, uint8_t *msg, size_t len); -int dtls1_add_change_cipher_spec(SSL *ssl); -int dtls1_add_alert(SSL *ssl, uint8_t level, uint8_t desc); +bool dtls1_init_message(SSL *ssl, CBB *cbb, CBB *body, uint8_t type); +bool dtls1_finish_message(SSL *ssl, CBB *cbb, Array *out_msg); +bool dtls1_add_message(SSL *ssl, Array msg); +bool dtls1_add_change_cipher_spec(SSL *ssl); +bool dtls1_add_alert(SSL *ssl, uint8_t level, 
uint8_t desc); int dtls1_flush_flight(SSL *ssl); -/* ssl_add_message_cbb finishes the handshake message in |cbb| and adds it to - * the pending flight. It returns one on success and zero on error. */ -int ssl_add_message_cbb(SSL *ssl, CBB *cbb); - -/* ssl_hash_current_message incorporates the current handshake message into the - * handshake hash. It returns one on success and zero on allocation failure. */ -int ssl_hash_current_message(SSL_HANDSHAKE *hs); +// ssl_add_message_cbb finishes the handshake message in |cbb| and adds it to +// the pending flight. It returns true on success and false on error. +bool ssl_add_message_cbb(SSL *ssl, CBB *cbb); -/* dtls1_get_record reads a new input record. On success, it places it in - * |ssl->s3->rrec| and returns one. Otherwise it returns <= 0 on error or if - * more data is needed. */ -int dtls1_get_record(SSL *ssl); +// ssl_hash_message incorporates |msg| into the handshake hash. It returns true +// on success and false on allocation failure. +bool ssl_hash_message(SSL_HANDSHAKE *hs, const SSLMessage &msg); -int dtls1_read_app_data(SSL *ssl, int *out_got_handshake, uint8_t *buf, int len, - int peek); -int dtls1_read_change_cipher_spec(SSL *ssl); -void dtls1_read_close_notify(SSL *ssl); +ssl_open_record_t dtls1_open_app_data(SSL *ssl, Span *out, + size_t *out_consumed, uint8_t *out_alert, + Span in); +ssl_open_record_t dtls1_open_change_cipher_spec(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, + Span in); -int dtls1_write_app_data(SSL *ssl, const uint8_t *buf, int len); +int dtls1_write_app_data(SSL *ssl, bool *out_needs_handshake, + const uint8_t *buf, int len); -/* dtls1_write_record sends a record. It returns one on success and <= 0 on - * error. */ +// dtls1_write_record sends a record. It returns one on success and <= 0 on +// error. int dtls1_write_record(SSL *ssl, int type, const uint8_t *buf, size_t len, enum dtls1_use_epoch_t use_epoch); -int dtls1_send_finished(SSL *ssl, int a, int b, const char *sender, int slen); int dtls1_retransmit_outgoing_messages(SSL *ssl); -void dtls1_clear_record_buffer(SSL *ssl); -int dtls1_parse_fragment(CBS *cbs, struct hm_header_st *out_hdr, +bool dtls1_parse_fragment(CBS *cbs, struct hm_header_st *out_hdr, CBS *out_body); -int dtls1_check_timeout_num(SSL *ssl); -int dtls1_handshake_write(SSL *ssl); +bool dtls1_check_timeout_num(SSL *ssl); void dtls1_start_timer(SSL *ssl); void dtls1_stop_timer(SSL *ssl); -int dtls1_is_timer_expired(SSL *ssl); -void dtls1_double_timeout(SSL *ssl); +bool dtls1_is_timer_expired(SSL *ssl); unsigned int dtls1_min_mtu(void); -int dtls1_new(SSL *ssl); -int dtls1_accept(SSL *ssl); -int dtls1_connect(SSL *ssl); +bool dtls1_new(SSL *ssl); void dtls1_free(SSL *ssl); -int dtls1_get_message(SSL *ssl); -void dtls1_get_current_message(const SSL *ssl, CBS *out); -void dtls1_release_current_message(SSL *ssl, int free_buffer); +bool dtls1_get_message(SSL *ssl, SSLMessage *out); +ssl_open_record_t dtls1_open_handshake(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, Span in); +void dtls1_next_message(SSL *ssl); int dtls1_dispatch_alert(SSL *ssl); -int tls1_change_cipher_state(SSL_HANDSHAKE *hs, int which); +int tls1_change_cipher_state(SSL_HANDSHAKE *hs, evp_aead_direction_t direction); int tls1_generate_master_secret(SSL_HANDSHAKE *hs, uint8_t *out, - const uint8_t *premaster, size_t premaster_len); + Span premaster); -/* tls1_get_grouplist sets |*out_group_ids| and |*out_group_ids_len| to the - * locally-configured group preference list. 
*/ -void tls1_get_grouplist(SSL *ssl, const uint16_t **out_group_ids, - size_t *out_group_ids_len); +// tls1_get_grouplist returns the locally-configured group preference list. +Span tls1_get_grouplist(const SSL *ssl); -/* tls1_check_group_id returns one if |group_id| is consistent with - * locally-configured group preferences. */ -int tls1_check_group_id(SSL *ssl, uint16_t group_id); +// tls1_check_group_id returns one if |group_id| is consistent with +// locally-configured group preferences. +int tls1_check_group_id(const SSL *ssl, uint16_t group_id); -/* tls1_get_shared_group sets |*out_group_id| to the first preferred shared - * group between client and server preferences and returns one. If none may be - * found, it returns zero. */ +// tls1_get_shared_group sets |*out_group_id| to the first preferred shared +// group between client and server preferences and returns one. If none may be +// found, it returns zero. int tls1_get_shared_group(SSL_HANDSHAKE *hs, uint16_t *out_group_id); -/* tls1_set_curves converts the array of |ncurves| NIDs pointed to by |curves| - * into a newly allocated array of TLS group IDs. On success, the function - * returns one and writes the array to |*out_group_ids| and its size to - * |*out_group_ids_len|. Otherwise, it returns zero. */ +// tls1_set_curves converts the array of |ncurves| NIDs pointed to by |curves| +// into a newly allocated array of TLS group IDs. On success, the function +// returns one and writes the array to |*out_group_ids| and its size to +// |*out_group_ids_len|. Otherwise, it returns zero. int tls1_set_curves(uint16_t **out_group_ids, size_t *out_group_ids_len, const int *curves, size_t ncurves); -/* tls1_set_curves_list converts the string of curves pointed to by |curves| - * into a newly allocated array of TLS group IDs. On success, the function - * returns one and writes the array to |*out_group_ids| and its size to - * |*out_group_ids_len|. Otherwise, it returns zero. */ +// tls1_set_curves_list converts the string of curves pointed to by |curves| +// into a newly allocated array of TLS group IDs. On success, the function +// returns one and writes the array to |*out_group_ids| and its size to +// |*out_group_ids_len|. Otherwise, it returns zero. int tls1_set_curves_list(uint16_t **out_group_ids, size_t *out_group_ids_len, const char *curves); -/* ssl_add_clienthello_tlsext writes ClientHello extensions to |out|. It - * returns one on success and zero on failure. The |header_len| argument is the - * length of the ClientHello written so far and is used to compute the padding - * length. (It does not include the record header.) */ +// ssl_add_clienthello_tlsext writes ClientHello extensions to |out|. It +// returns one on success and zero on failure. The |header_len| argument is the +// length of the ClientHello written so far and is used to compute the padding +// length. (It does not include the record header.) int ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, size_t header_len); int ssl_add_serverhello_tlsext(SSL_HANDSHAKE *hs, CBB *out); @@ -2126,60 +2869,143 @@ int ssl_parse_serverhello_tlsext(SSL_HANDSHAKE *hs, CBS *cbs); #define tlsext_tick_md EVP_sha256 -/* tls_process_ticket processes a session ticket from the client. On success, - * it sets |*out_session| to the decrypted session or NULL if the ticket was - * rejected. If the ticket was valid, it sets |*out_renew_ticket| to whether - * the ticket should be renewed. It returns one on success and zero on fatal - * error. 
*/ -int tls_process_ticket(SSL *ssl, SSL_SESSION **out_session, - int *out_renew_ticket, const uint8_t *ticket, - size_t ticket_len, const uint8_t *session_id, - size_t session_id_len); - -/* tls1_verify_channel_id processes the current message as a Channel ID message, - * and verifies the signature. If the key is valid, it saves the Channel ID and - * returns one. Otherwise, it returns zero. */ -int tls1_verify_channel_id(SSL_HANDSHAKE *hs); - -/* tls1_write_channel_id generates a Channel ID message and puts the output in - * |cbb|. |ssl->tlsext_channel_id_private| must already be set before calling. - * This function returns one on success and zero on error. */ -int tls1_write_channel_id(SSL_HANDSHAKE *hs, CBB *cbb); - -/* tls1_channel_id_hash computes the hash to be signed by Channel ID and writes - * it to |out|, which must contain at least |EVP_MAX_MD_SIZE| bytes. It returns - * one on success and zero on failure. */ +// ssl_process_ticket processes a session ticket from the client. It returns +// one of: +// |ssl_ticket_aead_success|: |*out_session| is set to the parsed session and +// |*out_renew_ticket| is set to whether the ticket should be renewed. +// |ssl_ticket_aead_ignore_ticket|: |*out_renew_ticket| is set to whether a +// fresh ticket should be sent, but the given ticket cannot be used. +// |ssl_ticket_aead_retry|: the ticket could not be immediately decrypted. +// Retry later. +// |ssl_ticket_aead_error|: an error occured that is fatal to the connection. +enum ssl_ticket_aead_result_t ssl_process_ticket( + SSL *ssl, UniquePtr *out_session, bool *out_renew_ticket, + const uint8_t *ticket, size_t ticket_len, const uint8_t *session_id, + size_t session_id_len); + +// tls1_verify_channel_id processes |msg| as a Channel ID message, and verifies +// the signature. If the key is valid, it saves the Channel ID and returns +// one. Otherwise, it returns zero. +int tls1_verify_channel_id(SSL_HANDSHAKE *hs, const SSLMessage &msg); + +// tls1_write_channel_id generates a Channel ID message and puts the output in +// |cbb|. |ssl->tlsext_channel_id_private| must already be set before calling. +// This function returns true on success and false on error. +bool tls1_write_channel_id(SSL_HANDSHAKE *hs, CBB *cbb); + +// tls1_channel_id_hash computes the hash to be signed by Channel ID and writes +// it to |out|, which must contain at least |EVP_MAX_MD_SIZE| bytes. It returns +// one on success and zero on failure. int tls1_channel_id_hash(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len); int tls1_record_handshake_hashes_for_channel_id(SSL_HANDSHAKE *hs); -/* ssl_do_channel_id_callback checks runs |ssl->ctx->channel_id_cb| if - * necessary. It returns one on success and zero on fatal error. Note that, on - * success, |ssl->tlsext_channel_id_private| may be unset, in which case the - * operation should be retried later. */ +// ssl_do_channel_id_callback checks runs |ssl->ctx->channel_id_cb| if +// necessary. It returns one on success and zero on fatal error. Note that, on +// success, |ssl->tlsext_channel_id_private| may be unset, in which case the +// operation should be retried later. int ssl_do_channel_id_callback(SSL *ssl); -/* ssl3_can_false_start returns one if |ssl| is allowed to False Start and zero - * otherwise. */ -int ssl3_can_false_start(const SSL *ssl); +// ssl_can_write returns one if |ssl| is allowed to write and zero otherwise. 
+int ssl_can_write(const SSL *ssl); -/* ssl_get_version_range sets |*out_min_version| and |*out_max_version| to the - * minimum and maximum enabled protocol versions, respectively. */ -int ssl_get_version_range(const SSL *ssl, uint16_t *out_min_version, - uint16_t *out_max_version); +// ssl_can_read returns one if |ssl| is allowed to read and zero otherwise. +int ssl_can_read(const SSL *ssl); -/* ssl3_protocol_version returns |ssl|'s protocol version. It is an error to - * call this function before the version is determined. */ -uint16_t ssl3_protocol_version(const SSL *ssl); +void ssl_get_current_time(const SSL *ssl, struct OPENSSL_timeval *out_clock); +void ssl_ctx_get_current_time(const SSL_CTX *ctx, + struct OPENSSL_timeval *out_clock); -void ssl_get_current_time(const SSL *ssl, struct timeval *out_clock); - -/* ssl_reset_error_state resets state for |SSL_get_error|. */ +// ssl_reset_error_state resets state for |SSL_get_error|. void ssl_reset_error_state(SSL *ssl); +// ssl_set_read_error sets |ssl|'s read half into an error state, saving the +// current state of the error queue. +void ssl_set_read_error(SSL* ssl); + +} // namespace bssl + + +// Opaque C types. +// +// The following types are exported to C code as public typedefs, so they must +// be defined outside of the namespace. + +// ssl_method_st backs the public |SSL_METHOD| type. It is a compatibility +// structure to support the legacy version-locked methods. +struct ssl_method_st { + // version, if non-zero, is the only protocol version acceptable to an + // SSL_CTX initialized from this method. + uint16_t version; + // method is the underlying SSL_PROTOCOL_METHOD that initializes the + // SSL_CTX. + const bssl::SSL_PROTOCOL_METHOD *method; + // x509_method contains pointers to functions that might deal with |X509| + // compatibility, or might be a no-op, depending on the application. + const SSL_X509_METHOD *x509_method; +}; + +struct ssl_x509_method_st { + // check_client_CA_list returns one if |names| is a good list of X.509 + // distinguished names and zero otherwise. This is used to ensure that we can + // reject unparsable values at handshake time when using crypto/x509. + int (*check_client_CA_list)(STACK_OF(CRYPTO_BUFFER) *names); + + // cert_clear frees and NULLs all X509 certificate-related state. + void (*cert_clear)(bssl::CERT *cert); + // cert_free frees all X509-related state. + void (*cert_free)(bssl::CERT *cert); + // cert_flush_cached_chain drops any cached |X509|-based certificate chain + // from |cert|. + // cert_dup duplicates any needed fields from |cert| to |new_cert|. + void (*cert_dup)(bssl::CERT *new_cert, const bssl::CERT *cert); + void (*cert_flush_cached_chain)(bssl::CERT *cert); + // cert_flush_cached_chain drops any cached |X509|-based leaf certificate + // from |cert|. + void (*cert_flush_cached_leaf)(bssl::CERT *cert); + + // session_cache_objects fills out |sess->x509_peer| and |sess->x509_chain| + // from |sess->certs| and erases |sess->x509_chain_without_leaf|. It returns + // one on success or zero on error. + int (*session_cache_objects)(SSL_SESSION *session); + // session_dup duplicates any needed fields from |session| to |new_session|. + // It returns one on success or zero on error. + int (*session_dup)(SSL_SESSION *new_session, const SSL_SESSION *session); + // session_clear frees any X509-related state from |session|. 
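
The ssl_method_st comment above notes that its x509_method table "might deal with |X509| compatibility, or might be a no-op, depending on the application". A minimal sketch of that method-table idea follows; Conn, CertMethod and the two tables are invented stand-ins, and the real ssl_x509_method_st has many more hooks with different signatures.

    #include <cstdio>

    struct CertMethod;

    // Conn plays the role of the SSL object, holding the chosen method table.
    struct Conn {
      const CertMethod *cert_method;
    };

    struct CertMethod {
      int (*init)(Conn *conn);          // e.g. set up X509-based state
      void (*flush_cache)(Conn *conn);  // e.g. drop cached parsed objects
    };

    // One implementation carries the heavyweight X509 logic ...
    static int X509Init(Conn *) { std::puts("init X509 state"); return 1; }
    static void X509Flush(Conn *) { std::puts("drop cached X509 objects"); }
    static const CertMethod kWithX509 = {X509Init, X509Flush};

    // ... while another is all no-ops, for callers that stick to raw
    // CRYPTO_BUFFER-style certificates and never touch crypto/x509.
    static int NoopInit(Conn *) { return 1; }
    static void NoopFlush(Conn *) {}
    static const CertMethod kNoX509 = {NoopInit, NoopFlush};

    int main(int argc, char **argv) {
      (void)argv;
      // The table is chosen once at configuration time; afterwards callers go
      // through the function pointers and never ask "is X509 compiled in?".
      Conn conn = {argc > 1 ? &kWithX509 : &kNoX509};
      if (!conn.cert_method->init(&conn)) {
        return 1;
      }
      conn.cert_method->flush_cache(&conn);
      return 0;
    }
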
+ void (*session_clear)(SSL_SESSION *session); + // session_verify_cert_chain verifies the certificate chain in |session|, + // sets |session->verify_result| and returns one on success or zero on + // error. + int (*session_verify_cert_chain)(SSL_SESSION *session, SSL *ssl, + uint8_t *out_alert); + + // hs_flush_cached_ca_names drops any cached |X509_NAME|s from |hs|. + void (*hs_flush_cached_ca_names)(bssl::SSL_HANDSHAKE *hs); + // ssl_new does any neccessary initialisation of |ssl|. It returns one on + // success or zero on error. + int (*ssl_new)(SSL *ssl); + // ssl_free frees anything created by |ssl_new|. + void (*ssl_free)(SSL *ssl); + // ssl_flush_cached_client_CA drops any cached |X509_NAME|s from |ssl|. + void (*ssl_flush_cached_client_CA)(SSL *ssl); + // ssl_auto_chain_if_needed runs the deprecated auto-chaining logic if + // necessary. On success, it updates |ssl|'s certificate configuration as + // needed and returns one. Otherwise, it returns zero. + int (*ssl_auto_chain_if_needed)(SSL *ssl); + // ssl_ctx_new does any neccessary initialisation of |ctx|. It returns one on + // success or zero on error. + int (*ssl_ctx_new)(SSL_CTX *ctx); + // ssl_ctx_free frees anything created by |ssl_ctx_new|. + void (*ssl_ctx_free)(SSL_CTX *ctx); + // ssl_ctx_flush_cached_client_CA drops any cached |X509_NAME|s from |ctx|. + void (*ssl_ctx_flush_cached_client_CA)(SSL_CTX *ssl); +}; + +// The following types back public C-exposed types which must live in the global +// namespace. We use subclassing so the implementations may be C++ types with +// methods and destructor without polluting the global namespace. +struct ssl_ctx_st : public bssl::SSLContext {}; +struct ssl_st : public bssl::SSLConnection {}; -#if defined(__cplusplus) -} /* extern C */ -#endif -#endif /* OPENSSL_HEADER_SSL_INTERNAL_H */ +#endif // OPENSSL_HEADER_SSL_INTERNAL_H diff --git a/Sources/BoringSSL/ssl/s3_both.c b/Sources/BoringSSL/ssl/s3_both.c deleted file mode 100644 index 7fd09c654..000000000 --- a/Sources/BoringSSL/ssl/s3_both.c +++ /dev/null @@ -1,895 +0,0 @@ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ -/* ==================================================================== - * Copyright (c) 1998-2002 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. 
Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). */ -/* ==================================================================== - * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. - * ECC cipher suite support in OpenSSL originally developed by - * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project. */ - -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../crypto/internal.h" -#include "internal.h" - - -SSL_HANDSHAKE *ssl_handshake_new(SSL *ssl) { - SSL_HANDSHAKE *hs = OPENSSL_malloc(sizeof(SSL_HANDSHAKE)); - if (hs == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return NULL; - } - OPENSSL_memset(hs, 0, sizeof(SSL_HANDSHAKE)); - hs->ssl = ssl; - hs->wait = ssl_hs_ok; - hs->state = SSL_ST_INIT; - if (!SSL_TRANSCRIPT_init(&hs->transcript)) { - ssl_handshake_free(hs); - return NULL; - } - return hs; -} - -void ssl_handshake_free(SSL_HANDSHAKE *hs) { - if (hs == NULL) { - return; - } - - OPENSSL_cleanse(hs->secret, sizeof(hs->secret)); - OPENSSL_cleanse(hs->client_handshake_secret, - sizeof(hs->client_handshake_secret)); - OPENSSL_cleanse(hs->server_handshake_secret, - sizeof(hs->server_handshake_secret)); - OPENSSL_cleanse(hs->client_traffic_secret_0, - sizeof(hs->client_traffic_secret_0)); - OPENSSL_cleanse(hs->server_traffic_secret_0, - sizeof(hs->server_traffic_secret_0)); - SSL_ECDH_CTX_cleanup(&hs->ecdh_ctx); - SSL_TRANSCRIPT_cleanup(&hs->transcript); - OPENSSL_free(hs->cookie); - OPENSSL_free(hs->key_share_bytes); - OPENSSL_free(hs->public_key); - SSL_SESSION_free(hs->new_session); - OPENSSL_free(hs->peer_sigalgs); - OPENSSL_free(hs->peer_supported_group_list); - OPENSSL_free(hs->peer_key); - OPENSSL_free(hs->server_params); - OPENSSL_free(hs->peer_psk_identity_hint); - sk_X509_NAME_pop_free(hs->ca_names, X509_NAME_free); - OPENSSL_free(hs->certificate_types); - - if (hs->key_block != NULL) { - OPENSSL_cleanse(hs->key_block, hs->key_block_len); - OPENSSL_free(hs->key_block); - } - - OPENSSL_free(hs->hostname); - EVP_PKEY_free(hs->peer_pubkey); - OPENSSL_free(hs); -} - -int ssl_check_message_type(SSL *ssl, int type) { - if (ssl->s3->tmp.message_type != type) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - OPENSSL_PUT_ERROR(SSL, 
SSL_R_UNEXPECTED_MESSAGE); - ERR_add_error_dataf("got type %d, wanted type %d", - ssl->s3->tmp.message_type, type); - return 0; - } - - return 1; -} - -static int add_record_to_flight(SSL *ssl, uint8_t type, const uint8_t *in, - size_t in_len) { - /* We'll never add a flight while in the process of writing it out. */ - assert(ssl->s3->pending_flight_offset == 0); - - if (ssl->s3->pending_flight == NULL) { - ssl->s3->pending_flight = BUF_MEM_new(); - if (ssl->s3->pending_flight == NULL) { - return 0; - } - } - - size_t max_out = in_len + SSL_max_seal_overhead(ssl); - size_t new_cap = ssl->s3->pending_flight->length + max_out; - if (max_out < in_len || new_cap < max_out) { - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return 0; - } - - size_t len; - if (!BUF_MEM_reserve(ssl->s3->pending_flight, new_cap) || - !tls_seal_record(ssl, (uint8_t *)ssl->s3->pending_flight->data + - ssl->s3->pending_flight->length, - &len, max_out, type, in, in_len)) { - return 0; - } - - ssl->s3->pending_flight->length += len; - return 1; -} - -int ssl3_init_message(SSL *ssl, CBB *cbb, CBB *body, uint8_t type) { - /* Pick a modest size hint to save most of the |realloc| calls. */ - if (!CBB_init(cbb, 64) || - !CBB_add_u8(cbb, type) || - !CBB_add_u24_length_prefixed(cbb, body)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - CBB_cleanup(cbb); - return 0; - } - - return 1; -} - -int ssl3_finish_message(SSL *ssl, CBB *cbb, uint8_t **out_msg, - size_t *out_len) { - if (!CBB_finish(cbb, out_msg, out_len)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - return 1; -} - -int ssl3_add_message(SSL *ssl, uint8_t *msg, size_t len) { - /* Add the message to the current flight, splitting into several records if - * needed. */ - int ret = 0; - size_t added = 0; - do { - size_t todo = len - added; - if (todo > ssl->max_send_fragment) { - todo = ssl->max_send_fragment; - } - - if (!add_record_to_flight(ssl, SSL3_RT_HANDSHAKE, msg + added, todo)) { - goto err; - } - added += todo; - } while (added < len); - - ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HANDSHAKE, msg, len); - /* TODO(svaldez): Move this up a layer to fix abstraction for SSL_TRANSCRIPT - * on hs. 
*/ - if (ssl->s3->hs != NULL && - !SSL_TRANSCRIPT_update(&ssl->s3->hs->transcript, msg, len)) { - goto err; - } - ret = 1; - -err: - OPENSSL_free(msg); - return ret; -} - -int ssl3_add_change_cipher_spec(SSL *ssl) { - static const uint8_t kChangeCipherSpec[1] = {SSL3_MT_CCS}; - - if (!add_record_to_flight(ssl, SSL3_RT_CHANGE_CIPHER_SPEC, kChangeCipherSpec, - sizeof(kChangeCipherSpec))) { - return 0; - } - - ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_CHANGE_CIPHER_SPEC, - kChangeCipherSpec, sizeof(kChangeCipherSpec)); - return 1; -} - -int ssl3_add_alert(SSL *ssl, uint8_t level, uint8_t desc) { - uint8_t alert[2] = {level, desc}; - if (!add_record_to_flight(ssl, SSL3_RT_ALERT, alert, sizeof(alert))) { - return 0; - } - - ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_ALERT, alert, sizeof(alert)); - ssl_do_info_callback(ssl, SSL_CB_WRITE_ALERT, ((int)level << 8) | desc); - return 1; -} - -int ssl_add_message_cbb(SSL *ssl, CBB *cbb) { - uint8_t *msg; - size_t len; - if (!ssl->method->finish_message(ssl, cbb, &msg, &len) || - !ssl->method->add_message(ssl, msg, len)) { - return 0; - } - - return 1; -} - -int ssl3_flush_flight(SSL *ssl) { - if (ssl->s3->pending_flight == NULL) { - return 1; - } - - if (ssl->s3->pending_flight->length > 0xffffffff || - ssl->s3->pending_flight->length > INT_MAX) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } - - /* The handshake flight buffer is mutually exclusive with application data. - * - * TODO(davidben): This will not be true when closure alerts use this. */ - if (ssl_write_buffer_is_pending(ssl)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } - - /* Write the pending flight. */ - while (ssl->s3->pending_flight_offset < ssl->s3->pending_flight->length) { - int ret = BIO_write( - ssl->wbio, - ssl->s3->pending_flight->data + ssl->s3->pending_flight_offset, - ssl->s3->pending_flight->length - ssl->s3->pending_flight_offset); - if (ret <= 0) { - ssl->rwstate = SSL_WRITING; - return ret; - } - - ssl->s3->pending_flight_offset += ret; - } - - if (BIO_flush(ssl->wbio) <= 0) { - ssl->rwstate = SSL_WRITING; - return -1; - } - - BUF_MEM_free(ssl->s3->pending_flight); - ssl->s3->pending_flight = NULL; - ssl->s3->pending_flight_offset = 0; - return 1; -} - -int ssl3_send_finished(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - const SSL_SESSION *session = SSL_get_session(ssl); - - uint8_t finished[EVP_MAX_MD_SIZE]; - size_t finished_len; - if (!SSL_TRANSCRIPT_finish_mac(&hs->transcript, finished, &finished_len, - session, ssl->server, - ssl3_protocol_version(ssl))) { - return 0; - } - - /* Log the master secret, if logging is enabled. */ - if (!ssl_log_secret(ssl, "CLIENT_RANDOM", - session->master_key, - session->master_key_length)) { - return 0; - } - - /* Copy the Finished so we can use it for renegotiation checks. 
*/ - if (ssl->version != SSL3_VERSION) { - if (finished_len > sizeof(ssl->s3->previous_client_finished) || - finished_len > sizeof(ssl->s3->previous_server_finished)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } - - if (ssl->server) { - OPENSSL_memcpy(ssl->s3->previous_server_finished, finished, finished_len); - ssl->s3->previous_server_finished_len = finished_len; - } else { - OPENSSL_memcpy(ssl->s3->previous_client_finished, finished, finished_len); - ssl->s3->previous_client_finished_len = finished_len; - } - } - - CBB cbb, body; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_FINISHED) || - !CBB_add_bytes(&body, finished, finished_len) || - !ssl_add_message_cbb(ssl, &cbb)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - CBB_cleanup(&cbb); - return -1; - } - - return 1; -} - -int ssl3_get_finished(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_FINISHED)) { - return -1; - } - - /* Snapshot the finished hash before incorporating the new message. */ - uint8_t finished[EVP_MAX_MD_SIZE]; - size_t finished_len; - if (!SSL_TRANSCRIPT_finish_mac(&hs->transcript, finished, &finished_len, - SSL_get_session(ssl), !ssl->server, - ssl3_protocol_version(ssl)) || - !ssl_hash_current_message(hs)) { - return -1; - } - - int finished_ok = ssl->init_num == finished_len && - CRYPTO_memcmp(ssl->init_msg, finished, finished_len) == 0; -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - finished_ok = 1; -#endif - if (!finished_ok) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); - return -1; - } - - /* Copy the Finished so we can use it for renegotiation checks. */ - if (ssl->version != SSL3_VERSION) { - if (finished_len > sizeof(ssl->s3->previous_client_finished) || - finished_len > sizeof(ssl->s3->previous_server_finished)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } - - if (ssl->server) { - OPENSSL_memcpy(ssl->s3->previous_client_finished, finished, finished_len); - ssl->s3->previous_client_finished_len = finished_len; - } else { - OPENSSL_memcpy(ssl->s3->previous_server_finished, finished, finished_len); - ssl->s3->previous_server_finished_len = finished_len; - } - } - - return 1; -} - -int ssl3_output_cert_chain(SSL *ssl) { - CBB cbb, body; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_CERTIFICATE) || - !ssl_add_cert_chain(ssl, &body) || - !ssl_add_message_cbb(ssl, &cbb)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - CBB_cleanup(&cbb); - return 0; - } - - return 1; -} - -size_t ssl_max_handshake_message_len(const SSL *ssl) { - /* kMaxMessageLen is the default maximum message size for handshakes which do - * not accept peer certificate chains. */ - static const size_t kMaxMessageLen = 16384; - - if (SSL_in_init(ssl)) { - if ((!ssl->server || (ssl->verify_mode & SSL_VERIFY_PEER)) && - kMaxMessageLen < ssl->max_cert_list) { - return ssl->max_cert_list; - } - return kMaxMessageLen; - } - - if (ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - /* In TLS 1.2 and below, the largest acceptable post-handshake message is - * a HelloRequest. */ - return 0; - } - - if (ssl->server) { - /* The largest acceptable post-handshake message for a server is a - * KeyUpdate. We will never initiate post-handshake auth. */ - return 1; - } - - /* Clients must accept NewSessionTicket and CertificateRequest, so allow the - * default size. 
*/ - return kMaxMessageLen; -} - -static int extend_handshake_buffer(SSL *ssl, size_t length) { - if (!BUF_MEM_reserve(ssl->init_buf, length)) { - return -1; - } - while (ssl->init_buf->length < length) { - int ret = ssl3_read_handshake_bytes( - ssl, (uint8_t *)ssl->init_buf->data + ssl->init_buf->length, - length - ssl->init_buf->length); - if (ret <= 0) { - return ret; - } - ssl->init_buf->length += (size_t)ret; - } - return 1; -} - -static int read_v2_client_hello(SSL *ssl) { - /* Read the first 5 bytes, the size of the TLS record header. This is - * sufficient to detect a V2ClientHello and ensures that we never read beyond - * the first record. */ - int ret = ssl_read_buffer_extend_to(ssl, SSL3_RT_HEADER_LENGTH); - if (ret <= 0) { - return ret; - } - const uint8_t *p = ssl_read_buffer(ssl); - - /* Some dedicated error codes for protocol mixups should the application wish - * to interpret them differently. (These do not overlap with ClientHello or - * V2ClientHello.) */ - if (strncmp("GET ", (const char *)p, 4) == 0 || - strncmp("POST ", (const char *)p, 5) == 0 || - strncmp("HEAD ", (const char *)p, 5) == 0 || - strncmp("PUT ", (const char *)p, 4) == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_HTTP_REQUEST); - return -1; - } - if (strncmp("CONNE", (const char *)p, 5) == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_HTTPS_PROXY_REQUEST); - return -1; - } - - if ((p[0] & 0x80) == 0 || p[2] != SSL2_MT_CLIENT_HELLO || - p[3] != SSL3_VERSION_MAJOR) { - /* Not a V2ClientHello. */ - return 1; - } - - /* Determine the length of the V2ClientHello. */ - size_t msg_length = ((p[0] & 0x7f) << 8) | p[1]; - if (msg_length > (1024 * 4)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); - return -1; - } - if (msg_length < SSL3_RT_HEADER_LENGTH - 2) { - /* Reject lengths that are too short early. We have already read - * |SSL3_RT_HEADER_LENGTH| bytes, so we should not attempt to process an - * (invalid) V2ClientHello which would be shorter than that. */ - OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_LENGTH_MISMATCH); - return -1; - } - - /* Read the remainder of the V2ClientHello. */ - ret = ssl_read_buffer_extend_to(ssl, 2 + msg_length); - if (ret <= 0) { - return ret; - } - - CBS v2_client_hello; - CBS_init(&v2_client_hello, ssl_read_buffer(ssl) + 2, msg_length); - - /* The V2ClientHello without the length is incorporated into the handshake - * hash. This is only ever called at the start of the handshake, so hs is - * guaranteed to be non-NULL. */ - if (!SSL_TRANSCRIPT_update(&ssl->s3->hs->transcript, - CBS_data(&v2_client_hello), - CBS_len(&v2_client_hello))) { - return -1; - } - - ssl_do_msg_callback(ssl, 0 /* read */, 0 /* V2ClientHello */, - CBS_data(&v2_client_hello), CBS_len(&v2_client_hello)); - - uint8_t msg_type; - uint16_t version, cipher_spec_length, session_id_length, challenge_length; - CBS cipher_specs, session_id, challenge; - if (!CBS_get_u8(&v2_client_hello, &msg_type) || - !CBS_get_u16(&v2_client_hello, &version) || - !CBS_get_u16(&v2_client_hello, &cipher_spec_length) || - !CBS_get_u16(&v2_client_hello, &session_id_length) || - !CBS_get_u16(&v2_client_hello, &challenge_length) || - !CBS_get_bytes(&v2_client_hello, &cipher_specs, cipher_spec_length) || - !CBS_get_bytes(&v2_client_hello, &session_id, session_id_length) || - !CBS_get_bytes(&v2_client_hello, &challenge, challenge_length) || - CBS_len(&v2_client_hello) != 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return -1; - } - - /* msg_type has already been checked. 
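The V2ClientHello detection above keys off the two-byte SSLv2 record header: the top bit of the first byte marks the short header form and the remaining 15 bits carry the record length. A minimal standalone sketch of that check follows; it is not part of the patch, and LooksLikeV2ClientHello is an assumed name. It assumes a five-byte peek at the start of the connection, mirroring the constants used above (SSL2_MT_CLIENT_HELLO is 1, SSL3_VERSION_MAJOR is 3).
#include <cstddef>
#include <cstdint>

static bool LooksLikeV2ClientHello(const uint8_t header[5], size_t *out_len) {
  if ((header[0] & 0x80) == 0 ||  // Not a two-byte SSLv2 record header.
      header[2] != 1 ||           // SSL2_MT_CLIENT_HELLO
      header[3] != 3) {           // SSL3_VERSION_MAJOR
    return false;
  }
  // 15-bit big-endian record length.
  *out_len = static_cast<size_t>((header[0] & 0x7f) << 8) | header[1];
  return true;
}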
*/ - assert(msg_type == SSL2_MT_CLIENT_HELLO); - - /* The client_random is the V2ClientHello challenge. Truncate or - * left-pad with zeros as needed. */ - size_t rand_len = CBS_len(&challenge); - if (rand_len > SSL3_RANDOM_SIZE) { - rand_len = SSL3_RANDOM_SIZE; - } - uint8_t random[SSL3_RANDOM_SIZE]; - OPENSSL_memset(random, 0, SSL3_RANDOM_SIZE); - OPENSSL_memcpy(random + (SSL3_RANDOM_SIZE - rand_len), CBS_data(&challenge), - rand_len); - - /* Write out an equivalent SSLv3 ClientHello. */ - size_t max_v3_client_hello = SSL3_HM_HEADER_LENGTH + 2 /* version */ + - SSL3_RANDOM_SIZE + 1 /* session ID length */ + - 2 /* cipher list length */ + - CBS_len(&cipher_specs) / 3 * 2 + - 1 /* compression length */ + 1 /* compression */; - CBB client_hello, hello_body, cipher_suites; - CBB_zero(&client_hello); - if (!BUF_MEM_reserve(ssl->init_buf, max_v3_client_hello) || - !CBB_init_fixed(&client_hello, (uint8_t *)ssl->init_buf->data, - ssl->init_buf->max) || - !CBB_add_u8(&client_hello, SSL3_MT_CLIENT_HELLO) || - !CBB_add_u24_length_prefixed(&client_hello, &hello_body) || - !CBB_add_u16(&hello_body, version) || - !CBB_add_bytes(&hello_body, random, SSL3_RANDOM_SIZE) || - /* No session id. */ - !CBB_add_u8(&hello_body, 0) || - !CBB_add_u16_length_prefixed(&hello_body, &cipher_suites)) { - CBB_cleanup(&client_hello); - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return -1; - } - - /* Copy the cipher suites. */ - while (CBS_len(&cipher_specs) > 0) { - uint32_t cipher_spec; - if (!CBS_get_u24(&cipher_specs, &cipher_spec)) { - CBB_cleanup(&client_hello); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return -1; - } - - /* Skip SSLv2 ciphers. */ - if ((cipher_spec & 0xff0000) != 0) { - continue; - } - if (!CBB_add_u16(&cipher_suites, cipher_spec)) { - CBB_cleanup(&client_hello); - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } - } - - /* Add the null compression scheme and finish. */ - if (!CBB_add_u8(&hello_body, 1) || !CBB_add_u8(&hello_body, 0) || - !CBB_finish(&client_hello, NULL, &ssl->init_buf->length)) { - CBB_cleanup(&client_hello); - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } - - /* Consume and discard the V2ClientHello. */ - ssl_read_buffer_consume(ssl, 2 + msg_length); - ssl_read_buffer_discard(ssl); - - ssl->s3->is_v2_hello = 1; - /* This is the first message, so hs must be non-NULL. */ - ssl->s3->hs->v2_clienthello = 1; - return 1; -} - -int ssl3_get_message(SSL *ssl) { - /* Re-create the handshake buffer if needed. */ - if (ssl->init_buf == NULL) { - ssl->init_buf = BUF_MEM_new(); - if (ssl->init_buf == NULL) { - return -1; - } - } - - if (ssl->server && !ssl->s3->v2_hello_done) { - /* Bypass the record layer for the first message to handle V2ClientHello. */ - int ret = read_v2_client_hello(ssl); - if (ret <= 0) { - return ret; - } - ssl->s3->v2_hello_done = 1; - } - - if (ssl->s3->tmp.reuse_message) { - /* There must be a current message. */ - assert(ssl->init_msg != NULL); - ssl->s3->tmp.reuse_message = 0; - } else { - ssl3_release_current_message(ssl, 0 /* don't free buffer */); - } - - /* Read the message header, if we haven't yet. */ - int ret = extend_handshake_buffer(ssl, SSL3_HM_HEADER_LENGTH); - if (ret <= 0) { - return ret; - } - - /* Parse out the length. Cap it so the peer cannot force us to buffer up to - * 2^24 bytes. 
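The header parsed next is the standard four-byte handshake header: a one-byte message type followed by a 24-bit big-endian body length, which is then capped against ssl_max_handshake_message_len. A minimal sketch of that parse, separate from the patch and with assumed names:
#include <cstddef>
#include <cstdint>

struct HandshakeHeader {
  uint8_t type;
  size_t body_len;
};

static HandshakeHeader ParseHandshakeHeader(const uint8_t header[4]) {
  HandshakeHeader out;
  out.type = header[0];
  out.body_len = (static_cast<size_t>(header[1]) << 16) |
                 (static_cast<size_t>(header[2]) << 8) | header[3];
  return out;  // The caller must still enforce the maximum message length.
}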
*/ - const uint8_t *p = (uint8_t *)ssl->init_buf->data; - size_t msg_len = (((uint32_t)p[1]) << 16) | (((uint32_t)p[2]) << 8) | p[3]; - if (msg_len > ssl_max_handshake_message_len(ssl)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESSIVE_MESSAGE_SIZE); - return -1; - } - - /* Read the message body, if we haven't yet. */ - ret = extend_handshake_buffer(ssl, SSL3_HM_HEADER_LENGTH + msg_len); - if (ret <= 0) { - return ret; - } - - /* We have now received a complete message. */ - ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HANDSHAKE, ssl->init_buf->data, - ssl->init_buf->length); - - ssl->s3->tmp.message_type = ((const uint8_t *)ssl->init_buf->data)[0]; - ssl->init_msg = (uint8_t*)ssl->init_buf->data + SSL3_HM_HEADER_LENGTH; - ssl->init_num = ssl->init_buf->length - SSL3_HM_HEADER_LENGTH; - return 1; -} - -void ssl3_get_current_message(const SSL *ssl, CBS *out) { - CBS_init(out, (uint8_t *)ssl->init_buf->data, ssl->init_buf->length); -} - -int ssl_hash_current_message(SSL_HANDSHAKE *hs) { - /* V2ClientHellos are hashed implicitly. */ - if (hs->ssl->s3->is_v2_hello) { - return 1; - } - - CBS cbs; - hs->ssl->method->get_current_message(hs->ssl, &cbs); - return SSL_TRANSCRIPT_update(&hs->transcript, CBS_data(&cbs), CBS_len(&cbs)); -} - -void ssl3_release_current_message(SSL *ssl, int free_buffer) { - if (ssl->init_msg != NULL) { - /* |init_buf| never contains data beyond the current message. */ - assert(SSL3_HM_HEADER_LENGTH + ssl->init_num == ssl->init_buf->length); - - /* Clear the current message. */ - ssl->init_msg = NULL; - ssl->init_num = 0; - ssl->init_buf->length = 0; - ssl->s3->is_v2_hello = 0; - } - - if (free_buffer) { - BUF_MEM_free(ssl->init_buf); - ssl->init_buf = NULL; - } -} - -int ssl_verify_alarm_type(long type) { - int al; - - switch (type) { - case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: - case X509_V_ERR_UNABLE_TO_GET_CRL: - case X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER: - al = SSL_AD_UNKNOWN_CA; - break; - - case X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE: - case X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE: - case X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY: - case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD: - case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD: - case X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD: - case X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD: - case X509_V_ERR_CERT_NOT_YET_VALID: - case X509_V_ERR_CRL_NOT_YET_VALID: - case X509_V_ERR_CERT_UNTRUSTED: - case X509_V_ERR_CERT_REJECTED: - case X509_V_ERR_HOSTNAME_MISMATCH: - case X509_V_ERR_EMAIL_MISMATCH: - case X509_V_ERR_IP_ADDRESS_MISMATCH: - al = SSL_AD_BAD_CERTIFICATE; - break; - - case X509_V_ERR_CERT_SIGNATURE_FAILURE: - case X509_V_ERR_CRL_SIGNATURE_FAILURE: - al = SSL_AD_DECRYPT_ERROR; - break; - - case X509_V_ERR_CERT_HAS_EXPIRED: - case X509_V_ERR_CRL_HAS_EXPIRED: - al = SSL_AD_CERTIFICATE_EXPIRED; - break; - - case X509_V_ERR_CERT_REVOKED: - al = SSL_AD_CERTIFICATE_REVOKED; - break; - - case X509_V_ERR_UNSPECIFIED: - case X509_V_ERR_OUT_OF_MEM: - case X509_V_ERR_INVALID_CALL: - case X509_V_ERR_STORE_LOOKUP: - al = SSL_AD_INTERNAL_ERROR; - break; - - case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT: - case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN: - case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY: - case X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE: - case X509_V_ERR_CERT_CHAIN_TOO_LONG: - case X509_V_ERR_PATH_LENGTH_EXCEEDED: - case X509_V_ERR_INVALID_CA: - al = SSL_AD_UNKNOWN_CA; - break; - - case X509_V_ERR_APPLICATION_VERIFICATION: - al = 
SSL_AD_HANDSHAKE_FAILURE; - break; - - case X509_V_ERR_INVALID_PURPOSE: - al = SSL_AD_UNSUPPORTED_CERTIFICATE; - break; - - default: - al = SSL_AD_CERTIFICATE_UNKNOWN; - break; - } - - return al; -} - -int ssl_parse_extensions(const CBS *cbs, uint8_t *out_alert, - const SSL_EXTENSION_TYPE *ext_types, - size_t num_ext_types, int ignore_unknown) { - /* Reset everything. */ - for (size_t i = 0; i < num_ext_types; i++) { - *ext_types[i].out_present = 0; - CBS_init(ext_types[i].out_data, NULL, 0); - } - - CBS copy = *cbs; - while (CBS_len(&copy) != 0) { - uint16_t type; - CBS data; - if (!CBS_get_u16(&copy, &type) || - !CBS_get_u16_length_prefixed(&copy, &data)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); - *out_alert = SSL_AD_DECODE_ERROR; - return 0; - } - - const SSL_EXTENSION_TYPE *ext_type = NULL; - for (size_t i = 0; i < num_ext_types; i++) { - if (type == ext_types[i].type) { - ext_type = &ext_types[i]; - break; - } - } - - if (ext_type == NULL) { - if (ignore_unknown) { - continue; - } - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); - *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; - return 0; - } - - /* Duplicate ext_types are forbidden. */ - if (*ext_type->out_present) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DUPLICATE_EXTENSION); - *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; - } - - *ext_type->out_present = 1; - *ext_type->out_data = data; - } - - return 1; -} diff --git a/Sources/BoringSSL/ssl/s3_both.cc b/Sources/BoringSSL/ssl/s3_both.cc new file mode 100644 index 000000000..ede4ba7ed --- /dev/null +++ b/Sources/BoringSSL/ssl/s3_both.cc @@ -0,0 +1,585 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4.
If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ +/* ==================================================================== + * Copyright (c) 1998-2002 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). */ +/* ==================================================================== + * Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED. + * ECC cipher suite support in OpenSSL originally developed by + * SUN MICROSYSTEMS, INC., and contributed to the OpenSSL project. */ + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +namespace bssl { + +static bool add_record_to_flight(SSL *ssl, uint8_t type, + Span in) { + // We'll never add a flight while in the process of writing it out. + assert(ssl->s3->pending_flight_offset == 0); + + if (ssl->s3->pending_flight == nullptr) { + ssl->s3->pending_flight.reset(BUF_MEM_new()); + if (ssl->s3->pending_flight == nullptr) { + return false; + } + } + + size_t max_out = in.size() + SSL_max_seal_overhead(ssl); + size_t new_cap = ssl->s3->pending_flight->length + max_out; + if (max_out < in.size() || new_cap < max_out) { + OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); + return false; + } + + size_t len; + if (!BUF_MEM_reserve(ssl->s3->pending_flight.get(), new_cap) || + !tls_seal_record(ssl, + (uint8_t *)ssl->s3->pending_flight->data + + ssl->s3->pending_flight->length, + &len, max_out, type, in.data(), in.size())) { + return false; + } + + ssl->s3->pending_flight->length += len; + return true; +} + +bool ssl3_init_message(SSL *ssl, CBB *cbb, CBB *body, uint8_t type) { + // Pick a modest size hint to save most of the |realloc| calls. + if (!CBB_init(cbb, 64) || + !CBB_add_u8(cbb, type) || + !CBB_add_u24_length_prefixed(cbb, body)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + CBB_cleanup(cbb); + return false; + } + + return true; +} + +bool ssl3_finish_message(SSL *ssl, CBB *cbb, Array *out_msg) { + return CBBFinishArray(cbb, out_msg); +} + +bool ssl3_add_message(SSL *ssl, Array msg) { + // Add the message to the current flight, splitting into several records if + // needed. + Span rest = msg; + do { + Span chunk = rest.subspan(0, ssl->max_send_fragment); + rest = rest.subspan(chunk.size()); + + if (!add_record_to_flight(ssl, SSL3_RT_HANDSHAKE, chunk)) { + return false; + } + } while (!rest.empty()); + + ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HANDSHAKE, msg); + // TODO(svaldez): Move this up a layer to fix abstraction for SSLTranscript on + // hs. 
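ssl3_add_message above splits a handshake message into records of at most |max_send_fragment| bytes, emitting at least one record even for an empty message. The following standalone sketch of that loop is not part of the patch; it assumes max_fragment > 0 and substitutes a std::function callback for add_record_to_flight.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

static bool SplitIntoRecords(
    const std::vector<uint8_t> &msg, size_t max_fragment,
    const std::function<bool(const uint8_t *, size_t)> &emit_record) {
  size_t offset = 0;
  do {
    // The final chunk may be shorter than max_fragment.
    size_t chunk = std::min(max_fragment, msg.size() - offset);
    if (!emit_record(msg.data() + offset, chunk)) {
      return false;
    }
    offset += chunk;
  } while (offset < msg.size());  // An empty message still emits one record.
  return true;
}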
+ if (ssl->s3->hs != NULL && + !ssl->s3->hs->transcript.Update(msg)) { + return false; + } + return true; +} + +bool ssl3_add_change_cipher_spec(SSL *ssl) { + static const uint8_t kChangeCipherSpec[1] = {SSL3_MT_CCS}; + + if (!add_record_to_flight(ssl, SSL3_RT_CHANGE_CIPHER_SPEC, + kChangeCipherSpec)) { + return false; + } + + ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_CHANGE_CIPHER_SPEC, + kChangeCipherSpec); + return true; +} + +bool ssl3_add_alert(SSL *ssl, uint8_t level, uint8_t desc) { + uint8_t alert[2] = {level, desc}; + if (!add_record_to_flight(ssl, SSL3_RT_ALERT, alert)) { + return false; + } + + ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_ALERT, alert); + ssl_do_info_callback(ssl, SSL_CB_WRITE_ALERT, ((int)level << 8) | desc); + return true; +} + +int ssl3_flush_flight(SSL *ssl) { + if (ssl->s3->pending_flight == nullptr) { + return 1; + } + + if (ssl->s3->write_shutdown != ssl_shutdown_none) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); + return -1; + } + + if (ssl->s3->pending_flight->length > 0xffffffff || + ssl->s3->pending_flight->length > INT_MAX) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return -1; + } + + // If there is pending data in the write buffer, it must be flushed out before + // any new data in pending_flight. + if (!ssl->s3->write_buffer.empty()) { + int ret = ssl_write_buffer_flush(ssl); + if (ret <= 0) { + ssl->s3->rwstate = SSL_WRITING; + return ret; + } + } + + // Write the pending flight. + while (ssl->s3->pending_flight_offset < ssl->s3->pending_flight->length) { + int ret = BIO_write( + ssl->wbio, + ssl->s3->pending_flight->data + ssl->s3->pending_flight_offset, + ssl->s3->pending_flight->length - ssl->s3->pending_flight_offset); + if (ret <= 0) { + ssl->s3->rwstate = SSL_WRITING; + return ret; + } + + ssl->s3->pending_flight_offset += ret; + } + + if (BIO_flush(ssl->wbio) <= 0) { + ssl->s3->rwstate = SSL_WRITING; + return -1; + } + + ssl->s3->pending_flight.reset(); + ssl->s3->pending_flight_offset = 0; + return 1; +} + +static ssl_open_record_t read_v2_client_hello(SSL *ssl, size_t *out_consumed, + Span in) { + *out_consumed = 0; + assert(in.size() >= SSL3_RT_HEADER_LENGTH); + // Determine the length of the V2ClientHello. + size_t msg_length = ((in[0] & 0x7f) << 8) | in[1]; + if (msg_length > (1024 * 4)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); + return ssl_open_record_error; + } + if (msg_length < SSL3_RT_HEADER_LENGTH - 2) { + // Reject lengths that are too short early. We have already read + // |SSL3_RT_HEADER_LENGTH| bytes, so we should not attempt to process an + // (invalid) V2ClientHello which would be shorter than that. + OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_LENGTH_MISMATCH); + return ssl_open_record_error; + } + + // Ask for the remainder of the V2ClientHello. + if (in.size() < 2 + msg_length) { + *out_consumed = 2 + msg_length; + return ssl_open_record_partial; + } + + CBS v2_client_hello = CBS(ssl->s3->read_buffer.span().subspan(2, msg_length)); + // The V2ClientHello without the length is incorporated into the handshake + // hash. This is only ever called at the start of the handshake, so hs is + // guaranteed to be non-NULL. 
+ if (!ssl->s3->hs->transcript.Update(v2_client_hello)) { + return ssl_open_record_error; + } + + ssl_do_msg_callback(ssl, 0 /* read */, 0 /* V2ClientHello */, + v2_client_hello); + + uint8_t msg_type; + uint16_t version, cipher_spec_length, session_id_length, challenge_length; + CBS cipher_specs, session_id, challenge; + if (!CBS_get_u8(&v2_client_hello, &msg_type) || + !CBS_get_u16(&v2_client_hello, &version) || + !CBS_get_u16(&v2_client_hello, &cipher_spec_length) || + !CBS_get_u16(&v2_client_hello, &session_id_length) || + !CBS_get_u16(&v2_client_hello, &challenge_length) || + !CBS_get_bytes(&v2_client_hello, &cipher_specs, cipher_spec_length) || + !CBS_get_bytes(&v2_client_hello, &session_id, session_id_length) || + !CBS_get_bytes(&v2_client_hello, &challenge, challenge_length) || + CBS_len(&v2_client_hello) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_open_record_error; + } + + // msg_type has already been checked. + assert(msg_type == SSL2_MT_CLIENT_HELLO); + + // The client_random is the V2ClientHello challenge. Truncate or left-pad with + // zeros as needed. + size_t rand_len = CBS_len(&challenge); + if (rand_len > SSL3_RANDOM_SIZE) { + rand_len = SSL3_RANDOM_SIZE; + } + uint8_t random[SSL3_RANDOM_SIZE]; + OPENSSL_memset(random, 0, SSL3_RANDOM_SIZE); + OPENSSL_memcpy(random + (SSL3_RANDOM_SIZE - rand_len), CBS_data(&challenge), + rand_len); + + // Write out an equivalent SSLv3 ClientHello. + size_t max_v3_client_hello = SSL3_HM_HEADER_LENGTH + 2 /* version */ + + SSL3_RANDOM_SIZE + 1 /* session ID length */ + + 2 /* cipher list length */ + + CBS_len(&cipher_specs) / 3 * 2 + + 1 /* compression length */ + 1 /* compression */; + ScopedCBB client_hello; + CBB hello_body, cipher_suites; + if (!BUF_MEM_reserve(ssl->s3->hs_buf.get(), max_v3_client_hello) || + !CBB_init_fixed(client_hello.get(), (uint8_t *)ssl->s3->hs_buf->data, + ssl->s3->hs_buf->max) || + !CBB_add_u8(client_hello.get(), SSL3_MT_CLIENT_HELLO) || + !CBB_add_u24_length_prefixed(client_hello.get(), &hello_body) || + !CBB_add_u16(&hello_body, version) || + !CBB_add_bytes(&hello_body, random, SSL3_RANDOM_SIZE) || + // No session id. + !CBB_add_u8(&hello_body, 0) || + !CBB_add_u16_length_prefixed(&hello_body, &cipher_suites)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return ssl_open_record_error; + } + + // Copy the cipher suites. + while (CBS_len(&cipher_specs) > 0) { + uint32_t cipher_spec; + if (!CBS_get_u24(&cipher_specs, &cipher_spec)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_open_record_error; + } + + // Skip SSLv2 ciphers. + if ((cipher_spec & 0xff0000) != 0) { + continue; + } + if (!CBB_add_u16(&cipher_suites, cipher_spec)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_open_record_error; + } + } + + // Add the null compression scheme and finish. 
+ if (!CBB_add_u8(&hello_body, 1) || + !CBB_add_u8(&hello_body, 0) || + !CBB_finish(client_hello.get(), NULL, &ssl->s3->hs_buf->length)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_open_record_error; + } + + *out_consumed = 2 + msg_length; + ssl->s3->is_v2_hello = true; + return ssl_open_record_success; +} + +static bool parse_message(const SSL *ssl, SSLMessage *out, + size_t *out_bytes_needed) { + if (!ssl->s3->hs_buf) { + *out_bytes_needed = 4; + return false; + } + + CBS cbs; + uint32_t len; + CBS_init(&cbs, reinterpret_cast(ssl->s3->hs_buf->data), + ssl->s3->hs_buf->length); + if (!CBS_get_u8(&cbs, &out->type) || + !CBS_get_u24(&cbs, &len)) { + *out_bytes_needed = 4; + return false; + } + + if (!CBS_get_bytes(&cbs, &out->body, len)) { + *out_bytes_needed = 4 + len; + return false; + } + + CBS_init(&out->raw, reinterpret_cast(ssl->s3->hs_buf->data), + 4 + len); + out->is_v2_hello = ssl->s3->is_v2_hello; + return true; +} + +bool ssl3_get_message(SSL *ssl, SSLMessage *out) { + size_t unused; + if (!parse_message(ssl, out, &unused)) { + return false; + } + if (!ssl->s3->has_message) { + if (!out->is_v2_hello) { + ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HANDSHAKE, out->raw); + } + ssl->s3->has_message = true; + } + return true; +} + +bool tls_can_accept_handshake_data(const SSL *ssl, uint8_t *out_alert) { + // If there is a complete message, the caller must have consumed it first. + SSLMessage msg; + size_t bytes_needed; + if (parse_message(ssl, &msg, &bytes_needed)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + *out_alert = SSL_AD_INTERNAL_ERROR; + return false; + } + + // Enforce the limit so the peer cannot force us to buffer 16MB. + if (bytes_needed > 4 + ssl_max_handshake_message_len(ssl)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_EXCESSIVE_MESSAGE_SIZE); + *out_alert = SSL_AD_ILLEGAL_PARAMETER; + return false; + } + + return true; +} + +bool tls_has_unprocessed_handshake_data(const SSL *ssl) { + size_t msg_len = 0; + if (ssl->s3->has_message) { + SSLMessage msg; + size_t unused; + if (parse_message(ssl, &msg, &unused)) { + msg_len = CBS_len(&msg.raw); + } + } + + return ssl->s3->hs_buf && ssl->s3->hs_buf->length > msg_len; +} + +ssl_open_record_t ssl3_open_handshake(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, Span in) { + *out_consumed = 0; + // Re-create the handshake buffer if needed. + if (!ssl->s3->hs_buf) { + ssl->s3->hs_buf.reset(BUF_MEM_new()); + if (!ssl->s3->hs_buf) { + *out_alert = SSL_AD_INTERNAL_ERROR; + return ssl_open_record_error; + } + } + + // Bypass the record layer for the first message to handle V2ClientHello. + if (ssl->server && !ssl->s3->v2_hello_done) { + // Ask for the first 5 bytes, the size of the TLS record header. This is + // sufficient to detect a V2ClientHello and ensures that we never read + // beyond the first record. + if (in.size() < SSL3_RT_HEADER_LENGTH) { + *out_consumed = SSL3_RT_HEADER_LENGTH; + return ssl_open_record_partial; + } + + // Some dedicated error codes for protocol mixups should the application + // wish to interpret them differently. (These do not overlap with + // ClientHello or V2ClientHello.) 
+ const char *str = reinterpret_cast(in.data()); + if (strncmp("GET ", str, 4) == 0 || + strncmp("POST ", str, 5) == 0 || + strncmp("HEAD ", str, 5) == 0 || + strncmp("PUT ", str, 4) == 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_HTTP_REQUEST); + *out_alert = 0; + return ssl_open_record_error; + } + if (strncmp("CONNE", str, 5) == 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_HTTPS_PROXY_REQUEST); + *out_alert = 0; + return ssl_open_record_error; + } + + // Check for a V2ClientHello. + if ((in[0] & 0x80) != 0 && in[2] == SSL2_MT_CLIENT_HELLO && + in[3] == SSL3_VERSION_MAJOR) { + auto ret = read_v2_client_hello(ssl, out_consumed, in); + if (ret == ssl_open_record_error) { + *out_alert = 0; + } else if (ret == ssl_open_record_success) { + ssl->s3->v2_hello_done = true; + } + return ret; + } + + ssl->s3->v2_hello_done = true; + } + + uint8_t type; + Span body; + auto ret = tls_open_record(ssl, &type, &body, out_consumed, out_alert, in); + if (ret != ssl_open_record_success) { + return ret; + } + + // WatchGuard's TLS 1.3 interference bug is very distinctive: they drop the + // ServerHello and send the remaining encrypted application data records + // as-is. This manifests as an application data record when we expect + // handshake. Report a dedicated error code for this case. + if (!ssl->server && type == SSL3_RT_APPLICATION_DATA && + ssl->s3->aead_read_ctx->is_null_cipher()) { + OPENSSL_PUT_ERROR(SSL, SSL_R_APPLICATION_DATA_INSTEAD_OF_HANDSHAKE); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } + + if (type != SSL3_RT_HANDSHAKE) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } + + // Append the entire handshake record to the buffer. + if (!BUF_MEM_append(ssl->s3->hs_buf.get(), body.data(), body.size())) { + *out_alert = SSL_AD_INTERNAL_ERROR; + return ssl_open_record_error; + } + + return ssl_open_record_success; +} + +void ssl3_next_message(SSL *ssl) { + SSLMessage msg; + if (!ssl3_get_message(ssl, &msg) || + !ssl->s3->hs_buf || + ssl->s3->hs_buf->length < CBS_len(&msg.raw)) { + assert(0); + return; + } + + OPENSSL_memmove(ssl->s3->hs_buf->data, + ssl->s3->hs_buf->data + CBS_len(&msg.raw), + ssl->s3->hs_buf->length - CBS_len(&msg.raw)); + ssl->s3->hs_buf->length -= CBS_len(&msg.raw); + ssl->s3->is_v2_hello = false; + ssl->s3->has_message = false; + + // Post-handshake messages are rare, so release the buffer after every + // message. During the handshake, |on_handshake_complete| will release it. 
+ if (!SSL_in_init(ssl) && ssl->s3->hs_buf->length == 0) { + ssl->s3->hs_buf.reset(); + } +} + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/s3_lib.c b/Sources/BoringSSL/ssl/s3_lib.cc similarity index 88% rename from Sources/BoringSSL/ssl/s3_lib.c rename to Sources/BoringSSL/ssl/s3_lib.cc index 57a27c707..b925cd72f 100644 --- a/Sources/BoringSSL/ssl/s3_lib.c +++ b/Sources/BoringSSL/ssl/s3_lib.cc @@ -152,7 +152,6 @@ #include #include -#include #include #include #include @@ -163,30 +162,45 @@ #include "internal.h" -int ssl3_new(SSL *ssl) { - SSL3_STATE *s3; +namespace bssl { - s3 = OPENSSL_malloc(sizeof *s3); - if (s3 == NULL) { - return 0; +SSL3_STATE::SSL3_STATE() + : skip_early_data(false), + have_version(false), + v2_hello_done(false), + is_v2_hello(false), + has_message(false), + initial_handshake_complete(false), + session_reused(false), + send_connection_binding(false), + tlsext_channel_id_valid(false), + key_update_pending(false), + wpend_pending(false) {} + +SSL3_STATE::~SSL3_STATE() {} + +bool ssl3_new(SSL *ssl) { + UniquePtr s3 = MakeUnique(); + if (!s3) { + return false; } - OPENSSL_memset(s3, 0, sizeof *s3); + s3->aead_read_ctx = SSLAEADContext::CreateNullCipher(SSL_is_dtls(ssl)); + s3->aead_write_ctx = SSLAEADContext::CreateNullCipher(SSL_is_dtls(ssl)); s3->hs = ssl_handshake_new(ssl); - if (s3->hs == NULL) { - OPENSSL_free(s3); - return 0; + if (!s3->aead_read_ctx || !s3->aead_write_ctx || !s3->hs) { + return false; } - ssl->s3 = s3; + ssl->s3 = s3.release(); - /* Set the version to the highest supported version. - * - * TODO(davidben): Move this field into |s3|, have it store the normalized - * protocol version, and implement this pre-negotiation quirk in |SSL_version| - * at the API boundary rather than in internal state. */ + // Set the version to the highest supported version. + // + // TODO(davidben): Move this field into |s3|, have it store the normalized + // protocol version, and implement this pre-negotiation quirk in |SSL_version| + // at the API boundary rather than in internal state. ssl->version = TLS1_2_VERSION; - return 1; + return true; } void ssl3_free(SSL *ssl) { @@ -194,19 +208,7 @@ void ssl3_free(SSL *ssl) { return; } - ssl_read_buffer_clear(ssl); - ssl_write_buffer_clear(ssl); - - SSL_SESSION_free(ssl->s3->established_session); - ssl_handshake_free(ssl->s3->hs); - OPENSSL_free(ssl->s3->next_proto_negotiated); - OPENSSL_free(ssl->s3->alpn_selected); - SSL_AEAD_CTX_free(ssl->s3->aead_read_ctx); - SSL_AEAD_CTX_free(ssl->s3->aead_write_ctx); - BUF_MEM_free(ssl->s3->pending_flight); - - OPENSSL_cleanse(ssl->s3, sizeof *ssl->s3); - OPENSSL_free(ssl->s3); + Delete(ssl->s3); ssl->s3 = NULL; } @@ -218,3 +220,5 @@ const struct ssl_cipher_preference_list_st *ssl_get_cipher_preferences( return ssl->ctx->cipher_list; } + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/s3_pkt.c b/Sources/BoringSSL/ssl/s3_pkt.cc similarity index 54% rename from Sources/BoringSSL/ssl/s3_pkt.c rename to Sources/BoringSSL/ssl/s3_pkt.cc index 2f919caed..e6518ba8f 100644 --- a/Sources/BoringSSL/ssl/s3_pkt.c +++ b/Sources/BoringSSL/ssl/s3_pkt.cc @@ -122,110 +122,74 @@ #include "internal.h" -static int do_ssl3_write(SSL *ssl, int type, const uint8_t *buf, unsigned len); - -/* ssl3_get_record reads a new input record. On success, it places it in - * |ssl->s3->rrec| and returns one. Otherwise it returns <= 0 on error or if - * more data is needed. 
*/ -static int ssl3_get_record(SSL *ssl) { -again: - switch (ssl->s3->recv_shutdown) { - case ssl_shutdown_none: - break; - case ssl_shutdown_fatal_alert: - OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); - return -1; - case ssl_shutdown_close_notify: - return 0; - } +namespace bssl { - CBS body; - uint8_t type, alert = SSL_AD_DECODE_ERROR; - size_t consumed; - enum ssl_open_record_t open_ret = - tls_open_record(ssl, &type, &body, &consumed, &alert, - ssl_read_buffer(ssl), ssl_read_buffer_len(ssl)); - if (open_ret != ssl_open_record_partial) { - ssl_read_buffer_consume(ssl, consumed); - } - switch (open_ret) { - case ssl_open_record_partial: { - int read_ret = ssl_read_buffer_extend_to(ssl, consumed); - if (read_ret <= 0) { - return read_ret; - } - goto again; - } +static int do_ssl3_write(SSL *ssl, int type, const uint8_t *in, unsigned len); - case ssl_open_record_success: - if (CBS_len(&body) > 0xffff) { - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return -1; - } +int ssl3_write_app_data(SSL *ssl, bool *out_needs_handshake, const uint8_t *in, + int len) { + assert(ssl_can_write(ssl)); + assert(!ssl->s3->aead_write_ctx->is_null_cipher()); - SSL3_RECORD *rr = &ssl->s3->rrec; - rr->type = type; - rr->length = (uint16_t)CBS_len(&body); - rr->data = (uint8_t *)CBS_data(&body); - return 1; + *out_needs_handshake = false; - case ssl_open_record_discard: - goto again; - - case ssl_open_record_close_notify: - return 0; - - case ssl_open_record_fatal_alert: - return -1; - - case ssl_open_record_error: - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return -1; + if (ssl->s3->write_shutdown != ssl_shutdown_none) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); + return -1; } - assert(0); - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; -} - -int ssl3_write_app_data(SSL *ssl, const uint8_t *buf, int len) { - assert(!SSL_in_init(ssl) || SSL_in_false_start(ssl)); - unsigned tot, n, nw; assert(ssl->s3->wnum <= INT_MAX); tot = ssl->s3->wnum; ssl->s3->wnum = 0; - /* Ensure that if we end up with a smaller value of data to write out than - * the the original len from a write which didn't complete for non-blocking - * I/O and also somehow ended up avoiding the check for this in - * ssl3_write_pending/SSL_R_BAD_WRITE_RETRY as it must never be possible to - * end up with (len-tot) as a large number that will then promptly send - * beyond the end of the users buffer ... so we trap and report the error in - * a way the user will notice. */ + // Ensure that if we end up with a smaller value of data to write out than + // the the original len from a write which didn't complete for non-blocking + // I/O and also somehow ended up avoiding the check for this in + // ssl3_write_pending/SSL_R_BAD_WRITE_RETRY as it must never be possible to + // end up with (len-tot) as a large number that will then promptly send + // beyond the end of the users buffer ... so we trap and report the error in + // a way the user will notice. if (len < 0 || (size_t)len < tot) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_LENGTH); return -1; } + const int is_early_data_write = + !ssl->server && SSL_in_early_data(ssl) && ssl->s3->hs->can_early_write; + n = len - tot; for (;;) { - /* max contains the maximum number of bytes that we can put into a - * record. */ + // max contains the maximum number of bytes that we can put into a record. 
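In the early-data branch below, |max| is additionally capped by the unused 0-RTT budget: |ticket_max_early_data| minus the bytes already written. A small sketch of that arithmetic, with assumed names and not taken from the patch; when the result is zero, the real code stops early writes and reports that the handshake must make progress first.
#include <algorithm>
#include <cstddef>

static size_t CapForEarlyData(size_t want, size_t max_fragment,
                              size_t early_data_budget,
                              size_t early_data_written) {
  size_t remaining = early_data_budget > early_data_written
                         ? early_data_budget - early_data_written
                         : 0;
  return std::min(want, std::min(max_fragment, remaining));
}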
unsigned max = ssl->max_send_fragment; + if (is_early_data_write && max > ssl->session->ticket_max_early_data - + ssl->s3->hs->early_data_written) { + max = ssl->session->ticket_max_early_data - ssl->s3->hs->early_data_written; + if (max == 0) { + ssl->s3->wnum = tot; + ssl->s3->hs->can_early_write = false; + *out_needs_handshake = true; + return -1; + } + } + if (n > max) { nw = max; } else { nw = n; } - int ret = do_ssl3_write(ssl, SSL3_RT_APPLICATION_DATA, &buf[tot], nw); + int ret = do_ssl3_write(ssl, SSL3_RT_APPLICATION_DATA, &in[tot], nw); if (ret <= 0) { ssl->s3->wnum = tot; return ret; } + if (is_early_data_write) { + ssl->s3->hs->early_data_written += ret; + } + if (ret == (int)n || (ssl->mode & SSL_MODE_ENABLE_PARTIAL_WRITE)) { return tot + ret; } @@ -235,11 +199,11 @@ int ssl3_write_app_data(SSL *ssl, const uint8_t *buf, int len) { } } -static int ssl3_write_pending(SSL *ssl, int type, const uint8_t *buf, +static int ssl3_write_pending(SSL *ssl, int type, const uint8_t *in, unsigned int len) { if (ssl->s3->wpend_tot > (int)len || (!(ssl->mode & SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) && - ssl->s3->wpend_buf != buf) || + ssl->s3->wpend_buf != in) || ssl->s3->wpend_type != type) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_WRITE_RETRY); return -1; @@ -249,25 +213,19 @@ static int ssl3_write_pending(SSL *ssl, int type, const uint8_t *buf, if (ret <= 0) { return ret; } + ssl->s3->wpend_pending = false; return ssl->s3->wpend_ret; } -/* do_ssl3_write writes an SSL record of the given type. */ -static int do_ssl3_write(SSL *ssl, int type, const uint8_t *buf, unsigned len) { - /* If there is still data from the previous record, flush it. */ - if (ssl_write_buffer_is_pending(ssl)) { - return ssl3_write_pending(ssl, type, buf, len); - } - - /* The handshake flight buffer is mutually exclusive with application data. - * - * TODO(davidben): This will not be true when closure alerts use this. */ - if (ssl->s3->pending_flight != NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; +// do_ssl3_write writes an SSL record of the given type. +static int do_ssl3_write(SSL *ssl, int type, const uint8_t *in, unsigned len) { + // If there is still data from the previous record, flush it. + if (ssl->s3->wpend_pending) { + return ssl3_write_pending(ssl, type, in, len); } - if (len > SSL3_RT_MAX_PLAIN_LENGTH) { + SSLBuffer *buf = &ssl->s3->write_buffer; + if (len > SSL3_RT_MAX_PLAIN_LENGTH || buf->size() > 0) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return -1; } @@ -276,194 +234,189 @@ static int do_ssl3_write(SSL *ssl, int type, const uint8_t *buf, unsigned len) { return 0; } + size_t flight_len = 0; + if (ssl->s3->pending_flight != nullptr) { + flight_len = + ssl->s3->pending_flight->length - ssl->s3->pending_flight_offset; + } + size_t max_out = len + SSL_max_seal_overhead(ssl); - if (max_out < len) { + if (max_out < len || max_out + flight_len < max_out) { OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); return -1; } - uint8_t *out; + max_out += flight_len; + + if (!buf->EnsureCap(flight_len + ssl_seal_align_prefix_len(ssl), max_out)) { + return -1; + } + + // Add any unflushed handshake data as a prefix. This may be a KeyUpdate + // acknowledgment or 0-RTT key change messages. |pending_flight| must be clear + // when data is added to |write_buffer| or it will be written in the wrong + // order. 
+ if (ssl->s3->pending_flight != nullptr) { + OPENSSL_memcpy( + buf->remaining().data(), + ssl->s3->pending_flight->data + ssl->s3->pending_flight_offset, + flight_len); + ssl->s3->pending_flight.reset(); + ssl->s3->pending_flight_offset = 0; + buf->DidWrite(flight_len); + } + size_t ciphertext_len; - if (!ssl_write_buffer_init(ssl, &out, max_out) || - !tls_seal_record(ssl, out, &ciphertext_len, max_out, type, buf, len)) { + if (!tls_seal_record(ssl, buf->remaining().data(), &ciphertext_len, + buf->remaining().size(), type, in, len)) { return -1; } - ssl_write_buffer_set_len(ssl, ciphertext_len); + buf->DidWrite(ciphertext_len); - /* memorize arguments so that ssl3_write_pending can detect bad write retries - * later */ + // Now that we've made progress on the connection, uncork KeyUpdate + // acknowledgments. + ssl->s3->key_update_pending = false; + + // Memorize arguments so that ssl3_write_pending can detect bad write retries + // later. ssl->s3->wpend_tot = len; - ssl->s3->wpend_buf = buf; + ssl->s3->wpend_buf = in; ssl->s3->wpend_type = type; ssl->s3->wpend_ret = len; + ssl->s3->wpend_pending = true; - /* we now just need to write the buffer */ - return ssl3_write_pending(ssl, type, buf, len); + // We now just need to write the buffer. + return ssl3_write_pending(ssl, type, in, len); } -static int consume_record(SSL *ssl, uint8_t *out, int len, int peek) { - SSL3_RECORD *rr = &ssl->s3->rrec; - - if (len <= 0) { - return len; - } +ssl_open_record_t ssl3_open_app_data(SSL *ssl, Span *out, + size_t *out_consumed, uint8_t *out_alert, + Span in) { + assert(ssl_can_read(ssl)); + assert(!ssl->s3->aead_read_ctx->is_null_cipher()); - if (len > (int)rr->length) { - len = (int)rr->length; + uint8_t type; + Span body; + auto ret = tls_open_record(ssl, &type, &body, out_consumed, out_alert, in); + if (ret != ssl_open_record_success) { + return ret; } - OPENSSL_memcpy(out, rr->data, len); - if (!peek) { - rr->length -= len; - rr->data += len; - if (rr->length == 0) { - /* The record has been consumed, so we may now clear the buffer. */ - ssl_read_buffer_discard(ssl); - } - } - return len; -} + const bool is_early_data_read = ssl->server && SSL_in_early_data(ssl); -int ssl3_read_app_data(SSL *ssl, int *out_got_handshake, uint8_t *buf, int len, - int peek) { - assert(!SSL_in_init(ssl)); - assert(ssl->s3->initial_handshake_complete); - *out_got_handshake = 0; - - SSL3_RECORD *rr = &ssl->s3->rrec; - - for (;;) { - /* A previous iteration may have read a partial handshake message. Do not - * allow more app data in that case. */ - int has_hs_data = ssl->init_buf != NULL && ssl->init_buf->length > 0; - - /* Get new packet if necessary. */ - if (rr->length == 0 && !has_hs_data) { - int ret = ssl3_get_record(ssl); - if (ret <= 0) { - return ret; - } + if (type == SSL3_RT_HANDSHAKE) { + // If reading 0-RTT data, reject handshake data. 0-RTT data is terminated + // by an alert. + if (!ssl_is_draft21(ssl->version) && is_early_data_read) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; } - if (has_hs_data || rr->type == SSL3_RT_HANDSHAKE) { - /* Post-handshake data prior to TLS 1.3 is always renegotiation, which we - * never accept as a server. Otherwise |ssl3_get_message| will send - * |SSL_R_EXCESSIVE_MESSAGE_SIZE|. 
*/ - if (ssl->server && ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_NO_RENEGOTIATION); - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_RENEGOTIATION); - return -1; - } - - /* Parse post-handshake handshake messages. */ - int ret = ssl3_get_message(ssl); - if (ret <= 0) { - return ret; - } - *out_got_handshake = 1; - return -1; + // Post-handshake data prior to TLS 1.3 is always renegotiation, which we + // never accept as a server. Otherwise |ssl3_get_message| will send + // |SSL_R_EXCESSIVE_MESSAGE_SIZE|. + if (ssl->server && ssl_protocol_version(ssl) < TLS1_3_VERSION) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_RENEGOTIATION); + *out_alert = SSL_AD_NO_RENEGOTIATION; + return ssl_open_record_error; } - if (rr->type != SSL3_RT_APPLICATION_DATA) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - return -1; + if (!ssl->s3->hs_buf) { + ssl->s3->hs_buf.reset(BUF_MEM_new()); } - - if (rr->length != 0) { - return consume_record(ssl, buf, len, peek); + if (!ssl->s3->hs_buf || + !BUF_MEM_append(ssl->s3->hs_buf.get(), body.data(), body.size())) { + *out_alert = SSL_AD_INTERNAL_ERROR; + return ssl_open_record_error; } - - /* Discard empty records and loop again. */ + return ssl_open_record_discard; } -} -int ssl3_read_change_cipher_spec(SSL *ssl) { - SSL3_RECORD *rr = &ssl->s3->rrec; - - if (rr->length == 0) { - int ret = ssl3_get_record(ssl); - if (ret <= 0) { - return ret; - } + // Handle the end_of_early_data alert. + static const uint8_t kEndOfEarlyData[2] = {SSL3_AL_WARNING, + TLS1_AD_END_OF_EARLY_DATA}; + if (!ssl_is_draft21(ssl->version) && is_early_data_read && + type == SSL3_RT_ALERT && body == kEndOfEarlyData) { + // Stop accepting early data. + ssl->s3->hs->can_early_read = false; + return ssl_open_record_discard; } - if (rr->type != SSL3_RT_CHANGE_CIPHER_SPEC) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + if (type != SSL3_RT_APPLICATION_DATA) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); - return -1; + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; } - if (rr->length != 1 || rr->data[0] != SSL3_MT_CCS) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_CHANGE_CIPHER_SPEC); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return -1; + if (is_early_data_read) { + if (body.size() > kMaxEarlyDataAccepted - ssl->s3->hs->early_data_read) { + OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MUCH_READ_EARLY_DATA); + *out_alert = SSL3_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } + + ssl->s3->hs->early_data_read += body.size(); } - ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_CHANGE_CIPHER_SPEC, rr->data, - rr->length); + if (body.empty()) { + return ssl_open_record_discard; + } - rr->length = 0; - ssl_read_buffer_discard(ssl); - return 1; + *out = body; + return ssl_open_record_success; } -void ssl3_read_close_notify(SSL *ssl) { - /* Read records until an error or close_notify. */ - while (ssl3_get_record(ssl) > 0) { - ; +ssl_open_record_t ssl3_open_change_cipher_spec(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, + Span in) { + uint8_t type; + Span body; + auto ret = tls_open_record(ssl, &type, &body, out_consumed, out_alert, in); + if (ret != ssl_open_record_success) { + return ret; } -} - -int ssl3_read_handshake_bytes(SSL *ssl, uint8_t *buf, int len) { - SSL3_RECORD *rr = &ssl->s3->rrec; - - for (;;) { - /* Get new packet if necessary. 
*/ - if (rr->length == 0) { - int ret = ssl3_get_record(ssl); - if (ret <= 0) { - return ret; - } - } - if (rr->type != SSL3_RT_HANDSHAKE) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - return -1; - } - - if (rr->length != 0) { - return consume_record(ssl, buf, len, 0 /* consume data */); - } + if (type != SSL3_RT_CHANGE_CIPHER_SPEC) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } - /* Discard empty records and loop again. */ + if (body.size() != 1 || body[0] != SSL3_MT_CCS) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_CHANGE_CIPHER_SPEC); + *out_alert = SSL_AD_ILLEGAL_PARAMETER; + return ssl_open_record_error; } + + ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_CHANGE_CIPHER_SPEC, body); + return ssl_open_record_success; } -int ssl3_send_alert(SSL *ssl, int level, int desc) { - /* It is illegal to send an alert when we've already sent a closing one. */ - if (ssl->s3->send_shutdown != ssl_shutdown_none) { +int ssl_send_alert(SSL *ssl, int level, int desc) { + // It is illegal to send an alert when we've already sent a closing one. + if (ssl->s3->write_shutdown != ssl_shutdown_none) { OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); return -1; } if (level == SSL3_AL_WARNING && desc == SSL_AD_CLOSE_NOTIFY) { - ssl->s3->send_shutdown = ssl_shutdown_close_notify; + ssl->s3->write_shutdown = ssl_shutdown_close_notify; } else { assert(level == SSL3_AL_FATAL); - ssl->s3->send_shutdown = ssl_shutdown_fatal_alert; + assert(desc != SSL_AD_CLOSE_NOTIFY); + ssl->s3->write_shutdown = ssl_shutdown_error; } ssl->s3->alert_dispatch = 1; ssl->s3->send_alert[0] = level; ssl->s3->send_alert[1] = desc; - if (!ssl_write_buffer_is_pending(ssl)) { - /* Nothing is being written out, so the alert may be dispatched - * immediately. */ + if (ssl->s3->write_buffer.empty()) { + // Nothing is being written out, so the alert may be dispatched + // immediately. return ssl->method->dispatch_alert(ssl); } - /* The alert will be dispatched later. */ + // The alert will be dispatched later. return -1; } @@ -474,16 +427,17 @@ int ssl3_dispatch_alert(SSL *ssl) { } ssl->s3->alert_dispatch = 0; - /* If the alert is fatal, flush the BIO now. */ + // If the alert is fatal, flush the BIO now. if (ssl->s3->send_alert[0] == SSL3_AL_FATAL) { BIO_flush(ssl->wbio); } - ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_ALERT, ssl->s3->send_alert, - 2); + ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_ALERT, ssl->s3->send_alert); int alert = (ssl->s3->send_alert[0] << 8) | ssl->s3->send_alert[1]; ssl_do_info_callback(ssl, SSL_CB_WRITE_ALERT, alert); return 1; } + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/ssl_aead_ctx.c b/Sources/BoringSSL/ssl/ssl_aead_ctx.c deleted file mode 100644 index e18ba69ac..000000000 --- a/Sources/BoringSSL/ssl/ssl_aead_ctx.c +++ /dev/null @@ -1,335 +0,0 @@ -/* Copyright (c) 2015, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include -#include - -#include -#include -#include -#include - -#include "../crypto/internal.h" -#include "internal.h" - - -SSL_AEAD_CTX *SSL_AEAD_CTX_new(enum evp_aead_direction_t direction, - uint16_t version, const SSL_CIPHER *cipher, - const uint8_t *enc_key, size_t enc_key_len, - const uint8_t *mac_key, size_t mac_key_len, - const uint8_t *fixed_iv, size_t fixed_iv_len) { - const EVP_AEAD *aead; - size_t expected_mac_key_len, expected_fixed_iv_len; - if (!ssl_cipher_get_evp_aead(&aead, &expected_mac_key_len, - &expected_fixed_iv_len, cipher, version) || - /* Ensure the caller returned correct key sizes. */ - expected_fixed_iv_len != fixed_iv_len || - expected_mac_key_len != mac_key_len) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - uint8_t merged_key[EVP_AEAD_MAX_KEY_LENGTH]; - if (mac_key_len > 0) { - /* This is a "stateful" AEAD (for compatibility with pre-AEAD cipher - * suites). */ - if (mac_key_len + enc_key_len + fixed_iv_len > sizeof(merged_key)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - OPENSSL_memcpy(merged_key, mac_key, mac_key_len); - OPENSSL_memcpy(merged_key + mac_key_len, enc_key, enc_key_len); - OPENSSL_memcpy(merged_key + mac_key_len + enc_key_len, fixed_iv, - fixed_iv_len); - enc_key = merged_key; - enc_key_len += mac_key_len; - enc_key_len += fixed_iv_len; - } - - SSL_AEAD_CTX *aead_ctx = OPENSSL_malloc(sizeof(SSL_AEAD_CTX)); - if (aead_ctx == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return NULL; - } - OPENSSL_memset(aead_ctx, 0, sizeof(SSL_AEAD_CTX)); - aead_ctx->cipher = cipher; - - if (!EVP_AEAD_CTX_init_with_direction( - &aead_ctx->ctx, aead, enc_key, enc_key_len, - EVP_AEAD_DEFAULT_TAG_LENGTH, direction)) { - OPENSSL_free(aead_ctx); - return NULL; - } - - assert(EVP_AEAD_nonce_length(aead) <= EVP_AEAD_MAX_NONCE_LENGTH); - OPENSSL_COMPILE_ASSERT(EVP_AEAD_MAX_NONCE_LENGTH < 256, - variable_nonce_len_doesnt_fit_in_uint8_t); - aead_ctx->variable_nonce_len = (uint8_t)EVP_AEAD_nonce_length(aead); - if (mac_key_len == 0) { - assert(fixed_iv_len <= sizeof(aead_ctx->fixed_nonce)); - OPENSSL_memcpy(aead_ctx->fixed_nonce, fixed_iv, fixed_iv_len); - aead_ctx->fixed_nonce_len = fixed_iv_len; - - if (cipher->algorithm_enc & SSL_CHACHA20POLY1305) { - /* The fixed nonce into the actual nonce (the sequence number). */ - aead_ctx->xor_fixed_nonce = 1; - aead_ctx->variable_nonce_len = 8; - } else { - /* The fixed IV is prepended to the nonce. */ - assert(fixed_iv_len <= aead_ctx->variable_nonce_len); - aead_ctx->variable_nonce_len -= fixed_iv_len; - } - - /* AES-GCM uses an explicit nonce. */ - if (cipher->algorithm_enc & (SSL_AES128GCM | SSL_AES256GCM)) { - aead_ctx->variable_nonce_included_in_record = 1; - } - - /* The TLS 1.3 construction XORs the fixed nonce into the sequence number - * and omits the additional data. 
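The TLS 1.3 construction referenced above (RFC 8446, section 5.3) left-pads the 64-bit record sequence number to the IV length and XORs it into the static IV to form the per-record nonce. A minimal sketch, not part of the patch, assuming iv_len is at least 8 (it is 12 for the AES-GCM and ChaCha20-Poly1305 suites); the helper name is illustrative.
#include <cstddef>
#include <cstdint>
#include <cstring>

static void MakeTls13Nonce(uint8_t *out_nonce, const uint8_t *fixed_iv,
                           size_t iv_len, uint64_t seqnum) {
  std::memcpy(out_nonce, fixed_iv, iv_len);
  // XOR the big-endian sequence number into the last 8 bytes of the IV.
  for (size_t i = 0; i < 8; i++) {
    out_nonce[iv_len - 1 - i] ^= static_cast<uint8_t>(seqnum >> (8 * i));
  }
}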
*/ - if (version >= TLS1_3_VERSION) { - aead_ctx->xor_fixed_nonce = 1; - aead_ctx->variable_nonce_len = 8; - aead_ctx->variable_nonce_included_in_record = 0; - aead_ctx->omit_ad = 1; - assert(fixed_iv_len >= aead_ctx->variable_nonce_len); - } - } else { - assert(version < TLS1_3_VERSION); - aead_ctx->variable_nonce_included_in_record = 1; - aead_ctx->random_variable_nonce = 1; - aead_ctx->omit_length_in_ad = 1; - aead_ctx->omit_version_in_ad = (version == SSL3_VERSION); - } - - return aead_ctx; -} - -void SSL_AEAD_CTX_free(SSL_AEAD_CTX *aead) { - if (aead == NULL) { - return; - } - EVP_AEAD_CTX_cleanup(&aead->ctx); - OPENSSL_free(aead); -} - -size_t SSL_AEAD_CTX_explicit_nonce_len(const SSL_AEAD_CTX *aead) { -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - aead = NULL; -#endif - - if (aead != NULL && aead->variable_nonce_included_in_record) { - return aead->variable_nonce_len; - } - return 0; -} - -size_t SSL_AEAD_CTX_max_overhead(const SSL_AEAD_CTX *aead) { -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - aead = NULL; -#endif - - if (aead == NULL) { - return 0; - } - return EVP_AEAD_max_overhead(aead->ctx.aead) + - SSL_AEAD_CTX_explicit_nonce_len(aead); -} - -/* ssl_aead_ctx_get_ad writes the additional data for |aead| into |out| and - * returns the number of bytes written. */ -static size_t ssl_aead_ctx_get_ad(SSL_AEAD_CTX *aead, uint8_t out[13], - uint8_t type, uint16_t wire_version, - const uint8_t seqnum[8], - size_t plaintext_len) { - if (aead->omit_ad) { - return 0; - } - - OPENSSL_memcpy(out, seqnum, 8); - size_t len = 8; - out[len++] = type; - if (!aead->omit_version_in_ad) { - out[len++] = (uint8_t)(wire_version >> 8); - out[len++] = (uint8_t)wire_version; - } - if (!aead->omit_length_in_ad) { - out[len++] = (uint8_t)(plaintext_len >> 8); - out[len++] = (uint8_t)plaintext_len; - } - return len; -} - -int SSL_AEAD_CTX_open(SSL_AEAD_CTX *aead, CBS *out, uint8_t type, - uint16_t wire_version, const uint8_t seqnum[8], - uint8_t *in, size_t in_len) { -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - aead = NULL; -#endif - - if (aead == NULL) { - /* Handle the initial NULL cipher. */ - CBS_init(out, in, in_len); - return 1; - } - - /* TLS 1.2 AEADs include the length in the AD and are assumed to have fixed - * overhead. Otherwise the parameter is unused. */ - size_t plaintext_len = 0; - if (!aead->omit_length_in_ad) { - size_t overhead = SSL_AEAD_CTX_max_overhead(aead); - if (in_len < overhead) { - /* Publicly invalid. */ - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_PACKET_LENGTH); - return 0; - } - plaintext_len = in_len - overhead; - } - uint8_t ad[13]; - size_t ad_len = ssl_aead_ctx_get_ad(aead, ad, type, wire_version, seqnum, - plaintext_len); - - /* Assemble the nonce. */ - uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH]; - size_t nonce_len = 0; - - /* Prepend the fixed nonce, or left-pad with zeros if XORing. */ - if (aead->xor_fixed_nonce) { - nonce_len = aead->fixed_nonce_len - aead->variable_nonce_len; - OPENSSL_memset(nonce, 0, nonce_len); - } else { - OPENSSL_memcpy(nonce, aead->fixed_nonce, aead->fixed_nonce_len); - nonce_len += aead->fixed_nonce_len; - } - - /* Add the variable nonce. */ - if (aead->variable_nonce_included_in_record) { - if (in_len < aead->variable_nonce_len) { - /* Publicly invalid. 
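For pre-TLS-1.3 AEADs, ssl_aead_ctx_get_ad above assembles at most 13 bytes of additional data: the 8-byte sequence number, the record type, and, unless the corresponding flags omit them, the wire version and the plaintext length. A standalone sketch of that layout, with assumed names and separate from the patch:
#include <cstddef>
#include <cstdint>
#include <cstring>

static size_t BuildRecordAD(uint8_t out[13], const uint8_t seqnum[8],
                            uint8_t type, uint16_t wire_version,
                            uint16_t plaintext_len, bool omit_version,
                            bool omit_length) {
  std::memcpy(out, seqnum, 8);
  size_t len = 8;
  out[len++] = type;
  if (!omit_version) {
    out[len++] = static_cast<uint8_t>(wire_version >> 8);
    out[len++] = static_cast<uint8_t>(wire_version);
  }
  if (!omit_length) {
    out[len++] = static_cast<uint8_t>(plaintext_len >> 8);
    out[len++] = static_cast<uint8_t>(plaintext_len);
  }
  return len;
}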
*/ - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_PACKET_LENGTH); - return 0; - } - OPENSSL_memcpy(nonce + nonce_len, in, aead->variable_nonce_len); - in += aead->variable_nonce_len; - in_len -= aead->variable_nonce_len; - } else { - assert(aead->variable_nonce_len == 8); - OPENSSL_memcpy(nonce + nonce_len, seqnum, aead->variable_nonce_len); - } - nonce_len += aead->variable_nonce_len; - - /* XOR the fixed nonce, if necessary. */ - if (aead->xor_fixed_nonce) { - assert(nonce_len == aead->fixed_nonce_len); - for (size_t i = 0; i < aead->fixed_nonce_len; i++) { - nonce[i] ^= aead->fixed_nonce[i]; - } - } - - /* Decrypt in-place. */ - size_t len; - if (!EVP_AEAD_CTX_open(&aead->ctx, in, &len, in_len, nonce, nonce_len, - in, in_len, ad, ad_len)) { - return 0; - } - CBS_init(out, in, len); - return 1; -} - -int SSL_AEAD_CTX_seal(SSL_AEAD_CTX *aead, uint8_t *out, size_t *out_len, - size_t max_out, uint8_t type, uint16_t wire_version, - const uint8_t seqnum[8], const uint8_t *in, - size_t in_len) { -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - aead = NULL; -#endif - - if (aead == NULL) { - /* Handle the initial NULL cipher. */ - if (in_len > max_out) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); - return 0; - } - OPENSSL_memmove(out, in, in_len); - *out_len = in_len; - return 1; - } - - uint8_t ad[13]; - size_t ad_len = ssl_aead_ctx_get_ad(aead, ad, type, wire_version, seqnum, - in_len); - - /* Assemble the nonce. */ - uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH]; - size_t nonce_len = 0; - - /* Prepend the fixed nonce, or left-pad with zeros if XORing. */ - if (aead->xor_fixed_nonce) { - nonce_len = aead->fixed_nonce_len - aead->variable_nonce_len; - OPENSSL_memset(nonce, 0, nonce_len); - } else { - OPENSSL_memcpy(nonce, aead->fixed_nonce, aead->fixed_nonce_len); - nonce_len += aead->fixed_nonce_len; - } - - /* Select the variable nonce. */ - if (aead->random_variable_nonce) { - assert(aead->variable_nonce_included_in_record); - if (!RAND_bytes(nonce + nonce_len, aead->variable_nonce_len)) { - return 0; - } - } else { - /* When sending we use the sequence number as the variable part of the - * nonce. */ - assert(aead->variable_nonce_len == 8); - OPENSSL_memcpy(nonce + nonce_len, seqnum, aead->variable_nonce_len); - } - nonce_len += aead->variable_nonce_len; - - /* Emit the variable nonce if included in the record. */ - size_t extra_len = 0; - if (aead->variable_nonce_included_in_record) { - assert(!aead->xor_fixed_nonce); - if (max_out < aead->variable_nonce_len) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); - return 0; - } - if (out < in + in_len && in < out + aead->variable_nonce_len) { - OPENSSL_PUT_ERROR(SSL, SSL_R_OUTPUT_ALIASES_INPUT); - return 0; - } - OPENSSL_memcpy(out, nonce + aead->fixed_nonce_len, - aead->variable_nonce_len); - extra_len = aead->variable_nonce_len; - out += aead->variable_nonce_len; - max_out -= aead->variable_nonce_len; - } - - /* XOR the fixed nonce, if necessary. */ - if (aead->xor_fixed_nonce) { - assert(nonce_len == aead->fixed_nonce_len); - for (size_t i = 0; i < aead->fixed_nonce_len; i++) { - nonce[i] ^= aead->fixed_nonce[i]; - } - } - - if (!EVP_AEAD_CTX_seal(&aead->ctx, out, out_len, max_out, nonce, nonce_len, - in, in_len, ad, ad_len)) { - return 0; - } - *out_len += extra_len; - return 1; -} diff --git a/Sources/BoringSSL/ssl/ssl_aead_ctx.cc b/Sources/BoringSSL/ssl/ssl_aead_ctx.cc new file mode 100644 index 000000000..775827c7a --- /dev/null +++ b/Sources/BoringSSL/ssl/ssl_aead_ctx.cc @@ -0,0 +1,415 @@ +/* Copyright (c) 2015, Google Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include +#include + +#include +#include +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) +#define FUZZER_MODE true +#else +#define FUZZER_MODE false +#endif + +namespace bssl { + +SSLAEADContext::SSLAEADContext(uint16_t version_arg, bool is_dtls_arg, + const SSL_CIPHER *cipher_arg) + : cipher_(cipher_arg), + version_(version_arg), + is_dtls_(is_dtls_arg), + variable_nonce_included_in_record_(false), + random_variable_nonce_(false), + omit_length_in_ad_(false), + omit_version_in_ad_(false), + omit_ad_(false), + xor_fixed_nonce_(false) { + OPENSSL_memset(fixed_nonce_, 0, sizeof(fixed_nonce_)); +} + +SSLAEADContext::~SSLAEADContext() {} + +UniquePtr SSLAEADContext::CreateNullCipher(bool is_dtls) { + return MakeUnique(0 /* version */, is_dtls, + nullptr /* cipher */); +} + +UniquePtr SSLAEADContext::Create( + enum evp_aead_direction_t direction, uint16_t version, int is_dtls, + const SSL_CIPHER *cipher, Span enc_key, + Span mac_key, Span fixed_iv) { + const EVP_AEAD *aead; + uint16_t protocol_version; + size_t expected_mac_key_len, expected_fixed_iv_len; + if (!ssl_protocol_version_from_wire(&protocol_version, version) || + !ssl_cipher_get_evp_aead(&aead, &expected_mac_key_len, + &expected_fixed_iv_len, cipher, protocol_version, + is_dtls) || + // Ensure the caller returned correct key sizes. + expected_fixed_iv_len != fixed_iv.size() || + expected_mac_key_len != mac_key.size()) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return nullptr; + } + + uint8_t merged_key[EVP_AEAD_MAX_KEY_LENGTH]; + if (!mac_key.empty()) { + // This is a "stateful" AEAD (for compatibility with pre-AEAD cipher + // suites). 
+ if (mac_key.size() + enc_key.size() + fixed_iv.size() > + sizeof(merged_key)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return nullptr; + } + OPENSSL_memcpy(merged_key, mac_key.data(), mac_key.size()); + OPENSSL_memcpy(merged_key + mac_key.size(), enc_key.data(), enc_key.size()); + OPENSSL_memcpy(merged_key + mac_key.size() + enc_key.size(), + fixed_iv.data(), fixed_iv.size()); + enc_key = MakeConstSpan(merged_key, + enc_key.size() + mac_key.size() + fixed_iv.size()); + } + + UniquePtr aead_ctx = + MakeUnique(version, is_dtls, cipher); + if (!aead_ctx) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return nullptr; + } + + assert(aead_ctx->ProtocolVersion() == protocol_version); + + if (!EVP_AEAD_CTX_init_with_direction( + aead_ctx->ctx_.get(), aead, enc_key.data(), enc_key.size(), + EVP_AEAD_DEFAULT_TAG_LENGTH, direction)) { + return nullptr; + } + + assert(EVP_AEAD_nonce_length(aead) <= EVP_AEAD_MAX_NONCE_LENGTH); + static_assert(EVP_AEAD_MAX_NONCE_LENGTH < 256, + "variable_nonce_len doesn't fit in uint8_t"); + aead_ctx->variable_nonce_len_ = (uint8_t)EVP_AEAD_nonce_length(aead); + if (mac_key.empty()) { + assert(fixed_iv.size() <= sizeof(aead_ctx->fixed_nonce_)); + OPENSSL_memcpy(aead_ctx->fixed_nonce_, fixed_iv.data(), fixed_iv.size()); + aead_ctx->fixed_nonce_len_ = fixed_iv.size(); + + if (cipher->algorithm_enc & SSL_CHACHA20POLY1305) { + // The fixed nonce into the actual nonce (the sequence number). + aead_ctx->xor_fixed_nonce_ = true; + aead_ctx->variable_nonce_len_ = 8; + } else { + // The fixed IV is prepended to the nonce. + assert(fixed_iv.size() <= aead_ctx->variable_nonce_len_); + aead_ctx->variable_nonce_len_ -= fixed_iv.size(); + } + + // AES-GCM uses an explicit nonce. + if (cipher->algorithm_enc & (SSL_AES128GCM | SSL_AES256GCM)) { + aead_ctx->variable_nonce_included_in_record_ = true; + } + + // The TLS 1.3 construction XORs the fixed nonce into the sequence number + // and omits the additional data. + if (protocol_version >= TLS1_3_VERSION) { + aead_ctx->xor_fixed_nonce_ = true; + aead_ctx->variable_nonce_len_ = 8; + aead_ctx->variable_nonce_included_in_record_ = false; + aead_ctx->omit_ad_ = true; + assert(fixed_iv.size() >= aead_ctx->variable_nonce_len_); + } + } else { + assert(protocol_version < TLS1_3_VERSION); + aead_ctx->variable_nonce_included_in_record_ = true; + aead_ctx->random_variable_nonce_ = true; + aead_ctx->omit_length_in_ad_ = true; + aead_ctx->omit_version_in_ad_ = (protocol_version == SSL3_VERSION); + } + + return aead_ctx; +} + +void SSLAEADContext::SetVersionIfNullCipher(uint16_t version) { + if (is_null_cipher()) { + version_ = version; + } +} + +uint16_t SSLAEADContext::ProtocolVersion() const { + uint16_t protocol_version; + if(!ssl_protocol_version_from_wire(&protocol_version, version_)) { + assert(false); + return 0; + } + return protocol_version; +} + +uint16_t SSLAEADContext::RecordVersion() const { + if (version_ == 0) { + assert(is_null_cipher()); + return is_dtls_ ? 
DTLS1_VERSION : TLS1_VERSION; + } + + if (ProtocolVersion() <= TLS1_2_VERSION) { + return version_; + } + + if (ssl_is_resumption_record_version_experiment(version_)) { + return TLS1_2_VERSION; + } + return TLS1_VERSION; +} + +size_t SSLAEADContext::ExplicitNonceLen() const { + if (!FUZZER_MODE && variable_nonce_included_in_record_) { + return variable_nonce_len_; + } + return 0; +} + +bool SSLAEADContext::SuffixLen(size_t *out_suffix_len, const size_t in_len, + const size_t extra_in_len) const { + if (is_null_cipher() || FUZZER_MODE) { + *out_suffix_len = extra_in_len; + return true; + } + return !!EVP_AEAD_CTX_tag_len(ctx_.get(), out_suffix_len, in_len, + extra_in_len); +} + +size_t SSLAEADContext::MaxOverhead() const { + return ExplicitNonceLen() + + (is_null_cipher() || FUZZER_MODE + ? 0 + : EVP_AEAD_max_overhead(EVP_AEAD_CTX_aead(ctx_.get()))); +} + +size_t SSLAEADContext::GetAdditionalData(uint8_t out[13], uint8_t type, + uint16_t record_version, + const uint8_t seqnum[8], + size_t plaintext_len) { + if (omit_ad_) { + return 0; + } + + OPENSSL_memcpy(out, seqnum, 8); + size_t len = 8; + out[len++] = type; + if (!omit_version_in_ad_) { + out[len++] = static_cast((record_version >> 8)); + out[len++] = static_cast(record_version); + } + if (!omit_length_in_ad_) { + out[len++] = static_cast((plaintext_len >> 8)); + out[len++] = static_cast(plaintext_len); + } + return len; +} + +bool SSLAEADContext::Open(Span *out, uint8_t type, + uint16_t record_version, const uint8_t seqnum[8], + Span in) { + if (is_null_cipher() || FUZZER_MODE) { + // Handle the initial NULL cipher. + *out = in; + return true; + } + + // TLS 1.2 AEADs include the length in the AD and are assumed to have fixed + // overhead. Otherwise the parameter is unused. + size_t plaintext_len = 0; + if (!omit_length_in_ad_) { + size_t overhead = MaxOverhead(); + if (in.size() < overhead) { + // Publicly invalid. + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_PACKET_LENGTH); + return false; + } + plaintext_len = in.size() - overhead; + } + uint8_t ad[13]; + size_t ad_len = + GetAdditionalData(ad, type, record_version, seqnum, plaintext_len); + + // Assemble the nonce. + uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH]; + size_t nonce_len = 0; + + // Prepend the fixed nonce, or left-pad with zeros if XORing. + if (xor_fixed_nonce_) { + nonce_len = fixed_nonce_len_ - variable_nonce_len_; + OPENSSL_memset(nonce, 0, nonce_len); + } else { + OPENSSL_memcpy(nonce, fixed_nonce_, fixed_nonce_len_); + nonce_len += fixed_nonce_len_; + } + + // Add the variable nonce. + if (variable_nonce_included_in_record_) { + if (in.size() < variable_nonce_len_) { + // Publicly invalid. + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_PACKET_LENGTH); + return false; + } + OPENSSL_memcpy(nonce + nonce_len, in.data(), variable_nonce_len_); + in = in.subspan(variable_nonce_len_); + } else { + assert(variable_nonce_len_ == 8); + OPENSSL_memcpy(nonce + nonce_len, seqnum, variable_nonce_len_); + } + nonce_len += variable_nonce_len_; + + // XOR the fixed nonce, if necessary. + if (xor_fixed_nonce_) { + assert(nonce_len == fixed_nonce_len_); + for (size_t i = 0; i < fixed_nonce_len_; i++) { + nonce[i] ^= fixed_nonce_[i]; + } + } + + // Decrypt in-place. 
+ size_t len; + if (!EVP_AEAD_CTX_open(ctx_.get(), in.data(), &len, in.size(), nonce, + nonce_len, in.data(), in.size(), ad, ad_len)) { + return false; + } + *out = in.subspan(0, len); + return true; +} + +bool SSLAEADContext::SealScatter(uint8_t *out_prefix, uint8_t *out, + uint8_t *out_suffix, uint8_t type, + uint16_t record_version, + const uint8_t seqnum[8], const uint8_t *in, + size_t in_len, const uint8_t *extra_in, + size_t extra_in_len) { + const size_t prefix_len = ExplicitNonceLen(); + size_t suffix_len; + if (!SuffixLen(&suffix_len, in_len, extra_in_len)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); + return false; + } + if ((in != out && buffers_alias(in, in_len, out, in_len)) || + buffers_alias(in, in_len, out_prefix, prefix_len) || + buffers_alias(in, in_len, out_suffix, suffix_len)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_OUTPUT_ALIASES_INPUT); + return false; + } + + if (is_null_cipher() || FUZZER_MODE) { + // Handle the initial NULL cipher. + OPENSSL_memmove(out, in, in_len); + OPENSSL_memmove(out_suffix, extra_in, extra_in_len); + return true; + } + + uint8_t ad[13]; + size_t ad_len = GetAdditionalData(ad, type, record_version, seqnum, in_len); + + // Assemble the nonce. + uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH]; + size_t nonce_len = 0; + + // Prepend the fixed nonce, or left-pad with zeros if XORing. + if (xor_fixed_nonce_) { + nonce_len = fixed_nonce_len_ - variable_nonce_len_; + OPENSSL_memset(nonce, 0, nonce_len); + } else { + OPENSSL_memcpy(nonce, fixed_nonce_, fixed_nonce_len_); + nonce_len += fixed_nonce_len_; + } + + // Select the variable nonce. + if (random_variable_nonce_) { + assert(variable_nonce_included_in_record_); + if (!RAND_bytes(nonce + nonce_len, variable_nonce_len_)) { + return false; + } + } else { + // When sending we use the sequence number as the variable part of the + // nonce. + assert(variable_nonce_len_ == 8); + OPENSSL_memcpy(nonce + nonce_len, seqnum, variable_nonce_len_); + } + nonce_len += variable_nonce_len_; + + // Emit the variable nonce if included in the record. + if (variable_nonce_included_in_record_) { + assert(!xor_fixed_nonce_); + if (buffers_alias(in, in_len, out_prefix, variable_nonce_len_)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_OUTPUT_ALIASES_INPUT); + return false; + } + OPENSSL_memcpy(out_prefix, nonce + fixed_nonce_len_, + variable_nonce_len_); + } + + // XOR the fixed nonce, if necessary. 
+ if (xor_fixed_nonce_) { + assert(nonce_len == fixed_nonce_len_); + for (size_t i = 0; i < fixed_nonce_len_; i++) { + nonce[i] ^= fixed_nonce_[i]; + } + } + + size_t written_suffix_len; + bool result = !!EVP_AEAD_CTX_seal_scatter( + ctx_.get(), out, out_suffix, &written_suffix_len, suffix_len, nonce, + nonce_len, in, in_len, extra_in, extra_in_len, ad, ad_len); + assert(!result || written_suffix_len == suffix_len); + return result; +} + +bool SSLAEADContext::Seal(uint8_t *out, size_t *out_len, size_t max_out_len, + uint8_t type, uint16_t record_version, + const uint8_t seqnum[8], const uint8_t *in, + size_t in_len) { + const size_t prefix_len = ExplicitNonceLen(); + size_t suffix_len; + if (!SuffixLen(&suffix_len, in_len, 0)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); + return false; + } + if (in_len + prefix_len < in_len || + in_len + prefix_len + suffix_len < in_len + prefix_len) { + OPENSSL_PUT_ERROR(CIPHER, SSL_R_RECORD_TOO_LARGE); + return false; + } + if (in_len + prefix_len + suffix_len > max_out_len) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); + return false; + } + + if (!SealScatter(out, out + prefix_len, out + prefix_len + in_len, type, + record_version, seqnum, in, in_len, 0, 0)) { + return false; + } + *out_len = prefix_len + in_len + suffix_len; + return true; +} + +bool SSLAEADContext::GetIV(const uint8_t **out_iv, size_t *out_iv_len) const { + return !is_null_cipher() && + EVP_AEAD_CTX_get_iv(ctx_.get(), out_iv, out_iv_len); +} + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/ssl_asn1.c b/Sources/BoringSSL/ssl/ssl_asn1.cc similarity index 70% rename from Sources/BoringSSL/ssl/ssl_asn1.c rename to Sources/BoringSSL/ssl/ssl_asn1.cc index 3533225af..eb7df5b95 100644 --- a/Sources/BoringSSL/ssl/ssl_asn1.c +++ b/Sources/BoringSSL/ssl/ssl_asn1.cc @@ -80,11 +80,20 @@ * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR * OTHERWISE. */ +// Per C99, various stdint.h macros are unavailable in C++ unless some macros +// are defined. C++11 overruled this decision, but older Android NDKs still +// require it. +#if !defined(__STDC_LIMIT_MACROS) +#define __STDC_LIMIT_MACROS +#endif + #include #include #include +#include + #include #include #include @@ -95,184 +104,153 @@ #include "internal.h" -/* An SSL_SESSION is serialized as the following ASN.1 structure: - * - * SSLSession ::= SEQUENCE { - * version INTEGER (1), -- session structure version - * sslVersion INTEGER, -- protocol version number - * cipher OCTET STRING, -- two bytes long - * sessionID OCTET STRING, - * masterKey OCTET STRING, - * time [1] INTEGER, -- seconds since UNIX epoch - * timeout [2] INTEGER, -- in seconds - * peer [3] Certificate OPTIONAL, - * sessionIDContext [4] OCTET STRING OPTIONAL, - * verifyResult [5] INTEGER OPTIONAL, -- one of X509_V_* codes - * hostName [6] OCTET STRING OPTIONAL, - * -- from server_name extension - * pskIdentity [8] OCTET STRING OPTIONAL, - * ticketLifetimeHint [9] INTEGER OPTIONAL, -- client-only - * ticket [10] OCTET STRING OPTIONAL, -- client-only - * peerSHA256 [13] OCTET STRING OPTIONAL, - * originalHandshakeHash [14] OCTET STRING OPTIONAL, - * signedCertTimestampList [15] OCTET STRING OPTIONAL, - * -- contents of SCT extension - * ocspResponse [16] OCTET STRING OPTIONAL, - * -- stapled OCSP response from the server - * extendedMasterSecret [17] BOOLEAN OPTIONAL, - * groupID [18] INTEGER OPTIONAL, - * -- For historical reasons, for legacy DHE or - * -- static RSA ciphers, this field contains - * -- another value to be discarded. 
- * certChain [19] SEQUENCE OF Certificate OPTIONAL, - * ticketAgeAdd [21] OCTET STRING OPTIONAL, - * isServer [22] BOOLEAN DEFAULT TRUE, - * peerSignatureAlgorithm [23] INTEGER OPTIONAL, - * ticketMaxEarlyData [24] INTEGER OPTIONAL, - * authTimeout [25] INTEGER OPTIONAL, -- defaults to timeout - * earlyALPN [26] OCTET STRING OPTIONAL, - * } - * - * Note: historically this serialization has included other optional - * fields. Their presence is currently treated as a parse error: - * - * keyArg [0] IMPLICIT OCTET STRING OPTIONAL, - * pskIdentityHint [7] OCTET STRING OPTIONAL, - * compressionMethod [11] OCTET STRING OPTIONAL, - * srpUsername [12] OCTET STRING OPTIONAL, - * ticketFlags [20] INTEGER OPTIONAL, - */ +namespace bssl { + +// An SSL_SESSION is serialized as the following ASN.1 structure: +// +// SSLSession ::= SEQUENCE { +// version INTEGER (1), -- session structure version +// sslVersion INTEGER, -- protocol version number +// cipher OCTET STRING, -- two bytes long +// sessionID OCTET STRING, +// masterKey OCTET STRING, +// time [1] INTEGER, -- seconds since UNIX epoch +// timeout [2] INTEGER, -- in seconds +// peer [3] Certificate OPTIONAL, +// sessionIDContext [4] OCTET STRING OPTIONAL, +// verifyResult [5] INTEGER OPTIONAL, -- one of X509_V_* codes +// pskIdentity [8] OCTET STRING OPTIONAL, +// ticketLifetimeHint [9] INTEGER OPTIONAL, -- client-only +// ticket [10] OCTET STRING OPTIONAL, -- client-only +// peerSHA256 [13] OCTET STRING OPTIONAL, +// originalHandshakeHash [14] OCTET STRING OPTIONAL, +// signedCertTimestampList [15] OCTET STRING OPTIONAL, +// -- contents of SCT extension +// ocspResponse [16] OCTET STRING OPTIONAL, +// -- stapled OCSP response from the server +// extendedMasterSecret [17] BOOLEAN OPTIONAL, +// groupID [18] INTEGER OPTIONAL, +// certChain [19] SEQUENCE OF Certificate OPTIONAL, +// ticketAgeAdd [21] OCTET STRING OPTIONAL, +// isServer [22] BOOLEAN DEFAULT TRUE, +// peerSignatureAlgorithm [23] INTEGER OPTIONAL, +// ticketMaxEarlyData [24] INTEGER OPTIONAL, +// authTimeout [25] INTEGER OPTIONAL, -- defaults to timeout +// earlyALPN [26] OCTET STRING OPTIONAL, +// } +// +// Note: historically this serialization has included other optional +// fields. Their presence is currently treated as a parse error, except for +// hostName, which is ignored. 
+// +// keyArg [0] IMPLICIT OCTET STRING OPTIONAL, +// hostName [6] OCTET STRING OPTIONAL, +// pskIdentityHint [7] OCTET STRING OPTIONAL, +// compressionMethod [11] OCTET STRING OPTIONAL, +// srpUsername [12] OCTET STRING OPTIONAL, +// ticketFlags [20] INTEGER OPTIONAL, static const unsigned kVersion = 1; -static const int kTimeTag = +static const unsigned kTimeTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 1; -static const int kTimeoutTag = +static const unsigned kTimeoutTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 2; -static const int kPeerTag = +static const unsigned kPeerTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 3; -static const int kSessionIDContextTag = +static const unsigned kSessionIDContextTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 4; -static const int kVerifyResultTag = +static const unsigned kVerifyResultTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 5; -static const int kHostNameTag = +static const unsigned kHostNameTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 6; -static const int kPSKIdentityTag = +static const unsigned kPSKIdentityTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 8; -static const int kTicketLifetimeHintTag = +static const unsigned kTicketLifetimeHintTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 9; -static const int kTicketTag = +static const unsigned kTicketTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 10; -static const int kPeerSHA256Tag = +static const unsigned kPeerSHA256Tag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 13; -static const int kOriginalHandshakeHashTag = +static const unsigned kOriginalHandshakeHashTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 14; -static const int kSignedCertTimestampListTag = +static const unsigned kSignedCertTimestampListTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 15; -static const int kOCSPResponseTag = +static const unsigned kOCSPResponseTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 16; -static const int kExtendedMasterSecretTag = +static const unsigned kExtendedMasterSecretTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 17; -static const int kGroupIDTag = +static const unsigned kGroupIDTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 18; -static const int kCertChainTag = +static const unsigned kCertChainTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 19; -static const int kTicketAgeAddTag = +static const unsigned kTicketAgeAddTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 21; -static const int kIsServerTag = +static const unsigned kIsServerTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 22; -static const int kPeerSignatureAlgorithmTag = +static const unsigned kPeerSignatureAlgorithmTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 23; -static const int kTicketMaxEarlyDataTag = +static const unsigned kTicketMaxEarlyDataTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 24; -static const int kAuthTimeoutTag = +static const unsigned kAuthTimeoutTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 25; -static const int kEarlyALPNTag = +static const unsigned kEarlyALPNTag = CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 26; static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, size_t *out_len, int for_ticket) { - CBB cbb, session, child, child2; - if (in == NULL || in->cipher == NULL) { return 0; } - CBB_zero(&cbb); - if (!CBB_init(&cbb, 0) || - !CBB_add_asn1(&cbb, &session, 
CBS_ASN1_SEQUENCE) || + ScopedCBB cbb; + CBB session, child, child2; + if (!CBB_init(cbb.get(), 0) || + !CBB_add_asn1(cbb.get(), &session, CBS_ASN1_SEQUENCE) || !CBB_add_asn1_uint64(&session, kVersion) || !CBB_add_asn1_uint64(&session, in->ssl_version) || !CBB_add_asn1(&session, &child, CBS_ASN1_OCTETSTRING) || !CBB_add_u16(&child, (uint16_t)(in->cipher->id & 0xffff)) || !CBB_add_asn1(&session, &child, CBS_ASN1_OCTETSTRING) || - /* The session ID is irrelevant for a session ticket. */ + // The session ID is irrelevant for a session ticket. !CBB_add_bytes(&child, in->session_id, for_ticket ? 0 : in->session_id_length) || !CBB_add_asn1(&session, &child, CBS_ASN1_OCTETSTRING) || - !CBB_add_bytes(&child, in->master_key, in->master_key_length)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - - if (in->time < 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; - } - - if (!CBB_add_asn1(&session, &child, kTimeTag) || - !CBB_add_asn1_uint64(&child, in->time)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - - if (in->timeout < 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; - } - - if (!CBB_add_asn1(&session, &child, kTimeoutTag) || + !CBB_add_bytes(&child, in->master_key, in->master_key_length) || + !CBB_add_asn1(&session, &child, kTimeTag) || + !CBB_add_asn1_uint64(&child, in->time) || + !CBB_add_asn1(&session, &child, kTimeoutTag) || !CBB_add_asn1_uint64(&child, in->timeout)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } - /* The peer certificate is only serialized if the SHA-256 isn't - * serialized instead. */ + // The peer certificate is only serialized if the SHA-256 isn't + // serialized instead. if (sk_CRYPTO_BUFFER_num(in->certs) > 0 && !in->peer_sha256_valid) { const CRYPTO_BUFFER *buffer = sk_CRYPTO_BUFFER_value(in->certs, 0); if (!CBB_add_asn1(&session, &child, kPeerTag) || !CBB_add_bytes(&child, CRYPTO_BUFFER_data(buffer), CRYPTO_BUFFER_len(buffer))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } - /* Although it is OPTIONAL and usually empty, OpenSSL has - * historically always encoded the sid_ctx. */ + // Although it is OPTIONAL and usually empty, OpenSSL has + // historically always encoded the sid_ctx. 
if (!CBB_add_asn1(&session, &child, kSessionIDContextTag) || !CBB_add_asn1(&child, &child2, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&child2, in->sid_ctx, in->sid_ctx_length)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } if (in->verify_result != X509_V_OK) { if (!CBB_add_asn1(&session, &child, kVerifyResultTag) || !CBB_add_asn1_uint64(&child, in->verify_result)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - } - - if (in->tlsext_hostname) { - if (!CBB_add_asn1(&session, &child, kHostNameTag) || - !CBB_add_asn1(&child, &child2, CBS_ASN1_OCTETSTRING) || - !CBB_add_bytes(&child2, (const uint8_t *)in->tlsext_hostname, - strlen(in->tlsext_hostname))) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } @@ -282,7 +260,7 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, !CBB_add_bytes(&child2, (const uint8_t *)in->psk_identity, strlen(in->psk_identity))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } @@ -290,7 +268,7 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, if (!CBB_add_asn1(&session, &child, kTicketLifetimeHintTag) || !CBB_add_asn1_uint64(&child, in->tlsext_tick_lifetime_hint)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } @@ -299,7 +277,7 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, !CBB_add_asn1(&child, &child2, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&child2, in->tlsext_tick, in->tlsext_ticklen)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } @@ -308,7 +286,7 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, !CBB_add_asn1(&child, &child2, CBS_ASN1_OCTETSTRING) || !CBB_add_bytes(&child2, in->peer_sha256, sizeof(in->peer_sha256))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } @@ -318,26 +296,28 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, !CBB_add_bytes(&child2, in->original_handshake_hash, in->original_handshake_hash_len)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } - if (in->tlsext_signed_cert_timestamp_list_length > 0) { + if (in->signed_cert_timestamp_list != nullptr) { if (!CBB_add_asn1(&session, &child, kSignedCertTimestampListTag) || !CBB_add_asn1(&child, &child2, CBS_ASN1_OCTETSTRING) || - !CBB_add_bytes(&child2, in->tlsext_signed_cert_timestamp_list, - in->tlsext_signed_cert_timestamp_list_length)) { + !CBB_add_bytes(&child2, + CRYPTO_BUFFER_data(in->signed_cert_timestamp_list), + CRYPTO_BUFFER_len(in->signed_cert_timestamp_list))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } - if (in->ocsp_response_length > 0) { + if (in->ocsp_response != nullptr) { if (!CBB_add_asn1(&session, &child, kOCSPResponseTag) || !CBB_add_asn1(&child, &child2, CBS_ASN1_OCTETSTRING) || - !CBB_add_bytes(&child2, in->ocsp_response, in->ocsp_response_length)) { + !CBB_add_bytes(&child2, CRYPTO_BUFFER_data(in->ocsp_response), + CRYPTO_BUFFER_len(in->ocsp_response))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } @@ -346,7 +326,7 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, !CBB_add_asn1(&child, &child2, CBS_ASN1_BOOLEAN) || !CBB_add_u8(&child2, 0xff)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } @@ -354,24 +334,24 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t 
**out_data, (!CBB_add_asn1(&session, &child, kGroupIDTag) || !CBB_add_asn1_uint64(&child, in->group_id))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } - /* The certificate chain is only serialized if the leaf's SHA-256 isn't - * serialized instead. */ + // The certificate chain is only serialized if the leaf's SHA-256 isn't + // serialized instead. if (in->certs != NULL && !in->peer_sha256_valid && sk_CRYPTO_BUFFER_num(in->certs) >= 2) { if (!CBB_add_asn1(&session, &child, kCertChainTag)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } for (size_t i = 1; i < sk_CRYPTO_BUFFER_num(in->certs); i++) { const CRYPTO_BUFFER *buffer = sk_CRYPTO_BUFFER_value(in->certs, i); if (!CBB_add_bytes(&child, CRYPTO_BUFFER_data(buffer), CRYPTO_BUFFER_len(buffer))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } } @@ -381,7 +361,7 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, !CBB_add_asn1(&child, &child2, CBS_ASN1_OCTETSTRING) || !CBB_add_u32(&child2, in->ticket_age_add)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } @@ -390,7 +370,7 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, !CBB_add_asn1(&child, &child2, CBS_ASN1_BOOLEAN) || !CBB_add_u8(&child2, 0x00)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } @@ -398,21 +378,21 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, (!CBB_add_asn1(&session, &child, kPeerSignatureAlgorithmTag) || !CBB_add_asn1_uint64(&child, in->peer_signature_algorithm))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } if (in->ticket_max_early_data != 0 && (!CBB_add_asn1(&session, &child, kTicketMaxEarlyDataTag) || !CBB_add_asn1_uint64(&child, in->ticket_max_early_data))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } if (in->timeout != in->auth_timeout && (!CBB_add_asn1(&session, &child, kAuthTimeoutTag) || !CBB_add_asn1_uint64(&child, in->auth_timeout))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } if (in->early_alpn) { @@ -421,76 +401,23 @@ static int SSL_SESSION_to_bytes_full(const SSL_SESSION *in, uint8_t **out_data, !CBB_add_bytes(&child2, (const uint8_t *)in->early_alpn, in->early_alpn_len)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } } - if (!CBB_finish(&cbb, out_data, out_len)) { + if (!CBB_finish(cbb.get(), out_data, out_len)) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return 0; } return 1; - - err: - CBB_cleanup(&cbb); - return 0; -} - -int SSL_SESSION_to_bytes(const SSL_SESSION *in, uint8_t **out_data, - size_t *out_len) { - if (in->not_resumable) { - /* If the caller has an unresumable session, e.g. if |SSL_get_session| were - * called on a TLS 1.3 or False Started connection, serialize with a - * placeholder value so it is not accidentally deserialized into a resumable - * one. 
*/ - static const char kNotResumableSession[] = "NOT RESUMABLE"; - - *out_len = strlen(kNotResumableSession); - *out_data = BUF_memdup(kNotResumableSession, *out_len); - if (*out_data == NULL) { - return 0; - } - - return 1; - } - - return SSL_SESSION_to_bytes_full(in, out_data, out_len, 0); -} - -int SSL_SESSION_to_bytes_for_ticket(const SSL_SESSION *in, uint8_t **out_data, - size_t *out_len) { - return SSL_SESSION_to_bytes_full(in, out_data, out_len, 1); } -int i2d_SSL_SESSION(SSL_SESSION *in, uint8_t **pp) { - uint8_t *out; - size_t len; - - if (!SSL_SESSION_to_bytes(in, &out, &len)) { - return -1; - } - - if (len > INT_MAX) { - OPENSSL_free(out); - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return -1; - } - - if (pp) { - OPENSSL_memcpy(*pp, out, len); - *pp += len; - } - OPENSSL_free(out); - - return len; -} - -/* SSL_SESSION_parse_string gets an optional ASN.1 OCTET STRING - * explicitly tagged with |tag| from |cbs| and saves it in |*out|. On - * entry, if |*out| is not NULL, it frees the existing contents. If - * the element was not found, it sets |*out| to NULL. It returns one - * on success, whether or not the element was found, and zero on - * decode error. */ +// SSL_SESSION_parse_string gets an optional ASN.1 OCTET STRING +// explicitly tagged with |tag| from |cbs| and saves it in |*out|. On +// entry, if |*out| is not NULL, it frees the existing contents. If +// the element was not found, it sets |*out| to NULL. It returns one +// on success, whether or not the element was found, and zero on +// decode error. static int SSL_SESSION_parse_string(CBS *cbs, char **out, unsigned tag) { CBS value; int present; @@ -514,12 +441,12 @@ static int SSL_SESSION_parse_string(CBS *cbs, char **out, unsigned tag) { return 1; } -/* SSL_SESSION_parse_string gets an optional ASN.1 OCTET STRING - * explicitly tagged with |tag| from |cbs| and stows it in |*out_ptr| - * and |*out_len|. If |*out_ptr| is not NULL, it frees the existing - * contents. On entry, if the element was not found, it sets - * |*out_ptr| to NULL. It returns one on success, whether or not the - * element was found, and zero on decode error. */ +// SSL_SESSION_parse_string gets an optional ASN.1 OCTET STRING +// explicitly tagged with |tag| from |cbs| and stows it in |*out_ptr| +// and |*out_len|. If |*out_ptr| is not NULL, it frees the existing +// contents. On entry, if the element was not found, it sets +// |*out_ptr| to NULL. It returns one on success, whether or not the +// element was found, and zero on decode error. static int SSL_SESSION_parse_octet_string(CBS *cbs, uint8_t **out_ptr, size_t *out_len, unsigned tag) { CBS value; @@ -534,8 +461,31 @@ static int SSL_SESSION_parse_octet_string(CBS *cbs, uint8_t **out_ptr, return 1; } -/* SSL_SESSION_parse_bounded_octet_string parses an optional ASN.1 OCTET STRING - * explicitly tagged with |tag| of size at most |max_out|. 
*/ +static int SSL_SESSION_parse_crypto_buffer(CBS *cbs, CRYPTO_BUFFER **out, + unsigned tag, + CRYPTO_BUFFER_POOL *pool) { + if (!CBS_peek_asn1_tag(cbs, tag)) { + return 1; + } + + CBS child, value; + if (!CBS_get_asn1(cbs, &child, tag) || + !CBS_get_asn1(&child, &value, CBS_ASN1_OCTETSTRING) || + CBS_len(&child) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); + return 0; + } + CRYPTO_BUFFER_free(*out); + *out = CRYPTO_BUFFER_new_from_CBS(&value, pool); + if (*out == nullptr) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return 0; + } + return 1; +} + +// SSL_SESSION_parse_bounded_octet_string parses an optional ASN.1 OCTET STRING +// explicitly tagged with |tag| of size at most |max_out|. static int SSL_SESSION_parse_bounded_octet_string( CBS *cbs, uint8_t *out, uint8_t *out_len, uint8_t max_out, unsigned tag) { CBS value; @@ -588,21 +538,29 @@ static int SSL_SESSION_parse_u16(CBS *cbs, uint16_t *out, unsigned tag, return 1; } -SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, - CRYPTO_BUFFER_POOL *pool) { - SSL_SESSION *ret = ssl_session_new(x509_method); - if (ret == NULL) { - goto err; +UniquePtr SSL_SESSION_parse(CBS *cbs, + const SSL_X509_METHOD *x509_method, + CRYPTO_BUFFER_POOL *pool) { + UniquePtr ret = ssl_session_new(x509_method); + if (!ret) { + return nullptr; } CBS session; uint64_t version, ssl_version; + uint16_t unused; if (!CBS_get_asn1(cbs, &session, CBS_ASN1_SEQUENCE) || !CBS_get_asn1_uint64(&session, &version) || version != kVersion || - !CBS_get_asn1_uint64(&session, &ssl_version)) { + !CBS_get_asn1_uint64(&session, &ssl_version) || + // Require sessions have versions valid in either TLS or DTLS. The session + // will not be used by the handshake if not applicable, but, for + // simplicity, never parse a session that does not pass + // |ssl_protocol_version_from_wire|. 
+ ssl_version > UINT16_MAX || + !ssl_protocol_version_from_wire(&unused, ssl_version)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } ret->ssl_version = ssl_version; @@ -612,12 +570,12 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, !CBS_get_u16(&cipher, &cipher_value) || CBS_len(&cipher) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } ret->cipher = SSL_get_cipher_by_value(cipher_value); if (ret->cipher == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_CIPHER); - goto err; + return nullptr; } CBS session_id, master_key; @@ -626,7 +584,7 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, !CBS_get_asn1(&session, &master_key, CBS_ASN1_OCTETSTRING) || CBS_len(&master_key) > SSL_MAX_MASTER_KEY_LENGTH) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } OPENSSL_memcpy(ret->session_id, CBS_data(&session_id), CBS_len(&session_id)); ret->session_id_length = CBS_len(&session_id); @@ -634,43 +592,50 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, ret->master_key_length = CBS_len(&master_key); CBS child; - uint64_t time, timeout; + uint64_t timeout; if (!CBS_get_asn1(&session, &child, kTimeTag) || - !CBS_get_asn1_uint64(&child, &time) || - time > LONG_MAX || + !CBS_get_asn1_uint64(&child, &ret->time) || !CBS_get_asn1(&session, &child, kTimeoutTag) || !CBS_get_asn1_uint64(&child, &timeout) || - timeout > LONG_MAX) { + timeout > UINT32_MAX) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } - ret->time = (long)time; - ret->timeout = (long)timeout; + ret->timeout = (uint32_t)timeout; CBS peer; int has_peer; if (!CBS_get_optional_asn1(&session, &peer, &has_peer, kPeerTag) || (has_peer && CBS_len(&peer) == 0)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } - /* |peer| is processed with the certificate chain. */ + // |peer| is processed with the certificate chain. if (!SSL_SESSION_parse_bounded_octet_string( &session, ret->sid_ctx, &ret->sid_ctx_length, sizeof(ret->sid_ctx), kSessionIDContextTag) || !SSL_SESSION_parse_long(&session, &ret->verify_result, kVerifyResultTag, - X509_V_OK) || - !SSL_SESSION_parse_string(&session, &ret->tlsext_hostname, - kHostNameTag) || - !SSL_SESSION_parse_string(&session, &ret->psk_identity, + X509_V_OK)) { + return nullptr; + } + + // Skip the historical hostName field. 
+ CBS unused_hostname; + if (!CBS_get_optional_asn1(&session, &unused_hostname, nullptr, + kHostNameTag)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); + return nullptr; + } + + if (!SSL_SESSION_parse_string(&session, &ret->psk_identity, kPSKIdentityTag) || !SSL_SESSION_parse_u32(&session, &ret->tlsext_tick_lifetime_hint, kTicketLifetimeHintTag, 0) || !SSL_SESSION_parse_octet_string(&session, &ret->tlsext_tick, &ret->tlsext_ticklen, kTicketTag)) { - goto err; + return nullptr; } if (CBS_peek_asn1_tag(&session, kPeerSHA256Tag)) { @@ -680,7 +645,7 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, CBS_len(&peer_sha256) != sizeof(ret->peer_sha256) || CBS_len(&child) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } OPENSSL_memcpy(ret->peer_sha256, CBS_data(&peer_sha256), sizeof(ret->peer_sha256)); @@ -693,14 +658,12 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, &session, ret->original_handshake_hash, &ret->original_handshake_hash_len, sizeof(ret->original_handshake_hash), kOriginalHandshakeHashTag) || - !SSL_SESSION_parse_octet_string( - &session, &ret->tlsext_signed_cert_timestamp_list, - &ret->tlsext_signed_cert_timestamp_list_length, - kSignedCertTimestampListTag) || - !SSL_SESSION_parse_octet_string( - &session, &ret->ocsp_response, &ret->ocsp_response_length, - kOCSPResponseTag)) { - goto err; + !SSL_SESSION_parse_crypto_buffer(&session, + &ret->signed_cert_timestamp_list, + kSignedCertTimestampListTag, pool) || + !SSL_SESSION_parse_crypto_buffer(&session, &ret->ocsp_response, + kOCSPResponseTag, pool)) { + return nullptr; } int extended_master_secret; @@ -708,27 +671,14 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, kExtendedMasterSecretTag, 0 /* default to false */)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } ret->extended_master_secret = !!extended_master_secret; - uint32_t value; - if (!SSL_SESSION_parse_u32(&session, &value, kGroupIDTag, 0)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; - } - - /* Historically, the group_id field was used for key-exchange-specific - * information. Discard all but the group ID. */ - if (ret->cipher->algorithm_mkey & (SSL_kRSA | SSL_kDHE)) { - value = 0; - } - - if (value > 0xffff) { + if (!SSL_SESSION_parse_u16(&session, &ret->group_id, kGroupIDTag, 0)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } - ret->group_id = (uint16_t)value; CBS cert_chain; CBS_init(&cert_chain, NULL, 0); @@ -737,27 +687,25 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, kCertChainTag) || (has_cert_chain && CBS_len(&cert_chain) == 0)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } if (has_cert_chain && !has_peer) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } if (has_peer || has_cert_chain) { ret->certs = sk_CRYPTO_BUFFER_new_null(); if (ret->certs == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return nullptr; } if (has_peer) { - /* TODO(agl): this should use the |SSL_CTX|'s pool. 
*/ - CRYPTO_BUFFER *buffer = CRYPTO_BUFFER_new_from_CBS(&peer, pool); - if (buffer == NULL || - !sk_CRYPTO_BUFFER_push(ret->certs, buffer)) { - CRYPTO_BUFFER_free(buffer); + UniquePtr buffer(CRYPTO_BUFFER_new_from_CBS(&peer, pool)); + if (!buffer || + !PushToStack(ret->certs, std::move(buffer))) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return nullptr; } } @@ -766,23 +714,22 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, if (!CBS_get_any_asn1_element(&cert_chain, &cert, NULL, NULL) || CBS_len(&cert) == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } - /* TODO(agl): this should use the |SSL_CTX|'s pool. */ CRYPTO_BUFFER *buffer = CRYPTO_BUFFER_new_from_CBS(&cert, pool); if (buffer == NULL || !sk_CRYPTO_BUFFER_push(ret->certs, buffer)) { CRYPTO_BUFFER_free(buffer); OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return nullptr; } } } - if (!x509_method->session_cache_objects(ret)) { + if (!x509_method->session_cache_objects(ret.get())) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } CBS age_add; @@ -792,7 +739,7 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, (age_add_present && !CBS_get_u32(&age_add, &ret->ticket_age_add)) || CBS_len(&age_add) != 0) { - goto err; + return nullptr; } ret->ticket_age_add_valid = age_add_present; @@ -800,7 +747,7 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, if (!CBS_get_optional_asn1_bool(&session, &is_server, kIsServerTag, 1 /* default to true */)) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } /* TODO: in time we can include |is_server| for servers too, then we can enforce that client and server sessions are never mixed up. */ @@ -811,34 +758,83 @@ SSL_SESSION *SSL_SESSION_parse(CBS *cbs, const SSL_X509_METHOD *x509_method, kPeerSignatureAlgorithmTag, 0) || !SSL_SESSION_parse_u32(&session, &ret->ticket_max_early_data, kTicketMaxEarlyDataTag, 0) || - !SSL_SESSION_parse_long(&session, &ret->auth_timeout, kAuthTimeoutTag, - ret->timeout) || + !SSL_SESSION_parse_u32(&session, &ret->auth_timeout, kAuthTimeoutTag, + ret->timeout) || !SSL_SESSION_parse_octet_string(&session, &ret->early_alpn, &ret->early_alpn_len, kEarlyALPNTag) || CBS_len(&session) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - goto err; + return nullptr; } return ret; +} + +} // namespace bssl + +using namespace bssl; + +int SSL_SESSION_to_bytes(const SSL_SESSION *in, uint8_t **out_data, + size_t *out_len) { + if (in->not_resumable) { + // If the caller has an unresumable session, e.g. if |SSL_get_session| were + // called on a TLS 1.3 or False Started connection, serialize with a + // placeholder value so it is not accidentally deserialized into a resumable + // one. 
+ static const char kNotResumableSession[] = "NOT RESUMABLE"; + + *out_len = strlen(kNotResumableSession); + *out_data = (uint8_t *)BUF_memdup(kNotResumableSession, *out_len); + if (*out_data == NULL) { + return 0; + } + + return 1; + } + + return SSL_SESSION_to_bytes_full(in, out_data, out_len, 0); +} + +int SSL_SESSION_to_bytes_for_ticket(const SSL_SESSION *in, uint8_t **out_data, + size_t *out_len) { + return SSL_SESSION_to_bytes_full(in, out_data, out_len, 1); +} + +int i2d_SSL_SESSION(SSL_SESSION *in, uint8_t **pp) { + uint8_t *out; + size_t len; + + if (!SSL_SESSION_to_bytes(in, &out, &len)) { + return -1; + } + + if (len > INT_MAX) { + OPENSSL_free(out); + OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); + return -1; + } + + if (pp) { + OPENSSL_memcpy(*pp, out, len); + *pp += len; + } + OPENSSL_free(out); -err: - SSL_SESSION_free(ret); - return NULL; + return len; } SSL_SESSION *SSL_SESSION_from_bytes(const uint8_t *in, size_t in_len, const SSL_CTX *ctx) { CBS cbs; CBS_init(&cbs, in, in_len); - SSL_SESSION *ret = SSL_SESSION_parse(&cbs, ctx->x509_method, ctx->pool); - if (ret == NULL) { + UniquePtr ret = + SSL_SESSION_parse(&cbs, ctx->x509_method, ctx->pool); + if (!ret) { return NULL; } if (CBS_len(&cbs) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_SSL_SESSION); - SSL_SESSION_free(ret); return NULL; } - return ret; + return ret.release(); } diff --git a/Sources/BoringSSL/ssl/ssl_buffer.c b/Sources/BoringSSL/ssl/ssl_buffer.c deleted file mode 100644 index c27db8ba8..000000000 --- a/Sources/BoringSSL/ssl/ssl_buffer.c +++ /dev/null @@ -1,312 +0,0 @@ -/* Copyright (c) 2015, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "../crypto/internal.h" -#include "internal.h" - - -OPENSSL_COMPILE_ASSERT(0xffff <= INT_MAX, uint16_fits_in_int); - -OPENSSL_COMPILE_ASSERT((SSL3_ALIGN_PAYLOAD & (SSL3_ALIGN_PAYLOAD - 1)) == 0, - align_to_a_power_of_two); - -/* setup_buffer initializes |buf| with capacity |cap|, aligned such that data - * written after |header_len| is aligned to a |SSL3_ALIGN_PAYLOAD|-byte - * boundary. It returns one on success and zero on error. */ -static int setup_buffer(SSL3_BUFFER *buf, size_t header_len, size_t cap) { - if (buf->buf != NULL || cap > 0xffff) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - /* Add up to |SSL3_ALIGN_PAYLOAD| - 1 bytes of slack for alignment. */ - buf->buf = OPENSSL_malloc(cap + SSL3_ALIGN_PAYLOAD - 1); - if (buf->buf == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return 0; - } - - /* Arrange the buffer such that the record body is aligned. 
*/ - buf->offset = (0 - header_len - (uintptr_t)buf->buf) & - (SSL3_ALIGN_PAYLOAD - 1); - buf->len = 0; - buf->cap = cap; - return 1; -} - -static void consume_buffer(SSL3_BUFFER *buf, size_t len) { - if (len > buf->len) { - abort(); - } - buf->offset += (uint16_t)len; - buf->len -= (uint16_t)len; - buf->cap -= (uint16_t)len; -} - -static void clear_buffer(SSL3_BUFFER *buf) { - OPENSSL_free(buf->buf); - OPENSSL_memset(buf, 0, sizeof(SSL3_BUFFER)); -} - -OPENSSL_COMPILE_ASSERT(DTLS1_RT_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH <= - 0xffff, - maximum_read_buffer_too_large); - -/* setup_read_buffer initializes the read buffer if not already initialized. It - * returns one on success and zero on failure. */ -static int setup_read_buffer(SSL *ssl) { - SSL3_BUFFER *buf = &ssl->s3->read_buffer; - - if (buf->buf != NULL) { - return 1; - } - - size_t header_len = ssl_record_prefix_len(ssl); - size_t cap = SSL3_RT_MAX_ENCRYPTED_LENGTH; - if (SSL_is_dtls(ssl)) { - cap += DTLS1_RT_HEADER_LENGTH; - } else { - cap += SSL3_RT_HEADER_LENGTH; - } - - return setup_buffer(buf, header_len, cap); -} - -uint8_t *ssl_read_buffer(SSL *ssl) { - return ssl->s3->read_buffer.buf + ssl->s3->read_buffer.offset; -} - -size_t ssl_read_buffer_len(const SSL *ssl) { - return ssl->s3->read_buffer.len; -} - -static int dtls_read_buffer_next_packet(SSL *ssl) { - SSL3_BUFFER *buf = &ssl->s3->read_buffer; - - if (buf->len > 0) { - /* It is an error to call |dtls_read_buffer_extend| when the read buffer is - * not empty. */ - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } - - /* Read a single packet from |ssl->rbio|. |buf->cap| must fit in an int. */ - int ret = BIO_read(ssl->rbio, buf->buf + buf->offset, (int)buf->cap); - if (ret <= 0) { - ssl->rwstate = SSL_READING; - return ret; - } - /* |BIO_read| was bound by |buf->cap|, so this cannot overflow. */ - buf->len = (uint16_t)ret; - return 1; -} - -static int tls_read_buffer_extend_to(SSL *ssl, size_t len) { - SSL3_BUFFER *buf = &ssl->s3->read_buffer; - - if (len > buf->cap) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); - return -1; - } - - /* Read until the target length is reached. */ - while (buf->len < len) { - /* The amount of data to read is bounded by |buf->cap|, which must fit in an - * int. */ - int ret = BIO_read(ssl->rbio, buf->buf + buf->offset + buf->len, - (int)(len - buf->len)); - if (ret <= 0) { - ssl->rwstate = SSL_READING; - return ret; - } - /* |BIO_read| was bound by |buf->cap - buf->len|, so this cannot - * overflow. */ - buf->len += (uint16_t)ret; - } - - return 1; -} - -int ssl_read_buffer_extend_to(SSL *ssl, size_t len) { - /* |ssl_read_buffer_extend_to| implicitly discards any consumed data. */ - ssl_read_buffer_discard(ssl); - - if (!setup_read_buffer(ssl)) { - return -1; - } - - if (ssl->rbio == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET); - return -1; - } - - int ret; - if (SSL_is_dtls(ssl)) { - /* |len| is ignored for a datagram transport. */ - ret = dtls_read_buffer_next_packet(ssl); - } else { - ret = tls_read_buffer_extend_to(ssl, len); - } - - if (ret <= 0) { - /* If the buffer was empty originally and remained empty after attempting to - * extend it, release the buffer until the next attempt. */ - ssl_read_buffer_discard(ssl); - } - return ret; -} - -void ssl_read_buffer_consume(SSL *ssl, size_t len) { - SSL3_BUFFER *buf = &ssl->s3->read_buffer; - - consume_buffer(buf, len); - - /* The TLS stack never reads beyond the current record, so there will never be - * unconsumed data. 
If read-ahead is ever reimplemented, - * |ssl_read_buffer_discard| will require a |memcpy| to shift the excess back - * to the front of the buffer, to ensure there is enough space for the next - * record. */ - assert(SSL_is_dtls(ssl) || len == 0 || buf->len == 0); -} - -void ssl_read_buffer_discard(SSL *ssl) { - if (ssl->s3->read_buffer.len == 0) { - ssl_read_buffer_clear(ssl); - } -} - -void ssl_read_buffer_clear(SSL *ssl) { - clear_buffer(&ssl->s3->read_buffer); -} - - -int ssl_write_buffer_is_pending(const SSL *ssl) { - return ssl->s3->write_buffer.len > 0; -} - -OPENSSL_COMPILE_ASSERT(SSL3_RT_HEADER_LENGTH * 2 + - SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD * 2 + - SSL3_RT_MAX_PLAIN_LENGTH <= 0xffff, - maximum_tls_write_buffer_too_large); - -OPENSSL_COMPILE_ASSERT(DTLS1_RT_HEADER_LENGTH + - SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD + - SSL3_RT_MAX_PLAIN_LENGTH <= 0xffff, - maximum_dtls_write_buffer_too_large); - -int ssl_write_buffer_init(SSL *ssl, uint8_t **out_ptr, size_t max_len) { - SSL3_BUFFER *buf = &ssl->s3->write_buffer; - - if (buf->buf != NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - size_t header_len = ssl_seal_align_prefix_len(ssl); - - /* TODO(davidben): This matches the original behavior in keeping the malloc - * size consistent. Does this matter? |cap| could just be |max_len|. */ - size_t cap = SSL3_RT_MAX_PLAIN_LENGTH + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD; - if (SSL_is_dtls(ssl)) { - cap += DTLS1_RT_HEADER_LENGTH; - } else { - cap += SSL3_RT_HEADER_LENGTH; - if (ssl->mode & SSL_MODE_CBC_RECORD_SPLITTING) { - cap += SSL3_RT_HEADER_LENGTH + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD; - } - } - - if (max_len > cap) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); - return 0; - } - - if (!setup_buffer(buf, header_len, cap)) { - return 0; - } - *out_ptr = buf->buf + buf->offset; - return 1; -} - -void ssl_write_buffer_set_len(SSL *ssl, size_t len) { - SSL3_BUFFER *buf = &ssl->s3->write_buffer; - - if (len > buf->cap) { - abort(); - } - buf->len = len; -} - -static int tls_write_buffer_flush(SSL *ssl) { - SSL3_BUFFER *buf = &ssl->s3->write_buffer; - - while (buf->len > 0) { - int ret = BIO_write(ssl->wbio, buf->buf + buf->offset, buf->len); - if (ret <= 0) { - ssl->rwstate = SSL_WRITING; - return ret; - } - consume_buffer(buf, (size_t)ret); - } - ssl_write_buffer_clear(ssl); - return 1; -} - -static int dtls_write_buffer_flush(SSL *ssl) { - SSL3_BUFFER *buf = &ssl->s3->write_buffer; - if (buf->len == 0) { - return 1; - } - - int ret = BIO_write(ssl->wbio, buf->buf + buf->offset, buf->len); - if (ret <= 0) { - ssl->rwstate = SSL_WRITING; - /* If the write failed, drop the write buffer anyway. Datagram transports - * can't write half a packet, so the caller is expected to retry from the - * top. */ - ssl_write_buffer_clear(ssl); - return ret; - } - ssl_write_buffer_clear(ssl); - return 1; -} - -int ssl_write_buffer_flush(SSL *ssl) { - if (ssl->wbio == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET); - return -1; - } - - if (SSL_is_dtls(ssl)) { - return dtls_write_buffer_flush(ssl); - } else { - return tls_write_buffer_flush(ssl); - } -} - -void ssl_write_buffer_clear(SSL *ssl) { - clear_buffer(&ssl->s3->write_buffer); -} diff --git a/Sources/BoringSSL/ssl/ssl_buffer.cc b/Sources/BoringSSL/ssl/ssl_buffer.cc new file mode 100644 index 000000000..da1de9301 --- /dev/null +++ b/Sources/BoringSSL/ssl/ssl_buffer.cc @@ -0,0 +1,286 @@ +/* Copyright (c) 2015, Google Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +namespace bssl { + +// BIO uses int instead of size_t. No lengths will exceed uint16_t, so this will +// not overflow. +static_assert(0xffff <= INT_MAX, "uint16_t does not fit in int"); + +static_assert((SSL3_ALIGN_PAYLOAD & (SSL3_ALIGN_PAYLOAD - 1)) == 0, + "SSL3_ALIGN_PAYLOAD must be a power of 2"); + +void SSLBuffer::Clear() { + free(buf_); // Allocated with malloc(). + buf_ = nullptr; + offset_ = 0; + size_ = 0; + cap_ = 0; +} + +bool SSLBuffer::EnsureCap(size_t header_len, size_t new_cap) { + if (new_cap > 0xffff) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return false; + } + + if (cap_ >= new_cap) { + return true; + } + + // Add up to |SSL3_ALIGN_PAYLOAD| - 1 bytes of slack for alignment. + // + // Since this buffer gets allocated quite frequently and doesn't contain any + // sensitive data, we allocate with malloc rather than |OPENSSL_malloc| and + // avoid zeroing on free. + uint8_t *new_buf = (uint8_t *)malloc(new_cap + SSL3_ALIGN_PAYLOAD - 1); + if (new_buf == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return false; + } + + // Offset the buffer such that the record body is aligned. + size_t new_offset = + (0 - header_len - (uintptr_t)new_buf) & (SSL3_ALIGN_PAYLOAD - 1); + + if (buf_ != NULL) { + OPENSSL_memcpy(new_buf + new_offset, buf_ + offset_, size_); + free(buf_); // Allocated with malloc(). + } + + buf_ = new_buf; + offset_ = new_offset; + cap_ = new_cap; + return true; +} + +void SSLBuffer::DidWrite(size_t new_size) { + if (new_size > cap() - size()) { + abort(); + } + size_ += new_size; +} + +void SSLBuffer::Consume(size_t len) { + if (len > size_) { + abort(); + } + offset_ += (uint16_t)len; + size_ -= (uint16_t)len; + cap_ -= (uint16_t)len; +} + +void SSLBuffer::DiscardConsumed() { + if (size_ == 0) { + Clear(); + } +} + +static int dtls_read_buffer_next_packet(SSL *ssl) { + SSLBuffer *buf = &ssl->s3->read_buffer; + + if (!buf->empty()) { + // It is an error to call |dtls_read_buffer_extend| when the read buffer is + // not empty. + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return -1; + } + + // Read a single packet from |ssl->rbio|. |buf->cap()| must fit in an int. + int ret = BIO_read(ssl->rbio, buf->data(), static_cast(buf->cap())); + if (ret <= 0) { + ssl->s3->rwstate = SSL_READING; + return ret; + } + buf->DidWrite(static_cast(ret)); + return 1; +} + +static int tls_read_buffer_extend_to(SSL *ssl, size_t len) { + SSLBuffer *buf = &ssl->s3->read_buffer; + + if (len > buf->cap()) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); + return -1; + } + + // Read until the target length is reached. 
+ while (buf->size() < len) { + // The amount of data to read is bounded by |buf->cap|, which must fit in an + // int. + int ret = BIO_read(ssl->rbio, buf->data() + buf->size(), + static_cast(len - buf->size())); + if (ret <= 0) { + ssl->s3->rwstate = SSL_READING; + return ret; + } + buf->DidWrite(static_cast(ret)); + } + + return 1; +} + +int ssl_read_buffer_extend_to(SSL *ssl, size_t len) { + // |ssl_read_buffer_extend_to| implicitly discards any consumed data. + ssl->s3->read_buffer.DiscardConsumed(); + + if (SSL_is_dtls(ssl)) { + static_assert( + DTLS1_RT_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH <= 0xffff, + "DTLS read buffer is too large"); + + // The |len| parameter is ignored in DTLS. + len = DTLS1_RT_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH; + } + + if (!ssl->s3->read_buffer.EnsureCap(ssl_record_prefix_len(ssl), len)) { + return -1; + } + + if (ssl->rbio == NULL) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET); + return -1; + } + + int ret; + if (SSL_is_dtls(ssl)) { + // |len| is ignored for a datagram transport. + ret = dtls_read_buffer_next_packet(ssl); + } else { + ret = tls_read_buffer_extend_to(ssl, len); + } + + if (ret <= 0) { + // If the buffer was empty originally and remained empty after attempting to + // extend it, release the buffer until the next attempt. + ssl->s3->read_buffer.DiscardConsumed(); + } + return ret; +} + +int ssl_handle_open_record(SSL *ssl, bool *out_retry, ssl_open_record_t ret, + size_t consumed, uint8_t alert) { + *out_retry = false; + if (ret != ssl_open_record_partial) { + ssl->s3->read_buffer.Consume(consumed); + } + if (ret != ssl_open_record_success) { + // Nothing was returned to the caller, so discard anything marked consumed. + ssl->s3->read_buffer.DiscardConsumed(); + } + switch (ret) { + case ssl_open_record_success: + return 1; + + case ssl_open_record_partial: { + int read_ret = ssl_read_buffer_extend_to(ssl, consumed); + if (read_ret <= 0) { + return read_ret; + } + *out_retry = true; + return 1; + } + + case ssl_open_record_discard: + *out_retry = true; + return 1; + + case ssl_open_record_close_notify: + return 0; + + case ssl_open_record_error: + if (alert != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + } + return -1; + } + assert(0); + return -1; +} + + +static_assert(SSL3_RT_HEADER_LENGTH * 2 + + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD * 2 + + SSL3_RT_MAX_PLAIN_LENGTH <= + 0xffff, + "maximum TLS write buffer is too large"); + +static_assert(DTLS1_RT_HEADER_LENGTH + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD + + SSL3_RT_MAX_PLAIN_LENGTH <= + 0xffff, + "maximum DTLS write buffer is too large"); + +static int tls_write_buffer_flush(SSL *ssl) { + SSLBuffer *buf = &ssl->s3->write_buffer; + + while (!buf->empty()) { + int ret = BIO_write(ssl->wbio, buf->data(), buf->size()); + if (ret <= 0) { + ssl->s3->rwstate = SSL_WRITING; + return ret; + } + buf->Consume(static_cast(ret)); + } + buf->Clear(); + return 1; +} + +static int dtls_write_buffer_flush(SSL *ssl) { + SSLBuffer *buf = &ssl->s3->write_buffer; + if (buf->empty()) { + return 1; + } + + int ret = BIO_write(ssl->wbio, buf->data(), buf->size()); + if (ret <= 0) { + ssl->s3->rwstate = SSL_WRITING; + // If the write failed, drop the write buffer anyway. Datagram transports + // can't write half a packet, so the caller is expected to retry from the + // top. 
+ buf->Clear(); + return ret; + } + buf->Clear(); + return 1; +} + +int ssl_write_buffer_flush(SSL *ssl) { + if (ssl->wbio == NULL) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BIO_NOT_SET); + return -1; + } + + if (SSL_is_dtls(ssl)) { + return dtls_write_buffer_flush(ssl); + } else { + return tls_write_buffer_flush(ssl); + } +} + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/ssl_cert.c b/Sources/BoringSSL/ssl/ssl_cert.cc similarity index 61% rename from Sources/BoringSSL/ssl/ssl_cert.c rename to Sources/BoringSSL/ssl/ssl_cert.cc index c60c6fa22..9a3eef35b 100644 --- a/Sources/BoringSSL/ssl/ssl_cert.c +++ b/Sources/BoringSSL/ssl/ssl_cert.cc @@ -118,31 +118,25 @@ #include #include +#include + #include #include #include -#include #include #include #include #include #include -#include #include "../crypto/internal.h" #include "internal.h" -int SSL_get_ex_data_X509_STORE_CTX_idx(void) { - /* The ex_data index to go from |X509_STORE_CTX| to |SSL| always uses the - * reserved app_data slot. Before ex_data was introduced, app_data was used. - * Avoid breaking any software which assumes |X509_STORE_CTX_get_app_data| - * works. */ - return 0; -} +namespace bssl { CERT *ssl_cert_new(const SSL_X509_METHOD *x509_method) { - CERT *ret = OPENSSL_malloc(sizeof(CERT)); + CERT *ret = (CERT *)OPENSSL_malloc(sizeof(CERT)); if (ret == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); return NULL; @@ -159,7 +153,7 @@ static CRYPTO_BUFFER *buffer_up_ref(CRYPTO_BUFFER *buffer) { } CERT *ssl_cert_dup(CERT *cert) { - CERT *ret = OPENSSL_malloc(sizeof(CERT)); + CERT *ret = (CERT *)OPENSSL_malloc(sizeof(CERT)); if (ret == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); return NULL; @@ -177,18 +171,9 @@ CERT *ssl_cert_dup(CERT *cert) { ret->key_method = cert->key_method; ret->x509_method = cert->x509_method; - if (cert->dh_tmp != NULL) { - ret->dh_tmp = DHparams_dup(cert->dh_tmp); - if (ret->dh_tmp == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_DH_LIB); - goto err; - } - } - ret->dh_tmp_cb = cert->dh_tmp_cb; - if (cert->sigalgs != NULL) { - ret->sigalgs = - BUF_memdup(cert->sigalgs, cert->num_sigalgs * sizeof(cert->sigalgs[0])); + ret->sigalgs = (uint16_t *)BUF_memdup( + cert->sigalgs, cert->num_sigalgs * sizeof(cert->sigalgs[0])); if (ret->sigalgs == NULL) { goto err; } @@ -198,10 +183,7 @@ CERT *ssl_cert_dup(CERT *cert) { ret->cert_cb = cert->cert_cb; ret->cert_cb_arg = cert->cert_cb_arg; - if (cert->verify_store != NULL) { - X509_STORE_up_ref(cert->verify_store); - ret->verify_store = cert->verify_store; - } + ret->x509_method->cert_dup(ret, cert); if (cert->signed_cert_timestamp_list != NULL) { CRYPTO_BUFFER_up_ref(cert->signed_cert_timestamp_list); @@ -216,6 +198,8 @@ CERT *ssl_cert_dup(CERT *cert) { ret->sid_ctx_length = cert->sid_ctx_length; OPENSSL_memcpy(ret->sid_ctx, cert->sid_ctx, sizeof(ret->sid_ctx)); + ret->enable_early_data = cert->enable_early_data; + return ret; err: @@ -223,7 +207,7 @@ CERT *ssl_cert_dup(CERT *cert) { return NULL; } -/* Free up and clear all certificates and chains */ +// Free up and clear all certificates and chains void ssl_cert_clear_certs(CERT *cert) { if (cert == NULL) { return; @@ -238,335 +222,226 @@ void ssl_cert_clear_certs(CERT *cert) { cert->key_method = NULL; } -void ssl_cert_free(CERT *c) { - if (c == NULL) { +void ssl_cert_free(CERT *cert) { + if (cert == NULL) { return; } - DH_free(c->dh_tmp); - - ssl_cert_clear_certs(c); - OPENSSL_free(c->sigalgs); - X509_STORE_free(c->verify_store); - CRYPTO_BUFFER_free(c->signed_cert_timestamp_list); - 
CRYPTO_BUFFER_free(c->ocsp_response); + ssl_cert_clear_certs(cert); + cert->x509_method->cert_free(cert); + OPENSSL_free(cert->sigalgs); + CRYPTO_BUFFER_free(cert->signed_cert_timestamp_list); + CRYPTO_BUFFER_free(cert->ocsp_response); - OPENSSL_free(c); + OPENSSL_free(cert); } -static void ssl_cert_set_cert_cb(CERT *c, int (*cb)(SSL *ssl, void *arg), +static void ssl_cert_set_cert_cb(CERT *cert, int (*cb)(SSL *ssl, void *arg), void *arg) { - c->cert_cb = cb; - c->cert_cb_arg = arg; -} - -int ssl_set_cert(CERT *cert, CRYPTO_BUFFER *buffer) { + cert->cert_cb = cb; + cert->cert_cb_arg = arg; +} + +enum leaf_cert_and_privkey_result_t { + leaf_cert_and_privkey_error, + leaf_cert_and_privkey_ok, + leaf_cert_and_privkey_mismatch, +}; + +// check_leaf_cert_and_privkey checks whether the certificate in |leaf_buffer| +// and the private key in |privkey| are suitable and coherent. It returns +// |leaf_cert_and_privkey_error| and pushes to the error queue if a problem is +// found. If the certificate and private key are valid, but incoherent, it +// returns |leaf_cert_and_privkey_mismatch|. Otherwise it returns +// |leaf_cert_and_privkey_ok|. +static enum leaf_cert_and_privkey_result_t check_leaf_cert_and_privkey( + CRYPTO_BUFFER *leaf_buffer, EVP_PKEY *privkey) { CBS cert_cbs; - CRYPTO_BUFFER_init_CBS(buffer, &cert_cbs); - EVP_PKEY *pubkey = ssl_cert_parse_pubkey(&cert_cbs); - if (pubkey == NULL) { - return 0; + CRYPTO_BUFFER_init_CBS(leaf_buffer, &cert_cbs); + UniquePtr pubkey = ssl_cert_parse_pubkey(&cert_cbs); + if (!pubkey) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return leaf_cert_and_privkey_error; } if (!ssl_is_key_type_supported(pubkey->type)) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE); - EVP_PKEY_free(pubkey); - return 0; + return leaf_cert_and_privkey_error; } - /* An ECC certificate may be usable for ECDH or ECDSA. We only support ECDSA - * certificates, so sanity-check the key usage extension. */ + // An ECC certificate may be usable for ECDH or ECDSA. We only support ECDSA + // certificates, so sanity-check the key usage extension. if (pubkey->type == EVP_PKEY_EC && !ssl_cert_check_digital_signature_key_usage(&cert_cbs)) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE); - EVP_PKEY_free(pubkey); - return 0; - } - - if (cert->privatekey != NULL) { - /* Sanity-check that the private key and the certificate match, unless the - * key is opaque (in case of, say, a smartcard). */ - if (!EVP_PKEY_is_opaque(cert->privatekey) && - !ssl_compare_public_and_private_key(pubkey, cert->privatekey)) { - /* don't fail for a cert/key mismatch, just free current private key - * (when switching to a different cert & key, first this function should - * be used, then ssl_set_pkey */ - EVP_PKEY_free(cert->privatekey); - cert->privatekey = NULL; - /* clear error queue */ - ERR_clear_error(); - } - } - - EVP_PKEY_free(pubkey); - - cert->x509_method->cert_flush_cached_leaf(cert); - - if (cert->chain != NULL) { - CRYPTO_BUFFER_free(sk_CRYPTO_BUFFER_value(cert->chain, 0)); - sk_CRYPTO_BUFFER_set(cert->chain, 0, buffer); - CRYPTO_BUFFER_up_ref(buffer); - return 1; - } - - cert->chain = sk_CRYPTO_BUFFER_new_null(); - if (cert->chain == NULL) { - return 0; + return leaf_cert_and_privkey_error; } - if (!sk_CRYPTO_BUFFER_push(cert->chain, buffer)) { - sk_CRYPTO_BUFFER_free(cert->chain); - cert->chain = NULL; - return 0; + if (privkey != NULL && + // Sanity-check that the private key and the certificate match. 
+ !ssl_compare_public_and_private_key(pubkey.get(), privkey)) { + ERR_clear_error(); + return leaf_cert_and_privkey_mismatch; } - CRYPTO_BUFFER_up_ref(buffer); - return 1; + return leaf_cert_and_privkey_ok; } -int SSL_CTX_use_certificate_ASN1(SSL_CTX *ctx, size_t der_len, - const uint8_t *der) { - CRYPTO_BUFFER *buffer = CRYPTO_BUFFER_new(der, der_len, NULL); - if (buffer == NULL) { +static int cert_set_chain_and_key( + CERT *cert, CRYPTO_BUFFER *const *certs, size_t num_certs, + EVP_PKEY *privkey, const SSL_PRIVATE_KEY_METHOD *privkey_method) { + if (num_certs == 0 || + (privkey == NULL && privkey_method == NULL)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); return 0; } - const int ok = ssl_set_cert(ctx->cert, buffer); - CRYPTO_BUFFER_free(buffer); - return ok; -} - -int SSL_use_certificate_ASN1(SSL *ssl, const uint8_t *der, size_t der_len) { - CRYPTO_BUFFER *buffer = CRYPTO_BUFFER_new(der, der_len, NULL); - if (buffer == NULL) { + if (privkey != NULL && privkey_method != NULL) { + OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_HAVE_BOTH_PRIVKEY_AND_METHOD); return 0; } - const int ok = ssl_set_cert(ssl->cert, buffer); - CRYPTO_BUFFER_free(buffer); - return ok; -} - -int ssl_verify_cert_chain(SSL *ssl, long *out_verify_result, - STACK_OF(X509) *cert_chain) { - if (cert_chain == NULL || sk_X509_num(cert_chain) == 0) { - return 0; - } - - X509_STORE *verify_store = ssl->ctx->cert_store; - if (ssl->cert->verify_store != NULL) { - verify_store = ssl->cert->verify_store; + switch (check_leaf_cert_and_privkey(certs[0], privkey)) { + case leaf_cert_and_privkey_error: + return 0; + case leaf_cert_and_privkey_mismatch: + OPENSSL_PUT_ERROR(SSL, SSL_R_CERTIFICATE_AND_PRIVATE_KEY_MISMATCH); + return 0; + case leaf_cert_and_privkey_ok: + break; } - X509 *leaf = sk_X509_value(cert_chain, 0); - int ret = 0; - X509_STORE_CTX ctx; - if (!X509_STORE_CTX_init(&ctx, verify_store, leaf, cert_chain)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_X509_LIB); + STACK_OF(CRYPTO_BUFFER) *certs_sk = sk_CRYPTO_BUFFER_new_null(); + if (certs_sk == NULL) { return 0; } - if (!X509_STORE_CTX_set_ex_data(&ctx, SSL_get_ex_data_X509_STORE_CTX_idx(), - ssl)) { - goto err; - } - - /* We need to inherit the verify parameters. These can be determined by the - * context: if its a server it will verify SSL client certificates or vice - * versa. */ - X509_STORE_CTX_set_default(&ctx, ssl->server ? "ssl_client" : "ssl_server"); - - /* Anything non-default in "param" should overwrite anything in the ctx. */ - X509_VERIFY_PARAM_set1(X509_STORE_CTX_get0_param(&ctx), ssl->param); - - if (ssl->verify_callback) { - X509_STORE_CTX_set_verify_cb(&ctx, ssl->verify_callback); - } - - int verify_ret; - if (ssl->ctx->app_verify_callback != NULL) { - verify_ret = ssl->ctx->app_verify_callback(&ctx, ssl->ctx->app_verify_arg); - } else { - verify_ret = X509_verify_cert(&ctx); - } - - *out_verify_result = ctx.error; - - /* If |SSL_VERIFY_NONE|, the error is non-fatal, but we keep the result. 
*/ - if (verify_ret <= 0 && ssl->verify_mode != SSL_VERIFY_NONE) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, ssl_verify_alarm_type(ctx.error)); - OPENSSL_PUT_ERROR(SSL, SSL_R_CERTIFICATE_VERIFY_FAILED); - goto err; - } - - ERR_clear_error(); - ret = 1; - -err: - X509_STORE_CTX_cleanup(&ctx); - return ret; -} - -static void set_client_CA_list(STACK_OF(X509_NAME) **ca_list, - STACK_OF(X509_NAME) *name_list) { - sk_X509_NAME_pop_free(*ca_list, X509_NAME_free); - *ca_list = name_list; -} - -STACK_OF(X509_NAME) *SSL_dup_CA_list(STACK_OF(X509_NAME) *list) { - STACK_OF(X509_NAME) *ret = sk_X509_NAME_new_null(); - if (ret == NULL) { - return NULL; - } - for (size_t i = 0; i < sk_X509_NAME_num(list); i++) { - X509_NAME *name = X509_NAME_dup(sk_X509_NAME_value(list, i)); - if (name == NULL || !sk_X509_NAME_push(ret, name)) { - X509_NAME_free(name); - sk_X509_NAME_pop_free(ret, X509_NAME_free); - return NULL; + for (size_t i = 0; i < num_certs; i++) { + if (!sk_CRYPTO_BUFFER_push(certs_sk, certs[i])) { + sk_CRYPTO_BUFFER_pop_free(certs_sk, CRYPTO_BUFFER_free); + return 0; } + CRYPTO_BUFFER_up_ref(certs[i]); } - return ret; -} - -void SSL_set_client_CA_list(SSL *ssl, STACK_OF(X509_NAME) *name_list) { - set_client_CA_list(&ssl->client_CA, name_list); -} + EVP_PKEY_free(cert->privatekey); + cert->privatekey = privkey; + if (privkey != NULL) { + EVP_PKEY_up_ref(privkey); + } + cert->key_method = privkey_method; -void SSL_CTX_set_client_CA_list(SSL_CTX *ctx, STACK_OF(X509_NAME) *name_list) { - set_client_CA_list(&ctx->client_CA, name_list); -} + sk_CRYPTO_BUFFER_pop_free(cert->chain, CRYPTO_BUFFER_free); + cert->chain = certs_sk; -STACK_OF(X509_NAME) *SSL_CTX_get_client_CA_list(const SSL_CTX *ctx) { - return ctx->client_CA; + return 1; } -STACK_OF(X509_NAME) *SSL_get_client_CA_list(const SSL *ssl) { - /* For historical reasons, this function is used both to query configuration - * state on a server as well as handshake state on a client. However, whether - * |ssl| is a client or server is not known until explicitly configured with - * |SSL_set_connect_state|. If |handshake_func| is NULL, |ssl| is in an - * indeterminate mode and |ssl->server| is unset. */ - if (ssl->handshake_func != NULL && !ssl->server) { - if (ssl->s3->hs != NULL) { - return ssl->s3->hs->ca_names; - } - - return NULL; - } - - if (ssl->client_CA != NULL) { - return ssl->client_CA; +int ssl_set_cert(CERT *cert, UniquePtr buffer) { + switch (check_leaf_cert_and_privkey(buffer.get(), cert->privatekey)) { + case leaf_cert_and_privkey_error: + return 0; + case leaf_cert_and_privkey_mismatch: + // don't fail for a cert/key mismatch, just free current private key + // (when switching to a different cert & key, first this function should + // be used, then |ssl_set_pkey|. 
+ EVP_PKEY_free(cert->privatekey); + cert->privatekey = NULL; + break; + case leaf_cert_and_privkey_ok: + break; } - return ssl->ctx->client_CA; -} -static int add_client_CA(STACK_OF(X509_NAME) **sk, X509 *x509) { - X509_NAME *name; + cert->x509_method->cert_flush_cached_leaf(cert); - if (x509 == NULL) { - return 0; - } - if (*sk == NULL) { - *sk = sk_X509_NAME_new_null(); - if (*sk == NULL) { - return 0; - } + if (cert->chain != NULL) { + CRYPTO_BUFFER_free(sk_CRYPTO_BUFFER_value(cert->chain, 0)); + sk_CRYPTO_BUFFER_set(cert->chain, 0, buffer.release()); + return 1; } - name = X509_NAME_dup(X509_get_subject_name(x509)); - if (name == NULL) { + cert->chain = sk_CRYPTO_BUFFER_new_null(); + if (cert->chain == NULL) { return 0; } - if (!sk_X509_NAME_push(*sk, name)) { - X509_NAME_free(name); + if (!PushToStack(cert->chain, std::move(buffer))) { + sk_CRYPTO_BUFFER_free(cert->chain); + cert->chain = NULL; return 0; } return 1; } -int SSL_add_client_CA(SSL *ssl, X509 *x509) { - return add_client_CA(&ssl->client_CA, x509); -} - -int SSL_CTX_add_client_CA(SSL_CTX *ctx, X509 *x509) { - return add_client_CA(&ctx->client_CA, x509); -} - int ssl_has_certificate(const SSL *ssl) { return ssl->cert->chain != NULL && sk_CRYPTO_BUFFER_value(ssl->cert->chain, 0) != NULL && ssl_has_private_key(ssl); } -STACK_OF(CRYPTO_BUFFER) *ssl_parse_cert_chain(uint8_t *out_alert, - EVP_PKEY **out_pubkey, - uint8_t *out_leaf_sha256, - CBS *cbs, - CRYPTO_BUFFER_POOL *pool) { - *out_pubkey = NULL; - - STACK_OF(CRYPTO_BUFFER) *ret = sk_CRYPTO_BUFFER_new_null(); - if (ret == NULL) { - *out_alert = SSL_AD_INTERNAL_ERROR; - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return NULL; - } +bool ssl_parse_cert_chain(uint8_t *out_alert, + UniquePtr *out_chain, + UniquePtr *out_pubkey, + uint8_t *out_leaf_sha256, CBS *cbs, + CRYPTO_BUFFER_POOL *pool) { + out_chain->reset(); + out_pubkey->reset(); CBS certificate_list; if (!CBS_get_u24_length_prefixed(cbs, &certificate_list)) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto err; + return false; } + if (CBS_len(&certificate_list) == 0) { + return true; + } + + UniquePtr chain(sk_CRYPTO_BUFFER_new_null()); + if (!chain) { + *out_alert = SSL_AD_INTERNAL_ERROR; + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return false; + } + + UniquePtr pubkey; while (CBS_len(&certificate_list) > 0) { CBS certificate; if (!CBS_get_u24_length_prefixed(&certificate_list, &certificate) || CBS_len(&certificate) == 0) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_LENGTH_MISMATCH); - goto err; + return false; } - if (sk_CRYPTO_BUFFER_num(ret) == 0) { - *out_pubkey = ssl_cert_parse_pubkey(&certificate); - if (*out_pubkey == NULL) { + if (sk_CRYPTO_BUFFER_num(chain.get()) == 0) { + pubkey = ssl_cert_parse_pubkey(&certificate); + if (!pubkey) { *out_alert = SSL_AD_DECODE_ERROR; - goto err; + return false; } - /* Retain the hash of the leaf certificate if requested. */ + // Retain the hash of the leaf certificate if requested. 
if (out_leaf_sha256 != NULL) { SHA256(CBS_data(&certificate), CBS_len(&certificate), out_leaf_sha256); } } - CRYPTO_BUFFER *buf = - CRYPTO_BUFFER_new_from_CBS(&certificate, pool); - if (buf == NULL) { - *out_alert = SSL_AD_DECODE_ERROR; - goto err; - } - - if (!sk_CRYPTO_BUFFER_push(ret, buf)) { + UniquePtr buf( + CRYPTO_BUFFER_new_from_CBS(&certificate, pool)); + if (!buf || + !PushToStack(chain.get(), std::move(buf))) { *out_alert = SSL_AD_INTERNAL_ERROR; - CRYPTO_BUFFER_free(buf); OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return false; } } - return ret; - -err: - EVP_PKEY_free(*out_pubkey); - *out_pubkey = NULL; - sk_CRYPTO_BUFFER_pop_free(ret, CRYPTO_BUFFER_free); - return NULL; + *out_chain = std::move(chain); + *out_pubkey = std::move(pubkey); + return true; } int ssl_add_cert_chain(SSL *ssl, CBB *cbb) { @@ -576,7 +451,8 @@ int ssl_add_cert_chain(SSL *ssl, CBB *cbb) { CBB certs; if (!CBB_add_u24_length_prefixed(cbb, &certs)) { - goto err; + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; } STACK_OF(CRYPTO_BUFFER) *chain = ssl->cert->chain; @@ -587,20 +463,17 @@ int ssl_add_cert_chain(SSL *ssl, CBB *cbb) { !CBB_add_bytes(&child, CRYPTO_BUFFER_data(buffer), CRYPTO_BUFFER_len(buffer)) || !CBB_flush(&certs)) { - goto err; + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; } } return CBB_flush(cbb); - -err: - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; } -/* ssl_cert_skip_to_spki parses a DER-encoded, X.509 certificate from |in| and - * positions |*out_tbs_cert| to cover the TBSCertificate, starting at the - * subjectPublicKeyInfo. */ +// ssl_cert_skip_to_spki parses a DER-encoded, X.509 certificate from |in| and +// positions |*out_tbs_cert| to cover the TBSCertificate, starting at the +// subjectPublicKeyInfo. static int ssl_cert_skip_to_spki(const CBS *in, CBS *out_tbs_cert) { /* From RFC 5280, section 4.1 * Certificate ::= SEQUENCE { @@ -623,19 +496,19 @@ static int ssl_cert_skip_to_spki(const CBS *in, CBS *out_tbs_cert) { if (!CBS_get_asn1(&buf, &toplevel, CBS_ASN1_SEQUENCE) || CBS_len(&buf) != 0 || !CBS_get_asn1(&toplevel, out_tbs_cert, CBS_ASN1_SEQUENCE) || - /* version */ + // version !CBS_get_optional_asn1( out_tbs_cert, NULL, NULL, CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) || - /* serialNumber */ + // serialNumber !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_INTEGER) || - /* signature algorithm */ + // signature algorithm !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_SEQUENCE) || - /* issuer */ + // issuer !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_SEQUENCE) || - /* validity */ + // validity !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_SEQUENCE) || - /* subject */ + // subject !CBS_get_asn1(out_tbs_cert, NULL, CBS_ASN1_SEQUENCE)) { return 0; } @@ -643,18 +516,24 @@ static int ssl_cert_skip_to_spki(const CBS *in, CBS *out_tbs_cert) { return 1; } -EVP_PKEY *ssl_cert_parse_pubkey(const CBS *in) { +UniquePtr ssl_cert_parse_pubkey(const CBS *in) { CBS buf = *in, tbs_cert; if (!ssl_cert_skip_to_spki(&buf, &tbs_cert)) { OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT); - return NULL; + return nullptr; } - return EVP_parse_public_key(&tbs_cert); + return UniquePtr(EVP_parse_public_key(&tbs_cert)); } int ssl_compare_public_and_private_key(const EVP_PKEY *pubkey, const EVP_PKEY *privkey) { + if (EVP_PKEY_is_opaque(privkey)) { + // We cannot check an opaque private key and have to trust that it + // matches. 
+ return 1; + } + int ret = 0; switch (EVP_PKEY_cmp(pubkey, privkey)) { @@ -669,6 +548,7 @@ int ssl_compare_public_and_private_key(const EVP_PKEY *pubkey, break; case -2: OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_KEY_TYPE); + break; default: assert(0); break; @@ -691,15 +571,13 @@ int ssl_cert_check_private_key(const CERT *cert, const EVP_PKEY *privkey) { CBS cert_cbs; CRYPTO_BUFFER_init_CBS(sk_CRYPTO_BUFFER_value(cert->chain, 0), &cert_cbs); - EVP_PKEY *pubkey = ssl_cert_parse_pubkey(&cert_cbs); + UniquePtr pubkey = ssl_cert_parse_pubkey(&cert_cbs); if (!pubkey) { OPENSSL_PUT_ERROR(X509, X509_R_UNKNOWN_KEY_TYPE); return 0; } - const int ok = ssl_compare_public_and_private_key(pubkey, privkey); - EVP_PKEY_free(pubkey); - return ok; + return ssl_compare_public_and_private_key(pubkey.get(), privkey); } int ssl_cert_check_digital_signature_key_usage(const CBS *in) { @@ -708,20 +586,21 @@ int ssl_cert_check_digital_signature_key_usage(const CBS *in) { CBS tbs_cert, outer_extensions; int has_extensions; if (!ssl_cert_skip_to_spki(&buf, &tbs_cert) || - /* subjectPublicKeyInfo */ + // subjectPublicKeyInfo !CBS_get_asn1(&tbs_cert, NULL, CBS_ASN1_SEQUENCE) || - /* issuerUniqueID */ + // issuerUniqueID !CBS_get_optional_asn1( &tbs_cert, NULL, NULL, CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 1) || - /* subjectUniqueID */ + // subjectUniqueID !CBS_get_optional_asn1( &tbs_cert, NULL, NULL, CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 2) || !CBS_get_optional_asn1( &tbs_cert, &outer_extensions, &has_extensions, CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 3)) { - goto parse_err; + OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT); + return 0; } if (!has_extensions) { @@ -730,7 +609,8 @@ int ssl_cert_check_digital_signature_key_usage(const CBS *in) { CBS extensions; if (!CBS_get_asn1(&outer_extensions, &extensions, CBS_ASN1_SEQUENCE)) { - goto parse_err; + OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT); + return 0; } while (CBS_len(&extensions) > 0) { @@ -741,7 +621,8 @@ int ssl_cert_check_digital_signature_key_usage(const CBS *in) { !CBS_get_asn1(&extension, NULL, CBS_ASN1_BOOLEAN)) || !CBS_get_asn1(&extension, &contents, CBS_ASN1_OCTETSTRING) || CBS_len(&extension) != 0) { - goto parse_err; + OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT); + return 0; } static const uint8_t kKeyUsageOID[3] = {0x55, 0x1d, 0x0f}; @@ -754,13 +635,15 @@ int ssl_cert_check_digital_signature_key_usage(const CBS *in) { CBS bit_string; if (!CBS_get_asn1(&contents, &bit_string, CBS_ASN1_BITSTRING) || CBS_len(&contents) != 0) { - goto parse_err; + OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT); + return 0; } - /* This is the KeyUsage extension. See - * https://tools.ietf.org/html/rfc5280#section-4.2.1.3 */ + // This is the KeyUsage extension. See + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 if (!CBS_is_valid_asn1_bitstring(&bit_string)) { - goto parse_err; + OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT); + return 0; } if (!CBS_asn1_bitstring_has_bit(&bit_string, 0)) { @@ -771,33 +654,27 @@ int ssl_cert_check_digital_signature_key_usage(const CBS *in) { return 1; } - /* No KeyUsage extension found. */ + // No KeyUsage extension found. 
return 1; - -parse_err: - OPENSSL_PUT_ERROR(SSL, SSL_R_CANNOT_PARSE_LEAF_CERT); - return 0; } -static int ca_dn_cmp(const X509_NAME **a, const X509_NAME **b) { - return X509_NAME_cmp(*a, *b); -} +UniquePtr ssl_parse_client_CA_list(SSL *ssl, + uint8_t *out_alert, + CBS *cbs) { + CRYPTO_BUFFER_POOL *const pool = ssl->ctx->pool; -STACK_OF(X509_NAME) * - ssl_parse_client_CA_list(SSL *ssl, uint8_t *out_alert, CBS *cbs) { - STACK_OF(X509_NAME) *ret = sk_X509_NAME_new(ca_dn_cmp); - X509_NAME *name = NULL; - if (ret == NULL) { + UniquePtr ret(sk_CRYPTO_BUFFER_new_null()); + if (!ret) { *out_alert = SSL_AD_INTERNAL_ERROR; OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return NULL; + return nullptr; } CBS child; if (!CBS_get_u16_length_prefixed(cbs, &child)) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_LENGTH_MISMATCH); - goto err; + return nullptr; } while (CBS_len(&child) > 0) { @@ -805,33 +682,37 @@ STACK_OF(X509_NAME) * if (!CBS_get_u16_length_prefixed(&child, &distinguished_name)) { *out_alert = SSL_AD_DECODE_ERROR; OPENSSL_PUT_ERROR(SSL, SSL_R_CA_DN_TOO_LONG); - goto err; - } - - const uint8_t *ptr = CBS_data(&distinguished_name); - /* A u16 length cannot overflow a long. */ - name = d2i_X509_NAME(NULL, &ptr, (long)CBS_len(&distinguished_name)); - if (name == NULL || - ptr != CBS_data(&distinguished_name) + CBS_len(&distinguished_name)) { - *out_alert = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto err; + return nullptr; } - if (!sk_X509_NAME_push(ret, name)) { + UniquePtr buffer( + CRYPTO_BUFFER_new_from_CBS(&distinguished_name, pool)); + if (!buffer || + !PushToStack(ret.get(), std::move(buffer))) { *out_alert = SSL_AD_INTERNAL_ERROR; OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; + return nullptr; } - name = NULL; + } + + if (!ssl->ctx->x509_method->check_client_CA_list(ret.get())) { + *out_alert = SSL_AD_INTERNAL_ERROR; + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return nullptr; } return ret; +} -err: - X509_NAME_free(name); - sk_X509_NAME_pop_free(ret, X509_NAME_free); - return NULL; +bool ssl_has_client_CAs(SSL *ssl) { + STACK_OF(CRYPTO_BUFFER) *names = ssl->client_CA; + if (names == NULL) { + names = ssl->ctx->client_CA; + } + if (names == NULL) { + return false; + } + return sk_CRYPTO_BUFFER_num(names) > 0; } int ssl_add_client_CA_list(SSL *ssl, CBB *cbb) { @@ -840,21 +721,18 @@ int ssl_add_client_CA_list(SSL *ssl, CBB *cbb) { return 0; } - STACK_OF(X509_NAME) *sk = SSL_get_client_CA_list(ssl); - if (sk == NULL) { + STACK_OF(CRYPTO_BUFFER) *names = ssl->client_CA; + if (names == NULL) { + names = ssl->ctx->client_CA; + } + if (names == NULL) { return CBB_flush(cbb); } - for (size_t i = 0; i < sk_X509_NAME_num(sk); i++) { - X509_NAME *name = sk_X509_NAME_value(sk, i); - int len = i2d_X509_NAME(name, NULL); - if (len < 0) { - return 0; - } - uint8_t *ptr; + for (const CRYPTO_BUFFER *name : names) { if (!CBB_add_u16_length_prefixed(&child, &name_cbb) || - !CBB_add_space(&name_cbb, &ptr, (size_t)len) || - (len > 0 && i2d_X509_NAME(name, &ptr) < 0)) { + !CBB_add_bytes(&name_cbb, CRYPTO_BUFFER_data(name), + CRYPTO_BUFFER_len(name))) { return 0; } } @@ -862,71 +740,34 @@ int ssl_add_client_CA_list(SSL *ssl, CBB *cbb) { return CBB_flush(cbb); } -static int set_cert_store(X509_STORE **store_ptr, X509_STORE *new_store, int take_ref) { - X509_STORE_free(*store_ptr); - *store_ptr = new_store; - - if (new_store != NULL && take_ref) { - X509_STORE_up_ref(new_store); - } - - return 1; -} - -int SSL_CTX_set0_verify_cert_store(SSL_CTX 
*ctx, X509_STORE *store) { - return set_cert_store(&ctx->cert->verify_store, store, 0); -} - -int SSL_CTX_set1_verify_cert_store(SSL_CTX *ctx, X509_STORE *store) { - return set_cert_store(&ctx->cert->verify_store, store, 1); -} - -int SSL_set0_verify_cert_store(SSL *ssl, X509_STORE *store) { - return set_cert_store(&ssl->cert->verify_store, store, 0); -} - -int SSL_set1_verify_cert_store(SSL *ssl, X509_STORE *store) { - return set_cert_store(&ssl->cert->verify_store, store, 1); -} - -void SSL_CTX_set_cert_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, void *arg), - void *arg) { - ssl_cert_set_cert_cb(ctx->cert, cb, arg); -} - -void SSL_set_cert_cb(SSL *ssl, int (*cb)(SSL *ssl, void *arg), void *arg) { - ssl_cert_set_cert_cb(ssl->cert, cb, arg); -} - int ssl_check_leaf_certificate(SSL_HANDSHAKE *hs, EVP_PKEY *pkey, const CRYPTO_BUFFER *leaf) { SSL *const ssl = hs->ssl; - assert(ssl3_protocol_version(ssl) < TLS1_3_VERSION); + assert(ssl_protocol_version(ssl) < TLS1_3_VERSION); - /* Check the certificate's type matches the cipher. */ - int expected_type = ssl_cipher_get_key_type(hs->new_cipher); - assert(expected_type != EVP_PKEY_NONE); - if (pkey->type != expected_type) { + // Check the certificate's type matches the cipher. + if (!(hs->new_cipher->algorithm_auth & ssl_cipher_auth_mask_for_key(pkey))) { OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CERTIFICATE_TYPE); return 0; } - if (hs->new_cipher->algorithm_auth & SSL_aECDSA) { + // Check key usages for all key types but RSA. This is needed to distinguish + // ECDH certificates, which we do not support, from ECDSA certificates. In + // principle, we should check RSA key usages based on cipher, but this breaks + // buggy antivirus deployments. Other key types are always used for signing. + // + // TODO(davidben): Get more recent data on RSA key usages. + if (EVP_PKEY_id(pkey) != EVP_PKEY_RSA) { CBS leaf_cbs; CBS_init(&leaf_cbs, CRYPTO_BUFFER_data(leaf), CRYPTO_BUFFER_len(leaf)); - /* ECDSA and ECDH certificates use the same public key format. Instead, - * they are distinguished by the key usage extension in the certificate. */ if (!ssl_cert_check_digital_signature_key_usage(&leaf_cbs)) { return 0; } + } + if (EVP_PKEY_id(pkey) == EVP_PKEY_EC) { + // Check the key's group and point format are acceptable. EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(pkey); - if (ec_key == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECC_CERT); - return 0; - } - - /* Check the key's group and point format are acceptable. */ uint16_t group_id; if (!ssl_nid_to_group_id( &group_id, EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key))) || @@ -940,36 +781,84 @@ int ssl_check_leaf_certificate(SSL_HANDSHAKE *hs, EVP_PKEY *pkey, return 1; } -static int do_client_cert_cb(SSL *ssl, void *arg) { - if (ssl_has_certificate(ssl) || ssl->ctx->client_cert_cb == NULL) { +int ssl_on_certificate_selected(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + if (!ssl_has_certificate(ssl)) { + // Nothing to do. 
return 1; } - X509 *x509 = NULL; - EVP_PKEY *pkey = NULL; - int ret = ssl->ctx->client_cert_cb(ssl, &x509, &pkey); - if (ret < 0) { - return -1; + if (!ssl->ctx->x509_method->ssl_auto_chain_if_needed(ssl)) { + return 0; } - if (ret != 0) { - if (!SSL_use_certificate(ssl, x509) || - !SSL_use_PrivateKey(ssl, pkey)) { - return 0; - } + CBS leaf; + CRYPTO_BUFFER_init_CBS(sk_CRYPTO_BUFFER_value(ssl->cert->chain, 0), &leaf); + + hs->local_pubkey = ssl_cert_parse_pubkey(&leaf); + return hs->local_pubkey != NULL; +} + +} // namespace bssl + +using namespace bssl; + +int SSL_set_chain_and_key(SSL *ssl, CRYPTO_BUFFER *const *certs, + size_t num_certs, EVP_PKEY *privkey, + const SSL_PRIVATE_KEY_METHOD *privkey_method) { + return cert_set_chain_and_key(ssl->cert, certs, num_certs, privkey, + privkey_method); +} + +int SSL_CTX_set_chain_and_key(SSL_CTX *ctx, CRYPTO_BUFFER *const *certs, + size_t num_certs, EVP_PKEY *privkey, + const SSL_PRIVATE_KEY_METHOD *privkey_method) { + return cert_set_chain_and_key(ctx->cert, certs, num_certs, privkey, + privkey_method); +} + +int SSL_CTX_use_certificate_ASN1(SSL_CTX *ctx, size_t der_len, + const uint8_t *der) { + UniquePtr buffer(CRYPTO_BUFFER_new(der, der_len, NULL)); + if (!buffer) { + return 0; } - X509_free(x509); - EVP_PKEY_free(pkey); - return 1; + return ssl_set_cert(ctx->cert, std::move(buffer)); +} + +int SSL_use_certificate_ASN1(SSL *ssl, const uint8_t *der, size_t der_len) { + UniquePtr buffer(CRYPTO_BUFFER_new(der, der_len, NULL)); + if (!buffer) { + return 0; + } + + return ssl_set_cert(ssl->cert, std::move(buffer)); +} + +void SSL_CTX_set_cert_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, void *arg), + void *arg) { + ssl_cert_set_cert_cb(ctx->cert, cb, arg); } -void SSL_CTX_set_client_cert_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, - X509 **out_x509, - EVP_PKEY **out_pkey)) { - /* Emulate the old client certificate callback with the new one. 
*/ - SSL_CTX_set_cert_cb(ctx, do_client_cert_cb, NULL); - ctx->client_cert_cb = cb; +void SSL_set_cert_cb(SSL *ssl, int (*cb)(SSL *ssl, void *arg), void *arg) { + ssl_cert_set_cert_cb(ssl->cert, cb, arg); +} + +STACK_OF(CRYPTO_BUFFER) *SSL_get0_peer_certificates(const SSL *ssl) { + SSL_SESSION *session = SSL_get_session(ssl); + if (session == NULL) { + return NULL; + } + + return session->certs; +} + +STACK_OF(CRYPTO_BUFFER) *SSL_get0_server_requested_CAs(const SSL *ssl) { + if (ssl->s3->hs == NULL) { + return NULL; + } + return ssl->s3->hs->ca_names.get(); } static int set_signed_cert_timestamp_list(CERT *cert, const uint8_t *list, @@ -1010,3 +899,15 @@ int SSL_set_ocsp_response(SSL *ssl, const uint8_t *response, ssl->cert->ocsp_response = CRYPTO_BUFFER_new(response, response_len, NULL); return ssl->cert->ocsp_response != NULL; } + +void SSL_CTX_set0_client_CAs(SSL_CTX *ctx, STACK_OF(CRYPTO_BUFFER) *name_list) { + ctx->x509_method->ssl_ctx_flush_cached_client_CA(ctx); + sk_CRYPTO_BUFFER_pop_free(ctx->client_CA, CRYPTO_BUFFER_free); + ctx->client_CA = name_list; +} + +void SSL_set0_client_CAs(SSL *ssl, STACK_OF(CRYPTO_BUFFER) *name_list) { + ssl->ctx->x509_method->ssl_flush_cached_client_CA(ssl); + sk_CRYPTO_BUFFER_pop_free(ssl->client_CA, CRYPTO_BUFFER_free); + ssl->client_CA = name_list; +} diff --git a/Sources/BoringSSL/ssl/ssl_cipher.c b/Sources/BoringSSL/ssl/ssl_cipher.cc similarity index 67% rename from Sources/BoringSSL/ssl/ssl_cipher.c rename to Sources/BoringSSL/ssl/ssl_cipher.cc index 4a7459f46..87dc7cd1b 100644 --- a/Sources/BoringSSL/ssl/ssl_cipher.c +++ b/Sources/BoringSSL/ssl/ssl_cipher.cc @@ -154,12 +154,15 @@ #include "../crypto/internal.h" -/* kCiphers is an array of all supported ciphers, sorted by id. */ +namespace bssl { + +// kCiphers is an array of all supported ciphers, sorted by id. 
static const SSL_CIPHER kCiphers[] = { - /* The RSA ciphers */ - /* Cipher 02 */ + // The RSA ciphers + // Cipher 02 { SSL3_TXT_RSA_NULL_SHA, + "TLS_RSA_WITH_NULL_SHA", SSL3_CK_RSA_NULL_SHA, SSL_kRSA, SSL_aRSA, @@ -168,9 +171,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* Cipher 0A */ + // Cipher 0A { SSL3_TXT_RSA_DES_192_CBC3_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", SSL3_CK_RSA_DES_192_CBC3_SHA, SSL_kRSA, SSL_aRSA, @@ -180,11 +184,12 @@ static const SSL_CIPHER kCiphers[] = { }, - /* New AES ciphersuites */ + // New AES ciphersuites - /* Cipher 2F */ + // Cipher 2F { TLS1_TXT_RSA_WITH_AES_128_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA", TLS1_CK_RSA_WITH_AES_128_SHA, SSL_kRSA, SSL_aRSA, @@ -193,20 +198,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* Cipher 33 */ - { - TLS1_TXT_DHE_RSA_WITH_AES_128_SHA, - TLS1_CK_DHE_RSA_WITH_AES_128_SHA, - SSL_kDHE, - SSL_aRSA, - SSL_AES128, - SSL_SHA1, - SSL_HANDSHAKE_MAC_DEFAULT, - }, - - /* Cipher 35 */ + // Cipher 35 { TLS1_TXT_RSA_WITH_AES_256_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA", TLS1_CK_RSA_WITH_AES_256_SHA, SSL_kRSA, SSL_aRSA, @@ -215,23 +210,13 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* Cipher 39 */ - { - TLS1_TXT_DHE_RSA_WITH_AES_256_SHA, - TLS1_CK_DHE_RSA_WITH_AES_256_SHA, - SSL_kDHE, - SSL_aRSA, - SSL_AES256, - SSL_SHA1, - SSL_HANDSHAKE_MAC_DEFAULT, - }, - - /* TLS v1.2 ciphersuites */ + // TLS v1.2 ciphersuites - /* Cipher 3C */ + // Cipher 3C { TLS1_TXT_RSA_WITH_AES_128_SHA256, + "TLS_RSA_WITH_AES_128_CBC_SHA256", TLS1_CK_RSA_WITH_AES_128_SHA256, SSL_kRSA, SSL_aRSA, @@ -240,9 +225,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher 3D */ + // Cipher 3D { TLS1_TXT_RSA_WITH_AES_256_SHA256, + "TLS_RSA_WITH_AES_256_CBC_SHA256", TLS1_CK_RSA_WITH_AES_256_SHA256, SSL_kRSA, SSL_aRSA, @@ -251,33 +237,12 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher 67 */ - { - TLS1_TXT_DHE_RSA_WITH_AES_128_SHA256, - TLS1_CK_DHE_RSA_WITH_AES_128_SHA256, - SSL_kDHE, - SSL_aRSA, - SSL_AES128, - SSL_SHA256, - SSL_HANDSHAKE_MAC_SHA256, - }, + // PSK cipher suites. - /* Cipher 6B */ - { - TLS1_TXT_DHE_RSA_WITH_AES_256_SHA256, - TLS1_CK_DHE_RSA_WITH_AES_256_SHA256, - SSL_kDHE, - SSL_aRSA, - SSL_AES256, - SSL_SHA256, - SSL_HANDSHAKE_MAC_SHA256, - }, - - /* PSK cipher suites. 
*/ - - /* Cipher 8C */ + // Cipher 8C { TLS1_TXT_PSK_WITH_AES_128_CBC_SHA, + "TLS_PSK_WITH_AES_128_CBC_SHA", TLS1_CK_PSK_WITH_AES_128_CBC_SHA, SSL_kPSK, SSL_aPSK, @@ -286,9 +251,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* Cipher 8D */ + // Cipher 8D { TLS1_TXT_PSK_WITH_AES_256_CBC_SHA, + "TLS_PSK_WITH_AES_256_CBC_SHA", TLS1_CK_PSK_WITH_AES_256_CBC_SHA, SSL_kPSK, SSL_aPSK, @@ -297,11 +263,12 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* GCM ciphersuites from RFC5288 */ + // GCM ciphersuites from RFC5288 - /* Cipher 9C */ + // Cipher 9C { TLS1_TXT_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256", TLS1_CK_RSA_WITH_AES_128_GCM_SHA256, SSL_kRSA, SSL_aRSA, @@ -310,9 +277,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher 9D */ + // Cipher 9D { TLS1_TXT_RSA_WITH_AES_256_GCM_SHA384, + "TLS_RSA_WITH_AES_256_GCM_SHA384", TLS1_CK_RSA_WITH_AES_256_GCM_SHA384, SSL_kRSA, SSL_aRSA, @@ -321,33 +289,12 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA384, }, - /* Cipher 9E */ - { - TLS1_TXT_DHE_RSA_WITH_AES_128_GCM_SHA256, - TLS1_CK_DHE_RSA_WITH_AES_128_GCM_SHA256, - SSL_kDHE, - SSL_aRSA, - SSL_AES128GCM, - SSL_AEAD, - SSL_HANDSHAKE_MAC_SHA256, - }, + // TLS 1.3 suites. - /* Cipher 9F */ - { - TLS1_TXT_DHE_RSA_WITH_AES_256_GCM_SHA384, - TLS1_CK_DHE_RSA_WITH_AES_256_GCM_SHA384, - SSL_kDHE, - SSL_aRSA, - SSL_AES256GCM, - SSL_AEAD, - SSL_HANDSHAKE_MAC_SHA384, - }, - - /* TLS 1.3 suites. */ - - /* Cipher 1301 */ + // Cipher 1301 { TLS1_TXT_AES_128_GCM_SHA256, + "TLS_AES_128_GCM_SHA256", TLS1_CK_AES_128_GCM_SHA256, SSL_kGENERIC, SSL_aGENERIC, @@ -356,9 +303,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher 1302 */ + // Cipher 1302 { TLS1_TXT_AES_256_GCM_SHA384, + "TLS_AES_256_GCM_SHA384", TLS1_CK_AES_256_GCM_SHA384, SSL_kGENERIC, SSL_aGENERIC, @@ -367,9 +315,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA384, }, - /* Cipher 1303 */ + // Cipher 1303 { TLS1_TXT_CHACHA20_POLY1305_SHA256, + "TLS_CHACHA20_POLY1305_SHA256", TLS1_CK_CHACHA20_POLY1305_SHA256, SSL_kGENERIC, SSL_aGENERIC, @@ -378,9 +327,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher C009 */ + // Cipher C009 { TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", TLS1_CK_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, SSL_kECDHE, SSL_aECDSA, @@ -389,9 +339,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* Cipher C00A */ + // Cipher C00A { TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", TLS1_CK_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, SSL_kECDHE, SSL_aECDSA, @@ -400,9 +351,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* Cipher C013 */ + // Cipher C013 { TLS1_TXT_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", TLS1_CK_ECDHE_RSA_WITH_AES_128_CBC_SHA, SSL_kECDHE, SSL_aRSA, @@ -411,9 +363,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* Cipher C014 */ + // Cipher C014 { TLS1_TXT_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", TLS1_CK_ECDHE_RSA_WITH_AES_256_CBC_SHA, SSL_kECDHE, SSL_aRSA, @@ -423,11 +376,12 @@ static const SSL_CIPHER kCiphers[] = { }, - /* HMAC based TLS v1.2 ciphersuites from RFC5289 */ + // HMAC based TLS v1.2 ciphersuites from RFC5289 - /* Cipher C023 */ + // Cipher C023 { TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_SHA256, + 
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", TLS1_CK_ECDHE_ECDSA_WITH_AES_128_SHA256, SSL_kECDHE, SSL_aECDSA, @@ -436,9 +390,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher C024 */ + // Cipher C024 { TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", TLS1_CK_ECDHE_ECDSA_WITH_AES_256_SHA384, SSL_kECDHE, SSL_aECDSA, @@ -447,9 +402,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA384, }, - /* Cipher C027 */ + // Cipher C027 { TLS1_TXT_ECDHE_RSA_WITH_AES_128_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", TLS1_CK_ECDHE_RSA_WITH_AES_128_SHA256, SSL_kECDHE, SSL_aRSA, @@ -458,9 +414,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher C028 */ + // Cipher C028 { TLS1_TXT_ECDHE_RSA_WITH_AES_256_SHA384, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", TLS1_CK_ECDHE_RSA_WITH_AES_256_SHA384, SSL_kECDHE, SSL_aRSA, @@ -470,11 +427,12 @@ static const SSL_CIPHER kCiphers[] = { }, - /* GCM based TLS v1.2 ciphersuites from RFC5289 */ + // GCM based TLS v1.2 ciphersuites from RFC5289 - /* Cipher C02B */ + // Cipher C02B { TLS1_TXT_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", TLS1_CK_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, SSL_kECDHE, SSL_aECDSA, @@ -483,9 +441,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher C02C */ + // Cipher C02C { TLS1_TXT_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", TLS1_CK_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, SSL_kECDHE, SSL_aECDSA, @@ -494,9 +453,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA384, }, - /* Cipher C02F */ + // Cipher C02F { TLS1_TXT_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", TLS1_CK_ECDHE_RSA_WITH_AES_128_GCM_SHA256, SSL_kECDHE, SSL_aRSA, @@ -505,9 +465,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher C030 */ + // Cipher C030 { TLS1_TXT_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", TLS1_CK_ECDHE_RSA_WITH_AES_256_GCM_SHA384, SSL_kECDHE, SSL_aRSA, @@ -516,11 +477,12 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA384, }, - /* ECDHE-PSK cipher suites. */ + // ECDHE-PSK cipher suites. - /* Cipher C035 */ + // Cipher C035 { TLS1_TXT_ECDHE_PSK_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA", TLS1_CK_ECDHE_PSK_WITH_AES_128_CBC_SHA, SSL_kECDHE, SSL_aPSK, @@ -529,9 +491,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* Cipher C036 */ + // Cipher C036 { TLS1_TXT_ECDHE_PSK_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA", TLS1_CK_ECDHE_PSK_WITH_AES_256_CBC_SHA, SSL_kECDHE, SSL_aPSK, @@ -540,11 +503,12 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_DEFAULT, }, - /* ChaCha20-Poly1305 cipher suites. */ + // ChaCha20-Poly1305 cipher suites. 
- /* Cipher CCA8 */ + // Cipher CCA8 { TLS1_TXT_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", TLS1_CK_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, SSL_kECDHE, SSL_aRSA, @@ -553,9 +517,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher CCA9 */ + // Cipher CCA9 { TLS1_TXT_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", TLS1_CK_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, SSL_kECDHE, SSL_aECDSA, @@ -564,9 +529,10 @@ static const SSL_CIPHER kCiphers[] = { SSL_HANDSHAKE_MAC_SHA256, }, - /* Cipher CCAB */ + // Cipher CCAB { TLS1_TXT_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256", TLS1_CK_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256, SSL_kECDHE, SSL_aPSK, @@ -587,66 +553,59 @@ static const size_t kCiphersLen = OPENSSL_ARRAY_SIZE(kCiphers); typedef struct cipher_order_st { const SSL_CIPHER *cipher; - int active; - int in_group; + bool active; + bool in_group; struct cipher_order_st *next, *prev; } CIPHER_ORDER; typedef struct cipher_alias_st { - /* name is the name of the cipher alias. */ + // name is the name of the cipher alias. const char *name; - /* The following fields are bitmasks for the corresponding fields on - * |SSL_CIPHER|. A cipher matches a cipher alias iff, for each bitmask, the - * bit corresponding to the cipher's value is set to 1. If any bitmask is - * all zeroes, the alias matches nothing. Use |~0u| for the default value. */ + // The following fields are bitmasks for the corresponding fields on + // |SSL_CIPHER|. A cipher matches a cipher alias iff, for each bitmask, the + // bit corresponding to the cipher's value is set to 1. If any bitmask is + // all zeroes, the alias matches nothing. Use |~0u| for the default value. uint32_t algorithm_mkey; uint32_t algorithm_auth; uint32_t algorithm_enc; uint32_t algorithm_mac; - /* min_version, if non-zero, matches all ciphers which were added in that - * particular protocol version. */ + // min_version, if non-zero, matches all ciphers which were added in that + // particular protocol version. uint16_t min_version; } CIPHER_ALIAS; static const CIPHER_ALIAS kCipherAliases[] = { - /* "ALL" doesn't include eNULL. It must be explicitly enabled. */ - {"ALL", ~0u, ~0u, ~SSL_eNULL, ~0u, 0}, + // "ALL" doesn't include eNULL. It must be explicitly enabled. + {"ALL", ~0u, ~0u, ~0u, ~0u, 0}, - /* The "COMPLEMENTOFDEFAULT" rule is omitted. It matches nothing. */ + // The "COMPLEMENTOFDEFAULT" rule is omitted. It matches nothing. - /* key exchange aliases - * (some of those using only a single bit here combine - * multiple key exchange algs according to the RFCs, - * e.g. kEDH combines DHE_DSS and DHE_RSA) */ + // key exchange aliases + // (some of those using only a single bit here combine + // multiple key exchange algs according to the RFCs. 
{"kRSA", SSL_kRSA, ~0u, ~0u, ~0u, 0}, - {"kDHE", SSL_kDHE, ~0u, ~0u, ~0u, 0}, - {"kEDH", SSL_kDHE, ~0u, ~0u, ~0u, 0}, - {"DH", SSL_kDHE, ~0u, ~0u, ~0u, 0}, - {"kECDHE", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, {"kEECDH", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, {"ECDH", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, {"kPSK", SSL_kPSK, ~0u, ~0u, ~0u, 0}, - /* server authentication aliases */ - {"aRSA", ~0u, SSL_aRSA, ~SSL_eNULL, ~0u, 0}, + // server authentication aliases + {"aRSA", ~0u, SSL_aRSA, ~0u, ~0u, 0}, {"aECDSA", ~0u, SSL_aECDSA, ~0u, ~0u, 0}, {"ECDSA", ~0u, SSL_aECDSA, ~0u, ~0u, 0}, {"aPSK", ~0u, SSL_aPSK, ~0u, ~0u, 0}, - /* aliases combining key exchange and server authentication */ - {"DHE", SSL_kDHE, ~0u, ~0u, ~0u, 0}, - {"EDH", SSL_kDHE, ~0u, ~0u, ~0u, 0}, + // aliases combining key exchange and server authentication {"ECDHE", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, {"EECDH", SSL_kECDHE, ~0u, ~0u, ~0u, 0}, - {"RSA", SSL_kRSA, SSL_aRSA, ~SSL_eNULL, ~0u, 0}, + {"RSA", SSL_kRSA, SSL_aRSA, ~0u, ~0u, 0}, {"PSK", SSL_kPSK, SSL_aPSK, ~0u, ~0u, 0}, - /* symmetric encryption aliases */ + // symmetric encryption aliases {"3DES", ~0u, ~0u, SSL_3DES, ~0u, 0}, {"AES128", ~0u, ~0u, SSL_AES128 | SSL_AES128GCM, ~0u, 0}, {"AES256", ~0u, ~0u, SSL_AES256 | SSL_AES256GCM, ~0u, 0}, @@ -654,28 +613,28 @@ static const CIPHER_ALIAS kCipherAliases[] = { {"AESGCM", ~0u, ~0u, SSL_AES128GCM | SSL_AES256GCM, ~0u, 0}, {"CHACHA20", ~0u, ~0u, SSL_CHACHA20POLY1305, ~0u, 0}, - /* MAC aliases */ - {"SHA1", ~0u, ~0u, ~SSL_eNULL, SSL_SHA1, 0}, - {"SHA", ~0u, ~0u, ~SSL_eNULL, SSL_SHA1, 0}, + // MAC aliases + {"SHA1", ~0u, ~0u, ~0u, SSL_SHA1, 0}, + {"SHA", ~0u, ~0u, ~0u, SSL_SHA1, 0}, {"SHA256", ~0u, ~0u, ~0u, SSL_SHA256, 0}, {"SHA384", ~0u, ~0u, ~0u, SSL_SHA384, 0}, - /* Legacy protocol minimum version aliases. "TLSv1" is intentionally the - * same as "SSLv3". */ - {"SSLv3", ~0u, ~0u, ~SSL_eNULL, ~0u, SSL3_VERSION}, - {"TLSv1", ~0u, ~0u, ~SSL_eNULL, ~0u, SSL3_VERSION}, - {"TLSv1.2", ~0u, ~0u, ~SSL_eNULL, ~0u, TLS1_2_VERSION}, + // Legacy protocol minimum version aliases. "TLSv1" is intentionally the + // same as "SSLv3". + {"SSLv3", ~0u, ~0u, ~0u, ~0u, SSL3_VERSION}, + {"TLSv1", ~0u, ~0u, ~0u, ~0u, SSL3_VERSION}, + {"TLSv1.2", ~0u, ~0u, ~0u, ~0u, TLS1_2_VERSION}, - /* Legacy strength classes. */ - {"HIGH", ~0u, ~0u, ~SSL_eNULL, ~0u, 0}, - {"FIPS", ~0u, ~0u, ~SSL_eNULL, ~0u, 0}, + // Legacy strength classes. 
+ {"HIGH", ~0u, ~0u, ~0u, ~0u, 0}, + {"FIPS", ~0u, ~0u, ~0u, ~0u, 0}, }; static const size_t kCipherAliasesLen = OPENSSL_ARRAY_SIZE(kCipherAliases); static int ssl_cipher_id_cmp(const void *in_a, const void *in_b) { - const SSL_CIPHER *a = in_a; - const SSL_CIPHER *b = in_b; + const SSL_CIPHER *a = reinterpret_cast(in_a); + const SSL_CIPHER *b = reinterpret_cast(in_b); if (a->id > b->id) { return 1; @@ -686,38 +645,34 @@ static int ssl_cipher_id_cmp(const void *in_a, const void *in_b) { } } -const SSL_CIPHER *SSL_get_cipher_by_value(uint16_t value) { - SSL_CIPHER c; - - c.id = 0x03000000L | value; - return bsearch(&c, kCiphers, kCiphersLen, sizeof(SSL_CIPHER), - ssl_cipher_id_cmp); -} - -int ssl_cipher_get_evp_aead(const EVP_AEAD **out_aead, - size_t *out_mac_secret_len, - size_t *out_fixed_iv_len, - const SSL_CIPHER *cipher, uint16_t version) { +bool ssl_cipher_get_evp_aead(const EVP_AEAD **out_aead, + size_t *out_mac_secret_len, + size_t *out_fixed_iv_len, const SSL_CIPHER *cipher, + uint16_t version, int is_dtls) { *out_aead = NULL; *out_mac_secret_len = 0; *out_fixed_iv_len = 0; + const int is_tls12 = version == TLS1_2_VERSION && !is_dtls; + if (cipher->algorithm_mac == SSL_AEAD) { if (cipher->algorithm_enc == SSL_AES128GCM) { - *out_aead = EVP_aead_aes_128_gcm(); + *out_aead = + is_tls12 ? EVP_aead_aes_128_gcm_tls12() : EVP_aead_aes_128_gcm(); *out_fixed_iv_len = 4; } else if (cipher->algorithm_enc == SSL_AES256GCM) { - *out_aead = EVP_aead_aes_256_gcm(); + *out_aead = + is_tls12 ? EVP_aead_aes_256_gcm_tls12() : EVP_aead_aes_256_gcm(); *out_fixed_iv_len = 4; } else if (cipher->algorithm_enc == SSL_CHACHA20POLY1305) { *out_aead = EVP_aead_chacha20_poly1305(); *out_fixed_iv_len = 12; } else { - return 0; + return false; } - /* In TLS 1.3, the iv_len is equal to the AEAD nonce length whereas the code - * above computes the TLS 1.2 construction. */ + // In TLS 1.3, the iv_len is equal to the AEAD nonce length whereas the code + // above computes the TLS 1.2 construction. if (version >= TLS1_3_VERSION) { *out_fixed_iv_len = EVP_AEAD_nonce_length(*out_aead); } @@ -759,7 +714,7 @@ int ssl_cipher_get_evp_aead(const EVP_AEAD **out_aead, *out_aead = EVP_aead_aes_256_cbc_sha1_tls(); } } else { - return 0; + return false; } *out_mac_secret_len = SHA_DIGEST_LENGTH; @@ -769,27 +724,27 @@ int ssl_cipher_get_evp_aead(const EVP_AEAD **out_aead, } else if (cipher->algorithm_enc == SSL_AES256) { *out_aead = EVP_aead_aes_256_cbc_sha256_tls(); } else { - return 0; + return false; } *out_mac_secret_len = SHA256_DIGEST_LENGTH; } else if (cipher->algorithm_mac == SSL_SHA384) { if (cipher->algorithm_enc != SSL_AES256) { - return 0; + return false; } *out_aead = EVP_aead_aes_256_cbc_sha384_tls(); *out_mac_secret_len = SHA384_DIGEST_LENGTH; } else { - return 0; + return false; } - return 1; + return true; } -const EVP_MD *ssl_get_handshake_digest(uint32_t algorithm_prf, - uint16_t version) { - switch (algorithm_prf) { +const EVP_MD *ssl_get_handshake_digest(uint16_t version, + const SSL_CIPHER *cipher) { + switch (cipher->algorithm_prf) { case SSL_HANDSHAKE_MAC_DEFAULT: return version >= TLS1_2_VERSION ? 
EVP_sha256() : EVP_md5_sha1(); case SSL_HANDSHAKE_MAC_SHA256: @@ -797,17 +752,22 @@ const EVP_MD *ssl_get_handshake_digest(uint32_t algorithm_prf, case SSL_HANDSHAKE_MAC_SHA384: return EVP_sha384(); default: + assert(0); return NULL; } } -#define ITEM_SEP(a) \ - (((a) == ':') || ((a) == ' ') || ((a) == ';') || ((a) == ',')) +static bool is_cipher_list_separator(char c, int is_strict) { + if (c == ':') { + return true; + } + return !is_strict && (c == ' ' || c == ';' || c == ','); +} -/* rule_equals returns one iff the NUL-terminated string |rule| is equal to the - * |buf_len| bytes at |buf|. */ -static int rule_equals(const char *rule, const char *buf, size_t buf_len) { - /* |strncmp| alone only checks that |buf| is a prefix of |rule|. */ +// rule_equals returns whether the NUL-terminated string |rule| is equal to the +// |buf_len| bytes at |buf|. +static bool rule_equals(const char *rule, const char *buf, size_t buf_len) { + // |strncmp| alone only checks that |buf| is a prefix of |rule|. return strncmp(rule, buf, buf_len) == 0 && rule[buf_len] == '\0'; } @@ -851,28 +811,23 @@ static void ll_append_head(CIPHER_ORDER **head, CIPHER_ORDER *curr, *head = curr; } -static void ssl_cipher_collect_ciphers(const SSL_PROTOCOL_METHOD *ssl_method, - CIPHER_ORDER *co_list, +static void ssl_cipher_collect_ciphers(CIPHER_ORDER *co_list, CIPHER_ORDER **head_p, CIPHER_ORDER **tail_p) { - /* The set of ciphers is static, but some subset may be unsupported by - * |ssl_method|, so the list may be smaller. */ size_t co_list_num = 0; - for (size_t i = 0; i < kCiphersLen; i++) { - const SSL_CIPHER *cipher = &kCiphers[i]; - if (ssl_method->supports_cipher(cipher) && - /* TLS 1.3 ciphers do not participate in this mechanism. */ - cipher->algorithm_mkey != SSL_kGENERIC) { - co_list[co_list_num].cipher = cipher; + for (const SSL_CIPHER &cipher : kCiphers) { + // TLS 1.3 ciphers do not participate in this mechanism. + if (cipher.algorithm_mkey != SSL_kGENERIC) { + co_list[co_list_num].cipher = &cipher; co_list[co_list_num].next = NULL; co_list[co_list_num].prev = NULL; - co_list[co_list_num].active = 0; - co_list[co_list_num].in_group = 0; + co_list[co_list_num].active = false; + co_list[co_list_num].in_group = false; co_list_num++; } } - /* Prepare linked list from list entries. */ + // Prepare linked list from list entries. if (co_list_num > 0) { co_list[0].prev = NULL; @@ -894,33 +849,33 @@ static void ssl_cipher_collect_ciphers(const SSL_PROTOCOL_METHOD *ssl_method, } } -/* ssl_cipher_apply_rule applies the rule type |rule| to ciphers matching its - * parameters in the linked list from |*head_p| to |*tail_p|. It writes the new - * head and tail of the list to |*head_p| and |*tail_p|, respectively. - * - * - If |cipher_id| is non-zero, only that cipher is selected. - * - Otherwise, if |strength_bits| is non-negative, it selects ciphers - * of that strength. - * - Otherwise, it selects ciphers that match each bitmasks in |alg_*| and - * |min_version|. */ +// ssl_cipher_apply_rule applies the rule type |rule| to ciphers matching its +// parameters in the linked list from |*head_p| to |*tail_p|. It writes the new +// head and tail of the list to |*head_p| and |*tail_p|, respectively. +// +// - If |cipher_id| is non-zero, only that cipher is selected. +// - Otherwise, if |strength_bits| is non-negative, it selects ciphers +// of that strength. +// - Otherwise, it selects ciphers that match each bitmasks in |alg_*| and +// |min_version|. 
static void ssl_cipher_apply_rule( uint32_t cipher_id, uint32_t alg_mkey, uint32_t alg_auth, uint32_t alg_enc, uint32_t alg_mac, uint16_t min_version, int rule, - int strength_bits, int in_group, CIPHER_ORDER **head_p, + int strength_bits, bool in_group, CIPHER_ORDER **head_p, CIPHER_ORDER **tail_p) { CIPHER_ORDER *head, *tail, *curr, *next, *last; const SSL_CIPHER *cp; - int reverse = 0; + bool reverse = false; if (cipher_id == 0 && strength_bits == -1 && min_version == 0 && (alg_mkey == 0 || alg_auth == 0 || alg_enc == 0 || alg_mac == 0)) { - /* The rule matches nothing, so bail early. */ + // The rule matches nothing, so bail early. return; } if (rule == CIPHER_DEL) { - /* needed to maintain sorting between currently deleted ciphers */ - reverse = 1; + // needed to maintain sorting between currently deleted ciphers + reverse = true; } head = *head_p; @@ -948,8 +903,8 @@ static void ssl_cipher_apply_rule( next = reverse ? curr->prev : curr->next; cp = curr->cipher; - /* Selection criteria is either a specific cipher, the value of - * |strength_bits|, or the algorithms used. */ + // Selection criteria is either a specific cipher, the value of + // |strength_bits|, or the algorithms used. if (cipher_id != 0) { if (cipher_id != cp->id) { continue; @@ -963,40 +918,42 @@ static void ssl_cipher_apply_rule( !(alg_auth & cp->algorithm_auth) || !(alg_enc & cp->algorithm_enc) || !(alg_mac & cp->algorithm_mac) || - (min_version != 0 && SSL_CIPHER_get_min_version(cp) != min_version)) { + (min_version != 0 && SSL_CIPHER_get_min_version(cp) != min_version) || + // The NULL cipher must be selected explicitly. + cp->algorithm_enc == SSL_eNULL) { continue; } } - /* add the cipher if it has not been added yet. */ + // add the cipher if it has not been added yet. if (rule == CIPHER_ADD) { - /* reverse == 0 */ + // reverse == false if (!curr->active) { ll_append_tail(&head, curr, &tail); - curr->active = 1; + curr->active = true; curr->in_group = in_group; } } - /* Move the added cipher to this location */ + // Move the added cipher to this location else if (rule == CIPHER_ORD) { - /* reverse == 0 */ + // reverse == false if (curr->active) { ll_append_tail(&head, curr, &tail); - curr->in_group = 0; + curr->in_group = false; } } else if (rule == CIPHER_DEL) { - /* reverse == 1 */ + // reverse == true if (curr->active) { - /* most recently deleted ciphersuites get best positions - * for any future CIPHER_ADD (note that the CIPHER_DEL loop - * works in reverse to maintain the order) */ + // most recently deleted ciphersuites get best positions + // for any future CIPHER_ADD (note that the CIPHER_DEL loop + // works in reverse to maintain the order) ll_append_head(&head, curr, &tail); - curr->active = 0; - curr->in_group = 0; + curr->active = false; + curr->in_group = false; } } else if (rule == CIPHER_KILL) { - /* reverse == 0 */ + // reverse == false if (head == curr) { head = curr->next; } else { @@ -1006,7 +963,7 @@ static void ssl_cipher_apply_rule( if (tail == curr) { tail = curr->prev; } - curr->active = 0; + curr->active = false; if (curr->next != NULL) { curr->next->prev = curr->prev; } @@ -1022,16 +979,13 @@ static void ssl_cipher_apply_rule( *tail_p = tail; } -static int ssl_cipher_strength_sort(CIPHER_ORDER **head_p, - CIPHER_ORDER **tail_p) { - int max_strength_bits, i, *number_uses; - CIPHER_ORDER *curr; - - /* This routine sorts the ciphers with descending strength. 
The sorting must - * keep the pre-sorted sequence, so we apply the normal sorting routine as - * '+' movement to the end of the list. */ - max_strength_bits = 0; - curr = *head_p; +static bool ssl_cipher_strength_sort(CIPHER_ORDER **head_p, + CIPHER_ORDER **tail_p) { + // This routine sorts the ciphers with descending strength. The sorting must + // keep the pre-sorted sequence, so we apply the normal sorting routine as + // '+' movement to the end of the list. + int max_strength_bits = 0; + CIPHER_ORDER *curr = *head_p; while (curr != NULL) { if (curr->active && SSL_CIPHER_get_bits(curr->cipher, NULL) > max_strength_bits) { @@ -1040,14 +994,13 @@ static int ssl_cipher_strength_sort(CIPHER_ORDER **head_p, curr = curr->next; } - number_uses = OPENSSL_malloc((max_strength_bits + 1) * sizeof(int)); - if (!number_uses) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return 0; + Array number_uses; + if (!number_uses.Init(max_strength_bits + 1)) { + return false; } - OPENSSL_memset(number_uses, 0, (max_strength_bits + 1) * sizeof(int)); + OPENSSL_memset(number_uses.data(), 0, (max_strength_bits + 1) * sizeof(int)); - /* Now find the strength_bits values actually used. */ + // Now find the strength_bits values actually used. curr = *head_p; while (curr != NULL) { if (curr->active) { @@ -1056,25 +1009,25 @@ static int ssl_cipher_strength_sort(CIPHER_ORDER **head_p, curr = curr->next; } - /* Go through the list of used strength_bits values in descending order. */ - for (i = max_strength_bits; i >= 0; i--) { + // Go through the list of used strength_bits values in descending order. + for (int i = max_strength_bits; i >= 0; i--) { if (number_uses[i] > 0) { - ssl_cipher_apply_rule(0, 0, 0, 0, 0, 0, CIPHER_ORD, i, 0, head_p, tail_p); + ssl_cipher_apply_rule(0, 0, 0, 0, 0, 0, CIPHER_ORD, i, false, head_p, + tail_p); } } - OPENSSL_free(number_uses); - return 1; + return true; } -static int ssl_cipher_process_rulestr(const SSL_PROTOCOL_METHOD *ssl_method, - const char *rule_str, - CIPHER_ORDER **head_p, - CIPHER_ORDER **tail_p, int strict) { +static bool ssl_cipher_process_rulestr(const char *rule_str, + CIPHER_ORDER **head_p, + CIPHER_ORDER **tail_p, bool strict) { uint32_t alg_mkey, alg_auth, alg_enc, alg_mac; uint16_t min_version; const char *l, *buf; - int multi, skip_rule, rule, ok, in_group = 0, has_group = 0; + int rule; + bool multi, skip_rule, in_group = false, has_group = false; size_t j, buf_len; uint32_t cipher_id; char ch; @@ -1084,15 +1037,15 @@ static int ssl_cipher_process_rulestr(const SSL_PROTOCOL_METHOD *ssl_method, ch = *l; if (ch == '\0') { - break; /* done */ + break; // done } if (in_group) { if (ch == ']') { if (*tail_p) { - (*tail_p)->in_group = 0; + (*tail_p)->in_group = false; } - in_group = 0; + in_group = false; l++; continue; } @@ -1104,7 +1057,7 @@ static int ssl_cipher_process_rulestr(const SSL_PROTOCOL_METHOD *ssl_method, } else if (!(ch >= 'a' && ch <= 'z') && !(ch >= 'A' && ch <= 'Z') && !(ch >= '0' && ch <= '9')) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_OPERATOR_IN_GROUP); - return 0; + return false; } else { rule = CIPHER_ADD; } @@ -1121,73 +1074,71 @@ static int ssl_cipher_process_rulestr(const SSL_PROTOCOL_METHOD *ssl_method, rule = CIPHER_SPECIAL; l++; } else if (ch == '[') { - if (in_group) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NESTED_GROUP); - return 0; - } - in_group = 1; - has_group = 1; + assert(!in_group); + in_group = true; + has_group = true; l++; continue; } else { rule = CIPHER_ADD; } - /* If preference groups are enabled, the only legal operator is +. 
- * Otherwise the in_group bits will get mixed up. */ + // If preference groups are enabled, the only legal operator is +. + // Otherwise the in_group bits will get mixed up. if (has_group && rule != CIPHER_ADD) { OPENSSL_PUT_ERROR(SSL, SSL_R_MIXED_SPECIAL_OPERATOR_WITH_GROUPS); - return 0; + return false; } - if (ITEM_SEP(ch)) { + if (is_cipher_list_separator(ch, strict)) { l++; continue; } - multi = 0; + multi = false; cipher_id = 0; alg_mkey = ~0u; alg_auth = ~0u; alg_enc = ~0u; alg_mac = ~0u; min_version = 0; - skip_rule = 0; + skip_rule = false; for (;;) { ch = *l; buf = l; buf_len = 0; - while (((ch >= 'A') && (ch <= 'Z')) || ((ch >= '0') && (ch <= '9')) || - ((ch >= 'a') && (ch <= 'z')) || (ch == '-') || (ch == '.')) { + while ((ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || + (ch >= 'a' && ch <= 'z') || ch == '-' || ch == '.' || ch == '_') { ch = *(++l); buf_len++; } if (buf_len == 0) { - /* We hit something we cannot deal with, it is no command or separator - * nor alphanumeric, so we call this an error. */ + // We hit something we cannot deal with, it is no command or separator + // nor alphanumeric, so we call this an error. OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMMAND); - return 0; + return false; } if (rule == CIPHER_SPECIAL) { break; } - /* Look for a matching exact cipher. These aren't allowed in multipart - * rules. */ + // Look for a matching exact cipher. These aren't allowed in multipart + // rules. if (!multi && ch != '+') { for (j = 0; j < kCiphersLen; j++) { const SSL_CIPHER *cipher = &kCiphers[j]; - if (rule_equals(cipher->name, buf, buf_len)) { + if (rule_equals(cipher->name, buf, buf_len) || + rule_equals(cipher->standard_name, buf, buf_len)) { cipher_id = cipher->id; break; } } } if (cipher_id == 0) { - /* If not an exact cipher, look for a matching cipher alias. */ + // If not an exact cipher, look for a matching cipher alias. for (j = 0; j < kCipherAliasesLen; j++) { if (rule_equals(kCipherAliases[j].name, buf, buf_len)) { alg_mkey &= kCipherAliases[j].algorithm_mkey; @@ -1197,7 +1148,7 @@ static int ssl_cipher_process_rulestr(const SSL_PROTOCOL_METHOD *ssl_method, if (min_version != 0 && min_version != kCipherAliases[j].min_version) { - skip_rule = 1; + skip_rule = true; } else { min_version = kCipherAliases[j].min_version; } @@ -1205,39 +1156,35 @@ static int ssl_cipher_process_rulestr(const SSL_PROTOCOL_METHOD *ssl_method, } } if (j == kCipherAliasesLen) { - skip_rule = 1; + skip_rule = true; if (strict) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMMAND); - return 0; + return false; } } } - /* Check for a multipart rule. */ + // Check for a multipart rule. if (ch != '+') { break; } l++; - multi = 1; + multi = true; } - /* Ok, we have the rule, now apply it. */ + // Ok, we have the rule, now apply it. if (rule == CIPHER_SPECIAL) { - /* special command */ - ok = 0; - if (buf_len == 8 && !strncmp(buf, "STRENGTH", 8)) { - ok = ssl_cipher_strength_sort(head_p, tail_p); - } else { + if (buf_len != 8 || strncmp(buf, "STRENGTH", 8) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMMAND); + return false; } - - if (ok == 0) { - return 0; + if (!ssl_cipher_strength_sort(head_p, tail_p)) { + return false; } - /* We do not support any "multi" options together with "@", so throw away - * the rest of the command, if any left, until end or ':' is found. */ - while (*l != '\0' && !ITEM_SEP(*l)) { + // We do not support any "multi" options together with "@", so throw away + // the rest of the command, if any left, until end or ':' is found. 
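The separator rule the parser relies on here is simple enough to state on its own. A self-contained sketch mirroring is_cipher_list_separator (toy_is_separator is an illustrative name; build with assertions enabled to run the checks):

    #include <cassert>

    // ':' always separates rules; ' ', ';' and ',' only in legacy, non-strict mode.
    static bool toy_is_separator(char c, bool strict) {
      return c == ':' || (!strict && (c == ' ' || c == ';' || c == ','));
    }

    int main() {
      assert(toy_is_separator(':', /*strict=*/true));
      assert(!toy_is_separator(',', /*strict=*/true));   // rejected when strict
      assert(toy_is_separator(',', /*strict=*/false));   // accepted as legacy separator
      return 0;
    }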
+ while (*l != '\0' && !is_cipher_list_separator(*l, strict)) { l++; } } else if (!skip_rule) { @@ -1248,97 +1195,96 @@ static int ssl_cipher_process_rulestr(const SSL_PROTOCOL_METHOD *ssl_method, if (in_group) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_COMMAND); - return 0; + return false; } - return 1; + return true; } -STACK_OF(SSL_CIPHER) * -ssl_create_cipher_list(const SSL_PROTOCOL_METHOD *ssl_method, - struct ssl_cipher_preference_list_st **out_cipher_list, - const char *rule_str, int strict) { +bool ssl_create_cipher_list( + struct ssl_cipher_preference_list_st **out_cipher_list, + const char *rule_str, bool strict) { STACK_OF(SSL_CIPHER) *cipherstack = NULL; CIPHER_ORDER *co_list = NULL, *head = NULL, *tail = NULL, *curr; uint8_t *in_group_flags = NULL; unsigned int num_in_group_flags = 0; struct ssl_cipher_preference_list_st *pref_list = NULL; - /* Return with error if nothing to do. */ + // Return with error if nothing to do. if (rule_str == NULL || out_cipher_list == NULL) { - return NULL; + return false; } - /* Now we have to collect the available ciphers from the compiled in ciphers. - * We cannot get more than the number compiled in, so it is used for - * allocation. */ - co_list = OPENSSL_malloc(sizeof(CIPHER_ORDER) * kCiphersLen); + // Now we have to collect the available ciphers from the compiled in ciphers. + // We cannot get more than the number compiled in, so it is used for + // allocation. + co_list = (CIPHER_ORDER *)OPENSSL_malloc(sizeof(CIPHER_ORDER) * kCiphersLen); if (co_list == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return NULL; + return false; } - ssl_cipher_collect_ciphers(ssl_method, co_list, &head, &tail); + ssl_cipher_collect_ciphers(co_list, &head, &tail); - /* Now arrange all ciphers by preference: - * TODO(davidben): Compute this order once and copy it. */ + // Now arrange all ciphers by preference: + // TODO(davidben): Compute this order once and copy it. - /* Everything else being equal, prefer ECDHE_ECDSA and ECDHE_RSA over other - * key exchange mechanisms */ + // Everything else being equal, prefer ECDHE_ECDSA and ECDHE_RSA over other + // key exchange mechanisms ssl_cipher_apply_rule(0, SSL_kECDHE, SSL_aECDSA, ~0u, ~0u, 0, CIPHER_ADD, -1, - 0, &head, &tail); - ssl_cipher_apply_rule(0, SSL_kECDHE, ~0u, ~0u, ~0u, 0, CIPHER_ADD, -1, 0, + false, &head, &tail); + ssl_cipher_apply_rule(0, SSL_kECDHE, ~0u, ~0u, ~0u, 0, CIPHER_ADD, -1, false, &head, &tail); - ssl_cipher_apply_rule(0, ~0u, ~0u, ~0u, ~0u, 0, CIPHER_DEL, -1, 0, &head, + ssl_cipher_apply_rule(0, ~0u, ~0u, ~0u, ~0u, 0, CIPHER_DEL, -1, false, &head, &tail); - /* Order the bulk ciphers. First the preferred AEAD ciphers. We prefer - * CHACHA20 unless there is hardware support for fast and constant-time - * AES_GCM. Of the two CHACHA20 variants, the new one is preferred over the - * old one. */ + // Order the bulk ciphers. First the preferred AEAD ciphers. We prefer + // CHACHA20 unless there is hardware support for fast and constant-time + // AES_GCM. Of the two CHACHA20 variants, the new one is preferred over the + // old one. 
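As a rough sketch of the AEAD ordering policy just described, parameterized on whether constant-time AES hardware is available (ToyAead and preferred_aeads are illustrative names only, not BoringSSL API):

    #include <vector>

    enum class ToyAead { kAes128Gcm, kAes256Gcm, kChaCha20Poly1305 };

    // With AES hardware, prefer AES-GCM (128 then 256) over ChaCha20-Poly1305;
    // without it, prefer ChaCha20-Poly1305 first.
    static std::vector<ToyAead> preferred_aeads(bool has_aes_hardware) {
      if (has_aes_hardware) {
        return {ToyAead::kAes128Gcm, ToyAead::kAes256Gcm,
                ToyAead::kChaCha20Poly1305};
      }
      return {ToyAead::kChaCha20Poly1305, ToyAead::kAes128Gcm,
              ToyAead::kAes256Gcm};
    }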
if (EVP_has_aes_hardware()) { - ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES128GCM, ~0u, 0, CIPHER_ADD, -1, 0, - &head, &tail); - ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES256GCM, ~0u, 0, CIPHER_ADD, -1, 0, - &head, &tail); + ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES128GCM, ~0u, 0, CIPHER_ADD, -1, + false, &head, &tail); + ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES256GCM, ~0u, 0, CIPHER_ADD, -1, + false, &head, &tail); ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_CHACHA20POLY1305, ~0u, 0, CIPHER_ADD, - -1, 0, &head, &tail); + -1, false, &head, &tail); } else { ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_CHACHA20POLY1305, ~0u, 0, CIPHER_ADD, - -1, 0, &head, &tail); - ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES128GCM, ~0u, 0, CIPHER_ADD, -1, 0, - &head, &tail); - ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES256GCM, ~0u, 0, CIPHER_ADD, -1, 0, - &head, &tail); + -1, false, &head, &tail); + ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES128GCM, ~0u, 0, CIPHER_ADD, -1, + false, &head, &tail); + ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES256GCM, ~0u, 0, CIPHER_ADD, -1, + false, &head, &tail); } - /* Then the legacy non-AEAD ciphers: AES_128_CBC, AES_256_CBC, - * 3DES_EDE_CBC_SHA. */ - ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES128, ~0u, 0, CIPHER_ADD, -1, 0, + // Then the legacy non-AEAD ciphers: AES_128_CBC, AES_256_CBC, + // 3DES_EDE_CBC_SHA. + ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES128, ~0u, 0, CIPHER_ADD, -1, false, &head, &tail); - ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES256, ~0u, 0, CIPHER_ADD, -1, 0, + ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_AES256, ~0u, 0, CIPHER_ADD, -1, false, + &head, &tail); + ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_3DES, ~0u, 0, CIPHER_ADD, -1, false, &head, &tail); - ssl_cipher_apply_rule(0, ~0u, ~0u, SSL_3DES, ~0u, 0, CIPHER_ADD, -1, 0, &head, - &tail); - /* Temporarily enable everything else for sorting */ - ssl_cipher_apply_rule(0, ~0u, ~0u, ~0u, ~0u, 0, CIPHER_ADD, -1, 0, &head, + // Temporarily enable everything else for sorting + ssl_cipher_apply_rule(0, ~0u, ~0u, ~0u, ~0u, 0, CIPHER_ADD, -1, false, &head, &tail); - /* Move ciphers without forward secrecy to the end. */ - ssl_cipher_apply_rule(0, (SSL_kRSA | SSL_kPSK), ~0u, ~0u, ~0u, 0, - CIPHER_ORD, -1, 0, &head, &tail); + // Move ciphers without forward secrecy to the end. + ssl_cipher_apply_rule(0, (SSL_kRSA | SSL_kPSK), ~0u, ~0u, ~0u, 0, CIPHER_ORD, + -1, false, &head, &tail); - /* Now disable everything (maintaining the ordering!) */ - ssl_cipher_apply_rule(0, ~0u, ~0u, ~0u, ~0u, 0, CIPHER_DEL, -1, 0, &head, + // Now disable everything (maintaining the ordering!) + ssl_cipher_apply_rule(0, ~0u, ~0u, ~0u, ~0u, 0, CIPHER_DEL, -1, false, &head, &tail); - /* If the rule_string begins with DEFAULT, apply the default rule before - * using the (possibly available) additional rules. */ + // If the rule_string begins with DEFAULT, apply the default rule before + // using the (possibly available) additional rules. 
const char *rule_p = rule_str; if (strncmp(rule_str, "DEFAULT", 7) == 0) { - if (!ssl_cipher_process_rulestr(ssl_method, SSL_DEFAULT_CIPHER_LIST, &head, - &tail, strict)) { + if (!ssl_cipher_process_rulestr(SSL_DEFAULT_CIPHER_LIST, &head, &tail, + strict)) { goto err; } rule_p += 7; @@ -1348,24 +1294,24 @@ ssl_create_cipher_list(const SSL_PROTOCOL_METHOD *ssl_method, } if (*rule_p != '\0' && - !ssl_cipher_process_rulestr(ssl_method, rule_p, &head, &tail, strict)) { + !ssl_cipher_process_rulestr(rule_p, &head, &tail, strict)) { goto err; } - /* Allocate new "cipherstack" for the result, return with error - * if we cannot get one. */ + // Allocate new "cipherstack" for the result, return with error + // if we cannot get one. cipherstack = sk_SSL_CIPHER_new_null(); if (cipherstack == NULL) { goto err; } - in_group_flags = OPENSSL_malloc(kCiphersLen); + in_group_flags = (uint8_t *)OPENSSL_malloc(kCiphersLen); if (!in_group_flags) { goto err; } - /* The cipher selection for the list is done. The ciphers are added - * to the resulting precedence to the STACK_OF(SSL_CIPHER). */ + // The cipher selection for the list is done. The ciphers are added + // to the resulting precedence to the STACK_OF(SSL_CIPHER). for (curr = head; curr != NULL; curr = curr->next) { if (curr->active) { if (!sk_SSL_CIPHER_push(cipherstack, curr->cipher)) { @@ -1374,19 +1320,24 @@ ssl_create_cipher_list(const SSL_PROTOCOL_METHOD *ssl_method, in_group_flags[num_in_group_flags++] = curr->in_group; } } - OPENSSL_free(co_list); /* Not needed any longer */ + OPENSSL_free(co_list); // Not needed any longer co_list = NULL; - pref_list = OPENSSL_malloc(sizeof(struct ssl_cipher_preference_list_st)); + pref_list = (ssl_cipher_preference_list_st *)OPENSSL_malloc( + sizeof(struct ssl_cipher_preference_list_st)); if (!pref_list) { goto err; } pref_list->ciphers = cipherstack; - pref_list->in_group_flags = OPENSSL_malloc(num_in_group_flags); - if (!pref_list->in_group_flags) { - goto err; + pref_list->in_group_flags = NULL; + if (num_in_group_flags) { + pref_list->in_group_flags = (uint8_t *)OPENSSL_malloc(num_in_group_flags); + if (!pref_list->in_group_flags) { + goto err; + } + OPENSSL_memcpy(pref_list->in_group_flags, in_group_flags, + num_in_group_flags); } - OPENSSL_memcpy(pref_list->in_group_flags, in_group_flags, num_in_group_flags); OPENSSL_free(in_group_flags); in_group_flags = NULL; if (*out_cipher_list != NULL) { @@ -1395,7 +1346,14 @@ ssl_create_cipher_list(const SSL_PROTOCOL_METHOD *ssl_method, *out_cipher_list = pref_list; pref_list = NULL; - return cipherstack; + // Configuring an empty cipher list is an error but still updates the + // output. + if (sk_SSL_CIPHER_num((*out_cipher_list)->ciphers) == 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CIPHER_MATCH); + return false; + } + + return true; err: OPENSSL_free(co_list); @@ -1405,77 +1363,160 @@ ssl_create_cipher_list(const SSL_PROTOCOL_METHOD *ssl_method, OPENSSL_free(pref_list->in_group_flags); } OPENSSL_free(pref_list); - return NULL; + return false; } -uint32_t SSL_CIPHER_get_id(const SSL_CIPHER *cipher) { return cipher->id; } - uint16_t ssl_cipher_get_value(const SSL_CIPHER *cipher) { uint32_t id = cipher->id; - /* All ciphers are SSLv3. */ + // All ciphers are SSLv3. 
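A small self-contained illustration of that packing, mirroring ssl_cipher_get_value (toy_cipher_value is a made-up name; the example value is the registered IANA codepoint):

    #include <cassert>
    #include <cstdint>

    // The cipher |id| carries 0x0300 in its top half and the two-byte
    // registered cipher suite value in its bottom half.
    static uint16_t toy_cipher_value(uint32_t id) {
      assert((id & 0xff000000) == 0x03000000);
      return static_cast<uint16_t>(id & 0xffff);
    }

    int main() {
      // 0xC02F is TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256.
      assert(toy_cipher_value(0x0300C02F) == 0xC02F);
      return 0;
    }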
assert((id & 0xff000000) == 0x03000000); return id & 0xffff; } -int SSL_CIPHER_is_AES(const SSL_CIPHER *cipher) { - return (cipher->algorithm_enc & SSL_AES) != 0; +uint32_t ssl_cipher_auth_mask_for_key(const EVP_PKEY *key) { + switch (EVP_PKEY_id(key)) { + case EVP_PKEY_RSA: + return SSL_aRSA; + case EVP_PKEY_EC: + case EVP_PKEY_ED25519: + // Ed25519 keys in TLS 1.2 repurpose the ECDSA ciphers. + return SSL_aECDSA; + default: + return 0; + } } -int SSL_CIPHER_has_SHA1_HMAC(const SSL_CIPHER *cipher) { - return (cipher->algorithm_mac & SSL_SHA1) != 0; +bool ssl_cipher_uses_certificate_auth(const SSL_CIPHER *cipher) { + return (cipher->algorithm_auth & SSL_aCERT) != 0; } -int SSL_CIPHER_has_SHA256_HMAC(const SSL_CIPHER *cipher) { - return (cipher->algorithm_mac & SSL_SHA256) != 0; +bool ssl_cipher_requires_server_key_exchange(const SSL_CIPHER *cipher) { + // Ephemeral Diffie-Hellman key exchanges require a ServerKeyExchange. It is + // optional or omitted in all others. + return (cipher->algorithm_mkey & SSL_kECDHE) != 0; } -int SSL_CIPHER_is_AEAD(const SSL_CIPHER *cipher) { - return (cipher->algorithm_mac & SSL_AEAD) != 0; -} +size_t ssl_cipher_get_record_split_len(const SSL_CIPHER *cipher) { + size_t block_size; + switch (cipher->algorithm_enc) { + case SSL_3DES: + block_size = 8; + break; + case SSL_AES128: + case SSL_AES256: + block_size = 16; + break; + default: + return 0; + } -int SSL_CIPHER_is_AESGCM(const SSL_CIPHER *cipher) { - return (cipher->algorithm_enc & (SSL_AES128GCM | SSL_AES256GCM)) != 0; + // All supported TLS 1.0 ciphers use SHA-1. + assert(cipher->algorithm_mac == SSL_SHA1); + size_t ret = 1 + SHA_DIGEST_LENGTH; + ret += block_size - (ret % block_size); + return ret; } -int SSL_CIPHER_is_AES128GCM(const SSL_CIPHER *cipher) { - return (cipher->algorithm_enc & SSL_AES128GCM) != 0; -} +} // namespace bssl -int SSL_CIPHER_is_AES128CBC(const SSL_CIPHER *cipher) { - return (cipher->algorithm_enc & SSL_AES128) != 0; -} +using namespace bssl; + +const SSL_CIPHER *SSL_get_cipher_by_value(uint16_t value) { + SSL_CIPHER c; -int SSL_CIPHER_is_AES256CBC(const SSL_CIPHER *cipher) { - return (cipher->algorithm_enc & SSL_AES256) != 0; + c.id = 0x03000000L | value; + return reinterpret_cast(bsearch( + &c, kCiphers, kCiphersLen, sizeof(SSL_CIPHER), ssl_cipher_id_cmp)); } -int SSL_CIPHER_is_CHACHA20POLY1305(const SSL_CIPHER *cipher) { - return (cipher->algorithm_enc & SSL_CHACHA20POLY1305) != 0; +uint32_t SSL_CIPHER_get_id(const SSL_CIPHER *cipher) { return cipher->id; } + +int SSL_CIPHER_is_aead(const SSL_CIPHER *cipher) { + return (cipher->algorithm_mac & SSL_AEAD) != 0; } -int SSL_CIPHER_is_NULL(const SSL_CIPHER *cipher) { - return (cipher->algorithm_enc & SSL_eNULL) != 0; +int SSL_CIPHER_get_cipher_nid(const SSL_CIPHER *cipher) { + switch (cipher->algorithm_enc) { + case SSL_eNULL: + return NID_undef; + case SSL_3DES: + return NID_des_ede3_cbc; + case SSL_AES128: + return NID_aes_128_cbc; + case SSL_AES256: + return NID_aes_256_cbc; + case SSL_AES128GCM: + return NID_aes_128_gcm; + case SSL_AES256GCM: + return NID_aes_256_gcm; + case SSL_CHACHA20POLY1305: + return NID_chacha20_poly1305; + } + assert(0); + return NID_undef; } -int SSL_CIPHER_is_block_cipher(const SSL_CIPHER *cipher) { - return (cipher->algorithm_enc & SSL_eNULL) == 0 && - cipher->algorithm_mac != SSL_AEAD; +int SSL_CIPHER_get_digest_nid(const SSL_CIPHER *cipher) { + switch (cipher->algorithm_mac) { + case SSL_AEAD: + return NID_undef; + case SSL_SHA1: + return NID_sha1; + case SSL_SHA256: + return NID_sha256; + case 
SSL_SHA384: + return NID_sha384; + } + assert(0); + return NID_undef; } -int SSL_CIPHER_is_ECDSA(const SSL_CIPHER *cipher) { - return (cipher->algorithm_auth & SSL_aECDSA) != 0; +int SSL_CIPHER_get_kx_nid(const SSL_CIPHER *cipher) { + switch (cipher->algorithm_mkey) { + case SSL_kRSA: + return NID_kx_rsa; + case SSL_kECDHE: + return NID_kx_ecdhe; + case SSL_kPSK: + return NID_kx_psk; + case SSL_kGENERIC: + return NID_kx_any; + } + assert(0); + return NID_undef; } -int SSL_CIPHER_is_DHE(const SSL_CIPHER *cipher) { - return (cipher->algorithm_mkey & SSL_kDHE) != 0; +int SSL_CIPHER_get_auth_nid(const SSL_CIPHER *cipher) { + switch (cipher->algorithm_auth) { + case SSL_aRSA: + return NID_auth_rsa; + case SSL_aECDSA: + return NID_auth_ecdsa; + case SSL_aPSK: + return NID_auth_psk; + case SSL_aGENERIC: + return NID_auth_any; + } + assert(0); + return NID_undef; } -int SSL_CIPHER_is_ECDHE(const SSL_CIPHER *cipher) { - return (cipher->algorithm_mkey & SSL_kECDHE) != 0; +int SSL_CIPHER_get_prf_nid(const SSL_CIPHER *cipher) { + switch (cipher->algorithm_prf) { + case SSL_HANDSHAKE_MAC_DEFAULT: + return NID_md5_sha1; + case SSL_HANDSHAKE_MAC_SHA256: + return NID_sha256; + case SSL_HANDSHAKE_MAC_SHA384: + return NID_sha384; + } + assert(0); + return NID_undef; } -int SSL_CIPHER_is_static_RSA(const SSL_CIPHER *cipher) { - return (cipher->algorithm_mkey & SSL_kRSA) != 0; +int SSL_CIPHER_is_block_cipher(const SSL_CIPHER *cipher) { + return (cipher->algorithm_enc & SSL_eNULL) == 0 && + cipher->algorithm_mac != SSL_AEAD; } uint16_t SSL_CIPHER_get_min_version(const SSL_CIPHER *cipher) { @@ -1485,8 +1526,8 @@ uint16_t SSL_CIPHER_get_min_version(const SSL_CIPHER *cipher) { } if (cipher->algorithm_prf != SSL_HANDSHAKE_MAC_DEFAULT) { - /* Cipher suites before TLS 1.2 use the default PRF, while all those added - * afterwards specify a particular hash. */ + // Cipher suites before TLS 1.2 use the default PRF, while all those added + // afterwards specify a particular hash. 
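A minimal restatement of that version floor (toy_min_version is an illustrative name; 0x0300 and 0x0303 are the standard SSLv3 and TLS 1.2 version codes):

    #include <cstdint>

    static uint16_t toy_min_version(bool has_explicit_prf_hash) {
      if (has_explicit_prf_hash) {
        return 0x0303;  // suites with a SHA-256/SHA-384 PRF start at TLS 1.2
      }
      return 0x0300;    // default-PRF suites go back to SSLv3
    }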
return TLS1_2_VERSION; } return SSL3_VERSION; @@ -1500,7 +1541,7 @@ uint16_t SSL_CIPHER_get_max_version(const SSL_CIPHER *cipher) { return TLS1_2_VERSION; } -/* return the actual cipher being used */ +// return the actual cipher being used const char *SSL_CIPHER_get_name(const SSL_CIPHER *cipher) { if (cipher != NULL) { return cipher->name; @@ -1509,6 +1550,10 @@ const char *SSL_CIPHER_get_name(const SSL_CIPHER *cipher) { return "(NONE)"; } +const char *SSL_CIPHER_standard_name(const SSL_CIPHER *cipher) { + return cipher->standard_name; +} + const char *SSL_CIPHER_get_kx_name(const SSL_CIPHER *cipher) { if (cipher == NULL) { return ""; @@ -1518,15 +1563,6 @@ const char *SSL_CIPHER_get_kx_name(const SSL_CIPHER *cipher) { case SSL_kRSA: return "RSA"; - case SSL_kDHE: - switch (cipher->algorithm_auth) { - case SSL_aRSA: - return "DHE_RSA"; - default: - assert(0); - return "UNKNOWN"; - } - case SSL_kECDHE: switch (cipher->algorithm_auth) { case SSL_aECDSA: @@ -1554,79 +1590,12 @@ const char *SSL_CIPHER_get_kx_name(const SSL_CIPHER *cipher) { } } -static const char *ssl_cipher_get_enc_name(const SSL_CIPHER *cipher) { - switch (cipher->algorithm_enc) { - case SSL_3DES: - return "3DES_EDE_CBC"; - case SSL_AES128: - return "AES_128_CBC"; - case SSL_AES256: - return "AES_256_CBC"; - case SSL_AES128GCM: - return "AES_128_GCM"; - case SSL_AES256GCM: - return "AES_256_GCM"; - case SSL_CHACHA20POLY1305: - return "CHACHA20_POLY1305"; - break; - default: - assert(0); - return "UNKNOWN"; - } -} - -static const char *ssl_cipher_get_prf_name(const SSL_CIPHER *cipher) { - switch (cipher->algorithm_prf) { - case SSL_HANDSHAKE_MAC_DEFAULT: - /* Before TLS 1.2, the PRF component is the hash used in the HMAC, which - * is SHA-1 for all supported ciphers. */ - assert(cipher->algorithm_mac == SSL_SHA1); - return "SHA"; - case SSL_HANDSHAKE_MAC_SHA256: - return "SHA256"; - case SSL_HANDSHAKE_MAC_SHA384: - return "SHA384"; - } - assert(0); - return "UNKNOWN"; -} - char *SSL_CIPHER_get_rfc_name(const SSL_CIPHER *cipher) { if (cipher == NULL) { return NULL; } - const char *kx_name = SSL_CIPHER_get_kx_name(cipher); - const char *enc_name = ssl_cipher_get_enc_name(cipher); - const char *prf_name = ssl_cipher_get_prf_name(cipher); - - /* The final name is TLS_{kx_name}_WITH_{enc_name}_{prf_name} or - * TLS_{enc_name}_{prf_name} depending on whether the cipher is AEAD-only. 
*/ - size_t len = 4 + strlen(enc_name) + 1 + strlen(prf_name) + 1; - - if (cipher->algorithm_mkey != SSL_kGENERIC) { - len += strlen(kx_name) + 6; - } - - char *ret = OPENSSL_malloc(len); - if (ret == NULL) { - return NULL; - } - - if (BUF_strlcpy(ret, "TLS_", len) >= len || - (cipher->algorithm_mkey != SSL_kGENERIC && - (BUF_strlcat(ret, kx_name, len) >= len || - BUF_strlcat(ret, "_WITH_", len) >= len)) || - BUF_strlcat(ret, enc_name, len) >= len || - BUF_strlcat(ret, "_", len) >= len || - BUF_strlcat(ret, prf_name, len) >= len) { - assert(0); - OPENSSL_free(ret); - return NULL; - } - - assert(strlen(ret) + 1 == len); - return ret; + return OPENSSL_strdup(SSL_CIPHER_standard_name(cipher)); } int SSL_CIPHER_get_bits(const SSL_CIPHER *cipher, int *out_alg_bits) { @@ -1686,10 +1655,6 @@ const char *SSL_CIPHER_description(const SSL_CIPHER *cipher, char *buf, kx = "RSA"; break; - case SSL_kDHE: - kx = "DH"; - break; - case SSL_kECDHE: kx = "ECDH"; break; @@ -1786,7 +1751,7 @@ const char *SSL_CIPHER_description(const SSL_CIPHER *cipher, char *buf, if (buf == NULL) { len = 128; - buf = OPENSSL_malloc(len); + buf = (char *)OPENSSL_malloc(len); if (buf == NULL) { return NULL; } @@ -1803,58 +1768,10 @@ const char *SSL_CIPHER_get_version(const SSL_CIPHER *cipher) { return "TLSv1/SSLv3"; } -COMP_METHOD *SSL_COMP_get_compression_methods(void) { return NULL; } +STACK_OF(SSL_COMP) *SSL_COMP_get_compression_methods(void) { return NULL; } int SSL_COMP_add_compression_method(int id, COMP_METHOD *cm) { return 1; } const char *SSL_COMP_get_name(const COMP_METHOD *comp) { return NULL; } void SSL_COMP_free_compression_methods(void) {} - -int ssl_cipher_get_key_type(const SSL_CIPHER *cipher) { - uint32_t alg_a = cipher->algorithm_auth; - - if (alg_a & SSL_aECDSA) { - return EVP_PKEY_EC; - } else if (alg_a & SSL_aRSA) { - return EVP_PKEY_RSA; - } - - return EVP_PKEY_NONE; -} - -int ssl_cipher_uses_certificate_auth(const SSL_CIPHER *cipher) { - return (cipher->algorithm_auth & SSL_aCERT) != 0; -} - -int ssl_cipher_requires_server_key_exchange(const SSL_CIPHER *cipher) { - /* Ephemeral Diffie-Hellman key exchanges require a ServerKeyExchange. */ - if (cipher->algorithm_mkey & SSL_kDHE || - cipher->algorithm_mkey & SSL_kECDHE) { - return 1; - } - - /* It is optional in all others. */ - return 0; -} - -size_t ssl_cipher_get_record_split_len(const SSL_CIPHER *cipher) { - size_t block_size; - switch (cipher->algorithm_enc) { - case SSL_3DES: - block_size = 8; - break; - case SSL_AES128: - case SSL_AES256: - block_size = 16; - break; - default: - return 0; - } - - /* All supported TLS 1.0 ciphers use SHA-1. */ - assert(cipher->algorithm_mac == SSL_SHA1); - size_t ret = 1 + SHA_DIGEST_LENGTH; - ret += block_size - (ret % block_size); - return ret; -} diff --git a/Sources/BoringSSL/ssl/ssl_ecdh.c b/Sources/BoringSSL/ssl/ssl_ecdh.c deleted file mode 100644 index f49d5661a..000000000 --- a/Sources/BoringSSL/ssl/ssl_ecdh.c +++ /dev/null @@ -1,465 +0,0 @@ -/* Copyright (c) 2015, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "internal.h" -#include "../crypto/internal.h" - - -/* |EC_POINT| implementation. */ - -static void ssl_ec_point_cleanup(SSL_ECDH_CTX *ctx) { - BIGNUM *private_key = (BIGNUM *)ctx->data; - BN_clear_free(private_key); -} - -static int ssl_ec_point_offer(SSL_ECDH_CTX *ctx, CBB *out) { - assert(ctx->data == NULL); - BIGNUM *private_key = BN_new(); - if (private_key == NULL) { - return 0; - } - ctx->data = private_key; - - /* Set up a shared |BN_CTX| for all operations. */ - BN_CTX *bn_ctx = BN_CTX_new(); - if (bn_ctx == NULL) { - return 0; - } - BN_CTX_start(bn_ctx); - - int ret = 0; - EC_POINT *public_key = NULL; - EC_GROUP *group = EC_GROUP_new_by_curve_name(ctx->method->nid); - if (group == NULL) { - goto err; - } - - /* Generate a private key. */ - if (!BN_rand_range_ex(private_key, 1, EC_GROUP_get0_order(group))) { - goto err; - } - - /* Compute the corresponding public key and serialize it. */ - public_key = EC_POINT_new(group); - if (public_key == NULL || - !EC_POINT_mul(group, public_key, private_key, NULL, NULL, bn_ctx) || - !EC_POINT_point2cbb(out, group, public_key, POINT_CONVERSION_UNCOMPRESSED, - bn_ctx)) { - goto err; - } - - ret = 1; - -err: - EC_GROUP_free(group); - EC_POINT_free(public_key); - BN_CTX_end(bn_ctx); - BN_CTX_free(bn_ctx); - return ret; -} - -static int ssl_ec_point_finish(SSL_ECDH_CTX *ctx, uint8_t **out_secret, - size_t *out_secret_len, uint8_t *out_alert, - const uint8_t *peer_key, size_t peer_key_len) { - BIGNUM *private_key = (BIGNUM *)ctx->data; - assert(private_key != NULL); - *out_alert = SSL_AD_INTERNAL_ERROR; - - /* Set up a shared |BN_CTX| for all operations. */ - BN_CTX *bn_ctx = BN_CTX_new(); - if (bn_ctx == NULL) { - return 0; - } - BN_CTX_start(bn_ctx); - - int ret = 0; - EC_GROUP *group = EC_GROUP_new_by_curve_name(ctx->method->nid); - EC_POINT *peer_point = NULL, *result = NULL; - uint8_t *secret = NULL; - if (group == NULL) { - goto err; - } - - /* Compute the x-coordinate of |peer_key| * |private_key|. */ - peer_point = EC_POINT_new(group); - result = EC_POINT_new(group); - if (peer_point == NULL || result == NULL) { - goto err; - } - BIGNUM *x = BN_CTX_get(bn_ctx); - if (x == NULL) { - goto err; - } - if (!EC_POINT_oct2point(group, peer_point, peer_key, peer_key_len, bn_ctx)) { - *out_alert = SSL_AD_DECODE_ERROR; - goto err; - } - if (!EC_POINT_mul(group, result, NULL, peer_point, private_key, bn_ctx) || - !EC_POINT_get_affine_coordinates_GFp(group, result, x, NULL, bn_ctx)) { - goto err; - } - - /* Encode the x-coordinate left-padded with zeros. 
*/ - size_t secret_len = (EC_GROUP_get_degree(group) + 7) / 8; - secret = OPENSSL_malloc(secret_len); - if (secret == NULL || !BN_bn2bin_padded(secret, secret_len, x)) { - goto err; - } - - *out_secret = secret; - *out_secret_len = secret_len; - secret = NULL; - ret = 1; - -err: - EC_GROUP_free(group); - EC_POINT_free(peer_point); - EC_POINT_free(result); - BN_CTX_end(bn_ctx); - BN_CTX_free(bn_ctx); - OPENSSL_free(secret); - return ret; -} - -static int ssl_ec_point_accept(SSL_ECDH_CTX *ctx, CBB *out_public_key, - uint8_t **out_secret, size_t *out_secret_len, - uint8_t *out_alert, const uint8_t *peer_key, - size_t peer_key_len) { - *out_alert = SSL_AD_INTERNAL_ERROR; - if (!ssl_ec_point_offer(ctx, out_public_key) || - !ssl_ec_point_finish(ctx, out_secret, out_secret_len, out_alert, peer_key, - peer_key_len)) { - return 0; - } - return 1; -} - -/* X25119 implementation. */ - -static void ssl_x25519_cleanup(SSL_ECDH_CTX *ctx) { - if (ctx->data == NULL) { - return; - } - OPENSSL_cleanse(ctx->data, 32); - OPENSSL_free(ctx->data); -} - -static int ssl_x25519_offer(SSL_ECDH_CTX *ctx, CBB *out) { - assert(ctx->data == NULL); - - ctx->data = OPENSSL_malloc(32); - if (ctx->data == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return 0; - } - uint8_t public_key[32]; - X25519_keypair(public_key, (uint8_t *)ctx->data); - return CBB_add_bytes(out, public_key, sizeof(public_key)); -} - -static int ssl_x25519_finish(SSL_ECDH_CTX *ctx, uint8_t **out_secret, - size_t *out_secret_len, uint8_t *out_alert, - const uint8_t *peer_key, size_t peer_key_len) { - assert(ctx->data != NULL); - *out_alert = SSL_AD_INTERNAL_ERROR; - - uint8_t *secret = OPENSSL_malloc(32); - if (secret == NULL) { - return 0; - } - - if (peer_key_len != 32 || - !X25519(secret, (uint8_t *)ctx->data, peer_key)) { - OPENSSL_free(secret); - *out_alert = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECPOINT); - return 0; - } - - *out_secret = secret; - *out_secret_len = 32; - return 1; -} - -static int ssl_x25519_accept(SSL_ECDH_CTX *ctx, CBB *out_public_key, - uint8_t **out_secret, size_t *out_secret_len, - uint8_t *out_alert, const uint8_t *peer_key, - size_t peer_key_len) { - *out_alert = SSL_AD_INTERNAL_ERROR; - if (!ssl_x25519_offer(ctx, out_public_key) || - !ssl_x25519_finish(ctx, out_secret, out_secret_len, out_alert, peer_key, - peer_key_len)) { - return 0; - } - return 1; -} - - -/* Legacy DHE-based implementation. */ - -static void ssl_dhe_cleanup(SSL_ECDH_CTX *ctx) { - DH_free((DH *)ctx->data); -} - -static int ssl_dhe_offer(SSL_ECDH_CTX *ctx, CBB *out) { - DH *dh = (DH *)ctx->data; - /* The group must have been initialized already, but not the key. */ - assert(dh != NULL); - assert(dh->priv_key == NULL); - - /* Due to a bug in yaSSL, the public key must be zero padded to the size of - * the prime. 
*/ - return DH_generate_key(dh) && - BN_bn2cbb_padded(out, BN_num_bytes(dh->p), dh->pub_key); -} - -static int ssl_dhe_finish(SSL_ECDH_CTX *ctx, uint8_t **out_secret, - size_t *out_secret_len, uint8_t *out_alert, - const uint8_t *peer_key, size_t peer_key_len) { - DH *dh = (DH *)ctx->data; - assert(dh != NULL); - assert(dh->priv_key != NULL); - *out_alert = SSL_AD_INTERNAL_ERROR; - - int secret_len = 0; - uint8_t *secret = NULL; - BIGNUM *peer_point = BN_bin2bn(peer_key, peer_key_len, NULL); - if (peer_point == NULL) { - goto err; - } - - secret = OPENSSL_malloc(DH_size(dh)); - if (secret == NULL) { - goto err; - } - secret_len = DH_compute_key(secret, peer_point, dh); - if (secret_len <= 0) { - goto err; - } - - *out_secret = secret; - *out_secret_len = (size_t)secret_len; - BN_free(peer_point); - return 1; - -err: - if (secret_len > 0) { - OPENSSL_cleanse(secret, (size_t)secret_len); - } - OPENSSL_free(secret); - BN_free(peer_point); - return 0; -} - -static int ssl_dhe_accept(SSL_ECDH_CTX *ctx, CBB *out_public_key, - uint8_t **out_secret, size_t *out_secret_len, - uint8_t *out_alert, const uint8_t *peer_key, - size_t peer_key_len) { - *out_alert = SSL_AD_INTERNAL_ERROR; - if (!ssl_dhe_offer(ctx, out_public_key) || - !ssl_dhe_finish(ctx, out_secret, out_secret_len, out_alert, peer_key, - peer_key_len)) { - return 0; - } - return 1; -} - -static const SSL_ECDH_METHOD kDHEMethod = { - NID_undef, 0, "", - ssl_dhe_cleanup, - ssl_dhe_offer, - ssl_dhe_accept, - ssl_dhe_finish, - CBS_get_u16_length_prefixed, - CBB_add_u16_length_prefixed, -}; - -static const SSL_ECDH_METHOD kMethods[] = { - { - NID_X9_62_prime256v1, - SSL_CURVE_SECP256R1, - "P-256", - ssl_ec_point_cleanup, - ssl_ec_point_offer, - ssl_ec_point_accept, - ssl_ec_point_finish, - CBS_get_u8_length_prefixed, - CBB_add_u8_length_prefixed, - }, - { - NID_secp384r1, - SSL_CURVE_SECP384R1, - "P-384", - ssl_ec_point_cleanup, - ssl_ec_point_offer, - ssl_ec_point_accept, - ssl_ec_point_finish, - CBS_get_u8_length_prefixed, - CBB_add_u8_length_prefixed, - }, - { - NID_secp521r1, - SSL_CURVE_SECP521R1, - "P-521", - ssl_ec_point_cleanup, - ssl_ec_point_offer, - ssl_ec_point_accept, - ssl_ec_point_finish, - CBS_get_u8_length_prefixed, - CBB_add_u8_length_prefixed, - }, - { - NID_X25519, - SSL_CURVE_X25519, - "X25519", - ssl_x25519_cleanup, - ssl_x25519_offer, - ssl_x25519_accept, - ssl_x25519_finish, - CBS_get_u8_length_prefixed, - CBB_add_u8_length_prefixed, - }, -}; - -static const SSL_ECDH_METHOD *method_from_group_id(uint16_t group_id) { - for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMethods); i++) { - if (kMethods[i].group_id == group_id) { - return &kMethods[i]; - } - } - return NULL; -} - -static const SSL_ECDH_METHOD *method_from_nid(int nid) { - for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMethods); i++) { - if (kMethods[i].nid == nid) { - return &kMethods[i]; - } - } - return NULL; -} - -static const SSL_ECDH_METHOD *method_from_name(const char *name, size_t len) { - for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kMethods); i++) { - if (len == strlen(kMethods[i].name) && - !strncmp(kMethods[i].name, name, len)) { - return &kMethods[i]; - } - } - return NULL; -} - -const char* SSL_get_curve_name(uint16_t group_id) { - const SSL_ECDH_METHOD *method = method_from_group_id(group_id); - if (method == NULL) { - return NULL; - } - return method->name; -} - -int ssl_nid_to_group_id(uint16_t *out_group_id, int nid) { - const SSL_ECDH_METHOD *method = method_from_nid(nid); - if (method == NULL) { - return 0; - } - *out_group_id = method->group_id; - 
return 1; -} - -int ssl_name_to_group_id(uint16_t *out_group_id, const char *name, size_t len) { - const SSL_ECDH_METHOD *method = method_from_name(name, len); - if (method == NULL) { - return 0; - } - *out_group_id = method->group_id; - return 1; -} - -int SSL_ECDH_CTX_init(SSL_ECDH_CTX *ctx, uint16_t group_id) { - SSL_ECDH_CTX_cleanup(ctx); - - const SSL_ECDH_METHOD *method = method_from_group_id(group_id); - if (method == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_ELLIPTIC_CURVE); - return 0; - } - ctx->method = method; - return 1; -} - -void SSL_ECDH_CTX_init_for_dhe(SSL_ECDH_CTX *ctx, DH *params) { - SSL_ECDH_CTX_cleanup(ctx); - - ctx->method = &kDHEMethod; - ctx->data = params; -} - -void SSL_ECDH_CTX_cleanup(SSL_ECDH_CTX *ctx) { - if (ctx->method == NULL) { - return; - } - ctx->method->cleanup(ctx); - ctx->method = NULL; - ctx->data = NULL; -} - -uint16_t SSL_ECDH_CTX_get_id(const SSL_ECDH_CTX *ctx) { - return ctx->method->group_id; -} - -int SSL_ECDH_CTX_get_key(SSL_ECDH_CTX *ctx, CBS *cbs, CBS *out) { - if (ctx->method == NULL) { - return 0; - } - return ctx->method->get_key(cbs, out); -} - -int SSL_ECDH_CTX_add_key(SSL_ECDH_CTX *ctx, CBB *cbb, CBB *out_contents) { - if (ctx->method == NULL) { - return 0; - } - return ctx->method->add_key(cbb, out_contents); -} - -int SSL_ECDH_CTX_offer(SSL_ECDH_CTX *ctx, CBB *out_public_key) { - return ctx->method->offer(ctx, out_public_key); -} - -int SSL_ECDH_CTX_accept(SSL_ECDH_CTX *ctx, CBB *out_public_key, - uint8_t **out_secret, size_t *out_secret_len, - uint8_t *out_alert, const uint8_t *peer_key, - size_t peer_key_len) { - return ctx->method->accept(ctx, out_public_key, out_secret, out_secret_len, - out_alert, peer_key, peer_key_len); -} - -int SSL_ECDH_CTX_finish(SSL_ECDH_CTX *ctx, uint8_t **out_secret, - size_t *out_secret_len, uint8_t *out_alert, - const uint8_t *peer_key, size_t peer_key_len) { - return ctx->method->finish(ctx, out_secret, out_secret_len, out_alert, - peer_key, peer_key_len); -} diff --git a/Sources/BoringSSL/ssl/ssl_file.c b/Sources/BoringSSL/ssl/ssl_file.cc similarity index 93% rename from Sources/BoringSSL/ssl/ssl_file.c rename to Sources/BoringSSL/ssl/ssl_file.cc index 59351a32f..bafa64ab0 100644 --- a/Sources/BoringSSL/ssl/ssl_file.c +++ b/Sources/BoringSSL/ssl/ssl_file.cc @@ -128,8 +128,8 @@ static int xname_cmp(const X509_NAME **a, const X509_NAME **b) { return X509_NAME_cmp(*a, *b); } -/* TODO(davidben): Is there any reason this doesn't call - * |SSL_add_file_cert_subjects_to_stack|? */ +// TODO(davidben): Is there any reason this doesn't call +// |SSL_add_file_cert_subjects_to_stack|? STACK_OF(X509_NAME) *SSL_load_client_CA_file(const char *file) { BIO *in; X509 *x = NULL; @@ -164,7 +164,7 @@ STACK_OF(X509_NAME) *SSL_load_client_CA_file(const char *file) { goto err; } - /* Check for duplicates. */ + // Check for duplicates. if (sk_X509_NAME_find(sk, NULL, xn)) { continue; } @@ -222,7 +222,7 @@ int SSL_add_file_cert_subjects_to_stack(STACK_OF(X509_NAME) *stack, goto err; } - /* Check for duplicates. */ + // Check for duplicates. if (sk_X509_NAME_find(stack, NULL, xn)) { continue; } @@ -493,15 +493,15 @@ int SSL_CTX_use_PrivateKey_file(SSL_CTX *ctx, const char *file, int type) { return ret; } -/* Read a file that contains our certificate in "PEM" format, possibly followed - * by a sequence of CA certificates that should be sent to the peer in the - * Certificate message. 
*/ +// Read a file that contains our certificate in "PEM" format, possibly followed +// by a sequence of CA certificates that should be sent to the peer in the +// Certificate message. int SSL_CTX_use_certificate_chain_file(SSL_CTX *ctx, const char *file) { BIO *in; int ret = 0; X509 *x = NULL; - ERR_clear_error(); /* clear error stack for SSL_CTX_use_certificate() */ + ERR_clear_error(); // clear error stack for SSL_CTX_use_certificate() in = BIO_new(BIO_s_file()); if (in == NULL) { @@ -524,12 +524,12 @@ int SSL_CTX_use_certificate_chain_file(SSL_CTX *ctx, const char *file) { ret = SSL_CTX_use_certificate(ctx, x); if (ERR_peek_error() != 0) { - ret = 0; /* Key/certificate mismatch doesn't imply ret==0 ... */ + ret = 0; // Key/certificate mismatch doesn't imply ret==0 ... } if (ret) { - /* If we could set up our certificate, now proceed to the CA - * certificates. */ + // If we could set up our certificate, now proceed to the CA + // certificates. X509 *ca; int r; uint32_t err; @@ -545,18 +545,18 @@ int SSL_CTX_use_certificate_chain_file(SSL_CTX *ctx, const char *file) { ret = 0; goto end; } - /* Note that we must not free r if it was successfully added to the chain - * (while we must free the main certificate, since its reference count is - * increased by SSL_CTX_use_certificate). */ + // Note that we must not free r if it was successfully added to the chain + // (while we must free the main certificate, since its reference count is + // increased by SSL_CTX_use_certificate). } - /* When the while loop ends, it's usually just EOF. */ + // When the while loop ends, it's usually just EOF. err = ERR_peek_last_error(); if (ERR_GET_LIB(err) == ERR_LIB_PEM && ERR_GET_REASON(err) == PEM_R_NO_START_LINE) { ERR_clear_error(); } else { - ret = 0; /* some real error */ + ret = 0; // some real error } } @@ -570,6 +570,14 @@ void SSL_CTX_set_default_passwd_cb(SSL_CTX *ctx, pem_password_cb *cb) { ctx->default_passwd_callback = cb; } +pem_password_cb *SSL_CTX_get_default_passwd_cb(const SSL_CTX *ctx) { + return ctx->default_passwd_callback; +} + void SSL_CTX_set_default_passwd_cb_userdata(SSL_CTX *ctx, void *data) { ctx->default_passwd_callback_userdata = data; } + +void *SSL_CTX_get_default_passwd_cb_userdata(const SSL_CTX *ctx) { + return ctx->default_passwd_callback_userdata; +} diff --git a/Sources/BoringSSL/ssl/ssl_key_share.cc b/Sources/BoringSSL/ssl/ssl_key_share.cc new file mode 100644 index 000000000..a5ae57813 --- /dev/null +++ b/Sources/BoringSSL/ssl/ssl_key_share.cc @@ -0,0 +1,250 @@ +/* Copyright (c) 2015, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" +#include "../crypto/internal.h" + + +namespace bssl { + +namespace { + +class ECKeyShare : public SSLKeyShare { + public: + ECKeyShare(int nid, uint16_t group_id) : nid_(nid), group_id_(group_id) {} + ~ECKeyShare() override {} + + uint16_t GroupID() const override { return group_id_; } + + bool Offer(CBB *out) override { + assert(!private_key_); + // Set up a shared |BN_CTX| for all operations. + UniquePtr bn_ctx(BN_CTX_new()); + if (!bn_ctx) { + return false; + } + BN_CTXScope scope(bn_ctx.get()); + + // Generate a private key. + UniquePtr group(EC_GROUP_new_by_curve_name(nid_)); + private_key_.reset(BN_new()); + if (!group || !private_key_ || + !BN_rand_range_ex(private_key_.get(), 1, + EC_GROUP_get0_order(group.get()))) { + return false; + } + + // Compute the corresponding public key and serialize it. + UniquePtr public_key(EC_POINT_new(group.get())); + if (!public_key || + !EC_POINT_mul(group.get(), public_key.get(), private_key_.get(), NULL, + NULL, bn_ctx.get()) || + !EC_POINT_point2cbb(out, group.get(), public_key.get(), + POINT_CONVERSION_UNCOMPRESSED, bn_ctx.get())) { + return false; + } + + return true; + } + + bool Finish(Array *out_secret, uint8_t *out_alert, + Span peer_key) override { + assert(private_key_); + *out_alert = SSL_AD_INTERNAL_ERROR; + + // Set up a shared |BN_CTX| for all operations. + UniquePtr bn_ctx(BN_CTX_new()); + if (!bn_ctx) { + return false; + } + BN_CTXScope scope(bn_ctx.get()); + + UniquePtr group(EC_GROUP_new_by_curve_name(nid_)); + if (!group) { + return false; + } + + UniquePtr peer_point(EC_POINT_new(group.get())); + UniquePtr result(EC_POINT_new(group.get())); + BIGNUM *x = BN_CTX_get(bn_ctx.get()); + if (!peer_point || !result || !x) { + return false; + } + + if (!EC_POINT_oct2point(group.get(), peer_point.get(), peer_key.data(), + peer_key.size(), bn_ctx.get())) { + *out_alert = SSL_AD_DECODE_ERROR; + return false; + } + + // Compute the x-coordinate of |peer_key| * |private_key_|. + if (!EC_POINT_mul(group.get(), result.get(), NULL, peer_point.get(), + private_key_.get(), bn_ctx.get()) || + !EC_POINT_get_affine_coordinates_GFp(group.get(), result.get(), x, NULL, + bn_ctx.get())) { + return false; + } + + // Encode the x-coordinate left-padded with zeros. 
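The padding requirement can be shown with a self-contained sketch: a fixed-width big-endian encoding, left-padded with zero bytes, which is the property BN_bn2bin_padded provides for the field-sized x-coordinate below. toy_be_padded and the 64-bit input are illustrative only.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Encode |x| big-endian into exactly |width| bytes, zero-padded on the left.
    // Assumes |x| fits in |width| bytes.
    static std::vector<uint8_t> toy_be_padded(uint64_t x, std::size_t width) {
      std::vector<uint8_t> out(width, 0);
      for (std::size_t i = 0; i < width && i < sizeof(x); i++) {
        out[width - 1 - i] = static_cast<uint8_t>(x >> (8 * i));
      }
      return out;
    }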
+ Array secret; + if (!secret.Init((EC_GROUP_get_degree(group.get()) + 7) / 8) || + !BN_bn2bin_padded(secret.data(), secret.size(), x)) { + return false; + } + + *out_secret = std::move(secret); + return true; + } + + private: + UniquePtr private_key_; + int nid_; + uint16_t group_id_; +}; + +class X25519KeyShare : public SSLKeyShare { + public: + X25519KeyShare() {} + ~X25519KeyShare() override { + OPENSSL_cleanse(private_key_, sizeof(private_key_)); + } + + uint16_t GroupID() const override { return SSL_CURVE_X25519; } + + bool Offer(CBB *out) override { + uint8_t public_key[32]; + X25519_keypair(public_key, private_key_); + return !!CBB_add_bytes(out, public_key, sizeof(public_key)); + } + + bool Finish(Array *out_secret, uint8_t *out_alert, + Span peer_key) override { + *out_alert = SSL_AD_INTERNAL_ERROR; + + Array secret; + if (!secret.Init(32)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return false; + } + + if (peer_key.size() != 32 || + !X25519(secret.data(), private_key_, peer_key.data())) { + *out_alert = SSL_AD_DECODE_ERROR; + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ECPOINT); + return false; + } + + *out_secret = std::move(secret); + return true; + } + + private: + uint8_t private_key_[32]; +}; + +CONSTEXPR_ARRAY struct { + int nid; + uint16_t group_id; + const char name[8], alias[11]; +} kNamedGroups[] = { + {NID_secp224r1, SSL_CURVE_SECP224R1, "P-224", "secp224r1"}, + {NID_X9_62_prime256v1, SSL_CURVE_SECP256R1, "P-256", "prime256v1"}, + {NID_secp384r1, SSL_CURVE_SECP384R1, "P-384", "secp384r1"}, + {NID_secp521r1, SSL_CURVE_SECP521R1, "P-521", "secp521r1"}, + {NID_X25519, SSL_CURVE_X25519, "X25519", "x25519"}, +}; + +} // namespace + +UniquePtr SSLKeyShare::Create(uint16_t group_id) { + switch (group_id) { + case SSL_CURVE_SECP224R1: + return UniquePtr( + New(NID_secp224r1, SSL_CURVE_SECP224R1)); + case SSL_CURVE_SECP256R1: + return UniquePtr( + New(NID_X9_62_prime256v1, SSL_CURVE_SECP256R1)); + case SSL_CURVE_SECP384R1: + return UniquePtr( + New(NID_secp384r1, SSL_CURVE_SECP384R1)); + case SSL_CURVE_SECP521R1: + return UniquePtr( + New(NID_secp521r1, SSL_CURVE_SECP521R1)); + case SSL_CURVE_X25519: + return UniquePtr(New()); + default: + return nullptr; + } +} + +bool SSLKeyShare::Accept(CBB *out_public_key, Array *out_secret, + uint8_t *out_alert, Span peer_key) { + *out_alert = SSL_AD_INTERNAL_ERROR; + return Offer(out_public_key) && + Finish(out_secret, out_alert, peer_key); +} + +int ssl_nid_to_group_id(uint16_t *out_group_id, int nid) { + for (const auto &group : kNamedGroups) { + if (group.nid == nid) { + *out_group_id = group.group_id; + return 1; + } + } + return 0; +} + +int ssl_name_to_group_id(uint16_t *out_group_id, const char *name, size_t len) { + for (const auto &group : kNamedGroups) { + if (len == strlen(group.name) && + !strncmp(group.name, name, len)) { + *out_group_id = group.group_id; + return 1; + } + if (len == strlen(group.alias) && + !strncmp(group.alias, name, len)) { + *out_group_id = group.group_id; + return 1; + } + } + return 0; +} + +} // namespace bssl + +using namespace bssl; + +const char* SSL_get_curve_name(uint16_t group_id) { + for (const auto &group : kNamedGroups) { + if (group.group_id == group_id) { + return group.name; + } + } + return nullptr; +} diff --git a/Sources/BoringSSL/ssl/ssl_lib.c b/Sources/BoringSSL/ssl/ssl_lib.cc similarity index 66% rename from Sources/BoringSSL/ssl/ssl_lib.c rename to Sources/BoringSSL/ssl/ssl_lib.cc index e37f9f944..122313fa9 100644 --- a/Sources/BoringSSL/ssl/ssl_lib.c +++ 
b/Sources/BoringSSL/ssl/ssl_lib.cc @@ -146,7 +146,6 @@ #include #include -#include #include #include #include @@ -163,22 +162,24 @@ #endif -/* |SSL_R_UNKNOWN_PROTOCOL| is no longer emitted, but continue to define it - * to avoid downstream churn. */ +namespace bssl { + +// |SSL_R_UNKNOWN_PROTOCOL| is no longer emitted, but continue to define it +// to avoid downstream churn. OPENSSL_DECLARE_ERROR_REASON(SSL, UNKNOWN_PROTOCOL) -/* The following errors are no longer emitted, but are used in nginx without - * #ifdefs. */ +// The following errors are no longer emitted, but are used in nginx without +// #ifdefs. OPENSSL_DECLARE_ERROR_REASON(SSL, BLOCK_CIPHER_PAD_IS_WRONG) OPENSSL_DECLARE_ERROR_REASON(SSL, NO_CIPHERS_SPECIFIED) -/* Some error codes are special. Ensure the make_errors.go script never - * regresses this. */ -OPENSSL_COMPILE_ASSERT(SSL_R_TLSV1_ALERT_NO_RENEGOTIATION == - SSL_AD_NO_RENEGOTIATION + SSL_AD_REASON_OFFSET, - ssl_alert_reason_code_mismatch); +// Some error codes are special. Ensure the make_errors.go script never +// regresses this. +static_assert(SSL_R_TLSV1_ALERT_NO_RENEGOTIATION == + SSL_AD_NO_RENEGOTIATION + SSL_AD_REASON_OFFSET, + "alert reason code mismatch"); -/* kMaxHandshakeSize is the maximum size, in bytes, of a handshake message. */ +// kMaxHandshakeSize is the maximum size, in bytes, of a handshake message. static const size_t kMaxHandshakeSize = (1u << 24) - 1; static CRYPTO_EX_DATA_CLASS g_ex_data_class_ssl = @@ -186,11 +187,298 @@ static CRYPTO_EX_DATA_CLASS g_ex_data_class_ssl = static CRYPTO_EX_DATA_CLASS g_ex_data_class_ssl_ctx = CRYPTO_EX_DATA_CLASS_INIT_WITH_APP_DATA; +bool CBBFinishArray(CBB *cbb, Array *out) { + uint8_t *ptr; + size_t len; + if (!CBB_finish(cbb, &ptr, &len)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return false; + } + out->Reset(ptr, len); + return true; +} + +void ssl_reset_error_state(SSL *ssl) { + // Functions which use |SSL_get_error| must reset I/O and error state on + // entry. 
+ ssl->s3->rwstate = SSL_NOTHING; + ERR_clear_error(); + ERR_clear_system_error(); +} + +void ssl_set_read_error(SSL* ssl) { + ssl->s3->read_shutdown = ssl_shutdown_error; + ssl->s3->read_error.reset(ERR_save_state()); +} + +static bool check_read_error(const SSL *ssl) { + if (ssl->s3->read_shutdown == ssl_shutdown_error) { + ERR_restore_state(ssl->s3->read_error.get()); + return false; + } + return true; +} + +int ssl_can_write(const SSL *ssl) { + return !SSL_in_init(ssl) || ssl->s3->hs->can_early_write; +} + +int ssl_can_read(const SSL *ssl) { + return !SSL_in_init(ssl) || ssl->s3->hs->can_early_read; +} + +ssl_open_record_t ssl_open_handshake(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, Span in) { + *out_consumed = 0; + if (!check_read_error(ssl)) { + *out_alert = 0; + return ssl_open_record_error; + } + auto ret = ssl->method->open_handshake(ssl, out_consumed, out_alert, in); + if (ret == ssl_open_record_error) { + ssl_set_read_error(ssl); + } + return ret; +} + +ssl_open_record_t ssl_open_change_cipher_spec(SSL *ssl, size_t *out_consumed, + uint8_t *out_alert, + Span in) { + *out_consumed = 0; + if (!check_read_error(ssl)) { + *out_alert = 0; + return ssl_open_record_error; + } + auto ret = + ssl->method->open_change_cipher_spec(ssl, out_consumed, out_alert, in); + if (ret == ssl_open_record_error) { + ssl_set_read_error(ssl); + } + return ret; +} + +ssl_open_record_t ssl_open_app_data(SSL *ssl, Span *out, + size_t *out_consumed, uint8_t *out_alert, + Span in) { + *out_consumed = 0; + if (!check_read_error(ssl)) { + *out_alert = 0; + return ssl_open_record_error; + } + auto ret = ssl->method->open_app_data(ssl, out, out_consumed, out_alert, in); + if (ret == ssl_open_record_error) { + ssl_set_read_error(ssl); + } + return ret; +} + +void ssl_cipher_preference_list_free( + struct ssl_cipher_preference_list_st *cipher_list) { + if (cipher_list == NULL) { + return; + } + sk_SSL_CIPHER_free(cipher_list->ciphers); + OPENSSL_free(cipher_list->in_group_flags); + OPENSSL_free(cipher_list); +} + +void ssl_update_cache(SSL_HANDSHAKE *hs, int mode) { + SSL *const ssl = hs->ssl; + SSL_CTX *ctx = ssl->session_ctx; + // Never cache sessions with empty session IDs. + if (ssl->s3->established_session->session_id_length == 0 || + ssl->s3->established_session->not_resumable || + (ctx->session_cache_mode & mode) != mode) { + return; + } + + // Clients never use the internal session cache. + int use_internal_cache = ssl->server && !(ctx->session_cache_mode & + SSL_SESS_CACHE_NO_INTERNAL_STORE); + + // A client may see new sessions on abbreviated handshakes if the server + // decides to renew the ticket. Once the handshake is completed, it should be + // inserted into the cache. + if (ssl->s3->established_session.get() != ssl->session || + (!ssl->server && hs->ticket_expected)) { + if (use_internal_cache) { + SSL_CTX_add_session(ctx, ssl->s3->established_session.get()); + } + if (ctx->new_session_cb != NULL) { + SSL_SESSION_up_ref(ssl->s3->established_session.get()); + if (!ctx->new_session_cb(ssl, ssl->s3->established_session.get())) { + // |new_session_cb|'s return value signals whether it took ownership. + SSL_SESSION_free(ssl->s3->established_session.get()); + } + } + } + + if (use_internal_cache && + !(ctx->session_cache_mode & SSL_SESS_CACHE_NO_AUTO_CLEAR)) { + // Automatically flush the internal session cache every 255 connections. 
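A toy version of that flush heuristic, with locking omitted (the real code guards the counter with ctx->lock); ToyFlushCounter is an illustrative name, not part of this patch:

    #include <cstdint>

    class ToyFlushCounter {
     public:
      // Returns true on every 255th recorded handshake, signalling that the
      // caller should flush expired sessions now.
      bool RecordHandshake() {
        if (++count_ >= 255) {
          count_ = 0;
          return true;
        }
        return false;
      }

     private:
      uint32_t count_ = 0;
    };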
+ int flush_cache = 0; + CRYPTO_MUTEX_lock_write(&ctx->lock); + ctx->handshakes_since_cache_flush++; + if (ctx->handshakes_since_cache_flush >= 255) { + flush_cache = 1; + ctx->handshakes_since_cache_flush = 0; + } + CRYPTO_MUTEX_unlock_write(&ctx->lock); + + if (flush_cache) { + struct OPENSSL_timeval now; + ssl_get_current_time(ssl, &now); + SSL_CTX_flush_sessions(ctx, now.tv_sec); + } + } +} + +static int cbb_add_hex(CBB *cbb, const uint8_t *in, size_t in_len) { + static const char hextable[] = "0123456789abcdef"; + uint8_t *out; + + if (!CBB_add_space(cbb, &out, in_len * 2)) { + return 0; + } + + for (size_t i = 0; i < in_len; i++) { + *(out++) = (uint8_t)hextable[in[i] >> 4]; + *(out++) = (uint8_t)hextable[in[i] & 0xf]; + } + + return 1; +} + +int ssl_log_secret(const SSL *ssl, const char *label, const uint8_t *secret, + size_t secret_len) { + if (ssl->ctx->keylog_callback == NULL) { + return 1; + } + + ScopedCBB cbb; + uint8_t *out; + size_t out_len; + if (!CBB_init(cbb.get(), strlen(label) + 1 + SSL3_RANDOM_SIZE * 2 + 1 + + secret_len * 2 + 1) || + !CBB_add_bytes(cbb.get(), (const uint8_t *)label, strlen(label)) || + !CBB_add_bytes(cbb.get(), (const uint8_t *)" ", 1) || + !cbb_add_hex(cbb.get(), ssl->s3->client_random, SSL3_RANDOM_SIZE) || + !CBB_add_bytes(cbb.get(), (const uint8_t *)" ", 1) || + !cbb_add_hex(cbb.get(), secret, secret_len) || + !CBB_add_u8(cbb.get(), 0 /* NUL */) || + !CBB_finish(cbb.get(), &out, &out_len)) { + return 0; + } + + ssl->ctx->keylog_callback(ssl, (const char *)out); + OPENSSL_free(out); + return 1; +} + +void ssl_do_info_callback(const SSL *ssl, int type, int value) { + void (*cb)(const SSL *ssl, int type, int value) = NULL; + if (ssl->info_callback != NULL) { + cb = ssl->info_callback; + } else if (ssl->ctx->info_callback != NULL) { + cb = ssl->ctx->info_callback; + } + + if (cb != NULL) { + cb(ssl, type, value); + } +} + +void ssl_do_msg_callback(SSL *ssl, int is_write, int content_type, + Span in) { + if (ssl->msg_callback == NULL) { + return; + } + + // |version| is zero when calling for |SSL3_RT_HEADER| and |SSL2_VERSION| for + // a V2ClientHello. + int version; + switch (content_type) { + case 0: + // V2ClientHello + version = SSL2_VERSION; + break; + case SSL3_RT_HEADER: + version = 0; + break; + default: + version = SSL_version(ssl); + } + + ssl->msg_callback(is_write, version, content_type, in.data(), in.size(), ssl, + ssl->msg_callback_arg); +} + +void ssl_get_current_time(const SSL *ssl, struct OPENSSL_timeval *out_clock) { + // TODO(martinkr): Change callers to |ssl_ctx_get_current_time| and drop the + // |ssl| arg from |current_time_cb| if possible. + ssl_ctx_get_current_time(ssl->ctx, out_clock); +} + +void ssl_ctx_get_current_time(const SSL_CTX *ctx, + struct OPENSSL_timeval *out_clock) { + if (ctx->current_time_cb != NULL) { + // TODO(davidben): Update current_time_cb to use OPENSSL_timeval. See + // https://crbug.com/boringssl/155. 
+ struct timeval clock; + ctx->current_time_cb(nullptr /* ssl */, &clock); + if (clock.tv_sec < 0) { + assert(0); + out_clock->tv_sec = 0; + out_clock->tv_usec = 0; + } else { + out_clock->tv_sec = (uint64_t)clock.tv_sec; + out_clock->tv_usec = (uint32_t)clock.tv_usec; + } + return; + } + +#if defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) + out_clock->tv_sec = 1234; + out_clock->tv_usec = 1234; +#elif defined(OPENSSL_WINDOWS) + struct _timeb time; + _ftime(&time); + if (time.time < 0) { + assert(0); + out_clock->tv_sec = 0; + out_clock->tv_usec = 0; + } else { + out_clock->tv_sec = time.time; + out_clock->tv_usec = time.millitm * 1000; + } +#else + struct timeval clock; + gettimeofday(&clock, NULL); + if (clock.tv_sec < 0) { + assert(0); + out_clock->tv_sec = 0; + out_clock->tv_usec = 0; + } else { + out_clock->tv_sec = (uint64_t)clock.tv_sec; + out_clock->tv_usec = (uint32_t)clock.tv_usec; + } +#endif +} + +} // namespace bssl + +using namespace bssl; + int SSL_library_init(void) { CRYPTO_library_init(); return 1; } +int OPENSSL_init_ssl(uint64_t opts, const OPENSSL_INIT_SETTINGS *settings) { + CRYPTO_library_init(); + return 1; +} + static uint32_t ssl_session_hash(const SSL_SESSION *sess) { const uint8_t *session_id = sess->session_id; @@ -210,11 +498,11 @@ static uint32_t ssl_session_hash(const SSL_SESSION *sess) { return hash; } -/* NB: If this function (or indeed the hash function which uses a sort of - * coarser function than this one) is changed, ensure - * SSL_CTX_has_matching_session_id() is checked accordingly. It relies on being - * able to construct an SSL_SESSION that will collide with any existing session - * with a matching session ID. */ +// NB: If this function (or indeed the hash function which uses a sort of +// coarser function than this one) is changed, ensure +// SSL_CTX_has_matching_session_id() is checked accordingly. It relies on being +// able to construct an SSL_SESSION that will collide with any existing session +// with a matching session ID. 
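// A minimal consumer sketch (illustrative, not part of this patch) for the
// NSS-style key-log lines that |ssl_log_secret| above assembles as
// "<label> <client_random_hex> <secret_hex>". The registration call is the
// real |SSL_CTX_set_keylog_callback| API; the output path is a placeholder.
#include <stdio.h>
#include <openssl/ssl.h>
static void keylog_to_file(const SSL *ssl, const char *line) {
  (void)ssl;
  FILE *file = fopen("/tmp/sslkeylog.txt", "a");  // placeholder path
  if (file != NULL) {
    fprintf(file, "%s\n", line);  // one logged secret per line
    fclose(file);
  }
}
// Registered once per context: SSL_CTX_set_keylog_callback(ctx, keylog_to_file);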
static int ssl_session_cmp(const SSL_SESSION *a, const SSL_SESSION *b) { if (a->ssl_version != b->ssl_version) { return 1; @@ -235,12 +523,7 @@ SSL_CTX *SSL_CTX_new(const SSL_METHOD *method) { return NULL; } - if (SSL_get_ex_data_X509_STORE_CTX_idx() < 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_X509_VERIFICATION_SETUP_PROBLEMS); - goto err; - } - - ret = OPENSSL_malloc(sizeof(SSL_CTX)); + ret = (SSL_CTX *)OPENSSL_malloc(sizeof(SSL_CTX)); if (ret == NULL) { goto err; } @@ -271,25 +554,16 @@ SSL_CTX *SSL_CTX_new(const SSL_METHOD *method) { if (ret->sessions == NULL) { goto err; } - ret->cert_store = X509_STORE_new(); - if (ret->cert_store == NULL) { + + if (!ret->x509_method->ssl_ctx_new(ret)) { goto err; } - ssl_create_cipher_list(ret->method, &ret->cipher_list, - SSL_DEFAULT_CIPHER_LIST, 1 /* strict */); - if (ret->cipher_list == NULL || - sk_SSL_CIPHER_num(ret->cipher_list->ciphers) <= 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_LIBRARY_HAS_NO_CIPHERS); + if (!SSL_CTX_set_strict_cipher_list(ret, SSL_DEFAULT_CIPHER_LIST)) { goto err2; } - ret->param = X509_VERIFY_PARAM_new(); - if (!ret->param) { - goto err; - } - - ret->client_CA = sk_X509_NAME_new_null(); + ret->client_CA = sk_CRYPTO_BUFFER_new_null(); if (ret->client_CA == NULL) { goto err; } @@ -298,21 +572,17 @@ SSL_CTX *SSL_CTX_new(const SSL_METHOD *method) { ret->max_send_fragment = SSL3_RT_MAX_PLAIN_LENGTH; - /* Setup RFC4507 ticket keys */ - if (!RAND_bytes(ret->tlsext_tick_key_name, 16) || - !RAND_bytes(ret->tlsext_tick_hmac_key, 16) || - !RAND_bytes(ret->tlsext_tick_aes_key, 16)) { - ret->options |= SSL_OP_NO_TICKET; - } - - /* Disable the auto-chaining feature by default. Once this has stuck without - * problems, the feature will be removed entirely. */ + // Disable the auto-chaining feature by default. Once this has stuck without + // problems, the feature will be removed entirely. ret->mode = SSL_MODE_NO_AUTO_CHAIN; - /* Lock the SSL_CTX to the specified version, for compatibility with legacy - * uses of SSL_METHOD. */ + // Lock the SSL_CTX to the specified version, for compatibility with legacy + // uses of SSL_METHOD, but we do not set the minimum version for + // |SSLv3_method|. if (!SSL_CTX_set_max_proto_version(ret, method->version) || - !SSL_CTX_set_min_proto_version(ret, method->version)) { + !SSL_CTX_set_min_proto_version(ret, method->version == SSL3_VERSION + ? 0 // default + : method->version)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); goto err2; } @@ -337,33 +607,34 @@ void SSL_CTX_free(SSL_CTX *ctx) { return; } - X509_VERIFY_PARAM_free(ctx->param); - - /* Free internal session cache. However: the remove_cb() may reference the - * ex_data of SSL_CTX, thus the ex_data store can only be removed after the - * sessions were flushed. As the ex_data handling routines might also touch - * the session cache, the most secure solution seems to be: empty (flush) the - * cache, then free ex_data, then finally free the cache. (See ticket - * [openssl.org #212].) */ + // Free internal session cache. However: the remove_cb() may reference the + // ex_data of SSL_CTX, thus the ex_data store can only be removed after the + // sessions were flushed. As the ex_data handling routines might also touch + // the session cache, the most secure solution seems to be: empty (flush) the + // cache, then free ex_data, then finally free the cache. (See ticket + // [openssl.org #212].) 
SSL_CTX_flush_sessions(ctx, 0); CRYPTO_free_ex_data(&g_ex_data_class_ssl_ctx, ctx, &ctx->ex_data); CRYPTO_MUTEX_cleanup(&ctx->lock); lh_SSL_SESSION_free(ctx->sessions); - X509_STORE_free(ctx->cert_store); ssl_cipher_preference_list_free(ctx->cipher_list); ssl_cert_free(ctx->cert); sk_SSL_CUSTOM_EXTENSION_pop_free(ctx->client_custom_extensions, SSL_CUSTOM_EXTENSION_free); sk_SSL_CUSTOM_EXTENSION_pop_free(ctx->server_custom_extensions, SSL_CUSTOM_EXTENSION_free); - sk_X509_NAME_pop_free(ctx->client_CA, X509_NAME_free); + sk_CRYPTO_BUFFER_pop_free(ctx->client_CA, CRYPTO_BUFFER_free); + ctx->x509_method->ssl_ctx_free(ctx); sk_SRTP_PROTECTION_PROFILE_free(ctx->srtp_profiles); OPENSSL_free(ctx->psk_identity_hint); OPENSSL_free(ctx->supported_group_list); OPENSSL_free(ctx->alpn_client_proto_list); EVP_PKEY_free(ctx->tlsext_channel_id_private); + OPENSSL_free(ctx->verify_sigalgs); + OPENSSL_free(ctx->tlsext_ticket_key_current); + OPENSSL_free(ctx->tlsext_ticket_key_prev); OPENSSL_free(ctx); } @@ -378,17 +649,18 @@ SSL *SSL_new(SSL_CTX *ctx) { return NULL; } - SSL *ssl = OPENSSL_malloc(sizeof(SSL)); + SSL *ssl = (SSL *)OPENSSL_malloc(sizeof(SSL)); if (ssl == NULL) { goto err; } OPENSSL_memset(ssl, 0, sizeof(SSL)); - ssl->min_version = ctx->min_version; - ssl->max_version = ctx->max_version; + ssl->conf_min_version = ctx->conf_min_version; + ssl->conf_max_version = ctx->conf_max_version; + ssl->tls13_variant = ctx->tls13_variant; - /* RFC 6347 states that implementations SHOULD use an initial timer value of - * 1 second. */ + // RFC 6347 states that implementations SHOULD use an initial timer value of + // 1 second. ssl->initial_timeout_duration_ms = 1000; ssl->options = ctx->options; @@ -404,25 +676,25 @@ SSL *SSL_new(SSL_CTX *ctx) { ssl->msg_callback_arg = ctx->msg_callback_arg; ssl->verify_mode = ctx->verify_mode; ssl->verify_callback = ctx->default_verify_callback; + ssl->custom_verify_callback = ctx->custom_verify_callback; ssl->retain_only_sha256_of_client_certs = ctx->retain_only_sha256_of_client_certs; - ssl->param = X509_VERIFY_PARAM_new(); - if (!ssl->param) { - goto err; - } - X509_VERIFY_PARAM_inherit(ssl->param, ctx->param); ssl->quiet_shutdown = ctx->quiet_shutdown; ssl->max_send_fragment = ctx->max_send_fragment; SSL_CTX_up_ref(ctx); ssl->ctx = ctx; SSL_CTX_up_ref(ctx); - ssl->initial_ctx = ctx; + ssl->session_ctx = ctx; + + if (!ssl->ctx->x509_method->ssl_new(ssl)) { + goto err; + } if (ctx->supported_group_list) { - ssl->supported_group_list = BUF_memdup(ctx->supported_group_list, - ctx->supported_group_list_len * 2); + ssl->supported_group_list = (uint16_t *)BUF_memdup( + ctx->supported_group_list, ctx->supported_group_list_len * 2); if (!ssl->supported_group_list) { goto err; } @@ -430,8 +702,8 @@ SSL *SSL_new(SSL_CTX *ctx) { } if (ctx->alpn_client_proto_list) { - ssl->alpn_client_proto_list = BUF_memdup(ctx->alpn_client_proto_list, - ctx->alpn_client_proto_list_len); + ssl->alpn_client_proto_list = (uint8_t *)BUF_memdup( + ctx->alpn_client_proto_list, ctx->alpn_client_proto_list_len); if (ssl->alpn_client_proto_list == NULL) { goto err; } @@ -444,8 +716,6 @@ SSL *SSL_new(SSL_CTX *ctx) { goto err; } - ssl->rwstate = SSL_NOTHING; - CRYPTO_new_ex_data(&ssl->ex_data); ssl->psk_identity_hint = NULL; @@ -481,16 +751,16 @@ void SSL_free(SSL *ssl) { return; } - X509_VERIFY_PARAM_free(ssl->param); + if (ssl->ctx != NULL) { + ssl->ctx->x509_method->ssl_free(ssl); + } CRYPTO_free_ex_data(&g_ex_data_class_ssl, ssl, &ssl->ex_data); BIO_free_all(ssl->rbio); BIO_free_all(ssl->wbio); - 
BUF_MEM_free(ssl->init_buf); - - /* add extra stuff */ + // add extra stuff ssl_cipher_preference_list_free(ssl->cipher_list); SSL_SESSION_free(ssl->session); @@ -498,12 +768,12 @@ void SSL_free(SSL *ssl) { ssl_cert_free(ssl->cert); OPENSSL_free(ssl->tlsext_hostname); - SSL_CTX_free(ssl->initial_ctx); + SSL_CTX_free(ssl->session_ctx); OPENSSL_free(ssl->supported_group_list); OPENSSL_free(ssl->alpn_client_proto_list); EVP_PKEY_free(ssl->tlsext_channel_id_private); OPENSSL_free(ssl->psk_identity_hint); - sk_X509_NAME_pop_free(ssl->client_CA, X509_NAME_free); + sk_CRYPTO_BUFFER_pop_free(ssl->client_CA, CRYPTO_BUFFER_free); sk_SRTP_PROTECTION_PROFILE_free(ssl->srtp_profiles); if (ssl->method != NULL) { @@ -515,13 +785,13 @@ void SSL_free(SSL *ssl) { } void SSL_set_connect_state(SSL *ssl) { - ssl->server = 0; - ssl->handshake_func = ssl3_connect; + ssl->server = false; + ssl->do_handshake = ssl_client_handshake; } void SSL_set_accept_state(SSL *ssl) { - ssl->server = 1; - ssl->handshake_func = ssl3_accept; + ssl->server = true; + ssl->do_handshake = ssl_server_handshake; } void SSL_set0_rbio(SSL *ssl, BIO *rbio) { @@ -535,35 +805,35 @@ void SSL_set0_wbio(SSL *ssl, BIO *wbio) { } void SSL_set_bio(SSL *ssl, BIO *rbio, BIO *wbio) { - /* For historical reasons, this function has many different cases in ownership - * handling. */ + // For historical reasons, this function has many different cases in ownership + // handling. - /* If nothing has changed, do nothing */ + // If nothing has changed, do nothing if (rbio == SSL_get_rbio(ssl) && wbio == SSL_get_wbio(ssl)) { return; } - /* If the two arguments are equal, one fewer reference is granted than - * taken. */ + // If the two arguments are equal, one fewer reference is granted than + // taken. if (rbio != NULL && rbio == wbio) { BIO_up_ref(rbio); } - /* If only the wbio is changed, adopt only one reference. */ + // If only the wbio is changed, adopt only one reference. if (rbio == SSL_get_rbio(ssl)) { SSL_set0_wbio(ssl, wbio); return; } - /* There is an asymmetry here for historical reasons. If only the rbio is - * changed AND the rbio and wbio were originally different, then we only adopt - * one reference. */ + // There is an asymmetry here for historical reasons. If only the rbio is + // changed AND the rbio and wbio were originally different, then we only adopt + // one reference. if (wbio == SSL_get_wbio(ssl) && SSL_get_rbio(ssl) != SSL_get_wbio(ssl)) { SSL_set0_rbio(ssl, rbio); return; } - /* Otherwise, adopt both references. */ + // Otherwise, adopt both references. SSL_set0_rbio(ssl, rbio); SSL_set0_wbio(ssl, wbio); } @@ -572,18 +842,10 @@ BIO *SSL_get_rbio(const SSL *ssl) { return ssl->rbio; } BIO *SSL_get_wbio(const SSL *ssl) { return ssl->wbio; } -void ssl_reset_error_state(SSL *ssl) { - /* Functions which use |SSL_get_error| must reset I/O and error state on - * entry. */ - ssl->rwstate = SSL_NOTHING; - ERR_clear_error(); - ERR_clear_system_error(); -} - int SSL_do_handshake(SSL *ssl) { ssl_reset_error_state(ssl); - if (ssl->handshake_func == NULL) { + if (ssl->do_handshake == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_TYPE_NOT_SET); return -1; } @@ -592,30 +854,28 @@ int SSL_do_handshake(SSL *ssl) { return 1; } - if (ssl->s3->hs == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return -1; - } + // Run the handshake. + SSL_HANDSHAKE *hs = ssl->s3->hs.get(); - /* Run the handshake. 
*/ - assert(ssl->s3->hs != NULL); - int ret = ssl->handshake_func(ssl->s3->hs); + bool early_return = false; + int ret = ssl_run_handshake(hs, &early_return); + ssl_do_info_callback( + ssl, ssl->server ? SSL_CB_ACCEPT_EXIT : SSL_CB_CONNECT_EXIT, ret); if (ret <= 0) { return ret; } - /* Destroy the handshake object if the handshake has completely finished. */ - if (!SSL_in_init(ssl)) { - ssl_handshake_free(ssl->s3->hs); - ssl->s3->hs = NULL; + // Destroy the handshake object if the handshake has completely finished. + if (!early_return) { + ssl->s3->hs.reset(); } return 1; } int SSL_connect(SSL *ssl) { - if (ssl->handshake_func == NULL) { - /* Not properly initialized yet */ + if (ssl->do_handshake == NULL) { + // Not properly initialized yet SSL_set_connect_state(ssl); } @@ -623,32 +883,35 @@ int SSL_connect(SSL *ssl) { } int SSL_accept(SSL *ssl) { - if (ssl->handshake_func == NULL) { - /* Not properly initialized yet */ + if (ssl->do_handshake == NULL) { + // Not properly initialized yet SSL_set_accept_state(ssl); } return SSL_do_handshake(ssl); } -static int ssl_do_renegotiate(SSL *ssl) { - /* We do not accept renegotiations as a server or SSL 3.0. SSL 3.0 will be - * removed entirely in the future and requires retaining more data for - * renegotiation_info. */ +static int ssl_do_post_handshake(SSL *ssl, const SSLMessage &msg) { + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return tls13_post_handshake(ssl, msg); + } + + // We do not accept renegotiations as a server or SSL 3.0. SSL 3.0 will be + // removed entirely in the future and requires retaining more data for + // renegotiation_info. if (ssl->server || ssl->version == SSL3_VERSION) { goto no_renegotiation; } - if (ssl->s3->tmp.message_type != SSL3_MT_HELLO_REQUEST || - ssl->init_num != 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + if (msg.type != SSL3_MT_HELLO_REQUEST || CBS_len(&msg.body) != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_HELLO_REQUEST); return 0; } switch (ssl->renegotiate_mode) { case ssl_renegotiate_ignore: - /* Ignore the HelloRequest. */ + // Ignore the HelloRequest. return 1; case ssl_renegotiate_once: @@ -664,21 +927,22 @@ static int ssl_do_renegotiate(SSL *ssl) { break; } - /* Renegotiation is only supported at quiescent points in the application - * protocol, namely in HTTPS, just before reading the HTTP response. Require - * the record-layer be idle and avoid complexities of sending a handshake - * record while an application_data record is being written. */ - if (ssl_write_buffer_is_pending(ssl)) { + // Renegotiation is only supported at quiescent points in the application + // protocol, namely in HTTPS, just before reading the HTTP response. Require + // the record-layer be idle and avoid complexities of sending a handshake + // record while an application_data record is being written. + if (!ssl->s3->write_buffer.empty() || + ssl->s3->write_shutdown != ssl_shutdown_none) { goto no_renegotiation; } - /* Begin a new handshake. */ - if (ssl->s3->hs != NULL) { + // Begin a new handshake. 
+ if (ssl->s3->hs != nullptr) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); return 0; } ssl->s3->hs = ssl_handshake_new(ssl); - if (ssl->s3->hs == NULL) { + if (ssl->s3->hs == nullptr) { return 0; } @@ -686,32 +950,29 @@ static int ssl_do_renegotiate(SSL *ssl) { return 1; no_renegotiation: - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_NO_RENEGOTIATION); OPENSSL_PUT_ERROR(SSL, SSL_R_NO_RENEGOTIATION); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_NO_RENEGOTIATION); return 0; } -static int ssl_do_post_handshake(SSL *ssl) { - if (ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - return ssl_do_renegotiate(ssl); - } - - return tls13_post_handshake(ssl); -} - -static int ssl_read_impl(SSL *ssl, void *buf, int num, int peek) { +static int ssl_read_impl(SSL *ssl) { ssl_reset_error_state(ssl); - if (ssl->handshake_func == NULL) { + if (ssl->do_handshake == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNINITIALIZED); return -1; } - for (;;) { - /* Complete the current handshake, if any. False Start will cause - * |SSL_do_handshake| to return mid-handshake, so this may require multiple - * iterations. */ - while (SSL_in_init(ssl)) { + // Replay post-handshake message errors. + if (!check_read_error(ssl)) { + return -1; + } + + while (ssl->s3->pending_app_data.empty()) { + // Complete the current handshake, if any. False Start will cause + // |SSL_do_handshake| to return mid-handshake, so this may require multiple + // iterations. + while (!ssl_can_read(ssl)) { int ret = SSL_do_handshake(ssl); if (ret < 0) { return ret; @@ -722,121 +983,228 @@ static int ssl_read_impl(SSL *ssl, void *buf, int num, int peek) { } } - int got_handshake; - int ret = ssl->method->read_app_data(ssl, &got_handshake, buf, num, peek); - if (ret > 0 || !got_handshake) { - ssl->s3->key_update_count = 0; - return ret; + // Process any buffered post-handshake messages. + SSLMessage msg; + if (ssl->method->get_message(ssl, &msg)) { + // If we received an interrupt in early read (EndOfEarlyData), loop again + // for the handshake to process it. + if (SSL_in_init(ssl)) { + ssl->s3->hs->can_early_read = false; + continue; + } + + // Handle the post-handshake message and try again. + if (!ssl_do_post_handshake(ssl, msg)) { + ssl_set_read_error(ssl); + return -1; + } + ssl->method->next_message(ssl); + continue; // Loop again. We may have begun a new handshake. } - /* Handle the post-handshake message and try again. */ - if (!ssl_do_post_handshake(ssl)) { - return -1; + uint8_t alert = SSL_AD_DECODE_ERROR; + size_t consumed = 0; + auto ret = ssl_open_app_data(ssl, &ssl->s3->pending_app_data, &consumed, + &alert, ssl->s3->read_buffer.span()); + bool retry; + int bio_ret = ssl_handle_open_record(ssl, &retry, ret, consumed, alert); + if (bio_ret <= 0) { + return bio_ret; + } + if (!retry) { + assert(!ssl->s3->pending_app_data.empty()); + ssl->s3->key_update_count = 0; } - ssl->method->release_current_message(ssl, 1 /* free buffer */); } + + return 1; } int SSL_read(SSL *ssl, void *buf, int num) { - return ssl_read_impl(ssl, buf, num, 0 /* consume bytes */); + int ret = SSL_peek(ssl, buf, num); + if (ret <= 0) { + return ret; + } + // TODO(davidben): In DTLS, should the rest of the record be discarded? DTLS + // is not a stream. See https://crbug.com/boringssl/65. 
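// A caller-level sketch (illustrative, not part of this patch): with the
// rewrite above, SSL_read is SSL_peek plus consuming the peeked bytes from
// |pending_app_data|, so the usual error triage via SSL_get_error is unchanged.
#include <openssl/ssl.h>
static int read_app_data(SSL *ssl, uint8_t *buf, int cap) {
  int ret = SSL_read(ssl, buf, cap);
  if (ret > 0) {
    return ret;  // |ret| plaintext bytes were consumed from the record.
  }
  switch (SSL_get_error(ssl, ret)) {
    case SSL_ERROR_ZERO_RETURN:
      return 0;   // Peer sent close_notify.
    case SSL_ERROR_WANT_READ:
    case SSL_ERROR_WANT_WRITE:
      return -2;  // Non-blocking transport; retry once the BIO is ready.
    default:
      return -1;  // Fatal error; inspect the error queue.
  }
}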
+ ssl->s3->pending_app_data = + ssl->s3->pending_app_data.subspan(static_cast<size_t>(ret)); + if (ssl->s3->pending_app_data.empty()) { + ssl->s3->read_buffer.DiscardConsumed(); + } + return ret; } int SSL_peek(SSL *ssl, void *buf, int num) { - return ssl_read_impl(ssl, buf, num, 1 /* peek */); + int ret = ssl_read_impl(ssl); + if (ret <= 0) { + return ret; + } + if (num <= 0) { + return num; + } + size_t todo = + std::min(ssl->s3->pending_app_data.size(), static_cast<size_t>(num)); + OPENSSL_memcpy(buf, ssl->s3->pending_app_data.data(), todo); + return static_cast<int>(todo); } int SSL_write(SSL *ssl, const void *buf, int num) { ssl_reset_error_state(ssl); - if (ssl->handshake_func == NULL) { + if (ssl->do_handshake == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNINITIALIZED); return -1; } - if (ssl->s3->send_shutdown != ssl_shutdown_none) { + if (ssl->s3->write_shutdown != ssl_shutdown_none) { OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); return -1; } - /* If necessary, complete the handshake implicitly. */ - if (SSL_in_init(ssl) && !SSL_in_false_start(ssl)) { - int ret = SSL_do_handshake(ssl); - if (ret < 0) { - return ret; - } - if (ret == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_HANDSHAKE_FAILURE); - return -1; + int ret = 0; + bool needs_handshake = false; + do { + // If necessary, complete the handshake implicitly. + if (!ssl_can_write(ssl)) { + ret = SSL_do_handshake(ssl); + if (ret < 0) { + return ret; + } + if (ret == 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_HANDSHAKE_FAILURE); + return -1; + } } - } - return ssl->method->write_app_data(ssl, buf, num); + ret = ssl->method->write_app_data(ssl, &needs_handshake, + (const uint8_t *)buf, num); + } while (needs_handshake); + return ret; } int SSL_shutdown(SSL *ssl) { ssl_reset_error_state(ssl); - if (ssl->handshake_func == NULL) { + if (ssl->do_handshake == NULL) { OPENSSL_PUT_ERROR(SSL, SSL_R_UNINITIALIZED); return -1; } - /* If we are in the middle of a handshake, silently succeed. Consumers often - * call this function before |SSL_free|, whether the handshake succeeded or - * not. We assume the caller has already handled failed handshakes. */ + // If we are in the middle of a handshake, silently succeed. Consumers often + // call this function before |SSL_free|, whether the handshake succeeded or + // not. We assume the caller has already handled failed handshakes. if (SSL_in_init(ssl)) { return 1; } if (ssl->quiet_shutdown) { - /* Do nothing if configured not to send a close_notify. */ - ssl->s3->send_shutdown = ssl_shutdown_close_notify; - ssl->s3->recv_shutdown = ssl_shutdown_close_notify; + // Do nothing if configured not to send a close_notify. + ssl->s3->write_shutdown = ssl_shutdown_close_notify; + ssl->s3->read_shutdown = ssl_shutdown_close_notify; return 1; } - /* This function completes in two stages. It sends a close_notify and then it - * waits for a close_notify to come in. Perform exactly one action and return - * whether or not it succeeds. */ + // This function completes in two stages. It sends a close_notify and then it + // waits for a close_notify to come in. Perform exactly one action and return + // whether or not it succeeds. - if (ssl->s3->send_shutdown != ssl_shutdown_close_notify) { - /* Send a close_notify. */ - if (ssl3_send_alert(ssl, SSL3_AL_WARNING, SSL_AD_CLOSE_NOTIFY) <= 0) { + if (ssl->s3->write_shutdown != ssl_shutdown_close_notify) { + // Send a close_notify. + if (ssl_send_alert(ssl, SSL3_AL_WARNING, SSL_AD_CLOSE_NOTIFY) <= 0) { return -1; } } else if (ssl->s3->alert_dispatch) { - /* Finish sending the close_notify. 
*/ + // Finish sending the close_notify. if (ssl->method->dispatch_alert(ssl) <= 0) { return -1; } - } else if (ssl->s3->recv_shutdown != ssl_shutdown_close_notify) { - /* Wait for the peer's close_notify. */ - ssl->method->read_close_notify(ssl); - if (ssl->s3->recv_shutdown != ssl_shutdown_close_notify) { - return -1; + } else if (ssl->s3->read_shutdown != ssl_shutdown_close_notify) { + if (SSL_is_dtls(ssl)) { + // Bidirectional shutdown doesn't make sense for an unordered + // transport. DTLS alerts also aren't delivered reliably, so we may even + // time out because the peer never received our close_notify. Report to + // the caller that the channel has fully shut down. + if (ssl->s3->read_shutdown == ssl_shutdown_error) { + ERR_restore_state(ssl->s3->read_error.get()); + return -1; + } + ssl->s3->read_shutdown = ssl_shutdown_close_notify; + } else { + // Keep discarding data until we see a close_notify. + for (;;) { + ssl->s3->pending_app_data = Span<uint8_t>(); + int ret = ssl_read_impl(ssl); + if (ret <= 0) { + break; + } + } + if (ssl->s3->read_shutdown != ssl_shutdown_close_notify) { + return -1; + } } } - /* Return 0 for unidirectional shutdown and 1 for bidirectional shutdown. */ - return ssl->s3->recv_shutdown == ssl_shutdown_close_notify; + // Return 0 for unidirectional shutdown and 1 for bidirectional shutdown. + return ssl->s3->read_shutdown == ssl_shutdown_close_notify; } int SSL_send_fatal_alert(SSL *ssl, uint8_t alert) { if (ssl->s3->alert_dispatch) { if (ssl->s3->send_alert[0] != SSL3_AL_FATAL || ssl->s3->send_alert[1] != alert) { - /* We are already attempting to write a different alert. */ + // We are already attempting to write a different alert. OPENSSL_PUT_ERROR(SSL, SSL_R_PROTOCOL_IS_SHUTDOWN); return -1; } return ssl->method->dispatch_alert(ssl); } - return ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_send_alert(ssl, SSL3_AL_FATAL, alert); } void SSL_CTX_set_early_data_enabled(SSL_CTX *ctx, int enabled) { - ctx->enable_early_data = !!enabled; + ctx->cert->enable_early_data = !!enabled; +} + +void SSL_CTX_set_tls13_variant(SSL_CTX *ctx, enum tls13_variant_t variant) { + ctx->tls13_variant = variant; +} + +void SSL_set_tls13_variant(SSL *ssl, enum tls13_variant_t variant) { + ssl->tls13_variant = variant; +} + +void SSL_set_early_data_enabled(SSL *ssl, int enabled) { + ssl->cert->enable_early_data = !!enabled; +} + +int SSL_in_early_data(const SSL *ssl) { + if (ssl->s3->hs == NULL) { + return 0; + } + return ssl->s3->hs->in_early_data; +} + +int SSL_early_data_accepted(const SSL *ssl) { + return ssl->early_data_accepted; +} + +void SSL_reset_early_data_reject(SSL *ssl) { + SSL_HANDSHAKE *hs = ssl->s3->hs.get(); + if (hs == NULL || + hs->wait != ssl_hs_early_data_rejected) { + abort(); + } + + hs->wait = ssl_hs_ok; + hs->in_early_data = false; + hs->early_session.reset(); + + // Discard any unfinished writes from the perspective of |SSL_write|'s + // retry. The handshake will transparently flush out the pending record + // (discarded by the server) to keep the framing correct. 
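// A client-side sketch (illustrative, not part of this patch) of the 0-RTT
// reject-and-retry flow that |SSL_reset_early_data_reject| above supports. It
// assumes early data was enabled and a resumable session was offered; |req|
// and |req_len| are placeholders for the application's first write.
#include <openssl/ssl.h>
static int write_with_0rtt_retry(SSL *ssl, const uint8_t *req, int req_len) {
  int ret = SSL_write(ssl, req, req_len);  // May be sent as early data.
  if (ret <= 0 && SSL_get_error(ssl, ret) == SSL_ERROR_EARLY_DATA_REJECTED) {
    SSL_reset_early_data_reject(ssl);      // Drop the rejected early state.
    ret = SSL_write(ssl, req, req_len);    // Retry as ordinary 1-RTT data.
  }
  return ret;
}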
+ ssl->s3->wpend_pending = false; } static int bio_retry_reason_to_error(int reason) { @@ -855,8 +1223,8 @@ int SSL_get_error(const SSL *ssl, int ret_code) { return SSL_ERROR_NONE; } - /* Make things return SSL_ERROR_SYSCALL when doing SSL_do_handshake etc, - * where we do encode the error */ + // Make things return SSL_ERROR_SYSCALL when doing SSL_do_handshake etc, + // where we do encode the error uint32_t err = ERR_peek_error(); if (err != 0) { if (ERR_GET_LIB(err) == ERR_LIB_SYS) { @@ -866,16 +1234,16 @@ int SSL_get_error(const SSL *ssl, int ret_code) { } if (ret_code == 0) { - if (ssl->s3->recv_shutdown == ssl_shutdown_close_notify) { + if (ssl->s3->read_shutdown == ssl_shutdown_close_notify) { return SSL_ERROR_ZERO_RETURN; } - /* An EOF was observed which violates the protocol, and the underlying - * transport does not participate in the error queue. Bubble up to the - * caller. */ + // An EOF was observed which violates the protocol, and the underlying + // transport does not participate in the error queue. Bubble up to the + // caller. return SSL_ERROR_SYSCALL; } - switch (ssl->rwstate) { + switch (ssl->s3->rwstate) { case SSL_PENDING_SESSION: return SSL_ERROR_PENDING_SESSION; @@ -889,8 +1257,8 @@ int SSL_get_error(const SSL *ssl, int ret_code) { } if (BIO_should_write(bio)) { - /* TODO(davidben): OpenSSL historically checked for writes on the read - * BIO. Can this be removed? */ + // TODO(davidben): OpenSSL historically checked for writes on the read + // BIO. Can this be removed? return SSL_ERROR_WANT_WRITE; } @@ -908,8 +1276,8 @@ int SSL_get_error(const SSL *ssl, int ret_code) { } if (BIO_should_read(bio)) { - /* TODO(davidben): OpenSSL historically checked for reads on the write - * BIO. Can this be removed? */ + // TODO(davidben): OpenSSL historically checked for reads on the write + // BIO. Can this be removed? return SSL_ERROR_WANT_READ; } @@ -928,57 +1296,18 @@ int SSL_get_error(const SSL *ssl, int ret_code) { case SSL_PRIVATE_KEY_OPERATION: return SSL_ERROR_WANT_PRIVATE_KEY_OPERATION; - } - - return SSL_ERROR_SYSCALL; -} -static int set_min_version(const SSL_PROTOCOL_METHOD *method, uint16_t *out, - uint16_t version) { - if (version == 0) { - *out = method->min_version; - return 1; - } - - if (version == TLS1_3_VERSION) { - version = TLS1_3_DRAFT_VERSION; - } - - return method->version_from_wire(out, version); -} + case SSL_PENDING_TICKET: + return SSL_ERROR_PENDING_TICKET; -static int set_max_version(const SSL_PROTOCOL_METHOD *method, uint16_t *out, - uint16_t version) { - if (version == 0) { - *out = method->max_version; - /* TODO(svaldez): Enable TLS 1.3 by default once fully implemented. 
*/ - if (*out > TLS1_2_VERSION) { - *out = TLS1_2_VERSION; - } - return 1; - } + case SSL_EARLY_DATA_REJECTED: + return SSL_ERROR_EARLY_DATA_REJECTED; - if (version == TLS1_3_VERSION) { - version = TLS1_3_DRAFT_VERSION; + case SSL_CERTIFICATE_VERIFY: + return SSL_ERROR_WANT_CERTIFICATE_VERIFY; } - return method->version_from_wire(out, version); -} - -int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, uint16_t version) { - return set_min_version(ctx->method, &ctx->min_version, version); -} - -int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, uint16_t version) { - return set_max_version(ctx->method, &ctx->max_version, version); -} - -int SSL_set_min_proto_version(SSL *ssl, uint16_t version) { - return set_min_version(ssl->method, &ssl->min_version, version); -} - -int SSL_set_max_proto_version(SSL *ssl, uint16_t version) { - return set_max_version(ssl->method, &ssl->max_version, version); + return SSL_ERROR_SYSCALL; } uint32_t SSL_CTX_set_options(SSL_CTX *ctx, uint32_t options) { @@ -1035,22 +1364,25 @@ void SSL_CTX_set0_buffer_pool(SSL_CTX *ctx, CRYPTO_BUFFER_POOL *pool) { int SSL_get_tls_unique(const SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out) { - /* tls-unique is not defined for SSL 3.0 or TLS 1.3. */ + *out_len = 0; + OPENSSL_memset(out, 0, max_out); + + // tls-unique is not defined for SSL 3.0 or TLS 1.3. if (!ssl->s3->initial_handshake_complete || - ssl3_protocol_version(ssl) < TLS1_VERSION || - ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - goto err; + ssl_protocol_version(ssl) < TLS1_VERSION || + ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return 0; } - /* The tls-unique value is the first Finished message in the handshake, which - * is the client's in a full handshake and the server's for a resumption. See - * https://tools.ietf.org/html/rfc5929#section-3.1. */ + // The tls-unique value is the first Finished message in the handshake, which + // is the client's in a full handshake and the server's for a resumption. See + // https://tools.ietf.org/html/rfc5929#section-3.1. const uint8_t *finished = ssl->s3->previous_client_finished; size_t finished_len = ssl->s3->previous_client_finished_len; if (ssl->session != NULL) { - /* tls-unique is broken for resumed sessions unless EMS is used. */ + // tls-unique is broken for resumed sessions unless EMS is used. 
if (!ssl->session->extended_master_secret) { - goto err; + return 0; } finished = ssl->s3->previous_server_finished; finished_len = ssl->s3->previous_server_finished_len; @@ -1063,11 +1395,6 @@ int SSL_get_tls_unique(const SSL *ssl, uint8_t *out, size_t *out_len, OPENSSL_memcpy(out, finished, *out_len); return 1; - -err: - *out_len = 0; - OPENSSL_memset(out, 0, max_out); - return 0; } static int set_session_id_context(CERT *cert, const uint8_t *sid_ctx, @@ -1077,7 +1404,7 @@ static int set_session_id_context(CERT *cert, const uint8_t *sid_ctx, return 0; } - OPENSSL_COMPILE_ASSERT(sizeof(cert->sid_ctx) < 256, sid_ctx_too_large); + static_assert(sizeof(cert->sid_ctx) < 256, "sid_ctx too large"); cert->sid_ctx_length = (uint8_t)sid_ctx_len; OPENSSL_memcpy(cert->sid_ctx, sid_ctx, sid_ctx_len); return 1; @@ -1098,16 +1425,6 @@ const uint8_t *SSL_get0_session_id_context(const SSL *ssl, size_t *out_len) { return ssl->cert->sid_ctx; } -void ssl_cipher_preference_list_free( - struct ssl_cipher_preference_list_st *cipher_list) { - if (cipher_list == NULL) { - return; - } - sk_SSL_CIPHER_free(cipher_list->ciphers); - OPENSSL_free(cipher_list->in_group_flags); - OPENSSL_free(cipher_list); -} - void SSL_certs_clear(SSL *ssl) { ssl_cert_clear_certs(ssl->cert); } int SSL_get_fd(const SSL *ssl) { return SSL_get_rfd(ssl); } @@ -1153,7 +1470,7 @@ int SSL_set_wfd(SSL *ssl, int fd) { BIO_set_fd(bio, fd, BIO_NOCLOSE); SSL_set0_wbio(ssl, bio); } else { - /* Copy the rbio over to the wbio. */ + // Copy the rbio over to the wbio. BIO_up_ref(rbio); SSL_set0_wbio(ssl, rbio); } @@ -1173,7 +1490,7 @@ int SSL_set_rfd(SSL *ssl, int fd) { BIO_set_fd(bio, fd, BIO_NOCLOSE); SSL_set0_rbio(ssl, bio); } else { - /* Copy the wbio over to the rbio. */ + // Copy the wbio over to the rbio. BIO_up_ref(wbio); SSL_set0_rbio(ssl, wbio); } @@ -1191,8 +1508,8 @@ static size_t copy_finished(void *out, size_t out_len, const uint8_t *in, size_t SSL_get_finished(const SSL *ssl, void *buf, size_t count) { if (!ssl->s3->initial_handshake_complete || - ssl3_protocol_version(ssl) < TLS1_VERSION || - ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { + ssl_protocol_version(ssl) < TLS1_VERSION || + ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return 0; } @@ -1207,8 +1524,8 @@ size_t SSL_get_finished(const SSL *ssl, void *buf, size_t count) { size_t SSL_get_peer_finished(const SSL *ssl, void *buf, size_t count) { if (!ssl->s3->initial_handshake_complete || - ssl3_protocol_version(ssl) < TLS1_VERSION || - ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { + ssl_protocol_version(ssl) < TLS1_VERSION || + ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return 0; } @@ -1224,21 +1541,21 @@ size_t SSL_get_peer_finished(const SSL *ssl, void *buf, size_t count) { int SSL_get_verify_mode(const SSL *ssl) { return ssl->verify_mode; } int SSL_get_extms_support(const SSL *ssl) { - /* TLS 1.3 does not require extended master secret and always reports as - * supporting it. */ + // TLS 1.3 does not require extended master secret and always reports as + // supporting it. if (!ssl->s3->have_version) { return 0; } - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { return 1; } - /* If the initial handshake completed, query the established session. */ + // If the initial handshake completed, query the established session. if (ssl->s3->established_session != NULL) { return ssl->s3->established_session->extended_master_secret; } - /* Otherwise, query the in-progress handshake. 
*/ + // Otherwise, query the in-progress handshake. if (ssl->s3->hs != NULL) { return ssl->s3->hs->extended_master_secret; } @@ -1255,18 +1572,15 @@ void SSL_CTX_set_read_ahead(SSL_CTX *ctx, int yes) { } void SSL_set_read_ahead(SSL *ssl, int yes) { } int SSL_pending(const SSL *ssl) { - if (ssl->s3->rrec.type != SSL3_RT_APPLICATION_DATA) { - return 0; - } - return ssl->s3->rrec.length; + return static_cast<int>(ssl->s3->pending_app_data.size()); } -/* Fix this so it checks all the valid key/cert options */ +// Fix this so it checks all the valid key/cert options int SSL_CTX_check_private_key(const SSL_CTX *ctx) { return ssl_cert_check_private_key(ctx->cert, ctx->cert->privatekey); } -/* Fix this function so that it takes an optional type parameter */ +// Fix this function so that it takes an optional type parameter int SSL_check_private_key(const SSL *ssl) { return ssl_cert_check_private_key(ssl->cert, ssl->cert->privatekey); } @@ -1276,7 +1590,7 @@ long SSL_get_default_timeout(const SSL *ssl) { } int SSL_renegotiate(SSL *ssl) { - /* Caller-initiated renegotiation is not supported. */ + // Caller-initiated renegotiation is not supported. OPENSSL_PUT_ERROR(SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return 0; } @@ -1347,13 +1661,12 @@ int SSL_get_secure_renegotiation_support(const SSL *ssl) { if (!ssl->s3->have_version) { return 0; } - return ssl3_protocol_version(ssl) >= TLS1_3_VERSION || + return ssl_protocol_version(ssl) >= TLS1_3_VERSION || ssl->s3->send_connection_binding; } -LHASH_OF(SSL_SESSION) *SSL_CTX_sessions(SSL_CTX *ctx) { return ctx->sessions; } - size_t SSL_CTX_sess_number(const SSL_CTX *ctx) { + MutexReadLock lock(const_cast<CRYPTO_MUTEX *>(&ctx->lock)); return lh_SSL_SESSION_num_items(ctx->sessions); } @@ -1386,10 +1699,18 @@ int SSL_CTX_get_tlsext_ticket_keys(SSL_CTX *ctx, void *out, size_t len) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_TICKET_KEYS_LENGTH); return 0; } - uint8_t *out_bytes = out; - OPENSSL_memcpy(out_bytes, ctx->tlsext_tick_key_name, 16); - OPENSSL_memcpy(out_bytes + 16, ctx->tlsext_tick_hmac_key, 16); - OPENSSL_memcpy(out_bytes + 32, ctx->tlsext_tick_aes_key, 16); + + // The default ticket keys are initialized lazily. Trigger a key + // rotation to initialize them. 
+ if (!ssl_ctx_rotate_ticket_encryption_key(ctx)) { + return 0; + } + + uint8_t *out_bytes = reinterpret_cast<uint8_t *>(out); + MutexReadLock lock(&ctx->lock); + OPENSSL_memcpy(out_bytes, ctx->tlsext_ticket_key_current->name, 16); + OPENSSL_memcpy(out_bytes + 16, ctx->tlsext_ticket_key_current->hmac_key, 16); + OPENSSL_memcpy(out_bytes + 32, ctx->tlsext_ticket_key_current->aes_key, 16); return 1; } @@ -1401,10 +1722,22 @@ int SSL_CTX_set_tlsext_ticket_keys(SSL_CTX *ctx, const void *in, size_t len) { OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_TICKET_KEYS_LENGTH); return 0; } - const uint8_t *in_bytes = in; - OPENSSL_memcpy(ctx->tlsext_tick_key_name, in_bytes, 16); - OPENSSL_memcpy(ctx->tlsext_tick_hmac_key, in_bytes + 16, 16); - OPENSSL_memcpy(ctx->tlsext_tick_aes_key, in_bytes + 32, 16); + if (!ctx->tlsext_ticket_key_current) { + ctx->tlsext_ticket_key_current = + (tlsext_ticket_key *)OPENSSL_malloc(sizeof(tlsext_ticket_key)); + if (!ctx->tlsext_ticket_key_current) { + return 0; + } + } + OPENSSL_memset(ctx->tlsext_ticket_key_current, 0, sizeof(tlsext_ticket_key)); + const uint8_t *in_bytes = reinterpret_cast<const uint8_t *>(in); + OPENSSL_memcpy(ctx->tlsext_ticket_key_current->name, in_bytes, 16); + OPENSSL_memcpy(ctx->tlsext_ticket_key_current->hmac_key, in_bytes + 16, 16); + OPENSSL_memcpy(ctx->tlsext_ticket_key_current->aes_key, in_bytes + 32, 16); + OPENSSL_free(ctx->tlsext_ticket_key_prev); + ctx->tlsext_ticket_key_prev = nullptr; + // Disable automatic key rotation. + ctx->tlsext_ticket_key_current->next_rotation_tv_sec = 0; return 1; } @@ -1439,8 +1772,8 @@ int SSL_set1_curves_list(SSL *ssl, const char *curves) { } uint16_t SSL_get_curve_id(const SSL *ssl) { - /* TODO(davidben): This checks the wrong session if there is a renegotiation in - * progress. */ + // TODO(davidben): This checks the wrong session if there is a renegotiation + // in progress. 
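// A configuration sketch (illustrative, not part of this patch): the 48-byte
// buffer handled above is laid out as a 16-byte key name, a 16-byte HMAC key
// and a 16-byte AES key, matching the three OPENSSL_memcpy calls, and setting
// keys explicitly disables the automatic rotation this change introduces.
// Useful when several servers must share ticket keys.
#include <openssl/rand.h>
#include <openssl/ssl.h>
static int configure_shared_ticket_keys(SSL_CTX *ctx) {
  uint8_t keys[48];  // 16-byte name || 16-byte HMAC key || 16-byte AES key
  if (!RAND_bytes(keys, sizeof(keys))) {
    return 0;
  }
  return SSL_CTX_set_tlsext_ticket_keys(ctx, keys, sizeof(keys));
}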
SSL_SESSION *session = SSL_get_session(ssl); if (session == NULL) { return 0; @@ -1450,23 +1783,22 @@ uint16_t SSL_get_curve_id(const SSL *ssl) { } int SSL_CTX_set_tmp_dh(SSL_CTX *ctx, const DH *dh) { - DH_free(ctx->cert->dh_tmp); - ctx->cert->dh_tmp = DHparams_dup(dh); - if (ctx->cert->dh_tmp == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_DH_LIB); - return 0; - } return 1; } int SSL_set_tmp_dh(SSL *ssl, const DH *dh) { - DH_free(ssl->cert->dh_tmp); - ssl->cert->dh_tmp = DHparams_dup(dh); - if (ssl->cert->dh_tmp == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_DH_LIB); + return 1; +} + +STACK_OF(SSL_CIPHER) *SSL_CTX_get_ciphers(const SSL_CTX *ctx) { + return ctx->cipher_list->ciphers; +} + +int SSL_CTX_cipher_in_group(const SSL_CTX *ctx, size_t i) { + if (i >= sk_SSL_CIPHER_num(ctx->cipher_list->ciphers)) { return 0; } - return 1; + return ctx->cipher_list->in_group_flags[i]; } STACK_OF(SSL_CIPHER) *SSL_get_ciphers(const SSL *ssl) { @@ -1484,92 +1816,37 @@ STACK_OF(SSL_CIPHER) *SSL_get_ciphers(const SSL *ssl) { } const char *SSL_get_cipher_list(const SSL *ssl, int n) { - const SSL_CIPHER *c; - STACK_OF(SSL_CIPHER) *sk; - if (ssl == NULL) { return NULL; } - sk = SSL_get_ciphers(ssl); + STACK_OF(SSL_CIPHER) *sk = SSL_get_ciphers(ssl); if (sk == NULL || n < 0 || (size_t)n >= sk_SSL_CIPHER_num(sk)) { return NULL; } - c = sk_SSL_CIPHER_value(sk, n); + const SSL_CIPHER *c = sk_SSL_CIPHER_value(sk, n); if (c == NULL) { - return NULL; - } - - return c->name; -} - -int SSL_CTX_set_cipher_list(SSL_CTX *ctx, const char *str) { - STACK_OF(SSL_CIPHER) *cipher_list = - ssl_create_cipher_list(ctx->method, &ctx->cipher_list, str, - 0 /* not strict */); - if (cipher_list == NULL) { - return 0; - } - - /* |ssl_create_cipher_list| may succeed but return an empty cipher list. */ - if (sk_SSL_CIPHER_num(cipher_list) == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CIPHER_MATCH); - return 0; - } - - return 1; -} - -int SSL_CTX_set_strict_cipher_list(SSL_CTX *ctx, const char *str) { - STACK_OF(SSL_CIPHER) *cipher_list = - ssl_create_cipher_list(ctx->method, &ctx->cipher_list, str, - 1 /* strict */); - if (cipher_list == NULL) { - return 0; - } - - /* |ssl_create_cipher_list| may succeed but return an empty cipher list. */ - if (sk_SSL_CIPHER_num(cipher_list) == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CIPHER_MATCH); - return 0; + return NULL; } - return 1; + return c->name; } -int SSL_set_cipher_list(SSL *ssl, const char *str) { - STACK_OF(SSL_CIPHER) *cipher_list = - ssl_create_cipher_list(ssl->ctx->method, &ssl->cipher_list, str, - 0 /* not strict */); - if (cipher_list == NULL) { - return 0; - } +int SSL_CTX_set_cipher_list(SSL_CTX *ctx, const char *str) { + return ssl_create_cipher_list(&ctx->cipher_list, str, false /* not strict */); +} - /* |ssl_create_cipher_list| may succeed but return an empty cipher list. */ - if (sk_SSL_CIPHER_num(cipher_list) == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CIPHER_MATCH); - return 0; - } +int SSL_CTX_set_strict_cipher_list(SSL_CTX *ctx, const char *str) { + return ssl_create_cipher_list(&ctx->cipher_list, str, true /* strict */); +} - return 1; +int SSL_set_cipher_list(SSL *ssl, const char *str) { + return ssl_create_cipher_list(&ssl->cipher_list, str, false /* not strict */); } int SSL_set_strict_cipher_list(SSL *ssl, const char *str) { - STACK_OF(SSL_CIPHER) *cipher_list = - ssl_create_cipher_list(ssl->ctx->method, &ssl->cipher_list, str, - 1 /* strict */); - if (cipher_list == NULL) { - return 0; - } - - /* |ssl_create_cipher_list| may succeed but return an empty cipher list. 
*/ - if (sk_SSL_CIPHER_num(cipher_list) == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CIPHER_MATCH); - return 0; - } - - return 1; + return ssl_create_cipher_list(&ssl->cipher_list, str, true /* strict */); } const char *SSL_get_servername(const SSL *ssl, const int type) { @@ -1577,77 +1854,76 @@ const char *SSL_get_servername(const SSL *ssl, const int type) { return NULL; } - /* Historically, |SSL_get_servername| was also the configuration getter - * corresponding to |SSL_set_tlsext_host_name|. */ + // Historically, |SSL_get_servername| was also the configuration getter + // corresponding to |SSL_set_tlsext_host_name|. if (ssl->tlsext_hostname != NULL) { return ssl->tlsext_hostname; } - /* During the handshake, report the handshake value. */ - if (ssl->s3->hs != NULL) { - return ssl->s3->hs->hostname; - } - - /* SSL_get_servername may also be called after the handshake to look up the - * SNI value. - * - * TODO(davidben): This is almost unused. Can we remove it? */ - SSL_SESSION *session = SSL_get_session(ssl); - if (session == NULL) { - return NULL; - } - return session->tlsext_hostname; + return ssl->s3->hostname.get(); } int SSL_get_servername_type(const SSL *ssl) { - SSL_SESSION *session = SSL_get_session(ssl); - if (session == NULL || session->tlsext_hostname == NULL) { + if (SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name) == NULL) { return -1; } return TLSEXT_NAMETYPE_host_name; } +void SSL_CTX_set_custom_verify( + SSL_CTX *ctx, int mode, + enum ssl_verify_result_t (*callback)(SSL *ssl, uint8_t *out_alert)) { + ctx->verify_mode = mode; + ctx->custom_verify_callback = callback; +} + +void SSL_set_custom_verify( + SSL *ssl, int mode, + enum ssl_verify_result_t (*callback)(SSL *ssl, uint8_t *out_alert)) { + ssl->verify_mode = mode; + ssl->custom_verify_callback = callback; +} + void SSL_CTX_enable_signed_cert_timestamps(SSL_CTX *ctx) { - ctx->signed_cert_timestamps_enabled = 1; + ctx->signed_cert_timestamps_enabled = true; } void SSL_enable_signed_cert_timestamps(SSL *ssl) { - ssl->signed_cert_timestamps_enabled = 1; + ssl->signed_cert_timestamps_enabled = true; } void SSL_CTX_enable_ocsp_stapling(SSL_CTX *ctx) { - ctx->ocsp_stapling_enabled = 1; + ctx->ocsp_stapling_enabled = true; } void SSL_enable_ocsp_stapling(SSL *ssl) { - ssl->ocsp_stapling_enabled = 1; + ssl->ocsp_stapling_enabled = true; } void SSL_get0_signed_cert_timestamp_list(const SSL *ssl, const uint8_t **out, size_t *out_len) { SSL_SESSION *session = SSL_get_session(ssl); - - *out_len = 0; - *out = NULL; - if (ssl->server || !session || !session->tlsext_signed_cert_timestamp_list) { + if (ssl->server || !session || !session->signed_cert_timestamp_list) { + *out_len = 0; + *out = NULL; return; } - *out = session->tlsext_signed_cert_timestamp_list; - *out_len = session->tlsext_signed_cert_timestamp_list_length; + *out = CRYPTO_BUFFER_data(session->signed_cert_timestamp_list); + *out_len = CRYPTO_BUFFER_len(session->signed_cert_timestamp_list); } void SSL_get0_ocsp_response(const SSL *ssl, const uint8_t **out, size_t *out_len) { SSL_SESSION *session = SSL_get_session(ssl); - - *out_len = 0; - *out = NULL; if (ssl->server || !session || !session->ocsp_response) { + *out_len = 0; + *out = NULL; return; } - *out = session->ocsp_response; - *out_len = session->ocsp_response_length; + + *out = CRYPTO_BUFFER_data(session->ocsp_response); + *out_len = CRYPTO_BUFFER_len(session->ocsp_response); } int SSL_set_tlsext_host_name(SSL *ssl, const char *name) { @@ -1682,32 +1958,31 @@ int SSL_CTX_set_tlsext_servername_arg(SSL_CTX *ctx, 
void *arg) { return 1; } -int SSL_select_next_proto(uint8_t **out, uint8_t *out_len, - const uint8_t *server, unsigned server_len, - const uint8_t *client, unsigned client_len) { - unsigned int i, j; +int SSL_select_next_proto(uint8_t **out, uint8_t *out_len, const uint8_t *peer, + unsigned peer_len, const uint8_t *supported, + unsigned supported_len) { const uint8_t *result; - int status = OPENSSL_NPN_UNSUPPORTED; - - /* For each protocol in server preference order, see if we support it. */ - for (i = 0; i < server_len;) { - for (j = 0; j < client_len;) { - if (server[i] == client[j] && - OPENSSL_memcmp(&server[i + 1], &client[j + 1], server[i]) == 0) { - /* We found a match */ - result = &server[i]; + int status; + + // For each protocol in peer preference order, see if we support it. + for (unsigned i = 0; i < peer_len;) { + for (unsigned j = 0; j < supported_len;) { + if (peer[i] == supported[j] && + OPENSSL_memcmp(&peer[i + 1], &supported[j + 1], peer[i]) == 0) { + // We found a match + result = &peer[i]; status = OPENSSL_NPN_NEGOTIATED; goto found; } - j += client[j]; + j += supported[j]; j++; } - i += server[i]; + i += peer[i]; i++; } - /* There's no overlap between our protocols and the server's list. */ - result = client; + // There's no overlap between our protocols and the peer's list. + result = supported; status = OPENSSL_NPN_NO_OVERLAP; found: @@ -1718,12 +1993,8 @@ int SSL_select_next_proto(uint8_t **out, uint8_t *out_len, void SSL_get0_next_proto_negotiated(const SSL *ssl, const uint8_t **out_data, unsigned *out_len) { - *out_data = ssl->s3->next_proto_negotiated; - if (*out_data == NULL) { - *out_len = 0; - } else { - *out_len = ssl->s3->next_proto_negotiated_len; - } + *out_data = ssl->s3->next_proto_negotiated.data(); + *out_len = ssl->s3->next_proto_negotiated.size(); } void SSL_CTX_set_next_protos_advertised_cb( @@ -1745,7 +2016,7 @@ void SSL_CTX_set_next_proto_select_cb( int SSL_CTX_set_alpn_protos(SSL_CTX *ctx, const uint8_t *protos, unsigned protos_len) { OPENSSL_free(ctx->alpn_client_proto_list); - ctx->alpn_client_proto_list = BUF_memdup(protos, protos_len); + ctx->alpn_client_proto_list = (uint8_t *)BUF_memdup(protos, protos_len); if (!ctx->alpn_client_proto_list) { return 1; } @@ -1756,7 +2027,7 @@ int SSL_CTX_set_alpn_protos(SSL_CTX *ctx, const uint8_t *protos, int SSL_set_alpn_protos(SSL *ssl, const uint8_t *protos, unsigned protos_len) { OPENSSL_free(ssl->alpn_client_proto_list); - ssl->alpn_client_proto_list = BUF_memdup(protos, protos_len); + ssl->alpn_client_proto_list = (uint8_t *)BUF_memdup(protos, protos_len); if (!ssl->alpn_client_proto_list) { return 1; } @@ -1776,17 +2047,18 @@ void SSL_CTX_set_alpn_select_cb(SSL_CTX *ctx, void SSL_get0_alpn_selected(const SSL *ssl, const uint8_t **out_data, unsigned *out_len) { - *out_data = NULL; - if (ssl->s3) { - *out_data = ssl->s3->alpn_selected; - } - if (*out_data == NULL) { - *out_len = 0; + if (SSL_in_early_data(ssl) && !ssl->server) { + *out_data = ssl->s3->hs->early_session->early_alpn; + *out_len = ssl->s3->hs->early_session->early_alpn_len; } else { - *out_len = ssl->s3->alpn_selected_len; + *out_data = ssl->s3->alpn_selected.data(); + *out_len = ssl->s3->alpn_selected.size(); } } +void SSL_CTX_set_allow_unknown_alpn_protos(SSL_CTX *ctx, int enabled) { + ctx->allow_unknown_alpn_protos = !!enabled; +} void SSL_CTX_set_tls_channel_id_enabled(SSL_CTX *ctx, int enabled) { ctx->tlsext_channel_id_enabled = !!enabled; @@ -1822,7 +2094,7 @@ int SSL_CTX_set1_tls_channel_id(SSL_CTX *ctx, EVP_PKEY *private_key) { 
EVP_PKEY_free(ctx->tlsext_channel_id_private); EVP_PKEY_up_ref(private_key); ctx->tlsext_channel_id_private = private_key; - ctx->tlsext_channel_id_enabled = 1; + ctx->tlsext_channel_id_enabled = true; return 1; } @@ -1836,7 +2108,7 @@ int SSL_set1_tls_channel_id(SSL *ssl, EVP_PKEY *private_key) { EVP_PKEY_free(ssl->tlsext_channel_id_private); EVP_PKEY_up_ref(private_key); ssl->tlsext_channel_id_private = private_key; - ssl->tlsext_channel_id_enabled = 1; + ssl->tlsext_channel_id_enabled = true; return 1; } @@ -1855,95 +2127,8 @@ size_t SSL_get0_certificate_types(SSL *ssl, const uint8_t **out_types) { *out_types = NULL; return 0; } - *out_types = ssl->s3->hs->certificate_types; - return ssl->s3->hs->num_certificate_types; -} - -void ssl_update_cache(SSL_HANDSHAKE *hs, int mode) { - SSL *const ssl = hs->ssl; - SSL_CTX *ctx = ssl->initial_ctx; - /* Never cache sessions with empty session IDs. */ - if (ssl->s3->established_session->session_id_length == 0 || - (ctx->session_cache_mode & mode) != mode) { - return; - } - - /* Clients never use the internal session cache. */ - int use_internal_cache = ssl->server && !(ctx->session_cache_mode & - SSL_SESS_CACHE_NO_INTERNAL_STORE); - - /* A client may see new sessions on abbreviated handshakes if the server - * decides to renew the ticket. Once the handshake is completed, it should be - * inserted into the cache. */ - if (ssl->s3->established_session != ssl->session || - (!ssl->server && hs->ticket_expected)) { - if (use_internal_cache) { - SSL_CTX_add_session(ctx, ssl->s3->established_session); - } - if (ctx->new_session_cb != NULL) { - SSL_SESSION_up_ref(ssl->s3->established_session); - if (!ctx->new_session_cb(ssl, ssl->s3->established_session)) { - /* |new_session_cb|'s return value signals whether it took ownership. */ - SSL_SESSION_free(ssl->s3->established_session); - } - } - } - - if (use_internal_cache && - !(ctx->session_cache_mode & SSL_SESS_CACHE_NO_AUTO_CLEAR)) { - /* Automatically flush the internal session cache every 255 connections. */ - int flush_cache = 0; - CRYPTO_MUTEX_lock_write(&ctx->lock); - ctx->handshakes_since_cache_flush++; - if (ctx->handshakes_since_cache_flush >= 255) { - flush_cache = 1; - ctx->handshakes_since_cache_flush = 0; - } - CRYPTO_MUTEX_unlock_write(&ctx->lock); - - if (flush_cache) { - struct timeval now; - ssl_get_current_time(ssl, &now); - SSL_CTX_flush_sessions(ctx, (long)now.tv_sec); - } - } -} - -static const char *ssl_get_version(int version) { - switch (version) { - /* Report TLS 1.3 draft version as TLS 1.3 in the public API. 
*/ - case TLS1_3_DRAFT_VERSION: - return "TLSv1.3"; - - case TLS1_2_VERSION: - return "TLSv1.2"; - - case TLS1_1_VERSION: - return "TLSv1.1"; - - case TLS1_VERSION: - return "TLSv1"; - - case SSL3_VERSION: - return "SSLv3"; - - case DTLS1_VERSION: - return "DTLSv1"; - - case DTLS1_2_VERSION: - return "DTLSv1.2"; - - default: - return "unknown"; - } -} - -const char *SSL_get_version(const SSL *ssl) { - return ssl_get_version(ssl->version); -} - -const char *SSL_SESSION_get_version(const SSL_SESSION *session) { - return ssl_get_version(session->ssl_version); + *out_types = ssl->s3->hs->certificate_types.data(); + return ssl->s3->hs->certificate_types.size(); } EVP_PKEY *SSL_get_privatekey(const SSL *ssl) { @@ -1963,14 +2148,11 @@ EVP_PKEY *SSL_CTX_get0_privatekey(const SSL_CTX *ctx) { } const SSL_CIPHER *SSL_get_current_cipher(const SSL *ssl) { - if (ssl->s3->aead_write_ctx == NULL) { - return NULL; - } - return ssl->s3->aead_write_ctx->cipher; + return ssl->s3->aead_write_ctx->cipher(); } int SSL_session_reused(const SSL *ssl) { - return ssl->s3->session_reused; + return ssl->s3->session_reused || SSL_in_early_data(ssl); } const COMP_METHOD *SSL_get_current_compression(SSL *ssl) { return NULL; } @@ -1994,44 +2176,35 @@ void SSL_set_quiet_shutdown(SSL *ssl, int mode) { int SSL_get_quiet_shutdown(const SSL *ssl) { return ssl->quiet_shutdown; } void SSL_set_shutdown(SSL *ssl, int mode) { - /* It is an error to clear any bits that have already been set. (We can't try - * to get a second close_notify or send two.) */ + // It is an error to clear any bits that have already been set. (We can't try + // to get a second close_notify or send two.) assert((SSL_get_shutdown(ssl) & mode) == SSL_get_shutdown(ssl)); if (mode & SSL_RECEIVED_SHUTDOWN && - ssl->s3->recv_shutdown == ssl_shutdown_none) { - ssl->s3->recv_shutdown = ssl_shutdown_close_notify; + ssl->s3->read_shutdown == ssl_shutdown_none) { + ssl->s3->read_shutdown = ssl_shutdown_close_notify; } if (mode & SSL_SENT_SHUTDOWN && - ssl->s3->send_shutdown == ssl_shutdown_none) { - ssl->s3->send_shutdown = ssl_shutdown_close_notify; + ssl->s3->write_shutdown == ssl_shutdown_none) { + ssl->s3->write_shutdown = ssl_shutdown_close_notify; } } int SSL_get_shutdown(const SSL *ssl) { int ret = 0; - if (ssl->s3->recv_shutdown != ssl_shutdown_none) { - /* Historically, OpenSSL set |SSL_RECEIVED_SHUTDOWN| on both close_notify - * and fatal alert. */ + if (ssl->s3->read_shutdown != ssl_shutdown_none) { + // Historically, OpenSSL set |SSL_RECEIVED_SHUTDOWN| on both close_notify + // and fatal alert. ret |= SSL_RECEIVED_SHUTDOWN; } - if (ssl->s3->send_shutdown == ssl_shutdown_close_notify) { - /* Historically, OpenSSL set |SSL_SENT_SHUTDOWN| on only close_notify. */ + if (ssl->s3->write_shutdown == ssl_shutdown_close_notify) { + // Historically, OpenSSL set |SSL_SENT_SHUTDOWN| on only close_notify. ret |= SSL_SENT_SHUTDOWN; } return ret; } -int SSL_version(const SSL *ssl) { - /* Report TLS 1.3 draft version as TLS 1.3 in the public API. */ - if (ssl->version == TLS1_3_DRAFT_VERSION) { - return TLS1_3_VERSION; - } - - return ssl->version; -} - SSL_CTX *SSL_get_SSL_CTX(const SSL *ssl) { return ssl->ctx; } SSL_CTX *SSL_set_SSL_CTX(SSL *ssl, SSL_CTX *ctx) { @@ -2039,14 +2212,14 @@ SSL_CTX *SSL_set_SSL_CTX(SSL *ssl, SSL_CTX *ctx) { return ssl->ctx; } - /* One cannot change the X.509 callbacks during a connection. */ + // One cannot change the X.509 callbacks during a connection. 
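// A server-side sketch (illustrative, not part of this patch) of the usual
// consumer of |SSL_set_SSL_CTX|: a servername callback that switches to a
// per-hostname SSL_CTX, which is only legal when both contexts share the same
// x509_method, as the check below enforces. |lookup_ctx_for_host| is a
// placeholder; registration uses the real
// |SSL_CTX_set_tlsext_servername_callback| API.
#include <openssl/ssl.h>
extern SSL_CTX *lookup_ctx_for_host(const char *host);  // placeholder
static int select_sni_ctx(SSL *ssl, int *out_alert, void *arg) {
  (void)out_alert;
  (void)arg;
  const char *host = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
  SSL_CTX *per_host = host != NULL ? lookup_ctx_for_host(host) : NULL;
  if (per_host != NULL) {
    SSL_set_SSL_CTX(ssl, per_host);
  }
  return SSL_TLSEXT_ERR_OK;
}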
if (ssl->ctx->x509_method != ctx->x509_method) { assert(0); return NULL; } if (ctx == NULL) { - ctx = ssl->initial_ctx; + ctx = ssl->session_ctx; } ssl_cert_free(ssl->cert); @@ -2084,17 +2257,17 @@ char *SSL_get_shared_ciphers(const SSL *ssl, char *buf, int len) { } int SSL_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func) { + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { int index; if (!CRYPTO_get_ex_new_index(&g_ex_data_class_ssl, &index, argl, argp, - dup_func, free_func)) { + free_func)) { return -1; } return index; } -int SSL_set_ex_data(SSL *ssl, int idx, void *arg) { - return CRYPTO_set_ex_data(&ssl->ex_data, idx, arg); +int SSL_set_ex_data(SSL *ssl, int idx, void *data) { + return CRYPTO_set_ex_data(&ssl->ex_data, idx, data); } void *SSL_get_ex_data(const SSL *ssl, int idx) { @@ -2102,92 +2275,70 @@ void *SSL_get_ex_data(const SSL *ssl, int idx) { } int SSL_CTX_get_ex_new_index(long argl, void *argp, CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, + CRYPTO_EX_dup *dup_unused, CRYPTO_EX_free *free_func) { int index; if (!CRYPTO_get_ex_new_index(&g_ex_data_class_ssl_ctx, &index, argl, argp, - dup_func, free_func)) { + free_func)) { return -1; } return index; } -int SSL_CTX_set_ex_data(SSL_CTX *ctx, int idx, void *arg) { - return CRYPTO_set_ex_data(&ctx->ex_data, idx, arg); +int SSL_CTX_set_ex_data(SSL_CTX *ctx, int idx, void *data) { + return CRYPTO_set_ex_data(&ctx->ex_data, idx, data); } void *SSL_CTX_get_ex_data(const SSL_CTX *ctx, int idx) { return CRYPTO_get_ex_data(&ctx->ex_data, idx); } -int SSL_want(const SSL *ssl) { return ssl->rwstate; } +int SSL_want(const SSL *ssl) { return ssl->s3->rwstate; } void SSL_CTX_set_tmp_rsa_callback(SSL_CTX *ctx, RSA *(*cb)(SSL *ssl, int is_export, - int keylength)) { -} + int keylength)) {} void SSL_set_tmp_rsa_callback(SSL *ssl, RSA *(*cb)(SSL *ssl, int is_export, - int keylength)) { -} + int keylength)) {} void SSL_CTX_set_tmp_dh_callback(SSL_CTX *ctx, - DH *(*callback)(SSL *ssl, int is_export, - int keylength)) { - ctx->cert->dh_tmp_cb = callback; -} + DH *(*cb)(SSL *ssl, int is_export, + int keylength)) {} -void SSL_set_tmp_dh_callback(SSL *ssl, DH *(*callback)(SSL *ssl, int is_export, - int keylength)) { - ssl->cert->dh_tmp_cb = callback; -} +void SSL_set_tmp_dh_callback(SSL *ssl, DH *(*cb)(SSL *ssl, int is_export, + int keylength)) {} -int SSL_CTX_use_psk_identity_hint(SSL_CTX *ctx, const char *identity_hint) { +static int use_psk_identity_hint(char **out, const char *identity_hint) { if (identity_hint != NULL && strlen(identity_hint) > PSK_MAX_IDENTITY_LEN) { OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); return 0; } - OPENSSL_free(ctx->psk_identity_hint); + // Clear currently configured hint, if any. + OPENSSL_free(*out); + *out = NULL; - if (identity_hint != NULL) { - ctx->psk_identity_hint = BUF_strdup(identity_hint); - if (ctx->psk_identity_hint == NULL) { + // Treat the empty hint as not supplying one. Plain PSK makes it possible to + // send either no hint (omit ServerKeyExchange) or an empty hint, while + // ECDHE_PSK can only spell empty hint. Having different capabilities is odd, + // so we interpret empty and missing as identical. 
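// A minimal usage sketch (illustrative, not part of this patch) of the shared
// helper above: both the SSL_CTX and SSL setters now funnel through
// |use_psk_identity_hint|, and an empty string is treated the same as NULL,
// so the last two calls below are equivalent. The hint string is a placeholder.
#include <stddef.h>
#include <openssl/ssl.h>
static void configure_psk_hint(SSL_CTX *ctx) {
  SSL_CTX_use_psk_identity_hint(ctx, "example-hint");  // advertise a hint
  SSL_CTX_use_psk_identity_hint(ctx, "");              // empty: no hint sent
  SSL_CTX_use_psk_identity_hint(ctx, NULL);            // NULL: same as empty
}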
+ if (identity_hint != NULL && identity_hint[0] != '\0') { + *out = BUF_strdup(identity_hint); + if (*out == NULL) { return 0; } - } else { - ctx->psk_identity_hint = NULL; } return 1; } -int SSL_use_psk_identity_hint(SSL *ssl, const char *identity_hint) { - if (ssl == NULL) { - return 0; - } - - if (identity_hint != NULL && strlen(identity_hint) > PSK_MAX_IDENTITY_LEN) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); - return 0; - } - - /* Clear currently configured hint, if any. */ - OPENSSL_free(ssl->psk_identity_hint); - ssl->psk_identity_hint = NULL; - - /* Treat the empty hint as not supplying one. Plain PSK makes it possible to - * send either no hint (omit ServerKeyExchange) or an empty hint, while - * ECDHE_PSK can only spell empty hint. Having different capabilities is odd, - * so we interpret empty and missing as identical. */ - if (identity_hint != NULL && identity_hint[0] != '\0') { - ssl->psk_identity_hint = BUF_strdup(identity_hint); - if (ssl->psk_identity_hint == NULL) { - return 0; - } - } +int SSL_CTX_use_psk_identity_hint(SSL_CTX *ctx, const char *identity_hint) { + return use_psk_identity_hint(&ctx->psk_identity_hint, identity_hint); +} - return 1; +int SSL_use_psk_identity_hint(SSL *ssl, const char *identity_hint) { + return use_psk_identity_hint(&ssl->psk_identity_hint, identity_hint); } const char *SSL_get_psk_identity_hint(const SSL *ssl) { @@ -2272,56 +2423,16 @@ void SSL_CTX_set_current_time_cb(SSL_CTX *ctx, ctx->current_time_cb = cb; } -static int cbb_add_hex(CBB *cbb, const uint8_t *in, size_t in_len) { - static const char hextable[] = "0123456789abcdef"; - uint8_t *out; - - if (!CBB_add_space(cbb, &out, in_len * 2)) { - return 0; - } - - for (size_t i = 0; i < in_len; i++) { - *(out++) = (uint8_t)hextable[in[i] >> 4]; - *(out++) = (uint8_t)hextable[in[i] & 0xf]; - } - - return 1; -} - -int ssl_log_secret(const SSL *ssl, const char *label, const uint8_t *secret, - size_t secret_len) { - if (ssl->ctx->keylog_callback == NULL) { - return 1; - } - - CBB cbb; - uint8_t *out; - size_t out_len; - if (!CBB_init(&cbb, strlen(label) + 1 + SSL3_RANDOM_SIZE * 2 + 1 + - secret_len * 2 + 1) || - !CBB_add_bytes(&cbb, (const uint8_t *)label, strlen(label)) || - !CBB_add_bytes(&cbb, (const uint8_t *)" ", 1) || - !cbb_add_hex(&cbb, ssl->s3->client_random, SSL3_RANDOM_SIZE) || - !CBB_add_bytes(&cbb, (const uint8_t *)" ", 1) || - !cbb_add_hex(&cbb, secret, secret_len) || - !CBB_add_u8(&cbb, 0 /* NUL */) || - !CBB_finish(&cbb, &out, &out_len)) { - CBB_cleanup(&cbb); - return 0; - } - - ssl->ctx->keylog_callback(ssl, (const char *)out); - OPENSSL_free(out); - return 1; -} - int SSL_is_init_finished(const SSL *ssl) { return !SSL_in_init(ssl); } int SSL_in_init(const SSL *ssl) { - SSL_HANDSHAKE *hs = ssl->s3->hs; - return hs != NULL && hs->state != SSL_ST_OK; + // This returns false once all the handshake state has been finalized, to + // allow callbacks and getters based on SSL_in_init to return the correct + // values. + SSL_HANDSHAKE *hs = ssl->s3->hs.get(); + return hs != nullptr && !hs->handshake_finalized; } int SSL_in_false_start(const SSL *ssl) { @@ -2342,121 +2453,13 @@ void SSL_get_structure_sizes(size_t *ssl_size, size_t *ssl_ctx_size, *ssl_session_size = sizeof(SSL_SESSION); } -int ssl3_can_false_start(const SSL *ssl) { - const SSL_CIPHER *const cipher = SSL_get_current_cipher(ssl); - - /* False Start only for TLS 1.2 with an ECDHE+AEAD cipher and ALPN or NPN. 
*/ - return !SSL_is_dtls(ssl) && - SSL_version(ssl) == TLS1_2_VERSION && - (ssl->s3->alpn_selected != NULL || - ssl->s3->next_proto_negotiated != NULL) && - cipher != NULL && - cipher->algorithm_mkey == SSL_kECDHE && - cipher->algorithm_mac == SSL_AEAD; -} - -const struct { - uint16_t version; - uint32_t flag; -} kVersions[] = { - {SSL3_VERSION, SSL_OP_NO_SSLv3}, - {TLS1_VERSION, SSL_OP_NO_TLSv1}, - {TLS1_1_VERSION, SSL_OP_NO_TLSv1_1}, - {TLS1_2_VERSION, SSL_OP_NO_TLSv1_2}, - {TLS1_3_VERSION, SSL_OP_NO_TLSv1_3}, -}; - -static const size_t kVersionsLen = OPENSSL_ARRAY_SIZE(kVersions); - -int ssl_get_version_range(const SSL *ssl, uint16_t *out_min_version, - uint16_t *out_max_version) { - /* For historical reasons, |SSL_OP_NO_DTLSv1| aliases |SSL_OP_NO_TLSv1|, but - * DTLS 1.0 should be mapped to TLS 1.1. */ - uint32_t options = ssl->options; - if (SSL_is_dtls(ssl)) { - options &= ~SSL_OP_NO_TLSv1_1; - if (options & SSL_OP_NO_DTLSv1) { - options |= SSL_OP_NO_TLSv1_1; - } - } - - uint16_t min_version = ssl->min_version; - uint16_t max_version = ssl->max_version; - - /* Bound the range to only those implemented in this protocol. */ - if (min_version < ssl->method->min_version) { - min_version = ssl->method->min_version; - } - if (max_version > ssl->method->max_version) { - max_version = ssl->method->max_version; - } - - /* OpenSSL's API for controlling versions entails blacklisting individual - * protocols. This has two problems. First, on the client, the protocol can - * only express a contiguous range of versions. Second, a library consumer - * trying to set a maximum version cannot disable protocol versions that get - * added in a future version of the library. - * - * To account for both of these, OpenSSL interprets the client-side bitmask - * as a min/max range by picking the lowest contiguous non-empty range of - * enabled protocols. Note that this means it is impossible to set a maximum - * version of the higest supported TLS version in a future-proof way. */ - int any_enabled = 0; - for (size_t i = 0; i < kVersionsLen; i++) { - /* Only look at the versions already enabled. */ - if (min_version > kVersions[i].version) { - continue; - } - if (max_version < kVersions[i].version) { - break; - } - - if (!(options & kVersions[i].flag)) { - /* The minimum version is the first enabled version. */ - if (!any_enabled) { - any_enabled = 1; - min_version = kVersions[i].version; - } - continue; - } - - /* If there is a disabled version after the first enabled one, all versions - * after it are implicitly disabled. */ - if (any_enabled) { - max_version = kVersions[i-1].version; - break; - } - } - - if (!any_enabled) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SSL_VERSION); - return 0; - } - - *out_min_version = min_version; - *out_max_version = max_version; - return 1; -} - -uint16_t ssl3_protocol_version(const SSL *ssl) { - assert(ssl->s3->have_version); - uint16_t version; - if (!ssl->method->version_from_wire(&version, ssl->version)) { - /* TODO(davidben): Use the internal version representation for ssl->version - * and map to the public API representation at API boundaries. 
*/ - assert(0); - return 0; - } - - return version; -} - int SSL_is_server(const SSL *ssl) { return ssl->server; } int SSL_is_dtls(const SSL *ssl) { return ssl->method->is_dtls; } -void SSL_CTX_set_select_certificate_cb(SSL_CTX *ctx, - int (*cb)(const SSL_CLIENT_HELLO *)) { +void SSL_CTX_set_select_certificate_cb( + SSL_CTX *ctx, + enum ssl_select_cert_result_t (*cb)(const SSL_CLIENT_HELLO *)) { ctx->select_certificate_cb = cb; } @@ -2471,15 +2474,9 @@ void SSL_set_renegotiate_mode(SSL *ssl, enum ssl_renegotiate_mode_t mode) { int SSL_get_ivs(const SSL *ssl, const uint8_t **out_read_iv, const uint8_t **out_write_iv, size_t *out_iv_len) { - if (ssl->s3->aead_read_ctx == NULL || ssl->s3->aead_write_ctx == NULL) { - return 0; - } - size_t write_iv_len; - if (!EVP_AEAD_CTX_get_iv(&ssl->s3->aead_read_ctx->ctx, out_read_iv, - out_iv_len) || - !EVP_AEAD_CTX_get_iv(&ssl->s3->aead_write_ctx->ctx, out_write_iv, - &write_iv_len) || + if (!ssl->s3->aead_read_ctx->GetIV(out_read_iv, out_iv_len) || + !ssl->s3->aead_write_ctx->GetIV(out_write_iv, &write_iv_len) || *out_iv_len != write_iv_len) { return 0; } @@ -2495,9 +2492,9 @@ static uint64_t be_to_u64(const uint8_t in[8]) { } uint64_t SSL_get_read_sequence(const SSL *ssl) { - /* TODO(davidben): Internally represent sequence numbers as uint64_t. */ + // TODO(davidben): Internally represent sequence numbers as uint64_t. if (SSL_is_dtls(ssl)) { - /* max_seq_num already includes the epoch. */ + // max_seq_num already includes the epoch. assert(ssl->d1->r_epoch == (ssl->d1->bitmap.max_seq_num >> 48)); return ssl->d1->bitmap.max_seq_num; } @@ -2514,8 +2511,8 @@ uint64_t SSL_get_write_sequence(const SSL *ssl) { } uint16_t SSL_get_peer_signature_algorithm(const SSL *ssl) { - /* TODO(davidben): This checks the wrong session if there is a renegotiation - * in progress. */ + // TODO(davidben): This checks the wrong session if there is a renegotiation + // in progress. SSL_SESSION *session = SSL_get_session(ssl); if (session == NULL) { return 0; @@ -2547,7 +2544,7 @@ size_t SSL_get_server_random(const SSL *ssl, uint8_t *out, size_t max_out) { } const SSL_CIPHER *SSL_get_pending_cipher(const SSL *ssl) { - SSL_HANDSHAKE *hs = ssl->s3->hs; + SSL_HANDSHAKE *hs = ssl->s3->hs.get(); if (hs == NULL) { return NULL; } @@ -2566,37 +2563,24 @@ void SSL_CTX_set_grease_enabled(SSL_CTX *ctx, int enabled) { ctx->grease_enabled = !!enabled; } -void SSL_CTX_set_short_header_enabled(SSL_CTX *ctx, int enabled) { - ctx->short_header_enabled = !!enabled; +int32_t SSL_get_ticket_age_skew(const SSL *ssl) { + return ssl->s3->ticket_age_skew; } int SSL_clear(SSL *ssl) { - /* In OpenSSL, reusing a client |SSL| with |SSL_clear| causes the previously - * established session to be offered the next time around. wpa_supplicant - * depends on this behavior, so emulate it. */ - SSL_SESSION *session = NULL; + // In OpenSSL, reusing a client |SSL| with |SSL_clear| causes the previously + // established session to be offered the next time around. wpa_supplicant + // depends on this behavior, so emulate it. + UniquePtr session; if (!ssl->server && ssl->s3->established_session != NULL) { - session = ssl->s3->established_session; - SSL_SESSION_up_ref(session); + session.reset(ssl->s3->established_session.get()); + SSL_SESSION_up_ref(session.get()); } - /* TODO(davidben): Some state on |ssl| is reset both in |SSL_new| and - * |SSL_clear| because it is per-connection state rather than configuration - * state. 
Per-connection state should be on |ssl->s3| and |ssl->d1| so it is - * naturally reset at the right points between |SSL_new|, |SSL_clear|, and - * |ssl3_new|. */ - - ssl->rwstate = SSL_NOTHING; - - BUF_MEM_free(ssl->init_buf); - ssl->init_buf = NULL; - ssl->init_msg = NULL; - ssl->init_num = 0; - - /* The ssl->d1->mtu is simultaneously configuration (preserved across - * clear) and connection-specific state (gets reset). - * - * TODO(davidben): Avoid this. */ + // The ssl->d1->mtu is simultaneously configuration (preserved across + // clear) and connection-specific state (gets reset). + // + // TODO(davidben): Avoid this. unsigned mtu = 0; if (ssl->d1 != NULL) { mtu = ssl->d1->mtu; @@ -2604,7 +2588,6 @@ int SSL_clear(SSL *ssl) { ssl->method->ssl_free(ssl); if (!ssl->method->ssl_new(ssl)) { - SSL_SESSION_free(session); return 0; } @@ -2612,52 +2595,13 @@ int SSL_clear(SSL *ssl) { ssl->d1->mtu = mtu; } - if (session != NULL) { - SSL_set_session(ssl, session); - SSL_SESSION_free(session); + if (session != nullptr) { + SSL_set_session(ssl, session.get()); } return 1; } -void ssl_do_info_callback(const SSL *ssl, int type, int value) { - void (*cb)(const SSL *ssl, int type, int value) = NULL; - if (ssl->info_callback != NULL) { - cb = ssl->info_callback; - } else if (ssl->ctx->info_callback != NULL) { - cb = ssl->ctx->info_callback; - } - - if (cb != NULL) { - cb(ssl, type, value); - } -} - -void ssl_do_msg_callback(SSL *ssl, int is_write, int content_type, - const void *buf, size_t len) { - if (ssl->msg_callback == NULL) { - return; - } - - /* |version| is zero when calling for |SSL3_RT_HEADER| and |SSL2_VERSION| for - * a V2ClientHello. */ - int version; - switch (content_type) { - case 0: - /* V2ClientHello */ - version = SSL2_VERSION; - break; - case SSL3_RT_HEADER: - version = 0; - break; - default: - version = SSL_version(ssl); - } - - ssl->msg_callback(is_write, version, content_type, buf, len, ssl, - ssl->msg_callback_arg); -} - int SSL_CTX_sess_connect(const SSL_CTX *ctx) { return 0; } int SSL_CTX_sess_connect_good(const SSL_CTX *ctx) { return 0; } int SSL_CTX_sess_connect_renegotiate(const SSL_CTX *ctx) { return 0; } @@ -2700,37 +2644,7 @@ int SSL_set_tmp_ecdh(SSL *ssl, const EC_KEY *ec_key) { return SSL_set1_curves(ssl, &nid, 1); } -void ssl_get_current_time(const SSL *ssl, struct timeval *out_clock) { - if (ssl->ctx->current_time_cb != NULL) { - ssl->ctx->current_time_cb(ssl, out_clock); - return; - } - -#if defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) - out_clock->tv_sec = 1234; - out_clock->tv_usec = 1234; -#elif defined(OPENSSL_WINDOWS) - struct _timeb time; - _ftime(&time); - out_clock->tv_sec = time.time; - out_clock->tv_usec = time.millitm * 1000; -#else - gettimeofday(out_clock, NULL); -#endif -} - -int SSL_CTX_set_min_version(SSL_CTX *ctx, uint16_t version) { - return SSL_CTX_set_min_proto_version(ctx, version); -} - -int SSL_CTX_set_max_version(SSL_CTX *ctx, uint16_t version) { - return SSL_CTX_set_max_proto_version(ctx, version); -} - -int SSL_set_min_version(SSL *ssl, uint16_t version) { - return SSL_set_min_proto_version(ssl, version); -} - -int SSL_set_max_version(SSL *ssl, uint16_t version) { - return SSL_set_max_proto_version(ssl, version); +void SSL_CTX_set_ticket_aead_method(SSL_CTX *ctx, + const SSL_TICKET_AEAD_METHOD *aead_method) { + ctx->ticket_aead_method = aead_method; } diff --git a/Sources/BoringSSL/ssl/ssl_privkey.c b/Sources/BoringSSL/ssl/ssl_privkey.c deleted file mode 100644 index 79622473d..000000000 --- a/Sources/BoringSSL/ssl/ssl_privkey.c +++ 
/dev/null @@ -1,683 +0,0 @@ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] 
*/ - -#include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "internal.h" - - -int ssl_is_key_type_supported(int key_type) { - return key_type == EVP_PKEY_RSA || key_type == EVP_PKEY_EC; -} - -static int ssl_set_pkey(CERT *cert, EVP_PKEY *pkey) { - if (!ssl_is_key_type_supported(pkey->type)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE); - return 0; - } - - if (cert->chain != NULL && - sk_CRYPTO_BUFFER_value(cert->chain, 0) != NULL && - /* Sanity-check that the private key and the certificate match, unless - * the key is opaque (in case of, say, a smartcard). */ - !EVP_PKEY_is_opaque(pkey) && - !ssl_cert_check_private_key(cert, pkey)) { - return 0; - } - - EVP_PKEY_free(cert->privatekey); - EVP_PKEY_up_ref(pkey); - cert->privatekey = pkey; - - return 1; -} - -int SSL_use_RSAPrivateKey(SSL *ssl, RSA *rsa) { - EVP_PKEY *pkey; - int ret; - - if (rsa == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); - return 0; - } - - pkey = EVP_PKEY_new(); - if (pkey == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_EVP_LIB); - return 0; - } - - RSA_up_ref(rsa); - EVP_PKEY_assign_RSA(pkey, rsa); - - ret = ssl_set_pkey(ssl->cert, pkey); - EVP_PKEY_free(pkey); - - return ret; -} - -int SSL_use_PrivateKey(SSL *ssl, EVP_PKEY *pkey) { - if (pkey == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); - return 0; - } - - return ssl_set_pkey(ssl->cert, pkey); -} - -int SSL_use_PrivateKey_ASN1(int type, SSL *ssl, const uint8_t *der, - size_t der_len) { - if (der_len > LONG_MAX) { - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return 0; - } - - const uint8_t *p = der; - EVP_PKEY *pkey = d2i_PrivateKey(type, NULL, &p, (long)der_len); - if (pkey == NULL || p != der + der_len) { - OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); - EVP_PKEY_free(pkey); - return 0; - } - - int ret = SSL_use_PrivateKey(ssl, pkey); - EVP_PKEY_free(pkey); - return ret; -} - -int SSL_CTX_use_RSAPrivateKey(SSL_CTX *ctx, RSA *rsa) { - int ret; - EVP_PKEY *pkey; - - if (rsa == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); - return 0; - } - - pkey = EVP_PKEY_new(); - if (pkey == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_EVP_LIB); - return 0; - } - - RSA_up_ref(rsa); - EVP_PKEY_assign_RSA(pkey, rsa); - - ret = ssl_set_pkey(ctx->cert, pkey); - EVP_PKEY_free(pkey); - return ret; -} - -int SSL_CTX_use_RSAPrivateKey_ASN1(SSL_CTX *ctx, const uint8_t *der, - size_t der_len) { - RSA *rsa = RSA_private_key_from_bytes(der, der_len); - if (rsa == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); - return 0; - } - - int ret = SSL_CTX_use_RSAPrivateKey(ctx, rsa); - RSA_free(rsa); - return ret; -} - -int SSL_CTX_use_PrivateKey(SSL_CTX *ctx, EVP_PKEY *pkey) { - if (pkey == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); - return 0; - } - - return ssl_set_pkey(ctx->cert, pkey); -} - -int SSL_CTX_use_PrivateKey_ASN1(int type, SSL_CTX *ctx, const uint8_t *der, - size_t der_len) { - if (der_len > LONG_MAX) { - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return 0; - } - - const uint8_t *p = der; - EVP_PKEY *pkey = d2i_PrivateKey(type, NULL, &p, (long)der_len); - if (pkey == NULL || p != der + der_len) { - OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); - EVP_PKEY_free(pkey); - return 0; - } - - int ret = SSL_CTX_use_PrivateKey(ctx, pkey); - EVP_PKEY_free(pkey); - return ret; -} - -void SSL_set_private_key_method(SSL *ssl, - const SSL_PRIVATE_KEY_METHOD *key_method) { - ssl->cert->key_method = key_method; -} - -void SSL_CTX_set_private_key_method(SSL_CTX *ctx, - const 
SSL_PRIVATE_KEY_METHOD *key_method) { - ctx->cert->key_method = key_method; -} - -static int set_signing_algorithm_prefs(CERT *cert, const uint16_t *prefs, - size_t num_prefs) { - OPENSSL_free(cert->sigalgs); - - cert->num_sigalgs = 0; - cert->sigalgs = BUF_memdup(prefs, num_prefs * sizeof(prefs[0])); - if (cert->sigalgs == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return 0; - } - cert->num_sigalgs = num_prefs; - - return 1; -} - -int SSL_CTX_set_signing_algorithm_prefs(SSL_CTX *ctx, const uint16_t *prefs, - size_t num_prefs) { - return set_signing_algorithm_prefs(ctx->cert, prefs, num_prefs); -} - - -int SSL_set_signing_algorithm_prefs(SSL *ssl, const uint16_t *prefs, - size_t num_prefs) { - return set_signing_algorithm_prefs(ssl->cert, prefs, num_prefs); -} - -int SSL_set_private_key_digest_prefs(SSL *ssl, const int *digest_nids, - size_t num_digests) { - OPENSSL_free(ssl->cert->sigalgs); - - OPENSSL_COMPILE_ASSERT(sizeof(int) >= 2 * sizeof(uint16_t), - digest_list_conversion_cannot_overflow); - - ssl->cert->num_sigalgs = 0; - ssl->cert->sigalgs = OPENSSL_malloc(sizeof(uint16_t) * 2 * num_digests); - if (ssl->cert->sigalgs == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return 0; - } - - /* Convert the digest list to a signature algorithms list. - * - * TODO(davidben): Replace this API with one that can express RSA-PSS, etc. */ - for (size_t i = 0; i < num_digests; i++) { - switch (digest_nids[i]) { - case NID_sha1: - ssl->cert->sigalgs[ssl->cert->num_sigalgs] = SSL_SIGN_RSA_PKCS1_SHA1; - ssl->cert->sigalgs[ssl->cert->num_sigalgs + 1] = SSL_SIGN_ECDSA_SHA1; - ssl->cert->num_sigalgs += 2; - break; - case NID_sha256: - ssl->cert->sigalgs[ssl->cert->num_sigalgs] = SSL_SIGN_RSA_PKCS1_SHA256; - ssl->cert->sigalgs[ssl->cert->num_sigalgs + 1] = - SSL_SIGN_ECDSA_SECP256R1_SHA256; - ssl->cert->num_sigalgs += 2; - break; - case NID_sha384: - ssl->cert->sigalgs[ssl->cert->num_sigalgs] = SSL_SIGN_RSA_PKCS1_SHA384; - ssl->cert->sigalgs[ssl->cert->num_sigalgs + 1] = - SSL_SIGN_ECDSA_SECP384R1_SHA384; - ssl->cert->num_sigalgs += 2; - break; - case NID_sha512: - ssl->cert->sigalgs[ssl->cert->num_sigalgs] = SSL_SIGN_RSA_PKCS1_SHA512; - ssl->cert->sigalgs[ssl->cert->num_sigalgs + 1] = - SSL_SIGN_ECDSA_SECP521R1_SHA512; - ssl->cert->num_sigalgs += 2; - break; - } - } - - return 1; -} - -int ssl_has_private_key(const SSL *ssl) { - return ssl->cert->privatekey != NULL || ssl->cert->key_method != NULL; -} - -int ssl_is_ecdsa_key_type(int type) { - switch (type) { - /* TODO(davidben): Remove support for |EVP_PKEY_EC| key types. */ - case EVP_PKEY_EC: - case NID_X9_62_prime256v1: - case NID_secp384r1: - case NID_secp521r1: - return 1; - default: - return 0; - } -} - -int ssl_private_key_type(SSL *ssl) { - if (ssl->cert->key_method != NULL) { - return ssl->cert->key_method->type(ssl); - } - switch (EVP_PKEY_id(ssl->cert->privatekey)) { - case EVP_PKEY_RSA: - return NID_rsaEncryption; - case EVP_PKEY_EC: - return EC_GROUP_get_curve_name( - EC_KEY_get0_group(EVP_PKEY_get0_EC_KEY(ssl->cert->privatekey))); - default: - return NID_undef; - } -} - -size_t ssl_private_key_max_signature_len(SSL *ssl) { - if (ssl->cert->key_method != NULL) { - return ssl->cert->key_method->max_signature_len(ssl); - } - return EVP_PKEY_size(ssl->cert->privatekey); -} - -/* TODO(davidben): Forbid RSA-PKCS1 in TLS 1.3. For now we allow it because NSS - * has yet to start doing RSA-PSS, so enforcing it would complicate interop - * testing. 
*/ -static int is_rsa_pkcs1(const EVP_MD **out_md, uint16_t sigalg) { - switch (sigalg) { - case SSL_SIGN_RSA_PKCS1_MD5_SHA1: - *out_md = EVP_md5_sha1(); - return 1; - case SSL_SIGN_RSA_PKCS1_SHA1: - *out_md = EVP_sha1(); - return 1; - case SSL_SIGN_RSA_PKCS1_SHA256: - *out_md = EVP_sha256(); - return 1; - case SSL_SIGN_RSA_PKCS1_SHA384: - *out_md = EVP_sha384(); - return 1; - case SSL_SIGN_RSA_PKCS1_SHA512: - *out_md = EVP_sha512(); - return 1; - default: - return 0; - } -} - -static int ssl_sign_rsa_pkcs1(SSL *ssl, uint8_t *out, size_t *out_len, - size_t max_out, const EVP_MD *md, - const uint8_t *in, size_t in_len) { - EVP_MD_CTX ctx; - EVP_MD_CTX_init(&ctx); - *out_len = max_out; - int ret = EVP_DigestSignInit(&ctx, NULL, md, NULL, ssl->cert->privatekey) && - EVP_DigestSignUpdate(&ctx, in, in_len) && - EVP_DigestSignFinal(&ctx, out, out_len); - EVP_MD_CTX_cleanup(&ctx); - return ret; -} - -static int ssl_verify_rsa_pkcs1(SSL *ssl, const uint8_t *signature, - size_t signature_len, const EVP_MD *md, - EVP_PKEY *pkey, const uint8_t *in, - size_t in_len) { - if (pkey->type != EVP_PKEY_RSA) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); - return 0; - } - - EVP_MD_CTX md_ctx; - EVP_MD_CTX_init(&md_ctx); - int ret = EVP_DigestVerifyInit(&md_ctx, NULL, md, NULL, pkey) && - EVP_DigestVerifyUpdate(&md_ctx, in, in_len) && - EVP_DigestVerifyFinal(&md_ctx, signature, signature_len); - EVP_MD_CTX_cleanup(&md_ctx); - return ret; -} - -static int is_ecdsa(int *out_curve, const EVP_MD **out_md, uint16_t sigalg) { - switch (sigalg) { - case SSL_SIGN_ECDSA_SHA1: - *out_curve = NID_undef; - *out_md = EVP_sha1(); - return 1; - case SSL_SIGN_ECDSA_SECP256R1_SHA256: - *out_curve = NID_X9_62_prime256v1; - *out_md = EVP_sha256(); - return 1; - case SSL_SIGN_ECDSA_SECP384R1_SHA384: - *out_curve = NID_secp384r1; - *out_md = EVP_sha384(); - return 1; - case SSL_SIGN_ECDSA_SECP521R1_SHA512: - *out_curve = NID_secp521r1; - *out_md = EVP_sha512(); - return 1; - default: - return 0; - } -} - -static int ssl_sign_ecdsa(SSL *ssl, uint8_t *out, size_t *out_len, - size_t max_out, int curve, const EVP_MD *md, - const uint8_t *in, size_t in_len) { - EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(ssl->cert->privatekey); - if (ec_key == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); - return 0; - } - - /* In TLS 1.3, the curve is also specified by the signature algorithm. */ - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION && - (curve == NID_undef || - EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key)) != curve)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); - return 0; - } - - EVP_MD_CTX ctx; - EVP_MD_CTX_init(&ctx); - *out_len = max_out; - int ret = EVP_DigestSignInit(&ctx, NULL, md, NULL, ssl->cert->privatekey) && - EVP_DigestSignUpdate(&ctx, in, in_len) && - EVP_DigestSignFinal(&ctx, out, out_len); - EVP_MD_CTX_cleanup(&ctx); - return ret; -} - -static int ssl_verify_ecdsa(SSL *ssl, const uint8_t *signature, - size_t signature_len, int curve, const EVP_MD *md, - EVP_PKEY *pkey, const uint8_t *in, size_t in_len) { - EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(pkey); - if (ec_key == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); - return 0; - } - - /* In TLS 1.3, the curve is also specified by the signature algorithm. 
*/ - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION && - (curve == NID_undef || - EC_GROUP_get_curve_name(EC_KEY_get0_group(ec_key)) != curve)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); - return 0; - } - - EVP_MD_CTX md_ctx; - EVP_MD_CTX_init(&md_ctx); - int ret = EVP_DigestVerifyInit(&md_ctx, NULL, md, NULL, pkey) && - EVP_DigestVerifyUpdate(&md_ctx, in, in_len) && - EVP_DigestVerifyFinal(&md_ctx, signature, signature_len); - EVP_MD_CTX_cleanup(&md_ctx); - return ret; -} - -static int is_rsa_pss(const EVP_MD **out_md, uint16_t sigalg) { - switch (sigalg) { - case SSL_SIGN_RSA_PSS_SHA256: - *out_md = EVP_sha256(); - return 1; - case SSL_SIGN_RSA_PSS_SHA384: - *out_md = EVP_sha384(); - return 1; - case SSL_SIGN_RSA_PSS_SHA512: - *out_md = EVP_sha512(); - return 1; - default: - return 0; - } -} - -static int ssl_sign_rsa_pss(SSL *ssl, uint8_t *out, size_t *out_len, - size_t max_out, const EVP_MD *md, - const uint8_t *in, size_t in_len) { - EVP_MD_CTX ctx; - EVP_MD_CTX_init(&ctx); - *out_len = max_out; - EVP_PKEY_CTX *pctx; - int ret = - EVP_DigestSignInit(&ctx, &pctx, md, NULL, ssl->cert->privatekey) && - EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING) && - EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, -1 /* salt len = hash len */) && - EVP_DigestSignUpdate(&ctx, in, in_len) && - EVP_DigestSignFinal(&ctx, out, out_len); - EVP_MD_CTX_cleanup(&ctx); - return ret; -} - -static int ssl_verify_rsa_pss(SSL *ssl, const uint8_t *signature, - size_t signature_len, const EVP_MD *md, - EVP_PKEY *pkey, const uint8_t *in, - size_t in_len) { - if (pkey->type != EVP_PKEY_RSA) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); - return 0; - } - - EVP_MD_CTX md_ctx; - EVP_MD_CTX_init(&md_ctx); - EVP_PKEY_CTX *pctx; - int ret = - EVP_DigestVerifyInit(&md_ctx, &pctx, md, NULL, pkey) && - EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING) && - EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, -1 /* salt len = hash len */) && - EVP_DigestVerifyUpdate(&md_ctx, in, in_len) && - EVP_DigestVerifyFinal(&md_ctx, signature, signature_len); - EVP_MD_CTX_cleanup(&md_ctx); - return ret; -} - -enum ssl_private_key_result_t ssl_private_key_sign( - SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, - uint16_t signature_algorithm, const uint8_t *in, size_t in_len) { - if (ssl->cert->key_method != NULL) { - if (ssl->cert->key_method->sign != NULL) { - return ssl->cert->key_method->sign(ssl, out, out_len, max_out, - signature_algorithm, in, in_len); - } - - /* TODO(davidben): Remove support for |sign_digest|-only - * |SSL_PRIVATE_KEY_METHOD|s. */ - const EVP_MD *md; - int curve; - if (!is_rsa_pkcs1(&md, signature_algorithm) && - !is_ecdsa(&curve, &md, signature_algorithm)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_PROTOCOL_FOR_CUSTOM_KEY); - return ssl_private_key_failure; - } - - uint8_t hash[EVP_MAX_MD_SIZE]; - unsigned hash_len; - if (!EVP_Digest(in, in_len, hash, &hash_len, md, NULL)) { - return ssl_private_key_failure; - } - - return ssl->cert->key_method->sign_digest(ssl, out, out_len, max_out, md, - hash, hash_len); - } - - const EVP_MD *md; - if (is_rsa_pkcs1(&md, signature_algorithm) && - ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - return ssl_sign_rsa_pkcs1(ssl, out, out_len, max_out, md, in, in_len) - ? ssl_private_key_success - : ssl_private_key_failure; - } - - int curve; - if (is_ecdsa(&curve, &md, signature_algorithm)) { - return ssl_sign_ecdsa(ssl, out, out_len, max_out, curve, md, in, in_len) - ? 
ssl_private_key_success - : ssl_private_key_failure; - } - - if (is_rsa_pss(&md, signature_algorithm)) { - return ssl_sign_rsa_pss(ssl, out, out_len, max_out, md, in, in_len) - ? ssl_private_key_success - : ssl_private_key_failure; - } - - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); - return ssl_private_key_failure; -} - -int ssl_public_key_verify(SSL *ssl, const uint8_t *signature, - size_t signature_len, uint16_t signature_algorithm, - EVP_PKEY *pkey, const uint8_t *in, size_t in_len) { - const EVP_MD *md; - if (is_rsa_pkcs1(&md, signature_algorithm) && - ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - return ssl_verify_rsa_pkcs1(ssl, signature, signature_len, md, pkey, in, - in_len); - } - - int curve; - if (is_ecdsa(&curve, &md, signature_algorithm)) { - return ssl_verify_ecdsa(ssl, signature, signature_len, curve, md, pkey, in, - in_len); - } - - if (is_rsa_pss(&md, signature_algorithm)) { - return ssl_verify_rsa_pss(ssl, signature, signature_len, md, pkey, in, - in_len); - } - - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); - return 0; -} - -enum ssl_private_key_result_t ssl_private_key_decrypt( - SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, - const uint8_t *in, size_t in_len) { - if (ssl->cert->key_method != NULL) { - return ssl->cert->key_method->decrypt(ssl, out, out_len, max_out, in, - in_len); - } - - RSA *rsa = EVP_PKEY_get0_RSA(ssl->cert->privatekey); - if (rsa == NULL) { - /* Decrypt operations are only supported for RSA keys. */ - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return ssl_private_key_failure; - } - - /* Decrypt with no padding. PKCS#1 padding will be removed as part - * of the timing-sensitive code by the caller. */ - if (!RSA_decrypt(rsa, out_len, out, max_out, in, in_len, RSA_NO_PADDING)) { - return ssl_private_key_failure; - } - return ssl_private_key_success; -} - -enum ssl_private_key_result_t ssl_private_key_complete(SSL *ssl, uint8_t *out, - size_t *out_len, - size_t max_out) { - /* Only custom keys may be asynchronous. */ - return ssl->cert->key_method->complete(ssl, out, out_len, max_out); -} - -int ssl_private_key_supports_signature_algorithm(SSL *ssl, - uint16_t signature_algorithm) { - const EVP_MD *md; - if (is_rsa_pkcs1(&md, signature_algorithm) && - ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - return ssl_private_key_type(ssl) == NID_rsaEncryption; - } - - int curve; - if (is_ecdsa(&curve, &md, signature_algorithm)) { - int type = ssl_private_key_type(ssl); - if (!ssl_is_ecdsa_key_type(type)) { - return 0; - } - - /* Prior to TLS 1.3, ECDSA curves did not match the signature algorithm. */ - if (ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - return 1; - } - - return curve != NID_undef && type == curve; - } - - if (is_rsa_pss(&md, signature_algorithm)) { - if (ssl_private_key_type(ssl) != NID_rsaEncryption) { - return 0; - } - - /* Ensure the RSA key is large enough for the hash. RSASSA-PSS requires that - * emLen be at least hLen + sLen + 2. Both hLen and sLen are the size of the - * hash in TLS. Reasonable RSA key sizes are large enough for the largest - * defined RSASSA-PSS algorithm, but 1024-bit RSA is slightly too large for - * SHA-512. 1024-bit RSA is sometimes used for test credentials, so check - * the size to fall back to another algorithm. */ - if (ssl_private_key_max_signature_len(ssl) < 2 * EVP_MD_size(md) + 2) { - return 0; - } - - /* RSA-PSS is only supported by message-based private keys. 
*/ - if (ssl->cert->key_method != NULL && ssl->cert->key_method->sign == NULL) { - return 0; - } - - return 1; - } - - return 0; -} diff --git a/Sources/BoringSSL/ssl/ssl_privkey.cc b/Sources/BoringSSL/ssl/ssl_privkey.cc new file mode 100644 index 000000000..134ad561a --- /dev/null +++ b/Sources/BoringSSL/ssl/ssl_privkey.cc @@ -0,0 +1,488 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
*/ + +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include "internal.h" +#include "../crypto/internal.h" + + +namespace bssl { + +int ssl_is_key_type_supported(int key_type) { + return key_type == EVP_PKEY_RSA || key_type == EVP_PKEY_EC || + key_type == EVP_PKEY_ED25519; +} + +static int ssl_set_pkey(CERT *cert, EVP_PKEY *pkey) { + if (!ssl_is_key_type_supported(pkey->type)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CERTIFICATE_TYPE); + return 0; + } + + if (cert->chain != NULL && + sk_CRYPTO_BUFFER_value(cert->chain, 0) != NULL && + // Sanity-check that the private key and the certificate match. + !ssl_cert_check_private_key(cert, pkey)) { + return 0; + } + + EVP_PKEY_free(cert->privatekey); + EVP_PKEY_up_ref(pkey); + cert->privatekey = pkey; + + return 1; +} + +typedef struct { + uint16_t sigalg; + int pkey_type; + int curve; + const EVP_MD *(*digest_func)(void); + char is_rsa_pss; +} SSL_SIGNATURE_ALGORITHM; + +static const SSL_SIGNATURE_ALGORITHM kSignatureAlgorithms[] = { + {SSL_SIGN_RSA_PKCS1_MD5_SHA1, EVP_PKEY_RSA, NID_undef, &EVP_md5_sha1, 0}, + {SSL_SIGN_RSA_PKCS1_SHA1, EVP_PKEY_RSA, NID_undef, &EVP_sha1, 0}, + {SSL_SIGN_RSA_PKCS1_SHA256, EVP_PKEY_RSA, NID_undef, &EVP_sha256, 0}, + {SSL_SIGN_RSA_PKCS1_SHA384, EVP_PKEY_RSA, NID_undef, &EVP_sha384, 0}, + {SSL_SIGN_RSA_PKCS1_SHA512, EVP_PKEY_RSA, NID_undef, &EVP_sha512, 0}, + + {SSL_SIGN_RSA_PSS_SHA256, EVP_PKEY_RSA, NID_undef, &EVP_sha256, 1}, + {SSL_SIGN_RSA_PSS_SHA384, EVP_PKEY_RSA, NID_undef, &EVP_sha384, 1}, + {SSL_SIGN_RSA_PSS_SHA512, EVP_PKEY_RSA, NID_undef, &EVP_sha512, 1}, + + {SSL_SIGN_ECDSA_SHA1, EVP_PKEY_EC, NID_undef, &EVP_sha1, 0}, + {SSL_SIGN_ECDSA_SECP256R1_SHA256, EVP_PKEY_EC, NID_X9_62_prime256v1, + &EVP_sha256, 0}, + {SSL_SIGN_ECDSA_SECP384R1_SHA384, EVP_PKEY_EC, NID_secp384r1, &EVP_sha384, + 0}, + {SSL_SIGN_ECDSA_SECP521R1_SHA512, EVP_PKEY_EC, NID_secp521r1, &EVP_sha512, + 0}, + + {SSL_SIGN_ED25519, EVP_PKEY_ED25519, NID_undef, NULL, 0}, +}; + +static const SSL_SIGNATURE_ALGORITHM *get_signature_algorithm(uint16_t sigalg) { + for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kSignatureAlgorithms); i++) { + if (kSignatureAlgorithms[i].sigalg == sigalg) { + return &kSignatureAlgorithms[i]; + } + } + return NULL; +} + +int ssl_has_private_key(const SSL *ssl) { + return ssl->cert->privatekey != NULL || ssl->cert->key_method != NULL; +} + +static int pkey_supports_algorithm(const SSL *ssl, EVP_PKEY *pkey, + uint16_t sigalg) { + const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); + if (alg == NULL || + EVP_PKEY_id(pkey) != alg->pkey_type) { + return 0; + } + + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + // RSA keys may only be used with RSA-PSS. + if (alg->pkey_type == EVP_PKEY_RSA && !alg->is_rsa_pss) { + return 0; + } + + // EC keys have a curve requirement. + if (alg->pkey_type == EVP_PKEY_EC && + (alg->curve == NID_undef || + EC_GROUP_get_curve_name( + EC_KEY_get0_group(EVP_PKEY_get0_EC_KEY(pkey))) != alg->curve)) { + return 0; + } + } + + return 1; +} + +static int setup_ctx(SSL *ssl, EVP_MD_CTX *ctx, EVP_PKEY *pkey, uint16_t sigalg, + int is_verify) { + if (!pkey_supports_algorithm(ssl, pkey, sigalg)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_SIGNATURE_TYPE); + return 0; + } + + const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); + const EVP_MD *digest = alg->digest_func != NULL ? 
alg->digest_func() : NULL; + EVP_PKEY_CTX *pctx; + if (is_verify) { + if (!EVP_DigestVerifyInit(ctx, &pctx, digest, NULL, pkey)) { + return 0; + } + } else if (!EVP_DigestSignInit(ctx, &pctx, digest, NULL, pkey)) { + return 0; + } + + if (alg->is_rsa_pss) { + if (!EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING) || + !EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, -1 /* salt len = hash len */)) { + return 0; + } + } + + return 1; +} + +enum ssl_private_key_result_t ssl_private_key_sign( + SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len, size_t max_out, + uint16_t sigalg, Span in) { + SSL *const ssl = hs->ssl; + if (ssl->cert->key_method != NULL) { + enum ssl_private_key_result_t ret; + if (hs->pending_private_key_op) { + ret = ssl->cert->key_method->complete(ssl, out, out_len, max_out); + } else { + ret = ssl->cert->key_method->sign(ssl, out, out_len, max_out, sigalg, + in.data(), in.size()); + } + hs->pending_private_key_op = ret == ssl_private_key_retry; + return ret; + } + + *out_len = max_out; + ScopedEVP_MD_CTX ctx; + if (!setup_ctx(ssl, ctx.get(), ssl->cert->privatekey, sigalg, 0 /* sign */) || + !EVP_DigestSign(ctx.get(), out, out_len, in.data(), in.size())) { + return ssl_private_key_failure; + } + return ssl_private_key_success; +} + +bool ssl_public_key_verify(SSL *ssl, Span signature, + uint16_t sigalg, EVP_PKEY *pkey, + Span in) { + ScopedEVP_MD_CTX ctx; + return setup_ctx(ssl, ctx.get(), pkey, sigalg, 1 /* verify */) && + EVP_DigestVerify(ctx.get(), signature.data(), signature.size(), + in.data(), in.size()); +} + +enum ssl_private_key_result_t ssl_private_key_decrypt(SSL_HANDSHAKE *hs, + uint8_t *out, + size_t *out_len, + size_t max_out, + Span in) { + SSL *const ssl = hs->ssl; + if (ssl->cert->key_method != NULL) { + enum ssl_private_key_result_t ret; + if (hs->pending_private_key_op) { + ret = ssl->cert->key_method->complete(ssl, out, out_len, max_out); + } else { + ret = ssl->cert->key_method->decrypt(ssl, out, out_len, max_out, + in.data(), in.size()); + } + hs->pending_private_key_op = ret == ssl_private_key_retry; + return ret; + } + + RSA *rsa = EVP_PKEY_get0_RSA(ssl->cert->privatekey); + if (rsa == NULL) { + // Decrypt operations are only supported for RSA keys. + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_private_key_failure; + } + + // Decrypt with no padding. PKCS#1 padding will be removed as part of the + // timing-sensitive code by the caller. + if (!RSA_decrypt(rsa, out_len, out, max_out, in.data(), in.size(), + RSA_NO_PADDING)) { + return ssl_private_key_failure; + } + return ssl_private_key_success; +} + +bool ssl_private_key_supports_signature_algorithm(SSL_HANDSHAKE *hs, + uint16_t sigalg) { + SSL *const ssl = hs->ssl; + if (!pkey_supports_algorithm(ssl, hs->local_pubkey.get(), sigalg)) { + return false; + } + + // Ensure the RSA key is large enough for the hash. RSASSA-PSS requires that + // emLen be at least hLen + sLen + 2. Both hLen and sLen are the size of the + // hash in TLS. Reasonable RSA key sizes are large enough for the largest + // defined RSASSA-PSS algorithm, but 1024-bit RSA is slightly too small for + // SHA-512. 1024-bit RSA is sometimes used for test credentials, so check the + // size so that we can fall back to another algorithm in that case. 
+ const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); + if (alg->is_rsa_pss && (size_t)EVP_PKEY_size(hs->local_pubkey.get()) < + 2 * EVP_MD_size(alg->digest_func()) + 2) { + return false; + } + + return true; +} + +} // namespace bssl + +using namespace bssl; + +int SSL_use_RSAPrivateKey(SSL *ssl, RSA *rsa) { + if (rsa == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); + return 0; + } + + UniquePtr pkey(EVP_PKEY_new()); + if (!pkey || + !EVP_PKEY_set1_RSA(pkey.get(), rsa)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_EVP_LIB); + return 0; + } + + return ssl_set_pkey(ssl->cert, pkey.get()); +} + +int SSL_use_RSAPrivateKey_ASN1(SSL *ssl, const uint8_t *der, size_t der_len) { + UniquePtr rsa(RSA_private_key_from_bytes(der, der_len)); + if (!rsa) { + OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); + return 0; + } + + return SSL_use_RSAPrivateKey(ssl, rsa.get()); +} + +int SSL_use_PrivateKey(SSL *ssl, EVP_PKEY *pkey) { + if (pkey == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); + return 0; + } + + return ssl_set_pkey(ssl->cert, pkey); +} + +int SSL_use_PrivateKey_ASN1(int type, SSL *ssl, const uint8_t *der, + size_t der_len) { + if (der_len > LONG_MAX) { + OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); + return 0; + } + + const uint8_t *p = der; + UniquePtr pkey(d2i_PrivateKey(type, NULL, &p, (long)der_len)); + if (!pkey || p != der + der_len) { + OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); + return 0; + } + + return SSL_use_PrivateKey(ssl, pkey.get()); +} + +int SSL_CTX_use_RSAPrivateKey(SSL_CTX *ctx, RSA *rsa) { + if (rsa == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); + return 0; + } + + UniquePtr pkey(EVP_PKEY_new()); + if (!pkey || + !EVP_PKEY_set1_RSA(pkey.get(), rsa)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_EVP_LIB); + return 0; + } + + return ssl_set_pkey(ctx->cert, pkey.get()); +} + +int SSL_CTX_use_RSAPrivateKey_ASN1(SSL_CTX *ctx, const uint8_t *der, + size_t der_len) { + UniquePtr rsa(RSA_private_key_from_bytes(der, der_len)); + if (!rsa) { + OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); + return 0; + } + + return SSL_CTX_use_RSAPrivateKey(ctx, rsa.get()); +} + +int SSL_CTX_use_PrivateKey(SSL_CTX *ctx, EVP_PKEY *pkey) { + if (pkey == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); + return 0; + } + + return ssl_set_pkey(ctx->cert, pkey); +} + +int SSL_CTX_use_PrivateKey_ASN1(int type, SSL_CTX *ctx, const uint8_t *der, + size_t der_len) { + if (der_len > LONG_MAX) { + OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); + return 0; + } + + const uint8_t *p = der; + UniquePtr pkey(d2i_PrivateKey(type, NULL, &p, (long)der_len)); + if (!pkey || p != der + der_len) { + OPENSSL_PUT_ERROR(SSL, ERR_R_ASN1_LIB); + return 0; + } + + return SSL_CTX_use_PrivateKey(ctx, pkey.get()); +} + +void SSL_set_private_key_method(SSL *ssl, + const SSL_PRIVATE_KEY_METHOD *key_method) { + ssl->cert->key_method = key_method; +} + +void SSL_CTX_set_private_key_method(SSL_CTX *ctx, + const SSL_PRIVATE_KEY_METHOD *key_method) { + ctx->cert->key_method = key_method; +} + +const char *SSL_get_signature_algorithm_name(uint16_t sigalg, + int include_curve) { + switch (sigalg) { + case SSL_SIGN_RSA_PKCS1_MD5_SHA1: + return "rsa_pkcs1_md5_sha1"; + case SSL_SIGN_RSA_PKCS1_SHA1: + return "rsa_pkcs1_sha1"; + case SSL_SIGN_RSA_PKCS1_SHA256: + return "rsa_pkcs1_sha256"; + case SSL_SIGN_RSA_PKCS1_SHA384: + return "rsa_pkcs1_sha384"; + case SSL_SIGN_RSA_PKCS1_SHA512: + return "rsa_pkcs1_sha512"; + case SSL_SIGN_ECDSA_SHA1: + return "ecdsa_sha1"; + case SSL_SIGN_ECDSA_SECP256R1_SHA256: + return 
include_curve ? "ecdsa_secp256r1_sha256" : "ecdsa_sha256"; + case SSL_SIGN_ECDSA_SECP384R1_SHA384: + return include_curve ? "ecdsa_secp384r1_sha384" : "ecdsa_sha384"; + case SSL_SIGN_ECDSA_SECP521R1_SHA512: + return include_curve ? "ecdsa_secp521r1_sha512" : "ecdsa_sha512"; + case SSL_SIGN_RSA_PSS_SHA256: + return "rsa_pss_sha256"; + case SSL_SIGN_RSA_PSS_SHA384: + return "rsa_pss_sha384"; + case SSL_SIGN_RSA_PSS_SHA512: + return "rsa_pss_sha512"; + case SSL_SIGN_ED25519: + return "ed25519"; + default: + return NULL; + } +} + +int SSL_get_signature_algorithm_key_type(uint16_t sigalg) { + const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); + return alg != nullptr ? alg->pkey_type : EVP_PKEY_NONE; +} + +const EVP_MD *SSL_get_signature_algorithm_digest(uint16_t sigalg) { + const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); + if (alg == nullptr || alg->digest_func == nullptr) { + return nullptr; + } + return alg->digest_func(); +} + +int SSL_is_signature_algorithm_rsa_pss(uint16_t sigalg) { + const SSL_SIGNATURE_ALGORITHM *alg = get_signature_algorithm(sigalg); + return alg != nullptr && alg->is_rsa_pss; +} + +static int set_algorithm_prefs(uint16_t **out_prefs, size_t *out_num_prefs, + const uint16_t *prefs, size_t num_prefs) { + OPENSSL_free(*out_prefs); + + *out_num_prefs = 0; + *out_prefs = (uint16_t *)BUF_memdup(prefs, num_prefs * sizeof(prefs[0])); + if (*out_prefs == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return 0; + } + *out_num_prefs = num_prefs; + + return 1; +} + +int SSL_CTX_set_signing_algorithm_prefs(SSL_CTX *ctx, const uint16_t *prefs, + size_t num_prefs) { + return set_algorithm_prefs(&ctx->cert->sigalgs, &ctx->cert->num_sigalgs, + prefs, num_prefs); +} + +int SSL_set_signing_algorithm_prefs(SSL *ssl, const uint16_t *prefs, + size_t num_prefs) { + return set_algorithm_prefs(&ssl->cert->sigalgs, &ssl->cert->num_sigalgs, + prefs, num_prefs); +} + +int SSL_CTX_set_verify_algorithm_prefs(SSL_CTX *ctx, const uint16_t *prefs, + size_t num_prefs) { + return set_algorithm_prefs(&ctx->verify_sigalgs, &ctx->num_verify_sigalgs, + prefs, num_prefs); +} diff --git a/Sources/BoringSSL/ssl/ssl_session.c b/Sources/BoringSSL/ssl/ssl_session.cc similarity index 60% rename from Sources/BoringSSL/ssl/ssl_session.c rename to Sources/BoringSSL/ssl/ssl_session.cc index bbe88c360..34e7b3174 100644 --- a/Sources/BoringSSL/ssl/ssl_session.c +++ b/Sources/BoringSSL/ssl/ssl_session.cc @@ -139,7 +139,10 @@ #include #include +#include + #include +#include #include #include #include @@ -148,9 +151,11 @@ #include "../crypto/internal.h" -/* The address of this is a magic value, a pointer to which is returned by - * SSL_magic_pending_session_ptr(). It allows a session callback to indicate - * that it needs to asynchronously fetch session information. */ +namespace bssl { + +// The address of this is a magic value, a pointer to which is returned by +// SSL_magic_pending_session_ptr(). It allows a session callback to indicate +// that it needs to asynchronously fetch session information. 
static const char g_pending_session_magic = 0; static CRYPTO_EX_DATA_CLASS g_ex_data_class = @@ -160,32 +165,29 @@ static void SSL_SESSION_list_remove(SSL_CTX *ctx, SSL_SESSION *session); static void SSL_SESSION_list_add(SSL_CTX *ctx, SSL_SESSION *session); static int remove_session_lock(SSL_CTX *ctx, SSL_SESSION *session, int lock); -SSL_SESSION *ssl_session_new(const SSL_X509_METHOD *x509_method) { - SSL_SESSION *session = OPENSSL_malloc(sizeof(SSL_SESSION)); - if (session == NULL) { +UniquePtr ssl_session_new(const SSL_X509_METHOD *x509_method) { + UniquePtr session( + (SSL_SESSION *)OPENSSL_malloc(sizeof(SSL_SESSION))); + if (!session) { OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); return 0; } - OPENSSL_memset(session, 0, sizeof(SSL_SESSION)); + OPENSSL_memset(session.get(), 0, sizeof(SSL_SESSION)); session->x509_method = x509_method; session->verify_result = X509_V_ERR_INVALID_CALL; session->references = 1; session->timeout = SSL_DEFAULT_SESSION_TIMEOUT; session->auth_timeout = SSL_DEFAULT_SESSION_TIMEOUT; - session->time = (long)time(NULL); + session->time = time(NULL); CRYPTO_new_ex_data(&session->ex_data); return session; } -SSL_SESSION *SSL_SESSION_new(const SSL_CTX *ctx) { - return ssl_session_new(ctx->x509_method); -} - -SSL_SESSION *SSL_SESSION_dup(SSL_SESSION *session, int dup_flags) { - SSL_SESSION *new_session = ssl_session_new(session->x509_method); - if (new_session == NULL) { - goto err; +UniquePtr SSL_SESSION_dup(SSL_SESSION *session, int dup_flags) { + UniquePtr new_session = ssl_session_new(session->x509_method); + if (!new_session) { + return nullptr; } new_session->is_server = session->is_server; @@ -193,77 +195,61 @@ SSL_SESSION *SSL_SESSION_dup(SSL_SESSION *session, int dup_flags) { new_session->sid_ctx_length = session->sid_ctx_length; OPENSSL_memcpy(new_session->sid_ctx, session->sid_ctx, session->sid_ctx_length); - /* Copy the key material. */ + // Copy the key material. new_session->master_key_length = session->master_key_length; OPENSSL_memcpy(new_session->master_key, session->master_key, session->master_key_length); new_session->cipher = session->cipher; - /* Copy authentication state. */ + // Copy authentication state. 
if (session->psk_identity != NULL) { new_session->psk_identity = BUF_strdup(session->psk_identity); if (new_session->psk_identity == NULL) { - goto err; + return nullptr; } } if (session->certs != NULL) { new_session->certs = sk_CRYPTO_BUFFER_new_null(); if (new_session->certs == NULL) { - goto err; + return nullptr; } for (size_t i = 0; i < sk_CRYPTO_BUFFER_num(session->certs); i++) { CRYPTO_BUFFER *buffer = sk_CRYPTO_BUFFER_value(session->certs, i); if (!sk_CRYPTO_BUFFER_push(new_session->certs, buffer)) { - goto err; + return nullptr; } CRYPTO_BUFFER_up_ref(buffer); } } - if (!session->x509_method->session_dup(new_session, session)) { - goto err; + if (!session->x509_method->session_dup(new_session.get(), session)) { + return nullptr; } new_session->verify_result = session->verify_result; - new_session->ocsp_response_length = session->ocsp_response_length; if (session->ocsp_response != NULL) { - new_session->ocsp_response = BUF_memdup(session->ocsp_response, - session->ocsp_response_length); - if (new_session->ocsp_response == NULL) { - goto err; - } + new_session->ocsp_response = session->ocsp_response; + CRYPTO_BUFFER_up_ref(new_session->ocsp_response); } - new_session->tlsext_signed_cert_timestamp_list_length = - session->tlsext_signed_cert_timestamp_list_length; - if (session->tlsext_signed_cert_timestamp_list != NULL) { - new_session->tlsext_signed_cert_timestamp_list = - BUF_memdup(session->tlsext_signed_cert_timestamp_list, - session->tlsext_signed_cert_timestamp_list_length); - if (new_session->tlsext_signed_cert_timestamp_list == NULL) { - goto err; - } + if (session->signed_cert_timestamp_list != NULL) { + new_session->signed_cert_timestamp_list = + session->signed_cert_timestamp_list; + CRYPTO_BUFFER_up_ref(new_session->signed_cert_timestamp_list); } OPENSSL_memcpy(new_session->peer_sha256, session->peer_sha256, SHA256_DIGEST_LENGTH); new_session->peer_sha256_valid = session->peer_sha256_valid; - if (session->tlsext_hostname != NULL) { - new_session->tlsext_hostname = BUF_strdup(session->tlsext_hostname); - if (new_session->tlsext_hostname == NULL) { - goto err; - } - } - new_session->peer_signature_algorithm = session->peer_signature_algorithm; new_session->timeout = session->timeout; new_session->auth_timeout = session->auth_timeout; new_session->time = session->time; - /* Copy non-authentication connection properties. */ + // Copy non-authentication connection properties. if (dup_flags & SSL_SESSION_INCLUDE_NONAUTH) { new_session->session_id_length = session->session_id_length; OPENSSL_memcpy(new_session->session_id, session->session_id, @@ -283,55 +269,48 @@ SSL_SESSION *SSL_SESSION_dup(SSL_SESSION *session, int dup_flags) { if (session->early_alpn != NULL) { new_session->early_alpn = - BUF_memdup(session->early_alpn, session->early_alpn_len); + (uint8_t *)BUF_memdup(session->early_alpn, session->early_alpn_len); if (new_session->early_alpn == NULL) { - goto err; + return nullptr; } } new_session->early_alpn_len = session->early_alpn_len; } - /* Copy the ticket. */ + // Copy the ticket. if (dup_flags & SSL_SESSION_INCLUDE_TICKET) { if (session->tlsext_tick != NULL) { new_session->tlsext_tick = - BUF_memdup(session->tlsext_tick, session->tlsext_ticklen); + (uint8_t *)BUF_memdup(session->tlsext_tick, session->tlsext_ticklen); if (new_session->tlsext_tick == NULL) { - goto err; + return nullptr; } } new_session->tlsext_ticklen = session->tlsext_ticklen; } - /* The new_session does not get a copy of the ex_data. */ + // The new_session does not get a copy of the ex_data. 
new_session->not_resumable = 1; return new_session; - -err: - SSL_SESSION_free(new_session); - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return 0; } void ssl_session_rebase_time(SSL *ssl, SSL_SESSION *session) { - struct timeval now; + struct OPENSSL_timeval now; ssl_get_current_time(ssl, &now); - /* To avoid overflows and underflows, if we've gone back in time or any value - * is negative, update the time, but mark the session expired. */ - if (session->time > now.tv_sec || - session->time < 0 || - now.tv_sec < 0) { + // To avoid overflows and underflows, if we've gone back in time, update the + // time, but mark the session expired. + if (session->time > now.tv_sec) { session->time = now.tv_sec; session->timeout = 0; session->auth_timeout = 0; return; } - /* Adjust the session time and timeouts. If the session has already expired, - * clamp the timeouts at zero. */ - long delta = now.tv_sec - session->time; + // Adjust the session time and timeouts. If the session has already expired, + // clamp the timeouts at zero. + uint64_t delta = now.tv_sec - session->time; session->time = now.tv_sec; if (session->timeout < delta) { session->timeout = 0; @@ -345,9 +324,10 @@ void ssl_session_rebase_time(SSL *ssl, SSL_SESSION *session) { } } -void ssl_session_renew_timeout(SSL *ssl, SSL_SESSION *session, long timeout) { - /* Rebase the timestamp relative to the current time so |timeout| is measured - * correctly. */ +void ssl_session_renew_timeout(SSL *ssl, SSL_SESSION *session, + uint32_t timeout) { + // Rebase the timestamp relative to the current time so |timeout| is measured + // correctly. ssl_session_rebase_time(ssl, session); if (session->timeout > timeout) { @@ -360,156 +340,21 @@ void ssl_session_renew_timeout(SSL *ssl, SSL_SESSION *session, long timeout) { } } -int SSL_SESSION_up_ref(SSL_SESSION *session) { - CRYPTO_refcount_inc(&session->references); - return 1; -} - -void SSL_SESSION_free(SSL_SESSION *session) { - if (session == NULL || - !CRYPTO_refcount_dec_and_test_zero(&session->references)) { - return; - } - - CRYPTO_free_ex_data(&g_ex_data_class, session, &session->ex_data); - - OPENSSL_cleanse(session->master_key, sizeof(session->master_key)); - OPENSSL_cleanse(session->session_id, sizeof(session->session_id)); - sk_CRYPTO_BUFFER_pop_free(session->certs, CRYPTO_BUFFER_free); - session->x509_method->session_clear(session); - OPENSSL_free(session->tlsext_hostname); - OPENSSL_free(session->tlsext_tick); - OPENSSL_free(session->tlsext_signed_cert_timestamp_list); - OPENSSL_free(session->ocsp_response); - OPENSSL_free(session->psk_identity); - OPENSSL_free(session->early_alpn); - OPENSSL_cleanse(session, sizeof(*session)); - OPENSSL_free(session); -} - -const uint8_t *SSL_SESSION_get_id(const SSL_SESSION *session, - unsigned *out_len) { - if (out_len != NULL) { - *out_len = session->session_id_length; - } - return session->session_id; -} - -long SSL_SESSION_get_timeout(const SSL_SESSION *session) { - return session->timeout; -} - -long SSL_SESSION_get_time(const SSL_SESSION *session) { - if (session == NULL) { - /* NULL should crash, but silently accept it here for compatibility. */ +uint16_t ssl_session_protocol_version(const SSL_SESSION *session) { + uint16_t ret; + if (!ssl_protocol_version_from_wire(&ret, session->ssl_version)) { + // An |SSL_SESSION| will never have an invalid version. This is enforced by + // the parser. 
+ assert(0); return 0; } - return session->time; -} - -X509 *SSL_SESSION_get0_peer(const SSL_SESSION *session) { - return session->x509_peer; -} -size_t SSL_SESSION_get_master_key(const SSL_SESSION *session, uint8_t *out, - size_t max_out) { - /* TODO(davidben): Fix master_key_length's type and remove these casts. */ - if (max_out == 0) { - return (size_t)session->master_key_length; - } - if (max_out > (size_t)session->master_key_length) { - max_out = (size_t)session->master_key_length; - } - OPENSSL_memcpy(out, session->master_key, max_out); - return max_out; -} - -long SSL_SESSION_set_time(SSL_SESSION *session, long time) { - if (session == NULL) { - return 0; - } - - session->time = time; - return time; -} - -long SSL_SESSION_set_timeout(SSL_SESSION *session, long timeout) { - if (session == NULL) { - return 0; - } - - session->timeout = timeout; - session->auth_timeout = timeout; - return 1; -} - -int SSL_SESSION_set1_id_context(SSL_SESSION *session, const uint8_t *sid_ctx, - size_t sid_ctx_len) { - if (sid_ctx_len > sizeof(session->sid_ctx)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG); - return 0; - } - - assert(sizeof(session->sid_ctx) < 256); - session->sid_ctx_length = (uint8_t)sid_ctx_len; - OPENSSL_memcpy(session->sid_ctx, sid_ctx, sid_ctx_len); - - return 1; -} - -SSL_SESSION *SSL_magic_pending_session_ptr(void) { - return (SSL_SESSION *)&g_pending_session_magic; -} - -SSL_SESSION *SSL_get_session(const SSL *ssl) { - /* Once the handshake completes we return the established session. Otherwise - * we return the intermediate session, either |session| (for resumption) or - * |new_session| if doing a full handshake. */ - if (!SSL_in_init(ssl)) { - return ssl->s3->established_session; - } - if (ssl->s3->hs->new_session != NULL) { - return ssl->s3->hs->new_session; - } - return ssl->session; -} - -SSL_SESSION *SSL_get1_session(SSL *ssl) { - SSL_SESSION *ret = SSL_get_session(ssl); - if (ret != NULL) { - SSL_SESSION_up_ref(ret); - } return ret; } -int SSL_SESSION_get_ex_new_index(long argl, void *argp, - CRYPTO_EX_unused *unused, - CRYPTO_EX_dup *dup_func, - CRYPTO_EX_free *free_func) { - int index; - if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, dup_func, - free_func)) { - return -1; - } - return index; -} - -int SSL_SESSION_set_ex_data(SSL_SESSION *session, int idx, void *arg) { - return CRYPTO_set_ex_data(&session->ex_data, idx, arg); -} - -void *SSL_SESSION_get_ex_data(const SSL_SESSION *session, int idx) { - return CRYPTO_get_ex_data(&session->ex_data, idx); -} - -const EVP_MD *SSL_SESSION_get_digest(const SSL_SESSION *session, - const SSL *ssl) { - uint16_t version; - if (!ssl->method->version_from_wire(&version, session->ssl_version)) { - return NULL; - } - - return ssl_get_handshake_digest(session->cipher->algorithm_prf, version); +const EVP_MD *ssl_session_get_digest(const SSL_SESSION *session) { + return ssl_get_handshake_digest(ssl_session_protocol_version(session), + session->cipher); } int ssl_get_new_session(SSL_HANDSHAKE *hs, int is_server) { @@ -519,7 +364,7 @@ int ssl_get_new_session(SSL_HANDSHAKE *hs, int is_server) { return 0; } - SSL_SESSION *session = ssl_session_new(ssl->ctx->x509_method); + UniquePtr session = ssl_session_new(ssl->ctx->x509_method); if (session == NULL) { return 0; } @@ -527,33 +372,33 @@ int ssl_get_new_session(SSL_HANDSHAKE *hs, int is_server) { session->is_server = is_server; session->ssl_version = ssl->version; - /* Fill in the time from the |SSL_CTX|'s clock. 
*/ - struct timeval now; + // Fill in the time from the |SSL_CTX|'s clock. + struct OPENSSL_timeval now; ssl_get_current_time(ssl, &now); session->time = now.tv_sec; - uint16_t version = ssl3_protocol_version(ssl); + uint16_t version = ssl_protocol_version(ssl); if (version >= TLS1_3_VERSION) { - /* TLS 1.3 uses tickets as authenticators, so we are willing to use them for - * longer. */ - session->timeout = ssl->initial_ctx->session_psk_dhe_timeout; + // TLS 1.3 uses tickets as authenticators, so we are willing to use them for + // longer. + session->timeout = ssl->session_ctx->session_psk_dhe_timeout; session->auth_timeout = SSL_DEFAULT_SESSION_AUTH_TIMEOUT; } else { - /* TLS 1.2 resumption does not incorporate new key material, so we use a - * much shorter timeout. */ - session->timeout = ssl->initial_ctx->session_timeout; - session->auth_timeout = ssl->initial_ctx->session_timeout; + // TLS 1.2 resumption does not incorporate new key material, so we use a + // much shorter timeout. + session->timeout = ssl->session_ctx->session_timeout; + session->auth_timeout = ssl->session_ctx->session_timeout; } if (is_server) { if (hs->ticket_expected || version >= TLS1_3_VERSION) { - /* Don't set session IDs for sessions resumed with tickets. This will keep - * them out of the session cache. */ + // Don't set session IDs for sessions resumed with tickets. This will keep + // them out of the session cache. session->session_id_length = 0; } else { session->session_id_length = SSL3_SSL_SESSION_ID_LENGTH; if (!RAND_bytes(session->session_id, session->session_id_length)) { - goto err; + return 0; } } } else { @@ -562,80 +407,121 @@ int ssl_get_new_session(SSL_HANDSHAKE *hs, int is_server) { if (ssl->cert->sid_ctx_length > sizeof(session->sid_ctx)) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; + return 0; } OPENSSL_memcpy(session->sid_ctx, ssl->cert->sid_ctx, ssl->cert->sid_ctx_length); session->sid_ctx_length = ssl->cert->sid_ctx_length; - /* The session is marked not resumable until it is completely filled in. */ + // The session is marked not resumable until it is completely filled in. session->not_resumable = 1; session->verify_result = X509_V_ERR_INVALID_CALL; - SSL_SESSION_free(hs->new_session); - hs->new_session = session; + hs->new_session = std::move(session); ssl_set_session(ssl, NULL); return 1; - -err: - SSL_SESSION_free(session); - return 0; } -int ssl_encrypt_ticket(SSL *ssl, CBB *out, const SSL_SESSION *session) { - int ret = 0; +int ssl_ctx_rotate_ticket_encryption_key(SSL_CTX *ctx) { + OPENSSL_timeval now; + ssl_ctx_get_current_time(ctx, &now); + { + // Avoid acquiring a write lock in the common case (i.e. a non-default key + // is used or the default keys have not expired yet). + MutexReadLock lock(&ctx->lock); + if (ctx->tlsext_ticket_key_current && + (ctx->tlsext_ticket_key_current->next_rotation_tv_sec == 0 || + ctx->tlsext_ticket_key_current->next_rotation_tv_sec > now.tv_sec) && + (!ctx->tlsext_ticket_key_prev || + ctx->tlsext_ticket_key_prev->next_rotation_tv_sec > now.tv_sec)) { + return 1; + } + } - /* Serialize the SSL_SESSION to be encoded into the ticket. 
*/ - uint8_t *session_buf = NULL; - size_t session_len; - if (!SSL_SESSION_to_bytes_for_ticket(session, &session_buf, &session_len)) { - return -1; + MutexWriteLock lock(&ctx->lock); + if (!ctx->tlsext_ticket_key_current || + (ctx->tlsext_ticket_key_current->next_rotation_tv_sec != 0 && + ctx->tlsext_ticket_key_current->next_rotation_tv_sec <= now.tv_sec)) { + // The current key has not been initialized or it is expired. + auto new_key = bssl::MakeUnique(); + if (!new_key) { + return 0; + } + OPENSSL_memset(new_key.get(), 0, sizeof(struct tlsext_ticket_key)); + if (ctx->tlsext_ticket_key_current) { + // The current key expired. Rotate it to prev and bump up its rotation + // timestamp. Note that even with the new rotation time it may still be + // expired and get droppped below. + ctx->tlsext_ticket_key_current->next_rotation_tv_sec += + SSL_DEFAULT_TICKET_KEY_ROTATION_INTERVAL; + OPENSSL_free(ctx->tlsext_ticket_key_prev); + ctx->tlsext_ticket_key_prev = ctx->tlsext_ticket_key_current; + } + ctx->tlsext_ticket_key_current = new_key.release(); + RAND_bytes(ctx->tlsext_ticket_key_current->name, 16); + RAND_bytes(ctx->tlsext_ticket_key_current->hmac_key, 16); + RAND_bytes(ctx->tlsext_ticket_key_current->aes_key, 16); + ctx->tlsext_ticket_key_current->next_rotation_tv_sec = + now.tv_sec + SSL_DEFAULT_TICKET_KEY_ROTATION_INTERVAL; + } + + // Drop an expired prev key. + if (ctx->tlsext_ticket_key_prev && + ctx->tlsext_ticket_key_prev->next_rotation_tv_sec <= now.tv_sec) { + OPENSSL_free(ctx->tlsext_ticket_key_prev); + ctx->tlsext_ticket_key_prev = nullptr; } - EVP_CIPHER_CTX ctx; - EVP_CIPHER_CTX_init(&ctx); - HMAC_CTX hctx; - HMAC_CTX_init(&hctx); + return 1; +} + +static int ssl_encrypt_ticket_with_cipher_ctx(SSL *ssl, CBB *out, + const uint8_t *session_buf, + size_t session_len) { + ScopedEVP_CIPHER_CTX ctx; + ScopedHMAC_CTX hctx; - /* If the session is too long, emit a dummy value rather than abort the - * connection. */ + // If the session is too long, emit a dummy value rather than abort the + // connection. static const size_t kMaxTicketOverhead = 16 + EVP_MAX_IV_LENGTH + EVP_MAX_BLOCK_LENGTH + EVP_MAX_MD_SIZE; if (session_len > 0xffff - kMaxTicketOverhead) { static const char kTicketPlaceholder[] = "TICKET TOO LARGE"; - if (CBB_add_bytes(out, (const uint8_t *)kTicketPlaceholder, - strlen(kTicketPlaceholder))) { - ret = 1; - } - goto err; + return CBB_add_bytes(out, (const uint8_t *)kTicketPlaceholder, + strlen(kTicketPlaceholder)); } - /* Initialize HMAC and cipher contexts. If callback present it does all the - * work otherwise use generated values from parent ctx. */ - SSL_CTX *tctx = ssl->initial_ctx; + // Initialize HMAC and cipher contexts. If callback present it does all the + // work otherwise use generated values from parent ctx. + SSL_CTX *tctx = ssl->session_ctx; uint8_t iv[EVP_MAX_IV_LENGTH]; uint8_t key_name[16]; if (tctx->tlsext_ticket_key_cb != NULL) { - if (tctx->tlsext_ticket_key_cb(ssl, key_name, iv, &ctx, &hctx, + if (tctx->tlsext_ticket_key_cb(ssl, key_name, iv, ctx.get(), hctx.get(), 1 /* encrypt */) < 0) { - goto err; + return 0; } } else { + // Rotate ticket key if necessary. 
+ if (!ssl_ctx_rotate_ticket_encryption_key(tctx)) { + return 0; + } + MutexReadLock lock(&tctx->lock); if (!RAND_bytes(iv, 16) || - !EVP_EncryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL, - tctx->tlsext_tick_aes_key, iv) || - !HMAC_Init_ex(&hctx, tctx->tlsext_tick_hmac_key, 16, tlsext_tick_md(), - NULL)) { - goto err; + !EVP_EncryptInit_ex(ctx.get(), EVP_aes_128_cbc(), NULL, + tctx->tlsext_ticket_key_current->aes_key, iv) || + !HMAC_Init_ex(hctx.get(), tctx->tlsext_ticket_key_current->hmac_key, 16, + tlsext_tick_md(), NULL)) { + return 0; } - OPENSSL_memcpy(key_name, tctx->tlsext_tick_key_name, 16); + OPENSSL_memcpy(key_name, tctx->tlsext_ticket_key_current->name, 16); } uint8_t *ptr; if (!CBB_add_bytes(out, key_name, 16) || - !CBB_add_bytes(out, iv, EVP_CIPHER_CTX_iv_length(&ctx)) || + !CBB_add_bytes(out, iv, EVP_CIPHER_CTX_iv_length(ctx.get())) || !CBB_reserve(out, &ptr, session_len + EVP_MAX_BLOCK_LENGTH)) { - goto err; + return 0; } size_t total = 0; @@ -644,33 +530,76 @@ int ssl_encrypt_ticket(SSL *ssl, CBB *out, const SSL_SESSION *session) { total = session_len; #else int len; - if (!EVP_EncryptUpdate(&ctx, ptr + total, &len, session_buf, session_len)) { - goto err; + if (!EVP_EncryptUpdate(ctx.get(), ptr + total, &len, session_buf, session_len)) { + return 0; } total += len; - if (!EVP_EncryptFinal_ex(&ctx, ptr + total, &len)) { - goto err; + if (!EVP_EncryptFinal_ex(ctx.get(), ptr + total, &len)) { + return 0; } total += len; #endif if (!CBB_did_write(out, total)) { - goto err; + return 0; } unsigned hlen; - if (!HMAC_Update(&hctx, CBB_data(out), CBB_len(out)) || + if (!HMAC_Update(hctx.get(), CBB_data(out), CBB_len(out)) || !CBB_reserve(out, &ptr, EVP_MAX_MD_SIZE) || - !HMAC_Final(&hctx, ptr, &hlen) || + !HMAC_Final(hctx.get(), ptr, &hlen) || !CBB_did_write(out, hlen)) { - goto err; + return 0; + } + + return 1; +} + +static int ssl_encrypt_ticket_with_method(SSL *ssl, CBB *out, + const uint8_t *session_buf, + size_t session_len) { + const SSL_TICKET_AEAD_METHOD *method = ssl->session_ctx->ticket_aead_method; + const size_t max_overhead = method->max_overhead(ssl); + const size_t max_out = session_len + max_overhead; + if (max_out < max_overhead) { + OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); + return 0; + } + + uint8_t *ptr; + if (!CBB_reserve(out, &ptr, max_out)) { + return 0; + } + + size_t out_len; + if (!method->seal(ssl, ptr, &out_len, max_out, session_buf, session_len)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_TICKET_ENCRYPTION_FAILED); + return 0; + } + + if (!CBB_did_write(out, out_len)) { + return 0; + } + + return 1; +} + +int ssl_encrypt_ticket(SSL *ssl, CBB *out, const SSL_SESSION *session) { + // Serialize the SSL_SESSION to be encoded into the ticket. + uint8_t *session_buf = NULL; + size_t session_len; + if (!SSL_SESSION_to_bytes_for_ticket(session, &session_buf, &session_len)) { + return -1; } - ret = 1; + int ret = 0; + if (ssl->session_ctx->ticket_aead_method) { + ret = ssl_encrypt_ticket_with_method(ssl, out, session_buf, session_len); + } else { + ret = + ssl_encrypt_ticket_with_cipher_ctx(ssl, out, session_buf, session_len); + } -err: OPENSSL_free(session_buf); - EVP_CIPHER_CTX_cleanup(&ctx); - HMAC_CTX_cleanup(&hctx); return ret; } @@ -689,193 +618,160 @@ int ssl_session_is_time_valid(const SSL *ssl, const SSL_SESSION *session) { return 0; } - struct timeval now; + struct OPENSSL_timeval now; ssl_get_current_time(ssl, &now); - /* Reject tickets from the future to avoid underflow. 
*/ - if ((long)now.tv_sec < session->time) { + // Reject tickets from the future to avoid underflow. + if (now.tv_sec < session->time) { return 0; } - return session->timeout > (long)now.tv_sec - session->time; + return session->timeout > now.tv_sec - session->time; } int ssl_session_is_resumable(const SSL_HANDSHAKE *hs, const SSL_SESSION *session) { const SSL *const ssl = hs->ssl; return ssl_session_is_context_valid(ssl, session) && - /* The session must have been created by the same type of end point as - * we're now using it with. */ + // The session must have been created by the same type of end point as + // we're now using it with. ssl->server == session->is_server && - /* The session must not be expired. */ + // The session must not be expired. ssl_session_is_time_valid(ssl, session) && /* Only resume if the session's version matches the negotiated * version. */ ssl->version == session->ssl_version && - /* Only resume if the session's cipher matches the negotiated one. */ + // Only resume if the session's cipher matches the negotiated one. hs->new_cipher == session->cipher && - /* If the session contains a client certificate (either the full - * certificate or just the hash) then require that the form of the - * certificate matches the current configuration. */ + // If the session contains a client certificate (either the full + // certificate or just the hash) then require that the form of the + // certificate matches the current configuration. ((sk_CRYPTO_BUFFER_num(session->certs) == 0 && !session->peer_sha256_valid) || session->peer_sha256_valid == ssl->retain_only_sha256_of_client_certs); } -/* ssl_lookup_session looks up |session_id| in the session cache and sets - * |*out_session| to an |SSL_SESSION| object if found. The caller takes - * ownership of the result. */ -static enum ssl_session_result_t ssl_lookup_session( - SSL *ssl, SSL_SESSION **out_session, const uint8_t *session_id, +// ssl_lookup_session looks up |session_id| in the session cache and sets +// |*out_session| to an |SSL_SESSION| object if found. +static enum ssl_hs_wait_t ssl_lookup_session( + SSL *ssl, UniquePtr *out_session, const uint8_t *session_id, size_t session_id_len) { - *out_session = NULL; + out_session->reset(); if (session_id_len == 0 || session_id_len > SSL_MAX_SSL_SESSION_ID_LENGTH) { - return ssl_session_success; + return ssl_hs_ok; } - SSL_SESSION *session = NULL; - /* Try the internal cache, if it exists. */ - if (!(ssl->initial_ctx->session_cache_mode & + UniquePtr session; + // Try the internal cache, if it exists. + if (!(ssl->session_ctx->session_cache_mode & SSL_SESS_CACHE_NO_INTERNAL_LOOKUP)) { SSL_SESSION data; data.ssl_version = ssl->version; data.session_id_length = session_id_len; OPENSSL_memcpy(data.session_id, session_id, session_id_len); - CRYPTO_MUTEX_lock_read(&ssl->initial_ctx->lock); - session = lh_SSL_SESSION_retrieve(ssl->initial_ctx->sessions, &data); - if (session != NULL) { - SSL_SESSION_up_ref(session); + MutexReadLock lock(&ssl->session_ctx->lock); + session.reset(lh_SSL_SESSION_retrieve(ssl->session_ctx->sessions, &data)); + if (session) { + // |lh_SSL_SESSION_retrieve| returns a non-owning pointer. + SSL_SESSION_up_ref(session.get()); } - /* TODO(davidben): This should probably move it to the front of the list. */ - CRYPTO_MUTEX_unlock_read(&ssl->initial_ctx->lock); + // TODO(davidben): This should probably move it to the front of the list. } - /* Fall back to the external cache, if it exists. 
*/ - if (session == NULL && - ssl->initial_ctx->get_session_cb != NULL) { + // Fall back to the external cache, if it exists. + if (!session && (ssl->session_ctx->get_session_cb != nullptr || + ssl->session_ctx->get_session_cb_legacy != nullptr)) { int copy = 1; - session = ssl->initial_ctx->get_session_cb(ssl, (uint8_t *)session_id, - session_id_len, ©); + if (ssl->session_ctx->get_session_cb != nullptr) { + session.reset(ssl->session_ctx->get_session_cb(ssl, session_id, + session_id_len, ©)); + } else { + session.reset(ssl->session_ctx->get_session_cb_legacy( + ssl, const_cast(session_id), session_id_len, ©)); + } - if (session == NULL) { - return ssl_session_success; + if (!session) { + return ssl_hs_ok; } - if (session == SSL_magic_pending_session_ptr()) { - return ssl_session_retry; + if (session.get() == SSL_magic_pending_session_ptr()) { + session.release(); // This pointer is not actually owned. + return ssl_hs_pending_session; } - /* Increment reference count now if the session callback asks us to do so - * (note that if the session structures returned by the callback are shared - * between threads, it must handle the reference count itself [i.e. copy == - * 0], or things won't be thread-safe). */ + // Increment reference count now if the session callback asks us to do so + // (note that if the session structures returned by the callback are shared + // between threads, it must handle the reference count itself [i.e. copy == + // 0], or things won't be thread-safe). if (copy) { - SSL_SESSION_up_ref(session); + SSL_SESSION_up_ref(session.get()); } - /* Add the externally cached session to the internal cache if necessary. */ - if (!(ssl->initial_ctx->session_cache_mode & + // Add the externally cached session to the internal cache if necessary. + if (!(ssl->session_ctx->session_cache_mode & SSL_SESS_CACHE_NO_INTERNAL_STORE)) { - SSL_CTX_add_session(ssl->initial_ctx, session); + SSL_CTX_add_session(ssl->session_ctx, session.get()); } } - if (session != NULL && - !ssl_session_is_time_valid(ssl, session)) { - /* The session was from the cache, so remove it. */ - SSL_CTX_remove_session(ssl->initial_ctx, session); - SSL_SESSION_free(session); - session = NULL; + if (session && !ssl_session_is_time_valid(ssl, session.get())) { + // The session was from the cache, so remove it. + SSL_CTX_remove_session(ssl->session_ctx, session.get()); + session.reset(); } - *out_session = session; - return ssl_session_success; + *out_session = std::move(session); + return ssl_hs_ok; } -enum ssl_session_result_t ssl_get_prev_session( - SSL *ssl, SSL_SESSION **out_session, int *out_tickets_supported, - int *out_renew_ticket, const SSL_CLIENT_HELLO *client_hello) { - /* This is used only by servers. */ +enum ssl_hs_wait_t ssl_get_prev_session(SSL *ssl, + UniquePtr *out_session, + bool *out_tickets_supported, + bool *out_renew_ticket, + const SSL_CLIENT_HELLO *client_hello) { + // This is used only by servers. assert(ssl->server); - SSL_SESSION *session = NULL; - int renew_ticket = 0; + UniquePtr session; + bool renew_ticket = false; - /* If tickets are disabled, always behave as if no tickets are present. */ + // If tickets are disabled, always behave as if no tickets are present. 
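Since the external-cache callback now receives a const session ID (see the new SSL_CTX_sess_set_get_cb signature later in this patch), a consumer-side lookup can be wired up as below. ExternalGetSessionSketch is a hypothetical callback, not part of the patch; returning nullptr means the session was not found, and leaving *out_copy at 1 tells the library to take its own reference.

#include <openssl/ssl.h>

static SSL_SESSION *ExternalGetSessionSketch(SSL *ssl, const uint8_t *id,
                                             int id_len, int *out_copy) {
  (void)ssl;
  (void)id;
  (void)id_len;
  *out_copy = 1;   // the library calls SSL_SESSION_up_ref on the result
  return nullptr;  // nothing found in this sketch
}

// Installed with: SSL_CTX_sess_set_get_cb(ctx, ExternalGetSessionSketch);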
const uint8_t *ticket = NULL; size_t ticket_len = 0; - const int tickets_supported = + const bool tickets_supported = !(SSL_get_options(ssl) & SSL_OP_NO_TICKET) && ssl->version > SSL3_VERSION && SSL_early_callback_ctx_extension_get( client_hello, TLSEXT_TYPE_session_ticket, &ticket, &ticket_len); if (tickets_supported && ticket_len > 0) { - if (!tls_process_ticket(ssl, &session, &renew_ticket, ticket, ticket_len, - client_hello->session_id, - client_hello->session_id_len)) { - return ssl_session_error; + switch (ssl_process_ticket(ssl, &session, &renew_ticket, ticket, ticket_len, + client_hello->session_id, + client_hello->session_id_len)) { + case ssl_ticket_aead_success: + break; + case ssl_ticket_aead_ignore_ticket: + assert(!session); + break; + case ssl_ticket_aead_error: + return ssl_hs_error; + case ssl_ticket_aead_retry: + return ssl_hs_pending_ticket; } } else { - /* The client didn't send a ticket, so the session ID is a real ID. */ - enum ssl_session_result_t lookup_ret = ssl_lookup_session( + // The client didn't send a ticket, so the session ID is a real ID. + enum ssl_hs_wait_t lookup_ret = ssl_lookup_session( ssl, &session, client_hello->session_id, client_hello->session_id_len); - if (lookup_ret != ssl_session_success) { + if (lookup_ret != ssl_hs_ok) { return lookup_ret; } } - *out_session = session; + *out_session = std::move(session); *out_tickets_supported = tickets_supported; *out_renew_ticket = renew_ticket; - return ssl_session_success; -} - -int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *session) { - /* Although |session| is inserted into two structures (a doubly-linked list - * and the hash table), |ctx| only takes one reference. */ - SSL_SESSION_up_ref(session); - - SSL_SESSION *old_session; - CRYPTO_MUTEX_lock_write(&ctx->lock); - if (!lh_SSL_SESSION_insert(ctx->sessions, &old_session, session)) { - CRYPTO_MUTEX_unlock_write(&ctx->lock); - SSL_SESSION_free(session); - return 0; - } - - if (old_session != NULL) { - if (old_session == session) { - /* |session| was already in the cache. */ - CRYPTO_MUTEX_unlock_write(&ctx->lock); - SSL_SESSION_free(old_session); - return 0; - } - - /* There was a session ID collision. |old_session| must be removed from - * the linked list and released. */ - SSL_SESSION_list_remove(ctx, old_session); - SSL_SESSION_free(old_session); - } - - SSL_SESSION_list_add(ctx, session); - - /* Enforce any cache size limits. */ - if (SSL_CTX_sess_get_cache_size(ctx) > 0) { - while (SSL_CTX_sess_number(ctx) > SSL_CTX_sess_get_cache_size(ctx)) { - if (!remove_session_lock(ctx, ctx->session_cache_tail, 0)) { - break; - } - } - } - - CRYPTO_MUTEX_unlock_write(&ctx->lock); - return 1; -} - -int SSL_CTX_remove_session(SSL_CTX *ctx, SSL_SESSION *session) { - return remove_session_lock(ctx, session, 1); + return ssl_hs_ok; } static int remove_session_lock(SSL_CTX *ctx, SSL_SESSION *session, int lock) { @@ -898,7 +794,6 @@ static int remove_session_lock(SSL_CTX *ctx, SSL_SESSION *session, int lock) { } if (ret) { - found_session->not_resumable = 1; if (ctx->remove_session_cb != NULL) { ctx->remove_session_cb(ctx, found_session); } @@ -909,18 +804,6 @@ static int remove_session_lock(SSL_CTX *ctx, SSL_SESSION *session, int lock) { return ret; } -int SSL_set_session(SSL *ssl, SSL_SESSION *session) { - /* SSL_set_session may only be called before the handshake has started. 
*/ - if (ssl->s3->initial_handshake_complete || - ssl->s3->hs == NULL || - ssl->s3->hs->state != SSL_ST_INIT) { - abort(); - } - - ssl_set_session(ssl, session); - return 1; -} - void ssl_set_session(SSL *ssl, SSL_SESSION *session) { if (ssl->session == session) { return; @@ -933,22 +816,306 @@ void ssl_set_session(SSL *ssl, SSL_SESSION *session) { } } -long SSL_CTX_set_timeout(SSL_CTX *ctx, long timeout) { +// locked by SSL_CTX in the calling function +static void SSL_SESSION_list_remove(SSL_CTX *ctx, SSL_SESSION *session) { + if (session->next == NULL || session->prev == NULL) { + return; + } + + if (session->next == (SSL_SESSION *)&ctx->session_cache_tail) { + // last element in list + if (session->prev == (SSL_SESSION *)&ctx->session_cache_head) { + // only one element in list + ctx->session_cache_head = NULL; + ctx->session_cache_tail = NULL; + } else { + ctx->session_cache_tail = session->prev; + session->prev->next = (SSL_SESSION *)&(ctx->session_cache_tail); + } + } else { + if (session->prev == (SSL_SESSION *)&ctx->session_cache_head) { + // first element in list + ctx->session_cache_head = session->next; + session->next->prev = (SSL_SESSION *)&(ctx->session_cache_head); + } else { // middle of list + session->next->prev = session->prev; + session->prev->next = session->next; + } + } + session->prev = session->next = NULL; +} + +static void SSL_SESSION_list_add(SSL_CTX *ctx, SSL_SESSION *session) { + if (session->next != NULL && session->prev != NULL) { + SSL_SESSION_list_remove(ctx, session); + } + + if (ctx->session_cache_head == NULL) { + ctx->session_cache_head = session; + ctx->session_cache_tail = session; + session->prev = (SSL_SESSION *)&(ctx->session_cache_head); + session->next = (SSL_SESSION *)&(ctx->session_cache_tail); + } else { + session->next = ctx->session_cache_head; + session->next->prev = session; + session->prev = (SSL_SESSION *)&(ctx->session_cache_head); + ctx->session_cache_head = session; + } +} + +} // namespace bssl + +using namespace bssl; + +SSL_SESSION *SSL_SESSION_new(const SSL_CTX *ctx) { + return ssl_session_new(ctx->x509_method).release(); +} + +int SSL_SESSION_up_ref(SSL_SESSION *session) { + CRYPTO_refcount_inc(&session->references); + return 1; +} + +void SSL_SESSION_free(SSL_SESSION *session) { + if (session == NULL || + !CRYPTO_refcount_dec_and_test_zero(&session->references)) { + return; + } + + CRYPTO_free_ex_data(&g_ex_data_class, session, &session->ex_data); + + OPENSSL_cleanse(session->master_key, sizeof(session->master_key)); + OPENSSL_cleanse(session->session_id, sizeof(session->session_id)); + sk_CRYPTO_BUFFER_pop_free(session->certs, CRYPTO_BUFFER_free); + session->x509_method->session_clear(session); + OPENSSL_free(session->tlsext_tick); + CRYPTO_BUFFER_free(session->signed_cert_timestamp_list); + CRYPTO_BUFFER_free(session->ocsp_response); + OPENSSL_free(session->psk_identity); + OPENSSL_free(session->early_alpn); + OPENSSL_free(session); +} + +const uint8_t *SSL_SESSION_get_id(const SSL_SESSION *session, + unsigned *out_len) { + if (out_len != NULL) { + *out_len = session->session_id_length; + } + return session->session_id; +} + +uint32_t SSL_SESSION_get_timeout(const SSL_SESSION *session) { + return session->timeout; +} + +uint64_t SSL_SESSION_get_time(const SSL_SESSION *session) { + if (session == NULL) { + // NULL should crash, but silently accept it here for compatibility. 
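The functions above follow BoringSSL's usual reference-counting contract for SSL_SESSION. A usage sketch; NewSessionSketch and |ctx| are hypothetical and not part of the patch:

#include <openssl/ssl.h>

static SSL_SESSION *NewSessionSketch(SSL_CTX *ctx) {
  SSL_SESSION *session = SSL_SESSION_new(ctx);  // starts with one reference
  if (session == nullptr) {
    return nullptr;
  }
  SSL_SESSION_up_ref(session);  // a second owner (e.g. a cache) takes a reference
  SSL_SESSION_free(session);    // ...and later releases it
  return session;               // the caller still owns the original reference
}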
+ return 0; + } + return session->time; +} + +X509 *SSL_SESSION_get0_peer(const SSL_SESSION *session) { + return session->x509_peer; +} + +size_t SSL_SESSION_get_master_key(const SSL_SESSION *session, uint8_t *out, + size_t max_out) { + // TODO(davidben): Fix master_key_length's type and remove these casts. + if (max_out == 0) { + return (size_t)session->master_key_length; + } + if (max_out > (size_t)session->master_key_length) { + max_out = (size_t)session->master_key_length; + } + OPENSSL_memcpy(out, session->master_key, max_out); + return max_out; +} + +uint64_t SSL_SESSION_set_time(SSL_SESSION *session, uint64_t time) { + if (session == NULL) { + return 0; + } + + session->time = time; + return time; +} + +uint32_t SSL_SESSION_set_timeout(SSL_SESSION *session, uint32_t timeout) { + if (session == NULL) { + return 0; + } + + session->timeout = timeout; + session->auth_timeout = timeout; + return 1; +} + +int SSL_SESSION_set1_id_context(SSL_SESSION *session, const uint8_t *sid_ctx, + size_t sid_ctx_len) { + if (sid_ctx_len > sizeof(session->sid_ctx)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG); + return 0; + } + + static_assert(sizeof(session->sid_ctx) < 256, "sid_ctx_len does not fit"); + session->sid_ctx_length = (uint8_t)sid_ctx_len; + OPENSSL_memcpy(session->sid_ctx, sid_ctx, sid_ctx_len); + + return 1; +} + +int SSL_SESSION_should_be_single_use(const SSL_SESSION *session) { + return ssl_session_protocol_version(session) >= TLS1_3_VERSION; +} + +int SSL_SESSION_is_resumable(const SSL_SESSION *session) { + return !session->not_resumable; +} + +int SSL_SESSION_has_ticket(const SSL_SESSION *session) { + return session->tlsext_ticklen > 0; +} + +void SSL_SESSION_get0_ticket(const SSL_SESSION *session, + const uint8_t **out_ticket, size_t *out_len) { + if (out_ticket != nullptr) { + *out_ticket = session->tlsext_tick; + } + *out_len = session->tlsext_ticklen; +} + +uint32_t SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *session) { + return session->tlsext_tick_lifetime_hint; +} + +SSL_SESSION *SSL_magic_pending_session_ptr(void) { + return (SSL_SESSION *)&g_pending_session_magic; +} + +SSL_SESSION *SSL_get_session(const SSL *ssl) { + // Once the handshake completes we return the established session. Otherwise + // we return the intermediate session, either |session| (for resumption) or + // |new_session| if doing a full handshake. 
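The ticket accessors above hand out a view into the session rather than a copy. A short caller-side sketch; TicketLenSketch is hypothetical and not part of the patch:

#include <openssl/ssl.h>

static size_t TicketLenSketch(const SSL_SESSION *session) {
  const uint8_t *ticket = nullptr;
  size_t ticket_len = 0;
  // |ticket| aliases memory owned by |session|; do not free it, and use it
  // only while |session| stays alive.
  SSL_SESSION_get0_ticket(session, &ticket, &ticket_len);
  return SSL_SESSION_has_ticket(session) ? ticket_len : 0;
}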
+ if (!SSL_in_init(ssl)) { + return ssl->s3->established_session.get(); + } + SSL_HANDSHAKE *hs = ssl->s3->hs.get(); + if (hs->early_session) { + return hs->early_session.get(); + } + if (hs->new_session) { + return hs->new_session.get(); + } + return ssl->session; +} + +SSL_SESSION *SSL_get1_session(SSL *ssl) { + SSL_SESSION *ret = SSL_get_session(ssl); + if (ret != NULL) { + SSL_SESSION_up_ref(ret); + } + return ret; +} + +int SSL_SESSION_get_ex_new_index(long argl, void *argp, + CRYPTO_EX_unused *unused, + CRYPTO_EX_dup *dup_unused, + CRYPTO_EX_free *free_func) { + int index; + if (!CRYPTO_get_ex_new_index(&g_ex_data_class, &index, argl, argp, + free_func)) { + return -1; + } + return index; +} + +int SSL_SESSION_set_ex_data(SSL_SESSION *session, int idx, void *arg) { + return CRYPTO_set_ex_data(&session->ex_data, idx, arg); +} + +void *SSL_SESSION_get_ex_data(const SSL_SESSION *session, int idx) { + return CRYPTO_get_ex_data(&session->ex_data, idx); +} + +int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *session) { + // Although |session| is inserted into two structures (a doubly-linked list + // and the hash table), |ctx| only takes one reference. + SSL_SESSION_up_ref(session); + UniquePtr owned_session(session); + + SSL_SESSION *old_session; + MutexWriteLock lock(&ctx->lock); + if (!lh_SSL_SESSION_insert(ctx->sessions, &old_session, session)) { + return 0; + } + // |ctx->sessions| took ownership of |session| and gave us back a reference to + // |old_session|. (|old_session| may be the same as |session|, in which case + // we traded identical references with |ctx->sessions|.) + owned_session.release(); + owned_session.reset(old_session); + + if (old_session != NULL) { + if (old_session == session) { + // |session| was already in the cache. There are no linked list pointers + // to update. + return 0; + } + + // There was a session ID collision. |old_session| was replaced with + // |session| in the hash table, so |old_session| must be removed from the + // linked list to match. + SSL_SESSION_list_remove(ctx, old_session); + } + + SSL_SESSION_list_add(ctx, session); + + // Enforce any cache size limits. + if (SSL_CTX_sess_get_cache_size(ctx) > 0) { + while (lh_SSL_SESSION_num_items(ctx->sessions) > + SSL_CTX_sess_get_cache_size(ctx)) { + if (!remove_session_lock(ctx, ctx->session_cache_tail, 0)) { + break; + } + } + } + + return 1; +} + +int SSL_CTX_remove_session(SSL_CTX *ctx, SSL_SESSION *session) { + return remove_session_lock(ctx, session, 1); +} + +int SSL_set_session(SSL *ssl, SSL_SESSION *session) { + // SSL_set_session may only be called before the handshake has started. + if (ssl->s3->initial_handshake_complete || + ssl->s3->hs == NULL || + ssl->s3->hs->state != 0) { + abort(); + } + + ssl_set_session(ssl, session); + return 1; +} + +uint32_t SSL_CTX_set_timeout(SSL_CTX *ctx, uint32_t timeout) { if (ctx == NULL) { return 0; } - /* Historically, zero was treated as |SSL_DEFAULT_SESSION_TIMEOUT|. */ + // Historically, zero was treated as |SSL_DEFAULT_SESSION_TIMEOUT|. 
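As the comment in SSL_CTX_add_session above notes, the cache takes exactly one reference of its own, so a caller keeps, and eventually drops, its reference independently. A sketch of that calling pattern; CacheSessionSketch is hypothetical and not part of the patch:

#include <openssl/ssl.h>

static void CacheSessionSketch(SSL_CTX *ctx, SSL_SESSION *session) {
  SSL_CTX_add_session(ctx, session);  // the cache takes its own reference
  SSL_SESSION_free(session);          // drop the caller's reference; the cache keeps the session
}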
if (timeout == 0) { timeout = SSL_DEFAULT_SESSION_TIMEOUT; } - long old_timeout = ctx->session_timeout; + uint32_t old_timeout = ctx->session_timeout; ctx->session_timeout = timeout; return old_timeout; } -long SSL_CTX_get_timeout(const SSL_CTX *ctx) { +uint32_t SSL_CTX_get_timeout(const SSL_CTX *ctx) { if (ctx == NULL) { return 0; } @@ -956,27 +1123,26 @@ long SSL_CTX_get_timeout(const SSL_CTX *ctx) { return ctx->session_timeout; } -void SSL_CTX_set_session_psk_dhe_timeout(SSL_CTX *ctx, long timeout) { +void SSL_CTX_set_session_psk_dhe_timeout(SSL_CTX *ctx, uint32_t timeout) { ctx->session_psk_dhe_timeout = timeout; } typedef struct timeout_param_st { SSL_CTX *ctx; - long time; + uint64_t time; LHASH_OF(SSL_SESSION) *cache; } TIMEOUT_PARAM; static void timeout_doall_arg(SSL_SESSION *session, void *void_param) { - TIMEOUT_PARAM *param = void_param; + TIMEOUT_PARAM *param = reinterpret_cast(void_param); if (param->time == 0 || + session->time + session->timeout < session->time || param->time > (session->time + session->timeout)) { - /* timeout */ - /* The reason we don't call SSL_CTX_remove_session() is to - * save on locking overhead */ + // The reason we don't call SSL_CTX_remove_session() is to + // save on locking overhead (void) lh_SSL_SESSION_delete(param->cache, session); SSL_SESSION_list_remove(param->ctx, session); - session->not_resumable = 1; if (param->ctx->remove_session_cb != NULL) { param->ctx->remove_session_cb(param->ctx, session); } @@ -984,7 +1150,7 @@ static void timeout_doall_arg(SSL_SESSION *session, void *void_param) { } } -void SSL_CTX_flush_sessions(SSL_CTX *ctx, long time) { +void SSL_CTX_flush_sessions(SSL_CTX *ctx, uint64_t time) { TIMEOUT_PARAM tp; tp.ctx = ctx; @@ -993,56 +1159,8 @@ void SSL_CTX_flush_sessions(SSL_CTX *ctx, long time) { return; } tp.time = time; - CRYPTO_MUTEX_lock_write(&ctx->lock); + MutexWriteLock lock(&ctx->lock); lh_SSL_SESSION_doall_arg(tp.cache, timeout_doall_arg, &tp); - CRYPTO_MUTEX_unlock_write(&ctx->lock); -} - -/* locked by SSL_CTX in the calling function */ -static void SSL_SESSION_list_remove(SSL_CTX *ctx, SSL_SESSION *session) { - if (session->next == NULL || session->prev == NULL) { - return; - } - - if (session->next == (SSL_SESSION *)&ctx->session_cache_tail) { - /* last element in list */ - if (session->prev == (SSL_SESSION *)&ctx->session_cache_head) { - /* only one element in list */ - ctx->session_cache_head = NULL; - ctx->session_cache_tail = NULL; - } else { - ctx->session_cache_tail = session->prev; - session->prev->next = (SSL_SESSION *)&(ctx->session_cache_tail); - } - } else { - if (session->prev == (SSL_SESSION *)&ctx->session_cache_head) { - /* first element in list */ - ctx->session_cache_head = session->next; - session->next->prev = (SSL_SESSION *)&(ctx->session_cache_head); - } else { /* middle of list */ - session->next->prev = session->prev; - session->prev->next = session->next; - } - } - session->prev = session->next = NULL; -} - -static void SSL_SESSION_list_add(SSL_CTX *ctx, SSL_SESSION *session) { - if (session->next != NULL && session->prev != NULL) { - SSL_SESSION_list_remove(ctx, session); - } - - if (ctx->session_cache_head == NULL) { - ctx->session_cache_head = session; - ctx->session_cache_tail = session; - session->prev = (SSL_SESSION *)&(ctx->session_cache_head); - session->next = (SSL_SESSION *)&(ctx->session_cache_tail); - } else { - session->next = ctx->session_cache_head; - session->next->prev = session; - session->prev = (SSL_SESSION *)&(ctx->session_cache_head); - ctx->session_cache_head = 
session; - } } void SSL_CTX_sess_set_new_cb(SSL_CTX *ctx, @@ -1065,14 +1183,21 @@ void (*SSL_CTX_sess_get_remove_cb(SSL_CTX *ctx))(SSL_CTX *ctx, } void SSL_CTX_sess_set_get_cb(SSL_CTX *ctx, - SSL_SESSION *(*cb)(SSL *ssl, - uint8_t *id, int id_len, - int *out_copy)) { + SSL_SESSION *(*cb)(SSL *ssl, const uint8_t *id, + int id_len, int *out_copy)) { ctx->get_session_cb = cb; } -SSL_SESSION *(*SSL_CTX_sess_get_get_cb(SSL_CTX *ctx))( - SSL *ssl, uint8_t *id, int id_len, int *out_copy) { +void SSL_CTX_sess_set_get_cb(SSL_CTX *ctx, + SSL_SESSION *(*cb)(SSL *ssl, uint8_t *id, + int id_len, int *out_copy)) { + ctx->get_session_cb_legacy = cb; +} + +SSL_SESSION *(*SSL_CTX_sess_get_get_cb(SSL_CTX *ctx))(SSL *ssl, + const uint8_t *id, + int id_len, + int *out_copy) { return ctx->get_session_cb; } diff --git a/Sources/BoringSSL/ssl/ssl_stat.c b/Sources/BoringSSL/ssl/ssl_stat.cc similarity index 59% rename from Sources/BoringSSL/ssl/ssl_stat.c rename to Sources/BoringSSL/ssl/ssl_stat.cc index 571b4a9a2..01153e94b 100644 --- a/Sources/BoringSSL/ssl/ssl_stat.c +++ b/Sources/BoringSSL/ssl/ssl_stat.cc @@ -88,236 +88,17 @@ #include "internal.h" -static int ssl_state(const SSL *ssl) { - if (ssl->s3->hs == NULL) { - assert(ssl->s3->initial_handshake_complete); - return SSL_ST_OK; - } - - return ssl->s3->hs->state; -} - const char *SSL_state_string_long(const SSL *ssl) { - switch (ssl_state(ssl)) { - case SSL_ST_ACCEPT: - return "before accept initialization"; - - case SSL_ST_CONNECT: - return "before connect initialization"; - - case SSL_ST_OK: - return "SSL negotiation finished successfully"; - - case SSL_ST_RENEGOTIATE: - return "SSL renegotiate ciphers"; - - /* SSLv3 additions */ - case SSL3_ST_CW_CLNT_HELLO_A: - return "SSLv3 write client hello A"; - - case SSL3_ST_CR_SRVR_HELLO_A: - return "SSLv3 read server hello A"; - - case SSL3_ST_CR_CERT_A: - return "SSLv3 read server certificate A"; - - case SSL3_ST_CR_KEY_EXCH_A: - return "SSLv3 read server key exchange A"; - - case SSL3_ST_CR_CERT_REQ_A: - return "SSLv3 read server certificate request A"; - - case SSL3_ST_CR_SESSION_TICKET_A: - return "SSLv3 read server session ticket A"; - - case SSL3_ST_CR_SRVR_DONE_A: - return "SSLv3 read server done A"; - - case SSL3_ST_CW_CERT_A: - return "SSLv3 write client certificate A"; - - case SSL3_ST_CW_KEY_EXCH_A: - return "SSLv3 write client key exchange A"; - - case SSL3_ST_CW_CERT_VRFY_A: - return "SSLv3 write certificate verify A"; - - case SSL3_ST_CW_CERT_VRFY_B: - return "SSLv3 write certificate verify B"; - - case SSL3_ST_CW_CHANGE: - case SSL3_ST_SW_CHANGE: - return "SSLv3 write change cipher spec"; - - case SSL3_ST_CW_FINISHED_A: - case SSL3_ST_SW_FINISHED_A: - return "SSLv3 write finished A"; - - case SSL3_ST_CR_CHANGE: - case SSL3_ST_SR_CHANGE: - return "SSLv3 read change cipher spec"; - - case SSL3_ST_CR_FINISHED_A: - case SSL3_ST_SR_FINISHED_A: - return "SSLv3 read finished A"; - - case SSL3_ST_CW_FLUSH: - case SSL3_ST_SW_FLUSH: - return "SSLv3 flush data"; - - case SSL3_ST_SR_CLNT_HELLO_A: - return "SSLv3 read client hello A"; - - case SSL3_ST_SR_CLNT_HELLO_B: - return "SSLv3 read client hello B"; - - case SSL3_ST_SR_CLNT_HELLO_C: - return "SSLv3 read client hello C"; - - case SSL3_ST_SW_SRVR_HELLO_A: - return "SSLv3 write server hello A"; - - case SSL3_ST_SW_CERT_A: - return "SSLv3 write certificate A"; - - case SSL3_ST_SW_KEY_EXCH_A: - return "SSLv3 write key exchange A"; - - case SSL3_ST_SW_CERT_REQ_A: - return "SSLv3 write certificate request A"; - - case SSL3_ST_SW_SESSION_TICKET_A: - return "SSLv3 
write session ticket A"; - - case SSL3_ST_SW_SRVR_DONE_A: - return "SSLv3 write server done A"; - - case SSL3_ST_SR_CERT_A: - return "SSLv3 read client certificate A"; - - case SSL3_ST_SR_KEY_EXCH_A: - return "SSLv3 read client key exchange A"; - - case SSL3_ST_SR_KEY_EXCH_B: - return "SSLv3 read client key exchange B"; - - case SSL3_ST_SR_CERT_VRFY_A: - return "SSLv3 read certificate verify A"; - - /* DTLS */ - case DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A: - return "DTLS1 read hello verify request A"; - - default: - return "unknown state"; + if (ssl->s3->hs == nullptr) { + return "SSL negotiation finished successfully"; } + + return ssl->server ? ssl_server_handshake_state(ssl->s3->hs.get()) + : ssl_client_handshake_state(ssl->s3->hs.get()); } const char *SSL_state_string(const SSL *ssl) { - switch (ssl_state(ssl)) { - case SSL_ST_ACCEPT: - return "AINIT "; - - case SSL_ST_CONNECT: - return "CINIT "; - - case SSL_ST_OK: - return "SSLOK "; - - /* SSLv3 additions */ - case SSL3_ST_SW_FLUSH: - case SSL3_ST_CW_FLUSH: - return "3FLUSH"; - - case SSL3_ST_CW_CLNT_HELLO_A: - return "3WCH_A"; - - case SSL3_ST_CR_SRVR_HELLO_A: - return "3RSH_A"; - - case SSL3_ST_CR_CERT_A: - return "3RSC_A"; - - case SSL3_ST_CR_KEY_EXCH_A: - return "3RSKEA"; - - case SSL3_ST_CR_CERT_REQ_A: - return "3RCR_A"; - - case SSL3_ST_CR_SRVR_DONE_A: - return "3RSD_A"; - - case SSL3_ST_CW_CERT_A: - return "3WCC_A"; - - case SSL3_ST_CW_KEY_EXCH_A: - return "3WCKEA"; - - case SSL3_ST_CW_CERT_VRFY_A: - return "3WCV_A"; - - case SSL3_ST_CW_CERT_VRFY_B: - return "3WCV_B"; - - case SSL3_ST_SW_CHANGE: - case SSL3_ST_CW_CHANGE: - return "3WCCS_"; - - case SSL3_ST_SW_FINISHED_A: - case SSL3_ST_CW_FINISHED_A: - return "3WFINA"; - - case SSL3_ST_CR_CHANGE: - case SSL3_ST_SR_CHANGE: - return "3RCCS_"; - - case SSL3_ST_SR_FINISHED_A: - case SSL3_ST_CR_FINISHED_A: - return "3RFINA"; - - case SSL3_ST_SR_CLNT_HELLO_A: - return "3RCH_A"; - - case SSL3_ST_SR_CLNT_HELLO_B: - return "3RCH_B"; - - case SSL3_ST_SR_CLNT_HELLO_C: - return "3RCH_C"; - - case SSL3_ST_SW_SRVR_HELLO_A: - return "3WSH_A"; - - case SSL3_ST_SW_CERT_A: - return "3WSC_A"; - - case SSL3_ST_SW_KEY_EXCH_A: - return "3WSKEA"; - - case SSL3_ST_SW_KEY_EXCH_B: - return "3WSKEB"; - - case SSL3_ST_SW_CERT_REQ_A: - return "3WCR_A"; - - case SSL3_ST_SW_SRVR_DONE_A: - return "3WSD_A"; - - case SSL3_ST_SR_CERT_A: - return "3RCC_A"; - - case SSL3_ST_SR_KEY_EXCH_A: - return "3RCKEA"; - - case SSL3_ST_SR_CERT_VRFY_A: - return "3RCV_A"; - - /* DTLS */ - case DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A: - return "DRCHVA"; - - default: - return "UNKWN "; - } + return "!!!!!!"; } const char *SSL_alert_type_string_long(int value) { diff --git a/Sources/BoringSSL/ssl/ssl_transcript.c b/Sources/BoringSSL/ssl/ssl_transcript.cc similarity index 60% rename from Sources/BoringSSL/ssl/ssl_transcript.c rename to Sources/BoringSSL/ssl/ssl_transcript.cc index 9cc37778c..2033dfd47 100644 --- a/Sources/BoringSSL/ssl/ssl_transcript.c +++ b/Sources/BoringSSL/ssl/ssl_transcript.cc @@ -150,136 +150,134 @@ #include "internal.h" -int SSL_TRANSCRIPT_init(SSL_TRANSCRIPT *transcript) { - SSL_TRANSCRIPT_cleanup(transcript); - transcript->buffer = BUF_MEM_new(); - return transcript->buffer != NULL; +namespace bssl { + +SSLTranscript::SSLTranscript() {} + +SSLTranscript::~SSLTranscript() {} + +bool SSLTranscript::Init() { + buffer_.reset(BUF_MEM_new()); + if (!buffer_) { + return false; + } + + hash_.Reset(); + md5_.Reset(); + return true; } -/* init_digest_with_data calls |EVP_DigestInit_ex| on |ctx| with |md| and then - * writes 
the data in |buf| to it. */ -static int init_digest_with_data(EVP_MD_CTX *ctx, const EVP_MD *md, - const BUF_MEM *buf) { +// InitDigestWithData calls |EVP_DigestInit_ex| on |ctx| with |md| and then +// writes the data in |buf| to it. +static bool InitDigestWithData(EVP_MD_CTX *ctx, const EVP_MD *md, + const BUF_MEM *buf) { if (!EVP_DigestInit_ex(ctx, md, NULL)) { - return 0; + return false; } EVP_DigestUpdate(ctx, buf->data, buf->length); - return 1; + return true; } -int SSL_TRANSCRIPT_init_hash(SSL_TRANSCRIPT *transcript, uint16_t version, - int algorithm_prf) { - const EVP_MD *md = ssl_get_handshake_digest(algorithm_prf, version); +bool SSLTranscript::InitHash(uint16_t version, const SSL_CIPHER *cipher) { + const EVP_MD *md = ssl_get_handshake_digest(version, cipher); - /* To support SSL 3.0's Finished and CertificateVerify constructions, - * EVP_md5_sha1() is split into MD5 and SHA-1 halves. When SSL 3.0 is removed, - * we can simplify this. */ + // To support SSL 3.0's Finished and CertificateVerify constructions, + // EVP_md5_sha1() is split into MD5 and SHA-1 halves. When SSL 3.0 is removed, + // we can simplify this. if (md == EVP_md5_sha1()) { - if (!init_digest_with_data(&transcript->md5, EVP_md5(), - transcript->buffer)) { - return 0; + if (!InitDigestWithData(md5_.get(), EVP_md5(), buffer_.get())) { + return false; } md = EVP_sha1(); } - if (!init_digest_with_data(&transcript->hash, md, transcript->buffer)) { - return 0; - } - - return 1; + return InitDigestWithData(hash_.get(), md, buffer_.get()); } -void SSL_TRANSCRIPT_cleanup(SSL_TRANSCRIPT *transcript) { - SSL_TRANSCRIPT_free_buffer(transcript); - EVP_MD_CTX_cleanup(&transcript->hash); - EVP_MD_CTX_cleanup(&transcript->md5); +void SSLTranscript::FreeBuffer() { + buffer_.reset(); } -void SSL_TRANSCRIPT_free_buffer(SSL_TRANSCRIPT *transcript) { - BUF_MEM_free(transcript->buffer); - transcript->buffer = NULL; +size_t SSLTranscript::DigestLen() const { + return EVP_MD_size(Digest()); } -size_t SSL_TRANSCRIPT_digest_len(const SSL_TRANSCRIPT *transcript) { - return EVP_MD_size(SSL_TRANSCRIPT_md(transcript)); +const EVP_MD *SSLTranscript::Digest() const { + if (EVP_MD_CTX_md(md5_.get()) != nullptr) { + return EVP_md5_sha1(); + } + return EVP_MD_CTX_md(hash_.get()); } -const EVP_MD *SSL_TRANSCRIPT_md(const SSL_TRANSCRIPT *transcript) { - if (EVP_MD_CTX_md(&transcript->md5) != NULL) { - return EVP_md5_sha1(); +bool SSLTranscript::UpdateForHelloRetryRequest() { + if (buffer_) { + buffer_->length = 0; + } + + uint8_t old_hash[EVP_MAX_MD_SIZE]; + size_t hash_len; + if (!GetHash(old_hash, &hash_len)) { + return false; + } + const uint8_t header[4] = {SSL3_MT_MESSAGE_HASH, 0, 0, + static_cast(hash_len)}; + if (!EVP_DigestInit_ex(hash_.get(), Digest(), nullptr) || + !Update(header) || + !Update(MakeConstSpan(old_hash, hash_len))) { + return false; } - return EVP_MD_CTX_md(&transcript->hash); + return true; } -int SSL_TRANSCRIPT_update(SSL_TRANSCRIPT *transcript, const uint8_t *in, - size_t in_len) { - /* Depending on the state of the handshake, either the handshake buffer may be - * active, the rolling hash, or both. 
*/ - if (transcript->buffer != NULL) { - size_t new_len = transcript->buffer->length + in_len; - if (new_len < in_len) { - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return 0; - } - if (!BUF_MEM_grow(transcript->buffer, new_len)) { - return 0; - } - OPENSSL_memcpy(transcript->buffer->data + new_len - in_len, in, in_len); +bool SSLTranscript::CopyHashContext(EVP_MD_CTX *ctx) { + return EVP_MD_CTX_copy_ex(ctx, hash_.get()); +} + +bool SSLTranscript::Update(Span in) { + // Depending on the state of the handshake, either the handshake buffer may be + // active, the rolling hash, or both. + if (buffer_ && + !BUF_MEM_append(buffer_.get(), in.data(), in.size())) { + return false; } - if (EVP_MD_CTX_md(&transcript->hash) != NULL) { - EVP_DigestUpdate(&transcript->hash, in, in_len); + if (EVP_MD_CTX_md(hash_.get()) != NULL) { + EVP_DigestUpdate(hash_.get(), in.data(), in.size()); } - if (EVP_MD_CTX_md(&transcript->md5) != NULL) { - EVP_DigestUpdate(&transcript->md5, in, in_len); + if (EVP_MD_CTX_md(md5_.get()) != NULL) { + EVP_DigestUpdate(md5_.get(), in.data(), in.size()); } - return 1; + return true; } -int SSL_TRANSCRIPT_get_hash(const SSL_TRANSCRIPT *transcript, uint8_t *out, - size_t *out_len) { - int ret = 0; - EVP_MD_CTX ctx; - EVP_MD_CTX_init(&ctx); +bool SSLTranscript::GetHash(uint8_t *out, size_t *out_len) { + ScopedEVP_MD_CTX ctx; unsigned md5_len = 0; - if (EVP_MD_CTX_md(&transcript->md5) != NULL) { - if (!EVP_MD_CTX_copy_ex(&ctx, &transcript->md5) || - !EVP_DigestFinal_ex(&ctx, out, &md5_len)) { - goto err; + if (EVP_MD_CTX_md(md5_.get()) != NULL) { + if (!EVP_MD_CTX_copy_ex(ctx.get(), md5_.get()) || + !EVP_DigestFinal_ex(ctx.get(), out, &md5_len)) { + return false; } } unsigned len; - if (!EVP_MD_CTX_copy_ex(&ctx, &transcript->hash) || - !EVP_DigestFinal_ex(&ctx, out + md5_len, &len)) { - goto err; + if (!EVP_MD_CTX_copy_ex(ctx.get(), hash_.get()) || + !EVP_DigestFinal_ex(ctx.get(), out + md5_len, &len)) { + return false; } *out_len = md5_len + len; - ret = 1; - -err: - EVP_MD_CTX_cleanup(&ctx); - return ret; + return true; } -static int ssl3_handshake_mac(SSL_TRANSCRIPT *transcript, - const SSL_SESSION *session, - const EVP_MD_CTX *ctx_template, - const char *sender, size_t sender_len, - uint8_t *p, size_t *out_len) { - unsigned int len; - size_t npad, n; - unsigned int i; - uint8_t md_buf[EVP_MAX_MD_SIZE]; - EVP_MD_CTX ctx; - - EVP_MD_CTX_init(&ctx); - if (!EVP_MD_CTX_copy_ex(&ctx, ctx_template)) { - EVP_MD_CTX_cleanup(&ctx); +static bool SSL3HandshakeMAC(const SSL_SESSION *session, + const EVP_MD_CTX *ctx_template, const char *sender, + size_t sender_len, uint8_t *p, size_t *out_len) { + ScopedEVP_MD_CTX ctx; + if (!EVP_MD_CTX_copy_ex(ctx.get(), ctx_template)) { OPENSSL_PUT_ERROR(SSL, ERR_LIB_EVP); - return 0; + return false; } static const uint8_t kPad1[48] = { @@ -296,110 +294,105 @@ static int ssl3_handshake_mac(SSL_TRANSCRIPT *transcript, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, }; - n = EVP_MD_CTX_size(&ctx); + size_t n = EVP_MD_CTX_size(ctx.get()); - npad = (48 / n) * n; - if (sender != NULL) { - EVP_DigestUpdate(&ctx, sender, sender_len); - } - EVP_DigestUpdate(&ctx, session->master_key, session->master_key_length); - EVP_DigestUpdate(&ctx, kPad1, npad); - EVP_DigestFinal_ex(&ctx, md_buf, &i); + size_t npad = (48 / n) * n; + EVP_DigestUpdate(ctx.get(), sender, sender_len); + EVP_DigestUpdate(ctx.get(), session->master_key, session->master_key_length); + EVP_DigestUpdate(ctx.get(), kPad1, npad); + unsigned md_buf_len; + uint8_t 
md_buf[EVP_MAX_MD_SIZE]; + EVP_DigestFinal_ex(ctx.get(), md_buf, &md_buf_len); - if (!EVP_DigestInit_ex(&ctx, EVP_MD_CTX_md(&ctx), NULL)) { - EVP_MD_CTX_cleanup(&ctx); + if (!EVP_DigestInit_ex(ctx.get(), EVP_MD_CTX_md(ctx.get()), NULL)) { OPENSSL_PUT_ERROR(SSL, ERR_LIB_EVP); - return 0; + return false; } - EVP_DigestUpdate(&ctx, session->master_key, session->master_key_length); - EVP_DigestUpdate(&ctx, kPad2, npad); - EVP_DigestUpdate(&ctx, md_buf, i); - EVP_DigestFinal_ex(&ctx, p, &len); - - EVP_MD_CTX_cleanup(&ctx); + EVP_DigestUpdate(ctx.get(), session->master_key, session->master_key_length); + EVP_DigestUpdate(ctx.get(), kPad2, npad); + EVP_DigestUpdate(ctx.get(), md_buf, md_buf_len); + unsigned len; + EVP_DigestFinal_ex(ctx.get(), p, &len); *out_len = len; - return 1; + return true; } -int SSL_TRANSCRIPT_ssl3_cert_verify_hash(SSL_TRANSCRIPT *transcript, - uint8_t *out, size_t *out_len, - const SSL_SESSION *session, - int signature_algorithm) { - if (SSL_TRANSCRIPT_md(transcript) != EVP_md5_sha1()) { +bool SSLTranscript::GetSSL3CertVerifyHash(uint8_t *out, size_t *out_len, + const SSL_SESSION *session, + uint16_t signature_algorithm) { + if (Digest() != EVP_md5_sha1()) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; + return false; } if (signature_algorithm == SSL_SIGN_RSA_PKCS1_MD5_SHA1) { size_t md5_len, len; - if (!ssl3_handshake_mac(transcript, session, &transcript->md5, NULL, 0, out, - &md5_len) || - !ssl3_handshake_mac(transcript, session, &transcript->hash, NULL, 0, - out + md5_len, &len)) { - return 0; + if (!SSL3HandshakeMAC(session, md5_.get(), NULL, 0, out, &md5_len) || + !SSL3HandshakeMAC(session, hash_.get(), NULL, 0, out + md5_len, &len)) { + return false; } *out_len = md5_len + len; - return 1; + return true; } if (signature_algorithm == SSL_SIGN_ECDSA_SHA1) { - return ssl3_handshake_mac(transcript, session, &transcript->hash, NULL, 0, - out, out_len); + return SSL3HandshakeMAC(session, hash_.get(), NULL, 0, out, out_len); } OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; + return false; } -int SSL_TRANSCRIPT_finish_mac(SSL_TRANSCRIPT *transcript, uint8_t *out, - size_t *out_len, const SSL_SESSION *session, - int from_server, uint16_t version) { - if (version == SSL3_VERSION) { - if (SSL_TRANSCRIPT_md(transcript) != EVP_md5_sha1()) { +bool SSLTranscript::GetFinishedMAC(uint8_t *out, size_t *out_len, + const SSL_SESSION *session, + bool from_server) { + if (session->ssl_version == SSL3_VERSION) { + if (Digest() != EVP_md5_sha1()) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; + return false; } const char *sender = from_server ? SSL3_MD_SERVER_FINISHED_CONST : SSL3_MD_CLIENT_FINISHED_CONST; const size_t sender_len = 4; size_t md5_len, len; - if (!ssl3_handshake_mac(transcript, session, &transcript->md5, sender, - sender_len, out, &md5_len) || - !ssl3_handshake_mac(transcript, session, &transcript->hash, sender, - sender_len, out + md5_len, &len)) { - return 0; + if (!SSL3HandshakeMAC(session, md5_.get(), sender, sender_len, out, + &md5_len) || + !SSL3HandshakeMAC(session, hash_.get(), sender, sender_len, + out + md5_len, &len)) { + return false; } *out_len = md5_len + len; - return 1; + return true; } - /* At this point, the handshake should have released the handshake buffer on - * its own. */ - assert(transcript->buffer == NULL); + // At this point, the handshake should have released the handshake buffer on + // its own. 
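SSL3HandshakeMAC above is the classic two-pass SSL 3.0 MAC: an inner digest over the sender label (when present), the master secret, and pad1, then an outer digest over the master secret, pad2, and the inner result. A generic sketch of that shape with the sender label omitted; TwoPassMacSketch and its parameters are placeholders, and it assumes the ScopedEVP_MD_CTX wrapper used above is in scope:

static bool TwoPassMacSketch(const EVP_MD *md, const uint8_t *secret,
                             size_t secret_len, const uint8_t *pad1,
                             const uint8_t *pad2, size_t pad_len,
                             uint8_t out[EVP_MAX_MD_SIZE], unsigned *out_len) {
  ScopedEVP_MD_CTX ctx;
  uint8_t inner[EVP_MAX_MD_SIZE];
  unsigned inner_len;
  // Inner pass: Hash(secret || pad1); outer pass: Hash(secret || pad2 || inner).
  return EVP_DigestInit_ex(ctx.get(), md, nullptr) &&
         EVP_DigestUpdate(ctx.get(), secret, secret_len) &&
         EVP_DigestUpdate(ctx.get(), pad1, pad_len) &&
         EVP_DigestFinal_ex(ctx.get(), inner, &inner_len) &&
         EVP_DigestInit_ex(ctx.get(), md, nullptr) &&
         EVP_DigestUpdate(ctx.get(), secret, secret_len) &&
         EVP_DigestUpdate(ctx.get(), pad2, pad_len) &&
         EVP_DigestUpdate(ctx.get(), inner, inner_len) &&
         EVP_DigestFinal_ex(ctx.get(), out, out_len);
}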
+ assert(!buffer_); - const char *label = TLS_MD_CLIENT_FINISH_CONST; - size_t label_len = TLS_MD_SERVER_FINISH_CONST_SIZE; - if (from_server) { - label = TLS_MD_SERVER_FINISH_CONST; - label_len = TLS_MD_SERVER_FINISH_CONST_SIZE; - } + static const char kClientLabel[] = "client finished"; + static const char kServerLabel[] = "server finished"; + auto label = from_server + ? MakeConstSpan(kServerLabel, sizeof(kServerLabel) - 1) + : MakeConstSpan(kClientLabel, sizeof(kClientLabel) - 1); uint8_t digests[EVP_MAX_MD_SIZE]; size_t digests_len; - if (!SSL_TRANSCRIPT_get_hash(transcript, digests, &digests_len)) { - return 0; + if (!GetHash(digests, &digests_len)) { + return false; } static const size_t kFinishedLen = 12; - if (!tls1_prf(SSL_TRANSCRIPT_md(transcript), out, kFinishedLen, - session->master_key, session->master_key_length, label, - label_len, digests, digests_len, NULL, 0)) { - return 0; + if (!tls1_prf(Digest(), MakeSpan(out, kFinishedLen), + MakeConstSpan(session->master_key, session->master_key_length), + label, MakeConstSpan(digests, digests_len), {})) { + return false; } *out_len = kFinishedLen; - return 1; + return true; } + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/ssl_versions.cc b/Sources/BoringSSL/ssl/ssl_versions.cc new file mode 100644 index 000000000..15b029433 --- /dev/null +++ b/Sources/BoringSSL/ssl/ssl_versions.cc @@ -0,0 +1,472 @@ +/* Copyright (c) 2017, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include + +#include +#include + +#include "internal.h" +#include "../crypto/internal.h" + + +namespace bssl { + +bool ssl_protocol_version_from_wire(uint16_t *out, uint16_t version) { + switch (version) { + case SSL3_VERSION: + case TLS1_VERSION: + case TLS1_1_VERSION: + case TLS1_2_VERSION: + *out = version; + return true; + + case TLS1_3_DRAFT_VERSION: + case TLS1_3_DRAFT21_VERSION: + case TLS1_3_DRAFT22_VERSION: + case TLS1_3_EXPERIMENT_VERSION: + case TLS1_3_EXPERIMENT2_VERSION: + case TLS1_3_EXPERIMENT3_VERSION: + *out = TLS1_3_VERSION; + return true; + + case DTLS1_VERSION: + // DTLS 1.0 is analogous to TLS 1.1, not TLS 1.0. + *out = TLS1_1_VERSION; + return true; + + case DTLS1_2_VERSION: + *out = TLS1_2_VERSION; + return true; + + default: + return false; + } +} + +// The follow arrays are the supported versions for TLS and DTLS, in order of +// decreasing preference. 
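ssl_protocol_version_from_wire above normalizes every TLS 1.3 draft codepoint to TLS1_3_VERSION and maps the DTLS wire versions onto their TLS analogues. A small sketch of the expected results; WireMappingSketch is hypothetical and the function itself is library-internal:

#include <assert.h>
#include <stdint.h>

static void WireMappingSketch() {
  uint16_t protocol = 0;
  bool ok = bssl::ssl_protocol_version_from_wire(&protocol,
                                                 TLS1_3_DRAFT22_VERSION);
  assert(ok && protocol == TLS1_3_VERSION);  // draft codepoints collapse to TLS 1.3
  ok = bssl::ssl_protocol_version_from_wire(&protocol, DTLS1_VERSION);
  assert(ok && protocol == TLS1_1_VERSION);  // DTLS 1.0 pairs with TLS 1.1
  (void)ok;
}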
+ +static const uint16_t kTLSVersions[] = { + TLS1_3_DRAFT22_VERSION, + TLS1_3_EXPERIMENT3_VERSION, + TLS1_3_EXPERIMENT2_VERSION, + TLS1_3_EXPERIMENT_VERSION, + TLS1_3_DRAFT_VERSION, + TLS1_3_DRAFT21_VERSION, + TLS1_2_VERSION, + TLS1_1_VERSION, + TLS1_VERSION, + SSL3_VERSION, +}; + +static const uint16_t kDTLSVersions[] = { + DTLS1_2_VERSION, + DTLS1_VERSION, +}; + +static void get_method_versions(const SSL_PROTOCOL_METHOD *method, + const uint16_t **out, size_t *out_num) { + if (method->is_dtls) { + *out = kDTLSVersions; + *out_num = OPENSSL_ARRAY_SIZE(kDTLSVersions); + } else { + *out = kTLSVersions; + *out_num = OPENSSL_ARRAY_SIZE(kTLSVersions); + } +} + +static bool method_supports_version(const SSL_PROTOCOL_METHOD *method, + uint16_t version) { + const uint16_t *versions; + size_t num_versions; + get_method_versions(method, &versions, &num_versions); + for (size_t i = 0; i < num_versions; i++) { + if (versions[i] == version) { + return true; + } + } + return false; +} + +// The following functions map between API versions and wire versions. The +// public API works on wire versions, except that TLS 1.3 draft versions all +// appear as TLS 1.3. This will get collapsed back down when TLS 1.3 is +// finalized. + +static const char *ssl_version_to_string(uint16_t version) { + switch (version) { + case TLS1_3_DRAFT_VERSION: + case TLS1_3_DRAFT21_VERSION: + case TLS1_3_DRAFT22_VERSION: + case TLS1_3_EXPERIMENT_VERSION: + case TLS1_3_EXPERIMENT2_VERSION: + case TLS1_3_EXPERIMENT3_VERSION: + return "TLSv1.3"; + + case TLS1_2_VERSION: + return "TLSv1.2"; + + case TLS1_1_VERSION: + return "TLSv1.1"; + + case TLS1_VERSION: + return "TLSv1"; + + case SSL3_VERSION: + return "SSLv3"; + + case DTLS1_VERSION: + return "DTLSv1"; + + case DTLS1_2_VERSION: + return "DTLSv1.2"; + + default: + return "unknown"; + } +} + +static uint16_t wire_version_to_api(uint16_t version) { + switch (version) { + // Report TLS 1.3 draft versions as TLS 1.3 in the public API. + case TLS1_3_DRAFT_VERSION: + case TLS1_3_DRAFT21_VERSION: + case TLS1_3_DRAFT22_VERSION: + case TLS1_3_EXPERIMENT_VERSION: + case TLS1_3_EXPERIMENT2_VERSION: + case TLS1_3_EXPERIMENT3_VERSION: + return TLS1_3_VERSION; + default: + return version; + } +} + +// api_version_to_wire maps |version| to some representative wire version. In +// particular, it picks an arbitrary TLS 1.3 representative. This should only be +// used in context where that does not matter. +static bool api_version_to_wire(uint16_t *out, uint16_t version) { + if (version == TLS1_3_DRAFT_VERSION || + version == TLS1_3_DRAFT21_VERSION || + version == TLS1_3_DRAFT22_VERSION || + version == TLS1_3_EXPERIMENT_VERSION || + version == TLS1_3_EXPERIMENT2_VERSION || + version == TLS1_3_EXPERIMENT3_VERSION) { + return false; + } + if (version == TLS1_3_VERSION) { + version = TLS1_3_DRAFT_VERSION; + } + + // Check it is a real protocol version. + uint16_t unused; + if (!ssl_protocol_version_from_wire(&unused, version)) { + return false; + } + + *out = version; + return true; +} + +static bool set_version_bound(const SSL_PROTOCOL_METHOD *method, uint16_t *out, + uint16_t version) { + if (!api_version_to_wire(&version, version) || + !method_supports_version(method, version) || + !ssl_protocol_version_from_wire(out, version)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_SSL_VERSION); + return false; + } + + return true; +} + +static bool set_min_version(const SSL_PROTOCOL_METHOD *method, uint16_t *out, + uint16_t version) { + // Zero is interpreted as the default minimum version. 
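Zero means "use the default bound", as set_min_version() and set_max_version() just below show; any other value is validated against the supported-version list. A public-API sketch, assuming the SSL_CTX_set_*_proto_version setters route through these helpers; VersionBoundsSketch and |ctx| are hypothetical:

#include <openssl/ssl.h>

static void VersionBoundsSketch(SSL_CTX *ctx) {
  SSL_CTX_set_min_proto_version(ctx, 0);               // default minimum bound
  SSL_CTX_set_max_proto_version(ctx, TLS1_2_VERSION);  // explicit maximum bound
}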
+ if (version == 0) { + // SSL 3.0 is disabled by default and TLS 1.0 does not exist in DTLS. + *out = method->is_dtls ? TLS1_1_VERSION : TLS1_VERSION; + return true; + } + + return set_version_bound(method, out, version); +} + +static bool set_max_version(const SSL_PROTOCOL_METHOD *method, uint16_t *out, + uint16_t version) { + // Zero is interpreted as the default maximum version. + if (version == 0) { + *out = TLS1_2_VERSION; + return true; + } + + return set_version_bound(method, out, version); +} + +const struct { + uint16_t version; + uint32_t flag; +} kProtocolVersions[] = { + {SSL3_VERSION, SSL_OP_NO_SSLv3}, + {TLS1_VERSION, SSL_OP_NO_TLSv1}, + {TLS1_1_VERSION, SSL_OP_NO_TLSv1_1}, + {TLS1_2_VERSION, SSL_OP_NO_TLSv1_2}, + {TLS1_3_VERSION, SSL_OP_NO_TLSv1_3}, +}; + +bool ssl_get_version_range(const SSL *ssl, uint16_t *out_min_version, + uint16_t *out_max_version) { + // For historical reasons, |SSL_OP_NO_DTLSv1| aliases |SSL_OP_NO_TLSv1|, but + // DTLS 1.0 should be mapped to TLS 1.1. + uint32_t options = ssl->options; + if (SSL_is_dtls(ssl)) { + options &= ~SSL_OP_NO_TLSv1_1; + if (options & SSL_OP_NO_DTLSv1) { + options |= SSL_OP_NO_TLSv1_1; + } + } + + uint16_t min_version = ssl->conf_min_version; + uint16_t max_version = ssl->conf_max_version; + + // OpenSSL's API for controlling versions entails blacklisting individual + // protocols. This has two problems. First, on the client, the protocol can + // only express a contiguous range of versions. Second, a library consumer + // trying to set a maximum version cannot disable protocol versions that get + // added in a future version of the library. + // + // To account for both of these, OpenSSL interprets the client-side bitmask + // as a min/max range by picking the lowest contiguous non-empty range of + // enabled protocols. Note that this means it is impossible to set a maximum + // version of the higest supported TLS version in a future-proof way. + bool any_enabled = false; + for (size_t i = 0; i < OPENSSL_ARRAY_SIZE(kProtocolVersions); i++) { + // Only look at the versions already enabled. + if (min_version > kProtocolVersions[i].version) { + continue; + } + if (max_version < kProtocolVersions[i].version) { + break; + } + + if (!(options & kProtocolVersions[i].flag)) { + // The minimum version is the first enabled version. + if (!any_enabled) { + any_enabled = true; + min_version = kProtocolVersions[i].version; + } + continue; + } + + // If there is a disabled version after the first enabled one, all versions + // after it are implicitly disabled. + if (any_enabled) { + max_version = kProtocolVersions[i-1].version; + break; + } + } + + if (!any_enabled) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SUPPORTED_VERSIONS_ENABLED); + return false; + } + + *out_min_version = min_version; + *out_max_version = max_version; + return true; +} + +static uint16_t ssl_version(const SSL *ssl) { + // In early data, we report the predicted version. + if (SSL_in_early_data(ssl) && !ssl->server) { + return ssl->s3->hs->early_session->ssl_version; + } + return ssl->version; +} + +uint16_t ssl_protocol_version(const SSL *ssl) { + assert(ssl->s3->have_version); + uint16_t version; + if (!ssl_protocol_version_from_wire(&version, ssl->version)) { + // |ssl->version| will always be set to a valid version. 
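Worked example (editor's addition, not from the patch) for the range-collapsing logic in ssl_get_version_range() above, assuming an |SSL_CTX| named |ctx| with the TLS defaults of min TLS1_VERSION and max TLS1_2_VERSION:

  SSL_CTX_set_options(ctx, SSL_OP_NO_TLSv1_1);
  // Scan of kProtocolVersions: TLS 1.0 is enabled, so min = TLS1_VERSION.
  // TLS 1.1 is then found disabled after an enabled version, so max is clamped
  // to TLS1_VERSION and the loop breaks. The reported range is
  // [TLS1_VERSION, TLS1_VERSION], i.e. only TLS 1.0 remains usable.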
+    assert(0);
+    return 0;
+  }
+
+  return version;
+}
+
+bool ssl_supports_version(SSL_HANDSHAKE *hs, uint16_t version) {
+  SSL *const ssl = hs->ssl;
+  uint16_t protocol_version;
+  if (!method_supports_version(ssl->method, version) ||
+      !ssl_protocol_version_from_wire(&protocol_version, version) ||
+      hs->min_version > protocol_version ||
+      protocol_version > hs->max_version) {
+    return false;
+  }
+
+  // TLS 1.3 variants must additionally match |tls13_variant|.
+  if (protocol_version != TLS1_3_VERSION ||
+      (ssl->tls13_variant == tls13_experiment &&
+       version == TLS1_3_EXPERIMENT_VERSION) ||
+      (ssl->tls13_variant == tls13_experiment2 &&
+       version == TLS1_3_EXPERIMENT2_VERSION) ||
+      (ssl->tls13_variant == tls13_experiment3 &&
+       version == TLS1_3_EXPERIMENT3_VERSION) ||
+      (ssl->tls13_variant == tls13_draft21 &&
+       version == TLS1_3_DRAFT21_VERSION) ||
+      (ssl->tls13_variant == tls13_draft22 &&
+       version == TLS1_3_DRAFT22_VERSION) ||
+      (ssl->tls13_variant == tls13_default &&
+       version == TLS1_3_DRAFT_VERSION)) {
+    return true;
+  }
+
+  // The server, when not configured at |tls13_default|, should additionally
+  // enable all variants, except draft-21 which is implemented solely for QUIC
+  // interop testing and will not be deployed, and draft-22 which will be
+  // enabled once the draft is finalized and ready to be deployed in Chrome.
+  // Currently, this is to implement the draft-18 vs. experiments field trials.
+  // In the future, this will be to transition cleanly to a final draft-22
+  // which hopefully includes the deployability fixes.
+  if (ssl->server &&
+      ssl->tls13_variant != tls13_default &&
+      version != TLS1_3_DRAFT21_VERSION &&
+      version != TLS1_3_DRAFT22_VERSION) {
+    return true;
+  }
+
+  return false;
+}
+
+bool ssl_add_supported_versions(SSL_HANDSHAKE *hs, CBB *cbb) {
+  const uint16_t *versions;
+  size_t num_versions;
+  get_method_versions(hs->ssl->method, &versions, &num_versions);
+  for (size_t i = 0; i < num_versions; i++) {
+    if (ssl_supports_version(hs, versions[i]) &&
+        !CBB_add_u16(cbb, versions[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool ssl_negotiate_version(SSL_HANDSHAKE *hs, uint8_t *out_alert,
+                           uint16_t *out_version, const CBS *peer_versions) {
+  const uint16_t *versions;
+  size_t num_versions;
+  get_method_versions(hs->ssl->method, &versions, &num_versions);
+  for (size_t i = 0; i < num_versions; i++) {
+    if (!ssl_supports_version(hs, versions[i])) {
+      continue;
+    }
+
+    CBS copy = *peer_versions;
+    while (CBS_len(&copy) != 0) {
+      uint16_t version;
+      if (!CBS_get_u16(&copy, &version)) {
+        OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
+        *out_alert = SSL_AD_DECODE_ERROR;
+        return false;
+      }
+
+      if (version == versions[i]) {
+        *out_version = version;
+        return true;
+      }
+    }
+  }
+
+  OPENSSL_PUT_ERROR(SSL, SSL_R_UNSUPPORTED_PROTOCOL);
+  *out_alert = SSL_AD_PROTOCOL_VERSION;
+  return false;
+}
+
+bool ssl_is_draft21(uint16_t version) {
+  return version == TLS1_3_DRAFT21_VERSION || version == TLS1_3_DRAFT22_VERSION;
+}
+
+bool ssl_is_draft22(uint16_t version) {
+  return version == TLS1_3_DRAFT22_VERSION;
+}
+
+bool ssl_is_resumption_experiment(uint16_t version) {
+  return version == TLS1_3_EXPERIMENT_VERSION ||
+         version == TLS1_3_EXPERIMENT2_VERSION ||
+         version == TLS1_3_EXPERIMENT3_VERSION ||
+         version == TLS1_3_DRAFT22_VERSION;
+}
+
+bool ssl_is_resumption_variant(enum tls13_variant_t variant) {
+  return variant == tls13_experiment || variant == tls13_experiment2 ||
+         variant == tls13_experiment3 || variant == tls13_draft22;
+}
+
+bool
ssl_is_resumption_client_ccs_experiment(uint16_t version) { + return version == TLS1_3_EXPERIMENT_VERSION || + version == TLS1_3_EXPERIMENT2_VERSION || + version == TLS1_3_DRAFT22_VERSION; +} + +bool ssl_is_resumption_record_version_experiment(uint16_t version) { + return version == TLS1_3_EXPERIMENT2_VERSION || + version == TLS1_3_EXPERIMENT3_VERSION || + version == TLS1_3_DRAFT22_VERSION; +} + +} // namespace bssl + +using namespace bssl; + +int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, uint16_t version) { + return set_min_version(ctx->method, &ctx->conf_min_version, version); +} + +int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, uint16_t version) { + return set_max_version(ctx->method, &ctx->conf_max_version, version); +} + +int SSL_set_min_proto_version(SSL *ssl, uint16_t version) { + return set_min_version(ssl->method, &ssl->conf_min_version, version); +} + +int SSL_set_max_proto_version(SSL *ssl, uint16_t version) { + return set_max_version(ssl->method, &ssl->conf_max_version, version); +} + +int SSL_version(const SSL *ssl) { + return wire_version_to_api(ssl_version(ssl)); +} + +const char *SSL_get_version(const SSL *ssl) { + return ssl_version_to_string(ssl_version(ssl)); +} + +const char *SSL_SESSION_get_version(const SSL_SESSION *session) { + return ssl_version_to_string(session->ssl_version); +} + +uint16_t SSL_SESSION_get_protocol_version(const SSL_SESSION *session) { + return wire_version_to_api(session->ssl_version); +} + +int SSL_SESSION_set_protocol_version(SSL_SESSION *session, uint16_t version) { + // This picks a representative TLS 1.3 version, but this API should only be + // used on unit test sessions anyway. + return api_version_to_wire(&session->ssl_version, version); +} diff --git a/Sources/BoringSSL/ssl/ssl_x509.c b/Sources/BoringSSL/ssl/ssl_x509.cc similarity index 50% rename from Sources/BoringSSL/ssl/ssl_x509.c rename to Sources/BoringSSL/ssl/ssl_x509.cc index 2955c2137..2b7ba8393 100644 --- a/Sources/BoringSSL/ssl/ssl_x509.c +++ b/Sources/BoringSSL/ssl/ssl_x509.cc @@ -152,9 +152,433 @@ #include #include "internal.h" +#include "../crypto/internal.h" +namespace bssl { + +// check_ssl_x509_method asserts that |ssl| has the X509-based method +// installed. Calling an X509-based method on an |ssl| with a different method +// will likely misbehave and possibly crash or leak memory. +static void check_ssl_x509_method(const SSL *ssl) { + assert(ssl == NULL || ssl->ctx->x509_method == &ssl_crypto_x509_method); +} + +// check_ssl_ctx_x509_method acts like |check_ssl_x509_method|, but for an +// |SSL_CTX|. +static void check_ssl_ctx_x509_method(const SSL_CTX *ctx) { + assert(ctx == NULL || ctx->x509_method == &ssl_crypto_x509_method); +} + +// x509_to_buffer returns a |CRYPTO_BUFFER| that contains the serialised +// contents of |x509|. +static UniquePtr x509_to_buffer(X509 *x509) { + uint8_t *buf = NULL; + int cert_len = i2d_X509(x509, &buf); + if (cert_len <= 0) { + return 0; + } + + UniquePtr buffer(CRYPTO_BUFFER_new(buf, cert_len, NULL)); + OPENSSL_free(buf); + + return buffer; +} + +// new_leafless_chain returns a fresh stack of buffers set to {NULL}. +static STACK_OF(CRYPTO_BUFFER) *new_leafless_chain(void) { + STACK_OF(CRYPTO_BUFFER) *chain = sk_CRYPTO_BUFFER_new_null(); + if (chain == NULL) { + return NULL; + } + + if (!sk_CRYPTO_BUFFER_push(chain, NULL)) { + sk_CRYPTO_BUFFER_free(chain); + return NULL; + } + + return chain; +} + +// ssl_cert_set_chain sets elements 1.. of |cert->chain| to the serialised +// forms of elements of |chain|. 
It returns one on success or zero on error, in +// which case no change to |cert->chain| is made. It preverses the existing +// leaf from |cert->chain|, if any. +static int ssl_cert_set_chain(CERT *cert, STACK_OF(X509) *chain) { + UniquePtr new_chain; + + if (cert->chain != NULL) { + new_chain.reset(sk_CRYPTO_BUFFER_new_null()); + if (!new_chain) { + return 0; + } + + CRYPTO_BUFFER *leaf = sk_CRYPTO_BUFFER_value(cert->chain, 0); + if (!sk_CRYPTO_BUFFER_push(new_chain.get(), leaf)) { + return 0; + } + // |leaf| might be NULL if it's a “leafless” chain. + if (leaf != NULL) { + CRYPTO_BUFFER_up_ref(leaf); + } + } + + for (X509 *x509 : chain) { + if (!new_chain) { + new_chain.reset(new_leafless_chain()); + if (!new_chain) { + return 0; + } + } + + UniquePtr buffer = x509_to_buffer(x509); + if (!buffer || + !PushToStack(new_chain.get(), std::move(buffer))) { + return 0; + } + } + + sk_CRYPTO_BUFFER_pop_free(cert->chain, CRYPTO_BUFFER_free); + cert->chain = new_chain.release(); + + return 1; +} + +static void ssl_crypto_x509_cert_flush_cached_leaf(CERT *cert) { + X509_free(cert->x509_leaf); + cert->x509_leaf = NULL; +} + +static void ssl_crypto_x509_cert_flush_cached_chain(CERT *cert) { + sk_X509_pop_free(cert->x509_chain, X509_free); + cert->x509_chain = NULL; +} + +static int ssl_crypto_x509_check_client_CA_list( + STACK_OF(CRYPTO_BUFFER) *names) { + for (const CRYPTO_BUFFER *buffer : names) { + const uint8_t *inp = CRYPTO_BUFFER_data(buffer); + X509_NAME *name = d2i_X509_NAME(NULL, &inp, CRYPTO_BUFFER_len(buffer)); + const int ok = name != NULL && inp == CRYPTO_BUFFER_data(buffer) + + CRYPTO_BUFFER_len(buffer); + X509_NAME_free(name); + if (!ok) { + return 0; + } + } + + return 1; +} + +static void ssl_crypto_x509_cert_clear(CERT *cert) { + ssl_crypto_x509_cert_flush_cached_leaf(cert); + ssl_crypto_x509_cert_flush_cached_chain(cert); + + X509_free(cert->x509_stash); + cert->x509_stash = NULL; +} + +static void ssl_crypto_x509_cert_free(CERT *cert) { + ssl_crypto_x509_cert_clear(cert); + X509_STORE_free(cert->verify_store); +} + +static void ssl_crypto_x509_cert_dup(CERT *new_cert, const CERT *cert) { + if (cert->verify_store != NULL) { + X509_STORE_up_ref(cert->verify_store); + new_cert->verify_store = cert->verify_store; + } +} + +static int ssl_crypto_x509_session_cache_objects(SSL_SESSION *sess) { + bssl::UniquePtr chain; + if (sk_CRYPTO_BUFFER_num(sess->certs) > 0) { + chain.reset(sk_X509_new_null()); + if (!chain) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return 0; + } + } + + X509 *leaf = nullptr; + for (CRYPTO_BUFFER *cert : sess->certs) { + UniquePtr x509(X509_parse_from_buffer(cert)); + if (!x509) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return 0; + } + if (leaf == nullptr) { + leaf = x509.get(); + } + if (!PushToStack(chain.get(), std::move(x509))) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return 0; + } + } + + sk_X509_pop_free(sess->x509_chain, X509_free); + sess->x509_chain = chain.release(); + sk_X509_pop_free(sess->x509_chain_without_leaf, X509_free); + sess->x509_chain_without_leaf = NULL; + + X509_free(sess->x509_peer); + if (leaf != NULL) { + X509_up_ref(leaf); + } + sess->x509_peer = leaf; + return 1; +} + +static int ssl_crypto_x509_session_dup(SSL_SESSION *new_session, + const SSL_SESSION *session) { + if (session->x509_peer != NULL) { + X509_up_ref(session->x509_peer); + new_session->x509_peer = session->x509_peer; + } + if (session->x509_chain != NULL) { + new_session->x509_chain = X509_chain_up_ref(session->x509_chain); + if 
(new_session->x509_chain == NULL) { + return 0; + } + } + + return 1; +} + +static void ssl_crypto_x509_session_clear(SSL_SESSION *session) { + X509_free(session->x509_peer); + session->x509_peer = NULL; + sk_X509_pop_free(session->x509_chain, X509_free); + session->x509_chain = NULL; + sk_X509_pop_free(session->x509_chain_without_leaf, X509_free); + session->x509_chain_without_leaf = NULL; +} + +static int ssl_verify_alarm_type(long type) { + switch (type) { + case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: + case X509_V_ERR_UNABLE_TO_GET_CRL: + case X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER: + return SSL_AD_UNKNOWN_CA; + + case X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE: + case X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE: + case X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY: + case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD: + case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD: + case X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD: + case X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD: + case X509_V_ERR_CERT_NOT_YET_VALID: + case X509_V_ERR_CRL_NOT_YET_VALID: + case X509_V_ERR_CERT_UNTRUSTED: + case X509_V_ERR_CERT_REJECTED: + case X509_V_ERR_HOSTNAME_MISMATCH: + case X509_V_ERR_EMAIL_MISMATCH: + case X509_V_ERR_IP_ADDRESS_MISMATCH: + return SSL_AD_BAD_CERTIFICATE; + + case X509_V_ERR_CERT_SIGNATURE_FAILURE: + case X509_V_ERR_CRL_SIGNATURE_FAILURE: + return SSL_AD_DECRYPT_ERROR; + + case X509_V_ERR_CERT_HAS_EXPIRED: + case X509_V_ERR_CRL_HAS_EXPIRED: + return SSL_AD_CERTIFICATE_EXPIRED; + + case X509_V_ERR_CERT_REVOKED: + return SSL_AD_CERTIFICATE_REVOKED; + + case X509_V_ERR_UNSPECIFIED: + case X509_V_ERR_OUT_OF_MEM: + case X509_V_ERR_INVALID_CALL: + case X509_V_ERR_STORE_LOOKUP: + return SSL_AD_INTERNAL_ERROR; + + case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT: + case X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN: + case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY: + case X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE: + case X509_V_ERR_CERT_CHAIN_TOO_LONG: + case X509_V_ERR_PATH_LENGTH_EXCEEDED: + case X509_V_ERR_INVALID_CA: + return SSL_AD_UNKNOWN_CA; + + case X509_V_ERR_APPLICATION_VERIFICATION: + return SSL_AD_HANDSHAKE_FAILURE; + + case X509_V_ERR_INVALID_PURPOSE: + return SSL_AD_UNSUPPORTED_CERTIFICATE; + + default: + return SSL_AD_CERTIFICATE_UNKNOWN; + } +} + +static int ssl_crypto_x509_session_verify_cert_chain(SSL_SESSION *session, + SSL *ssl, + uint8_t *out_alert) { + *out_alert = SSL_AD_INTERNAL_ERROR; + STACK_OF(X509) *const cert_chain = session->x509_chain; + if (cert_chain == NULL || sk_X509_num(cert_chain) == 0) { + return 0; + } + + X509_STORE *verify_store = ssl->ctx->cert_store; + if (ssl->cert->verify_store != NULL) { + verify_store = ssl->cert->verify_store; + } + + X509 *leaf = sk_X509_value(cert_chain, 0); + ScopedX509_STORE_CTX ctx; + if (!X509_STORE_CTX_init(ctx.get(), verify_store, leaf, cert_chain)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_X509_LIB); + return 0; + } + if (!X509_STORE_CTX_set_ex_data(ctx.get(), + SSL_get_ex_data_X509_STORE_CTX_idx(), ssl)) { + return 0; + } + + // We need to inherit the verify parameters. These can be determined by the + // context: if its a server it will verify SSL client certificates or vice + // versa. + X509_STORE_CTX_set_default(ctx.get(), + ssl->server ? "ssl_client" : "ssl_server"); + + // Anything non-default in "param" should overwrite anything in the ctx. 
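Illustrative sketch (editor's addition, not from the patch): the per-connection verify parameters applied by the X509_VERIFY_PARAM_set1 call that follows typically come from application configuration such as the following, assuming an already-created |SSL| object. "example.com" is a placeholder host.

  X509_VERIFY_PARAM *param = SSL_get0_param(ssl);
  X509_VERIFY_PARAM_set1_host(param, "example.com", 0);  // 0 = use strlen
  X509_VERIFY_PARAM_set_depth(param, 4);                 // cap the chain depth
  SSL_set_verify(ssl, SSL_VERIFY_PEER, nullptr);         // require verification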
+ X509_VERIFY_PARAM_set1(X509_STORE_CTX_get0_param(ctx.get()), ssl->param); + + if (ssl->verify_callback) { + X509_STORE_CTX_set_verify_cb(ctx.get(), ssl->verify_callback); + } + + int verify_ret; + if (ssl->ctx->app_verify_callback != NULL) { + verify_ret = + ssl->ctx->app_verify_callback(ctx.get(), ssl->ctx->app_verify_arg); + } else { + verify_ret = X509_verify_cert(ctx.get()); + } + + session->verify_result = ctx->error; + + // If |SSL_VERIFY_NONE|, the error is non-fatal, but we keep the result. + if (verify_ret <= 0 && ssl->verify_mode != SSL_VERIFY_NONE) { + *out_alert = ssl_verify_alarm_type(ctx->error); + return 0; + } + + ERR_clear_error(); + return 1; +} + +static void ssl_crypto_x509_hs_flush_cached_ca_names(SSL_HANDSHAKE *hs) { + sk_X509_NAME_pop_free(hs->cached_x509_ca_names, X509_NAME_free); + hs->cached_x509_ca_names = NULL; +} + +static int ssl_crypto_x509_ssl_new(SSL *ssl) { + ssl->param = X509_VERIFY_PARAM_new(); + if (ssl->param == NULL) { + return 0; + } + X509_VERIFY_PARAM_inherit(ssl->param, ssl->ctx->param); + return 1; +} + +static void ssl_crypto_x509_ssl_flush_cached_client_CA(SSL *ssl) { + sk_X509_NAME_pop_free(ssl->cached_x509_client_CA, X509_NAME_free); + ssl->cached_x509_client_CA = NULL; +} + +static void ssl_crypto_x509_ssl_free(SSL *ssl) { + ssl_crypto_x509_ssl_flush_cached_client_CA(ssl); + X509_VERIFY_PARAM_free(ssl->param); +} + +static int ssl_crypto_x509_ssl_auto_chain_if_needed(SSL *ssl) { + // Only build a chain if there are no intermediates configured and the feature + // isn't disabled. + if ((ssl->mode & SSL_MODE_NO_AUTO_CHAIN) || + !ssl_has_certificate(ssl) || + ssl->cert->chain == NULL || + sk_CRYPTO_BUFFER_num(ssl->cert->chain) > 1) { + return 1; + } + + UniquePtr leaf( + X509_parse_from_buffer(sk_CRYPTO_BUFFER_value(ssl->cert->chain, 0))); + if (!leaf) { + OPENSSL_PUT_ERROR(SSL, ERR_R_X509_LIB); + return 0; + } + + ScopedX509_STORE_CTX ctx; + if (!X509_STORE_CTX_init(ctx.get(), ssl->ctx->cert_store, leaf.get(), NULL)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_X509_LIB); + return 0; + } + + // Attempt to build a chain, ignoring the result. + X509_verify_cert(ctx.get()); + ERR_clear_error(); + + // Remove the leaf from the generated chain. 
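Editor's aside (illustrative, not from the patch): applications that already ship their intermediates, or that want to skip the store lookup performed above, can bypass auto-chaining. |intermediate_x509| is a hypothetical, already-loaded X509.

  SSL_CTX_set_mode(ctx, SSL_MODE_NO_AUTO_CHAIN);     // never build a chain
  SSL_CTX_add1_chain_cert(ctx, intermediate_x509);   // or supply one explicitly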
+ X509_free(sk_X509_shift(ctx->chain)); + + if (!ssl_cert_set_chain(ssl->cert, ctx->chain)) { + return 0; + } + + ssl_crypto_x509_cert_flush_cached_chain(ssl->cert); + + return 1; +} + +static void ssl_crypto_x509_ssl_ctx_flush_cached_client_CA(SSL_CTX *ctx) { + sk_X509_NAME_pop_free(ctx->cached_x509_client_CA, X509_NAME_free); + ctx->cached_x509_client_CA = NULL; +} + +static int ssl_crypto_x509_ssl_ctx_new(SSL_CTX *ctx) { + ctx->cert_store = X509_STORE_new(); + ctx->param = X509_VERIFY_PARAM_new(); + return (ctx->cert_store != NULL && ctx->param != NULL); +} + +static void ssl_crypto_x509_ssl_ctx_free(SSL_CTX *ctx) { + ssl_crypto_x509_ssl_ctx_flush_cached_client_CA(ctx); + X509_VERIFY_PARAM_free(ctx->param); + X509_STORE_free(ctx->cert_store); +} + +const SSL_X509_METHOD ssl_crypto_x509_method = { + ssl_crypto_x509_check_client_CA_list, + ssl_crypto_x509_cert_clear, + ssl_crypto_x509_cert_free, + ssl_crypto_x509_cert_dup, + ssl_crypto_x509_cert_flush_cached_chain, + ssl_crypto_x509_cert_flush_cached_leaf, + ssl_crypto_x509_session_cache_objects, + ssl_crypto_x509_session_dup, + ssl_crypto_x509_session_clear, + ssl_crypto_x509_session_verify_cert_chain, + ssl_crypto_x509_hs_flush_cached_ca_names, + ssl_crypto_x509_ssl_new, + ssl_crypto_x509_ssl_free, + ssl_crypto_x509_ssl_flush_cached_client_CA, + ssl_crypto_x509_ssl_auto_chain_if_needed, + ssl_crypto_x509_ssl_ctx_new, + ssl_crypto_x509_ssl_ctx_free, + ssl_crypto_x509_ssl_ctx_flush_cached_client_CA, +}; + +} // namespace bssl + +using namespace bssl; + X509 *SSL_get_peer_certificate(const SSL *ssl) { + check_ssl_x509_method(ssl); if (ssl == NULL) { return NULL; } @@ -167,6 +591,7 @@ X509 *SSL_get_peer_certificate(const SSL *ssl) { } STACK_OF(X509) *SSL_get_peer_cert_chain(const SSL *ssl) { + check_ssl_x509_method(ssl); if (ssl == NULL) { return NULL; } @@ -180,8 +605,8 @@ STACK_OF(X509) *SSL_get_peer_cert_chain(const SSL *ssl) { return session->x509_chain; } - /* OpenSSL historically didn't include the leaf certificate in the returned - * certificate chain, but only for servers. */ + // OpenSSL historically didn't include the leaf certificate in the returned + // certificate chain, but only for servers. 
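Illustrative sketch (editor's addition, not from the patch) of the distinction preserved by the code that follows: on a server, SSL_get_peer_cert_chain() omits the client's leaf, while SSL_get_peer_full_cert_chain() below always includes it.

  STACK_OF(X509) *sent = SSL_get_peer_cert_chain(ssl);       // leaf omitted on servers
  STACK_OF(X509) *full = SSL_get_peer_full_cert_chain(ssl);  // leaf always at index 0
  X509 *leaf = sk_X509_value(full, 0);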
if (session->x509_chain_without_leaf == NULL) { session->x509_chain_without_leaf = sk_X509_new_null(); if (session->x509_chain_without_leaf == NULL) { @@ -203,6 +628,7 @@ STACK_OF(X509) *SSL_get_peer_cert_chain(const SSL *ssl) { } STACK_OF(X509) *SSL_get_peer_full_cert_chain(const SSL *ssl) { + check_ssl_x509_method(ssl); SSL_SESSION *session = SSL_get_session(ssl); if (session == NULL) { return NULL; @@ -212,54 +638,74 @@ STACK_OF(X509) *SSL_get_peer_full_cert_chain(const SSL *ssl) { } int SSL_CTX_set_purpose(SSL_CTX *ctx, int purpose) { + check_ssl_ctx_x509_method(ctx); return X509_VERIFY_PARAM_set_purpose(ctx->param, purpose); } int SSL_set_purpose(SSL *ssl, int purpose) { + check_ssl_x509_method(ssl); return X509_VERIFY_PARAM_set_purpose(ssl->param, purpose); } int SSL_CTX_set_trust(SSL_CTX *ctx, int trust) { + check_ssl_ctx_x509_method(ctx); return X509_VERIFY_PARAM_set_trust(ctx->param, trust); } int SSL_set_trust(SSL *ssl, int trust) { + check_ssl_x509_method(ssl); return X509_VERIFY_PARAM_set_trust(ssl->param, trust); } int SSL_CTX_set1_param(SSL_CTX *ctx, const X509_VERIFY_PARAM *param) { + check_ssl_ctx_x509_method(ctx); return X509_VERIFY_PARAM_set1(ctx->param, param); } int SSL_set1_param(SSL *ssl, const X509_VERIFY_PARAM *param) { + check_ssl_x509_method(ssl); return X509_VERIFY_PARAM_set1(ssl->param, param); } -X509_VERIFY_PARAM *SSL_CTX_get0_param(SSL_CTX *ctx) { return ctx->param; } +X509_VERIFY_PARAM *SSL_CTX_get0_param(SSL_CTX *ctx) { + check_ssl_ctx_x509_method(ctx); + return ctx->param; +} -X509_VERIFY_PARAM *SSL_get0_param(SSL *ssl) { return ssl->param; } +X509_VERIFY_PARAM *SSL_get0_param(SSL *ssl) { + check_ssl_x509_method(ssl); + return ssl->param; +} int SSL_get_verify_depth(const SSL *ssl) { + check_ssl_x509_method(ssl); return X509_VERIFY_PARAM_get_depth(ssl->param); } int (*SSL_get_verify_callback(const SSL *ssl))(int, X509_STORE_CTX *) { + check_ssl_x509_method(ssl); return ssl->verify_callback; } -int SSL_CTX_get_verify_mode(const SSL_CTX *ctx) { return ctx->verify_mode; } +int SSL_CTX_get_verify_mode(const SSL_CTX *ctx) { + check_ssl_ctx_x509_method(ctx); + return ctx->verify_mode; +} int SSL_CTX_get_verify_depth(const SSL_CTX *ctx) { + check_ssl_ctx_x509_method(ctx); return X509_VERIFY_PARAM_get_depth(ctx->param); } int (*SSL_CTX_get_verify_callback(const SSL_CTX *ctx))( int ok, X509_STORE_CTX *store_ctx) { + check_ssl_ctx_x509_method(ctx); return ctx->default_verify_callback; } void SSL_set_verify(SSL *ssl, int mode, int (*callback)(int ok, X509_STORE_CTX *store_ctx)) { + check_ssl_x509_method(ssl); ssl->verify_mode = mode; if (callback != NULL) { ssl->verify_callback = callback; @@ -267,6 +713,7 @@ void SSL_set_verify(SSL *ssl, int mode, } void SSL_set_verify_depth(SSL *ssl, int depth) { + check_ssl_x509_method(ssl); X509_VERIFY_PARAM_set_depth(ssl->param, depth); } @@ -274,36 +721,43 @@ void SSL_CTX_set_cert_verify_callback(SSL_CTX *ctx, int (*cb)(X509_STORE_CTX *store_ctx, void *arg), void *arg) { + check_ssl_ctx_x509_method(ctx); ctx->app_verify_callback = cb; ctx->app_verify_arg = arg; } void SSL_CTX_set_verify(SSL_CTX *ctx, int mode, int (*cb)(int, X509_STORE_CTX *)) { + check_ssl_ctx_x509_method(ctx); ctx->verify_mode = mode; ctx->default_verify_callback = cb; } void SSL_CTX_set_verify_depth(SSL_CTX *ctx, int depth) { + check_ssl_ctx_x509_method(ctx); X509_VERIFY_PARAM_set_depth(ctx->param, depth); } int SSL_CTX_set_default_verify_paths(SSL_CTX *ctx) { + check_ssl_ctx_x509_method(ctx); return X509_STORE_set_default_paths(ctx->cert_store); } int 
SSL_CTX_load_verify_locations(SSL_CTX *ctx, const char *ca_file, const char *ca_dir) { + check_ssl_ctx_x509_method(ctx); return X509_STORE_load_locations(ctx->cert_store, ca_file, ca_dir); } void SSL_set_verify_result(SSL *ssl, long result) { + check_ssl_x509_method(ssl); if (result != X509_V_OK) { abort(); } } long SSL_get_verify_result(const SSL *ssl) { + check_ssl_x509_method(ssl); SSL_SESSION *session = SSL_get_session(ssl); if (session == NULL) { return X509_V_ERR_INVALID_CALL; @@ -312,154 +766,42 @@ long SSL_get_verify_result(const SSL *ssl) { } X509_STORE *SSL_CTX_get_cert_store(const SSL_CTX *ctx) { + check_ssl_ctx_x509_method(ctx); return ctx->cert_store; } void SSL_CTX_set_cert_store(SSL_CTX *ctx, X509_STORE *store) { + check_ssl_ctx_x509_method(ctx); X509_STORE_free(ctx->cert_store); ctx->cert_store = store; } -static void ssl_crypto_x509_flush_cached_leaf(CERT *cert) { - X509_free(cert->x509_leaf); - cert->x509_leaf = NULL; -} - -static void ssl_crypto_x509_flush_cached_chain(CERT *cert) { - sk_X509_pop_free(cert->x509_chain, X509_free); - cert->x509_chain = NULL; -} - -static void ssl_crypto_x509_clear(CERT *cert) { - ssl_crypto_x509_flush_cached_leaf(cert); - ssl_crypto_x509_flush_cached_chain(cert); - - X509_free(cert->x509_stash); - cert->x509_stash = NULL; -} - -static int ssl_crypto_x509_session_cache_objects(SSL_SESSION *sess) { - STACK_OF(X509) *chain = NULL; - const size_t num_certs = sk_CRYPTO_BUFFER_num(sess->certs); - - if (num_certs > 0) { - chain = sk_X509_new_null(); - if (chain == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - } - - X509 *leaf = NULL; - for (size_t i = 0; i < num_certs; i++) { - X509 *x509 = X509_parse_from_buffer(sk_CRYPTO_BUFFER_value(sess->certs, i)); - if (x509 == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto err; - } - if (!sk_X509_push(chain, x509)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - X509_free(x509); - goto err; - } - if (i == 0) { - leaf = x509; - } - } - - sk_X509_pop_free(sess->x509_chain, X509_free); - sess->x509_chain = chain; - sk_X509_pop_free(sess->x509_chain_without_leaf, X509_free); - sess->x509_chain_without_leaf = NULL; - - X509_free(sess->x509_peer); - if (leaf != NULL) { - X509_up_ref(leaf); - } - sess->x509_peer = leaf; - - return 1; - -err: - sk_X509_pop_free(chain, X509_free); - return 0; -} - -static int ssl_crypto_x509_session_dup(SSL_SESSION *new_session, - const SSL_SESSION *session) { - if (session->x509_peer != NULL) { - X509_up_ref(session->x509_peer); - new_session->x509_peer = session->x509_peer; - } - if (session->x509_chain != NULL) { - new_session->x509_chain = X509_chain_up_ref(session->x509_chain); - if (new_session->x509_chain == NULL) { - return 0; - } - } - - return 1; -} - -static void ssl_crypto_x509_session_clear(SSL_SESSION *session) { - X509_free(session->x509_peer); - session->x509_peer = NULL; - sk_X509_pop_free(session->x509_chain, X509_free); - session->x509_chain = NULL; - sk_X509_pop_free(session->x509_chain_without_leaf, X509_free); - session->x509_chain_without_leaf = NULL; -} - -const SSL_X509_METHOD ssl_crypto_x509_method = { - ssl_crypto_x509_clear, - ssl_crypto_x509_flush_cached_chain, - ssl_crypto_x509_flush_cached_leaf, - ssl_crypto_x509_session_cache_objects, - ssl_crypto_x509_session_dup, - ssl_crypto_x509_session_clear, -}; - -/* x509_to_buffer returns a |CRYPTO_BUFFER| that contains the serialised - * contents of |x509|. 
*/ -static CRYPTO_BUFFER *x509_to_buffer(X509 *x509) { - uint8_t *buf = NULL; - int cert_len = i2d_X509(x509, &buf); - if (cert_len <= 0) { - return 0; - } - - CRYPTO_BUFFER *buffer = CRYPTO_BUFFER_new(buf, cert_len, NULL); - OPENSSL_free(buf); - - return buffer; -} - static int ssl_use_certificate(CERT *cert, X509 *x) { if (x == NULL) { OPENSSL_PUT_ERROR(SSL, ERR_R_PASSED_NULL_PARAMETER); return 0; } - CRYPTO_BUFFER *buffer = x509_to_buffer(x); - if (buffer == NULL) { + UniquePtr buffer = x509_to_buffer(x); + if (!buffer) { return 0; } - const int ok = ssl_set_cert(cert, buffer); - CRYPTO_BUFFER_free(buffer); - return ok; + return ssl_set_cert(cert, std::move(buffer)); } int SSL_use_certificate(SSL *ssl, X509 *x) { + check_ssl_x509_method(ssl); return ssl_use_certificate(ssl->cert, x); } int SSL_CTX_use_certificate(SSL_CTX *ctx, X509 *x) { + check_ssl_ctx_x509_method(ctx); return ssl_use_certificate(ctx->cert, x); } -/* ssl_cert_cache_leaf_cert sets |cert->x509_leaf|, if currently NULL, from the - * first element of |cert->chain|. */ +// ssl_cert_cache_leaf_cert sets |cert->x509_leaf|, if currently NULL, from the +// first element of |cert->chain|. static int ssl_cert_cache_leaf_cert(CERT *cert) { assert(cert->x509_method); @@ -487,84 +829,23 @@ static X509 *ssl_cert_get0_leaf(CERT *cert) { } X509 *SSL_get_certificate(const SSL *ssl) { + check_ssl_x509_method(ssl); return ssl_cert_get0_leaf(ssl->cert); } X509 *SSL_CTX_get0_certificate(const SSL_CTX *ctx) { + check_ssl_ctx_x509_method(ctx); + MutexWriteLock lock(const_cast(&ctx->lock)); return ssl_cert_get0_leaf(ctx->cert); } -/* new_leafless_chain returns a fresh stack of buffers set to {NULL}. */ -static STACK_OF(CRYPTO_BUFFER) *new_leafless_chain(void) { - STACK_OF(CRYPTO_BUFFER) *chain = sk_CRYPTO_BUFFER_new_null(); - if (chain == NULL) { - return NULL; - } - - if (!sk_CRYPTO_BUFFER_push(chain, NULL)) { - sk_CRYPTO_BUFFER_free(chain); - return NULL; - } - - return chain; -} - -/* ssl_cert_set_chain sets elements 1.. of |cert->chain| to the serialised - * forms of elements of |chain|. It returns one on success or zero on error, in - * which case no change to |cert->chain| is made. It preverses the existing - * leaf from |cert->chain|, if any. */ -static int ssl_cert_set_chain(CERT *cert, STACK_OF(X509) *chain) { - STACK_OF(CRYPTO_BUFFER) *new_chain = NULL; - - if (cert->chain != NULL) { - new_chain = sk_CRYPTO_BUFFER_new_null(); - if (new_chain == NULL) { - return 0; - } - - CRYPTO_BUFFER *leaf = sk_CRYPTO_BUFFER_value(cert->chain, 0); - if (!sk_CRYPTO_BUFFER_push(new_chain, leaf)) { - goto err; - } - /* |leaf| might be NULL if it's a “leafless” chain. 
*/ - if (leaf != NULL) { - CRYPTO_BUFFER_up_ref(leaf); - } - } - - for (size_t i = 0; i < sk_X509_num(chain); i++) { - if (new_chain == NULL) { - new_chain = new_leafless_chain(); - if (new_chain == NULL) { - goto err; - } - } - - CRYPTO_BUFFER *buffer = x509_to_buffer(sk_X509_value(chain, i)); - if (buffer == NULL || - !sk_CRYPTO_BUFFER_push(new_chain, buffer)) { - CRYPTO_BUFFER_free(buffer); - goto err; - } - } - - sk_CRYPTO_BUFFER_pop_free(cert->chain, CRYPTO_BUFFER_free); - cert->chain = new_chain; - - return 1; - -err: - sk_CRYPTO_BUFFER_pop_free(new_chain, CRYPTO_BUFFER_free); - return 0; -} - static int ssl_cert_set0_chain(CERT *cert, STACK_OF(X509) *chain) { if (!ssl_cert_set_chain(cert, chain)) { return 0; } sk_X509_pop_free(chain, X509_free); - ssl_crypto_x509_flush_cached_chain(cert); + ssl_crypto_x509_cert_flush_cached_chain(cert); return 1; } @@ -573,31 +854,25 @@ static int ssl_cert_set1_chain(CERT *cert, STACK_OF(X509) *chain) { return 0; } - ssl_crypto_x509_flush_cached_chain(cert); + ssl_crypto_x509_cert_flush_cached_chain(cert); return 1; } static int ssl_cert_append_cert(CERT *cert, X509 *x509) { assert(cert->x509_method); - CRYPTO_BUFFER *buffer = x509_to_buffer(x509); - if (buffer == NULL) { + UniquePtr buffer = x509_to_buffer(x509); + if (!buffer) { return 0; } if (cert->chain != NULL) { - if (!sk_CRYPTO_BUFFER_push(cert->chain, buffer)) { - CRYPTO_BUFFER_free(buffer); - return 0; - } - - return 1; + return PushToStack(cert->chain, std::move(buffer)); } cert->chain = new_leafless_chain(); if (cert->chain == NULL || - !sk_CRYPTO_BUFFER_push(cert->chain, buffer)) { - CRYPTO_BUFFER_free(buffer); + !PushToStack(cert->chain, std::move(buffer))) { sk_CRYPTO_BUFFER_free(cert->chain); cert->chain = NULL; return 0; @@ -613,7 +888,7 @@ static int ssl_cert_add0_chain_cert(CERT *cert, X509 *x509) { X509_free(cert->x509_stash); cert->x509_stash = x509; - ssl_crypto_x509_flush_cached_chain(cert); + ssl_crypto_x509_cert_flush_cached_chain(cert); return 1; } @@ -622,103 +897,72 @@ static int ssl_cert_add1_chain_cert(CERT *cert, X509 *x509) { return 0; } - ssl_crypto_x509_flush_cached_chain(cert); + ssl_crypto_x509_cert_flush_cached_chain(cert); return 1; } int SSL_CTX_set0_chain(SSL_CTX *ctx, STACK_OF(X509) *chain) { + check_ssl_ctx_x509_method(ctx); return ssl_cert_set0_chain(ctx->cert, chain); } int SSL_CTX_set1_chain(SSL_CTX *ctx, STACK_OF(X509) *chain) { + check_ssl_ctx_x509_method(ctx); return ssl_cert_set1_chain(ctx->cert, chain); } int SSL_set0_chain(SSL *ssl, STACK_OF(X509) *chain) { + check_ssl_x509_method(ssl); return ssl_cert_set0_chain(ssl->cert, chain); } int SSL_set1_chain(SSL *ssl, STACK_OF(X509) *chain) { + check_ssl_x509_method(ssl); return ssl_cert_set1_chain(ssl->cert, chain); } int SSL_CTX_add0_chain_cert(SSL_CTX *ctx, X509 *x509) { + check_ssl_ctx_x509_method(ctx); return ssl_cert_add0_chain_cert(ctx->cert, x509); } int SSL_CTX_add1_chain_cert(SSL_CTX *ctx, X509 *x509) { + check_ssl_ctx_x509_method(ctx); return ssl_cert_add1_chain_cert(ctx->cert, x509); } int SSL_CTX_add_extra_chain_cert(SSL_CTX *ctx, X509 *x509) { + check_ssl_ctx_x509_method(ctx); return SSL_CTX_add0_chain_cert(ctx, x509); } int SSL_add0_chain_cert(SSL *ssl, X509 *x509) { + check_ssl_x509_method(ssl); return ssl_cert_add0_chain_cert(ssl->cert, x509); } int SSL_add1_chain_cert(SSL *ssl, X509 *x509) { + check_ssl_x509_method(ssl); return ssl_cert_add1_chain_cert(ssl->cert, x509); } int SSL_CTX_clear_chain_certs(SSL_CTX *ctx) { + check_ssl_ctx_x509_method(ctx); return SSL_CTX_set0_chain(ctx, NULL); 
} int SSL_CTX_clear_extra_chain_certs(SSL_CTX *ctx) { + check_ssl_ctx_x509_method(ctx); return SSL_CTX_clear_chain_certs(ctx); } int SSL_clear_chain_certs(SSL *ssl) { + check_ssl_x509_method(ssl); return SSL_set0_chain(ssl, NULL); } -int ssl_auto_chain_if_needed(SSL *ssl) { - /* Only build a chain if there are no intermediates configured and the feature - * isn't disabled. */ - if ((ssl->mode & SSL_MODE_NO_AUTO_CHAIN) || - !ssl_has_certificate(ssl) || - ssl->cert->chain == NULL || - sk_CRYPTO_BUFFER_num(ssl->cert->chain) > 1) { - return 1; - } - - X509 *leaf = - X509_parse_from_buffer(sk_CRYPTO_BUFFER_value(ssl->cert->chain, 0)); - if (!leaf) { - OPENSSL_PUT_ERROR(SSL, ERR_R_X509_LIB); - return 0; - } - - X509_STORE_CTX ctx; - if (!X509_STORE_CTX_init(&ctx, ssl->ctx->cert_store, leaf, NULL)) { - X509_free(leaf); - OPENSSL_PUT_ERROR(SSL, ERR_R_X509_LIB); - return 0; - } - - /* Attempt to build a chain, ignoring the result. */ - X509_verify_cert(&ctx); - X509_free(leaf); - ERR_clear_error(); - - /* Remove the leaf from the generated chain. */ - X509_free(sk_X509_shift(ctx.chain)); - - const int ok = ssl_cert_set_chain(ssl->cert, ctx.chain); - X509_STORE_CTX_cleanup(&ctx); - if (!ok) { - return 0; - } - - ssl_crypto_x509_flush_cached_chain(ssl->cert); - - return 1; -} - -/* ssl_cert_cache_chain_certs fills in |cert->x509_chain| from elements 1.. of - * |cert->chain|. */ +// ssl_cert_cache_chain_certs fills in |cert->x509_chain| from elements 1.. of +// |cert->chain|. static int ssl_cert_cache_chain_certs(CERT *cert) { assert(cert->x509_method); @@ -728,30 +972,27 @@ static int ssl_cert_cache_chain_certs(CERT *cert) { return 1; } - STACK_OF(X509) *chain = sk_X509_new_null(); - if (chain == NULL) { + UniquePtr chain(sk_X509_new_null()); + if (!chain) { return 0; } for (size_t i = 1; i < sk_CRYPTO_BUFFER_num(cert->chain); i++) { CRYPTO_BUFFER *buffer = sk_CRYPTO_BUFFER_value(cert->chain, i); - X509 *x509 = X509_parse_from_buffer(buffer); - if (x509 == NULL || - !sk_X509_push(chain, x509)) { - X509_free(x509); - goto err; + UniquePtr x509(X509_parse_from_buffer(buffer)); + if (!x509 || + !PushToStack(chain.get(), std::move(x509))) { + return 0; } } - cert->x509_chain = chain; + cert->x509_chain = chain.release(); return 1; - -err: - sk_X509_pop_free(chain, X509_free); - return 0; } int SSL_CTX_get0_chain_certs(const SSL_CTX *ctx, STACK_OF(X509) **out_chain) { + check_ssl_ctx_x509_method(ctx); + MutexWriteLock lock(const_cast(&ctx->lock)); if (!ssl_cert_cache_chain_certs(ctx->cert)) { *out_chain = NULL; return 0; @@ -767,6 +1008,7 @@ int SSL_CTX_get_extra_chain_certs(const SSL_CTX *ctx, } int SSL_get0_chain_certs(const SSL *ssl, STACK_OF(X509) **out_chain) { + check_ssl_x509_method(ssl); if (!ssl_cert_cache_chain_certs(ssl->cert)) { *out_chain = NULL; return 0; @@ -777,7 +1019,7 @@ int SSL_get0_chain_certs(const SSL *ssl, STACK_OF(X509) **out_chain) { } static SSL_SESSION *ssl_session_new_with_crypto_x509(void) { - return ssl_session_new(&ssl_crypto_x509_method); + return ssl_session_new(&ssl_crypto_x509_method).release(); } SSL_SESSION *d2i_SSL_SESSION_bio(BIO *bio, SSL_SESSION **out) { @@ -800,16 +1042,258 @@ SSL_SESSION *d2i_SSL_SESSION(SSL_SESSION **a, const uint8_t **pp, long length) { CBS cbs; CBS_init(&cbs, *pp, length); - SSL_SESSION *ret = SSL_SESSION_parse(&cbs, &ssl_crypto_x509_method, - NULL /* no buffer pool */); - if (ret == NULL) { + UniquePtr ret = SSL_SESSION_parse(&cbs, &ssl_crypto_x509_method, + NULL /* no buffer pool */); + if (!ret) { return NULL; } if (a) { 
SSL_SESSION_free(*a); - *a = ret; + *a = ret.get(); } *pp = CBS_data(&cbs); - return ret; + return ret.release(); +} + +STACK_OF(X509_NAME) *SSL_dup_CA_list(STACK_OF(X509_NAME) *list) { + return sk_X509_NAME_deep_copy(list, X509_NAME_dup, X509_NAME_free); +} + +static void set_client_CA_list(STACK_OF(CRYPTO_BUFFER) **ca_list, + const STACK_OF(X509_NAME) *name_list, + CRYPTO_BUFFER_POOL *pool) { + UniquePtr buffers(sk_CRYPTO_BUFFER_new_null()); + if (!buffers) { + return; + } + + for (X509_NAME *name : name_list) { + uint8_t *outp = NULL; + int len = i2d_X509_NAME(name, &outp); + if (len < 0) { + return; + } + + UniquePtr buffer(CRYPTO_BUFFER_new(outp, len, pool)); + OPENSSL_free(outp); + if (!buffer || + !PushToStack(buffers.get(), std::move(buffer))) { + return; + } + } + + sk_CRYPTO_BUFFER_pop_free(*ca_list, CRYPTO_BUFFER_free); + *ca_list = buffers.release(); +} + +void SSL_set_client_CA_list(SSL *ssl, STACK_OF(X509_NAME) *name_list) { + check_ssl_x509_method(ssl); + ssl->ctx->x509_method->ssl_flush_cached_client_CA(ssl); + set_client_CA_list(&ssl->client_CA, name_list, ssl->ctx->pool); + sk_X509_NAME_pop_free(name_list, X509_NAME_free); +} + +void SSL_CTX_set_client_CA_list(SSL_CTX *ctx, STACK_OF(X509_NAME) *name_list) { + check_ssl_ctx_x509_method(ctx); + ctx->x509_method->ssl_ctx_flush_cached_client_CA(ctx); + set_client_CA_list(&ctx->client_CA, name_list, ctx->pool); + sk_X509_NAME_pop_free(name_list, X509_NAME_free); +} + +static STACK_OF(X509_NAME) * + buffer_names_to_x509(const STACK_OF(CRYPTO_BUFFER) *names, + STACK_OF(X509_NAME) **cached) { + if (names == NULL) { + return NULL; + } + + if (*cached != NULL) { + return *cached; + } + + UniquePtr new_cache(sk_X509_NAME_new_null()); + if (!new_cache) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return NULL; + } + + for (const CRYPTO_BUFFER *buffer : names) { + const uint8_t *inp = CRYPTO_BUFFER_data(buffer); + UniquePtr name( + d2i_X509_NAME(nullptr, &inp, CRYPTO_BUFFER_len(buffer))); + if (!name || + inp != CRYPTO_BUFFER_data(buffer) + CRYPTO_BUFFER_len(buffer) || + !PushToStack(new_cache.get(), std::move(name))) { + return NULL; + } + } + + *cached = new_cache.release(); + return *cached; +} + +STACK_OF(X509_NAME) *SSL_get_client_CA_list(const SSL *ssl) { + check_ssl_x509_method(ssl); + // For historical reasons, this function is used both to query configuration + // state on a server as well as handshake state on a client. However, whether + // |ssl| is a client or server is not known until explicitly configured with + // |SSL_set_connect_state|. If |do_handshake| is NULL, |ssl| is in an + // indeterminate mode and |ssl->server| is unset. + if (ssl->do_handshake != NULL && !ssl->server) { + if (ssl->s3->hs != NULL) { + return buffer_names_to_x509(ssl->s3->hs->ca_names.get(), + &ssl->s3->hs->cached_x509_ca_names); + } + + return NULL; + } + + if (ssl->client_CA != NULL) { + return buffer_names_to_x509( + ssl->client_CA, (STACK_OF(X509_NAME) **)&ssl->cached_x509_client_CA); + } + return SSL_CTX_get_client_CA_list(ssl->ctx); +} + +STACK_OF(X509_NAME) *SSL_CTX_get_client_CA_list(const SSL_CTX *ctx) { + check_ssl_ctx_x509_method(ctx); + // This is a logically const operation that may be called on multiple threads, + // so it needs to lock around updating |cached_x509_client_CA|. 
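Illustrative sketch (editor's addition, not from the patch): a typical server-side configuration that populates the client CA list cached and returned here. |ca_file| is a hypothetical path to a PEM bundle; SSL_CTX_set_client_CA_list takes ownership of the list, as its definition above shows.

  STACK_OF(X509_NAME) *names = SSL_load_client_CA_file(ca_file);
  SSL_CTX_set_client_CA_list(ctx, names);  // takes ownership of |names|
  SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT,
                     nullptr);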
+ MutexWriteLock lock(const_cast(&ctx->lock)); + return buffer_names_to_x509( + ctx->client_CA, + const_cast(&ctx->cached_x509_client_CA)); +} + +static int add_client_CA(STACK_OF(CRYPTO_BUFFER) **names, X509 *x509, + CRYPTO_BUFFER_POOL *pool) { + if (x509 == NULL) { + return 0; + } + + uint8_t *outp = NULL; + int len = i2d_X509_NAME(X509_get_subject_name(x509), &outp); + if (len < 0) { + return 0; + } + + UniquePtr buffer(CRYPTO_BUFFER_new(outp, len, pool)); + OPENSSL_free(outp); + if (!buffer) { + return 0; + } + + int alloced = 0; + if (*names == NULL) { + *names = sk_CRYPTO_BUFFER_new_null(); + alloced = 1; + + if (*names == NULL) { + return 0; + } + } + + if (!PushToStack(*names, std::move(buffer))) { + if (alloced) { + sk_CRYPTO_BUFFER_pop_free(*names, CRYPTO_BUFFER_free); + *names = NULL; + } + return 0; + } + + return 1; +} + +int SSL_add_client_CA(SSL *ssl, X509 *x509) { + check_ssl_x509_method(ssl); + if (!add_client_CA(&ssl->client_CA, x509, ssl->ctx->pool)) { + return 0; + } + + ssl_crypto_x509_ssl_flush_cached_client_CA(ssl); + return 1; +} + +int SSL_CTX_add_client_CA(SSL_CTX *ctx, X509 *x509) { + check_ssl_ctx_x509_method(ctx); + if (!add_client_CA(&ctx->client_CA, x509, ctx->pool)) { + return 0; + } + + ssl_crypto_x509_ssl_ctx_flush_cached_client_CA(ctx); + return 1; +} + +static int do_client_cert_cb(SSL *ssl, void *arg) { + if (ssl_has_certificate(ssl) || ssl->ctx->client_cert_cb == NULL) { + return 1; + } + + X509 *x509 = NULL; + EVP_PKEY *pkey = NULL; + int ret = ssl->ctx->client_cert_cb(ssl, &x509, &pkey); + if (ret < 0) { + return -1; + } + UniquePtr free_x509(x509); + UniquePtr free_pkey(pkey); + + if (ret != 0) { + if (!SSL_use_certificate(ssl, x509) || + !SSL_use_PrivateKey(ssl, pkey)) { + return 0; + } + } + + return 1; +} + +void SSL_CTX_set_client_cert_cb(SSL_CTX *ctx, int (*cb)(SSL *ssl, + X509 **out_x509, + EVP_PKEY **out_pkey)) { + check_ssl_ctx_x509_method(ctx); + // Emulate the old client certificate callback with the new one. + SSL_CTX_set_cert_cb(ctx, do_client_cert_cb, NULL); + ctx->client_cert_cb = cb; +} + +static int set_cert_store(X509_STORE **store_ptr, X509_STORE *new_store, + int take_ref) { + X509_STORE_free(*store_ptr); + *store_ptr = new_store; + + if (new_store != NULL && take_ref) { + X509_STORE_up_ref(new_store); + } + + return 1; +} + +int SSL_get_ex_data_X509_STORE_CTX_idx(void) { + // The ex_data index to go from |X509_STORE_CTX| to |SSL| always uses the + // reserved app_data slot. Before ex_data was introduced, app_data was used. + // Avoid breaking any software which assumes |X509_STORE_CTX_get_app_data| + // works. 
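Illustrative sketch (editor's addition, not from the patch): the usual consumer of the index returned below is a verify callback that needs the owning |SSL|. |verify_cb| is a hypothetical callback name.

  static int verify_cb(int ok, X509_STORE_CTX *store_ctx) {
    SSL *ssl = (SSL *)X509_STORE_CTX_get_ex_data(
        store_ctx, SSL_get_ex_data_X509_STORE_CTX_idx());
    (void)ssl;  // a real callback would consult |ssl| and the store error here
    return ok;
  }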
+ return 0; +} + +int SSL_CTX_set0_verify_cert_store(SSL_CTX *ctx, X509_STORE *store) { + check_ssl_ctx_x509_method(ctx); + return set_cert_store(&ctx->cert->verify_store, store, 0); +} + +int SSL_CTX_set1_verify_cert_store(SSL_CTX *ctx, X509_STORE *store) { + check_ssl_ctx_x509_method(ctx); + return set_cert_store(&ctx->cert->verify_store, store, 1); +} + +int SSL_set0_verify_cert_store(SSL *ssl, X509_STORE *store) { + check_ssl_x509_method(ssl); + return set_cert_store(&ssl->cert->verify_store, store, 0); +} + +int SSL_set1_verify_cert_store(SSL *ssl, X509_STORE *store) { + check_ssl_x509_method(ssl); + return set_cert_store(&ssl->cert->verify_store, store, 1); } diff --git a/Sources/BoringSSL/ssl/t1_enc.c b/Sources/BoringSSL/ssl/t1_enc.c deleted file mode 100644 index 9f11e0566..000000000 --- a/Sources/BoringSSL/ssl/t1_enc.c +++ /dev/null @@ -1,561 +0,0 @@ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ -/* ==================================================================== - * Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ -/* ==================================================================== - * Copyright 2005 Nokia. All rights reserved. 
- * - * The portions of the attached software ("Contribution") is developed by - * Nokia Corporation and is licensed pursuant to the OpenSSL open source - * license. - * - * The Contribution, originally written by Mika Kousa and Pasi Eronen of - * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites - * support (see RFC 4279) to OpenSSL. - * - * No patent licenses or other rights except those expressly stated in - * the OpenSSL open source license shall be deemed granted or received - * expressly, by implication, estoppel, or otherwise. - * - * No assurances are provided by Nokia that the Contribution does not - * infringe the patent or other intellectual property rights of any third - * party or that the license provides you with all the necessary rights - * to make use of the Contribution. - * - * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN - * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA - * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY - * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR - * OTHERWISE. */ - -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "../crypto/internal.h" -#include "internal.h" - - -/* tls1_P_hash computes the TLS P_ function as described in RFC 5246, - * section 5. It XORs |out_len| bytes to |out|, using |md| as the hash and - * |secret| as the secret. |seed1| through |seed3| are concatenated to form the - * seed parameter. It returns one on success and zero on failure. */ -static int tls1_P_hash(uint8_t *out, size_t out_len, const EVP_MD *md, - const uint8_t *secret, size_t secret_len, - const uint8_t *seed1, size_t seed1_len, - const uint8_t *seed2, size_t seed2_len, - const uint8_t *seed3, size_t seed3_len) { - HMAC_CTX ctx, ctx_tmp, ctx_init; - uint8_t A1[EVP_MAX_MD_SIZE]; - unsigned A1_len; - int ret = 0; - - size_t chunk = EVP_MD_size(md); - - HMAC_CTX_init(&ctx); - HMAC_CTX_init(&ctx_tmp); - HMAC_CTX_init(&ctx_init); - if (!HMAC_Init_ex(&ctx_init, secret, secret_len, md, NULL) || - !HMAC_CTX_copy_ex(&ctx, &ctx_init) || - !HMAC_Update(&ctx, seed1, seed1_len) || - !HMAC_Update(&ctx, seed2, seed2_len) || - !HMAC_Update(&ctx, seed3, seed3_len) || - !HMAC_Final(&ctx, A1, &A1_len)) { - goto err; - } - - for (;;) { - unsigned len; - uint8_t hmac[EVP_MAX_MD_SIZE]; - if (!HMAC_CTX_copy_ex(&ctx, &ctx_init) || - !HMAC_Update(&ctx, A1, A1_len) || - /* Save a copy of |ctx| to compute the next A1 value below. */ - (out_len > chunk && !HMAC_CTX_copy_ex(&ctx_tmp, &ctx)) || - !HMAC_Update(&ctx, seed1, seed1_len) || - !HMAC_Update(&ctx, seed2, seed2_len) || - !HMAC_Update(&ctx, seed3, seed3_len) || - !HMAC_Final(&ctx, hmac, &len)) { - goto err; - } - assert(len == chunk); - - /* XOR the result into |out|. */ - if (len > out_len) { - len = out_len; - } - unsigned i; - for (i = 0; i < len; i++) { - out[i] ^= hmac[i]; - } - out += len; - out_len -= len; - - if (out_len == 0) { - break; - } - - /* Calculate the next A1 value. 
*/ - if (!HMAC_Final(&ctx_tmp, A1, &A1_len)) { - goto err; - } - } - - ret = 1; - -err: - HMAC_CTX_cleanup(&ctx); - HMAC_CTX_cleanup(&ctx_tmp); - HMAC_CTX_cleanup(&ctx_init); - OPENSSL_cleanse(A1, sizeof(A1)); - return ret; -} - -int tls1_prf(const EVP_MD *digest, uint8_t *out, size_t out_len, - const uint8_t *secret, size_t secret_len, const char *label, - size_t label_len, const uint8_t *seed1, size_t seed1_len, - const uint8_t *seed2, size_t seed2_len) { - if (out_len == 0) { - return 1; - } - - OPENSSL_memset(out, 0, out_len); - - if (digest == EVP_md5_sha1()) { - /* If using the MD5/SHA1 PRF, |secret| is partitioned between SHA-1 and - * MD5, MD5 first. */ - size_t secret_half = secret_len - (secret_len / 2); - if (!tls1_P_hash(out, out_len, EVP_md5(), secret, secret_half, - (const uint8_t *)label, label_len, seed1, seed1_len, seed2, - seed2_len)) { - return 0; - } - - /* Note that, if |secret_len| is odd, the two halves share a byte. */ - secret = secret + (secret_len - secret_half); - secret_len = secret_half; - - digest = EVP_sha1(); - } - - if (!tls1_P_hash(out, out_len, digest, secret, secret_len, - (const uint8_t *)label, label_len, seed1, seed1_len, seed2, - seed2_len)) { - return 0; - } - - return 1; -} - -static int ssl3_prf(uint8_t *out, size_t out_len, const uint8_t *secret, - size_t secret_len, const char *label, size_t label_len, - const uint8_t *seed1, size_t seed1_len, - const uint8_t *seed2, size_t seed2_len) { - EVP_MD_CTX md5; - EVP_MD_CTX sha1; - uint8_t buf[16], smd[SHA_DIGEST_LENGTH]; - uint8_t c = 'A'; - size_t i, j, k; - - k = 0; - EVP_MD_CTX_init(&md5); - EVP_MD_CTX_init(&sha1); - for (i = 0; i < out_len; i += MD5_DIGEST_LENGTH) { - k++; - if (k > sizeof(buf)) { - /* bug: 'buf' is too small for this ciphersuite */ - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - for (j = 0; j < k; j++) { - buf[j] = c; - } - c++; - if (!EVP_DigestInit_ex(&sha1, EVP_sha1(), NULL)) { - OPENSSL_PUT_ERROR(SSL, ERR_LIB_EVP); - return 0; - } - EVP_DigestUpdate(&sha1, buf, k); - EVP_DigestUpdate(&sha1, secret, secret_len); - /* |label| is ignored for SSLv3. */ - if (seed1_len) { - EVP_DigestUpdate(&sha1, seed1, seed1_len); - } - if (seed2_len) { - EVP_DigestUpdate(&sha1, seed2, seed2_len); - } - EVP_DigestFinal_ex(&sha1, smd, NULL); - - if (!EVP_DigestInit_ex(&md5, EVP_md5(), NULL)) { - OPENSSL_PUT_ERROR(SSL, ERR_LIB_EVP); - return 0; - } - EVP_DigestUpdate(&md5, secret, secret_len); - EVP_DigestUpdate(&md5, smd, SHA_DIGEST_LENGTH); - if (i + MD5_DIGEST_LENGTH > out_len) { - EVP_DigestFinal_ex(&md5, smd, NULL); - OPENSSL_memcpy(out, smd, out_len - i); - } else { - EVP_DigestFinal_ex(&md5, out, NULL); - } - - out += MD5_DIGEST_LENGTH; - } - - OPENSSL_cleanse(smd, SHA_DIGEST_LENGTH); - EVP_MD_CTX_cleanup(&md5); - EVP_MD_CTX_cleanup(&sha1); - - return 1; -} - -static int tls1_setup_key_block(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (hs->key_block_len != 0) { - return 1; - } - - SSL_SESSION *session = ssl->session; - if (hs->new_session != NULL) { - session = hs->new_session; - } - - const EVP_AEAD *aead = NULL; - size_t mac_secret_len, fixed_iv_len; - if (session->cipher == NULL || - !ssl_cipher_get_evp_aead(&aead, &mac_secret_len, &fixed_iv_len, - session->cipher, ssl3_protocol_version(ssl))) { - OPENSSL_PUT_ERROR(SSL, SSL_R_CIPHER_OR_HASH_UNAVAILABLE); - return 0; - } - size_t key_len = EVP_AEAD_key_length(aead); - if (mac_secret_len > 0) { - /* For "stateful" AEADs (i.e. 
compatibility with pre-AEAD cipher suites) the - * key length reported by |EVP_AEAD_key_length| will include the MAC key - * bytes and initial implicit IV. */ - if (key_len < mac_secret_len + fixed_iv_len) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - key_len -= mac_secret_len + fixed_iv_len; - } - - assert(mac_secret_len < 256); - assert(key_len < 256); - assert(fixed_iv_len < 256); - - ssl->s3->tmp.new_mac_secret_len = (uint8_t)mac_secret_len; - ssl->s3->tmp.new_key_len = (uint8_t)key_len; - ssl->s3->tmp.new_fixed_iv_len = (uint8_t)fixed_iv_len; - - size_t key_block_len = SSL_get_key_block_len(ssl); - - uint8_t *keyblock = OPENSSL_malloc(key_block_len); - if (keyblock == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return 0; - } - - if (!SSL_generate_key_block(ssl, keyblock, key_block_len)) { - OPENSSL_free(keyblock); - return 0; - } - - assert(key_block_len < 256); - hs->key_block_len = (uint8_t)key_block_len; - hs->key_block = keyblock; - return 1; -} - -int tls1_change_cipher_state(SSL_HANDSHAKE *hs, int which) { - SSL *const ssl = hs->ssl; - /* Ensure the key block is set up. */ - if (!tls1_setup_key_block(hs)) { - return 0; - } - - /* is_read is true if we have just read a ChangeCipherSpec message - i.e. we - * need to update the read cipherspec. Otherwise we have just written one. */ - const char is_read = (which & SSL3_CC_READ) != 0; - /* use_client_keys is true if we wish to use the keys for the "client write" - * direction. This is the case if we're a client sending a ChangeCipherSpec, - * or a server reading a client's ChangeCipherSpec. */ - const char use_client_keys = which == SSL3_CHANGE_CIPHER_CLIENT_WRITE || - which == SSL3_CHANGE_CIPHER_SERVER_READ; - - size_t mac_secret_len = ssl->s3->tmp.new_mac_secret_len; - size_t key_len = ssl->s3->tmp.new_key_len; - size_t iv_len = ssl->s3->tmp.new_fixed_iv_len; - assert((mac_secret_len + key_len + iv_len) * 2 == hs->key_block_len); - - const uint8_t *key_data = hs->key_block; - const uint8_t *client_write_mac_secret = key_data; - key_data += mac_secret_len; - const uint8_t *server_write_mac_secret = key_data; - key_data += mac_secret_len; - const uint8_t *client_write_key = key_data; - key_data += key_len; - const uint8_t *server_write_key = key_data; - key_data += key_len; - const uint8_t *client_write_iv = key_data; - key_data += iv_len; - const uint8_t *server_write_iv = key_data; - key_data += iv_len; - - const uint8_t *mac_secret, *key, *iv; - if (use_client_keys) { - mac_secret = client_write_mac_secret; - key = client_write_key; - iv = client_write_iv; - } else { - mac_secret = server_write_mac_secret; - key = server_write_key; - iv = server_write_iv; - } - - SSL_AEAD_CTX *aead_ctx = SSL_AEAD_CTX_new( - is_read ? 
evp_aead_open : evp_aead_seal, ssl3_protocol_version(ssl), - hs->new_cipher, key, key_len, mac_secret, mac_secret_len, iv, iv_len); - if (aead_ctx == NULL) { - return 0; - } - - if (is_read) { - return ssl->method->set_read_state(ssl, aead_ctx); - } - - return ssl->method->set_write_state(ssl, aead_ctx); -} - -size_t SSL_get_key_block_len(const SSL *ssl) { - return 2 * ((size_t)ssl->s3->tmp.new_mac_secret_len + - (size_t)ssl->s3->tmp.new_key_len + - (size_t)ssl->s3->tmp.new_fixed_iv_len); -} - -int SSL_generate_key_block(const SSL *ssl, uint8_t *out, size_t out_len) { - if (ssl3_protocol_version(ssl) == SSL3_VERSION) { - return ssl3_prf(out, out_len, SSL_get_session(ssl)->master_key, - SSL_get_session(ssl)->master_key_length, - TLS_MD_KEY_EXPANSION_CONST, TLS_MD_KEY_EXPANSION_CONST_SIZE, - ssl->s3->server_random, SSL3_RANDOM_SIZE, - ssl->s3->client_random, SSL3_RANDOM_SIZE); - } - - const EVP_MD *digest = ssl_get_handshake_digest( - SSL_get_session(ssl)->cipher->algorithm_prf, ssl3_protocol_version(ssl)); - if (digest == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - return tls1_prf(digest, out, out_len, SSL_get_session(ssl)->master_key, - SSL_get_session(ssl)->master_key_length, - TLS_MD_KEY_EXPANSION_CONST, TLS_MD_KEY_EXPANSION_CONST_SIZE, - ssl->s3->server_random, SSL3_RANDOM_SIZE, - ssl->s3->client_random, SSL3_RANDOM_SIZE); -} - -int tls1_generate_master_secret(SSL_HANDSHAKE *hs, uint8_t *out, - const uint8_t *premaster, - size_t premaster_len) { - const SSL *ssl = hs->ssl; - if (hs->extended_master_secret) { - uint8_t digests[EVP_MAX_MD_SIZE]; - size_t digests_len; - if (!SSL_TRANSCRIPT_get_hash(&hs->transcript, digests, &digests_len) || - !tls1_prf(SSL_TRANSCRIPT_md(&hs->transcript), out, - SSL3_MASTER_SECRET_SIZE, premaster, premaster_len, - TLS_MD_EXTENDED_MASTER_SECRET_CONST, - TLS_MD_EXTENDED_MASTER_SECRET_CONST_SIZE, digests, - digests_len, NULL, 0)) { - return 0; - } - } else { - if (ssl3_protocol_version(ssl) == SSL3_VERSION) { - if (!ssl3_prf(out, SSL3_MASTER_SECRET_SIZE, premaster, premaster_len, - TLS_MD_MASTER_SECRET_CONST, TLS_MD_MASTER_SECRET_CONST_SIZE, - ssl->s3->client_random, SSL3_RANDOM_SIZE, - ssl->s3->server_random, SSL3_RANDOM_SIZE)) { - return 0; - } - } else { - if (!tls1_prf(SSL_TRANSCRIPT_md(&hs->transcript), out, - SSL3_MASTER_SECRET_SIZE, premaster, premaster_len, - TLS_MD_MASTER_SECRET_CONST, TLS_MD_MASTER_SECRET_CONST_SIZE, - ssl->s3->client_random, SSL3_RANDOM_SIZE, - ssl->s3->server_random, SSL3_RANDOM_SIZE)) { - return 0; - } - } - } - - return SSL3_MASTER_SECRET_SIZE; -} - -int SSL_export_keying_material(SSL *ssl, uint8_t *out, size_t out_len, - const char *label, size_t label_len, - const uint8_t *context, size_t context_len, - int use_context) { - if (!ssl->s3->have_version || ssl->version == SSL3_VERSION) { - return 0; - } - - /* Exporters may not be used in the middle of a renegotiation. 
*/ - if (SSL_in_init(ssl) && !SSL_in_false_start(ssl)) { - return 0; - } - - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - return tls13_export_keying_material(ssl, out, out_len, label, label_len, - context, context_len, use_context); - } - - size_t seed_len = 2 * SSL3_RANDOM_SIZE; - if (use_context) { - if (context_len >= 1u << 16) { - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return 0; - } - seed_len += 2 + context_len; - } - uint8_t *seed = OPENSSL_malloc(seed_len); - if (seed == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - return 0; - } - - OPENSSL_memcpy(seed, ssl->s3->client_random, SSL3_RANDOM_SIZE); - OPENSSL_memcpy(seed + SSL3_RANDOM_SIZE, ssl->s3->server_random, - SSL3_RANDOM_SIZE); - if (use_context) { - seed[2 * SSL3_RANDOM_SIZE] = (uint8_t)(context_len >> 8); - seed[2 * SSL3_RANDOM_SIZE + 1] = (uint8_t)context_len; - OPENSSL_memcpy(seed + 2 * SSL3_RANDOM_SIZE + 2, context, context_len); - } - - const EVP_MD *digest = ssl_get_handshake_digest( - SSL_get_session(ssl)->cipher->algorithm_prf, ssl3_protocol_version(ssl)); - if (digest == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - int ret = tls1_prf(digest, out, out_len, SSL_get_session(ssl)->master_key, - SSL_get_session(ssl)->master_key_length, label, label_len, - seed, seed_len, NULL, 0); - OPENSSL_free(seed); - return ret; -} diff --git a/Sources/BoringSSL/ssl/t1_enc.cc b/Sources/BoringSSL/ssl/t1_enc.cc new file mode 100644 index 000000000..2a099878f --- /dev/null +++ b/Sources/BoringSSL/ssl/t1_enc.cc @@ -0,0 +1,503 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. 
If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ +/* ==================================================================== + * Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ +/* ==================================================================== + * Copyright 2005 Nokia. All rights reserved. + * + * The portions of the attached software ("Contribution") is developed by + * Nokia Corporation and is licensed pursuant to the OpenSSL open source + * license. + * + * The Contribution, originally written by Mika Kousa and Pasi Eronen of + * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites + * support (see RFC 4279) to OpenSSL. + * + * No patent licenses or other rights except those expressly stated in + * the OpenSSL open source license shall be deemed granted or received + * expressly, by implication, estoppel, or otherwise. + * + * No assurances are provided by Nokia that the Contribution does not + * infringe the patent or other intellectual property rights of any third + * party or that the license provides you with all the necessary rights + * to make use of the Contribution. + * + * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN + * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA + * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY + * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR + * OTHERWISE. */ + +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +namespace bssl { + +// tls1_P_hash computes the TLS P_ function as described in RFC 5246, +// section 5. It XORs |out.size()| bytes to |out|, using |md| as the hash and +// |secret| as the secret. |label|, |seed1|, and |seed2| are concatenated to +// form the seed parameter. It returns true on success and false on failure. +static bool tls1_P_hash(Span out, const EVP_MD *md, + Span secret, Span label, + Span seed1, Span seed2) { + ScopedHMAC_CTX ctx, ctx_tmp, ctx_init; + uint8_t A1[EVP_MAX_MD_SIZE]; + unsigned A1_len; + bool ret = false; + + size_t chunk = EVP_MD_size(md); + + if (!HMAC_Init_ex(ctx_init.get(), secret.data(), secret.size(), md, + nullptr) || + !HMAC_CTX_copy_ex(ctx.get(), ctx_init.get()) || + !HMAC_Update(ctx.get(), reinterpret_cast(label.data()), + label.size()) || + !HMAC_Update(ctx.get(), seed1.data(), seed1.size()) || + !HMAC_Update(ctx.get(), seed2.data(), seed2.size()) || + !HMAC_Final(ctx.get(), A1, &A1_len)) { + goto err; + } + + for (;;) { + unsigned len; + uint8_t hmac[EVP_MAX_MD_SIZE]; + if (!HMAC_CTX_copy_ex(ctx.get(), ctx_init.get()) || + !HMAC_Update(ctx.get(), A1, A1_len) || + // Save a copy of |ctx| to compute the next A1 value below. 
+ (out.size() > chunk && !HMAC_CTX_copy_ex(ctx_tmp.get(), ctx.get())) || + !HMAC_Update(ctx.get(), reinterpret_cast(label.data()), + label.size()) || + !HMAC_Update(ctx.get(), seed1.data(), seed1.size()) || + !HMAC_Update(ctx.get(), seed2.data(), seed2.size()) || + !HMAC_Final(ctx.get(), hmac, &len)) { + goto err; + } + assert(len == chunk); + + // XOR the result into |out|. + if (len > out.size()) { + len = out.size(); + } + for (unsigned i = 0; i < len; i++) { + out[i] ^= hmac[i]; + } + out = out.subspan(len); + + if (out.empty()) { + break; + } + + // Calculate the next A1 value. + if (!HMAC_Final(ctx_tmp.get(), A1, &A1_len)) { + goto err; + } + } + + ret = true; + +err: + OPENSSL_cleanse(A1, sizeof(A1)); + return ret; +} + +bool tls1_prf(const EVP_MD *digest, Span out, + Span secret, Span label, + Span seed1, Span seed2) { + if (out.empty()) { + return true; + } + + OPENSSL_memset(out.data(), 0, out.size()); + + if (digest == EVP_md5_sha1()) { + // If using the MD5/SHA1 PRF, |secret| is partitioned between MD5 and SHA-1. + size_t secret_half = secret.size() - (secret.size() / 2); + if (!tls1_P_hash(out, EVP_md5(), secret.subspan(0, secret_half), label, + seed1, seed2)) { + return false; + } + + // Note that, if |secret.size()| is odd, the two halves share a byte. + secret = secret.subspan(secret.size() - secret_half); + digest = EVP_sha1(); + } + + return tls1_P_hash(out, digest, secret, label, seed1, seed2); +} + +static bool ssl3_prf(Span out, Span secret, + Span label, Span seed1, + Span seed2) { + ScopedEVP_MD_CTX md5; + ScopedEVP_MD_CTX sha1; + uint8_t buf[16], smd[SHA_DIGEST_LENGTH]; + uint8_t c = 'A'; + size_t k = 0; + while (!out.empty()) { + k++; + if (k > sizeof(buf)) { + // bug: 'buf' is too small for this ciphersuite + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return false; + } + + for (size_t j = 0; j < k; j++) { + buf[j] = c; + } + c++; + if (!EVP_DigestInit_ex(sha1.get(), EVP_sha1(), NULL)) { + OPENSSL_PUT_ERROR(SSL, ERR_LIB_EVP); + return false; + } + EVP_DigestUpdate(sha1.get(), buf, k); + EVP_DigestUpdate(sha1.get(), secret.data(), secret.size()); + // |label| is ignored for SSLv3. + EVP_DigestUpdate(sha1.get(), seed1.data(), seed1.size()); + EVP_DigestUpdate(sha1.get(), seed2.data(), seed2.size()); + EVP_DigestFinal_ex(sha1.get(), smd, NULL); + + if (!EVP_DigestInit_ex(md5.get(), EVP_md5(), NULL)) { + OPENSSL_PUT_ERROR(SSL, ERR_LIB_EVP); + return false; + } + EVP_DigestUpdate(md5.get(), secret.data(), secret.size()); + EVP_DigestUpdate(md5.get(), smd, SHA_DIGEST_LENGTH); + if (out.size() < MD5_DIGEST_LENGTH) { + EVP_DigestFinal_ex(md5.get(), smd, NULL); + OPENSSL_memcpy(out.data(), smd, out.size()); + break; + } + EVP_DigestFinal_ex(md5.get(), out.data(), NULL); + out = out.subspan(MD5_DIGEST_LENGTH); + } + + OPENSSL_cleanse(smd, SHA_DIGEST_LENGTH); + return true; +} + +static bool get_key_block_lengths(const SSL *ssl, size_t *out_mac_secret_len, + size_t *out_key_len, size_t *out_iv_len, + const SSL_CIPHER *cipher) { + const EVP_AEAD *aead = NULL; + if (!ssl_cipher_get_evp_aead(&aead, out_mac_secret_len, out_iv_len, cipher, + ssl_protocol_version(ssl), SSL_is_dtls(ssl))) { + OPENSSL_PUT_ERROR(SSL, SSL_R_CIPHER_OR_HASH_UNAVAILABLE); + return false; + } + + *out_key_len = EVP_AEAD_key_length(aead); + if (*out_mac_secret_len > 0) { + // For "stateful" AEADs (i.e. compatibility with pre-AEAD cipher suites) the + // key length reported by |EVP_AEAD_key_length| will include the MAC key + // bytes and initial implicit IV. 
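+  // For instance, at TLS 1.0 AES-128-CBC-SHA1 runs as such a stateful AEAD:
+  // its single key concatenates the 20-byte HMAC-SHA1 MAC key, the 16-byte
+  // AES key and a 16-byte implicit CBC IV, so the subtraction below recovers
+  // the 16-byte cipher key.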
+ if (*out_key_len < *out_mac_secret_len + *out_iv_len) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return false; + } + *out_key_len -= *out_mac_secret_len + *out_iv_len; + } + + return true; +} + +static bool setup_key_block(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + if (!hs->key_block.empty()) { + return true; + } + + size_t mac_secret_len, key_len, fixed_iv_len; + Array key_block; + if (!get_key_block_lengths(ssl, &mac_secret_len, &key_len, &fixed_iv_len, + hs->new_cipher) || + !key_block.Init(2 * (mac_secret_len + key_len + fixed_iv_len)) || + !SSL_generate_key_block(ssl, key_block.data(), key_block.size())) { + return false; + } + + hs->key_block = std::move(key_block); + return true; +} + +int tls1_change_cipher_state(SSL_HANDSHAKE *hs, + evp_aead_direction_t direction) { + SSL *const ssl = hs->ssl; + // Ensure the key block is set up. + size_t mac_secret_len, key_len, iv_len; + if (!setup_key_block(hs) || + !get_key_block_lengths(ssl, &mac_secret_len, &key_len, &iv_len, + hs->new_cipher)) { + return 0; + } + + if ((mac_secret_len + key_len + iv_len) * 2 != hs->key_block.size()) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + + Span key_block = hs->key_block; + Span mac_secret, key, iv; + if (direction == (ssl->server ? evp_aead_open : evp_aead_seal)) { + // Use the client write (server read) keys. + mac_secret = key_block.subspan(0, mac_secret_len); + key = key_block.subspan(2 * mac_secret_len, key_len); + iv = key_block.subspan(2 * mac_secret_len + 2 * key_len, iv_len); + } else { + // Use the server write (client read) keys. + mac_secret = key_block.subspan(mac_secret_len, mac_secret_len); + key = key_block.subspan(2 * mac_secret_len + key_len, key_len); + iv = key_block.subspan(2 * mac_secret_len + 2 * key_len + iv_len, iv_len); + } + + UniquePtr aead_ctx = + SSLAEADContext::Create(direction, ssl->version, SSL_is_dtls(ssl), + hs->new_cipher, key, mac_secret, iv); + if (!aead_ctx) { + return 0; + } + + if (direction == evp_aead_open) { + return ssl->method->set_read_state(ssl, std::move(aead_ctx)); + } + + return ssl->method->set_write_state(ssl, std::move(aead_ctx)); +} + +int tls1_generate_master_secret(SSL_HANDSHAKE *hs, uint8_t *out, + Span premaster) { + static const char kMasterSecretLabel[] = "master secret"; + static const char kExtendedMasterSecretLabel[] = "extended master secret"; + + const SSL *ssl = hs->ssl; + auto out_span = MakeSpan(out, SSL3_MASTER_SECRET_SIZE); + if (hs->extended_master_secret) { + auto label = MakeConstSpan(kExtendedMasterSecretLabel, + sizeof(kExtendedMasterSecretLabel) - 1); + uint8_t digests[EVP_MAX_MD_SIZE]; + size_t digests_len; + if (!hs->transcript.GetHash(digests, &digests_len) || + !tls1_prf(hs->transcript.Digest(), out_span, premaster, label, + MakeConstSpan(digests, digests_len), {})) { + return 0; + } + } else { + auto label = + MakeConstSpan(kMasterSecretLabel, sizeof(kMasterSecretLabel) - 1); + if (ssl_protocol_version(ssl) == SSL3_VERSION) { + if (!ssl3_prf(out_span, premaster, label, ssl->s3->client_random, + ssl->s3->server_random)) { + return 0; + } + } else { + if (!tls1_prf(hs->transcript.Digest(), out_span, premaster, label, + ssl->s3->client_random, ssl->s3->server_random)) { + return 0; + } + } + } + + return SSL3_MASTER_SECRET_SIZE; +} + +} // namespace bssl + +using namespace bssl; + +size_t SSL_get_key_block_len(const SSL *ssl) { + size_t mac_secret_len, key_len, fixed_iv_len; + if (!get_key_block_lengths(ssl, &mac_secret_len, &key_len, &fixed_iv_len, + SSL_get_current_cipher(ssl))) { 
+ ERR_clear_error(); + return 0; + } + + return 2 * (mac_secret_len + key_len + fixed_iv_len); +} + +int SSL_generate_key_block(const SSL *ssl, uint8_t *out, size_t out_len) { + const SSL_SESSION *session = SSL_get_session(ssl); + auto out_span = MakeSpan(out, out_len); + auto master_key = + MakeConstSpan(session->master_key, session->master_key_length); + static const char kLabel[] = "key expansion"; + auto label = MakeConstSpan(kLabel, sizeof(kLabel) - 1); + + if (ssl_protocol_version(ssl) == SSL3_VERSION) { + return ssl3_prf(out_span, master_key, label, ssl->s3->server_random, + ssl->s3->client_random); + } + + const EVP_MD *digest = ssl_session_get_digest(session); + return tls1_prf(digest, out_span, master_key, label, ssl->s3->server_random, + ssl->s3->client_random); +} + +int SSL_export_keying_material(SSL *ssl, uint8_t *out, size_t out_len, + const char *label, size_t label_len, + const uint8_t *context, size_t context_len, + int use_context) { + if (!ssl->s3->have_version || ssl->version == SSL3_VERSION) { + return 0; + } + + // Exporters may not be used in the middle of a renegotiation. + if (SSL_in_init(ssl) && !SSL_in_false_start(ssl)) { + return 0; + } + + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return tls13_export_keying_material(ssl, out, out_len, label, label_len, + context, context_len, use_context); + } + + size_t seed_len = 2 * SSL3_RANDOM_SIZE; + if (use_context) { + if (context_len >= 1u << 16) { + OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); + return 0; + } + seed_len += 2 + context_len; + } + Array seed; + if (!seed.Init(seed_len)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return 0; + } + + OPENSSL_memcpy(seed.data(), ssl->s3->client_random, SSL3_RANDOM_SIZE); + OPENSSL_memcpy(seed.data() + SSL3_RANDOM_SIZE, ssl->s3->server_random, + SSL3_RANDOM_SIZE); + if (use_context) { + seed[2 * SSL3_RANDOM_SIZE] = static_cast(context_len >> 8); + seed[2 * SSL3_RANDOM_SIZE + 1] = static_cast(context_len); + OPENSSL_memcpy(seed.data() + 2 * SSL3_RANDOM_SIZE + 2, context, context_len); + } + + const SSL_SESSION *session = SSL_get_session(ssl); + const EVP_MD *digest = ssl_session_get_digest(session); + return tls1_prf( + digest, MakeSpan(out, out_len), + MakeConstSpan(session->master_key, session->master_key_length), + MakeConstSpan(label, label_len), seed, {}); +} diff --git a/Sources/BoringSSL/ssl/t1_lib.c b/Sources/BoringSSL/ssl/t1_lib.cc similarity index 56% rename from Sources/BoringSSL/ssl/t1_lib.c rename to Sources/BoringSSL/ssl/t1_lib.cc index d6ef1ffd8..8d0362383 100644 --- a/Sources/BoringSSL/ssl/t1_lib.c +++ b/Sources/BoringSSL/ssl/t1_lib.cc @@ -113,6 +113,8 @@ #include #include +#include + #include #include #include @@ -121,12 +123,13 @@ #include #include #include -#include #include "internal.h" #include "../crypto/internal.h" +namespace bssl { + static int ssl_check_clienthello_tlsext(SSL_HANDSHAKE *hs); static int compare_uint16_t(const void *p1, const void *p2) { @@ -141,24 +144,21 @@ static int compare_uint16_t(const void *p1, const void *p2) { } } -/* Per http://tools.ietf.org/html/rfc5246#section-7.4.1.4, there may not be - * more than one extension of the same type in a ClientHello or ServerHello. - * This function does an initial scan over the extensions block to filter those - * out. */ +// Per http://tools.ietf.org/html/rfc5246#section-7.4.1.4, there may not be +// more than one extension of the same type in a ClientHello or ServerHello. +// This function does an initial scan over the extensions block to filter those +// out. 
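+// For example, a ClientHello whose extension types decode to
+// {server_name (0), supported_groups (10), server_name (0)} is rejected here,
+// while {server_name (0), supported_groups (10), ALPN (16)} is accepted.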
static int tls1_check_duplicate_extensions(const CBS *cbs) { + // First pass: count the extensions. + size_t num_extensions = 0; CBS extensions = *cbs; - size_t num_extensions = 0, i = 0; - uint16_t *extension_types = NULL; - int ret = 0; - - /* First pass: count the extensions. */ while (CBS_len(&extensions) > 0) { uint16_t type; CBS extension; if (!CBS_get_u16(&extensions, &type) || !CBS_get_u16_length_prefixed(&extensions, &extension)) { - goto done; + return 0; } num_extensions++; @@ -168,46 +168,42 @@ static int tls1_check_duplicate_extensions(const CBS *cbs) { return 1; } - extension_types = OPENSSL_malloc(sizeof(uint16_t) * num_extensions); - if (extension_types == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto done; + Array extension_types; + if (!extension_types.Init(num_extensions)) { + return 0; } - /* Second pass: gather the extension types. */ + // Second pass: gather the extension types. extensions = *cbs; - for (i = 0; i < num_extensions; i++) { + for (size_t i = 0; i < extension_types.size(); i++) { CBS extension; if (!CBS_get_u16(&extensions, &extension_types[i]) || !CBS_get_u16_length_prefixed(&extensions, &extension)) { - /* This should not happen. */ - goto done; + // This should not happen. + return 0; } } assert(CBS_len(&extensions) == 0); - /* Sort the extensions and make sure there are no duplicates. */ - qsort(extension_types, num_extensions, sizeof(uint16_t), compare_uint16_t); - for (i = 1; i < num_extensions; i++) { + // Sort the extensions and make sure there are no duplicates. + qsort(extension_types.data(), extension_types.size(), sizeof(uint16_t), + compare_uint16_t); + for (size_t i = 1; i < num_extensions; i++) { if (extension_types[i - 1] == extension_types[i]) { - goto done; + return 0; } } - ret = 1; - -done: - OPENSSL_free(extension_types); - return ret; + return 1; } -int ssl_client_hello_init(SSL *ssl, SSL_CLIENT_HELLO *out, const uint8_t *in, - size_t in_len) { +int ssl_client_hello_init(SSL *ssl, SSL_CLIENT_HELLO *out, + const SSLMessage &msg) { OPENSSL_memset(out, 0, sizeof(*out)); out->ssl = ssl; - out->client_hello = in; - out->client_hello_len = in_len; + out->client_hello = CBS_data(&msg.body); + out->client_hello_len = CBS_len(&msg.body); CBS client_hello, random, session_id; CBS_init(&client_hello, out->client_hello, out->client_hello_len); @@ -223,7 +219,7 @@ int ssl_client_hello_init(SSL *ssl, SSL_CLIENT_HELLO *out, const uint8_t *in, out->session_id = CBS_data(&session_id); out->session_id_len = CBS_len(&session_id); - /* Skip past DTLS cookie */ + // Skip past DTLS cookie if (SSL_is_dtls(out->ssl)) { CBS cookie; if (!CBS_get_u8_length_prefixed(&client_hello, &cookie) || @@ -245,15 +241,15 @@ int ssl_client_hello_init(SSL *ssl, SSL_CLIENT_HELLO *out, const uint8_t *in, out->compression_methods = CBS_data(&compression_methods); out->compression_methods_len = CBS_len(&compression_methods); - /* If the ClientHello ends here then it's valid, but doesn't have any - * extensions. (E.g. SSLv3.) */ + // If the ClientHello ends here then it's valid, but doesn't have any + // extensions. (E.g. SSLv3.) if (CBS_len(&client_hello) == 0) { out->extensions = NULL; out->extensions_len = 0; return 1; } - /* Extract extensions and check it is valid. */ + // Extract extensions and check it is valid. 
CBS extensions; if (!CBS_get_u16_length_prefixed(&client_hello, &extensions) || !tls1_check_duplicate_extensions(&extensions) || @@ -272,7 +268,7 @@ int ssl_client_hello_get_extension(const SSL_CLIENT_HELLO *client_hello, CBS extensions; CBS_init(&extensions, client_hello->extensions, client_hello->extensions_len); while (CBS_len(&extensions) != 0) { - /* Decode the next extension. */ + // Decode the next extension. uint16_t type; CBS extension; if (!CBS_get_u16(&extensions, &type) || @@ -289,69 +285,47 @@ int ssl_client_hello_get_extension(const SSL_CLIENT_HELLO *client_hello, return 0; } -int SSL_early_callback_ctx_extension_get(const SSL_CLIENT_HELLO *client_hello, - uint16_t extension_type, - const uint8_t **out_data, - size_t *out_len) { - CBS cbs; - if (!ssl_client_hello_get_extension(client_hello, &cbs, extension_type)) { - return 0; - } - - *out_data = CBS_data(&cbs); - *out_len = CBS_len(&cbs); - return 1; -} - static const uint16_t kDefaultGroups[] = { SSL_CURVE_X25519, SSL_CURVE_SECP256R1, SSL_CURVE_SECP384R1, }; -void tls1_get_grouplist(SSL *ssl, const uint16_t **out_group_ids, - size_t *out_group_ids_len) { - *out_group_ids = ssl->supported_group_list; - *out_group_ids_len = ssl->supported_group_list_len; - if (!*out_group_ids) { - *out_group_ids = kDefaultGroups; - *out_group_ids_len = OPENSSL_ARRAY_SIZE(kDefaultGroups); +Span tls1_get_grouplist(const SSL *ssl) { + if (ssl->supported_group_list != nullptr) { + return MakeConstSpan(ssl->supported_group_list, + ssl->supported_group_list_len); } + return Span(kDefaultGroups); } int tls1_get_shared_group(SSL_HANDSHAKE *hs, uint16_t *out_group_id) { SSL *const ssl = hs->ssl; assert(ssl->server); - const uint16_t *groups, *pref, *supp; - size_t groups_len, pref_len, supp_len; - tls1_get_grouplist(ssl, &groups, &groups_len); - - /* Clients are not required to send a supported_groups extension. In this - * case, the server is free to pick any group it likes. See RFC 4492, - * section 4, paragraph 3. - * - * However, in the interests of compatibility, we will skip ECDH if the - * client didn't send an extension because we can't be sure that they'll - * support our favoured group. Thus we do not special-case an emtpy - * |peer_supported_group_list|. */ - + // Clients are not required to send a supported_groups extension. In this + // case, the server is free to pick any group it likes. See RFC 4492, + // section 4, paragraph 3. + // + // However, in the interests of compatibility, we will skip ECDH if the + // client didn't send an extension because we can't be sure that they'll + // support our favoured group. Thus we do not special-case an emtpy + // |peer_supported_group_list|. 
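+  // For example, with SSL_OP_CIPHER_SERVER_PREFERENCE set, a local group list
+  // of {X25519, P-256, P-384} and a peer list of {P-384, P-256}, the loops
+  // below select P-256: the first locally preferred group the peer also
+  // offers.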
+ + Span groups = tls1_get_grouplist(ssl); + Span pref, supp; if (ssl->options & SSL_OP_CIPHER_SERVER_PREFERENCE) { pref = groups; - pref_len = groups_len; supp = hs->peer_supported_group_list; - supp_len = hs->peer_supported_group_list_len; } else { pref = hs->peer_supported_group_list; - pref_len = hs->peer_supported_group_list_len; supp = groups; - supp_len = groups_len; } - for (size_t i = 0; i < pref_len; i++) { - for (size_t j = 0; j < supp_len; j++) { - if (pref[i] == supp[j]) { - *out_group_id = pref[i]; + for (uint16_t pref_group : pref) { + for (uint16_t supp_group : supp) { + if (pref_group == supp_group) { + *out_group_id = pref_group; return 1; } } @@ -362,9 +336,7 @@ int tls1_get_shared_group(SSL_HANDSHAKE *hs, uint16_t *out_group_id) { int tls1_set_curves(uint16_t **out_group_ids, size_t *out_group_ids_len, const int *curves, size_t ncurves) { - uint16_t *group_ids; - - group_ids = OPENSSL_malloc(ncurves * sizeof(uint16_t)); + uint16_t *group_ids = (uint16_t *)OPENSSL_malloc(ncurves * sizeof(uint16_t)); if (group_ids == NULL) { return 0; } @@ -400,8 +372,8 @@ int tls1_set_curves_list(uint16_t **out_group_ids, size_t *out_group_ids_len, goto err; } - uint16_t *new_group_ids = OPENSSL_realloc(group_ids, - (ncurves + 1) * sizeof(uint16_t)); + uint16_t *new_group_ids = (uint16_t *)OPENSSL_realloc( + group_ids, (ncurves + 1) * sizeof(uint16_t)); if (new_group_ids == NULL) { goto err; } @@ -426,12 +398,9 @@ int tls1_set_curves_list(uint16_t **out_group_ids, size_t *out_group_ids_len, return 0; } -int tls1_check_group_id(SSL *ssl, uint16_t group_id) { - const uint16_t *groups; - size_t groups_len; - tls1_get_grouplist(ssl, &groups, &groups_len); - for (size_t i = 0; i < groups_len; i++) { - if (groups[i] == group_id) { +int tls1_check_group_id(const SSL *ssl, uint16_t group_id) { + for (uint16_t supported : tls1_get_grouplist(ssl)) { + if (supported == group_id) { return 1; } } @@ -439,85 +408,98 @@ int tls1_check_group_id(SSL *ssl, uint16_t group_id) { return 0; } -/* kVerifySignatureAlgorithms is the default list of accepted signature - * algorithms for verifying. - * - * For now, RSA-PSS signature algorithms are not enabled on Android's system - * BoringSSL. Once the change in Chrome has stuck and the values are finalized, - * restore them. */ +// kVerifySignatureAlgorithms is the default list of accepted signature +// algorithms for verifying. +// +// For now, RSA-PSS signature algorithms are not enabled on Android's system +// BoringSSL. Once the change in Chrome has stuck and the values are finalized, +// restore them. static const uint16_t kVerifySignatureAlgorithms[] = { - /* Prefer SHA-256 algorithms. */ + // List our preferred algorithms first. + SSL_SIGN_ED25519, SSL_SIGN_ECDSA_SECP256R1_SHA256, -#if !defined(BORINGSSL_ANDROID_SYSTEM) SSL_SIGN_RSA_PSS_SHA256, -#endif SSL_SIGN_RSA_PKCS1_SHA256, - /* Larger hashes are acceptable. */ + // Larger hashes are acceptable. SSL_SIGN_ECDSA_SECP384R1_SHA384, -#if !defined(BORINGSSL_ANDROID_SYSTEM) SSL_SIGN_RSA_PSS_SHA384, -#endif SSL_SIGN_RSA_PKCS1_SHA384, - /* TODO(davidben): Remove this. */ -#if defined(BORINGSSL_ANDROID_SYSTEM) - SSL_SIGN_ECDSA_SECP521R1_SHA512, -#endif -#if !defined(BORINGSSL_ANDROID_SYSTEM) SSL_SIGN_RSA_PSS_SHA512, -#endif SSL_SIGN_RSA_PKCS1_SHA512, - /* For now, SHA-1 is still accepted but least preferable. */ + // For now, SHA-1 is still accepted but least preferable. SSL_SIGN_RSA_PKCS1_SHA1, }; -/* kSignSignatureAlgorithms is the default list of supported signature - * algorithms for signing. 
- * - * For now, RSA-PSS signature algorithms are not enabled on Android's system - * BoringSSL. Once the change in Chrome has stuck and the values are finalized, - * restore them. */ +// kSignSignatureAlgorithms is the default list of supported signature +// algorithms for signing. +// +// For now, RSA-PSS signature algorithms are not enabled on Android's system +// BoringSSL. Once the change in Chrome has stuck and the values are finalized, +// restore them. static const uint16_t kSignSignatureAlgorithms[] = { - /* Prefer SHA-256 algorithms. */ + // List our preferred algorithms first. + SSL_SIGN_ED25519, SSL_SIGN_ECDSA_SECP256R1_SHA256, -#if !defined(BORINGSSL_ANDROID_SYSTEM) SSL_SIGN_RSA_PSS_SHA256, -#endif SSL_SIGN_RSA_PKCS1_SHA256, - /* If needed, sign larger hashes. - * - * TODO(davidben): Determine which of these may be pruned. */ + // If needed, sign larger hashes. + // + // TODO(davidben): Determine which of these may be pruned. SSL_SIGN_ECDSA_SECP384R1_SHA384, -#if !defined(BORINGSSL_ANDROID_SYSTEM) SSL_SIGN_RSA_PSS_SHA384, -#endif SSL_SIGN_RSA_PKCS1_SHA384, SSL_SIGN_ECDSA_SECP521R1_SHA512, -#if !defined(BORINGSSL_ANDROID_SYSTEM) SSL_SIGN_RSA_PSS_SHA512, -#endif SSL_SIGN_RSA_PKCS1_SHA512, - /* If the peer supports nothing else, sign with SHA-1. */ + // If the peer supports nothing else, sign with SHA-1. SSL_SIGN_ECDSA_SHA1, SSL_SIGN_RSA_PKCS1_SHA1, }; -size_t tls12_get_verify_sigalgs(const SSL *ssl, const uint16_t **out) { - *out = kVerifySignatureAlgorithms; - return OPENSSL_ARRAY_SIZE(kVerifySignatureAlgorithms); +int tls12_add_verify_sigalgs(const SSL *ssl, CBB *out) { + const uint16_t *sigalgs = kVerifySignatureAlgorithms; + size_t num_sigalgs = OPENSSL_ARRAY_SIZE(kVerifySignatureAlgorithms); + if (ssl->ctx->num_verify_sigalgs != 0) { + sigalgs = ssl->ctx->verify_sigalgs; + num_sigalgs = ssl->ctx->num_verify_sigalgs; + } + + for (size_t i = 0; i < num_sigalgs; i++) { + if (sigalgs == kVerifySignatureAlgorithms && + sigalgs[i] == SSL_SIGN_ED25519 && + !ssl->ctx->ed25519_enabled) { + continue; + } + if (!CBB_add_u16(out, sigalgs[i])) { + return 0; + } + } + + return 1; } -int tls12_check_peer_sigalg(SSL *ssl, int *out_alert, uint16_t sigalg) { - const uint16_t *verify_sigalgs; - size_t num_verify_sigalgs = tls12_get_verify_sigalgs(ssl, &verify_sigalgs); - for (size_t i = 0; i < num_verify_sigalgs; i++) { - if (sigalg == verify_sigalgs[i]) { +int tls12_check_peer_sigalg(SSL *ssl, uint8_t *out_alert, uint16_t sigalg) { + const uint16_t *sigalgs = kVerifySignatureAlgorithms; + size_t num_sigalgs = OPENSSL_ARRAY_SIZE(kVerifySignatureAlgorithms); + if (ssl->ctx->num_verify_sigalgs != 0) { + sigalgs = ssl->ctx->verify_sigalgs; + num_sigalgs = ssl->ctx->num_verify_sigalgs; + } + + for (size_t i = 0; i < num_sigalgs; i++) { + if (sigalgs == kVerifySignatureAlgorithms && + sigalgs[i] == SSL_SIGN_ED25519 && + !ssl->ctx->ed25519_enabled) { + continue; + } + if (sigalg == sigalgs[i]) { return 1; } } @@ -527,64 +509,64 @@ int tls12_check_peer_sigalg(SSL *ssl, int *out_alert, uint16_t sigalg) { return 0; } -/* tls_extension represents a TLS extension that is handled internally. The - * |init| function is called for each handshake, before any other functions of - * the extension. Then the add and parse callbacks are called as needed. - * - * The parse callbacks receive a |CBS| that contains the contents of the - * extension (i.e. not including the type and length bytes). 
If an extension is - * not received then the parse callbacks will be called with a NULL CBS so that - * they can do any processing needed to handle the absence of an extension. - * - * The add callbacks receive a |CBB| to which the extension can be appended but - * the function is responsible for appending the type and length bytes too. - * - * All callbacks return one for success and zero for error. If a parse function - * returns zero then a fatal alert with value |*out_alert| will be sent. If - * |*out_alert| isn't set, then a |decode_error| alert will be sent. */ +// tls_extension represents a TLS extension that is handled internally. The +// |init| function is called for each handshake, before any other functions of +// the extension. Then the add and parse callbacks are called as needed. +// +// The parse callbacks receive a |CBS| that contains the contents of the +// extension (i.e. not including the type and length bytes). If an extension is +// not received then the parse callbacks will be called with a NULL CBS so that +// they can do any processing needed to handle the absence of an extension. +// +// The add callbacks receive a |CBB| to which the extension can be appended but +// the function is responsible for appending the type and length bytes too. +// +// All callbacks return true for success and false for error. If a parse +// function returns zero then a fatal alert with value |*out_alert| will be +// sent. If |*out_alert| isn't set, then a |decode_error| alert will be sent. struct tls_extension { uint16_t value; void (*init)(SSL_HANDSHAKE *hs); - int (*add_clienthello)(SSL_HANDSHAKE *hs, CBB *out); - int (*parse_serverhello)(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents); + bool (*add_clienthello)(SSL_HANDSHAKE *hs, CBB *out); + bool (*parse_serverhello)(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents); - int (*parse_clienthello)(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents); - int (*add_serverhello)(SSL_HANDSHAKE *hs, CBB *out); + bool (*parse_clienthello)(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents); + bool (*add_serverhello)(SSL_HANDSHAKE *hs, CBB *out); }; -static int forbid_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, +static bool forbid_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { if (contents != NULL) { - /* Servers MUST NOT send this extension. */ + // Servers MUST NOT send this extension. *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); - return 0; + return false; } - return 1; + return true; } -static int ignore_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, +static bool ignore_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { - /* This extension from the client is handled elsewhere. */ - return 1; + // This extension from the client is handled elsewhere. + return true; } -static int dont_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { - return 1; +static bool dont_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { + return true; } -/* Server name indication (SNI). - * - * https://tools.ietf.org/html/rfc6066#section-3. */ +// Server name indication (SNI). +// +// https://tools.ietf.org/html/rfc6066#section-3. 
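+// A client typically opts in by calling SSL_set_tlsext_host_name(ssl,
+// "example.com") before the handshake; the stored |ssl->tlsext_hostname| is
+// what ext_sni_add_clienthello below serializes as a single host_name entry.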
-static int ext_sni_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_sni_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (ssl->tlsext_hostname == NULL) { - return 1; + return true; } CBB contents, server_name_list, name; @@ -596,58 +578,41 @@ static int ext_sni_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_bytes(&name, (const uint8_t *)ssl->tlsext_hostname, strlen(ssl->tlsext_hostname)) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -static int ext_sni_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { - SSL *const ssl = hs->ssl; - if (contents == NULL) { - return 1; - } - - if (CBS_len(contents) != 0) { - return 0; - } - - assert(ssl->tlsext_hostname != NULL); - - if (ssl->session == NULL) { - OPENSSL_free(hs->new_session->tlsext_hostname); - hs->new_session->tlsext_hostname = BUF_strdup(ssl->tlsext_hostname); - if (!hs->new_session->tlsext_hostname) { - *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; - } - } - - return 1; +static bool ext_sni_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { + // The server may acknowledge SNI with an empty extension. We check the syntax + // but otherwise ignore this signal. + return contents == NULL || CBS_len(contents) == 0; } -static int ext_sni_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_sni_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { + SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } CBS server_name_list, host_name; uint8_t name_type; if (!CBS_get_u16_length_prefixed(contents, &server_name_list) || !CBS_get_u8(&server_name_list, &name_type) || - /* Although the server_name extension was intended to be extensible to - * new name types and multiple names, OpenSSL 1.0.x had a bug which meant - * different name types will cause an error. Further, RFC 4366 originally - * defined syntax inextensibly. RFC 6066 corrected this mistake, but - * adding new name types is no longer feasible. - * - * Act as if the extensibility does not exist to simplify parsing. */ + // Although the server_name extension was intended to be extensible to + // new name types and multiple names, OpenSSL 1.0.x had a bug which meant + // different name types will cause an error. Further, RFC 4366 originally + // defined syntax inextensibly. RFC 6066 corrected this mistake, but + // adding new name types is no longer feasible. + // + // Act as if the extensibility does not exist to simplify parsing. !CBS_get_u16_length_prefixed(&server_name_list, &host_name) || CBS_len(&server_name_list) != 0 || CBS_len(contents) != 0) { - return 0; + return false; } if (name_type != TLSEXT_NAMETYPE_host_name || @@ -655,48 +620,45 @@ static int ext_sni_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS_len(&host_name) > TLSEXT_MAXLEN_host_name || CBS_contains_zero_byte(&host_name)) { *out_alert = SSL_AD_UNRECOGNIZED_NAME; - return 0; + return false; } - /* Copy the hostname as a string. */ - if (!CBS_strdup(&host_name, &hs->hostname)) { + // Copy the hostname as a string. 
+ char *raw = nullptr; + if (!CBS_strdup(&host_name, &raw)) { *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; + return false; } + ssl->s3->hostname.reset(raw); - hs->should_ack_sni = 1; - return 1; + hs->should_ack_sni = true; + return true; } -static int ext_sni_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_sni_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { if (hs->ssl->s3->session_reused || !hs->should_ack_sni) { - return 1; + return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_server_name) || !CBB_add_u16(out, 0 /* length */)) { - return 0; + return false; } - return 1; + return true; } -/* Renegotiation indication. - * - * https://tools.ietf.org/html/rfc5746 */ +// Renegotiation indication. +// +// https://tools.ietf.org/html/rfc5746 -static int ext_ri_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_ri_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; - } - - /* Renegotiation indication is not necessary in TLS 1.3. */ - if (min_version >= TLS1_3_VERSION) { - return 1; + // Renegotiation indication is not necessary in TLS 1.3. + if (hs->min_version >= TLS1_3_VERSION) { + return true; } assert(ssl->s3->initial_handshake_complete == @@ -709,43 +671,44 @@ static int ext_ri_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_bytes(&prev_finished, ssl->s3->previous_client_finished, ssl->s3->previous_client_finished_len) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -static int ext_ri_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_ri_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; - if (contents != NULL && ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - return 0; + if (contents != NULL && ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + *out_alert = SSL_AD_ILLEGAL_PARAMETER; + return false; } - /* Servers may not switch between omitting the extension and supporting it. - * See RFC 5746, sections 3.5 and 4.2. */ + // Servers may not switch between omitting the extension and supporting it. + // See RFC 5746, sections 3.5 and 4.2. if (ssl->s3->initial_handshake_complete && (contents != NULL) != ssl->s3->send_connection_binding) { *out_alert = SSL_AD_HANDSHAKE_FAILURE; OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_MISMATCH); - return 0; + return false; } if (contents == NULL) { - /* Strictly speaking, if we want to avoid an attack we should *always* see - * RI even on initial ServerHello because the client doesn't see any - * renegotiation during an attack. However this would mean we could not - * connect to any server which doesn't support RI. - * - * OpenSSL has |SSL_OP_LEGACY_SERVER_CONNECT| to control this, but in - * practical terms every client sets it so it's just assumed here. */ - return 1; + // Strictly speaking, if we want to avoid an attack we should *always* see + // RI even on initial ServerHello because the client doesn't see any + // renegotiation during an attack. However this would mean we could not + // connect to any server which doesn't support RI. + // + // OpenSSL has |SSL_OP_LEGACY_SERVER_CONNECT| to control this, but in + // practical terms every client sets it so it's just assumed here. 
+ return true; } const size_t expected_len = ssl->s3->previous_client_finished_len + ssl->s3->previous_server_finished_len; - /* Check for logic errors */ + // Check for logic errors assert(!expected_len || ssl->s3->previous_client_finished_len); assert(!expected_len || ssl->s3->previous_server_finished_len); assert(ssl->s3->initial_handshake_complete == @@ -753,212 +716,207 @@ static int ext_ri_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, assert(ssl->s3->initial_handshake_complete == (ssl->s3->previous_server_finished_len != 0)); - /* Parse out the extension contents. */ + // Parse out the extension contents. CBS renegotiated_connection; if (!CBS_get_u8_length_prefixed(contents, &renegotiated_connection) || CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_ENCODING_ERR); *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; + return false; } - /* Check that the extension matches. */ + // Check that the extension matches. if (CBS_len(&renegotiated_connection) != expected_len) { OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_MISMATCH); *out_alert = SSL_AD_HANDSHAKE_FAILURE; - return 0; + return false; } const uint8_t *d = CBS_data(&renegotiated_connection); - if (CRYPTO_memcmp(d, ssl->s3->previous_client_finished, - ssl->s3->previous_client_finished_len)) { + bool ok = CRYPTO_memcmp(d, ssl->s3->previous_client_finished, + ssl->s3->previous_client_finished_len) == 0; +#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) + ok = true; +#endif + if (!ok) { OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_MISMATCH); *out_alert = SSL_AD_HANDSHAKE_FAILURE; - return 0; + return false; } d += ssl->s3->previous_client_finished_len; - if (CRYPTO_memcmp(d, ssl->s3->previous_server_finished, - ssl->s3->previous_server_finished_len)) { + ok = CRYPTO_memcmp(d, ssl->s3->previous_server_finished, + ssl->s3->previous_server_finished_len) == 0; +#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) + ok = true; +#endif + if (!ok) { OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_MISMATCH); - *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; + *out_alert = SSL_AD_HANDSHAKE_FAILURE; + return false; } - ssl->s3->send_connection_binding = 1; + ssl->s3->send_connection_binding = true; - return 1; + return true; } -static int ext_ri_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_ri_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; - /* Renegotiation isn't supported as a server so this function should never be - * called after the initial handshake. */ + // Renegotiation isn't supported as a server so this function should never be + // called after the initial handshake. assert(!ssl->s3->initial_handshake_complete); - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - return 1; + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return true; } if (contents == NULL) { - return 1; + return true; } CBS renegotiated_connection; if (!CBS_get_u8_length_prefixed(contents, &renegotiated_connection) || CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_ENCODING_ERR); - return 0; + return false; } - /* Check that the extension matches. We do not support renegotiation as a - * server, so this must be empty. */ + // Check that the extension matches. We do not support renegotiation as a + // server, so this must be empty. 
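+  // (Per RFC 5746, a client's initial ClientHello sends an empty
+  // renegotiated_connection value.)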
if (CBS_len(&renegotiated_connection) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_MISMATCH); *out_alert = SSL_AD_HANDSHAKE_FAILURE; - return 0; + return false; } - ssl->s3->send_connection_binding = 1; + ssl->s3->send_connection_binding = true; - return 1; + return true; } -static int ext_ri_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_ri_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - /* Renegotiation isn't supported as a server so this function should never be - * called after the initial handshake. */ + // Renegotiation isn't supported as a server so this function should never be + // called after the initial handshake. assert(!ssl->s3->initial_handshake_complete); - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - return 1; + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_renegotiate) || !CBB_add_u16(out, 1 /* length */) || !CBB_add_u8(out, 0 /* empty renegotiation info */)) { - return 0; + return false; } - return 1; + return true; } -/* Extended Master Secret. - * - * https://tools.ietf.org/html/rfc7627 */ +// Extended Master Secret. +// +// https://tools.ietf.org/html/rfc7627 -static int ext_ems_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { - uint16_t min_version, max_version; - if (!ssl_get_version_range(hs->ssl, &min_version, &max_version)) { - return 0; - } - - /* Extended master secret is not necessary in TLS 1.3. */ - if (min_version >= TLS1_3_VERSION || max_version <= SSL3_VERSION) { - return 1; +static bool ext_ems_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { + // Extended master secret is not necessary in TLS 1.3. + if (hs->min_version >= TLS1_3_VERSION || hs->max_version <= SSL3_VERSION) { + return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_extended_master_secret) || !CBB_add_u16(out, 0 /* length */)) { - return 0; + return false; } - return 1; + return true; } -static int ext_ems_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_ems_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents != NULL) { - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION || + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION || ssl->version == SSL3_VERSION || CBS_len(contents) != 0) { - return 0; + return false; } - hs->extended_master_secret = 1; + hs->extended_master_secret = true; } - /* Whether EMS is negotiated may not change on renegotiation. */ - if (ssl->s3->established_session != NULL && + // Whether EMS is negotiated may not change on renegotiation. 
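+  // For example, if the initial handshake negotiated EMS, a renegotiation
+  // ServerHello that omits the extension is rejected below with
+  // SSL_R_RENEGOTIATION_EMS_MISMATCH.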
+ if (ssl->s3->established_session != nullptr && hs->extended_master_secret != - ssl->s3->established_session->extended_master_secret) { + !!ssl->s3->established_session->extended_master_secret) { OPENSSL_PUT_ERROR(SSL, SSL_R_RENEGOTIATION_EMS_MISMATCH); *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; + return false; } - return 1; + return true; } -static int ext_ems_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { - uint16_t version = ssl3_protocol_version(hs->ssl); +static bool ext_ems_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { + uint16_t version = ssl_protocol_version(hs->ssl); if (version >= TLS1_3_VERSION || version == SSL3_VERSION) { - return 1; + return true; } if (contents == NULL) { - return 1; + return true; } if (CBS_len(contents) != 0) { - return 0; + return false; } - hs->extended_master_secret = 1; - return 1; + hs->extended_master_secret = true; + return true; } -static int ext_ems_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_ems_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { if (!hs->extended_master_secret) { - return 1; + return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_extended_master_secret) || !CBB_add_u16(out, 0 /* length */)) { - return 0; + return false; } - return 1; + return true; } -/* Session tickets. - * - * https://tools.ietf.org/html/rfc5077 */ +// Session tickets. +// +// https://tools.ietf.org/html/rfc5077 -static int ext_ticket_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_ticket_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; - } - - /* TLS 1.3 uses a different ticket extension. */ - if (min_version >= TLS1_3_VERSION || + // TLS 1.3 uses a different ticket extension. + if (hs->min_version >= TLS1_3_VERSION || SSL_get_options(ssl) & SSL_OP_NO_TICKET) { - return 1; + return true; } const uint8_t *ticket_data = NULL; int ticket_len = 0; - /* Renegotiation does not participate in session resumption. However, still - * advertise the extension to avoid potentially breaking servers which carry - * over the state from the previous handshake, such as OpenSSL servers - * without upstream's 3c3f0259238594d77264a78944d409f2127642c4. */ - uint16_t session_version; + // Renegotiation does not participate in session resumption. However, still + // advertise the extension to avoid potentially breaking servers which carry + // over the state from the previous handshake, such as OpenSSL servers + // without upstream's 3c3f0259238594d77264a78944d409f2127642c4. if (!ssl->s3->initial_handshake_complete && ssl->session != NULL && ssl->session->tlsext_tick != NULL && - /* Don't send TLS 1.3 session tickets in the ticket extension. */ - ssl->method->version_from_wire(&session_version, - ssl->session->ssl_version) && - session_version < TLS1_3_VERSION) { + // Don't send TLS 1.3 session tickets in the ticket extension. 
+ ssl_session_protocol_version(ssl->session) < TLS1_3_VERSION) { ticket_data = ssl->session->tlsext_tick; ticket_len = ssl->session->tlsext_ticklen; } @@ -968,99 +926,80 @@ static int ext_ticket_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_u16_length_prefixed(out, &ticket) || !CBB_add_bytes(&ticket, ticket_data, ticket_len) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -static int ext_ticket_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_ticket_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - return 0; + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return false; } - /* If |SSL_OP_NO_TICKET| is set then no extension will have been sent and - * this function should never be called, even if the server tries to send the - * extension. */ + // If |SSL_OP_NO_TICKET| is set then no extension will have been sent and + // this function should never be called, even if the server tries to send the + // extension. assert((SSL_get_options(ssl) & SSL_OP_NO_TICKET) == 0); if (CBS_len(contents) != 0) { - return 0; + return false; } - hs->ticket_expected = 1; - return 1; + hs->ticket_expected = true; + return true; } -static int ext_ticket_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_ticket_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { if (!hs->ticket_expected) { - return 1; + return true; } - /* If |SSL_OP_NO_TICKET| is set, |ticket_expected| should never be true. */ + // If |SSL_OP_NO_TICKET| is set, |ticket_expected| should never be true. assert((SSL_get_options(hs->ssl) & SSL_OP_NO_TICKET) == 0); if (!CBB_add_u16(out, TLSEXT_TYPE_session_ticket) || !CBB_add_u16(out, 0 /* length */)) { - return 0; + return false; } - return 1; + return true; } -/* Signature Algorithms. - * - * https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 */ +// Signature Algorithms. 
+// +// https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 -static int ext_sigalgs_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_sigalgs_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; - } - - if (max_version < TLS1_2_VERSION) { - return 1; + if (hs->max_version < TLS1_2_VERSION) { + return true; } - const uint16_t *sigalgs; - const size_t num_sigalgs = tls12_get_verify_sigalgs(ssl, &sigalgs); - CBB contents, sigalgs_cbb; if (!CBB_add_u16(out, TLSEXT_TYPE_signature_algorithms) || !CBB_add_u16_length_prefixed(out, &contents) || - !CBB_add_u16_length_prefixed(&contents, &sigalgs_cbb)) { - return 0; - } - - for (size_t i = 0; i < num_sigalgs; i++) { - if (!CBB_add_u16(&sigalgs_cbb, sigalgs[i])) { - return 0; - } - } - - if (!CBB_flush(out)) { - return 0; + !CBB_add_u16_length_prefixed(&contents, &sigalgs_cbb) || + !tls12_add_verify_sigalgs(ssl, &sigalgs_cbb) || + !CBB_flush(out)) { + return false; } - return 1; + return true; } -static int ext_sigalgs_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { - OPENSSL_free(hs->peer_sigalgs); - hs->peer_sigalgs = NULL; - hs->num_peer_sigalgs = 0; - +static bool ext_sigalgs_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { + hs->peer_sigalgs.Reset(); if (contents == NULL) { - return 1; + return true; } CBS supported_signature_algorithms; @@ -1068,21 +1007,21 @@ static int ext_sigalgs_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS_len(contents) != 0 || CBS_len(&supported_signature_algorithms) == 0 || !tls1_parse_peer_sigalgs(hs, &supported_signature_algorithms)) { - return 0; + return false; } - return 1; + return true; } -/* OCSP Stapling. - * - * https://tools.ietf.org/html/rfc6066#section-8 */ +// OCSP Stapling. +// +// https://tools.ietf.org/html/rfc6066#section-8 -static int ext_ocsp_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_ocsp_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (!ssl->ocsp_stapling_enabled) { - return 1; + return true; } CBB contents; @@ -1092,116 +1031,116 @@ static int ext_ocsp_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_u16(&contents, 0 /* empty responder ID list */) || !CBB_add_u16(&contents, 0 /* empty request extensions */) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -static int ext_ocsp_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_ocsp_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } - /* TLS 1.3 OCSP responses are included in the Certificate extensions. */ - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - return 0; + // TLS 1.3 OCSP responses are included in the Certificate extensions. + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return false; } - /* OCSP stapling is forbidden on non-certificate ciphers. */ + // OCSP stapling is forbidden on non-certificate ciphers. if (CBS_len(contents) != 0 || !ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - return 0; + return false; } - /* Note this does not check for resumption in TLS 1.2. Sending - * status_request here does not make sense, but OpenSSL does so and the - * specification does not say anything. Tolerate it but ignore it. */ + // Note this does not check for resumption in TLS 1.2. 
Sending + // status_request here does not make sense, but OpenSSL does so and the + // specification does not say anything. Tolerate it but ignore it. - hs->certificate_status_expected = 1; - return 1; + hs->certificate_status_expected = true; + return true; } -static int ext_ocsp_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_ocsp_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { if (contents == NULL) { - return 1; + return true; } uint8_t status_type; if (!CBS_get_u8(contents, &status_type)) { - return 0; + return false; } - /* We cannot decide whether OCSP stapling will occur yet because the correct - * SSL_CTX might not have been selected. */ + // We cannot decide whether OCSP stapling will occur yet because the correct + // SSL_CTX might not have been selected. hs->ocsp_stapling_requested = status_type == TLSEXT_STATUSTYPE_ocsp; - return 1; + return true; } -static int ext_ocsp_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_ocsp_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION || + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION || !hs->ocsp_stapling_requested || ssl->cert->ocsp_response == NULL || ssl->s3->session_reused || !ssl_cipher_uses_certificate_auth(hs->new_cipher)) { - return 1; + return true; } - hs->certificate_status_expected = 1; + hs->certificate_status_expected = true; return CBB_add_u16(out, TLSEXT_TYPE_status_request) && CBB_add_u16(out, 0 /* length */); } -/* Next protocol negotiation. - * - * https://htmlpreview.github.io/?https://github.com/agl/technotes/blob/master/nextprotoneg.html */ +// Next protocol negotiation. +// +// https://htmlpreview.github.io/?https://github.com/agl/technotes/blob/master/nextprotoneg.html -static int ext_npn_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_npn_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (ssl->s3->initial_handshake_complete || ssl->ctx->next_proto_select_cb == NULL || SSL_is_dtls(ssl)) { - return 1; + return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_next_proto_neg) || !CBB_add_u16(out, 0 /* length */)) { - return 0; + return false; } - return 1; + return true; } -static int ext_npn_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_npn_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - return 0; + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return false; } - /* If any of these are false then we should never have sent the NPN - * extension in the ClientHello and thus this function should never have been - * called. */ + // If any of these are false then we should never have sent the NPN + // extension in the ClientHello and thus this function should never have been + // called. assert(!ssl->s3->initial_handshake_complete); assert(!SSL_is_dtls(ssl)); assert(ssl->ctx->next_proto_select_cb != NULL); - if (ssl->s3->alpn_selected != NULL) { - /* NPN and ALPN may not be negotiated in the same connection. */ + if (!ssl->s3->alpn_selected.empty()) { + // NPN and ALPN may not be negotiated in the same connection. 
*out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_NEGOTIATED_BOTH_NPN_AND_ALPN); - return 0; + return false; } const uint8_t *const orig_contents = CBS_data(contents); @@ -1211,7 +1150,7 @@ static int ext_npn_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS proto; if (!CBS_get_u8_length_prefixed(contents, &proto) || CBS_len(&proto) == 0) { - return 0; + return false; } } @@ -1219,52 +1158,45 @@ static int ext_npn_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, uint8_t selected_len; if (ssl->ctx->next_proto_select_cb( ssl, &selected, &selected_len, orig_contents, orig_len, - ssl->ctx->next_proto_select_cb_arg) != SSL_TLSEXT_ERR_OK) { + ssl->ctx->next_proto_select_cb_arg) != SSL_TLSEXT_ERR_OK || + !ssl->s3->next_proto_negotiated.CopyFrom( + MakeConstSpan(selected, selected_len))) { *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; + return false; } - OPENSSL_free(ssl->s3->next_proto_negotiated); - ssl->s3->next_proto_negotiated = BUF_memdup(selected, selected_len); - if (ssl->s3->next_proto_negotiated == NULL) { - *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; - } - - ssl->s3->next_proto_negotiated_len = selected_len; - hs->next_proto_neg_seen = 1; - - return 1; + hs->next_proto_neg_seen = true; + return true; } -static int ext_npn_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_npn_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - return 1; + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return true; } if (contents != NULL && CBS_len(contents) != 0) { - return 0; + return false; } if (contents == NULL || ssl->s3->initial_handshake_complete || ssl->ctx->next_protos_advertised_cb == NULL || SSL_is_dtls(ssl)) { - return 1; + return true; } - hs->next_proto_neg_seen = 1; - return 1; + hs->next_proto_neg_seen = true; + return true; } -static int ext_npn_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_npn_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - /* |next_proto_neg_seen| might have been cleared when an ALPN extension was - * parsed. */ + // |next_proto_neg_seen| might have been cleared when an ALPN extension was + // parsed. if (!hs->next_proto_neg_seen) { - return 1; + return true; } const uint8_t *npa; @@ -1273,8 +1205,8 @@ static int ext_npn_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { if (ssl->ctx->next_protos_advertised_cb( ssl, &npa, &npa_len, ssl->ctx->next_protos_advertised_cb_arg) != SSL_TLSEXT_ERR_OK) { - hs->next_proto_neg_seen = 0; - return 1; + hs->next_proto_neg_seen = false; + return true; } CBB contents; @@ -1282,89 +1214,92 @@ static int ext_npn_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_bytes(&contents, npa, npa_len) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -/* Signed certificate timestamps. - * - * https://tools.ietf.org/html/rfc6962#section-3.3.1 */ +// Signed certificate timestamps. 
+// +// https://tools.ietf.org/html/rfc6962#section-3.3.1 -static int ext_sct_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_sct_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (!ssl->signed_cert_timestamps_enabled) { - return 1; + return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_certificate_timestamp) || !CBB_add_u16(out, 0 /* length */)) { - return 0; + return false; } - return 1; + return true; } -static int ext_sct_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_sct_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } - /* TLS 1.3 SCTs are included in the Certificate extensions. */ - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { + // TLS 1.3 SCTs are included in the Certificate extensions. + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } - /* If this is false then we should never have sent the SCT extension in the - * ClientHello and thus this function should never have been called. */ + // If this is false then we should never have sent the SCT extension in the + // ClientHello and thus this function should never have been called. assert(ssl->signed_cert_timestamps_enabled); if (!ssl_is_sct_list_valid(contents)) { *out_alert = SSL_AD_DECODE_ERROR; - return 0; - } - - /* Session resumption uses the original session information. The extension - * should not be sent on resumption, but RFC 6962 did not make it a - * requirement, so tolerate this. - * - * TODO(davidben): Enforce this anyway. */ - if (!ssl->s3->session_reused && - !CBS_stow(contents, &hs->new_session->tlsext_signed_cert_timestamp_list, - &hs->new_session->tlsext_signed_cert_timestamp_list_length)) { - *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; + return false; + } + + // Session resumption uses the original session information. The extension + // should not be sent on resumption, but RFC 6962 did not make it a + // requirement, so tolerate this. + // + // TODO(davidben): Enforce this anyway. + if (!ssl->s3->session_reused) { + CRYPTO_BUFFER_free(hs->new_session->signed_cert_timestamp_list); + hs->new_session->signed_cert_timestamp_list = + CRYPTO_BUFFER_new_from_CBS(contents, ssl->ctx->pool); + if (hs->new_session->signed_cert_timestamp_list == nullptr) { + *out_alert = SSL_AD_INTERNAL_ERROR; + return false; + } } - return 1; + return true; } -static int ext_sct_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_sct_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { if (contents == NULL) { - return 1; + return true; } if (CBS_len(contents) != 0) { - return 0; + return false; } - hs->scts_requested = 1; - return 1; + hs->scts_requested = true; + return true; } -static int ext_sct_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_sct_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - /* The extension shouldn't be sent when resuming sessions. */ - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION || + // The extension shouldn't be sent when resuming sessions. 
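// Shape of the SignedCertificateTimestampList these handlers send and
// validate (RFC 6962, section 3.3): a u16 total length followed by one or
// more u16-length-prefixed SCTs, none of them empty and with no trailing
// bytes. A standalone shape check over a raw buffer, assuming |in| holds just
// the extension body; the helper name is illustrative:
#include <stdint.h>
#include <stddef.h>

static bool sct_list_shape_ok(const uint8_t *in, size_t in_len) {
  if (in_len < 2) {
    return false;
  }
  size_t list_len = ((size_t)in[0] << 8) | in[1];
  if (list_len == 0 || list_len != in_len - 2) {
    return false;  // empty list or trailing bytes
  }
  size_t i = 2;
  while (i < in_len) {
    if (in_len - i < 2) {
      return false;
    }
    size_t sct_len = ((size_t)in[i] << 8) | in[i + 1];
    i += 2;
    if (sct_len == 0 || sct_len > in_len - i) {
      return false;  // empty or truncated SCT
    }
    i += sct_len;
  }
  return true;
}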
+ if (ssl_protocol_version(ssl) >= TLS1_3_VERSION || ssl->s3->session_reused || ssl->cert->signed_cert_timestamp_list == NULL) { - return 1; + return true; } CBB contents; @@ -1378,15 +1313,15 @@ static int ext_sct_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { } -/* Application-level Protocol Negotiation. - * - * https://tools.ietf.org/html/rfc7301 */ +// Application-level Protocol Negotiation. +// +// https://tools.ietf.org/html/rfc7301 -static int ext_alpn_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_alpn_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (ssl->alpn_client_proto_list == NULL || ssl->s3->initial_handshake_complete) { - return 1; + return true; } CBB contents, proto_list; @@ -1396,91 +1331,97 @@ static int ext_alpn_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_bytes(&proto_list, ssl->alpn_client_proto_list, ssl->alpn_client_proto_list_len) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -static int ext_alpn_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_alpn_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } assert(!ssl->s3->initial_handshake_complete); assert(ssl->alpn_client_proto_list != NULL); if (hs->next_proto_neg_seen) { - /* NPN and ALPN may not be negotiated in the same connection. */ + // NPN and ALPN may not be negotiated in the same connection. *out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_NEGOTIATED_BOTH_NPN_AND_ALPN); - return 0; + return false; } - /* The extension data consists of a ProtocolNameList which must have - * exactly one ProtocolName. Each of these is length-prefixed. */ + // The extension data consists of a ProtocolNameList which must have + // exactly one ProtocolName. Each of these is length-prefixed. CBS protocol_name_list, protocol_name; if (!CBS_get_u16_length_prefixed(contents, &protocol_name_list) || CBS_len(contents) != 0 || !CBS_get_u8_length_prefixed(&protocol_name_list, &protocol_name) || - /* Empty protocol names are forbidden. */ + // Empty protocol names are forbidden. CBS_len(&protocol_name) == 0 || CBS_len(&protocol_name_list) != 0) { - return 0; + return false; + } + + if (!ssl_is_alpn_protocol_allowed(ssl, protocol_name)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ALPN_PROTOCOL); + *out_alert = SSL_AD_ILLEGAL_PARAMETER; + return false; + } + + if (!ssl->s3->alpn_selected.CopyFrom(protocol_name)) { + *out_alert = SSL_AD_INTERNAL_ERROR; + return false; + } + + return true; +} + +bool ssl_is_alpn_protocol_allowed(const SSL *ssl, + Span protocol) { + if (ssl->alpn_client_proto_list == nullptr) { + return false; } - /* Check that the protcol name is one of the ones we advertised. */ - int protocol_ok = 0; + if (ssl->ctx->allow_unknown_alpn_protos) { + return true; + } + + // Check that the protocol name is one of the ones we advertised. 
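// The client's list (ssl->alpn_client_proto_list) is kept in ALPN wire format
// (RFC 7301): a sequence of entries, each a 1-byte length followed by the
// protocol name. A standalone sketch of the membership test performed by the
// loop below, using plain byte buffers instead of CBS; the helper name is
// illustrative:
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static bool alpn_list_contains(const uint8_t *list, size_t list_len,
                               const uint8_t *proto, size_t proto_len) {
  size_t i = 0;
  while (i < list_len) {
    size_t entry_len = list[i++];  // 1-byte length prefix
    if (entry_len == 0 || entry_len > list_len - i) {
      return false;  // empty names and truncated entries are malformed
    }
    if (entry_len == proto_len && memcmp(list + i, proto, proto_len) == 0) {
      return true;
    }
    i += entry_len;
  }
  return false;
}
// Example: {0x02,'h','2', 0x08,'h','t','t','p','/','1','.','1'} contains
// "h2" and "http/1.1".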
CBS client_protocol_name_list, client_protocol_name; CBS_init(&client_protocol_name_list, ssl->alpn_client_proto_list, ssl->alpn_client_proto_list_len); while (CBS_len(&client_protocol_name_list) > 0) { if (!CBS_get_u8_length_prefixed(&client_protocol_name_list, &client_protocol_name)) { - *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; + return false; } - if (CBS_len(&client_protocol_name) == CBS_len(&protocol_name) && - OPENSSL_memcmp(CBS_data(&client_protocol_name), - CBS_data(&protocol_name), - CBS_len(&protocol_name)) == 0) { - protocol_ok = 1; - break; + if (client_protocol_name == protocol) { + return true; } } - if (!protocol_ok) { - OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_ALPN_PROTOCOL); - *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; - } - - if (!CBS_stow(&protocol_name, &ssl->s3->alpn_selected, - &ssl->s3->alpn_selected_len)) { - *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; - } - - return 1; + return false; } -int ssl_negotiate_alpn(SSL_HANDSHAKE *hs, uint8_t *out_alert, - const SSL_CLIENT_HELLO *client_hello) { +bool ssl_negotiate_alpn(SSL_HANDSHAKE *hs, uint8_t *out_alert, + const SSL_CLIENT_HELLO *client_hello) { SSL *const ssl = hs->ssl; CBS contents; if (ssl->ctx->alpn_select_cb == NULL || !ssl_client_hello_get_extension( client_hello, &contents, TLSEXT_TYPE_application_layer_protocol_negotiation)) { - /* Ignore ALPN if not configured or no extension was supplied. */ - return 1; + // Ignore ALPN if not configured or no extension was supplied. + return true; } - /* ALPN takes precedence over NPN. */ - hs->next_proto_neg_seen = 0; + // ALPN takes precedence over NPN. + hs->next_proto_neg_seen = false; CBS protocol_name_list; if (!CBS_get_u16_length_prefixed(&contents, &protocol_name_list) || @@ -1488,20 +1429,20 @@ int ssl_negotiate_alpn(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS_len(&protocol_name_list) < 2) { OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } - /* Validate the protocol list. */ + // Validate the protocol list. CBS protocol_name_list_copy = protocol_name_list; while (CBS_len(&protocol_name_list_copy) > 0) { CBS protocol_name; if (!CBS_get_u8_length_prefixed(&protocol_name_list_copy, &protocol_name) || - /* Empty protocol names are forbidden. */ + // Empty protocol names are forbidden. 
CBS_len(&protocol_name) == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } } @@ -1511,22 +1452,20 @@ int ssl_negotiate_alpn(SSL_HANDSHAKE *hs, uint8_t *out_alert, ssl, &selected, &selected_len, CBS_data(&protocol_name_list), CBS_len(&protocol_name_list), ssl->ctx->alpn_select_cb_arg) == SSL_TLSEXT_ERR_OK) { - OPENSSL_free(ssl->s3->alpn_selected); - ssl->s3->alpn_selected = BUF_memdup(selected, selected_len); - if (ssl->s3->alpn_selected == NULL) { + if (!ssl->s3->alpn_selected.CopyFrom( + MakeConstSpan(selected, selected_len))) { *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; + return false; } - ssl->s3->alpn_selected_len = selected_len; } - return 1; + return true; } -static int ext_alpn_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_alpn_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - if (ssl->s3->alpn_selected == NULL) { - return 1; + if (ssl->s3->alpn_selected.empty()) { + return true; } CBB contents, proto_list, proto; @@ -1534,142 +1473,140 @@ static int ext_alpn_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u16_length_prefixed(&contents, &proto_list) || !CBB_add_u8_length_prefixed(&proto_list, &proto) || - !CBB_add_bytes(&proto, ssl->s3->alpn_selected, - ssl->s3->alpn_selected_len) || + !CBB_add_bytes(&proto, ssl->s3->alpn_selected.data(), + ssl->s3->alpn_selected.size()) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -/* Channel ID. - * - * https://tools.ietf.org/html/draft-balfanz-tls-channelid-01 */ +// Channel ID. +// +// https://tools.ietf.org/html/draft-balfanz-tls-channelid-01 static void ext_channel_id_init(SSL_HANDSHAKE *hs) { - hs->ssl->s3->tlsext_channel_id_valid = 0; + hs->ssl->s3->tlsext_channel_id_valid = false; } -static int ext_channel_id_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_channel_id_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (!ssl->tlsext_channel_id_enabled || SSL_is_dtls(ssl)) { - return 1; + return true; } if (!CBB_add_u16(out, TLSEXT_TYPE_channel_id) || !CBB_add_u16(out, 0 /* length */)) { - return 0; + return false; } - return 1; + return true; } -static int ext_channel_id_parse_serverhello(SSL_HANDSHAKE *hs, - uint8_t *out_alert, CBS *contents) { +static bool ext_channel_id_parse_serverhello(SSL_HANDSHAKE *hs, + uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } assert(!SSL_is_dtls(ssl)); assert(ssl->tlsext_channel_id_enabled); if (CBS_len(contents) != 0) { - return 0; + return false; } - ssl->s3->tlsext_channel_id_valid = 1; - return 1; + ssl->s3->tlsext_channel_id_valid = true; + return true; } -static int ext_channel_id_parse_clienthello(SSL_HANDSHAKE *hs, - uint8_t *out_alert, CBS *contents) { +static bool ext_channel_id_parse_clienthello(SSL_HANDSHAKE *hs, + uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL || !ssl->tlsext_channel_id_enabled || SSL_is_dtls(ssl)) { - return 1; + return true; } if (CBS_len(contents) != 0) { - return 0; + return false; } - ssl->s3->tlsext_channel_id_valid = 1; - return 1; + ssl->s3->tlsext_channel_id_valid = true; + return true; } -static int ext_channel_id_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_channel_id_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (!ssl->s3->tlsext_channel_id_valid) { - return 1; + return 
true; } if (!CBB_add_u16(out, TLSEXT_TYPE_channel_id) || !CBB_add_u16(out, 0 /* length */)) { - return 0; + return false; } - return 1; + return true; } -/* Secure Real-time Transport Protocol (SRTP) extension. - * - * https://tools.ietf.org/html/rfc5764 */ +// Secure Real-time Transport Protocol (SRTP) extension. +// +// https://tools.ietf.org/html/rfc5764 static void ext_srtp_init(SSL_HANDSHAKE *hs) { hs->ssl->srtp_profile = NULL; } -static int ext_srtp_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_srtp_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; STACK_OF(SRTP_PROTECTION_PROFILE) *profiles = SSL_get_srtp_profiles(ssl); - if (profiles == NULL) { - return 1; - } - const size_t num_profiles = sk_SRTP_PROTECTION_PROFILE_num(profiles); - if (num_profiles == 0) { - return 1; + if (profiles == NULL || + sk_SRTP_PROTECTION_PROFILE_num(profiles) == 0) { + return true; } CBB contents, profile_ids; if (!CBB_add_u16(out, TLSEXT_TYPE_srtp) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u16_length_prefixed(&contents, &profile_ids)) { - return 0; + return false; } - for (size_t i = 0; i < num_profiles; i++) { - if (!CBB_add_u16(&profile_ids, - sk_SRTP_PROTECTION_PROFILE_value(profiles, i)->id)) { - return 0; + for (const SRTP_PROTECTION_PROFILE *profile : profiles) { + if (!CBB_add_u16(&profile_ids, profile->id)) { + return false; } } if (!CBB_add_u8(&contents, 0 /* empty use_mki value */) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -static int ext_srtp_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_srtp_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } - /* The extension consists of a u16-prefixed profile ID list containing a - * single uint16_t profile ID, then followed by a u8-prefixed srtp_mki field. - * - * See https://tools.ietf.org/html/rfc5764#section-4.1.1 */ + // The extension consists of a u16-prefixed profile ID list containing a + // single uint16_t profile ID, then followed by a u8-prefixed srtp_mki field. + // + // See https://tools.ietf.org/html/rfc5764#section-4.1.1 CBS profile_ids, srtp_mki; uint16_t profile_id; if (!CBS_get_u16_length_prefixed(contents, &profile_ids) || @@ -1678,40 +1615,37 @@ static int ext_srtp_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, !CBS_get_u8_length_prefixed(contents, &srtp_mki) || CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST); - return 0; + return false; } if (CBS_len(&srtp_mki) != 0) { - /* Must be no MKI, since we never offer one. */ + // Must be no MKI, since we never offer one. OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SRTP_MKI_VALUE); *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; + return false; } STACK_OF(SRTP_PROTECTION_PROFILE) *profiles = SSL_get_srtp_profiles(ssl); - /* Check to see if the server gave us something we support (and presumably - * offered). */ - for (size_t i = 0; i < sk_SRTP_PROTECTION_PROFILE_num(profiles); i++) { - const SRTP_PROTECTION_PROFILE *profile = - sk_SRTP_PROTECTION_PROFILE_value(profiles, i); - + // Check to see if the server gave us something we support (and presumably + // offered). 
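// Wire shape of the use_srtp ServerHello body handled here (RFC 5764,
// section 4.1.1): a u16 profile-ID list that must contain exactly one u16
// profile, then a u8-prefixed MKI, which must be empty since we never offer
// one. A standalone sketch over a raw buffer, assuming the extension
// type/length header has already been stripped; the helper name is
// illustrative:
#include <stdint.h>
#include <stddef.h>

static bool parse_srtp_serverhello(const uint8_t *in, size_t in_len,
                                   uint16_t *out_profile_id) {
  if (in_len < 5) {
    return false;
  }
  size_t ids_len = ((size_t)in[0] << 8) | in[1];
  size_t mki_len = in[4];
  if (ids_len != 2 || mki_len != 0 || in_len != 5) {
    return false;  // exactly one profile ID and no MKI
  }
  *out_profile_id = (uint16_t)((in[2] << 8) | in[3]);
  return true;
}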
+ for (const SRTP_PROTECTION_PROFILE *profile : profiles) { if (profile->id == profile_id) { ssl->srtp_profile = profile; - return 1; + return true; } } OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST); *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; + return false; } -static int ext_srtp_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_srtp_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } CBS profile_ids, srtp_mki; @@ -1720,41 +1654,38 @@ static int ext_srtp_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, !CBS_get_u8_length_prefixed(contents, &srtp_mki) || CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SRTP_PROTECTION_PROFILE_LIST); - return 0; + return false; } - /* Discard the MKI value for now. */ + // Discard the MKI value for now. const STACK_OF(SRTP_PROTECTION_PROFILE) *server_profiles = SSL_get_srtp_profiles(ssl); - /* Pick the server's most preferred profile. */ - for (size_t i = 0; i < sk_SRTP_PROTECTION_PROFILE_num(server_profiles); i++) { - const SRTP_PROTECTION_PROFILE *server_profile = - sk_SRTP_PROTECTION_PROFILE_value(server_profiles, i); - + // Pick the server's most preferred profile. + for (const SRTP_PROTECTION_PROFILE *server_profile : server_profiles) { CBS profile_ids_tmp; CBS_init(&profile_ids_tmp, CBS_data(&profile_ids), CBS_len(&profile_ids)); while (CBS_len(&profile_ids_tmp) > 0) { uint16_t profile_id; if (!CBS_get_u16(&profile_ids_tmp, &profile_id)) { - return 0; + return false; } if (server_profile->id == profile_id) { ssl->srtp_profile = server_profile; - return 1; + return true; } } } - return 1; + return true; } -static int ext_srtp_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_srtp_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; if (ssl->srtp_profile == NULL) { - return 1; + return true; } CBB contents, profile_ids; @@ -1764,159 +1695,135 @@ static int ext_srtp_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_u16(&profile_ids, ssl->srtp_profile->id) || !CBB_add_u8(&contents, 0 /* empty MKI */) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -/* EC point formats. - * - * https://tools.ietf.org/html/rfc4492#section-5.1.2 */ +// EC point formats. +// +// https://tools.ietf.org/html/rfc4492#section-5.1.2 -static int ext_ec_point_add_extension(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_ec_point_add_extension(SSL_HANDSHAKE *hs, CBB *out) { CBB contents, formats; if (!CBB_add_u16(out, TLSEXT_TYPE_ec_point_formats) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u8_length_prefixed(&contents, &formats) || !CBB_add_u8(&formats, TLSEXT_ECPOINTFORMAT_uncompressed) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -static int ext_ec_point_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { - uint16_t min_version, max_version; - if (!ssl_get_version_range(hs->ssl, &min_version, &max_version)) { - return 0; - } - - /* The point format extension is unneccessary in TLS 1.3. */ - if (min_version >= TLS1_3_VERSION) { - return 1; +static bool ext_ec_point_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { + // The point format extension is unneccessary in TLS 1.3. 
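// For context on TLSEXT_ECPOINTFORMAT_uncompressed advertised above (RFC 4492
// / SEC 1): an uncompressed point is encoded as 0x04 || X || Y, so its size is
// 1 + 2 * field_element_len bytes. A trivial illustrative sketch:
#include <stddef.h>

static size_t uncompressed_point_len(size_t field_element_len) {
  return 1 + 2 * field_element_len;  // leading 0x04, then X and Y coordinates
}
// uncompressed_point_len(32) == 65 for P-256; (48) == 97 for P-384.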
+ if (hs->min_version >= TLS1_3_VERSION) { + return true; } return ext_ec_point_add_extension(hs, out); } -static int ext_ec_point_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { +static bool ext_ec_point_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t *out_alert, + CBS *contents) { if (contents == NULL) { - return 1; + return true; } - if (ssl3_protocol_version(hs->ssl) >= TLS1_3_VERSION) { - return 0; + if (ssl_protocol_version(hs->ssl) >= TLS1_3_VERSION) { + return false; } CBS ec_point_format_list; if (!CBS_get_u8_length_prefixed(contents, &ec_point_format_list) || CBS_len(contents) != 0) { - return 0; + return false; } - /* Per RFC 4492, section 5.1.2, implementations MUST support the uncompressed - * point format. */ + // Per RFC 4492, section 5.1.2, implementations MUST support the uncompressed + // point format. if (OPENSSL_memchr(CBS_data(&ec_point_format_list), TLSEXT_ECPOINTFORMAT_uncompressed, CBS_len(&ec_point_format_list)) == NULL) { *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; + return false; } - return 1; + return true; } -static int ext_ec_point_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, +static bool ext_ec_point_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, CBS *contents) { - if (ssl3_protocol_version(hs->ssl) >= TLS1_3_VERSION) { - return 1; + if (ssl_protocol_version(hs->ssl) >= TLS1_3_VERSION) { + return true; } return ext_ec_point_parse_serverhello(hs, out_alert, contents); } -static int ext_ec_point_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_ec_point_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - return 1; + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + return true; } const uint32_t alg_k = hs->new_cipher->algorithm_mkey; const uint32_t alg_a = hs->new_cipher->algorithm_auth; - const int using_ecc = (alg_k & SSL_kECDHE) || (alg_a & SSL_aECDSA); + const bool using_ecc = (alg_k & SSL_kECDHE) || (alg_a & SSL_aECDSA); if (!using_ecc) { - return 1; + return true; } return ext_ec_point_add_extension(hs, out); } -/* Pre Shared Key - * - * https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.2.6 */ +// Pre Shared Key +// +// https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.2.6 static size_t ext_pre_shared_key_clienthello_length(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; - } - - uint16_t session_version; - if (max_version < TLS1_3_VERSION || ssl->session == NULL || - !ssl->method->version_from_wire(&session_version, - ssl->session->ssl_version) || - session_version < TLS1_3_VERSION) { - return 0; - } - - const EVP_MD *digest = SSL_SESSION_get_digest(ssl->session, ssl); - if (digest == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + if (hs->max_version < TLS1_3_VERSION || ssl->session == NULL || + ssl_session_protocol_version(ssl->session) < TLS1_3_VERSION) { return 0; } - size_t binder_len = EVP_MD_size(digest); + size_t binder_len = EVP_MD_size(ssl_session_get_digest(ssl->session)); return 15 + ssl->session->tlsext_ticklen + binder_len; } -static int ext_pre_shared_key_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_pre_shared_key_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; + hs->needs_psk_binder = false; + 
if (hs->max_version < TLS1_3_VERSION || ssl->session == NULL || + ssl_session_protocol_version(ssl->session) < TLS1_3_VERSION) { + return true; } - uint16_t session_version; - if (max_version < TLS1_3_VERSION || ssl->session == NULL || - !ssl->method->version_from_wire(&session_version, - ssl->session->ssl_version) || - session_version < TLS1_3_VERSION) { - return 1; + // Per draft-ietf-tls-tls13-21 section 4.1.4, skip offering the session if the + // selected cipher in HelloRetryRequest does not match. This avoids performing + // the transcript hash transformation for multiple hashes. + if (hs->received_hello_retry_request && + ssl_is_draft21(ssl->version) && + ssl->session->cipher->algorithm_prf != hs->new_cipher->algorithm_prf) { + return true; } - struct timeval now; + struct OPENSSL_timeval now; ssl_get_current_time(ssl, &now); uint32_t ticket_age = 1000 * (now.tv_sec - ssl->session->time); uint32_t obfuscated_ticket_age = ticket_age + ssl->session->ticket_age_add; - /* Fill in a placeholder zero binder of the appropriate length. It will be - * computed and filled in later after length prefixes are computed. */ + // Fill in a placeholder zero binder of the appropriate length. It will be + // computed and filled in later after length prefixes are computed. uint8_t zero_binder[EVP_MAX_MD_SIZE] = {0}; - - const EVP_MD *digest = SSL_SESSION_get_digest(ssl->session, ssl); - if (digest == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - size_t binder_len = EVP_MD_size(digest); + size_t binder_len = EVP_MD_size(ssl_session_get_digest(ssl->session)); CBB contents, identity, ticket, binders, binder; if (!CBB_add_u16(out, TLSEXT_TYPE_pre_shared_key) || @@ -1929,57 +1836,53 @@ static int ext_pre_shared_key_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_u16_length_prefixed(&contents, &binders) || !CBB_add_u8_length_prefixed(&binders, &binder) || !CBB_add_bytes(&binder, zero_binder, binder_len)) { - return 0; + return false; } - hs->needs_psk_binder = 1; + hs->needs_psk_binder = true; return CBB_flush(out); } -int ssl_ext_pre_shared_key_parse_serverhello(SSL_HANDSHAKE *hs, - uint8_t *out_alert, - CBS *contents) { +bool ssl_ext_pre_shared_key_parse_serverhello(SSL_HANDSHAKE *hs, + uint8_t *out_alert, + CBS *contents) { uint16_t psk_id; if (!CBS_get_u16(contents, &psk_id) || CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } - /* We only advertise one PSK identity, so the only legal index is zero. */ + // We only advertise one PSK identity, so the only legal index is zero. if (psk_id != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_NOT_FOUND); *out_alert = SSL_AD_UNKNOWN_PSK_IDENTITY; - return 0; + return false; } - return 1; + return true; } -int ssl_ext_pre_shared_key_parse_clienthello(SSL_HANDSHAKE *hs, - SSL_SESSION **out_session, - CBS *out_binders, - uint8_t *out_alert, - CBS *contents) { - SSL *const ssl = hs->ssl; - /* We only process the first PSK identity since we don't support pure PSK. */ - uint32_t obfuscated_ticket_age; - CBS identities, ticket, binders; +bool ssl_ext_pre_shared_key_parse_clienthello( + SSL_HANDSHAKE *hs, CBS *out_ticket, CBS *out_binders, + uint32_t *out_obfuscated_ticket_age, uint8_t *out_alert, CBS *contents) { + // We only process the first PSK identity since we don't support pure PSK. 
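// Layout being built and parsed here (the draft TLS 1.3 pre_shared_key
// extension): a u16-prefixed list of identities, each a u16-prefixed ticket
// plus a u32 obfuscated_ticket_age, followed by a u16-prefixed list of
// u8-prefixed binders. The obfuscated age computed above is plain uint32_t
// modular arithmetic; a minimal sketch, with illustrative helper names:
#include <stdint.h>

static uint32_t obfuscate_ticket_age(uint32_t age_ms, uint32_t ticket_age_add) {
  return age_ms + ticket_age_add;  // wraps mod 2^32, as the server expects
}

static uint32_t recover_ticket_age(uint32_t obfuscated, uint32_t ticket_age_add) {
  return obfuscated - ticket_age_add;  // inverse, also mod 2^32
}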
+ CBS identities, binders; if (!CBS_get_u16_length_prefixed(contents, &identities) || - !CBS_get_u16_length_prefixed(&identities, &ticket) || - !CBS_get_u32(&identities, &obfuscated_ticket_age) || + !CBS_get_u16_length_prefixed(&identities, out_ticket) || + !CBS_get_u32(&identities, out_obfuscated_ticket_age) || !CBS_get_u16_length_prefixed(contents, &binders) || CBS_len(&binders) == 0 || CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } *out_binders = binders; - /* Check the syntax of the remaining identities, but do not process them. */ + // Check the syntax of the remaining identities, but do not process them. size_t num_identities = 1; while (CBS_len(&identities) != 0) { CBS unused_ticket; @@ -1988,21 +1891,21 @@ int ssl_ext_pre_shared_key_parse_clienthello(SSL_HANDSHAKE *hs, !CBS_get_u32(&identities, &unused_obfuscated_ticket_age)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } num_identities++; } - /* Check the syntax of the binders. The value will be checked later if - * resuming. */ + // Check the syntax of the binders. The value will be checked later if + // resuming. size_t num_binders = 0; while (CBS_len(&binders) != 0) { CBS binder; if (!CBS_get_u8_length_prefixed(&binders, &binder)) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } num_binders++; @@ -2011,56 +1914,38 @@ int ssl_ext_pre_shared_key_parse_clienthello(SSL_HANDSHAKE *hs, if (num_identities != num_binders) { OPENSSL_PUT_ERROR(SSL, SSL_R_PSK_IDENTITY_BINDER_COUNT_MISMATCH); *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; - } - - /* TODO(svaldez): Check that the ticket_age is valid when attempting to use - * the PSK for 0-RTT. http://crbug.com/boringssl/113 */ - - /* TLS 1.3 session tickets are renewed separately as part of the - * NewSessionTicket. 
*/ - int unused_renew; - if (!tls_process_ticket(ssl, out_session, &unused_renew, CBS_data(&ticket), - CBS_len(&ticket), NULL, 0)) { - *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; + return false; } - return 1; + return true; } -int ssl_ext_pre_shared_key_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +bool ssl_ext_pre_shared_key_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { if (!hs->ssl->s3->session_reused) { - return 1; + return true; } CBB contents; if (!CBB_add_u16(out, TLSEXT_TYPE_pre_shared_key) || !CBB_add_u16_length_prefixed(out, &contents) || - /* We only consider the first identity for resumption */ + // We only consider the first identity for resumption !CBB_add_u16(&contents, 0) || !CBB_flush(out)) { - return 0; + return false; } - return 1; + return true; } -/* Pre-Shared Key Exchange Modes - * - * https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.2.7 */ +// Pre-Shared Key Exchange Modes +// +// https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.2.7 -static int ext_psk_key_exchange_modes_add_clienthello(SSL_HANDSHAKE *hs, - CBB *out) { - SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; - } - - if (max_version < TLS1_3_VERSION) { - return 1; +static bool ext_psk_key_exchange_modes_add_clienthello(SSL_HANDSHAKE *hs, + CBB *out) { + if (hs->max_version < TLS1_3_VERSION) { + return true; } CBB contents, ke_modes; @@ -2068,17 +1953,17 @@ static int ext_psk_key_exchange_modes_add_clienthello(SSL_HANDSHAKE *hs, !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u8_length_prefixed(&contents, &ke_modes) || !CBB_add_u8(&ke_modes, SSL_PSK_DHE_KE)) { - return 0; + return false; } return CBB_flush(out); } -static int ext_psk_key_exchange_modes_parse_clienthello(SSL_HANDSHAKE *hs, - uint8_t *out_alert, - CBS *contents) { +static bool ext_psk_key_exchange_modes_parse_clienthello(SSL_HANDSHAKE *hs, + uint8_t *out_alert, + CBS *contents) { if (contents == NULL) { - return 1; + return true; } CBS ke_modes; @@ -2086,234 +1971,269 @@ static int ext_psk_key_exchange_modes_parse_clienthello(SSL_HANDSHAKE *hs, CBS_len(&ke_modes) == 0 || CBS_len(contents) != 0) { *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } - /* We only support tickets with PSK_DHE_KE. */ + // We only support tickets with PSK_DHE_KE. hs->accept_psk_mode = OPENSSL_memchr(CBS_data(&ke_modes), SSL_PSK_DHE_KE, CBS_len(&ke_modes)) != NULL; - return 1; + return true; } -/* Early Data Indication - * - * https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.2.8 */ +// Early Data Indication +// +// https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-4.2.8 -static int ext_early_data_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { - /* TODO(svaldez): Support 0RTT. */ - return 1; +static bool ext_early_data_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { + SSL *const ssl = hs->ssl; + if (!ssl->cert->enable_early_data || + // Session must be 0-RTT capable. + ssl->session == NULL || + ssl_session_protocol_version(ssl->session) < TLS1_3_VERSION || + ssl->session->ticket_max_early_data == 0 || + // The second ClientHello never offers early data. + hs->received_hello_retry_request || + // In case ALPN preferences changed since this session was established, + // avoid reporting a confusing value in |SSL_get0_alpn_selected|. 
+ (ssl->session->early_alpn_len != 0 && + !ssl_is_alpn_protocol_allowed( + ssl, MakeConstSpan(ssl->session->early_alpn, + ssl->session->early_alpn_len)))) { + return true; + } + + hs->early_data_offered = true; + + if (!CBB_add_u16(out, TLSEXT_TYPE_early_data) || + !CBB_add_u16(out, 0) || + !CBB_flush(out)) { + return false; + } + + return true; } -static int ext_early_data_parse_clienthello(SSL_HANDSHAKE *hs, - uint8_t *out_alert, CBS *contents) { +static bool ext_early_data_parse_serverhello(SSL_HANDSHAKE *hs, + uint8_t *out_alert, CBS *contents) { SSL *const ssl = hs->ssl; if (contents == NULL) { - return 1; + return true; } if (CBS_len(contents) != 0) { *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } - /* Since we don't currently accept 0-RTT, we have to skip past any early data - * the client might have sent. */ - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - ssl->s3->skip_early_data = 1; + if (!ssl->s3->session_reused) { + *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); + return false; } - return 1; + + ssl->early_data_accepted = true; + return true; } +static bool ext_early_data_parse_clienthello(SSL_HANDSHAKE *hs, + uint8_t *out_alert, CBS *contents) { + SSL *const ssl = hs->ssl; + if (contents == NULL || + ssl_protocol_version(ssl) < TLS1_3_VERSION) { + return true; + } -/* Key Share - * - * https://tools.ietf.org/html/draft-ietf-tls-tls13-16#section-4.2.5 */ + if (CBS_len(contents) != 0) { + *out_alert = SSL_AD_DECODE_ERROR; + return false; + } -static int ext_key_share_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { - SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; + hs->early_data_offered = true; + return true; +} + +static bool ext_early_data_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { + if (!hs->ssl->early_data_accepted) { + return true; } - if (max_version < TLS1_3_VERSION) { - return 1; + if (!CBB_add_u16(out, TLSEXT_TYPE_early_data) || + !CBB_add_u16(out, 0) || + !CBB_flush(out)) { + return false; + } + + return true; +} + + +// Key Share +// +// https://tools.ietf.org/html/draft-ietf-tls-tls13-16#section-4.2.5 + +static bool ext_key_share_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { + SSL *const ssl = hs->ssl; + if (hs->max_version < TLS1_3_VERSION) { + return true; } CBB contents, kse_bytes; if (!CBB_add_u16(out, TLSEXT_TYPE_key_share) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u16_length_prefixed(&contents, &kse_bytes)) { - return 0; + return false; } uint16_t group_id = hs->retry_group; if (hs->received_hello_retry_request) { - /* We received a HelloRetryRequest without a new curve, so there is no new - * share to append. Leave |ecdh_ctx| as-is. */ + // We received a HelloRetryRequest without a new curve, so there is no new + // share to append. Leave |hs->key_share| as-is. if (group_id == 0 && - !CBB_add_bytes(&kse_bytes, hs->key_share_bytes, - hs->key_share_bytes_len)) { - return 0; + !CBB_add_bytes(&kse_bytes, hs->key_share_bytes.data(), + hs->key_share_bytes.size())) { + return false; } - OPENSSL_free(hs->key_share_bytes); - hs->key_share_bytes = NULL; - hs->key_share_bytes_len = 0; + hs->key_share_bytes.Reset(); if (group_id == 0) { return CBB_flush(out); } } else { - /* Add a fake group. See draft-davidben-tls-grease-01. */ + // Add a fake group. See draft-davidben-tls-grease-01. 
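// GREASE values (draft-davidben-tls-grease, later published as RFC 8701) are
// the sixteen code points 0x0A0A, 0x1A1A, ..., 0xFAFA: both bytes equal with
// low nibble 0xA, so they can never collide with a real group, version, or
// extension. An illustrative sketch of deriving one from a random byte,
// independent of ssl_get_grease_value():
#include <stdint.h>

static uint16_t grease_value_from_byte(uint8_t r) {
  return (uint16_t)(0x0A0A + 0x1010 * (r & 0x0F));  // e.g. r = 0x03 -> 0x3A3A
}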
if (ssl->ctx->grease_enabled && (!CBB_add_u16(&kse_bytes, ssl_get_grease_value(ssl, ssl_grease_group)) || !CBB_add_u16(&kse_bytes, 1 /* length */) || !CBB_add_u8(&kse_bytes, 0 /* one byte key share */))) { - return 0; + return false; } - /* Predict the most preferred group. */ - const uint16_t *groups; - size_t groups_len; - tls1_get_grouplist(ssl, &groups, &groups_len); - if (groups_len == 0) { + // Predict the most preferred group. + Span groups = tls1_get_grouplist(ssl); + if (groups.empty()) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_GROUPS_SPECIFIED); - return 0; + return false; } group_id = groups[0]; } + hs->key_share = SSLKeyShare::Create(group_id); CBB key_exchange; - if (!CBB_add_u16(&kse_bytes, group_id) || + if (!hs->key_share || + !CBB_add_u16(&kse_bytes, group_id) || !CBB_add_u16_length_prefixed(&kse_bytes, &key_exchange) || - !SSL_ECDH_CTX_init(&hs->ecdh_ctx, group_id) || - !SSL_ECDH_CTX_offer(&hs->ecdh_ctx, &key_exchange) || + !hs->key_share->Offer(&key_exchange) || !CBB_flush(&kse_bytes)) { - return 0; + return false; } - if (!hs->received_hello_retry_request) { - /* Save the contents of the extension to repeat it in the second - * ClientHello. */ - hs->key_share_bytes_len = CBB_len(&kse_bytes); - hs->key_share_bytes = BUF_memdup(CBB_data(&kse_bytes), CBB_len(&kse_bytes)); - if (hs->key_share_bytes == NULL) { - return 0; - } + // Save the contents of the extension to repeat it in the second ClientHello. + if (!hs->received_hello_retry_request && + !hs->key_share_bytes.CopyFrom( + MakeConstSpan(CBB_data(&kse_bytes), CBB_len(&kse_bytes)))) { + return false; } return CBB_flush(out); } -int ssl_ext_key_share_parse_serverhello(SSL_HANDSHAKE *hs, uint8_t **out_secret, - size_t *out_secret_len, - uint8_t *out_alert, CBS *contents) { +bool ssl_ext_key_share_parse_serverhello(SSL_HANDSHAKE *hs, + Array *out_secret, + uint8_t *out_alert, CBS *contents) { CBS peer_key; uint16_t group_id; if (!CBS_get_u16(contents, &group_id) || !CBS_get_u16_length_prefixed(contents, &peer_key) || CBS_len(contents) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); *out_alert = SSL_AD_DECODE_ERROR; - return 0; + return false; } - if (SSL_ECDH_CTX_get_id(&hs->ecdh_ctx) != group_id) { + if (hs->key_share->GroupID() != group_id) { *out_alert = SSL_AD_ILLEGAL_PARAMETER; OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); - return 0; + return false; } - if (!SSL_ECDH_CTX_finish(&hs->ecdh_ctx, out_secret, out_secret_len, out_alert, - CBS_data(&peer_key), CBS_len(&peer_key))) { + if (!hs->key_share->Finish(out_secret, out_alert, peer_key)) { *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; + return false; } hs->new_session->group_id = group_id; - SSL_ECDH_CTX_cleanup(&hs->ecdh_ctx); - return 1; + hs->key_share.reset(); + return true; } -int ssl_ext_key_share_parse_clienthello(SSL_HANDSHAKE *hs, int *out_found, - uint8_t **out_secret, - size_t *out_secret_len, - uint8_t *out_alert, CBS *contents) { +bool ssl_ext_key_share_parse_clienthello(SSL_HANDSHAKE *hs, bool *out_found, + Array *out_secret, + uint8_t *out_alert, CBS *contents) { uint16_t group_id; CBS key_shares; if (!tls1_get_shared_group(hs, &group_id)) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SHARED_GROUP); *out_alert = SSL_AD_HANDSHAKE_FAILURE; - return 0; + return false; } if (!CBS_get_u16_length_prefixed(contents, &key_shares) || CBS_len(contents) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return 0; + return false; } - /* Find the corresponding key share. */ - int found = 0; + // Find the corresponding key share. 
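// client_shares layout scanned below (the draft TLS 1.3 key_share extension):
// repeated entries of { u16 group_id, u16 key_exchange length, key bytes },
// after the outer u16 list length has been stripped. A standalone sketch of
// locating the share for one group in a raw buffer; the real loop below also
// rejects duplicate entries for the selected group. Helper name illustrative:
#include <stdint.h>
#include <stddef.h>

static bool find_key_share(const uint8_t *shares, size_t len, uint16_t group,
                           size_t *out_off, size_t *out_len) {
  size_t i = 0;
  while (i + 4 <= len) {
    uint16_t id = (uint16_t)((shares[i] << 8) | shares[i + 1]);
    size_t key_len = ((size_t)shares[i + 2] << 8) | shares[i + 3];
    i += 4;
    if (key_len > len - i) {
      return false;  // truncated entry
    }
    if (id == group) {
      *out_off = i;
      *out_len = key_len;
      return true;
    }
    i += key_len;
  }
  return false;
}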
CBS peer_key; + CBS_init(&peer_key, NULL, 0); while (CBS_len(&key_shares) > 0) { uint16_t id; CBS peer_key_tmp; if (!CBS_get_u16(&key_shares, &id) || - !CBS_get_u16_length_prefixed(&key_shares, &peer_key_tmp)) { + !CBS_get_u16_length_prefixed(&key_shares, &peer_key_tmp) || + CBS_len(&peer_key_tmp) == 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return 0; + return false; } if (id == group_id) { - if (found) { + if (CBS_len(&peer_key) != 0) { OPENSSL_PUT_ERROR(SSL, SSL_R_DUPLICATE_KEY_SHARE); *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; + return false; } - found = 1; peer_key = peer_key_tmp; - /* Continue parsing the structure to keep peers honest. */ + // Continue parsing the structure to keep peers honest. } } - if (!found) { - *out_found = 0; - *out_secret = NULL; - *out_secret_len = 0; - return 1; + if (CBS_len(&peer_key) == 0) { + *out_found = false; + out_secret->Reset(); + return true; } - /* Compute the DH secret. */ - uint8_t *secret = NULL; - size_t secret_len; - SSL_ECDH_CTX group; - OPENSSL_memset(&group, 0, sizeof(SSL_ECDH_CTX)); - CBB public_key; - if (!CBB_init(&public_key, 32) || - !SSL_ECDH_CTX_init(&group, group_id) || - !SSL_ECDH_CTX_accept(&group, &public_key, &secret, &secret_len, out_alert, - CBS_data(&peer_key), CBS_len(&peer_key)) || - !CBB_finish(&public_key, &hs->public_key, &hs->public_key_len)) { - OPENSSL_free(secret); - SSL_ECDH_CTX_cleanup(&group); - CBB_cleanup(&public_key); + // Compute the DH secret. + Array secret; + ScopedCBB public_key; + UniquePtr key_share = SSLKeyShare::Create(group_id); + if (!key_share || + !CBB_init(public_key.get(), 32) || + !key_share->Accept(public_key.get(), &secret, out_alert, peer_key) || + !CBBFinishArray(public_key.get(), &hs->ecdh_public_key)) { *out_alert = SSL_AD_ILLEGAL_PARAMETER; - return 0; + return false; } - SSL_ECDH_CTX_cleanup(&group); - - *out_secret = secret; - *out_secret_len = secret_len; - *out_found = 1; - return 1; + *out_secret = std::move(secret); + *out_found = true; + return true; } -int ssl_ext_key_share_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { +bool ssl_ext_key_share_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { uint16_t group_id; CBB kse_bytes, public_key; if (!tls1_get_shared_group(hs, &group_id) || @@ -2321,219 +2241,156 @@ int ssl_ext_key_share_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { !CBB_add_u16_length_prefixed(out, &kse_bytes) || !CBB_add_u16(&kse_bytes, group_id) || !CBB_add_u16_length_prefixed(&kse_bytes, &public_key) || - !CBB_add_bytes(&public_key, hs->public_key, hs->public_key_len) || + !CBB_add_bytes(&public_key, hs->ecdh_public_key.data(), + hs->ecdh_public_key.size()) || !CBB_flush(out)) { - return 0; + return false; } - OPENSSL_free(hs->public_key); - hs->public_key = NULL; - hs->public_key_len = 0; + hs->ecdh_public_key.Reset(); hs->new_session->group_id = group_id; - return 1; + return true; } -/* Supported Versions - * - * https://tools.ietf.org/html/draft-ietf-tls-tls13-16#section-4.2.1 */ +// Supported Versions +// +// https://tools.ietf.org/html/draft-ietf-tls-tls13-16#section-4.2.1 -static int ext_supported_versions_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_supported_versions_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; - } - - if (max_version <= TLS1_2_VERSION) { - return 1; + if (hs->max_version <= TLS1_2_VERSION) { + return true; } CBB contents, versions; if (!CBB_add_u16(out, 
TLSEXT_TYPE_supported_versions) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u8_length_prefixed(&contents, &versions)) { - return 0; + return false; } - /* Add a fake version. See draft-davidben-tls-grease-01. */ + // Add a fake version. See draft-davidben-tls-grease-01. if (ssl->ctx->grease_enabled && !CBB_add_u16(&versions, ssl_get_grease_value(ssl, ssl_grease_version))) { - return 0; - } - - for (uint16_t version = max_version; version >= min_version; version--) { - if (!CBB_add_u16(&versions, ssl->method->version_to_wire(version))) { - return 0; - } + return false; } - if (!CBB_flush(out)) { - return 0; + if (!ssl_add_supported_versions(hs, &versions) || + !CBB_flush(out)) { + return false; } - return 1; + return true; } -/* Cookie - * - * https://tools.ietf.org/html/draft-ietf-tls-tls13-16#section-4.2.2 */ +// Cookie +// +// https://tools.ietf.org/html/draft-ietf-tls-tls13-16#section-4.2.2 -static int ext_cookie_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { - if (hs->cookie == NULL) { - return 1; +static bool ext_cookie_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { + if (hs->cookie.empty()) { + return true; } CBB contents, cookie; if (!CBB_add_u16(out, TLSEXT_TYPE_cookie) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u16_length_prefixed(&contents, &cookie) || - !CBB_add_bytes(&cookie, hs->cookie, hs->cookie_len) || + !CBB_add_bytes(&cookie, hs->cookie.data(), hs->cookie.size()) || !CBB_flush(out)) { - return 0; - } - - /* The cookie is no longer needed in memory. */ - OPENSSL_free(hs->cookie); - hs->cookie = NULL; - hs->cookie_len = 0; - return 1; -} - - -/* Short record headers - * - * This is a non-standard extension which negotiates - * https://github.com/tlswg/tls13-spec/pull/762 for experimenting. */ - -static int ext_short_header_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { - SSL *const ssl = hs->ssl; - uint16_t min_version, max_version; - if (!ssl_get_version_range(ssl, &min_version, &max_version)) { - return 0; - } - - if (max_version < TLS1_3_VERSION || - !ssl->ctx->short_header_enabled) { - return 1; - } - - return CBB_add_u16(out, TLSEXT_TYPE_short_header) && - CBB_add_u16(out, 0 /* empty extension */); -} - -static int ext_short_header_parse_clienthello(SSL_HANDSHAKE *hs, - uint8_t *out_alert, - CBS *contents) { - SSL *const ssl = hs->ssl; - if (contents == NULL || - !ssl->ctx->short_header_enabled || - ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - return 1; - } - - if (CBS_len(contents) != 0) { - return 0; + return false; } - ssl->s3->short_header = 1; - return 1; + // The cookie is no longer needed in memory. + hs->cookie.Reset(); + return true; } -/* Negotiated Groups - * - * https://tools.ietf.org/html/rfc4492#section-5.1.2 - * https://tools.ietf.org/html/draft-ietf-tls-tls13-16#section-4.2.4 */ +// Negotiated Groups +// +// https://tools.ietf.org/html/rfc4492#section-5.1.2 +// https://tools.ietf.org/html/draft-ietf-tls-tls13-16#section-4.2.4 -static int ext_supported_groups_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { +static bool ext_supported_groups_add_clienthello(SSL_HANDSHAKE *hs, CBB *out) { SSL *const ssl = hs->ssl; CBB contents, groups_bytes; if (!CBB_add_u16(out, TLSEXT_TYPE_supported_groups) || !CBB_add_u16_length_prefixed(out, &contents) || !CBB_add_u16_length_prefixed(&contents, &groups_bytes)) { - return 0; + return false; } - /* Add a fake group. See draft-davidben-tls-grease-01. */ + // Add a fake group. See draft-davidben-tls-grease-01. 
if (ssl->ctx->grease_enabled && !CBB_add_u16(&groups_bytes, ssl_get_grease_value(ssl, ssl_grease_group))) { - return 0; + return false; } - const uint16_t *groups; - size_t groups_len; - tls1_get_grouplist(ssl, &groups, &groups_len); - - for (size_t i = 0; i < groups_len; i++) { - if (!CBB_add_u16(&groups_bytes, groups[i])) { - return 0; + for (uint16_t group : tls1_get_grouplist(ssl)) { + if (!CBB_add_u16(&groups_bytes, group)) { + return false; } } return CBB_flush(out); } -static int ext_supported_groups_parse_serverhello(SSL_HANDSHAKE *hs, - uint8_t *out_alert, - CBS *contents) { - /* This extension is not expected to be echoed by servers in TLS 1.2, but some - * BigIP servers send it nonetheless, so do not enforce this. */ +static bool ext_supported_groups_parse_serverhello(SSL_HANDSHAKE *hs, + uint8_t *out_alert, + CBS *contents) { + // This extension is not expected to be echoed by servers in TLS 1.2, but some + // BigIP servers send it nonetheless, so do not enforce this. + return true; +} + +static bool parse_u16_array(const CBS *cbs, Array<uint16_t> *out) { + CBS copy = *cbs; + if ((CBS_len(&copy) & 1) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return false; + } + + Array<uint16_t> ret; + if (!ret.Init(CBS_len(&copy) / 2)) { + return false; + } + for (size_t i = 0; i < ret.size(); i++) { + if (!CBS_get_u16(&copy, &ret[i])) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return false; + } + } + + assert(CBS_len(&copy) == 0); + *out = std::move(ret); return 1; } -static int ext_supported_groups_parse_clienthello(SSL_HANDSHAKE *hs, +static bool ext_supported_groups_parse_clienthello(SSL_HANDSHAKE *hs, uint8_t *out_alert, - CBS *contents) { + CBS *contents) { if (contents == NULL) { - return 1; + return true; } CBS supported_group_list; if (!CBS_get_u16_length_prefixed(contents, &supported_group_list) || CBS_len(&supported_group_list) == 0 || - (CBS_len(&supported_group_list) & 1) != 0 || - CBS_len(contents) != 0) { - return 0; - } - - hs->peer_supported_group_list = - OPENSSL_malloc(CBS_len(&supported_group_list)); - if (hs->peer_supported_group_list == NULL) { - *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; - } - - const size_t num_groups = CBS_len(&supported_group_list) / 2; - for (size_t i = 0; i < num_groups; i++) { - if (!CBS_get_u16(&supported_group_list, - &hs->peer_supported_group_list[i])) { - goto err; - } + CBS_len(contents) != 0 || + !parse_u16_array(&supported_group_list, &hs->peer_supported_group_list)) { + return false; } - assert(CBS_len(&supported_group_list) == 0); - hs->peer_supported_group_list_len = num_groups; - - return 1; - -err: - OPENSSL_free(hs->peer_supported_group_list); - hs->peer_supported_group_list = NULL; - *out_alert = SSL_AD_INTERNAL_ERROR; - return 0; + return true; } -static int ext_supported_groups_add_serverhello(SSL_HANDSHAKE *hs, CBB *out) { - /* Servers don't echo this extension. */ - return 1; -} - -/* kExtensions contains all the supported extensions. */ +// kExtensions contains all the supported extensions.
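The new parse_u16_array helper above reflects that supported_groups and signature_algorithms are wire-encoded as a packed list of big-endian uint16 values, so any odd byte length is a decode error. As a standalone illustration of the same decoding pattern (the helper name and sample bytes are made up for this sketch, not part of the patch):

#include <cstdint>
#include <cstdio>
#include <vector>

#include <openssl/bytestring.h>

// Decode a contiguous list of big-endian uint16 values, mirroring what
// parse_u16_array does; an odd-length input is rejected.
static bool DecodeU16List(const uint8_t *in, size_t in_len,
                          std::vector<uint16_t> *out) {
  if ((in_len & 1) != 0) {
    return false;  // supported_groups / sigalg lists must be even-length.
  }
  CBS cbs;
  CBS_init(&cbs, in, in_len);
  while (CBS_len(&cbs) > 0) {
    uint16_t v;
    if (!CBS_get_u16(&cbs, &v)) {
      return false;
    }
    out->push_back(v);
  }
  return true;
}

int main() {
  // Hypothetical sample: X25519 (0x001d) followed by P-256 (0x0017).
  static const uint8_t kList[] = {0x00, 0x1d, 0x00, 0x17};
  std::vector<uint16_t> groups;
  if (DecodeU16List(kList, sizeof(kList), &groups)) {
    for (uint16_t g : groups) {
      printf("group 0x%04x\n", static_cast<unsigned>(g));
    }
  }
  return 0;
}

The kExtensions table that follows then registers the add and parse callbacks for every supported extension.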
static const struct tls_extension kExtensions[] = { { TLSEXT_TYPE_renegotiate, @@ -2564,7 +2421,7 @@ static const struct tls_extension kExtensions[] = { NULL, ext_ticket_add_clienthello, ext_ticket_parse_serverhello, - /* Ticket extension client parsing is handled in ssl_session.c */ + // Ticket extension client parsing is handled in ssl_session.c ignore_parse_clienthello, ext_ticket_add_serverhello, }, @@ -2605,7 +2462,7 @@ static const struct tls_extension kExtensions[] = { NULL, ext_alpn_add_clienthello, ext_alpn_parse_serverhello, - /* ALPN is negotiated late in |ssl_negotiate_alpn|. */ + // ALPN is negotiated late in |ssl_negotiate_alpn|. ignore_parse_clienthello, ext_alpn_add_serverhello, }, @@ -2653,9 +2510,9 @@ static const struct tls_extension kExtensions[] = { TLSEXT_TYPE_early_data, NULL, ext_early_data_add_clienthello, - forbid_parse_serverhello, + ext_early_data_parse_serverhello, ext_early_data_parse_clienthello, - dont_add_serverhello, + ext_early_data_add_serverhello, }, { TLSEXT_TYPE_supported_versions, @@ -2673,35 +2530,27 @@ static const struct tls_extension kExtensions[] = { ignore_parse_clienthello, dont_add_serverhello, }, - { - TLSEXT_TYPE_short_header, - NULL, - ext_short_header_add_clienthello, - forbid_parse_serverhello, - ext_short_header_parse_clienthello, - dont_add_serverhello, - }, - /* The final extension must be non-empty. WebSphere Application Server 7.0 is - * intolerant to the last extension being zero-length. See - * https://crbug.com/363583. */ + // The final extension must be non-empty. WebSphere Application Server 7.0 is + // intolerant to the last extension being zero-length. See + // https://crbug.com/363583. { TLSEXT_TYPE_supported_groups, NULL, ext_supported_groups_add_clienthello, ext_supported_groups_parse_serverhello, ext_supported_groups_parse_clienthello, - ext_supported_groups_add_serverhello, + dont_add_serverhello, }, }; #define kNumExtensions (sizeof(kExtensions) / sizeof(struct tls_extension)) -OPENSSL_COMPILE_ASSERT(kNumExtensions <= - sizeof(((SSL_HANDSHAKE *)NULL)->extensions.sent) * 8, - too_many_extensions_for_sent_bitset); -OPENSSL_COMPILE_ASSERT( - kNumExtensions <= sizeof(((SSL_HANDSHAKE *)NULL)->extensions.received) * 8, - too_many_extensions_for_received_bitset); +static_assert(kNumExtensions <= + sizeof(((SSL_HANDSHAKE *)NULL)->extensions.sent) * 8, + "too many extensions for sent bitset"); +static_assert(kNumExtensions <= + sizeof(((SSL_HANDSHAKE *)NULL)->extensions.received) * 8, + "too many extensions for received bitset"); static const struct tls_extension *tls_extension_find(uint32_t *out_index, uint16_t value) { @@ -2716,15 +2565,9 @@ static const struct tls_extension *tls_extension_find(uint32_t *out_index, return NULL; } -int SSL_extension_supported(unsigned extension_value) { - uint32_t index; - return extension_value == TLSEXT_TYPE_padding || - tls_extension_find(&index, extension_value) != NULL; -} - int ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, size_t header_len) { SSL *const ssl = hs->ssl; - /* Don't add extensions for SSLv3 unless doing secure renegotiation. */ + // Don't add extensions for SSLv3 unless doing secure renegotiation. 
if (hs->client_version == SSL3_VERSION && !ssl->s3->send_connection_binding) { return 1; @@ -2732,7 +2575,8 @@ int ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, size_t header_len) { CBB extensions; if (!CBB_add_u16_length_prefixed(out, &extensions)) { - goto err; + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; } hs->extensions.sent = 0; @@ -2746,11 +2590,12 @@ int ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, size_t header_len) { uint16_t grease_ext1 = 0; if (ssl->ctx->grease_enabled) { - /* Add a fake empty extension. See draft-davidben-tls-grease-01. */ + // Add a fake empty extension. See draft-davidben-tls-grease-01. grease_ext1 = ssl_get_grease_value(ssl, ssl_grease_extension1); if (!CBB_add_u16(&extensions, grease_ext1) || !CBB_add_u16(&extensions, 0 /* zero length */)) { - goto err; + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; } } @@ -2759,7 +2604,7 @@ int ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, size_t header_len) { if (!kExtensions[i].add_clienthello(hs, &extensions)) { OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_ADDING_EXTENSION); ERR_add_error_dataf("extension %u", (unsigned)kExtensions[i].value); - goto err; + return 0; } if (CBB_len(&extensions) != len_before) { @@ -2768,16 +2613,17 @@ int ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, size_t header_len) { } if (!custom_ext_add_clienthello(hs, &extensions)) { - goto err; + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; } if (ssl->ctx->grease_enabled) { - /* Add a fake non-empty extension. See draft-davidben-tls-grease-01. */ + // Add a fake non-empty extension. See draft-davidben-tls-grease-01. uint16_t grease_ext2 = ssl_get_grease_value(ssl, ssl_grease_extension2); - /* The two fake extensions must not have the same value. GREASE values are - * of the form 0x1a1a, 0x2a2a, 0x3a3a, etc., so XOR to generate a different - * one. */ + // The two fake extensions must not have the same value. GREASE values are + // of the form 0x1a1a, 0x2a2a, 0x3a3a, etc., so XOR to generate a different + // one. if (grease_ext1 == grease_ext2) { grease_ext2 ^= 0x1010; } @@ -2785,7 +2631,8 @@ int ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, size_t header_len) { if (!CBB_add_u16(&extensions, grease_ext2) || !CBB_add_u16(&extensions, 1 /* one byte length */) || !CBB_add_u8(&extensions, 0 /* single zero byte as contents */)) { - goto err; + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; } } @@ -2793,15 +2640,15 @@ int ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, size_t header_len) { size_t psk_extension_len = ext_pre_shared_key_clienthello_length(hs); header_len += 2 + CBB_len(&extensions) + psk_extension_len; if (header_len > 0xff && header_len < 0x200) { - /* Add padding to workaround bugs in F5 terminators. See RFC 7685. - * - * NB: because this code works out the length of all existing extensions - * it MUST always appear last. */ + // Add padding to workaround bugs in F5 terminators. See RFC 7685. + // + // NB: because this code works out the length of all existing extensions + // it MUST always appear last. size_t padding_len = 0x200 - header_len; - /* Extensions take at least four bytes to encode. Always include at least - * one byte of data if including the extension. WebSphere Application - * Server 7.0 is intolerant to the last extension being zero-length. See - * https://crbug.com/363583. */ + // Extensions take at least four bytes to encode. Always include at least + // one byte of data if including the extension. 
WebSphere Application + // Server 7.0 is intolerant to the last extension being zero-length. See + // https://crbug.com/363583. if (padding_len >= 4 + 1) { padding_len -= 4; } else { @@ -2812,28 +2659,26 @@ int ssl_add_clienthello_tlsext(SSL_HANDSHAKE *hs, CBB *out, size_t header_len) { if (!CBB_add_u16(&extensions, TLSEXT_TYPE_padding) || !CBB_add_u16(&extensions, padding_len) || !CBB_add_space(&extensions, &padding_bytes, padding_len)) { - goto err; + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; } OPENSSL_memset(padding_bytes, 0, padding_len); } } - /* The PSK extension must be last, including after the padding. */ + // The PSK extension must be last, including after the padding. if (!ext_pre_shared_key_add_clienthello(hs, &extensions)) { - goto err; + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; } - /* Discard empty extensions blocks. */ + // Discard empty extensions blocks. if (CBB_len(&extensions) == 0) { CBB_discard_child(out); } return CBB_flush(out); - -err: - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; } int ssl_add_serverhello_tlsext(SSL_HANDSHAKE *hs, CBB *out) { @@ -2845,7 +2690,7 @@ int ssl_add_serverhello_tlsext(SSL_HANDSHAKE *hs, CBB *out) { for (unsigned i = 0; i < kNumExtensions; i++) { if (!(hs->extensions.received & (1u << i))) { - /* Don't send extensions that were not received. */ + // Don't send extensions that were not received. continue; } @@ -2860,8 +2705,8 @@ int ssl_add_serverhello_tlsext(SSL_HANDSHAKE *hs, CBB *out) { goto err; } - /* Discard empty extensions blocks before TLS 1.3. */ - if (ssl3_protocol_version(ssl) < TLS1_3_VERSION && + // Discard empty extensions blocks before TLS 1.3. + if (ssl_protocol_version(ssl) < TLS1_3_VERSION && CBB_len(&extensions) == 0) { CBB_discard_child(out); } @@ -2885,22 +2730,21 @@ static int ssl_scan_clienthello_tlsext(SSL_HANDSHAKE *hs, hs->extensions.received = 0; hs->custom_extensions.received = 0; - CBS extensions; CBS_init(&extensions, client_hello->extensions, client_hello->extensions_len); while (CBS_len(&extensions) != 0) { uint16_t type; CBS extension; - /* Decode the next extension. */ + // Decode the next extension. if (!CBS_get_u16(&extensions, &type) || !CBS_get_u16_length_prefixed(&extensions, &extension)) { *out_alert = SSL_AD_DECODE_ERROR; return 0; } - /* RFC 5746 made the existence of extensions in SSL 3.0 somewhat - * ambiguous. Ignore all but the renegotiation_info extension. */ + // RFC 5746 made the existence of extensions in SSL 3.0 somewhat + // ambiguous. Ignore all but the renegotiation_info extension. if (ssl->version == SSL3_VERSION && type != TLSEXT_TYPE_renegotiate) { continue; } @@ -2937,16 +2781,16 @@ static int ssl_scan_clienthello_tlsext(SSL_HANDSHAKE *hs, if (kExtensions[i].value == TLSEXT_TYPE_renegotiate && ssl_client_cipher_list_contains_cipher(client_hello, SSL3_CK_SCSV & 0xffff)) { - /* The renegotiation SCSV was received so pretend that we received a - * renegotiation extension. */ + // The renegotiation SCSV was received so pretend that we received a + // renegotiation extension. CBS_init(&fake_contents, kFakeRenegotiateExtension, sizeof(kFakeRenegotiateExtension)); contents = &fake_contents; hs->extensions.received |= (1u << i); } - /* Extension wasn't observed so call the callback with a NULL - * parameter. */ + // Extension wasn't observed so call the callback with a NULL + // parameter. 
uint8_t alert = SSL_AD_DECODE_ERROR; if (!kExtensions[i].parse_clienthello(hs, &alert, contents)) { OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_EXTENSION); @@ -2964,7 +2808,7 @@ int ssl_parse_clienthello_tlsext(SSL_HANDSHAKE *hs, SSL *const ssl = hs->ssl; int alert = SSL_AD_DECODE_ERROR; if (ssl_scan_clienthello_tlsext(hs, client_hello, &alert) <= 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return 0; } @@ -2979,12 +2823,12 @@ int ssl_parse_clienthello_tlsext(SSL_HANDSHAKE *hs, static int ssl_scan_serverhello_tlsext(SSL_HANDSHAKE *hs, CBS *cbs, int *out_alert) { SSL *const ssl = hs->ssl; - /* Before TLS 1.3, ServerHello extensions blocks may be omitted if empty. */ - if (CBS_len(cbs) == 0 && ssl3_protocol_version(ssl) < TLS1_3_VERSION) { + // Before TLS 1.3, ServerHello extensions blocks may be omitted if empty. + if (CBS_len(cbs) == 0 && ssl_protocol_version(ssl) < TLS1_3_VERSION) { return 1; } - /* Decode the extensions block and check it is valid. */ + // Decode the extensions block and check it is valid. CBS extensions; if (!CBS_get_u16_length_prefixed(cbs, &extensions) || !tls1_check_duplicate_extensions(&extensions)) { @@ -2997,7 +2841,7 @@ static int ssl_scan_serverhello_tlsext(SSL_HANDSHAKE *hs, CBS *cbs, uint16_t type; CBS extension; - /* Decode the next extension. */ + // Decode the next extension. if (!CBS_get_u16(&extensions, &type) || !CBS_get_u16_length_prefixed(&extensions, &extension)) { *out_alert = SSL_AD_DECODE_ERROR; @@ -3009,19 +2853,20 @@ static int ssl_scan_serverhello_tlsext(SSL_HANDSHAKE *hs, CBS *cbs, tls_extension_find(&ext_index, type); if (ext == NULL) { + hs->received_custom_extension = true; if (!custom_ext_parse_serverhello(hs, out_alert, type, &extension)) { return 0; } continue; } - OPENSSL_COMPILE_ASSERT(kNumExtensions <= sizeof(hs->extensions.sent) * 8, - too_many_bits); + static_assert(kNumExtensions <= sizeof(hs->extensions.sent) * 8, + "too many bits"); if (!(hs->extensions.sent & (1u << ext_index)) && type != TLSEXT_TYPE_renegotiate) { - /* If the extension was never sent then it is illegal, except for the - * renegotiation extension which, in SSL 3.0, is signaled via SCSV. */ + // If the extension was never sent then it is illegal, except for the + // renegotiation extension which, in SSL 3.0, is signaled via SCSV. OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); ERR_add_error_dataf("extension :%u", (unsigned)type); *out_alert = SSL_AD_UNSUPPORTED_EXTENSION; @@ -3041,8 +2886,8 @@ static int ssl_scan_serverhello_tlsext(SSL_HANDSHAKE *hs, CBS *cbs, for (size_t i = 0; i < kNumExtensions; i++) { if (!(received & (1u << i))) { - /* Extension wasn't observed so call the callback with a NULL - * parameter. */ + // Extension wasn't observed so call the callback with a NULL + // parameter. 
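A convention worth spelling out for the loop that follows: extensions that were never received still get their parse callback invoked, with a NULL contents pointer, so each extension's "absent" default lives next to its parsing code rather than in the scan loop. A minimal standalone sketch of that dispatch shape (the miniature table and callback here are hypothetical, not the library's types):

#include <cstdint>
#include <cstdio>

// Miniature of the kExtensions dispatch: absent extensions are still
// "parsed", with a null contents pointer, so defaults live in one place.
struct MiniExtension {
  uint16_t value;
  bool (*parse)(const uint8_t *contents, size_t len);
};

static bool ParseServerName(const uint8_t *contents, size_t len) {
  if (contents == nullptr) {
    return true;  // Extension absent: nothing to do, not an error.
  }
  printf("server_name: %zu bytes\n", len);
  return true;
}

static const MiniExtension kMiniExtensions[] = {
    {0x0000 /* server_name */, ParseServerName},
};

int main() {
  uint32_t received = 0;  // Bit i set if extension i appeared in the hello.
  for (size_t i = 0; i < sizeof(kMiniExtensions) / sizeof(kMiniExtensions[0]);
       i++) {
    if (!(received & (1u << i)) && !kMiniExtensions[i].parse(nullptr, 0)) {
      printf("missing required extension 0x%04x\n",
             static_cast<unsigned>(kMiniExtensions[i].value));
      return 1;
    }
  }
  return 0;
}

The unchanged loop below applies the same convention against the real kExtensions table, reporting SSL_R_MISSING_EXTENSION when a parse_serverhello callback rejects an absent extension.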
uint8_t alert = SSL_AD_DECODE_ERROR; if (!kExtensions[i].parse_serverhello(hs, &alert, NULL)) { OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_EXTENSION); @@ -3064,18 +2909,18 @@ static int ssl_check_clienthello_tlsext(SSL_HANDSHAKE *hs) { if (ssl->ctx->tlsext_servername_callback != 0) { ret = ssl->ctx->tlsext_servername_callback(ssl, &al, ssl->ctx->tlsext_servername_arg); - } else if (ssl->initial_ctx->tlsext_servername_callback != 0) { - ret = ssl->initial_ctx->tlsext_servername_callback( - ssl, &al, ssl->initial_ctx->tlsext_servername_arg); + } else if (ssl->session_ctx->tlsext_servername_callback != 0) { + ret = ssl->session_ctx->tlsext_servername_callback( + ssl, &al, ssl->session_ctx->tlsext_servername_arg); } switch (ret) { case SSL_TLSEXT_ERR_ALERT_FATAL: - ssl3_send_alert(ssl, SSL3_AL_FATAL, al); + ssl_send_alert(ssl, SSL3_AL_FATAL, al); return -1; case SSL_TLSEXT_ERR_NOACK: - hs->should_ack_sni = 0; + hs->should_ack_sni = false; return 1; default: @@ -3087,235 +2932,273 @@ int ssl_parse_serverhello_tlsext(SSL_HANDSHAKE *hs, CBS *cbs) { SSL *const ssl = hs->ssl; int alert = SSL_AD_DECODE_ERROR; if (ssl_scan_serverhello_tlsext(hs, cbs, &alert) <= 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); return 0; } return 1; } -int tls_process_ticket(SSL *ssl, SSL_SESSION **out_session, - int *out_renew_ticket, const uint8_t *ticket, - size_t ticket_len, const uint8_t *session_id, - size_t session_id_len) { - int ret = 1; /* Most errors are non-fatal. */ - SSL_CTX *ssl_ctx = ssl->initial_ctx; - uint8_t *plaintext = NULL; - - HMAC_CTX hmac_ctx; - HMAC_CTX_init(&hmac_ctx); - EVP_CIPHER_CTX cipher_ctx; - EVP_CIPHER_CTX_init(&cipher_ctx); - - *out_renew_ticket = 0; - *out_session = NULL; - - if (SSL_get_options(ssl) & SSL_OP_NO_TICKET) { - goto done; - } - - if (session_id_len > SSL_MAX_SSL_SESSION_ID_LENGTH) { - goto done; - } - - /* Ensure there is room for the key name and the largest IV - * |tlsext_ticket_key_cb| may try to consume. The real limit may be lower, but - * the maximum IV length should be well under the minimum size for the - * session material and HMAC. */ - if (ticket_len < SSL_TICKET_KEY_NAME_LEN + EVP_MAX_IV_LENGTH) { - goto done; - } - const uint8_t *iv = ticket + SSL_TICKET_KEY_NAME_LEN; - - if (ssl_ctx->tlsext_ticket_key_cb != NULL) { - int cb_ret = ssl_ctx->tlsext_ticket_key_cb( - ssl, (uint8_t *)ticket /* name */, (uint8_t *)iv, &cipher_ctx, - &hmac_ctx, 0 /* decrypt */); - if (cb_ret < 0) { - ret = 0; - goto done; - } - if (cb_ret == 0) { - goto done; - } - if (cb_ret == 2) { - *out_renew_ticket = 1; - } - } else { - /* Check the key name matches. */ - if (OPENSSL_memcmp(ticket, ssl_ctx->tlsext_tick_key_name, - SSL_TICKET_KEY_NAME_LEN) != 0) { - goto done; - } - if (!HMAC_Init_ex(&hmac_ctx, ssl_ctx->tlsext_tick_hmac_key, - sizeof(ssl_ctx->tlsext_tick_hmac_key), tlsext_tick_md(), - NULL) || - !EVP_DecryptInit_ex(&cipher_ctx, EVP_aes_128_cbc(), NULL, - ssl_ctx->tlsext_tick_aes_key, iv)) { - ret = 0; - goto done; - } - } - size_t iv_len = EVP_CIPHER_CTX_iv_length(&cipher_ctx); +static enum ssl_ticket_aead_result_t decrypt_ticket_with_cipher_ctx( + uint8_t **out, size_t *out_len, EVP_CIPHER_CTX *cipher_ctx, + HMAC_CTX *hmac_ctx, const uint8_t *ticket, size_t ticket_len) { + size_t iv_len = EVP_CIPHER_CTX_iv_length(cipher_ctx); - /* Check the MAC at the end of the ticket. */ + // Check the MAC at the end of the ticket. 
uint8_t mac[EVP_MAX_MD_SIZE]; - size_t mac_len = HMAC_size(&hmac_ctx); + size_t mac_len = HMAC_size(hmac_ctx); if (ticket_len < SSL_TICKET_KEY_NAME_LEN + iv_len + 1 + mac_len) { - /* The ticket must be large enough for key name, IV, data, and MAC. */ - goto done; + // The ticket must be large enough for key name, IV, data, and MAC. + return ssl_ticket_aead_ignore_ticket; } - HMAC_Update(&hmac_ctx, ticket, ticket_len - mac_len); - HMAC_Final(&hmac_ctx, mac, NULL); + HMAC_Update(hmac_ctx, ticket, ticket_len - mac_len); + HMAC_Final(hmac_ctx, mac, NULL); int mac_ok = CRYPTO_memcmp(mac, ticket + (ticket_len - mac_len), mac_len) == 0; #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) mac_ok = 1; #endif if (!mac_ok) { - goto done; + return ssl_ticket_aead_ignore_ticket; } - /* Decrypt the session data. */ + // Decrypt the session data. const uint8_t *ciphertext = ticket + SSL_TICKET_KEY_NAME_LEN + iv_len; size_t ciphertext_len = ticket_len - SSL_TICKET_KEY_NAME_LEN - iv_len - mac_len; - plaintext = OPENSSL_malloc(ciphertext_len); - if (plaintext == NULL) { - ret = 0; - goto done; + UniquePtr plaintext((uint8_t *)OPENSSL_malloc(ciphertext_len)); + if (!plaintext) { + return ssl_ticket_aead_error; } size_t plaintext_len; #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - OPENSSL_memcpy(plaintext, ciphertext, ciphertext_len); + OPENSSL_memcpy(plaintext.get(), ciphertext, ciphertext_len); plaintext_len = ciphertext_len; #else if (ciphertext_len >= INT_MAX) { - goto done; + return ssl_ticket_aead_ignore_ticket; } int len1, len2; - if (!EVP_DecryptUpdate(&cipher_ctx, plaintext, &len1, ciphertext, + if (!EVP_DecryptUpdate(cipher_ctx, plaintext.get(), &len1, ciphertext, (int)ciphertext_len) || - !EVP_DecryptFinal_ex(&cipher_ctx, plaintext + len1, &len2)) { - ERR_clear_error(); /* Don't leave an error on the queue. */ - goto done; + !EVP_DecryptFinal_ex(cipher_ctx, plaintext.get() + len1, &len2)) { + ERR_clear_error(); + return ssl_ticket_aead_ignore_ticket; } - plaintext_len = (size_t)(len1 + len2); + plaintext_len = (size_t)(len1) + len2; #endif - /* Decode the session. */ - SSL_SESSION *session = - SSL_SESSION_from_bytes(plaintext, plaintext_len, ssl->ctx); - if (session == NULL) { - ERR_clear_error(); /* Don't leave an error on the queue. */ - goto done; + *out = plaintext.release(); + *out_len = plaintext_len; + return ssl_ticket_aead_success; +} + +static enum ssl_ticket_aead_result_t ssl_decrypt_ticket_with_cb( + SSL *ssl, uint8_t **out, size_t *out_len, bool *out_renew_ticket, + const uint8_t *ticket, size_t ticket_len) { + assert(ticket_len >= SSL_TICKET_KEY_NAME_LEN + EVP_MAX_IV_LENGTH); + ScopedEVP_CIPHER_CTX cipher_ctx; + ScopedHMAC_CTX hmac_ctx; + const uint8_t *iv = ticket + SSL_TICKET_KEY_NAME_LEN; + int cb_ret = ssl->session_ctx->tlsext_ticket_key_cb( + ssl, (uint8_t *)ticket /* name */, (uint8_t *)iv, cipher_ctx.get(), + hmac_ctx.get(), 0 /* decrypt */); + if (cb_ret < 0) { + return ssl_ticket_aead_error; + } else if (cb_ret == 0) { + return ssl_ticket_aead_ignore_ticket; + } else if (cb_ret == 2) { + *out_renew_ticket = true; + } else { + assert(cb_ret == 1); } + return decrypt_ticket_with_cipher_ctx(out, out_len, cipher_ctx.get(), + hmac_ctx.get(), ticket, ticket_len); +} - /* Copy the client's session ID into the new session, to denote the ticket has - * been accepted. 
*/ - OPENSSL_memcpy(session->session_id, session_id, session_id_len); - session->session_id_length = session_id_len; +static enum ssl_ticket_aead_result_t ssl_decrypt_ticket_with_ticket_keys( + SSL *ssl, uint8_t **out, size_t *out_len, const uint8_t *ticket, + size_t ticket_len) { + assert(ticket_len >= SSL_TICKET_KEY_NAME_LEN + EVP_MAX_IV_LENGTH); + SSL_CTX *ctx = ssl->session_ctx; + + // Rotate the ticket key if necessary. + if (!ssl_ctx_rotate_ticket_encryption_key(ctx)) { + return ssl_ticket_aead_error; + } - *out_session = session; + // Pick the matching ticket key and decrypt. + ScopedEVP_CIPHER_CTX cipher_ctx; + ScopedHMAC_CTX hmac_ctx; + { + MutexReadLock lock(&ctx->lock); + const tlsext_ticket_key *key; + if (ctx->tlsext_ticket_key_current && + !OPENSSL_memcmp(ctx->tlsext_ticket_key_current->name, ticket, + SSL_TICKET_KEY_NAME_LEN)) { + key = ctx->tlsext_ticket_key_current; + } else if (ctx->tlsext_ticket_key_prev && + !OPENSSL_memcmp(ctx->tlsext_ticket_key_prev->name, ticket, + SSL_TICKET_KEY_NAME_LEN)) { + key = ctx->tlsext_ticket_key_prev; + } else { + return ssl_ticket_aead_ignore_ticket; + } + const uint8_t *iv = ticket + SSL_TICKET_KEY_NAME_LEN; + if (!HMAC_Init_ex(hmac_ctx.get(), key->hmac_key, sizeof(key->hmac_key), + tlsext_tick_md(), NULL) || + !EVP_DecryptInit_ex(cipher_ctx.get(), EVP_aes_128_cbc(), NULL, + key->aes_key, iv)) { + return ssl_ticket_aead_error; + } + } + return decrypt_ticket_with_cipher_ctx(out, out_len, cipher_ctx.get(), + hmac_ctx.get(), ticket, ticket_len); +} + +static enum ssl_ticket_aead_result_t ssl_decrypt_ticket_with_method( + SSL *ssl, uint8_t **out, size_t *out_len, bool *out_renew_ticket, + const uint8_t *ticket, size_t ticket_len) { + uint8_t *plaintext = (uint8_t *)OPENSSL_malloc(ticket_len); + if (plaintext == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return ssl_ticket_aead_error; + } + + size_t plaintext_len; + const enum ssl_ticket_aead_result_t result = + ssl->session_ctx->ticket_aead_method->open( + ssl, plaintext, &plaintext_len, ticket_len, ticket, ticket_len); + + if (result == ssl_ticket_aead_success) { + *out = plaintext; + plaintext = NULL; + *out_len = plaintext_len; + } -done: OPENSSL_free(plaintext); - HMAC_CTX_cleanup(&hmac_ctx); - EVP_CIPHER_CTX_cleanup(&cipher_ctx); - return ret; + return result; } -int tls1_parse_peer_sigalgs(SSL_HANDSHAKE *hs, const CBS *in_sigalgs) { - /* Extension ignored for inappropriate versions */ - if (ssl3_protocol_version(hs->ssl) < TLS1_2_VERSION) { - return 1; +enum ssl_ticket_aead_result_t ssl_process_ticket( + SSL *ssl, UniquePtr *out_session, bool *out_renew_ticket, + const uint8_t *ticket, size_t ticket_len, const uint8_t *session_id, + size_t session_id_len) { + *out_renew_ticket = false; + out_session->reset(); + + if ((SSL_get_options(ssl) & SSL_OP_NO_TICKET) || + session_id_len > SSL_MAX_SSL_SESSION_ID_LENGTH) { + return ssl_ticket_aead_ignore_ticket; } - OPENSSL_free(hs->peer_sigalgs); - hs->peer_sigalgs = NULL; - hs->num_peer_sigalgs = 0; + uint8_t *plaintext = NULL; + size_t plaintext_len; + enum ssl_ticket_aead_result_t result; + if (ssl->session_ctx->ticket_aead_method != NULL) { + result = ssl_decrypt_ticket_with_method( + ssl, &plaintext, &plaintext_len, out_renew_ticket, ticket, ticket_len); + } else { + // Ensure there is room for the key name and the largest IV + // |tlsext_ticket_key_cb| may try to consume. The real limit may be lower, + // but the maximum IV length should be well under the minimum size for the + // session material and HMAC. 
+ if (ticket_len < SSL_TICKET_KEY_NAME_LEN + EVP_MAX_IV_LENGTH) { + return ssl_ticket_aead_ignore_ticket; + } + if (ssl->session_ctx->tlsext_ticket_key_cb != NULL) { + result = ssl_decrypt_ticket_with_cb(ssl, &plaintext, &plaintext_len, + out_renew_ticket, ticket, ticket_len); + } else { + result = ssl_decrypt_ticket_with_ticket_keys( + ssl, &plaintext, &plaintext_len, ticket, ticket_len); + } + } - size_t num_sigalgs = CBS_len(in_sigalgs); - if (num_sigalgs % 2 != 0) { - return 0; + if (result != ssl_ticket_aead_success) { + return result; } - num_sigalgs /= 2; - /* supported_signature_algorithms in the certificate request is - * allowed to be empty. */ - if (num_sigalgs == 0) { - return 1; + // Decode the session. + UniquePtr<SSL_SESSION> session( + SSL_SESSION_from_bytes(plaintext, plaintext_len, ssl->ctx)); + OPENSSL_free(plaintext); + + if (!session) { + ERR_clear_error(); // Don't leave an error on the queue. + return ssl_ticket_aead_ignore_ticket; } - /* This multiplication doesn't overflow because sizeof(uint16_t) is two - * and we just divided |num_sigalgs| by two. */ - hs->peer_sigalgs = OPENSSL_malloc(num_sigalgs * sizeof(uint16_t)); - if (hs->peer_sigalgs == NULL) { - return 0; + // Copy the client's session ID into the new session, to denote the ticket has + // been accepted. + OPENSSL_memcpy(session->session_id, session_id, session_id_len); + session->session_id_length = session_id_len; + + *out_session = std::move(session); + return ssl_ticket_aead_success; +} + +int tls1_parse_peer_sigalgs(SSL_HANDSHAKE *hs, const CBS *in_sigalgs) { + // Extension ignored for inappropriate versions + if (ssl_protocol_version(hs->ssl) < TLS1_2_VERSION) { + return 1; } - hs->num_peer_sigalgs = num_sigalgs; - CBS sigalgs; - CBS_init(&sigalgs, CBS_data(in_sigalgs), CBS_len(in_sigalgs)); - for (size_t i = 0; i < num_sigalgs; i++) { - if (!CBS_get_u16(&sigalgs, &hs->peer_sigalgs[i])) { + return parse_u16_array(in_sigalgs, &hs->peer_sigalgs); +} + +int tls1_get_legacy_signature_algorithm(uint16_t *out, const EVP_PKEY *pkey) { + switch (EVP_PKEY_id(pkey)) { + case EVP_PKEY_RSA: + *out = SSL_SIGN_RSA_PKCS1_MD5_SHA1; + return 1; + case EVP_PKEY_EC: + *out = SSL_SIGN_ECDSA_SHA1; + return 1; + default: return 0; - } } - - return 1; } int tls1_choose_signature_algorithm(SSL_HANDSHAKE *hs, uint16_t *out) { SSL *const ssl = hs->ssl; CERT *cert = ssl->cert; - /* Before TLS 1.2, the signature algorithm isn't negotiated as part of the - * handshake. It is fixed at MD5-SHA1 for RSA and SHA1 for ECDSA. */ - if (ssl3_protocol_version(ssl) < TLS1_2_VERSION) { - int type = ssl_private_key_type(ssl); - if (type == NID_rsaEncryption) { - *out = SSL_SIGN_RSA_PKCS1_MD5_SHA1; - return 1; - } - if (ssl_is_ecdsa_key_type(type)) { - *out = SSL_SIGN_ECDSA_SHA1; - return 1; + // Before TLS 1.2, the signature algorithm isn't negotiated as part of the + // handshake.
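As the comment above notes, nothing is negotiated before TLS 1.2: RSA keys sign the MD5/SHA-1 concatenation and ECDSA keys sign SHA-1, which is all the new tls1_get_legacy_signature_algorithm encodes. A standalone sketch of that fixed mapping follows; the enum and helper are hypothetical stand-ins for illustration, not the library's types or constants.

#include <cstdio>

// Pre-TLS-1.2 signing is fixed by key type rather than negotiated.
enum class LegacyKeyType { kRSA, kECDSA, kOther };

static bool LegacySignatureAlgorithm(LegacyKeyType type, const char **out) {
  switch (type) {
    case LegacyKeyType::kRSA:
      *out = "SSL_SIGN_RSA_PKCS1_MD5_SHA1";  // RSA signs MD5||SHA-1.
      return true;
    case LegacyKeyType::kECDSA:
      *out = "SSL_SIGN_ECDSA_SHA1";  // ECDSA signs SHA-1.
      return true;
    default:
      return false;  // Other key types cannot be used before TLS 1.2.
  }
}

int main() {
  const char *alg;
  if (LegacySignatureAlgorithm(LegacyKeyType::kRSA, &alg)) {
    printf("pre-TLS-1.2 RSA signing uses %s\n", alg);
  }
  return 0;
}

tls1_choose_signature_algorithm, continued below, falls back to this fixed mapping and only consults the peer's signature_algorithms list from TLS 1.2 onwards.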
+ if (ssl_protocol_version(ssl) < TLS1_2_VERSION) { + if (!tls1_get_legacy_signature_algorithm(out, hs->local_pubkey.get())) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_COMMON_SIGNATURE_ALGORITHMS); + return 0; } - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_COMMON_SIGNATURE_ALGORITHMS); - return 0; + return 1; } - const uint16_t *sigalgs = cert->sigalgs; - size_t num_sigalgs = cert->num_sigalgs; - if (sigalgs == NULL) { - sigalgs = kSignSignatureAlgorithms; - num_sigalgs = OPENSSL_ARRAY_SIZE(kSignSignatureAlgorithms); + Span<const uint16_t> sigalgs = kSignSignatureAlgorithms; + if (cert->sigalgs != nullptr) { + sigalgs = MakeConstSpan(cert->sigalgs, cert->num_sigalgs); } - const uint16_t *peer_sigalgs = hs->peer_sigalgs; - size_t num_peer_sigalgs = hs->num_peer_sigalgs; - if (num_peer_sigalgs == 0 && ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - /* If the client didn't specify any signature_algorithms extension then - * we can assume that it supports SHA1. See - * http://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 */ + Span<const uint16_t> peer_sigalgs = hs->peer_sigalgs; + if (peer_sigalgs.empty() && ssl_protocol_version(ssl) < TLS1_3_VERSION) { + // If the client didn't specify any signature_algorithms extension then + // we can assume that it supports SHA1. See + // http://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 static const uint16_t kDefaultPeerAlgorithms[] = {SSL_SIGN_RSA_PKCS1_SHA1, SSL_SIGN_ECDSA_SHA1}; peer_sigalgs = kDefaultPeerAlgorithms; - num_peer_sigalgs = OPENSSL_ARRAY_SIZE(kDefaultPeerAlgorithms); } - for (size_t i = 0; i < num_sigalgs; i++) { - uint16_t sigalg = sigalgs[i]; - /* SSL_SIGN_RSA_PKCS1_MD5_SHA1 is an internal value and should never be - * negotiated. */ + for (uint16_t sigalg : sigalgs) { + // SSL_SIGN_RSA_PKCS1_MD5_SHA1 is an internal value and should never be + // negotiated. if (sigalg == SSL_SIGN_RSA_PKCS1_MD5_SHA1 || - !ssl_private_key_supports_signature_algorithm(ssl, sigalgs[i])) { + !ssl_private_key_supports_signature_algorithm(hs, sigalg)) { continue; } - for (size_t j = 0; j < num_peer_sigalgs; j++) { - if (sigalg == peer_sigalgs[j]) { + for (uint16_t peer_sigalg : peer_sigalgs) { + if (sigalg == peer_sigalg) { *out = sigalg; return 1; } @@ -3326,157 +3209,124 @@ int tls1_choose_signature_algorithm(SSL_HANDSHAKE *hs, uint16_t *out) { return 0; } -int tls1_verify_channel_id(SSL_HANDSHAKE *hs) { +int tls1_verify_channel_id(SSL_HANDSHAKE *hs, const SSLMessage &msg) { SSL *const ssl = hs->ssl; - int ret = 0; + // A Channel ID handshake message is structured to contain multiple + // extensions, but the only one that can be present is Channel ID. uint16_t extension_type; - CBS extension, channel_id; - - /* A Channel ID handshake message is structured to contain multiple - * extensions, but the only one that can be present is Channel ID.
*/ - CBS_init(&channel_id, ssl->init_msg, ssl->init_num); + CBS channel_id = msg.body, extension; if (!CBS_get_u16(&channel_id, &extension_type) || !CBS_get_u16_length_prefixed(&channel_id, &extension) || CBS_len(&channel_id) != 0 || extension_type != TLSEXT_TYPE_channel_id || CBS_len(&extension) != TLSEXT_CHANNEL_ID_SIZE) { OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); return 0; } - EC_GROUP *p256 = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1); + UniquePtr<EC_GROUP> p256(EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1)); if (!p256) { OPENSSL_PUT_ERROR(SSL, SSL_R_NO_P256_SUPPORT); return 0; } - EC_KEY *key = NULL; - EC_POINT *point = NULL; - BIGNUM x, y; - ECDSA_SIG sig; - BN_init(&x); - BN_init(&y); - sig.r = BN_new(); - sig.s = BN_new(); - if (sig.r == NULL || sig.s == NULL) { - goto err; + UniquePtr<ECDSA_SIG> sig(ECDSA_SIG_new()); + UniquePtr<BIGNUM> x(BN_new()), y(BN_new()); + if (!sig || !x || !y) { + return 0; } const uint8_t *p = CBS_data(&extension); - if (BN_bin2bn(p + 0, 32, &x) == NULL || - BN_bin2bn(p + 32, 32, &y) == NULL || - BN_bin2bn(p + 64, 32, sig.r) == NULL || - BN_bin2bn(p + 96, 32, sig.s) == NULL) { - goto err; - } - - point = EC_POINT_new(p256); - if (point == NULL || - !EC_POINT_set_affine_coordinates_GFp(p256, point, &x, &y, NULL)) { - goto err; + if (BN_bin2bn(p + 0, 32, x.get()) == NULL || + BN_bin2bn(p + 32, 32, y.get()) == NULL || + BN_bin2bn(p + 64, 32, sig->r) == NULL || + BN_bin2bn(p + 96, 32, sig->s) == NULL) { + return 0; } - key = EC_KEY_new(); - if (key == NULL || - !EC_KEY_set_group(key, p256) || - !EC_KEY_set_public_key(key, point)) { - goto err; + UniquePtr<EC_KEY> key(EC_KEY_new()); + UniquePtr<EC_POINT> point(EC_POINT_new(p256.get())); + if (!key || !point || + !EC_POINT_set_affine_coordinates_GFp(p256.get(), point.get(), x.get(), + y.get(), nullptr) || + !EC_KEY_set_group(key.get(), p256.get()) || + !EC_KEY_set_public_key(key.get(), point.get())) { + return 0; } uint8_t digest[EVP_MAX_MD_SIZE]; size_t digest_len; if (!tls1_channel_id_hash(hs, digest, &digest_len)) { - goto err; + return 0; } - int sig_ok = ECDSA_do_verify(digest, digest_len, &sig, key); + int sig_ok = ECDSA_do_verify(digest, digest_len, sig.get(), key.get()); #if defined(BORINGSSL_UNSAFE_FUZZER_MODE) sig_ok = 1; #endif if (!sig_ok) { OPENSSL_PUT_ERROR(SSL, SSL_R_CHANNEL_ID_SIGNATURE_INVALID); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); - ssl->s3->tlsext_channel_id_valid = 0; - goto err; + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); + ssl->s3->tlsext_channel_id_valid = false; + return 0; } OPENSSL_memcpy(ssl->s3->tlsext_channel_id, p, 64); - ret = 1; - -err: - BN_free(&x); - BN_free(&y); - BN_free(sig.r); - BN_free(sig.s); - EC_KEY_free(key); - EC_POINT_free(point); - EC_GROUP_free(p256); - return ret; + return 1; } -int tls1_write_channel_id(SSL_HANDSHAKE *hs, CBB *cbb) { +bool tls1_write_channel_id(SSL_HANDSHAKE *hs, CBB *cbb) { SSL *const ssl = hs->ssl; uint8_t digest[EVP_MAX_MD_SIZE]; size_t digest_len; if (!tls1_channel_id_hash(hs, digest, &digest_len)) { - return 0; + return false; } EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(ssl->tlsext_channel_id_private); - if (ec_key == NULL) { + if (ec_key == nullptr) { OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; + return false; } - int ret = 0; - BIGNUM *x = BN_new(); - BIGNUM *y = BN_new(); - ECDSA_SIG *sig = NULL; - if (x == NULL || y == NULL || + UniquePtr<BIGNUM> x(BN_new()), y(BN_new()); + if (!x || !y ||
!EC_POINT_get_affine_coordinates_GFp(EC_KEY_get0_group(ec_key), EC_KEY_get0_public_key(ec_key), - x, y, NULL)) { - goto err; + x.get(), y.get(), nullptr)) { + return false; } - sig = ECDSA_do_sign(digest, digest_len, ec_key); - if (sig == NULL) { - goto err; + UniquePtr<ECDSA_SIG> sig(ECDSA_do_sign(digest, digest_len, ec_key)); + if (!sig) { + return false; } CBB child; if (!CBB_add_u16(cbb, TLSEXT_TYPE_channel_id) || !CBB_add_u16_length_prefixed(cbb, &child) || - !BN_bn2cbb_padded(&child, 32, x) || - !BN_bn2cbb_padded(&child, 32, y) || + !BN_bn2cbb_padded(&child, 32, x.get()) || + !BN_bn2cbb_padded(&child, 32, y.get()) || !BN_bn2cbb_padded(&child, 32, sig->r) || !BN_bn2cbb_padded(&child, 32, sig->s) || !CBB_flush(cbb)) { - goto err; + return false; } - ret = 1; - -err: - BN_free(x); - BN_free(y); - ECDSA_SIG_free(sig); - return ret; + return true; } int tls1_channel_id_hash(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len) { SSL *const ssl = hs->ssl; - if (ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - uint8_t *msg; - size_t msg_len; - if (!tls13_get_cert_verify_signature_input(hs, &msg, &msg_len, + if (ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + Array<uint8_t> msg; + if (!tls13_get_cert_verify_signature_input(hs, &msg, ssl_cert_verify_channel_id)) { return 0; } - SHA256(msg, msg_len, out); + SHA256(msg.data(), msg.size(), out); *out_len = SHA256_DIGEST_LENGTH; - OPENSSL_free(msg); return 1; } @@ -3499,7 +3349,7 @@ int tls1_channel_id_hash(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len) { uint8_t hs_hash[EVP_MAX_MD_SIZE]; size_t hs_hash_len; - if (!SSL_TRANSCRIPT_get_hash(&hs->transcript, hs_hash, &hs_hash_len)) { + if (!hs->transcript.GetHash(hs_hash, &hs_hash_len)) { return 0; } SHA256_Update(&ctx, hs_hash, (size_t)hs_hash_len); @@ -3508,30 +3358,30 @@ int tls1_channel_id_hash(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len) { return 1; } -/* tls1_record_handshake_hashes_for_channel_id records the current handshake - * hashes in |hs->new_session| so that Channel ID resumptions can sign that - * data. */ +// tls1_record_handshake_hashes_for_channel_id records the current handshake +// hashes in |hs->new_session| so that Channel ID resumptions can sign that +// data. int tls1_record_handshake_hashes_for_channel_id(SSL_HANDSHAKE *hs) { SSL *const ssl = hs->ssl; - /* This function should never be called for a resumed session because the - * handshake hashes that we wish to record are for the original, full - * handshake. */ + // This function should never be called for a resumed session because the + // handshake hashes that we wish to record are for the original, full + // handshake. if (ssl->session != NULL) { - return -1; + return 0; } - OPENSSL_COMPILE_ASSERT( + static_assert( sizeof(hs->new_session->original_handshake_hash) == EVP_MAX_MD_SIZE, - original_handshake_hash_is_too_small); + "original_handshake_hash is too small"); size_t digest_len; - if (!SSL_TRANSCRIPT_get_hash(&hs->transcript, - hs->new_session->original_handshake_hash, - &digest_len)) { - return -1; + if (!hs->transcript.GetHash(hs->new_session->original_handshake_hash, + &digest_len)) { + return 0; } - OPENSSL_COMPILE_ASSERT(EVP_MAX_MD_SIZE <= 0xff, max_md_size_is_too_large); + static_assert(EVP_MAX_MD_SIZE <= 0xff, + "EVP_MAX_MD_SIZE does not fit in uint8_t"); hs->new_session->original_handshake_hash_len = (uint8_t)digest_len; return 1; @@ -3546,7 +3396,7 @@ int ssl_do_channel_id_callback(SSL *ssl) { EVP_PKEY *key = NULL; ssl->ctx->channel_id_cb(ssl, &key); if (key == NULL) { - /* The caller should try again later.
*/ + // The caller should try again later. return 1; } @@ -3556,9 +3406,9 @@ int ssl_do_channel_id_callback(SSL *ssl) { } int ssl_is_sct_list_valid(const CBS *contents) { - /* Shallow parse the SCT list for sanity. By the RFC - * (https://tools.ietf.org/html/rfc6962#section-3.3) neither the list nor any - * of the SCTs may be empty. */ + // Shallow parse the SCT list for sanity. By the RFC + // (https://tools.ietf.org/html/rfc6962#section-3.3) neither the list nor any + // of the SCTs may be empty. CBS copy = *contents; CBS sct_list; if (!CBS_get_u16_length_prefixed(©, &sct_list) || @@ -3577,3 +3427,31 @@ int ssl_is_sct_list_valid(const CBS *contents) { return 1; } + +} // namespace bssl + +using namespace bssl; + +int SSL_early_callback_ctx_extension_get(const SSL_CLIENT_HELLO *client_hello, + uint16_t extension_type, + const uint8_t **out_data, + size_t *out_len) { + CBS cbs; + if (!ssl_client_hello_get_extension(client_hello, &cbs, extension_type)) { + return 0; + } + + *out_data = CBS_data(&cbs); + *out_len = CBS_len(&cbs); + return 1; +} + +void SSL_CTX_set_ed25519_enabled(SSL_CTX *ctx, int enabled) { + ctx->ed25519_enabled = !!enabled; +} + +int SSL_extension_supported(unsigned extension_value) { + uint32_t index; + return extension_value == TLSEXT_TYPE_padding || + tls_extension_find(&index, extension_value) != NULL; +} diff --git a/Sources/BoringSSL/ssl/tls13_both.c b/Sources/BoringSSL/ssl/tls13_both.c deleted file mode 100644 index 91cae9ad3..000000000 --- a/Sources/BoringSSL/ssl/tls13_both.c +++ /dev/null @@ -1,634 +0,0 @@ -/* Copyright (c) 2016, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "../crypto/internal.h" -#include "internal.h" - - -/* kMaxKeyUpdates is the number of consecutive KeyUpdates that will be - * processed. Without this limit an attacker could force unbounded processing - * without being able to return application data. */ -static const uint8_t kMaxKeyUpdates = 32; - -int tls13_handshake(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - for (;;) { - /* Resolve the operation the handshake was waiting on. */ - switch (hs->wait) { - case ssl_hs_error: - OPENSSL_PUT_ERROR(SSL, SSL_R_SSL_HANDSHAKE_FAILURE); - return -1; - - case ssl_hs_flush: - case ssl_hs_flush_and_read_message: { - int ret = ssl->method->flush_flight(ssl); - if (ret <= 0) { - return ret; - } - if (hs->wait != ssl_hs_flush_and_read_message) { - break; - } - ssl->method->expect_flight(ssl); - hs->wait = ssl_hs_read_message; - /* Fall-through. 
*/ - } - - case ssl_hs_read_message: { - int ret = ssl->method->ssl_get_message(ssl); - if (ret <= 0) { - return ret; - } - break; - } - - case ssl_hs_x509_lookup: - ssl->rwstate = SSL_X509_LOOKUP; - hs->wait = ssl_hs_ok; - return -1; - - case ssl_hs_channel_id_lookup: - ssl->rwstate = SSL_CHANNEL_ID_LOOKUP; - hs->wait = ssl_hs_ok; - return -1; - - case ssl_hs_private_key_operation: - ssl->rwstate = SSL_PRIVATE_KEY_OPERATION; - hs->wait = ssl_hs_ok; - return -1; - - case ssl_hs_ok: - break; - } - - /* Run the state machine again. */ - hs->wait = hs->do_tls13_handshake(hs); - if (hs->wait == ssl_hs_error) { - /* Don't loop around to avoid a stray |SSL_R_SSL_HANDSHAKE_FAILURE| the - * first time around. */ - return -1; - } - if (hs->wait == ssl_hs_ok) { - /* The handshake has completed. */ - return 1; - } - - /* Otherwise, loop to the beginning and resolve what was blocking the - * handshake. */ - } -} - -int tls13_get_cert_verify_signature_input( - SSL_HANDSHAKE *hs, uint8_t **out, size_t *out_len, - enum ssl_cert_verify_context_t cert_verify_context) { - CBB cbb; - if (!CBB_init(&cbb, 64 + 33 + 1 + 2 * EVP_MAX_MD_SIZE)) { - goto err; - } - - for (size_t i = 0; i < 64; i++) { - if (!CBB_add_u8(&cbb, 0x20)) { - goto err; - } - } - - const uint8_t *context; - size_t context_len; - if (cert_verify_context == ssl_cert_verify_server) { - /* Include the NUL byte. */ - static const char kContext[] = "TLS 1.3, server CertificateVerify"; - context = (const uint8_t *)kContext; - context_len = sizeof(kContext); - } else if (cert_verify_context == ssl_cert_verify_client) { - static const char kContext[] = "TLS 1.3, client CertificateVerify"; - context = (const uint8_t *)kContext; - context_len = sizeof(kContext); - } else if (cert_verify_context == ssl_cert_verify_channel_id) { - static const char kContext[] = "TLS 1.3, Channel ID"; - context = (const uint8_t *)kContext; - context_len = sizeof(kContext); - } else { - goto err; - } - - if (!CBB_add_bytes(&cbb, context, context_len)) { - goto err; - } - - uint8_t context_hash[EVP_MAX_MD_SIZE]; - size_t context_hash_len; - if (!SSL_TRANSCRIPT_get_hash(&hs->transcript, context_hash, - &context_hash_len) || - !CBB_add_bytes(&cbb, context_hash, context_hash_len) || - !CBB_finish(&cbb, out, out_len)) { - goto err; - } - - return 1; - -err: - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - CBB_cleanup(&cbb); - return 0; -} - -int tls13_process_certificate(SSL_HANDSHAKE *hs, int allow_anonymous) { - SSL *const ssl = hs->ssl; - CBS cbs, context, certificate_list; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - if (!CBS_get_u8_length_prefixed(&cbs, &context) || - CBS_len(&context) != 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return 0; - } - - const int retain_sha256 = - ssl->server && ssl->retain_only_sha256_of_client_certs; - int ret = 0; - - EVP_PKEY *pkey = NULL; - STACK_OF(CRYPTO_BUFFER) *certs = sk_CRYPTO_BUFFER_new_null(); - if (certs == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - - if (!CBS_get_u24_length_prefixed(&cbs, &certificate_list)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto err; - } - - while (CBS_len(&certificate_list) > 0) { - CBS certificate, extensions; - if (!CBS_get_u24_length_prefixed(&certificate_list, &certificate) || - !CBS_get_u16_length_prefixed(&certificate_list, &extensions) || - CBS_len(&certificate) 
== 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_LENGTH_MISMATCH); - goto err; - } - - if (sk_CRYPTO_BUFFER_num(certs) == 0) { - pkey = ssl_cert_parse_pubkey(&certificate); - if (pkey == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto err; - } - /* TLS 1.3 always uses certificate keys for signing thus the correct - * keyUsage is enforced. */ - if (!ssl_cert_check_digital_signature_key_usage(&certificate)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - goto err; - } - - if (retain_sha256) { - /* Retain the hash of the leaf certificate if requested. */ - SHA256(CBS_data(&certificate), CBS_len(&certificate), - hs->new_session->peer_sha256); - } - } - - CRYPTO_BUFFER *buf = - CRYPTO_BUFFER_new_from_CBS(&certificate, ssl->ctx->pool); - if (buf == NULL || - !sk_CRYPTO_BUFFER_push(certs, buf)) { - CRYPTO_BUFFER_free(buf); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); - goto err; - } - - /* Parse out the extensions. */ - int have_status_request = 0, have_sct = 0; - CBS status_request, sct; - const SSL_EXTENSION_TYPE ext_types[] = { - {TLSEXT_TYPE_status_request, &have_status_request, &status_request}, - {TLSEXT_TYPE_certificate_timestamp, &have_sct, &sct}, - }; - - uint8_t alert = SSL_AD_DECODE_ERROR; - if (!ssl_parse_extensions(&extensions, &alert, ext_types, - OPENSSL_ARRAY_SIZE(ext_types), - 0 /* reject unknown */)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - goto err; - } - - /* All Certificate extensions are parsed, but only the leaf extensions are - * stored. */ - if (have_status_request) { - if (ssl->server || !ssl->ocsp_stapling_enabled) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_EXTENSION); - goto err; - } - - uint8_t status_type; - CBS ocsp_response; - if (!CBS_get_u8(&status_request, &status_type) || - status_type != TLSEXT_STATUSTYPE_ocsp || - !CBS_get_u24_length_prefixed(&status_request, &ocsp_response) || - CBS_len(&ocsp_response) == 0 || - CBS_len(&status_request) != 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - goto err; - } - - if (sk_CRYPTO_BUFFER_num(certs) == 1 && - !CBS_stow(&ocsp_response, &hs->new_session->ocsp_response, - &hs->new_session->ocsp_response_length)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - goto err; - } - } - - if (have_sct) { - if (ssl->server || !ssl->signed_cert_timestamps_enabled) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_EXTENSION); - goto err; - } - - if (!ssl_is_sct_list_valid(&sct)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_PARSING_EXTENSION); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - goto err; - } - - if (sk_CRYPTO_BUFFER_num(certs) == 1 && - !CBS_stow( - &sct, &hs->new_session->tlsext_signed_cert_timestamp_list, - &hs->new_session->tlsext_signed_cert_timestamp_list_length)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - goto err; - } - } - } - - if (CBS_len(&cbs) != 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - goto err; - } - - EVP_PKEY_free(hs->peer_pubkey); - hs->peer_pubkey = pkey; - pkey = NULL; - - sk_CRYPTO_BUFFER_pop_free(hs->new_session->certs, CRYPTO_BUFFER_free); - hs->new_session->certs = certs; - certs = NULL; - - if 
(!ssl->ctx->x509_method->session_cache_objects(hs->new_session)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - goto err; - } - - if (sk_CRYPTO_BUFFER_num(hs->new_session->certs) == 0) { - if (!allow_anonymous) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_CERTIFICATE_REQUIRED); - goto err; - } - - /* OpenSSL returns X509_V_OK when no certificates are requested. This is - * classed by them as a bug, but it's assumed by at least NGINX. */ - hs->new_session->verify_result = X509_V_OK; - - /* No certificate, so nothing more to do. */ - ret = 1; - goto err; - } - - hs->new_session->peer_sha256_valid = retain_sha256; - - if (!ssl_verify_cert_chain(ssl, &hs->new_session->verify_result, - hs->new_session->x509_chain)) { - goto err; - } - - ret = 1; - -err: - sk_CRYPTO_BUFFER_pop_free(certs, CRYPTO_BUFFER_free); - EVP_PKEY_free(pkey); - return ret; -} - -int tls13_process_certificate_verify(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - int ret = 0; - uint8_t *msg = NULL; - size_t msg_len; - - if (hs->peer_pubkey == NULL) { - goto err; - } - - CBS cbs, signature; - uint16_t signature_algorithm; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - if (!CBS_get_u16(&cbs, &signature_algorithm) || - !CBS_get_u16_length_prefixed(&cbs, &signature) || - CBS_len(&cbs) != 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - goto err; - } - - int al; - if (!tls12_check_peer_sigalg(ssl, &al, signature_algorithm)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, al); - goto err; - } - hs->new_session->peer_signature_algorithm = signature_algorithm; - - if (!tls13_get_cert_verify_signature_input( - hs, &msg, &msg_len, - ssl->server ? ssl_cert_verify_client : ssl_cert_verify_server)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - goto err; - } - - int sig_ok = - ssl_public_key_verify(ssl, CBS_data(&signature), CBS_len(&signature), - signature_algorithm, hs->peer_pubkey, msg, msg_len); -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - sig_ok = 1; - ERR_clear_error(); -#endif - if (!sig_ok) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SIGNATURE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); - goto err; - } - - ret = 1; - -err: - OPENSSL_free(msg); - return ret; -} - -int tls13_process_finished(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - uint8_t verify_data[EVP_MAX_MD_SIZE]; - size_t verify_data_len; - if (!tls13_finished_mac(hs, verify_data, &verify_data_len, !ssl->server)) { - return 0; - } - - int finished_ok = - ssl->init_num == verify_data_len && - CRYPTO_memcmp(verify_data, ssl->init_msg, verify_data_len) == 0; -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - finished_ok = 1; -#endif - if (!finished_ok) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); - return 0; - } - - return 1; -} - -int tls13_add_certificate(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - CBB cbb, body, certificate_list; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_CERTIFICATE) || - /* The request context is always empty in the handshake. 
*/ - !CBB_add_u8(&body, 0) || - !CBB_add_u24_length_prefixed(&body, &certificate_list)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - - if (!ssl_has_certificate(ssl)) { - if (!ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - - return 1; - } - - CERT *cert = ssl->cert; - CRYPTO_BUFFER *leaf_buf = sk_CRYPTO_BUFFER_value(cert->chain, 0); - CBB leaf, extensions; - if (!CBB_add_u24_length_prefixed(&certificate_list, &leaf) || - !CBB_add_bytes(&leaf, CRYPTO_BUFFER_data(leaf_buf), - CRYPTO_BUFFER_len(leaf_buf)) || - !CBB_add_u16_length_prefixed(&certificate_list, &extensions)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - - if (hs->scts_requested && ssl->cert->signed_cert_timestamp_list != NULL) { - CBB contents; - if (!CBB_add_u16(&extensions, TLSEXT_TYPE_certificate_timestamp) || - !CBB_add_u16_length_prefixed(&extensions, &contents) || - !CBB_add_bytes( - &contents, - CRYPTO_BUFFER_data(ssl->cert->signed_cert_timestamp_list), - CRYPTO_BUFFER_len(ssl->cert->signed_cert_timestamp_list)) || - !CBB_flush(&extensions)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - } - - if (hs->ocsp_stapling_requested && - ssl->cert->ocsp_response != NULL) { - CBB contents, ocsp_response; - if (!CBB_add_u16(&extensions, TLSEXT_TYPE_status_request) || - !CBB_add_u16_length_prefixed(&extensions, &contents) || - !CBB_add_u8(&contents, TLSEXT_STATUSTYPE_ocsp) || - !CBB_add_u24_length_prefixed(&contents, &ocsp_response) || - !CBB_add_bytes(&ocsp_response, - CRYPTO_BUFFER_data(ssl->cert->ocsp_response), - CRYPTO_BUFFER_len(ssl->cert->ocsp_response)) || - !CBB_flush(&extensions)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - } - - for (size_t i = 1; i < sk_CRYPTO_BUFFER_num(cert->chain); i++) { - CRYPTO_BUFFER *cert_buf = sk_CRYPTO_BUFFER_value(cert->chain, i); - CBB child; - if (!CBB_add_u24_length_prefixed(&certificate_list, &child) || - !CBB_add_bytes(&child, CRYPTO_BUFFER_data(cert_buf), - CRYPTO_BUFFER_len(cert_buf)) || - !CBB_add_u16(&certificate_list, 0 /* no extensions */)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - } - - if (!ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - - return 1; - -err: - CBB_cleanup(&cbb); - return 0; -} - -enum ssl_private_key_result_t tls13_add_certificate_verify(SSL_HANDSHAKE *hs, - int is_first_run) { - SSL *const ssl = hs->ssl; - enum ssl_private_key_result_t ret = ssl_private_key_failure; - uint8_t *msg = NULL; - size_t msg_len; - CBB cbb, body; - CBB_zero(&cbb); - - uint16_t signature_algorithm; - if (!tls1_choose_signature_algorithm(hs, &signature_algorithm)) { - goto err; - } - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_CERTIFICATE_VERIFY) || - !CBB_add_u16(&body, signature_algorithm)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - goto err; - } - - /* Sign the digest. */ - CBB child; - const size_t max_sig_len = ssl_private_key_max_signature_len(ssl); - uint8_t *sig; - size_t sig_len; - if (!CBB_add_u16_length_prefixed(&body, &child) || - !CBB_reserve(&child, &sig, max_sig_len)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - goto err; - } - - enum ssl_private_key_result_t sign_result; - if (is_first_run) { - if (!tls13_get_cert_verify_signature_input( - hs, &msg, &msg_len, - ssl->server ? 
ssl_cert_verify_server : ssl_cert_verify_client)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - goto err; - } - sign_result = ssl_private_key_sign(ssl, sig, &sig_len, max_sig_len, - signature_algorithm, msg, msg_len); - } else { - sign_result = ssl_private_key_complete(ssl, sig, &sig_len, max_sig_len); - } - - if (sign_result != ssl_private_key_success) { - ret = sign_result; - goto err; - } - - if (!CBB_did_write(&child, sig_len) || - !ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - - ret = ssl_private_key_success; - -err: - CBB_cleanup(&cbb); - OPENSSL_free(msg); - return ret; -} - -int tls13_add_finished(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - size_t verify_data_len; - uint8_t verify_data[EVP_MAX_MD_SIZE]; - - if (!tls13_finished_mac(hs, verify_data, &verify_data_len, ssl->server)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); - return 0; - } - - CBB cbb, body; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_FINISHED) || - !CBB_add_bytes(&body, verify_data, verify_data_len) || - !ssl_add_message_cbb(ssl, &cbb)) { - CBB_cleanup(&cbb); - return 0; - } - - return 1; -} - -static int tls13_receive_key_update(SSL *ssl) { - CBS cbs; - uint8_t key_update_request; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - if (!CBS_get_u8(&cbs, &key_update_request) || - CBS_len(&cbs) != 0 || - (key_update_request != SSL_KEY_UPDATE_NOT_REQUESTED && - key_update_request != SSL_KEY_UPDATE_REQUESTED)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return 0; - } - - /* TODO(svaldez): Send KeyUpdate if |key_update_request| is - * |SSL_KEY_UPDATE_REQUESTED|. */ - return tls13_rotate_traffic_key(ssl, evp_aead_open); -} - -int tls13_post_handshake(SSL *ssl) { - if (ssl->s3->tmp.message_type == SSL3_MT_KEY_UPDATE) { - ssl->s3->key_update_count++; - if (ssl->s3->key_update_count > kMaxKeyUpdates) { - OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_KEY_UPDATES); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - return 0; - } - - return tls13_receive_key_update(ssl); - } - - ssl->s3->key_update_count = 0; - - if (ssl->s3->tmp.message_type == SSL3_MT_NEW_SESSION_TICKET && - !ssl->server) { - return tls13_process_new_session_ticket(ssl); - } - - // TODO(svaldez): Handle post-handshake authentication. - - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); - return 0; -} diff --git a/Sources/BoringSSL/ssl/tls13_both.cc b/Sources/BoringSSL/ssl/tls13_both.cc new file mode 100644 index 000000000..57acbcba6 --- /dev/null +++ b/Sources/BoringSSL/ssl/tls13_both.cc @@ -0,0 +1,551 @@ +/* Copyright (c) 2016, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +namespace bssl { + +// kMaxKeyUpdates is the number of consecutive KeyUpdates that will be +// processed. Without this limit an attacker could force unbounded processing +// without being able to return application data. +static const uint8_t kMaxKeyUpdates = 32; + +const uint8_t kHelloRetryRequest[SSL3_RANDOM_SIZE] = { + 0xcf, 0x21, 0xad, 0x74, 0xe5, 0x9a, 0x61, 0x11, 0xbe, 0x1d, 0x8c, + 0x02, 0x1e, 0x65, 0xb8, 0x91, 0xc2, 0xa2, 0x11, 0x16, 0x7a, 0xbb, + 0x8c, 0x5e, 0x07, 0x9e, 0x09, 0xe2, 0xc8, 0xa8, 0x33, 0x9c, +}; + +bool tls13_get_cert_verify_signature_input( + SSL_HANDSHAKE *hs, Array *out, + enum ssl_cert_verify_context_t cert_verify_context) { + ScopedCBB cbb; + if (!CBB_init(cbb.get(), 64 + 33 + 1 + 2 * EVP_MAX_MD_SIZE)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return false; + } + + for (size_t i = 0; i < 64; i++) { + if (!CBB_add_u8(cbb.get(), 0x20)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return false; + } + } + + Span context; + if (cert_verify_context == ssl_cert_verify_server) { + static const char kContext[] = "TLS 1.3, server CertificateVerify"; + context = kContext; + } else if (cert_verify_context == ssl_cert_verify_client) { + static const char kContext[] = "TLS 1.3, client CertificateVerify"; + context = kContext; + } else if (cert_verify_context == ssl_cert_verify_channel_id) { + static const char kContext[] = "TLS 1.3, Channel ID"; + context = kContext; + } else { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return false; + } + + // Note |context| includes the NUL byte separator. + if (!CBB_add_bytes(cbb.get(), + reinterpret_cast(context.data()), + context.size())) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return false; + } + + uint8_t context_hash[EVP_MAX_MD_SIZE]; + size_t context_hash_len; + if (!hs->transcript.GetHash(context_hash, &context_hash_len) || + !CBB_add_bytes(cbb.get(), context_hash, context_hash_len) || + !CBBFinishArray(cbb.get(), out)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return false; + } + + return true; +} + +int tls13_process_certificate(SSL_HANDSHAKE *hs, const SSLMessage &msg, + int allow_anonymous) { + SSL *const ssl = hs->ssl; + CBS body = msg.body, context, certificate_list; + if (!CBS_get_u8_length_prefixed(&body, &context) || + CBS_len(&context) != 0 || + !CBS_get_u24_length_prefixed(&body, &certificate_list) || + CBS_len(&body) != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return 0; + } + + UniquePtr certs(sk_CRYPTO_BUFFER_new_null()); + if (!certs) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return 0; + } + + const bool retain_sha256 = + ssl->server && ssl->retain_only_sha256_of_client_certs; + UniquePtr pkey; + while (CBS_len(&certificate_list) > 0) { + CBS certificate, extensions; + if (!CBS_get_u24_length_prefixed(&certificate_list, &certificate) || + !CBS_get_u16_length_prefixed(&certificate_list, &extensions) || + CBS_len(&certificate) == 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_LENGTH_MISMATCH); + return 0; + } + + if (sk_CRYPTO_BUFFER_num(certs.get()) == 0) { + pkey = ssl_cert_parse_pubkey(&certificate); + if (!pkey) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, 
SSL_R_DECODE_ERROR); + return 0; + } + // TLS 1.3 always uses certificate keys for signing thus the correct + // keyUsage is enforced. + if (!ssl_cert_check_digital_signature_key_usage(&certificate)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return 0; + } + + if (retain_sha256) { + // Retain the hash of the leaf certificate if requested. + SHA256(CBS_data(&certificate), CBS_len(&certificate), + hs->new_session->peer_sha256); + } + } + + UniquePtr buf( + CRYPTO_BUFFER_new_from_CBS(&certificate, ssl->ctx->pool)); + if (!buf || + !PushToStack(certs.get(), std::move(buf))) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + return 0; + } + + // Parse out the extensions. + bool have_status_request = false, have_sct = false; + CBS status_request, sct; + const SSL_EXTENSION_TYPE ext_types[] = { + {TLSEXT_TYPE_status_request, &have_status_request, &status_request}, + {TLSEXT_TYPE_certificate_timestamp, &have_sct, &sct}, + }; + + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!ssl_parse_extensions(&extensions, &alert, ext_types, + OPENSSL_ARRAY_SIZE(ext_types), + 0 /* reject unknown */)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return 0; + } + + // All Certificate extensions are parsed, but only the leaf extensions are + // stored. + if (have_status_request) { + if (ssl->server || !ssl->ocsp_stapling_enabled) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_EXTENSION); + return 0; + } + + uint8_t status_type; + CBS ocsp_response; + if (!CBS_get_u8(&status_request, &status_type) || + status_type != TLSEXT_STATUSTYPE_ocsp || + !CBS_get_u24_length_prefixed(&status_request, &ocsp_response) || + CBS_len(&ocsp_response) == 0 || + CBS_len(&status_request) != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return 0; + } + + if (sk_CRYPTO_BUFFER_num(certs.get()) == 1) { + CRYPTO_BUFFER_free(hs->new_session->ocsp_response); + hs->new_session->ocsp_response = + CRYPTO_BUFFER_new_from_CBS(&ocsp_response, ssl->ctx->pool); + if (hs->new_session->ocsp_response == nullptr) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return 0; + } + } + } + + if (have_sct) { + if (ssl->server || !ssl->signed_cert_timestamps_enabled) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_EXTENSION); + return 0; + } + + if (!ssl_is_sct_list_valid(&sct)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_ERROR_PARSING_EXTENSION); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return 0; + } + + if (sk_CRYPTO_BUFFER_num(certs.get()) == 1) { + CRYPTO_BUFFER_free(hs->new_session->signed_cert_timestamp_list); + hs->new_session->signed_cert_timestamp_list = + CRYPTO_BUFFER_new_from_CBS(&sct, ssl->ctx->pool); + if (hs->new_session->signed_cert_timestamp_list == nullptr) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return 0; + } + } + } + } + + // Store a null certificate list rather than an empty one if the peer didn't + // send certificates. 
+ if (sk_CRYPTO_BUFFER_num(certs.get()) == 0) { + certs.reset(); + } + + hs->peer_pubkey = std::move(pkey); + + sk_CRYPTO_BUFFER_pop_free(hs->new_session->certs, CRYPTO_BUFFER_free); + hs->new_session->certs = certs.release(); + + if (!ssl->ctx->x509_method->session_cache_objects(hs->new_session.get())) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return 0; + } + + if (sk_CRYPTO_BUFFER_num(hs->new_session->certs) == 0) { + if (!allow_anonymous) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PEER_DID_NOT_RETURN_A_CERTIFICATE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_CERTIFICATE_REQUIRED); + return 0; + } + + // OpenSSL returns X509_V_OK when no certificates are requested. This is + // classed by them as a bug, but it's assumed by at least NGINX. + hs->new_session->verify_result = X509_V_OK; + + // No certificate, so nothing more to do. + return 1; + } + + hs->new_session->peer_sha256_valid = retain_sha256; + return 1; +} + +int tls13_process_certificate_verify(SSL_HANDSHAKE *hs, const SSLMessage &msg) { + SSL *const ssl = hs->ssl; + if (hs->peer_pubkey == NULL) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + + CBS body = msg.body, signature; + uint16_t signature_algorithm; + if (!CBS_get_u16(&body, &signature_algorithm) || + !CBS_get_u16_length_prefixed(&body, &signature) || + CBS_len(&body) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return 0; + } + + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!tls12_check_peer_sigalg(ssl, &alert, signature_algorithm)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return 0; + } + hs->new_session->peer_signature_algorithm = signature_algorithm; + + Array input; + if (!tls13_get_cert_verify_signature_input( + hs, &input, + ssl->server ? ssl_cert_verify_client : ssl_cert_verify_server)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return 0; + } + + bool sig_ok = ssl_public_key_verify(ssl, signature, signature_algorithm, + hs->peer_pubkey.get(), input); +#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) + sig_ok = true; + ERR_clear_error(); +#endif + if (!sig_ok) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_SIGNATURE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); + return 0; + } + + return 1; +} + +int tls13_process_finished(SSL_HANDSHAKE *hs, const SSLMessage &msg, + int use_saved_value) { + SSL *const ssl = hs->ssl; + uint8_t verify_data_buf[EVP_MAX_MD_SIZE]; + const uint8_t *verify_data; + size_t verify_data_len; + if (use_saved_value) { + assert(ssl->server); + verify_data = hs->expected_client_finished; + verify_data_len = hs->hash_len; + } else { + if (!tls13_finished_mac(hs, verify_data_buf, &verify_data_len, + !ssl->server)) { + return 0; + } + verify_data = verify_data_buf; + } + + int finished_ok = CBS_mem_equal(&msg.body, verify_data, verify_data_len); +#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) + finished_ok = 1; +#endif + if (!finished_ok) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); + return 0; + } + + return 1; +} + +int tls13_add_certificate(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + ScopedCBB cbb; + CBB body, certificate_list; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CERTIFICATE) || + // The request context is always empty in the handshake. 
+ !CBB_add_u8(&body, 0) || + !CBB_add_u24_length_prefixed(&body, &certificate_list)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + + if (!ssl_has_certificate(ssl)) { + return ssl_add_message_cbb(ssl, cbb.get()); + } + + CERT *cert = ssl->cert; + CRYPTO_BUFFER *leaf_buf = sk_CRYPTO_BUFFER_value(cert->chain, 0); + CBB leaf, extensions; + if (!CBB_add_u24_length_prefixed(&certificate_list, &leaf) || + !CBB_add_bytes(&leaf, CRYPTO_BUFFER_data(leaf_buf), + CRYPTO_BUFFER_len(leaf_buf)) || + !CBB_add_u16_length_prefixed(&certificate_list, &extensions)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + + if (hs->scts_requested && ssl->cert->signed_cert_timestamp_list != NULL) { + CBB contents; + if (!CBB_add_u16(&extensions, TLSEXT_TYPE_certificate_timestamp) || + !CBB_add_u16_length_prefixed(&extensions, &contents) || + !CBB_add_bytes( + &contents, + CRYPTO_BUFFER_data(ssl->cert->signed_cert_timestamp_list), + CRYPTO_BUFFER_len(ssl->cert->signed_cert_timestamp_list)) || + !CBB_flush(&extensions)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + } + + if (hs->ocsp_stapling_requested && + ssl->cert->ocsp_response != NULL) { + CBB contents, ocsp_response; + if (!CBB_add_u16(&extensions, TLSEXT_TYPE_status_request) || + !CBB_add_u16_length_prefixed(&extensions, &contents) || + !CBB_add_u8(&contents, TLSEXT_STATUSTYPE_ocsp) || + !CBB_add_u24_length_prefixed(&contents, &ocsp_response) || + !CBB_add_bytes(&ocsp_response, + CRYPTO_BUFFER_data(ssl->cert->ocsp_response), + CRYPTO_BUFFER_len(ssl->cert->ocsp_response)) || + !CBB_flush(&extensions)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + } + + for (size_t i = 1; i < sk_CRYPTO_BUFFER_num(cert->chain); i++) { + CRYPTO_BUFFER *cert_buf = sk_CRYPTO_BUFFER_value(cert->chain, i); + CBB child; + if (!CBB_add_u24_length_prefixed(&certificate_list, &child) || + !CBB_add_bytes(&child, CRYPTO_BUFFER_data(cert_buf), + CRYPTO_BUFFER_len(cert_buf)) || + !CBB_add_u16(&certificate_list, 0 /* no extensions */)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + } + + return ssl_add_message_cbb(ssl, cbb.get()); +} + +enum ssl_private_key_result_t tls13_add_certificate_verify(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + uint16_t signature_algorithm; + if (!tls1_choose_signature_algorithm(hs, &signature_algorithm)) { + return ssl_private_key_failure; + } + + ScopedCBB cbb; + CBB body; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_CERTIFICATE_VERIFY) || + !CBB_add_u16(&body, signature_algorithm)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_private_key_failure; + } + + // Sign the digest. + CBB child; + const size_t max_sig_len = EVP_PKEY_size(hs->local_pubkey.get()); + uint8_t *sig; + size_t sig_len; + if (!CBB_add_u16_length_prefixed(&body, &child) || + !CBB_reserve(&child, &sig, max_sig_len)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_private_key_failure; + } + + Array msg; + if (!tls13_get_cert_verify_signature_input( + hs, &msg, + ssl->server ? 
ssl_cert_verify_server : ssl_cert_verify_client)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_private_key_failure; + } + + enum ssl_private_key_result_t sign_result = ssl_private_key_sign( + hs, sig, &sig_len, max_sig_len, signature_algorithm, msg); + if (sign_result != ssl_private_key_success) { + return sign_result; + } + + if (!CBB_did_write(&child, sig_len) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_private_key_failure; + } + + return ssl_private_key_success; +} + +int tls13_add_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + size_t verify_data_len; + uint8_t verify_data[EVP_MAX_MD_SIZE]; + + if (!tls13_finished_mac(hs, verify_data, &verify_data_len, ssl->server)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); + return 0; + } + + ScopedCBB cbb; + CBB body; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_FINISHED) || + !CBB_add_bytes(&body, verify_data, verify_data_len) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return 0; + } + + return 1; +} + +static int tls13_receive_key_update(SSL *ssl, const SSLMessage &msg) { + CBS body = msg.body; + uint8_t key_update_request; + if (!CBS_get_u8(&body, &key_update_request) || + CBS_len(&body) != 0 || + (key_update_request != SSL_KEY_UPDATE_NOT_REQUESTED && + key_update_request != SSL_KEY_UPDATE_REQUESTED)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return 0; + } + + if (!tls13_rotate_traffic_key(ssl, evp_aead_open)) { + return 0; + } + + // Acknowledge the KeyUpdate + if (key_update_request == SSL_KEY_UPDATE_REQUESTED && + !ssl->s3->key_update_pending) { + ScopedCBB cbb; + CBB body_cbb; + if (!ssl->method->init_message(ssl, cbb.get(), &body_cbb, + SSL3_MT_KEY_UPDATE) || + !CBB_add_u8(&body_cbb, SSL_KEY_UPDATE_NOT_REQUESTED) || + !ssl_add_message_cbb(ssl, cbb.get()) || + !tls13_rotate_traffic_key(ssl, evp_aead_seal)) { + return 0; + } + + // Suppress KeyUpdate acknowledgments until this change is written to the + // wire. This prevents us from accumulating write obligations when read and + // write progress at different rates. See draft-ietf-tls-tls13-18, section + // 4.5.3. + ssl->s3->key_update_pending = true; + } + + return 1; +} + +int tls13_post_handshake(SSL *ssl, const SSLMessage &msg) { + if (msg.type == SSL3_MT_KEY_UPDATE) { + ssl->s3->key_update_count++; + if (ssl->s3->key_update_count > kMaxKeyUpdates) { + OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_KEY_UPDATES); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + return 0; + } + + return tls13_receive_key_update(ssl, msg); + } + + ssl->s3->key_update_count = 0; + + if (msg.type == SSL3_MT_NEW_SESSION_TICKET && !ssl->server) { + return tls13_process_new_session_ticket(ssl, msg); + } + + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); + return 0; +} + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/tls13_client.c b/Sources/BoringSSL/ssl/tls13_client.c deleted file mode 100644 index 8e994e581..000000000 --- a/Sources/BoringSSL/ssl/tls13_client.c +++ /dev/null @@ -1,712 +0,0 @@ -/* Copyright (c) 2016, Google Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "../crypto/internal.h" -#include "internal.h" - - -enum client_hs_state_t { - state_process_hello_retry_request = 0, - state_send_second_client_hello, - state_process_server_hello, - state_process_encrypted_extensions, - state_process_certificate_request, - state_process_server_certificate, - state_process_server_certificate_verify, - state_process_server_finished, - state_send_client_certificate, - state_send_client_certificate_verify, - state_complete_client_certificate_verify, - state_complete_second_flight, - state_done, -}; - -static const uint8_t kZeroes[EVP_MAX_MD_SIZE] = {0}; - -static enum ssl_hs_wait_t do_process_hello_retry_request(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (ssl->s3->tmp.message_type != SSL3_MT_HELLO_RETRY_REQUEST) { - hs->tls13_state = state_process_server_hello; - return ssl_hs_ok; - } - - CBS cbs, extensions; - uint16_t server_wire_version; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - if (!CBS_get_u16(&cbs, &server_wire_version) || - !CBS_get_u16_length_prefixed(&cbs, &extensions) || - /* HelloRetryRequest may not be empty. */ - CBS_len(&extensions) == 0 || - CBS_len(&cbs) != 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return ssl_hs_error; - } - - int have_cookie, have_key_share; - CBS cookie, key_share; - const SSL_EXTENSION_TYPE ext_types[] = { - {TLSEXT_TYPE_key_share, &have_key_share, &key_share}, - {TLSEXT_TYPE_cookie, &have_cookie, &cookie}, - }; - - uint8_t alert = SSL_AD_DECODE_ERROR; - if (!ssl_parse_extensions(&extensions, &alert, ext_types, - OPENSSL_ARRAY_SIZE(ext_types), - 0 /* reject unknown */)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return ssl_hs_error; - } - - if (have_cookie) { - CBS cookie_value; - if (!CBS_get_u16_length_prefixed(&cookie, &cookie_value) || - CBS_len(&cookie_value) == 0 || - CBS_len(&cookie) != 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return ssl_hs_error; - } - - if (!CBS_stow(&cookie_value, &hs->cookie, &hs->cookie_len)) { - return ssl_hs_error; - } - } - - if (have_key_share) { - uint16_t group_id; - if (!CBS_get_u16(&key_share, &group_id) || CBS_len(&key_share) != 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return ssl_hs_error; - } - - /* The group must be supported. */ - const uint16_t *groups; - size_t groups_len; - tls1_get_grouplist(ssl, &groups, &groups_len); - int found = 0; - for (size_t i = 0; i < groups_len; i++) { - if (groups[i] == group_id) { - found = 1; - break; - } - } - - if (!found) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); - return ssl_hs_error; - } - - /* Check that the HelloRetryRequest does not request the key share that - * was provided in the initial ClientHello. 
*/ - if (SSL_ECDH_CTX_get_id(&hs->ecdh_ctx) == group_id) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); - return ssl_hs_error; - } - - SSL_ECDH_CTX_cleanup(&hs->ecdh_ctx); - hs->retry_group = group_id; - } - - if (!ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - hs->received_hello_retry_request = 1; - hs->tls13_state = state_send_second_client_hello; - return ssl_hs_ok; -} - -static enum ssl_hs_wait_t do_send_second_client_hello(SSL_HANDSHAKE *hs) { - if (!ssl_write_client_hello(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_server_hello; - return ssl_hs_flush_and_read_message; -} - -static enum ssl_hs_wait_t do_process_server_hello(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_check_message_type(ssl, SSL3_MT_SERVER_HELLO)) { - return ssl_hs_error; - } - - CBS cbs, server_random, extensions; - uint16_t server_wire_version; - uint16_t cipher_suite; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - if (!CBS_get_u16(&cbs, &server_wire_version) || - !CBS_get_bytes(&cbs, &server_random, SSL3_RANDOM_SIZE) || - !CBS_get_u16(&cbs, &cipher_suite) || - !CBS_get_u16_length_prefixed(&cbs, &extensions) || - CBS_len(&cbs) != 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return ssl_hs_error; - } - - if (server_wire_version != ssl->version) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_VERSION_NUMBER); - return ssl_hs_error; - } - - assert(ssl->s3->have_version); - OPENSSL_memcpy(ssl->s3->server_random, CBS_data(&server_random), - SSL3_RANDOM_SIZE); - - const SSL_CIPHER *cipher = SSL_get_cipher_by_value(cipher_suite); - if (cipher == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_CIPHER_RETURNED); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return ssl_hs_error; - } - - /* Check if the cipher is a TLS 1.3 cipher. */ - if (SSL_CIPHER_get_min_version(cipher) > ssl3_protocol_version(ssl) || - SSL_CIPHER_get_max_version(cipher) < ssl3_protocol_version(ssl)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CIPHER_RETURNED); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return ssl_hs_error; - } - - /* Parse out the extensions. 
*/ - int have_key_share = 0, have_pre_shared_key = 0, have_short_header = 0; - CBS key_share, pre_shared_key, short_header; - const SSL_EXTENSION_TYPE ext_types[] = { - {TLSEXT_TYPE_key_share, &have_key_share, &key_share}, - {TLSEXT_TYPE_pre_shared_key, &have_pre_shared_key, &pre_shared_key}, - {TLSEXT_TYPE_short_header, &have_short_header, &short_header}, - }; - - uint8_t alert = SSL_AD_DECODE_ERROR; - if (!ssl_parse_extensions(&extensions, &alert, ext_types, - OPENSSL_ARRAY_SIZE(ext_types), - 0 /* reject unknown */)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return ssl_hs_error; - } - - alert = SSL_AD_DECODE_ERROR; - if (have_pre_shared_key) { - if (ssl->session == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_EXTENSION); - return ssl_hs_error; - } - - if (!ssl_ext_pre_shared_key_parse_serverhello(hs, &alert, - &pre_shared_key)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return ssl_hs_error; - } - - if (ssl->session->ssl_version != ssl->version) { - OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_VERSION_NOT_RETURNED); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return ssl_hs_error; - } - - if (ssl->session->cipher->algorithm_prf != cipher->algorithm_prf) { - OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_PRF_HASH_MISMATCH); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return ssl_hs_error; - } - - if (!ssl_session_is_context_valid(ssl, ssl->session)) { - /* This is actually a client application bug. */ - OPENSSL_PUT_ERROR(SSL, - SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return ssl_hs_error; - } - - ssl->s3->session_reused = 1; - /* Only authentication information carries over in TLS 1.3. */ - hs->new_session = SSL_SESSION_dup(ssl->session, SSL_SESSION_DUP_AUTH_ONLY); - if (hs->new_session == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return ssl_hs_error; - } - ssl_set_session(ssl, NULL); - - /* Resumption incorporates fresh key material, so refresh the timeout. */ - ssl_session_renew_timeout(ssl, hs->new_session, - ssl->initial_ctx->session_psk_dhe_timeout); - } else if (!ssl_get_new_session(hs, 0)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return ssl_hs_error; - } - - hs->new_session->cipher = cipher; - hs->new_cipher = cipher; - - /* Store the initial negotiated ALPN in the session. */ - if (ssl->s3->alpn_selected != NULL) { - hs->new_session->early_alpn = - BUF_memdup(ssl->s3->alpn_selected, ssl->s3->alpn_selected_len); - if (hs->new_session->early_alpn == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return ssl_hs_error; - } - hs->new_session->early_alpn_len = ssl->s3->alpn_selected_len; - } - - /* The PRF hash is now known. Set up the key schedule. */ - if (!tls13_init_key_schedule(hs)) { - return ssl_hs_error; - } - - /* Incorporate the PSK into the running secret. */ - if (ssl->s3->session_reused) { - if (!tls13_advance_key_schedule(hs, hs->new_session->master_key, - hs->new_session->master_key_length)) { - return ssl_hs_error; - } - } else if (!tls13_advance_key_schedule(hs, kZeroes, hs->hash_len)) { - return ssl_hs_error; - } - - if (!have_key_share) { - /* We do not support psk_ke and thus always require a key share. 
*/ - OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_KEY_SHARE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_MISSING_EXTENSION); - return ssl_hs_error; - } - - /* Resolve ECDHE and incorporate it into the secret. */ - uint8_t *dhe_secret; - size_t dhe_secret_len; - alert = SSL_AD_DECODE_ERROR; - if (!ssl_ext_key_share_parse_serverhello(hs, &dhe_secret, &dhe_secret_len, - &alert, &key_share)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return ssl_hs_error; - } - - if (!tls13_advance_key_schedule(hs, dhe_secret, dhe_secret_len)) { - OPENSSL_free(dhe_secret); - return ssl_hs_error; - } - OPENSSL_free(dhe_secret); - - /* Negotiate short record headers. */ - if (have_short_header) { - if (CBS_len(&short_header) != 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return ssl_hs_error; - } - - if (!ssl->ctx->short_header_enabled) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_EXTENSION); - return ssl_hs_error; - } - - ssl->s3->short_header = 1; - } - - if (!ssl_hash_current_message(hs) || - !tls13_derive_handshake_secrets(hs) || - !tls13_set_traffic_key(ssl, evp_aead_open, hs->server_handshake_secret, - hs->hash_len) || - !tls13_set_traffic_key(ssl, evp_aead_seal, hs->client_handshake_secret, - hs->hash_len)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_encrypted_extensions; - return ssl_hs_read_message; -} - -static enum ssl_hs_wait_t do_process_encrypted_extensions(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_check_message_type(ssl, SSL3_MT_ENCRYPTED_EXTENSIONS)) { - return ssl_hs_error; - } - - CBS cbs; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - if (!ssl_parse_serverhello_tlsext(hs, &cbs)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); - return ssl_hs_error; - } - if (CBS_len(&cbs) != 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return ssl_hs_error; - } - - if (!ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_certificate_request; - return ssl_hs_read_message; -} - -static enum ssl_hs_wait_t do_process_certificate_request(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - /* CertificateRequest may only be sent in non-resumption handshakes. */ - if (ssl->s3->session_reused) { - hs->tls13_state = state_process_server_finished; - return ssl_hs_ok; - } - - /* CertificateRequest is optional. */ - if (ssl->s3->tmp.message_type != SSL3_MT_CERTIFICATE_REQUEST) { - hs->tls13_state = state_process_server_certificate; - return ssl_hs_ok; - } - - CBS cbs, context, supported_signature_algorithms; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - if (!CBS_get_u8_length_prefixed(&cbs, &context) || - /* The request context is always empty during the handshake. */ - CBS_len(&context) != 0 || - !CBS_get_u16_length_prefixed(&cbs, &supported_signature_algorithms) || - CBS_len(&supported_signature_algorithms) == 0 || - !tls1_parse_peer_sigalgs(hs, &supported_signature_algorithms)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return ssl_hs_error; - } - - uint8_t alert = SSL_AD_DECODE_ERROR; - STACK_OF(X509_NAME) *ca_sk = ssl_parse_client_CA_list(ssl, &alert, &cbs); - if (ca_sk == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return ssl_hs_error; - } - - /* Ignore extensions. 
*/ - CBS extensions; - if (!CBS_get_u16_length_prefixed(&cbs, &extensions) || - CBS_len(&cbs) != 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - sk_X509_NAME_pop_free(ca_sk, X509_NAME_free); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - return ssl_hs_error; - } - - hs->cert_request = 1; - sk_X509_NAME_pop_free(hs->ca_names, X509_NAME_free); - hs->ca_names = ca_sk; - - if (!ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_server_certificate; - return ssl_hs_read_message; -} - -static enum ssl_hs_wait_t do_process_server_certificate(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_check_message_type(ssl, SSL3_MT_CERTIFICATE) || - !tls13_process_certificate(hs, 0 /* certificate required */) || - !ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_server_certificate_verify; - return ssl_hs_read_message; -} - -static enum ssl_hs_wait_t do_process_server_certificate_verify( - SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_check_message_type(ssl, SSL3_MT_CERTIFICATE_VERIFY) || - !tls13_process_certificate_verify(hs) || - !ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_server_finished; - return ssl_hs_read_message; -} - -static enum ssl_hs_wait_t do_process_server_finished(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_check_message_type(ssl, SSL3_MT_FINISHED) || - !tls13_process_finished(hs) || - !ssl_hash_current_message(hs) || - /* Update the secret to the master secret and derive traffic keys. */ - !tls13_advance_key_schedule(hs, kZeroes, hs->hash_len) || - !tls13_derive_application_secrets(hs)) { - return ssl_hs_error; - } - - ssl->method->received_flight(ssl); - hs->tls13_state = state_send_client_certificate; - return ssl_hs_ok; -} - -static enum ssl_hs_wait_t do_send_client_certificate(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - /* The peer didn't request a certificate. */ - if (!hs->cert_request) { - hs->tls13_state = state_complete_second_flight; - return ssl_hs_ok; - } - - /* Call cert_cb to update the certificate. */ - if (ssl->cert->cert_cb != NULL) { - int rv = ssl->cert->cert_cb(ssl, ssl->cert->cert_cb_arg); - if (rv == 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_CB_ERROR); - return ssl_hs_error; - } - if (rv < 0) { - hs->tls13_state = state_send_client_certificate; - return ssl_hs_x509_lookup; - } - } - - if (!ssl_auto_chain_if_needed(ssl) || - !tls13_add_certificate(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_send_client_certificate_verify; - return ssl_hs_ok; -} - -static enum ssl_hs_wait_t do_send_client_certificate_verify(SSL_HANDSHAKE *hs, - int is_first_run) { - SSL *const ssl = hs->ssl; - /* Don't send CertificateVerify if there is no certificate. */ - if (!ssl_has_certificate(ssl)) { - hs->tls13_state = state_complete_second_flight; - return ssl_hs_ok; - } - - switch (tls13_add_certificate_verify(hs, is_first_run)) { - case ssl_private_key_success: - hs->tls13_state = state_complete_second_flight; - return ssl_hs_ok; - - case ssl_private_key_retry: - hs->tls13_state = state_complete_client_certificate_verify; - return ssl_hs_private_key_operation; - - case ssl_private_key_failure: - return ssl_hs_error; - } - - assert(0); - return ssl_hs_error; -} - -static enum ssl_hs_wait_t do_complete_second_flight(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - - /* Send a Channel ID assertion if necessary. 
*/ - if (ssl->s3->tlsext_channel_id_valid) { - if (!ssl_do_channel_id_callback(ssl)) { - hs->tls13_state = state_complete_second_flight; - return ssl_hs_error; - } - - if (ssl->tlsext_channel_id_private == NULL) { - return ssl_hs_channel_id_lookup; - } - - CBB cbb, body; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_CHANNEL_ID) || - !tls1_write_channel_id(hs, &body) || - !ssl_add_message_cbb(ssl, &cbb)) { - CBB_cleanup(&cbb); - return ssl_hs_error; - } - } - - /* Send a Finished message. */ - if (!tls13_add_finished(hs)) { - return ssl_hs_error; - } - - /* Derive the final keys and enable them. */ - if (!tls13_set_traffic_key(ssl, evp_aead_open, hs->server_traffic_secret_0, - hs->hash_len) || - !tls13_set_traffic_key(ssl, evp_aead_seal, hs->client_traffic_secret_0, - hs->hash_len) || - !tls13_derive_resumption_secret(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_done; - return ssl_hs_flush; -} - -enum ssl_hs_wait_t tls13_client_handshake(SSL_HANDSHAKE *hs) { - while (hs->tls13_state != state_done) { - enum ssl_hs_wait_t ret = ssl_hs_error; - enum client_hs_state_t state = hs->tls13_state; - switch (state) { - case state_process_hello_retry_request: - ret = do_process_hello_retry_request(hs); - break; - case state_send_second_client_hello: - ret = do_send_second_client_hello(hs); - break; - case state_process_server_hello: - ret = do_process_server_hello(hs); - break; - case state_process_encrypted_extensions: - ret = do_process_encrypted_extensions(hs); - break; - case state_process_certificate_request: - ret = do_process_certificate_request(hs); - break; - case state_process_server_certificate: - ret = do_process_server_certificate(hs); - break; - case state_process_server_certificate_verify: - ret = do_process_server_certificate_verify(hs); - break; - case state_process_server_finished: - ret = do_process_server_finished(hs); - break; - case state_send_client_certificate: - ret = do_send_client_certificate(hs); - break; - case state_send_client_certificate_verify: - ret = do_send_client_certificate_verify(hs, 1 /* first run */); - break; - case state_complete_client_certificate_verify: - ret = do_send_client_certificate_verify(hs, 0 /* complete */); - break; - case state_complete_second_flight: - ret = do_complete_second_flight(hs); - break; - case state_done: - ret = ssl_hs_ok; - break; - } - - if (ret != ssl_hs_ok) { - return ret; - } - } - - return ssl_hs_ok; -} - -int tls13_process_new_session_ticket(SSL *ssl) { - int ret = 0; - SSL_SESSION *session = SSL_SESSION_dup(ssl->s3->established_session, - SSL_SESSION_INCLUDE_NONAUTH); - if (session == NULL) { - return 0; - } - - ssl_session_rebase_time(ssl, session); - - uint32_t server_timeout; - CBS cbs, ticket, extensions; - CBS_init(&cbs, ssl->init_msg, ssl->init_num); - if (!CBS_get_u32(&cbs, &server_timeout) || - !CBS_get_u32(&cbs, &session->ticket_age_add) || - !CBS_get_u16_length_prefixed(&cbs, &ticket) || - !CBS_stow(&ticket, &session->tlsext_tick, &session->tlsext_ticklen) || - !CBS_get_u16_length_prefixed(&cbs, &extensions) || - CBS_len(&cbs) != 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto err; - } - - /* Cap the renewable lifetime by the server advertised value. This avoids - * wasting bandwidth on 0-RTT when we know the server will reject it. - * - * TODO(davidben): This dance where we're not sure if long or uint32_t is - * bigger is silly. session->timeout should not be a long to begin with. 
- * https://crbug.com/boringssl/155. */ -#if LONG_MAX < 0xffffffff - if (server_timeout > LONG_MAX) { - server_timeout = LONG_MAX; - } -#endif - if (session->timeout > (long)server_timeout) { - session->timeout = (long)server_timeout; - } - - /* Parse out the extensions. */ - int have_early_data_info = 0; - CBS early_data_info; - const SSL_EXTENSION_TYPE ext_types[] = { - {TLSEXT_TYPE_ticket_early_data_info, &have_early_data_info, - &early_data_info}, - }; - - uint8_t alert = SSL_AD_DECODE_ERROR; - if (!ssl_parse_extensions(&extensions, &alert, ext_types, - OPENSSL_ARRAY_SIZE(ext_types), - 1 /* ignore unknown */)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - goto err; - } - - if (have_early_data_info && ssl->ctx->enable_early_data) { - if (!CBS_get_u32(&early_data_info, &session->ticket_max_early_data) || - CBS_len(&early_data_info) != 0) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - goto err; - } - } - - session->ticket_age_add_valid = 1; - session->not_resumable = 0; - - if (ssl->ctx->new_session_cb != NULL && - ssl->ctx->new_session_cb(ssl, session)) { - /* |new_session_cb|'s return value signals that it took ownership. */ - session = NULL; - } - - ret = 1; - -err: - SSL_SESSION_free(session); - return ret; -} - -void ssl_clear_tls13_state(SSL_HANDSHAKE *hs) { - SSL_ECDH_CTX_cleanup(&hs->ecdh_ctx); - - OPENSSL_free(hs->key_share_bytes); - hs->key_share_bytes = NULL; - hs->key_share_bytes_len = 0; -} diff --git a/Sources/BoringSSL/ssl/tls13_client.cc b/Sources/BoringSSL/ssl/tls13_client.cc new file mode 100644 index 000000000..688fa061f --- /dev/null +++ b/Sources/BoringSSL/ssl/tls13_client.cc @@ -0,0 +1,977 @@ +/* Copyright (c) 2016, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +namespace bssl { + +enum client_hs_state_t { + state_read_hello_retry_request = 0, + state_send_second_client_hello, + state_read_server_hello, + state_read_encrypted_extensions, + state_read_certificate_request, + state_read_server_certificate, + state_read_server_certificate_verify, + state_read_server_finished, + state_send_end_of_early_data, + state_send_client_certificate, + state_send_client_certificate_verify, + state_complete_second_flight, + state_done, +}; + +static const uint8_t kZeroes[EVP_MAX_MD_SIZE] = {0}; + +static enum ssl_hs_wait_t do_read_hello_retry_request(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + assert(ssl->s3->have_version); + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + CBS extensions; + uint16_t cipher_suite = 0; + if (ssl_is_draft22(ssl->version)) { + // Queue up a ChangeCipherSpec for whenever we next send something. This + // will be before the second ClientHello. If we offered early data, this was + // already done. + if (!hs->early_data_offered && + !ssl->method->add_change_cipher_spec(ssl)) { + return ssl_hs_error; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_SERVER_HELLO)) { + return ssl_hs_error; + } + + CBS body = msg.body, server_random, session_id; + uint16_t server_version; + if (!CBS_get_u16(&body, &server_version) || + !CBS_get_bytes(&body, &server_random, SSL3_RANDOM_SIZE) || + !CBS_get_u8_length_prefixed(&body, &session_id) || + !CBS_get_u16(&body, &cipher_suite) || + !CBS_skip(&body, 1) || + !CBS_get_u16_length_prefixed(&body, &extensions) || + CBS_len(&extensions) == 0 || + CBS_len(&body) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + if (!CBS_mem_equal(&server_random, kHelloRetryRequest, SSL3_RANDOM_SIZE)) { + hs->tls13_state = state_read_server_hello; + return ssl_hs_ok; + } + } else { + if (msg.type != SSL3_MT_HELLO_RETRY_REQUEST) { + hs->tls13_state = state_read_server_hello; + return ssl_hs_ok; + } + + CBS body = msg.body; + uint16_t server_version; + if (!CBS_get_u16(&body, &server_version) || + (ssl_is_draft21(ssl->version) && + !CBS_get_u16(&body, &cipher_suite)) || + !CBS_get_u16_length_prefixed(&body, &extensions) || + CBS_len(&body) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + } + + if (ssl_is_draft21(ssl->version)) { + const SSL_CIPHER *cipher = SSL_get_cipher_by_value(cipher_suite); + // Check if the cipher is a TLS 1.3 cipher. 
+ if (cipher == NULL || + SSL_CIPHER_get_min_version(cipher) > ssl_protocol_version(ssl) || + SSL_CIPHER_get_max_version(cipher) < ssl_protocol_version(ssl)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CIPHER_RETURNED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + hs->new_cipher = cipher; + + if (!hs->transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher) || + !hs->transcript.UpdateForHelloRetryRequest()) { + return ssl_hs_error; + } + } + + + bool have_cookie, have_key_share, have_supported_versions; + CBS cookie, key_share, supported_versions; + const SSL_EXTENSION_TYPE ext_types[] = { + {TLSEXT_TYPE_key_share, &have_key_share, &key_share}, + {TLSEXT_TYPE_cookie, &have_cookie, &cookie}, + {TLSEXT_TYPE_supported_versions, &have_supported_versions, + &supported_versions}, + }; + + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!ssl_parse_extensions(&extensions, &alert, ext_types, + OPENSSL_ARRAY_SIZE(ext_types), + 0 /* reject unknown */)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + if (!ssl_is_draft22(ssl->version) && have_supported_versions) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_EXTENSION); + return ssl_hs_error; + } + if (!have_cookie && !have_key_share) { + OPENSSL_PUT_ERROR(SSL, SSL_R_EMPTY_HELLO_RETRY_REQUEST); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + if (have_cookie) { + CBS cookie_value; + if (!CBS_get_u16_length_prefixed(&cookie, &cookie_value) || + CBS_len(&cookie_value) == 0 || + CBS_len(&cookie) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + if (!hs->cookie.CopyFrom(cookie_value)) { + return ssl_hs_error; + } + } + + if (have_key_share) { + uint16_t group_id; + if (!CBS_get_u16(&key_share, &group_id) || CBS_len(&key_share) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + // The group must be supported. + if (!tls1_check_group_id(ssl, group_id)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); + return ssl_hs_error; + } + + // Check that the HelloRetryRequest does not request the key share that + // was provided in the initial ClientHello. + if (hs->key_share->GroupID() == group_id) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); + return ssl_hs_error; + } + + hs->key_share.reset(); + hs->retry_group = group_id; + } + + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->received_hello_retry_request = true; + hs->tls13_state = state_send_second_client_hello; + // 0-RTT is rejected if we receive a HelloRetryRequest. + if (hs->in_early_data) { + return ssl_hs_early_data_rejected; + } + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_second_client_hello(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + // Restore the null cipher. We may have switched due to 0-RTT. 
+ bssl::UniquePtr null_ctx = + SSLAEADContext::CreateNullCipher(SSL_is_dtls(ssl)); + if (!null_ctx || + !ssl->method->set_write_state(ssl, std::move(null_ctx))) { + return ssl_hs_error; + } + + ssl->s3->aead_write_ctx->SetVersionIfNullCipher(ssl->version); + + if (!ssl_write_client_hello(hs)) { + return ssl_hs_error; + } + + hs->tls13_state = state_read_server_hello; + return ssl_hs_flush; +} + +static enum ssl_hs_wait_t do_read_server_hello(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + if (!ssl_check_message_type(ssl, msg, SSL3_MT_SERVER_HELLO)) { + return ssl_hs_error; + } + + CBS body = msg.body, server_random, session_id, extensions; + uint16_t server_version; + uint16_t cipher_suite; + uint8_t compression_method; + if (!CBS_get_u16(&body, &server_version) || + !CBS_get_bytes(&body, &server_random, SSL3_RANDOM_SIZE) || + (ssl_is_resumption_experiment(ssl->version) && + !CBS_get_u8_length_prefixed(&body, &session_id)) || + !CBS_get_u16(&body, &cipher_suite) || + (ssl_is_resumption_experiment(ssl->version) && + (!CBS_get_u8(&body, &compression_method) || compression_method != 0)) || + !CBS_get_u16_length_prefixed(&body, &extensions) || + CBS_len(&body) != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + + uint16_t expected_version = ssl_is_resumption_experiment(ssl->version) + ? TLS1_2_VERSION + : ssl->version; + if (server_version != expected_version) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_VERSION_NUMBER); + return ssl_hs_error; + } + + // Forbid a second HelloRetryRequest. + if (ssl_is_draft22(ssl->version) && + CBS_mem_equal(&server_random, kHelloRetryRequest, SSL3_RANDOM_SIZE)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_MESSAGE); + return ssl_hs_error; + } + + OPENSSL_memcpy(ssl->s3->server_random, CBS_data(&server_random), + SSL3_RANDOM_SIZE); + + // Check if the cipher is a TLS 1.3 cipher. + const SSL_CIPHER *cipher = SSL_get_cipher_by_value(cipher_suite); + if (cipher == nullptr || + SSL_CIPHER_get_min_version(cipher) > ssl_protocol_version(ssl) || + SSL_CIPHER_get_max_version(cipher) < ssl_protocol_version(ssl)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CIPHER_RETURNED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + // Check that the cipher matches the one in the HelloRetryRequest. + if (ssl_is_draft21(ssl->version) && + hs->received_hello_retry_request && + hs->new_cipher != cipher) { + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CIPHER_RETURNED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + // Parse out the extensions. 
+ bool have_key_share = false, have_pre_shared_key = false, + have_supported_versions = false; + CBS key_share, pre_shared_key, supported_versions; + const SSL_EXTENSION_TYPE ext_types[] = { + {TLSEXT_TYPE_key_share, &have_key_share, &key_share}, + {TLSEXT_TYPE_pre_shared_key, &have_pre_shared_key, &pre_shared_key}, + {TLSEXT_TYPE_supported_versions, &have_supported_versions, + &supported_versions}, + }; + + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!ssl_parse_extensions(&extensions, &alert, ext_types, + OPENSSL_ARRAY_SIZE(ext_types), + 0 /* reject unknown */)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + // supported_versions is parsed in handshake_client to select the experimental + // TLS 1.3 version. + if (have_supported_versions && !ssl_is_resumption_experiment(ssl->version)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_EXTENSION); + return ssl_hs_error; + } + + alert = SSL_AD_DECODE_ERROR; + if (have_pre_shared_key) { + if (ssl->session == NULL) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNSUPPORTED_EXTENSION); + return ssl_hs_error; + } + + if (!ssl_ext_pre_shared_key_parse_serverhello(hs, &alert, + &pre_shared_key)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + if (ssl->session->ssl_version != ssl->version) { + OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_VERSION_NOT_RETURNED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + if (ssl->session->cipher->algorithm_prf != cipher->algorithm_prf) { + OPENSSL_PUT_ERROR(SSL, SSL_R_OLD_SESSION_PRF_HASH_MISMATCH); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + if (!ssl_session_is_context_valid(ssl, ssl->session)) { + // This is actually a client application bug. + OPENSSL_PUT_ERROR(SSL, + SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + return ssl_hs_error; + } + + ssl->s3->session_reused = true; + // Only authentication information carries over in TLS 1.3. + hs->new_session = SSL_SESSION_dup(ssl->session, SSL_SESSION_DUP_AUTH_ONLY); + if (!hs->new_session) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + ssl_set_session(ssl, NULL); + + // Resumption incorporates fresh key material, so refresh the timeout. + ssl_session_renew_timeout(ssl, hs->new_session.get(), + ssl->session_ctx->session_psk_dhe_timeout); + } else if (!ssl_get_new_session(hs, 0)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + hs->new_session->cipher = cipher; + hs->new_cipher = cipher; + + size_t hash_len = + EVP_MD_size(ssl_get_handshake_digest(ssl_protocol_version(ssl), cipher)); + + // Set up the key schedule and incorporate the PSK into the running secret. + if (ssl->s3->session_reused) { + if (!tls13_init_key_schedule(hs, hs->new_session->master_key, + hs->new_session->master_key_length)) { + return ssl_hs_error; + } + } else if (!tls13_init_key_schedule(hs, kZeroes, hash_len)) { + return ssl_hs_error; + } + + if (!have_key_share) { + // We do not support psk_ke and thus always require a key share. + OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_KEY_SHARE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_MISSING_EXTENSION); + return ssl_hs_error; + } + + // Resolve ECDHE and incorporate it into the secret. 
+ Array dhe_secret; + alert = SSL_AD_DECODE_ERROR; + if (!ssl_ext_key_share_parse_serverhello(hs, &dhe_secret, &alert, + &key_share)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + if (!tls13_advance_key_schedule(hs, dhe_secret.data(), dhe_secret.size()) || + !ssl_hash_message(hs, msg) || + !tls13_derive_handshake_secrets(hs) || + !tls13_set_traffic_key(ssl, evp_aead_open, hs->server_handshake_secret, + hs->hash_len)) { + return ssl_hs_error; + } + + if (!hs->early_data_offered) { + // Earlier versions of the resumption experiment added ChangeCipherSpec just + // before the Finished flight. + if (ssl_is_resumption_client_ccs_experiment(ssl->version) && + !ssl_is_draft22(ssl->version) && + !ssl->method->add_change_cipher_spec(ssl)) { + return ssl_hs_error; + } + + // If not sending early data, set client traffic keys now so that alerts are + // encrypted. + if (!tls13_set_traffic_key(ssl, evp_aead_seal, hs->client_handshake_secret, + hs->hash_len)) { + return ssl_hs_error; + } + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_read_encrypted_extensions; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_encrypted_extensions(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + if (!ssl_check_message_type(ssl, msg, SSL3_MT_ENCRYPTED_EXTENSIONS)) { + return ssl_hs_error; + } + + CBS body = msg.body; + if (!ssl_parse_serverhello_tlsext(hs, &body)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PARSE_TLSEXT); + return ssl_hs_error; + } + if (CBS_len(&body) != 0) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + // Store the negotiated ALPN in the session. + if (!ssl->s3->alpn_selected.empty()) { + hs->new_session->early_alpn = (uint8_t *)BUF_memdup( + ssl->s3->alpn_selected.data(), ssl->s3->alpn_selected.size()); + if (hs->new_session->early_alpn == NULL) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + hs->new_session->early_alpn_len = ssl->s3->alpn_selected.size(); + } + + if (ssl->early_data_accepted) { + if (hs->early_session->cipher != hs->new_session->cipher || + MakeConstSpan(hs->early_session->early_alpn, + hs->early_session->early_alpn_len) != + ssl->s3->alpn_selected) { + OPENSSL_PUT_ERROR(SSL, SSL_R_ALPN_MISMATCH_ON_EARLY_DATA); + return ssl_hs_error; + } + if (ssl->s3->tlsext_channel_id_valid || hs->received_custom_extension) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION_ON_EARLY_DATA); + return ssl_hs_error; + } + } + + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_read_certificate_request; + if (hs->in_early_data && !ssl->early_data_accepted) { + return ssl_hs_early_data_rejected; + } + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_certificate_request(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + // CertificateRequest may only be sent in non-resumption handshakes. + if (ssl->s3->session_reused) { + hs->tls13_state = state_read_server_finished; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + // CertificateRequest is optional. 
+ if (msg.type != SSL3_MT_CERTIFICATE_REQUEST) { + hs->tls13_state = state_read_server_certificate; + return ssl_hs_ok; + } + + + if (ssl_is_draft21(ssl->version)) { + bool have_sigalgs = false, have_ca = false; + CBS sigalgs, ca; + const SSL_EXTENSION_TYPE ext_types[] = { + {TLSEXT_TYPE_signature_algorithms, &have_sigalgs, &sigalgs}, + {TLSEXT_TYPE_certificate_authorities, &have_ca, &ca}, + }; + + CBS body = msg.body, context, extensions, supported_signature_algorithms; + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!CBS_get_u8_length_prefixed(&body, &context) || + // The request context is always empty during the handshake. + CBS_len(&context) != 0 || + !CBS_get_u16_length_prefixed(&body, &extensions) || + CBS_len(&body) != 0 || + !ssl_parse_extensions(&extensions, &alert, ext_types, + OPENSSL_ARRAY_SIZE(ext_types), + 1 /* accept unknown */) || + (have_ca && CBS_len(&ca) == 0) || + !have_sigalgs || + !CBS_get_u16_length_prefixed(&sigalgs, + &supported_signature_algorithms) || + CBS_len(&supported_signature_algorithms) == 0 || + !tls1_parse_peer_sigalgs(hs, &supported_signature_algorithms)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + + if (have_ca) { + hs->ca_names = ssl_parse_client_CA_list(ssl, &alert, &ca); + if (!hs->ca_names) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + } else { + hs->ca_names.reset(sk_CRYPTO_BUFFER_new_null()); + if (!hs->ca_names) { + OPENSSL_PUT_ERROR(SSL, ERR_R_MALLOC_FAILURE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + } + } else { + CBS body = msg.body, context, supported_signature_algorithms; + if (!CBS_get_u8_length_prefixed(&body, &context) || + // The request context is always empty during the handshake. + CBS_len(&context) != 0 || + !CBS_get_u16_length_prefixed(&body, &supported_signature_algorithms) || + CBS_len(&supported_signature_algorithms) == 0 || + !tls1_parse_peer_sigalgs(hs, &supported_signature_algorithms)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + + uint8_t alert = SSL_AD_DECODE_ERROR; + hs->ca_names = ssl_parse_client_CA_list(ssl, &alert, &body); + if (!hs->ca_names) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + // Ignore extensions. 
+ CBS extensions; + if (!CBS_get_u16_length_prefixed(&body, &extensions) || + CBS_len(&body) != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + } + + hs->cert_request = true; + ssl->ctx->x509_method->hs_flush_cached_ca_names(hs); + + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_read_server_certificate; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_server_certificate(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE) || + !tls13_process_certificate(hs, msg, 0 /* certificate required */) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_read_server_certificate_verify; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_server_certificate_verify( + SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + switch (ssl_verify_peer_cert(hs)) { + case ssl_verify_ok: + break; + case ssl_verify_invalid: + return ssl_hs_error; + case ssl_verify_retry: + hs->tls13_state = state_read_server_certificate_verify; + return ssl_hs_certificate_verify; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE_VERIFY) || + !tls13_process_certificate_verify(hs, msg) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_read_server_finished; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_server_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + if (!ssl_check_message_type(ssl, msg, SSL3_MT_FINISHED) || + !tls13_process_finished(hs, msg, 0 /* don't use saved value */) || + !ssl_hash_message(hs, msg) || + // Update the secret to the master secret and derive traffic keys. + !tls13_advance_key_schedule(hs, kZeroes, hs->hash_len) || + !tls13_derive_application_secrets(hs)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_send_end_of_early_data; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_end_of_early_data(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + if (ssl->early_data_accepted) { + hs->can_early_write = false; + if (ssl_is_draft21(ssl->version)) { + ScopedCBB cbb; + CBB body; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_END_OF_EARLY_DATA) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + } else { + if (!ssl->method->add_alert(ssl, SSL3_AL_WARNING, + TLS1_AD_END_OF_EARLY_DATA)) { + return ssl_hs_error; + } + } + } + + if (hs->early_data_offered) { + if (!tls13_set_traffic_key(ssl, evp_aead_seal, hs->client_handshake_secret, + hs->hash_len)) { + return ssl_hs_error; + } + } + + hs->tls13_state = state_send_client_certificate; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_client_certificate(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + // The peer didn't request a certificate. + if (!hs->cert_request) { + hs->tls13_state = state_complete_second_flight; + return ssl_hs_ok; + } + + // Call cert_cb to update the certificate. 
+ if (ssl->cert->cert_cb != NULL) { + int rv = ssl->cert->cert_cb(ssl, ssl->cert->cert_cb_arg); + if (rv == 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_CERT_CB_ERROR); + return ssl_hs_error; + } + if (rv < 0) { + hs->tls13_state = state_send_client_certificate; + return ssl_hs_x509_lookup; + } + } + + if (!ssl_on_certificate_selected(hs) || + !tls13_add_certificate(hs)) { + return ssl_hs_error; + } + + hs->tls13_state = state_send_client_certificate_verify; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_client_certificate_verify(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + // Don't send CertificateVerify if there is no certificate. + if (!ssl_has_certificate(ssl)) { + hs->tls13_state = state_complete_second_flight; + return ssl_hs_ok; + } + + switch (tls13_add_certificate_verify(hs)) { + case ssl_private_key_success: + hs->tls13_state = state_complete_second_flight; + return ssl_hs_ok; + + case ssl_private_key_retry: + hs->tls13_state = state_send_client_certificate_verify; + return ssl_hs_private_key_operation; + + case ssl_private_key_failure: + return ssl_hs_error; + } + + assert(0); + return ssl_hs_error; +} + +static enum ssl_hs_wait_t do_complete_second_flight(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + // Send a Channel ID assertion if necessary. + if (ssl->s3->tlsext_channel_id_valid) { + if (!ssl_do_channel_id_callback(ssl)) { + hs->tls13_state = state_complete_second_flight; + return ssl_hs_error; + } + + if (ssl->tlsext_channel_id_private == NULL) { + return ssl_hs_channel_id_lookup; + } + + ScopedCBB cbb; + CBB body; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_CHANNEL_ID) || + !tls1_write_channel_id(hs, &body) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + } + + // Send a Finished message. + if (!tls13_add_finished(hs)) { + return ssl_hs_error; + } + + // Derive the final keys and enable them. 
+ if (!tls13_set_traffic_key(ssl, evp_aead_open, hs->server_traffic_secret_0,
+ hs->hash_len) ||
+ !tls13_set_traffic_key(ssl, evp_aead_seal, hs->client_traffic_secret_0,
+ hs->hash_len) ||
+ !tls13_derive_resumption_secret(hs)) {
+ return ssl_hs_error;
+ }
+
+ hs->tls13_state = state_done;
+ return ssl_hs_flush;
+}
+
+enum ssl_hs_wait_t tls13_client_handshake(SSL_HANDSHAKE *hs) {
+ while (hs->tls13_state != state_done) {
+ enum ssl_hs_wait_t ret = ssl_hs_error;
+ enum client_hs_state_t state =
+ static_cast<enum client_hs_state_t>(hs->tls13_state);
+ switch (state) {
+ case state_read_hello_retry_request:
+ ret = do_read_hello_retry_request(hs);
+ break;
+ case state_send_second_client_hello:
+ ret = do_send_second_client_hello(hs);
+ break;
+ case state_read_server_hello:
+ ret = do_read_server_hello(hs);
+ break;
+ case state_read_encrypted_extensions:
+ ret = do_read_encrypted_extensions(hs);
+ break;
+ case state_read_certificate_request:
+ ret = do_read_certificate_request(hs);
+ break;
+ case state_read_server_certificate:
+ ret = do_read_server_certificate(hs);
+ break;
+ case state_read_server_certificate_verify:
+ ret = do_read_server_certificate_verify(hs);
+ break;
+ case state_read_server_finished:
+ ret = do_read_server_finished(hs);
+ break;
+ case state_send_end_of_early_data:
+ ret = do_send_end_of_early_data(hs);
+ break;
+ case state_send_client_certificate:
+ ret = do_send_client_certificate(hs);
+ break;
+ case state_send_client_certificate_verify:
+ ret = do_send_client_certificate_verify(hs);
+ break;
+ case state_complete_second_flight:
+ ret = do_complete_second_flight(hs);
+ break;
+ case state_done:
+ ret = ssl_hs_ok;
+ break;
+ }
+
+ if (hs->tls13_state != state) {
+ ssl_do_info_callback(hs->ssl, SSL_CB_CONNECT_LOOP, 1);
+ }
+
+ if (ret != ssl_hs_ok) {
+ return ret;
+ }
+ }
+
+ return ssl_hs_ok;
+}
+
+const char *tls13_client_handshake_state(SSL_HANDSHAKE *hs) {
+ enum client_hs_state_t state =
+ static_cast<enum client_hs_state_t>(hs->tls13_state);
+ switch (state) {
+ case state_read_hello_retry_request:
+ return "TLS 1.3 client read_hello_retry_request";
+ case state_send_second_client_hello:
+ return "TLS 1.3 client send_second_client_hello";
+ case state_read_server_hello:
+ return "TLS 1.3 client read_server_hello";
+ case state_read_encrypted_extensions:
+ return "TLS 1.3 client read_encrypted_extensions";
+ case state_read_certificate_request:
+ return "TLS 1.3 client read_certificate_request";
+ case state_read_server_certificate:
+ return "TLS 1.3 client read_server_certificate";
+ case state_read_server_certificate_verify:
+ return "TLS 1.3 client read_server_certificate_verify";
+ case state_read_server_finished:
+ return "TLS 1.3 client read_server_finished";
+ case state_send_end_of_early_data:
+ return "TLS 1.3 client send_end_of_early_data";
+ case state_send_client_certificate:
+ return "TLS 1.3 client send_client_certificate";
+ case state_send_client_certificate_verify:
+ return "TLS 1.3 client send_client_certificate_verify";
+ case state_complete_second_flight:
+ return "TLS 1.3 client complete_second_flight";
+ case state_done:
+ return "TLS 1.3 client done";
+ }
+
+ return "TLS 1.3 client unknown";
+}
+
+int tls13_process_new_session_ticket(SSL *ssl, const SSLMessage &msg) {
+ if (ssl->s3->write_shutdown != ssl_shutdown_none) {
+ // Ignore tickets on shutdown. Callers tend to indiscriminately call
+ // |SSL_shutdown| before destroying an |SSL|, at which point calling the new
+ // session callback may be confusing.
+ return 1;
+ }
+
+ UniquePtr<SSL_SESSION> session = SSL_SESSION_dup(
+ ssl->s3->established_session.get(), SSL_SESSION_INCLUDE_NONAUTH);
+ if (!session) {
+ return 0;
+ }
+
+ ssl_session_rebase_time(ssl, session.get());
+
+ uint32_t server_timeout;
+ CBS body = msg.body, ticket_nonce, ticket, extensions;
+ if (!CBS_get_u32(&body, &server_timeout) ||
+ !CBS_get_u32(&body, &session->ticket_age_add) ||
+ (ssl_is_draft21(ssl->version) &&
+ !CBS_get_u8_length_prefixed(&body, &ticket_nonce)) ||
+ !CBS_get_u16_length_prefixed(&body, &ticket) ||
+ !CBS_stow(&ticket, &session->tlsext_tick, &session->tlsext_ticklen) ||
+ !CBS_get_u16_length_prefixed(&body, &extensions) ||
+ CBS_len(&body) != 0) {
+ ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
+ OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
+ return 0;
+ }
+
+ // Cap the renewable lifetime by the server advertised value. This avoids
+ // wasting bandwidth on 0-RTT when we know the server will reject it.
+ if (session->timeout > server_timeout) {
+ session->timeout = server_timeout;
+ }
+
+ if (!tls13_derive_session_psk(session.get(), ticket_nonce)) {
+ return 0;
+ }
+
+ // Parse out the extensions.
+ bool have_early_data_info = false;
+ CBS early_data_info;
+ uint16_t ext_id = ssl_is_draft21(ssl->version)
+ ? TLSEXT_TYPE_early_data
+ : TLSEXT_TYPE_ticket_early_data_info;
+ const SSL_EXTENSION_TYPE ext_types[] = {
+ {ext_id, &have_early_data_info, &early_data_info},
+ };
+
+ uint8_t alert = SSL_AD_DECODE_ERROR;
+ if (!ssl_parse_extensions(&extensions, &alert, ext_types,
+ OPENSSL_ARRAY_SIZE(ext_types),
+ 1 /* ignore unknown */)) {
+ ssl_send_alert(ssl, SSL3_AL_FATAL, alert);
+ return 0;
+ }
+
+ if (have_early_data_info && ssl->cert->enable_early_data) {
+ if (!CBS_get_u32(&early_data_info, &session->ticket_max_early_data) ||
+ CBS_len(&early_data_info) != 0) {
+ ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
+ OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR);
+ return 0;
+ }
+ }
+
+ session->ticket_age_add_valid = 1;
+ session->not_resumable = 0;
+
+ if ((ssl->ctx->session_cache_mode & SSL_SESS_CACHE_CLIENT) &&
+ ssl->ctx->new_session_cb != NULL &&
+ ssl->ctx->new_session_cb(ssl, session.get())) {
+ // |new_session_cb|'s return value signals that it took ownership.
+ session.release();
+ }
+
+ return 1;
+}
+
+} // namespace bssl
diff --git a/Sources/BoringSSL/ssl/tls13_enc.c b/Sources/BoringSSL/ssl/tls13_enc.c
deleted file mode 100644
index 412705da8..000000000
--- a/Sources/BoringSSL/ssl/tls13_enc.c
+++ /dev/null
@@ -1,430 +0,0 @@
-/* Copyright (c) 2016, Google Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ - -#include - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "../crypto/internal.h" -#include "internal.h" - - -int tls13_init_key_schedule(SSL_HANDSHAKE *hs) { - if (!SSL_TRANSCRIPT_init_hash(&hs->transcript, ssl3_protocol_version(hs->ssl), - hs->new_cipher->algorithm_prf)) { - return 0; - } - - - hs->hash_len = SSL_TRANSCRIPT_digest_len(&hs->transcript); - - /* Initialize the secret to the zero key. */ - OPENSSL_memset(hs->secret, 0, hs->hash_len); - - SSL_TRANSCRIPT_free_buffer(&hs->transcript); - return 1; -} - -int tls13_advance_key_schedule(SSL_HANDSHAKE *hs, const uint8_t *in, - size_t len) { - return HKDF_extract(hs->secret, &hs->hash_len, - SSL_TRANSCRIPT_md(&hs->transcript), in, len, hs->secret, - hs->hash_len); -} - -static int hkdf_expand_label(uint8_t *out, const EVP_MD *digest, - const uint8_t *secret, size_t secret_len, - const uint8_t *label, size_t label_len, - const uint8_t *hash, size_t hash_len, size_t len) { - static const char kTLS13LabelVersion[] = "TLS 1.3, "; - - CBB cbb, child; - uint8_t *hkdf_label; - size_t hkdf_label_len; - if (!CBB_init(&cbb, 2 + 1 + strlen(kTLS13LabelVersion) + label_len + 1 + - hash_len) || - !CBB_add_u16(&cbb, len) || - !CBB_add_u8_length_prefixed(&cbb, &child) || - !CBB_add_bytes(&child, (const uint8_t *)kTLS13LabelVersion, - strlen(kTLS13LabelVersion)) || - !CBB_add_bytes(&child, label, label_len) || - !CBB_add_u8_length_prefixed(&cbb, &child) || - !CBB_add_bytes(&child, hash, hash_len) || - !CBB_finish(&cbb, &hkdf_label, &hkdf_label_len)) { - CBB_cleanup(&cbb); - return 0; - } - - int ret = HKDF_expand(out, len, digest, secret, secret_len, hkdf_label, - hkdf_label_len); - OPENSSL_free(hkdf_label); - return ret; -} - -/* derive_secret derives a secret of length |len| and writes the result in |out| - * with the given label and the current base secret and most recently-saved - * handshake context. It returns one on success and zero on error. */ -static int derive_secret(SSL_HANDSHAKE *hs, uint8_t *out, size_t len, - const uint8_t *label, size_t label_len) { - uint8_t context_hash[EVP_MAX_MD_SIZE]; - size_t context_hash_len; - if (!SSL_TRANSCRIPT_get_hash(&hs->transcript, context_hash, - &context_hash_len)) { - return 0; - } - - return hkdf_expand_label(out, SSL_TRANSCRIPT_md(&hs->transcript), hs->secret, - hs->hash_len, label, label_len, context_hash, - context_hash_len, len); -} - -int tls13_set_traffic_key(SSL *ssl, enum evp_aead_direction_t direction, - const uint8_t *traffic_secret, - size_t traffic_secret_len) { - if (traffic_secret_len > 0xff) { - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return 0; - } - - /* Look up cipher suite properties. */ - const EVP_AEAD *aead; - size_t discard; - if (!ssl_cipher_get_evp_aead(&aead, &discard, &discard, - SSL_get_session(ssl)->cipher, - ssl3_protocol_version(ssl))) { - return 0; - } - - const EVP_MD *digest = ssl_get_handshake_digest( - SSL_get_session(ssl)->cipher->algorithm_prf, ssl3_protocol_version(ssl)); - - /* Derive the key. */ - size_t key_len = EVP_AEAD_key_length(aead); - uint8_t key[EVP_AEAD_MAX_KEY_LENGTH]; - if (!hkdf_expand_label(key, digest, traffic_secret, traffic_secret_len, - (const uint8_t *)"key", 3, NULL, 0, key_len)) { - return 0; - } - - /* Derive the IV. 
*/ - size_t iv_len = EVP_AEAD_nonce_length(aead); - uint8_t iv[EVP_AEAD_MAX_NONCE_LENGTH]; - if (!hkdf_expand_label(iv, digest, traffic_secret, traffic_secret_len, - (const uint8_t *)"iv", 2, NULL, 0, iv_len)) { - return 0; - } - - SSL_AEAD_CTX *traffic_aead = SSL_AEAD_CTX_new( - direction, ssl3_protocol_version(ssl), SSL_get_session(ssl)->cipher, key, - key_len, NULL, 0, iv, iv_len); - if (traffic_aead == NULL) { - return 0; - } - - if (direction == evp_aead_open) { - if (!ssl->method->set_read_state(ssl, traffic_aead)) { - return 0; - } - } else { - if (!ssl->method->set_write_state(ssl, traffic_aead)) { - return 0; - } - } - - /* Save the traffic secret. */ - if (direction == evp_aead_open) { - OPENSSL_memmove(ssl->s3->read_traffic_secret, traffic_secret, - traffic_secret_len); - ssl->s3->read_traffic_secret_len = traffic_secret_len; - } else { - OPENSSL_memmove(ssl->s3->write_traffic_secret, traffic_secret, - traffic_secret_len); - ssl->s3->write_traffic_secret_len = traffic_secret_len; - } - - return 1; -} - -static const char kTLS13LabelClientHandshakeTraffic[] = - "client handshake traffic secret"; -static const char kTLS13LabelServerHandshakeTraffic[] = - "server handshake traffic secret"; -static const char kTLS13LabelClientApplicationTraffic[] = - "client application traffic secret"; -static const char kTLS13LabelServerApplicationTraffic[] = - "server application traffic secret"; - -int tls13_derive_handshake_secrets(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - return derive_secret(hs, hs->client_handshake_secret, hs->hash_len, - (const uint8_t *)kTLS13LabelClientHandshakeTraffic, - strlen(kTLS13LabelClientHandshakeTraffic)) && - ssl_log_secret(ssl, "CLIENT_HANDSHAKE_TRAFFIC_SECRET", - hs->client_handshake_secret, hs->hash_len) && - derive_secret(hs, hs->server_handshake_secret, hs->hash_len, - (const uint8_t *)kTLS13LabelServerHandshakeTraffic, - strlen(kTLS13LabelServerHandshakeTraffic)) && - ssl_log_secret(ssl, "SERVER_HANDSHAKE_TRAFFIC_SECRET", - hs->server_handshake_secret, hs->hash_len); -} - -static const char kTLS13LabelExporter[] = "exporter master secret"; - -int tls13_derive_application_secrets(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - ssl->s3->exporter_secret_len = hs->hash_len; - return derive_secret(hs, hs->client_traffic_secret_0, hs->hash_len, - (const uint8_t *)kTLS13LabelClientApplicationTraffic, - strlen(kTLS13LabelClientApplicationTraffic)) && - ssl_log_secret(ssl, "CLIENT_TRAFFIC_SECRET_0", - hs->client_traffic_secret_0, hs->hash_len) && - derive_secret(hs, hs->server_traffic_secret_0, hs->hash_len, - (const uint8_t *)kTLS13LabelServerApplicationTraffic, - strlen(kTLS13LabelServerApplicationTraffic)) && - ssl_log_secret(ssl, "SERVER_TRAFFIC_SECRET_0", - hs->server_traffic_secret_0, hs->hash_len) && - derive_secret(hs, ssl->s3->exporter_secret, hs->hash_len, - (const uint8_t *)kTLS13LabelExporter, - strlen(kTLS13LabelExporter)); -} - -static const char kTLS13LabelApplicationTraffic[] = - "application traffic secret"; - -int tls13_rotate_traffic_key(SSL *ssl, enum evp_aead_direction_t direction) { - const EVP_MD *digest = ssl_get_handshake_digest( - SSL_get_session(ssl)->cipher->algorithm_prf, ssl3_protocol_version(ssl)); - - uint8_t *secret; - size_t secret_len; - if (direction == evp_aead_open) { - secret = ssl->s3->read_traffic_secret; - secret_len = ssl->s3->read_traffic_secret_len; - } else { - secret = ssl->s3->write_traffic_secret; - secret_len = ssl->s3->write_traffic_secret_len; - } - - if (!hkdf_expand_label(secret, digest, secret, 
secret_len, - (const uint8_t *)kTLS13LabelApplicationTraffic, - strlen(kTLS13LabelApplicationTraffic), NULL, 0, - secret_len)) { - return 0; - } - - return tls13_set_traffic_key(ssl, direction, secret, secret_len); -} - -static const char kTLS13LabelResumption[] = "resumption master secret"; - -int tls13_derive_resumption_secret(SSL_HANDSHAKE *hs) { - if (hs->hash_len > SSL_MAX_MASTER_KEY_LENGTH) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - hs->new_session->master_key_length = hs->hash_len; - return derive_secret( - hs, hs->new_session->master_key, hs->new_session->master_key_length, - (const uint8_t *)kTLS13LabelResumption, strlen(kTLS13LabelResumption)); -} - -static const char kTLS13LabelFinished[] = "finished"; - -/* tls13_verify_data sets |out| to be the HMAC of |context| using a derived - * Finished key for both Finished messages and the PSK binder. */ -static int tls13_verify_data(const EVP_MD *digest, uint8_t *out, - size_t *out_len, const uint8_t *secret, - size_t hash_len, uint8_t *context, - size_t context_len) { - uint8_t key[EVP_MAX_MD_SIZE]; - unsigned len; - if (!hkdf_expand_label(key, digest, secret, hash_len, - (const uint8_t *)kTLS13LabelFinished, - strlen(kTLS13LabelFinished), NULL, 0, hash_len) || - HMAC(digest, key, hash_len, context, context_len, out, &len) == NULL) { - return 0; - } - *out_len = len; - return 1; -} - -int tls13_finished_mac(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len, - int is_server) { - SSL *const ssl = hs->ssl; - - const uint8_t *traffic_secret; - if (is_server == ssl->server) { - traffic_secret = ssl->s3->write_traffic_secret; - } else { - traffic_secret = ssl->s3->read_traffic_secret; - } - - uint8_t context_hash[EVP_MAX_MD_SIZE]; - size_t context_hash_len; - if (!SSL_TRANSCRIPT_get_hash(&hs->transcript, context_hash, - &context_hash_len) || - !tls13_verify_data(SSL_TRANSCRIPT_md(&hs->transcript), out, out_len, - traffic_secret, hs->hash_len, context_hash, - context_hash_len)) { - return 0; - } - return 1; -} - -int tls13_export_keying_material(SSL *ssl, uint8_t *out, size_t out_len, - const char *label, size_t label_len, - const uint8_t *context, size_t context_len, - int use_context) { - const EVP_MD *digest = ssl_get_handshake_digest( - SSL_get_session(ssl)->cipher->algorithm_prf, ssl3_protocol_version(ssl)); - - const uint8_t *hash = NULL; - size_t hash_len = 0; - if (use_context) { - hash = context; - hash_len = context_len; - } - return hkdf_expand_label(out, digest, ssl->s3->exporter_secret, - ssl->s3->exporter_secret_len, (const uint8_t *)label, - label_len, hash, hash_len, out_len); -} - -static const char kTLS13LabelPSKBinder[] = "resumption psk binder key"; - -static int tls13_psk_binder(uint8_t *out, const EVP_MD *digest, uint8_t *psk, - size_t psk_len, uint8_t *context, - size_t context_len, size_t hash_len) { - uint8_t binder_context[EVP_MAX_MD_SIZE]; - unsigned binder_context_len; - if (!EVP_Digest(NULL, 0, binder_context, &binder_context_len, digest, NULL)) { - return 0; - } - - uint8_t early_secret[EVP_MAX_MD_SIZE] = {0}; - size_t early_secret_len; - if (!HKDF_extract(early_secret, &early_secret_len, digest, psk, hash_len, - NULL, 0)) { - return 0; - } - - uint8_t binder_key[EVP_MAX_MD_SIZE] = {0}; - size_t len; - if (!hkdf_expand_label(binder_key, digest, early_secret, hash_len, - (const uint8_t *)kTLS13LabelPSKBinder, - strlen(kTLS13LabelPSKBinder), binder_context, - binder_context_len, hash_len) || - !tls13_verify_data(digest, out, &len, binder_key, hash_len, context, - context_len)) { - return 
0; - } - - return 1; -} - -int tls13_write_psk_binder(SSL_HANDSHAKE *hs, uint8_t *msg, size_t len) { - SSL *const ssl = hs->ssl; - const EVP_MD *digest = SSL_SESSION_get_digest(ssl->session, ssl); - if (digest == NULL) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - size_t hash_len = EVP_MD_size(digest); - - if (len < hash_len + 3) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - EVP_MD_CTX ctx; - EVP_MD_CTX_init(&ctx); - uint8_t context[EVP_MAX_MD_SIZE]; - unsigned context_len; - if (!EVP_DigestInit_ex(&ctx, digest, NULL) || - !EVP_DigestUpdate(&ctx, hs->transcript.buffer->data, - hs->transcript.buffer->length) || - !EVP_DigestUpdate(&ctx, msg, len - hash_len - 3) || - !EVP_DigestFinal_ex(&ctx, context, &context_len)) { - EVP_MD_CTX_cleanup(&ctx); - return 0; - } - - EVP_MD_CTX_cleanup(&ctx); - - uint8_t verify_data[EVP_MAX_MD_SIZE] = {0}; - if (!tls13_psk_binder(verify_data, digest, ssl->session->master_key, - ssl->session->master_key_length, context, context_len, - hash_len)) { - return 0; - } - - OPENSSL_memcpy(msg + len - hash_len, verify_data, hash_len); - return 1; -} - -int tls13_verify_psk_binder(SSL_HANDSHAKE *hs, SSL_SESSION *session, - CBS *binders) { - size_t hash_len = SSL_TRANSCRIPT_digest_len(&hs->transcript); - - /* Get the full ClientHello, including message header. It must be large enough - * to exclude the binders. */ - CBS message; - hs->ssl->method->get_current_message(hs->ssl, &message); - if (CBS_len(&message) < CBS_len(binders) + 2) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - /* Hash a ClientHello prefix up to the binders. For now, this assumes we only - * ever verify PSK binders on initial ClientHellos. */ - uint8_t context[EVP_MAX_MD_SIZE]; - unsigned context_len; - if (!EVP_Digest(CBS_data(&message), CBS_len(&message) - CBS_len(binders) - 2, - context, &context_len, SSL_TRANSCRIPT_md(&hs->transcript), - NULL)) { - return 0; - } - - uint8_t verify_data[EVP_MAX_MD_SIZE] = {0}; - CBS binder; - if (!tls13_psk_binder(verify_data, SSL_TRANSCRIPT_md(&hs->transcript), - session->master_key, session->master_key_length, - context, context_len, hash_len) || - /* We only consider the first PSK, so compare against the first binder. */ - !CBS_get_u8_length_prefixed(binders, &binder)) { - OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); - return 0; - } - - int binder_ok = - CBS_len(&binder) == hash_len && - CRYPTO_memcmp(CBS_data(&binder), verify_data, hash_len) == 0; -#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) - binder_ok = 1; -#endif - if (!binder_ok) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); - return 0; - } - - return 1; -} diff --git a/Sources/BoringSSL/ssl/tls13_enc.cc b/Sources/BoringSSL/ssl/tls13_enc.cc new file mode 100644 index 000000000..14f4a7879 --- /dev/null +++ b/Sources/BoringSSL/ssl/tls13_enc.cc @@ -0,0 +1,563 @@ +/* Copyright (c) 2016, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +namespace bssl { + +static int init_key_schedule(SSL_HANDSHAKE *hs, uint16_t version, + const SSL_CIPHER *cipher) { + if (!hs->transcript.InitHash(version, cipher)) { + return 0; + } + + hs->hash_len = hs->transcript.DigestLen(); + + // Initialize the secret to the zero key. + OPENSSL_memset(hs->secret, 0, hs->hash_len); + + return 1; +} + +int tls13_init_key_schedule(SSL_HANDSHAKE *hs, const uint8_t *psk, + size_t psk_len) { + if (!init_key_schedule(hs, ssl_protocol_version(hs->ssl), hs->new_cipher)) { + return 0; + } + + hs->transcript.FreeBuffer(); + return HKDF_extract(hs->secret, &hs->hash_len, hs->transcript.Digest(), psk, + psk_len, hs->secret, hs->hash_len); +} + +int tls13_init_early_key_schedule(SSL_HANDSHAKE *hs, const uint8_t *psk, + size_t psk_len) { + SSL *const ssl = hs->ssl; + return init_key_schedule(hs, ssl_session_protocol_version(ssl->session), + ssl->session->cipher) && + HKDF_extract(hs->secret, &hs->hash_len, hs->transcript.Digest(), psk, + psk_len, hs->secret, hs->hash_len); +} + +static int hkdf_expand_label(uint8_t *out, uint16_t version, + const EVP_MD *digest, const uint8_t *secret, + size_t secret_len, const uint8_t *label, + size_t label_len, const uint8_t *hash, + size_t hash_len, size_t len) { + const char *kTLS13LabelVersion = + ssl_is_draft21(version) ? "tls13 " : "TLS 1.3, "; + + ScopedCBB cbb; + CBB child; + uint8_t *hkdf_label; + size_t hkdf_label_len; + if (!CBB_init(cbb.get(), 2 + 1 + strlen(kTLS13LabelVersion) + label_len + 1 + + hash_len) || + !CBB_add_u16(cbb.get(), len) || + !CBB_add_u8_length_prefixed(cbb.get(), &child) || + !CBB_add_bytes(&child, (const uint8_t *)kTLS13LabelVersion, + strlen(kTLS13LabelVersion)) || + !CBB_add_bytes(&child, label, label_len) || + !CBB_add_u8_length_prefixed(cbb.get(), &child) || + !CBB_add_bytes(&child, hash, hash_len) || + !CBB_finish(cbb.get(), &hkdf_label, &hkdf_label_len)) { + return 0; + } + + int ret = HKDF_expand(out, len, digest, secret, secret_len, hkdf_label, + hkdf_label_len); + OPENSSL_free(hkdf_label); + return ret; +} + +static const char kTLS13LabelDerived[] = "derived"; + +int tls13_advance_key_schedule(SSL_HANDSHAKE *hs, const uint8_t *in, + size_t len) { + SSL *const ssl = hs->ssl; + + // Draft 18 does not include the extra Derive-Secret step. 
+ if (ssl_is_draft21(ssl->version)) {
+ uint8_t derive_context[EVP_MAX_MD_SIZE];
+ unsigned derive_context_len;
+ if (!EVP_Digest(nullptr, 0, derive_context, &derive_context_len,
+ hs->transcript.Digest(), nullptr)) {
+ return 0;
+ }
+
+ if (!hkdf_expand_label(hs->secret, ssl->version, hs->transcript.Digest(),
+ hs->secret, hs->hash_len,
+ (const uint8_t *)kTLS13LabelDerived,
+ strlen(kTLS13LabelDerived), derive_context,
+ derive_context_len, hs->hash_len)) {
+ return 0;
+ }
+ }
+
+ return HKDF_extract(hs->secret, &hs->hash_len, hs->transcript.Digest(), in,
+ len, hs->secret, hs->hash_len);
+}
+
+// derive_secret derives a secret of length |len| and writes the result in |out|
+// with the given label and the current base secret and most recently-saved
+// handshake context. It returns one on success and zero on error.
+static int derive_secret(SSL_HANDSHAKE *hs, uint8_t *out, size_t len,
+ const uint8_t *label, size_t label_len) {
+ uint8_t context_hash[EVP_MAX_MD_SIZE];
+ size_t context_hash_len;
+ if (!hs->transcript.GetHash(context_hash, &context_hash_len)) {
+ return 0;
+ }
+
+ return hkdf_expand_label(out, SSL_get_session(hs->ssl)->ssl_version,
+ hs->transcript.Digest(), hs->secret, hs->hash_len,
+ label, label_len, context_hash, context_hash_len,
+ len);
+}
+
+int tls13_set_traffic_key(SSL *ssl, enum evp_aead_direction_t direction,
+ const uint8_t *traffic_secret,
+ size_t traffic_secret_len) {
+ const SSL_SESSION *session = SSL_get_session(ssl);
+ uint16_t version = ssl_session_protocol_version(session);
+
+ if (traffic_secret_len > 0xff) {
+ OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW);
+ return 0;
+ }
+
+ // Look up cipher suite properties.
+ const EVP_AEAD *aead;
+ size_t discard;
+ if (!ssl_cipher_get_evp_aead(&aead, &discard, &discard, session->cipher,
+ version, SSL_is_dtls(ssl))) {
+ return 0;
+ }
+
+ const EVP_MD *digest = ssl_session_get_digest(session);
+
+ // Derive the key.
+ size_t key_len = EVP_AEAD_key_length(aead);
+ uint8_t key[EVP_AEAD_MAX_KEY_LENGTH];
+ if (!hkdf_expand_label(key, session->ssl_version, digest, traffic_secret,
+ traffic_secret_len, (const uint8_t *)"key", 3, NULL, 0,
+ key_len)) {
+ return 0;
+ }
+
+ // Derive the IV.
+ size_t iv_len = EVP_AEAD_nonce_length(aead);
+ uint8_t iv[EVP_AEAD_MAX_NONCE_LENGTH];
+ if (!hkdf_expand_label(iv, session->ssl_version, digest, traffic_secret,
+ traffic_secret_len, (const uint8_t *)"iv", 2, NULL, 0,
+ iv_len)) {
+ return 0;
+ }
+
+ UniquePtr<SSLAEADContext> traffic_aead =
+ SSLAEADContext::Create(direction, session->ssl_version, SSL_is_dtls(ssl),
+ session->cipher, MakeConstSpan(key, key_len),
+ Span<const uint8_t>(), MakeConstSpan(iv, iv_len));
+ if (!traffic_aead) {
+ return 0;
+ }
+
+ if (direction == evp_aead_open) {
+ if (!ssl->method->set_read_state(ssl, std::move(traffic_aead))) {
+ return 0;
+ }
+ } else {
+ if (!ssl->method->set_write_state(ssl, std::move(traffic_aead))) {
+ return 0;
+ }
+ }
+
+ // Save the traffic secret.
+ if (direction == evp_aead_open) { + OPENSSL_memmove(ssl->s3->read_traffic_secret, traffic_secret, + traffic_secret_len); + ssl->s3->read_traffic_secret_len = traffic_secret_len; + } else { + OPENSSL_memmove(ssl->s3->write_traffic_secret, traffic_secret, + traffic_secret_len); + ssl->s3->write_traffic_secret_len = traffic_secret_len; + } + + return 1; +} + +static const char kTLS13LabelExporter[] = "exporter master secret"; +static const char kTLS13LabelEarlyExporter[] = "early exporter master secret"; + +static const char kTLS13LabelClientEarlyTraffic[] = + "client early traffic secret"; +static const char kTLS13LabelClientHandshakeTraffic[] = + "client handshake traffic secret"; +static const char kTLS13LabelServerHandshakeTraffic[] = + "server handshake traffic secret"; +static const char kTLS13LabelClientApplicationTraffic[] = + "client application traffic secret"; +static const char kTLS13LabelServerApplicationTraffic[] = + "server application traffic secret"; + +static const char kTLS13Draft21LabelExporter[] = "exp master"; +static const char kTLS13Draft21LabelEarlyExporter[] = "e exp master"; + +static const char kTLS13Draft21LabelClientEarlyTraffic[] = "c e traffic"; +static const char kTLS13Draft21LabelClientHandshakeTraffic[] = "c hs traffic"; +static const char kTLS13Draft21LabelServerHandshakeTraffic[] = "s hs traffic"; +static const char kTLS13Draft21LabelClientApplicationTraffic[] = "c ap traffic"; +static const char kTLS13Draft21LabelServerApplicationTraffic[] = "s ap traffic"; + +int tls13_derive_early_secrets(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + uint16_t version = SSL_get_session(ssl)->ssl_version; + + const char *early_traffic_label = ssl_is_draft21(version) + ? kTLS13Draft21LabelClientEarlyTraffic + : kTLS13LabelClientEarlyTraffic; + const char *early_exporter_label = ssl_is_draft21(version) + ? kTLS13Draft21LabelEarlyExporter + : kTLS13LabelEarlyExporter; + return derive_secret(hs, hs->early_traffic_secret, hs->hash_len, + (const uint8_t *)early_traffic_label, + strlen(early_traffic_label)) && + ssl_log_secret(ssl, "CLIENT_EARLY_TRAFFIC_SECRET", + hs->early_traffic_secret, hs->hash_len) && + derive_secret(hs, ssl->s3->early_exporter_secret, hs->hash_len, + (const uint8_t *)early_exporter_label, + strlen(early_exporter_label)); +} + +int tls13_derive_handshake_secrets(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + const char *client_label = ssl_is_draft21(ssl->version) + ? kTLS13Draft21LabelClientHandshakeTraffic + : kTLS13LabelClientHandshakeTraffic; + const char *server_label = ssl_is_draft21(ssl->version) + ? kTLS13Draft21LabelServerHandshakeTraffic + : kTLS13LabelServerHandshakeTraffic; + return derive_secret(hs, hs->client_handshake_secret, hs->hash_len, + (const uint8_t *)client_label, strlen(client_label)) && + ssl_log_secret(ssl, "CLIENT_HANDSHAKE_TRAFFIC_SECRET", + hs->client_handshake_secret, hs->hash_len) && + derive_secret(hs, hs->server_handshake_secret, hs->hash_len, + (const uint8_t *)server_label, strlen(server_label)) && + ssl_log_secret(ssl, "SERVER_HANDSHAKE_TRAFFIC_SECRET", + hs->server_handshake_secret, hs->hash_len); +} + +int tls13_derive_application_secrets(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + ssl->s3->exporter_secret_len = hs->hash_len; + const char *client_label = ssl_is_draft21(ssl->version) + ? kTLS13Draft21LabelClientApplicationTraffic + : kTLS13LabelClientApplicationTraffic; + const char *server_label = ssl_is_draft21(ssl->version) + ? 
kTLS13Draft21LabelServerApplicationTraffic + : kTLS13LabelServerApplicationTraffic; + const char *exporter_label = ssl_is_draft21(ssl->version) + ? kTLS13Draft21LabelExporter + : kTLS13LabelExporter; + return derive_secret(hs, hs->client_traffic_secret_0, hs->hash_len, + (const uint8_t *)client_label, strlen(client_label)) && + ssl_log_secret(ssl, "CLIENT_TRAFFIC_SECRET_0", + hs->client_traffic_secret_0, hs->hash_len) && + derive_secret(hs, hs->server_traffic_secret_0, hs->hash_len, + (const uint8_t *)server_label, strlen(server_label)) && + ssl_log_secret(ssl, "SERVER_TRAFFIC_SECRET_0", + hs->server_traffic_secret_0, hs->hash_len) && + derive_secret(hs, ssl->s3->exporter_secret, hs->hash_len, + (const uint8_t *)exporter_label, + strlen(exporter_label)) && + ssl_log_secret(ssl, "EXPORTER_SECRET", ssl->s3->exporter_secret, + hs->hash_len); +} + +static const char kTLS13LabelApplicationTraffic[] = + "application traffic secret"; +static const char kTLS13Draft21LabelApplicationTraffic[] = "traffic upd"; + +int tls13_rotate_traffic_key(SSL *ssl, enum evp_aead_direction_t direction) { + uint8_t *secret; + size_t secret_len; + if (direction == evp_aead_open) { + secret = ssl->s3->read_traffic_secret; + secret_len = ssl->s3->read_traffic_secret_len; + } else { + secret = ssl->s3->write_traffic_secret; + secret_len = ssl->s3->write_traffic_secret_len; + } + + const char *traffic_label = ssl_is_draft21(ssl->version) + ? kTLS13Draft21LabelApplicationTraffic + : kTLS13LabelApplicationTraffic; + + const EVP_MD *digest = ssl_session_get_digest(SSL_get_session(ssl)); + if (!hkdf_expand_label(secret, ssl->version, digest, secret, secret_len, + (const uint8_t *)traffic_label, strlen(traffic_label), + NULL, 0, secret_len)) { + return 0; + } + + return tls13_set_traffic_key(ssl, direction, secret, secret_len); +} + +static const char kTLS13LabelResumption[] = "resumption master secret"; +static const char kTLS13Draft21LabelResumption[] = "res master"; + +int tls13_derive_resumption_secret(SSL_HANDSHAKE *hs) { + if (hs->hash_len > SSL_MAX_MASTER_KEY_LENGTH) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + const char *resumption_label = ssl_is_draft21(hs->ssl->version) + ? kTLS13Draft21LabelResumption + : kTLS13LabelResumption; + hs->new_session->master_key_length = hs->hash_len; + return derive_secret( + hs, hs->new_session->master_key, hs->new_session->master_key_length, + (const uint8_t *)resumption_label, strlen(resumption_label)); +} + +static const char kTLS13LabelFinished[] = "finished"; + +// tls13_verify_data sets |out| to be the HMAC of |context| using a derived +// Finished key for both Finished messages and the PSK binder. 
+static int tls13_verify_data(const EVP_MD *digest, uint16_t version,
+ uint8_t *out, size_t *out_len,
+ const uint8_t *secret, size_t hash_len,
+ uint8_t *context, size_t context_len) {
+ uint8_t key[EVP_MAX_MD_SIZE];
+ unsigned len;
+ if (!hkdf_expand_label(key, version, digest, secret, hash_len,
+ (const uint8_t *)kTLS13LabelFinished,
+ strlen(kTLS13LabelFinished), NULL, 0, hash_len) ||
+ HMAC(digest, key, hash_len, context, context_len, out, &len) == NULL) {
+ return 0;
+ }
+ *out_len = len;
+ return 1;
+}
+
+int tls13_finished_mac(SSL_HANDSHAKE *hs, uint8_t *out, size_t *out_len,
+ int is_server) {
+ const uint8_t *traffic_secret;
+ if (is_server) {
+ traffic_secret = hs->server_handshake_secret;
+ } else {
+ traffic_secret = hs->client_handshake_secret;
+ }
+
+ uint8_t context_hash[EVP_MAX_MD_SIZE];
+ size_t context_hash_len;
+ if (!hs->transcript.GetHash(context_hash, &context_hash_len) ||
+ !tls13_verify_data(hs->transcript.Digest(), hs->ssl->version, out,
+ out_len, traffic_secret, hs->hash_len, context_hash,
+ context_hash_len)) {
+ return 0;
+ }
+ return 1;
+}
+
+static const char kTLS13LabelResumptionPSK[] = "resumption";
+
+bool tls13_derive_session_psk(SSL_SESSION *session, Span<const uint8_t> nonce) {
+ if (!ssl_is_draft21(session->ssl_version)) {
+ return true;
+ }
+
+ const EVP_MD *digest = ssl_session_get_digest(session);
+ return hkdf_expand_label(session->master_key, session->ssl_version, digest,
+ session->master_key, session->master_key_length,
+ (const uint8_t *)kTLS13LabelResumptionPSK,
+ strlen(kTLS13LabelResumptionPSK), nonce.data(),
+ nonce.size(), session->master_key_length);
+}
+
+static const char kTLS13LabelExportKeying[] = "exporter";
+
+int tls13_export_keying_material(SSL *ssl, uint8_t *out, size_t out_len,
+ const char *label, size_t label_len,
+ const uint8_t *context_in,
+ size_t context_in_len, int use_context) {
+ const uint8_t *context = NULL;
+ size_t context_len = 0;
+ if (use_context) {
+ context = context_in;
+ context_len = context_in_len;
+ }
+
+ if (!ssl_is_draft21(ssl->version)) {
+ const EVP_MD *digest = ssl_session_get_digest(SSL_get_session(ssl));
+ return hkdf_expand_label(
+ out, ssl->version, digest, ssl->s3->exporter_secret,
+ ssl->s3->exporter_secret_len, (const uint8_t *)label, label_len,
+ context, context_len, out_len);
+ }
+
+ const EVP_MD *digest = ssl_session_get_digest(SSL_get_session(ssl));
+
+ uint8_t hash[EVP_MAX_MD_SIZE];
+ uint8_t export_context[EVP_MAX_MD_SIZE];
+ uint8_t derived_secret[EVP_MAX_MD_SIZE];
+ unsigned hash_len;
+ unsigned export_context_len;
+ unsigned derived_secret_len = EVP_MD_size(digest);
+ if (!EVP_Digest(context, context_len, hash, &hash_len, digest, NULL) ||
+ !EVP_Digest(NULL, 0, export_context, &export_context_len, digest, NULL)) {
+ return 0;
+ }
+ return hkdf_expand_label(
+ derived_secret, ssl->version, digest, ssl->s3->exporter_secret,
+ ssl->s3->exporter_secret_len, (const uint8_t *)label, label_len,
+ export_context, export_context_len, derived_secret_len) &&
+ hkdf_expand_label(
+ out, ssl->version, digest, derived_secret, derived_secret_len,
+ (const uint8_t *)kTLS13LabelExportKeying,
+ strlen(kTLS13LabelExportKeying), hash, hash_len, out_len);
+}
+
+static const char kTLS13LabelPSKBinder[] = "resumption psk binder key";
+static const char kTLS13Draft21LabelPSKBinder[] = "res binder";
+
+static int tls13_psk_binder(uint8_t *out, uint16_t version,
+ const EVP_MD *digest, uint8_t *psk, size_t psk_len,
+ uint8_t *context, size_t context_len,
+ size_t hash_len) {
+ uint8_t binder_context[EVP_MAX_MD_SIZE];
+
unsigned binder_context_len; + if (!EVP_Digest(NULL, 0, binder_context, &binder_context_len, digest, NULL)) { + return 0; + } + + uint8_t early_secret[EVP_MAX_MD_SIZE] = {0}; + size_t early_secret_len; + if (!HKDF_extract(early_secret, &early_secret_len, digest, psk, hash_len, + NULL, 0)) { + return 0; + } + const char *binder_label = ssl_is_draft21(version) + ? kTLS13Draft21LabelPSKBinder + : kTLS13LabelPSKBinder; + + uint8_t binder_key[EVP_MAX_MD_SIZE] = {0}; + size_t len; + if (!hkdf_expand_label(binder_key, version, digest, early_secret, hash_len, + (const uint8_t *)binder_label, strlen(binder_label), + binder_context, binder_context_len, hash_len) || + !tls13_verify_data(digest, version, out, &len, binder_key, hash_len, + context, context_len)) { + return 0; + } + + return 1; +} + +int tls13_write_psk_binder(SSL_HANDSHAKE *hs, uint8_t *msg, size_t len) { + SSL *const ssl = hs->ssl; + const EVP_MD *digest = ssl_session_get_digest(ssl->session); + size_t hash_len = EVP_MD_size(digest); + + if (len < hash_len + 3) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + + ScopedEVP_MD_CTX ctx; + uint8_t context[EVP_MAX_MD_SIZE]; + unsigned context_len; + + if (!EVP_DigestInit_ex(ctx.get(), digest, NULL) || + !EVP_DigestUpdate(ctx.get(), hs->transcript.buffer().data(), + hs->transcript.buffer().size()) || + !EVP_DigestUpdate(ctx.get(), msg, len - hash_len - 3) || + !EVP_DigestFinal_ex(ctx.get(), context, &context_len)) { + return 0; + } + + uint8_t verify_data[EVP_MAX_MD_SIZE] = {0}; + if (!tls13_psk_binder(verify_data, ssl->session->ssl_version, digest, + ssl->session->master_key, + ssl->session->master_key_length, context, context_len, + hash_len)) { + return 0; + } + + OPENSSL_memcpy(msg + len - hash_len, verify_data, hash_len); + return 1; +} + +int tls13_verify_psk_binder(SSL_HANDSHAKE *hs, SSL_SESSION *session, + const SSLMessage &msg, CBS *binders) { + size_t hash_len = hs->transcript.DigestLen(); + + // The message must be large enough to exclude the binders. + if (CBS_len(&msg.raw) < CBS_len(binders) + 2) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + + // Hash a ClientHello prefix up to the binders. This includes the header. For + // now, this assumes we only ever verify PSK binders on initial + // ClientHellos. + uint8_t context[EVP_MAX_MD_SIZE]; + unsigned context_len; + if (!EVP_Digest(CBS_data(&msg.raw), CBS_len(&msg.raw) - CBS_len(binders) - 2, + context, &context_len, hs->transcript.Digest(), NULL)) { + return 0; + } + + uint8_t verify_data[EVP_MAX_MD_SIZE] = {0}; + CBS binder; + if (!tls13_psk_binder(verify_data, hs->ssl->version, hs->transcript.Digest(), + session->master_key, session->master_key_length, + context, context_len, hash_len) || + // We only consider the first PSK, so compare against the first binder. + !CBS_get_u8_length_prefixed(binders, &binder)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + + int binder_ok = + CBS_len(&binder) == hash_len && + CRYPTO_memcmp(CBS_data(&binder), verify_data, hash_len) == 0; +#if defined(BORINGSSL_UNSAFE_FUZZER_MODE) + binder_ok = 1; +#endif + if (!binder_ok) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DIGEST_CHECK_FAILED); + return 0; + } + + return 1; +} + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/tls13_server.c b/Sources/BoringSSL/ssl/tls13_server.c deleted file mode 100644 index 402c23431..000000000 --- a/Sources/BoringSSL/ssl/tls13_server.c +++ /dev/null @@ -1,680 +0,0 @@ -/* Copyright (c) 2016, Google Inc. 
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ - -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "../crypto/internal.h" -#include "internal.h" - - -/* kMaxEarlyDataAccepted is the advertised number of plaintext bytes of early - * data that will be accepted. This value should be slightly below - * kMaxEarlyDataSkipped in tls_record.c, which is measured in ciphertext. */ -static const size_t kMaxEarlyDataAccepted = 14336; - -enum server_hs_state_t { - state_select_parameters = 0, - state_send_hello_retry_request, - state_process_second_client_hello, - state_send_server_hello, - state_send_server_certificate_verify, - state_complete_server_certificate_verify, - state_send_server_finished, - state_process_client_certificate, - state_process_client_certificate_verify, - state_process_channel_id, - state_process_client_finished, - state_send_new_session_ticket, - state_done, -}; - -static const uint8_t kZeroes[EVP_MAX_MD_SIZE] = {0}; - -static int resolve_ecdhe_secret(SSL_HANDSHAKE *hs, int *out_need_retry, - SSL_CLIENT_HELLO *client_hello) { - SSL *const ssl = hs->ssl; - *out_need_retry = 0; - - /* We only support connections that include an ECDHE key exchange. */ - CBS key_share; - if (!ssl_client_hello_get_extension(client_hello, &key_share, - TLSEXT_TYPE_key_share)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_KEY_SHARE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_MISSING_EXTENSION); - return 0; - } - - int found_key_share; - uint8_t *dhe_secret; - size_t dhe_secret_len; - uint8_t alert = SSL_AD_DECODE_ERROR; - if (!ssl_ext_key_share_parse_clienthello(hs, &found_key_share, &dhe_secret, - &dhe_secret_len, &alert, - &key_share)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return 0; - } - - if (!found_key_share) { - *out_need_retry = 1; - return 0; - } - - int ok = tls13_advance_key_schedule(hs, dhe_secret, dhe_secret_len); - OPENSSL_free(dhe_secret); - return ok; -} - -static const SSL_CIPHER *choose_tls13_cipher( - const SSL *ssl, const SSL_CLIENT_HELLO *client_hello) { - if (client_hello->cipher_suites_len % 2 != 0) { - return NULL; - } - - CBS cipher_suites; - CBS_init(&cipher_suites, client_hello->cipher_suites, - client_hello->cipher_suites_len); - - const int aes_is_fine = EVP_has_aes_hardware(); - const uint16_t version = ssl3_protocol_version(ssl); - - const SSL_CIPHER *best = NULL; - while (CBS_len(&cipher_suites) > 0) { - uint16_t cipher_suite; - if (!CBS_get_u16(&cipher_suites, &cipher_suite)) { - return NULL; - } - - /* Limit to TLS 1.3 ciphers we know about. 
*/ - const SSL_CIPHER *candidate = SSL_get_cipher_by_value(cipher_suite); - if (candidate == NULL || - SSL_CIPHER_get_min_version(candidate) > version || - SSL_CIPHER_get_max_version(candidate) < version) { - continue; - } - - /* TLS 1.3 removes legacy ciphers, so honor the client order, but prefer - * ChaCha20 if we do not have AES hardware. */ - if (aes_is_fine) { - return candidate; - } - - if (candidate->algorithm_enc == SSL_CHACHA20POLY1305) { - return candidate; - } - - if (best == NULL) { - best = candidate; - } - } - - return best; -} - -static enum ssl_hs_wait_t do_select_parameters(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - /* The short record header extension is incompatible with early data. */ - if (ssl->s3->skip_early_data && ssl->s3->short_header) { - OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_EXTENSION); - return ssl_hs_error; - } - - SSL_CLIENT_HELLO client_hello; - if (!ssl_client_hello_init(ssl, &client_hello, ssl->init_msg, - ssl->init_num)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_CLIENTHELLO_PARSE_FAILED); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return ssl_hs_error; - } - - /* Negotiate the cipher suite. */ - hs->new_cipher = choose_tls13_cipher(ssl, &client_hello); - if (hs->new_cipher == NULL) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SHARED_CIPHER); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); - return ssl_hs_error; - } - - /* The PRF hash is now known. Set up the key schedule and hash the - * ClientHello. */ - if (!tls13_init_key_schedule(hs) || - !ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - - /* Decode the ticket if we agree on a PSK key exchange mode. */ - uint8_t alert = SSL_AD_DECODE_ERROR; - SSL_SESSION *session = NULL; - CBS pre_shared_key, binders; - if (hs->accept_psk_mode && - ssl_client_hello_get_extension(&client_hello, &pre_shared_key, - TLSEXT_TYPE_pre_shared_key)) { - /* Verify that the pre_shared_key extension is the last extension in - * ClientHello. */ - if (CBS_data(&pre_shared_key) + CBS_len(&pre_shared_key) != - client_hello.extensions + client_hello.extensions_len) { - OPENSSL_PUT_ERROR(SSL, SSL_R_PRE_SHARED_KEY_MUST_BE_LAST); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - return ssl_hs_error; - } - - if (!ssl_ext_pre_shared_key_parse_clienthello(hs, &session, &binders, - &alert, &pre_shared_key)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return ssl_hs_error; - } - } - - if (session != NULL && - !ssl_session_is_resumable(hs, session)) { - SSL_SESSION_free(session); - session = NULL; - } - - /* Set up the new session, either using the original one as a template or - * creating a fresh one. */ - if (session == NULL) { - if (!ssl_get_new_session(hs, 1 /* server */)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return ssl_hs_error; - } - - hs->new_session->cipher = hs->new_cipher; - - /* On new sessions, stash the SNI value in the session. */ - if (hs->hostname != NULL) { - OPENSSL_free(hs->new_session->tlsext_hostname); - hs->new_session->tlsext_hostname = BUF_strdup(hs->hostname); - if (hs->new_session->tlsext_hostname == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return ssl_hs_error; - } - } - } else { - /* Check the PSK binder. */ - if (!tls13_verify_psk_binder(hs, session, &binders)) { - SSL_SESSION_free(session); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECRYPT_ERROR); - return ssl_hs_error; - } - - /* Only authentication information carries over in TLS 1.3. 
*/ - hs->new_session = SSL_SESSION_dup(session, SSL_SESSION_DUP_AUTH_ONLY); - if (hs->new_session == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return ssl_hs_error; - } - ssl->s3->session_reused = 1; - SSL_SESSION_free(session); - - /* Resumption incorporates fresh key material, so refresh the timeout. */ - ssl_session_renew_timeout(ssl, hs->new_session, - ssl->initial_ctx->session_psk_dhe_timeout); - } - - if (ssl->ctx->dos_protection_cb != NULL && - ssl->ctx->dos_protection_cb(&client_hello) == 0) { - /* Connection rejected for DOS reasons. */ - OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_REJECTED); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return ssl_hs_error; - } - - /* HTTP/2 negotiation depends on the cipher suite, so ALPN negotiation was - * deferred. Complete it now. */ - alert = SSL_AD_DECODE_ERROR; - if (!ssl_negotiate_alpn(hs, &alert, &client_hello)) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, alert); - return ssl_hs_error; - } - - /* Store the initial negotiated ALPN in the session. */ - if (ssl->s3->alpn_selected != NULL) { - hs->new_session->early_alpn = - BUF_memdup(ssl->s3->alpn_selected, ssl->s3->alpn_selected_len); - if (hs->new_session->early_alpn == NULL) { - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); - return ssl_hs_error; - } - hs->new_session->early_alpn_len = ssl->s3->alpn_selected_len; - } - - /* Incorporate the PSK into the running secret. */ - if (ssl->s3->session_reused) { - if (!tls13_advance_key_schedule(hs, hs->new_session->master_key, - hs->new_session->master_key_length)) { - return ssl_hs_error; - } - } else if (!tls13_advance_key_schedule(hs, kZeroes, hs->hash_len)) { - return ssl_hs_error; - } - - ssl->method->received_flight(ssl); - - /* Resolve ECDHE and incorporate it into the secret. */ - int need_retry; - if (!resolve_ecdhe_secret(hs, &need_retry, &client_hello)) { - if (need_retry) { - hs->tls13_state = state_send_hello_retry_request; - return ssl_hs_ok; - } - return ssl_hs_error; - } - - hs->tls13_state = state_send_server_hello; - return ssl_hs_ok; -} - -static enum ssl_hs_wait_t do_send_hello_retry_request(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - CBB cbb, body, extensions; - uint16_t group_id; - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_HELLO_RETRY_REQUEST) || - !CBB_add_u16(&body, ssl->version) || - !tls1_get_shared_group(hs, &group_id) || - !CBB_add_u16_length_prefixed(&body, &extensions) || - !CBB_add_u16(&extensions, TLSEXT_TYPE_key_share) || - !CBB_add_u16(&extensions, 2 /* length */) || - !CBB_add_u16(&extensions, group_id) || - !ssl_add_message_cbb(ssl, &cbb)) { - CBB_cleanup(&cbb); - return ssl_hs_error; - } - - hs->tls13_state = state_process_second_client_hello; - return ssl_hs_flush_and_read_message; -} - -static enum ssl_hs_wait_t do_process_second_client_hello(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_check_message_type(ssl, SSL3_MT_CLIENT_HELLO)) { - return ssl_hs_error; - } - - SSL_CLIENT_HELLO client_hello; - if (!ssl_client_hello_init(ssl, &client_hello, ssl->init_msg, - ssl->init_num)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_CLIENTHELLO_PARSE_FAILED); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); - return ssl_hs_error; - } - - int need_retry; - if (!resolve_ecdhe_secret(hs, &need_retry, &client_hello)) { - if (need_retry) { - /* Only send one HelloRetryRequest. 
*/ - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); - } - return ssl_hs_error; - } - - if (!ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - ssl->method->received_flight(ssl); - hs->tls13_state = state_send_server_hello; - return ssl_hs_ok; -} - -static enum ssl_hs_wait_t do_send_server_hello(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - - /* Send a ServerHello. */ - CBB cbb, body, extensions; - if (!ssl->method->init_message(ssl, &cbb, &body, SSL3_MT_SERVER_HELLO) || - !CBB_add_u16(&body, ssl->version) || - !RAND_bytes(ssl->s3->server_random, sizeof(ssl->s3->server_random)) || - !CBB_add_bytes(&body, ssl->s3->server_random, SSL3_RANDOM_SIZE) || - !CBB_add_u16(&body, ssl_cipher_get_value(hs->new_cipher)) || - !CBB_add_u16_length_prefixed(&body, &extensions) || - !ssl_ext_pre_shared_key_add_serverhello(hs, &extensions) || - !ssl_ext_key_share_add_serverhello(hs, &extensions)) { - goto err; - } - - if (ssl->s3->short_header) { - if (!CBB_add_u16(&extensions, TLSEXT_TYPE_short_header) || - !CBB_add_u16(&extensions, 0 /* empty extension */)) { - goto err; - } - } - - if (!ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - - /* Derive and enable the handshake traffic secrets. */ - if (!tls13_derive_handshake_secrets(hs) || - !tls13_set_traffic_key(ssl, evp_aead_open, hs->client_handshake_secret, - hs->hash_len) || - !tls13_set_traffic_key(ssl, evp_aead_seal, hs->server_handshake_secret, - hs->hash_len)) { - goto err; - } - - /* Send EncryptedExtensions. */ - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_ENCRYPTED_EXTENSIONS) || - !ssl_add_serverhello_tlsext(hs, &body) || - !ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - - /* Determine whether to request a client certificate. */ - hs->cert_request = !!(ssl->verify_mode & SSL_VERIFY_PEER); - /* CertificateRequest may only be sent in non-resumption handshakes. */ - if (ssl->s3->session_reused) { - hs->cert_request = 0; - } - - /* Send a CertificateRequest, if necessary. */ - if (hs->cert_request) { - CBB sigalgs_cbb; - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_CERTIFICATE_REQUEST) || - !CBB_add_u8(&body, 0 /* no certificate_request_context. */)) { - goto err; - } - - const uint16_t *sigalgs; - size_t num_sigalgs = tls12_get_verify_sigalgs(ssl, &sigalgs); - if (!CBB_add_u16_length_prefixed(&body, &sigalgs_cbb)) { - goto err; - } - - for (size_t i = 0; i < num_sigalgs; i++) { - if (!CBB_add_u16(&sigalgs_cbb, sigalgs[i])) { - goto err; - } - } - - if (!ssl_add_client_CA_list(ssl, &body) || - !CBB_add_u16(&body, 0 /* empty certificate_extensions. */) || - !ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - } - - /* Send the server Certificate message, if necessary. 
*/ - if (!ssl->s3->session_reused) { - if (!ssl_has_certificate(ssl)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CERTIFICATE_SET); - goto err; - } - - if (!tls13_add_certificate(hs)) { - goto err; - } - - hs->tls13_state = state_send_server_certificate_verify; - return ssl_hs_ok; - } - - hs->tls13_state = state_send_server_finished; - return ssl_hs_ok; - -err: - CBB_cleanup(&cbb); - return ssl_hs_error; -} - -static enum ssl_hs_wait_t do_send_server_certificate_verify(SSL_HANDSHAKE *hs, - int is_first_run) { - switch (tls13_add_certificate_verify(hs, is_first_run)) { - case ssl_private_key_success: - hs->tls13_state = state_send_server_finished; - return ssl_hs_ok; - - case ssl_private_key_retry: - hs->tls13_state = state_complete_server_certificate_verify; - return ssl_hs_private_key_operation; - - case ssl_private_key_failure: - return ssl_hs_error; - } - - assert(0); - return ssl_hs_error; -} - -static enum ssl_hs_wait_t do_send_server_finished(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!tls13_add_finished(hs) || - /* Update the secret to the master secret and derive traffic keys. */ - !tls13_advance_key_schedule(hs, kZeroes, hs->hash_len) || - !tls13_derive_application_secrets(hs) || - !tls13_set_traffic_key(ssl, evp_aead_seal, hs->server_traffic_secret_0, - hs->hash_len)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_client_certificate; - return ssl_hs_flush_and_read_message; -} - -static enum ssl_hs_wait_t do_process_client_certificate(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!hs->cert_request) { - /* OpenSSL returns X509_V_OK when no certificates are requested. This is - * classed by them as a bug, but it's assumed by at least NGINX. */ - hs->new_session->verify_result = X509_V_OK; - - /* Skip this state. */ - hs->tls13_state = state_process_channel_id; - return ssl_hs_ok; - } - - const int allow_anonymous = - (ssl->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) == 0; - - if (!ssl_check_message_type(ssl, SSL3_MT_CERTIFICATE) || - !tls13_process_certificate(hs, allow_anonymous) || - !ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_client_certificate_verify; - return ssl_hs_read_message; -} - -static enum ssl_hs_wait_t do_process_client_certificate_verify( - SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (sk_CRYPTO_BUFFER_num(hs->new_session->certs) == 0) { - /* Skip this state. */ - hs->tls13_state = state_process_channel_id; - return ssl_hs_ok; - } - - if (!ssl_check_message_type(ssl, SSL3_MT_CERTIFICATE_VERIFY) || - !tls13_process_certificate_verify(hs) || - !ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_channel_id; - return ssl_hs_read_message; -} - -static enum ssl_hs_wait_t do_process_channel_id(SSL_HANDSHAKE *hs) { - if (!hs->ssl->s3->tlsext_channel_id_valid) { - hs->tls13_state = state_process_client_finished; - return ssl_hs_ok; - } - - if (!ssl_check_message_type(hs->ssl, SSL3_MT_CHANNEL_ID) || - !tls1_verify_channel_id(hs) || - !ssl_hash_current_message(hs)) { - return ssl_hs_error; - } - - hs->tls13_state = state_process_client_finished; - return ssl_hs_read_message; -} - -static enum ssl_hs_wait_t do_process_client_finished(SSL_HANDSHAKE *hs) { - SSL *const ssl = hs->ssl; - if (!ssl_check_message_type(ssl, SSL3_MT_FINISHED) || - !tls13_process_finished(hs) || - !ssl_hash_current_message(hs) || - /* evp_aead_seal keys have already been switched. 
*/ - !tls13_set_traffic_key(ssl, evp_aead_open, hs->client_traffic_secret_0, - hs->hash_len) || - !tls13_derive_resumption_secret(hs)) { - return ssl_hs_error; - } - - ssl->method->received_flight(ssl); - - /* Rebase the session timestamp so that it is measured from ticket - * issuance. */ - ssl_session_rebase_time(ssl, hs->new_session); - hs->tls13_state = state_send_new_session_ticket; - return ssl_hs_ok; -} - -static enum ssl_hs_wait_t do_send_new_session_ticket(SSL_HANDSHAKE *hs) { - /* TLS 1.3 recommends single-use tickets, so issue multiple tickets in case the - * client makes several connections before getting a renewal. */ - static const int kNumTickets = 2; - - SSL *const ssl = hs->ssl; - /* If the client doesn't accept resumption with PSK_DHE_KE, don't send a - * session ticket. */ - if (!hs->accept_psk_mode) { - hs->tls13_state = state_done; - return ssl_hs_ok; - } - - SSL_SESSION *session = hs->new_session; - CBB cbb; - CBB_zero(&cbb); - - for (int i = 0; i < kNumTickets; i++) { - if (!RAND_bytes((uint8_t *)&session->ticket_age_add, 4)) { - goto err; - } - - CBB body, ticket, extensions; - if (!ssl->method->init_message(ssl, &cbb, &body, - SSL3_MT_NEW_SESSION_TICKET) || - !CBB_add_u32(&body, session->timeout) || - !CBB_add_u32(&body, session->ticket_age_add) || - !CBB_add_u16_length_prefixed(&body, &ticket) || - !ssl_encrypt_ticket(ssl, &ticket, session) || - !CBB_add_u16_length_prefixed(&body, &extensions)) { - goto err; - } - - if (ssl->ctx->enable_early_data) { - session->ticket_max_early_data = kMaxEarlyDataAccepted; - - CBB early_data_info; - if (!CBB_add_u16(&extensions, TLSEXT_TYPE_ticket_early_data_info) || - !CBB_add_u16_length_prefixed(&extensions, &early_data_info) || - !CBB_add_u32(&early_data_info, session->ticket_max_early_data) || - !CBB_flush(&extensions)) { - goto err; - } - } - - /* Add a fake extension. See draft-davidben-tls-grease-01. 
*/ - if (!CBB_add_u16(&extensions, - ssl_get_grease_value(ssl, ssl_grease_ticket_extension)) || - !CBB_add_u16(&extensions, 0 /* empty */)) { - goto err; - } - - if (!ssl_add_message_cbb(ssl, &cbb)) { - goto err; - } - } - - hs->session_tickets_sent++; - hs->tls13_state = state_done; - return ssl_hs_flush; - -err: - CBB_cleanup(&cbb); - return ssl_hs_error; -} - -enum ssl_hs_wait_t tls13_server_handshake(SSL_HANDSHAKE *hs) { - while (hs->tls13_state != state_done) { - enum ssl_hs_wait_t ret = ssl_hs_error; - enum server_hs_state_t state = hs->tls13_state; - switch (state) { - case state_select_parameters: - ret = do_select_parameters(hs); - break; - case state_send_hello_retry_request: - ret = do_send_hello_retry_request(hs); - break; - case state_process_second_client_hello: - ret = do_process_second_client_hello(hs); - break; - case state_send_server_hello: - ret = do_send_server_hello(hs); - break; - case state_send_server_certificate_verify: - ret = do_send_server_certificate_verify(hs, 1 /* first run */); - break; - case state_complete_server_certificate_verify: - ret = do_send_server_certificate_verify(hs, 0 /* complete */); - break; - case state_send_server_finished: - ret = do_send_server_finished(hs); - break; - case state_process_client_certificate: - ret = do_process_client_certificate(hs); - break; - case state_process_client_certificate_verify: - ret = do_process_client_certificate_verify(hs); - break; - case state_process_channel_id: - ret = do_process_channel_id(hs); - break; - case state_process_client_finished: - ret = do_process_client_finished(hs); - break; - case state_send_new_session_ticket: - ret = do_send_new_session_ticket(hs); - break; - case state_done: - ret = ssl_hs_ok; - break; - } - - if (ret != ssl_hs_ok) { - return ret; - } - } - - return ssl_hs_ok; -} diff --git a/Sources/BoringSSL/ssl/tls13_server.cc b/Sources/BoringSSL/ssl/tls13_server.cc new file mode 100644 index 000000000..1040ace03 --- /dev/null +++ b/Sources/BoringSSL/ssl/tls13_server.cc @@ -0,0 +1,1068 @@ +/* Copyright (c) 2016, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +// Per C99, various stdint.h macros are unavailable in C++ unless some macros +// are defined. C++11 overruled this decision, but older Android NDKs still +// require it. 
+#if !defined(__STDC_LIMIT_MACROS) +#define __STDC_LIMIT_MACROS +#endif + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "../crypto/internal.h" +#include "internal.h" + + +namespace bssl { + +enum server_hs_state_t { + state_select_parameters = 0, + state_select_session, + state_send_hello_retry_request, + state_read_second_client_hello, + state_send_server_hello, + state_send_server_certificate_verify, + state_send_server_finished, + state_read_second_client_flight, + state_process_end_of_early_data, + state_read_client_certificate, + state_read_client_certificate_verify, + state_read_channel_id, + state_read_client_finished, + state_send_new_session_ticket, + state_done, +}; + +static const uint8_t kZeroes[EVP_MAX_MD_SIZE] = {0}; + +static int resolve_ecdhe_secret(SSL_HANDSHAKE *hs, bool *out_need_retry, + SSL_CLIENT_HELLO *client_hello) { + SSL *const ssl = hs->ssl; + *out_need_retry = false; + + // We only support connections that include an ECDHE key exchange. + CBS key_share; + if (!ssl_client_hello_get_extension(client_hello, &key_share, + TLSEXT_TYPE_key_share)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_MISSING_KEY_SHARE); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_MISSING_EXTENSION); + return 0; + } + + bool found_key_share; + Array dhe_secret; + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!ssl_ext_key_share_parse_clienthello(hs, &found_key_share, &dhe_secret, + &alert, &key_share)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return 0; + } + + if (!found_key_share) { + *out_need_retry = true; + return 0; + } + + return tls13_advance_key_schedule(hs, dhe_secret.data(), dhe_secret.size()); +} + +static int ssl_ext_supported_versions_add_serverhello(SSL_HANDSHAKE *hs, + CBB *out) { + CBB contents; + if (!CBB_add_u16(out, TLSEXT_TYPE_supported_versions) || + !CBB_add_u16_length_prefixed(out, &contents) || + !CBB_add_u16(&contents, hs->ssl->version) || + !CBB_flush(out)) { + return 0; + } + + return 1; +} + +static const SSL_CIPHER *choose_tls13_cipher( + const SSL *ssl, const SSL_CLIENT_HELLO *client_hello) { + if (client_hello->cipher_suites_len % 2 != 0) { + return NULL; + } + + CBS cipher_suites; + CBS_init(&cipher_suites, client_hello->cipher_suites, + client_hello->cipher_suites_len); + + const int aes_is_fine = EVP_has_aes_hardware(); + const uint16_t version = ssl_protocol_version(ssl); + + const SSL_CIPHER *best = NULL; + while (CBS_len(&cipher_suites) > 0) { + uint16_t cipher_suite; + if (!CBS_get_u16(&cipher_suites, &cipher_suite)) { + return NULL; + } + + // Limit to TLS 1.3 ciphers we know about. + const SSL_CIPHER *candidate = SSL_get_cipher_by_value(cipher_suite); + if (candidate == NULL || + SSL_CIPHER_get_min_version(candidate) > version || + SSL_CIPHER_get_max_version(candidate) < version) { + continue; + } + + // TLS 1.3 removes legacy ciphers, so honor the client order, but prefer + // ChaCha20 if we do not have AES hardware. + if (aes_is_fine) { + return candidate; + } + + if (candidate->algorithm_enc == SSL_CHACHA20POLY1305) { + return candidate; + } + + if (best == NULL) { + best = candidate; + } + } + + return best; +} + +static int add_new_session_tickets(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + // TLS 1.3 recommends single-use tickets, so issue multiple tickets in case + // the client makes several connections before getting a renewal. + static const int kNumTickets = 2; + + // Rebase the session timestamp so that it is measured from ticket + // issuance. 
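For orientation while reading the ticket-construction loop that follows: the message it assembles matches the NewSessionTicket layout from the TLS 1.3 drafts, where the ticket_nonce field only exists from draft-21 onward (hence the ssl_is_draft21 gate around the nonce below). A rough sketch of the fields as a plain C++ struct, purely for illustration; the names follow the specification, not any type in this codebase:

    #include <cstdint>
    #include <vector>

    // Illustrative only; not a type used by BoringSSL.
    struct NewSessionTicketSketch {
      uint32_t ticket_lifetime;           // seconds the ticket remains valid
      uint32_t ticket_age_add;            // random mask obfuscating the ticket age
      std::vector<uint8_t> ticket_nonce;  // <= 255 bytes; draft-21 and later only
      std::vector<uint8_t> ticket;        // opaque, server-encrypted session state
      std::vector<uint8_t> extensions;    // e.g. early_data / ticket_early_data_info
    };

Each of the kNumTickets iterations emits one such message with its own nonce, which is why tls13_derive_session_psk takes the nonce: from draft-21 on, every ticket resolves to a distinct PSK.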
+ ssl_session_rebase_time(ssl, hs->new_session.get()); + + for (int i = 0; i < kNumTickets; i++) { + UniquePtr session( + SSL_SESSION_dup(hs->new_session.get(), SSL_SESSION_INCLUDE_NONAUTH)); + if (!session) { + return 0; + } + + if (!RAND_bytes((uint8_t *)&session->ticket_age_add, 4)) { + return 0; + } + session->ticket_age_add_valid = 1; + if (ssl->cert->enable_early_data) { + session->ticket_max_early_data = kMaxEarlyDataAccepted; + } + + static_assert(kNumTickets < 256, "Too many tickets"); + uint8_t nonce[] = {static_cast(i)}; + + ScopedCBB cbb; + CBB body, nonce_cbb, ticket, extensions; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_NEW_SESSION_TICKET) || + !CBB_add_u32(&body, session->timeout) || + !CBB_add_u32(&body, session->ticket_age_add) || + (ssl_is_draft21(ssl->version) && + (!CBB_add_u8_length_prefixed(&body, &nonce_cbb) || + !CBB_add_bytes(&nonce_cbb, nonce, sizeof(nonce)))) || + !CBB_add_u16_length_prefixed(&body, &ticket) || + !tls13_derive_session_psk(session.get(), nonce) || + !ssl_encrypt_ticket(ssl, &ticket, session.get()) || + !CBB_add_u16_length_prefixed(&body, &extensions)) { + return 0; + } + + if (ssl->cert->enable_early_data) { + CBB early_data_info; + if (!CBB_add_u16(&extensions, ssl_is_draft21(ssl->version) + ? TLSEXT_TYPE_early_data + : TLSEXT_TYPE_ticket_early_data_info) || + !CBB_add_u16_length_prefixed(&extensions, &early_data_info) || + !CBB_add_u32(&early_data_info, session->ticket_max_early_data) || + !CBB_flush(&extensions)) { + return 0; + } + } + + // Add a fake extension. See draft-davidben-tls-grease-01. + if (!CBB_add_u16(&extensions, + ssl_get_grease_value(ssl, ssl_grease_ticket_extension)) || + !CBB_add_u16(&extensions, 0 /* empty */)) { + return 0; + } + + if (!ssl_add_message_cbb(ssl, cbb.get())) { + return 0; + } + } + + return 1; +} + +static enum ssl_hs_wait_t do_select_parameters(SSL_HANDSHAKE *hs) { + // At this point, most ClientHello extensions have already been processed by + // the common handshake logic. Resolve the remaining non-PSK parameters. + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + SSL_CLIENT_HELLO client_hello; + if (!ssl_client_hello_init(ssl, &client_hello, msg)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_CLIENTHELLO_PARSE_FAILED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + OPENSSL_memcpy(hs->session_id, client_hello.session_id, + client_hello.session_id_len); + hs->session_id_len = client_hello.session_id_len; + + // Negotiate the cipher suite. + hs->new_cipher = choose_tls13_cipher(ssl, &client_hello); + if (hs->new_cipher == NULL) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_SHARED_CIPHER); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE); + return ssl_hs_error; + } + + // HTTP/2 negotiation depends on the cipher suite, so ALPN negotiation was + // deferred. Complete it now. + uint8_t alert = SSL_AD_DECODE_ERROR; + if (!ssl_negotiate_alpn(hs, &alert, &client_hello)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + } + + // The PRF hash is now known. Set up the key schedule and hash the + // ClientHello. 
+ if (!hs->transcript.InitHash(ssl_protocol_version(ssl), hs->new_cipher)) { + return ssl_hs_error; + } + + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + hs->tls13_state = state_select_session; + return ssl_hs_ok; +} + +static enum ssl_ticket_aead_result_t select_session( + SSL_HANDSHAKE *hs, uint8_t *out_alert, UniquePtr *out_session, + int32_t *out_ticket_age_skew, const SSLMessage &msg, + const SSL_CLIENT_HELLO *client_hello) { + SSL *const ssl = hs->ssl; + *out_session = NULL; + + // Decode the ticket if we agreed on a PSK key exchange mode. + CBS pre_shared_key; + if (!hs->accept_psk_mode || + !ssl_client_hello_get_extension(client_hello, &pre_shared_key, + TLSEXT_TYPE_pre_shared_key)) { + return ssl_ticket_aead_ignore_ticket; + } + + // Verify that the pre_shared_key extension is the last extension in + // ClientHello. + if (CBS_data(&pre_shared_key) + CBS_len(&pre_shared_key) != + client_hello->extensions + client_hello->extensions_len) { + OPENSSL_PUT_ERROR(SSL, SSL_R_PRE_SHARED_KEY_MUST_BE_LAST); + *out_alert = SSL_AD_ILLEGAL_PARAMETER; + return ssl_ticket_aead_error; + } + + CBS ticket, binders; + uint32_t client_ticket_age; + if (!ssl_ext_pre_shared_key_parse_clienthello(hs, &ticket, &binders, + &client_ticket_age, out_alert, + &pre_shared_key)) { + return ssl_ticket_aead_error; + } + + // TLS 1.3 session tickets are renewed separately as part of the + // NewSessionTicket. + bool unused_renew; + UniquePtr session; + enum ssl_ticket_aead_result_t ret = + ssl_process_ticket(ssl, &session, &unused_renew, CBS_data(&ticket), + CBS_len(&ticket), NULL, 0); + switch (ret) { + case ssl_ticket_aead_success: + break; + case ssl_ticket_aead_error: + *out_alert = SSL_AD_INTERNAL_ERROR; + return ret; + default: + return ret; + } + + if (!ssl_session_is_resumable(hs, session.get()) || + // Historically, some TLS 1.3 tickets were missing ticket_age_add. + !session->ticket_age_add_valid) { + return ssl_ticket_aead_ignore_ticket; + } + + // Recover the client ticket age and convert to seconds. + client_ticket_age -= session->ticket_age_add; + client_ticket_age /= 1000; + + struct OPENSSL_timeval now; + ssl_get_current_time(ssl, &now); + + // Compute the server ticket age in seconds. + assert(now.tv_sec >= session->time); + uint64_t server_ticket_age = now.tv_sec - session->time; + + // To avoid overflowing |hs->ticket_age_skew|, we will not resume + // 68-year-old sessions. + if (server_ticket_age > INT32_MAX) { + return ssl_ticket_aead_ignore_ticket; + } + + // TODO(davidben,svaldez): Measure this value to decide on tolerance. For + // now, accept all values. https://crbug.com/boringssl/113. + *out_ticket_age_skew = + (int32_t)client_ticket_age - (int32_t)server_ticket_age; + + // Check the PSK binder. 
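The ticket-age arithmetic above is easy to misread, so here it is in isolation. Per the TLS 1.3 drafts, the client reports obfuscated_ticket_age = (age_in_milliseconds + ticket_age_add) mod 2^32; the server strips the mask, converts to seconds, and compares the result against its own clock to obtain the skew stored in ssl->s3->ticket_age_skew. A minimal standalone sketch (the function and parameter names are made up for illustration):

    #include <cstdint>

    // Mirrors the unsigned wrap-around that the resumption code relies on.
    int32_t TicketAgeSkewSketch(uint32_t obfuscated_ticket_age,  // from the ClientHello
                                uint32_t ticket_age_add,         // stored in the ticket
                                uint64_t now_sec, uint64_t issued_sec) {
      uint32_t client_age_ms = obfuscated_ticket_age - ticket_age_add;  // mod 2^32
      uint32_t client_age_sec = client_age_ms / 1000;
      // The caller has already rejected server ages past INT32_MAX.
      uint64_t server_age_sec = now_sec - issued_sec;
      return (int32_t)client_age_sec - (int32_t)server_age_sec;
    }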
+ if (!tls13_verify_psk_binder(hs, session.get(), msg, &binders)) { + *out_alert = SSL_AD_DECRYPT_ERROR; + return ssl_ticket_aead_error; + } + + *out_session = std::move(session); + return ssl_ticket_aead_success; +} + +static enum ssl_hs_wait_t do_select_session(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + SSL_CLIENT_HELLO client_hello; + if (!ssl_client_hello_init(ssl, &client_hello, msg)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_CLIENTHELLO_PARSE_FAILED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + uint8_t alert = SSL_AD_DECODE_ERROR; + UniquePtr session; + switch (select_session(hs, &alert, &session, &ssl->s3->ticket_age_skew, msg, + &client_hello)) { + case ssl_ticket_aead_ignore_ticket: + assert(!session); + if (!ssl_get_new_session(hs, 1 /* server */)) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + break; + + case ssl_ticket_aead_success: + // Carry over authentication information from the previous handshake into + // a fresh session. + hs->new_session = + SSL_SESSION_dup(session.get(), SSL_SESSION_DUP_AUTH_ONLY); + + if (ssl->cert->enable_early_data && + // Early data must be acceptable for this ticket. + session->ticket_max_early_data != 0 && + // The client must have offered early data. + hs->early_data_offered && + // Channel ID is incompatible with 0-RTT. + !ssl->s3->tlsext_channel_id_valid && + // Custom extensions is incompatible with 0-RTT. + hs->custom_extensions.received == 0 && + // The negotiated ALPN must match the one in the ticket. + ssl->s3->alpn_selected == + MakeConstSpan(session->early_alpn, session->early_alpn_len)) { + ssl->early_data_accepted = true; + } + + if (hs->new_session == NULL) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + ssl->s3->session_reused = true; + + // Resumption incorporates fresh key material, so refresh the timeout. + ssl_session_renew_timeout(ssl, hs->new_session.get(), + ssl->session_ctx->session_psk_dhe_timeout); + break; + + case ssl_ticket_aead_error: + ssl_send_alert(ssl, SSL3_AL_FATAL, alert); + return ssl_hs_error; + + case ssl_ticket_aead_retry: + hs->tls13_state = state_select_session; + return ssl_hs_pending_ticket; + } + + // Record connection properties in the new session. + hs->new_session->cipher = hs->new_cipher; + + // Store the initial negotiated ALPN in the session. + if (!ssl->s3->alpn_selected.empty()) { + hs->new_session->early_alpn = (uint8_t *)BUF_memdup( + ssl->s3->alpn_selected.data(), ssl->s3->alpn_selected.size()); + if (hs->new_session->early_alpn == NULL) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + hs->new_session->early_alpn_len = ssl->s3->alpn_selected.size(); + } + + if (ssl->ctx->dos_protection_cb != NULL && + ssl->ctx->dos_protection_cb(&client_hello) == 0) { + // Connection rejected for DOS reasons. + OPENSSL_PUT_ERROR(SSL, SSL_R_CONNECTION_REJECTED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR); + return ssl_hs_error; + } + + size_t hash_len = EVP_MD_size( + ssl_get_handshake_digest(ssl_protocol_version(ssl), hs->new_cipher)); + + // Set up the key schedule and incorporate the PSK into the running secret. 
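A note on the "running secret" referenced above: the TLS 1.3 key schedule is a chain of HKDF-Extract steps that folds in the PSK (or zeros) first, then the ECDHE secret, then zeros again to form the master secret, with the various traffic secrets expanded from each stage. The sketch below shows the final RFC 8446 shape; the drafts this file supports differ in labels and in whether the intermediate Derive-Secret step exists, and hkdf_extract/derive_secret are stand-ins rather than functions in this codebase:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using Bytes = std::vector<uint8_t>;

    // Hypothetical helpers standing in for HKDF-Extract and Derive-Secret.
    Bytes hkdf_extract(const Bytes &salt, const Bytes &ikm);
    Bytes derive_secret(const Bytes &secret, const char *label, const Bytes &transcript);

    // Rough shape of what tls13_init_key_schedule / tls13_advance_key_schedule drive.
    void KeyScheduleSketch(const Bytes &psk_or_zeros, const Bytes &ecdhe_or_zeros,
                           size_t hash_len) {
      Bytes zeros(hash_len, 0);
      Bytes early = hkdf_extract(zeros, psk_or_zeros);
      Bytes handshake = hkdf_extract(derive_secret(early, "derived", {}), ecdhe_or_zeros);
      Bytes master = hkdf_extract(derive_secret(handshake, "derived", {}), zeros);
      (void)master;  // handshake and application traffic secrets are expanded from each stage
    }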
+ if (ssl->s3->session_reused) { + if (!tls13_init_key_schedule(hs, hs->new_session->master_key, + hs->new_session->master_key_length)) { + return ssl_hs_error; + } + } else if (!tls13_init_key_schedule(hs, kZeroes, hash_len)) { + return ssl_hs_error; + } + + if (ssl->early_data_accepted) { + if (!tls13_derive_early_secrets(hs)) { + return ssl_hs_error; + } + } else if (hs->early_data_offered) { + ssl->s3->skip_early_data = true; + } + + // Resolve ECDHE and incorporate it into the secret. + bool need_retry; + if (!resolve_ecdhe_secret(hs, &need_retry, &client_hello)) { + if (need_retry) { + ssl->early_data_accepted = false; + ssl->s3->skip_early_data = true; + ssl->method->next_message(ssl); + if (ssl_is_draft21(ssl->version) && + !hs->transcript.UpdateForHelloRetryRequest()) { + return ssl_hs_error; + } + hs->tls13_state = state_send_hello_retry_request; + return ssl_hs_ok; + } + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_send_server_hello; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_hello_retry_request(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + + if (ssl_is_draft22(ssl->version)) { + ScopedCBB cbb; + CBB body, session_id, extensions; + uint16_t group_id; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_SERVER_HELLO) || + !CBB_add_u16(&body, TLS1_2_VERSION) || + !CBB_add_bytes(&body, kHelloRetryRequest, SSL3_RANDOM_SIZE) || + !CBB_add_u8_length_prefixed(&body, &session_id) || + !CBB_add_bytes(&session_id, hs->session_id, hs->session_id_len) || + !CBB_add_u16(&body, ssl_cipher_get_value(hs->new_cipher)) || + !CBB_add_u8(&body, 0 /* no compression */) || + !tls1_get_shared_group(hs, &group_id) || + !CBB_add_u16_length_prefixed(&body, &extensions) || + !CBB_add_u16(&extensions, TLSEXT_TYPE_supported_versions) || + !CBB_add_u16(&extensions, 2 /* length */) || + !CBB_add_u16(&extensions, ssl->version) || + !CBB_add_u16(&extensions, TLSEXT_TYPE_key_share) || + !CBB_add_u16(&extensions, 2 /* length */) || + !CBB_add_u16(&extensions, group_id) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + + if (!ssl->method->add_change_cipher_spec(ssl)) { + return ssl_hs_error; + } + } else { + ScopedCBB cbb; + CBB body, extensions; + uint16_t group_id; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_HELLO_RETRY_REQUEST) || + !CBB_add_u16(&body, ssl->version) || + (ssl_is_draft21(ssl->version) && + !CBB_add_u16(&body, ssl_cipher_get_value(hs->new_cipher))) || + !tls1_get_shared_group(hs, &group_id) || + !CBB_add_u16_length_prefixed(&body, &extensions) || + !CBB_add_u16(&extensions, TLSEXT_TYPE_key_share) || + !CBB_add_u16(&extensions, 2 /* length */) || + !CBB_add_u16(&extensions, group_id) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + } + + hs->sent_hello_retry_request = true; + hs->tls13_state = state_read_second_client_hello; + return ssl_hs_flush; +} + +static enum ssl_hs_wait_t do_read_second_client_hello(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CLIENT_HELLO)) { + return ssl_hs_error; + } + SSL_CLIENT_HELLO client_hello; + if (!ssl_client_hello_init(ssl, &client_hello, msg)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_CLIENTHELLO_PARSE_FAILED); + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + return ssl_hs_error; + } + + bool need_retry; + if (!resolve_ecdhe_secret(hs, &need_retry, &client_hello)) 
{ + if (need_retry) { + // Only send one HelloRetryRequest. + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_ILLEGAL_PARAMETER); + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_CURVE); + } + return ssl_hs_error; + } + + if (!ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_send_server_hello; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_server_hello(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + + uint16_t version = ssl->version; + if (ssl_is_resumption_experiment(ssl->version)) { + version = TLS1_2_VERSION; + } + + // Send a ServerHello. + ScopedCBB cbb; + CBB body, extensions, session_id; + if (!ssl->method->init_message(ssl, cbb.get(), &body, SSL3_MT_SERVER_HELLO) || + !CBB_add_u16(&body, version) || + !RAND_bytes(ssl->s3->server_random, sizeof(ssl->s3->server_random)) || + !CBB_add_bytes(&body, ssl->s3->server_random, SSL3_RANDOM_SIZE) || + (ssl_is_resumption_experiment(ssl->version) && + (!CBB_add_u8_length_prefixed(&body, &session_id) || + !CBB_add_bytes(&session_id, hs->session_id, hs->session_id_len))) || + !CBB_add_u16(&body, ssl_cipher_get_value(hs->new_cipher)) || + (ssl_is_resumption_experiment(ssl->version) && !CBB_add_u8(&body, 0)) || + !CBB_add_u16_length_prefixed(&body, &extensions) || + !ssl_ext_pre_shared_key_add_serverhello(hs, &extensions) || + !ssl_ext_key_share_add_serverhello(hs, &extensions) || + (ssl_is_resumption_experiment(ssl->version) && + !ssl_ext_supported_versions_add_serverhello(hs, &extensions)) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + + if (ssl_is_resumption_experiment(ssl->version) && + (!ssl_is_draft22(ssl->version) || !hs->sent_hello_retry_request) && + !ssl->method->add_change_cipher_spec(ssl)) { + return ssl_hs_error; + } + + // Derive and enable the handshake traffic secrets. + if (!tls13_derive_handshake_secrets(hs) || + !tls13_set_traffic_key(ssl, evp_aead_seal, hs->server_handshake_secret, + hs->hash_len)) { + return ssl_hs_error; + } + + // Send EncryptedExtensions. + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_ENCRYPTED_EXTENSIONS) || + !ssl_add_serverhello_tlsext(hs, &body) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + + if (!ssl->s3->session_reused) { + // Determine whether to request a client certificate. + hs->cert_request = !!(ssl->verify_mode & SSL_VERIFY_PEER); + // Only request a certificate if Channel ID isn't negotiated. + if ((ssl->verify_mode & SSL_VERIFY_PEER_IF_NO_OBC) && + ssl->s3->tlsext_channel_id_valid) { + hs->cert_request = false; + } + } + + // Send a CertificateRequest, if necessary. + if (hs->cert_request) { + if (ssl_is_draft21(ssl->version)) { + CBB cert_request_extensions, sigalg_contents, sigalgs_cbb; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_CERTIFICATE_REQUEST) || + !CBB_add_u8(&body, 0 /* no certificate_request_context. 
*/) || + !CBB_add_u16_length_prefixed(&body, &cert_request_extensions) || + !CBB_add_u16(&cert_request_extensions, + TLSEXT_TYPE_signature_algorithms) || + !CBB_add_u16_length_prefixed(&cert_request_extensions, + &sigalg_contents) || + !CBB_add_u16_length_prefixed(&sigalg_contents, &sigalgs_cbb) || + !tls12_add_verify_sigalgs(ssl, &sigalgs_cbb)) { + return ssl_hs_error; + } + + if (ssl_has_client_CAs(ssl)) { + CBB ca_contents; + if (!CBB_add_u16(&cert_request_extensions, + TLSEXT_TYPE_certificate_authorities) || + !CBB_add_u16_length_prefixed(&cert_request_extensions, + &ca_contents) || + !ssl_add_client_CA_list(ssl, &ca_contents) || + !CBB_flush(&cert_request_extensions)) { + return ssl_hs_error; + } + } + + if (!ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + } else { + CBB sigalgs_cbb; + if (!ssl->method->init_message(ssl, cbb.get(), &body, + SSL3_MT_CERTIFICATE_REQUEST) || + !CBB_add_u8(&body, 0 /* no certificate_request_context. */) || + !CBB_add_u16_length_prefixed(&body, &sigalgs_cbb) || + !tls12_add_verify_sigalgs(ssl, &sigalgs_cbb) || + !ssl_add_client_CA_list(ssl, &body) || + !CBB_add_u16(&body, 0 /* empty certificate_extensions. */) || + !ssl_add_message_cbb(ssl, cbb.get())) { + return ssl_hs_error; + } + } + } + + // Send the server Certificate message, if necessary. + if (!ssl->s3->session_reused) { + if (!ssl_has_certificate(ssl)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_NO_CERTIFICATE_SET); + return ssl_hs_error; + } + + if (!tls13_add_certificate(hs)) { + return ssl_hs_error; + } + + hs->tls13_state = state_send_server_certificate_verify; + return ssl_hs_ok; + } + + hs->tls13_state = state_send_server_finished; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_server_certificate_verify(SSL_HANDSHAKE *hs) { + switch (tls13_add_certificate_verify(hs)) { + case ssl_private_key_success: + hs->tls13_state = state_send_server_finished; + return ssl_hs_ok; + + case ssl_private_key_retry: + hs->tls13_state = state_send_server_certificate_verify; + return ssl_hs_private_key_operation; + + case ssl_private_key_failure: + return ssl_hs_error; + } + + assert(0); + return ssl_hs_error; +} + +static enum ssl_hs_wait_t do_send_server_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + if (!tls13_add_finished(hs) || + // Update the secret to the master secret and derive traffic keys. + !tls13_advance_key_schedule(hs, kZeroes, hs->hash_len) || + !tls13_derive_application_secrets(hs) || + !tls13_set_traffic_key(ssl, evp_aead_seal, hs->server_traffic_secret_0, + hs->hash_len)) { + return ssl_hs_error; + } + + if (ssl->early_data_accepted) { + // If accepting 0-RTT, we send tickets half-RTT. This gets the tickets on + // the wire sooner and also avoids triggering a write on |SSL_read| when + // processing the client Finished. This requires computing the client + // Finished early. See draft-ietf-tls-tls13-18, section 4.5.1. + if (ssl_is_draft21(ssl->version)) { + static const uint8_t kEndOfEarlyData[4] = {SSL3_MT_END_OF_EARLY_DATA, 0, + 0, 0}; + if (!hs->transcript.Update(kEndOfEarlyData)) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + } + + size_t finished_len; + if (!tls13_finished_mac(hs, hs->expected_client_finished, &finished_len, + 0 /* client */)) { + return ssl_hs_error; + } + + if (finished_len != hs->hash_len) { + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return ssl_hs_error; + } + + // Feed the predicted Finished into the transcript. 
This allows us to derive + // the resumption secret early and send half-RTT tickets. + // + // TODO(davidben): This will need to be updated for DTLS 1.3. + assert(!SSL_is_dtls(hs->ssl)); + assert(hs->hash_len <= 0xff); + uint8_t header[4] = {SSL3_MT_FINISHED, 0, 0, + static_cast(hs->hash_len)}; + if (!hs->transcript.Update(header) || + !hs->transcript.Update( + MakeConstSpan(hs->expected_client_finished, hs->hash_len)) || + !tls13_derive_resumption_secret(hs) || + !add_new_session_tickets(hs)) { + return ssl_hs_error; + } + } + + hs->tls13_state = state_read_second_client_flight; + return ssl_hs_flush; +} + +static enum ssl_hs_wait_t do_read_second_client_flight(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + if (ssl->early_data_accepted) { + if (!tls13_set_traffic_key(ssl, evp_aead_open, hs->early_traffic_secret, + hs->hash_len)) { + return ssl_hs_error; + } + hs->can_early_write = true; + hs->can_early_read = true; + hs->in_early_data = true; + } + hs->tls13_state = state_process_end_of_early_data; + return ssl->early_data_accepted ? ssl_hs_read_end_of_early_data : ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_process_end_of_early_data(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + if (hs->early_data_offered) { + // If early data was not accepted, the EndOfEarlyData and ChangeCipherSpec + // message will be in the discarded early data. + if (hs->ssl->early_data_accepted) { + if (ssl_is_draft21(ssl->version)) { + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_END_OF_EARLY_DATA)) { + return ssl_hs_error; + } + if (CBS_len(&msg.body) != 0) { + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR); + OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); + return ssl_hs_error; + } + ssl->method->next_message(ssl); + } + } + } + if (!tls13_set_traffic_key(ssl, evp_aead_open, hs->client_handshake_secret, + hs->hash_len)) { + return ssl_hs_error; + } + hs->tls13_state = ssl->early_data_accepted ? state_read_client_finished + : state_read_client_certificate; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_client_certificate(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + if (!hs->cert_request) { + // OpenSSL returns X509_V_OK when no certificates are requested. This is + // classed by them as a bug, but it's assumed by at least NGINX. + hs->new_session->verify_result = X509_V_OK; + + // Skip this state. + hs->tls13_state = state_read_channel_id; + return ssl_hs_ok; + } + + const int allow_anonymous = + (ssl->verify_mode & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) == 0; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE) || + !tls13_process_certificate(hs, msg, allow_anonymous) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_read_client_certificate_verify; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_client_certificate_verify( + SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + if (sk_CRYPTO_BUFFER_num(hs->new_session->certs) == 0) { + // Skip this state. 
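Relatedly, a brief note on the predicted client Finished used for half-RTT tickets in do_send_server_finished above: in TLS 1.3 the Finished payload is deterministic once the transcript is fixed, which is what lets the server compute the client's value before it arrives. In RFC 8446 terms (labels in the intermediate drafts may differ):

    // finished_key = HKDF-Expand-Label(sender_handshake_traffic_secret,
    //                                  "finished", "", Hash.length)
    // verify_data  = HMAC(finished_key, Transcript-Hash(handshake messages so far))
    //
    // The predicted value is then framed as an ordinary handshake message
    // (1-byte type, 3-byte length), which is what the
    // header[4] = {SSL3_MT_FINISHED, 0, 0, hash_len} construction feeds into
    // the transcript so the resumption secret can be derived early.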
+ hs->tls13_state = state_read_channel_id; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + + switch (ssl_verify_peer_cert(hs)) { + case ssl_verify_ok: + break; + case ssl_verify_invalid: + return ssl_hs_error; + case ssl_verify_retry: + hs->tls13_state = state_read_client_certificate_verify; + return ssl_hs_certificate_verify; + } + + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CERTIFICATE_VERIFY) || + !tls13_process_certificate_verify(hs, msg) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_read_channel_id; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_channel_id(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + if (!ssl->s3->tlsext_channel_id_valid) { + hs->tls13_state = state_read_client_finished; + return ssl_hs_ok; + } + + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + if (!ssl_check_message_type(ssl, msg, SSL3_MT_CHANNEL_ID) || + !tls1_verify_channel_id(hs, msg) || + !ssl_hash_message(hs, msg)) { + return ssl_hs_error; + } + + ssl->method->next_message(ssl); + hs->tls13_state = state_read_client_finished; + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_read_client_finished(SSL_HANDSHAKE *hs) { + SSL *const ssl = hs->ssl; + SSLMessage msg; + if (!ssl->method->get_message(ssl, &msg)) { + return ssl_hs_read_message; + } + if (!ssl_check_message_type(ssl, msg, SSL3_MT_FINISHED) || + // If early data was accepted, we've already computed the client Finished + // and derived the resumption secret. + !tls13_process_finished(hs, msg, ssl->early_data_accepted) || + // evp_aead_seal keys have already been switched. + !tls13_set_traffic_key(ssl, evp_aead_open, hs->client_traffic_secret_0, + hs->hash_len)) { + return ssl_hs_error; + } + + if (!ssl->early_data_accepted) { + if (!ssl_hash_message(hs, msg) || + !tls13_derive_resumption_secret(hs)) { + return ssl_hs_error; + } + + // We send post-handshake tickets as part of the handshake in 1-RTT. + hs->tls13_state = state_send_new_session_ticket; + } else { + // We already sent half-RTT tickets. + hs->tls13_state = state_done; + } + + ssl->method->next_message(ssl); + return ssl_hs_ok; +} + +static enum ssl_hs_wait_t do_send_new_session_ticket(SSL_HANDSHAKE *hs) { + // If the client doesn't accept resumption with PSK_DHE_KE, don't send a + // session ticket. 
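For context on the hs->accept_psk_mode check below: it records whether the ClientHello's psk_key_exchange_modes extension offered psk_dhe_ke, the only mode this server resumes with (plain psk_ke would skip the ECDHE exchange entirely). A minimal sketch of that check, assuming the raw extension body is already isolated; the constants psk_ke = 0 and psk_dhe_ke = 1 come from the TLS 1.3 drafts/RFC 8446, and the function name is made up:

    #include <cstddef>
    #include <cstdint>

    // Returns true if the offered PskKeyExchangeModes list contains psk_dhe_ke(1).
    // |modes| points at the list contents, after its one-byte length prefix.
    bool OffersPskDheKe(const uint8_t *modes, size_t modes_len) {
      for (size_t i = 0; i < modes_len; i++) {
        if (modes[i] == 1 /* psk_dhe_ke */) {
          return true;
        }
      }
      return false;
    }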
+ if (!hs->accept_psk_mode) { + hs->tls13_state = state_done; + return ssl_hs_ok; + } + + if (!add_new_session_tickets(hs)) { + return ssl_hs_error; + } + + hs->tls13_state = state_done; + return ssl_hs_flush; +} + +enum ssl_hs_wait_t tls13_server_handshake(SSL_HANDSHAKE *hs) { + while (hs->tls13_state != state_done) { + enum ssl_hs_wait_t ret = ssl_hs_error; + enum server_hs_state_t state = + static_cast(hs->tls13_state); + switch (state) { + case state_select_parameters: + ret = do_select_parameters(hs); + break; + case state_select_session: + ret = do_select_session(hs); + break; + case state_send_hello_retry_request: + ret = do_send_hello_retry_request(hs); + break; + case state_read_second_client_hello: + ret = do_read_second_client_hello(hs); + break; + case state_send_server_hello: + ret = do_send_server_hello(hs); + break; + case state_send_server_certificate_verify: + ret = do_send_server_certificate_verify(hs); + break; + case state_send_server_finished: + ret = do_send_server_finished(hs); + break; + case state_read_second_client_flight: + ret = do_read_second_client_flight(hs); + break; + case state_process_end_of_early_data: + ret = do_process_end_of_early_data(hs); + break; + case state_read_client_certificate: + ret = do_read_client_certificate(hs); + break; + case state_read_client_certificate_verify: + ret = do_read_client_certificate_verify(hs); + break; + case state_read_channel_id: + ret = do_read_channel_id(hs); + break; + case state_read_client_finished: + ret = do_read_client_finished(hs); + break; + case state_send_new_session_ticket: + ret = do_send_new_session_ticket(hs); + break; + case state_done: + ret = ssl_hs_ok; + break; + } + + if (hs->tls13_state != state) { + ssl_do_info_callback(hs->ssl, SSL_CB_ACCEPT_LOOP, 1); + } + + if (ret != ssl_hs_ok) { + return ret; + } + } + + return ssl_hs_ok; +} + +const char *tls13_server_handshake_state(SSL_HANDSHAKE *hs) { + enum server_hs_state_t state = + static_cast(hs->tls13_state); + switch (state) { + case state_select_parameters: + return "TLS 1.3 server select_parameters"; + case state_select_session: + return "TLS 1.3 server select_session"; + case state_send_hello_retry_request: + return "TLS 1.3 server send_hello_retry_request"; + case state_read_second_client_hello: + return "TLS 1.3 server read_second_client_hello"; + case state_send_server_hello: + return "TLS 1.3 server send_server_hello"; + case state_send_server_certificate_verify: + return "TLS 1.3 server send_server_certificate_verify"; + case state_send_server_finished: + return "TLS 1.3 server send_server_finished"; + case state_read_second_client_flight: + return "TLS 1.3 server read_second_client_flight"; + case state_process_end_of_early_data: + return "TLS 1.3 server process_end_of_early_data"; + case state_read_client_certificate: + return "TLS 1.3 server read_client_certificate"; + case state_read_client_certificate_verify: + return "TLS 1.3 server read_client_certificate_verify"; + case state_read_channel_id: + return "TLS 1.3 server read_channel_id"; + case state_read_client_finished: + return "TLS 1.3 server read_client_finished"; + case state_send_new_session_ticket: + return "TLS 1.3 server send_new_session_ticket"; + case state_done: + return "TLS 1.3 server done"; + } + + return "TLS 1.3 server unknown"; +} + +} // namespace bssl diff --git a/Sources/BoringSSL/ssl/tls_method.c b/Sources/BoringSSL/ssl/tls_method.cc similarity index 70% rename from Sources/BoringSSL/ssl/tls_method.c rename to Sources/BoringSSL/ssl/tls_method.cc index 
eaad2cafb..4eacf646f 100644 --- a/Sources/BoringSSL/ssl/tls_method.c +++ b/Sources/BoringSSL/ssl/tls_method.cc @@ -65,98 +65,121 @@ #include "internal.h" -static int ssl3_version_from_wire(uint16_t *out_version, - uint16_t wire_version) { - switch (wire_version) { - case SSL3_VERSION: - case TLS1_VERSION: - case TLS1_1_VERSION: - case TLS1_2_VERSION: - *out_version = wire_version; - return 1; - case TLS1_3_DRAFT_VERSION: - *out_version = TLS1_3_VERSION; - return 1; +namespace bssl { + +static void ssl3_on_handshake_complete(SSL *ssl) { + // The handshake should have released its final message. + assert(!ssl->s3->has_message); + + // During the handshake, |hs_buf| is retained. Release if it there is no + // excess in it. There may be excess left if there server sent Finished and + // HelloRequest in the same record. + // + // TODO(davidben): SChannel does not support this. Reject this case. + if (ssl->s3->hs_buf && ssl->s3->hs_buf->length == 0) { + ssl->s3->hs_buf.reset(); } - - return 0; } -static uint16_t ssl3_version_to_wire(uint16_t version) { - switch (version) { - case SSL3_VERSION: - case TLS1_VERSION: - case TLS1_1_VERSION: - case TLS1_2_VERSION: - return version; - case TLS1_3_VERSION: - return TLS1_3_DRAFT_VERSION; - } - - /* It is an error to use this function with an invalid version. */ - assert(0); - return 0; -} - -static int ssl3_supports_cipher(const SSL_CIPHER *cipher) { return 1; } - -static void ssl3_expect_flight(SSL *ssl) {} - -static void ssl3_received_flight(SSL *ssl) {} - -static int ssl3_set_read_state(SSL *ssl, SSL_AEAD_CTX *aead_ctx) { - if (ssl->s3->rrec.length != 0) { - /* There may not be unprocessed record data at a cipher change. */ +static bool ssl3_set_read_state(SSL *ssl, UniquePtr aead_ctx) { + // Cipher changes are forbidden if the current epoch has leftover data. 
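One detail worth keeping in mind around the sequence-number reset below: record sequence numbers are per-key-epoch, so they restart at zero whenever a new AEAD context is installed, and in TLS 1.3 each record's AEAD nonce is the static IV XORed with the left-padded sequence number. A small sketch of that nonce construction, assuming a 12-byte IV and the usual 8-byte sequence number (the function name is illustrative only):

    #include <cstdint>
    #include <cstring>

    // TLS 1.3 per-record nonce: iv XOR (sequence number left-padded to the IV length).
    void BuildRecordNonceSketch(const uint8_t iv[12], const uint8_t seq[8],
                                uint8_t out_nonce[12]) {
      memcpy(out_nonce, iv, 12);
      for (int i = 0; i < 8; i++) {
        out_nonce[4 + i] ^= seq[i];  // XOR the sequence into the trailing 8 bytes
      }
    }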
+ if (tls_has_unprocessed_handshake_data(ssl)) { OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFERED_MESSAGES_ON_CIPHER_CHANGE); - ssl3_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); - SSL_AEAD_CTX_free(aead_ctx); - return 0; + ssl_send_alert(ssl, SSL3_AL_FATAL, SSL_AD_UNEXPECTED_MESSAGE); + return false; } OPENSSL_memset(ssl->s3->read_sequence, 0, sizeof(ssl->s3->read_sequence)); - - SSL_AEAD_CTX_free(ssl->s3->aead_read_ctx); - ssl->s3->aead_read_ctx = aead_ctx; - return 1; + ssl->s3->aead_read_ctx = std::move(aead_ctx); + return true; } -static int ssl3_set_write_state(SSL *ssl, SSL_AEAD_CTX *aead_ctx) { +static bool ssl3_set_write_state(SSL *ssl, UniquePtr aead_ctx) { OPENSSL_memset(ssl->s3->write_sequence, 0, sizeof(ssl->s3->write_sequence)); - - SSL_AEAD_CTX_free(ssl->s3->aead_write_ctx); - ssl->s3->aead_write_ctx = aead_ctx; - return 1; + ssl->s3->aead_write_ctx = std::move(aead_ctx); + return true; } static const SSL_PROTOCOL_METHOD kTLSProtocolMethod = { - 0 /* is_dtls */, - SSL3_VERSION, - TLS1_3_VERSION, - ssl3_version_from_wire, - ssl3_version_to_wire, + false /* is_dtls */, ssl3_new, ssl3_free, ssl3_get_message, - ssl3_get_current_message, - ssl3_release_current_message, - ssl3_read_app_data, - ssl3_read_change_cipher_spec, - ssl3_read_close_notify, + ssl3_next_message, + ssl3_open_handshake, + ssl3_open_change_cipher_spec, + ssl3_open_app_data, ssl3_write_app_data, ssl3_dispatch_alert, - ssl3_supports_cipher, ssl3_init_message, ssl3_finish_message, ssl3_add_message, ssl3_add_change_cipher_spec, ssl3_add_alert, ssl3_flush_flight, - ssl3_expect_flight, - ssl3_received_flight, + ssl3_on_handshake_complete, ssl3_set_read_state, ssl3_set_write_state, }; +static int ssl_noop_x509_check_client_CA_names( + STACK_OF(CRYPTO_BUFFER) *names) { + return 1; +} + +static void ssl_noop_x509_clear(CERT *cert) {} +static void ssl_noop_x509_free(CERT *cert) {} +static void ssl_noop_x509_dup(CERT *new_cert, const CERT *cert) {} +static void ssl_noop_x509_flush_cached_leaf(CERT *cert) {} +static void ssl_noop_x509_flush_cached_chain(CERT *cert) {} +static int ssl_noop_x509_session_cache_objects(SSL_SESSION *sess) { + return 1; +} +static int ssl_noop_x509_session_dup(SSL_SESSION *new_session, + const SSL_SESSION *session) { + return 1; +} +static void ssl_noop_x509_session_clear(SSL_SESSION *session) {} +static int ssl_noop_x509_session_verify_cert_chain(SSL_SESSION *session, + SSL *ssl, + uint8_t *out_alert) { + return 0; +} + +static void ssl_noop_x509_hs_flush_cached_ca_names(SSL_HANDSHAKE *hs) {} +static int ssl_noop_x509_ssl_new(SSL *ctx) { return 1; } +static void ssl_noop_x509_ssl_free(SSL *ctx) { } +static void ssl_noop_x509_ssl_flush_cached_client_CA(SSL *ssl) {} +static int ssl_noop_x509_ssl_auto_chain_if_needed(SSL *ssl) { return 1; } +static int ssl_noop_x509_ssl_ctx_new(SSL_CTX *ctx) { return 1; } +static void ssl_noop_x509_ssl_ctx_free(SSL_CTX *ctx) { } +static void ssl_noop_x509_ssl_ctx_flush_cached_client_CA(SSL_CTX *ctx) {} + +const SSL_X509_METHOD ssl_noop_x509_method = { + ssl_noop_x509_check_client_CA_names, + ssl_noop_x509_clear, + ssl_noop_x509_free, + ssl_noop_x509_dup, + ssl_noop_x509_flush_cached_chain, + ssl_noop_x509_flush_cached_leaf, + ssl_noop_x509_session_cache_objects, + ssl_noop_x509_session_dup, + ssl_noop_x509_session_clear, + ssl_noop_x509_session_verify_cert_chain, + ssl_noop_x509_hs_flush_cached_ca_names, + ssl_noop_x509_ssl_new, + ssl_noop_x509_ssl_free, + ssl_noop_x509_ssl_flush_cached_client_CA, + ssl_noop_x509_ssl_auto_chain_if_needed, + 
ssl_noop_x509_ssl_ctx_new, + ssl_noop_x509_ssl_ctx_free, + ssl_noop_x509_ssl_ctx_flush_cached_client_CA, +}; + +} // namespace bssl + +using namespace bssl; + const SSL_METHOD *TLS_method(void) { static const SSL_METHOD kMethod = { 0, @@ -170,7 +193,16 @@ const SSL_METHOD *SSLv23_method(void) { return TLS_method(); } -/* Legacy version-locked methods. */ +const SSL_METHOD *TLS_with_buffers_method(void) { + static const SSL_METHOD kMethod = { + 0, + &kTLSProtocolMethod, + &ssl_noop_x509_method, + }; + return &kMethod; +} + +// Legacy version-locked methods. const SSL_METHOD *TLSv1_2_method(void) { static const SSL_METHOD kMethod = { @@ -208,7 +240,7 @@ const SSL_METHOD *SSLv3_method(void) { return &kMethod; } -/* Legacy side-specific methods. */ +// Legacy side-specific methods. const SSL_METHOD *TLSv1_2_server_method(void) { return TLSv1_2_method(); @@ -257,24 +289,3 @@ const SSL_METHOD *TLS_server_method(void) { const SSL_METHOD *TLS_client_method(void) { return TLS_method(); } - -static void ssl_noop_x509_clear(CERT *cert) {} -static void ssl_noop_x509_flush_cached_leaf(CERT *cert) {} -static void ssl_noop_x509_flush_cached_chain(CERT *cert) {} -static int ssl_noop_x509_session_cache_objects(SSL_SESSION *sess) { - return 1; -} -static int ssl_noop_x509_session_dup(SSL_SESSION *new_session, - const SSL_SESSION *session) { - return 1; -} -static void ssl_noop_x509_session_clear(SSL_SESSION *session) {} - -const SSL_X509_METHOD ssl_noop_x509_method = { - ssl_noop_x509_clear, - ssl_noop_x509_flush_cached_chain, - ssl_noop_x509_flush_cached_leaf, - ssl_noop_x509_session_cache_objects, - ssl_noop_x509_session_dup, - ssl_noop_x509_session_clear, -}; diff --git a/Sources/BoringSSL/ssl/tls_record.c b/Sources/BoringSSL/ssl/tls_record.c deleted file mode 100644 index 6ff79c4b4..000000000 --- a/Sources/BoringSSL/ssl/tls_record.c +++ /dev/null @@ -1,556 +0,0 @@ -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ -/* ==================================================================== - * Copyright (c) 1998-2002 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). */ - -#include - -#include -#include - -#include -#include -#include - -#include "internal.h" -#include "../crypto/internal.h" - - -/* kMaxEmptyRecords is the number of consecutive, empty records that will be - * processed. Without this limit an attacker could send empty records at a - * faster rate than we can process and cause record processing to loop - * forever. */ -static const uint8_t kMaxEmptyRecords = 32; - -/* kMaxEarlyDataSkipped is the maximum number of rejected early data bytes that - * will be skipped. Without this limit an attacker could send records at a - * faster rate than we can process and cause trial decryption to loop forever. - * This value should be slightly above kMaxEarlyDataAccepted in tls13_server.c, - * which is measured in plaintext. */ -static const size_t kMaxEarlyDataSkipped = 16384; - -/* kMaxWarningAlerts is the number of consecutive warning alerts that will be - * processed. */ -static const uint8_t kMaxWarningAlerts = 4; - -/* ssl_needs_record_splitting returns one if |ssl|'s current outgoing cipher - * state needs record-splitting and zero otherwise. 
*/ -static int ssl_needs_record_splitting(const SSL *ssl) { - return ssl->s3->aead_write_ctx != NULL && - ssl3_protocol_version(ssl) < TLS1_1_VERSION && - (ssl->mode & SSL_MODE_CBC_RECORD_SPLITTING) != 0 && - SSL_CIPHER_is_block_cipher(ssl->s3->aead_write_ctx->cipher); -} - -static int ssl_uses_short_header(const SSL *ssl, - enum evp_aead_direction_t dir) { - if (!ssl->s3->short_header) { - return 0; - } - - if (dir == evp_aead_open) { - return ssl->s3->aead_read_ctx != NULL; - } - - return ssl->s3->aead_write_ctx != NULL; -} - -int ssl_record_sequence_update(uint8_t *seq, size_t seq_len) { - for (size_t i = seq_len - 1; i < seq_len; i--) { - ++seq[i]; - if (seq[i] != 0) { - return 1; - } - } - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return 0; -} - -size_t ssl_record_prefix_len(const SSL *ssl) { - size_t header_len; - if (SSL_is_dtls(ssl)) { - header_len = DTLS1_RT_HEADER_LENGTH; - } else if (ssl_uses_short_header(ssl, evp_aead_open)) { - header_len = 2; - } else { - header_len = SSL3_RT_HEADER_LENGTH; - } - - return header_len + SSL_AEAD_CTX_explicit_nonce_len(ssl->s3->aead_read_ctx); -} - -size_t ssl_seal_align_prefix_len(const SSL *ssl) { - if (SSL_is_dtls(ssl)) { - return DTLS1_RT_HEADER_LENGTH + - SSL_AEAD_CTX_explicit_nonce_len(ssl->s3->aead_write_ctx); - } - - size_t header_len; - if (ssl_uses_short_header(ssl, evp_aead_seal)) { - header_len = 2; - } else { - header_len = SSL3_RT_HEADER_LENGTH; - } - - size_t ret = - header_len + SSL_AEAD_CTX_explicit_nonce_len(ssl->s3->aead_write_ctx); - if (ssl_needs_record_splitting(ssl)) { - ret += header_len; - ret += ssl_cipher_get_record_split_len(ssl->s3->aead_write_ctx->cipher); - } - return ret; -} - -size_t SSL_max_seal_overhead(const SSL *ssl) { - if (SSL_is_dtls(ssl)) { - return dtls_max_seal_overhead(ssl, dtls1_use_current_epoch); - } - - size_t ret = - ssl_uses_short_header(ssl, evp_aead_seal) ? 2 : SSL3_RT_HEADER_LENGTH; - ret += SSL_AEAD_CTX_max_overhead(ssl->s3->aead_write_ctx); - /* TLS 1.3 needs an extra byte for the encrypted record type. */ - if (ssl->s3->have_version && - ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - ret += 1; - } - if (ssl_needs_record_splitting(ssl)) { - ret *= 2; - } - return ret; -} - -enum ssl_open_record_t tls_open_record(SSL *ssl, uint8_t *out_type, CBS *out, - size_t *out_consumed, uint8_t *out_alert, - uint8_t *in, size_t in_len) { - *out_consumed = 0; - - CBS cbs; - CBS_init(&cbs, in, in_len); - - /* Decode the record header. */ - uint8_t type; - uint16_t version, ciphertext_len; - size_t header_len; - if (ssl_uses_short_header(ssl, evp_aead_open)) { - if (!CBS_get_u16(&cbs, &ciphertext_len)) { - *out_consumed = 2; - return ssl_open_record_partial; - } - - if ((ciphertext_len & 0x8000) == 0) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECODE_ERROR); - *out_alert = SSL_AD_DECODE_ERROR; - return ssl_open_record_error; - } - - ciphertext_len &= 0x7fff; - type = SSL3_RT_APPLICATION_DATA; - version = TLS1_VERSION; - header_len = 2; - } else { - if (!CBS_get_u8(&cbs, &type) || - !CBS_get_u16(&cbs, &version) || - !CBS_get_u16(&cbs, &ciphertext_len)) { - *out_consumed = SSL3_RT_HEADER_LENGTH; - return ssl_open_record_partial; - } - header_len = SSL3_RT_HEADER_LENGTH; - } - - int version_ok; - if (ssl->s3->aead_read_ctx == NULL) { - /* Only check the first byte. Enforcing beyond that can prevent decoding - * version negotiation failure alerts. */ - version_ok = (version >> 8) == SSL3_VERSION_MAJOR; - } else if (ssl3_protocol_version(ssl) < TLS1_3_VERSION) { - /* Earlier versions of TLS switch the record version. 
*/ - version_ok = version == ssl->version; - } else { - /* Starting TLS 1.3, the version field is frozen at {3, 1}. */ - version_ok = version == TLS1_VERSION; - } - - if (!version_ok) { - OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_VERSION_NUMBER); - *out_alert = SSL_AD_PROTOCOL_VERSION; - return ssl_open_record_error; - } - - /* Check the ciphertext length. */ - if (ciphertext_len > SSL3_RT_MAX_ENCRYPTED_LENGTH) { - OPENSSL_PUT_ERROR(SSL, SSL_R_ENCRYPTED_LENGTH_TOO_LONG); - *out_alert = SSL_AD_RECORD_OVERFLOW; - return ssl_open_record_error; - } - - /* Extract the body. */ - CBS body; - if (!CBS_get_bytes(&cbs, &body, ciphertext_len)) { - *out_consumed = header_len + (size_t)ciphertext_len; - return ssl_open_record_partial; - } - - ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HEADER, in, header_len); - - *out_consumed = in_len - CBS_len(&cbs); - - /* Skip early data received when expecting a second ClientHello if we rejected - * 0RTT. */ - if (ssl->s3->skip_early_data && - ssl->s3->aead_read_ctx == NULL && - type == SSL3_RT_APPLICATION_DATA) { - goto skipped_data; - } - - /* Decrypt the body in-place. */ - if (!SSL_AEAD_CTX_open(ssl->s3->aead_read_ctx, out, type, version, - ssl->s3->read_sequence, (uint8_t *)CBS_data(&body), - CBS_len(&body))) { - if (ssl->s3->skip_early_data && - ssl->s3->aead_read_ctx != NULL) { - ERR_clear_error(); - goto skipped_data; - } - - OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); - *out_alert = SSL_AD_BAD_RECORD_MAC; - return ssl_open_record_error; - } - - ssl->s3->skip_early_data = 0; - - if (!ssl_record_sequence_update(ssl->s3->read_sequence, 8)) { - *out_alert = SSL_AD_INTERNAL_ERROR; - return ssl_open_record_error; - } - - /* TLS 1.3 hides the record type inside the encrypted data. */ - if (ssl->s3->have_version && - ssl3_protocol_version(ssl) >= TLS1_3_VERSION && - ssl->s3->aead_read_ctx != NULL) { - /* The outer record type is always application_data. */ - if (type != SSL3_RT_APPLICATION_DATA) { - OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_OUTER_RECORD_TYPE); - *out_alert = SSL_AD_DECODE_ERROR; - return ssl_open_record_error; - } - - do { - if (!CBS_get_last_u8(out, &type)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); - *out_alert = SSL_AD_DECRYPT_ERROR; - return ssl_open_record_error; - } - } while (type == 0); - } - - /* Check the plaintext length. */ - if (CBS_len(out) > SSL3_RT_MAX_PLAIN_LENGTH) { - OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); - *out_alert = SSL_AD_RECORD_OVERFLOW; - return ssl_open_record_error; - } - - /* Limit the number of consecutive empty records. */ - if (CBS_len(out) == 0) { - ssl->s3->empty_record_count++; - if (ssl->s3->empty_record_count > kMaxEmptyRecords) { - OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_EMPTY_FRAGMENTS); - *out_alert = SSL_AD_UNEXPECTED_MESSAGE; - return ssl_open_record_error; - } - /* Apart from the limit, empty records are returned up to the caller. This - * allows the caller to reject records of the wrong type. 
*/ - } else { - ssl->s3->empty_record_count = 0; - } - - if (type == SSL3_RT_ALERT) { - return ssl_process_alert(ssl, out_alert, CBS_data(out), CBS_len(out)); - } - - ssl->s3->warning_alert_count = 0; - - *out_type = type; - return ssl_open_record_success; - -skipped_data: - ssl->s3->early_data_skipped += *out_consumed; - if (ssl->s3->early_data_skipped < *out_consumed) { - ssl->s3->early_data_skipped = kMaxEarlyDataSkipped + 1; - } - - if (ssl->s3->early_data_skipped > kMaxEarlyDataSkipped) { - OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MUCH_SKIPPED_EARLY_DATA); - *out_alert = SSL_AD_UNEXPECTED_MESSAGE; - return ssl_open_record_error; - } - - return ssl_open_record_discard; -} - -static int do_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, - size_t max_out, uint8_t type, const uint8_t *in, - size_t in_len) { - assert(!buffers_alias(in, in_len, out, max_out)); - - const int short_header = ssl_uses_short_header(ssl, evp_aead_seal); - size_t header_len = short_header ? 2 : SSL3_RT_HEADER_LENGTH; - - /* TLS 1.3 hides the actual record type inside the encrypted data. */ - if (ssl->s3->have_version && - ssl3_protocol_version(ssl) >= TLS1_3_VERSION && - ssl->s3->aead_write_ctx != NULL) { - if (in_len > in_len + header_len + 1 || max_out < in_len + header_len + 1) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); - return 0; - } - - OPENSSL_memcpy(out + header_len, in, in_len); - out[header_len + in_len] = type; - in = out + header_len; - type = SSL3_RT_APPLICATION_DATA; - in_len++; - } - - if (max_out < header_len) { - OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); - return 0; - } - - /* The TLS record-layer version number is meaningless and, starting in - * TLS 1.3, is frozen at TLS 1.0. But for historical reasons, SSL 3.0 - * ClientHellos should use SSL 3.0 and pre-TLS-1.3 expects the version - * to change after version negotiation. */ - uint16_t wire_version = TLS1_VERSION; - if (ssl->version == SSL3_VERSION || - (ssl->s3->have_version && ssl3_protocol_version(ssl) < TLS1_3_VERSION)) { - wire_version = ssl->version; - } - - /* Write the non-length portions of the header. */ - if (!short_header) { - out[0] = type; - out[1] = wire_version >> 8; - out[2] = wire_version & 0xff; - out += 3; - max_out -= 3; - } - - /* Write the ciphertext, leaving two bytes for the length. */ - size_t ciphertext_len; - if (!SSL_AEAD_CTX_seal(ssl->s3->aead_write_ctx, out + 2, &ciphertext_len, - max_out - 2, type, wire_version, - ssl->s3->write_sequence, in, in_len) || - !ssl_record_sequence_update(ssl->s3->write_sequence, 8)) { - return 0; - } - - /* Fill in the length. 
*/ - if (ciphertext_len >= 1 << 15) { - OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); - return 0; - } - out[0] = ciphertext_len >> 8; - out[1] = ciphertext_len & 0xff; - if (short_header) { - out[0] |= 0x80; - } - - *out_len = header_len + ciphertext_len; - - ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HEADER, out, header_len); - return 1; -} - -int tls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out, - uint8_t type, const uint8_t *in, size_t in_len) { - if (buffers_alias(in, in_len, out, max_out)) { - OPENSSL_PUT_ERROR(SSL, SSL_R_OUTPUT_ALIASES_INPUT); - return 0; - } - - size_t frag_len = 0; - if (type == SSL3_RT_APPLICATION_DATA && in_len > 1 && - ssl_needs_record_splitting(ssl)) { - if (!do_seal_record(ssl, out, &frag_len, max_out, type, in, 1)) { - return 0; - } - in++; - in_len--; - out += frag_len; - max_out -= frag_len; - - assert(!ssl_uses_short_header(ssl, evp_aead_seal)); -#if !defined(BORINGSSL_UNSAFE_FUZZER_MODE) - assert(SSL3_RT_HEADER_LENGTH + ssl_cipher_get_record_split_len( - ssl->s3->aead_write_ctx->cipher) == - frag_len); -#endif - } - - if (!do_seal_record(ssl, out, out_len, max_out, type, in, in_len)) { - return 0; - } - *out_len += frag_len; - return 1; -} - -enum ssl_open_record_t ssl_process_alert(SSL *ssl, uint8_t *out_alert, - const uint8_t *in, size_t in_len) { - /* Alerts records may not contain fragmented or multiple alerts. */ - if (in_len != 2) { - *out_alert = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ALERT); - return ssl_open_record_error; - } - - ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_ALERT, in, in_len); - - const uint8_t alert_level = in[0]; - const uint8_t alert_descr = in[1]; - - uint16_t alert = (alert_level << 8) | alert_descr; - ssl_do_info_callback(ssl, SSL_CB_READ_ALERT, alert); - - if (alert_level == SSL3_AL_WARNING) { - if (alert_descr == SSL_AD_CLOSE_NOTIFY) { - ssl->s3->recv_shutdown = ssl_shutdown_close_notify; - return ssl_open_record_close_notify; - } - - /* Warning alerts do not exist in TLS 1.3. */ - if (ssl->s3->have_version && - ssl3_protocol_version(ssl) >= TLS1_3_VERSION) { - *out_alert = SSL_AD_DECODE_ERROR; - OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ALERT); - return ssl_open_record_error; - } - - ssl->s3->warning_alert_count++; - if (ssl->s3->warning_alert_count > kMaxWarningAlerts) { - *out_alert = SSL_AD_UNEXPECTED_MESSAGE; - OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_WARNING_ALERTS); - return ssl_open_record_error; - } - return ssl_open_record_discard; - } - - if (alert_level == SSL3_AL_FATAL) { - ssl->s3->recv_shutdown = ssl_shutdown_fatal_alert; - - char tmp[16]; - OPENSSL_PUT_ERROR(SSL, SSL_AD_REASON_OFFSET + alert_descr); - BIO_snprintf(tmp, sizeof(tmp), "%d", alert_descr); - ERR_add_error_data(2, "SSL alert number ", tmp); - return ssl_open_record_fatal_alert; - } - - *out_alert = SSL_AD_ILLEGAL_PARAMETER; - OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_ALERT_TYPE); - return ssl_open_record_error; -} diff --git a/Sources/BoringSSL/ssl/tls_record.cc b/Sources/BoringSSL/ssl/tls_record.cc new file mode 100644 index 000000000..a1363fa6f --- /dev/null +++ b/Sources/BoringSSL/ssl/tls_record.cc @@ -0,0 +1,712 @@ +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. 
The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ +/* ==================================================================== + * Copyright (c) 1998-2002 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). */ + +#include + +#include +#include + +#include +#include +#include + +#include "internal.h" +#include "../crypto/internal.h" + + +namespace bssl { + +// kMaxEmptyRecords is the number of consecutive, empty records that will be +// processed. Without this limit an attacker could send empty records at a +// faster rate than we can process and cause record processing to loop +// forever. +static const uint8_t kMaxEmptyRecords = 32; + +// kMaxEarlyDataSkipped is the maximum number of rejected early data bytes that +// will be skipped. Without this limit an attacker could send records at a +// faster rate than we can process and cause trial decryption to loop forever. +// This value should be slightly above kMaxEarlyDataAccepted, which is measured +// in plaintext. +static const size_t kMaxEarlyDataSkipped = 16384; + +// kMaxWarningAlerts is the number of consecutive warning alerts that will be +// processed. +static const uint8_t kMaxWarningAlerts = 4; + +// ssl_needs_record_splitting returns one if |ssl|'s current outgoing cipher +// state needs record-splitting and zero otherwise. 
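
The comment above describes when 1/n-1 record splitting applies (CBC ciphers before TLS 1.1 with SSL_MODE_CBC_RECORD_SPLITTING set). A minimal standalone sketch of its cost, using an illustrative per-record CBC overhead rather than any value taken from the library: the application write goes out as a 1-byte record followed by an (n-1)-byte record, so the 5-byte header and the cipher overhead are paid twice, which is also why SSL_max_seal_overhead doubles its estimate when splitting is in effect.

/* Hedged sketch; EXAMPLE_CBC_OVERHEAD is hypothetical, not a library value. */
#include <stdio.h>
#include <stddef.h>

#define TLS_HEADER_LEN 5        /* type(1) + version(2) + length(2) */
#define EXAMPLE_CBC_OVERHEAD 37 /* hypothetical MAC + padding + IV figure */

static size_t sealed_size(size_t plaintext_len, int split) {
  size_t per_record = TLS_HEADER_LEN + EXAMPLE_CBC_OVERHEAD;
  if (split && plaintext_len > 1) {
    return 2 * per_record + plaintext_len; /* [1 byte] + [n-1 bytes] */
  }
  return per_record + plaintext_len;
}

int main(void) {
  printf("unsplit: %zu bytes on the wire\n", sealed_size(1024, 0));
  printf("split:   %zu bytes on the wire\n", sealed_size(1024, 1));
  return 0;
}
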
+static int ssl_needs_record_splitting(const SSL *ssl) { +#if !defined(BORINGSSL_UNSAFE_FUZZER_MODE) + return !ssl->s3->aead_write_ctx->is_null_cipher() && + ssl->s3->aead_write_ctx->ProtocolVersion() < TLS1_1_VERSION && + (ssl->mode & SSL_MODE_CBC_RECORD_SPLITTING) != 0 && + SSL_CIPHER_is_block_cipher(ssl->s3->aead_write_ctx->cipher()); +#else + return 0; +#endif +} + +int ssl_record_sequence_update(uint8_t *seq, size_t seq_len) { + for (size_t i = seq_len - 1; i < seq_len; i--) { + ++seq[i]; + if (seq[i] != 0) { + return 1; + } + } + OPENSSL_PUT_ERROR(SSL, ERR_R_OVERFLOW); + return 0; +} + +size_t ssl_record_prefix_len(const SSL *ssl) { + size_t header_len; + if (SSL_is_dtls(ssl)) { + header_len = DTLS1_RT_HEADER_LENGTH; + } else { + header_len = SSL3_RT_HEADER_LENGTH; + } + + return header_len + ssl->s3->aead_read_ctx->ExplicitNonceLen(); +} + +size_t ssl_seal_align_prefix_len(const SSL *ssl) { + if (SSL_is_dtls(ssl)) { + return DTLS1_RT_HEADER_LENGTH + ssl->s3->aead_write_ctx->ExplicitNonceLen(); + } + + size_t ret = + SSL3_RT_HEADER_LENGTH + ssl->s3->aead_write_ctx->ExplicitNonceLen(); + if (ssl_needs_record_splitting(ssl)) { + ret += SSL3_RT_HEADER_LENGTH; + ret += ssl_cipher_get_record_split_len(ssl->s3->aead_write_ctx->cipher()); + } + return ret; +} + +static ssl_open_record_t skip_early_data(SSL *ssl, uint8_t *out_alert, + size_t consumed) { + ssl->s3->early_data_skipped += consumed; + if (ssl->s3->early_data_skipped < consumed) { + ssl->s3->early_data_skipped = kMaxEarlyDataSkipped + 1; + } + + if (ssl->s3->early_data_skipped > kMaxEarlyDataSkipped) { + OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MUCH_SKIPPED_EARLY_DATA); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } + + return ssl_open_record_discard; +} + +ssl_open_record_t tls_open_record(SSL *ssl, uint8_t *out_type, + Span *out, size_t *out_consumed, + uint8_t *out_alert, Span in) { + *out_consumed = 0; + if (ssl->s3->read_shutdown == ssl_shutdown_close_notify) { + return ssl_open_record_close_notify; + } + + // If there is an unprocessed handshake message or we are already buffering + // too much, stop before decrypting another handshake record. + if (!tls_can_accept_handshake_data(ssl, out_alert)) { + return ssl_open_record_error; + } + + CBS cbs = CBS(in); + + // Decode the record header. + uint8_t type; + uint16_t version, ciphertext_len; + if (!CBS_get_u8(&cbs, &type) || + !CBS_get_u16(&cbs, &version) || + !CBS_get_u16(&cbs, &ciphertext_len)) { + *out_consumed = SSL3_RT_HEADER_LENGTH; + return ssl_open_record_partial; + } + + bool version_ok; + if (ssl->s3->aead_read_ctx->is_null_cipher()) { + // Only check the first byte. Enforcing beyond that can prevent decoding + // version negotiation failure alerts. + version_ok = (version >> 8) == SSL3_VERSION_MAJOR; + } else { + version_ok = version == ssl->s3->aead_read_ctx->RecordVersion(); + } + + if (!version_ok) { + OPENSSL_PUT_ERROR(SSL, SSL_R_WRONG_VERSION_NUMBER); + *out_alert = SSL_AD_PROTOCOL_VERSION; + return ssl_open_record_error; + } + + // Check the ciphertext length. + if (ciphertext_len > SSL3_RT_MAX_ENCRYPTED_LENGTH) { + OPENSSL_PUT_ERROR(SSL, SSL_R_ENCRYPTED_LENGTH_TOO_LONG); + *out_alert = SSL_AD_RECORD_OVERFLOW; + return ssl_open_record_error; + } + + // Extract the body. 
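
tls_open_record above begins by decoding the plaintext record header: one content-type byte, a two-byte legacy version, and a two-byte big-endian ciphertext length. A self-contained sketch of that decode, using plain pointer reads instead of the CBS helpers:

/* Standalone sketch of the 5-byte TLS record header decoded above. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct record_header {
  uint8_t type;
  uint16_t legacy_version;
  uint16_t ciphertext_len;
};

static int parse_record_header(const uint8_t *in, size_t in_len,
                               struct record_header *out) {
  if (in_len < 5) {
    return 0; /* need more data; the real code returns ssl_open_record_partial */
  }
  out->type = in[0];
  out->legacy_version = (uint16_t)((in[1] << 8) | in[2]);
  out->ciphertext_len = (uint16_t)((in[3] << 8) | in[4]);
  return 1;
}

int main(void) {
  /* application_data (23), version {3,3}, 0x0013 = 19 ciphertext bytes */
  const uint8_t sample[] = {0x17, 0x03, 0x03, 0x00, 0x13};
  struct record_header hdr;
  if (parse_record_header(sample, sizeof(sample), &hdr)) {
    printf("type=%u version=0x%04x len=%u\n", (unsigned)hdr.type,
           (unsigned)hdr.legacy_version, (unsigned)hdr.ciphertext_len);
  }
  return 0;
}
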
+ CBS body; + if (!CBS_get_bytes(&cbs, &body, ciphertext_len)) { + *out_consumed = SSL3_RT_HEADER_LENGTH + (size_t)ciphertext_len; + return ssl_open_record_partial; + } + + ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_HEADER, + in.subspan(0, SSL3_RT_HEADER_LENGTH)); + + *out_consumed = in.size() - CBS_len(&cbs); + + if (ssl->s3->have_version && + ssl_is_resumption_experiment(ssl->version) && + SSL_in_init(ssl) && + type == SSL3_RT_CHANGE_CIPHER_SPEC && + ciphertext_len == 1 && + CBS_data(&body)[0] == 1) { + ssl->s3->empty_record_count++; + if (ssl->s3->empty_record_count > kMaxEmptyRecords) { + OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_EMPTY_FRAGMENTS); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } + return ssl_open_record_discard; + } + + // Skip early data received when expecting a second ClientHello if we rejected + // 0RTT. + if (ssl->s3->skip_early_data && + ssl->s3->aead_read_ctx->is_null_cipher() && + type == SSL3_RT_APPLICATION_DATA) { + return skip_early_data(ssl, out_alert, *out_consumed); + } + + // Decrypt the body in-place. + if (!ssl->s3->aead_read_ctx->Open( + out, type, version, ssl->s3->read_sequence, + MakeSpan(const_cast(CBS_data(&body)), CBS_len(&body)))) { + if (ssl->s3->skip_early_data && !ssl->s3->aead_read_ctx->is_null_cipher()) { + ERR_clear_error(); + return skip_early_data(ssl, out_alert, *out_consumed); + } + + OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); + *out_alert = SSL_AD_BAD_RECORD_MAC; + return ssl_open_record_error; + } + + ssl->s3->skip_early_data = false; + + if (!ssl_record_sequence_update(ssl->s3->read_sequence, 8)) { + *out_alert = SSL_AD_INTERNAL_ERROR; + return ssl_open_record_error; + } + + // TLS 1.3 hides the record type inside the encrypted data. + bool has_padding = + !ssl->s3->aead_read_ctx->is_null_cipher() && + ssl->s3->aead_read_ctx->ProtocolVersion() >= TLS1_3_VERSION; + + // If there is padding, the plaintext limit includes the padding, but includes + // extra room for the inner content type. + size_t plaintext_limit = + has_padding ? SSL3_RT_MAX_PLAIN_LENGTH + 1 : SSL3_RT_MAX_PLAIN_LENGTH; + if (out->size() > plaintext_limit) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DATA_LENGTH_TOO_LONG); + *out_alert = SSL_AD_RECORD_OVERFLOW; + return ssl_open_record_error; + } + + if (has_padding) { + // The outer record type is always application_data. + if (type != SSL3_RT_APPLICATION_DATA) { + OPENSSL_PUT_ERROR(SSL, SSL_R_INVALID_OUTER_RECORD_TYPE); + *out_alert = SSL_AD_DECODE_ERROR; + return ssl_open_record_error; + } + + do { + if (out->empty()) { + OPENSSL_PUT_ERROR(SSL, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); + *out_alert = SSL_AD_DECRYPT_ERROR; + return ssl_open_record_error; + } + type = out->back(); + *out = out->subspan(0, out->size() - 1); + } while (type == 0); + } + + // Limit the number of consecutive empty records. + if (out->empty()) { + ssl->s3->empty_record_count++; + if (ssl->s3->empty_record_count > kMaxEmptyRecords) { + OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_EMPTY_FRAGMENTS); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } + // Apart from the limit, empty records are returned up to the caller. This + // allows the caller to reject records of the wrong type. + } else { + ssl->s3->empty_record_count = 0; + } + + if (type == SSL3_RT_ALERT) { + // Return end_of_early_data alerts as-is for the caller to process. 
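
An alert record body, as ssl_process_alert later verifies, is exactly two bytes: the level (1 = warning, 2 = fatal) followed by the description, with close_notify being warning level and description 0. A standalone sketch of that classification; the code just below then special-cases the end_of_early_data warning before handing anything else to ssl_process_alert.

/* Minimal sketch of two-byte alert classification (not library code). */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

enum alert_kind {
  ALERT_MALFORMED,
  ALERT_CLOSE_NOTIFY,
  ALERT_WARNING,
  ALERT_FATAL,
  ALERT_UNKNOWN_LEVEL
};

static enum alert_kind classify_alert(const uint8_t *in, size_t in_len) {
  if (in_len != 2) {
    return ALERT_MALFORMED; /* alerts may not be fragmented or batched */
  }
  if (in[0] == 1) {
    return in[1] == 0 ? ALERT_CLOSE_NOTIFY : ALERT_WARNING;
  }
  if (in[0] == 2) {
    return ALERT_FATAL;
  }
  return ALERT_UNKNOWN_LEVEL; /* rejected as illegal_parameter in the real code */
}

int main(void) {
  const uint8_t close_notify[] = {1, 0};
  const uint8_t handshake_failure[] = {2, 40};
  printf("%d %d\n", classify_alert(close_notify, 2),
         classify_alert(handshake_failure, 2));
  return 0;
}
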
+ if (!ssl_is_draft21(ssl->version) && + out->size() == 2 && + (*out)[0] == SSL3_AL_WARNING && + (*out)[1] == TLS1_AD_END_OF_EARLY_DATA) { + *out_type = type; + return ssl_open_record_success; + } + + return ssl_process_alert(ssl, out_alert, *out); + } + + // Handshake messages may not interleave with any other record type. + if (type != SSL3_RT_HANDSHAKE && + tls_has_unprocessed_handshake_data(ssl)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_UNEXPECTED_RECORD); + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return ssl_open_record_error; + } + + ssl->s3->warning_alert_count = 0; + + *out_type = type; + return ssl_open_record_success; +} + +static int do_seal_record(SSL *ssl, uint8_t *out_prefix, uint8_t *out, + uint8_t *out_suffix, uint8_t type, const uint8_t *in, + const size_t in_len) { + uint8_t *extra_in = NULL; + size_t extra_in_len = 0; + if (!ssl->s3->aead_write_ctx->is_null_cipher() && + ssl->s3->aead_write_ctx->ProtocolVersion() >= TLS1_3_VERSION) { + // TLS 1.3 hides the actual record type inside the encrypted data. + extra_in = &type; + extra_in_len = 1; + } + + size_t suffix_len; + if (!ssl->s3->aead_write_ctx->SuffixLen(&suffix_len, in_len, extra_in_len)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); + return 0; + } + size_t ciphertext_len = + ssl->s3->aead_write_ctx->ExplicitNonceLen() + suffix_len; + if (ciphertext_len + in_len < ciphertext_len) { + OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); + return 0; + } + ciphertext_len += in_len; + + assert(in == out || !buffers_alias(in, in_len, out, in_len)); + assert(!buffers_alias(in, in_len, out_prefix, ssl_record_prefix_len(ssl))); + assert(!buffers_alias(in, in_len, out_suffix, suffix_len)); + + if (extra_in_len) { + out_prefix[0] = SSL3_RT_APPLICATION_DATA; + } else { + out_prefix[0] = type; + } + + uint16_t record_version = ssl->s3->aead_write_ctx->RecordVersion(); + + out_prefix[1] = record_version >> 8; + out_prefix[2] = record_version & 0xff; + out_prefix[3] = ciphertext_len >> 8; + out_prefix[4] = ciphertext_len & 0xff; + + if (!ssl->s3->aead_write_ctx->SealScatter( + out_prefix + SSL3_RT_HEADER_LENGTH, out, out_suffix, type, + record_version, ssl->s3->write_sequence, in, in_len, extra_in, + extra_in_len) || + !ssl_record_sequence_update(ssl->s3->write_sequence, 8)) { + return 0; + } + + ssl_do_msg_callback(ssl, 1 /* write */, SSL3_RT_HEADER, + MakeSpan(out_prefix, SSL3_RT_HEADER_LENGTH)); + return 1; +} + +static size_t tls_seal_scatter_prefix_len(const SSL *ssl, uint8_t type, + size_t in_len) { + size_t ret = SSL3_RT_HEADER_LENGTH; + if (type == SSL3_RT_APPLICATION_DATA && in_len > 1 && + ssl_needs_record_splitting(ssl)) { + // In the case of record splitting, the 1-byte record (of the 1/n-1 split) + // will be placed in the prefix, as will four of the five bytes of the + // record header for the main record. The final byte will replace the first + // byte of the plaintext that was used in the small record. + ret += ssl_cipher_get_record_split_len(ssl->s3->aead_write_ctx->cipher()); + ret += SSL3_RT_HEADER_LENGTH - 1; + } else { + ret += ssl->s3->aead_write_ctx->ExplicitNonceLen(); + } + return ret; +} + +static bool tls_seal_scatter_suffix_len(const SSL *ssl, size_t *out_suffix_len, + uint8_t type, size_t in_len) { + size_t extra_in_len = 0; + if (!ssl->s3->aead_write_ctx->is_null_cipher() && + ssl->s3->aead_write_ctx->ProtocolVersion() >= TLS1_3_VERSION) { + // TLS 1.3 adds an extra byte for encrypted record type. 
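
That extra byte is the inner content type: on the wire the outer type is always application_data, and the receiver strips trailing zero padding and treats the last remaining byte as the real type, as the do/while loop in tls_open_record above does. A minimal sketch of that receive-side step over a plain buffer:

/* Sketch of TLS 1.3 inner-record parsing: drop zero padding, then take the
 * last remaining byte as the true content type. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static int unpad_tls13_record(const uint8_t *plaintext, size_t *inout_len,
                              uint8_t *out_type) {
  size_t len = *inout_len;
  while (len > 0 && plaintext[len - 1] == 0) {
    len--; /* zero bytes are padding */
  }
  if (len == 0) {
    return 0; /* all padding: treated as a decrypt error by the caller */
  }
  *out_type = plaintext[len - 1];
  *inout_len = len - 1; /* content is everything before the type byte */
  return 1;
}

int main(void) {
  const uint8_t rec[] = {'h', 'i', 0x17 /* application_data */, 0x00, 0x00};
  size_t len = sizeof(rec);
  uint8_t type;
  if (unpad_tls13_record(rec, &len, &type)) {
    printf("inner type=%u, content length=%zu\n", (unsigned)type, len);
  }
  return 0;
}
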
+ extra_in_len = 1; + } + if (type == SSL3_RT_APPLICATION_DATA && // clang-format off + in_len > 1 && + ssl_needs_record_splitting(ssl)) { + // With record splitting enabled, the first byte gets sealed into a separate + // record which is written into the prefix. + in_len -= 1; + } + return ssl->s3->aead_write_ctx->SuffixLen(out_suffix_len, in_len, extra_in_len); +} + +// tls_seal_scatter_record seals a new record of type |type| and body |in| and +// splits it between |out_prefix|, |out|, and |out_suffix|. Exactly +// |tls_seal_scatter_prefix_len| bytes are written to |out_prefix|, |in_len| +// bytes to |out|, and |tls_seal_scatter_suffix_len| bytes to |out_suffix|. It +// returns one on success and zero on error. If enabled, +// |tls_seal_scatter_record| implements TLS 1.0 CBC 1/n-1 record splitting and +// may write two records concatenated. +static int tls_seal_scatter_record(SSL *ssl, uint8_t *out_prefix, uint8_t *out, + uint8_t *out_suffix, uint8_t type, + const uint8_t *in, size_t in_len) { + if (type == SSL3_RT_APPLICATION_DATA && in_len > 1 && + ssl_needs_record_splitting(ssl)) { + assert(ssl->s3->aead_write_ctx->ExplicitNonceLen() == 0); + const size_t prefix_len = SSL3_RT_HEADER_LENGTH; + + // Write the 1-byte fragment into |out_prefix|. + uint8_t *split_body = out_prefix + prefix_len; + uint8_t *split_suffix = split_body + 1; + + if (!do_seal_record(ssl, out_prefix, split_body, split_suffix, type, in, + 1)) { + return 0; + } + + size_t split_record_suffix_len; + if (!ssl->s3->aead_write_ctx->SuffixLen(&split_record_suffix_len, 1, 0)) { + assert(false); + return 0; + } + const size_t split_record_len = prefix_len + 1 + split_record_suffix_len; + assert(SSL3_RT_HEADER_LENGTH + ssl_cipher_get_record_split_len( + ssl->s3->aead_write_ctx->cipher()) == + split_record_len); + + // Write the n-1-byte fragment. The header gets split between |out_prefix| + // (header[:-1]) and |out| (header[-1:]). 
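
A small illustration of the scatter layout described above, with hypothetical ciphertext sizes standing in for the cipher-dependent values from ssl_cipher_get_record_split_len and the AEAD suffix calculation: |out_prefix| carries the whole sealed 1-byte record plus all but the last byte of the second record's header, and |out| begins with that final header byte.

/* Illustrative layout only; the lengths below are hypothetical. */
#include <stdio.h>
#include <string.h>

#define TLS_HEADER_LEN 5
#define SPLIT_CIPHERTEXT_LEN 8 /* hypothetical size of the sealed 1-byte record */
#define MAIN_CIPHERTEXT_LEN 12 /* hypothetical size of the sealed n-1 bytes */

int main(void) {
  char map[TLS_HEADER_LEN + SPLIT_CIPHERTEXT_LEN + TLS_HEADER_LEN +
           MAIN_CIPHERTEXT_LEN + 1];
  char *p = map;
  memset(p, 'A', TLS_HEADER_LEN);       p += TLS_HEADER_LEN;       /* header of the 1-byte record   */
  memset(p, 'a', SPLIT_CIPHERTEXT_LEN); p += SPLIT_CIPHERTEXT_LEN; /* its ciphertext                */
  memset(p, 'B', TLS_HEADER_LEN);       p += TLS_HEADER_LEN;       /* header of the n-1 byte record */
  memset(p, 'b', MAIN_CIPHERTEXT_LEN);  p += MAIN_CIPHERTEXT_LEN;  /* its ciphertext                */
  *p = '\0';
  /* |out_prefix| covers everything up to but not including the last 'B';
   * |out| begins at that final header byte. */
  printf("%s\n", map);
  printf("prefix_len = %d\n",
         TLS_HEADER_LEN + SPLIT_CIPHERTEXT_LEN + TLS_HEADER_LEN - 1);
  return 0;
}
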
+ uint8_t tmp_prefix[SSL3_RT_HEADER_LENGTH]; + if (!do_seal_record(ssl, tmp_prefix, out + 1, out_suffix, type, in + 1, + in_len - 1)) { + return 0; + } + assert(tls_seal_scatter_prefix_len(ssl, type, in_len) == + split_record_len + SSL3_RT_HEADER_LENGTH - 1); + OPENSSL_memcpy(out_prefix + split_record_len, tmp_prefix, + SSL3_RT_HEADER_LENGTH - 1); + OPENSSL_memcpy(out, tmp_prefix + SSL3_RT_HEADER_LENGTH - 1, 1); + return 1; + } + + return do_seal_record(ssl, out_prefix, out, out_suffix, type, in, in_len); +} + +int tls_seal_record(SSL *ssl, uint8_t *out, size_t *out_len, size_t max_out_len, + uint8_t type, const uint8_t *in, size_t in_len) { + if (buffers_alias(in, in_len, out, max_out_len)) { + OPENSSL_PUT_ERROR(SSL, SSL_R_OUTPUT_ALIASES_INPUT); + return 0; + } + + const size_t prefix_len = tls_seal_scatter_prefix_len(ssl, type, in_len); + size_t suffix_len; + if (!tls_seal_scatter_suffix_len(ssl, &suffix_len, type, in_len)) { + return false; + } + if (in_len + prefix_len < in_len || + prefix_len + in_len + suffix_len < prefix_len + in_len) { + OPENSSL_PUT_ERROR(SSL, SSL_R_RECORD_TOO_LARGE); + return 0; + } + if (max_out_len < in_len + prefix_len + suffix_len) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); + return 0; + } + + uint8_t *prefix = out; + uint8_t *body = out + prefix_len; + uint8_t *suffix = body + in_len; + if (!tls_seal_scatter_record(ssl, prefix, body, suffix, type, in, in_len)) { + return 0; + } + + *out_len = prefix_len + in_len + suffix_len; + return 1; +} + +enum ssl_open_record_t ssl_process_alert(SSL *ssl, uint8_t *out_alert, + Span in) { + // Alerts records may not contain fragmented or multiple alerts. + if (in.size() != 2) { + *out_alert = SSL_AD_DECODE_ERROR; + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ALERT); + return ssl_open_record_error; + } + + ssl_do_msg_callback(ssl, 0 /* read */, SSL3_RT_ALERT, in); + + const uint8_t alert_level = in[0]; + const uint8_t alert_descr = in[1]; + + uint16_t alert = (alert_level << 8) | alert_descr; + ssl_do_info_callback(ssl, SSL_CB_READ_ALERT, alert); + + if (alert_level == SSL3_AL_WARNING) { + if (alert_descr == SSL_AD_CLOSE_NOTIFY) { + ssl->s3->read_shutdown = ssl_shutdown_close_notify; + return ssl_open_record_close_notify; + } + + // Warning alerts do not exist in TLS 1.3. + if (ssl->s3->have_version && + ssl_protocol_version(ssl) >= TLS1_3_VERSION) { + *out_alert = SSL_AD_DECODE_ERROR; + OPENSSL_PUT_ERROR(SSL, SSL_R_BAD_ALERT); + return ssl_open_record_error; + } + + ssl->s3->warning_alert_count++; + if (ssl->s3->warning_alert_count > kMaxWarningAlerts) { + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + OPENSSL_PUT_ERROR(SSL, SSL_R_TOO_MANY_WARNING_ALERTS); + return ssl_open_record_error; + } + return ssl_open_record_discard; + } + + if (alert_level == SSL3_AL_FATAL) { + OPENSSL_PUT_ERROR(SSL, SSL_AD_REASON_OFFSET + alert_descr); + ERR_add_error_dataf("SSL alert number %d", alert_descr); + *out_alert = 0; // No alert to send back to the peer. + return ssl_open_record_error; + } + + *out_alert = SSL_AD_ILLEGAL_PARAMETER; + OPENSSL_PUT_ERROR(SSL, SSL_R_UNKNOWN_ALERT_TYPE); + return ssl_open_record_error; +} + +OpenRecordResult OpenRecord(SSL *ssl, Span *out, + size_t *out_record_len, uint8_t *out_alert, + const Span in) { + // This API is a work in progress and currently only works for TLS 1.2 servers + // and below. 
+ if (SSL_in_init(ssl) || + SSL_is_dtls(ssl) || + ssl_protocol_version(ssl) > TLS1_2_VERSION) { + assert(false); + *out_alert = SSL_AD_INTERNAL_ERROR; + return OpenRecordResult::kError; + } + + Span plaintext; + uint8_t type = 0; + const ssl_open_record_t result = tls_open_record( + ssl, &type, &plaintext, out_record_len, out_alert, in); + + switch (result) { + case ssl_open_record_success: + if (type != SSL3_RT_APPLICATION_DATA && type != SSL3_RT_ALERT) { + *out_alert = SSL_AD_UNEXPECTED_MESSAGE; + return OpenRecordResult::kError; + } + *out = plaintext; + return OpenRecordResult::kOK; + case ssl_open_record_discard: + return OpenRecordResult::kDiscard; + case ssl_open_record_partial: + return OpenRecordResult::kIncompleteRecord; + case ssl_open_record_close_notify: + return OpenRecordResult::kAlertCloseNotify; + case ssl_open_record_error: + return OpenRecordResult::kError; + } + assert(false); + return OpenRecordResult::kError; +} + +size_t SealRecordPrefixLen(const SSL *ssl, const size_t record_len) { + return tls_seal_scatter_prefix_len(ssl, SSL3_RT_APPLICATION_DATA, record_len); +} + +size_t SealRecordSuffixLen(const SSL *ssl, const size_t plaintext_len) { + assert(plaintext_len <= SSL3_RT_MAX_PLAIN_LENGTH); + size_t suffix_len; + if (!tls_seal_scatter_suffix_len(ssl, &suffix_len, SSL3_RT_APPLICATION_DATA, + plaintext_len)) { + assert(false); + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return 0; + } + assert(suffix_len <= SSL3_RT_MAX_ENCRYPTED_OVERHEAD); + return suffix_len; +} + +bool SealRecord(SSL *ssl, const Span out_prefix, + const Span out, Span out_suffix, + const Span in) { + // This API is a work in progress and currently only works for TLS 1.2 servers + // and below. + if (SSL_in_init(ssl) || + SSL_is_dtls(ssl) || + ssl_protocol_version(ssl) > TLS1_2_VERSION) { + assert(false); + OPENSSL_PUT_ERROR(SSL, ERR_R_INTERNAL_ERROR); + return false; + } + + if (out_prefix.size() != SealRecordPrefixLen(ssl, in.size()) || + out.size() != in.size() || + out_suffix.size() != SealRecordSuffixLen(ssl, in.size())) { + OPENSSL_PUT_ERROR(SSL, SSL_R_BUFFER_TOO_SMALL); + return false; + } + return tls_seal_scatter_record(ssl, out_prefix.data(), out.data(), + out_suffix.data(), SSL3_RT_APPLICATION_DATA, + in.data(), in.size()); +} + +} // namespace bssl + +using namespace bssl; + +size_t SSL_max_seal_overhead(const SSL *ssl) { + if (SSL_is_dtls(ssl)) { + return dtls_max_seal_overhead(ssl, dtls1_use_current_epoch); + } + + size_t ret = SSL3_RT_HEADER_LENGTH; + ret += ssl->s3->aead_write_ctx->MaxOverhead(); + // TLS 1.3 needs an extra byte for the encrypted record type. + if (!ssl->s3->aead_write_ctx->is_null_cipher() && + ssl->s3->aead_write_ctx->ProtocolVersion() >= TLS1_3_VERSION) { + ret += 1; + } + if (ssl_needs_record_splitting(ssl)) { + ret *= 2; + } + return ret; +} diff --git a/Sources/BoringSSL/third_party/fiat/curve25519.c b/Sources/BoringSSL/third_party/fiat/curve25519.c new file mode 100644 index 000000000..d54aa839e --- /dev/null +++ b/Sources/BoringSSL/third_party/fiat/curve25519.c @@ -0,0 +1,5062 @@ +// The MIT License (MIT) +// +// Copyright (c) 2015-2016 the fiat-crypto authors (see the AUTHORS file). 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Some of this code is taken from the ref10 version of Ed25519 in SUPERCOP +// 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as +// public domain but parts have been replaced with code generated by Fiat +// (https://github.com/mit-plv/fiat-crypto), which is MIT licensed. +// +// The field functions are shared by Ed25519 and X25519 where possible. + +#include + +#include +#include + +#include +#include +#include +#include + +#include "internal.h" +#include "../../crypto/internal.h" + + +static const int64_t kBottom25Bits = INT64_C(0x1ffffff); +static const int64_t kBottom26Bits = INT64_C(0x3ffffff); + +static uint64_t load_3(const uint8_t *in) { + uint64_t result; + result = (uint64_t)in[0]; + result |= ((uint64_t)in[1]) << 8; + result |= ((uint64_t)in[2]) << 16; + return result; +} + +static uint64_t load_4(const uint8_t *in) { + uint64_t result; + result = (uint64_t)in[0]; + result |= ((uint64_t)in[1]) << 8; + result |= ((uint64_t)in[2]) << 16; + result |= ((uint64_t)in[3]) << 24; + return result; +} + +#define assert_fe(f) do { \ + for (unsigned _assert_fe_i = 0; _assert_fe_i< 10; _assert_fe_i++) { \ + assert(f[_assert_fe_i] < 1.125*(1<<(26-(_assert_fe_i&1)))); \ + } \ +} while (0) + +#define assert_fe_loose(f) do { \ + for (unsigned _assert_fe_i = 0; _assert_fe_i< 10; _assert_fe_i++) { \ + assert(f[_assert_fe_i] < 3.375*(1<<(26-(_assert_fe_i&1)))); \ + } \ +} while (0) + +static void fe_frombytes_impl(uint32_t h[10], const uint8_t *s) { + // Ignores top bit of s. + uint32_t a0 = load_4(s); + uint32_t a1 = load_4(s+4); + uint32_t a2 = load_4(s+8); + uint32_t a3 = load_4(s+12); + uint32_t a4 = load_4(s+16); + uint32_t a5 = load_4(s+20); + uint32_t a6 = load_4(s+24); + uint32_t a7 = load_4(s+28); + h[0] = a0&((1<<26)-1); // 26 used, 32-26 left. 
26 + h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); // (32-26) + 19 = 6+19 = 25 + h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); // (32-19) + 13 = 13+13 = 26 + h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); // (32-13) + 6 = 19+ 6 = 25 + h[4] = (a3>> 6); // (32- 6) = 26 + h[5] = a4&((1<<25)-1); // 25 + h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); // (32-25) + 19 = 7+19 = 26 + h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); // (32-19) + 12 = 13+12 = 25 + h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); // (32-12) + 6 = 20+ 6 = 26 + h[9] = (a7>> 6)&((1<<25)-1); // 25 + assert_fe(h); +} + +static void fe_frombytes(fe *h, const uint8_t *s) { + fe_frombytes_impl(h->v, s); +} + +// Preconditions: +// |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^231 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0> 25; + q = (h0 + q) >> 26; + q = (h1 + q) >> 25; + q = (h2 + q) >> 26; + q = (h3 + q) >> 25; + q = (h4 + q) >> 26; + q = (h5 + q) >> 25; + q = (h6 + q) >> 26; + q = (h7 + q) >> 25; + q = (h8 + q) >> 26; + q = (h9 + q) >> 25; + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h0 += 19 * q; + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. + + h1 += h0 >> 26; h0 &= kBottom26Bits; + h2 += h1 >> 25; h1 &= kBottom25Bits; + h3 += h2 >> 26; h2 &= kBottom26Bits; + h4 += h3 >> 25; h3 &= kBottom25Bits; + h5 += h4 >> 26; h4 &= kBottom26Bits; + h6 += h5 >> 25; h5 &= kBottom25Bits; + h7 += h6 >> 26; h6 &= kBottom26Bits; + h8 += h7 >> 25; h7 &= kBottom25Bits; + h9 += h8 >> 26; h8 &= kBottom26Bits; + h9 &= kBottom25Bits; + // h10 = carry9 + + // Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h0+...+2^230 h9 between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h0+...+2^230 h9. 
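
The packing above is the ten-limb radix-2^25.5 representation: even-indexed limbs hold 26 bits and odd-indexed limbs 25 bits, at bit offsets 0, 26, 51, 77, 102, 128, 153, 179, 204 and 230, covering 2^255 exactly. A quick standalone check of that layout:

/* Verify the alternating 26/25-bit limb widths and their bit offsets. */
#include <assert.h>
#include <stdio.h>

int main(void) {
  int offset = 0;
  for (int i = 0; i < 10; i++) {
    int width = (i & 1) ? 25 : 26;
    printf("limb %d: offset %3d, width %d\n", i, offset, width);
    offset += width;
  }
  assert(offset == 255); /* the ten limbs cover exactly 2^255 */
  return 0;
}
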
+ + s[0] = h0 >> 0; + s[1] = h0 >> 8; + s[2] = h0 >> 16; + s[3] = (h0 >> 24) | ((uint32_t)(h1) << 2); + s[4] = h1 >> 6; + s[5] = h1 >> 14; + s[6] = (h1 >> 22) | ((uint32_t)(h2) << 3); + s[7] = h2 >> 5; + s[8] = h2 >> 13; + s[9] = (h2 >> 21) | ((uint32_t)(h3) << 5); + s[10] = h3 >> 3; + s[11] = h3 >> 11; + s[12] = (h3 >> 19) | ((uint32_t)(h4) << 6); + s[13] = h4 >> 2; + s[14] = h4 >> 10; + s[15] = h4 >> 18; + s[16] = h5 >> 0; + s[17] = h5 >> 8; + s[18] = h5 >> 16; + s[19] = (h5 >> 24) | ((uint32_t)(h6) << 1); + s[20] = h6 >> 7; + s[21] = h6 >> 15; + s[22] = (h6 >> 23) | ((uint32_t)(h7) << 3); + s[23] = h7 >> 5; + s[24] = h7 >> 13; + s[25] = (h7 >> 21) | ((uint32_t)(h8) << 4); + s[26] = h8 >> 4; + s[27] = h8 >> 12; + s[28] = (h8 >> 20) | ((uint32_t)(h9) << 6); + s[29] = h9 >> 2; + s[30] = h9 >> 10; + s[31] = h9 >> 18; +} + +static void fe_tobytes(uint8_t *s, const fe *h) { + fe_tobytes_impl(s, h->v); +} + +static void fe_loose_tobytes(uint8_t *s, const fe_loose *h) { + fe_tobytes_impl(s, h->v); +} + +// h = f +static void fe_copy(fe *h, const fe *f) { + OPENSSL_memmove(h, f, sizeof(uint32_t) * 10); +} + +static void fe_copy_lt(fe_loose *h, const fe *f) { + OPENSSL_memmove(h, f, sizeof(uint32_t) * 10); +} +#if !defined(OPENSSL_SMALL) +static void fe_copy_ll(fe_loose *h, const fe_loose *f) { + OPENSSL_memmove(h, f, sizeof(uint32_t) * 10); +} +#endif // !defined(OPENSSL_SMALL) + +// h = 0 +static void fe_0(fe *h) { + OPENSSL_memset(h, 0, sizeof(uint32_t) * 10); +} + +static void fe_loose_0(fe_loose *h) { + OPENSSL_memset(h, 0, sizeof(uint32_t) * 10); +} + +// h = 1 +static void fe_1(fe *h) { + OPENSSL_memset(h, 0, sizeof(uint32_t) * 10); + h->v[0] = 1; +} + +static void fe_loose_1(fe_loose *h) { + OPENSSL_memset(h, 0, sizeof(uint32_t) * 10); + h->v[0] = 1; +} + +static void fe_add_impl(uint32_t out[10], const uint32_t in1[10], const uint32_t in2[10]) { + { const uint32_t x20 = in1[9]; + { const uint32_t x21 = in1[8]; + { const uint32_t x19 = in1[7]; + { const uint32_t x17 = in1[6]; + { const uint32_t x15 = in1[5]; + { const uint32_t x13 = in1[4]; + { const uint32_t x11 = in1[3]; + { const uint32_t x9 = in1[2]; + { const uint32_t x7 = in1[1]; + { const uint32_t x5 = in1[0]; + { const uint32_t x38 = in2[9]; + { const uint32_t x39 = in2[8]; + { const uint32_t x37 = in2[7]; + { const uint32_t x35 = in2[6]; + { const uint32_t x33 = in2[5]; + { const uint32_t x31 = in2[4]; + { const uint32_t x29 = in2[3]; + { const uint32_t x27 = in2[2]; + { const uint32_t x25 = in2[1]; + { const uint32_t x23 = in2[0]; + out[0] = (x5 + x23); + out[1] = (x7 + x25); + out[2] = (x9 + x27); + out[3] = (x11 + x29); + out[4] = (x13 + x31); + out[5] = (x15 + x33); + out[6] = (x17 + x35); + out[7] = (x19 + x37); + out[8] = (x21 + x39); + out[9] = (x20 + x38); + }}}}}}}}}}}}}}}}}}}} +} + +// h = f + g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. 
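
The subtraction and negation bodies below add per-limb bias constants (0x7ffffda, 0x3fffffe, 0x7fffffe) before subtracting. Read limb-wise, these are 2*p: p = 2^255 - 19 has limbs 2^26-19, 2^25-1, 2^26-1, 2^25-1, and so on, so the biased difference never goes negative while staying congruent mod p. A quick consistency check under that reading:

/* Recompute the bias constants as 2*p in the ten-limb layout. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint32_t expected[10];
  for (int i = 0; i < 10; i++) {
    uint32_t width = (i & 1) ? 25 : 26;
    uint32_t limb_of_p = (UINT32_C(1) << width) - (i == 0 ? 19 : 1);
    expected[i] = 2 * limb_of_p;
  }
  printf("%#x %#x %#x\n", expected[0], expected[1], expected[2]);
  assert(expected[0] == 0x7ffffda); /* 2 * (2^26 - 19) */
  assert(expected[1] == 0x3fffffe); /* 2 * (2^25 - 1)  */
  assert(expected[2] == 0x7fffffe); /* 2 * (2^26 - 1)  */
  return 0;
}
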
+static void fe_add(fe_loose *h, const fe *f, const fe *g) { + assert_fe(f->v); + assert_fe(g->v); + fe_add_impl(h->v, f->v, g->v); + assert_fe_loose(h->v); +} + +static void fe_sub_impl(uint32_t out[10], const uint32_t in1[10], const uint32_t in2[10]) { + { const uint32_t x20 = in1[9]; + { const uint32_t x21 = in1[8]; + { const uint32_t x19 = in1[7]; + { const uint32_t x17 = in1[6]; + { const uint32_t x15 = in1[5]; + { const uint32_t x13 = in1[4]; + { const uint32_t x11 = in1[3]; + { const uint32_t x9 = in1[2]; + { const uint32_t x7 = in1[1]; + { const uint32_t x5 = in1[0]; + { const uint32_t x38 = in2[9]; + { const uint32_t x39 = in2[8]; + { const uint32_t x37 = in2[7]; + { const uint32_t x35 = in2[6]; + { const uint32_t x33 = in2[5]; + { const uint32_t x31 = in2[4]; + { const uint32_t x29 = in2[3]; + { const uint32_t x27 = in2[2]; + { const uint32_t x25 = in2[1]; + { const uint32_t x23 = in2[0]; + out[0] = ((0x7ffffda + x5) - x23); + out[1] = ((0x3fffffe + x7) - x25); + out[2] = ((0x7fffffe + x9) - x27); + out[3] = ((0x3fffffe + x11) - x29); + out[4] = ((0x7fffffe + x13) - x31); + out[5] = ((0x3fffffe + x15) - x33); + out[6] = ((0x7fffffe + x17) - x35); + out[7] = ((0x3fffffe + x19) - x37); + out[8] = ((0x7fffffe + x21) - x39); + out[9] = ((0x3fffffe + x20) - x38); + }}}}}}}}}}}}}}}}}}}} +} + +// h = f - g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +static void fe_sub(fe_loose *h, const fe *f, const fe *g) { + assert_fe(f->v); + assert_fe(g->v); + fe_sub_impl(h->v, f->v, g->v); + assert_fe_loose(h->v); +} + +static void fe_carry_impl(uint32_t out[10], const uint32_t in1[10]) { + { const uint32_t x17 = in1[9]; + { const uint32_t x18 = in1[8]; + { const uint32_t x16 = in1[7]; + { const uint32_t x14 = in1[6]; + { const uint32_t x12 = in1[5]; + { const uint32_t x10 = in1[4]; + { const uint32_t x8 = in1[3]; + { const uint32_t x6 = in1[2]; + { const uint32_t x4 = in1[1]; + { const uint32_t x2 = in1[0]; + { uint32_t x19 = (x2 >> 0x1a); + { uint32_t x20 = (x2 & 0x3ffffff); + { uint32_t x21 = (x19 + x4); + { uint32_t x22 = (x21 >> 0x19); + { uint32_t x23 = (x21 & 0x1ffffff); + { uint32_t x24 = (x22 + x6); + { uint32_t x25 = (x24 >> 0x1a); + { uint32_t x26 = (x24 & 0x3ffffff); + { uint32_t x27 = (x25 + x8); + { uint32_t x28 = (x27 >> 0x19); + { uint32_t x29 = (x27 & 0x1ffffff); + { uint32_t x30 = (x28 + x10); + { uint32_t x31 = (x30 >> 0x1a); + { uint32_t x32 = (x30 & 0x3ffffff); + { uint32_t x33 = (x31 + x12); + { uint32_t x34 = (x33 >> 0x19); + { uint32_t x35 = (x33 & 0x1ffffff); + { uint32_t x36 = (x34 + x14); + { uint32_t x37 = (x36 >> 0x1a); + { uint32_t x38 = (x36 & 0x3ffffff); + { uint32_t x39 = (x37 + x16); + { uint32_t x40 = (x39 >> 0x19); + { uint32_t x41 = (x39 & 0x1ffffff); + { uint32_t x42 = (x40 + x18); + { uint32_t x43 = (x42 >> 0x1a); + { uint32_t x44 = (x42 & 0x3ffffff); + { uint32_t x45 = (x43 + x17); + { uint32_t x46 = (x45 >> 0x19); + { uint32_t x47 = (x45 & 0x1ffffff); + { uint32_t x48 = (x20 + (0x13 * x46)); + { uint32_t x49 = (x48 >> 0x1a); + { uint32_t x50 = (x48 & 0x3ffffff); + { uint32_t x51 = (x49 + x23); + { uint32_t x52 = (x51 >> 0x19); + { uint32_t x53 = (x51 & 0x1ffffff); + out[0] = x50; + out[1] = x53; + out[2] = (x52 + x26); + out[3] = x29; + out[4] = x32; + out[5] = x35; + out[6] = x38; + out[7] = x41; + out[8] = x44; + out[9] = x47; + 
}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} +} + +static void fe_carry(fe *h, const fe_loose* f) { + assert_fe_loose(f->v); + fe_carry_impl(h->v, f->v); + assert_fe(h->v); +} + +static void fe_mul_impl(uint32_t out[10], const uint32_t in1[10], const uint32_t in2[10]) { + assert_fe_loose(in1); + assert_fe_loose(in2); + { const uint32_t x20 = in1[9]; + { const uint32_t x21 = in1[8]; + { const uint32_t x19 = in1[7]; + { const uint32_t x17 = in1[6]; + { const uint32_t x15 = in1[5]; + { const uint32_t x13 = in1[4]; + { const uint32_t x11 = in1[3]; + { const uint32_t x9 = in1[2]; + { const uint32_t x7 = in1[1]; + { const uint32_t x5 = in1[0]; + { const uint32_t x38 = in2[9]; + { const uint32_t x39 = in2[8]; + { const uint32_t x37 = in2[7]; + { const uint32_t x35 = in2[6]; + { const uint32_t x33 = in2[5]; + { const uint32_t x31 = in2[4]; + { const uint32_t x29 = in2[3]; + { const uint32_t x27 = in2[2]; + { const uint32_t x25 = in2[1]; + { const uint32_t x23 = in2[0]; + { uint64_t x40 = ((uint64_t)x23 * x5); + { uint64_t x41 = (((uint64_t)x23 * x7) + ((uint64_t)x25 * x5)); + { uint64_t x42 = ((((uint64_t)(0x2 * x25) * x7) + ((uint64_t)x23 * x9)) + ((uint64_t)x27 * x5)); + { uint64_t x43 = (((((uint64_t)x25 * x9) + ((uint64_t)x27 * x7)) + ((uint64_t)x23 * x11)) + ((uint64_t)x29 * x5)); + { uint64_t x44 = (((((uint64_t)x27 * x9) + (0x2 * (((uint64_t)x25 * x11) + ((uint64_t)x29 * x7)))) + ((uint64_t)x23 * x13)) + ((uint64_t)x31 * x5)); + { uint64_t x45 = (((((((uint64_t)x27 * x11) + ((uint64_t)x29 * x9)) + ((uint64_t)x25 * x13)) + ((uint64_t)x31 * x7)) + ((uint64_t)x23 * x15)) + ((uint64_t)x33 * x5)); + { uint64_t x46 = (((((0x2 * ((((uint64_t)x29 * x11) + ((uint64_t)x25 * x15)) + ((uint64_t)x33 * x7))) + ((uint64_t)x27 * x13)) + ((uint64_t)x31 * x9)) + ((uint64_t)x23 * x17)) + ((uint64_t)x35 * x5)); + { uint64_t x47 = (((((((((uint64_t)x29 * x13) + ((uint64_t)x31 * x11)) + ((uint64_t)x27 * x15)) + ((uint64_t)x33 * x9)) + ((uint64_t)x25 * x17)) + ((uint64_t)x35 * x7)) + ((uint64_t)x23 * x19)) + ((uint64_t)x37 * x5)); + { uint64_t x48 = (((((((uint64_t)x31 * x13) + (0x2 * (((((uint64_t)x29 * x15) + ((uint64_t)x33 * x11)) + ((uint64_t)x25 * x19)) + ((uint64_t)x37 * x7)))) + ((uint64_t)x27 * x17)) + ((uint64_t)x35 * x9)) + ((uint64_t)x23 * x21)) + ((uint64_t)x39 * x5)); + { uint64_t x49 = (((((((((((uint64_t)x31 * x15) + ((uint64_t)x33 * x13)) + ((uint64_t)x29 * x17)) + ((uint64_t)x35 * x11)) + ((uint64_t)x27 * x19)) + ((uint64_t)x37 * x9)) + ((uint64_t)x25 * x21)) + ((uint64_t)x39 * x7)) + ((uint64_t)x23 * x20)) + ((uint64_t)x38 * x5)); + { uint64_t x50 = (((((0x2 * ((((((uint64_t)x33 * x15) + ((uint64_t)x29 * x19)) + ((uint64_t)x37 * x11)) + ((uint64_t)x25 * x20)) + ((uint64_t)x38 * x7))) + ((uint64_t)x31 * x17)) + ((uint64_t)x35 * x13)) + ((uint64_t)x27 * x21)) + ((uint64_t)x39 * x9)); + { uint64_t x51 = (((((((((uint64_t)x33 * x17) + ((uint64_t)x35 * x15)) + ((uint64_t)x31 * x19)) + ((uint64_t)x37 * x13)) + ((uint64_t)x29 * x21)) + ((uint64_t)x39 * x11)) + ((uint64_t)x27 * x20)) + ((uint64_t)x38 * x9)); + { uint64_t x52 = (((((uint64_t)x35 * x17) + (0x2 * (((((uint64_t)x33 * x19) + ((uint64_t)x37 * x15)) + ((uint64_t)x29 * x20)) + ((uint64_t)x38 * x11)))) + ((uint64_t)x31 * x21)) + ((uint64_t)x39 * x13)); + { uint64_t x53 = (((((((uint64_t)x35 * x19) + ((uint64_t)x37 * x17)) + ((uint64_t)x33 * x21)) + ((uint64_t)x39 * x15)) + ((uint64_t)x31 * x20)) + ((uint64_t)x38 * x13)); + { uint64_t x54 = (((0x2 * ((((uint64_t)x37 * x19) + ((uint64_t)x33 * x20)) + ((uint64_t)x38 * x15))) + ((uint64_t)x35 * 
x21)) + ((uint64_t)x39 * x17)); + { uint64_t x55 = (((((uint64_t)x37 * x21) + ((uint64_t)x39 * x19)) + ((uint64_t)x35 * x20)) + ((uint64_t)x38 * x17)); + { uint64_t x56 = (((uint64_t)x39 * x21) + (0x2 * (((uint64_t)x37 * x20) + ((uint64_t)x38 * x19)))); + { uint64_t x57 = (((uint64_t)x39 * x20) + ((uint64_t)x38 * x21)); + { uint64_t x58 = ((uint64_t)(0x2 * x38) * x20); + { uint64_t x59 = (x48 + (x58 << 0x4)); + { uint64_t x60 = (x59 + (x58 << 0x1)); + { uint64_t x61 = (x60 + x58); + { uint64_t x62 = (x47 + (x57 << 0x4)); + { uint64_t x63 = (x62 + (x57 << 0x1)); + { uint64_t x64 = (x63 + x57); + { uint64_t x65 = (x46 + (x56 << 0x4)); + { uint64_t x66 = (x65 + (x56 << 0x1)); + { uint64_t x67 = (x66 + x56); + { uint64_t x68 = (x45 + (x55 << 0x4)); + { uint64_t x69 = (x68 + (x55 << 0x1)); + { uint64_t x70 = (x69 + x55); + { uint64_t x71 = (x44 + (x54 << 0x4)); + { uint64_t x72 = (x71 + (x54 << 0x1)); + { uint64_t x73 = (x72 + x54); + { uint64_t x74 = (x43 + (x53 << 0x4)); + { uint64_t x75 = (x74 + (x53 << 0x1)); + { uint64_t x76 = (x75 + x53); + { uint64_t x77 = (x42 + (x52 << 0x4)); + { uint64_t x78 = (x77 + (x52 << 0x1)); + { uint64_t x79 = (x78 + x52); + { uint64_t x80 = (x41 + (x51 << 0x4)); + { uint64_t x81 = (x80 + (x51 << 0x1)); + { uint64_t x82 = (x81 + x51); + { uint64_t x83 = (x40 + (x50 << 0x4)); + { uint64_t x84 = (x83 + (x50 << 0x1)); + { uint64_t x85 = (x84 + x50); + { uint64_t x86 = (x85 >> 0x1a); + { uint32_t x87 = ((uint32_t)x85 & 0x3ffffff); + { uint64_t x88 = (x86 + x82); + { uint64_t x89 = (x88 >> 0x19); + { uint32_t x90 = ((uint32_t)x88 & 0x1ffffff); + { uint64_t x91 = (x89 + x79); + { uint64_t x92 = (x91 >> 0x1a); + { uint32_t x93 = ((uint32_t)x91 & 0x3ffffff); + { uint64_t x94 = (x92 + x76); + { uint64_t x95 = (x94 >> 0x19); + { uint32_t x96 = ((uint32_t)x94 & 0x1ffffff); + { uint64_t x97 = (x95 + x73); + { uint64_t x98 = (x97 >> 0x1a); + { uint32_t x99 = ((uint32_t)x97 & 0x3ffffff); + { uint64_t x100 = (x98 + x70); + { uint64_t x101 = (x100 >> 0x19); + { uint32_t x102 = ((uint32_t)x100 & 0x1ffffff); + { uint64_t x103 = (x101 + x67); + { uint64_t x104 = (x103 >> 0x1a); + { uint32_t x105 = ((uint32_t)x103 & 0x3ffffff); + { uint64_t x106 = (x104 + x64); + { uint64_t x107 = (x106 >> 0x19); + { uint32_t x108 = ((uint32_t)x106 & 0x1ffffff); + { uint64_t x109 = (x107 + x61); + { uint64_t x110 = (x109 >> 0x1a); + { uint32_t x111 = ((uint32_t)x109 & 0x3ffffff); + { uint64_t x112 = (x110 + x49); + { uint64_t x113 = (x112 >> 0x19); + { uint32_t x114 = ((uint32_t)x112 & 0x1ffffff); + { uint64_t x115 = (x87 + (0x13 * x113)); + { uint32_t x116 = (uint32_t) (x115 >> 0x1a); + { uint32_t x117 = ((uint32_t)x115 & 0x3ffffff); + { uint32_t x118 = (x116 + x90); + { uint32_t x119 = (x118 >> 0x19); + { uint32_t x120 = (x118 & 0x1ffffff); + out[0] = x117; + out[1] = x120; + out[2] = (x119 + x93); + out[3] = x96; + out[4] = x99; + out[5] = x102; + out[6] = x105; + out[7] = x108; + out[8] = x111; + out[9] = x114; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} + assert_fe(out); +} + +static void fe_mul_ltt(fe_loose *h, const fe *f, const fe *g) { + fe_mul_impl(h->v, f->v, g->v); +} + +static void fe_mul_llt(fe_loose *h, const fe_loose *f, const fe *g) { + fe_mul_impl(h->v, f->v, g->v); +} + +static void fe_mul_ttt(fe *h, const fe *f, const fe *g) { + fe_mul_impl(h->v, f->v, g->v); +} + +static void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g) { + fe_mul_impl(h->v, f->v, g->v); +} + +static void fe_mul_ttl(fe *h, const fe *f, const 
fe_loose *g) { + fe_mul_impl(h->v, f->v, g->v); +} + +static void fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g) { + fe_mul_impl(h->v, f->v, g->v); +} + +static void fe_sqr_impl(uint32_t out[10], const uint32_t in1[10]) { + assert_fe_loose(in1); + { const uint32_t x17 = in1[9]; + { const uint32_t x18 = in1[8]; + { const uint32_t x16 = in1[7]; + { const uint32_t x14 = in1[6]; + { const uint32_t x12 = in1[5]; + { const uint32_t x10 = in1[4]; + { const uint32_t x8 = in1[3]; + { const uint32_t x6 = in1[2]; + { const uint32_t x4 = in1[1]; + { const uint32_t x2 = in1[0]; + { uint64_t x19 = ((uint64_t)x2 * x2); + { uint64_t x20 = ((uint64_t)(0x2 * x2) * x4); + { uint64_t x21 = (0x2 * (((uint64_t)x4 * x4) + ((uint64_t)x2 * x6))); + { uint64_t x22 = (0x2 * (((uint64_t)x4 * x6) + ((uint64_t)x2 * x8))); + { uint64_t x23 = ((((uint64_t)x6 * x6) + ((uint64_t)(0x4 * x4) * x8)) + ((uint64_t)(0x2 * x2) * x10)); + { uint64_t x24 = (0x2 * ((((uint64_t)x6 * x8) + ((uint64_t)x4 * x10)) + ((uint64_t)x2 * x12))); + { uint64_t x25 = (0x2 * (((((uint64_t)x8 * x8) + ((uint64_t)x6 * x10)) + ((uint64_t)x2 * x14)) + ((uint64_t)(0x2 * x4) * x12))); + { uint64_t x26 = (0x2 * (((((uint64_t)x8 * x10) + ((uint64_t)x6 * x12)) + ((uint64_t)x4 * x14)) + ((uint64_t)x2 * x16))); + { uint64_t x27 = (((uint64_t)x10 * x10) + (0x2 * ((((uint64_t)x6 * x14) + ((uint64_t)x2 * x18)) + (0x2 * (((uint64_t)x4 * x16) + ((uint64_t)x8 * x12)))))); + { uint64_t x28 = (0x2 * ((((((uint64_t)x10 * x12) + ((uint64_t)x8 * x14)) + ((uint64_t)x6 * x16)) + ((uint64_t)x4 * x18)) + ((uint64_t)x2 * x17))); + { uint64_t x29 = (0x2 * (((((uint64_t)x12 * x12) + ((uint64_t)x10 * x14)) + ((uint64_t)x6 * x18)) + (0x2 * (((uint64_t)x8 * x16) + ((uint64_t)x4 * x17))))); + { uint64_t x30 = (0x2 * (((((uint64_t)x12 * x14) + ((uint64_t)x10 * x16)) + ((uint64_t)x8 * x18)) + ((uint64_t)x6 * x17))); + { uint64_t x31 = (((uint64_t)x14 * x14) + (0x2 * (((uint64_t)x10 * x18) + (0x2 * (((uint64_t)x12 * x16) + ((uint64_t)x8 * x17)))))); + { uint64_t x32 = (0x2 * ((((uint64_t)x14 * x16) + ((uint64_t)x12 * x18)) + ((uint64_t)x10 * x17))); + { uint64_t x33 = (0x2 * ((((uint64_t)x16 * x16) + ((uint64_t)x14 * x18)) + ((uint64_t)(0x2 * x12) * x17))); + { uint64_t x34 = (0x2 * (((uint64_t)x16 * x18) + ((uint64_t)x14 * x17))); + { uint64_t x35 = (((uint64_t)x18 * x18) + ((uint64_t)(0x4 * x16) * x17)); + { uint64_t x36 = ((uint64_t)(0x2 * x18) * x17); + { uint64_t x37 = ((uint64_t)(0x2 * x17) * x17); + { uint64_t x38 = (x27 + (x37 << 0x4)); + { uint64_t x39 = (x38 + (x37 << 0x1)); + { uint64_t x40 = (x39 + x37); + { uint64_t x41 = (x26 + (x36 << 0x4)); + { uint64_t x42 = (x41 + (x36 << 0x1)); + { uint64_t x43 = (x42 + x36); + { uint64_t x44 = (x25 + (x35 << 0x4)); + { uint64_t x45 = (x44 + (x35 << 0x1)); + { uint64_t x46 = (x45 + x35); + { uint64_t x47 = (x24 + (x34 << 0x4)); + { uint64_t x48 = (x47 + (x34 << 0x1)); + { uint64_t x49 = (x48 + x34); + { uint64_t x50 = (x23 + (x33 << 0x4)); + { uint64_t x51 = (x50 + (x33 << 0x1)); + { uint64_t x52 = (x51 + x33); + { uint64_t x53 = (x22 + (x32 << 0x4)); + { uint64_t x54 = (x53 + (x32 << 0x1)); + { uint64_t x55 = (x54 + x32); + { uint64_t x56 = (x21 + (x31 << 0x4)); + { uint64_t x57 = (x56 + (x31 << 0x1)); + { uint64_t x58 = (x57 + x31); + { uint64_t x59 = (x20 + (x30 << 0x4)); + { uint64_t x60 = (x59 + (x30 << 0x1)); + { uint64_t x61 = (x60 + x30); + { uint64_t x62 = (x19 + (x29 << 0x4)); + { uint64_t x63 = (x62 + (x29 << 0x1)); + { uint64_t x64 = (x63 + x29); + { uint64_t x65 = (x64 >> 0x1a); + { uint32_t x66 = 
((uint32_t)x64 & 0x3ffffff); + { uint64_t x67 = (x65 + x61); + { uint64_t x68 = (x67 >> 0x19); + { uint32_t x69 = ((uint32_t)x67 & 0x1ffffff); + { uint64_t x70 = (x68 + x58); + { uint64_t x71 = (x70 >> 0x1a); + { uint32_t x72 = ((uint32_t)x70 & 0x3ffffff); + { uint64_t x73 = (x71 + x55); + { uint64_t x74 = (x73 >> 0x19); + { uint32_t x75 = ((uint32_t)x73 & 0x1ffffff); + { uint64_t x76 = (x74 + x52); + { uint64_t x77 = (x76 >> 0x1a); + { uint32_t x78 = ((uint32_t)x76 & 0x3ffffff); + { uint64_t x79 = (x77 + x49); + { uint64_t x80 = (x79 >> 0x19); + { uint32_t x81 = ((uint32_t)x79 & 0x1ffffff); + { uint64_t x82 = (x80 + x46); + { uint64_t x83 = (x82 >> 0x1a); + { uint32_t x84 = ((uint32_t)x82 & 0x3ffffff); + { uint64_t x85 = (x83 + x43); + { uint64_t x86 = (x85 >> 0x19); + { uint32_t x87 = ((uint32_t)x85 & 0x1ffffff); + { uint64_t x88 = (x86 + x40); + { uint64_t x89 = (x88 >> 0x1a); + { uint32_t x90 = ((uint32_t)x88 & 0x3ffffff); + { uint64_t x91 = (x89 + x28); + { uint64_t x92 = (x91 >> 0x19); + { uint32_t x93 = ((uint32_t)x91 & 0x1ffffff); + { uint64_t x94 = (x66 + (0x13 * x92)); + { uint32_t x95 = (uint32_t) (x94 >> 0x1a); + { uint32_t x96 = ((uint32_t)x94 & 0x3ffffff); + { uint32_t x97 = (x95 + x69); + { uint32_t x98 = (x97 >> 0x19); + { uint32_t x99 = (x97 & 0x1ffffff); + out[0] = x96; + out[1] = x99; + out[2] = (x98 + x72); + out[3] = x75; + out[4] = x78; + out[5] = x81; + out[6] = x84; + out[7] = x87; + out[8] = x90; + out[9] = x93; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} + assert_fe(out); +} + +static void fe_sq_tl(fe *h, const fe_loose *f) { + fe_sqr_impl(h->v, f->v); +} + +static void fe_sq_tt(fe *h, const fe *f) { + fe_sqr_impl(h->v, f->v); +} + +static void fe_loose_invert(fe *out, const fe_loose *z) { + fe t0; + fe t1; + fe t2; + fe t3; + int i; + + fe_sq_tl(&t0, z); + fe_sq_tt(&t1, &t0); + for (i = 1; i < 2; ++i) { + fe_sq_tt(&t1, &t1); + } + fe_mul_tlt(&t1, z, &t1); + fe_mul_ttt(&t0, &t0, &t1); + fe_sq_tt(&t2, &t0); + fe_mul_ttt(&t1, &t1, &t2); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 5; ++i) { + fe_sq_tt(&t2, &t2); + } + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 10; ++i) { + fe_sq_tt(&t2, &t2); + } + fe_mul_ttt(&t2, &t2, &t1); + fe_sq_tt(&t3, &t2); + for (i = 1; i < 20; ++i) { + fe_sq_tt(&t3, &t3); + } + fe_mul_ttt(&t2, &t3, &t2); + fe_sq_tt(&t2, &t2); + for (i = 1; i < 10; ++i) { + fe_sq_tt(&t2, &t2); + } + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 50; ++i) { + fe_sq_tt(&t2, &t2); + } + fe_mul_ttt(&t2, &t2, &t1); + fe_sq_tt(&t3, &t2); + for (i = 1; i < 100; ++i) { + fe_sq_tt(&t3, &t3); + } + fe_mul_ttt(&t2, &t3, &t2); + fe_sq_tt(&t2, &t2); + for (i = 1; i < 50; ++i) { + fe_sq_tt(&t2, &t2); + } + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t1, &t1); + for (i = 1; i < 5; ++i) { + fe_sq_tt(&t1, &t1); + } + fe_mul_ttt(out, &t1, &t0); +} + +static void fe_invert(fe *out, const fe *z) { + fe_loose l; + fe_copy_lt(&l, z); + fe_loose_invert(out, &l); +} + +static void fe_neg_impl(uint32_t out[10], const uint32_t in2[10]) { + { const uint32_t x20 = 0; + { const uint32_t x21 = 0; + { const uint32_t x19 = 0; + { const uint32_t x17 = 0; + { const uint32_t x15 = 0; + { const uint32_t x13 = 0; + { const uint32_t x11 = 0; + { const uint32_t x9 = 0; + { const uint32_t x7 = 0; + { const uint32_t x5 = 0; + { const uint32_t x38 = in2[9]; + { const uint32_t x39 = in2[8]; + { const uint32_t x37 = in2[7]; + { const uint32_t x35 = in2[6]; + { const uint32_t x33 = in2[5]; + { const uint32_t x31 = 
in2[4]; + { const uint32_t x29 = in2[3]; + { const uint32_t x27 = in2[2]; + { const uint32_t x25 = in2[1]; + { const uint32_t x23 = in2[0]; + out[0] = ((0x7ffffda + x5) - x23); + out[1] = ((0x3fffffe + x7) - x25); + out[2] = ((0x7fffffe + x9) - x27); + out[3] = ((0x3fffffe + x11) - x29); + out[4] = ((0x7fffffe + x13) - x31); + out[5] = ((0x3fffffe + x15) - x33); + out[6] = ((0x7fffffe + x17) - x35); + out[7] = ((0x3fffffe + x19) - x37); + out[8] = ((0x7fffffe + x21) - x39); + out[9] = ((0x3fffffe + x20) - x38); + }}}}}}}}}}}}}}}}}}}} +} + +// h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +static void fe_neg(fe_loose *h, const fe *f) { + assert_fe(f->v); + fe_neg_impl(h->v, f->v); + assert_fe_loose(h->v); +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +static void fe_cmov(fe_loose *f, const fe_loose *g, unsigned b) { + b = 0-b; + unsigned i; + for (i = 0; i < 10; i++) { + uint32_t x = f->v[i] ^ g->v[i]; + x &= b; + f->v[i] ^= x; + } +} + +// return 0 if f == 0 +// return 1 if f != 0 +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +static int fe_isnonzero(const fe_loose *f) { + uint8_t s[32]; + fe_loose_tobytes(s, f); + + static const uint8_t zero[32] = {0}; + return CRYPTO_memcmp(s, zero, sizeof(zero)) != 0; +} + +// return 1 if f is in {1,3,5,...,q-2} +// return 0 if f is in {0,2,4,...,q-1} +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +static int fe_isnegative(const fe *f) { + uint8_t s[32]; + fe_tobytes(s, f); + return s[0] & 1; +} + +// NOTE: based on fiat-crypto fe_mul, edited for in2=2*in1 +static void fe_sq2_impl(uint32_t out[10], const uint32_t in1[10]) { + assert_fe_loose(in1); + { const uint32_t x20 = in1[9]; + { const uint32_t x21 = in1[8]; + { const uint32_t x19 = in1[7]; + { const uint32_t x17 = in1[6]; + { const uint32_t x15 = in1[5]; + { const uint32_t x13 = in1[4]; + { const uint32_t x11 = in1[3]; + { const uint32_t x9 = in1[2]; + { const uint32_t x7 = in1[1]; + { const uint32_t x5 = in1[0]; + { const uint32_t x38 = 2*in1[9]; + { const uint32_t x39 = 2*in1[8]; + { const uint32_t x37 = 2*in1[7]; + { const uint32_t x35 = 2*in1[6]; + { const uint32_t x33 = 2*in1[5]; + { const uint32_t x31 = 2*in1[4]; + { const uint32_t x29 = 2*in1[3]; + { const uint32_t x27 = 2*in1[2]; + { const uint32_t x25 = 2*in1[1]; + { const uint32_t x23 = 2*in1[0]; + { uint64_t x40 = ((uint64_t)x23 * x5); + { uint64_t x41 = (((uint64_t)x23 * x7) + ((uint64_t)x25 * x5)); + { uint64_t x42 = ((((uint64_t)(0x2 * x25) * x7) + ((uint64_t)x23 * x9)) + ((uint64_t)x27 * x5)); + { uint64_t x43 = (((((uint64_t)x25 * x9) + ((uint64_t)x27 * x7)) + ((uint64_t)x23 * x11)) + ((uint64_t)x29 * x5)); + { uint64_t x44 = (((((uint64_t)x27 * x9) + (0x2 * (((uint64_t)x25 * x11) + ((uint64_t)x29 * x7)))) + ((uint64_t)x23 * x13)) + ((uint64_t)x31 * x5)); + { uint64_t x45 = (((((((uint64_t)x27 * x11) + ((uint64_t)x29 * x9)) + ((uint64_t)x25 * x13)) + ((uint64_t)x31 * x7)) + ((uint64_t)x23 * x15)) + ((uint64_t)x33 * x5)); + { uint64_t x46 = (((((0x2 * ((((uint64_t)x29 * x11) + ((uint64_t)x25 * x15)) + ((uint64_t)x33 * x7))) + ((uint64_t)x27 * x13)) + ((uint64_t)x31 * x9)) + ((uint64_t)x23 * x17)) + ((uint64_t)x35 * x5)); + { uint64_t x47 = (((((((((uint64_t)x29 * x13) + ((uint64_t)x31 * x11)) + ((uint64_t)x27 * x15)) + ((uint64_t)x33 * x9)) + ((uint64_t)x25 * 
x17)) + ((uint64_t)x35 * x7)) + ((uint64_t)x23 * x19)) + ((uint64_t)x37 * x5)); + { uint64_t x48 = (((((((uint64_t)x31 * x13) + (0x2 * (((((uint64_t)x29 * x15) + ((uint64_t)x33 * x11)) + ((uint64_t)x25 * x19)) + ((uint64_t)x37 * x7)))) + ((uint64_t)x27 * x17)) + ((uint64_t)x35 * x9)) + ((uint64_t)x23 * x21)) + ((uint64_t)x39 * x5)); + { uint64_t x49 = (((((((((((uint64_t)x31 * x15) + ((uint64_t)x33 * x13)) + ((uint64_t)x29 * x17)) + ((uint64_t)x35 * x11)) + ((uint64_t)x27 * x19)) + ((uint64_t)x37 * x9)) + ((uint64_t)x25 * x21)) + ((uint64_t)x39 * x7)) + ((uint64_t)x23 * x20)) + ((uint64_t)x38 * x5)); + { uint64_t x50 = (((((0x2 * ((((((uint64_t)x33 * x15) + ((uint64_t)x29 * x19)) + ((uint64_t)x37 * x11)) + ((uint64_t)x25 * x20)) + ((uint64_t)x38 * x7))) + ((uint64_t)x31 * x17)) + ((uint64_t)x35 * x13)) + ((uint64_t)x27 * x21)) + ((uint64_t)x39 * x9)); + { uint64_t x51 = (((((((((uint64_t)x33 * x17) + ((uint64_t)x35 * x15)) + ((uint64_t)x31 * x19)) + ((uint64_t)x37 * x13)) + ((uint64_t)x29 * x21)) + ((uint64_t)x39 * x11)) + ((uint64_t)x27 * x20)) + ((uint64_t)x38 * x9)); + { uint64_t x52 = (((((uint64_t)x35 * x17) + (0x2 * (((((uint64_t)x33 * x19) + ((uint64_t)x37 * x15)) + ((uint64_t)x29 * x20)) + ((uint64_t)x38 * x11)))) + ((uint64_t)x31 * x21)) + ((uint64_t)x39 * x13)); + { uint64_t x53 = (((((((uint64_t)x35 * x19) + ((uint64_t)x37 * x17)) + ((uint64_t)x33 * x21)) + ((uint64_t)x39 * x15)) + ((uint64_t)x31 * x20)) + ((uint64_t)x38 * x13)); + { uint64_t x54 = (((0x2 * ((((uint64_t)x37 * x19) + ((uint64_t)x33 * x20)) + ((uint64_t)x38 * x15))) + ((uint64_t)x35 * x21)) + ((uint64_t)x39 * x17)); + { uint64_t x55 = (((((uint64_t)x37 * x21) + ((uint64_t)x39 * x19)) + ((uint64_t)x35 * x20)) + ((uint64_t)x38 * x17)); + { uint64_t x56 = (((uint64_t)x39 * x21) + (0x2 * (((uint64_t)x37 * x20) + ((uint64_t)x38 * x19)))); + { uint64_t x57 = (((uint64_t)x39 * x20) + ((uint64_t)x38 * x21)); + { uint64_t x58 = ((uint64_t)(0x2 * x38) * x20); + { uint64_t x59 = (x48 + (x58 << 0x4)); + { uint64_t x60 = (x59 + (x58 << 0x1)); + { uint64_t x61 = (x60 + x58); + { uint64_t x62 = (x47 + (x57 << 0x4)); + { uint64_t x63 = (x62 + (x57 << 0x1)); + { uint64_t x64 = (x63 + x57); + { uint64_t x65 = (x46 + (x56 << 0x4)); + { uint64_t x66 = (x65 + (x56 << 0x1)); + { uint64_t x67 = (x66 + x56); + { uint64_t x68 = (x45 + (x55 << 0x4)); + { uint64_t x69 = (x68 + (x55 << 0x1)); + { uint64_t x70 = (x69 + x55); + { uint64_t x71 = (x44 + (x54 << 0x4)); + { uint64_t x72 = (x71 + (x54 << 0x1)); + { uint64_t x73 = (x72 + x54); + { uint64_t x74 = (x43 + (x53 << 0x4)); + { uint64_t x75 = (x74 + (x53 << 0x1)); + { uint64_t x76 = (x75 + x53); + { uint64_t x77 = (x42 + (x52 << 0x4)); + { uint64_t x78 = (x77 + (x52 << 0x1)); + { uint64_t x79 = (x78 + x52); + { uint64_t x80 = (x41 + (x51 << 0x4)); + { uint64_t x81 = (x80 + (x51 << 0x1)); + { uint64_t x82 = (x81 + x51); + { uint64_t x83 = (x40 + (x50 << 0x4)); + { uint64_t x84 = (x83 + (x50 << 0x1)); + { uint64_t x85 = (x84 + x50); + { uint64_t x86 = (x85 >> 0x1a); + { uint32_t x87 = ((uint32_t)x85 & 0x3ffffff); + { uint64_t x88 = (x86 + x82); + { uint64_t x89 = (x88 >> 0x19); + { uint32_t x90 = ((uint32_t)x88 & 0x1ffffff); + { uint64_t x91 = (x89 + x79); + { uint64_t x92 = (x91 >> 0x1a); + { uint32_t x93 = ((uint32_t)x91 & 0x3ffffff); + { uint64_t x94 = (x92 + x76); + { uint64_t x95 = (x94 >> 0x19); + { uint32_t x96 = ((uint32_t)x94 & 0x1ffffff); + { uint64_t x97 = (x95 + x73); + { uint64_t x98 = (x97 >> 0x1a); + { uint32_t x99 = ((uint32_t)x97 & 0x3ffffff); + { uint64_t x100 = (x98 + 
x70); + { uint64_t x101 = (x100 >> 0x19); + { uint32_t x102 = ((uint32_t)x100 & 0x1ffffff); + { uint64_t x103 = (x101 + x67); + { uint64_t x104 = (x103 >> 0x1a); + { uint32_t x105 = ((uint32_t)x103 & 0x3ffffff); + { uint64_t x106 = (x104 + x64); + { uint64_t x107 = (x106 >> 0x19); + { uint32_t x108 = ((uint32_t)x106 & 0x1ffffff); + { uint64_t x109 = (x107 + x61); + { uint64_t x110 = (x109 >> 0x1a); + { uint32_t x111 = ((uint32_t)x109 & 0x3ffffff); + { uint64_t x112 = (x110 + x49); + { uint64_t x113 = (x112 >> 0x19); + { uint32_t x114 = ((uint32_t)x112 & 0x1ffffff); + { uint64_t x115 = (x87 + (0x13 * x113)); + { uint32_t x116 = (uint32_t) (x115 >> 0x1a); + { uint32_t x117 = ((uint32_t)x115 & 0x3ffffff); + { uint32_t x118 = (x116 + x90); + { uint32_t x119 = (x118 >> 0x19); + { uint32_t x120 = (x118 & 0x1ffffff); + out[0] = x117; + out[1] = x120; + out[2] = (x119 + x93); + out[3] = x96; + out[4] = x99; + out[5] = x102; + out[6] = x105; + out[7] = x108; + out[8] = x111; + out[9] = x114; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} + assert_fe(out); +} + +static void fe_sq2_tt(fe *h, const fe *f) { + fe_sq2_impl(h->v, f->v); +} + +static void fe_pow22523(fe *out, const fe *z) { + fe t0; + fe t1; + fe t2; + int i; + + fe_sq_tt(&t0, z); + fe_sq_tt(&t1, &t0); + for (i = 1; i < 2; ++i) { + fe_sq_tt(&t1, &t1); + } + fe_mul_ttt(&t1, z, &t1); + fe_mul_ttt(&t0, &t0, &t1); + fe_sq_tt(&t0, &t0); + fe_mul_ttt(&t0, &t1, &t0); + fe_sq_tt(&t1, &t0); + for (i = 1; i < 5; ++i) { + fe_sq_tt(&t1, &t1); + } + fe_mul_ttt(&t0, &t1, &t0); + fe_sq_tt(&t1, &t0); + for (i = 1; i < 10; ++i) { + fe_sq_tt(&t1, &t1); + } + fe_mul_ttt(&t1, &t1, &t0); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 20; ++i) { + fe_sq_tt(&t2, &t2); + } + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t1, &t1); + for (i = 1; i < 10; ++i) { + fe_sq_tt(&t1, &t1); + } + fe_mul_ttt(&t0, &t1, &t0); + fe_sq_tt(&t1, &t0); + for (i = 1; i < 50; ++i) { + fe_sq_tt(&t1, &t1); + } + fe_mul_ttt(&t1, &t1, &t0); + fe_sq_tt(&t2, &t1); + for (i = 1; i < 100; ++i) { + fe_sq_tt(&t2, &t2); + } + fe_mul_ttt(&t1, &t2, &t1); + fe_sq_tt(&t1, &t1); + for (i = 1; i < 50; ++i) { + fe_sq_tt(&t1, &t1); + } + fe_mul_ttt(&t0, &t1, &t0); + fe_sq_tt(&t0, &t0); + for (i = 1; i < 2; ++i) { + fe_sq_tt(&t0, &t0); + } + fe_mul_ttt(out, &t0, z); +} + +void x25519_ge_tobytes(uint8_t *s, const ge_p2 *h) { + fe recip; + fe x; + fe y; + + fe_invert(&recip, &h->Z); + fe_mul_ttt(&x, &h->X, &recip); + fe_mul_ttt(&y, &h->Y, &recip); + fe_tobytes(s, &y); + s[31] ^= fe_isnegative(&x) << 7; +} + +static void ge_p3_tobytes(uint8_t *s, const ge_p3 *h) { + fe recip; + fe x; + fe y; + + fe_invert(&recip, &h->Z); + fe_mul_ttt(&x, &h->X, &recip); + fe_mul_ttt(&y, &h->Y, &recip); + fe_tobytes(s, &y); + s[31] ^= fe_isnegative(&x) << 7; +} + +static const fe d = {{56195235, 13857412, 51736253, 6949390, 114729, + 24766616, 60832955, 30306712, 48412415, 21499315}}; + +static const fe sqrtm1 = {{34513072, 25610706, 9377949, 3500415, 12389472, + 33281959, 41962654, 31548777, 326685, 11406482}}; + +int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t *s) { + fe u; + fe_loose v; + fe v3; + fe vxx; + fe_loose check; + + fe_frombytes(&h->Y, s); + fe_1(&h->Z); + fe_sq_tt(&v3, &h->Y); + fe_mul_ttt(&vxx, &v3, &d); + fe_sub(&v, &v3, &h->Z); // u = y^2-1 + fe_carry(&u, &v); + fe_add(&v, &vxx, &h->Z); // v = dy^2+1 + + fe_sq_tl(&v3, &v); + fe_mul_ttl(&v3, &v3, &v); // v3 = v^3 + fe_sq_tt(&h->X, &v3); + fe_mul_ttl(&h->X, &h->X, &v); + fe_mul_ttt(&h->X, &h->X, &u); // x = 
uv^7 + + fe_pow22523(&h->X, &h->X); // x = (uv^7)^((q-5)/8) + fe_mul_ttt(&h->X, &h->X, &v3); + fe_mul_ttt(&h->X, &h->X, &u); // x = uv^3(uv^7)^((q-5)/8) + + fe_sq_tt(&vxx, &h->X); + fe_mul_ttl(&vxx, &vxx, &v); + fe_sub(&check, &vxx, &u); + if (fe_isnonzero(&check)) { + fe_add(&check, &vxx, &u); + if (fe_isnonzero(&check)) { + return -1; + } + fe_mul_ttt(&h->X, &h->X, &sqrtm1); + } + + if (fe_isnegative(&h->X) != (s[31] >> 7)) { + fe_loose t; + fe_neg(&t, &h->X); + fe_carry(&h->X, &t); + } + + fe_mul_ttt(&h->T, &h->X, &h->Y); + return 0; +} + +static void ge_p2_0(ge_p2 *h) { + fe_0(&h->X); + fe_1(&h->Y); + fe_1(&h->Z); +} + +static void ge_p3_0(ge_p3 *h) { + fe_0(&h->X); + fe_1(&h->Y); + fe_1(&h->Z); + fe_0(&h->T); +} + +static void ge_cached_0(ge_cached *h) { + fe_loose_1(&h->YplusX); + fe_loose_1(&h->YminusX); + fe_loose_1(&h->Z); + fe_loose_0(&h->T2d); +} + +static void ge_precomp_0(ge_precomp *h) { + fe_loose_1(&h->yplusx); + fe_loose_1(&h->yminusx); + fe_loose_0(&h->xy2d); +} + +// r = p +static void ge_p3_to_p2(ge_p2 *r, const ge_p3 *p) { + fe_copy(&r->X, &p->X); + fe_copy(&r->Y, &p->Y); + fe_copy(&r->Z, &p->Z); +} + +static const fe d2 = {{45281625, 27714825, 36363642, 13898781, 229458, + 15978800, 54557047, 27058993, 29715967, 9444199}}; + +// r = p +void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p) { + fe_add(&r->YplusX, &p->Y, &p->X); + fe_sub(&r->YminusX, &p->Y, &p->X); + fe_copy_lt(&r->Z, &p->Z); + fe_mul_ltt(&r->T2d, &p->T, &d2); +} + +// r = p +void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p) { + fe_mul_tll(&r->X, &p->X, &p->T); + fe_mul_tll(&r->Y, &p->Y, &p->Z); + fe_mul_tll(&r->Z, &p->Z, &p->T); +} + +// r = p +void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p) { + fe_mul_tll(&r->X, &p->X, &p->T); + fe_mul_tll(&r->Y, &p->Y, &p->Z); + fe_mul_tll(&r->Z, &p->Z, &p->T); + fe_mul_tll(&r->T, &p->X, &p->Y); +} + +// r = p +static void ge_p1p1_to_cached(ge_cached *r, const ge_p1p1 *p) { + ge_p3 t; + x25519_ge_p1p1_to_p3(&t, p); + x25519_ge_p3_to_cached(r, &t); +} + +// r = 2 * p +static void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p) { + fe trX, trZ, trT; + fe t0; + + fe_sq_tt(&trX, &p->X); + fe_sq_tt(&trZ, &p->Y); + fe_sq2_tt(&trT, &p->Z); + fe_add(&r->Y, &p->X, &p->Y); + fe_sq_tl(&t0, &r->Y); + + fe_add(&r->Y, &trZ, &trX); + fe_sub(&r->Z, &trZ, &trX); + fe_carry(&trZ, &r->Y); + fe_sub(&r->X, &t0, &trZ); + fe_carry(&trZ, &r->Z); + fe_sub(&r->T, &trT, &trZ); +} + +// r = 2 * p +static void ge_p3_dbl(ge_p1p1 *r, const ge_p3 *p) { + ge_p2 q; + ge_p3_to_p2(&q, p); + ge_p2_dbl(r, &q); +} + +// r = p + q +static void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) { + fe trY, trZ, trT; + + fe_add(&r->X, &p->Y, &p->X); + fe_sub(&r->Y, &p->Y, &p->X); + fe_mul_tll(&trZ, &r->X, &q->yplusx); + fe_mul_tll(&trY, &r->Y, &q->yminusx); + fe_mul_tlt(&trT, &q->xy2d, &p->T); + fe_add(&r->T, &p->Z, &p->Z); + fe_sub(&r->X, &trZ, &trY); + fe_add(&r->Y, &trZ, &trY); + fe_carry(&trZ, &r->T); + fe_add(&r->Z, &trZ, &trT); + fe_sub(&r->T, &trZ, &trT); +} + +// r = p - q +static void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) { + fe trY, trZ, trT; + + fe_add(&r->X, &p->Y, &p->X); + fe_sub(&r->Y, &p->Y, &p->X); + fe_mul_tll(&trZ, &r->X, &q->yminusx); + fe_mul_tll(&trY, &r->Y, &q->yplusx); + fe_mul_tlt(&trT, &q->xy2d, &p->T); + fe_add(&r->T, &p->Z, &p->Z); + fe_sub(&r->X, &trZ, &trY); + fe_add(&r->Y, &trZ, &trY); + fe_carry(&trZ, &r->T); + fe_sub(&r->Z, &trZ, &trT); + fe_add(&r->T, &trZ, &trT); +} + +// r = p + q +void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached 
*q) { + fe trX, trY, trZ, trT; + + fe_add(&r->X, &p->Y, &p->X); + fe_sub(&r->Y, &p->Y, &p->X); + fe_mul_tll(&trZ, &r->X, &q->YplusX); + fe_mul_tll(&trY, &r->Y, &q->YminusX); + fe_mul_tlt(&trT, &q->T2d, &p->T); + fe_mul_ttl(&trX, &p->Z, &q->Z); + fe_add(&r->T, &trX, &trX); + fe_sub(&r->X, &trZ, &trY); + fe_add(&r->Y, &trZ, &trY); + fe_carry(&trZ, &r->T); + fe_add(&r->Z, &trZ, &trT); + fe_sub(&r->T, &trZ, &trT); +} + +// r = p - q +void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) { + fe trX, trY, trZ, trT; + + fe_add(&r->X, &p->Y, &p->X); + fe_sub(&r->Y, &p->Y, &p->X); + fe_mul_tll(&trZ, &r->X, &q->YminusX); + fe_mul_tll(&trY, &r->Y, &q->YplusX); + fe_mul_tlt(&trT, &q->T2d, &p->T); + fe_mul_ttl(&trX, &p->Z, &q->Z); + fe_add(&r->T, &trX, &trX); + fe_sub(&r->X, &trZ, &trY); + fe_add(&r->Y, &trZ, &trY); + fe_carry(&trZ, &r->T); + fe_sub(&r->Z, &trZ, &trT); + fe_add(&r->T, &trZ, &trT); +} + +static uint8_t equal(signed char b, signed char c) { + uint8_t ub = b; + uint8_t uc = c; + uint8_t x = ub ^ uc; // 0: yes; 1..255: no + uint32_t y = x; // 0: yes; 1..255: no + y -= 1; // 4294967295: yes; 0..254: no + y >>= 31; // 1: yes; 0: no + return y; +} + +static void cmov(ge_precomp *t, const ge_precomp *u, uint8_t b) { + fe_cmov(&t->yplusx, &u->yplusx, b); + fe_cmov(&t->yminusx, &u->yminusx, b); + fe_cmov(&t->xy2d, &u->xy2d, b); +} + +void x25519_ge_scalarmult_small_precomp( + ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]) { + // precomp_table is first expanded into matching |ge_precomp| + // elements. + ge_precomp multiples[15]; + + unsigned i; + for (i = 0; i < 15; i++) { + const uint8_t *bytes = &precomp_table[i*(2 * 32)]; + fe x, y; + fe_frombytes(&x, bytes); + fe_frombytes(&y, bytes + 32); + + ge_precomp *out = &multiples[i]; + fe_add(&out->yplusx, &y, &x); + fe_sub(&out->yminusx, &y, &x); + fe_mul_ltt(&out->xy2d, &x, &y); + fe_mul_llt(&out->xy2d, &out->xy2d, &d2); + } + + // See the comment above |k25519SmallPrecomp| about the structure of the + // precomputed elements. This loop does 64 additions and 64 doublings to + // calculate the result. + ge_p3_0(h); + + for (i = 63; i < 64; i--) { + unsigned j; + signed char index = 0; + + for (j = 0; j < 4; j++) { + const uint8_t bit = 1 & (a[(8 * j) + (i / 8)] >> (i & 7)); + index |= (bit << j); + } + + ge_precomp e; + ge_precomp_0(&e); + + for (j = 1; j < 16; j++) { + cmov(&e, &multiples[j-1], equal(index, j)); + } + + ge_cached cached; + ge_p1p1 r; + x25519_ge_p3_to_cached(&cached, h); + x25519_ge_add(&r, h, &cached); + x25519_ge_p1p1_to_p3(h, &r); + + ge_madd(&r, h, &e); + x25519_ge_p1p1_to_p3(h, &r); + } +} + +#if defined(OPENSSL_SMALL) + +// This block of code replaces the standard base-point table with a much smaller +// one. The standard table is 30,720 bytes while this one is just 960. +// +// This table contains 15 pairs of group elements, (x, y), where each field +// element is serialised with |fe_tobytes|. If |i| is the index of the group +// element then consider i+1 as a four-bit number: (i₀, i₁, i₂, i₃) (where i₀ +// is the most significant bit). The value of the group element is then: +// (i₀×2^192 + i₁×2^128 + i₂×2^64 + i₃)G, where G is the generator. 
+static const uint8_t k25519SmallPrecomp[15 * 2 * 32] = { + 0x1a, 0xd5, 0x25, 0x8f, 0x60, 0x2d, 0x56, 0xc9, 0xb2, 0xa7, 0x25, 0x95, + 0x60, 0xc7, 0x2c, 0x69, 0x5c, 0xdc, 0xd6, 0xfd, 0x31, 0xe2, 0xa4, 0xc0, + 0xfe, 0x53, 0x6e, 0xcd, 0xd3, 0x36, 0x69, 0x21, 0x58, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x02, 0xa2, 0xed, 0xf4, 0x8f, 0x6b, 0x0b, 0x3e, + 0xeb, 0x35, 0x1a, 0xd5, 0x7e, 0xdb, 0x78, 0x00, 0x96, 0x8a, 0xa0, 0xb4, + 0xcf, 0x60, 0x4b, 0xd4, 0xd5, 0xf9, 0x2d, 0xbf, 0x88, 0xbd, 0x22, 0x62, + 0x13, 0x53, 0xe4, 0x82, 0x57, 0xfa, 0x1e, 0x8f, 0x06, 0x2b, 0x90, 0xba, + 0x08, 0xb6, 0x10, 0x54, 0x4f, 0x7c, 0x1b, 0x26, 0xed, 0xda, 0x6b, 0xdd, + 0x25, 0xd0, 0x4e, 0xea, 0x42, 0xbb, 0x25, 0x03, 0xa2, 0xfb, 0xcc, 0x61, + 0x67, 0x06, 0x70, 0x1a, 0xc4, 0x78, 0x3a, 0xff, 0x32, 0x62, 0xdd, 0x2c, + 0xab, 0x50, 0x19, 0x3b, 0xf2, 0x9b, 0x7d, 0xb8, 0xfd, 0x4f, 0x29, 0x9c, + 0xa7, 0x91, 0xba, 0x0e, 0x46, 0x5e, 0x51, 0xfe, 0x1d, 0xbf, 0xe5, 0xe5, + 0x9b, 0x95, 0x0d, 0x67, 0xf8, 0xd1, 0xb5, 0x5a, 0xa1, 0x93, 0x2c, 0xc3, + 0xde, 0x0e, 0x97, 0x85, 0x2d, 0x7f, 0xea, 0xab, 0x3e, 0x47, 0x30, 0x18, + 0x24, 0xe8, 0xb7, 0x60, 0xae, 0x47, 0x80, 0xfc, 0xe5, 0x23, 0xe7, 0xc2, + 0xc9, 0x85, 0xe6, 0x98, 0xa0, 0x29, 0x4e, 0xe1, 0x84, 0x39, 0x2d, 0x95, + 0x2c, 0xf3, 0x45, 0x3c, 0xff, 0xaf, 0x27, 0x4c, 0x6b, 0xa6, 0xf5, 0x4b, + 0x11, 0xbd, 0xba, 0x5b, 0x9e, 0xc4, 0xa4, 0x51, 0x1e, 0xbe, 0xd0, 0x90, + 0x3a, 0x9c, 0xc2, 0x26, 0xb6, 0x1e, 0xf1, 0x95, 0x7d, 0xc8, 0x6d, 0x52, + 0xe6, 0x99, 0x2c, 0x5f, 0x9a, 0x96, 0x0c, 0x68, 0x29, 0xfd, 0xe2, 0xfb, + 0xe6, 0xbc, 0xec, 0x31, 0x08, 0xec, 0xe6, 0xb0, 0x53, 0x60, 0xc3, 0x8c, + 0xbe, 0xc1, 0xb3, 0x8a, 0x8f, 0xe4, 0x88, 0x2b, 0x55, 0xe5, 0x64, 0x6e, + 0x9b, 0xd0, 0xaf, 0x7b, 0x64, 0x2a, 0x35, 0x25, 0x10, 0x52, 0xc5, 0x9e, + 0x58, 0x11, 0x39, 0x36, 0x45, 0x51, 0xb8, 0x39, 0x93, 0xfc, 0x9d, 0x6a, + 0xbe, 0x58, 0xcb, 0xa4, 0x0f, 0x51, 0x3c, 0x38, 0x05, 0xca, 0xab, 0x43, + 0x63, 0x0e, 0xf3, 0x8b, 0x41, 0xa6, 0xf8, 0x9b, 0x53, 0x70, 0x80, 0x53, + 0x86, 0x5e, 0x8f, 0xe3, 0xc3, 0x0d, 0x18, 0xc8, 0x4b, 0x34, 0x1f, 0xd8, + 0x1d, 0xbc, 0xf2, 0x6d, 0x34, 0x3a, 0xbe, 0xdf, 0xd9, 0xf6, 0xf3, 0x89, + 0xa1, 0xe1, 0x94, 0x9f, 0x5d, 0x4c, 0x5d, 0xe9, 0xa1, 0x49, 0x92, 0xef, + 0x0e, 0x53, 0x81, 0x89, 0x58, 0x87, 0xa6, 0x37, 0xf1, 0xdd, 0x62, 0x60, + 0x63, 0x5a, 0x9d, 0x1b, 0x8c, 0xc6, 0x7d, 0x52, 0xea, 0x70, 0x09, 0x6a, + 0xe1, 0x32, 0xf3, 0x73, 0x21, 0x1f, 0x07, 0x7b, 0x7c, 0x9b, 0x49, 0xd8, + 0xc0, 0xf3, 0x25, 0x72, 0x6f, 0x9d, 0xed, 0x31, 0x67, 0x36, 0x36, 0x54, + 0x40, 0x92, 0x71, 0xe6, 0x11, 0x28, 0x11, 0xad, 0x93, 0x32, 0x85, 0x7b, + 0x3e, 0xb7, 0x3b, 0x49, 0x13, 0x1c, 0x07, 0xb0, 0x2e, 0x93, 0xaa, 0xfd, + 0xfd, 0x28, 0x47, 0x3d, 0x8d, 0xd2, 0xda, 0xc7, 0x44, 0xd6, 0x7a, 0xdb, + 0x26, 0x7d, 0x1d, 0xb8, 0xe1, 0xde, 0x9d, 0x7a, 0x7d, 0x17, 0x7e, 0x1c, + 0x37, 0x04, 0x8d, 0x2d, 0x7c, 0x5e, 0x18, 0x38, 0x1e, 0xaf, 0xc7, 0x1b, + 0x33, 0x48, 0x31, 0x00, 0x59, 0xf6, 0xf2, 0xca, 0x0f, 0x27, 0x1b, 0x63, + 0x12, 0x7e, 0x02, 0x1d, 0x49, 0xc0, 0x5d, 0x79, 0x87, 0xef, 0x5e, 0x7a, + 0x2f, 0x1f, 0x66, 0x55, 0xd8, 0x09, 0xd9, 0x61, 0x38, 0x68, 0xb0, 0x07, + 0xa3, 0xfc, 0xcc, 0x85, 0x10, 0x7f, 0x4c, 0x65, 0x65, 0xb3, 0xfa, 0xfa, + 0xa5, 0x53, 0x6f, 0xdb, 0x74, 0x4c, 0x56, 0x46, 0x03, 0xe2, 0xd5, 0x7a, + 0x29, 0x1c, 0xc6, 0x02, 0xbc, 0x59, 0xf2, 0x04, 0x75, 0x63, 0xc0, 0x84, + 0x2f, 0x60, 0x1c, 0x67, 0x76, 0xfd, 0x63, 0x86, 0xf3, 0xfa, 0xbf, 0xdc, + 0xd2, 0x2d, 
0x90, 0x91, 0xbd, 0x33, 0xa9, 0xe5, 0x66, 0x0c, 0xda, 0x42, + 0x27, 0xca, 0xf4, 0x66, 0xc2, 0xec, 0x92, 0x14, 0x57, 0x06, 0x63, 0xd0, + 0x4d, 0x15, 0x06, 0xeb, 0x69, 0x58, 0x4f, 0x77, 0xc5, 0x8b, 0xc7, 0xf0, + 0x8e, 0xed, 0x64, 0xa0, 0xb3, 0x3c, 0x66, 0x71, 0xc6, 0x2d, 0xda, 0x0a, + 0x0d, 0xfe, 0x70, 0x27, 0x64, 0xf8, 0x27, 0xfa, 0xf6, 0x5f, 0x30, 0xa5, + 0x0d, 0x6c, 0xda, 0xf2, 0x62, 0x5e, 0x78, 0x47, 0xd3, 0x66, 0x00, 0x1c, + 0xfd, 0x56, 0x1f, 0x5d, 0x3f, 0x6f, 0xf4, 0x4c, 0xd8, 0xfd, 0x0e, 0x27, + 0xc9, 0x5c, 0x2b, 0xbc, 0xc0, 0xa4, 0xe7, 0x23, 0x29, 0x02, 0x9f, 0x31, + 0xd6, 0xe9, 0xd7, 0x96, 0xf4, 0xe0, 0x5e, 0x0b, 0x0e, 0x13, 0xee, 0x3c, + 0x09, 0xed, 0xf2, 0x3d, 0x76, 0x91, 0xc3, 0xa4, 0x97, 0xae, 0xd4, 0x87, + 0xd0, 0x5d, 0xf6, 0x18, 0x47, 0x1f, 0x1d, 0x67, 0xf2, 0xcf, 0x63, 0xa0, + 0x91, 0x27, 0xf8, 0x93, 0x45, 0x75, 0x23, 0x3f, 0xd1, 0xf1, 0xad, 0x23, + 0xdd, 0x64, 0x93, 0x96, 0x41, 0x70, 0x7f, 0xf7, 0xf5, 0xa9, 0x89, 0xa2, + 0x34, 0xb0, 0x8d, 0x1b, 0xae, 0x19, 0x15, 0x49, 0x58, 0x23, 0x6d, 0x87, + 0x15, 0x4f, 0x81, 0x76, 0xfb, 0x23, 0xb5, 0xea, 0xcf, 0xac, 0x54, 0x8d, + 0x4e, 0x42, 0x2f, 0xeb, 0x0f, 0x63, 0xdb, 0x68, 0x37, 0xa8, 0xcf, 0x8b, + 0xab, 0xf5, 0xa4, 0x6e, 0x96, 0x2a, 0xb2, 0xd6, 0xbe, 0x9e, 0xbd, 0x0d, + 0xb4, 0x42, 0xa9, 0xcf, 0x01, 0x83, 0x8a, 0x17, 0x47, 0x76, 0xc4, 0xc6, + 0x83, 0x04, 0x95, 0x0b, 0xfc, 0x11, 0xc9, 0x62, 0xb8, 0x0c, 0x76, 0x84, + 0xd9, 0xb9, 0x37, 0xfa, 0xfc, 0x7c, 0xc2, 0x6d, 0x58, 0x3e, 0xb3, 0x04, + 0xbb, 0x8c, 0x8f, 0x48, 0xbc, 0x91, 0x27, 0xcc, 0xf9, 0xb7, 0x22, 0x19, + 0x83, 0x2e, 0x09, 0xb5, 0x72, 0xd9, 0x54, 0x1c, 0x4d, 0xa1, 0xea, 0x0b, + 0xf1, 0xc6, 0x08, 0x72, 0x46, 0x87, 0x7a, 0x6e, 0x80, 0x56, 0x0a, 0x8a, + 0xc0, 0xdd, 0x11, 0x6b, 0xd6, 0xdd, 0x47, 0xdf, 0x10, 0xd9, 0xd8, 0xea, + 0x7c, 0xb0, 0x8f, 0x03, 0x00, 0x2e, 0xc1, 0x8f, 0x44, 0xa8, 0xd3, 0x30, + 0x06, 0x89, 0xa2, 0xf9, 0x34, 0xad, 0xdc, 0x03, 0x85, 0xed, 0x51, 0xa7, + 0x82, 0x9c, 0xe7, 0x5d, 0x52, 0x93, 0x0c, 0x32, 0x9a, 0x5b, 0xe1, 0xaa, + 0xca, 0xb8, 0x02, 0x6d, 0x3a, 0xd4, 0xb1, 0x3a, 0xf0, 0x5f, 0xbe, 0xb5, + 0x0d, 0x10, 0x6b, 0x38, 0x32, 0xac, 0x76, 0x80, 0xbd, 0xca, 0x94, 0x71, + 0x7a, 0xf2, 0xc9, 0x35, 0x2a, 0xde, 0x9f, 0x42, 0x49, 0x18, 0x01, 0xab, + 0xbc, 0xef, 0x7c, 0x64, 0x3f, 0x58, 0x3d, 0x92, 0x59, 0xdb, 0x13, 0xdb, + 0x58, 0x6e, 0x0a, 0xe0, 0xb7, 0x91, 0x4a, 0x08, 0x20, 0xd6, 0x2e, 0x3c, + 0x45, 0xc9, 0x8b, 0x17, 0x79, 0xe7, 0xc7, 0x90, 0x99, 0x3a, 0x18, 0x25, +}; + +void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) { + x25519_ge_scalarmult_small_precomp(h, a, k25519SmallPrecomp); +} + +#else + +// k25519Precomp[i][j] = (j+1)*256^i*B +static const ge_precomp k25519Precomp[32][8] = { + { + { + {{25967493, 19198397, 29566455, 3660896, 54414519, 4014786, + 27544626, 21800161, 61029707, 2047604}}, + {{54563134, 934261, 64385954, 3049989, 66381436, 9406985, 12720692, + 5043384, 19500929, 18085054}}, + {{58370664, 4489569, 9688441, 18769238, 10184608, 21191052, + 29287918, 11864899, 42594502, 29115885}}, + }, + { + {{54292951, 20578084, 45527620, 11784319, 41753206, 30803714, + 55390960, 29739860, 66750418, 23343128}}, + {{45405608, 6903824, 27185491, 6451973, 37531140, 24000426, + 51492312, 11189267, 40279186, 28235350}}, + {{26966623, 11152617, 32442495, 15396054, 14353839, 20802097, + 63980037, 24013313, 51636816, 29387734}}, + }, + { + {{15636272, 23865875, 24204772, 25642034, 616976, 16869170, + 27787599, 18782243, 28944399, 32004408}}, + {{16568933, 4717097, 55552716, 32452109, 15682895, 21747389, + 16354576, 21778470, 7689661, 11199574}}, + {{30464137, 
27578307, 55329429, 17883566, 23220364, 15915852, + 7512774, 10017326, 49359771, 23634074}}, + }, + { + {{50071967, 13921891, 10945806, 27521001, 27105051, 17470053, + 38182653, 15006022, 3284568, 27277892}}, + {{23599295, 25248385, 55915199, 25867015, 13236773, 10506355, + 7464579, 9656445, 13059162, 10374397}}, + {{7798537, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, + 29715387, 66467155, 33453106}}, + }, + { + {{10861363, 11473154, 27284546, 1981175, 37044515, 12577860, + 32867885, 14515107, 51670560, 10819379}}, + {{4708026, 6336745, 20377586, 9066809, 55836755, 6594695, 41455196, + 12483687, 54440373, 5581305}}, + {{19563141, 16186464, 37722007, 4097518, 10237984, 29206317, + 28542349, 13850243, 43430843, 17738489}}, + }, + { + {{51736881, 20691677, 32573249, 4720197, 40672342, 5875510, + 47920237, 18329612, 57289923, 21468654}}, + {{58559652, 109982, 15149363, 2178705, 22900618, 4543417, 3044240, + 17864545, 1762327, 14866737}}, + {{48909169, 17603008, 56635573, 1707277, 49922944, 3916100, + 38872452, 3959420, 27914454, 4383652}}, + }, + { + {{5153727, 9909285, 1723747, 30776558, 30523604, 5516873, 19480852, + 5230134, 43156425, 18378665}}, + {{36839857, 30090922, 7665485, 10083793, 28475525, 1649722, + 20654025, 16520125, 30598449, 7715701}}, + {{28881826, 14381568, 9657904, 3680757, 46927229, 7843315, + 35708204, 1370707, 29794553, 32145132}}, + }, + { + {{14499471, 30824833, 33917750, 29299779, 28494861, 14271267, + 30290735, 10876454, 33954766, 2381725}}, + {{59913433, 30899068, 52378708, 462250, 39384538, 3941371, + 60872247, 3696004, 34808032, 15351954}}, + {{27431194, 8222322, 16448760, 29646437, 48401861, 11938354, + 34147463, 30583916, 29551812, 10109425}}, + }, + }, + { + { + {{53451805, 20399000, 35825113, 11777097, 21447386, 6519384, + 64730580, 31926875, 10092782, 28790261}}, + {{27939166, 14210322, 4677035, 16277044, 44144402, 21156292, + 34600109, 12005537, 49298737, 12803509}}, + {{17228999, 17892808, 65875336, 300139, 65883994, 21839654, + 30364212, 24516238, 18016356, 4397660}}, + }, + { + {{56150021, 25864224, 4776340, 18600194, 27850027, 17952220, + 40489757, 14544524, 49631360, 982638}}, + {{29253598, 15796703, 64244882, 23645547, 10057022, 3163536, 7332899, + 29434304, 46061167, 9934962}}, + {{5793284, 16271923, 42977250, 23438027, 29188559, 1206517, + 52360934, 4559894, 36984942, 22656481}}, + }, + { + {{39464912, 22061425, 16282656, 22517939, 28414020, 18542168, + 24191033, 4541697, 53770555, 5500567}}, + {{12650548, 32057319, 9052870, 11355358, 49428827, 25154267, + 49678271, 12264342, 10874051, 13524335}}, + {{25556948, 30508442, 714650, 2510400, 23394682, 23139102, 33119037, + 5080568, 44580805, 5376627}}, + }, + { + {{41020600, 29543379, 50095164, 30016803, 60382070, 1920896, + 44787559, 24106988, 4535767, 1569007}}, + {{64853442, 14606629, 45416424, 25514613, 28430648, 8775819, + 36614302, 3044289, 31848280, 12543772}}, + {{45080285, 2943892, 35251351, 6777305, 13784462, 29262229, + 39731668, 31491700, 7718481, 14474653}}, + }, + { + {{2385296, 2454213, 44477544, 46602, 62670929, 17874016, 656964, + 26317767, 24316167, 28300865}}, + {{13741529, 10911568, 33875447, 24950694, 46931033, 32521134, + 33040650, 20129900, 46379407, 8321685}}, + {{21060490, 31341688, 15712756, 29218333, 1639039, 10656336, + 23845965, 21679594, 57124405, 608371}}, + }, + { + {{53436132, 18466845, 56219170, 25997372, 61071954, 11305546, + 1123968, 26773855, 27229398, 23887}}, + {{43864724, 33260226, 55364135, 14712570, 37643165, 31524814, + 12797023, 27114124, 
65475458, 16678953}}, + {{37608244, 4770661, 51054477, 14001337, 7830047, 9564805, + 65600720, 28759386, 49939598, 4904952}}, + }, + { + {{24059538, 14617003, 19037157, 18514524, 19766092, 18648003, + 5169210, 16191880, 2128236, 29227599}}, + {{50127693, 4124965, 58568254, 22900634, 30336521, 19449185, + 37302527, 916032, 60226322, 30567899}}, + {{44477957, 12419371, 59974635, 26081060, 50629959, 16739174, + 285431, 2763829, 15736322, 4143876}}, + }, + { + {{2379333, 11839345, 62998462, 27565766, 11274297, 794957, 212801, + 18959769, 23527083, 17096164}}, + {{33431108, 22423954, 49269897, 17927531, 8909498, 8376530, + 34483524, 4087880, 51919953, 19138217}}, + {{1767664, 7197987, 53903638, 31531796, 54017513, 448825, 5799055, + 4357868, 62334673, 17231393}}, + }, + }, + { + { + {{6721966, 13833823, 43585476, 32003117, 26354292, 21691111, + 23365146, 29604700, 7390889, 2759800}}, + {{4409022, 2052381, 23373853, 10530217, 7676779, 20668478, 21302352, + 29290375, 1244379, 20634787}}, + {{62687625, 7169618, 4982368, 30596842, 30256824, 30776892, 14086412, + 9208236, 15886429, 16489664}}, + }, + { + {{1996056, 10375649, 14346367, 13311202, 60234729, 17116020, + 53415665, 398368, 36502409, 32841498}}, + {{41801399, 9795879, 64331450, 14878808, 33577029, 14780362, + 13348553, 12076947, 36272402, 5113181}}, + {{49338080, 11797795, 31950843, 13929123, 41220562, 12288343, + 36767763, 26218045, 13847710, 5387222}}, + }, + { + {{48526701, 30138214, 17824842, 31213466, 22744342, 23111821, + 8763060, 3617786, 47508202, 10370990}}, + {{20246567, 19185054, 22358228, 33010720, 18507282, 23140436, + 14554436, 24808340, 32232923, 16763880}}, + {{9648486, 10094563, 26416693, 14745928, 36734546, 27081810, + 11094160, 15689506, 3140038, 17044340}}, + }, + { + {{50948792, 5472694, 31895588, 4744994, 8823515, 10365685, + 39884064, 9448612, 38334410, 366294}}, + {{19153450, 11523972, 56012374, 27051289, 42461232, 5420646, + 28344573, 8041113, 719605, 11671788}}, + {{8678006, 2694440, 60300850, 2517371, 4964326, 11152271, 51675948, + 18287915, 27000812, 23358879}}, + }, + { + {{51950941, 7134311, 8639287, 30739555, 59873175, 10421741, 564065, + 5336097, 6750977, 19033406}}, + {{11836410, 29574944, 26297893, 16080799, 23455045, 15735944, + 1695823, 24735310, 8169719, 16220347}}, + {{48993007, 8653646, 17578566, 27461813, 59083086, 17541668, + 55964556, 30926767, 61118155, 19388398}}, + }, + { + {{43800366, 22586119, 15213227, 23473218, 36255258, 22504427, + 27884328, 2847284, 2655861, 1738395}}, + {{39571412, 19301410, 41772562, 25551651, 57738101, 8129820, + 21651608, 30315096, 48021414, 22549153}}, + {{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, + 5821408, 10478196, 8544890}}, + }, + { + {{32173102, 17425121, 24896206, 3921497, 22579056, 30143578, + 19270448, 12217473, 17789017, 30158437}}, + {{36555903, 31326030, 51530034, 23407230, 13243888, 517024, + 15479401, 29701199, 30460519, 1052596}}, + {{55493970, 13323617, 32618793, 8175907, 51878691, 12596686, + 27491595, 28942073, 3179267, 24075541}}, + }, + { + {{31947050, 19187781, 62468280, 18214510, 51982886, 27514722, + 52352086, 17142691, 19072639, 24043372}}, + {{11685058, 11822410, 3158003, 19601838, 33402193, 29389366, + 5977895, 28339415, 473098, 5040608}}, + {{46817982, 8198641, 39698732, 11602122, 1290375, 30754672, + 28326861, 1721092, 47550222, 30422825}}, + }, + }, + { + { + {{7881532, 10687937, 7578723, 7738378, 48157852, 31000479, 21820785, + 8076149, 39240368, 11538388}}, + {{47173198, 3899860, 18283497, 26752864, 51380203, 
22305220, + 8754524, 7446702, 61432810, 5797015}}, + {{55813245, 29760862, 51326753, 25589858, 12708868, 25098233, + 2014098, 24503858, 64739691, 27677090}}, + }, + { + {{44636488, 21985690, 39426843, 1146374, 18956691, 16640559, + 1192730, 29840233, 15123618, 10811505}}, + {{14352079, 30134717, 48166819, 10822654, 32750596, 4699007, 67038501, + 15776355, 38222085, 21579878}}, + {{38867681, 25481956, 62129901, 28239114, 29416930, 1847569, + 46454691, 17069576, 4714546, 23953777}}, + }, + { + {{15200332, 8368572, 19679101, 15970074, 35236190, 1959450, + 24611599, 29010600, 55362987, 12340219}}, + {{12876937, 23074376, 33134380, 6590940, 60801088, 14872439, + 9613953, 8241152, 15370987, 9608631}}, + {{62965568, 21540023, 8446280, 33162829, 4407737, 13629032, 59383996, + 15866073, 38898243, 24740332}}, + }, + { + {{26660628, 17876777, 8393733, 358047, 59707573, 992987, 43204631, + 858696, 20571223, 8420556}}, + {{14620696, 13067227, 51661590, 8264466, 14106269, 15080814, + 33531827, 12516406, 45534429, 21077682}}, + {{236881, 10476226, 57258, 18877408, 6472997, 2466984, 17258519, + 7256740, 8791136, 15069930}}, + }, + { + {{1276391, 24182514, 22949634, 17231625, 43615824, 27852245, + 14711874, 4874229, 36445724, 31223040}}, + {{5855666, 4990204, 53397016, 7294283, 59304582, 1924646, 65685689, + 25642053, 34039526, 9234252}}, + {{20590503, 24535444, 31529743, 26201766, 64402029, 10650547, + 31559055, 21944845, 18979185, 13396066}}, + }, + { + {{24474287, 4968103, 22267082, 4407354, 24063882, 25229252, + 48291976, 13594781, 33514650, 7021958}}, + {{55541958, 26988926, 45743778, 15928891, 40950559, 4315420, + 41160136, 29637754, 45628383, 12868081}}, + {{38473832, 13504660, 19988037, 31421671, 21078224, 6443208, + 45662757, 2244499, 54653067, 25465048}}, + }, + { + {{36513336, 13793478, 61256044, 319135, 41385692, 27290532, + 33086545, 8957937, 51875216, 5540520}}, + {{55478669, 22050529, 58989363, 25911358, 2620055, 1022908, + 43398120, 31985447, 50980335, 18591624}}, + {{23152952, 775386, 27395463, 14006635, 57407746, 4649511, 1689819, + 892185, 55595587, 18348483}}, + }, + { + {{9770129, 9586738, 26496094, 4324120, 1556511, 30004408, 27453818, + 4763127, 47929250, 5867133}}, + {{34343820, 1927589, 31726409, 28801137, 23962433, 17534932, + 27846558, 5931263, 37359161, 17445976}}, + {{27461885, 30576896, 22380809, 1815854, 44075111, 30522493, + 7283489, 18406359, 47582163, 7734628}}, + }, + }, + { + { + {{59098600, 23963614, 55988460, 6196037, 29344158, 20123547, + 7585294, 30377806, 18549496, 15302069}}, + {{34450527, 27383209, 59436070, 22502750, 6258877, 13504381, + 10458790, 27135971, 58236621, 8424745}}, + {{24687186, 8613276, 36441818, 30320886, 1863891, 31723888, + 19206233, 7134917, 55824382, 32725512}}, + }, + { + {{11334899, 24336410, 8025292, 12707519, 17523892, 23078361, + 10243737, 18868971, 62042829, 16498836}}, + {{8911542, 6887158, 57524604, 26595841, 11145640, 24010752, 17303924, + 19430194, 6536640, 10543906}}, + {{38162480, 15479762, 49642029, 568875, 65611181, 11223453, + 64439674, 16928857, 39873154, 8876770}}, + }, + { + {{41365946, 20987567, 51458897, 32707824, 34082177, 32758143, + 33627041, 15824473, 66504438, 24514614}}, + {{10330056, 70051, 7957388, 24551765, 9764901, 15609756, 27698697, + 28664395, 1657393, 3084098}}, + {{10477963, 26084172, 12119565, 20303627, 29016246, 28188843, + 31280318, 14396151, 36875289, 15272408}}, + }, + { + {{54820555, 3169462, 28813183, 16658753, 25116432, 27923966, + 41934906, 20918293, 42094106, 1950503}}, + {{40928506, 9489186, 
11053416, 18808271, 36055143, 5825629, + 58724558, 24786899, 15341278, 8373727}}, + {{28685821, 7759505, 52730348, 21551571, 35137043, 4079241, + 298136, 23321830, 64230656, 15190419}}, + }, + { + {{34175969, 13806335, 52771379, 17760000, 43104243, 10940927, + 8669718, 2742393, 41075551, 26679428}}, + {{65528476, 21825014, 41129205, 22109408, 49696989, 22641577, + 9291593, 17306653, 54954121, 6048604}}, + {{36803549, 14843443, 1539301, 11864366, 20201677, 1900163, + 13934231, 5128323, 11213262, 9168384}}, + }, + { + {{40828332, 11007846, 19408960, 32613674, 48515898, 29225851, + 62020803, 22449281, 20470156, 17155731}}, + {{43972811, 9282191, 14855179, 18164354, 59746048, 19145871, + 44324911, 14461607, 14042978, 5230683}}, + {{29969548, 30812838, 50396996, 25001989, 9175485, 31085458, + 21556950, 3506042, 61174973, 21104723}}, + }, + { + {{63964118, 8744660, 19704003, 4581278, 46678178, 6830682, + 45824694, 8971512, 38569675, 15326562}}, + {{47644235, 10110287, 49846336, 30050539, 43608476, 1355668, + 51585814, 15300987, 46594746, 9168259}}, + {{61755510, 4488612, 43305616, 16314346, 7780487, 17915493, + 38160505, 9601604, 33087103, 24543045}}, + }, + { + {{47665694, 18041531, 46311396, 21109108, 37284416, 10229460, + 39664535, 18553900, 61111993, 15664671}}, + {{23294591, 16921819, 44458082, 25083453, 27844203, 11461195, + 13099750, 31094076, 18151675, 13417686}}, + {{42385932, 29377914, 35958184, 5988918, 40250079, 6685064, + 1661597, 21002991, 15271675, 18101767}}, + }, + }, + { + { + {{11433023, 20325767, 8239630, 28274915, 65123427, 32828713, + 48410099, 2167543, 60187563, 20114249}}, + {{35672693, 15575145, 30436815, 12192228, 44645511, 9395378, + 57191156, 24915434, 12215109, 12028277}}, + {{14098381, 6555944, 23007258, 5757252, 51681032, 20603929, + 30123439, 4617780, 50208775, 32898803}}, + }, + { + {{63082644, 18313596, 11893167, 13718664, 52299402, 1847384, + 51288865, 10154008, 23973261, 20869958}}, + {{40577025, 29858441, 65199965, 2534300, 35238307, 17004076, + 18341389, 22134481, 32013173, 23450893}}, + {{41629544, 10876442, 55337778, 18929291, 54739296, 1838103, + 21911214, 6354752, 4425632, 32716610}}, + }, + { + {{56675475, 18941465, 22229857, 30463385, 53917697, 776728, + 49693489, 21533969, 4725004, 14044970}}, + {{19268631, 26250011, 1555348, 8692754, 45634805, 23643767, 6347389, + 32142648, 47586572, 17444675}}, + {{42244775, 12986007, 56209986, 27995847, 55796492, 33405905, + 19541417, 8180106, 9282262, 10282508}}, + }, + { + {{40903763, 4428546, 58447668, 20360168, 4098401, 19389175, + 15522534, 8372215, 5542595, 22851749}}, + {{56546323, 14895632, 26814552, 16880582, 49628109, 31065071, + 64326972, 6993760, 49014979, 10114654}}, + {{47001790, 32625013, 31422703, 10427861, 59998115, 6150668, + 38017109, 22025285, 25953724, 33448274}}, + }, + { + {{62874467, 25515139, 57989738, 3045999, 2101609, 20947138, + 19390019, 6094296, 63793585, 12831124}}, + {{51110167, 7578151, 5310217, 14408357, 33560244, 33329692, + 31575953, 6326196, 7381791, 31132593}}, + {{46206085, 3296810, 24736065, 17226043, 18374253, 7318640, + 6295303, 8082724, 51746375, 12339663}}, + }, + { + {{27724736, 2291157, 6088201, 19369634, 1792726, 5857634, 13848414, + 15768922, 25091167, 14856294}}, + {{48242193, 8331042, 24373479, 8541013, 66406866, 24284974, 12927299, + 20858939, 44926390, 24541532}}, + {{55685435, 28132841, 11632844, 3405020, 30536730, 21880393, + 39848098, 13866389, 30146206, 9142070}}, + }, + { + {{3924129, 18246916, 53291741, 23499471, 12291819, 32886066, + 39406089, 9326383, 
58871006, 4171293}}, + {{51186905, 16037936, 6713787, 16606682, 45496729, 2790943, + 26396185, 3731949, 345228, 28091483}}, + {{45781307, 13448258, 25284571, 1143661, 20614966, 24705045, + 2031538, 21163201, 50855680, 19972348}}, + }, + { + {{31016192, 16832003, 26371391, 19103199, 62081514, 14854136, + 17477601, 3842657, 28012650, 17149012}}, + {{62033029, 9368965, 58546785, 28953529, 51858910, 6970559, + 57918991, 16292056, 58241707, 3507939}}, + {{29439664, 3537914, 23333589, 6997794, 49553303, 22536363, + 51899661, 18503164, 57943934, 6580395}}, + }, + }, + { + { + {{54923003, 25874643, 16438268, 10826160, 58412047, 27318820, + 17860443, 24280586, 65013061, 9304566}}, + {{20714545, 29217521, 29088194, 7406487, 11426967, 28458727, + 14792666, 18945815, 5289420, 33077305}}, + {{50443312, 22903641, 60948518, 20248671, 9192019, 31751970, + 17271489, 12349094, 26939669, 29802138}}, + }, + { + {{54218966, 9373457, 31595848, 16374215, 21471720, 13221525, + 39825369, 21205872, 63410057, 117886}}, + {{22263325, 26994382, 3984569, 22379786, 51994855, 32987646, + 28311252, 5358056, 43789084, 541963}}, + {{16259200, 3261970, 2309254, 18019958, 50223152, 28972515, + 24134069, 16848603, 53771797, 20002236}}, + }, + { + {{9378160, 20414246, 44262881, 20809167, 28198280, 26310334, + 64709179, 32837080, 690425, 14876244}}, + {{24977353, 33240048, 58884894, 20089345, 28432342, 32378079, + 54040059, 21257083, 44727879, 6618998}}, + {{65570671, 11685645, 12944378, 13682314, 42719353, 19141238, + 8044828, 19737104, 32239828, 27901670}}, + }, + { + {{48505798, 4762989, 66182614, 8885303, 38696384, 30367116, 9781646, + 23204373, 32779358, 5095274}}, + {{34100715, 28339925, 34843976, 29869215, 9460460, 24227009, + 42507207, 14506723, 21639561, 30924196}}, + {{50707921, 20442216, 25239337, 15531969, 3987758, 29055114, + 65819361, 26690896, 17874573, 558605}}, + }, + { + {{53508735, 10240080, 9171883, 16131053, 46239610, 9599699, + 33499487, 5080151, 2085892, 5119761}}, + {{44903700, 31034903, 50727262, 414690, 42089314, 2170429, + 30634760, 25190818, 35108870, 27794547}}, + {{60263160, 15791201, 8550074, 32241778, 29928808, 21462176, + 27534429, 26362287, 44757485, 12961481}}, + }, + { + {{42616785, 23983660, 10368193, 11582341, 43711571, 31309144, + 16533929, 8206996, 36914212, 28394793}}, + {{55987368, 30172197, 2307365, 6362031, 66973409, 8868176, 50273234, + 7031274, 7589640, 8945490}}, + {{34956097, 8917966, 6661220, 21876816, 65916803, 17761038, + 7251488, 22372252, 24099108, 19098262}}, + }, + { + {{5019539, 25646962, 4244126, 18840076, 40175591, 6453164, + 47990682, 20265406, 60876967, 23273695}}, + {{10853575, 10721687, 26480089, 5861829, 44113045, 1972174, + 65242217, 22996533, 63745412, 27113307}}, + {{50106456, 5906789, 221599, 26991285, 7828207, 20305514, 24362660, + 31546264, 53242455, 7421391}}, + }, + { + {{8139908, 27007935, 32257645, 27663886, 30375718, 1886181, + 45933756, 15441251, 28826358, 29431403}}, + {{6267067, 9695052, 7709135, 16950835, 34239795, 31668296, + 14795159, 25714308, 13746020, 31812384}}, + {{28584883, 7787108, 60375922, 18503702, 22846040, 25983196, + 63926927, 33190907, 4771361, 25134474}}, + }, + }, + { + { + {{24949256, 6376279, 39642383, 25379823, 48462709, 23623825, + 33543568, 21412737, 3569626, 11342593}}, + {{26514970, 4740088, 27912651, 3697550, 19331575, 22082093, 6809885, + 4608608, 7325975, 18753361}}, + {{55490446, 19000001, 42787651, 7655127, 65739590, 5214311, + 39708324, 10258389, 49462170, 25367739}}, + }, + { + {{11431185, 15823007, 26570245, 14329124, 
18029990, 4796082, + 35662685, 15580663, 9280358, 29580745}}, + {{66948081, 23228174, 44253547, 29249434, 46247496, 19933429, + 34297962, 22372809, 51563772, 4387440}}, + {{46309467, 12194511, 3937617, 27748540, 39954043, 9340369, + 42594872, 8548136, 20617071, 26072431}}, + }, + { + {{66170039, 29623845, 58394552, 16124717, 24603125, 27329039, + 53333511, 21678609, 24345682, 10325460}}, + {{47253587, 31985546, 44906155, 8714033, 14007766, 6928528, + 16318175, 32543743, 4766742, 3552007}}, + {{45357481, 16823515, 1351762, 32751011, 63099193, 3950934, 3217514, + 14481909, 10988822, 29559670}}, + }, + { + {{15564307, 19242862, 3101242, 5684148, 30446780, 25503076, + 12677126, 27049089, 58813011, 13296004}}, + {{57666574, 6624295, 36809900, 21640754, 62437882, 31497052, + 31521203, 9614054, 37108040, 12074673}}, + {{4771172, 33419193, 14290748, 20464580, 27992297, 14998318, + 65694928, 31997715, 29832612, 17163397}}, + }, + { + {{7064884, 26013258, 47946901, 28486894, 48217594, 30641695, + 25825241, 5293297, 39986204, 13101589}}, + {{64810282, 2439669, 59642254, 1719964, 39841323, 17225986, + 32512468, 28236839, 36752793, 29363474}}, + {{37102324, 10162315, 33928688, 3981722, 50626726, 20484387, + 14413973, 9515896, 19568978, 9628812}}, + }, + { + {{33053803, 199357, 15894591, 1583059, 27380243, 28973997, 49269969, + 27447592, 60817077, 3437739}}, + {{48129987, 3884492, 19469877, 12726490, 15913552, 13614290, + 44147131, 70103, 7463304, 4176122}}, + {{39984863, 10659916, 11482427, 17484051, 12771466, 26919315, + 34389459, 28231680, 24216881, 5944158}}, + }, + { + {{8894125, 7450974, 64444715, 23788679, 39028346, 21165316, + 19345745, 14680796, 11632993, 5847885}}, + {{26942781, 31239115, 9129563, 28647825, 26024104, 11769399, + 55590027, 6367193, 57381634, 4782139}}, + {{19916442, 28726022, 44198159, 22140040, 25606323, 27581991, + 33253852, 8220911, 6358847, 31680575}}, + }, + { + {{801428, 31472730, 16569427, 11065167, 29875704, 96627, 7908388, + 29073952, 53570360, 1387154}}, + {{19646058, 5720633, 55692158, 12814208, 11607948, 12749789, + 14147075, 15156355, 45242033, 11835259}}, + {{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, + 15467869, 40548314, 5052482}}, + }, + }, + { + { + {{64091413, 10058205, 1980837, 3964243, 22160966, 12322533, 60677741, + 20936246, 12228556, 26550755}}, + {{32944382, 14922211, 44263970, 5188527, 21913450, 24834489, + 4001464, 13238564, 60994061, 8653814}}, + {{22865569, 28901697, 27603667, 21009037, 14348957, 8234005, + 24808405, 5719875, 28483275, 2841751}}, + }, + { + {{50687877, 32441126, 66781144, 21446575, 21886281, 18001658, + 65220897, 33238773, 19932057, 20815229}}, + {{55452759, 10087520, 58243976, 28018288, 47830290, 30498519, + 3999227, 13239134, 62331395, 19644223}}, + {{1382174, 21859713, 17266789, 9194690, 53784508, 9720080, + 20403944, 11284705, 53095046, 3093229}}, + }, + { + {{16650902, 22516500, 66044685, 1570628, 58779118, 7352752, 66806440, + 16271224, 43059443, 26862581}}, + {{45197768, 27626490, 62497547, 27994275, 35364760, 22769138, + 24123613, 15193618, 45456747, 16815042}}, + {{57172930, 29264984, 41829040, 4372841, 2087473, 10399484, + 31870908, 14690798, 17361620, 11864968}}, + }, + { + {{55801235, 6210371, 13206574, 5806320, 38091172, 19587231, + 54777658, 26067830, 41530403, 17313742}}, + {{14668443, 21284197, 26039038, 15305210, 25515617, 4542480, + 10453892, 6577524, 9145645, 27110552}}, + {{5974855, 3053895, 57675815, 23169240, 35243739, 3225008, + 59136222, 3936127, 61456591, 30504127}}, + }, + { + 
{{30625386, 28825032, 41552902, 20761565, 46624288, 7695098, + 17097188, 17250936, 39109084, 1803631}}, + {{63555773, 9865098, 61880298, 4272700, 61435032, 16864731, + 14911343, 12196514, 45703375, 7047411}}, + {{20093258, 9920966, 55970670, 28210574, 13161586, 12044805, + 34252013, 4124600, 34765036, 23296865}}, + }, + { + {{46320040, 14084653, 53577151, 7842146, 19119038, 19731827, + 4752376, 24839792, 45429205, 2288037}}, + {{40289628, 30270716, 29965058, 3039786, 52635099, 2540456, + 29457502, 14625692, 42289247, 12570231}}, + {{66045306, 22002608, 16920317, 12494842, 1278292, 27685323, + 45948920, 30055751, 55134159, 4724942}}, + }, + { + {{17960970, 21778898, 62967895, 23851901, 58232301, 32143814, + 54201480, 24894499, 37532563, 1903855}}, + {{23134274, 19275300, 56426866, 31942495, 20684484, 15770816, + 54119114, 3190295, 26955097, 14109738}}, + {{15308788, 5320727, 36995055, 19235554, 22902007, 7767164, + 29425325, 22276870, 31960941, 11934971}}, + }, + { + {{39713153, 8435795, 4109644, 12222639, 42480996, 14818668, + 20638173, 4875028, 10491392, 1379718}}, + {{53949449, 9197840, 3875503, 24618324, 65725151, 27674630, + 33518458, 16176658, 21432314, 12180697}}, + {{55321537, 11500837, 13787581, 19721842, 44678184, 10140204, + 1465425, 12689540, 56807545, 19681548}}, + }, + }, + { + { + {{5414091, 18168391, 46101199, 9643569, 12834970, 1186149, + 64485948, 32212200, 26128230, 6032912}}, + {{40771450, 19788269, 32496024, 19900513, 17847800, 20885276, + 3604024, 8316894, 41233830, 23117073}}, + {{3296484, 6223048, 24680646, 21307972, 44056843, 5903204, + 58246567, 28915267, 12376616, 3188849}}, + }, + { + {{29190469, 18895386, 27549112, 32370916, 3520065, 22857131, + 32049514, 26245319, 50999629, 23702124}}, + {{52364359, 24245275, 735817, 32955454, 46701176, 28496527, + 25246077, 17758763, 18640740, 32593455}}, + {{60180029, 17123636, 10361373, 5642961, 4910474, 12345252, + 35470478, 33060001, 10530746, 1053335}}, + }, + { + {{37842897, 19367626, 53570647, 21437058, 47651804, 22899047, + 35646494, 30605446, 24018830, 15026644}}, + {{44516310, 30409154, 64819587, 5953842, 53668675, 9425630, + 25310643, 13003497, 64794073, 18408815}}, + {{39688860, 32951110, 59064879, 31885314, 41016598, 13987818, + 39811242, 187898, 43942445, 31022696}}, + }, + { + {{45364466, 19743956, 1844839, 5021428, 56674465, 17642958, + 9716666, 16266922, 62038647, 726098}}, + {{29370903, 27500434, 7334070, 18212173, 9385286, 2247707, + 53446902, 28714970, 30007387, 17731091}}, + {{66172485, 16086690, 23751945, 33011114, 65941325, 28365395, 9137108, + 730663, 9835848, 4555336}}, + }, + { + {{43732429, 1410445, 44855111, 20654817, 30867634, 15826977, + 17693930, 544696, 55123566, 12422645}}, + {{31117226, 21338698, 53606025, 6561946, 57231997, 20796761, + 61990178, 29457725, 29120152, 13924425}}, + {{49707966, 19321222, 19675798, 30819676, 56101901, 27695611, + 57724924, 22236731, 7240930, 33317044}}, + }, + { + {{35747106, 22207651, 52101416, 27698213, 44655523, 21401660, + 1222335, 4389483, 3293637, 18002689}}, + {{50424044, 19110186, 11038543, 11054958, 53307689, 30215898, + 42789283, 7733546, 12796905, 27218610}}, + {{58349431, 22736595, 41689999, 10783768, 36493307, 23807620, + 38855524, 3647835, 3222231, 22393970}}, + }, + { + {{18606113, 1693100, 41660478, 18384159, 4112352, 10045021, + 23603893, 31506198, 59558087, 2484984}}, + {{9255298, 30423235, 54952701, 32550175, 13098012, 24339566, + 16377219, 31451620, 47306788, 30519729}}, + {{44379556, 7496159, 61366665, 11329248, 19991973, 30206930, + 35390715, 
9936965, 37011176, 22935634}}, + }, + { + {{21878571, 28553135, 4338335, 13643897, 64071999, 13160959, + 19708896, 5415497, 59748361, 29445138}}, + {{27736842, 10103576, 12500508, 8502413, 63695848, 23920873, + 10436917, 32004156, 43449720, 25422331}}, + {{19492550, 21450067, 37426887, 32701801, 63900692, 12403436, + 30066266, 8367329, 13243957, 8709688}}, + }, + }, + { + { + {{12015105, 2801261, 28198131, 10151021, 24818120, 28811299, + 55914672, 27908697, 5150967, 7274186}}, + {{2831347, 21062286, 1478974, 6122054, 23825128, 20820846, + 31097298, 6083058, 31021603, 23760822}}, + {{64578913, 31324785, 445612, 10720828, 53259337, 22048494, + 43601132, 16354464, 15067285, 19406725}}, + }, + { + {{7840923, 14037873, 33744001, 15934015, 66380651, 29911725, + 21403987, 1057586, 47729402, 21151211}}, + {{915865, 17085158, 15608284, 24765302, 42751837, 6060029, + 49737545, 8410996, 59888403, 16527024}}, + {{32922597, 32997445, 20336073, 17369864, 10903704, 28169945, + 16957573, 52992, 23834301, 6588044}}, + }, + { + {{32752011, 11232950, 3381995, 24839566, 22652987, 22810329, + 17159698, 16689107, 46794284, 32248439}}, + {{62419196, 9166775, 41398568, 22707125, 11576751, 12733943, + 7924251, 30802151, 1976122, 26305405}}, + {{21251203, 16309901, 64125849, 26771309, 30810596, 12967303, 156041, + 30183180, 12331344, 25317235}}, + }, + { + {{8651595, 29077400, 51023227, 28557437, 13002506, 2950805, + 29054427, 28447462, 10008135, 28886531}}, + {{31486061, 15114593, 52847614, 12951353, 14369431, 26166587, + 16347320, 19892343, 8684154, 23021480}}, + {{19443825, 11385320, 24468943, 23895364, 43189605, 2187568, + 40845657, 27467510, 31316347, 14219878}}, + }, + { + {{38514374, 1193784, 32245219, 11392485, 31092169, 15722801, + 27146014, 6992409, 29126555, 9207390}}, + {{32382916, 1110093, 18477781, 11028262, 39697101, 26006320, + 62128346, 10843781, 59151264, 19118701}}, + {{2814918, 7836403, 27519878, 25686276, 46214848, 22000742, + 45614304, 8550129, 28346258, 1994730}}, + }, + { + {{47530565, 8085544, 53108345, 29605809, 2785837, 17323125, + 47591912, 7174893, 22628102, 8115180}}, + {{36703732, 955510, 55975026, 18476362, 34661776, 20276352, + 41457285, 3317159, 57165847, 930271}}, + {{51805164, 26720662, 28856489, 1357446, 23421993, 1057177, + 24091212, 32165462, 44343487, 22903716}}, + }, + { + {{44357633, 28250434, 54201256, 20785565, 51297352, 25757378, + 52269845, 17000211, 65241845, 8398969}}, + {{35139535, 2106402, 62372504, 1362500, 12813763, 16200670, + 22981545, 27263159, 18009407, 17781660}}, + {{49887941, 24009210, 39324209, 14166834, 29815394, 7444469, + 29551787, 29827013, 19288548, 1325865}}, + }, + { + {{15100138, 17718680, 43184885, 32549333, 40658671, 15509407, + 12376730, 30075286, 33166106, 25511682}}, + {{20909212, 13023121, 57899112, 16251777, 61330449, 25459517, + 12412150, 10018715, 2213263, 19676059}}, + {{32529814, 22479743, 30361438, 16864679, 57972923, 1513225, + 22922121, 6382134, 61341936, 8371347}}, + }, + }, + { + { + {{9923462, 11271500, 12616794, 3544722, 37110496, 31832805, + 12891686, 25361300, 40665920, 10486143}}, + {{44511638, 26541766, 8587002, 25296571, 4084308, 20584370, 361725, + 2610596, 43187334, 22099236}}, + {{5408392, 32417741, 62139741, 10561667, 24145918, 14240566, + 31319731, 29318891, 19985174, 30118346}}, + }, + { + {{53114407, 16616820, 14549246, 3341099, 32155958, 13648976, + 49531796, 8849296, 65030, 8370684}}, + {{58787919, 21504805, 31204562, 5839400, 46481576, 32497154, + 47665921, 6922163, 12743482, 23753914}}, + {{64747493, 12678784, 
28815050, 4759974, 43215817, 4884716, + 23783145, 11038569, 18800704, 255233}}, + }, + { + {{61839187, 31780545, 13957885, 7990715, 23132995, 728773, 13393847, + 9066957, 19258688, 18800639}}, + {{64172210, 22726896, 56676774, 14516792, 63468078, 4372540, + 35173943, 2209389, 65584811, 2055793}}, + {{580882, 16705327, 5468415, 30871414, 36182444, 18858431, + 59905517, 24560042, 37087844, 7394434}}, + }, + { + {{23838809, 1822728, 51370421, 15242726, 8318092, 29821328, + 45436683, 30062226, 62287122, 14799920}}, + {{13345610, 9759151, 3371034, 17416641, 16353038, 8577942, 31129804, + 13496856, 58052846, 7402517}}, + {{2286874, 29118501, 47066405, 31546095, 53412636, 5038121, + 11006906, 17794080, 8205060, 1607563}}, + }, + { + {{14414067, 25552300, 3331829, 30346215, 22249150, 27960244, + 18364660, 30647474, 30019586, 24525154}}, + {{39420813, 1585952, 56333811, 931068, 37988643, 22552112, + 52698034, 12029092, 9944378, 8024}}, + {{4368715, 29844802, 29874199, 18531449, 46878477, 22143727, + 50994269, 32555346, 58966475, 5640029}}, + }, + { + {{10299591, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, + 16859868, 15219797, 19226649}}, + {{27425505, 27835351, 3055005, 10660664, 23458024, 595578, 51710259, + 32381236, 48766680, 9742716}}, + {{6744077, 2427284, 26042789, 2720740, 66260958, 1118973, 32324614, + 7406442, 12420155, 1994844}}, + }, + { + {{14012502, 28529712, 48724410, 23975962, 40623521, 29617992, + 54075385, 22644628, 24319928, 27108099}}, + {{16412671, 29047065, 10772640, 15929391, 50040076, 28895810, + 10555944, 23070383, 37006495, 28815383}}, + {{22397363, 25786748, 57815702, 20761563, 17166286, 23799296, + 39775798, 6199365, 21880021, 21303672}}, + }, + { + {{62825557, 5368522, 35991846, 8163388, 36785801, 3209127, + 16557151, 8890729, 8840445, 4957760}}, + {{51661137, 709326, 60189418, 22684253, 37330941, 6522331, + 45388683, 12130071, 52312361, 5005756}}, + {{64994094, 19246303, 23019041, 15765735, 41839181, 6002751, + 10183197, 20315106, 50713577, 31378319}}, + }, + }, + { + { + {{48083108, 1632004, 13466291, 25559332, 43468412, 16573536, + 35094956, 30497327, 22208661, 2000468}}, + {{3065054, 32141671, 41510189, 33192999, 49425798, 27851016, + 58944651, 11248526, 63417650, 26140247}}, + {{10379208, 27508878, 8877318, 1473647, 37817580, 21046851, + 16690914, 2553332, 63976176, 16400288}}, + }, + { + {{15716668, 1254266, 48636174, 7446273, 58659946, 6344163, + 45011593, 26268851, 26894936, 9132066}}, + {{24158868, 12938817, 11085297, 25376834, 39045385, 29097348, + 36532400, 64451, 60291780, 30861549}}, + {{13488534, 7794716, 22236231, 5989356, 25426474, 20976224, 2350709, + 30135921, 62420857, 2364225}}, + }, + { + {{16335033, 9132434, 25640582, 6678888, 1725628, 8517937, 55301840, + 21856974, 15445874, 25756331}}, + {{29004188, 25687351, 28661401, 32914020, 54314860, 25611345, + 31863254, 29418892, 66830813, 17795152}}, + {{60986784, 18687766, 38493958, 14569918, 56250865, 29962602, + 10343411, 26578142, 37280576, 22738620}}, + }, + { + {{27081650, 3463984, 14099042, 29036828, 1616302, 27348828, 29542635, + 15372179, 17293797, 960709}}, + {{20263915, 11434237, 61343429, 11236809, 13505955, 22697330, + 50997518, 6493121, 47724353, 7639713}}, + {{64278047, 18715199, 25403037, 25339236, 58791851, 17380732, + 18006286, 17510682, 29994676, 17746311}}, + }, + { + {{9769828, 5202651, 42951466, 19923039, 39057860, 21992807, + 42495722, 19693649, 35924288, 709463}}, + {{12286395, 13076066, 45333675, 32377809, 42105665, 4057651, + 35090736, 24663557, 16102006, 13205847}}, 
+ {{13733362, 5599946, 10557076, 3195751, 61550873, 8536969, 41568694, + 8525971, 10151379, 10394400}}, + }, + { + {{4024660, 17416881, 22436261, 12276534, 58009849, 30868332, + 19698228, 11743039, 33806530, 8934413}}, + {{51229064, 29029191, 58528116, 30620370, 14634844, 32856154, + 57659786, 3137093, 55571978, 11721157}}, + {{17555920, 28540494, 8268605, 2331751, 44370049, 9761012, 9319229, + 8835153, 57903375, 32274386}}, + }, + { + {{66647436, 25724417, 20614117, 16688288, 59594098, 28747312, + 22300303, 505429, 6108462, 27371017}}, + {{62038564, 12367916, 36445330, 3234472, 32617080, 25131790, + 29880582, 20071101, 40210373, 25686972}}, + {{35133562, 5726538, 26934134, 10237677, 63935147, 32949378, + 24199303, 3795095, 7592688, 18562353}}, + }, + { + {{21594432, 18590204, 17466407, 29477210, 32537083, 2739898, + 6407723, 12018833, 38852812, 4298411}}, + {{46458361, 21592935, 39872588, 570497, 3767144, 31836892, + 13891941, 31985238, 13717173, 10805743}}, + {{52432215, 17910135, 15287173, 11927123, 24177847, 25378864, + 66312432, 14860608, 40169934, 27690595}}, + }, + }, + { + { + {{12962541, 5311799, 57048096, 11658279, 18855286, 25600231, + 13286262, 20745728, 62727807, 9882021}}, + {{18512060, 11319350, 46985740, 15090308, 18818594, 5271736, + 44380960, 3666878, 43141434, 30255002}}, + {{60319844, 30408388, 16192428, 13241070, 15898607, 19348318, + 57023983, 26893321, 64705764, 5276064}}, + }, + { + {{30169808, 28236784, 26306205, 21803573, 27814963, 7069267, + 7152851, 3684982, 1449224, 13082861}}, + {{10342807, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, + 15056736, 46092426, 25352431}}, + {{33958735, 3261607, 22745853, 7948688, 19370557, 18376767, + 40936887, 6482813, 56808784, 22494330}}, + }, + { + {{32869458, 28145887, 25609742, 15678670, 56421095, 18083360, + 26112420, 2521008, 44444576, 6904814}}, + {{29506904, 4457497, 3377935, 23757988, 36598817, 12935079, 1561737, + 3841096, 38105225, 26896789}}, + {{10340844, 26924055, 48452231, 31276001, 12621150, 20215377, + 30878496, 21730062, 41524312, 5181965}}, + }, + { + {{25940096, 20896407, 17324187, 23247058, 58437395, 15029093, + 24396252, 17103510, 64786011, 21165857}}, + {{45343161, 9916822, 65808455, 4079497, 66080518, 11909558, 1782390, + 12641087, 20603771, 26992690}}, + {{48226577, 21881051, 24849421, 11501709, 13161720, 28785558, + 1925522, 11914390, 4662781, 7820689}}, + }, + { + {{12241050, 33128450, 8132690, 9393934, 32846760, 31954812, 29749455, + 12172924, 16136752, 15264020}}, + {{56758909, 18873868, 58896884, 2330219, 49446315, 19008651, + 10658212, 6671822, 19012087, 3772772}}, + {{3753511, 30133366, 10617073, 2028709, 14841030, 26832768, 28718731, + 17791548, 20527770, 12988982}}, + }, + { + {{52286360, 27757162, 63400876, 12689772, 66209881, 22639565, + 42925817, 22989488, 3299664, 21129479}}, + {{50331161, 18301130, 57466446, 4978982, 3308785, 8755439, 6943197, + 6461331, 41525717, 8991217}}, + {{49882601, 1816361, 65435576, 27467992, 31783887, 25378441, + 34160718, 7417949, 36866577, 1507264}}, + }, + { + {{29692644, 6829891, 56610064, 4334895, 20945975, 21647936, + 38221255, 8209390, 14606362, 22907359}}, + {{63627275, 8707080, 32188102, 5672294, 22096700, 1711240, 34088169, + 9761486, 4170404, 31469107}}, + {{55521375, 14855944, 62981086, 32022574, 40459774, 15084045, + 22186522, 16002000, 52832027, 25153633}}, + }, + { + {{62297408, 13761028, 35404987, 31070512, 63796392, 7869046, + 59995292, 23934339, 13240844, 10965870}}, + {{59366301, 25297669, 52340529, 19898171, 43876480, 12387165, + 
4498947, 14147411, 29514390, 4302863}}, + {{53695440, 21146572, 20757301, 19752600, 14785142, 8976368, + 62047588, 31410058, 17846987, 19582505}}, + }, + }, + { + { + {{64864412, 32799703, 62511833, 32488122, 60861691, 1455298, + 45461136, 24339642, 61886162, 12650266}}, + {{57202067, 17484121, 21134159, 12198166, 40044289, 708125, 387813, + 13770293, 47974538, 10958662}}, + {{22470984, 12369526, 23446014, 28113323, 45588061, 23855708, + 55336367, 21979976, 42025033, 4271861}}, + }, + { + {{41939299, 23500789, 47199531, 15361594, 61124506, 2159191, + 75375, 29275903, 34582642, 8469672}}, + {{15854951, 4148314, 58214974, 7259001, 11666551, 13824734, + 36577666, 2697371, 24154791, 24093489}}, + {{15446137, 17747788, 29759746, 14019369, 30811221, 23944241, + 35526855, 12840103, 24913809, 9815020}}, + }, + { + {{62399578, 27940162, 35267365, 21265538, 52665326, 10799413, + 58005188, 13438768, 18735128, 9466238}}, + {{11933045, 9281483, 5081055, 28370608, 64480701, 28648802, 59381042, + 22658328, 44380208, 16199063}}, + {{14576810, 379472, 40322331, 25237195, 37682355, 22741457, + 67006097, 1876698, 30801119, 2164795}}, + }, + { + {{15995086, 3199873, 13672555, 13712240, 47730029, 28906785, + 54027253, 18058162, 53616056, 1268051}}, + {{56818250, 29895392, 63822271, 10948817, 23037027, 3794475, + 63638526, 20954210, 50053494, 3565903}}, + {{29210069, 24135095, 61189071, 28601646, 10834810, 20226706, + 50596761, 22733718, 39946641, 19523900}}, + }, + { + {{53946955, 15508587, 16663704, 25398282, 38758921, 9019122, + 37925443, 29785008, 2244110, 19552453}}, + {{61955989, 29753495, 57802388, 27482848, 16243068, 14684434, + 41435776, 17373631, 13491505, 4641841}}, + {{10813398, 643330, 47920349, 32825515, 30292061, 16954354, + 27548446, 25833190, 14476988, 20787001}}, + }, + { + {{10292079, 9984945, 6481436, 8279905, 59857350, 7032742, 27282937, + 31910173, 39196053, 12651323}}, + {{35923332, 32741048, 22271203, 11835308, 10201545, 15351028, + 17099662, 3988035, 21721536, 30405492}}, + {{10202177, 27008593, 35735631, 23979793, 34958221, 25434748, + 54202543, 3852693, 13216206, 14842320}}, + }, + { + {{51293224, 22953365, 60569911, 26295436, 60124204, 26972653, + 35608016, 13765823, 39674467, 9900183}}, + {{14465486, 19721101, 34974879, 18815558, 39665676, 12990491, + 33046193, 15796406, 60056998, 25514317}}, + {{30924398, 25274812, 6359015, 20738097, 16508376, 9071735, + 41620263, 15413634, 9524356, 26535554}}, + }, + { + {{12274201, 20378885, 32627640, 31769106, 6736624, 13267305, + 5237659, 28444949, 15663515, 4035784}}, + {{64157555, 8903984, 17349946, 601635, 50676049, 28941875, + 53376124, 17665097, 44850385, 4659090}}, + {{50192582, 28601458, 36715152, 18395610, 20774811, 15897498, + 5736189, 15026997, 64930608, 20098846}}, + }, + }, + { + { + {{58249865, 31335375, 28571665, 23398914, 66634396, 23448733, + 63307367, 278094, 23440562, 33264224}}, + {{10226222, 27625730, 15139955, 120818, 52241171, 5218602, 32937275, + 11551483, 50536904, 26111567}}, + {{17932739, 21117156, 43069306, 10749059, 11316803, 7535897, + 22503767, 5561594, 63462240, 3898660}}, + }, + { + {{7749907, 32584865, 50769132, 33537967, 42090752, 15122142, 65535333, + 7152529, 21831162, 1245233}}, + {{26958440, 18896406, 4314585, 8346991, 61431100, 11960071, + 34519569, 32934396, 36706772, 16838219}}, + {{54942968, 9166946, 33491384, 13673479, 29787085, 13096535, + 6280834, 14587357, 44770839, 13987524}}, + }, + { + {{42758936, 7778774, 21116000, 15572597, 62275598, 28196653, + 62807965, 28429792, 59639082, 30696363}}, + 
{{9681908, 26817309, 35157219, 13591837, 60225043, 386949, 31622781, + 6439245, 52527852, 4091396}}, + {{58682418, 1470726, 38999185, 31957441, 3978626, 28430809, + 47486180, 12092162, 29077877, 18812444}}, + }, + { + {{5269168, 26694706, 53878652, 25533716, 25932562, 1763552, + 61502754, 28048550, 47091016, 2357888}}, + {{32264008, 18146780, 61721128, 32394338, 65017541, 29607531, + 23104803, 20684524, 5727337, 189038}}, + {{14609104, 24599962, 61108297, 16931650, 52531476, 25810533, + 40363694, 10942114, 41219933, 18669734}}, + }, + { + {{20513481, 5557931, 51504251, 7829530, 26413943, 31535028, + 45729895, 7471780, 13913677, 28416557}}, + {{41534488, 11967825, 29233242, 12948236, 60354399, 4713226, + 58167894, 14059179, 12878652, 8511905}}, + {{41452044, 3393630, 64153449, 26478905, 64858154, 9366907, + 36885446, 6812973, 5568676, 30426776}}, + }, + { + {{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, + 49700111, 20050058, 52713667, 8070817}}, + {{27117677, 23547054, 35826092, 27984343, 1127281, 12772488, + 37262958, 10483305, 55556115, 32525717}}, + {{10637467, 27866368, 5674780, 1072708, 40765276, 26572129, + 65424888, 9177852, 39615702, 15431202}}, + }, + { + {{20525126, 10892566, 54366392, 12779442, 37615830, 16150074, + 38868345, 14943141, 52052074, 25618500}}, + {{37084402, 5626925, 66557297, 23573344, 753597, 11981191, 25244767, + 30314666, 63752313, 9594023}}, + {{43356201, 2636869, 61944954, 23450613, 585133, 7877383, 11345683, + 27062142, 13352334, 22577348}}, + }, + { + {{65177046, 28146973, 3304648, 20669563, 17015805, 28677341, + 37325013, 25801949, 53893326, 33235227}}, + {{20239939, 6607058, 6203985, 3483793, 48721888, 32775202, 46385121, + 15077869, 44358105, 14523816}}, + {{27406023, 27512775, 27423595, 29057038, 4996213, 10002360, + 38266833, 29008937, 36936121, 28748764}}, + }, + }, + { + { + {{11374242, 12660715, 17861383, 21013599, 10935567, 1099227, + 53222788, 24462691, 39381819, 11358503}}, + {{54378055, 10311866, 1510375, 10778093, 64989409, 24408729, + 32676002, 11149336, 40985213, 4985767}}, + {{48012542, 341146, 60911379, 33315398, 15756972, 24757770, 66125820, + 13794113, 47694557, 17933176}}, + }, + { + {{6490062, 11940286, 25495923, 25828072, 8668372, 24803116, 3367602, + 6970005, 65417799, 24549641}}, + {{1656478, 13457317, 15370807, 6364910, 13605745, 8362338, 47934242, + 28078708, 50312267, 28522993}}, + {{44835530, 20030007, 67044178, 29220208, 48503227, 22632463, + 46537798, 26546453, 67009010, 23317098}}, + }, + { + {{17747446, 10039260, 19368299, 29503841, 46478228, 17513145, + 31992682, 17696456, 37848500, 28042460}}, + {{31932008, 28568291, 47496481, 16366579, 22023614, 88450, 11371999, + 29810185, 4882241, 22927527}}, + {{29796488, 37186, 19818052, 10115756, 55279832, 3352735, 18551198, + 3272828, 61917932, 29392022}}, + }, + { + {{12501267, 4044383, 58495907, 20162046, 34678811, 5136598, + 47878486, 30024734, 330069, 29895023}}, + {{6384877, 2899513, 17807477, 7663917, 64749976, 12363164, 25366522, + 24980540, 66837568, 12071498}}, + {{58743349, 29511910, 25133447, 29037077, 60897836, 2265926, + 34339246, 1936674, 61949167, 3829362}}, + }, + { + {{28425966, 27718999, 66531773, 28857233, 52891308, 6870929, 7921550, + 26986645, 26333139, 14267664}}, + {{56041645, 11871230, 27385719, 22994888, 62522949, 22365119, + 10004785, 24844944, 45347639, 8930323}}, + {{45911060, 17158396, 25654215, 31829035, 12282011, 11008919, + 1541940, 4757911, 40617363, 17145491}}, + }, + { + {{13537262, 25794942, 46504023, 10961926, 61186044, 20336366, + 
53952279, 6217253, 51165165, 13814989}}, + {{49686272, 15157789, 18705543, 29619, 24409717, 33293956, 27361680, + 9257833, 65152338, 31777517}}, + {{42063564, 23362465, 15366584, 15166509, 54003778, 8423555, + 37937324, 12361134, 48422886, 4578289}}, + }, + { + {{24579768, 3711570, 1342322, 22374306, 40103728, 14124955, + 44564335, 14074918, 21964432, 8235257}}, + {{60580251, 31142934, 9442965, 27628844, 12025639, 32067012, + 64127349, 31885225, 13006805, 2355433}}, + {{50803946, 19949172, 60476436, 28412082, 16974358, 22643349, + 27202043, 1719366, 1141648, 20758196}}, + }, + { + {{54244920, 20334445, 58790597, 22536340, 60298718, 28710537, + 13475065, 30420460, 32674894, 13715045}}, + {{11423316, 28086373, 32344215, 8962751, 24989809, 9241752, + 53843611, 16086211, 38367983, 17912338}}, + {{65699196, 12530727, 60740138, 10847386, 19531186, 19422272, + 55399715, 7791793, 39862921, 4383346}}, + }, + }, + { + { + {{38137966, 5271446, 65842855, 23817442, 54653627, 16732598, + 62246457, 28647982, 27193556, 6245191}}, + {{51914908, 5362277, 65324971, 2695833, 4960227, 12840725, 23061898, + 3260492, 22510453, 8577507}}, + {{54476394, 11257345, 34415870, 13548176, 66387860, 10879010, + 31168030, 13952092, 37537372, 29918525}}, + }, + { + {{3877321, 23981693, 32416691, 5405324, 56104457, 19897796, + 3759768, 11935320, 5611860, 8164018}}, + {{50833043, 14667796, 15906460, 12155291, 44997715, 24514713, + 32003001, 24722143, 5773084, 25132323}}, + {{43320746, 25300131, 1950874, 8937633, 18686727, 16459170, 66203139, + 12376319, 31632953, 190926}}, + }, + { + {{42515238, 17415546, 58684872, 13378745, 14162407, 6901328, + 58820115, 4508563, 41767309, 29926903}}, + {{8884438, 27670423, 6023973, 10104341, 60227295, 28612898, 18722940, + 18768427, 65436375, 827624}}, + {{34388281, 17265135, 34605316, 7101209, 13354605, 2659080, + 65308289, 19446395, 42230385, 1541285}}, + }, + { + {{2901328, 32436745, 3880375, 23495044, 49487923, 29941650, + 45306746, 29986950, 20456844, 31669399}}, + {{27019610, 12299467, 53450576, 31951197, 54247203, 28692960, + 47568713, 28538373, 29439640, 15138866}}, + {{21536104, 26928012, 34661045, 22864223, 44700786, 5175813, + 61688824, 17193268, 7779327, 109896}}, + }, + { + {{30279725, 14648750, 59063993, 6425557, 13639621, 32810923, 28698389, + 12180118, 23177719, 33000357}}, + {{26572828, 3405927, 35407164, 12890904, 47843196, 5335865, + 60615096, 2378491, 4439158, 20275085}}, + {{44392139, 3489069, 57883598, 33221678, 18875721, 32414337, + 14819433, 20822905, 49391106, 28092994}}, + }, + { + {{62052362, 16566550, 15953661, 3767752, 56672365, 15627059, + 66287910, 2177224, 8550082, 18440267}}, + {{48635543, 16596774, 66727204, 15663610, 22860960, 15585581, + 39264755, 29971692, 43848403, 25125843}}, + {{34628313, 15707274, 58902952, 27902350, 29464557, 2713815, + 44383727, 15860481, 45206294, 1494192}}, + }, + { + {{47546773, 19467038, 41524991, 24254879, 13127841, 759709, + 21923482, 16529112, 8742704, 12967017}}, + {{38643965, 1553204, 32536856, 23080703, 42417258, 33148257, + 58194238, 30620535, 37205105, 15553882}}, + {{21877890, 3230008, 9881174, 10539357, 62311749, 2841331, 11543572, + 14513274, 19375923, 20906471}}, + }, + { + {{8832269, 19058947, 13253510, 5137575, 5037871, 4078777, 24880818, + 27331716, 2862652, 9455043}}, + {{29306751, 5123106, 20245049, 19404543, 9592565, 8447059, 65031740, + 30564351, 15511448, 4789663}}, + {{46429108, 7004546, 8824831, 24119455, 63063159, 29803695, + 61354101, 108892, 23513200, 16652362}}, + }, + }, + { + { + {{33852691, 
4144781, 62632835, 26975308, 10770038, 26398890, + 60458447, 20618131, 48789665, 10212859}}, + {{2756062, 8598110, 7383731, 26694540, 22312758, 32449420, 21179800, + 2600940, 57120566, 21047965}}, + {{42463153, 13317461, 36659605, 17900503, 21365573, 22684775, + 11344423, 864440, 64609187, 16844368}}, + }, + { + {{40676061, 6148328, 49924452, 19080277, 18782928, 33278435, + 44547329, 211299, 2719757, 4940997}}, + {{65784982, 3911312, 60160120, 14759764, 37081714, 7851206, + 21690126, 8518463, 26699843, 5276295}}, + {{53958991, 27125364, 9396248, 365013, 24703301, 23065493, 1321585, + 149635, 51656090, 7159368}}, + }, + { + {{9987761, 30149673, 17507961, 9505530, 9731535, 31388918, 22356008, + 8312176, 22477218, 25151047}}, + {{18155857, 17049442, 19744715, 9006923, 15154154, 23015456, + 24256459, 28689437, 44560690, 9334108}}, + {{2986088, 28642539, 10776627, 30080588, 10620589, 26471229, + 45695018, 14253544, 44521715, 536905}}, + }, + { + {{4377737, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, + 10589625, 10838060, 18134008}}, + {{47766460, 867879, 9277171, 30335973, 52677291, 31567988, + 19295825, 17757482, 6378259, 699185}}, + {{7895007, 4057113, 60027092, 20476675, 49222032, 33231305, 66392824, + 15693154, 62063800, 20180469}}, + }, + { + {{59371282, 27685029, 52542544, 26147512, 11385653, 13201616, + 31730678, 22591592, 63190227, 23885106}}, + {{10188286, 17783598, 59772502, 13427542, 22223443, 14896287, + 30743455, 7116568, 45322357, 5427592}}, + {{696102, 13206899, 27047647, 22922350, 15285304, 23701253, + 10798489, 28975712, 19236242, 12477404}}, + }, + { + {{55879425, 11243795, 50054594, 25513566, 66320635, 25386464, + 63211194, 11180503, 43939348, 7733643}}, + {{17800790, 19518253, 40108434, 21787760, 23887826, 3149671, + 23466177, 23016261, 10322026, 15313801}}, + {{26246234, 11968874, 32263343, 28085704, 6830754, 20231401, + 51314159, 33452449, 42659621, 10890803}}, + }, + { + {{35743198, 10271362, 54448239, 27287163, 16690206, 20491888, + 52126651, 16484930, 25180797, 28219548}}, + {{66522290, 10376443, 34522450, 22268075, 19801892, 10997610, + 2276632, 9482883, 316878, 13820577}}, + {{57226037, 29044064, 64993357, 16457135, 56008783, 11674995, + 30756178, 26039378, 30696929, 29841583}}, + }, + { + {{32988917, 23951020, 12499365, 7910787, 56491607, 21622917, + 59766047, 23569034, 34759346, 7392472}}, + {{58253184, 15927860, 9866406, 29905021, 64711949, 16898650, + 36699387, 24419436, 25112946, 30627788}}, + {{64604801, 33117465, 25621773, 27875660, 15085041, 28074555, + 42223985, 20028237, 5537437, 19640113}}, + }, + }, + { + { + {{55883280, 2320284, 57524584, 10149186, 33664201, 5808647, + 52232613, 31824764, 31234589, 6090599}}, + {{57475529, 116425, 26083934, 2897444, 60744427, 30866345, 609720, + 15878753, 60138459, 24519663}}, + {{39351007, 247743, 51914090, 24551880, 23288160, 23542496, + 43239268, 6503645, 20650474, 1804084}}, + }, + { + {{39519059, 15456423, 8972517, 8469608, 15640622, 4439847, 3121995, + 23224719, 27842615, 33352104}}, + {{51801891, 2839643, 22530074, 10026331, 4602058, 5048462, 28248656, + 5031932, 55733782, 12714368}}, + {{20807691, 26283607, 29286140, 11421711, 39232341, 19686201, + 45881388, 1035545, 47375635, 12796919}}, + }, + { + {{12076880, 19253146, 58323862, 21705509, 42096072, 16400683, + 49517369, 20654993, 3480664, 18371617}}, + {{34747315, 5457596, 28548107, 7833186, 7303070, 21600887, + 42745799, 17632556, 33734809, 2771024}}, + {{45719598, 421931, 26597266, 6860826, 22486084, 26817260, + 49971378, 29344205, 42556581, 
15673396}}, + }, + { + {{46924223, 2338215, 19788685, 23933476, 63107598, 24813538, + 46837679, 4733253, 3727144, 20619984}}, + {{6120100, 814863, 55314462, 32931715, 6812204, 17806661, 2019593, + 7975683, 31123697, 22595451}}, + {{30069250, 22119100, 30434653, 2958439, 18399564, 32578143, + 12296868, 9204260, 50676426, 9648164}}, + }, + { + {{32705413, 32003455, 30705657, 7451065, 55303258, 9631812, 3305266, + 5248604, 41100532, 22176930}}, + {{17219846, 2375039, 35537917, 27978816, 47649184, 9219902, 294711, + 15298639, 2662509, 17257359}}, + {{65935918, 25995736, 62742093, 29266687, 45762450, 25120105, + 32087528, 32331655, 32247247, 19164571}}, + }, + { + {{14312609, 1221556, 17395390, 24854289, 62163122, 24869796, + 38911119, 23916614, 51081240, 20175586}}, + {{65680039, 23875441, 57873182, 6549686, 59725795, 33085767, 23046501, + 9803137, 17597934, 2346211}}, + {{18510781, 15337574, 26171504, 981392, 44867312, 7827555, + 43617730, 22231079, 3059832, 21771562}}, + }, + { + {{10141598, 6082907, 17829293, 31606789, 9830091, 13613136, + 41552228, 28009845, 33606651, 3592095}}, + {{33114149, 17665080, 40583177, 20211034, 33076704, 8716171, + 1151462, 1521897, 66126199, 26716628}}, + {{34169699, 29298616, 23947180, 33230254, 34035889, 21248794, + 50471177, 3891703, 26353178, 693168}}, + }, + { + {{30374239, 1595580, 50224825, 13186930, 4600344, 406904, 9585294, + 33153764, 31375463, 14369965}}, + {{52738210, 25781902, 1510300, 6434173, 48324075, 27291703, + 32732229, 20445593, 17901440, 16011505}}, + {{18171223, 21619806, 54608461, 15197121, 56070717, 18324396, + 47936623, 17508055, 8764034, 12309598}}, + }, + }, + { + { + {{5975889, 28311244, 47649501, 23872684, 55567586, 14015781, + 43443107, 1228318, 17544096, 22960650}}, + {{5811932, 31839139, 3442886, 31285122, 48741515, 25194890, + 49064820, 18144304, 61543482, 12348899}}, + {{35709185, 11407554, 25755363, 6891399, 63851926, 14872273, + 42259511, 8141294, 56476330, 32968952}}, + }, + { + {{54433560, 694025, 62032719, 13300343, 14015258, 19103038, + 57410191, 22225381, 30944592, 1130208}}, + {{8247747, 26843490, 40546482, 25845122, 52706924, 18905521, + 4652151, 2488540, 23550156, 33283200}}, + {{17294297, 29765994, 7026747, 15626851, 22990044, 113481, 2267737, + 27646286, 66700045, 33416712}}, + }, + { + {{16091066, 17300506, 18599251, 7340678, 2137637, 32332775, + 63744702, 14550935, 3260525, 26388161}}, + {{62198760, 20221544, 18550886, 10864893, 50649539, 26262835, + 44079994, 20349526, 54360141, 2701325}}, + {{58534169, 16099414, 4629974, 17213908, 46322650, 27548999, + 57090500, 9276970, 11329923, 1862132}}, + }, + { + {{14763057, 17650824, 36190593, 3689866, 3511892, 10313526, + 45157776, 12219230, 58070901, 32614131}}, + {{8894987, 30108338, 6150752, 3013931, 301220, 15693451, 35127648, + 30644714, 51670695, 11595569}}, + {{15214943, 3537601, 40870142, 19495559, 4418656, 18323671, + 13947275, 10730794, 53619402, 29190761}}, + }, + { + {{64570558, 7682792, 32759013, 263109, 37124133, 25598979, + 44776739, 23365796, 977107, 699994}}, + {{54642373, 4195083, 57897332, 550903, 51543527, 12917919, + 19118110, 33114591, 36574330, 19216518}}, + {{31788442, 19046775, 4799988, 7372237, 8808585, 18806489, 9408236, + 23502657, 12493931, 28145115}}, + }, + { + {{41428258, 5260743, 47873055, 27269961, 63412921, 16566086, + 27218280, 2607121, 29375955, 6024730}}, + {{842132, 30759739, 62345482, 24831616, 26332017, 21148791, + 11831879, 6985184, 57168503, 2854095}}, + {{62261602, 25585100, 2516241, 27706719, 9695690, 26333246, 16512644, + 
960770, 12121869, 16648078}}, + }, + { + {{51890212, 14667095, 53772635, 2013716, 30598287, 33090295, + 35603941, 25672367, 20237805, 2838411}}, + {{47820798, 4453151, 15298546, 17376044, 22115042, 17581828, + 12544293, 20083975, 1068880, 21054527}}, + {{57549981, 17035596, 33238497, 13506958, 30505848, 32439836, + 58621956, 30924378, 12521377, 4845654}}, + }, + { + {{38910324, 10744107, 64150484, 10199663, 7759311, 20465832, + 3409347, 32681032, 60626557, 20668561}}, + {{43547042, 6230155, 46726851, 10655313, 43068279, 21933259, + 10477733, 32314216, 63995636, 13974497}}, + {{12966261, 15550616, 35069916, 31939085, 21025979, 32924988, + 5642324, 7188737, 18895762, 12629579}}, + }, + }, + { + { + {{14741879, 18607545, 22177207, 21833195, 1279740, 8058600, + 11758140, 789443, 32195181, 3895677}}, + {{10758205, 15755439, 62598914, 9243697, 62229442, 6879878, 64904289, + 29988312, 58126794, 4429646}}, + {{64654951, 15725972, 46672522, 23143759, 61304955, 22514211, + 59972993, 21911536, 18047435, 18272689}}, + }, + { + {{41935844, 22247266, 29759955, 11776784, 44846481, 17733976, + 10993113, 20703595, 49488162, 24145963}}, + {{21987233, 700364, 42603816, 14972007, 59334599, 27836036, + 32155025, 2581431, 37149879, 8773374}}, + {{41540495, 454462, 53896929, 16126714, 25240068, 8594567, + 20656846, 12017935, 59234475, 19634276}}, + }, + { + {{6028163, 6263078, 36097058, 22252721, 66289944, 2461771, + 35267690, 28086389, 65387075, 30777706}}, + {{54829870, 16624276, 987579, 27631834, 32908202, 1248608, 7719845, + 29387734, 28408819, 6816612}}, + {{56750770, 25316602, 19549650, 21385210, 22082622, 16147817, + 20613181, 13982702, 56769294, 5067942}}, + }, + { + {{36602878, 29732664, 12074680, 13582412, 47230892, 2443950, + 47389578, 12746131, 5331210, 23448488}}, + {{30528792, 3601899, 65151774, 4619784, 39747042, 18118043, + 24180792, 20984038, 27679907, 31905504}}, + {{9402385, 19597367, 32834042, 10838634, 40528714, 20317236, + 26653273, 24868867, 22611443, 20839026}}, + }, + { + {{22190590, 1118029, 22736441, 15130463, 36648172, 27563110, + 19189624, 28905490, 4854858, 6622139}}, + {{58798126, 30600981, 58846284, 30166382, 56707132, 33282502, + 13424425, 29987205, 26404408, 13001963}}, + {{35867026, 18138731, 64114613, 8939345, 11562230, 20713762, + 41044498, 21932711, 51703708, 11020692}}, + }, + { + {{1866042, 25604943, 59210214, 23253421, 12483314, 13477547, + 3175636, 21130269, 28761761, 1406734}}, + {{66660290, 31776765, 13018550, 3194501, 57528444, 22392694, + 24760584, 29207344, 25577410, 20175752}}, + {{42818486, 4759344, 66418211, 31701615, 2066746, 10693769, + 37513074, 9884935, 57739938, 4745409}}, + }, + { + {{57967561, 6049713, 47577803, 29213020, 35848065, 9944275, + 51646856, 22242579, 10931923, 21622501}}, + {{50547351, 14112679, 59096219, 4817317, 59068400, 22139825, + 44255434, 10856640, 46638094, 13434653}}, + {{22759470, 23480998, 50342599, 31683009, 13637441, 23386341, + 1765143, 20900106, 28445306, 28189722}}, + }, + { + {{29875063, 12493613, 2795536, 29768102, 1710619, 15181182, + 56913147, 24765756, 9074233, 1167180}}, + {{40903181, 11014232, 57266213, 30918946, 40200743, 7532293, + 48391976, 24018933, 3843902, 9367684}}, + {{56139269, 27150720, 9591133, 9582310, 11349256, 108879, 16235123, + 8601684, 66969667, 4242894}}, + }, + }, + { + { + {{22092954, 20363309, 65066070, 21585919, 32186752, 22037044, + 60534522, 2470659, 39691498, 16625500}}, + {{56051142, 3042015, 13770083, 24296510, 584235, 33009577, 59338006, + 2602724, 39757248, 14247412}}, + {{6314156, 23289540, 
34336361, 15957556, 56951134, 168749, + 58490057, 14290060, 27108877, 32373552}}, + }, + { + {{58522267, 26383465, 13241781, 10960156, 34117849, 19759835, + 33547975, 22495543, 39960412, 981873}}, + {{22833421, 9293594, 34459416, 19935764, 57971897, 14756818, + 44180005, 19583651, 56629059, 17356469}}, + {{59340277, 3326785, 38997067, 10783823, 19178761, 14905060, + 22680049, 13906969, 51175174, 3797898}}, + }, + { + {{21721337, 29341686, 54902740, 9310181, 63226625, 19901321, + 23740223, 30845200, 20491982, 25512280}}, + {{9209251, 18419377, 53852306, 27386633, 66377847, 15289672, + 25947805, 15286587, 30997318, 26851369}}, + {{7392013, 16618386, 23946583, 25514540, 53843699, 32020573, + 52911418, 31232855, 17649997, 33304352}}, + }, + { + {{57807776, 19360604, 30609525, 30504889, 41933794, 32270679, + 51867297, 24028707, 64875610, 7662145}}, + {{49550191, 1763593, 33994528, 15908609, 37067994, 21380136, + 7335079, 25082233, 63934189, 3440182}}, + {{47219164, 27577423, 42997570, 23865561, 10799742, 16982475, + 40449, 29122597, 4862399, 1133}}, + }, + { + {{34252636, 25680474, 61686474, 14860949, 50789833, 7956141, + 7258061, 311861, 36513873, 26175010}}, + {{63335436, 31988495, 28985339, 7499440, 24445838, 9325937, 29727763, + 16527196, 18278453, 15405622}}, + {{62726958, 8508651, 47210498, 29880007, 61124410, 15149969, + 53795266, 843522, 45233802, 13626196}}, + }, + { + {{2281448, 20067377, 56193445, 30944521, 1879357, 16164207, + 56324982, 3953791, 13340839, 15928663}}, + {{31727126, 26374577, 48671360, 25270779, 2875792, 17164102, + 41838969, 26539605, 43656557, 5964752}}, + {{4100401, 27594980, 49929526, 6017713, 48403027, 12227140, + 40424029, 11344143, 2538215, 25983677}}, + }, + { + {{57675240, 6123112, 11159803, 31397824, 30016279, 14966241, + 46633881, 1485420, 66479608, 17595569}}, + {{40304287, 4260918, 11851389, 9658551, 35091757, 16367491, + 46903439, 20363143, 11659921, 22439314}}, + {{26180377, 10015009, 36264640, 24973138, 5418196, 9480663, 2231568, + 23384352, 33100371, 32248261}}, + }, + { + {{15121094, 28352561, 56718958, 15427820, 39598927, 17561924, + 21670946, 4486675, 61177054, 19088051}}, + {{16166467, 24070699, 56004733, 6023907, 35182066, 32189508, + 2340059, 17299464, 56373093, 23514607}}, + {{28042865, 29997343, 54982337, 12259705, 63391366, 26608532, + 6766452, 24864833, 18036435, 5803270}}, + }, + }, + { + { + {{66291264, 6763911, 11803561, 1585585, 10958447, 30883267, 23855390, + 4598332, 60949433, 19436993}}, + {{36077558, 19298237, 17332028, 31170912, 31312681, 27587249, + 696308, 50292, 47013125, 11763583}}, + {{66514282, 31040148, 34874710, 12643979, 12650761, 14811489, 665117, + 20940800, 47335652, 22840869}}, + }, + { + {{30464590, 22291560, 62981387, 20819953, 19835326, 26448819, + 42712688, 2075772, 50088707, 992470}}, + {{18357166, 26559999, 7766381, 16342475, 37783946, 411173, 14578841, + 8080033, 55534529, 22952821}}, + {{19598397, 10334610, 12555054, 2555664, 18821899, 23214652, + 21873262, 16014234, 26224780, 16452269}}, + }, + { + {{36884939, 5145195, 5944548, 16385966, 3976735, 2009897, 55731060, + 25936245, 46575034, 3698649}}, + {{14187449, 3448569, 56472628, 22743496, 44444983, 30120835, + 7268409, 22663988, 27394300, 12015369}}, + {{19695742, 16087646, 28032085, 12999827, 6817792, 11427614, + 20244189, 32241655, 53849736, 30151970}}, + }, + { + {{30860084, 12735208, 65220619, 28854697, 50133957, 2256939, + 58942851, 12298311, 58558340, 23160969}}, + {{61389038, 22309106, 65198214, 15569034, 26642876, 25966672, + 61319509, 18435777, 
62132699, 12651792}}, + {{64260450, 9953420, 11531313, 28271553, 26895122, 20857343, + 53990043, 17036529, 9768697, 31021214}}, + }, + { + {{42389405, 1894650, 66821166, 28850346, 15348718, 25397902, + 32767512, 12765450, 4940095, 10678226}}, + {{18860224, 15980149, 48121624, 31991861, 40875851, 22482575, + 59264981, 13944023, 42736516, 16582018}}, + {{51604604, 4970267, 37215820, 4175592, 46115652, 31354675, + 55404809, 15444559, 56105103, 7989036}}, + }, + { + {{31490433, 5568061, 64696061, 2182382, 34772017, 4531685, + 35030595, 6200205, 47422751, 18754260}}, + {{49800177, 17674491, 35586086, 33551600, 34221481, 16375548, + 8680158, 17182719, 28550067, 26697300}}, + {{38981977, 27866340, 16837844, 31733974, 60258182, 12700015, + 37068883, 4364037, 1155602, 5988841}}, + }, + { + {{21890435, 20281525, 54484852, 12154348, 59276991, 15300495, + 23148983, 29083951, 24618406, 8283181}}, + {{33972757, 23041680, 9975415, 6841041, 35549071, 16356535, + 3070187, 26528504, 1466168, 10740210}}, + {{65599446, 18066246, 53605478, 22898515, 32799043, 909394, + 53169961, 27774712, 34944214, 18227391}}, + }, + { + {{3960804, 19286629, 39082773, 17636380, 47704005, 13146867, + 15567327, 951507, 63848543, 32980496}}, + {{24740822, 5052253, 37014733, 8961360, 25877428, 6165135, + 42740684, 14397371, 59728495, 27410326}}, + {{38220480, 3510802, 39005586, 32395953, 55870735, 22922977, + 51667400, 19101303, 65483377, 27059617}}, + }, + }, + { + { + {{793280, 24323954, 8836301, 27318725, 39747955, 31184838, 33152842, + 28669181, 57202663, 32932579}}, + {{5666214, 525582, 20782575, 25516013, 42570364, 14657739, 16099374, + 1468826, 60937436, 18367850}}, + {{62249590, 29775088, 64191105, 26806412, 7778749, 11688288, + 36704511, 23683193, 65549940, 23690785}}, + }, + { + {{10896313, 25834728, 824274, 472601, 47648556, 3009586, 25248958, + 14783338, 36527388, 17796587}}, + {{10566929, 12612572, 35164652, 11118702, 54475488, 12362878, + 21752402, 8822496, 24003793, 14264025}}, + {{27713843, 26198459, 56100623, 9227529, 27050101, 2504721, + 23886875, 20436907, 13958494, 27821979}}, + }, + { + {{43627235, 4867225, 39861736, 3900520, 29838369, 25342141, + 35219464, 23512650, 7340520, 18144364}}, + {{4646495, 25543308, 44342840, 22021777, 23184552, 8566613, + 31366726, 32173371, 52042079, 23179239}}, + {{49838347, 12723031, 50115803, 14878793, 21619651, 27356856, + 27584816, 3093888, 58265170, 3849920}}, + }, + { + {{58043933, 2103171, 25561640, 18428694, 61869039, 9582957, + 32477045, 24536477, 5002293, 18004173}}, + {{55051311, 22376525, 21115584, 20189277, 8808711, 21523724, + 16489529, 13378448, 41263148, 12741425}}, + {{61162478, 10645102, 36197278, 15390283, 63821882, 26435754, + 24306471, 15852464, 28834118, 25908360}}, + }, + { + {{49773116, 24447374, 42577584, 9434952, 58636780, 32971069, + 54018092, 455840, 20461858, 5491305}}, + {{13669229, 17458950, 54626889, 23351392, 52539093, 21661233, + 42112877, 11293806, 38520660, 24132599}}, + {{28497909, 6272777, 34085870, 14470569, 8906179, 32328802, + 18504673, 19389266, 29867744, 24758489}}, + }, + { + {{50901822, 13517195, 39309234, 19856633, 24009063, 27180541, + 60741263, 20379039, 22853428, 29542421}}, + {{24191359, 16712145, 53177067, 15217830, 14542237, 1646131, + 18603514, 22516545, 12876622, 31441985}}, + {{17902668, 4518229, 66697162, 30725184, 26878216, 5258055, 54248111, + 608396, 16031844, 3723494}}, + }, + { + {{38476072, 12763727, 46662418, 7577503, 33001348, 20536687, + 17558841, 25681542, 23896953, 29240187}}, + {{47103464, 21542479, 31520463, 
605201, 2543521, 5991821, 64163800, + 7229063, 57189218, 24727572}}, + {{28816026, 298879, 38943848, 17633493, 19000927, 31888542, + 54428030, 30605106, 49057085, 31471516}}, + }, + { + {{16000882, 33209536, 3493091, 22107234, 37604268, 20394642, + 12577739, 16041268, 47393624, 7847706}}, + {{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, + 34252933, 27035413, 57088296, 3852847}}, + {{55678375, 15697595, 45987307, 29133784, 5386313, 15063598, + 16514493, 17622322, 29330898, 18478208}}, + }, + }, + { + { + {{41609129, 29175637, 51885955, 26653220, 16615730, 2051784, + 3303702, 15490, 39560068, 12314390}}, + {{15683501, 27551389, 18109119, 23573784, 15337967, 27556609, + 50391428, 15921865, 16103996, 29823217}}, + {{43939021, 22773182, 13588191, 31925625, 63310306, 32479502, + 47835256, 5402698, 37293151, 23713330}}, + }, + { + {{23190676, 2384583, 34394524, 3462153, 37205209, 32025299, + 55842007, 8911516, 41903005, 2739712}}, + {{21374101, 30000182, 33584214, 9874410, 15377179, 11831242, + 33578960, 6134906, 4931255, 11987849}}, + {{67101132, 30575573, 50885377, 7277596, 105524, 33232381, 35628324, + 13861387, 37032554, 10117929}}, + }, + { + {{37607694, 22809559, 40945095, 13051538, 41483300, 5089642, + 60783361, 6704078, 12890019, 15728940}}, + {{45136504, 21783052, 66157804, 29135591, 14704839, 2695116, 903376, + 23126293, 12885166, 8311031}}, + {{49592363, 5352193, 10384213, 19742774, 7506450, 13453191, + 26423267, 4384730, 1888765, 28119028}}, + }, + { + {{41291507, 30447119, 53614264, 30371925, 30896458, 19632703, + 34857219, 20846562, 47644429, 30214188}}, + {{43500868, 30888657, 66582772, 4651135, 5765089, 4618330, 6092245, + 14845197, 17151279, 23700316}}, + {{42278406, 20820711, 51942885, 10367249, 37577956, 33289075, + 22825804, 26467153, 50242379, 16176524}}, + }, + { + {{43525589, 6564960, 20063689, 3798228, 62368686, 7359224, 2006182, + 23191006, 38362610, 23356922}}, + {{56482264, 29068029, 53788301, 28429114, 3432135, 27161203, + 23632036, 31613822, 32808309, 1099883}}, + {{15030958, 5768825, 39657628, 30667132, 60681485, 18193060, + 51830967, 26745081, 2051440, 18328567}}, + }, + { + {{63746541, 26315059, 7517889, 9824992, 23555850, 295369, 5148398, + 19400244, 44422509, 16633659}}, + {{4577067, 16802144, 13249840, 18250104, 19958762, 19017158, + 18559669, 22794883, 8402477, 23690159}}, + {{38702534, 32502850, 40318708, 32646733, 49896449, 22523642, + 9453450, 18574360, 17983009, 9967138}}, + }, + { + {{41346370, 6524721, 26585488, 9969270, 24709298, 1220360, 65430874, + 7806336, 17507396, 3651560}}, + {{56688388, 29436320, 14584638, 15971087, 51340543, 8861009, + 26556809, 27979875, 48555541, 22197296}}, + {{2839082, 14284142, 4029895, 3472686, 14402957, 12689363, 40466743, + 8459446, 61503401, 25932490}}, + }, + { + {{62269556, 30018987, 9744960, 2871048, 25113978, 3187018, 41998051, + 32705365, 17258083, 25576693}}, + {{18164541, 22959256, 49953981, 32012014, 19237077, 23809137, + 23357532, 18337424, 26908269, 12150756}}, + {{36843994, 25906566, 5112248, 26517760, 65609056, 26580174, 43167, + 28016731, 34806789, 16215818}}, + }, + }, + { + { + {{60209940, 9824393, 54804085, 29153342, 35711722, 27277596, + 32574488, 12532905, 59605792, 24879084}}, + {{39765323, 17038963, 39957339, 22831480, 946345, 16291093, + 254968, 7168080, 21676107, 31611404}}, + {{21260942, 25129680, 50276977, 21633609, 43430902, 3968120, + 63456915, 27338965, 63552672, 25641356}}, + }, + { + {{16544735, 13250366, 50304436, 15546241, 62525861, 12757257, + 64646556, 24874095, 48201831, 
23891632}}, + {{64693606, 17976703, 18312302, 4964443, 51836334, 20900867, + 26820650, 16690659, 25459437, 28989823}}, + {{41964155, 11425019, 28423002, 22533875, 60963942, 17728207, + 9142794, 31162830, 60676445, 31909614}}, + }, + { + {{44004212, 6253475, 16964147, 29785560, 41994891, 21257994, + 39651638, 17209773, 6335691, 7249989}}, + {{36775618, 13979674, 7503222, 21186118, 55152142, 28932738, + 36836594, 2682241, 25993170, 21075909}}, + {{4364628, 5930691, 32304656, 23509878, 59054082, 15091130, + 22857016, 22955477, 31820367, 15075278}}, + }, + { + {{31879134, 24635739, 17258760, 90626, 59067028, 28636722, 24162787, + 23903546, 49138625, 12833044}}, + {{19073683, 14851414, 42705695, 21694263, 7625277, 11091125, + 47489674, 2074448, 57694925, 14905376}}, + {{24483648, 21618865, 64589997, 22007013, 65555733, 15355505, + 41826784, 9253128, 27628530, 25998952}}, + }, + { + {{17597607, 8340603, 19355617, 552187, 26198470, 30377849, 4593323, + 24396850, 52997988, 15297015}}, + {{510886, 14337390, 35323607, 16638631, 6328095, 2713355, 46891447, + 21690211, 8683220, 2921426}}, + {{18606791, 11874196, 27155355, 28272950, 43077121, 6265445, + 41930624, 32275507, 4674689, 13890525}}, + }, + { + {{13609624, 13069022, 39736503, 20498523, 24360585, 9592974, + 14977157, 9835105, 4389687, 288396}}, + {{9922506, 33035038, 13613106, 5883594, 48350519, 33120168, 54804801, + 8317627, 23388070, 16052080}}, + {{12719997, 11937594, 35138804, 28525742, 26900119, 8561328, + 46953177, 21921452, 52354592, 22741539}}, + }, + { + {{15961858, 14150409, 26716931, 32888600, 44314535, 13603568, + 11829573, 7467844, 38286736, 929274}}, + {{11038231, 21972036, 39798381, 26237869, 56610336, 17246600, + 43629330, 24182562, 45715720, 2465073}}, + {{20017144, 29231206, 27915241, 1529148, 12396362, 15675764, + 13817261, 23896366, 2463390, 28932292}}, + }, + { + {{50749986, 20890520, 55043680, 4996453, 65852442, 1073571, + 9583558, 12851107, 4003896, 12673717}}, + {{65377275, 18398561, 63845933, 16143081, 19294135, 13385325, + 14741514, 24450706, 7903885, 2348101}}, + {{24536016, 17039225, 12715591, 29692277, 1511292, 10047386, + 63266518, 26425272, 38731325, 10048126}}, + }, + }, + { + { + {{54486638, 27349611, 30718824, 2591312, 56491836, 12192839, + 18873298, 26257342, 34811107, 15221631}}, + {{40630742, 22450567, 11546243, 31701949, 9180879, 7656409, + 45764914, 2095754, 29769758, 6593415}}, + {{35114656, 30646970, 4176911, 3264766, 12538965, 32686321, 26312344, + 27435754, 30958053, 8292160}}, + }, + { + {{31429803, 19595316, 29173531, 15632448, 12174511, 30794338, + 32808830, 3977186, 26143136, 30405556}}, + {{22648882, 1402143, 44308880, 13746058, 7936347, 365344, 58440231, + 31879998, 63350620, 31249806}}, + {{51616947, 8012312, 64594134, 20851969, 43143017, 23300402, + 65496150, 32018862, 50444388, 8194477}}, + }, + { + {{27338066, 26047012, 59694639, 10140404, 48082437, 26964542, + 27277190, 8855376, 28572286, 3005164}}, + {{26287105, 4821776, 25476601, 29408529, 63344350, 17765447, + 49100281, 1182478, 41014043, 20474836}}, + {{59937691, 3178079, 23970071, 6201893, 49913287, 29065239, + 45232588, 19571804, 32208682, 32356184}}, + }, + { + {{50451143, 2817642, 56822502, 14811297, 6024667, 13349505, + 39793360, 23056589, 39436278, 22014573}}, + {{15941010, 24148500, 45741813, 8062054, 31876073, 33315803, + 51830470, 32110002, 15397330, 29424239}}, + {{8934485, 20068965, 43822466, 20131190, 34662773, 14047985, + 31170398, 32113411, 39603297, 15087183}}, + }, + { + {{48751602, 31397940, 24524912, 16876564, 
15520426, 27193656, + 51606457, 11461895, 16788528, 27685490}}, + {{65161459, 16013772, 21750665, 3714552, 49707082, 17498998, + 63338576, 23231111, 31322513, 21938797}}, + {{21426636, 27904214, 53460576, 28206894, 38296674, 28633461, + 48833472, 18933017, 13040861, 21441484}}, + }, + { + {{11293895, 12478086, 39972463, 15083749, 37801443, 14748871, + 14555558, 20137329, 1613710, 4896935}}, + {{41213962, 15323293, 58619073, 25496531, 25967125, 20128972, + 2825959, 28657387, 43137087, 22287016}}, + {{51184079, 28324551, 49665331, 6410663, 3622847, 10243618, + 20615400, 12405433, 43355834, 25118015}}, + }, + { + {{60017550, 12556207, 46917512, 9025186, 50036385, 4333800, + 4378436, 2432030, 23097949, 32988414}}, + {{4565804, 17528778, 20084411, 25711615, 1724998, 189254, 24767264, + 10103221, 48596551, 2424777}}, + {{366633, 21577626, 8173089, 26664313, 30788633, 5745705, 59940186, + 1344108, 63466311, 12412658}}, + }, + { + {{43107073, 7690285, 14929416, 33386175, 34898028, 20141445, + 24162696, 18227928, 63967362, 11179384}}, + {{18289503, 18829478, 8056944, 16430056, 45379140, 7842513, + 61107423, 32067534, 48424218, 22110928}}, + {{476239, 6601091, 60956074, 23831056, 17503544, 28690532, 27672958, + 13403813, 11052904, 5219329}}, + }, + }, + { + { + {{20678527, 25178694, 34436965, 8849122, 62099106, 14574751, + 31186971, 29580702, 9014761, 24975376}}, + {{53464795, 23204192, 51146355, 5075807, 65594203, 22019831, + 34006363, 9160279, 8473550, 30297594}}, + {{24900749, 14435722, 17209120, 18261891, 44516588, 9878982, + 59419555, 17218610, 42540382, 11788947}}, + }, + { + {{63990690, 22159237, 53306774, 14797440, 9652448, 26708528, + 47071426, 10410732, 42540394, 32095740}}, + {{51449703, 16736705, 44641714, 10215877, 58011687, 7563910, + 11871841, 21049238, 48595538, 8464117}}, + {{43708233, 8348506, 52522913, 32692717, 63158658, 27181012, + 14325288, 8628612, 33313881, 25183915}}, + }, + { + {{46921872, 28586496, 22367355, 5271547, 66011747, 28765593, + 42303196, 23317577, 58168128, 27736162}}, + {{60160060, 31759219, 34483180, 17533252, 32635413, 26180187, + 15989196, 20716244, 28358191, 29300528}}, + {{43547083, 30755372, 34757181, 31892468, 57961144, 10429266, + 50471180, 4072015, 61757200, 5596588}}, + }, + { + {{38872266, 30164383, 12312895, 6213178, 3117142, 16078565, + 29266239, 2557221, 1768301, 15373193}}, + {{59865506, 30307471, 62515396, 26001078, 66980936, 32642186, 66017961, + 29049440, 42448372, 3442909}}, + {{36898293, 5124042, 14181784, 8197961, 18964734, 21615339, + 22597930, 7176455, 48523386, 13365929}}, + }, + { + {{59231455, 32054473, 8324672, 4690079, 6261860, 890446, 24538107, + 24984246, 57419264, 30522764}}, + {{25008885, 22782833, 62803832, 23916421, 16265035, 15721635, + 683793, 21730648, 15723478, 18390951}}, + {{57448220, 12374378, 40101865, 26528283, 59384749, 21239917, + 11879681, 5400171, 519526, 32318556}}, + }, + { + {{22258397, 17222199, 59239046, 14613015, 44588609, 30603508, + 46754982, 7315966, 16648397, 7605640}}, + {{59027556, 25089834, 58885552, 9719709, 19259459, 18206220, + 23994941, 28272877, 57640015, 4763277}}, + {{45409620, 9220968, 51378240, 1084136, 41632757, 30702041, + 31088446, 25789909, 55752334, 728111}}, + }, + { + {{26047201, 21802961, 60208540, 17032633, 24092067, 9158119, + 62835319, 20998873, 37743427, 28056159}}, + {{17510331, 33231575, 5854288, 8403524, 17133918, 30441820, 38997856, + 12327944, 10750447, 10014012}}, + {{56796096, 3936951, 9156313, 24656749, 16498691, 32559785, + 39627812, 32887699, 3424690, 7540221}}, + }, + { 
+ {{30322361, 26590322, 11361004, 29411115, 7433303, 4989748, 60037442, + 17237212, 57864598, 15258045}}, + {{13054543, 30774935, 19155473, 469045, 54626067, 4566041, 5631406, + 2711395, 1062915, 28418087}}, + {{47868616, 22299832, 37599834, 26054466, 61273100, 13005410, + 61042375, 12194496, 32960380, 1459310}}, + }, + }, + { + { + {{19852015, 7027924, 23669353, 10020366, 8586503, 26896525, 394196, + 27452547, 18638002, 22379495}}, + {{31395515, 15098109, 26581030, 8030562, 50580950, 28547297, + 9012485, 25970078, 60465776, 28111795}}, + {{57916680, 31207054, 65111764, 4529533, 25766844, 607986, 67095642, + 9677542, 34813975, 27098423}}, + }, + { + {{64664349, 33404494, 29348901, 8186665, 1873760, 12489863, 36174285, + 25714739, 59256019, 25416002}}, + {{51872508, 18120922, 7766469, 746860, 26346930, 23332670, + 39775412, 10754587, 57677388, 5203575}}, + {{31834314, 14135496, 66338857, 5159117, 20917671, 16786336, + 59640890, 26216907, 31809242, 7347066}}, + }, + { + {{57502122, 21680191, 20414458, 13033986, 13716524, 21862551, + 19797969, 21343177, 15192875, 31466942}}, + {{54445282, 31372712, 1168161, 29749623, 26747876, 19416341, + 10609329, 12694420, 33473243, 20172328}}, + {{33184999, 11180355, 15832085, 22169002, 65475192, 225883, + 15089336, 22530529, 60973201, 14480052}}, + }, + { + {{31308717, 27934434, 31030839, 31657333, 15674546, 26971549, + 5496207, 13685227, 27595050, 8737275}}, + {{46790012, 18404192, 10933842, 17376410, 8335351, 26008410, + 36100512, 20943827, 26498113, 66511}}, + {{22644435, 24792703, 50437087, 4884561, 64003250, 19995065, + 30540765, 29267685, 53781076, 26039336}}, + }, + { + {{39091017, 9834844, 18617207, 30873120, 63706907, 20246925, + 8205539, 13585437, 49981399, 15115438}}, + {{23711543, 32881517, 31206560, 25191721, 6164646, 23844445, + 33572981, 32128335, 8236920, 16492939}}, + {{43198286, 20038905, 40809380, 29050590, 25005589, 25867162, + 19574901, 10071562, 6708380, 27332008}}, + }, + { + {{2101372, 28624378, 19702730, 2367575, 51681697, 1047674, 5301017, + 9328700, 29955601, 21876122}}, + {{3096359, 9271816, 45488000, 18032587, 52260867, 25961494, + 41216721, 20918836, 57191288, 6216607}}, + {{34493015, 338662, 41913253, 2510421, 37895298, 19734218, + 24822829, 27407865, 40341383, 7525078}}, + }, + { + {{44042215, 19568808, 16133486, 25658254, 63719298, 778787, + 66198528, 30771936, 47722230, 11994100}}, + {{21691500, 19929806, 66467532, 19187410, 3285880, 30070836, + 42044197, 9718257, 59631427, 13381417}}, + {{18445390, 29352196, 14979845, 11622458, 65381754, 29971451, + 23111647, 27179185, 28535281, 15779576}}, + }, + { + {{30098034, 3089662, 57874477, 16662134, 45801924, 11308410, + 53040410, 12021729, 9955285, 17251076}}, + {{9734894, 18977602, 59635230, 24415696, 2060391, 11313496, + 48682835, 9924398, 20194861, 13380996}}, + {{40730762, 25589224, 44941042, 15789296, 49053522, 27385639, + 65123949, 15707770, 26342023, 10146099}}, + }, + }, + { + { + {{41091971, 33334488, 21339190, 33513044, 19745255, 30675732, + 37471583, 2227039, 21612326, 33008704}}, + {{54031477, 1184227, 23562814, 27583990, 46757619, 27205717, + 25764460, 12243797, 46252298, 11649657}}, + {{57077370, 11262625, 27384172, 2271902, 26947504, 17556661, 39943, + 6114064, 33514190, 2333242}}, + }, + { + {{45675257, 21132610, 8119781, 7219913, 45278342, 24538297, + 60429113, 20883793, 24350577, 20104431}}, + {{62992557, 22282898, 43222677, 4843614, 37020525, 690622, + 35572776, 23147595, 8317859, 12352766}}, + {{18200138, 19078521, 34021104, 30857812, 43406342, 24451920, + 
43556767, 31266881, 20712162, 6719373}}, + }, + { + {{26656189, 6075253, 59250308, 1886071, 38764821, 4262325, 11117530, + 29791222, 26224234, 30256974}}, + {{49939907, 18700334, 63713187, 17184554, 47154818, 14050419, + 21728352, 9493610, 18620611, 17125804}}, + {{53785524, 13325348, 11432106, 5964811, 18609221, 6062965, + 61839393, 23828875, 36407290, 17074774}}, + }, + { + {{43248326, 22321272, 26961356, 1640861, 34695752, 16816491, + 12248508, 28313793, 13735341, 1934062}}, + {{25089769, 6742589, 17081145, 20148166, 21909292, 17486451, + 51972569, 29789085, 45830866, 5473615}}, + {{31883658, 25593331, 1083431, 21982029, 22828470, 13290673, + 59983779, 12469655, 29111212, 28103418}}, + }, + { + {{24244947, 18504025, 40845887, 2791539, 52111265, 16666677, + 24367466, 6388839, 56813277, 452382}}, + {{41468082, 30136590, 5217915, 16224624, 19987036, 29472163, + 42872612, 27639183, 15766061, 8407814}}, + {{46701865, 13990230, 15495425, 16395525, 5377168, 15166495, + 58191841, 29165478, 59040954, 2276717}}, + }, + { + {{30157899, 12924066, 49396814, 9245752, 19895028, 3368142, + 43281277, 5096218, 22740376, 26251015}}, + {{2041139, 19298082, 7783686, 13876377, 41161879, 20201972, + 24051123, 13742383, 51471265, 13295221}}, + {{33338218, 25048699, 12532112, 7977527, 9106186, 31839181, + 49388668, 28941459, 62657506, 18884987}}, + }, + { + {{47063583, 5454096, 52762316, 6447145, 28862071, 1883651, + 64639598, 29412551, 7770568, 9620597}}, + {{23208049, 7979712, 33071466, 8149229, 1758231, 22719437, 30945527, + 31860109, 33606523, 18786461}}, + {{1439939, 17283952, 66028874, 32760649, 4625401, 10647766, 62065063, + 1220117, 30494170, 22113633}}, + }, + { + {{62071265, 20526136, 64138304, 30492664, 15640973, 26852766, + 40369837, 926049, 65424525, 20220784}}, + {{13908495, 30005160, 30919927, 27280607, 45587000, 7989038, + 9021034, 9078865, 3353509, 4033511}}, + {{37445433, 18440821, 32259990, 33209950, 24295848, 20642309, + 23161162, 8839127, 27485041, 7356032}}, + }, + }, + { + { + {{9661008, 705443, 11980065, 28184278, 65480320, 14661172, 60762722, + 2625014, 28431036, 16782598}}, + {{43269631, 25243016, 41163352, 7480957, 49427195, 25200248, + 44562891, 14150564, 15970762, 4099461}}, + {{29262576, 16756590, 26350592, 24760869, 8529670, 22346382, + 13617292, 23617289, 11465738, 8317062}}, + }, + { + {{41615764, 26591503, 32500199, 24135381, 44070139, 31252209, + 14898636, 3848455, 20969334, 28396916}}, + {{46724414, 19206718, 48772458, 13884721, 34069410, 2842113, + 45498038, 29904543, 11177094, 14989547}}, + {{42612143, 21838415, 16959895, 2278463, 12066309, 10137771, + 13515641, 2581286, 38621356, 9930239}}, + }, + { + {{49357223, 31456605, 16544299, 20545132, 51194056, 18605350, + 18345766, 20150679, 16291480, 28240394}}, + {{33879670, 2553287, 32678213, 9875984, 8534129, 6889387, 57432090, + 6957616, 4368891, 9788741}}, + {{16660737, 7281060, 56278106, 12911819, 20108584, 25452756, + 45386327, 24941283, 16250551, 22443329}}, + }, + { + {{47343357, 2390525, 50557833, 14161979, 1905286, 6414907, 4689584, + 10604807, 36918461, 4782746}}, + {{65754325, 14736940, 59741422, 20261545, 7710541, 19398842, + 57127292, 4383044, 22546403, 437323}}, + {{31665558, 21373968, 50922033, 1491338, 48740239, 3294681, + 27343084, 2786261, 36475274, 19457415}}, + }, + { + {{52641566, 32870716, 33734756, 7448551, 19294360, 14334329, + 47418233, 2355318, 47824193, 27440058}}, + {{15121312, 17758270, 6377019, 27523071, 56310752, 20596586, + 18952176, 15496498, 37728731, 11754227}}, + {{64471568, 20071356, 
8488726, 19250536, 12728760, 31931939, + 7141595, 11724556, 22761615, 23420291}}, + }, + { + {{16918416, 11729663, 49025285, 3022986, 36093132, 20214772, + 38367678, 21327038, 32851221, 11717399}}, + {{11166615, 7338049, 60386341, 4531519, 37640192, 26252376, + 31474878, 3483633, 65915689, 29523600}}, + {{66923210, 9921304, 31456609, 20017994, 55095045, 13348922, + 33142652, 6546660, 47123585, 29606055}}, + }, + { + {{34648249, 11266711, 55911757, 25655328, 31703693, 3855903, + 58571733, 20721383, 36336829, 18068118}}, + {{49102387, 12709067, 3991746, 27075244, 45617340, 23004006, + 35973516, 17504552, 10928916, 3011958}}, + {{60151107, 17960094, 31696058, 334240, 29576716, 14796075, + 36277808, 20749251, 18008030, 10258577}}, + }, + { + {{44660220, 15655568, 7018479, 29144429, 36794597, 32352840, + 65255398, 1367119, 25127874, 6671743}}, + {{29701166, 19180498, 56230743, 9279287, 67091296, 13127209, + 21382910, 11042292, 25838796, 4642684}}, + {{46678630, 14955536, 42982517, 8124618, 61739576, 27563961, + 30468146, 19653792, 18423288, 4177476}}, + }, + }, +}; + +static uint8_t negative(signed char b) { + uint32_t x = b; + x >>= 31; // 1: yes; 0: no + return x; +} + +static void table_select(ge_precomp *t, int pos, signed char b) { + ge_precomp minust; + uint8_t bnegative = negative(b); + uint8_t babs = b - ((uint8_t)((-bnegative) & b) << 1); + + ge_precomp_0(t); + cmov(t, &k25519Precomp[pos][0], equal(babs, 1)); + cmov(t, &k25519Precomp[pos][1], equal(babs, 2)); + cmov(t, &k25519Precomp[pos][2], equal(babs, 3)); + cmov(t, &k25519Precomp[pos][3], equal(babs, 4)); + cmov(t, &k25519Precomp[pos][4], equal(babs, 5)); + cmov(t, &k25519Precomp[pos][5], equal(babs, 6)); + cmov(t, &k25519Precomp[pos][6], equal(babs, 7)); + cmov(t, &k25519Precomp[pos][7], equal(babs, 8)); + fe_copy_ll(&minust.yplusx, &t->yminusx); + fe_copy_ll(&minust.yminusx, &t->yplusx); + + // NOTE: the input table is canonical, but types don't encode it + fe tmp; + fe_carry(&tmp, &t->xy2d); + fe_neg(&minust.xy2d, &tmp); + + cmov(t, &minust, bnegative); +} + +// h = a * B +// where a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) { + signed char e[64]; + signed char carry; + ge_p1p1 r; + ge_p2 s; + ge_precomp t; + int i; + + for (i = 0; i < 32; ++i) { + e[2 * i + 0] = (a[i] >> 0) & 15; + e[2 * i + 1] = (a[i] >> 4) & 15; + } + // each e[i] is between 0 and 15 + // e[63] is between 0 and 7 + + carry = 0; + for (i = 0; i < 63; ++i) { + e[i] += carry; + carry = e[i] + 8; + carry >>= 4; + e[i] -= carry << 4; + } + e[63] += carry; + // each e[i] is between -8 and 8 + + ge_p3_0(h); + for (i = 1; i < 64; i += 2) { + table_select(&t, i / 2, e[i]); + ge_madd(&r, h, &t); + x25519_ge_p1p1_to_p3(h, &r); + } + + ge_p3_dbl(&r, h); + x25519_ge_p1p1_to_p2(&s, &r); + ge_p2_dbl(&r, &s); + x25519_ge_p1p1_to_p2(&s, &r); + ge_p2_dbl(&r, &s); + x25519_ge_p1p1_to_p2(&s, &r); + ge_p2_dbl(&r, &s); + x25519_ge_p1p1_to_p3(h, &r); + + for (i = 0; i < 64; i += 2) { + table_select(&t, i / 2, e[i]); + ge_madd(&r, h, &t); + x25519_ge_p1p1_to_p3(h, &r); + } +} + +#endif + +static void cmov_cached(ge_cached *t, ge_cached *u, uint8_t b) { + fe_cmov(&t->YplusX, &u->YplusX, b); + fe_cmov(&t->YminusX, &u->YminusX, b); + fe_cmov(&t->Z, &u->Z, b); + fe_cmov(&t->T2d, &u->T2d, b); +} + +// r = scalar * A. +// where a = a[0]+256*a[1]+...+256^31 a[31]. 
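x25519_ge_scalarmult below is a fixed 4-bit-window ladder: Ai[] caches 0*A through 15*A, the scalar is consumed one nibble at a time starting from its most significant nibble, and each window costs four doublings plus one cached addition, with the table entry picked by a constant-time cmov_cached scan over all sixteen entries rather than a secret-dependent array index. A minimal sketch of the window extraction the loop performs inline, assuming a 32-byte little-endian scalar; the standalone helper name window4 is illustrative only and not part of this file:

// Sketch only: mirrors the index computation inside the main loop below.
// For window i (i = 0, 4, ..., 252) the loop reads byte 31 - i/8 of the
// little-endian scalar and takes its high nibble first (shift 4 - (i & 4)),
// so the most significant four bits are handled in the first iteration.
static uint8_t window4(const uint8_t scalar[32], int i) {
  uint8_t index = scalar[31 - i / 8];  // byte that holds window i
  index >>= 4 - (i & 4);               // high nibble when (i & 4) == 0
  return index & 0xf;                  // value in [0, 15]; selects Ai[index]
}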
+void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A) { + ge_p2 Ai_p2[8]; + ge_cached Ai[16]; + ge_p1p1 t; + + ge_cached_0(&Ai[0]); + x25519_ge_p3_to_cached(&Ai[1], A); + ge_p3_to_p2(&Ai_p2[1], A); + + unsigned i; + for (i = 2; i < 16; i += 2) { + ge_p2_dbl(&t, &Ai_p2[i / 2]); + ge_p1p1_to_cached(&Ai[i], &t); + if (i < 8) { + x25519_ge_p1p1_to_p2(&Ai_p2[i], &t); + } + x25519_ge_add(&t, A, &Ai[i]); + ge_p1p1_to_cached(&Ai[i + 1], &t); + if (i < 7) { + x25519_ge_p1p1_to_p2(&Ai_p2[i + 1], &t); + } + } + + ge_p2_0(r); + ge_p3 u; + + for (i = 0; i < 256; i += 4) { + ge_p2_dbl(&t, r); + x25519_ge_p1p1_to_p2(r, &t); + ge_p2_dbl(&t, r); + x25519_ge_p1p1_to_p2(r, &t); + ge_p2_dbl(&t, r); + x25519_ge_p1p1_to_p2(r, &t); + ge_p2_dbl(&t, r); + x25519_ge_p1p1_to_p3(&u, &t); + + uint8_t index = scalar[31 - i/8]; + index >>= 4 - (i & 4); + index &= 0xf; + + unsigned j; + ge_cached selected; + ge_cached_0(&selected); + for (j = 0; j < 16; j++) { + cmov_cached(&selected, &Ai[j], equal(j, index)); + } + + x25519_ge_add(&t, &u, &selected); + x25519_ge_p1p1_to_p2(r, &t); + } +} + +static void slide(signed char *r, const uint8_t *a) { + int i; + int b; + int k; + + for (i = 0; i < 256; ++i) { + r[i] = 1 & (a[i >> 3] >> (i & 7)); + } + + for (i = 0; i < 256; ++i) { + if (r[i]) { + for (b = 1; b <= 6 && i + b < 256; ++b) { + if (r[i + b]) { + if (r[i] + (r[i + b] << b) <= 15) { + r[i] += r[i + b] << b; + r[i + b] = 0; + } else if (r[i] - (r[i + b] << b) >= -15) { + r[i] -= r[i + b] << b; + for (k = i + b; k < 256; ++k) { + if (!r[k]) { + r[k] = 1; + break; + } + r[k] = 0; + } + } else { + break; + } + } + } + } + } +} + +static const ge_precomp Bi[8] = { + { + {{25967493, 19198397, 29566455, 3660896, 54414519, 4014786, 27544626, + 21800161, 61029707, 2047604}}, + {{54563134, 934261, 64385954, 3049989, 66381436, 9406985, 12720692, + 5043384, 19500929, 18085054}}, + {{58370664, 4489569, 9688441, 18769238, 10184608, 21191052, 29287918, + 11864899, 42594502, 29115885}}, + }, + { + {{15636272, 23865875, 24204772, 25642034, 616976, 16869170, 27787599, + 18782243, 28944399, 32004408}}, + {{16568933, 4717097, 55552716, 32452109, 15682895, 21747389, 16354576, + 21778470, 7689661, 11199574}}, + {{30464137, 27578307, 55329429, 17883566, 23220364, 15915852, 7512774, + 10017326, 49359771, 23634074}}, + }, + { + {{10861363, 11473154, 27284546, 1981175, 37044515, 12577860, 32867885, + 14515107, 51670560, 10819379}}, + {{4708026, 6336745, 20377586, 9066809, 55836755, 6594695, 41455196, + 12483687, 54440373, 5581305}}, + {{19563141, 16186464, 37722007, 4097518, 10237984, 29206317, 28542349, + 13850243, 43430843, 17738489}}, + }, + { + {{5153727, 9909285, 1723747, 30776558, 30523604, 5516873, 19480852, + 5230134, 43156425, 18378665}}, + {{36839857, 30090922, 7665485, 10083793, 28475525, 1649722, 20654025, + 16520125, 30598449, 7715701}}, + {{28881826, 14381568, 9657904, 3680757, 46927229, 7843315, 35708204, + 1370707, 29794553, 32145132}}, + }, + { + {{44589871, 26862249, 14201701, 24808930, 43598457, 8844725, 18474211, + 32192982, 54046167, 13821876}}, + {{60653668, 25714560, 3374701, 28813570, 40010246, 22982724, 31655027, + 26342105, 18853321, 19333481}}, + {{4566811, 20590564, 38133974, 21313742, 59506191, 30723862, 58594505, + 23123294, 2207752, 30344648}}, + }, + { + {{41954014, 29368610, 29681143, 7868801, 60254203, 24130566, 54671499, + 32891431, 35997400, 17421995}}, + {{25576264, 30851218, 7349803, 21739588, 16472781, 9300885, 3844789, + 15725684, 171356, 6466918}}, + {{23103977, 13316479, 9739013, 
17404951, 817874, 18515490, 8965338, + 19466374, 36393951, 16193876}}, + }, + { + {{33587053, 3180712, 64714734, 14003686, 50205390, 17283591, 17238397, + 4729455, 49034351, 9256799}}, + {{41926547, 29380300, 32336397, 5036987, 45872047, 11360616, 22616405, + 9761698, 47281666, 630304}}, + {{53388152, 2639452, 42871404, 26147950, 9494426, 27780403, 60554312, + 17593437, 64659607, 19263131}}, + }, + { + {{63957664, 28508356, 9282713, 6866145, 35201802, 32691408, 48168288, + 15033783, 25105118, 25659556}}, + {{42782475, 15950225, 35307649, 18961608, 55446126, 28463506, + 1573891, 30928545, 2198789, 17749813}}, + {{64009494, 10324966, 64867251, 7453182, 61661885, 30818928, 53296841, + 17317989, 34647629, 21263748}}, + }, +}; + +// r = a * A + b * B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +static void ge_double_scalarmult_vartime(ge_p2 *r, const uint8_t *a, + const ge_p3 *A, const uint8_t *b) { + signed char aslide[256]; + signed char bslide[256]; + ge_cached Ai[8]; // A,3A,5A,7A,9A,11A,13A,15A + ge_p1p1 t; + ge_p3 u; + ge_p3 A2; + int i; + + slide(aslide, a); + slide(bslide, b); + + x25519_ge_p3_to_cached(&Ai[0], A); + ge_p3_dbl(&t, A); + x25519_ge_p1p1_to_p3(&A2, &t); + x25519_ge_add(&t, &A2, &Ai[0]); + x25519_ge_p1p1_to_p3(&u, &t); + x25519_ge_p3_to_cached(&Ai[1], &u); + x25519_ge_add(&t, &A2, &Ai[1]); + x25519_ge_p1p1_to_p3(&u, &t); + x25519_ge_p3_to_cached(&Ai[2], &u); + x25519_ge_add(&t, &A2, &Ai[2]); + x25519_ge_p1p1_to_p3(&u, &t); + x25519_ge_p3_to_cached(&Ai[3], &u); + x25519_ge_add(&t, &A2, &Ai[3]); + x25519_ge_p1p1_to_p3(&u, &t); + x25519_ge_p3_to_cached(&Ai[4], &u); + x25519_ge_add(&t, &A2, &Ai[4]); + x25519_ge_p1p1_to_p3(&u, &t); + x25519_ge_p3_to_cached(&Ai[5], &u); + x25519_ge_add(&t, &A2, &Ai[5]); + x25519_ge_p1p1_to_p3(&u, &t); + x25519_ge_p3_to_cached(&Ai[6], &u); + x25519_ge_add(&t, &A2, &Ai[6]); + x25519_ge_p1p1_to_p3(&u, &t); + x25519_ge_p3_to_cached(&Ai[7], &u); + + ge_p2_0(r); + + for (i = 255; i >= 0; --i) { + if (aslide[i] || bslide[i]) { + break; + } + } + + for (; i >= 0; --i) { + ge_p2_dbl(&t, r); + + if (aslide[i] > 0) { + x25519_ge_p1p1_to_p3(&u, &t); + x25519_ge_add(&t, &u, &Ai[aslide[i] / 2]); + } else if (aslide[i] < 0) { + x25519_ge_p1p1_to_p3(&u, &t); + x25519_ge_sub(&t, &u, &Ai[(-aslide[i]) / 2]); + } + + if (bslide[i] > 0) { + x25519_ge_p1p1_to_p3(&u, &t); + ge_madd(&t, &u, &Bi[bslide[i] / 2]); + } else if (bslide[i] < 0) { + x25519_ge_p1p1_to_p3(&u, &t); + ge_msub(&t, &u, &Bi[(-bslide[i]) / 2]); + } + + x25519_ge_p1p1_to_p2(r, &t); + } +} + +// The set of scalars is \Z/l +// where l = 2^252 + 27742317777372353535851937790883648493. + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +// Overwrites s in place. 
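As a reading aid for the reduction below: the 512-bit input is split into radix-2^21 limbs (hence the recurring mask 2097151 = 2^21 - 1, with the top limb s23 left unmasked so it keeps the remaining high bits), and limbs s12 and above are folded down via the identity 2^252 = 666643 + 470296*2^21 + 654183*2^42 - 997805*2^63 + 136657*2^84 - 683901*2^105 (mod l), which is where the six repeated multipliers come from. A minimal sketch of the limb layout, assuming a 64-byte little-endian input; the helper name limb21 is illustrative only and not part of this file:

// Sketch only: limb k used by the reduction below is the 21-bit field that
// starts at bit 21*k of the little-endian input (valid for k = 0..22; the
// real code handles s23 separately, without the mask).
static int64_t limb21(const uint8_t s[64], int k) {
  int bit = 21 * k;
  uint64_t w = (uint64_t)s[bit / 8] | (uint64_t)s[bit / 8 + 1] << 8 |
               (uint64_t)s[bit / 8 + 2] << 16 | (uint64_t)s[bit / 8 + 3] << 24;
  return (int64_t)((w >> (bit % 8)) & 2097151);  // 2097151 == 2^21 - 1
}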
+void x25519_sc_reduce(uint8_t *s) { + int64_t s0 = 2097151 & load_3(s); + int64_t s1 = 2097151 & (load_4(s + 2) >> 5); + int64_t s2 = 2097151 & (load_3(s + 5) >> 2); + int64_t s3 = 2097151 & (load_4(s + 7) >> 7); + int64_t s4 = 2097151 & (load_4(s + 10) >> 4); + int64_t s5 = 2097151 & (load_3(s + 13) >> 1); + int64_t s6 = 2097151 & (load_4(s + 15) >> 6); + int64_t s7 = 2097151 & (load_3(s + 18) >> 3); + int64_t s8 = 2097151 & load_3(s + 21); + int64_t s9 = 2097151 & (load_4(s + 23) >> 5); + int64_t s10 = 2097151 & (load_3(s + 26) >> 2); + int64_t s11 = 2097151 & (load_4(s + 28) >> 7); + int64_t s12 = 2097151 & (load_4(s + 31) >> 4); + int64_t s13 = 2097151 & (load_3(s + 34) >> 1); + int64_t s14 = 2097151 & (load_4(s + 36) >> 6); + int64_t s15 = 2097151 & (load_3(s + 39) >> 3); + int64_t s16 = 2097151 & load_3(s + 42); + int64_t s17 = 2097151 & (load_4(s + 44) >> 5); + int64_t s18 = 2097151 & (load_3(s + 47) >> 2); + int64_t s19 = 2097151 & (load_4(s + 49) >> 7); + int64_t s20 = 2097151 & (load_4(s + 52) >> 4); + int64_t s21 = 2097151 & (load_3(s + 55) >> 1); + int64_t s22 = 2097151 & (load_4(s + 57) >> 6); + int64_t s23 = (load_4(s + 60) >> 3); + int64_t carry0; + int64_t carry1; + int64_t carry2; + int64_t carry3; + int64_t carry4; + int64_t carry5; + int64_t carry6; + int64_t carry7; + int64_t carry8; + int64_t carry9; + int64_t carry10; + int64_t carry11; + int64_t carry12; + int64_t carry13; + int64_t carry14; + int64_t carry15; + int64_t carry16; + + s11 += s23 * 666643; + s12 += s23 * 470296; + s13 += s23 * 654183; + s14 -= s23 * 997805; + s15 += s23 * 136657; + s16 -= s23 * 683901; + s23 = 0; + + s10 += s22 * 666643; + s11 += s22 * 470296; + s12 += s22 * 654183; + s13 -= s22 * 997805; + s14 += s22 * 136657; + s15 -= s22 * 683901; + s22 = 0; + + s9 += s21 * 666643; + s10 += s21 * 470296; + s11 += s21 * 654183; + s12 -= s21 * 997805; + s13 += s21 * 136657; + s14 -= s21 * 683901; + s21 = 0; + + s8 += s20 * 666643; + s9 += s20 * 470296; + s10 += s20 * 654183; + s11 -= s20 * 997805; + s12 += s20 * 136657; + s13 -= s20 * 683901; + s20 = 0; + + s7 += s19 * 666643; + s8 += s19 * 470296; + s9 += s19 * 654183; + s10 -= s19 * 997805; + s11 += s19 * 136657; + s12 -= s19 * 683901; + s19 = 0; + + s6 += s18 * 666643; + s7 += s18 * 470296; + s8 += s18 * 654183; + s9 -= s18 * 997805; + s10 += s18 * 136657; + s11 -= s18 * 683901; + s18 = 0; + + carry6 = (s6 + (1 << 20)) >> 21; + s7 += carry6; + s6 -= carry6 << 21; + carry8 = (s8 + (1 << 20)) >> 21; + s9 += carry8; + s8 -= carry8 << 21; + carry10 = (s10 + (1 << 20)) >> 21; + s11 += carry10; + s10 -= carry10 << 21; + carry12 = (s12 + (1 << 20)) >> 21; + s13 += carry12; + s12 -= carry12 << 21; + carry14 = (s14 + (1 << 20)) >> 21; + s15 += carry14; + s14 -= carry14 << 21; + carry16 = (s16 + (1 << 20)) >> 21; + s17 += carry16; + s16 -= carry16 << 21; + + carry7 = (s7 + (1 << 20)) >> 21; + s8 += carry7; + s7 -= carry7 << 21; + carry9 = (s9 + (1 << 20)) >> 21; + s10 += carry9; + s9 -= carry9 << 21; + carry11 = (s11 + (1 << 20)) >> 21; + s12 += carry11; + s11 -= carry11 << 21; + carry13 = (s13 + (1 << 20)) >> 21; + s14 += carry13; + s13 -= carry13 << 21; + carry15 = (s15 + (1 << 20)) >> 21; + s16 += carry15; + s15 -= carry15 << 21; + + s5 += s17 * 666643; + s6 += s17 * 470296; + s7 += s17 * 654183; + s8 -= s17 * 997805; + s9 += s17 * 136657; + s10 -= s17 * 683901; + s17 = 0; + + s4 += s16 * 666643; + s5 += s16 * 470296; + s6 += s16 * 654183; + s7 -= s16 * 997805; + s8 += s16 * 136657; + s9 -= s16 * 683901; + s16 = 0; + + s3 += s15 * 666643; + s4 += s15 * 
470296; + s5 += s15 * 654183; + s6 -= s15 * 997805; + s7 += s15 * 136657; + s8 -= s15 * 683901; + s15 = 0; + + s2 += s14 * 666643; + s3 += s14 * 470296; + s4 += s14 * 654183; + s5 -= s14 * 997805; + s6 += s14 * 136657; + s7 -= s14 * 683901; + s14 = 0; + + s1 += s13 * 666643; + s2 += s13 * 470296; + s3 += s13 * 654183; + s4 -= s13 * 997805; + s5 += s13 * 136657; + s6 -= s13 * 683901; + s13 = 0; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = (s0 + (1 << 20)) >> 21; + s1 += carry0; + s0 -= carry0 << 21; + carry2 = (s2 + (1 << 20)) >> 21; + s3 += carry2; + s2 -= carry2 << 21; + carry4 = (s4 + (1 << 20)) >> 21; + s5 += carry4; + s4 -= carry4 << 21; + carry6 = (s6 + (1 << 20)) >> 21; + s7 += carry6; + s6 -= carry6 << 21; + carry8 = (s8 + (1 << 20)) >> 21; + s9 += carry8; + s8 -= carry8 << 21; + carry10 = (s10 + (1 << 20)) >> 21; + s11 += carry10; + s10 -= carry10 << 21; + + carry1 = (s1 + (1 << 20)) >> 21; + s2 += carry1; + s1 -= carry1 << 21; + carry3 = (s3 + (1 << 20)) >> 21; + s4 += carry3; + s3 -= carry3 << 21; + carry5 = (s5 + (1 << 20)) >> 21; + s6 += carry5; + s5 -= carry5 << 21; + carry7 = (s7 + (1 << 20)) >> 21; + s8 += carry7; + s7 -= carry7 << 21; + carry9 = (s9 + (1 << 20)) >> 21; + s10 += carry9; + s9 -= carry9 << 21; + carry11 = (s11 + (1 << 20)) >> 21; + s12 += carry11; + s11 -= carry11 << 21; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = s0 >> 21; + s1 += carry0; + s0 -= carry0 << 21; + carry1 = s1 >> 21; + s2 += carry1; + s1 -= carry1 << 21; + carry2 = s2 >> 21; + s3 += carry2; + s2 -= carry2 << 21; + carry3 = s3 >> 21; + s4 += carry3; + s3 -= carry3 << 21; + carry4 = s4 >> 21; + s5 += carry4; + s4 -= carry4 << 21; + carry5 = s5 >> 21; + s6 += carry5; + s5 -= carry5 << 21; + carry6 = s6 >> 21; + s7 += carry6; + s6 -= carry6 << 21; + carry7 = s7 >> 21; + s8 += carry7; + s7 -= carry7 << 21; + carry8 = s8 >> 21; + s9 += carry8; + s8 -= carry8 << 21; + carry9 = s9 >> 21; + s10 += carry9; + s9 -= carry9 << 21; + carry10 = s10 >> 21; + s11 += carry10; + s10 -= carry10 << 21; + carry11 = s11 >> 21; + s12 += carry11; + s11 -= carry11 << 21; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = s0 >> 21; + s1 += carry0; + s0 -= carry0 << 21; + carry1 = s1 >> 21; + s2 += carry1; + s1 -= carry1 << 21; + carry2 = s2 >> 21; + s3 += carry2; + s2 -= carry2 << 21; + carry3 = s3 >> 21; + s4 += carry3; + s3 -= carry3 << 21; + carry4 = s4 >> 21; + s5 += carry4; + s4 -= carry4 << 21; + carry5 = s5 >> 21; + s6 += carry5; + s5 -= carry5 << 21; + carry6 = s6 >> 21; + s7 += carry6; + s6 -= carry6 << 21; + carry7 = s7 >> 21; + s8 += carry7; + s7 -= carry7 << 21; + carry8 = s8 >> 21; + s9 += carry8; + s8 -= carry8 << 21; + carry9 = s9 >> 21; + s10 += carry9; + s9 -= carry9 << 21; + carry10 = s10 >> 21; + s11 += carry10; + s10 -= carry10 << 21; + + s[0] = s0 >> 0; + s[1] = s0 >> 8; + s[2] = (s0 >> 16) | (s1 << 5); + s[3] = s1 >> 3; + s[4] = s1 >> 11; + s[5] = (s1 >> 19) | (s2 << 2); + s[6] = s2 >> 6; + s[7] = (s2 >> 14) | (s3 << 7); + s[8] = s3 >> 1; + s[9] = s3 >> 9; + s[10] = (s3 >> 17) | (s4 << 4); + s[11] = s4 >> 4; + s[12] = s4 >> 12; + s[13] = (s4 >> 20) | (s5 << 1); + s[14] = s5 >> 7; + s[15] = (s5 >> 15) | (s6 << 6); + s[16] = s6 >> 2; + s[17] = s6 >> 10; + s[18] = (s6 >> 18) 
| (s7 << 3); + s[19] = s7 >> 5; + s[20] = s7 >> 13; + s[21] = s8 >> 0; + s[22] = s8 >> 8; + s[23] = (s8 >> 16) | (s9 << 5); + s[24] = s9 >> 3; + s[25] = s9 >> 11; + s[26] = (s9 >> 19) | (s10 << 2); + s[27] = s10 >> 6; + s[28] = (s10 >> 14) | (s11 << 7); + s[29] = s11 >> 1; + s[30] = s11 >> 9; + s[31] = s11 >> 17; +} + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +static void sc_muladd(uint8_t *s, const uint8_t *a, const uint8_t *b, + const uint8_t *c) { + int64_t a0 = 2097151 & load_3(a); + int64_t a1 = 2097151 & (load_4(a + 2) >> 5); + int64_t a2 = 2097151 & (load_3(a + 5) >> 2); + int64_t a3 = 2097151 & (load_4(a + 7) >> 7); + int64_t a4 = 2097151 & (load_4(a + 10) >> 4); + int64_t a5 = 2097151 & (load_3(a + 13) >> 1); + int64_t a6 = 2097151 & (load_4(a + 15) >> 6); + int64_t a7 = 2097151 & (load_3(a + 18) >> 3); + int64_t a8 = 2097151 & load_3(a + 21); + int64_t a9 = 2097151 & (load_4(a + 23) >> 5); + int64_t a10 = 2097151 & (load_3(a + 26) >> 2); + int64_t a11 = (load_4(a + 28) >> 7); + int64_t b0 = 2097151 & load_3(b); + int64_t b1 = 2097151 & (load_4(b + 2) >> 5); + int64_t b2 = 2097151 & (load_3(b + 5) >> 2); + int64_t b3 = 2097151 & (load_4(b + 7) >> 7); + int64_t b4 = 2097151 & (load_4(b + 10) >> 4); + int64_t b5 = 2097151 & (load_3(b + 13) >> 1); + int64_t b6 = 2097151 & (load_4(b + 15) >> 6); + int64_t b7 = 2097151 & (load_3(b + 18) >> 3); + int64_t b8 = 2097151 & load_3(b + 21); + int64_t b9 = 2097151 & (load_4(b + 23) >> 5); + int64_t b10 = 2097151 & (load_3(b + 26) >> 2); + int64_t b11 = (load_4(b + 28) >> 7); + int64_t c0 = 2097151 & load_3(c); + int64_t c1 = 2097151 & (load_4(c + 2) >> 5); + int64_t c2 = 2097151 & (load_3(c + 5) >> 2); + int64_t c3 = 2097151 & (load_4(c + 7) >> 7); + int64_t c4 = 2097151 & (load_4(c + 10) >> 4); + int64_t c5 = 2097151 & (load_3(c + 13) >> 1); + int64_t c6 = 2097151 & (load_4(c + 15) >> 6); + int64_t c7 = 2097151 & (load_3(c + 18) >> 3); + int64_t c8 = 2097151 & load_3(c + 21); + int64_t c9 = 2097151 & (load_4(c + 23) >> 5); + int64_t c10 = 2097151 & (load_3(c + 26) >> 2); + int64_t c11 = (load_4(c + 28) >> 7); + int64_t s0; + int64_t s1; + int64_t s2; + int64_t s3; + int64_t s4; + int64_t s5; + int64_t s6; + int64_t s7; + int64_t s8; + int64_t s9; + int64_t s10; + int64_t s11; + int64_t s12; + int64_t s13; + int64_t s14; + int64_t s15; + int64_t s16; + int64_t s17; + int64_t s18; + int64_t s19; + int64_t s20; + int64_t s21; + int64_t s22; + int64_t s23; + int64_t carry0; + int64_t carry1; + int64_t carry2; + int64_t carry3; + int64_t carry4; + int64_t carry5; + int64_t carry6; + int64_t carry7; + int64_t carry8; + int64_t carry9; + int64_t carry10; + int64_t carry11; + int64_t carry12; + int64_t carry13; + int64_t carry14; + int64_t carry15; + int64_t carry16; + int64_t carry17; + int64_t carry18; + int64_t carry19; + int64_t carry20; + int64_t carry21; + int64_t carry22; + + s0 = c0 + a0 * b0; + s1 = c1 + a0 * b1 + a1 * b0; + s2 = c2 + a0 * b2 + a1 * b1 + a2 * b0; + s3 = c3 + a0 * b3 + a1 * b2 + a2 * b1 + a3 * b0; + s4 = c4 + a0 * b4 + a1 * b3 + a2 * b2 + a3 * b1 + a4 * b0; + s5 = c5 + a0 * b5 + a1 * b4 + a2 * b3 + a3 * b2 + a4 * b1 + a5 * b0; + s6 = c6 + a0 * b6 + a1 * b5 + a2 * b4 + a3 * b3 + a4 * b2 + a5 * b1 + a6 * b0; + s7 = c7 + a0 * b7 + a1 * b6 + a2 * b5 + a3 * b4 + a4 * b3 + a5 * b2 + + a6 * b1 + a7 * b0; + s8 = c8 + a0 * 
b8 + a1 * b7 + a2 * b6 + a3 * b5 + a4 * b4 + a5 * b3 + + a6 * b2 + a7 * b1 + a8 * b0; + s9 = c9 + a0 * b9 + a1 * b8 + a2 * b7 + a3 * b6 + a4 * b5 + a5 * b4 + + a6 * b3 + a7 * b2 + a8 * b1 + a9 * b0; + s10 = c10 + a0 * b10 + a1 * b9 + a2 * b8 + a3 * b7 + a4 * b6 + a5 * b5 + + a6 * b4 + a7 * b3 + a8 * b2 + a9 * b1 + a10 * b0; + s11 = c11 + a0 * b11 + a1 * b10 + a2 * b9 + a3 * b8 + a4 * b7 + a5 * b6 + + a6 * b5 + a7 * b4 + a8 * b3 + a9 * b2 + a10 * b1 + a11 * b0; + s12 = a1 * b11 + a2 * b10 + a3 * b9 + a4 * b8 + a5 * b7 + a6 * b6 + a7 * b5 + + a8 * b4 + a9 * b3 + a10 * b2 + a11 * b1; + s13 = a2 * b11 + a3 * b10 + a4 * b9 + a5 * b8 + a6 * b7 + a7 * b6 + a8 * b5 + + a9 * b4 + a10 * b3 + a11 * b2; + s14 = a3 * b11 + a4 * b10 + a5 * b9 + a6 * b8 + a7 * b7 + a8 * b6 + a9 * b5 + + a10 * b4 + a11 * b3; + s15 = a4 * b11 + a5 * b10 + a6 * b9 + a7 * b8 + a8 * b7 + a9 * b6 + a10 * b5 + + a11 * b4; + s16 = a5 * b11 + a6 * b10 + a7 * b9 + a8 * b8 + a9 * b7 + a10 * b6 + a11 * b5; + s17 = a6 * b11 + a7 * b10 + a8 * b9 + a9 * b8 + a10 * b7 + a11 * b6; + s18 = a7 * b11 + a8 * b10 + a9 * b9 + a10 * b8 + a11 * b7; + s19 = a8 * b11 + a9 * b10 + a10 * b9 + a11 * b8; + s20 = a9 * b11 + a10 * b10 + a11 * b9; + s21 = a10 * b11 + a11 * b10; + s22 = a11 * b11; + s23 = 0; + + carry0 = (s0 + (1 << 20)) >> 21; + s1 += carry0; + s0 -= carry0 << 21; + carry2 = (s2 + (1 << 20)) >> 21; + s3 += carry2; + s2 -= carry2 << 21; + carry4 = (s4 + (1 << 20)) >> 21; + s5 += carry4; + s4 -= carry4 << 21; + carry6 = (s6 + (1 << 20)) >> 21; + s7 += carry6; + s6 -= carry6 << 21; + carry8 = (s8 + (1 << 20)) >> 21; + s9 += carry8; + s8 -= carry8 << 21; + carry10 = (s10 + (1 << 20)) >> 21; + s11 += carry10; + s10 -= carry10 << 21; + carry12 = (s12 + (1 << 20)) >> 21; + s13 += carry12; + s12 -= carry12 << 21; + carry14 = (s14 + (1 << 20)) >> 21; + s15 += carry14; + s14 -= carry14 << 21; + carry16 = (s16 + (1 << 20)) >> 21; + s17 += carry16; + s16 -= carry16 << 21; + carry18 = (s18 + (1 << 20)) >> 21; + s19 += carry18; + s18 -= carry18 << 21; + carry20 = (s20 + (1 << 20)) >> 21; + s21 += carry20; + s20 -= carry20 << 21; + carry22 = (s22 + (1 << 20)) >> 21; + s23 += carry22; + s22 -= carry22 << 21; + + carry1 = (s1 + (1 << 20)) >> 21; + s2 += carry1; + s1 -= carry1 << 21; + carry3 = (s3 + (1 << 20)) >> 21; + s4 += carry3; + s3 -= carry3 << 21; + carry5 = (s5 + (1 << 20)) >> 21; + s6 += carry5; + s5 -= carry5 << 21; + carry7 = (s7 + (1 << 20)) >> 21; + s8 += carry7; + s7 -= carry7 << 21; + carry9 = (s9 + (1 << 20)) >> 21; + s10 += carry9; + s9 -= carry9 << 21; + carry11 = (s11 + (1 << 20)) >> 21; + s12 += carry11; + s11 -= carry11 << 21; + carry13 = (s13 + (1 << 20)) >> 21; + s14 += carry13; + s13 -= carry13 << 21; + carry15 = (s15 + (1 << 20)) >> 21; + s16 += carry15; + s15 -= carry15 << 21; + carry17 = (s17 + (1 << 20)) >> 21; + s18 += carry17; + s17 -= carry17 << 21; + carry19 = (s19 + (1 << 20)) >> 21; + s20 += carry19; + s19 -= carry19 << 21; + carry21 = (s21 + (1 << 20)) >> 21; + s22 += carry21; + s21 -= carry21 << 21; + + s11 += s23 * 666643; + s12 += s23 * 470296; + s13 += s23 * 654183; + s14 -= s23 * 997805; + s15 += s23 * 136657; + s16 -= s23 * 683901; + s23 = 0; + + s10 += s22 * 666643; + s11 += s22 * 470296; + s12 += s22 * 654183; + s13 -= s22 * 997805; + s14 += s22 * 136657; + s15 -= s22 * 683901; + s22 = 0; + + s9 += s21 * 666643; + s10 += s21 * 470296; + s11 += s21 * 654183; + s12 -= s21 * 997805; + s13 += s21 * 136657; + s14 -= s21 * 683901; + s21 = 0; + + s8 += s20 * 666643; + s9 += s20 * 470296; + s10 += s20 * 654183; + s11 -= 
s20 * 997805; + s12 += s20 * 136657; + s13 -= s20 * 683901; + s20 = 0; + + s7 += s19 * 666643; + s8 += s19 * 470296; + s9 += s19 * 654183; + s10 -= s19 * 997805; + s11 += s19 * 136657; + s12 -= s19 * 683901; + s19 = 0; + + s6 += s18 * 666643; + s7 += s18 * 470296; + s8 += s18 * 654183; + s9 -= s18 * 997805; + s10 += s18 * 136657; + s11 -= s18 * 683901; + s18 = 0; + + carry6 = (s6 + (1 << 20)) >> 21; + s7 += carry6; + s6 -= carry6 << 21; + carry8 = (s8 + (1 << 20)) >> 21; + s9 += carry8; + s8 -= carry8 << 21; + carry10 = (s10 + (1 << 20)) >> 21; + s11 += carry10; + s10 -= carry10 << 21; + carry12 = (s12 + (1 << 20)) >> 21; + s13 += carry12; + s12 -= carry12 << 21; + carry14 = (s14 + (1 << 20)) >> 21; + s15 += carry14; + s14 -= carry14 << 21; + carry16 = (s16 + (1 << 20)) >> 21; + s17 += carry16; + s16 -= carry16 << 21; + + carry7 = (s7 + (1 << 20)) >> 21; + s8 += carry7; + s7 -= carry7 << 21; + carry9 = (s9 + (1 << 20)) >> 21; + s10 += carry9; + s9 -= carry9 << 21; + carry11 = (s11 + (1 << 20)) >> 21; + s12 += carry11; + s11 -= carry11 << 21; + carry13 = (s13 + (1 << 20)) >> 21; + s14 += carry13; + s13 -= carry13 << 21; + carry15 = (s15 + (1 << 20)) >> 21; + s16 += carry15; + s15 -= carry15 << 21; + + s5 += s17 * 666643; + s6 += s17 * 470296; + s7 += s17 * 654183; + s8 -= s17 * 997805; + s9 += s17 * 136657; + s10 -= s17 * 683901; + s17 = 0; + + s4 += s16 * 666643; + s5 += s16 * 470296; + s6 += s16 * 654183; + s7 -= s16 * 997805; + s8 += s16 * 136657; + s9 -= s16 * 683901; + s16 = 0; + + s3 += s15 * 666643; + s4 += s15 * 470296; + s5 += s15 * 654183; + s6 -= s15 * 997805; + s7 += s15 * 136657; + s8 -= s15 * 683901; + s15 = 0; + + s2 += s14 * 666643; + s3 += s14 * 470296; + s4 += s14 * 654183; + s5 -= s14 * 997805; + s6 += s14 * 136657; + s7 -= s14 * 683901; + s14 = 0; + + s1 += s13 * 666643; + s2 += s13 * 470296; + s3 += s13 * 654183; + s4 -= s13 * 997805; + s5 += s13 * 136657; + s6 -= s13 * 683901; + s13 = 0; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = (s0 + (1 << 20)) >> 21; + s1 += carry0; + s0 -= carry0 << 21; + carry2 = (s2 + (1 << 20)) >> 21; + s3 += carry2; + s2 -= carry2 << 21; + carry4 = (s4 + (1 << 20)) >> 21; + s5 += carry4; + s4 -= carry4 << 21; + carry6 = (s6 + (1 << 20)) >> 21; + s7 += carry6; + s6 -= carry6 << 21; + carry8 = (s8 + (1 << 20)) >> 21; + s9 += carry8; + s8 -= carry8 << 21; + carry10 = (s10 + (1 << 20)) >> 21; + s11 += carry10; + s10 -= carry10 << 21; + + carry1 = (s1 + (1 << 20)) >> 21; + s2 += carry1; + s1 -= carry1 << 21; + carry3 = (s3 + (1 << 20)) >> 21; + s4 += carry3; + s3 -= carry3 << 21; + carry5 = (s5 + (1 << 20)) >> 21; + s6 += carry5; + s5 -= carry5 << 21; + carry7 = (s7 + (1 << 20)) >> 21; + s8 += carry7; + s7 -= carry7 << 21; + carry9 = (s9 + (1 << 20)) >> 21; + s10 += carry9; + s9 -= carry9 << 21; + carry11 = (s11 + (1 << 20)) >> 21; + s12 += carry11; + s11 -= carry11 << 21; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = s0 >> 21; + s1 += carry0; + s0 -= carry0 << 21; + carry1 = s1 >> 21; + s2 += carry1; + s1 -= carry1 << 21; + carry2 = s2 >> 21; + s3 += carry2; + s2 -= carry2 << 21; + carry3 = s3 >> 21; + s4 += carry3; + s3 -= carry3 << 21; + carry4 = s4 >> 21; + s5 += carry4; + s4 -= carry4 << 21; + carry5 = s5 >> 21; + s6 += carry5; + s5 -= carry5 << 21; + carry6 = s6 >> 21; + s7 += carry6; + s6 -= carry6 << 21; + carry7 = s7 
>> 21; + s8 += carry7; + s7 -= carry7 << 21; + carry8 = s8 >> 21; + s9 += carry8; + s8 -= carry8 << 21; + carry9 = s9 >> 21; + s10 += carry9; + s9 -= carry9 << 21; + carry10 = s10 >> 21; + s11 += carry10; + s10 -= carry10 << 21; + carry11 = s11 >> 21; + s12 += carry11; + s11 -= carry11 << 21; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = s0 >> 21; + s1 += carry0; + s0 -= carry0 << 21; + carry1 = s1 >> 21; + s2 += carry1; + s1 -= carry1 << 21; + carry2 = s2 >> 21; + s3 += carry2; + s2 -= carry2 << 21; + carry3 = s3 >> 21; + s4 += carry3; + s3 -= carry3 << 21; + carry4 = s4 >> 21; + s5 += carry4; + s4 -= carry4 << 21; + carry5 = s5 >> 21; + s6 += carry5; + s5 -= carry5 << 21; + carry6 = s6 >> 21; + s7 += carry6; + s6 -= carry6 << 21; + carry7 = s7 >> 21; + s8 += carry7; + s7 -= carry7 << 21; + carry8 = s8 >> 21; + s9 += carry8; + s8 -= carry8 << 21; + carry9 = s9 >> 21; + s10 += carry9; + s9 -= carry9 << 21; + carry10 = s10 >> 21; + s11 += carry10; + s10 -= carry10 << 21; + + s[0] = s0 >> 0; + s[1] = s0 >> 8; + s[2] = (s0 >> 16) | (s1 << 5); + s[3] = s1 >> 3; + s[4] = s1 >> 11; + s[5] = (s1 >> 19) | (s2 << 2); + s[6] = s2 >> 6; + s[7] = (s2 >> 14) | (s3 << 7); + s[8] = s3 >> 1; + s[9] = s3 >> 9; + s[10] = (s3 >> 17) | (s4 << 4); + s[11] = s4 >> 4; + s[12] = s4 >> 12; + s[13] = (s4 >> 20) | (s5 << 1); + s[14] = s5 >> 7; + s[15] = (s5 >> 15) | (s6 << 6); + s[16] = s6 >> 2; + s[17] = s6 >> 10; + s[18] = (s6 >> 18) | (s7 << 3); + s[19] = s7 >> 5; + s[20] = s7 >> 13; + s[21] = s8 >> 0; + s[22] = s8 >> 8; + s[23] = (s8 >> 16) | (s9 << 5); + s[24] = s9 >> 3; + s[25] = s9 >> 11; + s[26] = (s9 >> 19) | (s10 << 2); + s[27] = s10 >> 6; + s[28] = (s10 >> 14) | (s11 << 7); + s[29] = s11 >> 1; + s[30] = s11 >> 9; + s[31] = s11 >> 17; +} + +void ED25519_keypair(uint8_t out_public_key[32], uint8_t out_private_key[64]) { + uint8_t seed[32]; + RAND_bytes(seed, 32); + ED25519_keypair_from_seed(out_public_key, out_private_key, seed); +} + +int ED25519_sign(uint8_t *out_sig, const uint8_t *message, size_t message_len, + const uint8_t private_key[64]) { + uint8_t az[SHA512_DIGEST_LENGTH]; + SHA512(private_key, 32, az); + + az[0] &= 248; + az[31] &= 63; + az[31] |= 64; + + SHA512_CTX hash_ctx; + SHA512_Init(&hash_ctx); + SHA512_Update(&hash_ctx, az + 32, 32); + SHA512_Update(&hash_ctx, message, message_len); + uint8_t nonce[SHA512_DIGEST_LENGTH]; + SHA512_Final(nonce, &hash_ctx); + + x25519_sc_reduce(nonce); + ge_p3 R; + x25519_ge_scalarmult_base(&R, nonce); + ge_p3_tobytes(out_sig, &R); + + SHA512_Init(&hash_ctx); + SHA512_Update(&hash_ctx, out_sig, 32); + SHA512_Update(&hash_ctx, private_key + 32, 32); + SHA512_Update(&hash_ctx, message, message_len); + uint8_t hram[SHA512_DIGEST_LENGTH]; + SHA512_Final(hram, &hash_ctx); + + x25519_sc_reduce(hram); + sc_muladd(out_sig + 32, hram, az, nonce); + + return 1; +} + +int ED25519_verify(const uint8_t *message, size_t message_len, + const uint8_t signature[64], const uint8_t public_key[32]) { + ge_p3 A; + if ((signature[63] & 224) != 0 || + x25519_ge_frombytes_vartime(&A, public_key) != 0) { + return 0; + } + + fe_loose t; + fe_neg(&t, &A.X); + fe_carry(&A.X, &t); + fe_neg(&t, &A.T); + fe_carry(&A.T, &t); + + uint8_t pkcopy[32]; + OPENSSL_memcpy(pkcopy, public_key, 32); + uint8_t rcopy[32]; + OPENSSL_memcpy(rcopy, signature, 32); + uint8_t scopy[32]; + OPENSSL_memcpy(scopy, signature + 32, 32); + + SHA512_CTX hash_ctx; + SHA512_Init(&hash_ctx); + 
SHA512_Update(&hash_ctx, signature, 32); + SHA512_Update(&hash_ctx, public_key, 32); + SHA512_Update(&hash_ctx, message, message_len); + uint8_t h[SHA512_DIGEST_LENGTH]; + SHA512_Final(h, &hash_ctx); + + x25519_sc_reduce(h); + + ge_p2 R; + ge_double_scalarmult_vartime(&R, h, &A, scopy); + + uint8_t rcheck[32]; + x25519_ge_tobytes(rcheck, &R); + + return CRYPTO_memcmp(rcheck, rcopy, sizeof(rcheck)) == 0; +} + +void ED25519_keypair_from_seed(uint8_t out_public_key[32], + uint8_t out_private_key[64], + const uint8_t seed[32]) { + uint8_t az[SHA512_DIGEST_LENGTH]; + SHA512(seed, 32, az); + + az[0] &= 248; + az[31] &= 63; + az[31] |= 64; + + ge_p3 A; + x25519_ge_scalarmult_base(&A, az); + ge_p3_tobytes(out_public_key, &A); + + OPENSSL_memcpy(out_private_key, seed, 32); + OPENSSL_memcpy(out_private_key + 32, out_public_key, 32); +} + + +#if defined(BORINGSSL_X25519_X86_64) + +static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32], + const uint8_t point[32]) { + x25519_x86_64(out, scalar, point); +} + +#else + +// Replace (f,g) with (g,f) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +static void fe_cswap(fe *f, fe *g, unsigned int b) { + b = 0-b; + unsigned i; + for (i = 0; i < 10; i++) { + uint32_t x = f->v[i] ^ g->v[i]; + x &= b; + f->v[i] ^= x; + g->v[i] ^= x; + } +} + +// NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0.. +static void fe_mul_121666_impl(uint32_t out[10], const uint32_t in1[10]) { + assert_fe_loose(in1); + { const uint32_t x20 = in1[9]; + { const uint32_t x21 = in1[8]; + { const uint32_t x19 = in1[7]; + { const uint32_t x17 = in1[6]; + { const uint32_t x15 = in1[5]; + { const uint32_t x13 = in1[4]; + { const uint32_t x11 = in1[3]; + { const uint32_t x9 = in1[2]; + { const uint32_t x7 = in1[1]; + { const uint32_t x5 = in1[0]; + { const uint32_t x38 = 0; + { const uint32_t x39 = 0; + { const uint32_t x37 = 0; + { const uint32_t x35 = 0; + { const uint32_t x33 = 0; + { const uint32_t x31 = 0; + { const uint32_t x29 = 0; + { const uint32_t x27 = 0; + { const uint32_t x25 = 0; + { const uint32_t x23 = 121666; + { uint64_t x40 = ((uint64_t)x23 * x5); + { uint64_t x41 = (((uint64_t)x23 * x7) + ((uint64_t)x25 * x5)); + { uint64_t x42 = ((((uint64_t)(0x2 * x25) * x7) + ((uint64_t)x23 * x9)) + ((uint64_t)x27 * x5)); + { uint64_t x43 = (((((uint64_t)x25 * x9) + ((uint64_t)x27 * x7)) + ((uint64_t)x23 * x11)) + ((uint64_t)x29 * x5)); + { uint64_t x44 = (((((uint64_t)x27 * x9) + (0x2 * (((uint64_t)x25 * x11) + ((uint64_t)x29 * x7)))) + ((uint64_t)x23 * x13)) + ((uint64_t)x31 * x5)); + { uint64_t x45 = (((((((uint64_t)x27 * x11) + ((uint64_t)x29 * x9)) + ((uint64_t)x25 * x13)) + ((uint64_t)x31 * x7)) + ((uint64_t)x23 * x15)) + ((uint64_t)x33 * x5)); + { uint64_t x46 = (((((0x2 * ((((uint64_t)x29 * x11) + ((uint64_t)x25 * x15)) + ((uint64_t)x33 * x7))) + ((uint64_t)x27 * x13)) + ((uint64_t)x31 * x9)) + ((uint64_t)x23 * x17)) + ((uint64_t)x35 * x5)); + { uint64_t x47 = (((((((((uint64_t)x29 * x13) + ((uint64_t)x31 * x11)) + ((uint64_t)x27 * x15)) + ((uint64_t)x33 * x9)) + ((uint64_t)x25 * x17)) + ((uint64_t)x35 * x7)) + ((uint64_t)x23 * x19)) + ((uint64_t)x37 * x5)); + { uint64_t x48 = (((((((uint64_t)x31 * x13) + (0x2 * (((((uint64_t)x29 * x15) + ((uint64_t)x33 * x11)) + ((uint64_t)x25 * x19)) + ((uint64_t)x37 * x7)))) + ((uint64_t)x27 * x17)) + ((uint64_t)x35 * x9)) + ((uint64_t)x23 * x21)) + ((uint64_t)x39 * x5)); + { uint64_t x49 = (((((((((((uint64_t)x31 * x15) + ((uint64_t)x33 * x13)) + ((uint64_t)x29 * x17)) + 
((uint64_t)x35 * x11)) + ((uint64_t)x27 * x19)) + ((uint64_t)x37 * x9)) + ((uint64_t)x25 * x21)) + ((uint64_t)x39 * x7)) + ((uint64_t)x23 * x20)) + ((uint64_t)x38 * x5)); + { uint64_t x50 = (((((0x2 * ((((((uint64_t)x33 * x15) + ((uint64_t)x29 * x19)) + ((uint64_t)x37 * x11)) + ((uint64_t)x25 * x20)) + ((uint64_t)x38 * x7))) + ((uint64_t)x31 * x17)) + ((uint64_t)x35 * x13)) + ((uint64_t)x27 * x21)) + ((uint64_t)x39 * x9)); + { uint64_t x51 = (((((((((uint64_t)x33 * x17) + ((uint64_t)x35 * x15)) + ((uint64_t)x31 * x19)) + ((uint64_t)x37 * x13)) + ((uint64_t)x29 * x21)) + ((uint64_t)x39 * x11)) + ((uint64_t)x27 * x20)) + ((uint64_t)x38 * x9)); + { uint64_t x52 = (((((uint64_t)x35 * x17) + (0x2 * (((((uint64_t)x33 * x19) + ((uint64_t)x37 * x15)) + ((uint64_t)x29 * x20)) + ((uint64_t)x38 * x11)))) + ((uint64_t)x31 * x21)) + ((uint64_t)x39 * x13)); + { uint64_t x53 = (((((((uint64_t)x35 * x19) + ((uint64_t)x37 * x17)) + ((uint64_t)x33 * x21)) + ((uint64_t)x39 * x15)) + ((uint64_t)x31 * x20)) + ((uint64_t)x38 * x13)); + { uint64_t x54 = (((0x2 * ((((uint64_t)x37 * x19) + ((uint64_t)x33 * x20)) + ((uint64_t)x38 * x15))) + ((uint64_t)x35 * x21)) + ((uint64_t)x39 * x17)); + { uint64_t x55 = (((((uint64_t)x37 * x21) + ((uint64_t)x39 * x19)) + ((uint64_t)x35 * x20)) + ((uint64_t)x38 * x17)); + { uint64_t x56 = (((uint64_t)x39 * x21) + (0x2 * (((uint64_t)x37 * x20) + ((uint64_t)x38 * x19)))); + { uint64_t x57 = (((uint64_t)x39 * x20) + ((uint64_t)x38 * x21)); + { uint64_t x58 = ((uint64_t)(0x2 * x38) * x20); + { uint64_t x59 = (x48 + (x58 << 0x4)); + { uint64_t x60 = (x59 + (x58 << 0x1)); + { uint64_t x61 = (x60 + x58); + { uint64_t x62 = (x47 + (x57 << 0x4)); + { uint64_t x63 = (x62 + (x57 << 0x1)); + { uint64_t x64 = (x63 + x57); + { uint64_t x65 = (x46 + (x56 << 0x4)); + { uint64_t x66 = (x65 + (x56 << 0x1)); + { uint64_t x67 = (x66 + x56); + { uint64_t x68 = (x45 + (x55 << 0x4)); + { uint64_t x69 = (x68 + (x55 << 0x1)); + { uint64_t x70 = (x69 + x55); + { uint64_t x71 = (x44 + (x54 << 0x4)); + { uint64_t x72 = (x71 + (x54 << 0x1)); + { uint64_t x73 = (x72 + x54); + { uint64_t x74 = (x43 + (x53 << 0x4)); + { uint64_t x75 = (x74 + (x53 << 0x1)); + { uint64_t x76 = (x75 + x53); + { uint64_t x77 = (x42 + (x52 << 0x4)); + { uint64_t x78 = (x77 + (x52 << 0x1)); + { uint64_t x79 = (x78 + x52); + { uint64_t x80 = (x41 + (x51 << 0x4)); + { uint64_t x81 = (x80 + (x51 << 0x1)); + { uint64_t x82 = (x81 + x51); + { uint64_t x83 = (x40 + (x50 << 0x4)); + { uint64_t x84 = (x83 + (x50 << 0x1)); + { uint64_t x85 = (x84 + x50); + { uint64_t x86 = (x85 >> 0x1a); + { uint32_t x87 = ((uint32_t)x85 & 0x3ffffff); + { uint64_t x88 = (x86 + x82); + { uint64_t x89 = (x88 >> 0x19); + { uint32_t x90 = ((uint32_t)x88 & 0x1ffffff); + { uint64_t x91 = (x89 + x79); + { uint64_t x92 = (x91 >> 0x1a); + { uint32_t x93 = ((uint32_t)x91 & 0x3ffffff); + { uint64_t x94 = (x92 + x76); + { uint64_t x95 = (x94 >> 0x19); + { uint32_t x96 = ((uint32_t)x94 & 0x1ffffff); + { uint64_t x97 = (x95 + x73); + { uint64_t x98 = (x97 >> 0x1a); + { uint32_t x99 = ((uint32_t)x97 & 0x3ffffff); + { uint64_t x100 = (x98 + x70); + { uint64_t x101 = (x100 >> 0x19); + { uint32_t x102 = ((uint32_t)x100 & 0x1ffffff); + { uint64_t x103 = (x101 + x67); + { uint64_t x104 = (x103 >> 0x1a); + { uint32_t x105 = ((uint32_t)x103 & 0x3ffffff); + { uint64_t x106 = (x104 + x64); + { uint64_t x107 = (x106 >> 0x19); + { uint32_t x108 = ((uint32_t)x106 & 0x1ffffff); + { uint64_t x109 = (x107 + x61); + { uint64_t x110 = (x109 >> 0x1a); + { uint32_t x111 = ((uint32_t)x109 & 
0x3ffffff); + { uint64_t x112 = (x110 + x49); + { uint64_t x113 = (x112 >> 0x19); + { uint32_t x114 = ((uint32_t)x112 & 0x1ffffff); + { uint64_t x115 = (x87 + (0x13 * x113)); + { uint32_t x116 = (uint32_t) (x115 >> 0x1a); + { uint32_t x117 = ((uint32_t)x115 & 0x3ffffff); + { uint32_t x118 = (x116 + x90); + { uint32_t x119 = (x118 >> 0x19); + { uint32_t x120 = (x118 & 0x1ffffff); + out[0] = x117; + out[1] = x120; + out[2] = (x119 + x93); + out[3] = x96; + out[4] = x99; + out[5] = x102; + out[6] = x105; + out[7] = x108; + out[8] = x111; + out[9] = x114; + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} + assert_fe(out); +} + +static void fe_mul121666(fe *h, const fe_loose *f) { + assert_fe_loose(f->v); + fe_mul_121666_impl(h->v, f->v); + assert_fe(h->v); +} + +static void x25519_scalar_mult_generic(uint8_t out[32], + const uint8_t scalar[32], + const uint8_t point[32]) { + fe x1, x2, z2, x3, z3, tmp0, tmp1; + fe_loose x2l, z2l, x3l, tmp0l, tmp1l; + + uint8_t e[32]; + OPENSSL_memcpy(e, scalar, 32); + e[0] &= 248; + e[31] &= 127; + e[31] |= 64; + fe_frombytes(&x1, point); + fe_1(&x2); + fe_0(&z2); + fe_copy(&x3, &x1); + fe_1(&z3); + + unsigned swap = 0; + int pos; + for (pos = 254; pos >= 0; --pos) { + unsigned b = 1 & (e[pos / 8] >> (pos & 7)); + swap ^= b; + fe_cswap(&x2, &x3, swap); + fe_cswap(&z2, &z3, swap); + swap = b; + fe_sub(&tmp0l, &x3, &z3); + fe_sub(&tmp1l, &x2, &z2); + fe_add(&x2l, &x2, &z2); + fe_add(&z2l, &x3, &z3); + fe_mul_tll(&z3, &tmp0l, &x2l); + fe_mul_tll(&z2, &z2l, &tmp1l); + fe_sq_tl(&tmp0, &tmp1l); + fe_sq_tl(&tmp1, &x2l); + fe_add(&x3l, &z3, &z2); + fe_sub(&z2l, &z3, &z2); + fe_mul_ttt(&x2, &tmp1, &tmp0); + fe_sub(&tmp1l, &tmp1, &tmp0); + fe_sq_tl(&z2, &z2l); + fe_mul121666(&z3, &tmp1l); + fe_sq_tl(&x3, &x3l); + fe_add(&tmp0l, &tmp0, &z3); + fe_mul_ttt(&z3, &x1, &z2); + fe_mul_tll(&z2, &tmp1l, &tmp0l); + } + fe_cswap(&x2, &x3, swap); + fe_cswap(&z2, &z3, swap); + + fe_invert(&z2, &z2); + fe_mul_ttt(&x2, &x2, &z2); + fe_tobytes(out, &x2); +} + +static void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32], + const uint8_t point[32]) { +#if defined(BORINGSSL_X25519_NEON) + if (CRYPTO_is_NEON_capable()) { + x25519_NEON(out, scalar, point); + return; + } +#endif + + x25519_scalar_mult_generic(out, scalar, point); +} + +#endif // BORINGSSL_X25519_X86_64 + + +void X25519_keypair(uint8_t out_public_value[32], uint8_t out_private_key[32]) { + RAND_bytes(out_private_key, 32); + + // All X25519 implementations should decode scalars correctly (see + // https://tools.ietf.org/html/rfc7748#section-5). However, if an + // implementation doesn't then it might interoperate with random keys a + // fraction of the time because they'll, randomly, happen to be correctly + // formed. + // + // Thus we do the opposite of the masking here to make sure that our private + // keys are never correctly masked and so, hopefully, any incorrect + // implementations are deterministically broken. + // + // This does not affect security because, although we're throwing away + // entropy, a valid implementation of scalarmult should throw away the exact + // same bits anyway. 
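As a quick check of the claim above, the standalone sketch below verifies that clamping a deliberately mis-masked key recovers exactly the same scalar bits as clamping the raw key. The clamp helper mirrors the e[0] &= 248; e[31] &= 127; e[31] |= 64 masking used in x25519_scalar_mult_generic earlier in this file, anti_clamp mirrors the masks applied just below, and the test pattern stands in for RAND_bytes output; the helper names are illustrative, not part of the vendored code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// RFC 7748 clamping, as a correct scalar multiplication applies it.
static void clamp(uint8_t k[32]) {
  k[0] &= 248;
  k[31] &= 127;
  k[31] |= 64;
}

// The deliberate "anti-clamp" that X25519_keypair stores in fresh keys.
static void anti_clamp(uint8_t k[32]) {
  k[0] |= 7;
  k[31] &= 63;
  k[31] |= 128;
}

int main(void) {
  uint8_t raw[32], a[32], b[32];
  for (int i = 0; i < 32; i++) {
    raw[i] = (uint8_t)(i * 91 + 5);  // stand-in for RAND_bytes output
  }

  memcpy(a, raw, 32);
  clamp(a);                          // what a correct implementation computes

  memcpy(b, raw, 32);
  anti_clamp(b);                     // what X25519_keypair stores
  clamp(b);                          // what a correct peer does with it

  // Clamping discards exactly the bits that anti-clamping flipped, so the
  // effective scalar is unchanged; only broken implementations notice.
  assert(memcmp(a, b, 32) == 0);
  printf("clamp(anti_clamp(k)) == clamp(k)\n");
  return 0;
}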
+ out_private_key[0] |= 7; + out_private_key[31] &= 63; + out_private_key[31] |= 128; + + X25519_public_from_private(out_public_value, out_private_key); +} + +int X25519(uint8_t out_shared_key[32], const uint8_t private_key[32], + const uint8_t peer_public_value[32]) { + static const uint8_t kZeros[32] = {0}; + x25519_scalar_mult(out_shared_key, private_key, peer_public_value); + // The all-zero output results when the input is a point of small order. + return CRYPTO_memcmp(kZeros, out_shared_key, 32) != 0; +} + +#if defined(BORINGSSL_X25519_X86_64) + +// When |BORINGSSL_X25519_X86_64| is set, base point multiplication is done with +// the Montgomery ladder because it's faster. Otherwise it's done using the +// Ed25519 tables. + +void X25519_public_from_private(uint8_t out_public_value[32], + const uint8_t private_key[32]) { + static const uint8_t kMongomeryBasePoint[32] = {9}; + x25519_scalar_mult(out_public_value, private_key, kMongomeryBasePoint); +} + +#else + +void X25519_public_from_private(uint8_t out_public_value[32], + const uint8_t private_key[32]) { +#if defined(BORINGSSL_X25519_NEON) + if (CRYPTO_is_NEON_capable()) { + static const uint8_t kMongomeryBasePoint[32] = {9}; + x25519_NEON(out_public_value, private_key, kMongomeryBasePoint); + return; + } +#endif + + uint8_t e[32]; + OPENSSL_memcpy(e, private_key, 32); + e[0] &= 248; + e[31] &= 127; + e[31] |= 64; + + ge_p3 A; + x25519_ge_scalarmult_base(&A, e); + + // We only need the u-coordinate of the curve25519 point. The map is + // u=(y+1)/(1-y). Since y=Y/Z, this gives u=(Z+Y)/(Z-Y). + fe_loose zplusy, zminusy; + fe zminusy_inv; + fe_add(&zplusy, &A.Z, &A.Y); + fe_sub(&zminusy, &A.Z, &A.Y); + fe_loose_invert(&zminusy_inv, &zminusy); + fe_mul_tlt(&zminusy_inv, &zplusy, &zminusy_inv); + fe_tobytes(out_public_value, &zminusy_inv); +} + +#endif // BORINGSSL_X25519_X86_64 diff --git a/Sources/BoringSSL/third_party/fiat/internal.h b/Sources/BoringSSL/third_party/fiat/internal.h new file mode 100644 index 000000000..10218e065 --- /dev/null +++ b/Sources/BoringSSL/third_party/fiat/internal.h @@ -0,0 +1,142 @@ +// The MIT License (MIT) +// +// Copyright (c) 2015-2016 the fiat-crypto authors (see the AUTHORS file). +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
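For orientation, here is a hedged usage sketch of the public entry points vendored above in curve25519.c (ED25519_keypair, ED25519_sign, ED25519_verify, X25519_keypair, X25519). It assumes these are declared, as usual for BoringSSL, in the <openssl/curve25519.h> public header of this tree; the message and variable names are illustrative:

#include <stdio.h>
#include <string.h>

#include <openssl/curve25519.h>  // assumed location of the declarations

int main(void) {
  // Ed25519: sign a message and verify the signature.
  uint8_t ed_pub[32], ed_priv[64], sig[64];
  const uint8_t msg[] = "example message";
  ED25519_keypair(ed_pub, ed_priv);
  ED25519_sign(sig, msg, sizeof(msg), ed_priv);
  printf("Ed25519 verify: %d\n", ED25519_verify(msg, sizeof(msg), sig, ed_pub));

  // X25519: both parties derive the same shared secret.
  uint8_t a_pub[32], a_priv[32], b_pub[32], b_priv[32];
  uint8_t a_shared[32], b_shared[32];
  X25519_keypair(a_pub, a_priv);
  X25519_keypair(b_pub, b_priv);
  int ok = X25519(a_shared, a_priv, b_pub) &&  // non-zero unless the peer
           X25519(b_shared, b_priv, a_pub);    // value has small order
  printf("X25519 ok: %d, shared secrets match: %d\n", ok,
         memcmp(a_shared, b_shared, 32) == 0);
  return 0;
}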
+ +#ifndef OPENSSL_HEADER_CURVE25519_INTERNAL_H +#define OPENSSL_HEADER_CURVE25519_INTERNAL_H + +#if defined(__cplusplus) +extern "C" { +#endif + + +#if defined(OPENSSL_X86_64) && !defined(OPENSSL_SMALL) && \ + !defined(OPENSSL_WINDOWS) && !defined(OPENSSL_NO_ASM) +#define BORINGSSL_X25519_X86_64 + +void x25519_x86_64(uint8_t out[32], const uint8_t scalar[32], + const uint8_t point[32]); +#endif + + +#if defined(OPENSSL_ARM) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_APPLE) +#define BORINGSSL_X25519_NEON + +// x25519_NEON is defined in asm/x25519-arm.S. +void x25519_NEON(uint8_t out[32], const uint8_t scalar[32], + const uint8_t point[32]); +#endif + +// fe means field element. Here the field is \Z/(2^255-19). An element t, +// entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. +// fe limbs are bounded by 1.125*2^26,1.125*2^25,1.125*2^26,1.125*2^25,etc. +// Multiplication and carrying produce fe from fe_loose. +typedef struct fe { uint32_t v[10]; } fe; + +// fe_loose limbs are bounded by 3.375*2^26,3.375*2^25,3.375*2^26,3.375*2^25,etc. +// Addition and subtraction produce fe_loose from (fe, fe). +typedef struct fe_loose { uint32_t v[10]; } fe_loose; + +/* ge means group element. + + * Here the group is the set of pairs (x,y) of field elements (see fe.h) + * satisfying -x^2 + y^2 = 1 + d x^2y^2 + * where d = -121665/121666. + * + * Representations: + * ge_p2 (projective): (X:Y:Z) satisfying x=X/Z, y=Y/Z + * ge_p3 (extended): (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT + * ge_p1p1 (completed): ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T + * ge_precomp (Duif): (y+x,y-x,2dxy) */ + +typedef struct { + fe X; + fe Y; + fe Z; +} ge_p2; + +typedef struct { + fe X; + fe Y; + fe Z; + fe T; +} ge_p3; + +typedef struct { + fe_loose X; + fe_loose Y; + fe_loose Z; + fe_loose T; +} ge_p1p1; + +typedef struct { + fe_loose yplusx; + fe_loose yminusx; + fe_loose xy2d; +} ge_precomp; + +typedef struct { + fe_loose YplusX; + fe_loose YminusX; + fe_loose Z; + fe_loose T2d; +} ge_cached; + +void x25519_ge_tobytes(uint8_t *s, const ge_p2 *h); +int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t *s); +void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p); +void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p); +void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p); +void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q); +void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q); +void x25519_ge_scalarmult_small_precomp( + ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]); +void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]); +void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A); +void x25519_sc_reduce(uint8_t *s); + +enum spake2_state_t { + spake2_state_init = 0, + spake2_state_msg_generated, + spake2_state_key_generated, +}; + +struct spake2_ctx_st { + uint8_t private_key[32]; + uint8_t my_msg[32]; + uint8_t password_scalar[32]; + uint8_t password_hash[64]; + uint8_t *my_name; + size_t my_name_len; + uint8_t *their_name; + size_t their_name_len; + enum spake2_role_t my_role; + enum spake2_state_t state; + char disable_password_scalar_hack; +}; + + +#if defined(__cplusplus) +} // extern C +#endif + +#endif // OPENSSL_HEADER_CURVE25519_INTERNAL_H diff --git a/Sources/CgRPC/include/grpc/byte_buffer.h b/Sources/CgRPC/include/grpc/byte_buffer.h index 7669582af..ee740f479 100644 --- a/Sources/CgRPC/include/grpc/byte_buffer.h +++ b/Sources/CgRPC/include/grpc/byte_buffer.h @@ -19,6 
+19,8 @@ #ifndef GRPC_BYTE_BUFFER_H #define GRPC_BYTE_BUFFER_H +#include + #include #include diff --git a/Sources/CgRPC/include/grpc/byte_buffer_reader.h b/Sources/CgRPC/include/grpc/byte_buffer_reader.h index 6bd078478..15e06cad7 100644 --- a/Sources/CgRPC/include/grpc/byte_buffer_reader.h +++ b/Sources/CgRPC/include/grpc/byte_buffer_reader.h @@ -19,6 +19,8 @@ #ifndef GRPC_BYTE_BUFFER_READER_H #define GRPC_BYTE_BUFFER_READER_H +#include + #include #endif /* GRPC_BYTE_BUFFER_READER_H */ diff --git a/Sources/CgRPC/include/grpc/census.h b/Sources/CgRPC/include/grpc/census.h index de8e7a661..4894f1c09 100644 --- a/Sources/CgRPC/include/grpc/census.h +++ b/Sources/CgRPC/include/grpc/census.h @@ -16,452 +16,23 @@ * */ -/** RPC-internal Census API's. These are designed to be generic enough that - * they can (ultimately) be used in many different RPC systems (with differing - * implementations). */ - #ifndef GRPC_CENSUS_H #define GRPC_CENSUS_H +#include + #include #ifdef __cplusplus extern "C" { #endif -/** Identify census features that can be enabled via census_initialize(). */ -enum census_features { - CENSUS_FEATURE_NONE = 0, /** Do not enable census. */ - CENSUS_FEATURE_TRACING = 1, /** Enable census tracing. */ - CENSUS_FEATURE_STATS = 2, /** Enable Census stats collection. */ - CENSUS_FEATURE_CPU = 4, /** Enable Census CPU usage collection. */ - CENSUS_FEATURE_ALL = - CENSUS_FEATURE_TRACING | CENSUS_FEATURE_STATS | CENSUS_FEATURE_CPU -}; - -/** Shutdown and startup census subsystem. The 'features' argument should be - * the OR (|) of census_features values. If census fails to initialize, then - * census_initialize() will return -1, otherwise the set of enabled features - * (which may be smaller than that provided in the `features` argument, see - * census_supported()) is returned. It is an error to call census_initialize() - * more than once (without an intervening census_shutdown()). These functions - * are not thread-safe. */ -CENSUSAPI int census_initialize(int features); -CENSUSAPI void census_shutdown(void); - -/** Return the features supported by the current census implementation (not all - * features will be available on all platforms). */ -CENSUSAPI int census_supported(void); - -/** Return the census features currently enabled. */ -CENSUSAPI int census_enabled(void); - /** A Census Context is a handle used by Census to represent the current tracing and stats collection information. Contexts should be propagated across RPC's - (this is the responsibility of the local RPC system). A context is typically - used as the first argument to most census functions. Conceptually, they - should be thought of as specific to a single RPC/thread. The user visible - context representation is that of a collection of key:value string pairs, - each of which is termed a 'tag'; these form the basis against which Census - metrics will be recorded. Keys are unique within a context. */ + (this is the responsibility of the local RPC system). */ typedef struct census_context census_context; -/** A tag is a key:value pair. Both keys and values are nil-terminated strings, - containing printable ASCII characters (decimal 32-126). Keys must be at - least one character in length. Both keys and values can have at most - CENSUS_MAX_TAG_KB_LEN characters (including the terminating nil). The - maximum number of tags that can be propagated is - CENSUS_MAX_PROPAGATED_TAGS. 
Users should also remember that some systems - may have limits on, e.g., the number of bytes that can be transmitted as - metadata, and that larger tags means more memory consumed and time in - processing. */ -typedef struct { - const char *key; - const char *value; - uint8_t flags; -} census_tag; - -/** Maximum length of a tag's key or value. */ -#define CENSUS_MAX_TAG_KV_LEN 255 -/** Maximum number of propagatable tags. */ -#define CENSUS_MAX_PROPAGATED_TAGS 255 - -/** Tag flags. */ -#define CENSUS_TAG_PROPAGATE 1 /** Tag should be propagated over RPC */ -#define CENSUS_TAG_STATS 2 /** Tag will be used for statistics aggregation */ -#define CENSUS_TAG_RESERVED 4 /** Reserved for internal use. */ -/** Flag values 4,8,16,32,64,128 are reserved for future/internal use. Clients - should not use or rely on their values. */ - -#define CENSUS_TAG_IS_PROPAGATED(flags) (flags & CENSUS_TAG_PROPAGATE) -#define CENSUS_TAG_IS_STATS(flags) (flags & CENSUS_TAG_STATS) - -/** An instance of this structure is kept by every context, and records the - basic information associated with the creation of that context. */ -typedef struct { - int n_propagated_tags; /** number of propagated tags */ - int n_local_tags; /** number of non-propagated (local) tags */ - int n_deleted_tags; /** number of tags that were deleted */ - int n_added_tags; /** number of tags that were added */ - int n_modified_tags; /** number of tags that were modified */ - int n_invalid_tags; /** number of tags with bad keys or values (e.g. - longer than CENSUS_MAX_TAG_KV_LEN) */ - int n_ignored_tags; /** number of tags ignored because of - CENSUS_MAX_PROPAGATED_TAGS limit. */ -} census_context_status; - -/** Create a new context, adding and removing tags from an existing context. - This will copy all tags from the 'tags' input, so it is recommended - to add as many tags in a single operation as is practical for the client. - @param base Base context to build upon. Can be NULL. - @param tags A set of tags to be added/changed/deleted. Tags with keys that - are in 'tags', but not 'base', are added to the context. Keys that are in - both 'tags' and 'base' will have their value/flags modified. Tags with keys - in both, but with NULL values, will be deleted from the context. Tags with - invalid (too long or short) keys or values will be ignored. - If adding a tag will result in more than CENSUS_MAX_PROPAGATED_TAGS in either - binary or non-binary tags, they will be ignored, as will deletions of - tags that don't exist. - @param ntags number of tags in 'tags' - @param status If not NULL, will return a pointer to a census_context_status - structure containing information about the new context and status of the - tags used in its creation. - @return A new, valid census_context. -*/ -CENSUSAPI census_context *census_context_create( - const census_context *base, const census_tag *tags, int ntags, - census_context_status const **status); - -/** Destroy a context. Once this function has been called, the context cannot - be reused. */ -CENSUSAPI void census_context_destroy(census_context *context); - -/** Get a pointer to the original status from the context creation. */ -CENSUSAPI const census_context_status *census_context_get_status( - const census_context *context); - -/** Structure used for iterating over the tags in a context. API clients should - not use or reference internal fields - neither their contents or - presence/absence are guaranteed. 
*/ -typedef struct { - const census_context *context; - int base; - int index; - char *kvm; -} census_context_iterator; - -/** Initialize a census_tag_iterator. Must be called before first use. */ -CENSUSAPI void census_context_initialize_iterator( - const census_context *context, census_context_iterator *iterator); - -/** Get the contents of the "next" tag in the context. If there are no more - tags, returns 0 (and 'tag' contents will be unchanged), otherwise returns 1. - */ -CENSUSAPI int census_context_next_tag(census_context_iterator *iterator, - census_tag *tag); - -/** Get a context tag by key. Returns 0 if the key is not present. */ -CENSUSAPI int census_context_get_tag(const census_context *context, - const char *key, census_tag *tag); - -/** Tag set encode/decode functionality. These functions are intended - for use by RPC systems only, for purposes of transmitting/receiving contexts. - */ - -/** Encode a context into a buffer. - @param context context to be encoded - @param buffer buffer into which the context will be encoded. - @param buf_size number of available bytes in buffer. - @return The number of buffer bytes consumed for the encoded context, or - zero if the buffer was of insufficient size. */ -CENSUSAPI size_t census_context_encode(const census_context *context, - char *buffer, size_t buf_size); - -/** Decode context buffer encoded with census_context_encode(). Returns NULL - if there is an error in parsing either buffer. */ -CENSUSAPI census_context *census_context_decode(const char *buffer, - size_t size); - -/** Distributed traces can have a number of options. */ -enum census_trace_mask_values { - CENSUS_TRACE_MASK_NONE = 0, /** Default, empty flags */ - CENSUS_TRACE_MASK_IS_SAMPLED = 1 /** RPC tracing enabled for this context. */ -}; - -/** Get the current trace mask associated with this context. The value returned - will be the logical OR of census_trace_mask_values values. */ -CENSUSAPI int census_trace_mask(const census_context *context); - -/** Set the trace mask associated with a context. */ -CENSUSAPI void census_set_trace_mask(int trace_mask); - -/** The concept of "operation" is a fundamental concept for Census. In an RPC - system, an operation typically represents a single RPC, or a significant - sub-part thereof (e.g. a single logical "read" RPC to a distributed storage - system might do several other actions in parallel, from looking up metadata - indices to making requests of other services - each of these could be a - sub-operation with the larger RPC operation). Census uses operations for the - following: - - CPU accounting: If enabled, census will measure the thread CPU time - consumed between operation start and end times. - - Active operations: Census will maintain information on all currently - active operations. - - Distributed tracing: Each operation serves as a logical trace span. - - Stats collection: Stats are broken down by operation (e.g. latency - breakdown for each unique RPC path). - - The following functions serve to delineate the start and stop points for - each logical operation. */ - -/** - This structure represents a timestamp as used by census to record the time - at which an operation begins. -*/ -typedef struct { - /** Use gpr_timespec for default implementation. High performance - * implementations should use a cycle-counter based timestamp. */ - gpr_timespec ts; -} census_timestamp; - -/** - Mark the beginning of an RPC operation. 
The information required to call the - functions to record the start of RPC operations (both client and server) may - not be callable at the true start time of the operation, due to information - not being available (e.g. the census context data will not be available in a - server RPC until at least initial metadata has been processed). To ensure - correct CPU accounting and latency recording, RPC systems can call this - function to get the timestamp of operation beginning. This can later be used - as an argument to census_start_{client,server}_rpc_op(). NB: for correct - CPU accounting, the system must guarantee that the same thread is used - for all request processing after this function is called. - - @return A timestamp representing the operation start time. -*/ -CENSUSAPI census_timestamp census_start_rpc_op_timestamp(void); - -/** - Represent functions to map RPC name ID to service/method names. Census - breaks down all RPC stats by service and method names. We leave the - definition and format of these to the RPC system. For efficiency purposes, - we encode these as a single 64 bit identifier, and allow the RPC system to - provide a structure for functions that can convert these to service and - method strings. - - TODO(aveitch): Instead of providing this as an argument to the rpc_start_op() - functions, maybe it should be set once at census initialization. -*/ -typedef struct { - const char *(*get_rpc_service_name)(int64_t id); - const char *(*get_rpc_method_name)(int64_t id); -} census_rpc_name_info; - -/** - Start a client rpc operation. This function should be called as early in the - client RPC path as possible. This function will create a new context. If - the context argument is non-null, then the new context will inherit all - its properties, with the following changes: - - create a new operation ID for the new context, marking it as a child of - the previous operation. - - use the new RPC path and peer information for tracing and stats - collection purposes, rather than those from the original context - - If the context argument is NULL, then a new root context is created. This - is particularly important for tracing purposes (the trace spans generated - will be unassociated with any other trace spans, except those - downstream). The trace_mask will be used for tracing operations associated - with the new context. - - In some RPC systems (e.g. where load balancing is used), peer information - may not be available at the time the operation starts. In this case, use a - NULL value for peer, and set it later using the - census_set_rpc_client_peer() function. - - @param context The parent context. Can be NULL. - @param rpc_name_id The rpc name identifier to be associated with this RPC. - @param rpc_name_info Used to decode rpc_name_id. - @param peer RPC peer. If not available at the time, NULL can be used, - and a later census_set_rpc_client_peer() call made. - @param trace_mask An OR of census_trace_mask_values values. Only used in - the creation of a new root context (context == NULL). - @param start_time A timestamp returned from census_start_rpc_op_timestamp(). - Can be NULL. Used to set the true time the operation - begins. - - @return A new census context. - */ -CENSUSAPI census_context *census_start_client_rpc_op( - const census_context *context, int64_t rpc_name_id, - const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, - const census_timestamp *start_time); - -/** - Add peer information to a context representing a client RPC operation. 
-*/ -CENSUSAPI void census_set_rpc_client_peer(census_context *context, - const char *peer); - -/** - Start a server RPC operation. Returns a new context to be used in future - census calls. If buffer is non-NULL, then the buffer contents should - represent the client context, as generated by census_context_serialize(). - If buffer is NULL, a new root context is created. - - @param buffer Buffer containing bytes output from census_context_serialize(). - @param rpc_name_id The rpc name identifier to be associated with this RPC. - @param rpc_name_info Used to decode rpc_name_id. - @param peer RPC peer. - @param trace_mask An OR of census_trace_mask_values values. Only used in - the creation of a new root context (buffer == NULL). - @param start_time A timestamp returned from census_start_rpc_op_timestamp(). - Can be NULL. Used to set the true time the operation - begins. - - @return A new census context. - */ -CENSUSAPI census_context *census_start_server_rpc_op( - const char *buffer, int64_t rpc_name_id, - const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, - census_timestamp *start_time); - -/** - Start a new, non-RPC operation. In general, this function works very - similarly to census_start_client_rpc_op, with the primary difference being - the replacement of host/path information with the more generic family/name - tags. If the context argument is non-null, then the new context will - inherit all its properties, with the following changes: - - create a new operation ID for the new context, marking it as a child of - the previous operation. - - use the family and name information for tracing and stats collection - purposes, rather than those from the original context - - If the context argument is NULL, then a new root context is created. This - is particularly important for tracing purposes (the trace spans generated - will be unassociated with any other trace spans, except those - downstream). The trace_mask will be used for tracing - operations associated with the new context. - - @param context The base context. Can be NULL. - @param family Family name to associate with the trace - @param name Name within family to associate with traces/stats - @param trace_mask An OR of census_trace_mask_values values. Only used if - context is NULL. - - @return A new census context. - */ -CENSUSAPI census_context *census_start_op(census_context *context, - const char *family, const char *name, - int trace_mask); - -/** - End an operation started by any of the census_start_*_op*() calls. The - context used in this call will no longer be valid once this function - completes. - - @param context Context associated with operation which is ending. - @param status status associated with the operation. Not interpreted by - census. -*/ -CENSUSAPI void census_end_op(census_context *context, int status); - -#define CENSUS_TRACE_RECORD_START_OP ((uint32_t)0) -#define CENSUS_TRACE_RECORD_END_OP ((uint32_t)1) - -/** Insert a trace record into the trace stream. The record consists of an - arbitrary size buffer, the size of which is provided in 'n'. - @param context Trace context - @param type User-defined type to associate with trace entry. - @param buffer Pointer to buffer to use - @param n Number of bytes in buffer -*/ -CENSUSAPI void census_trace_print(census_context *context, uint32_t type, - const char *buffer, size_t n); - -/** Trace record. 
*/ -typedef struct { - census_timestamp timestamp; /** Time of record creation */ - uint64_t trace_id; /** Trace ID associated with record */ - uint64_t op_id; /** Operation ID associated with record */ - uint32_t type; /** Type (as used in census_trace_print() */ - const char *buffer; /** Buffer (from census_trace_print() */ - size_t buf_size; /** Number of bytes inside buffer */ -} census_trace_record; - -/** Start a scan of existing trace records. While a scan is ongoing, addition - of new trace records will be blocked if the underlying trace buffers - fill up, so trace processing systems should endeavor to complete - reading as soon as possible. - @param consume if non-zero, indicates that reading records also "consumes" - the previously read record - i.e. releases space in the trace log - while scanning is ongoing. - @returns 0 on success, non-zero on failure (e.g. if a scan is already ongoing) -*/ -CENSUSAPI int census_trace_scan_start(int consume); - -/** Get a trace record. The data pointed to by the trace buffer is guaranteed - stable until the next census_get_trace_record() call (if the consume - argument to census_trace_scan_start was non-zero) or census_trace_scan_end() - is called (otherwise). - @param trace_record structure that will be filled in with oldest trace record. - @returns -1 if an error occurred (e.g. no previous call to - census_trace_scan_start()), 0 if there is no more trace data (and - trace_record will not be modified) or 1 otherwise. -*/ -CENSUSAPI int census_get_trace_record(census_trace_record *trace_record); - -/** End a scan previously started by census_trace_scan_start() */ -CENSUSAPI void census_trace_scan_end(); - -/** Core stats collection API's. The following concepts are used: - * Resource: Users record measurements for a single resource. Examples - include RPC latency, CPU seconds consumed, and bytes transmitted. - * Aggregation: An aggregation of a set of measurements. Census supports the - following aggregation types: - * Distribution - statistical distribution information, used for - recording average, standard deviation etc. Can include a histogram. - * Interval - a count of events that happen in a rolling time window. - * View: A view is a combination of a Resource, a set of tag keys and an - Aggregation. When a measurement for a Resource matches the View tags, it is - recorded (for each unique set of tag values) using the Aggregation type. - Each resource can have an arbitrary number of views by which it will be - broken down. - - Census uses protos to define each of the above, and output results. This - ensures unification across the different language and runtime - implementations. The proto definitions can be found in src/proto/census. -*/ - -/** Define a new resource. `resource_pb` should contain an encoded Resource - protobuf, `resource_pb_size` being the size of the buffer. Returns a -ve - value on error, or a positive (>= 0) resource id (for use in - census_delete_resource() and census_record_values()). In order to be valid, a - resource must have a name, and at least one numerator in its unit type. The - resource name must be unique, and an error will be returned if it is not. */ -CENSUSAPI int32_t census_define_resource(const uint8_t *resource_pb, - size_t resource_pb_size); - -/** Delete a resource created by census_define_resource(). */ -CENSUSAPI void census_delete_resource(int32_t resource_id); - -/** Determine the id of a resource, given its name. returns -1 if the resource - does not exist. 
*/ -CENSUSAPI int32_t census_resource_id(const char *name); - -/** A single value to be recorded comprises two parts: an ID for the particular - * resource and the value to be recorded against it. */ -typedef struct { - int32_t resource_id; - double value; -} census_value; - -/** Record new usage values against the given context. */ -CENSUSAPI void census_record_values(census_context *context, - census_value *values, size_t nvalues); - #ifdef __cplusplus } #endif diff --git a/Sources/CgRPC/include/grpc/compression.h b/Sources/CgRPC/include/grpc/compression.h index 13a8dd66a..a4f6a8faf 100644 --- a/Sources/CgRPC/include/grpc/compression.h +++ b/Sources/CgRPC/include/grpc/compression.h @@ -30,60 +30,43 @@ extern "C" { #endif +/** Return if an algorithm is message compression algorithm. */ +GRPCAPI int grpc_compression_algorithm_is_message( + grpc_compression_algorithm algorithm); + +/** Return if an algorithm is stream compression algorithm. */ +GRPCAPI int grpc_compression_algorithm_is_stream( + grpc_compression_algorithm algorithm); + /** Parses the \a slice as a grpc_compression_algorithm instance and updating \a * algorithm. Returns 1 upon success, 0 otherwise. */ GRPCAPI int grpc_compression_algorithm_parse( - grpc_slice value, grpc_compression_algorithm *algorithm); - -/** Parses the \a slice as a grpc_stream_compression_algorithm instance and - * updating \a algorithm. Returns 1 upon success, 0 otherwise. */ -int grpc_stream_compression_algorithm_parse( - grpc_slice name, grpc_stream_compression_algorithm *algorithm); + grpc_slice value, grpc_compression_algorithm* algorithm); /** Updates \a name with the encoding name corresponding to a valid \a * algorithm. Note that \a name is statically allocated and must *not* be freed. * Returns 1 upon success, 0 otherwise. */ GRPCAPI int grpc_compression_algorithm_name( - grpc_compression_algorithm algorithm, const char **name); - -/** Updates \a name with the encoding name corresponding to a valid \a - * algorithm. Note that \a name is statically allocated and must *not* be freed. - * Returns 1 upon success, 0 otherwise. */ -GRPCAPI int grpc_stream_compression_algorithm_name( - grpc_stream_compression_algorithm algorithm, const char **name); + grpc_compression_algorithm algorithm, const char** name); /** Returns the compression algorithm corresponding to \a level for the - * compression algorithms encoded in the \a accepted_encodings bitset. - * - * It abort()s for unknown levels. */ + * compression algorithms encoded in the \a accepted_encodings bitset.*/ GRPCAPI grpc_compression_algorithm grpc_compression_algorithm_for_level( grpc_compression_level level, uint32_t accepted_encodings); -/** Returns the stream compression algorithm corresponding to \a level for the - * compression algorithms encoded in the \a accepted_stream_encodings bitset. - * It abort()s for unknown levels. */ -GRPCAPI grpc_stream_compression_algorithm -grpc_stream_compression_algorithm_for_level(grpc_stream_compression_level level, - uint32_t accepted_stream_encodings); - -GRPCAPI void grpc_compression_options_init(grpc_compression_options *opts); +GRPCAPI void grpc_compression_options_init(grpc_compression_options* opts); /** Mark \a algorithm as enabled in \a opts. */ GRPCAPI void grpc_compression_options_enable_algorithm( - grpc_compression_options *opts, grpc_compression_algorithm algorithm); + grpc_compression_options* opts, grpc_compression_algorithm algorithm); /** Mark \a algorithm as disabled in \a opts. 
*/ GRPCAPI void grpc_compression_options_disable_algorithm( - grpc_compression_options *opts, grpc_compression_algorithm algorithm); + grpc_compression_options* opts, grpc_compression_algorithm algorithm); /** Returns true if \a algorithm is marked as enabled in \a opts. */ GRPCAPI int grpc_compression_options_is_algorithm_enabled( - const grpc_compression_options *opts, grpc_compression_algorithm algorithm); - -/** Returns true if \a algorithm is marked as enabled in \a opts. */ -GRPCAPI int grpc_compression_options_is_stream_compression_algorithm_enabled( - const grpc_compression_options *opts, - grpc_stream_compression_algorithm algorithm); + const grpc_compression_options* opts, grpc_compression_algorithm algorithm); #ifdef __cplusplus } diff --git a/Sources/CgRPC/include/grpc/fork.h b/Sources/CgRPC/include/grpc/fork.h index ca45e1139..26f9df987 100644 --- a/Sources/CgRPC/include/grpc/fork.h +++ b/Sources/CgRPC/include/grpc/fork.h @@ -19,6 +19,8 @@ #ifndef GRPC_FORK_H #define GRPC_FORK_H +#include + #include #endif /* GRPC_FORK_H */ diff --git a/Sources/CgRPC/include/grpc/grpc.h b/Sources/CgRPC/include/grpc/grpc.h index 1de289fba..dd8a5d7d5 100644 --- a/Sources/CgRPC/include/grpc/grpc.h +++ b/Sources/CgRPC/include/grpc/grpc.h @@ -19,6 +19,8 @@ #ifndef GRPC_GRPC_H #define GRPC_GRPC_H +#include + #include #include @@ -40,11 +42,11 @@ extern "C" { * functionality lives in grpc_security.h. */ -GRPCAPI void grpc_metadata_array_init(grpc_metadata_array *array); -GRPCAPI void grpc_metadata_array_destroy(grpc_metadata_array *array); +GRPCAPI void grpc_metadata_array_init(grpc_metadata_array* array); +GRPCAPI void grpc_metadata_array_destroy(grpc_metadata_array* array); -GRPCAPI void grpc_call_details_init(grpc_call_details *details); -GRPCAPI void grpc_call_details_destroy(grpc_call_details *details); +GRPCAPI void grpc_call_details_init(grpc_call_details* details); +GRPCAPI void grpc_call_details_destroy(grpc_call_details* details); /** Registers a plugin to be initialized and destroyed with the library. @@ -73,31 +75,31 @@ GRPCAPI void grpc_init(void); GRPCAPI void grpc_shutdown(void); /** Return a string representing the current version of grpc */ -GRPCAPI const char *grpc_version_string(void); +GRPCAPI const char* grpc_version_string(void); /** Return a string specifying what the 'g' in gRPC stands for */ -GRPCAPI const char *grpc_g_stands_for(void); +GRPCAPI const char* grpc_g_stands_for(void); /** Returns the completion queue factory based on the attributes. 
MAY return a NULL if no factory can be found */ -GRPCAPI const grpc_completion_queue_factory * +GRPCAPI const grpc_completion_queue_factory* grpc_completion_queue_factory_lookup( - const grpc_completion_queue_attributes *attributes); + const grpc_completion_queue_attributes* attributes); /** Helper function to create a completion queue with grpc_cq_completion_type of GRPC_CQ_NEXT and grpc_cq_polling_type of GRPC_CQ_DEFAULT_POLLING */ -GRPCAPI grpc_completion_queue *grpc_completion_queue_create_for_next( - void *reserved); +GRPCAPI grpc_completion_queue* grpc_completion_queue_create_for_next( + void* reserved); /** Helper function to create a completion queue with grpc_cq_completion_type of GRPC_CQ_PLUCK and grpc_cq_polling_type of GRPC_CQ_DEFAULT_POLLING */ -GRPCAPI grpc_completion_queue *grpc_completion_queue_create_for_pluck( - void *reserved); +GRPCAPI grpc_completion_queue* grpc_completion_queue_create_for_pluck( + void* reserved); /** Create a completion queue */ -GRPCAPI grpc_completion_queue *grpc_completion_queue_create( - const grpc_completion_queue_factory *factory, - const grpc_completion_queue_attributes *attributes, void *reserved); +GRPCAPI grpc_completion_queue* grpc_completion_queue_create( + const grpc_completion_queue_factory* factory, + const grpc_completion_queue_attributes* attributes, void* reserved); /** Blocks until an event is available, the completion queue is being shut down, or deadline is reached. @@ -107,9 +109,9 @@ GRPCAPI grpc_completion_queue *grpc_completion_queue_create( Callers must not call grpc_completion_queue_next and grpc_completion_queue_pluck simultaneously on the same completion queue. */ -GRPCAPI grpc_event grpc_completion_queue_next(grpc_completion_queue *cq, +GRPCAPI grpc_event grpc_completion_queue_next(grpc_completion_queue* cq, gpr_timespec deadline, - void *reserved); + void* reserved); /** Blocks until an event with tag 'tag' is available, the completion queue is being shutdown or deadline is reached. @@ -122,9 +124,9 @@ GRPCAPI grpc_event grpc_completion_queue_next(grpc_completion_queue *cq, Completion queues support a maximum of GRPC_MAX_COMPLETION_QUEUE_PLUCKERS concurrently executing plucks at any time. */ -GRPCAPI grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, - void *tag, gpr_timespec deadline, - void *reserved); +GRPCAPI grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, + void* tag, gpr_timespec deadline, + void* reserved); /** Maximum number of outstanding grpc_completion_queue_pluck executions per completion queue */ @@ -137,40 +139,38 @@ GRPCAPI grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, After calling this function applications should ensure that no NEW work is added to be published on this completion queue. */ -GRPCAPI void grpc_completion_queue_shutdown(grpc_completion_queue *cq); +GRPCAPI void grpc_completion_queue_shutdown(grpc_completion_queue* cq); /** Destroy a completion queue. The caller must ensure that the queue is drained and no threads are executing grpc_completion_queue_next */ -GRPCAPI void grpc_completion_queue_destroy(grpc_completion_queue *cq); - -/** Create a completion queue alarm instance */ -GRPCAPI grpc_alarm *grpc_alarm_create(void *reserved); - -/** Set a completion queue alarm instance associated to \a cq. - * - * Once the alarm expires (at \a deadline) or it's cancelled (see \a - * grpc_alarm_cancel), an event with tag \a tag will be added to \a cq. 
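A minimal sketch of the next/shutdown/destroy lifecycle described above, assuming a hypothetical application callback handle_tag; everything else is the C core API declared in this header:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Hypothetical per-tag dispatch; a real application would route to its own state machine. */
static void handle_tag(void* tag, int success) { (void)tag; (void)success; }

static void drain_completion_queue(grpc_completion_queue* cq) {
  /* Keep pulling events until the shutdown sentinel is observed, then destroy. */
  for (;;) {
    grpc_event ev = grpc_completion_queue_next(
        cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL /* reserved */);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) break;
    if (ev.type == GRPC_OP_COMPLETE) handle_tag(ev.tag, ev.success);
  }
  grpc_completion_queue_destroy(cq);
}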
If the - * alarm expired, the event's success bit will be true, false otherwise (ie, - * upon cancellation). */ -GRPCAPI void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq, - gpr_timespec deadline, void *tag, void *reserved); - -/** Cancel a completion queue alarm. Calling this function over an alarm that - * has already fired has no effect. */ -GRPCAPI void grpc_alarm_cancel(grpc_alarm *alarm, void *reserved); +GRPCAPI void grpc_completion_queue_destroy(grpc_completion_queue* cq); -/** Destroy the given completion queue alarm, cancelling it in the process. */ -GRPCAPI void grpc_alarm_destroy(grpc_alarm *alarm, void *reserved); +/*********** EXPERIMENTAL API ************/ +/** Initializes a thread local cache for \a cq. + * grpc_flush_cq_tls_cache() MUST be called on the same thread, + * with the same cq. + */ +GRPCAPI void grpc_completion_queue_thread_local_cache_init( + grpc_completion_queue* cq); + +/*********** EXPERIMENTAL API ************/ +/** Flushes the thread local cache for \a cq. + * Returns 1 if there was contents in the cache. If there was an event + * in \a cq tls cache, its tag is placed in tag, and ok is set to the + * event success. + */ +GRPCAPI int grpc_completion_queue_thread_local_cache_flush( + grpc_completion_queue* cq, void** tag, int* ok); /** Check the connectivity state of a channel. */ GRPCAPI grpc_connectivity_state grpc_channel_check_connectivity_state( - grpc_channel *channel, int try_to_connect); + grpc_channel* channel, int try_to_connect); /** Number of active "external connectivity state watchers" attached to a * channel. * Useful for testing. **/ GRPCAPI int grpc_channel_num_external_connectivity_watchers( - grpc_channel *channel); + grpc_channel* channel); /** Watch for a change in connectivity state. Once the channel connectivity state is different from last_observed_state, @@ -178,11 +178,11 @@ GRPCAPI int grpc_channel_num_external_connectivity_watchers( If deadline expires BEFORE the state is changed, tag will be enqueued on cq with success=0. */ GRPCAPI void grpc_channel_watch_connectivity_state( - grpc_channel *channel, grpc_connectivity_state last_observed_state, - gpr_timespec deadline, grpc_completion_queue *cq, void *tag); + grpc_channel* channel, grpc_connectivity_state last_observed_state, + gpr_timespec deadline, grpc_completion_queue* cq, void* tag); /** Check whether a grpc channel supports connectivity watcher */ -GRPCAPI int grpc_channel_support_connectivity_watcher(grpc_channel *channel); +GRPCAPI int grpc_channel_support_connectivity_watcher(grpc_channel* channel); /** Create a call given a grpc_channel, in order to call 'method'. All completions are sent to 'completion_queue'. 'method' and 'host' need only @@ -191,31 +191,31 @@ GRPCAPI int grpc_channel_support_connectivity_watcher(grpc_channel *channel); to propagate properties from the server call to this new client call, depending on the value of \a propagation_mask (see propagation_bits.h for possible values). 
*/ -GRPCAPI grpc_call *grpc_channel_create_call( - grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, - grpc_completion_queue *completion_queue, grpc_slice method, - const grpc_slice *host, gpr_timespec deadline, void *reserved); +GRPCAPI grpc_call* grpc_channel_create_call( + grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask, + grpc_completion_queue* completion_queue, grpc_slice method, + const grpc_slice* host, gpr_timespec deadline, void* reserved); /** Ping the channels peer (load balanced channels will select one sub-channel to ping); if the channel is not connected, posts a failed. */ -GRPCAPI void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq, - void *tag, void *reserved); +GRPCAPI void grpc_channel_ping(grpc_channel* channel, grpc_completion_queue* cq, + void* tag, void* reserved); /** Pre-register a method/host pair on a channel. */ -GRPCAPI void *grpc_channel_register_call(grpc_channel *channel, - const char *method, const char *host, - void *reserved); +GRPCAPI void* grpc_channel_register_call(grpc_channel* channel, + const char* method, const char* host, + void* reserved); /** Create a call given a handle returned from grpc_channel_register_call. \sa grpc_channel_create_call. */ -GRPCAPI grpc_call *grpc_channel_create_registered_call( - grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, - grpc_completion_queue *completion_queue, void *registered_call_handle, - gpr_timespec deadline, void *reserved); +GRPCAPI grpc_call* grpc_channel_create_registered_call( + grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask, + grpc_completion_queue* completion_queue, void* registered_call_handle, + gpr_timespec deadline, void* reserved); /** Allocate memory in the grpc_call arena: this memory is automatically discarded at call completion */ -GRPCAPI void *grpc_call_arena_alloc(grpc_call *call, size_t size); +GRPCAPI void* grpc_call_arena_alloc(grpc_call* call, size_t size); /** Start a batch of operations defined in the array ops; when complete, post a completion of type 'tag' to the completion queue bound to the call. @@ -234,9 +234,9 @@ GRPCAPI void *grpc_call_arena_alloc(grpc_call *call, size_t size); needs to be synchronized. As an optimization, you may synchronize batches containing just send operations independently from batches containing just receive operations. */ -GRPCAPI grpc_call_error grpc_call_start_batch(grpc_call *call, - const grpc_op *ops, size_t nops, - void *tag, void *reserved); +GRPCAPI grpc_call_error grpc_call_start_batch(grpc_call* call, + const grpc_op* ops, size_t nops, + void* tag, void* reserved); /** Returns a newly allocated string representing the endpoint to which this call is communicating with. The string is in the uri format accepted by @@ -246,43 +246,53 @@ GRPCAPI grpc_call_error grpc_call_start_batch(grpc_call *call, WARNING: this value is never authenticated or subject to any security related code. It must not be used for any authentication related functionality. Instead, use grpc_auth_context. */ -GRPCAPI char *grpc_call_get_peer(grpc_call *call); +GRPCAPI char* grpc_call_get_peer(grpc_call* call); struct census_context; /** Set census context for a call; Must be called before first call to grpc_call_start_batch(). 
*/ -GRPCAPI void grpc_census_call_set_context(grpc_call *call, - struct census_context *context); +GRPCAPI void grpc_census_call_set_context(grpc_call* call, + struct census_context* context); /** Retrieve the calls current census context. */ -GRPCAPI struct census_context *grpc_census_call_get_context(grpc_call *call); +GRPCAPI struct census_context* grpc_census_call_get_context(grpc_call* call); /** Return a newly allocated string representing the target a channel was created for. */ -GRPCAPI char *grpc_channel_get_target(grpc_channel *channel); +GRPCAPI char* grpc_channel_get_target(grpc_channel* channel); /** Request info about the channel. \a channel_info indicates what information is being requested and how that information will be returned. \a channel_info is owned by the caller. */ -GRPCAPI void grpc_channel_get_info(grpc_channel *channel, - const grpc_channel_info *channel_info); +GRPCAPI void grpc_channel_get_info(grpc_channel* channel, + const grpc_channel_info* channel_info); /** Create a client channel to 'target'. Additional channel level configuration MAY be provided by grpc_channel_args, though the expectation is that most - clients will want to simply pass NULL. See grpc_channel_args definition for - more on this. The data in 'args' need only live through the invocation of - this function. */ -GRPCAPI grpc_channel *grpc_insecure_channel_create( - const char *target, const grpc_channel_args *args, void *reserved); + clients will want to simply pass NULL. The user data in 'args' need only + live through the invocation of this function. However, if any args of the + 'pointer' type are passed, then the referenced vtable must be maintained + by the caller until grpc_channel_destroy terminates. See grpc_channel_args + definition for more on this. */ +GRPCAPI grpc_channel* grpc_insecure_channel_create( + const char* target, const grpc_channel_args* args, void* reserved); /** Create a lame client: this client fails every operation attempted on it. */ -GRPCAPI grpc_channel *grpc_lame_client_channel_create( - const char *target, grpc_status_code error_code, const char *error_message); +GRPCAPI grpc_channel* grpc_lame_client_channel_create( + const char* target, grpc_status_code error_code, const char* error_message); /** Close and destroy a grpc channel */ -GRPCAPI void grpc_channel_destroy(grpc_channel *channel); +GRPCAPI void grpc_channel_destroy(grpc_channel* channel); + +/** Returns the JSON formatted channel trace for this channel. The caller + owns the returned string and is responsible for freeing it. */ +GRPCAPI char* grpc_channel_get_trace(grpc_channel* channel); + +/** Returns the channel uuid, which can be used to look up its trace at a + later time. */ +GRPCAPI intptr_t grpc_channel_get_uuid(grpc_channel* channel); /** Error handling for grpc_call Most grpc_call functions return a grpc_error. If the error is not GRPC_OK @@ -295,7 +305,7 @@ GRPCAPI void grpc_channel_destroy(grpc_channel *channel); THREAD-SAFETY grpc_call_cancel and grpc_call_cancel_with_status are thread-safe, and can be called at any point before grpc_call_unref is called.*/ -GRPCAPI grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved); +GRPCAPI grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved); /** Called by clients to cancel an RPC on the server. Can be called multiple times, from any thread. @@ -307,18 +317,18 @@ GRPCAPI grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved); It doesn't need to be alive after the call to grpc_call_cancel_with_status completes. 
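Putting the channel- and call-creation functions above together, a minimal client-side sketch; the target address and method name are placeholders and error handling is omitted:

#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/support/time.h>

void client_sketch(void) {
  grpc_init();
  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(NULL);
  grpc_channel* channel =
      grpc_insecure_channel_create("localhost:50051", NULL /* args */, NULL);
  grpc_slice method = grpc_slice_from_static_string("/helloworld.Greeter/SayHello");
  grpc_call* call = grpc_channel_create_call(
      channel, NULL /* parent */, GRPC_PROPAGATE_DEFAULTS, cq, method,
      NULL /* host: taken from the channel target */,
      gpr_inf_future(GPR_CLOCK_REALTIME), NULL /* reserved */);
  /* ... grpc_call_start_batch() operations would be issued here ... */
  grpc_call_unref(call);
  grpc_channel_destroy(channel);
  grpc_completion_queue_shutdown(cq);
  /* Drain to GRPC_QUEUE_SHUTDOWN before destroying, as documented above. */
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL)
             .type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
}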
*/ -GRPCAPI grpc_call_error grpc_call_cancel_with_status(grpc_call *call, +GRPCAPI grpc_call_error grpc_call_cancel_with_status(grpc_call* call, grpc_status_code status, - const char *description, - void *reserved); + const char* description, + void* reserved); /** Ref a call. THREAD SAFETY: grpc_call_ref is thread-compatible */ -GRPCAPI void grpc_call_ref(grpc_call *call); +GRPCAPI void grpc_call_ref(grpc_call* call); /** Unref a call. THREAD SAFETY: grpc_call_unref is thread-compatible */ -GRPCAPI void grpc_call_unref(grpc_call *call); +GRPCAPI void grpc_call_unref(grpc_call* call); /** Request notification of a new call. Once a call is received, a notification tagged with \a tag_new is added to @@ -329,10 +339,10 @@ GRPCAPI void grpc_call_unref(grpc_call *call); Note that \a cq_for_notification must have been registered to the server via \a grpc_server_register_completion_queue. */ GRPCAPI grpc_call_error grpc_server_request_call( - grpc_server *server, grpc_call **call, grpc_call_details *details, - grpc_metadata_array *request_metadata, - grpc_completion_queue *cq_bound_to_call, - grpc_completion_queue *cq_for_notification, void *tag_new); + grpc_server* server, grpc_call** call, grpc_call_details* details, + grpc_metadata_array* request_metadata, + grpc_completion_queue* cq_bound_to_call, + grpc_completion_queue* cq_for_notification, void* tag_new); /** How to handle payloads for a registered method */ typedef enum { @@ -349,8 +359,8 @@ typedef enum { registered_method (as returned by this function). Must be called before grpc_server_start. Returns NULL on failure. */ -GRPCAPI void *grpc_server_register_method( - grpc_server *server, const char *method, const char *host, +GRPCAPI void* grpc_server_register_method( + grpc_server* server, const char* method, const char* host, grpc_server_register_method_payload_handling payload_handling, uint32_t flags); @@ -358,35 +368,38 @@ GRPCAPI void *grpc_server_register_method( must have been registered to the server via grpc_server_register_completion_queue. */ GRPCAPI grpc_call_error grpc_server_request_registered_call( - grpc_server *server, void *registered_method, grpc_call **call, - gpr_timespec *deadline, grpc_metadata_array *request_metadata, - grpc_byte_buffer **optional_payload, - grpc_completion_queue *cq_bound_to_call, - grpc_completion_queue *cq_for_notification, void *tag_new); + grpc_server* server, void* registered_method, grpc_call** call, + gpr_timespec* deadline, grpc_metadata_array* request_metadata, + grpc_byte_buffer** optional_payload, + grpc_completion_queue* cq_bound_to_call, + grpc_completion_queue* cq_for_notification, void* tag_new); /** Create a server. Additional configuration for each incoming channel can be specified with args. If no additional configuration is needed, args can - be NULL. See grpc_channel_args for more. The data in 'args' need only live - through the invocation of this function. */ -GRPCAPI grpc_server *grpc_server_create(const grpc_channel_args *args, - void *reserved); + be NULL. The user data in 'args' need only live through the invocation of + this function. However, if any args of the 'pointer' type are passed, then + the referenced vtable must be maintained by the caller until + grpc_server_destroy terminates. See grpc_channel_args definition for more + on this. */ +GRPCAPI grpc_server* grpc_server_create(const grpc_channel_args* args, + void* reserved); /** Register a completion queue with the server. 
Must be done for any notification completion queue that is passed to grpc_server_request_*_call and to grpc_server_shutdown_and_notify. Must be performed prior to grpc_server_start. */ -GRPCAPI void grpc_server_register_completion_queue(grpc_server *server, - grpc_completion_queue *cq, - void *reserved); +GRPCAPI void grpc_server_register_completion_queue(grpc_server* server, + grpc_completion_queue* cq, + void* reserved); /** Add a HTTP2 over plaintext over tcp listener. Returns bound port number on success, 0 on failure. REQUIRES: server not started */ -GRPCAPI int grpc_server_add_insecure_http2_port(grpc_server *server, - const char *addr); +GRPCAPI int grpc_server_add_insecure_http2_port(grpc_server* server, + const char* addr); /** Start a server - tells all listeners to start listening */ -GRPCAPI void grpc_server_start(grpc_server *server); +GRPCAPI void grpc_server_start(grpc_server* server); /** Begin shutting down a server. After completion, no new calls or connections will be admitted. @@ -395,19 +408,19 @@ GRPCAPI void grpc_server_start(grpc_server *server); Shutdown is idempotent, and all tags will be notified at once if multiple grpc_server_shutdown_and_notify calls are made. 'cq' must have been registered to this server via grpc_server_register_completion_queue. */ -GRPCAPI void grpc_server_shutdown_and_notify(grpc_server *server, - grpc_completion_queue *cq, - void *tag); +GRPCAPI void grpc_server_shutdown_and_notify(grpc_server* server, + grpc_completion_queue* cq, + void* tag); /** Cancel all in-progress calls. Only usable after shutdown. */ -GRPCAPI void grpc_server_cancel_all_calls(grpc_server *server); +GRPCAPI void grpc_server_cancel_all_calls(grpc_server* server); /** Destroy a server. Shutdown must have completed beforehand (i.e. all tags generated by grpc_server_shutdown_and_notify must have been received, and at least one call to grpc_server_shutdown_and_notify must have been made). */ -GRPCAPI void grpc_server_destroy(grpc_server *server); +GRPCAPI void grpc_server_destroy(grpc_server* server); /** Enable or disable a tracer. @@ -417,7 +430,7 @@ GRPCAPI void grpc_server_destroy(grpc_server *server); Use of this function is not strictly thread-safe, but the thread-safety issues raised by it should not be of concern. 
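For the server-side functions declared above, the usual bring-up and shutdown ordering looks roughly like this; the listening address and shutdown tag are placeholders:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

void server_sketch(void) {
  grpc_init();
  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(NULL);
  grpc_server* server = grpc_server_create(NULL /* args */, NULL);
  /* The notification queue must be registered before grpc_server_start(). */
  grpc_server_register_completion_queue(server, cq, NULL);
  int port = grpc_server_add_insecure_http2_port(server, "0.0.0.0:50051");
  (void)port; /* 0 means the bind failed */
  grpc_server_start(server);
  /* ... grpc_server_request_call() and batches driven through cq ... */
  grpc_server_shutdown_and_notify(server, cq, (void*)1 /* shutdown tag */);
  grpc_event ev;
  do {
    ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  } while (!(ev.type == GRPC_OP_COMPLETE && ev.tag == (void*)1));
  grpc_server_destroy(server);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL)
             .type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
}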
*/ -GRPCAPI int grpc_tracer_set_enabled(const char *name, int enabled); +GRPCAPI int grpc_tracer_set_enabled(const char* name, int enabled); /** Check whether a metadata key is legal (will be accepted by core) */ GRPCAPI int grpc_header_key_is_legal(grpc_slice slice); @@ -430,24 +443,24 @@ GRPCAPI int grpc_header_nonbin_value_is_legal(grpc_slice slice); GRPCAPI int grpc_is_binary_header(grpc_slice slice); /** Convert grpc_call_error values to a string */ -GRPCAPI const char *grpc_call_error_to_string(grpc_call_error error); +GRPCAPI const char* grpc_call_error_to_string(grpc_call_error error); /** Create a buffer pool */ -GRPCAPI grpc_resource_quota *grpc_resource_quota_create(const char *trace_name); +GRPCAPI grpc_resource_quota* grpc_resource_quota_create(const char* trace_name); /** Add a reference to a buffer pool */ -GRPCAPI void grpc_resource_quota_ref(grpc_resource_quota *resource_quota); +GRPCAPI void grpc_resource_quota_ref(grpc_resource_quota* resource_quota); /** Drop a reference to a buffer pool */ -GRPCAPI void grpc_resource_quota_unref(grpc_resource_quota *resource_quota); +GRPCAPI void grpc_resource_quota_unref(grpc_resource_quota* resource_quota); /** Update the size of a buffer pool */ -GRPCAPI void grpc_resource_quota_resize(grpc_resource_quota *resource_quota, +GRPCAPI void grpc_resource_quota_resize(grpc_resource_quota* resource_quota, size_t new_size); /** Fetch a vtable for a grpc_channel_arg that points to a grpc_resource_quota */ -GRPCAPI const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void); +GRPCAPI const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void); #ifdef __cplusplus } diff --git a/Sources/CgRPC/include/grpc/grpc_posix.h b/Sources/CgRPC/include/grpc/grpc_posix.h index c7429eaea..5f1ada5aa 100644 --- a/Sources/CgRPC/include/grpc/grpc_posix.h +++ b/Sources/CgRPC/include/grpc/grpc_posix.h @@ -19,9 +19,10 @@ #ifndef GRPC_GRPC_POSIX_H #define GRPC_GRPC_POSIX_H -#include #include +#include + #include #ifdef __cplusplus @@ -37,8 +38,8 @@ extern "C" { /** Create a client channel to 'target' using file descriptor 'fd'. The 'target' argument will be used to indicate the name for this channel. See the comment for grpc_insecure_channel_create for description of 'args' argument. */ -GRPCAPI grpc_channel *grpc_insecure_channel_create_from_fd( - const char *target, int fd, const grpc_channel_args *args); +GRPCAPI grpc_channel* grpc_insecure_channel_create_from_fd( + const char* target, int fd, const grpc_channel_args* args); /** Add the connected communication channel based on file descriptor 'fd' to the 'server'. The 'fd' must be an open file descriptor corresponding to a @@ -48,8 +49,8 @@ GRPCAPI grpc_channel *grpc_insecure_channel_create_from_fd( The 'reserved' pointer MUST be NULL. */ -GRPCAPI void grpc_server_add_insecure_channel_from_fd(grpc_server *server, - void *reserved, int fd); +GRPCAPI void grpc_server_add_insecure_channel_from_fd(grpc_server* server, + void* reserved, int fd); /** GRPC Core POSIX library may internally use signals to optimize some work. The library uses (SIGRTMIN + 6) signal by default. 
Use this API to instruct diff --git a/Sources/CgRPC/include/grpc/grpc_security.h b/Sources/CgRPC/include/grpc/grpc_security.h index 95b144793..e1975a8e0 100644 --- a/Sources/CgRPC/include/grpc/grpc_security.h +++ b/Sources/CgRPC/include/grpc/grpc_security.h @@ -19,6 +19,8 @@ #ifndef GRPC_GRPC_SECURITY_H #define GRPC_GRPC_SECURITY_H +#include + #include #include #include @@ -32,51 +34,51 @@ extern "C" { typedef struct grpc_auth_context grpc_auth_context; typedef struct grpc_auth_property_iterator { - const grpc_auth_context *ctx; + const grpc_auth_context* ctx; size_t index; - const char *name; + const char* name; } grpc_auth_property_iterator; /** value, if not NULL, is guaranteed to be NULL terminated. */ typedef struct grpc_auth_property { - char *name; - char *value; + char* name; + char* value; size_t value_length; } grpc_auth_property; /** Returns NULL when the iterator is at the end. */ -GRPCAPI const grpc_auth_property *grpc_auth_property_iterator_next( - grpc_auth_property_iterator *it); +GRPCAPI const grpc_auth_property* grpc_auth_property_iterator_next( + grpc_auth_property_iterator* it); /** Iterates over the auth context. */ GRPCAPI grpc_auth_property_iterator -grpc_auth_context_property_iterator(const grpc_auth_context *ctx); +grpc_auth_context_property_iterator(const grpc_auth_context* ctx); /** Gets the peer identity. Returns an empty iterator (first _next will return NULL) if the peer is not authenticated. */ GRPCAPI grpc_auth_property_iterator -grpc_auth_context_peer_identity(const grpc_auth_context *ctx); +grpc_auth_context_peer_identity(const grpc_auth_context* ctx); /** Finds a property in the context. May return an empty iterator (first _next will return NULL) if no property with this name was found in the context. */ GRPCAPI grpc_auth_property_iterator grpc_auth_context_find_properties_by_name( - const grpc_auth_context *ctx, const char *name); + const grpc_auth_context* ctx, const char* name); /** Gets the name of the property that indicates the peer identity. Will return NULL if the peer is not authenticated. */ -GRPCAPI const char *grpc_auth_context_peer_identity_property_name( - const grpc_auth_context *ctx); +GRPCAPI const char* grpc_auth_context_peer_identity_property_name( + const grpc_auth_context* ctx); /** Returns 1 if the peer is authenticated, 0 otherwise. */ GRPCAPI int grpc_auth_context_peer_is_authenticated( - const grpc_auth_context *ctx); + const grpc_auth_context* ctx); /** Gets the auth context from the call. Caller needs to call grpc_auth_context_release on the returned context. */ -GRPCAPI grpc_auth_context *grpc_call_auth_context(grpc_call *call); +GRPCAPI grpc_auth_context* grpc_call_auth_context(grpc_call* call); /** Releases the auth context returned from grpc_call_auth_context. */ -GRPCAPI void grpc_auth_context_release(grpc_auth_context *context); +GRPCAPI void grpc_auth_context_release(grpc_auth_context* context); /** -- The following auth context methods should only be called by a server metadata @@ -84,19 +86,38 @@ GRPCAPI void grpc_auth_context_release(grpc_auth_context *context); -- */ /** Add a property. */ -GRPCAPI void grpc_auth_context_add_property(grpc_auth_context *ctx, - const char *name, const char *value, +GRPCAPI void grpc_auth_context_add_property(grpc_auth_context* ctx, + const char* name, const char* value, size_t value_length); /** Add a C string property. 
*/ -GRPCAPI void grpc_auth_context_add_cstring_property(grpc_auth_context *ctx, - const char *name, - const char *value); +GRPCAPI void grpc_auth_context_add_cstring_property(grpc_auth_context* ctx, + const char* name, + const char* value); /** Sets the property name. Returns 1 if successful or 0 in case of failure (which means that no property with this name exists). */ GRPCAPI int grpc_auth_context_set_peer_identity_property_name( - grpc_auth_context *ctx, const char *name); + grpc_auth_context* ctx, const char* name); + +/** --- SSL Session Cache. --- + + A SSL session cache object represents a way to cache client sessions + between connections. Only ticket-based resumption is supported. */ + +typedef struct grpc_ssl_session_cache grpc_ssl_session_cache; + +/** Create LRU cache for client-side SSL sessions with the given capacity. + If capacity is < 1, a default capacity is used instead. */ +GRPCAPI grpc_ssl_session_cache* grpc_ssl_session_cache_create_lru( + size_t capacity); + +/** Destroy SSL session cache. */ +GRPCAPI void grpc_ssl_session_cache_destroy(grpc_ssl_session_cache* cache); + +/** Create a channel arg with the given cache object. */ +GRPCAPI grpc_arg +grpc_ssl_session_cache_create_channel_arg(grpc_ssl_session_cache* cache); /** --- grpc_channel_credentials object. --- @@ -107,12 +128,12 @@ typedef struct grpc_channel_credentials grpc_channel_credentials; /** Releases a channel credentials object. The creator of the credentials object is responsible for its release. */ -GRPCAPI void grpc_channel_credentials_release(grpc_channel_credentials *creds); +GRPCAPI void grpc_channel_credentials_release(grpc_channel_credentials* creds); /** Creates default credentials to connect to a google gRPC service. WARNING: Do NOT use this credentials to connect to a non-google service as this could result in an oauth2 token leak. */ -GRPCAPI grpc_channel_credentials *grpc_google_default_credentials_create(void); +GRPCAPI grpc_channel_credentials* grpc_google_default_credentials_create(void); /** Callback for getting the SSL roots override from the application. In case of success, *pem_roots_certs must be set to a NULL terminated string @@ -121,7 +142,7 @@ GRPCAPI grpc_channel_credentials *grpc_google_default_credentials_create(void); If this function fails and GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment is set to a valid path, it will override the roots specified this func */ typedef grpc_ssl_roots_override_result (*grpc_ssl_roots_override_callback)( - char **pem_root_certs); + char** pem_root_certs); /** Setup a callback to override the default TLS/SSL roots. This function is not thread-safe and must be called at initialization time @@ -135,11 +156,11 @@ GRPCAPI void grpc_set_ssl_roots_override_callback( typedef struct { /** private_key is the NULL-terminated string containing the PEM encoding of the client's private key. */ - const char *private_key; + const char* private_key; /** cert_chain is the NULL-terminated string containing the PEM encoding of the client's certificate chain. */ - const char *cert_chain; + const char* cert_chain; } grpc_ssl_pem_key_cert_pair; /** Creates an SSL credentials object. @@ -153,9 +174,9 @@ typedef struct { - pem_key_cert_pair is a pointer on the object containing client's private key and certificate chain. This parameter can be NULL if the client does not have such a key/cert pair. 
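A sketch of how the new SSL session cache plugs into client channel creation; the target and cache capacity are placeholders, and the teardown order follows the pointer-arg lifetime rule quoted in the channel-creation comments (the referenced object is kept alive until the channel is destroyed):

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

void tls_channel_with_session_cache_sketch(void) {
  /* LRU cache of client sessions, reused across connections for ticket resumption. */
  grpc_ssl_session_cache* cache = grpc_ssl_session_cache_create_lru(1024);
  grpc_arg cache_arg = grpc_ssl_session_cache_create_channel_arg(cache);
  grpc_channel_args args = {1, &cache_arg};
  /* NULL roots / key pair: default roots, no client certificate. */
  grpc_channel_credentials* creds = grpc_ssl_credentials_create(NULL, NULL, NULL);
  grpc_channel* channel =
      grpc_secure_channel_create(creds, "myservice.example.com:443", &args, NULL);
  /* ... use the channel ... */
  grpc_channel_destroy(channel);
  grpc_channel_credentials_release(creds);
  grpc_ssl_session_cache_destroy(cache);
}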
*/ -GRPCAPI grpc_channel_credentials *grpc_ssl_credentials_create( - const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair, - void *reserved); +GRPCAPI grpc_channel_credentials* grpc_ssl_credentials_create( + const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pair, + void* reserved); /** --- grpc_call_credentials object. @@ -167,35 +188,35 @@ typedef struct grpc_call_credentials grpc_call_credentials; /** Releases a call credentials object. The creator of the credentials object is responsible for its release. */ -GRPCAPI void grpc_call_credentials_release(grpc_call_credentials *creds); +GRPCAPI void grpc_call_credentials_release(grpc_call_credentials* creds); /** Creates a composite channel credentials object. */ -GRPCAPI grpc_channel_credentials *grpc_composite_channel_credentials_create( - grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds, - void *reserved); +GRPCAPI grpc_channel_credentials* grpc_composite_channel_credentials_create( + grpc_channel_credentials* channel_creds, grpc_call_credentials* call_creds, + void* reserved); /** Creates a composite call credentials object. */ -GRPCAPI grpc_call_credentials *grpc_composite_call_credentials_create( - grpc_call_credentials *creds1, grpc_call_credentials *creds2, - void *reserved); +GRPCAPI grpc_call_credentials* grpc_composite_call_credentials_create( + grpc_call_credentials* creds1, grpc_call_credentials* creds2, + void* reserved); /** Creates a compute engine credentials object for connecting to Google. WARNING: Do NOT use this credentials to connect to a non-google service as this could result in an oauth2 token leak. */ -GRPCAPI grpc_call_credentials *grpc_google_compute_engine_credentials_create( - void *reserved); +GRPCAPI grpc_call_credentials* grpc_google_compute_engine_credentials_create( + void* reserved); -GRPCAPI gpr_timespec grpc_max_auth_token_lifetime(); +GRPCAPI gpr_timespec grpc_max_auth_token_lifetime(void); /** Creates a JWT credentials object. May return NULL if the input is invalid. - json_key is the JSON key string containing the client's private key. - token_lifetime is the lifetime of each Json Web Token (JWT) created with this credentials. It should not exceed grpc_max_auth_token_lifetime or will be cropped to this value. */ -GRPCAPI grpc_call_credentials * -grpc_service_account_jwt_access_credentials_create(const char *json_key, +GRPCAPI grpc_call_credentials* +grpc_service_account_jwt_access_credentials_create(const char* json_key, gpr_timespec token_lifetime, - void *reserved); + void* reserved); /** Creates an Oauth2 Refresh Token credentials object for connecting to Google. May return NULL if the input is invalid. @@ -203,18 +224,18 @@ grpc_service_account_jwt_access_credentials_create(const char *json_key, this could result in an oauth2 token leak. - json_refresh_token is the JSON string containing the refresh token itself along with a client_id and client_secret. */ -GRPCAPI grpc_call_credentials *grpc_google_refresh_token_credentials_create( - const char *json_refresh_token, void *reserved); +GRPCAPI grpc_call_credentials* grpc_google_refresh_token_credentials_create( + const char* json_refresh_token, void* reserved); /** Creates an Oauth2 Access Token credentials with an access token that was aquired by an out of band mechanism. 
*/ -GRPCAPI grpc_call_credentials *grpc_access_token_credentials_create( - const char *access_token, void *reserved); +GRPCAPI grpc_call_credentials* grpc_access_token_credentials_create( + const char* access_token, void* reserved); /** Creates an IAM credentials object for connecting to Google. */ -GRPCAPI grpc_call_credentials *grpc_google_iam_credentials_create( - const char *authorization_token, const char *authority_selector, - void *reserved); +GRPCAPI grpc_call_credentials* grpc_google_iam_credentials_create( + const char* authorization_token, const char* authority_selector, + void* reserved); /** Callback function to be called by the metadata credentials plugin implementation when the metadata is ready. @@ -228,25 +249,25 @@ GRPCAPI grpc_call_credentials *grpc_google_iam_credentials_create( - error_details contains details about the error if any. In case of success it should be NULL and will be otherwise ignored. */ typedef void (*grpc_credentials_plugin_metadata_cb)( - void *user_data, const grpc_metadata *creds_md, size_t num_creds_md, - grpc_status_code status, const char *error_details); + void* user_data, const grpc_metadata* creds_md, size_t num_creds_md, + grpc_status_code status, const char* error_details); /** Context that can be used by metadata credentials plugin in order to create auth related metadata. */ typedef struct { /** The fully qualifed service url. */ - const char *service_url; + const char* service_url; /** The method name of the RPC being called (not fully qualified). The fully qualified method name can be built from the service_url: full_qualified_method_name = ctx->service_url + '/' + ctx->method_name. */ - const char *method_name; + const char* method_name; /** The auth_context of the channel which gives the server's identity. */ - const grpc_auth_context *channel_auth_context; + const grpc_auth_context* channel_auth_context; /** Reserved for future use. */ - void *reserved; + void* reserved; } grpc_auth_metadata_context; /** Maximum number of metadata entries returnable by a credentials plugin via @@ -278,32 +299,38 @@ typedef struct { \a context is the information that can be used by the plugin to create auth metadata. */ int (*get_metadata)( - void *state, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, void *user_data, + void* state, grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void* user_data, grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], - size_t *num_creds_md, grpc_status_code *status, - const char **error_details); + size_t* num_creds_md, grpc_status_code* status, + const char** error_details); /** Destroys the plugin state. */ - void (*destroy)(void *state); + void (*destroy)(void* state); /** State that will be set as the first parameter of the methods above. */ - void *state; + void* state; /** Type of credentials that this plugin is implementing. */ - const char *type; + const char* type; } grpc_metadata_credentials_plugin; /** Creates a credentials object from a plugin. */ -GRPCAPI grpc_call_credentials *grpc_metadata_credentials_create_from_plugin( - grpc_metadata_credentials_plugin plugin, void *reserved); +GRPCAPI grpc_call_credentials* grpc_metadata_credentials_create_from_plugin( + grpc_metadata_credentials_plugin plugin, void* reserved); /** --- Secure channel creation. --- */ -/** Creates a secure channel using the passed-in credentials. 
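To make the plugin contract above concrete, a minimal synchronous plugin sketch; the header value and plugin type are placeholders, and it assumes that a non-zero return from get_metadata signals the metadata was produced synchronously in creds_md:

#include <string.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/slice.h>

/* Synchronous path: fill creds_md in place and return non-zero; an asynchronous
   plugin would instead stash cb/user_data and invoke cb later. */
static int my_plugin_get_metadata(
    void* state, grpc_auth_metadata_context context,
    grpc_credentials_plugin_metadata_cb cb, void* user_data,
    grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
    size_t* num_creds_md, grpc_status_code* status,
    const char** error_details) {
  (void)state; (void)context; (void)cb; (void)user_data;
  creds_md[0].key = grpc_slice_from_static_string("authorization");
  creds_md[0].value = grpc_slice_from_static_string("Bearer <placeholder-token>");
  creds_md[0].flags = 0;
  *num_creds_md = 1;
  *status = GRPC_STATUS_OK;
  *error_details = NULL;
  return 1; /* handled synchronously */
}

static void my_plugin_destroy(void* state) { (void)state; }

grpc_call_credentials* make_plugin_credentials(void) {
  grpc_metadata_credentials_plugin plugin;
  memset(&plugin, 0, sizeof(plugin));
  plugin.get_metadata = my_plugin_get_metadata;
  plugin.destroy = my_plugin_destroy;
  plugin.state = NULL;
  plugin.type = "my-token-plugin";
  return grpc_metadata_credentials_create_from_plugin(plugin, NULL /* reserved */);
}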
*/ -GRPCAPI grpc_channel *grpc_secure_channel_create( - grpc_channel_credentials *creds, const char *target, - const grpc_channel_args *args, void *reserved); +/** Creates a secure channel using the passed-in credentials. Additional + channel level configuration MAY be provided by grpc_channel_args, though + the expectation is that most clients will want to simply pass NULL. The + user data in 'args' need only live through the invocation of this function. + However, if any args of the 'pointer' type are passed, then the referenced + vtable must be maintained by the caller until grpc_channel_destroy + terminates. See grpc_channel_args definition for more on this. */ +GRPCAPI grpc_channel* grpc_secure_channel_create( + grpc_channel_credentials* creds, const char* target, + const grpc_channel_args* args, void* reserved); /** --- grpc_server_credentials object. --- @@ -314,7 +341,44 @@ typedef struct grpc_server_credentials grpc_server_credentials; /** Releases a server_credentials object. The creator of the server_credentials object is responsible for its release. */ -GRPCAPI void grpc_server_credentials_release(grpc_server_credentials *creds); +GRPCAPI void grpc_server_credentials_release(grpc_server_credentials* creds); + +/** Server certificate config object holds the server's public certificates and + associated private keys, as well as any CA certificates needed for client + certificate validation (if applicable). Create using + grpc_ssl_server_certificate_config_create(). */ +typedef struct grpc_ssl_server_certificate_config + grpc_ssl_server_certificate_config; + +/** Creates a grpc_ssl_server_certificate_config object. + - pem_roots_cert is the NULL-terminated string containing the PEM encoding of + the client root certificates. This parameter may be NULL if the server does + not want the client to be authenticated with SSL. + - pem_key_cert_pairs is an array private key / certificate chains of the + server. This parameter cannot be NULL. + - num_key_cert_pairs indicates the number of items in the private_key_files + and cert_chain_files parameters. It must be at least 1. + - It is the caller's responsibility to free this object via + grpc_ssl_server_certificate_config_destroy(). */ +GRPCAPI grpc_ssl_server_certificate_config* +grpc_ssl_server_certificate_config_create( + const char* pem_root_certs, + const grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs); + +/** Destroys a grpc_ssl_server_certificate_config object. */ +GRPCAPI void grpc_ssl_server_certificate_config_destroy( + grpc_ssl_server_certificate_config* config); + +/** Callback to retrieve updated SSL server certificates, private keys, and + trusted CAs (for client authentication). + - user_data parameter, if not NULL, contains opaque data to be used by the + callback. + - Use grpc_ssl_server_certificate_config_create to create the config. + - The caller assumes ownership of the config. */ +typedef grpc_ssl_certificate_config_reload_status ( + *grpc_ssl_server_certificate_config_callback)( + void* user_data, grpc_ssl_server_certificate_config** config); /** Deprecated in favor of grpc_ssl_server_credentials_create_ex. Creates an SSL server_credentials object. @@ -328,34 +392,69 @@ GRPCAPI void grpc_server_credentials_release(grpc_server_credentials *creds); - force_client_auth, if set to non-zero will force the client to authenticate with an SSL cert. Note that this option is ignored if pem_root_certs is NULL. 
*/ -GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create( - const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, - size_t num_key_cert_pairs, int force_client_auth, void *reserved); +GRPCAPI grpc_server_credentials* grpc_ssl_server_credentials_create( + const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs, int force_client_auth, void* reserved); -/** Same as grpc_ssl_server_credentials_create method except uses +/** Deprecated in favor of grpc_ssl_server_credentials_create_with_options. + Same as grpc_ssl_server_credentials_create method except uses grpc_ssl_client_certificate_request_type enum to support more ways to authenticate client cerificates.*/ -GRPCAPI grpc_server_credentials *grpc_ssl_server_credentials_create_ex( - const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, +GRPCAPI grpc_server_credentials* grpc_ssl_server_credentials_create_ex( + const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, size_t num_key_cert_pairs, grpc_ssl_client_certificate_request_type client_certificate_request, - void *reserved); + void* reserved); + +typedef struct grpc_ssl_server_credentials_options + grpc_ssl_server_credentials_options; + +/** Creates an options object using a certificate config. Use this method when + the certificates and keys of the SSL server will not change during the + server's lifetime. + - Takes ownership of the certificate_config parameter. */ +GRPCAPI grpc_ssl_server_credentials_options* +grpc_ssl_server_credentials_create_options_using_config( + grpc_ssl_client_certificate_request_type client_certificate_request, + grpc_ssl_server_certificate_config* certificate_config); + +/** Creates an options object using a certificate config fetcher. Use this + method to reload the certificates and keys of the SSL server without + interrupting the operation of the server. Initial certificate config will be + fetched during server initialization. + - user_data parameter, if not NULL, contains opaque data which will be passed + to the fetcher (see definition of + grpc_ssl_server_certificate_config_callback). */ +GRPCAPI grpc_ssl_server_credentials_options* +grpc_ssl_server_credentials_create_options_using_config_fetcher( + grpc_ssl_client_certificate_request_type client_certificate_request, + grpc_ssl_server_certificate_config_callback cb, void* user_data); + +/** Destroys a grpc_ssl_server_credentials_options object. */ +GRPCAPI void grpc_ssl_server_credentials_options_destroy( + grpc_ssl_server_credentials_options* options); + +/** Creates an SSL server_credentials object using the provided options struct. + - Takes ownership of the options parameter. */ +GRPCAPI grpc_server_credentials* +grpc_ssl_server_credentials_create_with_options( + grpc_ssl_server_credentials_options* options); /** --- Server-side secure ports. --- */ /** Add a HTTP2 over an encrypted link over tcp listener. Returns bound port number on success, 0 on failure. REQUIRES: server not started */ -GRPCAPI int grpc_server_add_secure_http2_port(grpc_server *server, - const char *addr, - grpc_server_credentials *creds); +GRPCAPI int grpc_server_add_secure_http2_port(grpc_server* server, + const char* addr, + grpc_server_credentials* creds); /** --- Call specific credentials. --- */ /** Sets a credentials to a call. Can only be called on the client side before grpc_call_start_batch. 
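A sketch of the new certificate-config path for server credentials; the PEM strings and listening address are placeholders, and the request type shown skips client authentication:

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

int add_tls_port_sketch(grpc_server* server, const char* pem_private_key,
                        const char* pem_cert_chain) {
  grpc_ssl_pem_key_cert_pair pair;
  pair.private_key = pem_private_key;
  pair.cert_chain = pem_cert_chain;
  /* Static configuration: certificates will not change over the server's lifetime. */
  grpc_ssl_server_certificate_config* config =
      grpc_ssl_server_certificate_config_create(NULL /* no client roots */, &pair, 1);
  grpc_ssl_server_credentials_options* options =
      grpc_ssl_server_credentials_create_options_using_config(
          GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, config); /* takes ownership of config */
  grpc_server_credentials* creds =
      grpc_ssl_server_credentials_create_with_options(options); /* takes ownership of options */
  int port = grpc_server_add_secure_http2_port(server, "0.0.0.0:50051", creds);
  grpc_server_credentials_release(creds);
  return port; /* 0 on bind failure, as documented above */
}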
*/ -GRPCAPI grpc_call_error grpc_call_set_credentials(grpc_call *call, - grpc_call_credentials *creds); +GRPCAPI grpc_call_error grpc_call_set_credentials(grpc_call* call, + grpc_call_credentials* creds); /** --- Auth Metadata Processing --- */ @@ -369,9 +468,9 @@ GRPCAPI grpc_call_error grpc_call_set_credentials(grpc_call *call, GRPC_STATUS PERMISSION_DENIED in case of an authorization failure. - error_details gives details about the error. May be NULL. */ typedef void (*grpc_process_auth_metadata_done_cb)( - void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md, - const grpc_metadata *response_md, size_t num_response_md, - grpc_status_code status, const char *error_details); + void* user_data, const grpc_metadata* consumed_md, size_t num_consumed_md, + const grpc_metadata* response_md, size_t num_response_md, + grpc_status_code status, const char* error_details); /** Pluggable server-side metadata processor object. */ typedef struct { @@ -379,15 +478,85 @@ typedef struct { channel peer and it is the job of the process function to augment it with properties derived from the passed-in metadata. The lifetime of these objects is guaranteed until cb is invoked. */ - void (*process)(void *state, grpc_auth_context *context, - const grpc_metadata *md, size_t num_md, - grpc_process_auth_metadata_done_cb cb, void *user_data); - void (*destroy)(void *state); - void *state; + void (*process)(void* state, grpc_auth_context* context, + const grpc_metadata* md, size_t num_md, + grpc_process_auth_metadata_done_cb cb, void* user_data); + void (*destroy)(void* state); + void* state; } grpc_auth_metadata_processor; GRPCAPI void grpc_server_credentials_set_auth_metadata_processor( - grpc_server_credentials *creds, grpc_auth_metadata_processor processor); + grpc_server_credentials* creds, grpc_auth_metadata_processor processor); + +/** --- ALTS channel/server credentials --- **/ + +/** + * Main interface for ALTS credentials options. The options will contain + * information that will be passed from grpc to TSI layer such as RPC protocol + * versions. ALTS client (channel) and server credentials will have their own + * implementation of this interface. The APIs listed in this header are + * thread-compatible. It is used for experimental purpose for now and subject + * to change. + */ +typedef struct grpc_alts_credentials_options grpc_alts_credentials_options; + +/** + * This method creates a grpc ALTS credentials client options instance. + * It is used for experimental purpose for now and subject to change. + */ +GRPCAPI grpc_alts_credentials_options* +grpc_alts_credentials_client_options_create(); + +/** + * This method creates a grpc ALTS credentials server options instance. + * It is used for experimental purpose for now and subject to change. + */ +GRPCAPI grpc_alts_credentials_options* +grpc_alts_credentials_server_options_create(); + +/** + * This method adds a target service account to grpc client's ALTS credentials + * options instance. It is used for experimental purpose for now and subject + * to change. + * + * - options: grpc ALTS credentials options instance. + * - service_account: service account of target endpoint. + */ +GRPCAPI void grpc_alts_credentials_client_options_add_target_service_account( + grpc_alts_credentials_options* options, const char* service_account); + +/** + * This method destroys a grpc_alts_credentials_options instance by + * de-allocating all of its occupied memory. It is used for experimental purpose + * for now and subject to change. 
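A sketch of the client-side ALTS flow introduced above; the target and service account are placeholders, the credential-creation calls are declared just below, and the teardown order assumes the credentials copy what they need from the options object:

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

grpc_channel* create_alts_channel_sketch(const char* target) {
  grpc_alts_credentials_options* options =
      grpc_alts_credentials_client_options_create();
  /* Optional: restrict which server identities the client will accept. */
  grpc_alts_credentials_client_options_add_target_service_account(
      options, "expected-server@example.iam.gserviceaccount.com");
  grpc_channel_credentials* creds = grpc_alts_credentials_create(options);
  grpc_alts_credentials_options_destroy(options); /* assumed: creds keep their own copy */
  grpc_channel* channel = grpc_secure_channel_create(creds, target, NULL, NULL);
  grpc_channel_credentials_release(creds);
  return channel;
}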
+ * + * - options: a grpc_alts_credentials_options instance that needs to be + * destroyed. + */ +GRPCAPI void grpc_alts_credentials_options_destroy( + grpc_alts_credentials_options* options); + +/** + * This method creates an ALTS channel credential object. It is used for + * experimental purpose for now and subject to change. + * + * - options: grpc ALTS credentials options instance for client. + * + * It returns the created ALTS channel credential object. + */ +GRPCAPI grpc_channel_credentials* grpc_alts_credentials_create( + const grpc_alts_credentials_options* options); + +/** + * This method creates an ALTS server credential object. It is used for + * experimental purpose for now and subject to change. + * + * - options: grpc ALTS credentials options instance for server. + * + * It returns the created ALTS server credential object. + */ +GRPCAPI grpc_server_credentials* grpc_alts_server_credentials_create( + const grpc_alts_credentials_options* options); #ifdef __cplusplus } diff --git a/Sources/CgRPC/include/grpc/grpc_security_constants.h b/Sources/CgRPC/include/grpc/grpc_security_constants.h index fde300dfb..92580ea35 100644 --- a/Sources/CgRPC/include/grpc/grpc_security_constants.h +++ b/Sources/CgRPC/include/grpc/grpc_security_constants.h @@ -29,6 +29,7 @@ extern "C" { #define GRPC_X509_CN_PROPERTY_NAME "x509_common_name" #define GRPC_X509_SAN_PROPERTY_NAME "x509_subject_alternative_name" #define GRPC_X509_PEM_CERT_PROPERTY_NAME "x509_pem_cert" +#define GRPC_SSL_SESSION_REUSED_PROPERTY "ssl_session_reused" /** Environment variable that points to the default SSL roots file. This file must be a PEM encoded file with all the roots such as the one that can be @@ -48,6 +49,13 @@ typedef enum { GRPC_SSL_ROOTS_OVERRIDE_FAIL } grpc_ssl_roots_override_result; +/** Callback results for dynamically loading a SSL certificate config. */ +typedef enum { + GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED, + GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW, + GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL +} grpc_ssl_certificate_config_reload_status; + typedef enum { /** Server does not request client certificate. A client can present a self signed or signed certificates if it wishes to do so and they would be diff --git a/Sources/CgRPC/include/grpc/impl/codegen/atm.h b/Sources/CgRPC/include/grpc/impl/codegen/atm.h index 764bee527..00d83f060 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/atm.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/atm.h @@ -79,9 +79,17 @@ #error could not determine platform for atm #endif +#ifdef __cplusplus +extern "C" { +#endif + /** Adds \a delta to \a *value, clamping the result to the range specified by \a min and \a max. Returns the new value. */ -gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm *value, gpr_atm delta, +gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm* value, gpr_atm delta, gpr_atm min, gpr_atm max); +#ifdef __cplusplus +} +#endif + #endif /* GRPC_IMPL_CODEGEN_ATM_H */ diff --git a/Sources/CgRPC/include/grpc/impl/codegen/atm_gcc_atomic.h b/Sources/CgRPC/include/grpc/impl/codegen/atm_gcc_atomic.h index 1793ec22b..587970854 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/atm_gcc_atomic.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/atm_gcc_atomic.h @@ -23,8 +23,13 @@ __atomic_* interface. 
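The gpr_atm operations being reformatted here compose into the usual lock-free patterns; a tiny refcount sketch, assuming only the primitives declared in these headers:

#include <grpc/support/atm.h>

typedef struct { gpr_atm refs; } refcounted;

static void rc_init(refcounted* r) { gpr_atm_rel_store(&r->refs, 1); }

static void rc_ref(refcounted* r) { gpr_atm_no_barrier_fetch_add(&r->refs, 1); }

/* Returns non-zero when the last reference was dropped. */
static int rc_unref(refcounted* r) {
  return gpr_atm_full_fetch_add(&r->refs, -1) == 1;
}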
*/ #include +#ifdef __cplusplus +extern "C" { +#endif + typedef intptr_t gpr_atm; #define GPR_ATM_MAX INTPTR_MAX +#define GPR_ATM_MIN INTPTR_MIN #ifdef GPR_LOW_LEVEL_COUNTERS extern gpr_atm gpr_counter_atm_cas; @@ -56,22 +61,22 @@ extern gpr_atm gpr_counter_atm_add; GPR_ATM_INC_ADD_THEN( \ __atomic_fetch_add((p), (intptr_t)(delta), __ATOMIC_ACQ_REL)) -static __inline int gpr_atm_no_barrier_cas(gpr_atm *p, gpr_atm o, gpr_atm n) { +static __inline int gpr_atm_no_barrier_cas(gpr_atm* p, gpr_atm o, gpr_atm n) { return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n( p, &o, n, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)); } -static __inline int gpr_atm_acq_cas(gpr_atm *p, gpr_atm o, gpr_atm n) { +static __inline int gpr_atm_acq_cas(gpr_atm* p, gpr_atm o, gpr_atm n) { return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n( p, &o, n, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)); } -static __inline int gpr_atm_rel_cas(gpr_atm *p, gpr_atm o, gpr_atm n) { +static __inline int gpr_atm_rel_cas(gpr_atm* p, gpr_atm o, gpr_atm n) { return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n( p, &o, n, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED)); } -static __inline int gpr_atm_full_cas(gpr_atm *p, gpr_atm o, gpr_atm n) { +static __inline int gpr_atm_full_cas(gpr_atm* p, gpr_atm o, gpr_atm n) { return GPR_ATM_INC_CAS_THEN(__atomic_compare_exchange_n( p, &o, n, 0, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)); } @@ -79,4 +84,8 @@ static __inline int gpr_atm_full_cas(gpr_atm *p, gpr_atm o, gpr_atm n) { #define gpr_atm_full_xchg(p, n) \ GPR_ATM_INC_CAS_THEN(__atomic_exchange_n((p), (n), __ATOMIC_ACQ_REL)) +#ifdef __cplusplus +} +#endif + #endif /* GRPC_IMPL_CODEGEN_ATM_GCC_ATOMIC_H */ diff --git a/Sources/CgRPC/include/grpc/impl/codegen/atm_gcc_sync.h b/Sources/CgRPC/include/grpc/impl/codegen/atm_gcc_sync.h index 27ae0f63d..c0010a346 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/atm_gcc_sync.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/atm_gcc_sync.h @@ -25,6 +25,7 @@ typedef intptr_t gpr_atm; #define GPR_ATM_MAX INTPTR_MAX +#define GPR_ATM_MIN INTPTR_MIN #define GPR_ATM_COMPILE_BARRIER_() __asm__ __volatile__("" : : : "memory") @@ -37,24 +38,24 @@ typedef intptr_t gpr_atm; #define gpr_atm_full_barrier() (__sync_synchronize()) -static __inline gpr_atm gpr_atm_acq_load(const gpr_atm *p) { +static __inline gpr_atm gpr_atm_acq_load(const gpr_atm* p) { gpr_atm value = *p; GPR_ATM_LS_BARRIER_(); return value; } -static __inline gpr_atm gpr_atm_no_barrier_load(const gpr_atm *p) { +static __inline gpr_atm gpr_atm_no_barrier_load(const gpr_atm* p) { gpr_atm value = *p; GPR_ATM_COMPILE_BARRIER_(); return value; } -static __inline void gpr_atm_rel_store(gpr_atm *p, gpr_atm value) { +static __inline void gpr_atm_rel_store(gpr_atm* p, gpr_atm value) { GPR_ATM_LS_BARRIER_(); *p = value; } -static __inline void gpr_atm_no_barrier_store(gpr_atm *p, gpr_atm value) { +static __inline void gpr_atm_no_barrier_store(gpr_atm* p, gpr_atm value) { GPR_ATM_COMPILE_BARRIER_(); *p = value; } @@ -71,7 +72,7 @@ static __inline void gpr_atm_no_barrier_store(gpr_atm *p, gpr_atm value) { #define gpr_atm_rel_cas(p, o, n) gpr_atm_acq_cas((p), (o), (n)) #define gpr_atm_full_cas(p, o, n) gpr_atm_acq_cas((p), (o), (n)) -static __inline gpr_atm gpr_atm_full_xchg(gpr_atm *p, gpr_atm n) { +static __inline gpr_atm gpr_atm_full_xchg(gpr_atm* p, gpr_atm n) { gpr_atm cur; do { cur = gpr_atm_acq_load(p); diff --git a/Sources/CgRPC/include/grpc/impl/codegen/atm_windows.h b/Sources/CgRPC/include/grpc/impl/codegen/atm_windows.h index dfcaa4cc3..f6b27e5df 100644 --- 
a/Sources/CgRPC/include/grpc/impl/codegen/atm_windows.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/atm_windows.h @@ -24,73 +24,74 @@ typedef intptr_t gpr_atm; #define GPR_ATM_MAX INTPTR_MAX +#define GPR_ATM_MIN INTPTR_MIN #define gpr_atm_full_barrier MemoryBarrier -static __inline gpr_atm gpr_atm_acq_load(const gpr_atm *p) { +static __inline gpr_atm gpr_atm_acq_load(const gpr_atm* p) { gpr_atm result = *p; gpr_atm_full_barrier(); return result; } -static __inline gpr_atm gpr_atm_no_barrier_load(const gpr_atm *p) { +static __inline gpr_atm gpr_atm_no_barrier_load(const gpr_atm* p) { /* TODO(dklempner): Can we implement something better here? */ return gpr_atm_acq_load(p); } -static __inline void gpr_atm_rel_store(gpr_atm *p, gpr_atm value) { +static __inline void gpr_atm_rel_store(gpr_atm* p, gpr_atm value) { gpr_atm_full_barrier(); *p = value; } -static __inline void gpr_atm_no_barrier_store(gpr_atm *p, gpr_atm value) { +static __inline void gpr_atm_no_barrier_store(gpr_atm* p, gpr_atm value) { /* TODO(ctiller): Can we implement something better here? */ gpr_atm_rel_store(p, value); } -static __inline int gpr_atm_no_barrier_cas(gpr_atm *p, gpr_atm o, gpr_atm n) { +static __inline int gpr_atm_no_barrier_cas(gpr_atm* p, gpr_atm o, gpr_atm n) { /** InterlockedCompareExchangePointerNoFence() not available on vista or windows7 */ #ifdef GPR_ARCH_64 return o == (gpr_atm)InterlockedCompareExchangeAcquire64( - (volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o); + (volatile LONGLONG*)p, (LONGLONG)n, (LONGLONG)o); #else - return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *)p, + return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG*)p, (LONG)n, (LONG)o); #endif } -static __inline int gpr_atm_acq_cas(gpr_atm *p, gpr_atm o, gpr_atm n) { +static __inline int gpr_atm_acq_cas(gpr_atm* p, gpr_atm o, gpr_atm n) { #ifdef GPR_ARCH_64 return o == (gpr_atm)InterlockedCompareExchangeAcquire64( - (volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o); + (volatile LONGLONG*)p, (LONGLONG)n, (LONGLONG)o); #else - return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG *)p, + return o == (gpr_atm)InterlockedCompareExchangeAcquire((volatile LONG*)p, (LONG)n, (LONG)o); #endif } -static __inline int gpr_atm_rel_cas(gpr_atm *p, gpr_atm o, gpr_atm n) { +static __inline int gpr_atm_rel_cas(gpr_atm* p, gpr_atm o, gpr_atm n) { #ifdef GPR_ARCH_64 return o == (gpr_atm)InterlockedCompareExchangeRelease64( - (volatile LONGLONG *)p, (LONGLONG)n, (LONGLONG)o); + (volatile LONGLONG*)p, (LONGLONG)n, (LONGLONG)o); #else - return o == (gpr_atm)InterlockedCompareExchangeRelease((volatile LONG *)p, + return o == (gpr_atm)InterlockedCompareExchangeRelease((volatile LONG*)p, (LONG)n, (LONG)o); #endif } -static __inline int gpr_atm_full_cas(gpr_atm *p, gpr_atm o, gpr_atm n) { +static __inline int gpr_atm_full_cas(gpr_atm* p, gpr_atm o, gpr_atm n) { #ifdef GPR_ARCH_64 - return o == (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG *)p, + return o == (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG*)p, (LONGLONG)n, (LONGLONG)o); #else - return o == (gpr_atm)InterlockedCompareExchange((volatile LONG *)p, (LONG)n, + return o == (gpr_atm)InterlockedCompareExchange((volatile LONG*)p, (LONG)n, (LONG)o); #endif } -static __inline gpr_atm gpr_atm_no_barrier_fetch_add(gpr_atm *p, +static __inline gpr_atm gpr_atm_no_barrier_fetch_add(gpr_atm* p, gpr_atm delta) { /** Use the CAS operation to get pointer-sized fetch and add */ gpr_atm old; @@ -100,26 +101,26 @@ static __inline gpr_atm 
gpr_atm_no_barrier_fetch_add(gpr_atm *p, return old; } -static __inline gpr_atm gpr_atm_full_fetch_add(gpr_atm *p, gpr_atm delta) { +static __inline gpr_atm gpr_atm_full_fetch_add(gpr_atm* p, gpr_atm delta) { /** Use a CAS operation to get pointer-sized fetch and add */ gpr_atm old; #ifdef GPR_ARCH_64 do { old = *p; - } while (old != (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG *)p, + } while (old != (gpr_atm)InterlockedCompareExchange64((volatile LONGLONG*)p, (LONGLONG)old + delta, (LONGLONG)old)); #else do { old = *p; } while (old != (gpr_atm)InterlockedCompareExchange( - (volatile LONG *)p, (LONG)old + delta, (LONG)old)); + (volatile LONG*)p, (LONG)old + delta, (LONG)old)); #endif return old; } -static __inline gpr_atm gpr_atm_full_xchg(gpr_atm *p, gpr_atm n) { - return (gpr_atm)InterlockedExchangePointer((PVOID *)p, (PVOID)n); +static __inline gpr_atm gpr_atm_full_xchg(gpr_atm* p, gpr_atm n) { + return (gpr_atm)InterlockedExchangePointer((PVOID*)p, (PVOID)n); } #endif /* GRPC_IMPL_CODEGEN_ATM_WINDOWS_H */ diff --git a/Sources/CgRPC/include/grpc/impl/codegen/byte_buffer.h b/Sources/CgRPC/include/grpc/impl/codegen/byte_buffer.h index fc3330571..774655ed6 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/byte_buffer.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/byte_buffer.h @@ -19,6 +19,8 @@ #ifndef GRPC_IMPL_CODEGEN_BYTE_BUFFER_H #define GRPC_IMPL_CODEGEN_BYTE_BUFFER_H +#include + #include #ifdef __cplusplus @@ -29,7 +31,7 @@ extern "C" { * * Increases the reference count for all \a slices processed. The user is * responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/ -GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices, +GRPCAPI grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slices, size_t nslices); /** Returns a *compressed* RAW byte buffer instance over the given slices (up to @@ -38,20 +40,20 @@ GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices, * * Increases the reference count for all \a slices processed. The user is * responsible for invoking grpc_byte_buffer_destroy on the returned instance.*/ -GRPCAPI grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create( - grpc_slice *slices, size_t nslices, grpc_compression_algorithm compression); +GRPCAPI grpc_byte_buffer* grpc_raw_compressed_byte_buffer_create( + grpc_slice* slices, size_t nslices, grpc_compression_algorithm compression); /** Copies input byte buffer \a bb. * * Increases the reference count of all the source slices. The user is * responsible for calling grpc_byte_buffer_destroy over the returned copy. */ -GRPCAPI grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb); +GRPCAPI grpc_byte_buffer* grpc_byte_buffer_copy(grpc_byte_buffer* bb); /** Returns the size of the given byte buffer, in bytes. */ -GRPCAPI size_t grpc_byte_buffer_length(grpc_byte_buffer *bb); +GRPCAPI size_t grpc_byte_buffer_length(grpc_byte_buffer* bb); /** Destroys \a byte_buffer deallocating all its memory. */ -GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer); +GRPCAPI void grpc_byte_buffer_destroy(grpc_byte_buffer* byte_buffer); /** Reader for byte buffers. Iterates over slices in the byte buffer */ struct grpc_byte_buffer_reader; @@ -59,25 +61,25 @@ typedef struct grpc_byte_buffer_reader grpc_byte_buffer_reader; /** Initialize \a reader to read over \a buffer. * Returns 1 upon success, 0 otherwise. 
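
Editor's note (not part of the patch): the three atm backends above (gcc_atomic, gcc_sync, windows) implement one and the same gpr_atm surface. A minimal sketch of caller-side usage, assuming only the public umbrella header:

#include <grpc/support/atm.h>
#include <stdio.h>

int main(void) {
  gpr_atm counter = 0;

  /* Pointer-sized atomic add with full (acquire/release) ordering. */
  gpr_atm_full_fetch_add(&counter, 1);

  /* Acquire-load the current value. */
  gpr_atm seen = gpr_atm_acq_load(&counter);

  /* Compare-and-swap: succeeds only if the counter still holds 'seen'. */
  if (gpr_atm_full_cas(&counter, seen, seen + 1)) {
    printf("counter advanced to %lld\n", (long long)gpr_atm_acq_load(&counter));
  }

  /* Release-store a final value. */
  gpr_atm_rel_store(&counter, 0);
  return 0;
}
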
*/ -GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, - grpc_byte_buffer *buffer); +GRPCAPI int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, + grpc_byte_buffer* buffer); /** Cleanup and destroy \a reader */ -GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader); +GRPCAPI void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader); /** Updates \a slice with the next piece of data from from \a reader and returns * 1. Returns 0 at the end of the stream. Caller is responsible for calling * grpc_slice_unref on the result. */ -GRPCAPI int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader, - grpc_slice *slice); +GRPCAPI int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader, + grpc_slice* slice); /** Merge all data from \a reader into single slice */ GRPCAPI grpc_slice -grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader); +grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader* reader); /** Returns a RAW byte buffer instance from the output of \a reader. */ -GRPCAPI grpc_byte_buffer *grpc_raw_byte_buffer_from_reader( - grpc_byte_buffer_reader *reader); +GRPCAPI grpc_byte_buffer* grpc_raw_byte_buffer_from_reader( + grpc_byte_buffer_reader* reader); #ifdef __cplusplus } diff --git a/Sources/CgRPC/include/grpc/impl/codegen/byte_buffer_reader.h b/Sources/CgRPC/include/grpc/impl/codegen/byte_buffer_reader.h index dc0f15496..e06e19558 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/byte_buffer_reader.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/byte_buffer_reader.h @@ -26,8 +26,8 @@ extern "C" { struct grpc_byte_buffer; struct grpc_byte_buffer_reader { - struct grpc_byte_buffer *buffer_in; - struct grpc_byte_buffer *buffer_out; + struct grpc_byte_buffer* buffer_in; + struct grpc_byte_buffer* buffer_out; /** Different current objects correspond to different types of byte buffers */ union grpc_byte_buffer_reader_current { /** Index into a slice buffer's array of slices */ diff --git a/Sources/CgRPC/include/grpc/impl/codegen/compression_types.h b/Sources/CgRPC/include/grpc/impl/codegen/compression_types.h index 4419e2a44..e35d89296 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/compression_types.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/compression_types.h @@ -29,11 +29,6 @@ extern "C" { * algorithm */ #define GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY \ "grpc-internal-encoding-request" -/** To be used as initial metadata key for the request of a concrete stream - * compression - * algorithm */ -#define GRPC_STREAM_COMPRESSION_REQUEST_ALGORITHM_MD_KEY \ - "grpc-internal-stream-encoding-request" /** To be used in channel arguments. * @@ -43,17 +38,9 @@ extern "C" { * Its value is an int from the \a grpc_compression_algorithm enum. */ #define GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM \ "grpc.default_compression_algorithm" -/** Default stream compression algorithm for the channel. - * Its value is an int from the \a grpc_stream_compression_algorithm enum. */ -#define GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM \ - "grpc.default_stream_compression_algorithm" /** Default compression level for the channel. * Its value is an int from the \a grpc_compression_level enum. */ #define GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL "grpc.default_compression_level" -/** Default stream compression level for the channel. - * Its value is an int from the \a grpc_stream_compression_level enum. 
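
Editor's note (not part of the patch): a hedged sketch of draining a grpc_byte_buffer with the reader API declared above; the helper name and the way the buffer was obtained are illustrative only.

#include <grpc/byte_buffer.h>
#include <grpc/byte_buffer_reader.h>
#include <grpc/slice.h>

/* Walks every slice in 'bb' and returns the total payload length in bytes.
   Returns (size_t)-1 if the reader could not be initialized. */
static size_t drain_byte_buffer(grpc_byte_buffer* bb) {
  grpc_byte_buffer_reader reader;
  if (!grpc_byte_buffer_reader_init(&reader, bb)) return (size_t)-1;

  grpc_slice slice;
  size_t total = 0;
  while (grpc_byte_buffer_reader_next(&reader, &slice)) {
    total += GRPC_SLICE_LENGTH(slice);
    /* The caller owns each returned slice and must unref it. */
    grpc_slice_unref(slice);
  }
  grpc_byte_buffer_reader_destroy(&reader);
  return total;
}
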
*/ -#define GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_LEVEL \ - "grpc.default_stream_compression_level" /** Compression algorithms supported by the channel. * Its value is a bitset (an int). Bits correspond to algorithms in \a * grpc_compression_algorithm. For example, its LSB corresponds to @@ -63,15 +50,6 @@ extern "C" { * be ignored). */ #define GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET \ "grpc.compression_enabled_algorithms_bitset" -/** Stream compression algorithms supported by the channel. - * Its value is a bitset (an int). Bits correspond to algorithms in \a - * grpc_stream_compression_algorithm. For example, its LSB corresponds to - * GRPC_STREAM_COMPRESS_NONE, the next bit to GRPC_STREAM_COMPRESS_DEFLATE, etc. - * Unset bits disable support for the algorithm. By default all algorithms are - * supported. It's not possible to disable GRPC_STREAM_COMPRESS_NONE (the - * attempt will be ignored). */ -#define GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET \ - "grpc.stream_compression_enabled_algorithms_bitset" /** \} */ /** The various compression algorithms supported by gRPC */ @@ -79,17 +57,12 @@ typedef enum { GRPC_COMPRESS_NONE = 0, GRPC_COMPRESS_DEFLATE, GRPC_COMPRESS_GZIP, + /* EXPERIMENTAL: Stream compression is currently experimental. */ + GRPC_COMPRESS_STREAM_GZIP, /* TODO(ctiller): snappy */ GRPC_COMPRESS_ALGORITHMS_COUNT } grpc_compression_algorithm; -/** Stream compresssion algorithms supported by gRPC */ -typedef enum { - GRPC_STREAM_COMPRESS_NONE = 0, - GRPC_STREAM_COMPRESS_GZIP, - GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT -} grpc_stream_compression_algorithm; - /** Compression levels allow a party with knowledge of its peer's accepted * encodings to request compression in an abstract way. The level-algorithm * mapping is performed internally and depends on the peer's supported @@ -102,41 +75,22 @@ typedef enum { GRPC_COMPRESS_LEVEL_COUNT } grpc_compression_level; -/** Compression levels for stream compression algorithms */ -typedef enum { - GRPC_STREAM_COMPRESS_LEVEL_NONE = 0, - GRPC_STREAM_COMPRESS_LEVEL_LOW, - GRPC_STREAM_COMPRESS_LEVEL_MED, - GRPC_STREAM_COMPRESS_LEVEL_HIGH, - GRPC_STREAM_COMPRESS_LEVEL_COUNT -} grpc_stream_compression_level; - typedef struct grpc_compression_options { /** All algs are enabled by default. This option corresponds to the channel * argument key behind \a GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET */ uint32_t enabled_algorithms_bitset; - uint32_t enabled_stream_compression_algorithms_bitset; - /** The default message-wise compression level. It'll be used in the absence - * of * call specific settings. This option corresponds to the channel + /** The default compression level. It'll be used in the absence of call + * specific settings. This option corresponds to the channel * argument key behind \a GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL. If present, - * takes precedence over \a default_algorithm and \a - * default_stream_compression_algorithm. + * takes precedence over \a default_algorithm. * TODO(dgq): currently only available for server channels. */ struct grpc_compression_options_default_level { int is_set; grpc_compression_level level; } default_level; - /** The default stream compression level. It'll be used in the absence of call - * specefic settings. If present, takes precedence over \a default_level, - * \a default_algorithm and \a default_stream_compression_algorithm. 
*/ - struct grpc_stream_compression_options_default_level { - int is_set; - grpc_stream_compression_level level; - } default_stream_compression_level; - /** The default message compression algorithm. It'll be used in the absence of * call specific settings. This option corresponds to the channel argument key * behind \a GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM. */ @@ -144,17 +98,6 @@ typedef struct grpc_compression_options { int is_set; grpc_compression_algorithm algorithm; } default_algorithm; - - /** The default stream compression algorithm. It'll be used in the absence of - * call specific settings. If present, takes precedence over \a - * default_algorithm. This option corresponds to the channel - * argument key behind \a GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM. - */ - struct grpc_stream_compression_options_default_algorithm { - int is_set; - grpc_stream_compression_algorithm algorithm; - } default_stream_compression_algorithm; - } grpc_compression_options; #ifdef __cplusplus diff --git a/Sources/CgRPC/include/grpc/impl/codegen/connectivity_state.h b/Sources/CgRPC/include/grpc/impl/codegen/connectivity_state.h index 545b4fdbc..b70dbef35 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/connectivity_state.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/connectivity_state.h @@ -25,8 +25,6 @@ extern "C" { /** Connectivity state of a channel. */ typedef enum { - /** channel has just been initialized */ - GRPC_CHANNEL_INIT = -1, /** channel is idle */ GRPC_CHANNEL_IDLE, /** channel is connecting */ diff --git a/Sources/CgRPC/include/grpc/impl/codegen/exec_ctx_fwd.h b/Sources/CgRPC/include/grpc/impl/codegen/exec_ctx_fwd.h deleted file mode 100644 index 005ff14e7..000000000 --- a/Sources/CgRPC/include/grpc/impl/codegen/exec_ctx_fwd.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
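
Editor's note (not part of the patch): with the stream-compression knobs gone, message compression is still configured through channel args as shown in the remaining macros. A sketch, assuming the pre-existing grpc_insecure_channel_create API and a placeholder target address:

#include <grpc/grpc.h>
#include <string.h>

int main(void) {
  grpc_init();

  /* Ask the channel to default to gzip message compression. */
  grpc_arg arg;
  memset(&arg, 0, sizeof(arg));
  arg.type = GRPC_ARG_INTEGER;
  arg.key = (char*)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
  arg.value.integer = GRPC_COMPRESS_GZIP;

  grpc_channel_args args = {1, &arg};

  /* "localhost:50051" is a placeholder target for this sketch. */
  grpc_channel* channel =
      grpc_insecure_channel_create("localhost:50051", &args, NULL);

  grpc_channel_destroy(channel);
  grpc_shutdown();
  return 0;
}
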
- * - */ - -#ifndef GRPC_IMPL_CODEGEN_EXEC_CTX_FWD_H -#define GRPC_IMPL_CODEGEN_EXEC_CTX_FWD_H - -/* forward declaration for exec_ctx.h */ -struct grpc_exec_ctx; -typedef struct grpc_exec_ctx grpc_exec_ctx; - -#endif /* GRPC_IMPL_CODEGEN_EXEC_CTX_FWD_H */ diff --git a/Sources/CgRPC/include/grpc/impl/codegen/grpc_types.h b/Sources/CgRPC/include/grpc/impl/codegen/grpc_types.h index 877bf9278..022be5fec 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/grpc_types.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/grpc_types.h @@ -22,7 +22,6 @@ #include #include -#include #include #include #include @@ -39,11 +38,11 @@ typedef enum { } grpc_byte_buffer_type; typedef struct grpc_byte_buffer { - void *reserved; + void* reserved; grpc_byte_buffer_type type; union grpc_byte_buffer_data { struct /* internal */ { - void *reserved[8]; + void* reserved[8]; } reserved; struct grpc_compressed_buffer { grpc_compression_algorithm compression; @@ -84,9 +83,9 @@ typedef enum { } grpc_arg_type; typedef struct grpc_arg_pointer_vtable { - void *(*copy)(void *p); - void (*destroy)(grpc_exec_ctx *exec_ctx, void *p); - int (*cmp)(void *p, void *q); + void* (*copy)(void* p); + void (*destroy)(void* p); + int (*cmp)(void* p, void* q); } grpc_arg_pointer_vtable; /** A single argument... each argument has a key and a value @@ -103,13 +102,13 @@ typedef struct grpc_arg_pointer_vtable { their keys so that it's possible to change them in the future. */ typedef struct { grpc_arg_type type; - char *key; + char* key; union grpc_arg_value { - char *string; + char* string; int integer; struct grpc_arg_pointer { - void *p; - const grpc_arg_pointer_vtable *vtable; + void* p; + const grpc_arg_pointer_vtable* vtable; } pointer; } value; } grpc_arg; @@ -120,14 +119,19 @@ typedef struct { These configuration options are modelled as key-value pairs as defined by grpc_arg; keys are strings to allow easy backwards-compatible extension by arbitrary parties. All evaluation is performed at channel creation - time (i.e. the values in this structure need only live through the + time (i.e. the keys and values in this structure need only live through the creation invocation). + However, if one of the args has grpc_arg_type==GRPC_ARG_POINTER, then the + grpc_arg_pointer_vtable must live until the channel args are done being + used by core (i.e. when the object for use with which they were passed + is destroyed). + See the description of the \ref grpc_arg_keys "available args" for more details. */ typedef struct { size_t num_args; - grpc_arg *args; + grpc_arg* args; } grpc_channel_args; /** \defgroup grpc_arg_keys @@ -240,6 +244,9 @@ typedef struct { /** The time between the first and second connection attempts, in ms */ #define GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS \ "grpc.initial_reconnect_backoff_ms" +/** Minimum amount of time between DNS resolutions, in ms */ +#define GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS \ + "grpc.dns_min_time_between_resolutions_ms" /** The timeout used on servers for finishing handshaking on an incoming connection. Defaults to 120 seconds. */ #define GRPC_ARG_SERVER_HANDSHAKE_TIMEOUT_MS "grpc.server_handshake_timeout_ms" @@ -251,6 +258,10 @@ typedef struct { secure channel is an SSL channel). If this parameter is specified and the underlying is not an SSL channel, it will just be ignored. */ #define GRPC_SSL_TARGET_NAME_OVERRIDE_ARG "grpc.ssl_target_name_override" +/** If non-zero, a pointer to a session cache (a pointer of type + grpc_ssl_session_cache*). 
(use grpc_ssl_session_cache_arg_vtable() to fetch + an appropriate pointer arg vtable) */ +#define GRPC_SSL_SESSION_CACHE_ARG "grpc.ssl_session_cache" /** Maximum metadata size, in bytes. Note this limit applies to the max sum of all metadata key-value entries in a batch of headers. */ #define GRPC_ARG_MAX_METADATA_SIZE "grpc.max_metadata_size" @@ -274,6 +285,10 @@ typedef struct { #define GRPC_ARG_SOCKET_MUTATOR "grpc.socket_mutator" /** The grpc_socket_factory instance to create and bind sockets. A pointer. */ #define GRPC_ARG_SOCKET_FACTORY "grpc.socket_factory" +/** The maximum number of trace events to keep in the tracer for each channel or + * subchannel. The default is 10. If set to 0, channel tracing is disabled. */ +#define GRPC_ARG_MAX_CHANNEL_TRACE_EVENTS_PER_NODE \ + "grpc.max_channel_trace_events_per_node" /** If non-zero, Cronet transport will coalesce packets to fewer frames * when possible. */ #define GRPC_ARG_USE_CRONET_PACKET_COALESCING \ @@ -294,7 +309,7 @@ typedef struct { #define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_call_timeout_ms" /* Timeout in milliseconds to wait for the serverlist from the grpclb load balancer before using fallback backend addresses from the resolver. - If 0, fallback will never be used. */ + If 0, fallback will never be used. Default value is 10000. */ #define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms" /** If non-zero, grpc server's cronet compression workaround will be enabled */ #define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \ @@ -307,6 +322,20 @@ typedef struct { Defaults to "blend". In the current implementation "blend" is equivalent to "latency". */ #define GRPC_ARG_OPTIMIZATION_TARGET "grpc.optimization_target" +/** If set to zero, disables retry behavior. Otherwise, transparent retries + are enabled for all RPCs, and configurable retries are enabled when they + are configured via the service config. For details, see: + https://github.com/grpc/proposal/blob/master/A6-client-retries.md + */ +#define GRPC_ARG_ENABLE_RETRIES "grpc.enable_retries" +/** Per-RPC retry buffer size, in bytes. Default is 256 KiB. */ +#define GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE "grpc.per_rpc_retry_buffer_size" +/** Channel arg that carries the bridged objective c object for custom metrics + * logging filter. */ +#define GRPC_ARG_MOBILE_LOG_CONFIG "grpc.mobile_log_config" +/** If non-zero, client authority filter is disabled for the channel */ +#define GRPC_ARG_DISABLE_CLIENT_AUTHORITY_FILTER \ + "grpc.disable_client_authority_filter" /** \} */ /** Result of a grpc call. If the caller satisfies the prerequisites of a @@ -403,7 +432,7 @@ typedef struct grpc_metadata { There is no need to initialize them, and they will be set to garbage during calls to grpc. */ struct /* internal */ { - void *obfuscated[4]; + void* obfuscated[4]; } internal_data; } grpc_metadata; @@ -431,13 +460,13 @@ typedef struct grpc_event { int success; /** The tag passed to grpc_call_start_batch etc to start this operation. Only GRPC_OP_COMPLETE has a tag. 
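
Editor's note (not part of the patch): the grpc_arg_pointer_vtable above loses its exec_ctx parameter on destroy. A hedged sketch of a POINTER channel arg carrying an application object; the my_cfg type and the "my.cfg" key are made up for illustration.

#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <string.h>

typedef struct { int verbosity; } my_cfg; /* hypothetical user object */

static void* my_cfg_copy(void* p) {
  my_cfg* copy = (my_cfg*)gpr_malloc(sizeof(my_cfg));
  memcpy(copy, p, sizeof(my_cfg));
  return copy;
}

/* Note: per this change, destroy no longer receives a grpc_exec_ctx. */
static void my_cfg_destroy(void* p) { gpr_free(p); }

static int my_cfg_cmp(void* p, void* q) { return memcmp(p, q, sizeof(my_cfg)); }

static const grpc_arg_pointer_vtable my_cfg_vtable = {my_cfg_copy,
                                                      my_cfg_destroy,
                                                      my_cfg_cmp};

/* Builds a POINTER arg; core copies/destroys 'cfg' through the vtable,
   which therefore must outlive the channel args (see the comment above). */
static grpc_arg make_cfg_arg(my_cfg* cfg) {
  grpc_arg arg;
  memset(&arg, 0, sizeof(arg));
  arg.type = GRPC_ARG_POINTER;
  arg.key = (char*)"my.cfg";
  arg.value.pointer.p = cfg;
  arg.value.pointer.vtable = &my_cfg_vtable;
  return arg;
}
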
*/ - void *tag; + void* tag; } grpc_event; typedef struct { size_t count; size_t capacity; - grpc_metadata *metadata; + grpc_metadata* metadata; } grpc_metadata_array; typedef struct { @@ -445,7 +474,7 @@ typedef struct { grpc_slice host; gpr_timespec deadline; uint32_t flags; - void *reserved; + void* reserved; } grpc_call_details; typedef enum { @@ -501,25 +530,21 @@ typedef struct grpc_op { /** Write flags bitset for grpc_begin_messages */ uint32_t flags; /** Reserved for future usage */ - void *reserved; + void* reserved; union grpc_op_data { /** Reserved for future usage */ struct /* internal */ { - void *reserved[8]; + void* reserved[8]; } reserved; struct grpc_op_send_initial_metadata { size_t count; - grpc_metadata *metadata; + grpc_metadata* metadata; /** If \a is_set, \a compression_level will be used for the call. * Otherwise, \a compression_level won't be considered */ struct grpc_op_send_initial_metadata_maybe_compression_level { uint8_t is_set; grpc_compression_level level; } maybe_compression_level; - struct grpc_op_send_initial_metadata_maybe_stream_compression_level { - uint8_t is_set; - grpc_stream_compression_level level; - } maybe_stream_compression_level; } send_initial_metadata; struct grpc_op_send_message { /** This op takes ownership of the slices in send_message. After @@ -527,16 +552,16 @@ typedef struct grpc_op { * and likely empty. The original owner should still call * grpc_byte_buffer_destroy() on this object however. */ - struct grpc_byte_buffer *send_message; + struct grpc_byte_buffer* send_message; } send_message; struct grpc_op_send_status_from_server { size_t trailing_metadata_count; - grpc_metadata *trailing_metadata; + grpc_metadata* trailing_metadata; grpc_status_code status; /** optional: set to NULL if no details need sending, non-NULL if they do * pointer will not be retained past the start_batch call */ - grpc_slice *status_details; + grpc_slice* status_details; } send_status_from_server; /** ownership of the array is with the caller, but ownership of the elements stays with the call object (ie key, value members are owned by the call @@ -544,13 +569,15 @@ typedef struct grpc_op { After the operation completes, call grpc_metadata_array_destroy on this value, or reuse it in a future op. */ struct grpc_op_recv_initial_metadata { - grpc_metadata_array *recv_initial_metadata; + grpc_metadata_array* recv_initial_metadata; } recv_initial_metadata; /** ownership of the byte buffer is moved to the caller; the caller must call grpc_byte_buffer_destroy on this value, or reuse it in a future op. + The returned byte buffer will be NULL if trailing metadata was + received instead of a message. */ struct grpc_op_recv_message { - struct grpc_byte_buffer **recv_message; + struct grpc_byte_buffer** recv_message; } recv_message; struct grpc_op_recv_status_on_client { /** ownership of the array is with the caller, but ownership of the @@ -558,14 +585,18 @@ typedef struct grpc_op { by the call object, trailing_metadata->array is owned by the caller). After the operation completes, call grpc_metadata_array_destroy on this value, or reuse it in a future op. */ - grpc_metadata_array *trailing_metadata; - grpc_status_code *status; - grpc_slice *status_details; + grpc_metadata_array* trailing_metadata; + grpc_status_code* status; + grpc_slice* status_details; + /** If this is not nullptr, it will be populated with the full fidelity + * error string for debugging purposes. The application is responsible + * for freeing the data by using gpr_free(). 
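
Editor's note (not part of the patch): a sketch of filling the RECV_STATUS_ON_CLIENT op so a client batch also receives the new error_string; the surrounding call/tag plumbing is assumed to exist elsewhere.

#include <grpc/grpc.h>
#include <string.h>

/* Fills a single RECV_STATUS_ON_CLIENT op. The caller keeps ownership of the
   out-parameters and must later gpr_free() *error_string, unref the details
   slice, and destroy the metadata array. */
static grpc_op make_recv_status_op(grpc_metadata_array* trailing_md,
                                   grpc_status_code* status,
                                   grpc_slice* status_details,
                                   const char** error_string) {
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op.flags = 0;
  op.reserved = NULL;
  op.data.recv_status_on_client.trailing_metadata = trailing_md;
  op.data.recv_status_on_client.status = status;
  op.data.recv_status_on_client.status_details = status_details;
  /* New in this version: full-fidelity error text for debugging. */
  op.data.recv_status_on_client.error_string = error_string;
  return op;
}
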
*/ + const char** error_string; } recv_status_on_client; struct grpc_op_recv_close_on_server { /** out argument, set to 1 if the call failed in any way (seen as a cancellation on the server), or 0 if the call succeeded */ - int *cancelled; + int* cancelled; } recv_close_on_server; } data; } grpc_op; @@ -574,10 +605,10 @@ typedef struct grpc_op { typedef struct { /** If non-NULL, will be set to point to a string indicating the LB * policy name. Caller takes ownership. */ - char **lb_policy_name; + char** lb_policy_name; /** If non-NULL, will be set to point to a string containing the * service config used by the channel in JSON form. */ - char **service_config_json; + char** service_config_json; } grpc_channel_info; typedef struct grpc_resource_quota grpc_resource_quota; diff --git a/Sources/CgRPC/include/grpc/impl/codegen/port_platform.h b/Sources/CgRPC/include/grpc/impl/codegen/port_platform.h index 472690156..3cbc45ce2 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/port_platform.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/port_platform.h @@ -173,6 +173,7 @@ #endif /* _LP64 */ #ifdef __GLIBC__ #define GPR_POSIX_CRASH_HANDLER 1 +#define GPR_LINUX_PTHREAD_NAME 1 #else /* musl libc */ #define GPR_MUSL_LIBC_COMPAT 1 #endif @@ -194,11 +195,25 @@ #define GPR_PTHREAD_TLS 1 #else /* __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_7 */ #define GPR_CPU_POSIX 1 +/* TODO(vjpai): there is a reported issue in bazel build for Mac where __thread + in a header is currently not working (bazelbuild/bazel#4341). Remove + the following conditional and use GPR_GCC_TLS when that is fixed */ +#ifndef GRPC_BAZEL_BUILD #define GPR_GCC_TLS 1 +#else /* GRPC_BAZEL_BUILD */ +#define GPR_PTHREAD_TLS 1 +#endif /* GRPC_BAZEL_BUILD */ +#define GPR_APPLE_PTHREAD_NAME 1 #endif #else /* __MAC_OS_X_VERSION_MIN_REQUIRED */ #define GPR_CPU_POSIX 1 +/* TODO(vjpai): Remove the following conditional and use only GPR_GCC_TLS + when bazelbuild/bazel#4341 is fixed */ +#ifndef GRPC_BAZEL_BUILD #define GPR_GCC_TLS 1 +#else /* GRPC_BAZEL_BUILD */ +#define GPR_PTHREAD_TLS 1 +#endif /* GRPC_BAZEL_BUILD */ #endif #define GPR_POSIX_CRASH_HANDLER 1 #endif @@ -241,6 +256,29 @@ #else /* _LP64 */ #define GPR_ARCH_32 1 #endif /* _LP64 */ +#elif defined(__OpenBSD__) +#define GPR_PLATFORM_STRING "openbsd" +#ifndef _BSD_SOURCE +#define _BSD_SOURCE +#endif +#define GPR_OPENBSD 1 +#define GPR_CPU_POSIX 1 +#define GPR_GCC_ATOMIC 1 +#define GPR_GCC_TLS 1 +#define GPR_POSIX_LOG 1 +#define GPR_POSIX_ENV 1 +#define GPR_POSIX_TMPFILE 1 +#define GPR_POSIX_STRING 1 +#define GPR_POSIX_SUBPROCESS 1 +#define GPR_POSIX_SYNC 1 +#define GPR_POSIX_TIME 1 +#define GPR_GETPID_IN_UNISTD_H 1 +#define GPR_SUPPORT_CHANNELS_FROM_FD 1 +#ifdef _LP64 +#define GPR_ARCH_64 1 +#else /* _LP64 */ +#define GPR_ARCH_32 1 +#endif /* _LP64 */ #elif defined(__native_client__) #define GPR_PLATFORM_STRING "nacl" #ifndef _BSD_SOURCE @@ -274,6 +312,30 @@ #endif #endif /* GPR_NO_AUTODETECT_PLATFORM */ +/* + * There are platforms for which TLS should not be used even though the + * compiler makes it seem like it's supported (Android NDK < r12b for example). + * This is primarily because of linker problems and toolchain misconfiguration: + * TLS isn't supported until NDK r12b per + * https://developer.android.com/ndk/downloads/revision_history.html + * TLS also does not work with Android NDK if GCC is being used as the compiler + * instead of Clang. + * Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in + * . For NDK < r16, users should define these macros, + * e.g. 
`-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11. */ +#if defined(__ANDROID__) && defined(GPR_GCC_TLS) +#if __has_include() +#include +#endif /* __has_include() */ +#if (defined(__clang__) && defined(__NDK_MAJOR__) && defined(__NDK_MINOR__) && \ + ((__NDK_MAJOR__ < 12) || \ + ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))) || \ + (defined(__GNUC__) && !defined(__clang__)) +#undef GPR_GCC_TLS +#define GPR_PTHREAD_TLS 1 +#endif +#endif /*defined(__ANDROID__) && defined(GPR_GCC_TLS) */ + #if defined(__has_include) #if __has_include() #define GRPC_HAS_CXX11_ATOMIC @@ -372,6 +434,14 @@ typedef unsigned __int64 uint64_t; #endif #endif +#ifndef GRPC_UNUSED +#if defined(__GNUC__) && !defined(__MINGW32__) +#define GRPC_UNUSED __attribute__((unused)) +#else +#define GRPC_UNUSED +#endif +#endif + #ifndef GPR_PRINT_FORMAT_CHECK #ifdef __GNUC__ #define GPR_PRINT_FORMAT_CHECK(FORMAT_STR, ARGS) \ @@ -415,4 +485,34 @@ typedef unsigned __int64 uint64_t; #endif /* GPR_ATTRIBUTE_NO_TSAN (2) */ #endif /* GPR_ATTRIBUTE_NO_TSAN (1) */ +/* GRPC_ALLOW_EXCEPTIONS should be 0 or 1 if exceptions are allowed or not */ +#ifndef GRPC_ALLOW_EXCEPTIONS +/* If not already set, set to 1 on Windows (style guide standard) but to + * 0 on non-Windows platforms unless the compiler defines __EXCEPTIONS */ +#ifdef GPR_WINDOWS +#define GRPC_ALLOW_EXCEPTIONS 1 +#else /* GPR_WINDOWS */ +#ifdef __EXCEPTIONS +#define GRPC_ALLOW_EXCEPTIONS 1 +#else /* __EXCEPTIONS */ +#define GRPC_ALLOW_EXCEPTIONS 0 +#endif /* __EXCEPTIONS */ +#endif /* __GPR_WINDOWS */ +#endif /* GRPC_ALLOW_EXCEPTIONS */ + +/* Use GPR_LIKELY only in cases where you are sure that a certain outcome is the + * most likely. Ideally, also collect performance numbers to justify the claim. + */ +#ifdef __GNUC__ +#define GPR_LIKELY(x) __builtin_expect((x), 1) +#define GPR_UNLIKELY(x) __builtin_expect((x), 0) +#else /* __GNUC__ */ +#define GPR_LIKELY(x) (x) +#define GPR_UNLIKELY(x) (x) +#endif /* __GNUC__ */ + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + #endif /* GRPC_IMPL_CODEGEN_PORT_PLATFORM_H */ diff --git a/Sources/CgRPC/include/grpc/impl/codegen/slice.h b/Sources/CgRPC/include/grpc/impl/codegen/slice.h index 128fa8e12..90dbfd3b1 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/slice.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/slice.h @@ -23,7 +23,6 @@ #include -#include #include typedef struct grpc_slice grpc_slice; @@ -42,8 +41,8 @@ typedef struct grpc_slice grpc_slice; constraints (is the callee allowed to modify the slice?) */ typedef struct grpc_slice_refcount_vtable { - void (*ref)(void *); - void (*unref)(grpc_exec_ctx *exec_ctx, void *); + void (*ref)(void*); + void (*unref)(void*); int (*eq)(grpc_slice a, grpc_slice b); uint32_t (*hash)(grpc_slice slice); } grpc_slice_refcount_vtable; @@ -54,20 +53,20 @@ typedef struct grpc_slice_refcount_vtable { Typically client code should not touch this, and use grpc_slice_malloc, grpc_slice_new, or grpc_slice_new_with_len instead. */ typedef struct grpc_slice_refcount { - const grpc_slice_refcount_vtable *vtable; + const grpc_slice_refcount_vtable* vtable; /** If a subset of this slice is taken, use this pointer for the refcount. 
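
Editor's note (not part of the patch): a tiny sketch of the new GRPC_UNUSED and GPR_LIKELY/GPR_UNLIKELY macros defined in port_platform.h above; the helper names are illustrative.

#include <grpc/impl/codegen/port_platform.h>
#include <stdio.h>

/* GRPC_UNUSED suppresses unused-function warnings on GCC/Clang. */
static GRPC_UNUSED void debug_only_helper(void) {}

static int checked_div(int a, int b) {
  /* GPR_UNLIKELY hints that the error path is rare; on non-GCC compilers
     the macro degrades to the plain expression. */
  if (GPR_UNLIKELY(b == 0)) {
    fprintf(stderr, "division by zero\n");
    return 0;
  }
  return a / b;
}

int main(void) { return checked_div(10, 2) == 5 ? 0 : 1; }
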
Typically points back to the refcount itself, however iterning implementations can use this to avoid a verification step on each hash or equality check */ - struct grpc_slice_refcount *sub_refcount; + struct grpc_slice_refcount* sub_refcount; } grpc_slice_refcount; /* Inlined half of grpc_slice is allowed to expand the size of the overall type by this many bytes */ -#define GRPC_SLICE_INLINE_EXTRA_SIZE sizeof(void *) +#define GRPC_SLICE_INLINE_EXTRA_SIZE sizeof(void*) #define GRPC_SLICE_INLINED_SIZE \ - (sizeof(size_t) + sizeof(uint8_t *) - 1 + GRPC_SLICE_INLINE_EXTRA_SIZE) + (sizeof(size_t) + sizeof(uint8_t*) - 1 + GRPC_SLICE_INLINE_EXTRA_SIZE) /** A grpc_slice s, if initialized, represents the byte range s.bytes[0..s.length-1]. @@ -79,10 +78,10 @@ typedef struct grpc_slice_refcount { If the slice does not have a refcount, it represents an inlined small piece of data that is copied by value. */ struct grpc_slice { - struct grpc_slice_refcount *refcount; + struct grpc_slice_refcount* refcount; union grpc_slice_data { struct grpc_slice_refcounted { - uint8_t *bytes; + uint8_t* bytes; size_t length; } refcounted; struct grpc_slice_inlined { @@ -96,13 +95,13 @@ struct grpc_slice { /** Represents an expandable array of slices, to be interpreted as a single item. */ -typedef struct { +typedef struct grpc_slice_buffer { /** This is for internal use only. External users (i.e any code outside grpc * core) MUST NOT use this field */ - grpc_slice *base_slices; + grpc_slice* base_slices; /** slices in the array (Points to the first valid grpc_slice in the array) */ - grpc_slice *slices; + grpc_slice* slices; /** the number of slices in the array */ size_t count; /** the number of slices allocated in the array. External users (i.e any code diff --git a/Sources/CgRPC/include/grpc/impl/codegen/sync.h b/Sources/CgRPC/include/grpc/impl/codegen/sync.h index 6cdb0c515..3df68c644 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/sync.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/sync.h @@ -43,6 +43,7 @@ extern "C" { /* Platform-specific type declarations of gpr_mu and gpr_cv. */ #include + #include #if defined(GPR_POSIX_SYNC) diff --git a/Sources/CgRPC/include/grpc/impl/codegen/sync_custom.h b/Sources/CgRPC/include/grpc/impl/codegen/sync_custom.h index 0840ad26b..69b1bf6cd 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/sync_custom.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/sync_custom.h @@ -19,6 +19,8 @@ #ifndef GRPC_IMPL_CODEGEN_SYNC_CUSTOM_H #define GRPC_IMPL_CODEGEN_SYNC_CUSTOM_H +#include + #include /* Users defining GPR_CUSTOM_SYNC need to define the following macros. */ diff --git a/Sources/CgRPC/include/grpc/impl/codegen/sync_generic.h b/Sources/CgRPC/include/grpc/impl/codegen/sync_generic.h index e1eea5429..d64db58a8 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/sync_generic.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/sync_generic.h @@ -20,19 +20,27 @@ #define GRPC_IMPL_CODEGEN_SYNC_GENERIC_H /* Generic type defintions for gpr_sync. 
*/ +#include + #include /* gpr_event */ -typedef struct { gpr_atm state; } gpr_event; +typedef struct { + gpr_atm state; +} gpr_event; #define GPR_EVENT_INIT \ { 0 } /* gpr_refcount */ -typedef struct { gpr_atm count; } gpr_refcount; +typedef struct { + gpr_atm count; +} gpr_refcount; /* gpr_stats_counter */ -typedef struct { gpr_atm value; } gpr_stats_counter; +typedef struct { + gpr_atm value; +} gpr_stats_counter; #define GPR_STATS_INIT \ { 0 } diff --git a/Sources/CgRPC/include/grpc/impl/codegen/sync_posix.h b/Sources/CgRPC/include/grpc/impl/codegen/sync_posix.h index 6a3aed92c..d927046c5 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/sync_posix.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/sync_posix.h @@ -19,6 +19,8 @@ #ifndef GRPC_IMPL_CODEGEN_SYNC_POSIX_H #define GRPC_IMPL_CODEGEN_SYNC_POSIX_H +#include + #include #include diff --git a/Sources/CgRPC/include/grpc/impl/codegen/sync_windows.h b/Sources/CgRPC/include/grpc/impl/codegen/sync_windows.h index 39b127603..ba5d5aede 100644 --- a/Sources/CgRPC/include/grpc/impl/codegen/sync_windows.h +++ b/Sources/CgRPC/include/grpc/impl/codegen/sync_windows.h @@ -19,6 +19,8 @@ #ifndef GRPC_IMPL_CODEGEN_SYNC_WINDOWS_H #define GRPC_IMPL_CODEGEN_SYNC_WINDOWS_H +#include + #include typedef struct { diff --git a/Sources/CgRPC/include/grpc/slice.h b/Sources/CgRPC/include/grpc/slice.h index 3f3cff140..ce482922a 100644 --- a/Sources/CgRPC/include/grpc/slice.h +++ b/Sources/CgRPC/include/grpc/slice.h @@ -19,6 +19,8 @@ #ifndef GRPC_SLICE_H #define GRPC_SLICE_H +#include + #include #include @@ -44,20 +46,20 @@ GPRAPI grpc_slice grpc_slice_copy(grpc_slice s); /** Create a slice pointing at some data. Calls malloc to allocate a refcount for the object, and arranges that destroy will be called with the pointer passed in at destruction. */ -GPRAPI grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *)); +GPRAPI grpc_slice grpc_slice_new(void* p, size_t len, void (*destroy)(void*)); /** Equivalent to grpc_slice_new, but with a separate pointer that is passed to the destroy function. This function can be useful when the data is part of a larger structure that must be destroyed when the data is no longer needed. */ -GPRAPI grpc_slice grpc_slice_new_with_user_data(void *p, size_t len, - void (*destroy)(void *), - void *user_data); +GPRAPI grpc_slice grpc_slice_new_with_user_data(void* p, size_t len, + void (*destroy)(void*), + void* user_data); /** Equivalent to grpc_slice_new, but with a two argument destroy function that also takes the slice length. */ -GPRAPI grpc_slice grpc_slice_new_with_len(void *p, size_t len, - void (*destroy)(void *, size_t)); +GPRAPI grpc_slice grpc_slice_new_with_len(void* p, size_t len, + void (*destroy)(void*, size_t)); /** Equivalent to grpc_slice_new(malloc(len), len, free), but saves one malloc() call. @@ -79,19 +81,19 @@ GPRAPI grpc_slice grpc_slice_intern(grpc_slice slice); size_t len = strlen(source); grpc_slice slice = grpc_slice_malloc(len); memcpy(slice->data, source, len); */ -GPRAPI grpc_slice grpc_slice_from_copied_string(const char *source); +GPRAPI grpc_slice grpc_slice_from_copied_string(const char* source); /** Create a slice by copying a buffer. 
Equivalent to: grpc_slice slice = grpc_slice_malloc(len); memcpy(slice->data, source, len); */ -GPRAPI grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t len); +GPRAPI grpc_slice grpc_slice_from_copied_buffer(const char* source, size_t len); /** Create a slice pointing to constant memory */ -GPRAPI grpc_slice grpc_slice_from_static_string(const char *source); +GPRAPI grpc_slice grpc_slice_from_static_string(const char* source); /** Create a slice pointing to constant memory */ -GPRAPI grpc_slice grpc_slice_from_static_buffer(const void *source, size_t len); +GPRAPI grpc_slice grpc_slice_from_static_buffer(const void* source, size_t len); /** Return a result slice derived from s, which shares a ref count with \a s, where result.data==s.data+begin, and result.length==end-begin. The ref count @@ -106,7 +108,7 @@ GPRAPI grpc_slice grpc_slice_sub_no_ref(grpc_slice s, size_t begin, size_t end); /** Splits s into two: modifies s to be s[0:split], and returns a new slice, sharing a refcount with s, that contains s[split:s.length]. Requires s intialized, split <= s.length */ -GPRAPI grpc_slice grpc_slice_split_tail(grpc_slice *s, size_t split); +GPRAPI grpc_slice grpc_slice_split_tail(grpc_slice* s, size_t split); typedef enum { GRPC_SLICE_REF_TAIL = 1, @@ -117,13 +119,13 @@ typedef enum { /** The same as grpc_slice_split_tail, but with an option to skip altering * refcounts (grpc_slice_split_tail_maybe_ref(..., true) is equivalent to * grpc_slice_split_tail(...)) */ -GPRAPI grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice *s, size_t split, +GPRAPI grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice* s, size_t split, grpc_slice_ref_whom ref_whom); /** Splits s into two: modifies s to be s[split:s.length], and returns a new slice, sharing a refcount with s, that contains s[0:split]. Requires s intialized, split <= s.length */ -GPRAPI grpc_slice grpc_slice_split_head(grpc_slice *s, size_t split); +GPRAPI grpc_slice grpc_slice_split_head(grpc_slice* s, size_t split); GPRAPI grpc_slice grpc_empty_slice(void); @@ -136,11 +138,10 @@ GPRAPI int grpc_slice_eq(grpc_slice a, grpc_slice b); The order is arbitrary, and is not guaranteed to be stable across different versions of the API. */ GPRAPI int grpc_slice_cmp(grpc_slice a, grpc_slice b); -GPRAPI int grpc_slice_str_cmp(grpc_slice a, const char *b); -GPRAPI int grpc_slice_buf_cmp(grpc_slice a, const void *b, size_t blen); +GPRAPI int grpc_slice_str_cmp(grpc_slice a, const char* b); /** return non-zero if the first blen bytes of a are equal to b */ -GPRAPI int grpc_slice_buf_start_eq(grpc_slice a, const void *b, size_t blen); +GPRAPI int grpc_slice_buf_start_eq(grpc_slice a, const void* b, size_t blen); /** return the index of the last instance of \a c in \a s, or -1 if not found */ GPRAPI int grpc_slice_rchr(grpc_slice s, char c); @@ -162,7 +163,7 @@ GPRAPI grpc_slice grpc_slice_dup(grpc_slice a); /** Return a copy of slice as a C string. Offers no protection against embedded NULL's. Returned string must be freed with gpr_free. 
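
Editor's note (not part of the patch): a minimal sketch of the slice calls documented above, assuming nothing beyond the public headers.

#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <string.h>

int main(void) {
  /* Copying constructor: the slice owns its own copy of the bytes. */
  grpc_slice s = grpc_slice_from_copied_string("hello, slices");

  /* Split off the tail: 's' now holds "hello,", 'tail' holds " slices",
     and both share the original refcount. */
  grpc_slice tail = grpc_slice_split_tail(&s, 6);

  /* grpc_slice_to_c_string allocates; free the result with gpr_free. */
  char* head_str = grpc_slice_to_c_string(s);
  GPR_ASSERT(strcmp(head_str, "hello,") == 0);
  gpr_free(head_str);

  grpc_slice_unref(tail);
  grpc_slice_unref(s);
  return 0;
}
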
*/ -GPRAPI char *grpc_slice_to_c_string(grpc_slice s); +GPRAPI char* grpc_slice_to_c_string(grpc_slice s); #ifdef __cplusplus } diff --git a/Sources/CgRPC/include/grpc/slice_buffer.h b/Sources/CgRPC/include/grpc/slice_buffer.h index de4b86f77..3260019ca 100644 --- a/Sources/CgRPC/include/grpc/slice_buffer.h +++ b/Sources/CgRPC/include/grpc/slice_buffer.h @@ -19,6 +19,8 @@ #ifndef GRPC_SLICE_BUFFER_H #define GRPC_SLICE_BUFFER_H +#include + #include #ifdef __cplusplus @@ -26,13 +28,13 @@ extern "C" { #endif /** initialize a slice buffer */ -GPRAPI void grpc_slice_buffer_init(grpc_slice_buffer *sb); +GPRAPI void grpc_slice_buffer_init(grpc_slice_buffer* sb); /** destroy a slice buffer - unrefs any held elements */ -GPRAPI void grpc_slice_buffer_destroy(grpc_slice_buffer *sb); +GPRAPI void grpc_slice_buffer_destroy(grpc_slice_buffer* sb); /** Add an element to a slice buffer - takes ownership of the slice. This function is allowed to concatenate the passed in slice to the end of some other slice if desired by the slice buffer. */ -GPRAPI void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice slice); +GPRAPI void grpc_slice_buffer_add(grpc_slice_buffer* sb, grpc_slice slice); /** add an element to a slice buffer - takes ownership of the slice and returns the index of the slice. Guarantees that the slice will not be concatenated at the end of another @@ -40,40 +42,39 @@ GPRAPI void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice slice); slice at the returned index in sb->slices) The implementation MAY decide to concatenate data at the end of a small slice added in this fashion. */ -GPRAPI size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer *sb, +GPRAPI size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer* sb, grpc_slice slice); -GPRAPI void grpc_slice_buffer_addn(grpc_slice_buffer *sb, grpc_slice *slices, +GPRAPI void grpc_slice_buffer_addn(grpc_slice_buffer* sb, grpc_slice* slices, size_t n); /** add a very small (less than 8 bytes) amount of data to the end of a slice buffer: returns a pointer into which to add the data */ -GPRAPI uint8_t *grpc_slice_buffer_tiny_add(grpc_slice_buffer *sb, size_t len); +GPRAPI uint8_t* grpc_slice_buffer_tiny_add(grpc_slice_buffer* sb, size_t len); /** pop the last buffer, but don't unref it */ -GPRAPI void grpc_slice_buffer_pop(grpc_slice_buffer *sb); +GPRAPI void grpc_slice_buffer_pop(grpc_slice_buffer* sb); /** clear a slice buffer, unref all elements */ -GPRAPI void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer *sb); +GPRAPI void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer* sb); /** swap the contents of two slice buffers */ -GPRAPI void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b); +GPRAPI void grpc_slice_buffer_swap(grpc_slice_buffer* a, grpc_slice_buffer* b); /** move all of the elements of src into dst */ -GPRAPI void grpc_slice_buffer_move_into(grpc_slice_buffer *src, - grpc_slice_buffer *dst); +GPRAPI void grpc_slice_buffer_move_into(grpc_slice_buffer* src, + grpc_slice_buffer* dst); /** remove n bytes from the end of a slice buffer */ -GPRAPI void grpc_slice_buffer_trim_end(grpc_slice_buffer *src, size_t n, - grpc_slice_buffer *garbage); +GPRAPI void grpc_slice_buffer_trim_end(grpc_slice_buffer* src, size_t n, + grpc_slice_buffer* garbage); /** move the first n bytes of src into dst */ -GPRAPI void grpc_slice_buffer_move_first(grpc_slice_buffer *src, size_t n, - grpc_slice_buffer *dst); +GPRAPI void grpc_slice_buffer_move_first(grpc_slice_buffer* src, size_t n, + grpc_slice_buffer* dst); /** move 
the first n bytes of src into dst without adding references */ -GPRAPI void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer *src, +GPRAPI void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer* src, size_t n, - grpc_slice_buffer *dst); + grpc_slice_buffer* dst); /** move the first n bytes of src into dst (copying them) */ -GPRAPI void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *src, - size_t n, void *dst); +GPRAPI void grpc_slice_buffer_move_first_into_buffer(grpc_slice_buffer* src, + size_t n, void* dst); /** take the first slice in the slice buffer */ -GPRAPI grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer *src); +GPRAPI grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer* src); /** undo the above with (a possibly different) \a slice */ -GPRAPI void grpc_slice_buffer_undo_take_first(grpc_slice_buffer *src, +GPRAPI void grpc_slice_buffer_undo_take_first(grpc_slice_buffer* src, grpc_slice slice); #ifdef __cplusplus diff --git a/Sources/CgRPC/include/grpc/status.h b/Sources/CgRPC/include/grpc/status.h index 9d8f50bc0..ecb9668bb 100644 --- a/Sources/CgRPC/include/grpc/status.h +++ b/Sources/CgRPC/include/grpc/status.h @@ -19,6 +19,8 @@ #ifndef GRPC_STATUS_H #define GRPC_STATUS_H +#include + #include #endif /* GRPC_STATUS_H */ diff --git a/Sources/CgRPC/include/grpc/support/alloc.h b/Sources/CgRPC/include/grpc/support/alloc.h index 4b59e137f..8bd940bec 100644 --- a/Sources/CgRPC/include/grpc/support/alloc.h +++ b/Sources/CgRPC/include/grpc/support/alloc.h @@ -19,19 +19,19 @@ #ifndef GRPC_SUPPORT_ALLOC_H #define GRPC_SUPPORT_ALLOC_H -#include +#include -#include +#include #ifdef __cplusplus extern "C" { #endif typedef struct gpr_allocation_functions { - void *(*malloc_fn)(size_t size); - void *(*zalloc_fn)(size_t size); /** if NULL, uses malloc_fn then memset */ - void *(*realloc_fn)(void *ptr, size_t size); - void (*free_fn)(void *ptr); + void* (*malloc_fn)(size_t size); + void* (*zalloc_fn)(size_t size); /** if NULL, uses malloc_fn then memset */ + void* (*realloc_fn)(void* ptr, size_t size); + void (*free_fn)(void* ptr); } gpr_allocation_functions; /** malloc. @@ -39,17 +39,18 @@ typedef struct gpr_allocation_functions { * The pointer returned is suitably aligned for any kind of variable it could * contain. */ -GPRAPI void *gpr_malloc(size_t size); +GPRAPI void* gpr_malloc(size_t size); /** like malloc, but zero all bytes before returning them */ -GPRAPI void *gpr_zalloc(size_t size); +GPRAPI void* gpr_zalloc(size_t size); /** free */ -GPRAPI void gpr_free(void *ptr); +GPRAPI void gpr_free(void* ptr); /** realloc, never returns NULL */ -GPRAPI void *gpr_realloc(void *p, size_t size); -/** aligned malloc, never returns NULL, will align to 1 << alignment_log */ -GPRAPI void *gpr_malloc_aligned(size_t size, size_t alignment_log); +GPRAPI void* gpr_realloc(void* p, size_t size); +/** aligned malloc, never returns NULL, will align to alignment, which + * must be a power of 2. */ +GPRAPI void* gpr_malloc_aligned(size_t size, size_t alignment); /** free memory allocated by gpr_malloc_aligned */ -GPRAPI void gpr_free_aligned(void *ptr); +GPRAPI void gpr_free_aligned(void* ptr); /** Request the family of allocation functions in \a functions be used. NOTE * that this request will be honored in a *best effort* basis and that no @@ -58,7 +59,7 @@ GPRAPI void gpr_free_aligned(void *ptr); GPRAPI void gpr_set_allocation_functions(gpr_allocation_functions functions); /** Return the family of allocation functions currently in effect. 
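
Editor's note (not part of the patch): a hedged sketch of grpc_slice_buffer as an expandable list of slices, using only the calls declared above.

#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/log.h>

int main(void) {
  grpc_slice_buffer sb;
  grpc_slice_buffer_init(&sb);

  /* Ownership of each added slice moves into the buffer; small slices may
     be concatenated internally, but the byte length is preserved. */
  grpc_slice_buffer_add(&sb, grpc_slice_from_static_string("hello "));
  grpc_slice_buffer_add(&sb, grpc_slice_from_static_string("world"));
  GPR_ASSERT(sb.length == 11);

  /* Unref all held slices, then release the buffer itself. */
  grpc_slice_buffer_reset_and_unref(&sb);
  grpc_slice_buffer_destroy(&sb);
  return 0;
}
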
*/ -GPRAPI gpr_allocation_functions gpr_get_allocation_functions(); +GPRAPI gpr_allocation_functions gpr_get_allocation_functions(void); #ifdef __cplusplus } diff --git a/Sources/CgRPC/include/grpc/support/atm.h b/Sources/CgRPC/include/grpc/support/atm.h index b3afa520a..073b0a6fc 100644 --- a/Sources/CgRPC/include/grpc/support/atm.h +++ b/Sources/CgRPC/include/grpc/support/atm.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_ATM_H #define GRPC_SUPPORT_ATM_H +#include + #include #endif /* GRPC_SUPPORT_ATM_H */ diff --git a/Sources/CgRPC/include/grpc/support/atm_gcc_atomic.h b/Sources/CgRPC/include/grpc/support/atm_gcc_atomic.h index e7b5ec402..ae603db49 100644 --- a/Sources/CgRPC/include/grpc/support/atm_gcc_atomic.h +++ b/Sources/CgRPC/include/grpc/support/atm_gcc_atomic.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_ATM_GCC_ATOMIC_H #define GRPC_SUPPORT_ATM_GCC_ATOMIC_H +#include + #include #endif /* GRPC_SUPPORT_ATM_GCC_ATOMIC_H */ diff --git a/Sources/CgRPC/include/grpc/support/atm_gcc_sync.h b/Sources/CgRPC/include/grpc/support/atm_gcc_sync.h index 728489770..6f51fdb1a 100644 --- a/Sources/CgRPC/include/grpc/support/atm_gcc_sync.h +++ b/Sources/CgRPC/include/grpc/support/atm_gcc_sync.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_ATM_GCC_SYNC_H #define GRPC_SUPPORT_ATM_GCC_SYNC_H +#include + #include #endif /* GRPC_SUPPORT_ATM_GCC_SYNC_H */ diff --git a/Sources/CgRPC/include/grpc/support/atm_windows.h b/Sources/CgRPC/include/grpc/support/atm_windows.h index 554c59a83..36955e4da 100644 --- a/Sources/CgRPC/include/grpc/support/atm_windows.h +++ b/Sources/CgRPC/include/grpc/support/atm_windows.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_ATM_WINDOWS_H #define GRPC_SUPPORT_ATM_WINDOWS_H +#include + #include #endif /* GRPC_SUPPORT_ATM_WINDOWS_H */ diff --git a/Sources/CgRPC/include/grpc/support/cmdline.h b/Sources/CgRPC/include/grpc/support/cmdline.h deleted file mode 100644 index 9f46491b3..000000000 --- a/Sources/CgRPC/include/grpc/support/cmdline.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_SUPPORT_CMDLINE_H -#define GRPC_SUPPORT_CMDLINE_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** Simple command line parser. - - Supports flags that can be specified as -foo, --foo, --no-foo, -no-foo, etc - And integers, strings that can be specified as -foo=4, -foo blah, etc - - No support for short command line options (but we may get that in the - future.) 
- - Usage (for a program with a single flag argument 'foo'): - - int main(int argc, char **argv) { - gpr_cmdline *cl; - int verbose = 0; - - cl = gpr_cmdline_create("My cool tool"); - gpr_cmdline_add_int(cl, "verbose", "Produce verbose output?", &verbose); - gpr_cmdline_parse(cl, argc, argv); - gpr_cmdline_destroy(cl); - - if (verbose) { - gpr_log(GPR_INFO, "Goodbye cruel world!"); - } - - return 0; - } */ - -typedef struct gpr_cmdline gpr_cmdline; - -/** Construct a command line parser: takes a short description of the tool - doing the parsing */ -GPRAPI gpr_cmdline *gpr_cmdline_create(const char *description); -/** Add an integer parameter, with a name (used on the command line) and some - helpful text (used in the command usage) */ -GPRAPI void gpr_cmdline_add_int(gpr_cmdline *cl, const char *name, - const char *help, int *value); -/** The same, for a boolean flag */ -GPRAPI void gpr_cmdline_add_flag(gpr_cmdline *cl, const char *name, - const char *help, int *value); -/** And for a string */ -GPRAPI void gpr_cmdline_add_string(gpr_cmdline *cl, const char *name, - const char *help, char **value); -/** Set a callback for non-named arguments */ -GPRAPI void gpr_cmdline_on_extra_arg( - gpr_cmdline *cl, const char *name, const char *help, - void (*on_extra_arg)(void *user_data, const char *arg), void *user_data); -/** Enable surviving failure: default behavior is to exit the process */ -GPRAPI void gpr_cmdline_set_survive_failure(gpr_cmdline *cl); -/** Parse the command line; returns 1 on success, on failure either dies - (by default) or returns 0 if gpr_cmdline_set_survive_failure() has been - called */ -GPRAPI int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv); -/** Destroy the parser */ -GPRAPI void gpr_cmdline_destroy(gpr_cmdline *cl); -/** Get a string describing usage */ -GPRAPI char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0); - -#ifdef __cplusplus -} -#endif - -#endif /* GRPC_SUPPORT_CMDLINE_H */ diff --git a/Sources/CgRPC/include/grpc/support/histogram.h b/Sources/CgRPC/include/grpc/support/histogram.h deleted file mode 100644 index 8489daa27..000000000 --- a/Sources/CgRPC/include/grpc/support/histogram.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_SUPPORT_HISTOGRAM_H -#define GRPC_SUPPORT_HISTOGRAM_H - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct gpr_histogram gpr_histogram; - -GPRAPI gpr_histogram *gpr_histogram_create(double resolution, - double max_bucket_start); -GPRAPI void gpr_histogram_destroy(gpr_histogram *h); -GPRAPI void gpr_histogram_add(gpr_histogram *h, double x); - -/** The following merges the second histogram into the first. It only works - if they have the same buckets and resolution. 
Returns 0 on failure, 1 - on success */ -GPRAPI int gpr_histogram_merge(gpr_histogram *dst, const gpr_histogram *src); - -GPRAPI double gpr_histogram_percentile(gpr_histogram *histogram, - double percentile); -GPRAPI double gpr_histogram_mean(gpr_histogram *histogram); -GPRAPI double gpr_histogram_stddev(gpr_histogram *histogram); -GPRAPI double gpr_histogram_variance(gpr_histogram *histogram); -GPRAPI double gpr_histogram_maximum(gpr_histogram *histogram); -GPRAPI double gpr_histogram_minimum(gpr_histogram *histogram); -GPRAPI double gpr_histogram_count(gpr_histogram *histogram); -GPRAPI double gpr_histogram_sum(gpr_histogram *histogram); -GPRAPI double gpr_histogram_sum_of_squares(gpr_histogram *histogram); - -GPRAPI const uint32_t *gpr_histogram_get_contents(gpr_histogram *histogram, - size_t *count); -GPRAPI void gpr_histogram_merge_contents(gpr_histogram *histogram, - const uint32_t *data, - size_t data_count, double min_seen, - double max_seen, double sum, - double sum_of_squares, double count); - -#ifdef __cplusplus -} -#endif - -#endif /* GRPC_SUPPORT_HISTOGRAM_H */ diff --git a/Sources/CgRPC/include/grpc/support/log.h b/Sources/CgRPC/include/grpc/support/log.h index a22fb6a6e..1837d4bd2 100644 --- a/Sources/CgRPC/include/grpc/support/log.h +++ b/Sources/CgRPC/include/grpc/support/log.h @@ -19,12 +19,11 @@ #ifndef GRPC_SUPPORT_LOG_H #define GRPC_SUPPORT_LOG_H -#include +#include + #include #include /* for abort() */ -#include - #ifdef __cplusplus extern "C" { #endif @@ -50,7 +49,7 @@ typedef enum gpr_log_severity { #define GPR_LOG_VERBOSITY_UNSET -1 /** Returns a string representation of the log severity */ -GPRAPI const char *gpr_log_severity_string(gpr_log_severity severity); +GPRAPI const char* gpr_log_severity_string(gpr_log_severity severity); /** Macros to build log contexts at various severity levels */ #define GPR_DEBUG __FILE__, __LINE__, GPR_LOG_SEVERITY_DEBUG @@ -59,28 +58,32 @@ GPRAPI const char *gpr_log_severity_string(gpr_log_severity severity); /** Log a message. It's advised to use GPR_xxx above to generate the context * for each message */ -GPRAPI void gpr_log(const char *file, int line, gpr_log_severity severity, - const char *format, ...) GPR_PRINT_FORMAT_CHECK(4, 5); +GPRAPI void gpr_log(const char* file, int line, gpr_log_severity severity, + const char* format, ...) GPR_PRINT_FORMAT_CHECK(4, 5); -GPRAPI void gpr_log_message(const char *file, int line, - gpr_log_severity severity, const char *message); +GPRAPI int gpr_should_log(gpr_log_severity severity); + +GPRAPI void gpr_log_message(const char* file, int line, + gpr_log_severity severity, const char* message); /** Set global log verbosity */ GPRAPI void gpr_set_log_verbosity(gpr_log_severity min_severity_to_print); -GPRAPI void gpr_log_verbosity_init(); +GPRAPI void gpr_log_verbosity_init(void); /** Log overrides: applications can use this API to intercept logging calls and use their own implementations */ -typedef struct { - const char *file; +struct gpr_log_func_args { + const char* file; int line; gpr_log_severity severity; - const char *message; -} gpr_log_func_args; + const char* message; +}; + +typedef struct gpr_log_func_args gpr_log_func_args; -typedef void (*gpr_log_func)(gpr_log_func_args *args); +typedef void (*gpr_log_func)(gpr_log_func_args* args); GPRAPI void gpr_set_log_function(gpr_log_func func); /** abort() the process if x is zero, having written a line to the log. 
@@ -90,12 +93,18 @@ GPRAPI void gpr_set_log_function(gpr_log_func func); an exception in a higher-level language, consider returning error code. */ #define GPR_ASSERT(x) \ do { \ - if (!(x)) { \ + if (GPR_UNLIKELY(!(x))) { \ gpr_log(GPR_ERROR, "assertion failed: %s", #x); \ abort(); \ } \ } while (0) +#ifndef NDEBUG +#define GPR_DEBUG_ASSERT(x) GPR_ASSERT(x) +#else +#define GPR_DEBUG_ASSERT(x) +#endif + #ifdef __cplusplus } #endif diff --git a/Sources/CgRPC/include/grpc/support/log_windows.h b/Sources/CgRPC/include/grpc/support/log_windows.h index b530fd50d..e833f9d9d 100644 --- a/Sources/CgRPC/include/grpc/support/log_windows.h +++ b/Sources/CgRPC/include/grpc/support/log_windows.h @@ -29,7 +29,7 @@ extern "C" { * formatted error message, corresponding to the error messageid. * Use in conjunction with GetLastError() et al. */ -GPRAPI char *gpr_format_message(int messageid); +GPRAPI char* gpr_format_message(int messageid); #ifdef __cplusplus } diff --git a/Sources/CgRPC/include/grpc/support/string_util.h b/Sources/CgRPC/include/grpc/support/string_util.h index c4fc159d0..2c7460fa1 100644 --- a/Sources/CgRPC/include/grpc/support/string_util.h +++ b/Sources/CgRPC/include/grpc/support/string_util.h @@ -29,7 +29,7 @@ extern "C" { /** Returns a copy of src that can be passed to gpr_free(). If allocation fails or if src is NULL, returns NULL. */ -GPRAPI char *gpr_strdup(const char *src); +GPRAPI char* gpr_strdup(const char* src); /** printf to a newly-allocated string. The set of supported formats may vary between platforms. @@ -39,7 +39,7 @@ GPRAPI char *gpr_strdup(const char *src); On error, returns -1 and sets *strp to NULL. If the format string is bad, the result is undefined. */ -GPRAPI int gpr_asprintf(char **strp, const char *format, ...) +GPRAPI int gpr_asprintf(char** strp, const char* format, ...) GPR_PRINT_FORMAT_CHECK(2, 3); #ifdef __cplusplus diff --git a/Sources/CgRPC/include/grpc/support/subprocess.h b/Sources/CgRPC/include/grpc/support/subprocess.h deleted file mode 100644 index c06e62963..000000000 --- a/Sources/CgRPC/include/grpc/support/subprocess.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
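
Editor's note (not part of the patch): a sketch of redirecting gpr logging through the override hook above; the stderr sink is purely illustrative.

#include <grpc/support/log.h>
#include <stdio.h>

/* Custom sink: receives fully formatted messages from gpr_log(). */
static void my_log_sink(gpr_log_func_args* args) {
  fprintf(stderr, "[%s] %s:%d %s\n",
          gpr_log_severity_string(args->severity), args->file, args->line,
          args->message);
}

int main(void) {
  gpr_set_log_function(my_log_sink);
  gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);

  /* GPR_INFO / GPR_DEBUG expand to (__FILE__, __LINE__, severity). */
  gpr_log(GPR_INFO, "answer=%d", 42);
  gpr_log(GPR_DEBUG, "only visible at DEBUG verbosity");
  return 0;
}
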
- * - */ - -#ifndef GRPC_SUPPORT_SUBPROCESS_H -#define GRPC_SUPPORT_SUBPROCESS_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct gpr_subprocess gpr_subprocess; - -/** .exe on windows, empty on unices */ -GPRAPI const char *gpr_subprocess_binary_extension(); - -GPRAPI gpr_subprocess *gpr_subprocess_create(int argc, const char **argv); -/** if subprocess has not been joined, kill it */ -GPRAPI void gpr_subprocess_destroy(gpr_subprocess *p); -/** returns exit status; can be called at most once */ -GPRAPI int gpr_subprocess_join(gpr_subprocess *p); -GPRAPI void gpr_subprocess_interrupt(gpr_subprocess *p); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif /* GRPC_SUPPORT_SUBPROCESS_H */ diff --git a/Sources/CgRPC/include/grpc/support/sync.h b/Sources/CgRPC/include/grpc/support/sync.h index fe8a59a5d..91d1fa79b 100644 --- a/Sources/CgRPC/include/grpc/support/sync.h +++ b/Sources/CgRPC/include/grpc/support/sync.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_SYNC_H #define GRPC_SUPPORT_SYNC_H +#include + #include /* for gpr_timespec */ #include @@ -34,26 +36,26 @@ extern "C" { gpr_mu are uninitialized when first declared. */ /** Initialize *mu. Requires: *mu uninitialized. */ -GPRAPI void gpr_mu_init(gpr_mu *mu); +GPRAPI void gpr_mu_init(gpr_mu* mu); /** Cause *mu no longer to be initialized, freeing any memory in use. Requires: - *mu initialized; no other concurrent operation on *mu. */ -GPRAPI void gpr_mu_destroy(gpr_mu *mu); + *mu initialized; no other concurrent operation on *mu. */ +GPRAPI void gpr_mu_destroy(gpr_mu* mu); /** Wait until no thread has a lock on *mu, cause the calling thread to own an exclusive lock on *mu, then return. May block indefinitely or crash if the calling thread has a lock on *mu. Requires: *mu initialized. */ -GPRAPI void gpr_mu_lock(gpr_mu *mu); +GPRAPI void gpr_mu_lock(gpr_mu* mu); /** Release an exclusive lock on *mu held by the calling thread. Requires: *mu initialized; the calling thread holds an exclusive lock on *mu. */ -GPRAPI void gpr_mu_unlock(gpr_mu *mu); +GPRAPI void gpr_mu_unlock(gpr_mu* mu); /** Without blocking, attempt to acquire an exclusive lock on *mu for the calling thread, then return non-zero iff success. Fail, if any thread holds the lock; succeeds with high probability if no thread holds the lock. Requires: *mu initialized. */ -GPRAPI int gpr_mu_trylock(gpr_mu *mu); +GPRAPI int gpr_mu_trylock(gpr_mu* mu); /** --- Condition variable interface --- @@ -62,11 +64,11 @@ GPRAPI int gpr_mu_trylock(gpr_mu *mu); uninitialized when first declared. */ /** Initialize *cv. Requires: *cv uninitialized. */ -GPRAPI void gpr_cv_init(gpr_cv *cv); +GPRAPI void gpr_cv_init(gpr_cv* cv); /** Cause *cv no longer to be initialized, freeing any memory in use. Requires: - *cv initialized; no other concurrent operation on *cv.*/ -GPRAPI void gpr_cv_destroy(gpr_cv *cv); + *cv initialized; no other concurrent operation on *cv.*/ +GPRAPI void gpr_cv_destroy(gpr_cv* cv); /** Atomically release *mu and wait on *cv. When the calling thread is woken from *cv or the deadline abs_deadline is exceeded, execute gpr_mu_lock(mu) @@ -75,16 +77,16 @@ GPRAPI void gpr_cv_destroy(gpr_cv *cv); an absolute deadline, or a GPR_TIMESPAN. May return even when not woken explicitly. Requires: *mu and *cv initialized; the calling thread holds an exclusive lock on *mu. 
*/ -GPRAPI int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline); +GPRAPI int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline); /** If any threads are waiting on *cv, wake at least one. Clients may treat this as an optimization of gpr_cv_broadcast() for use in the case where waking more than one waiter is not useful. Requires: *cv initialized. */ -GPRAPI void gpr_cv_signal(gpr_cv *cv); +GPRAPI void gpr_cv_signal(gpr_cv* cv); /** Wake all threads waiting on *cv. Requires: *cv initialized. */ -GPRAPI void gpr_cv_broadcast(gpr_cv *cv); +GPRAPI void gpr_cv_broadcast(gpr_cv* cv); /** --- One-time initialization --- @@ -97,7 +99,7 @@ GPRAPI void gpr_cv_broadcast(gpr_cv *cv); If multiple threads call gpr_once() on the same gpr_once instance, one of them will call (*init_routine)(), and the others will block until that call finishes.*/ -GPRAPI void gpr_once_init(gpr_once *once, void (*init_routine)(void)); +GPRAPI void gpr_once_init(gpr_once* once, void (*init_routine)(void)); /** --- One-time event notification --- @@ -107,51 +109,51 @@ GPRAPI void gpr_once_init(gpr_once *once, void (*init_routine)(void)); It requires no destruction. */ /** Initialize *ev. */ -GPRAPI void gpr_event_init(gpr_event *ev); +GPRAPI void gpr_event_init(gpr_event* ev); /** Set *ev so that gpr_event_get() and gpr_event_wait() will return value. Requires: *ev initialized; value != NULL; no prior or concurrent calls to gpr_event_set(ev, ...) since initialization. */ -GPRAPI void gpr_event_set(gpr_event *ev, void *value); +GPRAPI void gpr_event_set(gpr_event* ev, void* value); /** Return the value set by gpr_event_set(ev, ...), or NULL if no such call has completed. If the result is non-NULL, all operations that occurred prior to the gpr_event_set(ev, ...) set will be visible after this call returns. Requires: *ev initialized. This operation is faster than acquiring a mutex on most platforms. */ -GPRAPI void *gpr_event_get(gpr_event *ev); +GPRAPI void* gpr_event_get(gpr_event* ev); /** Wait until *ev is set by gpr_event_set(ev, ...), or abs_deadline is exceeded, then return gpr_event_get(ev). Requires: *ev initialized. Use abs_deadline==gpr_inf_future for no deadline. When the event has been signalled before the call, this operation is faster than acquiring a mutex on most platforms. */ -GPRAPI void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline); +GPRAPI void* gpr_event_wait(gpr_event* ev, gpr_timespec abs_deadline); /** --- Reference counting --- These calls act on the type gpr_refcount. It requires no destruction. */ /** Initialize *r to value n. */ -GPRAPI void gpr_ref_init(gpr_refcount *r, int n); +GPRAPI void gpr_ref_init(gpr_refcount* r, int n); /** Increment the reference count *r. Requires *r initialized. */ -GPRAPI void gpr_ref(gpr_refcount *r); +GPRAPI void gpr_ref(gpr_refcount* r); /** Increment the reference count *r. Requires *r initialized. Crashes if refcount is zero */ -GPRAPI void gpr_ref_non_zero(gpr_refcount *r); +GPRAPI void gpr_ref_non_zero(gpr_refcount* r); /** Increment the reference count *r by n. Requires *r initialized, n > 0. */ -GPRAPI void gpr_refn(gpr_refcount *r, int n); +GPRAPI void gpr_refn(gpr_refcount* r, int n); /** Decrement the reference count *r and return non-zero iff it has reached zero. . Requires *r initialized. */ -GPRAPI int gpr_unref(gpr_refcount *r); +GPRAPI int gpr_unref(gpr_refcount* r); /** Return non-zero iff the reference count of *r is one, and thus is owned by exactly one object. 
*/ -GPRAPI int gpr_ref_is_unique(gpr_refcount *r); +GPRAPI int gpr_ref_is_unique(gpr_refcount* r); /** --- Stats counters --- @@ -162,13 +164,13 @@ GPRAPI int gpr_ref_is_unique(gpr_refcount *r); synchronize other events. */ /** Initialize *c to the value n. */ -GPRAPI void gpr_stats_init(gpr_stats_counter *c, intptr_t n); +GPRAPI void gpr_stats_init(gpr_stats_counter* c, intptr_t n); /** *c += inc. Requires: *c initialized. */ -GPRAPI void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc); +GPRAPI void gpr_stats_inc(gpr_stats_counter* c, intptr_t inc); /** Return *c. Requires: *c initialized. */ -GPRAPI intptr_t gpr_stats_read(const gpr_stats_counter *c); +GPRAPI intptr_t gpr_stats_read(const gpr_stats_counter* c); /** ==================Example use of interface=================== A producer-consumer queue of up to N integers, @@ -274,7 +276,23 @@ GPRAPI intptr_t gpr_stats_read(const gpr_stats_counter *c); #endif /* 0 */ #ifdef __cplusplus -} +} // extern "C" + +namespace grpc_core { + +class mu_guard { + public: + mu_guard(gpr_mu* mu) : mu_(mu) { gpr_mu_lock(mu); } + ~mu_guard() { gpr_mu_unlock(mu_); } + + mu_guard(const mu_guard&) = delete; + mu_guard& operator=(const mu_guard&) = delete; + + private: + gpr_mu* const mu_; +}; + +} // namespace grpc_core #endif #endif /* GRPC_SUPPORT_SYNC_H */ diff --git a/Sources/CgRPC/include/grpc/support/sync_custom.h b/Sources/CgRPC/include/grpc/support/sync_custom.h index b575f5e00..27cf0e057 100644 --- a/Sources/CgRPC/include/grpc/support/sync_custom.h +++ b/Sources/CgRPC/include/grpc/support/sync_custom.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_SYNC_CUSTOM_H #define GRPC_SUPPORT_SYNC_CUSTOM_H +#include + #include #endif /* GRPC_SUPPORT_SYNC_CUSTOM_H */ diff --git a/Sources/CgRPC/include/grpc/support/sync_generic.h b/Sources/CgRPC/include/grpc/support/sync_generic.h index 970b7a5d9..93028c4af 100644 --- a/Sources/CgRPC/include/grpc/support/sync_generic.h +++ b/Sources/CgRPC/include/grpc/support/sync_generic.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_SYNC_GENERIC_H #define GRPC_SUPPORT_SYNC_GENERIC_H +#include + #include #endif /* GRPC_SUPPORT_SYNC_GENERIC_H */ diff --git a/Sources/CgRPC/include/grpc/support/sync_posix.h b/Sources/CgRPC/include/grpc/support/sync_posix.h index 482a6004e..3dce7ee48 100644 --- a/Sources/CgRPC/include/grpc/support/sync_posix.h +++ b/Sources/CgRPC/include/grpc/support/sync_posix.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_SYNC_POSIX_H #define GRPC_SUPPORT_SYNC_POSIX_H +#include + #include #endif /* GRPC_SUPPORT_SYNC_POSIX_H */ diff --git a/Sources/CgRPC/include/grpc/support/sync_windows.h b/Sources/CgRPC/include/grpc/support/sync_windows.h index 90ce8b776..a493c8642 100644 --- a/Sources/CgRPC/include/grpc/support/sync_windows.h +++ b/Sources/CgRPC/include/grpc/support/sync_windows.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_SYNC_WINDOWS_H #define GRPC_SUPPORT_SYNC_WINDOWS_H +#include + #include #endif /* GRPC_SUPPORT_SYNC_WINDOWS_H */ diff --git a/Sources/CgRPC/include/grpc/support/thd.h b/Sources/CgRPC/include/grpc/support/thd.h deleted file mode 100644 index 25bd8f123..000000000 --- a/Sources/CgRPC/include/grpc/support/thd.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
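Editor's note: the sync.h changes above are pointer-style only for the C surface, but they also add a C++-only RAII wrapper, grpc_core::mu_guard, after the extern "C" block. A small sketch of the mutex/condition-variable pattern those declarations document, assuming gpr_inf_future() and GPR_CLOCK_REALTIME from <grpc/support/time.h> (whose diff here only touches an include); the Mailbox type is illustrative:

```cpp
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

// A single-slot mailbox guarded by the gpr_mu/gpr_cv primitives above.
struct Mailbox {
  gpr_mu mu;
  gpr_cv cv;
  int value = 0;
  bool full = false;
};

static void mailbox_init(Mailbox* m) {
  gpr_mu_init(&m->mu);
  gpr_cv_init(&m->cv);
}

static void mailbox_put(Mailbox* m, int v) {
  // The new grpc_core::mu_guard locks in its constructor and unlocks in its
  // destructor, replacing a manual gpr_mu_lock()/gpr_mu_unlock() pair.
  grpc_core::mu_guard guard(&m->mu);
  m->value = v;
  m->full = true;
  gpr_cv_signal(&m->cv);  // wake one waiter, if any
}

static int mailbox_take(Mailbox* m) {
  gpr_mu_lock(&m->mu);
  // gpr_cv_wait atomically releases mu and re-acquires it before returning;
  // gpr_inf_future(GPR_CLOCK_REALTIME) means "no deadline".
  while (!m->full) {
    gpr_cv_wait(&m->cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  m->full = false;
  int v = m->value;
  gpr_mu_unlock(&m->mu);
  return v;
}

static void mailbox_destroy(Mailbox* m) {
  gpr_cv_destroy(&m->cv);
  gpr_mu_destroy(&m->mu);
}
```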
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_SUPPORT_THD_H -#define GRPC_SUPPORT_THD_H -/** Thread interface for GPR. - - Types - gpr_thd_id a thread identifier. - (Currently no calls take a thread identifier. - It exists for future extensibility.) - gpr_thd_options options used when creating a thread - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef uintptr_t gpr_thd_id; - -/** Thread creation options. */ -typedef struct { - int flags; /** Opaque field. Get and set with accessors below. */ -} gpr_thd_options; - -/** Create a new thread running (*thd_body)(arg) and place its thread identifier - in *t, and return true. If there are insufficient resources, return false. - If options==NULL, default options are used. - The thread is immediately runnable, and exits when (*thd_body)() returns. */ -GPRAPI int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg, - const gpr_thd_options *options); - -/** Return a gpr_thd_options struct with all fields set to defaults. */ -GPRAPI gpr_thd_options gpr_thd_options_default(void); - -/** Set the thread to become detached on startup - this is the default. */ -GPRAPI void gpr_thd_options_set_detached(gpr_thd_options *options); - -/** Set the thread to become joinable - mutually exclusive with detached. */ -GPRAPI void gpr_thd_options_set_joinable(gpr_thd_options *options); - -/** Returns non-zero if the option detached is set. */ -GPRAPI int gpr_thd_options_is_detached(const gpr_thd_options *options); - -/** Returns non-zero if the option joinable is set. */ -GPRAPI int gpr_thd_options_is_joinable(const gpr_thd_options *options); - -/** Returns the identifier of the current thread. */ -GPRAPI gpr_thd_id gpr_thd_currentid(void); - -/** Blocks until the specified thread properly terminates. - Calling this on a detached thread has unpredictable results. */ -GPRAPI void gpr_thd_join(gpr_thd_id t); - -#ifdef __cplusplus -} -#endif - -#endif /* GRPC_SUPPORT_THD_H */ diff --git a/Sources/CgRPC/include/grpc/support/thd_id.h b/Sources/CgRPC/include/grpc/support/thd_id.h new file mode 100644 index 000000000..e9b2b7ec2 --- /dev/null +++ b/Sources/CgRPC/include/grpc/support/thd_id.h @@ -0,0 +1,44 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_SUPPORT_THD_ID_H +#define GRPC_SUPPORT_THD_ID_H +/** Thread ID interface for GPR. + + Used by some wrapped languages for logging purposes. + + Types + gpr_thd_id a unique opaque identifier for a thread. 
+ */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef uintptr_t gpr_thd_id; + +/** Returns the identifier of the current thread. */ +GPRAPI gpr_thd_id gpr_thd_currentid(void); + +#ifdef __cplusplus +} +#endif + +#endif /* GRPC_SUPPORT_THD_ID_H */ diff --git a/Sources/CgRPC/include/grpc/support/time.h b/Sources/CgRPC/include/grpc/support/time.h index 62d354aaf..550ffc2c2 100644 --- a/Sources/CgRPC/include/grpc/support/time.h +++ b/Sources/CgRPC/include/grpc/support/time.h @@ -19,6 +19,8 @@ #ifndef GRPC_SUPPORT_TIME_H #define GRPC_SUPPORT_TIME_H +#include + #include #include diff --git a/Sources/CgRPC/src/core/ext/census/aggregation.h b/Sources/CgRPC/src/core/ext/census/aggregation.h deleted file mode 100644 index 1ba7953ec..000000000 --- a/Sources/CgRPC/src/core/ext/census/aggregation.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -#ifndef GRPC_CORE_EXT_CENSUS_AGGREGATION_H -#define GRPC_CORE_EXT_CENSUS_AGGREGATION_H - -/** Structure used to describe an aggregation type. */ -struct census_aggregation_ops { - /* Create a new aggregation. The pointer returned can be used in future calls - to clone(), free(), record(), data() and reset(). */ - void *(*create)(const void *create_arg); - /* Make a copy of an aggregation created by create() */ - void *(*clone)(const void *aggregation); - /* Destroy an aggregation created by create() */ - void (*free)(void *aggregation); - /* Record a new value against aggregation. */ - void (*record)(void *aggregation, double value); - /* Return current aggregation data. The caller must cast this object into - the correct type for the aggregation result. The object returned can be - freed by using free_data(). */ - void *(*data)(const void *aggregation); - /* free data returned by data() */ - void (*free_data)(void *data); - /* Reset an aggregation to default (zero) values. */ - void (*reset)(void *aggregation); - /* Merge 'from' aggregation into 'to'. Both aggregations must be compatible */ - void (*merge)(void *to, const void *from); - /* Fill buffer with printable string version of aggregation contents. For - debugging only. Returns the number of bytes added to buffer (a value == n - implies the buffer was of insufficient size). */ - size_t (*print)(const void *aggregation, char *buffer, size_t n); -}; - -#endif /* GRPC_CORE_EXT_CENSUS_AGGREGATION_H */ diff --git a/Sources/CgRPC/src/core/ext/census/base_resources.c b/Sources/CgRPC/src/core/ext/census/base_resources.c deleted file mode 100644 index 1f2bb39fe..000000000 --- a/Sources/CgRPC/src/core/ext/census/base_resources.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
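Editor's note: the thread-creation API in the deleted thd.h has no replacement in these public headers; only the thread-id query survives, in the new thd_id.h, which the header comment says exists mainly so wrapped languages can tag log output. A small sketch of that remaining surface; the helper name is illustrative:

```cpp
#include <inttypes.h>

#include <grpc/support/log.h>
#include <grpc/support/thd_id.h>

// Hypothetical helper: tag a debug message with the calling thread's id,
// which is all the slimmed-down thd_id.h interface is intended to support.
static void log_from_this_thread(const char* what) {
  gpr_thd_id tid = gpr_thd_currentid();  // gpr_thd_id is a uintptr_t
  gpr_log(GPR_DEBUG, "[thread %" PRIuPTR "] %s", tid, what);
}
```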
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/census/base_resources.h" - -#include -#include - -#include -#include - -#include "src/core/ext/census/resource.h" - -// Add base RPC resource definitions for use by RPC runtime. -// -// TODO(aveitch): All of these are currently hardwired definitions encoded in -// the code in this file. These should be converted to use an external -// configuration mechanism, in which these resources are defined in a text -// file, which is compiled to .pb format and read by still-to-be-written -// configuration functions. - -// Define all base resources. This should be called by census initialization. -void define_base_resources() { - google_census_Resource_BasicUnit numerator = - google_census_Resource_BasicUnit_SECS; - resource r = {(char *)"client_rpc_latency", // name - (char *)"Client RPC latency in seconds", // description - 0, // prefix - 1, // n_numerators - &numerator, // numerators - 0, // n_denominators - NULL}; // denominators - define_resource(&r); - r = (resource){(char *)"server_rpc_latency", // name - (char *)"Server RPC latency in seconds", // description - 0, // prefix - 1, // n_numerators - &numerator, // numerators - 0, // n_denominators - NULL}; // denominators - define_resource(&r); -} diff --git a/Sources/CgRPC/src/core/ext/census/base_resources.h b/Sources/CgRPC/src/core/ext/census/base_resources.h deleted file mode 100644 index 78a4d1fae..000000000 --- a/Sources/CgRPC/src/core/ext/census/base_resources.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H -#define GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H - -/* Define all base resources. This should be called by census initialization. */ -void define_base_resources(); - -#endif /* GRPC_CORE_EXT_CENSUS_BASE_RESOURCES_H */ diff --git a/Sources/CgRPC/src/core/ext/census/census_interface.h b/Sources/CgRPC/src/core/ext/census/census_interface.h deleted file mode 100644 index a42b68ad6..000000000 --- a/Sources/CgRPC/src/core/ext/census/census_interface.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H -#define GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H - -#include - -/* Maximum length of an individual census trace annotation. */ -#define CENSUS_MAX_ANNOTATION_LENGTH 200 - -/* Structure of a census op id. Define as structure because 64bit integer is not - available on every platform for C89. */ -typedef struct census_op_id { - uint32_t upper; - uint32_t lower; -} census_op_id; - -typedef struct census_rpc_stats census_rpc_stats; - -/* Initializes Census library. No-op if Census is already initialized. */ -void census_init(void); - -/* Shutdown Census Library. */ -void census_shutdown(void); - -/* Annotates grpc method name on a census_op_id. The method name has the format - of /. Returns 0 iff - op_id and method_name are all valid. op_id is valid after its creation and - before calling census_tracing_end_op(). - - TODO(hongyu): Figure out valid characters set for service name and command - name and document requirements here.*/ -int census_add_method_tag(census_op_id op_id, const char *method_name); - -/* Annotates tracing information to a specific op_id. - Up to CENSUS_MAX_ANNOTATION_LENGTH bytes are recorded. */ -void census_tracing_print(census_op_id op_id, const char *annotation); - -/* Starts tracing for an RPC. Returns a locally unique census_op_id */ -census_op_id census_tracing_start_op(void); - -/* Ends tracing. Calling this function will invalidate the input op_id. */ -void census_tracing_end_op(census_op_id op_id); - -#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_INTERFACE_H */ diff --git a/Sources/CgRPC/src/core/ext/census/census_rpc_stats.h b/Sources/CgRPC/src/core/ext/census/census_rpc_stats.h deleted file mode 100644 index 8004ade37..000000000 --- a/Sources/CgRPC/src/core/ext/census/census_rpc_stats.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H -#define GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H - -#include -#include "src/core/ext/census/census_interface.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct census_rpc_stats { - uint64_t cnt; - uint64_t rpc_error_cnt; - uint64_t app_error_cnt; - double elapsed_time_ms; - double api_request_bytes; - double wire_request_bytes; - double api_response_bytes; - double wire_response_bytes; -}; - -/* Creates an empty rpc stats object on heap. 
*/ -census_rpc_stats *census_rpc_stats_create_empty(void); - -typedef struct census_per_method_rpc_stats { - const char *method; - census_rpc_stats minute_stats; /* cumulative stats in the past minute */ - census_rpc_stats hour_stats; /* cumulative stats in the past hour */ - census_rpc_stats total_stats; /* cumulative stats from last gc */ -} census_per_method_rpc_stats; - -typedef struct census_aggregated_rpc_stats { - int num_entries; - census_per_method_rpc_stats *stats; -} census_aggregated_rpc_stats; - -/* Initializes an aggregated rpc stats object to an empty state. */ -void census_aggregated_rpc_stats_set_empty(census_aggregated_rpc_stats *data); - -/* Records client side stats of a rpc. */ -void census_record_rpc_client_stats(census_op_id op_id, - const census_rpc_stats *stats); - -/* Records server side stats of a rpc. */ -void census_record_rpc_server_stats(census_op_id op_id, - const census_rpc_stats *stats); - -/* The following two functions are intended for inprocess query of - per-service per-method stats from grpc implementations. */ - -/* Populates *data_map with server side aggregated per-service per-method - stats. - DO NOT CALL from outside of grpc code. */ -void census_get_server_stats(census_aggregated_rpc_stats *data_map); - -/* Populates *data_map with client side aggregated per-service per-method - stats. - DO NOT CALL from outside of grpc code. */ -void census_get_client_stats(census_aggregated_rpc_stats *data_map); - -void census_stats_store_init(void); -void census_stats_store_shutdown(void); - -#ifdef __cplusplus -} -#endif - -#endif /* GRPC_CORE_EXT_CENSUS_CENSUS_RPC_STATS_H */ diff --git a/Sources/CgRPC/src/core/ext/census/context.c b/Sources/CgRPC/src/core/ext/census/context.c deleted file mode 100644 index 9b25a32e3..000000000 --- a/Sources/CgRPC/src/core/ext/census/context.c +++ /dev/null @@ -1,496 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include "src/core/lib/support/string.h" - -// Functions in this file support the public context API, including -// encoding/decoding as part of context propagation across RPC's. The overall -// requirements (in approximate priority order) for the -// context representation: -// 1. Efficient conversion to/from wire format -// 2. Minimal bytes used on-wire -// 3. Efficient context creation -// 4. Efficient lookup of tag value for a key -// 5. Efficient iteration over tags -// 6. Minimal memory footprint -// -// Notes on tradeoffs/decisions: -// * tag includes 1 byte length of key, as well as nil-terminating byte. These -// are to aid in efficient parsing and the ability to directly return key -// strings. This is more important than saving a single byte/tag on the wire. -// * The wire encoding uses only single byte values. This eliminates the need -// to handle endian-ness conversions. 
It also means there is a hard upper -// limit of 255 for both CENSUS_MAX_TAG_KV_LEN and CENSUS_MAX_PROPAGATED_TAGS. -// * Keep all tag information (keys/values/flags) in a single memory buffer, -// that can be directly copied to the wire. - -// min and max valid chars in tag keys and values. All printable ASCII is OK. -#define MIN_VALID_TAG_CHAR 32 // ' ' -#define MAX_VALID_TAG_CHAR 126 // '~' - -// Structure representing a set of tags. Essentially a count of number of tags -// present, and pointer to a chunk of memory that contains the per-tag details. -struct tag_set { - int ntags; // number of tags. - int ntags_alloc; // ntags + number of deleted tags (total number of tags - // in all of kvm). This will always be == ntags, except during the process - // of building a new tag set. - size_t kvm_size; // number of bytes allocated for key/value storage. - size_t kvm_used; // number of bytes of used key/value memory - char *kvm; // key/value memory. Consists of repeated entries of: - // Offset Size Description - // 0 1 Key length, including trailing 0. (K) - // 1 1 Value length, including trailing 0 (V) - // 2 1 Flags - // 3 K Key bytes - // 3 + K V Value bytes - // - // We refer to the first 3 entries as the 'tag header'. If extra values are - // introduced in the header, you will need to modify the TAG_HEADER_SIZE - // constant, the raw_tag structure (and everything that uses it) and the - // encode/decode functions appropriately. -}; - -// Number of bytes in tag header. -#define TAG_HEADER_SIZE 3 // key length (1) + value length (1) + flags (1) -// Offsets to tag header entries. -#define KEY_LEN_OFFSET 0 -#define VALUE_LEN_OFFSET 1 -#define FLAG_OFFSET 2 - -// raw_tag represents the raw-storage form of a tag in the kvm of a tag_set. -struct raw_tag { - uint8_t key_len; - uint8_t value_len; - uint8_t flags; - char *key; - char *value; -}; - -// Use a reserved flag bit for indication of deleted tag. -#define CENSUS_TAG_DELETED CENSUS_TAG_RESERVED -#define CENSUS_TAG_IS_DELETED(flags) (flags & CENSUS_TAG_DELETED) - -// Primary representation of a context. Composed of 2 underlying tag_set -// structs, one each for propagated and local (non-propagated) tags. This is -// to efficiently support tag encoding/decoding. -// TODO(aveitch): need to add tracing id's/structure. -struct census_context { - struct tag_set tags[2]; - census_context_status status; -}; - -// Indices into the tags member of census_context -#define PROPAGATED_TAGS 0 -#define LOCAL_TAGS 1 - -// Validate (check all characters are in range and size is less than limit) a -// key or value string. Returns 0 if the string is invalid, or the length -// (including terminator) if valid. -static size_t validate_tag(const char *kv) { - size_t len = 1; - char ch; - while ((ch = *kv++) != 0) { - if (ch < MIN_VALID_TAG_CHAR || ch > MAX_VALID_TAG_CHAR) { - return 0; - } - len++; - } - if (len > CENSUS_MAX_TAG_KV_LEN) { - return 0; - } - return len; -} - -// Extract a raw tag given a pointer (raw) to the tag header. Allow for some -// extra bytes in the tag header (see encode/decode functions for usage: this -// allows for future expansion of the tag header). -static char *decode_tag(struct raw_tag *tag, char *header, int offset) { - tag->key_len = (uint8_t)(*header++); - tag->value_len = (uint8_t)(*header++); - tag->flags = (uint8_t)(*header++); - header += offset; - tag->key = header; - header += tag->key_len; - tag->value = header; - return header + tag->value_len; -} - -// Make a copy (in 'to') of an existing tag_set. 
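Editor's note: the removed context.c documents its in-memory tag layout precisely: a 3-byte tag header (key length including the trailing NUL, value length including the trailing NUL, flags) followed by the key bytes and then the value bytes, repeated for each tag in the kvm buffer. A self-contained sketch of that layout, not gRPC code; the key, value, and flag values are made up:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// One key/value entry laid out as the removed tag-header comment describes:
//   [0] key length (incl. NUL)  [1] value length (incl. NUL)  [2] flags
//   [3..] key bytes, then value bytes
int main() {
  const char key[] = "method";  // 7 bytes including NUL
  const char value[] = "Echo";  // 5 bytes including NUL
  const uint8_t flags = 0x1;    // e.g. a "propagate" bit; illustrative only

  uint8_t kvm[3 + sizeof(key) + sizeof(value)];
  kvm[0] = sizeof(key);
  kvm[1] = sizeof(value);
  kvm[2] = flags;
  memcpy(kvm + 3, key, sizeof(key));
  memcpy(kvm + 3 + sizeof(key), value, sizeof(value));

  // Decoding mirrors decode_tag(): read the 3-byte header, then the key and
  // value start at offsets derived from the recorded lengths.
  const char* k = reinterpret_cast<const char*>(kvm + 3);
  const char* v = k + kvm[0];
  printf("key=%s value=%s flags=%u (entry is %zu bytes)\n", k, v,
         (unsigned)kvm[2], sizeof(kvm));
  return 0;
}
```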
-static void tag_set_copy(struct tag_set *to, const struct tag_set *from) { - memcpy(to, from, sizeof(struct tag_set)); - to->kvm = (char *)gpr_malloc(to->kvm_size); - memcpy(to->kvm, from->kvm, from->kvm_used); -} - -// Delete a tag from a tag_set, if it exists (returns true if it did). -static bool tag_set_delete_tag(struct tag_set *tags, const char *key, - size_t key_len) { - char *kvp = tags->kvm; - for (int i = 0; i < tags->ntags_alloc; i++) { - uint8_t *flags = (uint8_t *)(kvp + FLAG_OFFSET); - struct raw_tag tag; - kvp = decode_tag(&tag, kvp, 0); - if (CENSUS_TAG_IS_DELETED(tag.flags)) continue; - if ((key_len == tag.key_len) && (memcmp(key, tag.key, key_len) == 0)) { - *flags |= CENSUS_TAG_DELETED; - tags->ntags--; - return true; - } - } - return false; -} - -// Delete a tag from a context, return true if it existed. -static bool context_delete_tag(census_context *context, const census_tag *tag, - size_t key_len) { - return ( - tag_set_delete_tag(&context->tags[LOCAL_TAGS], tag->key, key_len) || - tag_set_delete_tag(&context->tags[PROPAGATED_TAGS], tag->key, key_len)); -} - -// Add a tag to a tag_set. Return true on success, false if the tag could -// not be added because of constraints on tag set size. This function should -// not be called if the tag may already exist (in a non-deleted state) in -// the tag_set, as that would result in two tags with the same key. -static bool tag_set_add_tag(struct tag_set *tags, const census_tag *tag, - size_t key_len, size_t value_len) { - if (tags->ntags == CENSUS_MAX_PROPAGATED_TAGS) { - return false; - } - const size_t tag_size = key_len + value_len + TAG_HEADER_SIZE; - if (tags->kvm_used + tag_size > tags->kvm_size) { - // allocate new memory if needed - tags->kvm_size += 2 * CENSUS_MAX_TAG_KV_LEN + TAG_HEADER_SIZE; - char *new_kvm = (char *)gpr_malloc(tags->kvm_size); - if (tags->kvm_used > 0) memcpy(new_kvm, tags->kvm, tags->kvm_used); - gpr_free(tags->kvm); - tags->kvm = new_kvm; - } - char *kvp = tags->kvm + tags->kvm_used; - *kvp++ = (char)key_len; - *kvp++ = (char)value_len; - // ensure reserved flags are not used. - *kvp++ = (char)(tag->flags & (CENSUS_TAG_PROPAGATE | CENSUS_TAG_STATS)); - memcpy(kvp, tag->key, key_len); - kvp += key_len; - memcpy(kvp, tag->value, value_len); - tags->kvm_used += tag_size; - tags->ntags++; - tags->ntags_alloc++; - return true; -} - -// Add/modify/delete a tag to/in a context. Caller must validate that tag key -// etc. are valid. -static void context_modify_tag(census_context *context, const census_tag *tag, - size_t key_len, size_t value_len) { - // First delete the tag if it is already present. - bool deleted = context_delete_tag(context, tag, key_len); - bool added = false; - if (CENSUS_TAG_IS_PROPAGATED(tag->flags)) { - added = tag_set_add_tag(&context->tags[PROPAGATED_TAGS], tag, key_len, - value_len); - } else { - added = - tag_set_add_tag(&context->tags[LOCAL_TAGS], tag, key_len, value_len); - } - - if (deleted) { - context->status.n_modified_tags++; - } else { - if (added) { - context->status.n_added_tags++; - } else { - context->status.n_ignored_tags++; - } - } -} - -// Remove memory used for deleted tags from a tag set. Basic algorithm: -// 1) Walk through tag set to find first deleted tag. Record where it is. -// 2) Find the next not-deleted tag. Copy all of kvm from there to the end -// "over" the deleted tags -// 3) repeat #1 and #2 until we have seen all tags -// 4) if we are still looking for a not-deleted tag, then all the end portion -// of the kvm is deleted. 
Just reduce the used amount of memory by the -// appropriate amount. -static void tag_set_flatten(struct tag_set *tags) { - if (tags->ntags == tags->ntags_alloc) return; - bool found_deleted = false; // found a deleted tag. - char *kvp = tags->kvm; - char *dbase = NULL; // record location of deleted tag - for (int i = 0; i < tags->ntags_alloc; i++) { - struct raw_tag tag; - char *next_kvp = decode_tag(&tag, kvp, 0); - if (found_deleted) { - if (!CENSUS_TAG_IS_DELETED(tag.flags)) { - ptrdiff_t reduce = kvp - dbase; // #bytes in deleted tags - GPR_ASSERT(reduce > 0); - ptrdiff_t copy_size = tags->kvm + tags->kvm_used - kvp; - GPR_ASSERT(copy_size > 0); - memmove(dbase, kvp, (size_t)copy_size); - tags->kvm_used -= (size_t)reduce; - next_kvp -= reduce; - found_deleted = false; - } - } else { - if (CENSUS_TAG_IS_DELETED(tag.flags)) { - dbase = kvp; - found_deleted = true; - } - } - kvp = next_kvp; - } - if (found_deleted) { - GPR_ASSERT(dbase > tags->kvm); - tags->kvm_used = (size_t)(dbase - tags->kvm); - } - tags->ntags_alloc = tags->ntags; -} - -census_context *census_context_create(const census_context *base, - const census_tag *tags, int ntags, - census_context_status const **status) { - census_context *context = - (census_context *)gpr_malloc(sizeof(census_context)); - // If we are given a base, copy it into our new tag set. Otherwise set it - // to zero/NULL everything. - if (base == NULL) { - memset(context, 0, sizeof(census_context)); - } else { - tag_set_copy(&context->tags[PROPAGATED_TAGS], &base->tags[PROPAGATED_TAGS]); - tag_set_copy(&context->tags[LOCAL_TAGS], &base->tags[LOCAL_TAGS]); - memset(&context->status, 0, sizeof(context->status)); - } - // Walk over the additional tags and, for those that aren't invalid, modify - // the context to add/replace/delete as required. - for (int i = 0; i < ntags; i++) { - const census_tag *tag = &tags[i]; - size_t key_len = validate_tag(tag->key); - // ignore the tag if it is invalid or too short. - if (key_len <= 1) { - context->status.n_invalid_tags++; - } else { - if (tag->value != NULL) { - size_t value_len = validate_tag(tag->value); - if (value_len != 0) { - context_modify_tag(context, tag, key_len, value_len); - } else { - context->status.n_invalid_tags++; - } - } else { - if (context_delete_tag(context, tag, key_len)) { - context->status.n_deleted_tags++; - } - } - } - } - // Remove any deleted tags, update status if needed, and return. 
- tag_set_flatten(&context->tags[PROPAGATED_TAGS]); - tag_set_flatten(&context->tags[LOCAL_TAGS]); - context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags; - context->status.n_local_tags = context->tags[LOCAL_TAGS].ntags; - if (status) { - *status = &context->status; - } - return context; -} - -const census_context_status *census_context_get_status( - const census_context *context) { - return &context->status; -} - -void census_context_destroy(census_context *context) { - gpr_free(context->tags[PROPAGATED_TAGS].kvm); - gpr_free(context->tags[LOCAL_TAGS].kvm); - gpr_free(context); -} - -void census_context_initialize_iterator(const census_context *context, - census_context_iterator *iterator) { - iterator->context = context; - iterator->index = 0; - if (context->tags[PROPAGATED_TAGS].ntags != 0) { - iterator->base = PROPAGATED_TAGS; - iterator->kvm = context->tags[PROPAGATED_TAGS].kvm; - } else if (context->tags[LOCAL_TAGS].ntags != 0) { - iterator->base = LOCAL_TAGS; - iterator->kvm = context->tags[LOCAL_TAGS].kvm; - } else { - iterator->base = -1; - } -} - -int census_context_next_tag(census_context_iterator *iterator, - census_tag *tag) { - if (iterator->base < 0) { - return 0; - } - struct raw_tag raw; - iterator->kvm = decode_tag(&raw, iterator->kvm, 0); - tag->key = raw.key; - tag->value = raw.value; - tag->flags = raw.flags; - if (++iterator->index == iterator->context->tags[iterator->base].ntags) { - do { - if (iterator->base == LOCAL_TAGS) { - iterator->base = -1; - return 1; - } - } while (iterator->context->tags[++iterator->base].ntags == 0); - iterator->index = 0; - iterator->kvm = iterator->context->tags[iterator->base].kvm; - } - return 1; -} - -// Find a tag in a tag_set by key. Return true if found, false otherwise. -static bool tag_set_get_tag(const struct tag_set *tags, const char *key, - size_t key_len, census_tag *tag) { - char *kvp = tags->kvm; - for (int i = 0; i < tags->ntags; i++) { - struct raw_tag raw; - kvp = decode_tag(&raw, kvp, 0); - if (key_len == raw.key_len && memcmp(raw.key, key, key_len) == 0) { - tag->key = raw.key; - tag->value = raw.value; - tag->flags = raw.flags; - return true; - } - } - return false; -} - -int census_context_get_tag(const census_context *context, const char *key, - census_tag *tag) { - size_t key_len = strlen(key) + 1; - if (key_len == 1) { - return 0; - } - if (tag_set_get_tag(&context->tags[PROPAGATED_TAGS], key, key_len, tag) || - tag_set_get_tag(&context->tags[LOCAL_TAGS], key, key_len, tag)) { - return 1; - } - return 0; -} - -// Context encoding and decoding functions. -// -// Wire format for tag_set's on the wire: -// -// First, a tag set header: -// -// offset bytes description -// 0 1 version number -// 1 1 number of bytes in this header. This allows for future -// expansion. -// 2 1 number of bytes in each tag header. -// 3 1 ntags value from tag set. -// -// This is followed by the key/value memory from struct tag_set. - -#define ENCODED_VERSION 0 // Version number -#define ENCODED_HEADER_SIZE 4 // size of tag set header - -// Encode a tag set. Returns 0 if buffer is too small. 
-static size_t tag_set_encode(const struct tag_set *tags, char *buffer, - size_t buf_size) { - if (buf_size < ENCODED_HEADER_SIZE + tags->kvm_used) { - return 0; - } - buf_size -= ENCODED_HEADER_SIZE; - *buffer++ = (char)ENCODED_VERSION; - *buffer++ = (char)ENCODED_HEADER_SIZE; - *buffer++ = (char)TAG_HEADER_SIZE; - *buffer++ = (char)tags->ntags; - if (tags->ntags == 0) { - return ENCODED_HEADER_SIZE; - } - memcpy(buffer, tags->kvm, tags->kvm_used); - return ENCODED_HEADER_SIZE + tags->kvm_used; -} - -size_t census_context_encode(const census_context *context, char *buffer, - size_t buf_size) { - return tag_set_encode(&context->tags[PROPAGATED_TAGS], buffer, buf_size); -} - -// Decode a tag set. -static void tag_set_decode(struct tag_set *tags, const char *buffer, - size_t size) { - uint8_t version = (uint8_t)(*buffer++); - uint8_t header_size = (uint8_t)(*buffer++); - uint8_t tag_header_size = (uint8_t)(*buffer++); - tags->ntags = tags->ntags_alloc = (int)(*buffer++); - if (tags->ntags == 0) { - tags->ntags_alloc = 0; - tags->kvm_size = 0; - tags->kvm_used = 0; - tags->kvm = NULL; - return; - } - if (header_size != ENCODED_HEADER_SIZE) { - GPR_ASSERT(version != ENCODED_VERSION); - GPR_ASSERT(ENCODED_HEADER_SIZE < header_size); - buffer += (header_size - ENCODED_HEADER_SIZE); - } - tags->kvm_used = size - header_size; - tags->kvm_size = tags->kvm_used + CENSUS_MAX_TAG_KV_LEN; - tags->kvm = (char *)gpr_malloc(tags->kvm_size); - if (tag_header_size != TAG_HEADER_SIZE) { - // something new in the tag information. I don't understand it, so - // don't copy it over. - GPR_ASSERT(version != ENCODED_VERSION); - GPR_ASSERT(tag_header_size > TAG_HEADER_SIZE); - char *kvp = tags->kvm; - for (int i = 0; i < tags->ntags; i++) { - memcpy(kvp, buffer, TAG_HEADER_SIZE); - kvp += header_size; - struct raw_tag raw; - buffer = - decode_tag(&raw, (char *)buffer, tag_header_size - TAG_HEADER_SIZE); - memcpy(kvp, raw.key, (size_t)raw.key_len + raw.value_len); - kvp += raw.key_len + raw.value_len; - } - } else { - memcpy(tags->kvm, buffer, tags->kvm_used); - } -} - -census_context *census_context_decode(const char *buffer, size_t size) { - census_context *context = - (census_context *)gpr_malloc(sizeof(census_context)); - memset(&context->tags[LOCAL_TAGS], 0, sizeof(struct tag_set)); - if (buffer == NULL) { - memset(&context->tags[PROPAGATED_TAGS], 0, sizeof(struct tag_set)); - } else { - tag_set_decode(&context->tags[PROPAGATED_TAGS], buffer, size); - } - memset(&context->status, 0, sizeof(context->status)); - context->status.n_propagated_tags = context->tags[PROPAGATED_TAGS].ntags; - return context; -} diff --git a/Sources/CgRPC/src/core/ext/census/gen/census.pb.c b/Sources/CgRPC/src/core/ext/census/gen/census.pb.c deleted file mode 100644 index 88efa7366..000000000 --- a/Sources/CgRPC/src/core/ext/census/gen/census.pb.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
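Editor's note: the encode/decode functions removed above describe the on-wire form of a propagated tag set: a 4-byte header (version, header size, per-tag header size, ntags) followed verbatim by the key/value memory, with the decoder skipping any header bytes it does not understand. A sketch of that header and its forward-compatibility rule; the concrete byte values, including the "future version" header size, are hypothetical:

```cpp
#include <cstdint>
#include <cstdio>

// Write a tag-set header as the removed wire format defines it, then parse it
// back, skipping unknown trailing header bytes the way tag_set_decode() did.
int main() {
  uint8_t buf[8] = {0};
  buf[0] = 0;  // ENCODED_VERSION
  buf[1] = 6;  // pretend a future revision grew the header by two bytes
  buf[2] = 3;  // TAG_HEADER_SIZE
  buf[3] = 0;  // ntags

  const uint8_t* p = buf;
  unsigned version = *p++;
  unsigned header_size = *p++;
  unsigned tag_header_size = *p++;
  unsigned ntags = *p++;
  if (header_size > 4) p += header_size - 4;  // ignore unknown header fields

  printf("version=%u header=%u tag_header=%u ntags=%u, kvm starts at offset %td\n",
         version, header_size, tag_header_size, ntags, p - buf);
  return 0;
}
```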
- * - */ -/* Automatically generated nanopb constant definitions */ -/* Generated by nanopb-0.3.5-dev */ - -#include "src/core/ext/census/gen/census.pb.h" - -#if PB_PROTO_HEADER_VERSION != 30 -#error Regenerate this file with the current version of nanopb generator. -#endif - - - -const pb_field_t google_census_Duration_fields[3] = { - PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Duration, seconds, seconds, 0), - PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Duration, nanos, seconds, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_Timestamp_fields[3] = { - PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Timestamp, seconds, seconds, 0), - PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, google_census_Timestamp, nanos, seconds, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_Resource_fields[4] = { - PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Resource, name, name, 0), - PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Resource, description, name, 0), - PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Resource, unit, description, &google_census_Resource_MeasurementUnit_fields), - PB_LAST_FIELD -}; - -const pb_field_t google_census_Resource_MeasurementUnit_fields[4] = { - PB_FIELD( 1, INT32 , OPTIONAL, STATIC , FIRST, google_census_Resource_MeasurementUnit, prefix, prefix, 0), - PB_FIELD( 2, UENUM , REPEATED, CALLBACK, OTHER, google_census_Resource_MeasurementUnit, numerator, prefix, 0), - PB_FIELD( 3, UENUM , REPEATED, CALLBACK, OTHER, google_census_Resource_MeasurementUnit, denominator, numerator, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_AggregationDescriptor_fields[4] = { - PB_FIELD( 1, UENUM , OPTIONAL, STATIC , FIRST, google_census_AggregationDescriptor, type, type, 0), - PB_ONEOF_FIELD(options, 2, MESSAGE , ONEOF, STATIC , OTHER, google_census_AggregationDescriptor, bucket_boundaries, type, &google_census_AggregationDescriptor_BucketBoundaries_fields), - PB_ONEOF_FIELD(options, 3, MESSAGE , ONEOF, STATIC , OTHER, google_census_AggregationDescriptor, interval_boundaries, type, &google_census_AggregationDescriptor_IntervalBoundaries_fields), - PB_LAST_FIELD -}; - -const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2] = { - PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_BucketBoundaries, bounds, bounds, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2] = { - PB_FIELD( 1, DOUBLE , REPEATED, CALLBACK, FIRST, google_census_AggregationDescriptor_IntervalBoundaries, window_size, window_size, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_Distribution_fields[5] = { - PB_FIELD( 1, INT64 , OPTIONAL, STATIC , FIRST, google_census_Distribution, count, count, 0), - PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution, mean, count, 0), - PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Distribution, range, mean, &google_census_Distribution_Range_fields), - PB_FIELD( 4, INT64 , REPEATED, CALLBACK, OTHER, google_census_Distribution, bucket_count, range, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_Distribution_Range_fields[3] = { - PB_FIELD( 1, DOUBLE , OPTIONAL, STATIC , FIRST, google_census_Distribution_Range, min, min, 0), - PB_FIELD( 2, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_Distribution_Range, max, min, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_IntervalStats_fields[2] = 
{ - PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, google_census_IntervalStats, window, window, &google_census_IntervalStats_Window_fields), - PB_LAST_FIELD -}; - -const pb_field_t google_census_IntervalStats_Window_fields[4] = { - PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, google_census_IntervalStats_Window, window_size, window_size, &google_census_Duration_fields), - PB_FIELD( 2, INT64 , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, count, window_size, 0), - PB_FIELD( 3, DOUBLE , OPTIONAL, STATIC , OTHER, google_census_IntervalStats_Window, mean, count, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_Tag_fields[3] = { - PB_FIELD( 1, STRING , OPTIONAL, STATIC , FIRST, google_census_Tag, key, key, 0), - PB_FIELD( 2, STRING , OPTIONAL, STATIC , OTHER, google_census_Tag, value, key, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_View_fields[6] = { - PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_View, name, name, 0), - PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_View, description, name, 0), - PB_FIELD( 3, STRING , OPTIONAL, CALLBACK, OTHER, google_census_View, resource_name, description, 0), - PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_View, aggregation, resource_name, &google_census_AggregationDescriptor_fields), - PB_FIELD( 5, STRING , REPEATED, CALLBACK, OTHER, google_census_View, tag_key, aggregation, 0), - PB_LAST_FIELD -}; - -const pb_field_t google_census_Aggregation_fields[7] = { - PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Aggregation, name, name, 0), - PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, google_census_Aggregation, description, name, 0), - PB_ONEOF_FIELD(data, 3, UINT64 , ONEOF, STATIC , OTHER, google_census_Aggregation, count, description, 0), - PB_ONEOF_FIELD(data, 4, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, distribution, description, &google_census_Distribution_fields), - PB_ONEOF_FIELD(data, 5, MESSAGE , ONEOF, STATIC , OTHER, google_census_Aggregation, interval_stats, description, &google_census_IntervalStats_fields), - PB_FIELD( 6, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Aggregation, tag, data.interval_stats, &google_census_Tag_fields), - PB_LAST_FIELD -}; - -const pb_field_t google_census_Metric_fields[5] = { - PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, google_census_Metric, view_name, view_name, 0), - PB_FIELD( 2, MESSAGE , REPEATED, CALLBACK, OTHER, google_census_Metric, aggregation, view_name, &google_census_Aggregation_fields), - PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Metric, start, aggregation, &google_census_Timestamp_fields), - PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, google_census_Metric, end, start, &google_census_Timestamp_fields), - PB_LAST_FIELD -}; - - -/* Check that field information fits in pb_field_t */ -#if !defined(PB_FIELD_32BIT) -/* If you get an error here, it means that you need to define PB_FIELD_32BIT - * compile-time option. You can do that in pb.h or on compiler command line. - * - * The reason you need to do this is that some of your messages contain tag - * numbers or field sizes that are larger than what can fit in 8 or 16 bit - * field descriptors. 
- */ -PB_STATIC_ASSERT((pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Resource, unit) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 65536 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 65536 && pb_membersize(google_census_Distribution, range) < 65536 && pb_membersize(google_census_IntervalStats, window) < 65536 && pb_membersize(google_census_IntervalStats_Window, window_size) < 65536 && pb_membersize(google_census_View, aggregation) < 65536 && pb_membersize(google_census_Aggregation, data.distribution) < 65536 && pb_membersize(google_census_Aggregation, data.interval_stats) < 65536 && pb_membersize(google_census_Aggregation, tag) < 65536 && pb_membersize(google_census_Metric, aggregation) < 65536 && pb_membersize(google_census_Metric, start) < 65536 && pb_membersize(google_census_Metric, end) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Resource_google_census_Resource_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_Metric) -#endif - -#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT) -/* If you get an error here, it means that you need to define PB_FIELD_16BIT - * compile-time option. You can do that in pb.h or on compiler command line. - * - * The reason you need to do this is that some of your messages contain tag - * numbers or field sizes that are larger than what can fit in the default - * 8 bit descriptors. 
- */ -PB_STATIC_ASSERT((pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Resource, unit) < 256 && pb_membersize(google_census_AggregationDescriptor, options.bucket_boundaries) < 256 && pb_membersize(google_census_AggregationDescriptor, options.interval_boundaries) < 256 && pb_membersize(google_census_Distribution, range) < 256 && pb_membersize(google_census_IntervalStats, window) < 256 && pb_membersize(google_census_IntervalStats_Window, window_size) < 256 && pb_membersize(google_census_View, aggregation) < 256 && pb_membersize(google_census_Aggregation, data.distribution) < 256 && pb_membersize(google_census_Aggregation, data.interval_stats) < 256 && pb_membersize(google_census_Aggregation, tag) < 256 && pb_membersize(google_census_Metric, aggregation) < 256 && pb_membersize(google_census_Metric, start) < 256 && pb_membersize(google_census_Metric, end) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_google_census_Duration_google_census_Timestamp_google_census_Resource_google_census_Resource_MeasurementUnit_google_census_AggregationDescriptor_google_census_AggregationDescriptor_BucketBoundaries_google_census_AggregationDescriptor_IntervalBoundaries_google_census_Distribution_google_census_Distribution_Range_google_census_IntervalStats_google_census_IntervalStats_Window_google_census_Tag_google_census_View_google_census_Aggregation_google_census_Metric) -#endif - - -/* On some platforms (such as AVR), double is really float. - * These are not directly supported by nanopb, but see example_avr_double. - * To get rid of this error, remove any double fields from your .proto. - */ -PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES) - diff --git a/Sources/CgRPC/src/core/ext/census/gen/census.pb.h b/Sources/CgRPC/src/core/ext/census/gen/census.pb.h deleted file mode 100644 index 5f2833566..000000000 --- a/Sources/CgRPC/src/core/ext/census/gen/census.pb.h +++ /dev/null @@ -1,280 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/* Automatically generated nanopb header */ -/* Generated by nanopb-0.3.5-dev */ - -#ifndef GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H -#define GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H -#include "third_party/nanopb/pb.h" -#if PB_PROTO_HEADER_VERSION != 30 -#error Regenerate this file with the current version of nanopb generator. -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -/* Enum definitions */ -typedef enum _google_census_Resource_BasicUnit { - google_census_Resource_BasicUnit_UNKNOWN = 0, - google_census_Resource_BasicUnit_BITS = 1, - google_census_Resource_BasicUnit_BYTES = 2, - google_census_Resource_BasicUnit_SECS = 3, - google_census_Resource_BasicUnit_CORES = 4, - google_census_Resource_BasicUnit_MAX_UNITS = 5 -} google_census_Resource_BasicUnit; - -typedef enum _google_census_AggregationDescriptor_AggregationType { - google_census_AggregationDescriptor_AggregationType_UNKNOWN = 0, - google_census_AggregationDescriptor_AggregationType_COUNT = 1, - google_census_AggregationDescriptor_AggregationType_DISTRIBUTION = 2, - google_census_AggregationDescriptor_AggregationType_INTERVAL = 3 -} google_census_AggregationDescriptor_AggregationType; - -/* Struct definitions */ -typedef struct _google_census_AggregationDescriptor_BucketBoundaries { - pb_callback_t bounds; -} google_census_AggregationDescriptor_BucketBoundaries; - -typedef struct _google_census_AggregationDescriptor_IntervalBoundaries { - pb_callback_t window_size; -} google_census_AggregationDescriptor_IntervalBoundaries; - -typedef struct _google_census_IntervalStats { - pb_callback_t window; -} google_census_IntervalStats; - -typedef struct _google_census_AggregationDescriptor { - bool has_type; - google_census_AggregationDescriptor_AggregationType type; - pb_size_t which_options; - union { - google_census_AggregationDescriptor_BucketBoundaries bucket_boundaries; - google_census_AggregationDescriptor_IntervalBoundaries interval_boundaries; - } options; -} google_census_AggregationDescriptor; - -typedef struct _google_census_Distribution_Range { - bool has_min; - double min; - bool has_max; - double max; -} google_census_Distribution_Range; - -typedef struct _google_census_Duration { - bool has_seconds; - int64_t seconds; - bool has_nanos; - int32_t nanos; -} google_census_Duration; - -typedef struct _google_census_Resource_MeasurementUnit { - bool has_prefix; - int32_t prefix; - pb_callback_t numerator; - pb_callback_t denominator; -} google_census_Resource_MeasurementUnit; - -typedef struct _google_census_Tag { - bool has_key; - char key[255]; - bool has_value; - char value[255]; -} google_census_Tag; - -typedef struct _google_census_Timestamp { - bool has_seconds; - int64_t seconds; - bool has_nanos; - int32_t nanos; -} google_census_Timestamp; - -typedef struct _google_census_Distribution { - bool has_count; - int64_t count; - bool has_mean; - double mean; - bool has_range; - google_census_Distribution_Range range; - pb_callback_t bucket_count; -} google_census_Distribution; - -typedef struct _google_census_IntervalStats_Window { - bool has_window_size; - google_census_Duration window_size; - bool has_count; - int64_t count; - bool has_mean; - double mean; -} google_census_IntervalStats_Window; - -typedef struct _google_census_Metric { - pb_callback_t view_name; - pb_callback_t aggregation; - bool has_start; - google_census_Timestamp start; - bool has_end; - google_census_Timestamp end; -} 
google_census_Metric; - -typedef struct _google_census_Resource { - pb_callback_t name; - pb_callback_t description; - bool has_unit; - google_census_Resource_MeasurementUnit unit; -} google_census_Resource; - -typedef struct _google_census_View { - pb_callback_t name; - pb_callback_t description; - pb_callback_t resource_name; - bool has_aggregation; - google_census_AggregationDescriptor aggregation; - pb_callback_t tag_key; -} google_census_View; - -typedef struct _google_census_Aggregation { - pb_callback_t name; - pb_callback_t description; - pb_size_t which_data; - union { - uint64_t count; - google_census_Distribution distribution; - google_census_IntervalStats interval_stats; - } data; - pb_callback_t tag; -} google_census_Aggregation; - -/* Default values for struct fields */ - -/* Initializer values for message structs */ -#define google_census_Duration_init_default {false, 0, false, 0} -#define google_census_Timestamp_init_default {false, 0, false, 0} -#define google_census_Resource_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Resource_MeasurementUnit_init_default} -#define google_census_Resource_MeasurementUnit_init_default {false, 0, {{NULL}, NULL}, {{NULL}, NULL}} -#define google_census_AggregationDescriptor_init_default {false, (google_census_AggregationDescriptor_AggregationType)0, 0, {google_census_AggregationDescriptor_BucketBoundaries_init_default}} -#define google_census_AggregationDescriptor_BucketBoundaries_init_default {{{NULL}, NULL}} -#define google_census_AggregationDescriptor_IntervalBoundaries_init_default {{{NULL}, NULL}} -#define google_census_Distribution_init_default {false, 0, false, 0, false, google_census_Distribution_Range_init_default, {{NULL}, NULL}} -#define google_census_Distribution_Range_init_default {false, 0, false, 0} -#define google_census_IntervalStats_init_default {{{NULL}, NULL}} -#define google_census_IntervalStats_Window_init_default {false, google_census_Duration_init_default, false, 0, false, 0} -#define google_census_Tag_init_default {false, "", false, ""} -#define google_census_View_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, google_census_AggregationDescriptor_init_default, {{NULL}, NULL}} -#define google_census_Aggregation_init_default {{{NULL}, NULL}, {{NULL}, NULL}, 0, {0}, {{NULL}, NULL}} -#define google_census_Metric_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Timestamp_init_default, false, google_census_Timestamp_init_default} -#define google_census_Duration_init_zero {false, 0, false, 0} -#define google_census_Timestamp_init_zero {false, 0, false, 0} -#define google_census_Resource_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Resource_MeasurementUnit_init_zero} -#define google_census_Resource_MeasurementUnit_init_zero {false, 0, {{NULL}, NULL}, {{NULL}, NULL}} -#define google_census_AggregationDescriptor_init_zero {false, (google_census_AggregationDescriptor_AggregationType)0, 0, {google_census_AggregationDescriptor_BucketBoundaries_init_zero}} -#define google_census_AggregationDescriptor_BucketBoundaries_init_zero {{{NULL}, NULL}} -#define google_census_AggregationDescriptor_IntervalBoundaries_init_zero {{{NULL}, NULL}} -#define google_census_Distribution_init_zero {false, 0, false, 0, false, google_census_Distribution_Range_init_zero, {{NULL}, NULL}} -#define google_census_Distribution_Range_init_zero {false, 0, false, 0} -#define google_census_IntervalStats_init_zero {{{NULL}, NULL}} -#define google_census_IntervalStats_Window_init_zero 
{false, google_census_Duration_init_zero, false, 0, false, 0} -#define google_census_Tag_init_zero {false, "", false, ""} -#define google_census_View_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, google_census_AggregationDescriptor_init_zero, {{NULL}, NULL}} -#define google_census_Aggregation_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, 0, {0}, {{NULL}, NULL}} -#define google_census_Metric_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, google_census_Timestamp_init_zero, false, google_census_Timestamp_init_zero} - -/* Field tags (for use in manual encoding/decoding) */ -#define google_census_AggregationDescriptor_BucketBoundaries_bounds_tag 1 -#define google_census_AggregationDescriptor_IntervalBoundaries_window_size_tag 1 -#define google_census_IntervalStats_window_tag 1 -#define google_census_AggregationDescriptor_bucket_boundaries_tag 2 - -#define google_census_AggregationDescriptor_interval_boundaries_tag 3 -#define google_census_AggregationDescriptor_type_tag 1 -#define google_census_Distribution_Range_min_tag 1 -#define google_census_Distribution_Range_max_tag 2 -#define google_census_Duration_seconds_tag 1 -#define google_census_Duration_nanos_tag 2 -#define google_census_Resource_MeasurementUnit_prefix_tag 1 -#define google_census_Resource_MeasurementUnit_numerator_tag 2 -#define google_census_Resource_MeasurementUnit_denominator_tag 3 -#define google_census_Tag_key_tag 1 -#define google_census_Tag_value_tag 2 -#define google_census_Timestamp_seconds_tag 1 -#define google_census_Timestamp_nanos_tag 2 -#define google_census_Distribution_count_tag 1 -#define google_census_Distribution_mean_tag 2 -#define google_census_Distribution_range_tag 3 -#define google_census_Distribution_bucket_count_tag 4 -#define google_census_IntervalStats_Window_window_size_tag 1 -#define google_census_IntervalStats_Window_count_tag 2 -#define google_census_IntervalStats_Window_mean_tag 3 -#define google_census_Metric_view_name_tag 1 -#define google_census_Metric_aggregation_tag 2 -#define google_census_Metric_start_tag 3 -#define google_census_Metric_end_tag 4 -#define google_census_Resource_name_tag 1 -#define google_census_Resource_description_tag 2 -#define google_census_Resource_unit_tag 3 -#define google_census_View_name_tag 1 -#define google_census_View_description_tag 2 -#define google_census_View_resource_name_tag 3 -#define google_census_View_aggregation_tag 4 -#define google_census_View_tag_key_tag 5 -#define google_census_Aggregation_count_tag 3 - -#define google_census_Aggregation_distribution_tag 4 - -#define google_census_Aggregation_interval_stats_tag 5 -#define google_census_Aggregation_name_tag 1 -#define google_census_Aggregation_description_tag 2 -#define google_census_Aggregation_tag_tag 6 - -/* Struct field encoding specification for nanopb */ -extern const pb_field_t google_census_Duration_fields[3]; -extern const pb_field_t google_census_Timestamp_fields[3]; -extern const pb_field_t google_census_Resource_fields[4]; -extern const pb_field_t google_census_Resource_MeasurementUnit_fields[4]; -extern const pb_field_t google_census_AggregationDescriptor_fields[4]; -extern const pb_field_t google_census_AggregationDescriptor_BucketBoundaries_fields[2]; -extern const pb_field_t google_census_AggregationDescriptor_IntervalBoundaries_fields[2]; -extern const pb_field_t google_census_Distribution_fields[5]; -extern const pb_field_t google_census_Distribution_Range_fields[3]; -extern const pb_field_t google_census_IntervalStats_fields[2]; -extern const pb_field_t 
google_census_IntervalStats_Window_fields[4]; -extern const pb_field_t google_census_Tag_fields[3]; -extern const pb_field_t google_census_View_fields[6]; -extern const pb_field_t google_census_Aggregation_fields[7]; -extern const pb_field_t google_census_Metric_fields[5]; - -/* Maximum encoded size of messages (where known) */ -#define google_census_Duration_size 22 -#define google_census_Timestamp_size 22 -#define google_census_Distribution_Range_size 18 -#define google_census_IntervalStats_Window_size 44 -#define google_census_Tag_size 516 - -/* Message IDs (where set with "msgid" option) */ -#ifdef PB_MSGID - -#define CENSUS_MESSAGES \ - - -#endif - -#ifdef __cplusplus -} /* extern "C" */ -#endif - -#endif /* GRPC_CORE_EXT_CENSUS_GEN_CENSUS_PB_H */ diff --git a/Sources/CgRPC/src/core/ext/census/gen/trace_context.pb.c b/Sources/CgRPC/src/core/ext/census/gen/trace_context.pb.c deleted file mode 100644 index b5c3d52a7..000000000 --- a/Sources/CgRPC/src/core/ext/census/gen/trace_context.pb.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/* Automatically generated nanopb constant definitions */ -/* Generated by nanopb-0.3.7-dev at Fri Jan 20 16:14:22 2017. */ - -#include "src/core/ext/census/gen/trace_context.pb.h" - -/* @@protoc_insertion_point(includes) */ -#if PB_PROTO_HEADER_VERSION != 30 -#error Regenerate this file with the current version of nanopb generator. -#endif - - - -const pb_field_t google_trace_TraceContext_fields[5] = { - PB_FIELD( 1, FIXED64 , OPTIONAL, STATIC , FIRST, google_trace_TraceContext, trace_id_hi, trace_id_hi, 0), - PB_FIELD( 2, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, trace_id_lo, trace_id_hi, 0), - PB_FIELD( 3, FIXED64 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_id, trace_id_lo, 0), - PB_FIELD( 4, FIXED32 , OPTIONAL, STATIC , OTHER, google_trace_TraceContext, span_options, span_id, 0), - PB_LAST_FIELD -}; - - -/* @@protoc_insertion_point(eof) */ diff --git a/Sources/CgRPC/src/core/ext/census/gen/trace_context.pb.h b/Sources/CgRPC/src/core/ext/census/gen/trace_context.pb.h deleted file mode 100644 index 181925dc9..000000000 --- a/Sources/CgRPC/src/core/ext/census/gen/trace_context.pb.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/* Automatically generated nanopb header */ -/* Generated by nanopb-0.3.7-dev at Fri Jan 20 16:14:22 2017. 
*/ - -#ifndef GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H -#define GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H -#include "third_party/nanopb/pb.h" - -/* @@protoc_insertion_point(includes) */ -#if PB_PROTO_HEADER_VERSION != 30 -#error Regenerate this file with the current version of nanopb generator. -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -/* Struct definitions */ -typedef struct _google_trace_TraceContext { - bool has_trace_id_hi; - uint64_t trace_id_hi; - bool has_trace_id_lo; - uint64_t trace_id_lo; - bool has_span_id; - uint64_t span_id; - bool has_span_options; - uint32_t span_options; -/* @@protoc_insertion_point(struct:google_trace_TraceContext) */ -} google_trace_TraceContext; - -/* Default values for struct fields */ - -/* Initializer values for message structs */ -#define google_trace_TraceContext_init_default {false, 0, false, 0, false, 0, false, 0} -#define google_trace_TraceContext_init_zero {false, 0, false, 0, false, 0, false, 0} - -/* Field tags (for use in manual encoding/decoding) */ -#define google_trace_TraceContext_trace_id_hi_tag 1 -#define google_trace_TraceContext_trace_id_lo_tag 2 -#define google_trace_TraceContext_span_id_tag 3 -#define google_trace_TraceContext_span_options_tag 4 - -/* Struct field encoding specification for nanopb */ -extern const pb_field_t google_trace_TraceContext_fields[5]; - -/* Maximum encoded size of messages (where known) */ -#define google_trace_TraceContext_size 32 - -/* Message IDs (where set with "msgid" option) */ -#ifdef PB_MSGID - -#define TRACE_CONTEXT_MESSAGES \ - - -#endif - -#ifdef __cplusplus -} /* extern "C" */ -#endif -/* @@protoc_insertion_point(eof) */ - -#endif /* GRPC_CORE_EXT_CENSUS_GEN_TRACE_CONTEXT_PB_H */ diff --git a/Sources/CgRPC/src/core/ext/census/grpc_context.c b/Sources/CgRPC/src/core/ext/census/grpc_context.cc similarity index 70% rename from Sources/CgRPC/src/core/ext/census/grpc_context.c rename to Sources/CgRPC/src/core/ext/census/grpc_context.cc index 0bfba63a5..599a798dd 100644 --- a/Sources/CgRPC/src/core/ext/census/grpc_context.c +++ b/Sources/CgRPC/src/core/ext/census/grpc_context.cc @@ -16,23 +16,23 @@ * */ +#include + +#include + #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/call.h" -void grpc_census_call_set_context(grpc_call *call, census_context *context) { +void grpc_census_call_set_context(grpc_call* call, census_context* context) { GRPC_API_TRACE("grpc_census_call_set_context(call=%p, census_context=%p)", 2, (call, context)); - if (census_enabled() == CENSUS_FEATURE_NONE) { - return; - } - if (context != NULL) { - grpc_call_context_set(call, GRPC_CONTEXT_TRACING, context, NULL); + if (context != nullptr) { + grpc_call_context_set(call, GRPC_CONTEXT_TRACING, context, nullptr); } } -census_context *grpc_census_call_get_context(grpc_call *call) { +census_context* grpc_census_call_get_context(grpc_call* call) { GRPC_API_TRACE("grpc_census_call_get_context(call=%p)", 1, (call)); - return (census_context *)grpc_call_context_get(call, GRPC_CONTEXT_TRACING); + return static_cast<census_context*>( + grpc_call_context_get(call, GRPC_CONTEXT_TRACING)); } diff --git a/Sources/CgRPC/src/core/ext/census/grpc_filter.c b/Sources/CgRPC/src/core/ext/census/grpc_filter.c deleted file mode 100644 index b37ab9038..000000000 --- a/Sources/CgRPC/src/core/ext/census/grpc_filter.c +++ /dev/null @@ -1,196 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors.
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/census/grpc_filter.h" - -#include -#include - -#include -#include -#include -#include -#include - -#include "src/core/ext/census/census_interface.h" -#include "src/core/ext/census/census_rpc_stats.h" -#include "src/core/lib/channel/channel_stack.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/transport/static_metadata.h" - -typedef struct call_data { - census_op_id op_id; - census_context *ctxt; - gpr_timespec start_ts; - int error; - - /* recv callback */ - grpc_metadata_batch *recv_initial_metadata; - grpc_closure *on_done_recv; - grpc_closure finish_recv; -} call_data; - -typedef struct channel_data { uint8_t unused; } channel_data; - -static void extract_and_annotate_method_tag(grpc_metadata_batch *md, - call_data *calld, - channel_data *chand) { - grpc_linked_mdelem *m; - for (m = md->list.head; m != NULL; m = m->next) { - if (grpc_slice_eq(GRPC_MDKEY(m->md), GRPC_MDSTR_PATH)) { - /* Add method tag here */ - } - } -} - -static void client_mutate_op(grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - if (op->send_initial_metadata) { - extract_and_annotate_method_tag( - op->payload->send_initial_metadata.send_initial_metadata, calld, chand); - } -} - -static void client_start_transport_op(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - client_mutate_op(elem, op); - grpc_call_next_op(exec_ctx, elem, op); -} - -static void server_on_done_recv(grpc_exec_ctx *exec_ctx, void *ptr, - grpc_error *error) { - GPR_TIMER_BEGIN("census-server:server_on_done_recv", 0); - grpc_call_element *elem = (grpc_call_element *)ptr; - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - if (error == GRPC_ERROR_NONE) { - extract_and_annotate_method_tag(calld->recv_initial_metadata, calld, chand); - } - calld->on_done_recv->cb(exec_ctx, calld->on_done_recv->cb_arg, error); - GPR_TIMER_END("census-server:server_on_done_recv", 0); -} - -static void server_mutate_op(grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - call_data *calld = (call_data *)elem->call_data; - if (op->recv_initial_metadata) { - /* substitute our callback for the op callback */ - calld->recv_initial_metadata = - op->payload->recv_initial_metadata.recv_initial_metadata; - calld->on_done_recv = - op->payload->recv_initial_metadata.recv_initial_metadata_ready; - op->payload->recv_initial_metadata.recv_initial_metadata_ready = - &calld->finish_recv; - } -} - -static void server_start_transport_op(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - /* TODO(ctiller): this code fails. I don't know why. I expect it's - incomplete, and someone should look at it soon. 
- - call_data *calld = elem->call_data; - GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0)); */ - server_mutate_op(elem, op); - grpc_call_next_op(exec_ctx, elem, op); -} - -static grpc_error *client_init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *d = (call_data *)elem->call_data; - GPR_ASSERT(d != NULL); - memset(d, 0, sizeof(*d)); - d->start_ts = args->start_time; - return GRPC_ERROR_NONE; -} - -static void client_destroy_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *ignored) { - call_data *d = (call_data *)elem->call_data; - GPR_ASSERT(d != NULL); - /* TODO(hongyu): record rpc client stats and census_rpc_end_op here */ -} - -static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *d = (call_data *)elem->call_data; - GPR_ASSERT(d != NULL); - memset(d, 0, sizeof(*d)); - d->start_ts = args->start_time; - /* TODO(hongyu): call census_tracing_start_op here. */ - GRPC_CLOSURE_INIT(&d->finish_recv, server_on_done_recv, elem, - grpc_schedule_on_exec_ctx); - return GRPC_ERROR_NONE; -} - -static void server_destroy_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *ignored) { - call_data *d = (call_data *)elem->call_data; - GPR_ASSERT(d != NULL); - /* TODO(hongyu): record rpc server stats and census_tracing_end_op here */ -} - -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - channel_data *chand = (channel_data *)elem->channel_data; - GPR_ASSERT(chand != NULL); - return GRPC_ERROR_NONE; -} - -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { - channel_data *chand = (channel_data *)elem->channel_data; - GPR_ASSERT(chand != NULL); -} - -const grpc_channel_filter grpc_client_census_filter = { - client_start_transport_op, - grpc_channel_next_op, - sizeof(call_data), - client_init_call_elem, - grpc_call_stack_ignore_set_pollset_or_pollset_set, - client_destroy_call_elem, - sizeof(channel_data), - init_channel_elem, - destroy_channel_elem, - grpc_channel_next_get_info, - "census-client"}; - -const grpc_channel_filter grpc_server_census_filter = { - server_start_transport_op, - grpc_channel_next_op, - sizeof(call_data), - server_init_call_elem, - grpc_call_stack_ignore_set_pollset_or_pollset_set, - server_destroy_call_elem, - sizeof(channel_data), - init_channel_elem, - destroy_channel_elem, - grpc_channel_next_get_info, - "census-server"}; diff --git a/Sources/CgRPC/src/core/ext/census/grpc_plugin.c b/Sources/CgRPC/src/core/ext/census/grpc_plugin.c deleted file mode 100644 index c0efe5afb..000000000 --- a/Sources/CgRPC/src/core/ext/census/grpc_plugin.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -#include -#include - -#include - -#include "src/core/ext/census/grpc_filter.h" -#include "src/core/lib/channel/channel_stack_builder.h" -#include "src/core/lib/surface/channel_init.h" - -static bool is_census_enabled(const grpc_channel_args *a) { - size_t i; - if (a == NULL) return 0; - for (i = 0; i < a->num_args; i++) { - if (0 == strcmp(a->args[i].key, GRPC_ARG_ENABLE_CENSUS)) { - return a->args[i].value.integer != 0 && census_enabled(); - } - } - return census_enabled() && !grpc_channel_args_want_minimal_stack(a); -} - -static bool maybe_add_census_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - void *arg) { - const grpc_channel_args *args = - grpc_channel_stack_builder_get_channel_arguments(builder); - if (is_census_enabled(args)) { - return grpc_channel_stack_builder_prepend_filter( - builder, (const grpc_channel_filter *)arg, NULL, NULL); - } - return true; -} - -void census_grpc_plugin_init(void) { - /* Only initialize census if no one else has and some features are - * available. */ - if (census_enabled() == CENSUS_FEATURE_NONE && - census_supported() != CENSUS_FEATURE_NONE) { - if (census_initialize(census_supported())) { /* enable all features. */ - gpr_log(GPR_ERROR, "Could not initialize census."); - } - } - grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX, - maybe_add_census_filter, - (void *)&grpc_client_census_filter); - grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, - maybe_add_census_filter, - (void *)&grpc_server_census_filter); -} - -void census_grpc_plugin_shutdown(void) { census_shutdown(); } diff --git a/Sources/CgRPC/src/core/ext/census/initialize.c b/Sources/CgRPC/src/core/ext/census/initialize.c deleted file mode 100644 index 165a1221d..000000000 --- a/Sources/CgRPC/src/core/ext/census/initialize.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include -#include "src/core/ext/census/base_resources.h" -#include "src/core/ext/census/resource.h" - -static int features_enabled = CENSUS_FEATURE_NONE; - -int census_initialize(int features) { - if (features_enabled != CENSUS_FEATURE_NONE) { - // Must have been a previous call to census_initialize; return error - return -1; - } - features_enabled = features & CENSUS_FEATURE_ALL; - if (features & CENSUS_FEATURE_STATS) { - initialize_resources(); - define_base_resources(); - } - - return features_enabled; -} - -void census_shutdown(void) { - if (features_enabled & CENSUS_FEATURE_STATS) { - shutdown_resources(); - } - features_enabled = CENSUS_FEATURE_NONE; -} - -int census_supported(void) { - /* TODO(aveitch): improve this as we implement features... 
*/ - return CENSUS_FEATURE_NONE; -} - -int census_enabled(void) { return features_enabled; } diff --git a/Sources/CgRPC/src/core/ext/census/intrusive_hash_map.c b/Sources/CgRPC/src/core/ext/census/intrusive_hash_map.c deleted file mode 100644 index 793048696..000000000 --- a/Sources/CgRPC/src/core/ext/census/intrusive_hash_map.c +++ /dev/null @@ -1,305 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/census/intrusive_hash_map.h" -#include - -extern bool hm_index_compare(const hm_index *A, const hm_index *B); - -/* Simple hashing function that takes lower 32 bits. */ -static __inline uint32_t chunked_vector_hasher(uint64_t key) { - return (uint32_t)key; -} - -/* Vector chunks are 1MiB divided by pointer size. */ -static const size_t VECTOR_CHUNK_SIZE = (1 << 20) / sizeof(void *); - -/* Helper functions which return buckets from the chunked vector. */ -static __inline void **get_mutable_bucket(const chunked_vector *buckets, - uint32_t index) { - if (index < VECTOR_CHUNK_SIZE) { - return &buckets->first_[index]; - } - size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE; - return &buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE]; -} - -static __inline void *get_bucket(const chunked_vector *buckets, - uint32_t index) { - if (index < VECTOR_CHUNK_SIZE) { - return buckets->first_[index]; - } - size_t rest_index = (index - VECTOR_CHUNK_SIZE) / VECTOR_CHUNK_SIZE; - return buckets->rest_[rest_index][index % VECTOR_CHUNK_SIZE]; -} - -/* Helper function. */ -static __inline size_t RestSize(const chunked_vector *vec) { - return (vec->size_ <= VECTOR_CHUNK_SIZE) - ? 0 - : (vec->size_ - VECTOR_CHUNK_SIZE - 1) / VECTOR_CHUNK_SIZE + 1; -} - -/* Initialize chunked vector to size of 0. */ -static void chunked_vector_init(chunked_vector *vec) { - vec->size_ = 0; - vec->first_ = NULL; - vec->rest_ = NULL; -} - -/* Clear chunked vector and free all memory that has been allocated then - initialize chunked vector. */ -static void chunked_vector_clear(chunked_vector *vec) { - if (vec->first_ != NULL) { - gpr_free(vec->first_); - } - if (vec->rest_ != NULL) { - size_t rest_size = RestSize(vec); - for (size_t i = 0; i < rest_size; ++i) { - if (vec->rest_[i] != NULL) { - gpr_free(vec->rest_[i]); - } - } - gpr_free(vec->rest_); - } - chunked_vector_init(vec); -} - -/* Clear chunked vector and then resize it to n entries. Allow the first 1MB to - be read w/o an extra cache miss. The rest of the elements are stored in an - array of arrays to avoid large mallocs. 
*/ -static void chunked_vector_reset(chunked_vector *vec, size_t n) { - chunked_vector_clear(vec); - vec->size_ = n; - if (n <= VECTOR_CHUNK_SIZE) { - vec->first_ = (void **)gpr_malloc(sizeof(void *) * n); - memset(vec->first_, 0, sizeof(void *) * n); - } else { - vec->first_ = (void **)gpr_malloc(sizeof(void *) * VECTOR_CHUNK_SIZE); - memset(vec->first_, 0, sizeof(void *) * VECTOR_CHUNK_SIZE); - size_t rest_size = RestSize(vec); - vec->rest_ = (void ***)gpr_malloc(sizeof(void **) * rest_size); - memset(vec->rest_, 0, sizeof(void **) * rest_size); - int i = 0; - n -= VECTOR_CHUNK_SIZE; - while (n > 0) { - size_t this_size = GPR_MIN(n, VECTOR_CHUNK_SIZE); - vec->rest_[i] = (void **)gpr_malloc(sizeof(void *) * this_size); - memset(vec->rest_[i], 0, sizeof(void *) * this_size); - n -= this_size; - ++i; - } - } -} - -void intrusive_hash_map_init(intrusive_hash_map *hash_map, - uint32_t initial_log2_table_size) { - hash_map->log2_num_buckets = initial_log2_table_size; - hash_map->num_items = 0; - uint32_t num_buckets = (uint32_t)1 << hash_map->log2_num_buckets; - hash_map->extend_threshold = num_buckets >> 1; - chunked_vector_init(&hash_map->buckets); - chunked_vector_reset(&hash_map->buckets, num_buckets); - hash_map->hash_mask = num_buckets - 1; -} - -bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map) { - return hash_map->num_items == 0; -} - -size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map) { - return hash_map->num_items; -} - -void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx) { - idx->bucket_index = (uint32_t)hash_map->buckets.size_; - GPR_ASSERT(idx->bucket_index <= UINT32_MAX); - idx->item = NULL; -} - -void intrusive_hash_map_next(const intrusive_hash_map *hash_map, - hm_index *idx) { - idx->item = idx->item->hash_link; - while (idx->item == NULL) { - idx->bucket_index++; - if (idx->bucket_index >= hash_map->buckets.size_) { - /* Reached end of table. */ - idx->item = NULL; - return; - } - idx->item = (hm_item *)get_bucket(&hash_map->buckets, idx->bucket_index); - } -} - -void intrusive_hash_map_begin(const intrusive_hash_map *hash_map, - hm_index *idx) { - for (uint32_t i = 0; i < hash_map->buckets.size_; ++i) { - if (get_bucket(&hash_map->buckets, i) != NULL) { - idx->bucket_index = i; - idx->item = (hm_item *)get_bucket(&hash_map->buckets, i); - return; - } - } - intrusive_hash_map_end(hash_map, idx); -} - -hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map, - uint64_t key) { - uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask; - - hm_item *p = (hm_item *)get_bucket(&hash_map->buckets, index); - while (p != NULL) { - if (key == p->key) { - return p; - } - p = p->hash_link; - } - return NULL; -} - -hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key) { - uint32_t index = chunked_vector_hasher(key) & hash_map->hash_mask; - - hm_item **slot = (hm_item **)get_mutable_bucket(&hash_map->buckets, index); - hm_item *p = *slot; - if (p == NULL) { - return NULL; - } - - if (key == p->key) { - *slot = p->hash_link; - p->hash_link = NULL; - hash_map->num_items--; - return p; - } - - hm_item *prev = p; - p = p->hash_link; - - while (p) { - if (key == p->key) { - prev->hash_link = p->hash_link; - p->hash_link = NULL; - hash_map->num_items--; - return p; - } - prev = p; - p = p->hash_link; - } - return NULL; -} - -/* Insert an hm_item* into the underlying chunked vector. hash_mask is - * array_size-1. Returns true if it is a new hm_item and false if the hm_item - * already existed. 
- */ -static __inline bool intrusive_hash_map_internal_insert(chunked_vector *buckets, - uint32_t hash_mask, - hm_item *item) { - const uint64_t key = item->key; - uint32_t index = chunked_vector_hasher(key) & hash_mask; - hm_item **slot = (hm_item **)get_mutable_bucket(buckets, index); - hm_item *p = *slot; - item->hash_link = p; - - /* Check to see if key already exists. */ - while (p) { - if (p->key == key) { - return false; - } - p = p->hash_link; - } - - /* Otherwise add new entry. */ - *slot = item; - return true; -} - -/* Extend the allocated number of elements in the hash map by a factor of 2. */ -void intrusive_hash_map_extend(intrusive_hash_map *hash_map) { - uint32_t new_log2_num_buckets = 1 + hash_map->log2_num_buckets; - uint32_t new_num_buckets = (uint32_t)1 << new_log2_num_buckets; - GPR_ASSERT(new_num_buckets <= UINT32_MAX && new_num_buckets > 0); - chunked_vector new_buckets; - chunked_vector_init(&new_buckets); - chunked_vector_reset(&new_buckets, new_num_buckets); - uint32_t new_hash_mask = new_num_buckets - 1; - - hm_index cur_idx; - hm_index end_idx; - intrusive_hash_map_end(hash_map, &end_idx); - intrusive_hash_map_begin(hash_map, &cur_idx); - while (!hm_index_compare(&cur_idx, &end_idx)) { - hm_item *new_item = cur_idx.item; - intrusive_hash_map_next(hash_map, &cur_idx); - intrusive_hash_map_internal_insert(&new_buckets, new_hash_mask, new_item); - } - - /* Set values for new chunked_vector. extend_threshold is set to half of - * new_num_buckets. */ - hash_map->log2_num_buckets = new_log2_num_buckets; - chunked_vector_clear(&hash_map->buckets); - hash_map->buckets = new_buckets; - hash_map->hash_mask = new_hash_mask; - hash_map->extend_threshold = new_num_buckets >> 1; -} - -/* Insert a hm_item. The hm_item must remain live until it is removed from the - table. This object does not take the ownership of hm_item. The caller must - remove this hm_item from the table and delete it before this table is - deleted. If hm_item exists already num_items is not changed. */ -bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item) { - if (hash_map->num_items >= hash_map->extend_threshold) { - intrusive_hash_map_extend(hash_map); - } - if (intrusive_hash_map_internal_insert(&hash_map->buckets, - hash_map->hash_mask, item)) { - hash_map->num_items++; - return true; - } - return false; -} - -void intrusive_hash_map_clear(intrusive_hash_map *hash_map, - void (*free_object)(void *)) { - hm_index cur; - hm_index end; - intrusive_hash_map_end(hash_map, &end); - intrusive_hash_map_begin(hash_map, &cur); - - while (!hm_index_compare(&cur, &end)) { - hm_index next = cur; - intrusive_hash_map_next(hash_map, &next); - if (cur.item != NULL) { - hm_item *item = intrusive_hash_map_erase(hash_map, cur.item->key); - (*free_object)((void *)item); - gpr_free(item); - } - cur = next; - } -} - -void intrusive_hash_map_free(intrusive_hash_map *hash_map, - void (*free_object)(void *)) { - intrusive_hash_map_clear(hash_map, (*free_object)); - hash_map->num_items = 0; - hash_map->extend_threshold = 0; - hash_map->log2_num_buckets = 0; - hash_map->hash_mask = 0; - chunked_vector_clear(&hash_map->buckets); -} diff --git a/Sources/CgRPC/src/core/ext/census/intrusive_hash_map.h b/Sources/CgRPC/src/core/ext/census/intrusive_hash_map.h deleted file mode 100644 index f50de4fab..000000000 --- a/Sources/CgRPC/src/core/ext/census/intrusive_hash_map.h +++ /dev/null @@ -1,152 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H -#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H - -#include "src/core/ext/census/intrusive_hash_map_internal.h" - -/* intrusive_hash_map is a fast chained hash table. This hash map is faster than - * a dense hash map when the application calls insert and erase more often than - * find. When the workload is dominated by find() a dense hash map may be - * faster. - * - * intrusive_hash_map uses an intrusive header placed within a user defined - * struct. The header field IHM_key MUST be set to a valid value before - * insertion into the hash map or undefined behavior may occur. The header field - * IHM_hash_link MUST to be set to NULL initially. - * - * EXAMPLE USAGE: - * - * typedef struct string_item { - * INTRUSIVE_HASH_MAP_HEADER; - * // User data. - * char *str_buf; - * uint16_t len; - * } string_item; - * - * static string_item *make_string_item(uint64_t key, const char *buf, - * uint16_t len) { - * string_item *item = (string_item *)gpr_malloc(sizeof(string_item)); - * item->IHM_key = key; - * item->IHM_hash_link = NULL; - * item->len = len; - * item->str_buf = (char *)malloc(len); - * memcpy(item->str_buf, buf, len); - * return item; - * } - * - * intrusive_hash_map hash_map; - * intrusive_hash_map_init(&hash_map, 4); - * string_item *new_item1 = make_string_item(10, "test1", 5); - * bool ok = intrusive_hash_map_insert(&hash_map, (hm_item *)new_item1); - * - * string_item *item1 = - * (string_item *)intrusive_hash_map_find(&hash_map, 10); - */ - -/* Hash map item. Stores key and a pointer to the actual object. A user defined - * version of this can be passed in provided the first 2 entries (key and - * hash_link) are the same. These entries must be first in the user defined - * struct. Pointer to struct will need to be cast as (hm_item *) when passed to - * hash map. This allows it to be intrusive. */ -typedef struct hm_item { - uint64_t key; - struct hm_item *hash_link; - /* Optional user defined data after this. */ -} hm_item; - -/* Macro provided for ease of use. This must be first in the user defined - * struct (i.e. uint64_t key and hm_item * must be the first two elements in - * that order). */ -#define INTRUSIVE_HASH_MAP_HEADER \ - uint64_t IHM_key; \ - struct hm_item *IHM_hash_link - -/* Index struct which acts as a pseudo-iterator within the hash map. */ -typedef struct hm_index { - uint32_t bucket_index; // hash map bucket index. - hm_item *item; // Pointer to hm_item within the hash map. -} hm_index; - -/* Returns true if two hm_indices point to the same object within the hash map - * and false otherwise. */ -__inline bool hm_index_compare(const hm_index *A, const hm_index *B) { - return (A->item == B->item && A->bucket_index == B->bucket_index); -} - -/* - * Helper functions for iterating over the hash map. 
- */ - -/* On return idx will contain an invalid index which is always equal to - * hash_map->buckets.size_ */ -void intrusive_hash_map_end(const intrusive_hash_map *hash_map, hm_index *idx); - -/* Iterates index to the next valid entry in the hash map and stores the - * index within idx. If end of table is reached, idx will contain the same - * values as if intrusive_hash_map_end() was called. */ -void intrusive_hash_map_next(const intrusive_hash_map *hash_map, hm_index *idx); - -/* On return, idx will contain the index of the first non-null entry in the hash - * map. If the hash map is empty, idx will contain the same values as if - * intrusive_hash_map_end() was called. */ -void intrusive_hash_map_begin(const intrusive_hash_map *hash_map, - hm_index *idx); - -/* Initialize intrusive hash map data structure. This must be called before - * the hash map can be used. The initial size of an intrusive hash map will be - * 2^initial_log2_map_size (valid range is [0, 31]). */ -void intrusive_hash_map_init(intrusive_hash_map *hash_map, - uint32_t initial_log2_map_size); - -/* Returns true if the hash map is empty and false otherwise. */ -bool intrusive_hash_map_empty(const intrusive_hash_map *hash_map); - -/* Returns the number of elements currently in the hash map. */ -size_t intrusive_hash_map_size(const intrusive_hash_map *hash_map); - -/* Find a hm_item within the hash map by key. Returns NULL if item was not - * found. */ -hm_item *intrusive_hash_map_find(const intrusive_hash_map *hash_map, - uint64_t key); - -/* Erase the hm_item that corresponds with key. If the hm_item is found, return - * the pointer to the hm_item. Else returns NULL. */ -hm_item *intrusive_hash_map_erase(intrusive_hash_map *hash_map, uint64_t key); - -/* Attempts to insert a new hm_item into the hash map. If an element with the - * same key already exists, it will not insert the new item and return false. - * Otherwise, it will insert the new item and return true. */ -bool intrusive_hash_map_insert(intrusive_hash_map *hash_map, hm_item *item); - -/* Clears entire contents of the hash map, but leaves internal data structure - * untouched. Second argument takes a function pointer to a function that will - * free the object designated by the user and pointed to by hash_map->value. */ -void intrusive_hash_map_clear(intrusive_hash_map *hash_map, - void (*free_object)(void *)); - -/* Erase all contents of hash map and free the memory. Hash map is invalid - * after calling this function and cannot be used until it has been - * reinitialized (intrusive_hash_map_init()). This function takes a function - * pointer to a function that will free the object designated by the user and - * pointed to by hash_map->value. */ -void intrusive_hash_map_free(intrusive_hash_map *hash_map, - void (*free_object)(void *)); - -#endif /* GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_H */ diff --git a/Sources/CgRPC/src/core/ext/census/intrusive_hash_map_internal.h b/Sources/CgRPC/src/core/ext/census/intrusive_hash_map_internal.h deleted file mode 100644 index e9c81fc85..000000000 --- a/Sources/CgRPC/src/core/ext/census/intrusive_hash_map_internal.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H -#define GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H - -#include -#include -#include -#include - -/* The chunked vector is a data structure that allocates buckets for use in the - * hash map. ChunkedVector is logically equivalent to T*[N] (cast void* as - * T*). It's internally implemented as an array of 1MB arrays to avoid - * allocating large consecutive memory chunks. This is an internal data - * structure that should never be accessed directly. */ -typedef struct chunked_vector { - size_t size_; - void **first_; - void ***rest_; -} chunked_vector; - -/* Core intrusive hash map data structure. All internal elements are managed by - * functions and should not be altered manually. */ -typedef struct intrusive_hash_map { - uint32_t num_items; - uint32_t extend_threshold; - uint32_t log2_num_buckets; - uint32_t hash_mask; - chunked_vector buckets; -} intrusive_hash_map; - -#endif /* GRPC_CORE_EXT_CENSUS_INTRUSIVE_HASH_MAP_INTERNAL_H */ diff --git a/Sources/CgRPC/src/core/ext/census/mlog.c b/Sources/CgRPC/src/core/ext/census/mlog.c deleted file mode 100644 index 4b8c8466b..000000000 --- a/Sources/CgRPC/src/core/ext/census/mlog.c +++ /dev/null @@ -1,586 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Implements an efficient in-memory log, optimized for multiple writers and -// a single reader. Available log space is divided up in blocks of -// CENSUS_LOG_2_MAX_RECORD_SIZE bytes. A block can be in one of the following -// three data structures: -// - Free blocks (free_block_list) -// - Blocks with unread data (dirty_block_list) -// - Blocks currently attached to cores (core_local_blocks[]) -// -// census_log_start_write() moves a block from core_local_blocks[] to the end of -// dirty_block_list when block: -// - is out-of-space OR -// - has an incomplete record (an incomplete record occurs when a thread calls -// census_log_start_write() and is context-switched before calling -// census_log_end_write() -// So, blocks in dirty_block_list are ordered, from oldest to newest, by the -// time when block is detached from the core. -// -// census_log_read_next() first iterates over dirty_block_list and then -// core_local_blocks[]. It moves completely read blocks from dirty_block_list -// to free_block_list. Blocks in core_local_blocks[] are not freed, even when -// completely read. 
-// -// If the log is configured to discard old records and free_block_list is empty, -// census_log_start_write() iterates over dirty_block_list to allocate a -// new block. It moves the oldest available block (no pending read/write) to -// core_local_blocks[]. -// -// core_local_block_struct is used to implement a map from core id to the block -// associated with that core. This mapping is advisory. It is possible that the -// block returned by this mapping is no longer associated with that core. This -// mapping is updated, lazily, by census_log_start_write(). -// -// Locking in block struct: -// -// Exclusive g_log.lock must be held before calling any functions operating on -// block structs except census_log_start_write() and census_log_end_write(). -// -// Writes to a block are serialized via writer_lock. census_log_start_write() -// acquires this lock and census_log_end_write() releases it. On failure to -// acquire the lock, writer allocates a new block for the current core and -// updates core_local_block accordingly. -// -// Simultaneous read and write access is allowed. Readers can safely read up to -// committed bytes (bytes_committed). -// -// reader_lock protects the block, currently being read, from getting recycled. -// start_read() acquires reader_lock and end_read() releases the lock. -// -// Read/write access to a block is disabled via try_disable_access(). It returns -// with both writer_lock and reader_lock held. These locks are subsequently -// released by enable_access() to enable access to the block. -// -// A note on naming: Most function/struct names are prepended by cl_ -// (shorthand for census_log). Further, functions that manipulate structures -// include the name of the structure, which will be passed as the first -// argument. E.g. cl_block_initialize() will initialize a cl_block. - -#include "src/core/ext/census/mlog.h" -#include -#include -#include -#include -#include -#include -#include -#include - -// End of platform specific code - -typedef struct census_log_block_list_struct { - struct census_log_block_list_struct* next; - struct census_log_block_list_struct* prev; - struct census_log_block* block; -} cl_block_list_struct; - -typedef struct census_log_block { - // Pointer to underlying buffer. - char* buffer; - gpr_atm writer_lock; - gpr_atm reader_lock; - // Keeps completely written bytes. Declared atomic because accessed - // simultaneously by reader and writer. - gpr_atm bytes_committed; - // Bytes already read. - size_t bytes_read; - // Links for list. - cl_block_list_struct link; -// We want this structure to be cacheline aligned. We assume the following -// sizes for the various parts on 32/64bit systems: -// type 32b size 64b size -// char* 4 8 -// 3x gpr_atm 12 24 -// size_t 4 8 -// cl_block_list_struct 12 24 -// TOTAL 32 64 -// -// Depending on the size of our cacheline and the architecture, we -// selectively add char buffering to this structure. The size is checked -// via assert in census_log_initialize(). -#if defined(GPR_ARCH_64) -#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 64) -#else -#if defined(GPR_ARCH_32) -#define CL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 32) -#else -#error "Unknown architecture" -#endif -#endif -#if CL_BLOCK_PAD_SIZE > 0 - char padding[CL_BLOCK_PAD_SIZE]; -#endif -} cl_block; - -// A list of cl_blocks, doubly-linked through cl_block::link. -typedef struct census_log_block_list { - int32_t count; // Number of items in list. - cl_block_list_struct ht; // head/tail of linked list. 
-} cl_block_list; - -// Cacheline aligned block pointers to avoid false sharing. Block pointer must -// be initialized via set_block(), before calling other functions -typedef struct census_log_core_local_block { - gpr_atm block; -// Ensure cachline alignment: we assume sizeof(gpr_atm) == 4 or 8 -#if defined(GPR_ARCH_64) -#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 8) -#else -#if defined(GPR_ARCH_32) -#define CL_CORE_LOCAL_BLOCK_PAD_SIZE (GPR_CACHELINE_SIZE - 4) -#else -#error "Unknown architecture" -#endif -#endif -#if CL_CORE_LOCAL_BLOCK_PAD_SIZE > 0 - char padding[CL_CORE_LOCAL_BLOCK_PAD_SIZE]; -#endif -} cl_core_local_block; - -struct census_log { - int discard_old_records; - // Number of cores (aka hardware-contexts) - unsigned num_cores; - // number of CENSUS_LOG_2_MAX_RECORD_SIZE blocks in log - uint32_t num_blocks; - cl_block* blocks; // Block metadata. - cl_core_local_block* core_local_blocks; // Keeps core to block mappings. - gpr_mu lock; - int initialized; // has log been initialized? - // Keeps the state of the reader iterator. A value of 0 indicates that - // iterator has reached the end. census_log_init_reader() resets the value - // to num_core to restart iteration. - uint32_t read_iterator_state; - // Points to the block being read. If non-NULL, the block is locked for - // reading(block_being_read_->reader_lock is held). - cl_block* block_being_read; - char* buffer; - cl_block_list free_block_list; - cl_block_list dirty_block_list; - gpr_atm out_of_space_count; -}; - -// Single internal log. -static struct census_log g_log; - -// Functions that operate on an atomic memory location used as a lock. - -// Returns non-zero if lock is acquired. -static int cl_try_lock(gpr_atm* lock) { return gpr_atm_acq_cas(lock, 0, 1); } - -static void cl_unlock(gpr_atm* lock) { gpr_atm_rel_store(lock, 0); } - -// Functions that operate on cl_core_local_block's. - -static void cl_core_local_block_set_block(cl_core_local_block* clb, - cl_block* block) { - gpr_atm_rel_store(&clb->block, (gpr_atm)block); -} - -static cl_block* cl_core_local_block_get_block(cl_core_local_block* clb) { - return (cl_block*)gpr_atm_acq_load(&clb->block); -} - -// Functions that operate on cl_block_list_struct's. - -static void cl_block_list_struct_initialize(cl_block_list_struct* bls, - cl_block* block) { - bls->next = bls->prev = bls; - bls->block = block; -} - -// Functions that operate on cl_block_list's. - -static void cl_block_list_initialize(cl_block_list* list) { - list->count = 0; - cl_block_list_struct_initialize(&list->ht, NULL); -} - -// Returns head of *this, or NULL if empty. -static cl_block* cl_block_list_head(cl_block_list* list) { - return list->ht.next->block; -} - -// Insert element *e after *pos. -static void cl_block_list_insert(cl_block_list* list, cl_block_list_struct* pos, - cl_block_list_struct* e) { - list->count++; - e->next = pos->next; - e->prev = pos; - e->next->prev = e; - e->prev->next = e; -} - -// Insert block at the head of the list -static void cl_block_list_insert_at_head(cl_block_list* list, cl_block* block) { - cl_block_list_insert(list, &list->ht, &block->link); -} - -// Insert block at the tail of the list. -static void cl_block_list_insert_at_tail(cl_block_list* list, cl_block* block) { - cl_block_list_insert(list, list->ht.prev, &block->link); -} - -// Removes block *b. Requires *b be in the list. 
-static void cl_block_list_remove(cl_block_list* list, cl_block* b) { - list->count--; - b->link.next->prev = b->link.prev; - b->link.prev->next = b->link.next; -} - -// Functions that operate on cl_block's - -static void cl_block_initialize(cl_block* block, char* buffer) { - block->buffer = buffer; - gpr_atm_rel_store(&block->writer_lock, 0); - gpr_atm_rel_store(&block->reader_lock, 0); - gpr_atm_rel_store(&block->bytes_committed, 0); - block->bytes_read = 0; - cl_block_list_struct_initialize(&block->link, block); -} - -// Guards against exposing partially written buffer to the reader. -static void cl_block_set_bytes_committed(cl_block* block, - size_t bytes_committed) { - gpr_atm_rel_store(&block->bytes_committed, (gpr_atm)bytes_committed); -} - -static size_t cl_block_get_bytes_committed(cl_block* block) { - return (size_t)gpr_atm_acq_load(&block->bytes_committed); -} - -// Tries to disable future read/write access to this block. Succeeds if: -// - no in-progress write AND -// - no in-progress read AND -// - 'discard_data' set to true OR no unread data -// On success, clears the block state and returns with writer_lock_ and -// reader_lock_ held. These locks are released by a subsequent -// cl_block_access_enable() call. -static bool cl_block_try_disable_access(cl_block* block, int discard_data) { - if (!cl_try_lock(&block->writer_lock)) { - return false; - } - if (!cl_try_lock(&block->reader_lock)) { - cl_unlock(&block->writer_lock); - return false; - } - if (!discard_data && - (block->bytes_read != cl_block_get_bytes_committed(block))) { - cl_unlock(&block->reader_lock); - cl_unlock(&block->writer_lock); - return false; - } - cl_block_set_bytes_committed(block, 0); - block->bytes_read = 0; - return true; -} - -static void cl_block_enable_access(cl_block* block) { - cl_unlock(&block->reader_lock); - cl_unlock(&block->writer_lock); -} - -// Returns with writer_lock held. -static void* cl_block_start_write(cl_block* block, size_t size) { - if (!cl_try_lock(&block->writer_lock)) { - return NULL; - } - size_t bytes_committed = cl_block_get_bytes_committed(block); - if (bytes_committed + size > CENSUS_LOG_MAX_RECORD_SIZE) { - cl_unlock(&block->writer_lock); - return NULL; - } - return block->buffer + bytes_committed; -} - -// Releases writer_lock and increments committed bytes by 'bytes_written'. -// 'bytes_written' must be <= 'size' specified in the corresponding -// StartWrite() call. This function is thread-safe. -static void cl_block_end_write(cl_block* block, size_t bytes_written) { - cl_block_set_bytes_committed( - block, cl_block_get_bytes_committed(block) + bytes_written); - cl_unlock(&block->writer_lock); -} - -// Returns a pointer to the first unread byte in buffer. The number of bytes -// available are returned in 'bytes_available'. Acquires reader lock that is -// released by a subsequent cl_block_end_read() call. Returns NULL if: -// - read in progress -// - no data available -static void* cl_block_start_read(cl_block* block, size_t* bytes_available) { - if (!cl_try_lock(&block->reader_lock)) { - return NULL; - } - // bytes_committed may change from under us. Use bytes_available to update - // bytes_read below. 
- size_t bytes_committed = cl_block_get_bytes_committed(block); - GPR_ASSERT(bytes_committed >= block->bytes_read); - *bytes_available = bytes_committed - block->bytes_read; - if (*bytes_available == 0) { - cl_unlock(&block->reader_lock); - return NULL; - } - void* record = block->buffer + block->bytes_read; - block->bytes_read += *bytes_available; - return record; -} - -static void cl_block_end_read(cl_block* block) { - cl_unlock(&block->reader_lock); -} - -// Internal functions operating on g_log - -// Allocates a new free block (or recycles an available dirty block if log is -// configured to discard old records). Returns NULL if out-of-space. -static cl_block* cl_allocate_block(void) { - cl_block* block = cl_block_list_head(&g_log.free_block_list); - if (block != NULL) { - cl_block_list_remove(&g_log.free_block_list, block); - return block; - } - if (!g_log.discard_old_records) { - // No free block and log is configured to keep old records. - return NULL; - } - // Recycle dirty block. Start from the oldest. - for (block = cl_block_list_head(&g_log.dirty_block_list); block != NULL; - block = block->link.next->block) { - if (cl_block_try_disable_access(block, 1 /* discard data */)) { - cl_block_list_remove(&g_log.dirty_block_list, block); - return block; - } - } - return NULL; -} - -// Allocates a new block and updates core id => block mapping. 'old_block' -// points to the block that the caller thinks is attached to -// 'core_id'. 'old_block' may be NULL. Returns true if: -// - allocated a new block OR -// - 'core_id' => 'old_block' mapping changed (another thread allocated a -// block before lock was acquired). -static bool cl_allocate_core_local_block(uint32_t core_id, - cl_block* old_block) { - // Now that we have the lock, check if core-local mapping has changed. - cl_core_local_block* core_local_block = &g_log.core_local_blocks[core_id]; - cl_block* block = cl_core_local_block_get_block(core_local_block); - if ((block != NULL) && (block != old_block)) { - return true; - } - if (block != NULL) { - cl_core_local_block_set_block(core_local_block, NULL); - cl_block_list_insert_at_tail(&g_log.dirty_block_list, block); - } - block = cl_allocate_block(); - if (block == NULL) { - return false; - } - cl_core_local_block_set_block(core_local_block, block); - cl_block_enable_access(block); - return true; -} - -static cl_block* cl_get_block(void* record) { - uintptr_t p = (uintptr_t)((char*)record - g_log.buffer); - uintptr_t index = p >> CENSUS_LOG_2_MAX_RECORD_SIZE; - return &g_log.blocks[index]; -} - -// Gets the next block to read and tries to free 'prev' block (if not NULL). -// Returns NULL if reached the end. -static cl_block* cl_next_block_to_read(cl_block* prev) { - cl_block* block = NULL; - if (g_log.read_iterator_state == g_log.num_cores) { - // We are traversing dirty list; find the next dirty block. - if (prev != NULL) { - // Try to free the previous block if there is no unread data. This - // block - // may have unread data if previously incomplete record completed - // between - // read_next() calls. - block = prev->link.next->block; - if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) { - cl_block_list_remove(&g_log.dirty_block_list, prev); - cl_block_list_insert_at_head(&g_log.free_block_list, prev); - } - } else { - block = cl_block_list_head(&g_log.dirty_block_list); - } - if (block != NULL) { - return block; - } - // We are done with the dirty list; moving on to core-local blocks. 
- } - while (g_log.read_iterator_state > 0) { - g_log.read_iterator_state--; - block = cl_core_local_block_get_block( - &g_log.core_local_blocks[g_log.read_iterator_state]); - if (block != NULL) { - return block; - } - } - return NULL; -} - -#define CL_LOG_2_MB 20 // 2^20 = 1MB - -// External functions: primary stats_log interface -void census_log_initialize(size_t size_in_mb, int discard_old_records) { - // Check cacheline alignment. - GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0); - GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0); - GPR_ASSERT(!g_log.initialized); - g_log.discard_old_records = discard_old_records; - g_log.num_cores = gpr_cpu_num_cores(); - // Ensure that we will not get any overflow in calculating num_blocks - GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE); - GPR_ASSERT(size_in_mb < 1000); - // Ensure at least 2x as many blocks as there are cores. - g_log.num_blocks = - (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >> - CENSUS_LOG_2_MAX_RECORD_SIZE); - gpr_mu_init(&g_log.lock); - g_log.read_iterator_state = 0; - g_log.block_being_read = NULL; - g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned( - g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG); - memset(g_log.core_local_blocks, 0, - g_log.num_cores * sizeof(cl_core_local_block)); - g_log.blocks = (cl_block*)gpr_malloc_aligned( - g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG); - memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block)); - g_log.buffer = - (char*)gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); - memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE); - cl_block_list_initialize(&g_log.free_block_list); - cl_block_list_initialize(&g_log.dirty_block_list); - for (uint32_t i = 0; i < g_log.num_blocks; ++i) { - cl_block* block = g_log.blocks + i; - cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i)); - cl_block_try_disable_access(block, 1 /* discard data */); - cl_block_list_insert_at_tail(&g_log.free_block_list, block); - } - gpr_atm_rel_store(&g_log.out_of_space_count, 0); - g_log.initialized = 1; -} - -void census_log_shutdown(void) { - GPR_ASSERT(g_log.initialized); - gpr_mu_destroy(&g_log.lock); - gpr_free_aligned(g_log.core_local_blocks); - g_log.core_local_blocks = NULL; - gpr_free_aligned(g_log.blocks); - g_log.blocks = NULL; - gpr_free(g_log.buffer); - g_log.buffer = NULL; - g_log.initialized = 0; -} - -void* census_log_start_write(size_t size) { - // Used to bound number of times block allocation is attempted. - GPR_ASSERT(size > 0); - GPR_ASSERT(g_log.initialized); - if (size > CENSUS_LOG_MAX_RECORD_SIZE) { - return NULL; - } - uint32_t attempts_remaining = g_log.num_blocks; - uint32_t core_id = gpr_cpu_current_cpu(); - do { - void* record = NULL; - cl_block* block = - cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]); - if (block && (record = cl_block_start_write(block, size))) { - return record; - } - // Need to allocate a new block. We are here if: - // - No block associated with the core OR - // - Write in-progress on the block OR - // - block is out of space - gpr_mu_lock(&g_log.lock); - bool allocated = cl_allocate_core_local_block(core_id, block); - gpr_mu_unlock(&g_log.lock); - if (!allocated) { - gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1); - return NULL; - } - } while (attempts_remaining--); - // Give up.
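For orientation, a minimal caller-side sketch of the write/read cycle implemented by the code around this hunk. It is editorial illustration only, not part of the change; it assumes the mlog.h header that this patch deletes is still on the include path, and the payload and sizes are invented.

#include <string.h>
#include "src/core/ext/census/mlog.h"

static void example_census_log_round_trip(void) {
  census_log_initialize(1 /* size_in_mb */, 1 /* discard_old_records */);
  // Writer: reserve space, fill it, then commit exactly what was written.
  const char payload[] = "example-record";
  void* record = census_log_start_write(sizeof(payload));
  if (record != NULL) {
    memcpy(record, payload, sizeof(payload));
    census_log_end_write(record, sizeof(payload));
  }
  // Reader: iterate over every block that has committed, unread data.
  census_log_init_reader();
  size_t available;
  for (const void* data = census_log_read_next(&available); data != NULL;
       data = census_log_read_next(&available)) {
    (void)data;  // consume `available` bytes starting at `data`
  }
  census_log_shutdown();
}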
- gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1); - return NULL; -} - -void census_log_end_write(void* record, size_t bytes_written) { - GPR_ASSERT(g_log.initialized); - cl_block_end_write(cl_get_block(record), bytes_written); -} - -void census_log_init_reader(void) { - GPR_ASSERT(g_log.initialized); - gpr_mu_lock(&g_log.lock); - // If a block is locked for reading unlock it. - if (g_log.block_being_read != NULL) { - cl_block_end_read(g_log.block_being_read); - g_log.block_being_read = NULL; - } - g_log.read_iterator_state = g_log.num_cores; - gpr_mu_unlock(&g_log.lock); -} - -const void* census_log_read_next(size_t* bytes_available) { - GPR_ASSERT(g_log.initialized); - gpr_mu_lock(&g_log.lock); - if (g_log.block_being_read != NULL) { - cl_block_end_read(g_log.block_being_read); - } - do { - g_log.block_being_read = cl_next_block_to_read(g_log.block_being_read); - if (g_log.block_being_read != NULL) { - void* record = - cl_block_start_read(g_log.block_being_read, bytes_available); - if (record != NULL) { - gpr_mu_unlock(&g_log.lock); - return record; - } - } - } while (g_log.block_being_read != NULL); - gpr_mu_unlock(&g_log.lock); - return NULL; -} - -size_t census_log_remaining_space(void) { - GPR_ASSERT(g_log.initialized); - size_t space = 0; - gpr_mu_lock(&g_log.lock); - if (g_log.discard_old_records) { - // Remaining space is not meaningful; just return the entire log space. - space = g_log.num_blocks << CENSUS_LOG_2_MAX_RECORD_SIZE; - } else { - GPR_ASSERT(g_log.free_block_list.count >= 0); - space = (size_t)g_log.free_block_list.count * CENSUS_LOG_MAX_RECORD_SIZE; - } - gpr_mu_unlock(&g_log.lock); - return space; -} - -int64_t census_log_out_of_space_count(void) { - GPR_ASSERT(g_log.initialized); - return gpr_atm_acq_load(&g_log.out_of_space_count); -} diff --git a/Sources/CgRPC/src/core/ext/census/mlog.h b/Sources/CgRPC/src/core/ext/census/mlog.h deleted file mode 100644 index 6f3125944..000000000 --- a/Sources/CgRPC/src/core/ext/census/mlog.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* A very fast in-memory log, optimized for multiple writers. */ - -#ifndef GRPC_CORE_EXT_CENSUS_MLOG_H -#define GRPC_CORE_EXT_CENSUS_MLOG_H - -#include -#include - -/* Maximum record size, in bytes. */ -#define CENSUS_LOG_2_MAX_RECORD_SIZE 14 /* 2^14 = 16KB */ -#define CENSUS_LOG_MAX_RECORD_SIZE (1 << CENSUS_LOG_2_MAX_RECORD_SIZE) - -/* Initialize the statistics logging subsystem with the given log size. A log - size of 0 will result in the smallest possible log for the platform - (approximately CENSUS_LOG_MAX_RECORD_SIZE * gpr_cpu_num_cores()). If - discard_old_records is non-zero, then new records will displace older ones - when the log is full. This function must be called before any other - census_log functions. -*/ -void census_log_initialize(size_t size_in_mb, int discard_old_records); - -/* Shutdown the logging subsystem. 
Caller must ensure that: - - no in progress or future call to any census_log functions - - no incomplete records -*/ -void census_log_shutdown(void); - -/* Allocates and returns a 'size' bytes record and marks it in use. A - subsequent census_log_end_write() marks the record complete. The - 'bytes_written' census_log_end_write() argument must be <= - 'size'. Returns NULL if out-of-space AND: - - log is configured to keep old records OR - - all blocks are pinned by incomplete records. -*/ -void* census_log_start_write(size_t size); - -void census_log_end_write(void* record, size_t bytes_written); - -void census_log_init_reader(void); - -/* census_log_read_next() iterates over blocks with data and for each block - returns a pointer to the first unread byte. The number of bytes that can be - read are returned in 'bytes_available'. Reader is expected to read all - available data. Reading the data consumes it i.e. it cannot be read again. - census_log_read_next() returns NULL if the end is reached i.e last block - is read. census_log_init_reader() starts the iteration or aborts the - current iteration. -*/ -const void* census_log_read_next(size_t* bytes_available); - -/* Returns estimated remaining space across all blocks, in bytes. If log is - configured to discard old records, returns total log space. Otherwise, - returns space available in empty blocks (partially filled blocks are - treated as full). -*/ -size_t census_log_remaining_space(void); - -/* Returns the number of times grpc_stats_log_start_write() failed due to - out-of-space. */ -int64_t census_log_out_of_space_count(void); - -#endif /* GRPC_CORE_EXT_CENSUS_MLOG_H */ diff --git a/Sources/CgRPC/src/core/ext/census/operation.c b/Sources/CgRPC/src/core/ext/census/operation.c deleted file mode 100644 index be88ac74e..000000000 --- a/Sources/CgRPC/src/core/ext/census/operation.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -/* TODO(aveitch): These are all placeholder implementations. */ - -census_timestamp census_start_rpc_op_timestamp(void) { - census_timestamp ct; - /* TODO(aveitch): assumes gpr_timespec implementation of census_timestamp. 
*/ - ct.ts = gpr_now(GPR_CLOCK_MONOTONIC); - return ct; -} - -census_context *census_start_client_rpc_op( - const census_context *context, int64_t rpc_name_id, - const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, - const census_timestamp *start_time) { - return NULL; -} - -census_context *census_start_server_rpc_op( - const char *buffer, int64_t rpc_name_id, - const census_rpc_name_info *rpc_name_info, const char *peer, int trace_mask, - census_timestamp *start_time) { - return NULL; -} - -census_context *census_start_op(census_context *context, const char *family, - const char *name, int trace_mask) { - return NULL; -} - -void census_end_op(census_context *context, int status) {} diff --git a/Sources/CgRPC/src/core/ext/census/placeholders.c b/Sources/CgRPC/src/core/ext/census/placeholders.c deleted file mode 100644 index bed9837ee..000000000 --- a/Sources/CgRPC/src/core/ext/census/placeholders.c +++ /dev/null @@ -1,49 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -#include - -/* Placeholders for the pending APIs */ - -int census_get_trace_record(census_trace_record *trace_record) { - (void)trace_record; - abort(); -} - -void census_record_values(census_context *context, census_value *values, - size_t nvalues) { - (void)context; - (void)values; - (void)nvalues; - abort(); -} - -void census_set_rpc_client_peer(census_context *context, const char *peer) { - (void)context; - (void)peer; - abort(); -} - -void census_trace_scan_end() { abort(); } - -int census_trace_scan_start(int consume) { - (void)consume; - abort(); -} diff --git a/Sources/CgRPC/src/core/ext/census/resource.c b/Sources/CgRPC/src/core/ext/census/resource.c deleted file mode 100644 index 44a887231..000000000 --- a/Sources/CgRPC/src/core/ext/census/resource.c +++ /dev/null @@ -1,303 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/census/resource.h" -#include "third_party/nanopb/pb_decode.h" - -#include -#include -#include -#include - -#include -#include - -// Protect local resource data structures. -static gpr_mu resource_lock; - -// Deleteing and creating resources are relatively rare events, and should not -// be done in the critical path of performance sensitive code. We record -// current resource id's used in a simple array, and just search it each time -// we need to assign a new id, or look up a resource. 
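As a caller-side illustration of the registry the comment above describes (editorial, not part of the patch): the public entry points defined further down in this deleted file take a nanopb-serialized google.census.Resource and hand back a small integer id that indexes the array declared next. The include path and the resource name used here are assumptions.

#include <stdint.h>
#include <stdio.h>
#include <grpc/census.h>  /* assumed location of the public census declarations */

static void example_define_and_lookup(const uint8_t* resource_pb, size_t len) {
  // census_define_resource() returns -1 if the serialized Resource proto
  // fails validation (missing name, no numerator, duplicate name, ...).
  int32_t rid = census_define_resource(resource_pb, len);
  if (rid < 0) {
    fprintf(stderr, "resource rejected\n");
    return;
  }
  // Resources can later be looked up by name ("example_resource" is made up)...
  int32_t found = census_resource_id("example_resource");
  // ...and deleted once they are no longer needed.
  census_delete_resource(found >= 0 ? found : rid);
}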
-static resource **resources = NULL; - -// Number of entries in *resources -static size_t n_resources = 0; - -// Number of defined resources -static size_t n_defined_resources = 0; - -void initialize_resources(void) { - gpr_mu_init(&resource_lock); - gpr_mu_lock(&resource_lock); - GPR_ASSERT(resources == NULL && n_resources == 0 && n_defined_resources == 0); - gpr_mu_unlock(&resource_lock); -} - -// Delete a resource given its ID. The ID must be a valid resource ID. Must be -// called with resource_lock held. -static void delete_resource_locked(size_t rid) { - GPR_ASSERT(resources[rid] != NULL); - gpr_free(resources[rid]->name); - gpr_free(resources[rid]->description); - gpr_free(resources[rid]->numerators); - gpr_free(resources[rid]->denominators); - gpr_free(resources[rid]); - resources[rid] = NULL; - n_defined_resources--; -} - -void shutdown_resources(void) { - gpr_mu_lock(&resource_lock); - for (size_t i = 0; i < n_resources; i++) { - if (resources[i] != NULL) { - delete_resource_locked(i); - } - } - GPR_ASSERT(n_defined_resources == 0); - gpr_free(resources); - resources = NULL; - n_resources = 0; - gpr_mu_unlock(&resource_lock); -} - -// Check the contents of string fields in a resource proto. -static bool validate_string(pb_istream_t *stream, const pb_field_t *field, - void **arg) { - resource *vresource = (resource *)*arg; - switch (field->tag) { - case google_census_Resource_name_tag: - // Name must have at least one character - if (stream->bytes_left == 0) { - gpr_log(GPR_INFO, "Zero-length Resource name."); - return false; - } - vresource->name = (char *)gpr_malloc(stream->bytes_left + 1); - vresource->name[stream->bytes_left] = '\0'; - if (!pb_read(stream, (uint8_t *)vresource->name, stream->bytes_left)) { - return false; - } - // Can't have same name as an existing resource. - for (size_t i = 0; i < n_resources; i++) { - resource *compare = resources[i]; - if (compare == vresource || compare == NULL) continue; - if (strcmp(compare->name, vresource->name) == 0) { - gpr_log(GPR_INFO, "Duplicate Resource name %s.", vresource->name); - return false; - } - } - break; - case google_census_Resource_description_tag: - if (stream->bytes_left == 0) { - return true; - } - vresource->description = (char *)gpr_malloc(stream->bytes_left + 1); - vresource->description[stream->bytes_left] = '\0'; - if (!pb_read(stream, (uint8_t *)vresource->description, - stream->bytes_left)) { - return false; - } - break; - default: - // No other string fields in Resource. Print warning and skip. - gpr_log(GPR_INFO, "Unknown string field type in Resource protobuf."); - if (!pb_read(stream, NULL, stream->bytes_left)) { - return false; - } - break; - } - return true; -} - -// Decode numerators/denominators in a stream. The `count` and `bup` -// (BasicUnit pointer) are pointers to the appropriate fields in a resource -// struct. -static bool validate_units_helper(pb_istream_t *stream, int *count, - google_census_Resource_BasicUnit **bup) { - while (stream->bytes_left) { - (*count)++; - // Have to allocate a new array of values. Normal case is 0 or 1, so - // this should normally not be an issue.
- google_census_Resource_BasicUnit *new_bup = - (google_census_Resource_BasicUnit *)gpr_malloc( - (size_t)*count * sizeof(google_census_Resource_BasicUnit)); - if (*count != 1) { - memcpy(new_bup, *bup, - (size_t)(*count - 1) * sizeof(google_census_Resource_BasicUnit)); - gpr_free(*bup); - } - *bup = new_bup; - uint64_t value; - if (!pb_decode_varint(stream, &value)) { - return false; - } - *(*bup + *count - 1) = (google_census_Resource_BasicUnit)value; - } - return true; -} - -// Validate units field of a Resource proto. -static bool validate_units(pb_istream_t *stream, const pb_field_t *field, - void **arg) { - resource *vresource = (resource *)(*arg); - switch (field->tag) { - case google_census_Resource_MeasurementUnit_numerator_tag: - return validate_units_helper(stream, &vresource->n_numerators, - &vresource->numerators); - break; - case google_census_Resource_MeasurementUnit_denominator_tag: - return validate_units_helper(stream, &vresource->n_denominators, - &vresource->denominators); - break; - default: - gpr_log(GPR_ERROR, "Unknown field type."); - return false; - break; - } - return true; -} - -// Validate the contents of a Resource proto. `id` is the intended resource id. -static bool validate_resource_pb(const uint8_t *resource_pb, - size_t resource_pb_size, size_t id) { - GPR_ASSERT(id < n_resources); - if (resource_pb == NULL) { - return false; - } - google_census_Resource vresource; - vresource.name.funcs.decode = &validate_string; - vresource.name.arg = resources[id]; - vresource.description.funcs.decode = &validate_string; - vresource.description.arg = resources[id]; - vresource.unit.numerator.funcs.decode = &validate_units; - vresource.unit.numerator.arg = resources[id]; - vresource.unit.denominator.funcs.decode = &validate_units; - vresource.unit.denominator.arg = resources[id]; - - pb_istream_t stream = - pb_istream_from_buffer((uint8_t *)resource_pb, resource_pb_size); - if (!pb_decode(&stream, google_census_Resource_fields, &vresource)) { - return false; - } - // A Resource must have a name, a unit, with at least one numerator. - return (resources[id]->name != NULL && vresource.has_unit && - resources[id]->n_numerators > 0); -} - -// Allocate a blank resource, and return associated ID. Must be called with -// resource_lock held. -size_t allocate_resource(void) { - // use next_id to optimize expected placement of next new resource. - static size_t next_id = 0; - size_t id = n_resources; // resource ID - initialize to invalid value. - // Expand resources if needed. - if (n_resources == n_defined_resources) { - size_t new_n_resources = n_resources ? n_resources * 2 : 2; - resource **new_resources = - (resource **)gpr_malloc(new_n_resources * sizeof(resource *)); - if (n_resources != 0) { - memcpy(new_resources, resources, n_resources * sizeof(resource *)); - } - memset(new_resources + n_resources, 0, - (new_n_resources - n_resources) * sizeof(resource *)); - gpr_free(resources); - resources = new_resources; - n_resources = new_n_resources; - id = n_defined_resources; - } else { - GPR_ASSERT(n_defined_resources < n_resources); - // Find a free id. 
- for (size_t base = 0; base < n_resources; base++) { - id = (next_id + base) % n_resources; - if (resources[id] == NULL) break; - } - } - GPR_ASSERT(id < n_resources && resources[id] == NULL); - resources[id] = (resource *)gpr_malloc(sizeof(resource)); - memset(resources[id], 0, sizeof(resource)); - n_defined_resources++; - next_id = (id + 1) % n_resources; - return id; -} - -int32_t census_define_resource(const uint8_t *resource_pb, - size_t resource_pb_size) { - if (resource_pb == NULL) { - return -1; - } - gpr_mu_lock(&resource_lock); - size_t id = allocate_resource(); - // Validate pb, extract name. - if (!validate_resource_pb(resource_pb, resource_pb_size, id)) { - delete_resource_locked(id); - gpr_mu_unlock(&resource_lock); - return -1; - } - gpr_mu_unlock(&resource_lock); - return (int32_t)id; -} - -void census_delete_resource(int32_t rid) { - gpr_mu_lock(&resource_lock); - if (rid >= 0 && (size_t)rid < n_resources && resources[rid] != NULL) { - delete_resource_locked((size_t)rid); - } - gpr_mu_unlock(&resource_lock); -} - -int32_t census_resource_id(const char *name) { - gpr_mu_lock(&resource_lock); - for (int32_t id = 0; (size_t)id < n_resources; id++) { - if (resources[id] != NULL && strcmp(resources[id]->name, name) == 0) { - gpr_mu_unlock(&resource_lock); - return id; - } - } - gpr_mu_unlock(&resource_lock); - return -1; -} - -int32_t define_resource(const resource *base) { - GPR_ASSERT(base != NULL && base->name != NULL && base->n_numerators > 0 && - base->numerators != NULL); - gpr_mu_lock(&resource_lock); - size_t id = allocate_resource(); - size_t len = strlen(base->name) + 1; - resources[id]->name = (char *)gpr_malloc(len); - memcpy(resources[id]->name, base->name, len); - if (base->description) { - len = strlen(base->description) + 1; - resources[id]->description = (char *)gpr_malloc(len); - memcpy(resources[id]->description, base->description, len); - } - resources[id]->prefix = base->prefix; - resources[id]->n_numerators = base->n_numerators; - len = (size_t)base->n_numerators * sizeof(*base->numerators); - resources[id]->numerators = - (google_census_Resource_BasicUnit *)gpr_malloc(len); - memcpy(resources[id]->numerators, base->numerators, len); - resources[id]->n_denominators = base->n_denominators; - if (base->n_denominators != 0) { - len = (size_t)base->n_denominators * sizeof(*base->denominators); - resources[id]->denominators = - (google_census_Resource_BasicUnit *)gpr_malloc(len); - memcpy(resources[id]->denominators, base->denominators, len); - } - gpr_mu_unlock(&resource_lock); - return (int32_t)id; -} diff --git a/Sources/CgRPC/src/core/ext/census/resource.h b/Sources/CgRPC/src/core/ext/census/resource.h deleted file mode 100644 index b8bda2c72..000000000 --- a/Sources/CgRPC/src/core/ext/census/resource.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Census-internal resource definition and manipluation functions. 
*/ -#ifndef GRPC_CORE_EXT_CENSUS_RESOURCE_H -#define GRPC_CORE_EXT_CENSUS_RESOURCE_H - -#include -#include "src/core/ext/census/gen/census.pb.h" - -/* Internal representation of a resource. */ -typedef struct { - char *name; - char *description; - int32_t prefix; - int n_numerators; - google_census_Resource_BasicUnit *numerators; - int n_denominators; - google_census_Resource_BasicUnit *denominators; -} resource; - -/* Initialize and shutdown the resources subsystem. */ -void initialize_resources(void); -void shutdown_resources(void); - -/* Add a new resource, given a proposed resource structure. Returns the - resource ID, or -ve on failure. - TODO(aveitch): this function exists to support addition of the base - resources. It should be removed when we have the ability to add resources - from configuration files. */ -int32_t define_resource(const resource *base); - -#endif /* GRPC_CORE_EXT_CENSUS_RESOURCE_H */ diff --git a/Sources/CgRPC/src/core/ext/census/rpc_metric_id.h b/Sources/CgRPC/src/core/ext/census/rpc_metric_id.h deleted file mode 100644 index ea493d728..000000000 --- a/Sources/CgRPC/src/core/ext/census/rpc_metric_id.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H -#define GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H - -/* Metric ID's used for RPC measurements. */ -/* Count of client requests sent. */ -#define CENSUS_METRIC_RPC_CLIENT_REQUESTS ((uint32_t)0) -/* Count of server requests sent. */ -#define CENSUS_METRIC_RPC_SERVER_REQUESTS ((uint32_t)1) -/* Client error counts. */ -#define CENSUS_METRIC_RPC_CLIENT_ERRORS ((uint32_t)2) -/* Server error counts. */ -#define CENSUS_METRIC_RPC_SERVER_ERRORS ((uint32_t)3) -/* Client side request latency. */ -#define CENSUS_METRIC_RPC_CLIENT_LATENCY ((uint32_t)4) -/* Server side request latency. */ -#define CENSUS_METRIC_RPC_SERVER_LATENCY ((uint32_t)5) - -#endif /* GRPC_CORE_EXT_CENSUS_RPC_METRIC_ID_H */ diff --git a/Sources/CgRPC/src/core/ext/census/trace_context.c b/Sources/CgRPC/src/core/ext/census/trace_context.c deleted file mode 100644 index af92ae6d9..000000000 --- a/Sources/CgRPC/src/core/ext/census/trace_context.c +++ /dev/null @@ -1,71 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/ext/census/trace_context.h" - -#include -#include -#include - -#include "third_party/nanopb/pb_decode.h" -#include "third_party/nanopb/pb_encode.h" - -// This function assumes the TraceContext is valid. -size_t encode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer, - const size_t buf_size) { - // Create a stream that will write to our buffer. - pb_ostream_t stream = pb_ostream_from_buffer(buffer, buf_size); - - // encode message - bool status = pb_encode(&stream, google_trace_TraceContext_fields, ctxt); - - if (!status) { - gpr_log(GPR_DEBUG, "TraceContext encoding failed: %s", - PB_GET_ERROR(&stream)); - return 0; - } - - return stream.bytes_written; -} - -bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer, - const size_t nbytes) { - // Create a stream that reads nbytes from the buffer. - pb_istream_t stream = pb_istream_from_buffer(buffer, nbytes); - - // decode message - bool status = pb_decode(&stream, google_trace_TraceContext_fields, ctxt); - - if (!status) { - gpr_log(GPR_DEBUG, "TraceContext decoding failed: %s", - PB_GET_ERROR(&stream)); - return false; - } - - // check fields - if (!ctxt->has_trace_id_hi || !ctxt->has_trace_id_lo) { - gpr_log(GPR_DEBUG, "Invalid TraceContext: missing trace_id"); - return false; - } - if (!ctxt->has_span_id) { - gpr_log(GPR_DEBUG, "Invalid TraceContext: missing span_id"); - return false; - } - - return true; -} diff --git a/Sources/CgRPC/src/core/ext/census/trace_context.h b/Sources/CgRPC/src/core/ext/census/trace_context.h deleted file mode 100644 index a7233e6a2..000000000 --- a/Sources/CgRPC/src/core/ext/census/trace_context.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Functions for manipulating trace contexts as defined in - src/proto/census/trace.proto */ -#ifndef GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H -#define GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H - -#include "src/core/ext/census/gen/trace_context.pb.h" - -/* Span option flags. */ -#define SPAN_OPTIONS_IS_SAMPLED 0x01 - -/* Maximum number of bytes required to encode a TraceContext (31) -1 byte for trace_id field -1 byte for trace_id length -1 byte for trace_id.hi field -8 bytes for trace_id.hi (uint64_t) -1 byte for trace_id.lo field -8 bytes for trace_id.lo (uint64_t) -1 byte for span_id field -8 bytes for span_id (uint64_t) -1 byte for is_sampled field -1 byte for is_sampled (bool) */ -#define TRACE_MAX_CONTEXT_SIZE 31 - -/* Encode a trace context (ctxt) into proto format to the buffer provided. The -size of buffer must be at least TRACE_MAX_CONTEXT_SIZE. On success, returns the -number of bytes successfully encoded into buffer. On failure, returns 0. */ -size_t encode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer, - const size_t buf_size); - -/* Decode a proto-encoded TraceContext from the provided buffer into the -TraceContext structure (ctxt). 
The function expects to be supplied the number -of bytes to be read from buffer (nbytes). This function will also validate that -the TraceContext has a span_id and a trace_id, and will return false if either -of these do not exist. On success, returns true and false otherwise. */ -bool decode_trace_context(google_trace_TraceContext *ctxt, uint8_t *buffer, - const size_t nbytes); - -#endif /* GRPC_CORE_EXT_CENSUS_TRACE_CONTEXT_H */ diff --git a/Sources/CgRPC/src/core/ext/census/trace_label.h b/Sources/CgRPC/src/core/ext/census/trace_label.h deleted file mode 100644 index 97ce399eb..000000000 --- a/Sources/CgRPC/src/core/ext/census/trace_label.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H -#define GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H - -#include "src/core/ext/census/trace_string.h" - -/* Trace label (key/value pair) stores a label name and the label value. The - value can be one of trace_string/int64_t/bool. */ -typedef struct trace_label { - trace_string key; - enum label_type { - /* Unknown value for debugging/error purposes */ - LABEL_UNKNOWN = 0, - /* A string value */ - LABEL_STRING = 1, - /* An integer value. */ - LABEL_INT = 2, - /* A boolean value. */ - LABEL_BOOL = 3, - } value_type; - - union value { - trace_string label_str; - int64_t label_int; - bool label_bool; - } value; -} trace_label; - -#endif /* GRPC_CORE_EXT_CENSUS_TRACE_LABEL_H */ diff --git a/Sources/CgRPC/src/core/ext/census/trace_propagation.h b/Sources/CgRPC/src/core/ext/census/trace_propagation.h deleted file mode 100644 index eecfcb7d0..000000000 --- a/Sources/CgRPC/src/core/ext/census/trace_propagation.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H -#define GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H - -#include "src/core/ext/census/tracing.h" - -/* Encoding and decoding functions for receiving and sending trace contexts - over the wire. Only RPC libraries should be calling these - functions. These functions return the number of bytes encoded/decoded - (0 if a failure has occurred). buf_size indicates the size of the - input/output buffer. trace_span_context is a struct that includes the - trace ID, span ID, and a set of option flags (is_sampled, etc.). */ - -/* Converts a span context to a binary byte buffer. 
*/ -size_t trace_span_context_to_binary(const trace_span_context *ctxt, - uint8_t *buf, size_t buf_size); - -/* Reads a binary byte buffer and populates a span context structure. */ -size_t binary_to_trace_span_context(const uint8_t *buf, size_t buf_size, - trace_span_context *ctxt); - -/* Converts a span context to an http metadata compatible string. */ -size_t trace_span_context_to_http_format(const trace_span_context *ctxt, - char *buf, size_t buf_size); - -/* Reads an http metadata compatible string and populates a span context - structure. */ -size_t http_format_to_trace_span_context(const char *buf, size_t buf_size, - trace_span_context *ctxt); - -#endif /* GRPC_CORE_EXT_CENSUS_TRACE_PROPAGATION_H */ diff --git a/Sources/CgRPC/src/core/ext/census/trace_status.h b/Sources/CgRPC/src/core/ext/census/trace_status.h deleted file mode 100644 index dd83d3f72..000000000 --- a/Sources/CgRPC/src/core/ext/census/trace_status.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H -#define GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H - -#include "src/core/ext/census/trace_string.h" - -/* Stores a status code and status message for a trace. */ -typedef struct trace_status { - int64_t errorCode; - trace_string errorMessage; -} trace_status; - -#endif /* GRPC_CORE_EXT_CENSUS_TRACE_STATUS_H */ diff --git a/Sources/CgRPC/src/core/ext/census/trace_string.h b/Sources/CgRPC/src/core/ext/census/trace_string.h deleted file mode 100644 index e4da3f590..000000000 --- a/Sources/CgRPC/src/core/ext/census/trace_string.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_TRACE_STRING_H -#define GRPC_CORE_EXT_CENSUS_TRACE_STRING_H - -#include - -/* String struct for tracing messages. Since this is a C API, we do not have - access to a string class. This is intended for use by higher level - languages which wrap around the C API, as most of them have a string class. - This will also be more efficient when copying, as we have an explicitly - specified length. Also, grpc_slice has reference counting which allows for - interning. 
*/ -typedef struct trace_string { - char *string; - size_t length; -} trace_string; - -#endif /* GRPC_CORE_EXT_CENSUS_TRACE_STRING_H */ diff --git a/Sources/CgRPC/src/core/ext/census/tracing.c b/Sources/CgRPC/src/core/ext/census/tracing.c deleted file mode 100644 index 823c681ab..000000000 --- a/Sources/CgRPC/src/core/ext/census/tracing.c +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/census/tracing.h" - -#include -#include -#include -#include "src/core/ext/census/mlog.h" - -void trace_start_span(const trace_span_context *span_ctxt, - const trace_string name, const start_span_options *opts, - trace_span_context *new_span_ctxt, - bool has_remote_parent) { - // Noop implementation. -} - -void trace_add_span_annotation(const trace_string description, - const trace_label *labels, const size_t n_labels, - trace_span_context *span_ctxt) { - // Noop implementation. -} - -void trace_add_span_network_event_annotation(const trace_string description, - const trace_label *labels, - const size_t n_labels, - const gpr_timespec timestamp, - bool sent, uint64_t id, - trace_span_context *span_ctxt) { - // Noop implementation. -} - -void trace_add_span_labels(const trace_label *labels, const size_t n_labels, - trace_span_context *span_ctxt) { - // Noop implementation. -} - -void trace_end_span(const trace_status *status, trace_span_context *span_ctxt) { - // Noop implementation. -} diff --git a/Sources/CgRPC/src/core/ext/census/tracing.h b/Sources/CgRPC/src/core/ext/census/tracing.h deleted file mode 100644 index 038c9e279..000000000 --- a/Sources/CgRPC/src/core/ext/census/tracing.h +++ /dev/null @@ -1,109 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_CENSUS_TRACING_H -#define GRPC_CORE_EXT_CENSUS_TRACING_H - -#include -#include -#include "src/core/ext/census/trace_context.h" -#include "src/core/ext/census/trace_label.h" -#include "src/core/ext/census/trace_status.h" - -/* This is the low level tracing API that other languages will interface with. - This is not intended to be accessed by the end-user, therefore it has been - designed with performance in mind rather than ease of use. */ - -/* The tracing level. */ -enum TraceLevel { - /* Annotations on this context will be silently discarded. */ - NO_TRACING = 0, - /* Annotations will not be saved to a persistent store. 
They will be - available via local APIs only. This setting is not propagated to child - spans. */ - TRANSIENT_TRACING = 1, - /* Annotations are recorded for the entire distributed trace and they are - saved to a persistent store. This setting is propagated to child spans. */ - PERSISTENT_TRACING = 2, -}; - -typedef struct trace_span_context { - /* Trace span context stores Span ID, Trace ID, and option flags. */ - /* Trace ID is 128 bits split into 2 64-bit chunks (hi and lo). */ - uint64_t trace_id_hi; - uint64_t trace_id_lo; - /* Span ID is 64 bits. */ - uint64_t span_id; - /* Span-options is a 32-bit value which contains flag options. */ - uint32_t span_options; -} trace_span_context; - -typedef struct start_span_options { - /* If set, this will override the Span.local_start_time for the Span. */ - gpr_timespec local_start_timestamp; - - /* Linked spans can be used to identify spans that are linked to this span in - a different trace. This can be used (for example) in batching operations, - where a single batch handler processes multiple requests from different - traces. If set, points to a list of Spans that are linked to the created Span.*/ - trace_span_context *linked_spans; - /* The number of linked spans. */ - size_t n_linked_spans; -} start_span_options; - -/* Create a new child Span (or root if parent is NULL), with parent being the - designated Span. The child span will have the provided name and starting - span options (optional). The bool has_remote_parent marks whether the - context refers to a remote parent span or not. */ -void trace_start_span(const trace_span_context *span_ctxt, - const trace_string name, const start_span_options *opts, - trace_span_context *new_span_ctxt, - bool has_remote_parent); - -/* Add a new Annotation to the Span. Annotations consist of a description - (trace_string) and a set of n labels (trace_label). This can be populated - with arbitrary user data. */ -void trace_add_span_annotation(const trace_string description, - const trace_label *labels, const size_t n_labels, - trace_span_context *span_ctxt); - -/* Add a new NetworkEvent annotation to a Span. This function is only intended - to be used by RPC systems (either client or server), not by higher level - applications. The timestamp type will be system-defined, the sent argument - designates whether this is a network send event (client request, server - reply) or receive (server request, client reply). The id argument corresponds - to Span.Annotation.NetworkEvent.id from the data model, and serves to uniquely - identify each network message. */ -void trace_add_span_network_event(const trace_string description, - const trace_label *labels, - const size_t n_labels, - const gpr_timespec timestamp, bool sent, - uint64_t id, trace_span_context *span_ctxt); - -/* Add a set of labels to the Span. These will correspond to the field -Span.labels in the data model. */ -void trace_add_span_labels(const trace_label *labels, const size_t n_labels, - trace_span_context *span_ctxt); - -/* Mark the end of Span Execution with the given status. Only the timing of the -first EndSpan call for a given Span will be recorded, and implementations are -free to ignore all further calls using the Span. EndSpanOptions can -optionally be NULL.
*/ -void trace_end_span(const trace_status *status, trace_span_context *span_ctxt); - -#endif /* GRPC_CORE_EXT_CENSUS_TRACING_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/backup_poller.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/backup_poller.cc new file mode 100644 index 000000000..3e2faa57b --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/backup_poller.cc @@ -0,0 +1,174 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/filters/client_channel/backup_poller.h" + +#include +#include +#include +#include +#include "src/core/ext/filters/client_channel/client_channel.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/pollset.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/surface/channel.h" +#include "src/core/lib/surface/completion_queue.h" + +#define DEFAULT_POLL_INTERVAL_MS 5000 + +namespace { +struct backup_poller { + grpc_timer polling_timer; + grpc_closure run_poller_closure; + grpc_closure shutdown_closure; + gpr_mu* pollset_mu; + grpc_pollset* pollset; // guarded by pollset_mu + bool shutting_down; // guarded by pollset_mu + gpr_refcount refs; + gpr_refcount shutdown_refs; +}; +} // namespace + +static gpr_once g_once = GPR_ONCE_INIT; +static gpr_mu g_poller_mu; +static backup_poller* g_poller = nullptr; // guarded by g_poller_mu +// g_poll_interval_ms is set only once at the first time +// grpc_client_channel_start_backup_polling() is called, after that it is +// treated as const. 
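A brief caller-side sketch of the two functions this new file exports (the interval variable the comment above refers to is declared right after this sketch). This is editorial illustration only, not part of the change; `interested_parties` is assumed to be the pollset_set of the channel being wired up, and every start call is expected to be balanced by a stop call so the shared poller's refcount can drop to zero.

#include "src/core/ext/filters/client_channel/backup_poller.h"

static void example_attach_backup_poller(grpc_pollset_set* interested_parties) {
  // Lazily creates the shared poller (a no-op if the interval is configured
  // as 0) and links its pollset into `interested_parties`.
  grpc_client_channel_start_backup_polling(interested_parties);
}

static void example_detach_backup_poller(grpc_pollset_set* interested_parties) {
  // Removes the poller's pollset from `interested_parties` again and drops
  // one reference on the shared poller.
  grpc_client_channel_stop_backup_polling(interested_parties);
}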
+static int g_poll_interval_ms = DEFAULT_POLL_INTERVAL_MS; + +static void init_globals() { + gpr_mu_init(&g_poller_mu); + char* env = gpr_getenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS"); + if (env != nullptr) { + int poll_interval_ms = gpr_parse_nonnegative_int(env); + if (poll_interval_ms == -1) { + gpr_log(GPR_ERROR, + "Invalid GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS: %s, " + "default value %d will be used.", + env, g_poll_interval_ms); + } else { + g_poll_interval_ms = poll_interval_ms; + } + } + gpr_free(env); +} + +static void backup_poller_shutdown_unref(backup_poller* p) { + if (gpr_unref(&p->shutdown_refs)) { + grpc_pollset_destroy(p->pollset); + gpr_free(p->pollset); + gpr_free(p); + } +} + +static void done_poller(void* arg, grpc_error* error) { + backup_poller_shutdown_unref(static_cast(arg)); +} + +static void g_poller_unref() { + gpr_mu_lock(&g_poller_mu); + if (gpr_unref(&g_poller->refs)) { + backup_poller* p = g_poller; + g_poller = nullptr; + gpr_mu_unlock(&g_poller_mu); + gpr_mu_lock(p->pollset_mu); + p->shutting_down = true; + grpc_pollset_shutdown( + p->pollset, GRPC_CLOSURE_INIT(&p->shutdown_closure, done_poller, p, + grpc_schedule_on_exec_ctx)); + gpr_mu_unlock(p->pollset_mu); + grpc_timer_cancel(&p->polling_timer); + } else { + gpr_mu_unlock(&g_poller_mu); + } +} + +static void run_poller(void* arg, grpc_error* error) { + backup_poller* p = static_cast(arg); + if (error != GRPC_ERROR_NONE) { + if (error != GRPC_ERROR_CANCELLED) { + GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error)); + } + backup_poller_shutdown_unref(p); + return; + } + gpr_mu_lock(p->pollset_mu); + if (p->shutting_down) { + gpr_mu_unlock(p->pollset_mu); + backup_poller_shutdown_unref(p); + return; + } + grpc_error* err = + grpc_pollset_work(p->pollset, nullptr, grpc_core::ExecCtx::Get()->Now()); + gpr_mu_unlock(p->pollset_mu); + GRPC_LOG_IF_ERROR("Run client channel backup poller", err); + grpc_timer_init(&p->polling_timer, + grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms, + &p->run_poller_closure); +} + +static void g_poller_init_locked() { + if (g_poller == nullptr) { + g_poller = static_cast(gpr_zalloc(sizeof(backup_poller))); + g_poller->pollset = + static_cast(gpr_zalloc(grpc_pollset_size())); + g_poller->shutting_down = false; + grpc_pollset_init(g_poller->pollset, &g_poller->pollset_mu); + gpr_ref_init(&g_poller->refs, 0); + // one for timer cancellation, one for pollset shutdown + gpr_ref_init(&g_poller->shutdown_refs, 2); + GRPC_CLOSURE_INIT(&g_poller->run_poller_closure, run_poller, g_poller, + grpc_schedule_on_exec_ctx); + grpc_timer_init(&g_poller->polling_timer, + grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms, + &g_poller->run_poller_closure); + } +} + +void grpc_client_channel_start_backup_polling( + grpc_pollset_set* interested_parties) { + gpr_once_init(&g_once, init_globals); + if (g_poll_interval_ms == 0) { + return; + } + gpr_mu_lock(&g_poller_mu); + g_poller_init_locked(); + gpr_ref(&g_poller->refs); + /* Get a reference to g_poller->pollset before releasing g_poller_mu to make + * TSAN happy. 
Otherwise, reading from g_poller (i.e g_poller->pollset) after + * releasing the lock and setting g_poller to NULL in g_poller_unref() is + * being flagged as a data-race by TSAN */ + grpc_pollset* pollset = g_poller->pollset; + gpr_mu_unlock(&g_poller_mu); + + grpc_pollset_set_add_pollset(interested_parties, pollset); +} + +void grpc_client_channel_stop_backup_polling( + grpc_pollset_set* interested_parties) { + if (g_poll_interval_ms == 0) { + return; + } + grpc_pollset_set_del_pollset(interested_parties, g_poller->pollset); + g_poller_unref(); +} diff --git a/Sources/CgRPC/src/core/tsi/gts_transport_security.h b/Sources/CgRPC/src/core/ext/filters/client_channel/backup_poller.h similarity index 52% rename from Sources/CgRPC/src/core/tsi/gts_transport_security.h rename to Sources/CgRPC/src/core/ext/filters/client_channel/backup_poller.h index 538e1030b..8f132f968 100644 --- a/Sources/CgRPC/src/core/tsi/gts_transport_security.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/backup_poller.h @@ -16,22 +16,20 @@ * */ -#ifndef GRPC_CORE_TSI_GTS_TRANSPORT_SECURITY_H -#define GRPC_CORE_TSI_GTS_TRANSPORT_SECURITY_H +#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H +#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H + +#include #include -#include -#include +#include "src/core/lib/channel/channel_stack.h" -typedef struct gts_shared_resource { - gpr_thd_id thread_id; - grpc_channel *channel; - grpc_completion_queue *cq; - gpr_mu mu; -} gts_shared_resource; +/* Start polling \a interested_parties periodically in the timer thread */ +void grpc_client_channel_start_backup_polling( + grpc_pollset_set* interested_parties); -/* This method returns the address of gts_shared_resource object shared by all - * TSI handshakes. */ -gts_shared_resource *gts_get_shared_resource(void); +/* Stop polling \a interested_parties */ +void grpc_client_channel_stop_backup_polling( + grpc_pollset_set* interested_parties); -#endif /* GRPC_CORE_TSI_GTS_TRANSPORT_SECURITY_H */ +#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/channel_connectivity.c b/Sources/CgRPC/src/core/ext/filters/client_channel/channel_connectivity.cc similarity index 64% rename from Sources/CgRPC/src/core/ext/filters/client_channel/channel_connectivity.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/channel_connectivity.cc index 3844b9802..37860e82e 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/channel_connectivity.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/channel_connectivity.cc @@ -16,8 +16,12 @@ * */ +#include + #include "src/core/lib/surface/channel.h" +#include + #include #include @@ -27,26 +31,26 @@ #include "src/core/lib/surface/completion_queue.h" grpc_connectivity_state grpc_channel_check_connectivity_state( - grpc_channel *channel, int try_to_connect) { + grpc_channel* channel, int try_to_connect) { /* forward through to the underlying client channel */ - grpc_channel_element *client_channel_elem = + grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; grpc_connectivity_state state; GRPC_API_TRACE( "grpc_channel_check_connectivity_state(channel=%p, try_to_connect=%d)", 2, (channel, try_to_connect)); if (client_channel_elem->filter == &grpc_client_channel_filter) { - state = grpc_client_channel_check_connectivity_state( - &exec_ctx, 
client_channel_elem, try_to_connect); - grpc_exec_ctx_finish(&exec_ctx); + state = grpc_client_channel_check_connectivity_state(client_channel_elem, + try_to_connect); + return state; } gpr_log(GPR_ERROR, "grpc_channel_check_connectivity_state called on something that is " "not a client channel, but '%s'", client_channel_elem->filter->name); - grpc_exec_ctx_finish(&exec_ctx); + return GRPC_CHANNEL_SHUTDOWN; } @@ -56,7 +60,8 @@ typedef enum { CALLING_BACK_AND_FINISHED, } callback_phase; -typedef struct { +namespace { +struct state_watcher { gpr_mu mu; callback_phase phase; grpc_closure on_complete; @@ -64,19 +69,19 @@ typedef struct { grpc_closure watcher_timer_init; grpc_timer alarm; grpc_connectivity_state state; - grpc_completion_queue *cq; + grpc_completion_queue* cq; grpc_cq_completion completion_storage; - grpc_channel *channel; - grpc_error *error; - void *tag; -} state_watcher; - -static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) { - grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element( + grpc_channel* channel; + grpc_error* error; + void* tag; +}; +} // namespace + +static void delete_state_watcher(state_watcher* w) { + grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element( grpc_channel_get_channel_stack(w->channel)); if (client_channel_elem->filter == &grpc_client_channel_filter) { - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->channel, - "watch_channel_connectivity"); + GRPC_CHANNEL_INTERNAL_UNREF(w->channel, "watch_channel_connectivity"); } else { abort(); } @@ -84,10 +89,9 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) { gpr_free(w); } -static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw, - grpc_cq_completion *ignored) { +static void finished_completion(void* pw, grpc_cq_completion* ignored) { bool should_delete = false; - state_watcher *w = (state_watcher *)pw; + state_watcher* w = static_cast(pw); gpr_mu_lock(&w->mu); switch (w->phase) { case WAITING: @@ -100,27 +104,27 @@ static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw, gpr_mu_unlock(&w->mu); if (should_delete) { - delete_state_watcher(exec_ctx, w); + delete_state_watcher(w); } } -static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w, - bool due_to_completion, grpc_error *error) { +static void partly_done(state_watcher* w, bool due_to_completion, + grpc_error* error) { if (due_to_completion) { - grpc_timer_cancel(exec_ctx, &w->alarm); + grpc_timer_cancel(&w->alarm); } else { - grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element( + grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element( grpc_channel_get_channel_stack(w->channel)); grpc_client_channel_watch_connectivity_state( - exec_ctx, client_channel_elem, - grpc_polling_entity_create_from_pollset(grpc_cq_pollset(w->cq)), NULL, - &w->on_complete, NULL); + client_channel_elem, + grpc_polling_entity_create_from_pollset(grpc_cq_pollset(w->cq)), + nullptr, &w->on_complete, nullptr); } gpr_mu_lock(&w->mu); if (due_to_completion) { - if (GRPC_TRACER_ON(grpc_trace_operation_failures)) { + if (grpc_trace_operation_failures.enabled()) { GRPC_LOG_IF_ERROR("watch_completion_error", GRPC_ERROR_REF(error)); } GRPC_ERROR_UNREF(error); @@ -147,7 +151,7 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w, w->error = error; } w->phase = CALLING_BACK_AND_FINISHED; - grpc_cq_end_op(exec_ctx, w->cq, w->tag, w->error, finished_completion, w, + grpc_cq_end_op(w->cq, w->tag, w->error, 
finished_completion, w, &w->completion_storage); break; case CALLING_BACK_AND_FINISHED: @@ -159,51 +163,47 @@ static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w, GRPC_ERROR_UNREF(error); } -static void watch_complete(grpc_exec_ctx *exec_ctx, void *pw, - grpc_error *error) { - partly_done(exec_ctx, (state_watcher *)pw, true, GRPC_ERROR_REF(error)); +static void watch_complete(void* pw, grpc_error* error) { + partly_done(static_cast(pw), true, GRPC_ERROR_REF(error)); } -static void timeout_complete(grpc_exec_ctx *exec_ctx, void *pw, - grpc_error *error) { - partly_done(exec_ctx, (state_watcher *)pw, false, GRPC_ERROR_REF(error)); +static void timeout_complete(void* pw, grpc_error* error) { + partly_done(static_cast(pw), false, GRPC_ERROR_REF(error)); } -int grpc_channel_num_external_connectivity_watchers(grpc_channel *channel) { - grpc_channel_element *client_channel_elem = +int grpc_channel_num_external_connectivity_watchers(grpc_channel* channel) { + grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel)); return grpc_client_channel_num_external_connectivity_watchers( client_channel_elem); } typedef struct watcher_timer_init_arg { - state_watcher *w; + state_watcher* w; gpr_timespec deadline; } watcher_timer_init_arg; -static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error_ignored) { - watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg; +static void watcher_timer_init(void* arg, grpc_error* error_ignored) { + watcher_timer_init_arg* wa = static_cast(arg); - grpc_timer_init(exec_ctx, &wa->w->alarm, - gpr_convert_clock_type(wa->deadline, GPR_CLOCK_MONOTONIC), - &wa->w->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&wa->w->alarm, grpc_timespec_to_millis_round_up(wa->deadline), + &wa->w->on_timeout); gpr_free(wa); } -int grpc_channel_support_connectivity_watcher(grpc_channel *channel) { - grpc_channel_element *client_channel_elem = +int grpc_channel_support_connectivity_watcher(grpc_channel* channel) { + grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel)); return client_channel_elem->filter != &grpc_client_channel_filter ? 
0 : 1; } void grpc_channel_watch_connectivity_state( - grpc_channel *channel, grpc_connectivity_state last_observed_state, - gpr_timespec deadline, grpc_completion_queue *cq, void *tag) { - grpc_channel_element *client_channel_elem = + grpc_channel* channel, grpc_connectivity_state last_observed_state, + gpr_timespec deadline, grpc_completion_queue* cq, void* tag) { + grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w)); + grpc_core::ExecCtx exec_ctx; + state_watcher* w = static_cast<state_watcher*>(gpr_malloc(sizeof(*w))); GRPC_API_TRACE( "grpc_channel_watch_connectivity_state(" @@ -211,8 +211,9 @@ void grpc_channel_watch_connectivity_state( "deadline=gpr_timespec { tv_sec: %" PRId64 ", tv_nsec: %d, clock_type: %d }, " "cq=%p, tag=%p)", - 7, (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec, - (int)deadline.clock_type, cq, tag)); + 7, + (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec, + (int)deadline.clock_type, cq, tag)); GPR_ASSERT(grpc_cq_begin_op(cq, tag)); @@ -226,10 +227,10 @@ void grpc_channel_watch_connectivity_state( w->cq = cq; w->tag = tag; w->channel = channel; - w->error = NULL; + w->error = nullptr; - watcher_timer_init_arg *wa = - (watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg)); + watcher_timer_init_arg* wa = static_cast<watcher_timer_init_arg*>( + gpr_malloc(sizeof(watcher_timer_init_arg))); wa->w = w; wa->deadline = deadline; GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa, @@ -238,12 +239,10 @@ void grpc_channel_watch_connectivity_state( if (client_channel_elem->filter == &grpc_client_channel_filter) { GRPC_CHANNEL_INTERNAL_REF(channel, "watch_channel_connectivity"); grpc_client_channel_watch_connectivity_state( - &exec_ctx, client_channel_elem, + client_channel_elem, grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq)), &w->state, &w->on_complete, &w->watcher_timer_init); } else { abort(); } - - grpc_exec_ctx_finish(&exec_ctx); } diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.c b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.c deleted file mode 100644 index 016199b1f..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.c +++ /dev/null @@ -1,1657 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/ext/filters/client_channel/client_channel.h" - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "src/core/ext/filters/client_channel/http_connect_handshaker.h" -#include "src/core/ext/filters/client_channel/lb_policy_registry.h" -#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" -#include "src/core/ext/filters/client_channel/resolver_registry.h" -#include "src/core/ext/filters/client_channel/retry_throttle.h" -#include "src/core/ext/filters/client_channel/subchannel.h" -#include "src/core/ext/filters/deadline/deadline_filter.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/channel/connected_channel.h" -#include "src/core/lib/iomgr/combiner.h" -#include "src/core/lib/iomgr/iomgr.h" -#include "src/core/lib/iomgr/polling_entity.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/string.h" -#include "src/core/lib/surface/channel.h" -#include "src/core/lib/transport/connectivity_state.h" -#include "src/core/lib/transport/metadata.h" -#include "src/core/lib/transport/metadata_batch.h" -#include "src/core/lib/transport/service_config.h" -#include "src/core/lib/transport/static_metadata.h" - -/* Client channel implementation */ - -grpc_tracer_flag grpc_client_channel_trace = - GRPC_TRACER_INITIALIZER(false, "client_channel"); - -/************************************************************************* - * METHOD-CONFIG TABLE - */ - -typedef enum { - /* zero so it can be default initialized */ - WAIT_FOR_READY_UNSET = 0, - WAIT_FOR_READY_FALSE, - WAIT_FOR_READY_TRUE -} wait_for_ready_value; - -typedef struct { - gpr_refcount refs; - gpr_timespec timeout; - wait_for_ready_value wait_for_ready; -} method_parameters; - -static method_parameters *method_parameters_ref( - method_parameters *method_params) { - gpr_ref(&method_params->refs); - return method_params; -} - -static void method_parameters_unref(method_parameters *method_params) { - if (gpr_unref(&method_params->refs)) { - gpr_free(method_params); - } -} - -static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) { - method_parameters_unref((method_parameters *)value); -} - -static bool parse_wait_for_ready(grpc_json *field, - wait_for_ready_value *wait_for_ready) { - if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) { - return false; - } - *wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE - : WAIT_FOR_READY_FALSE; - return true; -} - -static bool parse_timeout(grpc_json *field, gpr_timespec *timeout) { - if (field->type != GRPC_JSON_STRING) return false; - size_t len = strlen(field->value); - if (field->value[len - 1] != 's') return false; - char *buf = gpr_strdup(field->value); - buf[len - 1] = '\0'; // Remove trailing 's'. - char *decimal_point = strchr(buf, '.'); - if (decimal_point != NULL) { - *decimal_point = '\0'; - timeout->tv_nsec = gpr_parse_nonnegative_int(decimal_point + 1); - if (timeout->tv_nsec == -1) { - gpr_free(buf); - return false; - } - // There should always be exactly 3, 6, or 9 fractional digits. - int multiplier = 1; - switch (strlen(decimal_point + 1)) { - case 9: - break; - case 6: - multiplier *= 1000; - break; - case 3: - multiplier *= 1000000; - break; - default: // Unsupported number of digits. 
- gpr_free(buf); - return false; - } - timeout->tv_nsec *= multiplier; - } - timeout->tv_sec = gpr_parse_nonnegative_int(buf); - gpr_free(buf); - if (timeout->tv_sec == -1) return false; - return true; -} - -static void *method_parameters_create_from_json(const grpc_json *json) { - wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET; - gpr_timespec timeout = {0, 0, GPR_TIMESPAN}; - for (grpc_json *field = json->child; field != NULL; field = field->next) { - if (field->key == NULL) continue; - if (strcmp(field->key, "waitForReady") == 0) { - if (wait_for_ready != WAIT_FOR_READY_UNSET) return NULL; // Duplicate. - if (!parse_wait_for_ready(field, &wait_for_ready)) return NULL; - } else if (strcmp(field->key, "timeout") == 0) { - if (timeout.tv_sec > 0 || timeout.tv_nsec > 0) return NULL; // Duplicate. - if (!parse_timeout(field, &timeout)) return NULL; - } - } - method_parameters *value = - (method_parameters *)gpr_malloc(sizeof(method_parameters)); - gpr_ref_init(&value->refs, 1); - value->timeout = timeout; - value->wait_for_ready = wait_for_ready; - return value; -} - -struct external_connectivity_watcher; - -/************************************************************************* - * CHANNEL-WIDE FUNCTIONS - */ - -typedef struct client_channel_channel_data { - /** resolver for this channel */ - grpc_resolver *resolver; - /** have we started resolving this channel */ - bool started_resolving; - /** is deadline checking enabled? */ - bool deadline_checking_enabled; - /** client channel factory */ - grpc_client_channel_factory *client_channel_factory; - - /** combiner protecting all variables below in this data structure */ - grpc_combiner *combiner; - /** currently active load balancer */ - grpc_lb_policy *lb_policy; - /** retry throttle data */ - grpc_server_retry_throttle_data *retry_throttle_data; - /** maps method names to method_parameters structs */ - grpc_slice_hash_table *method_params_table; - /** incoming resolver result - set by resolver.next() */ - grpc_channel_args *resolver_result; - /** a list of closures that are all waiting for resolver result to come in */ - grpc_closure_list waiting_for_resolver_result_closures; - /** resolver callback */ - grpc_closure on_resolver_result_changed; - /** connectivity state being tracked */ - grpc_connectivity_state_tracker state_tracker; - /** when an lb_policy arrives, should we try to exit idle */ - bool exit_idle_when_lb_policy_arrives; - /** owning stack */ - grpc_channel_stack *owning_stack; - /** interested parties (owned) */ - grpc_pollset_set *interested_parties; - - /* external_connectivity_watcher_list head is guarded by its own mutex, since - * counts need to be grabbed immediately without polling on a cq */ - gpr_mu external_connectivity_watcher_list_mu; - struct external_connectivity_watcher *external_connectivity_watcher_list_head; - - /* the following properties are guarded by a mutex since API's require them - to be instantaneously available */ - gpr_mu info_mu; - char *info_lb_policy_name; - /** service config in JSON form */ - char *info_service_config_json; -} channel_data; - -/** We create one watcher for each new lb_policy that is returned from a - resolver, to watch for state changes from the lb_policy. When a state - change is seen, we update the channel, and create a new watcher. 
*/ -typedef struct { - channel_data *chand; - grpc_closure on_changed; - grpc_connectivity_state state; - grpc_lb_policy *lb_policy; -} lb_policy_connectivity_watcher; - -static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand, - grpc_lb_policy *lb_policy, - grpc_connectivity_state current_state); - -static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx, - channel_data *chand, - grpc_connectivity_state state, - grpc_error *error, - const char *reason) { - /* TODO: Improve failure handling: - * - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE. - * - Hand over pending picks from old policies during the switch that happens - * when resolver provides an update. */ - if (chand->lb_policy != NULL) { - if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) { - /* cancel picks with wait_for_ready=false */ - grpc_lb_policy_cancel_picks_locked( - exec_ctx, chand->lb_policy, - /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY, - /* check= */ 0, GRPC_ERROR_REF(error)); - } else if (state == GRPC_CHANNEL_SHUTDOWN) { - /* cancel all picks */ - grpc_lb_policy_cancel_picks_locked(exec_ctx, chand->lb_policy, - /* mask= */ 0, /* check= */ 0, - GRPC_ERROR_REF(error)); - } - } - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand, - grpc_connectivity_state_name(state)); - } - grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error, - reason); -} - -static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg; - grpc_connectivity_state publish_state = w->state; - /* check if the notification is for the latest policy */ - if (w->lb_policy == w->chand->lb_policy) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand, - w->lb_policy, grpc_connectivity_state_name(w->state)); - } - if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) { - publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE; - grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver); - GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel"); - w->chand->lb_policy = NULL; - } - set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state, - GRPC_ERROR_REF(error), "lb_changed"); - if (w->state != GRPC_CHANNEL_SHUTDOWN) { - watch_lb_policy_locked(exec_ctx, w->chand, w->lb_policy, w->state); - } - } - GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy"); - gpr_free(w); -} - -static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand, - grpc_lb_policy *lb_policy, - grpc_connectivity_state current_state) { - lb_policy_connectivity_watcher *w = - (lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w)); - GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy"); - w->chand = chand; - GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w, - grpc_combiner_scheduler(chand->combiner)); - w->state = current_state; - w->lb_policy = lb_policy; - grpc_lb_policy_notify_on_state_change_locked(exec_ctx, lb_policy, &w->state, - &w->on_changed); -} - -static void start_resolving_locked(grpc_exec_ctx *exec_ctx, - channel_data *chand) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand); - } - GPR_ASSERT(!chand->started_resolving); - chand->started_resolving = 
true; - GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); - grpc_resolver_next_locked(exec_ctx, chand->resolver, &chand->resolver_result, - &chand->on_resolver_result_changed); -} - -typedef struct { - char *server_name; - grpc_server_retry_throttle_data *retry_throttle_data; -} service_config_parsing_state; - -static void parse_retry_throttle_params(const grpc_json *field, void *arg) { - service_config_parsing_state *parsing_state = - (service_config_parsing_state *)arg; - if (strcmp(field->key, "retryThrottling") == 0) { - if (parsing_state->retry_throttle_data != NULL) return; // Duplicate. - if (field->type != GRPC_JSON_OBJECT) return; - int max_milli_tokens = 0; - int milli_token_ratio = 0; - for (grpc_json *sub_field = field->child; sub_field != NULL; - sub_field = sub_field->next) { - if (sub_field->key == NULL) return; - if (strcmp(sub_field->key, "maxTokens") == 0) { - if (max_milli_tokens != 0) return; // Duplicate. - if (sub_field->type != GRPC_JSON_NUMBER) return; - max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value); - if (max_milli_tokens == -1) return; - max_milli_tokens *= 1000; - } else if (strcmp(sub_field->key, "tokenRatio") == 0) { - if (milli_token_ratio != 0) return; // Duplicate. - if (sub_field->type != GRPC_JSON_NUMBER) return; - // We support up to 3 decimal digits. - size_t whole_len = strlen(sub_field->value); - uint32_t multiplier = 1; - uint32_t decimal_value = 0; - const char *decimal_point = strchr(sub_field->value, '.'); - if (decimal_point != NULL) { - whole_len = (size_t)(decimal_point - sub_field->value); - multiplier = 1000; - size_t decimal_len = strlen(decimal_point + 1); - if (decimal_len > 3) decimal_len = 3; - if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len, - &decimal_value)) { - return; - } - uint32_t decimal_multiplier = 1; - for (size_t i = 0; i < (3 - decimal_len); ++i) { - decimal_multiplier *= 10; - } - decimal_value *= decimal_multiplier; - } - uint32_t whole_value; - if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len, - &whole_value)) { - return; - } - milli_token_ratio = (int)((whole_value * multiplier) + decimal_value); - if (milli_token_ratio <= 0) return; - } - } - parsing_state->retry_throttle_data = - grpc_retry_throttle_map_get_data_for_server( - parsing_state->server_name, max_milli_tokens, milli_token_ratio); - } -} - -static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - channel_data *chand = (channel_data *)arg; - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand, - grpc_error_string(error)); - } - // Extract the following fields from the resolver result, if non-NULL. - bool lb_policy_updated = false; - char *lb_policy_name_dup = NULL; - bool lb_policy_name_changed = false; - grpc_lb_policy *new_lb_policy = NULL; - char *service_config_json = NULL; - grpc_server_retry_throttle_data *retry_throttle_data = NULL; - grpc_slice_hash_table *method_params_table = NULL; - if (chand->resolver_result != NULL) { - // Find LB policy name. - const char *lb_policy_name = NULL; - const grpc_arg *channel_arg = - grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME); - if (channel_arg != NULL) { - GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING); - lb_policy_name = channel_arg->value.string; - } - // Special case: If at least one balancer address is present, we use - // the grpclb policy, regardless of what the resolver actually specified. 
- channel_arg = - grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES); - if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) { - grpc_lb_addresses *addresses = - (grpc_lb_addresses *)channel_arg->value.pointer.p; - bool found_balancer_address = false; - for (size_t i = 0; i < addresses->num_addresses; ++i) { - if (addresses->addresses[i].is_balancer) { - found_balancer_address = true; - break; - } - } - if (found_balancer_address) { - if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) { - gpr_log(GPR_INFO, - "resolver requested LB policy %s but provided at least one " - "balancer address -- forcing use of grpclb LB policy", - lb_policy_name); - } - lb_policy_name = "grpclb"; - } - } - // Use pick_first if nothing was specified and we didn't select grpclb - // above. - if (lb_policy_name == NULL) lb_policy_name = "pick_first"; - grpc_lb_policy_args lb_policy_args; - lb_policy_args.args = chand->resolver_result; - lb_policy_args.client_channel_factory = chand->client_channel_factory; - lb_policy_args.combiner = chand->combiner; - // Check to see if we're already using the right LB policy. - // Note: It's safe to use chand->info_lb_policy_name here without - // taking a lock on chand->info_mu, because this function is the - // only thing that modifies its value, and it can only be invoked - // once at any given time. - lb_policy_name_changed = - chand->info_lb_policy_name == NULL || - strcmp(chand->info_lb_policy_name, lb_policy_name) != 0; - if (chand->lb_policy != NULL && !lb_policy_name_changed) { - // Continue using the same LB policy. Update with new addresses. - lb_policy_updated = true; - grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy, &lb_policy_args); - } else { - // Instantiate new LB policy. - new_lb_policy = - grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args); - if (new_lb_policy == NULL) { - gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name); - } - } - // Find service config. - channel_arg = - grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG); - if (channel_arg != NULL) { - GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING); - service_config_json = gpr_strdup(channel_arg->value.string); - grpc_service_config *service_config = - grpc_service_config_create(service_config_json); - if (service_config != NULL) { - channel_arg = - grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI); - GPR_ASSERT(channel_arg != NULL); - GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING); - grpc_uri *uri = - grpc_uri_parse(exec_ctx, channel_arg->value.string, true); - GPR_ASSERT(uri->path[0] != '\0'); - service_config_parsing_state parsing_state; - memset(&parsing_state, 0, sizeof(parsing_state)); - parsing_state.server_name = - uri->path[0] == '/' ? uri->path + 1 : uri->path; - grpc_service_config_parse_global_params( - service_config, parse_retry_throttle_params, &parsing_state); - grpc_uri_destroy(uri); - retry_throttle_data = parsing_state.retry_throttle_data; - method_params_table = grpc_service_config_create_method_config_table( - exec_ctx, service_config, method_parameters_create_from_json, - method_parameters_free); - grpc_service_config_destroy(service_config); - } - } - // Before we clean up, save a copy of lb_policy_name, since it might - // be pointing to data inside chand->resolver_result. - // The copy will be saved in chand->lb_policy_name below. 
- lb_policy_name_dup = gpr_strdup(lb_policy_name); - grpc_channel_args_destroy(exec_ctx, chand->resolver_result); - chand->resolver_result = NULL; - } - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p: resolver result: lb_policy_name=\"%s\"%s, " - "service_config=\"%s\"", - chand, lb_policy_name_dup, - lb_policy_name_changed ? " (changed)" : "", service_config_json); - } - // Now swap out fields in chand. Note that the new values may still - // be NULL if (e.g.) the resolver failed to return results or the - // results did not contain the necessary data. - // - // First, swap out the data used by cc_get_channel_info(). - gpr_mu_lock(&chand->info_mu); - if (lb_policy_name_dup != NULL) { - gpr_free(chand->info_lb_policy_name); - chand->info_lb_policy_name = lb_policy_name_dup; - } - if (service_config_json != NULL) { - gpr_free(chand->info_service_config_json); - chand->info_service_config_json = service_config_json; - } - gpr_mu_unlock(&chand->info_mu); - // Swap out the retry throttle data. - if (chand->retry_throttle_data != NULL) { - grpc_server_retry_throttle_data_unref(chand->retry_throttle_data); - } - chand->retry_throttle_data = retry_throttle_data; - // Swap out the method params table. - if (chand->method_params_table != NULL) { - grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table); - } - chand->method_params_table = method_params_table; - // If we have a new LB policy or are shutting down (in which case - // new_lb_policy will be NULL), swap out the LB policy, unreffing the - // old one and removing its fds from chand->interested_parties. - // Note that we do NOT do this if either (a) we updated the existing - // LB policy above or (b) we failed to create the new LB policy (in - // which case we want to continue using the most recent one we had). - if (new_lb_policy != NULL || error != GRPC_ERROR_NONE || - chand->resolver == NULL) { - if (chand->lb_policy != NULL) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand, - chand->lb_policy); - } - grpc_pollset_set_del_pollset_set(exec_ctx, - chand->lb_policy->interested_parties, - chand->interested_parties); - GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel"); - } - chand->lb_policy = new_lb_policy; - } - // Now that we've swapped out the relevant fields of chand, check for - // error or shutdown. - if (error != GRPC_ERROR_NONE || chand->resolver == NULL) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand); - } - if (chand->resolver != NULL) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand); - } - grpc_resolver_shutdown_locked(exec_ctx, chand->resolver); - GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel"); - chand->resolver = NULL; - } - set_channel_connectivity_state_locked( - exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Got resolver result after disconnection", &error, 1), - "resolver_gone"); - GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver"); - grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Channel disconnected", &error, 1)); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, - &chand->waiting_for_resolver_result_closures); - } else { // Not shutting down. 
- grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE; - grpc_error *state_error = - GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy"); - if (new_lb_policy != NULL) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand); - } - GRPC_ERROR_UNREF(state_error); - state = grpc_lb_policy_check_connectivity_locked(exec_ctx, new_lb_policy, - &state_error); - grpc_pollset_set_add_pollset_set(exec_ctx, - new_lb_policy->interested_parties, - chand->interested_parties); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, - &chand->waiting_for_resolver_result_closures); - if (chand->exit_idle_when_lb_policy_arrives) { - grpc_lb_policy_exit_idle_locked(exec_ctx, new_lb_policy); - chand->exit_idle_when_lb_policy_arrives = false; - } - watch_lb_policy_locked(exec_ctx, chand, new_lb_policy, state); - } - if (!lb_policy_updated) { - set_channel_connectivity_state_locked(exec_ctx, chand, state, - GRPC_ERROR_REF(state_error), - "new_lb+resolver"); - } - grpc_resolver_next_locked(exec_ctx, chand->resolver, - &chand->resolver_result, - &chand->on_resolver_result_changed); - GRPC_ERROR_UNREF(state_error); - } -} - -static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error_ignored) { - grpc_transport_op *op = (grpc_transport_op *)arg; - grpc_channel_element *elem = - (grpc_channel_element *)op->handler_private.extra_arg; - channel_data *chand = (channel_data *)elem->channel_data; - - if (op->on_connectivity_state_change != NULL) { - grpc_connectivity_state_notify_on_state_change( - exec_ctx, &chand->state_tracker, op->connectivity_state, - op->on_connectivity_state_change); - op->on_connectivity_state_change = NULL; - op->connectivity_state = NULL; - } - - if (op->send_ping != NULL) { - if (chand->lb_policy == NULL) { - GRPC_CLOSURE_SCHED( - exec_ctx, op->send_ping, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing")); - } else { - grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping); - op->bind_pollset = NULL; - } - op->send_ping = NULL; - } - - if (op->disconnect_with_error != GRPC_ERROR_NONE) { - if (chand->resolver != NULL) { - set_channel_connectivity_state_locked( - exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN, - GRPC_ERROR_REF(op->disconnect_with_error), "disconnect"); - grpc_resolver_shutdown_locked(exec_ctx, chand->resolver); - GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel"); - chand->resolver = NULL; - if (!chand->started_resolving) { - grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures, - GRPC_ERROR_REF(op->disconnect_with_error)); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, - &chand->waiting_for_resolver_result_closures); - } - if (chand->lb_policy != NULL) { - grpc_pollset_set_del_pollset_set(exec_ctx, - chand->lb_policy->interested_parties, - chand->interested_parties); - GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel"); - chand->lb_policy = NULL; - } - } - GRPC_ERROR_UNREF(op->disconnect_with_error); - } - GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "start_transport_op"); - - GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); -} - -static void cc_start_transport_op(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_transport_op *op) { - channel_data *chand = (channel_data *)elem->channel_data; - - GPR_ASSERT(op->set_accept_stream == false); - if (op->bind_pollset != NULL) { - grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, - op->bind_pollset); - } - - 
op->handler_private.extra_arg = elem; - GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op"); - GRPC_CLOSURE_SCHED( - exec_ctx, - GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked, - op, grpc_combiner_scheduler(chand->combiner)), - GRPC_ERROR_NONE); -} - -static void cc_get_channel_info(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - const grpc_channel_info *info) { - channel_data *chand = (channel_data *)elem->channel_data; - gpr_mu_lock(&chand->info_mu); - if (info->lb_policy_name != NULL) { - *info->lb_policy_name = chand->info_lb_policy_name == NULL - ? NULL - : gpr_strdup(chand->info_lb_policy_name); - } - if (info->service_config_json != NULL) { - *info->service_config_json = - chand->info_service_config_json == NULL - ? NULL - : gpr_strdup(chand->info_service_config_json); - } - gpr_mu_unlock(&chand->info_mu); -} - -/* Constructor for channel_data */ -static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - channel_data *chand = (channel_data *)elem->channel_data; - GPR_ASSERT(args->is_last); - GPR_ASSERT(elem->filter == &grpc_client_channel_filter); - // Initialize data members. - chand->combiner = grpc_combiner_create(); - gpr_mu_init(&chand->info_mu); - gpr_mu_init(&chand->external_connectivity_watcher_list_mu); - - gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); - chand->external_connectivity_watcher_list_head = NULL; - gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); - - chand->owning_stack = args->channel_stack; - GRPC_CLOSURE_INIT(&chand->on_resolver_result_changed, - on_resolver_result_changed_locked, chand, - grpc_combiner_scheduler(chand->combiner)); - chand->interested_parties = grpc_pollset_set_create(); - grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, - "client_channel"); - // Record client channel factory. - const grpc_arg *arg = grpc_channel_args_find(args->channel_args, - GRPC_ARG_CLIENT_CHANNEL_FACTORY); - if (arg == NULL) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Missing client channel factory in args for client channel filter"); - } - if (arg->type != GRPC_ARG_POINTER) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "client channel factory arg must be a pointer"); - } - grpc_client_channel_factory_ref( - (grpc_client_channel_factory *)arg->value.pointer.p); - chand->client_channel_factory = - (grpc_client_channel_factory *)arg->value.pointer.p; - // Get server name to resolve, using proxy mapper if needed. - arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI); - if (arg == NULL) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Missing server uri in args for client channel filter"); - } - if (arg->type != GRPC_ARG_STRING) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "server uri arg must be a string"); - } - char *proxy_name = NULL; - grpc_channel_args *new_args = NULL; - grpc_proxy_mappers_map_name(exec_ctx, arg->value.string, args->channel_args, - &proxy_name, &new_args); - // Instantiate resolver. - chand->resolver = grpc_resolver_create( - exec_ctx, proxy_name != NULL ? proxy_name : arg->value.string, - new_args != NULL ? 
new_args : args->channel_args, - chand->interested_parties, chand->combiner); - if (proxy_name != NULL) gpr_free(proxy_name); - if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args); - if (chand->resolver == NULL) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed"); - } - chand->deadline_checking_enabled = - grpc_deadline_checking_enabled(args->channel_args); - return GRPC_ERROR_NONE; -} - -static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_resolver *resolver = (grpc_resolver *)arg; - grpc_resolver_shutdown_locked(exec_ctx, resolver); - GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel"); -} - -/* Destructor for channel_data */ -static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { - channel_data *chand = (channel_data *)elem->channel_data; - if (chand->resolver != NULL) { - GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver, - grpc_combiner_scheduler(chand->combiner)), - GRPC_ERROR_NONE); - } - if (chand->client_channel_factory != NULL) { - grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory); - } - if (chand->lb_policy != NULL) { - grpc_pollset_set_del_pollset_set(exec_ctx, - chand->lb_policy->interested_parties, - chand->interested_parties); - GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel"); - } - gpr_free(chand->info_lb_policy_name); - gpr_free(chand->info_service_config_json); - if (chand->retry_throttle_data != NULL) { - grpc_server_retry_throttle_data_unref(chand->retry_throttle_data); - } - if (chand->method_params_table != NULL) { - grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table); - } - grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker); - grpc_pollset_set_destroy(exec_ctx, chand->interested_parties); - GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel"); - gpr_mu_destroy(&chand->info_mu); - gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu); -} - -/************************************************************************* - * PER-CALL FUNCTIONS - */ - -// Max number of batches that can be pending on a call at any given -// time. This includes: -// recv_initial_metadata -// send_initial_metadata -// recv_message -// send_message -// recv_trailing_metadata -// send_trailing_metadata -// We also add room for a single cancel_stream batch. -#define MAX_WAITING_BATCHES 7 - -/** Call data. Holds a pointer to grpc_subchannel_call and the - associated machinery to create such a pointer. - Handles queueing of stream ops until a call object is ready, waiting - for initial metadata before trying to create a call object, - and handling cancellation gracefully. */ -typedef struct client_channel_call_data { - // State for handling deadlines. - // The code in deadline_filter.c requires this to be the first field. - // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state - // and this struct both independently store pointers to the call stack - // and call combiner. If/when we have time, find a way to avoid this - // without breaking the grpc_deadline_state abstraction. - grpc_deadline_state deadline_state; - - grpc_slice path; // Request path. 
- gpr_timespec call_start_time; - gpr_timespec deadline; - gpr_arena *arena; - grpc_call_stack *owning_call; - grpc_call_combiner *call_combiner; - - grpc_server_retry_throttle_data *retry_throttle_data; - method_parameters *method_params; - - grpc_subchannel_call *subchannel_call; - grpc_error *error; - - grpc_lb_policy *lb_policy; // Holds ref while LB pick is pending. - grpc_closure lb_pick_closure; - grpc_closure lb_pick_cancel_closure; - - grpc_connected_subchannel *connected_subchannel; - grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT]; - grpc_polling_entity *pollent; - - grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES]; - size_t waiting_for_pick_batches_count; - grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES]; - - grpc_transport_stream_op_batch *initial_metadata_batch; - - grpc_linked_mdelem lb_token_mdelem; - - grpc_closure on_complete; - grpc_closure *original_on_complete; -} call_data; - -grpc_subchannel_call *grpc_client_channel_get_subchannel_call( - grpc_call_element *elem) { - call_data *calld = (call_data *)elem->call_data; - return calld->subchannel_call; -} - -// This is called via the call combiner, so access to calld is synchronized. -static void waiting_for_pick_batches_add( - call_data *calld, grpc_transport_stream_op_batch *batch) { - if (batch->send_initial_metadata) { - GPR_ASSERT(calld->initial_metadata_batch == NULL); - calld->initial_metadata_batch = batch; - } else { - GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES); - calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] = - batch; - } -} - -// This is called via the call combiner, so access to calld is synchronized. -static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - call_data *calld = (call_data *)arg; - if (calld->waiting_for_pick_batches_count > 0) { - --calld->waiting_for_pick_batches_count; - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, - calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count], - GRPC_ERROR_REF(error), calld->call_combiner); - } -} - -// This is called via the call combiner, so access to calld is synchronized. -static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_error *error) { - call_data *calld = (call_data *)elem->call_data; - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s", - elem->channel_data, calld, calld->waiting_for_pick_batches_count, - grpc_error_string(error)); - } - for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) { - GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i], - fail_pending_batch_in_call_combiner, calld, - grpc_schedule_on_exec_ctx); - GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, - &calld->handle_pending_batch_in_call_combiner[i], - GRPC_ERROR_REF(error), - "waiting_for_pick_batches_fail"); - } - if (calld->initial_metadata_batch != NULL) { - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error), - calld->call_combiner); - } else { - GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, - "waiting_for_pick_batches_fail"); - } - GRPC_ERROR_UNREF(error); -} - -// This is called via the call combiner, so access to calld is synchronized. 
-static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *ignored) { - call_data *calld = (call_data *)arg; - if (calld->waiting_for_pick_batches_count > 0) { - --calld->waiting_for_pick_batches_count; - grpc_subchannel_call_process_op( - exec_ctx, calld->subchannel_call, - calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]); - } -} - -// This is called via the call combiner, so access to calld is synchronized. -static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR - " pending batches to subchannel_call=%p", - chand, calld, calld->waiting_for_pick_batches_count, - calld->subchannel_call); - } - for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) { - GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i], - run_pending_batch_in_call_combiner, calld, - grpc_schedule_on_exec_ctx); - GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, - &calld->handle_pending_batch_in_call_combiner[i], - GRPC_ERROR_NONE, - "waiting_for_pick_batches_resume"); - } - GPR_ASSERT(calld->initial_metadata_batch != NULL); - grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, - calld->initial_metadata_batch); -} - -// Applies service config to the call. Must be invoked once we know -// that the resolver has returned results to the channel. -static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call", - chand, calld); - } - if (chand->retry_throttle_data != NULL) { - calld->retry_throttle_data = - grpc_server_retry_throttle_data_ref(chand->retry_throttle_data); - } - if (chand->method_params_table != NULL) { - calld->method_params = (method_parameters *)grpc_method_config_table_get( - exec_ctx, chand->method_params_table, calld->path); - if (calld->method_params != NULL) { - method_parameters_ref(calld->method_params); - // If the deadline from the service config is shorter than the one - // from the client API, reset the deadline timer. 
- if (chand->deadline_checking_enabled && - gpr_time_cmp(calld->method_params->timeout, - gpr_time_0(GPR_TIMESPAN)) != 0) { - const gpr_timespec per_method_deadline = - gpr_time_add(calld->call_start_time, calld->method_params->timeout); - if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) { - calld->deadline = per_method_deadline; - grpc_deadline_state_reset(exec_ctx, elem, calld->deadline); - } - } - } - } -} - -static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_error *error) { - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - const grpc_connected_subchannel_call_args call_args = { - .pollent = calld->pollent, - .path = calld->path, - .start_time = calld->call_start_time, - .deadline = calld->deadline, - .arena = calld->arena, - .context = calld->subchannel_call_context, - .call_combiner = calld->call_combiner}; - grpc_error *new_error = grpc_connected_subchannel_create_call( - exec_ctx, calld->connected_subchannel, &call_args, - &calld->subchannel_call); - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s", - chand, calld, calld->subchannel_call, grpc_error_string(new_error)); - } - if (new_error != GRPC_ERROR_NONE) { - new_error = grpc_error_add_child(new_error, error); - waiting_for_pick_batches_fail(exec_ctx, elem, new_error); - } else { - waiting_for_pick_batches_resume(exec_ctx, elem); - } - GRPC_ERROR_UNREF(error); -} - -// Invoked when a pick is completed, on both success or failure. -static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_error *error) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - if (calld->connected_subchannel == NULL) { - // Failed to create subchannel. - GRPC_ERROR_UNREF(calld->error); - calld->error = error == GRPC_ERROR_NONE - ? GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Call dropped by load balancing policy") - : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Failed to create subchannel", &error, 1); - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: failed to create subchannel: error=%s", chand, - calld, grpc_error_string(calld->error)); - } - waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error)); - } else { - /* Create call on subchannel. */ - create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); - } - GRPC_ERROR_UNREF(error); -} - -// A wrapper around pick_done_locked() that is used in cases where -// either (a) the pick was deferred pending a resolver result or (b) the -// pick was done asynchronously. Removes the call's polling entity from -// chand->interested_parties before invoking pick_done_locked(). -static void async_pick_done_locked(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, grpc_error *error) { - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent, - chand->interested_parties); - pick_done_locked(exec_ctx, elem, error); -} - -// Note: This runs under the client_channel combiner, but will NOT be -// holding the call combiner. 
-static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - if (calld->lb_policy != NULL) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p", - chand, calld, calld->lb_policy); - } - grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy, - &calld->connected_subchannel, - GRPC_ERROR_REF(error)); - } - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel"); -} - -// Callback invoked by grpc_lb_policy_pick_locked() for async picks. -// Unrefs the LB policy and invokes async_pick_done_locked(). -static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously", - chand, calld); - } - GPR_ASSERT(calld->lb_policy != NULL); - GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); - calld->lb_policy = NULL; - async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); -} - -// Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked(). -// If the pick was completed synchronously, unrefs the LB policy and -// returns true. -static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p", - chand, calld, chand->lb_policy); - } - apply_service_config_to_call_locked(exec_ctx, elem); - // If the application explicitly set wait_for_ready, use that. - // Otherwise, if the service config specified a value for this - // method, use that. - uint32_t initial_metadata_flags = - calld->initial_metadata_batch->payload->send_initial_metadata - .send_initial_metadata_flags; - const bool wait_for_ready_set_from_api = - initial_metadata_flags & - GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET; - const bool wait_for_ready_set_from_service_config = - calld->method_params != NULL && - calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET; - if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) { - if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) { - initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY; - } else { - initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY; - } - } - const grpc_lb_policy_pick_args inputs = { - calld->initial_metadata_batch->payload->send_initial_metadata - .send_initial_metadata, - initial_metadata_flags, &calld->lb_token_mdelem}; - // Keep a ref to the LB policy in calld while the pick is pending. 
- GRPC_LB_POLICY_REF(chand->lb_policy, "pick_subchannel"); - calld->lb_policy = chand->lb_policy; - GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem, - grpc_combiner_scheduler(chand->combiner)); - const bool pick_done = grpc_lb_policy_pick_locked( - exec_ctx, chand->lb_policy, &inputs, &calld->connected_subchannel, - calld->subchannel_call_context, NULL, &calld->lb_pick_closure); - if (pick_done) { - /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */ - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously", - chand, calld); - } - GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel"); - calld->lb_policy = NULL; - } else { - GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel"); - grpc_call_combiner_set_notify_on_cancel( - exec_ctx, calld->call_combiner, - GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure, - pick_callback_cancel_locked, elem, - grpc_combiner_scheduler(chand->combiner))); - } - return pick_done; -} - -typedef struct { - grpc_call_element *elem; - bool finished; - grpc_closure closure; - grpc_closure cancel_closure; -} pick_after_resolver_result_args; - -// Note: This runs under the client_channel combiner, but will NOT be -// holding the call combiner. -static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error) { - pick_after_resolver_result_args *args = - (pick_after_resolver_result_args *)arg; - if (args->finished) { - gpr_free(args); - return; - } - // If we don't yet have a resolver result, then a closure for - // pick_after_resolver_result_done_locked() will have been added to - // chand->waiting_for_resolver_result_closures, and it may not be invoked - // until after this call has been destroyed. We mark the operation as - // finished, so that when pick_after_resolver_result_done_locked() - // is called, it will be a no-op. We also immediately invoke - // async_pick_done_locked() to propagate the error back to the caller. - args->finished = true; - grpc_call_element *elem = args->elem; - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: cancelling pick waiting for resolver result", - chand, calld); - } - // Note: Although we are not in the call combiner here, we are - // basically stealing the call combiner from the pending pick, so - // it's safe to call async_pick_done_locked() here -- we are - // essentially calling it here instead of calling it in - // pick_after_resolver_result_done_locked(). 
- async_pick_done_locked(exec_ctx, elem, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick cancelled", &error, 1)); -} - -static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error) { - pick_after_resolver_result_args *args = - (pick_after_resolver_result_args *)arg; - if (args->finished) { - /* cancelled, do nothing */ - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "call cancelled before resolver result"); - } - gpr_free(args); - return; - } - args->finished = true; - grpc_call_element *elem = args->elem; - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - if (error != GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data", - chand, calld); - } - async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error)); - } else { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick", - chand, calld); - } - if (pick_callback_start_locked(exec_ctx, elem)) { - // Even if the LB policy returns a result synchronously, we have - // already added our polling entity to chand->interested_parties - // in order to wait for the resolver result, so we need to - // remove it here. Therefore, we call async_pick_done_locked() - // instead of pick_done_locked(). - async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE); - } - } -} - -static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: deferring pick pending resolver result", chand, - calld); - } - pick_after_resolver_result_args *args = - (pick_after_resolver_result_args *)gpr_zalloc(sizeof(*args)); - args->elem = elem; - GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked, - args, grpc_combiner_scheduler(chand->combiner)); - grpc_closure_list_append(&chand->waiting_for_resolver_result_closures, - &args->closure, GRPC_ERROR_NONE); - grpc_call_combiner_set_notify_on_cancel( - exec_ctx, calld->call_combiner, - GRPC_CLOSURE_INIT(&args->cancel_closure, - pick_after_resolver_result_cancel_locked, args, - grpc_combiner_scheduler(chand->combiner))); -} - -static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *ignored) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - GPR_ASSERT(calld->connected_subchannel == NULL); - if (chand->lb_policy != NULL) { - // We already have an LB policy, so ask it for a pick. - if (pick_callback_start_locked(exec_ctx, elem)) { - // Pick completed synchronously. - pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE); - return; - } - } else { - // We do not yet have an LB policy, so wait for a resolver result. - if (chand->resolver == NULL) { - pick_done_locked(exec_ctx, elem, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected")); - return; - } - if (!chand->started_resolving) { - start_resolving_locked(exec_ctx, chand); - } - pick_after_resolver_result_start_locked(exec_ctx, elem); - } - // We need to wait for either a resolver result or for an async result - // from the LB policy. 
Add the polling entity from call_data to the - // channel_data's interested_parties, so that the I/O of the LB policy - // and resolver can be done under it. The polling entity will be - // removed in async_pick_done_locked(). - grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent, - chand->interested_parties); -} - -static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; - if (calld->retry_throttle_data != NULL) { - if (error == GRPC_ERROR_NONE) { - grpc_server_retry_throttle_data_record_success( - calld->retry_throttle_data); - } else { - // TODO(roth): In a subsequent PR, check the return value here and - // decide whether or not to retry. Note that we should only - // record failures whose statuses match the configured retryable - // or non-fatal status codes. - grpc_server_retry_throttle_data_record_failure( - calld->retry_throttle_data); - } - } - GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete, - GRPC_ERROR_REF(error)); -} - -static void cc_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - if (chand->deadline_checking_enabled) { - grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem, - batch); - } - GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0); - // If we've previously been cancelled, immediately fail any new batches. - if (calld->error != GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s", - chand, calld, grpc_error_string(calld->error)); - } - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner); - goto done; - } - if (batch->cancel_stream) { - // Stash a copy of cancel_error in our call data, so that we can use - // it for subsequent operations. This ensures that if the call is - // cancelled before any batches are passed down (e.g., if the deadline - // is in the past when the call starts), we can return the right - // error to the caller when the first batch does get passed down. - GRPC_ERROR_UNREF(calld->error); - calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand, - calld, grpc_error_string(calld->error)); - } - // If we have a subchannel call, send the cancellation batch down. - // Otherwise, fail all pending batches. - if (calld->subchannel_call != NULL) { - grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch); - } else { - waiting_for_pick_batches_add(calld, batch); - waiting_for_pick_batches_fail(exec_ctx, elem, - GRPC_ERROR_REF(calld->error)); - } - goto done; - } - // Intercept on_complete for recv_trailing_metadata so that we can - // check retry throttle status. - if (batch->recv_trailing_metadata) { - GPR_ASSERT(batch->on_complete != NULL); - calld->original_on_complete = batch->on_complete; - GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem, - grpc_schedule_on_exec_ctx); - batch->on_complete = &calld->on_complete; - } - // Check if we've already gotten a subchannel call. 
- // Note that once we have completed the pick, we do not need to enter - // the channel combiner, which is more efficient (especially for - // streaming calls). - if (calld->subchannel_call != NULL) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: sending batch to subchannel_call=%p", chand, - calld, calld->subchannel_call); - } - grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch); - goto done; - } - // We do not yet have a subchannel call. - // Add the batch to the waiting-for-pick list. - waiting_for_pick_batches_add(calld, batch); - // For batches containing a send_initial_metadata op, enter the channel - // combiner to start a pick. - if (batch->send_initial_metadata) { - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner", - chand, calld); - } - GRPC_CLOSURE_SCHED( - exec_ctx, - GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked, - elem, grpc_combiner_scheduler(chand->combiner)), - GRPC_ERROR_NONE); - } else { - // For all other batches, release the call combiner. - if (GRPC_TRACER_ON(grpc_client_channel_trace)) { - gpr_log(GPR_DEBUG, - "chand=%p calld=%p: saved batch, yeilding call combiner", chand, - calld); - } - GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, - "batch does not include send_initial_metadata"); - } -done: - GPR_TIMER_END("cc_start_transport_stream_op_batch", 0); -} - -/* Constructor for call_data */ -static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - // Initialize data members. 
- calld->path = grpc_slice_ref_internal(args->path); - calld->call_start_time = args->start_time; - calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC); - calld->arena = args->arena; - calld->owning_call = args->call_stack; - calld->call_combiner = args->call_combiner; - if (chand->deadline_checking_enabled) { - grpc_deadline_state_init(exec_ctx, elem, args->call_stack, - args->call_combiner, calld->deadline); - } - return GRPC_ERROR_NONE; -} - -/* Destructor for call_data */ -static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *then_schedule_closure) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - if (chand->deadline_checking_enabled) { - grpc_deadline_state_destroy(exec_ctx, elem); - } - grpc_slice_unref_internal(exec_ctx, calld->path); - if (calld->method_params != NULL) { - method_parameters_unref(calld->method_params); - } - GRPC_ERROR_UNREF(calld->error); - if (calld->subchannel_call != NULL) { - grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call, - then_schedule_closure); - then_schedule_closure = NULL; - GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call, - "client_channel_destroy_call"); - } - GPR_ASSERT(calld->lb_policy == NULL); - GPR_ASSERT(calld->waiting_for_pick_batches_count == 0); - if (calld->connected_subchannel != NULL) { - GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel, - "picked"); - } - for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) { - if (calld->subchannel_call_context[i].value != NULL) { - calld->subchannel_call_context[i].destroy( - calld->subchannel_call_context[i].value); - } - } - GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE); -} - -static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_polling_entity *pollent) { - call_data *calld = (call_data *)elem->call_data; - calld->pollent = pollent; -} - -/************************************************************************* - * EXPORTED SYMBOLS - */ - -const grpc_channel_filter grpc_client_channel_filter = { - cc_start_transport_stream_op_batch, - cc_start_transport_op, - sizeof(call_data), - cc_init_call_elem, - cc_set_pollset_or_pollset_set, - cc_destroy_call_elem, - sizeof(channel_data), - cc_init_channel_elem, - cc_destroy_channel_elem, - cc_get_channel_info, - "client-channel", -}; - -static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error_ignored) { - channel_data *chand = (channel_data *)arg; - if (chand->lb_policy != NULL) { - grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy); - } else { - chand->exit_idle_when_lb_policy_arrives = true; - if (!chand->started_resolving && chand->resolver != NULL) { - start_resolving_locked(exec_ctx, chand); - } - } - GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect"); -} - -grpc_connectivity_state grpc_client_channel_check_connectivity_state( - grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) { - channel_data *chand = (channel_data *)elem->channel_data; - grpc_connectivity_state out = - grpc_connectivity_state_check(&chand->state_tracker); - if (out == GRPC_CHANNEL_IDLE && try_to_connect) { - GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect"); - GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand, - grpc_combiner_scheduler(chand->combiner)), - 
GRPC_ERROR_NONE); - } - return out; -} - -typedef struct external_connectivity_watcher { - channel_data *chand; - grpc_polling_entity pollent; - grpc_closure *on_complete; - grpc_closure *watcher_timer_init; - grpc_connectivity_state *state; - grpc_closure my_closure; - struct external_connectivity_watcher *next; -} external_connectivity_watcher; - -static external_connectivity_watcher *lookup_external_connectivity_watcher( - channel_data *chand, grpc_closure *on_complete) { - gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); - external_connectivity_watcher *w = - chand->external_connectivity_watcher_list_head; - while (w != NULL && w->on_complete != on_complete) { - w = w->next; - } - gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); - return w; -} - -static void external_connectivity_watcher_list_append( - channel_data *chand, external_connectivity_watcher *w) { - GPR_ASSERT(!lookup_external_connectivity_watcher(chand, w->on_complete)); - - gpr_mu_lock(&w->chand->external_connectivity_watcher_list_mu); - GPR_ASSERT(!w->next); - w->next = chand->external_connectivity_watcher_list_head; - chand->external_connectivity_watcher_list_head = w; - gpr_mu_unlock(&w->chand->external_connectivity_watcher_list_mu); -} - -static void external_connectivity_watcher_list_remove( - channel_data *chand, external_connectivity_watcher *too_remove) { - GPR_ASSERT( - lookup_external_connectivity_watcher(chand, too_remove->on_complete)); - gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); - if (too_remove == chand->external_connectivity_watcher_list_head) { - chand->external_connectivity_watcher_list_head = too_remove->next; - gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); - return; - } - external_connectivity_watcher *w = - chand->external_connectivity_watcher_list_head; - while (w != NULL) { - if (w->next == too_remove) { - w->next = w->next->next; - gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); - return; - } - w = w->next; - } - GPR_UNREACHABLE_CODE(return ); -} - -int grpc_client_channel_num_external_connectivity_watchers( - grpc_channel_element *elem) { - channel_data *chand = (channel_data *)elem->channel_data; - int count = 0; - - gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); - external_connectivity_watcher *w = - chand->external_connectivity_watcher_list_head; - while (w != NULL) { - count++; - w = w->next; - } - gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); - - return count; -} - -static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - external_connectivity_watcher *w = (external_connectivity_watcher *)arg; - grpc_closure *follow_up = w->on_complete; - grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent, - w->chand->interested_parties); - GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, - "external_connectivity_watcher"); - external_connectivity_watcher_list_remove(w->chand, w); - gpr_free(w); - GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error)); -} - -static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error_ignored) { - external_connectivity_watcher *w = (external_connectivity_watcher *)arg; - external_connectivity_watcher *found = NULL; - if (w->state != NULL) { - external_connectivity_watcher_list_append(w->chand, w); - GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE); - GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete, w, - grpc_schedule_on_exec_ctx); - 
grpc_connectivity_state_notify_on_state_change( - exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure); - } else { - GPR_ASSERT(w->watcher_timer_init == NULL); - found = lookup_external_connectivity_watcher(w->chand, w->on_complete); - if (found) { - GPR_ASSERT(found->on_complete == w->on_complete); - grpc_connectivity_state_notify_on_state_change( - exec_ctx, &found->chand->state_tracker, NULL, &found->my_closure); - } - grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent, - w->chand->interested_parties); - GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, - "external_connectivity_watcher"); - gpr_free(w); - } -} - -void grpc_client_channel_watch_connectivity_state( - grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_polling_entity pollent, grpc_connectivity_state *state, - grpc_closure *closure, grpc_closure *watcher_timer_init) { - channel_data *chand = (channel_data *)elem->channel_data; - external_connectivity_watcher *w = - (external_connectivity_watcher *)gpr_zalloc(sizeof(*w)); - w->chand = chand; - w->pollent = pollent; - w->on_complete = closure; - w->state = state; - w->watcher_timer_init = watcher_timer_init; - grpc_polling_entity_add_to_pollset_set(exec_ctx, &w->pollent, - chand->interested_parties); - GRPC_CHANNEL_STACK_REF(w->chand->owning_stack, - "external_connectivity_watcher"); - GRPC_CLOSURE_SCHED( - exec_ctx, - GRPC_CLOSURE_INIT(&w->my_closure, watch_connectivity_state_locked, w, - grpc_combiner_scheduler(chand->combiner)), - GRPC_ERROR_NONE); -} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.cc new file mode 100644 index 000000000..80a647fa9 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.cc @@ -0,0 +1,3304 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/ext/filters/client_channel/client_channel.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "src/core/ext/filters/client_channel/backup_poller.h" +#include "src/core/ext/filters/client_channel/http_connect_handshaker.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/method_params.h" +#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" +#include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/ext/filters/client_channel/retry_throttle.h" +#include "src/core/ext/filters/client_channel/subchannel.h" +#include "src/core/ext/filters/deadline/deadline_filter.h" +#include "src/core/lib/backoff/backoff.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/connected_channel.h" +#include "src/core/lib/channel/status_util.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/inlined_vector.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/iomgr.h" +#include "src/core/lib/iomgr/polling_entity.h" +#include "src/core/lib/profiling/timers.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" +#include "src/core/lib/surface/channel.h" +#include "src/core/lib/transport/connectivity_state.h" +#include "src/core/lib/transport/error_utils.h" +#include "src/core/lib/transport/metadata.h" +#include "src/core/lib/transport/metadata_batch.h" +#include "src/core/lib/transport/service_config.h" +#include "src/core/lib/transport/static_metadata.h" +#include "src/core/lib/transport/status_metadata.h" + +using grpc_core::internal::ClientChannelMethodParams; +using grpc_core::internal::ServerRetryThrottleData; + +/* Client channel implementation */ + +// By default, we buffer 256 KiB per RPC for retries. +// TODO(roth): Do we have any data to suggest a better value? +#define DEFAULT_PER_RPC_RETRY_BUFFER_SIZE (256 << 10) + +// This value was picked arbitrarily. It can be changed if there is +// any even moderately compelling reason to do so. 
+#define RETRY_BACKOFF_JITTER 0.2
+
+grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
+
+/*************************************************************************
+ * CHANNEL-WIDE FUNCTIONS
+ */
+
+struct external_connectivity_watcher;
+
+typedef grpc_core::SliceHashTable<
+    grpc_core::RefCountedPtr<ClientChannelMethodParams>>
+    MethodParamsTable;
+
+typedef struct client_channel_channel_data {
+  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
+  bool started_resolving;
+  bool deadline_checking_enabled;
+  grpc_client_channel_factory* client_channel_factory;
+  bool enable_retries;
+  size_t per_rpc_retry_buffer_size;
+
+  /** combiner protecting all variables below in this data structure */
+  grpc_combiner* combiner;
+  /** currently active load balancer */
+  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> lb_policy;
+  /** retry throttle data */
+  grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
+  /** maps method names to method_parameters structs */
+  grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
+  /** incoming resolver result - set by resolver.next() */
+  grpc_channel_args* resolver_result;
+  /** a list of closures that are all waiting for resolver result to come in */
+  grpc_closure_list waiting_for_resolver_result_closures;
+  /** resolver callback */
+  grpc_closure on_resolver_result_changed;
+  /** connectivity state being tracked */
+  grpc_connectivity_state_tracker state_tracker;
+  /** when an lb_policy arrives, should we try to exit idle */
+  bool exit_idle_when_lb_policy_arrives;
+  /** owning stack */
+  grpc_channel_stack* owning_stack;
+  /** interested parties (owned) */
+  grpc_pollset_set* interested_parties;
+
+  /* external_connectivity_watcher_list head is guarded by its own mutex, since
+   * counts need to be grabbed immediately without polling on a cq */
+  gpr_mu external_connectivity_watcher_list_mu;
+  struct external_connectivity_watcher*
+      external_connectivity_watcher_list_head;
+
+  /* the following properties are guarded by a mutex since APIs require them
+     to be instantaneously available */
+  gpr_mu info_mu;
+  char* info_lb_policy_name;
+  /** service config in JSON form */
+  char* info_service_config_json;
+} channel_data;
+
+typedef struct {
+  channel_data* chand;
+  /** used as an identifier, don't dereference it because the LB policy may be
+   * non-existing when the callback is run */
+  grpc_core::LoadBalancingPolicy* lb_policy;
+  grpc_closure closure;
+} reresolution_request_args;
+
+/** We create one watcher for each new lb_policy that is returned from a
+    resolver, to watch for state changes from the lb_policy. When a state
+    change is seen, we update the channel, and create a new watcher. */
+typedef struct {
+  channel_data* chand;
+  grpc_closure on_changed;
+  grpc_connectivity_state state;
+  grpc_core::LoadBalancingPolicy* lb_policy;
+} lb_policy_connectivity_watcher;
+
+static void watch_lb_policy_locked(channel_data* chand,
+                                   grpc_core::LoadBalancingPolicy* lb_policy,
+                                   grpc_connectivity_state current_state);
+
+static void set_channel_connectivity_state_locked(channel_data* chand,
+                                                  grpc_connectivity_state state,
+                                                  grpc_error* error,
+                                                  const char* reason) {
+  /* TODO: Improve failure handling:
+   * - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
+   * - Hand over pending picks from old policies during the switch that happens
+   *   when resolver provides an update.
*/ + if (chand->lb_policy != nullptr) { + if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) { + /* cancel picks with wait_for_ready=false */ + chand->lb_policy->CancelMatchingPicksLocked( + /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY, + /* check= */ 0, GRPC_ERROR_REF(error)); + } else if (state == GRPC_CHANNEL_SHUTDOWN) { + /* cancel all picks */ + chand->lb_policy->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0, + GRPC_ERROR_REF(error)); + } + } + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand, + grpc_connectivity_state_name(state)); + } + grpc_connectivity_state_set(&chand->state_tracker, state, error, reason); +} + +static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) { + lb_policy_connectivity_watcher* w = + static_cast(arg); + /* check if the notification is for the latest policy */ + if (w->lb_policy == w->chand->lb_policy.get()) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p: lb_policy=%p state changed to %s", w->chand, + w->lb_policy, grpc_connectivity_state_name(w->state)); + } + set_channel_connectivity_state_locked(w->chand, w->state, + GRPC_ERROR_REF(error), "lb_changed"); + if (w->state != GRPC_CHANNEL_SHUTDOWN) { + watch_lb_policy_locked(w->chand, w->lb_policy, w->state); + } + } + GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, "watch_lb_policy"); + gpr_free(w); +} + +static void watch_lb_policy_locked(channel_data* chand, + grpc_core::LoadBalancingPolicy* lb_policy, + grpc_connectivity_state current_state) { + lb_policy_connectivity_watcher* w = + static_cast(gpr_malloc(sizeof(*w))); + GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy"); + w->chand = chand; + GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w, + grpc_combiner_scheduler(chand->combiner)); + w->state = current_state; + w->lb_policy = lb_policy; + lb_policy->NotifyOnStateChangeLocked(&w->state, &w->on_changed); +} + +static void start_resolving_locked(channel_data* chand) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p: starting name resolution", chand); + } + GPR_ASSERT(!chand->started_resolving); + chand->started_resolving = true; + GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver"); + chand->resolver->NextLocked(&chand->resolver_result, + &chand->on_resolver_result_changed); +} + +typedef struct { + char* server_name; + grpc_core::RefCountedPtr retry_throttle_data; +} service_config_parsing_state; + +static void parse_retry_throttle_params( + const grpc_json* field, service_config_parsing_state* parsing_state) { + if (strcmp(field->key, "retryThrottling") == 0) { + if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate. + if (field->type != GRPC_JSON_OBJECT) return; + int max_milli_tokens = 0; + int milli_token_ratio = 0; + for (grpc_json* sub_field = field->child; sub_field != nullptr; + sub_field = sub_field->next) { + if (sub_field->key == nullptr) return; + if (strcmp(sub_field->key, "maxTokens") == 0) { + if (max_milli_tokens != 0) return; // Duplicate. + if (sub_field->type != GRPC_JSON_NUMBER) return; + max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value); + if (max_milli_tokens == -1) return; + max_milli_tokens *= 1000; + } else if (strcmp(sub_field->key, "tokenRatio") == 0) { + if (milli_token_ratio != 0) return; // Duplicate. + if (sub_field->type != GRPC_JSON_NUMBER) return; + // We support up to 3 decimal digits. 
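Concretely, the block below turns the JSON "tokenRatio" string into whole milli-tokens, truncating anything past the third decimal digit. A minimal standalone sketch of the same arithmetic (TokenRatioToMilliTokens is an illustrative helper, not part of the vendored file; validation and error handling are omitted):

#include <cstdint>
#include <cstdlib>
#include <string>

// Converts a JSON "tokenRatio" string into milli-tokens, keeping at most
// three decimal digits: "1" -> 1000, "0.516" -> 516, "2.5" -> 2500.
int32_t TokenRatioToMilliTokens(const std::string& s) {
  const size_t dot = s.find('.');
  const long whole = std::strtol(s.substr(0, dot).c_str(), nullptr, 10);
  long decimal = 0;
  if (dot != std::string::npos) {
    std::string frac = s.substr(dot + 1, 3);  // ignore digits beyond the third
    frac.resize(3, '0');                      // pad, so ".5" counts as 500
    decimal = std::strtol(frac.c_str(), nullptr, 10);
  }
  return static_cast<int32_t>(whole * 1000 + decimal);
}

This mirrors the whole_value * multiplier + decimal_value computation performed in the parsing code that follows.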
+ size_t whole_len = strlen(sub_field->value); + uint32_t multiplier = 1; + uint32_t decimal_value = 0; + const char* decimal_point = strchr(sub_field->value, '.'); + if (decimal_point != nullptr) { + whole_len = static_cast(decimal_point - sub_field->value); + multiplier = 1000; + size_t decimal_len = strlen(decimal_point + 1); + if (decimal_len > 3) decimal_len = 3; + if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len, + &decimal_value)) { + return; + } + uint32_t decimal_multiplier = 1; + for (size_t i = 0; i < (3 - decimal_len); ++i) { + decimal_multiplier *= 10; + } + decimal_value *= decimal_multiplier; + } + uint32_t whole_value; + if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len, + &whole_value)) { + return; + } + milli_token_ratio = + static_cast((whole_value * multiplier) + decimal_value); + if (milli_token_ratio <= 0) return; + } + } + parsing_state->retry_throttle_data = + grpc_core::internal::ServerRetryThrottleMap::GetDataForServer( + parsing_state->server_name, max_milli_tokens, milli_token_ratio); + } +} + +static void request_reresolution_locked(void* arg, grpc_error* error) { + reresolution_request_args* args = + static_cast(arg); + channel_data* chand = args->chand; + // If this invocation is for a stale LB policy, treat it as an LB shutdown + // signal. + if (args->lb_policy != chand->lb_policy.get() || error != GRPC_ERROR_NONE || + chand->resolver == nullptr) { + GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "re-resolution"); + gpr_free(args); + return; + } + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand); + } + chand->resolver->RequestReresolutionLocked(); + // Give back the closure to the LB policy. + chand->lb_policy->SetReresolutionClosureLocked(&args->closure); +} + +// TODO(roth): The logic in this function is very hard to follow. We +// should refactor this so that it's easier to understand, perhaps as +// part of changing the resolver API to more clearly differentiate +// between transient failures and shutdown. +static void on_resolver_result_changed_locked(void* arg, grpc_error* error) { + channel_data* chand = static_cast(arg); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p: got resolver result: resolver_result=%p error=%s", chand, + chand->resolver_result, grpc_error_string(error)); + } + // Extract the following fields from the resolver result, if non-nullptr. + bool lb_policy_updated = false; + bool lb_policy_created = false; + char* lb_policy_name_dup = nullptr; + bool lb_policy_name_changed = false; + grpc_core::OrphanablePtr new_lb_policy; + char* service_config_json = nullptr; + grpc_core::RefCountedPtr retry_throttle_data; + grpc_core::RefCountedPtr method_params_table; + if (chand->resolver_result != nullptr) { + if (chand->resolver != nullptr) { + // Find LB policy name. + const grpc_arg* channel_arg = grpc_channel_args_find( + chand->resolver_result, GRPC_ARG_LB_POLICY_NAME); + const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg); + // Special case: If at least one balancer address is present, we use + // the grpclb policy, regardless of what the resolver actually specified. 
+ channel_arg = + grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES); + if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) { + grpc_lb_addresses* addresses = + static_cast(channel_arg->value.pointer.p); + bool found_balancer_address = false; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (addresses->addresses[i].is_balancer) { + found_balancer_address = true; + break; + } + } + if (found_balancer_address) { + if (lb_policy_name != nullptr && + strcmp(lb_policy_name, "grpclb") != 0) { + gpr_log(GPR_INFO, + "resolver requested LB policy %s but provided at least one " + "balancer address -- forcing use of grpclb LB policy", + lb_policy_name); + } + lb_policy_name = "grpclb"; + } + } + // Use pick_first if nothing was specified and we didn't select grpclb + // above. + if (lb_policy_name == nullptr) lb_policy_name = "pick_first"; + // Check to see if we're already using the right LB policy. + // Note: It's safe to use chand->info_lb_policy_name here without + // taking a lock on chand->info_mu, because this function is the + // only thing that modifies its value, and it can only be invoked + // once at any given time. + lb_policy_name_changed = + chand->info_lb_policy_name == nullptr || + gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0; + if (chand->lb_policy != nullptr && !lb_policy_name_changed) { + // Continue using the same LB policy. Update with new addresses. + lb_policy_updated = true; + chand->lb_policy->UpdateLocked(*chand->resolver_result); + } else { + // Instantiate new LB policy. + grpc_core::LoadBalancingPolicy::Args lb_policy_args; + lb_policy_args.combiner = chand->combiner; + lb_policy_args.client_channel_factory = chand->client_channel_factory; + lb_policy_args.args = chand->resolver_result; + new_lb_policy = + grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( + lb_policy_name, lb_policy_args); + if (new_lb_policy == nullptr) { + gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", + lb_policy_name); + } else { + lb_policy_created = true; + reresolution_request_args* args = + static_cast( + gpr_zalloc(sizeof(*args))); + args->chand = chand; + args->lb_policy = new_lb_policy.get(); + GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args, + grpc_combiner_scheduler(chand->combiner)); + GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution"); + new_lb_policy->SetReresolutionClosureLocked(&args->closure); + } + } + // Before we clean up, save a copy of lb_policy_name, since it might + // be pointing to data inside chand->resolver_result. + // The copy will be saved in chand->lb_policy_name below. + lb_policy_name_dup = gpr_strdup(lb_policy_name); + // Find service config. + channel_arg = grpc_channel_args_find(chand->resolver_result, + GRPC_ARG_SERVICE_CONFIG); + service_config_json = + gpr_strdup(grpc_channel_arg_get_string(channel_arg)); + if (service_config_json != nullptr) { + grpc_core::UniquePtr service_config = + grpc_core::ServiceConfig::Create(service_config_json); + if (service_config != nullptr) { + if (chand->enable_retries) { + channel_arg = grpc_channel_args_find(chand->resolver_result, + GRPC_ARG_SERVER_URI); + const char* server_uri = grpc_channel_arg_get_string(channel_arg); + GPR_ASSERT(server_uri != nullptr); + grpc_uri* uri = grpc_uri_parse(server_uri, true); + GPR_ASSERT(uri->path[0] != '\0'); + service_config_parsing_state parsing_state; + memset(&parsing_state, 0, sizeof(parsing_state)); + parsing_state.server_name = + uri->path[0] == '/' ? 
uri->path + 1 : uri->path; + service_config->ParseGlobalParams(parse_retry_throttle_params, + &parsing_state); + grpc_uri_destroy(uri); + retry_throttle_data = std::move(parsing_state.retry_throttle_data); + } + method_params_table = service_config->CreateMethodConfigTable( + ClientChannelMethodParams::CreateFromJson); + } + } + } + } + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p: resolver result: lb_policy_name=\"%s\"%s, " + "service_config=\"%s\"", + chand, lb_policy_name_dup, + lb_policy_name_changed ? " (changed)" : "", service_config_json); + } + // Now swap out fields in chand. Note that the new values may still + // be nullptr if (e.g.) the resolver failed to return results or the + // results did not contain the necessary data. + // + // First, swap out the data used by cc_get_channel_info(). + gpr_mu_lock(&chand->info_mu); + if (lb_policy_name_dup != nullptr) { + gpr_free(chand->info_lb_policy_name); + chand->info_lb_policy_name = lb_policy_name_dup; + } + if (service_config_json != nullptr) { + gpr_free(chand->info_service_config_json); + chand->info_service_config_json = service_config_json; + } + gpr_mu_unlock(&chand->info_mu); + // Swap out the retry throttle data. + chand->retry_throttle_data = std::move(retry_throttle_data); + // Swap out the method params table. + chand->method_params_table = std::move(method_params_table); + // If we have a new LB policy or are shutting down (in which case + // new_lb_policy will be nullptr), swap out the LB policy, unreffing the + // old one and removing its fds from chand->interested_parties. + // Note that we do NOT do this if either (a) we updated the existing + // LB policy above or (b) we failed to create the new LB policy (in + // which case we want to continue using the most recent one we had). + if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE || + chand->resolver == nullptr) { + if (chand->lb_policy != nullptr) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p: unreffing lb_policy=%p", chand, + chand->lb_policy.get()); + } + grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(), + chand->interested_parties); + chand->lb_policy->HandOffPendingPicksLocked(new_lb_policy.get()); + chand->lb_policy.reset(); + } + chand->lb_policy = std::move(new_lb_policy); + } + // Now that we've swapped out the relevant fields of chand, check for + // error or shutdown. + if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p: shutting down", chand); + } + if (chand->resolver != nullptr) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p: shutting down resolver", chand); + } + chand->resolver.reset(); + } + set_channel_connectivity_state_locked( + chand, GRPC_CHANNEL_SHUTDOWN, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Got resolver result after disconnection", &error, 1), + "resolver_gone"); + grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Channel disconnected", &error, 1)); + GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures); + GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver"); + grpc_channel_args_destroy(chand->resolver_result); + chand->resolver_result = nullptr; + } else { // Not shutting down. 
+ grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE; + grpc_error* state_error = + GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy"); + if (lb_policy_created) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p: initializing new LB policy", chand); + } + GRPC_ERROR_UNREF(state_error); + state = chand->lb_policy->CheckConnectivityLocked(&state_error); + grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(), + chand->interested_parties); + GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures); + if (chand->exit_idle_when_lb_policy_arrives) { + chand->lb_policy->ExitIdleLocked(); + chand->exit_idle_when_lb_policy_arrives = false; + } + watch_lb_policy_locked(chand, chand->lb_policy.get(), state); + } else if (chand->resolver_result == nullptr) { + // Transient failure. + GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures); + } + if (!lb_policy_updated) { + set_channel_connectivity_state_locked( + chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver"); + } + grpc_channel_args_destroy(chand->resolver_result); + chand->resolver_result = nullptr; + chand->resolver->NextLocked(&chand->resolver_result, + &chand->on_resolver_result_changed); + GRPC_ERROR_UNREF(state_error); + } +} + +static void start_transport_op_locked(void* arg, grpc_error* error_ignored) { + grpc_transport_op* op = static_cast(arg); + grpc_channel_element* elem = + static_cast(op->handler_private.extra_arg); + channel_data* chand = static_cast(elem->channel_data); + + if (op->on_connectivity_state_change != nullptr) { + grpc_connectivity_state_notify_on_state_change( + &chand->state_tracker, op->connectivity_state, + op->on_connectivity_state_change); + op->on_connectivity_state_change = nullptr; + op->connectivity_state = nullptr; + } + + if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) { + if (chand->lb_policy == nullptr) { + GRPC_CLOSURE_SCHED( + op->send_ping.on_initiate, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing")); + GRPC_CLOSURE_SCHED( + op->send_ping.on_ack, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing")); + } else { + chand->lb_policy->PingOneLocked(op->send_ping.on_initiate, + op->send_ping.on_ack); + op->bind_pollset = nullptr; + } + op->send_ping.on_initiate = nullptr; + op->send_ping.on_ack = nullptr; + } + + if (op->disconnect_with_error != GRPC_ERROR_NONE) { + if (chand->resolver != nullptr) { + set_channel_connectivity_state_locked( + chand, GRPC_CHANNEL_SHUTDOWN, + GRPC_ERROR_REF(op->disconnect_with_error), "disconnect"); + chand->resolver.reset(); + if (!chand->started_resolving) { + grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures, + GRPC_ERROR_REF(op->disconnect_with_error)); + GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures); + } + if (chand->lb_policy != nullptr) { + grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(), + chand->interested_parties); + chand->lb_policy.reset(); + } + } + GRPC_ERROR_UNREF(op->disconnect_with_error); + } + GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op"); + + GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE); +} + +static void cc_start_transport_op(grpc_channel_element* elem, + grpc_transport_op* op) { + channel_data* chand = static_cast(elem->channel_data); + + GPR_ASSERT(op->set_accept_stream == false); + if (op->bind_pollset != nullptr) { + grpc_pollset_set_add_pollset(chand->interested_parties, 
op->bind_pollset); + } + + op->handler_private.extra_arg = elem; + GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op"); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked, + op, grpc_combiner_scheduler(chand->combiner)), + GRPC_ERROR_NONE); +} + +static void cc_get_channel_info(grpc_channel_element* elem, + const grpc_channel_info* info) { + channel_data* chand = static_cast(elem->channel_data); + gpr_mu_lock(&chand->info_mu); + if (info->lb_policy_name != nullptr) { + *info->lb_policy_name = chand->info_lb_policy_name == nullptr + ? nullptr + : gpr_strdup(chand->info_lb_policy_name); + } + if (info->service_config_json != nullptr) { + *info->service_config_json = + chand->info_service_config_json == nullptr + ? nullptr + : gpr_strdup(chand->info_service_config_json); + } + gpr_mu_unlock(&chand->info_mu); +} + +/* Constructor for channel_data */ +static grpc_error* cc_init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + channel_data* chand = static_cast(elem->channel_data); + GPR_ASSERT(args->is_last); + GPR_ASSERT(elem->filter == &grpc_client_channel_filter); + // Initialize data members. + chand->combiner = grpc_combiner_create(); + gpr_mu_init(&chand->info_mu); + gpr_mu_init(&chand->external_connectivity_watcher_list_mu); + + gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); + chand->external_connectivity_watcher_list_head = nullptr; + gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); + + chand->owning_stack = args->channel_stack; + GRPC_CLOSURE_INIT(&chand->on_resolver_result_changed, + on_resolver_result_changed_locked, chand, + grpc_combiner_scheduler(chand->combiner)); + chand->interested_parties = grpc_pollset_set_create(); + grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE, + "client_channel"); + grpc_client_channel_start_backup_polling(chand->interested_parties); + // Record max per-RPC retry buffer size. + const grpc_arg* arg = grpc_channel_args_find( + args->channel_args, GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE); + chand->per_rpc_retry_buffer_size = (size_t)grpc_channel_arg_get_integer( + arg, {DEFAULT_PER_RPC_RETRY_BUFFER_SIZE, 0, INT_MAX}); + // Record enable_retries. + arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES); + chand->enable_retries = grpc_channel_arg_get_bool(arg, true); + // Record client channel factory. + arg = grpc_channel_args_find(args->channel_args, + GRPC_ARG_CLIENT_CHANNEL_FACTORY); + if (arg == nullptr) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Missing client channel factory in args for client channel filter"); + } + if (arg->type != GRPC_ARG_POINTER) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "client channel factory arg must be a pointer"); + } + grpc_client_channel_factory_ref( + static_cast(arg->value.pointer.p)); + chand->client_channel_factory = + static_cast(arg->value.pointer.p); + // Get server name to resolve, using proxy mapper if needed. + arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI); + if (arg == nullptr) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Missing server uri in args for client channel filter"); + } + if (arg->type != GRPC_ARG_STRING) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "server uri arg must be a string"); + } + char* proxy_name = nullptr; + grpc_channel_args* new_args = nullptr; + grpc_proxy_mappers_map_name(arg->value.string, args->channel_args, + &proxy_name, &new_args); + // Instantiate resolver. 
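For reference, the retry-related channel args read above (GRPC_ARG_ENABLE_RETRIES and GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE) are supplied by the application when it creates the channel. A hypothetical client-side sketch (make_retrying_channel is illustrative and uses an insecure channel only for brevity):

#include <grpc/grpc.h>

// Creates a channel with retries explicitly enabled and a larger
// per-RPC retry buffer than the 256 KiB default.
grpc_channel* make_retrying_channel(const char* target) {
  grpc_arg args[2];
  args[0].type = GRPC_ARG_INTEGER;
  args[0].key = const_cast<char*>(GRPC_ARG_ENABLE_RETRIES);
  args[0].value.integer = 1;  // retries default to enabled; shown explicitly
  args[1].type = GRPC_ARG_INTEGER;
  args[1].key = const_cast<char*>(GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE);
  args[1].value.integer = 1 << 20;  // 1 MiB
  grpc_channel_args channel_args = {2, args};
  return grpc_insecure_channel_create(target, &channel_args, nullptr);
}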
+ chand->resolver = grpc_core::ResolverRegistry::CreateResolver( + proxy_name != nullptr ? proxy_name : arg->value.string, + new_args != nullptr ? new_args : args->channel_args, + chand->interested_parties, chand->combiner); + if (proxy_name != nullptr) gpr_free(proxy_name); + if (new_args != nullptr) grpc_channel_args_destroy(new_args); + if (chand->resolver == nullptr) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed"); + } + chand->deadline_checking_enabled = + grpc_deadline_checking_enabled(args->channel_args); + return GRPC_ERROR_NONE; +} + +static void shutdown_resolver_locked(void* arg, grpc_error* error) { + grpc_core::Resolver* resolver = static_cast(arg); + resolver->Orphan(); +} + +/* Destructor for channel_data */ +static void cc_destroy_channel_elem(grpc_channel_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + if (chand->resolver != nullptr) { + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver.release(), + grpc_combiner_scheduler(chand->combiner)), + GRPC_ERROR_NONE); + } + if (chand->client_channel_factory != nullptr) { + grpc_client_channel_factory_unref(chand->client_channel_factory); + } + if (chand->lb_policy != nullptr) { + grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(), + chand->interested_parties); + chand->lb_policy.reset(); + } + gpr_free(chand->info_lb_policy_name); + gpr_free(chand->info_service_config_json); + chand->retry_throttle_data.reset(); + chand->method_params_table.reset(); + grpc_client_channel_stop_backup_polling(chand->interested_parties); + grpc_connectivity_state_destroy(&chand->state_tracker); + grpc_pollset_set_destroy(chand->interested_parties); + GRPC_COMBINER_UNREF(chand->combiner, "client_channel"); + gpr_mu_destroy(&chand->info_mu); + gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu); +} + +/************************************************************************* + * PER-CALL FUNCTIONS + */ + +// Max number of batches that can be pending on a call at any given +// time. This includes one batch for each of the following ops: +// recv_initial_metadata +// send_initial_metadata +// recv_message +// send_message +// recv_trailing_metadata +// send_trailing_metadata +#define MAX_PENDING_BATCHES 6 + +// Retry support: +// +// In order to support retries, we act as a proxy for stream op batches. +// When we get a batch from the surface, we add it to our list of pending +// batches, and we then use those batches to construct separate "child" +// batches to be started on the subchannel call. When the child batches +// return, we then decide which pending batches have been completed and +// schedule their callbacks accordingly. If a subchannel call fails and +// we want to retry it, we do a new pick and start again, constructing +// new "child" batches for the new subchannel call. +// +// Note that retries are committed when receiving data from the server +// (except for Trailers-Only responses). However, there may be many +// send ops started before receiving any data, so we may have already +// completed some number of send ops (and returned the completions up to +// the surface) by the time we realize that we need to retry. To deal +// with this, we cache data for send ops, so that we can replay them on a +// different subchannel call even after we have completed the original +// batches. 
+// +// There are two sets of data to maintain: +// - In call_data (in the parent channel), we maintain a list of pending +// ops and cached data for send ops. +// - In the subchannel call, we maintain state to indicate what ops have +// already been sent down to that call. +// +// When constructing the "child" batches, we compare those two sets of +// data to see which batches need to be sent to the subchannel call. + +// TODO(roth): In subsequent PRs: +// - add support for transparent retries (including initial metadata) +// - figure out how to record stats in census for retries +// (census filter is on top of this one) +// - add census stats for retries + +// State used for starting a retryable batch on a subchannel call. +// This provides its own grpc_transport_stream_op_batch and other data +// structures needed to populate the ops in the batch. +// We allocate one struct on the arena for each attempt at starting a +// batch on a given subchannel call. +typedef struct { + gpr_refcount refs; + grpc_call_element* elem; + grpc_subchannel_call* subchannel_call; // Holds a ref. + // The batch to use in the subchannel call. + // Its payload field points to subchannel_call_retry_state.batch_payload. + grpc_transport_stream_op_batch batch; + // For send_initial_metadata. + // Note that we need to make a copy of the initial metadata for each + // subchannel call instead of just referring to the copy in call_data, + // because filters in the subchannel stack will probably add entries, + // so we need to start in a pristine state for each attempt of the call. + grpc_linked_mdelem* send_initial_metadata_storage; + grpc_metadata_batch send_initial_metadata; + // For send_message. + grpc_core::ManualConstructor + send_message; + // For send_trailing_metadata. + grpc_linked_mdelem* send_trailing_metadata_storage; + grpc_metadata_batch send_trailing_metadata; + // For intercepting recv_initial_metadata. + grpc_metadata_batch recv_initial_metadata; + grpc_closure recv_initial_metadata_ready; + bool trailing_metadata_available; + // For intercepting recv_message. + grpc_closure recv_message_ready; + grpc_core::OrphanablePtr recv_message; + // For intercepting recv_trailing_metadata. + grpc_metadata_batch recv_trailing_metadata; + grpc_transport_stream_stats collect_stats; + // For intercepting on_complete. + grpc_closure on_complete; +} subchannel_batch_data; + +// Retry state associated with a subchannel call. +// Stored in the parent_data of the subchannel call object. +typedef struct { + // subchannel_batch_data.batch.payload points to this. + grpc_transport_stream_op_batch_payload batch_payload; + // These fields indicate which ops have been started and completed on + // this subchannel call. + size_t started_send_message_count; + size_t completed_send_message_count; + size_t started_recv_message_count; + size_t completed_recv_message_count; + bool started_send_initial_metadata : 1; + bool completed_send_initial_metadata : 1; + bool started_send_trailing_metadata : 1; + bool completed_send_trailing_metadata : 1; + bool started_recv_initial_metadata : 1; + bool completed_recv_initial_metadata : 1; + bool started_recv_trailing_metadata : 1; + bool completed_recv_trailing_metadata : 1; + // State for callback processing. 
+ bool retry_dispatched : 1; + subchannel_batch_data* recv_initial_metadata_ready_deferred_batch; + grpc_error* recv_initial_metadata_error; + subchannel_batch_data* recv_message_ready_deferred_batch; + grpc_error* recv_message_error; + subchannel_batch_data* recv_trailing_metadata_internal_batch; +} subchannel_call_retry_state; + +// Pending batches stored in call data. +typedef struct { + // The pending batch. If nullptr, this slot is empty. + grpc_transport_stream_op_batch* batch; + // Indicates whether payload for send ops has been cached in call data. + bool send_ops_cached; +} pending_batch; + +/** Call data. Holds a pointer to grpc_subchannel_call and the + associated machinery to create such a pointer. + Handles queueing of stream ops until a call object is ready, waiting + for initial metadata before trying to create a call object, + and handling cancellation gracefully. */ +typedef struct client_channel_call_data { + // State for handling deadlines. + // The code in deadline_filter.c requires this to be the first field. + // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state + // and this struct both independently store pointers to the call stack + // and call combiner. If/when we have time, find a way to avoid this + // without breaking the grpc_deadline_state abstraction. + grpc_deadline_state deadline_state; + + grpc_slice path; // Request path. + gpr_timespec call_start_time; + grpc_millis deadline; + gpr_arena* arena; + grpc_call_stack* owning_call; + grpc_call_combiner* call_combiner; + + grpc_core::RefCountedPtr retry_throttle_data; + grpc_core::RefCountedPtr method_params; + + grpc_subchannel_call* subchannel_call; + + // Set when we get a cancel_stream op. + grpc_error* cancel_error; + + grpc_core::LoadBalancingPolicy::PickState pick; + grpc_closure pick_closure; + grpc_closure pick_cancel_closure; + + grpc_polling_entity* pollent; + + // Batches are added to this list when received from above. + // They are removed when we are done handling the batch (i.e., when + // either we have invoked all of the batch's callbacks or we have + // passed the batch down to the subchannel call and are not + // intercepting any of its callbacks). + pending_batch pending_batches[MAX_PENDING_BATCHES]; + bool pending_send_initial_metadata : 1; + bool pending_send_message : 1; + bool pending_send_trailing_metadata : 1; + + // Retry state. + bool enable_retries : 1; + bool retry_committed : 1; + bool last_attempt_got_server_pushback : 1; + int num_attempts_completed; + size_t bytes_buffered_for_retry; + grpc_core::ManualConstructor retry_backoff; + grpc_timer retry_timer; + + // Cached data for retrying send ops. + // send_initial_metadata + bool seen_send_initial_metadata; + grpc_linked_mdelem* send_initial_metadata_storage; + grpc_metadata_batch send_initial_metadata; + uint32_t send_initial_metadata_flags; + gpr_atm* peer_string; + // send_message + // When we get a send_message op, we replace the original byte stream + // with a CachingByteStream that caches the slices to a local buffer for + // use in retries. + // Note: We inline the cache for the first 3 send_message ops and use + // dynamic allocation after that. This number was essentially picked + // at random; it could be changed in the future to tune performance. 
+ grpc_core::ManualConstructor< + grpc_core::InlinedVector> + send_messages; + // send_trailing_metadata + bool seen_send_trailing_metadata; + grpc_linked_mdelem* send_trailing_metadata_storage; + grpc_metadata_batch send_trailing_metadata; +} call_data; + +// Forward declarations. +static void retry_commit(grpc_call_element* elem, + subchannel_call_retry_state* retry_state); +static void start_internal_recv_trailing_metadata(grpc_call_element* elem); +static void on_complete(void* arg, grpc_error* error); +static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored); +static void pick_after_resolver_result_start_locked(grpc_call_element* elem); +static void start_pick_locked(void* arg, grpc_error* ignored); + +// +// send op data caching +// + +// Caches data for send ops so that it can be retried later, if not +// already cached. +static void maybe_cache_send_ops_for_batch(call_data* calld, + pending_batch* pending) { + if (pending->send_ops_cached) return; + pending->send_ops_cached = true; + grpc_transport_stream_op_batch* batch = pending->batch; + // Save a copy of metadata for send_initial_metadata ops. + if (batch->send_initial_metadata) { + calld->seen_send_initial_metadata = true; + GPR_ASSERT(calld->send_initial_metadata_storage == nullptr); + grpc_metadata_batch* send_initial_metadata = + batch->payload->send_initial_metadata.send_initial_metadata; + calld->send_initial_metadata_storage = (grpc_linked_mdelem*)gpr_arena_alloc( + calld->arena, + sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count); + grpc_metadata_batch_copy(send_initial_metadata, + &calld->send_initial_metadata, + calld->send_initial_metadata_storage); + calld->send_initial_metadata_flags = + batch->payload->send_initial_metadata.send_initial_metadata_flags; + calld->peer_string = batch->payload->send_initial_metadata.peer_string; + } + // Set up cache for send_message ops. + if (batch->send_message) { + grpc_core::ByteStreamCache* cache = + static_cast( + gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache))); + new (cache) grpc_core::ByteStreamCache( + std::move(batch->payload->send_message.send_message)); + calld->send_messages->push_back(cache); + } + // Save metadata batch for send_trailing_metadata ops. + if (batch->send_trailing_metadata) { + calld->seen_send_trailing_metadata = true; + GPR_ASSERT(calld->send_trailing_metadata_storage == nullptr); + grpc_metadata_batch* send_trailing_metadata = + batch->payload->send_trailing_metadata.send_trailing_metadata; + calld->send_trailing_metadata_storage = + (grpc_linked_mdelem*)gpr_arena_alloc( + calld->arena, + sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count); + grpc_metadata_batch_copy(send_trailing_metadata, + &calld->send_trailing_metadata, + calld->send_trailing_metadata_storage); + } +} + +// Frees cached send_initial_metadata. +static void free_cached_send_initial_metadata(channel_data* chand, + call_data* calld) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: destroying calld->send_initial_metadata", chand, + calld); + } + grpc_metadata_batch_destroy(&calld->send_initial_metadata); +} + +// Frees cached send_message at index idx. 
+static void free_cached_send_message(channel_data* chand, call_data* calld, + size_t idx) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]", + chand, calld, idx); + } + (*calld->send_messages)[idx]->Destroy(); +} + +// Frees cached send_trailing_metadata. +static void free_cached_send_trailing_metadata(channel_data* chand, + call_data* calld) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: destroying calld->send_trailing_metadata", + chand, calld); + } + grpc_metadata_batch_destroy(&calld->send_trailing_metadata); +} + +// Frees cached send ops that have already been completed after +// committing the call. +static void free_cached_send_op_data_after_commit( + grpc_call_element* elem, subchannel_call_retry_state* retry_state) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (retry_state->completed_send_initial_metadata) { + free_cached_send_initial_metadata(chand, calld); + } + for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) { + free_cached_send_message(chand, calld, i); + } + if (retry_state->completed_send_trailing_metadata) { + free_cached_send_trailing_metadata(chand, calld); + } +} + +// Frees cached send ops that were completed by the completed batch in +// batch_data. Used when batches are completed after the call is committed. +static void free_cached_send_op_data_for_completed_batch( + grpc_call_element* elem, subchannel_batch_data* batch_data, + subchannel_call_retry_state* retry_state) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (batch_data->batch.send_initial_metadata) { + free_cached_send_initial_metadata(chand, calld); + } + if (batch_data->batch.send_message) { + free_cached_send_message(chand, calld, + retry_state->completed_send_message_count - 1); + } + if (batch_data->batch.send_trailing_metadata) { + free_cached_send_trailing_metadata(chand, calld); + } +} + +// +// pending_batches management +// + +// Returns the index into calld->pending_batches to be used for batch. +static size_t get_batch_index(grpc_transport_stream_op_batch* batch) { + // Note: It is important the send_initial_metadata be the first entry + // here, since the code in pick_subchannel_locked() assumes it will be. + if (batch->send_initial_metadata) return 0; + if (batch->send_message) return 1; + if (batch->send_trailing_metadata) return 2; + if (batch->recv_initial_metadata) return 3; + if (batch->recv_message) return 4; + if (batch->recv_trailing_metadata) return 5; + GPR_UNREACHABLE_CODE(return (size_t)-1); +} + +// This is called via the call combiner, so access to calld is synchronized. +static void pending_batches_add(grpc_call_element* elem, + grpc_transport_stream_op_batch* batch) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + const size_t idx = get_batch_index(batch); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand, + calld, idx); + } + pending_batch* pending = &calld->pending_batches[idx]; + GPR_ASSERT(pending->batch == nullptr); + pending->batch = batch; + pending->send_ops_cached = false; + if (calld->enable_retries) { + // Update state in calld about pending batches. + // Also check if the batch takes us over the retry buffer limit. 
+ // Note: We don't check the size of trailing metadata here, because + // gRPC clients do not send trailing metadata. + if (batch->send_initial_metadata) { + calld->pending_send_initial_metadata = true; + calld->bytes_buffered_for_retry += grpc_metadata_batch_size( + batch->payload->send_initial_metadata.send_initial_metadata); + } + if (batch->send_message) { + calld->pending_send_message = true; + calld->bytes_buffered_for_retry += + batch->payload->send_message.send_message->length(); + } + if (batch->send_trailing_metadata) { + calld->pending_send_trailing_metadata = true; + } + if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: exceeded retry buffer size, committing", + chand, calld); + } + subchannel_call_retry_state* retry_state = + calld->subchannel_call == nullptr + ? nullptr + : static_cast( + grpc_connected_subchannel_call_get_parent_data( + calld->subchannel_call)); + retry_commit(elem, retry_state); + // If we are not going to retry and have not yet started, pretend + // retries are disabled so that we don't bother with retry overhead. + if (calld->num_attempts_completed == 0) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: disabling retries before first attempt", + chand, calld); + } + calld->enable_retries = false; + } + } + } +} + +static void pending_batch_clear(call_data* calld, pending_batch* pending) { + if (calld->enable_retries) { + if (pending->batch->send_initial_metadata) { + calld->pending_send_initial_metadata = false; + } + if (pending->batch->send_message) { + calld->pending_send_message = false; + } + if (pending->batch->send_trailing_metadata) { + calld->pending_send_trailing_metadata = false; + } + } + pending->batch = nullptr; +} + +// This is called via the call combiner, so access to calld is synchronized. +static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) { + grpc_transport_stream_op_batch* batch = + static_cast(arg); + call_data* calld = static_cast(batch->handler_private.extra_arg); + // Note: This will release the call combiner. + grpc_transport_stream_op_batch_finish_with_failure( + batch, GRPC_ERROR_REF(error), calld->call_combiner); +} + +// This is called via the call combiner, so access to calld is synchronized. +// If yield_call_combiner is true, assumes responsibility for yielding +// the call combiner. +static void pending_batches_fail(grpc_call_element* elem, grpc_error* error, + bool yield_call_combiner) { + GPR_ASSERT(error != GRPC_ERROR_NONE); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + size_t num_batches = 0; + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + if (calld->pending_batches[i].batch != nullptr) ++num_batches; + } + gpr_log(GPR_INFO, + "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s", + elem->channel_data, calld, num_batches, grpc_error_string(error)); + } + grpc_transport_stream_op_batch* + batches[GPR_ARRAY_SIZE(calld->pending_batches)]; + size_t num_batches = 0; + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + pending_batch* pending = &calld->pending_batches[i]; + grpc_transport_stream_op_batch* batch = pending->batch; + if (batch != nullptr) { + batches[num_batches++] = batch; + pending_batch_clear(calld, pending); + } + } + for (size_t i = yield_call_combiner ? 
1 : 0; i < num_batches; ++i) { + grpc_transport_stream_op_batch* batch = batches[i]; + batch->handler_private.extra_arg = calld; + GRPC_CLOSURE_INIT(&batch->handler_private.closure, + fail_pending_batch_in_call_combiner, batch, + grpc_schedule_on_exec_ctx); + GRPC_CALL_COMBINER_START(calld->call_combiner, + &batch->handler_private.closure, + GRPC_ERROR_REF(error), "pending_batches_fail"); + } + if (yield_call_combiner) { + if (num_batches > 0) { + // Note: This will release the call combiner. + grpc_transport_stream_op_batch_finish_with_failure( + batches[0], GRPC_ERROR_REF(error), calld->call_combiner); + } else { + GRPC_CALL_COMBINER_STOP(calld->call_combiner, "pending_batches_fail"); + } + } + GRPC_ERROR_UNREF(error); +} + +// This is called via the call combiner, so access to calld is synchronized. +static void resume_pending_batch_in_call_combiner(void* arg, + grpc_error* ignored) { + grpc_transport_stream_op_batch* batch = + static_cast(arg); + grpc_subchannel_call* subchannel_call = + static_cast(batch->handler_private.extra_arg); + // Note: This will release the call combiner. + grpc_subchannel_call_process_op(subchannel_call, batch); +} + +// This is called via the call combiner, so access to calld is synchronized. +static void pending_batches_resume(grpc_call_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (calld->enable_retries) { + start_retriable_subchannel_batches(elem, GRPC_ERROR_NONE); + return; + } + // Retries not enabled; send down batches as-is. + if (grpc_client_channel_trace.enabled()) { + size_t num_batches = 0; + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + if (calld->pending_batches[i].batch != nullptr) ++num_batches; + } + gpr_log(GPR_INFO, + "chand=%p calld=%p: starting %" PRIuPTR + " pending batches on subchannel_call=%p", + chand, calld, num_batches, calld->subchannel_call); + } + grpc_transport_stream_op_batch* + batches[GPR_ARRAY_SIZE(calld->pending_batches)]; + size_t num_batches = 0; + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + pending_batch* pending = &calld->pending_batches[i]; + grpc_transport_stream_op_batch* batch = pending->batch; + if (batch != nullptr) { + batches[num_batches++] = batch; + pending_batch_clear(calld, pending); + } + } + for (size_t i = 1; i < num_batches; ++i) { + grpc_transport_stream_op_batch* batch = batches[i]; + batch->handler_private.extra_arg = calld->subchannel_call; + GRPC_CLOSURE_INIT(&batch->handler_private.closure, + resume_pending_batch_in_call_combiner, batch, + grpc_schedule_on_exec_ctx); + GRPC_CALL_COMBINER_START(calld->call_combiner, + &batch->handler_private.closure, GRPC_ERROR_NONE, + "pending_batches_resume"); + } + GPR_ASSERT(num_batches > 0); + // Note: This will release the call combiner. + grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]); +} + +static void maybe_clear_pending_batch(grpc_call_element* elem, + pending_batch* pending) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + grpc_transport_stream_op_batch* batch = pending->batch; + // We clear the pending batch if all of its callbacks have been + // scheduled and reset to nullptr. 
+ if (batch->on_complete == nullptr && + (!batch->recv_initial_metadata || + batch->payload->recv_initial_metadata.recv_initial_metadata_ready == + nullptr) && + (!batch->recv_message || + batch->payload->recv_message.recv_message_ready == nullptr)) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand, + calld); + } + pending_batch_clear(calld, pending); + } +} + +// Returns true if all ops in the pending batch have been completed. +static bool pending_batch_is_completed( + pending_batch* pending, call_data* calld, + subchannel_call_retry_state* retry_state) { + if (pending->batch == nullptr || pending->batch->on_complete == nullptr) { + return false; + } + if (pending->batch->send_initial_metadata && + !retry_state->completed_send_initial_metadata) { + return false; + } + if (pending->batch->send_message && + retry_state->completed_send_message_count < + calld->send_messages->size()) { + return false; + } + if (pending->batch->send_trailing_metadata && + !retry_state->completed_send_trailing_metadata) { + return false; + } + if (pending->batch->recv_initial_metadata && + !retry_state->completed_recv_initial_metadata) { + return false; + } + if (pending->batch->recv_message && + retry_state->completed_recv_message_count < + retry_state->started_recv_message_count) { + return false; + } + if (pending->batch->recv_trailing_metadata && + !retry_state->completed_recv_trailing_metadata) { + return false; + } + return true; +} + +// Returns true if any op in the batch was not yet started. +static bool pending_batch_is_unstarted( + pending_batch* pending, call_data* calld, + subchannel_call_retry_state* retry_state) { + if (pending->batch == nullptr || pending->batch->on_complete == nullptr) { + return false; + } + if (pending->batch->send_initial_metadata && + !retry_state->started_send_initial_metadata) { + return true; + } + if (pending->batch->send_message && + retry_state->started_send_message_count < calld->send_messages->size()) { + return true; + } + if (pending->batch->send_trailing_metadata && + !retry_state->started_send_trailing_metadata) { + return true; + } + if (pending->batch->recv_initial_metadata && + !retry_state->started_recv_initial_metadata) { + return true; + } + if (pending->batch->recv_message && + retry_state->completed_recv_message_count == + retry_state->started_recv_message_count) { + return true; + } + if (pending->batch->recv_trailing_metadata && + !retry_state->started_recv_trailing_metadata) { + return true; + } + return false; +} + +// +// retry code +// + +// Commits the call so that no further retry attempts will be performed. +static void retry_commit(grpc_call_element* elem, + subchannel_call_retry_state* retry_state) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (calld->retry_committed) return; + calld->retry_committed = true; + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand, calld); + } + if (retry_state != nullptr) { + free_cached_send_op_data_after_commit(elem, retry_state); + } +} + +// Starts a retry after appropriate back-off. 
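
The do_retry() function below drives that back-off: it resets the subchannel call and then waits either for the server's push-back interval or for a jittered exponential delay built from the retry policy's initial_backoff, backoff_multiplier, max_backoff and the RETRY_BACKOFF_JITTER constant. As a rough standalone illustration only (not part of the patch; the real computation lives in grpc_core::BackOff), the delay could be modeled like this, with the parameter names mirroring the retry policy fields:

    // Hypothetical helper sketching a jittered exponential backoff.
    #include <algorithm>
    #include <cstdint>
    #include <random>

    int64_t BackoffDelayMs(int attempts_completed, int64_t initial_backoff_ms,
                           double backoff_multiplier, int64_t max_backoff_ms,
                           double jitter) {
      // Grow the base delay exponentially, capped at max_backoff_ms.
      double delay = static_cast<double>(initial_backoff_ms);
      for (int i = 1; i < attempts_completed; ++i) {
        delay = std::min(delay * backoff_multiplier,
                         static_cast<double>(max_backoff_ms));
      }
      // Apply +/- jitter so that many failed calls do not retry in lockstep.
      static thread_local std::mt19937 rng{std::random_device{}()};
      std::uniform_real_distribution<double> dist(1.0 - jitter, 1.0 + jitter);
      return static_cast<int64_t>(delay * dist(rng));
    }
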
+static void do_retry(grpc_call_element* elem, + subchannel_call_retry_state* retry_state, + grpc_millis server_pushback_ms) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + GPR_ASSERT(calld->method_params != nullptr); + const ClientChannelMethodParams::RetryPolicy* retry_policy = + calld->method_params->retry_policy(); + GPR_ASSERT(retry_policy != nullptr); + // Reset subchannel call and connected subchannel. + if (calld->subchannel_call != nullptr) { + GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call, + "client_channel_call_retry"); + calld->subchannel_call = nullptr; + } + if (calld->pick.connected_subchannel != nullptr) { + calld->pick.connected_subchannel.reset(); + } + // Compute backoff delay. + grpc_millis next_attempt_time; + if (server_pushback_ms >= 0) { + next_attempt_time = grpc_core::ExecCtx::Get()->Now() + server_pushback_ms; + calld->last_attempt_got_server_pushback = true; + } else { + if (calld->num_attempts_completed == 1 || + calld->last_attempt_got_server_pushback) { + calld->retry_backoff.Init( + grpc_core::BackOff::Options() + .set_initial_backoff(retry_policy->initial_backoff) + .set_multiplier(retry_policy->backoff_multiplier) + .set_jitter(RETRY_BACKOFF_JITTER) + .set_max_backoff(retry_policy->max_backoff)); + calld->last_attempt_got_server_pushback = false; + } + next_attempt_time = calld->retry_backoff->NextAttemptTime(); + } + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand, + calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now()); + } + // Schedule retry after computed delay. + GRPC_CLOSURE_INIT(&calld->pick_closure, start_pick_locked, elem, + grpc_combiner_scheduler(chand->combiner)); + grpc_timer_init(&calld->retry_timer, next_attempt_time, &calld->pick_closure); + // Update bookkeeping. + if (retry_state != nullptr) retry_state->retry_dispatched = true; +} + +// Returns true if the call is being retried. +static bool maybe_retry(grpc_call_element* elem, + subchannel_batch_data* batch_data, + grpc_status_code status, + grpc_mdelem* server_pushback_md) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + // Get retry policy. + if (calld->method_params == nullptr) return false; + const ClientChannelMethodParams::RetryPolicy* retry_policy = + calld->method_params->retry_policy(); + if (retry_policy == nullptr) return false; + // If we've already dispatched a retry from this call, return true. + // This catches the case where the batch has multiple callbacks + // (i.e., it includes either recv_message or recv_initial_metadata). + subchannel_call_retry_state* retry_state = nullptr; + if (batch_data != nullptr) { + retry_state = static_cast( + grpc_connected_subchannel_call_get_parent_data( + batch_data->subchannel_call)); + if (retry_state->retry_dispatched) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: retry already dispatched", chand, + calld); + } + return true; + } + } + // Check status. + if (status == GRPC_STATUS_OK) { + if (calld->retry_throttle_data != nullptr) { + calld->retry_throttle_data->RecordSuccess(); + } + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: call succeeded", chand, calld); + } + return false; + } + // Status is not OK. Check whether the status is retryable. 
+ if (!retry_policy->retryable_status_codes.Contains(status)) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: status %s not configured as retryable", chand, + calld, grpc_status_code_to_string(status)); + } + return false; + } + // Record the failure and check whether retries are throttled. + // Note that it's important for this check to come after the status + // code check above, since we should only record failures whose statuses + // match the configured retryable status codes, so that we don't count + // things like failures due to malformed requests (INVALID_ARGUMENT). + // Conversely, it's important for this to come before the remaining + // checks, so that we don't fail to record failures due to other factors. + if (calld->retry_throttle_data != nullptr && + !calld->retry_throttle_data->RecordFailure()) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: retries throttled", chand, calld); + } + return false; + } + // Check whether the call is committed. + if (calld->retry_committed) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: retries already committed", chand, + calld); + } + return false; + } + // Check whether we have retries remaining. + ++calld->num_attempts_completed; + if (calld->num_attempts_completed >= retry_policy->max_attempts) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: exceeded %d retry attempts", chand, + calld, retry_policy->max_attempts); + } + return false; + } + // If the call was cancelled from the surface, don't retry. + if (calld->cancel_error != GRPC_ERROR_NONE) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: call cancelled from surface, not retrying", + chand, calld); + } + return false; + } + // Check server push-back. + grpc_millis server_pushback_ms = -1; + if (server_pushback_md != nullptr) { + // If the value is "-1" or any other unparseable string, we do not retry. 
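
The push-back value arrives as the textual grpc-retry-pushback-ms metadata entry and is parsed below by grpc_parse_slice_to_uint32(). As an illustration only (the helper here is hypothetical, not the real parser), a strict decimal parse of this shape is what makes "-1" or any other non-numeric value fall into the "do not retry" branch:

    // Hypothetical strict decimal parse for the push-back header value.
    #include <cstdint>
    #include <string>

    bool ParsePushbackMs(const std::string& value, uint32_t* ms_out) {
      if (value.empty()) return false;
      uint64_t result = 0;
      for (char c : value) {
        if (c < '0' || c > '9') return false;  // rejects "-1" and other junk
        result = result * 10 + static_cast<uint64_t>(c - '0');
        if (result > UINT32_MAX) return false;  // reject overflow
      }
      *ms_out = static_cast<uint32_t>(result);
      return true;
    }
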
+ uint32_t ms; + if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: not retrying due to server push-back", + chand, calld); + } + return false; + } else { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: server push-back: retry in %u ms", + chand, calld, ms); + } + server_pushback_ms = (grpc_millis)ms; + } + } + do_retry(elem, retry_state, server_pushback_ms); + return true; +} + +// +// subchannel_batch_data +// + +static subchannel_batch_data* batch_data_create(grpc_call_element* elem, + int refcount) { + call_data* calld = static_cast(elem->call_data); + subchannel_call_retry_state* retry_state = + static_cast( + grpc_connected_subchannel_call_get_parent_data( + calld->subchannel_call)); + subchannel_batch_data* batch_data = static_cast( + gpr_arena_alloc(calld->arena, sizeof(*batch_data))); + batch_data->elem = elem; + batch_data->subchannel_call = + GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create"); + batch_data->batch.payload = &retry_state->batch_payload; + gpr_ref_init(&batch_data->refs, refcount); + GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data, + grpc_schedule_on_exec_ctx); + batch_data->batch.on_complete = &batch_data->on_complete; + GRPC_CALL_STACK_REF(calld->owning_call, "batch_data"); + return batch_data; +} + +static void batch_data_unref(subchannel_batch_data* batch_data) { + if (gpr_unref(&batch_data->refs)) { + if (batch_data->send_initial_metadata_storage != nullptr) { + grpc_metadata_batch_destroy(&batch_data->send_initial_metadata); + } + if (batch_data->send_trailing_metadata_storage != nullptr) { + grpc_metadata_batch_destroy(&batch_data->send_trailing_metadata); + } + if (batch_data->batch.recv_initial_metadata) { + grpc_metadata_batch_destroy(&batch_data->recv_initial_metadata); + } + if (batch_data->batch.recv_trailing_metadata) { + grpc_metadata_batch_destroy(&batch_data->recv_trailing_metadata); + } + GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref"); + call_data* calld = static_cast(batch_data->elem->call_data); + GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data"); + } +} + +// +// recv_initial_metadata callback handling +// + +// Invokes recv_initial_metadata_ready for a subchannel batch. +static void invoke_recv_initial_metadata_callback(void* arg, + grpc_error* error) { + subchannel_batch_data* batch_data = static_cast(arg); + channel_data* chand = + static_cast(batch_data->elem->channel_data); + call_data* calld = static_cast(batch_data->elem->call_data); + // Find pending batch. + pending_batch* pending = nullptr; + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch; + if (batch != nullptr && batch->recv_initial_metadata && + batch->payload->recv_initial_metadata.recv_initial_metadata_ready != + nullptr) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: invoking recv_initial_metadata_ready for " + "pending batch at index %" PRIuPTR, + chand, calld, i); + } + pending = &calld->pending_batches[i]; + break; + } + } + GPR_ASSERT(pending != nullptr); + // Return metadata. + grpc_metadata_batch_move( + &batch_data->recv_initial_metadata, + pending->batch->payload->recv_initial_metadata.recv_initial_metadata); + // Update bookkeeping. 
+ // Note: Need to do this before invoking the callback, since invoking + // the callback will result in yielding the call combiner. + grpc_closure* recv_initial_metadata_ready = + pending->batch->payload->recv_initial_metadata + .recv_initial_metadata_ready; + pending->batch->payload->recv_initial_metadata.recv_initial_metadata_ready = + nullptr; + maybe_clear_pending_batch(batch_data->elem, pending); + batch_data_unref(batch_data); + // Invoke callback. + GRPC_CLOSURE_RUN(recv_initial_metadata_ready, GRPC_ERROR_REF(error)); +} + +// Intercepts recv_initial_metadata_ready callback for retries. +// Commits the call and returns the initial metadata up the stack. +static void recv_initial_metadata_ready(void* arg, grpc_error* error) { + subchannel_batch_data* batch_data = static_cast(arg); + grpc_call_element* elem = batch_data->elem; + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: got recv_initial_metadata_ready, error=%s", + chand, calld, grpc_error_string(error)); + } + subchannel_call_retry_state* retry_state = + static_cast( + grpc_connected_subchannel_call_get_parent_data( + batch_data->subchannel_call)); + // If we got an error or a Trailers-Only response and have not yet gotten + // the recv_trailing_metadata on_complete callback, then defer + // propagating this callback back to the surface. We can evaluate whether + // to retry when recv_trailing_metadata comes back. + if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) && + !retry_state->completed_recv_trailing_metadata) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: deferring recv_initial_metadata_ready " + "(Trailers-Only)", + chand, calld); + } + retry_state->recv_initial_metadata_ready_deferred_batch = batch_data; + retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error); + if (!retry_state->started_recv_trailing_metadata) { + // recv_trailing_metadata not yet started by application; start it + // ourselves to get status. + start_internal_recv_trailing_metadata(elem); + } else { + GRPC_CALL_COMBINER_STOP( + calld->call_combiner, + "recv_initial_metadata_ready trailers-only or error"); + } + return; + } + // Received valid initial metadata, so commit the call. + retry_commit(elem, retry_state); + // Manually invoking a callback function; it does not take ownership of error. + invoke_recv_initial_metadata_callback(batch_data, error); + GRPC_ERROR_UNREF(error); +} + +// +// recv_message callback handling +// + +// Invokes recv_message_ready for a subchannel batch. +static void invoke_recv_message_callback(void* arg, grpc_error* error) { + subchannel_batch_data* batch_data = static_cast(arg); + channel_data* chand = + static_cast(batch_data->elem->channel_data); + call_data* calld = static_cast(batch_data->elem->call_data); + // Find pending op. 
+ pending_batch* pending = nullptr; + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch; + if (batch != nullptr && batch->recv_message && + batch->payload->recv_message.recv_message_ready != nullptr) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: invoking recv_message_ready for " + "pending batch at index %" PRIuPTR, + chand, calld, i); + } + pending = &calld->pending_batches[i]; + break; + } + } + GPR_ASSERT(pending != nullptr); + // Return payload. + *pending->batch->payload->recv_message.recv_message = + std::move(batch_data->recv_message); + // Update bookkeeping. + // Note: Need to do this before invoking the callback, since invoking + // the callback will result in yielding the call combiner. + grpc_closure* recv_message_ready = + pending->batch->payload->recv_message.recv_message_ready; + pending->batch->payload->recv_message.recv_message_ready = nullptr; + maybe_clear_pending_batch(batch_data->elem, pending); + batch_data_unref(batch_data); + // Invoke callback. + GRPC_CLOSURE_RUN(recv_message_ready, GRPC_ERROR_REF(error)); +} + +// Intercepts recv_message_ready callback for retries. +// Commits the call and returns the message up the stack. +static void recv_message_ready(void* arg, grpc_error* error) { + subchannel_batch_data* batch_data = static_cast(arg); + grpc_call_element* elem = batch_data->elem; + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: got recv_message_ready, error=%s", + chand, calld, grpc_error_string(error)); + } + subchannel_call_retry_state* retry_state = + static_cast( + grpc_connected_subchannel_call_get_parent_data( + batch_data->subchannel_call)); + // If we got an error or the payload was nullptr and we have not yet gotten + // the recv_trailing_metadata on_complete callback, then defer + // propagating this callback back to the surface. We can evaluate whether + // to retry when recv_trailing_metadata comes back. + if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) && + !retry_state->completed_recv_trailing_metadata) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: deferring recv_message_ready (nullptr " + "message and recv_trailing_metadata pending)", + chand, calld); + } + retry_state->recv_message_ready_deferred_batch = batch_data; + retry_state->recv_message_error = GRPC_ERROR_REF(error); + if (!retry_state->started_recv_trailing_metadata) { + // recv_trailing_metadata not yet started by application; start it + // ourselves to get status. + start_internal_recv_trailing_metadata(elem); + } else { + GRPC_CALL_COMBINER_STOP(calld->call_combiner, "recv_message_ready null"); + } + return; + } + // Received a valid message, so commit the call. + retry_commit(elem, retry_state); + // Manually invoking a callback function; it does not take ownership of error. + invoke_recv_message_callback(batch_data, error); + GRPC_ERROR_UNREF(error); +} + +// +// list of closures to execute in call combiner +// + +// Represents a closure that needs to run in the call combiner as part of +// starting or completing a batch. 
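
The closure_to_execute list defined next, together with execute_closures_in_call_combiner(), implements a recurring pattern in this filter: while already holding the call combiner, at most one closure can be scheduled directly, and every additional closure must re-enter the combiner. A minimal standalone sketch of that pattern, with a hypothetical Combiner type and function-object closures (not part of the patch):

    #include <deque>
    #include <functional>
    #include <utility>
    #include <vector>

    struct Combiner {
      // Closures waiting to re-enter the combiner one at a time.
      std::deque<std::function<void()>> queue;
      void Start(std::function<void()> fn) { queue.push_back(std::move(fn)); }
    };

    void ExecuteClosures(Combiner* combiner,
                         std::vector<std::function<void()>> closures) {
      if (closures.empty()) return;  // the real code yields the combiner here
      closures[0]();                 // we already hold the combiner
      for (size_t i = 1; i < closures.size(); ++i) {
        combiner->Start(std::move(closures[i]));  // queued for later execution
      }
    }
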
+typedef struct { + grpc_closure* closure; + grpc_error* error; + const char* reason; + bool free_reason = false; +} closure_to_execute; + +static void execute_closures_in_call_combiner(grpc_call_element* elem, + const char* caller, + closure_to_execute* closures, + size_t num_closures) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + // Note that the call combiner will be yielded for each closure that + // we schedule. We're already running in the call combiner, so one of + // the closures can be scheduled directly, but the others will + // have to re-enter the call combiner. + if (num_closures > 0) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: %s starting closure: %s", chand, + calld, caller, closures[0].reason); + } + GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error); + if (closures[0].free_reason) { + gpr_free(const_cast(closures[0].reason)); + } + for (size_t i = 1; i < num_closures; ++i) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: %s starting closure in call combiner: %s", + chand, calld, caller, closures[i].reason); + } + GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure, + closures[i].error, closures[i].reason); + if (closures[i].free_reason) { + gpr_free(const_cast(closures[i].reason)); + } + } + } else { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: no closures to run for %s", chand, + calld, caller); + } + GRPC_CALL_COMBINER_STOP(calld->call_combiner, "no closures to run"); + } +} + +// +// on_complete callback handling +// + +// Updates retry_state to reflect the ops completed in batch_data. +static void update_retry_state_for_completed_batch( + subchannel_batch_data* batch_data, + subchannel_call_retry_state* retry_state) { + if (batch_data->batch.send_initial_metadata) { + retry_state->completed_send_initial_metadata = true; + } + if (batch_data->batch.send_message) { + ++retry_state->completed_send_message_count; + } + if (batch_data->batch.send_trailing_metadata) { + retry_state->completed_send_trailing_metadata = true; + } + if (batch_data->batch.recv_initial_metadata) { + retry_state->completed_recv_initial_metadata = true; + } + if (batch_data->batch.recv_message) { + ++retry_state->completed_recv_message_count; + } + if (batch_data->batch.recv_trailing_metadata) { + retry_state->completed_recv_trailing_metadata = true; + } +} + +// Adds any necessary closures for deferred recv_initial_metadata and +// recv_message callbacks to closures, updating *num_closures as needed. +static void add_closures_for_deferred_recv_callbacks( + subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state, + closure_to_execute* closures, size_t* num_closures) { + if (batch_data->batch.recv_trailing_metadata) { + // Add closure for deferred recv_initial_metadata_ready. + if (retry_state->recv_initial_metadata_ready_deferred_batch != nullptr) { + closure_to_execute* closure = &closures[(*num_closures)++]; + closure->closure = GRPC_CLOSURE_INIT( + &batch_data->recv_initial_metadata_ready, + invoke_recv_initial_metadata_callback, + retry_state->recv_initial_metadata_ready_deferred_batch, + grpc_schedule_on_exec_ctx); + closure->error = retry_state->recv_initial_metadata_error; + closure->reason = "resuming recv_initial_metadata_ready"; + retry_state->recv_initial_metadata_ready_deferred_batch = nullptr; + } + // Add closure for deferred recv_message_ready. 
+ if (retry_state->recv_message_ready_deferred_batch != nullptr) { + closure_to_execute* closure = &closures[(*num_closures)++]; + closure->closure = GRPC_CLOSURE_INIT( + &batch_data->recv_message_ready, invoke_recv_message_callback, + retry_state->recv_message_ready_deferred_batch, + grpc_schedule_on_exec_ctx); + closure->error = retry_state->recv_message_error; + closure->reason = "resuming recv_message_ready"; + retry_state->recv_message_ready_deferred_batch = nullptr; + } + } +} + +// If there are any cached ops to replay or pending ops to start on the +// subchannel call, adds a closure to closures to invoke +// start_retriable_subchannel_batches(), updating *num_closures as needed. +static void add_closures_for_replay_or_pending_send_ops( + grpc_call_element* elem, subchannel_batch_data* batch_data, + subchannel_call_retry_state* retry_state, closure_to_execute* closures, + size_t* num_closures) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + bool have_pending_send_message_ops = + retry_state->started_send_message_count < calld->send_messages->size(); + bool have_pending_send_trailing_metadata_op = + calld->seen_send_trailing_metadata && + !retry_state->started_send_trailing_metadata; + if (!have_pending_send_message_ops && + !have_pending_send_trailing_metadata_op) { + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + pending_batch* pending = &calld->pending_batches[i]; + grpc_transport_stream_op_batch* batch = pending->batch; + if (batch == nullptr || pending->send_ops_cached) continue; + if (batch->send_message) have_pending_send_message_ops = true; + if (batch->send_trailing_metadata) { + have_pending_send_trailing_metadata_op = true; + } + } + } + if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: starting next batch for pending send op(s)", + chand, calld); + } + closure_to_execute* closure = &closures[(*num_closures)++]; + closure->closure = GRPC_CLOSURE_INIT( + &batch_data->batch.handler_private.closure, + start_retriable_subchannel_batches, elem, grpc_schedule_on_exec_ctx); + closure->error = GRPC_ERROR_NONE; + closure->reason = "starting next batch for send_* op(s)"; + } +} + +// For any pending batch completed in batch_data, adds the necessary +// completion closures to closures, updating *num_closures as needed. +static void add_closures_for_completed_pending_batches( + grpc_call_element* elem, subchannel_batch_data* batch_data, + subchannel_call_retry_state* retry_state, grpc_error* error, + closure_to_execute* closures, size_t* num_closures) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + pending_batch* pending = &calld->pending_batches[i]; + if (pending_batch_is_completed(pending, calld, retry_state)) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: pending batch completed at index %" PRIuPTR, + chand, calld, i); + } + // Copy the trailing metadata to return it to the surface. 
+ if (batch_data->batch.recv_trailing_metadata) { + grpc_metadata_batch_move(&batch_data->recv_trailing_metadata, + pending->batch->payload->recv_trailing_metadata + .recv_trailing_metadata); + } + closure_to_execute* closure = &closures[(*num_closures)++]; + closure->closure = pending->batch->on_complete; + closure->error = GRPC_ERROR_REF(error); + closure->reason = "on_complete for pending batch"; + pending->batch->on_complete = nullptr; + maybe_clear_pending_batch(elem, pending); + } + } + GRPC_ERROR_UNREF(error); +} + +// For any pending batch containing an op that has not yet been started, +// adds the pending batch's completion closures to closures, updating +// *num_closures as needed. +static void add_closures_to_fail_unstarted_pending_batches( + grpc_call_element* elem, subchannel_call_retry_state* retry_state, + grpc_error* error, closure_to_execute* closures, size_t* num_closures) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + pending_batch* pending = &calld->pending_batches[i]; + if (pending_batch_is_unstarted(pending, calld, retry_state)) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: failing unstarted pending batch at index " + "%" PRIuPTR, + chand, calld, i); + } + if (pending->batch->recv_initial_metadata) { + closure_to_execute* closure = &closures[(*num_closures)++]; + closure->closure = pending->batch->payload->recv_initial_metadata + .recv_initial_metadata_ready; + closure->error = GRPC_ERROR_REF(error); + closure->reason = + "failing recv_initial_metadata_ready for pending batch"; + pending->batch->payload->recv_initial_metadata + .recv_initial_metadata_ready = nullptr; + } + if (pending->batch->recv_message) { + *pending->batch->payload->recv_message.recv_message = nullptr; + closure_to_execute* closure = &closures[(*num_closures)++]; + closure->closure = + pending->batch->payload->recv_message.recv_message_ready; + closure->error = GRPC_ERROR_REF(error); + closure->reason = "failing recv_message_ready for pending batch"; + pending->batch->payload->recv_message.recv_message_ready = nullptr; + } + closure_to_execute* closure = &closures[(*num_closures)++]; + closure->closure = pending->batch->on_complete; + closure->error = GRPC_ERROR_REF(error); + closure->reason = "failing on_complete for pending batch"; + pending->batch->on_complete = nullptr; + maybe_clear_pending_batch(elem, pending); + } + } + GRPC_ERROR_UNREF(error); +} + +// Callback used to intercept on_complete from subchannel calls. +// Called only when retries are enabled. +static void on_complete(void* arg, grpc_error* error) { + subchannel_batch_data* batch_data = static_cast(arg); + grpc_call_element* elem = batch_data->elem; + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch); + gpr_log(GPR_INFO, "chand=%p calld=%p: got on_complete, error=%s, batch=%s", + chand, calld, grpc_error_string(error), batch_str); + gpr_free(batch_str); + } + subchannel_call_retry_state* retry_state = + static_cast( + grpc_connected_subchannel_call_get_parent_data( + batch_data->subchannel_call)); + // If we have previously completed recv_trailing_metadata, then the + // call is finished. 
+ bool call_finished = retry_state->completed_recv_trailing_metadata; + // Record whether we were already committed before receiving this callback. + const bool previously_committed = calld->retry_committed; + // Update bookkeeping in retry_state. + update_retry_state_for_completed_batch(batch_data, retry_state); + if (call_finished) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: call already finished", chand, + calld); + } + } else { + // Check if this batch finished the call, and if so, get its status. + // The call is finished if either (a) this callback was invoked with + // an error or (b) we receive status. + grpc_status_code status = GRPC_STATUS_OK; + grpc_mdelem* server_pushback_md = nullptr; + if (error != GRPC_ERROR_NONE) { // Case (a). + call_finished = true; + grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr, + nullptr); + } else if (batch_data->batch.recv_trailing_metadata) { // Case (b). + call_finished = true; + grpc_metadata_batch* md_batch = + batch_data->batch.payload->recv_trailing_metadata + .recv_trailing_metadata; + GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr); + status = grpc_get_status_code_from_metadata( + md_batch->idx.named.grpc_status->md); + if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) { + server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md; + } + } + // If the call just finished, check if we should retry. + if (call_finished) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand, + calld, grpc_status_code_to_string(status)); + } + if (maybe_retry(elem, batch_data, status, server_pushback_md)) { + // Unref batch_data for deferred recv_initial_metadata_ready or + // recv_message_ready callbacks, if any. + if (batch_data->batch.recv_trailing_metadata && + retry_state->recv_initial_metadata_ready_deferred_batch != + nullptr) { + batch_data_unref(batch_data); + GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error); + } + if (batch_data->batch.recv_trailing_metadata && + retry_state->recv_message_ready_deferred_batch != nullptr) { + batch_data_unref(batch_data); + GRPC_ERROR_UNREF(retry_state->recv_message_error); + } + batch_data_unref(batch_data); + return; + } + // Not retrying, so commit the call. + retry_commit(elem, retry_state); + } + } + // If we were already committed before receiving this callback, free + // cached data for send ops that we've just completed. (If the call has + // just now finished, the call to retry_commit() above will have freed all + // cached send ops, so we don't need to do it here.) + if (previously_committed) { + free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state); + } + // Call not being retried. + // Construct list of closures to execute. + // Max number of closures is number of pending batches plus one for + // each of: + // - recv_initial_metadata_ready (either deferred or unstarted) + // - recv_message_ready (either deferred or unstarted) + // - starting a new batch for pending send ops + closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches) + 3]; + size_t num_closures = 0; + // If there are deferred recv_initial_metadata_ready or recv_message_ready + // callbacks, add them to closures. + add_closures_for_deferred_recv_callbacks(batch_data, retry_state, closures, + &num_closures); + // Find pending batches whose ops are now complete and add their + // on_complete callbacks to closures. 
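
In case (b) above, on_complete() derives the call's final status, and an optional push-back delay, directly from the trailing metadata. A simplified sketch of that extraction, using a map-based metadata type as a hypothetical stand-in for grpc_metadata_batch (not part of the patch):

    #include <cstdint>
    #include <map>
    #include <optional>
    #include <string>

    struct TrailingMetadata {
      std::map<std::string, std::string> entries;
    };

    struct FinalStatus {
      int grpc_status = 0;                 // value of "grpc-status"
      std::optional<int64_t> pushback_ms;  // "grpc-retry-pushback-ms", if any
    };

    FinalStatus ExtractFinalStatus(const TrailingMetadata& md) {
      FinalStatus result;
      auto status_it = md.entries.find("grpc-status");
      if (status_it != md.entries.end()) {
        result.grpc_status = std::stoi(status_it->second);
      }
      auto pushback_it = md.entries.find("grpc-retry-pushback-ms");
      if (pushback_it != md.entries.end()) {
        result.pushback_ms = std::stoll(pushback_it->second);
      }
      return result;
    }
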
+ add_closures_for_completed_pending_batches(elem, batch_data, retry_state, + GRPC_ERROR_REF(error), closures, + &num_closures); + // Add closures to handle any pending batches that have not yet been started. + // If the call is finished, we fail these batches; otherwise, we add a + // callback to start_retriable_subchannel_batches() to start them on + // the subchannel call. + if (call_finished) { + add_closures_to_fail_unstarted_pending_batches( + elem, retry_state, GRPC_ERROR_REF(error), closures, &num_closures); + } else { + add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state, + closures, &num_closures); + } + // Don't need batch_data anymore. + batch_data_unref(batch_data); + // Schedule all of the closures identified above. + execute_closures_in_call_combiner(elem, "on_complete", closures, + num_closures); +} + +// +// subchannel batch construction +// + +// Helper function used to start a subchannel batch in the call combiner. +static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) { + grpc_transport_stream_op_batch* batch = + static_cast(arg); + grpc_subchannel_call* subchannel_call = + static_cast(batch->handler_private.extra_arg); + // Note: This will release the call combiner. + grpc_subchannel_call_process_op(subchannel_call, batch); +} + +// Adds a closure to closures that will execute batch in the call combiner. +static void add_closure_for_subchannel_batch( + call_data* calld, grpc_transport_stream_op_batch* batch, + closure_to_execute* closures, size_t* num_closures) { + batch->handler_private.extra_arg = calld->subchannel_call; + GRPC_CLOSURE_INIT(&batch->handler_private.closure, + start_batch_in_call_combiner, batch, + grpc_schedule_on_exec_ctx); + closure_to_execute* closure = &closures[(*num_closures)++]; + closure->closure = &batch->handler_private.closure; + closure->error = GRPC_ERROR_NONE; + // If the tracer is enabled, we log a more detailed message, which + // requires dynamic allocation. This will be freed in + // start_retriable_subchannel_batches(). + if (grpc_client_channel_trace.enabled()) { + char* batch_str = grpc_transport_stream_op_batch_string(batch); + gpr_asprintf(const_cast(&closure->reason), + "starting batch in call combiner: %s", batch_str); + gpr_free(batch_str); + closure->free_reason = true; + } else { + closure->reason = "start_subchannel_batch"; + } +} + +// Adds retriable send_initial_metadata op to batch_data. +static void add_retriable_send_initial_metadata_op( + call_data* calld, subchannel_call_retry_state* retry_state, + subchannel_batch_data* batch_data) { + // Maps the number of retries to the corresponding metadata value slice. + static const grpc_slice* retry_count_strings[] = { + &GRPC_MDSTR_1, &GRPC_MDSTR_2, &GRPC_MDSTR_3, &GRPC_MDSTR_4}; + // We need to make a copy of the metadata batch for each attempt, since + // the filters in the subchannel stack may modify this batch, and we don't + // want those modifications to be passed forward to subsequent attempts. + // + // If we've already completed one or more attempts, add the + // grpc-retry-attempts header. 
+  batch_data->send_initial_metadata_storage =
+      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
+          calld->arena,
+          sizeof(grpc_linked_mdelem) *
+              (calld->send_initial_metadata.list.count +
+               (calld->num_attempts_completed > 0))));
+  grpc_metadata_batch_copy(&calld->send_initial_metadata,
+                           &batch_data->send_initial_metadata,
+                           batch_data->send_initial_metadata_storage);
+  if (batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts !=
+      nullptr) {
+    grpc_metadata_batch_remove(
+        &batch_data->send_initial_metadata,
+        batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts);
+  }
+  if (calld->num_attempts_completed > 0) {
+    grpc_mdelem retry_md = grpc_mdelem_from_slices(
+        GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
+        *retry_count_strings[calld->num_attempts_completed - 1]);
+    grpc_error* error = grpc_metadata_batch_add_tail(
+        &batch_data->send_initial_metadata,
+        &batch_data->send_initial_metadata_storage[calld->send_initial_metadata
+                                                       .list.count],
+        retry_md);
+    if (error != GRPC_ERROR_NONE) {
+      gpr_log(GPR_ERROR, "error adding retry metadata: %s",
+              grpc_error_string(error));
+      GPR_ASSERT(false);
+    }
+  }
+  retry_state->started_send_initial_metadata = true;
+  batch_data->batch.send_initial_metadata = true;
+  batch_data->batch.payload->send_initial_metadata.send_initial_metadata =
+      &batch_data->send_initial_metadata;
+  batch_data->batch.payload->send_initial_metadata.send_initial_metadata_flags =
+      calld->send_initial_metadata_flags;
+  batch_data->batch.payload->send_initial_metadata.peer_string =
+      calld->peer_string;
+}
+
+// Adds retriable send_message op to batch_data.
+static void add_retriable_send_message_op(
+    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
+    subchannel_batch_data* batch_data) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_INFO,
+            "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
+            chand, calld, retry_state->started_send_message_count);
+  }
+  grpc_core::ByteStreamCache* cache =
+      (*calld->send_messages)[retry_state->started_send_message_count];
+  ++retry_state->started_send_message_count;
+  batch_data->send_message.Init(cache);
+  batch_data->batch.send_message = true;
+  batch_data->batch.payload->send_message.send_message.reset(
+      batch_data->send_message.get());
+}
+
+// Adds retriable send_trailing_metadata op to batch_data.
+static void add_retriable_send_trailing_metadata_op(
+    call_data* calld, subchannel_call_retry_state* retry_state,
+    subchannel_batch_data* batch_data) {
+  // We need to make a copy of the metadata batch for each attempt, since
+  // the filters in the subchannel stack may modify this batch, and we don't
+  // want those modifications to be passed forward to subsequent attempts.
+  batch_data->send_trailing_metadata_storage =
+      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
+          calld->arena,
+          sizeof(grpc_linked_mdelem) *
+              calld->send_trailing_metadata.list.count));
+  grpc_metadata_batch_copy(&calld->send_trailing_metadata,
+                           &batch_data->send_trailing_metadata,
+                           batch_data->send_trailing_metadata_storage);
+  retry_state->started_send_trailing_metadata = true;
+  batch_data->batch.send_trailing_metadata = true;
+  batch_data->batch.payload->send_trailing_metadata.send_trailing_metadata =
+      &batch_data->send_trailing_metadata;
+}
+
+// Adds retriable recv_initial_metadata op to batch_data.
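
add_retriable_send_initial_metadata_op() above copies the client's initial metadata for every attempt and, from the second attempt on, appends the grpc-previous-rpc-attempts header. The same idea with simplified, hypothetical types (not part of the patch; the real code uses grpc_metadata_batch_copy() and grpc_metadata_batch_add_tail()):

    #include <string>
    #include <utility>
    #include <vector>

    using Metadata = std::vector<std::pair<std::string, std::string>>;

    Metadata BuildAttemptMetadata(const Metadata& original,
                                  int num_attempts_completed) {
      // Each attempt gets its own copy, so filters on one attempt cannot
      // leak modifications into later attempts.
      Metadata copy = original;
      if (num_attempts_completed > 0) {
        copy.emplace_back("grpc-previous-rpc-attempts",
                          std::to_string(num_attempts_completed));
      }
      return copy;
    }
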
+static void add_retriable_recv_initial_metadata_op( + call_data* calld, subchannel_call_retry_state* retry_state, + subchannel_batch_data* batch_data) { + retry_state->started_recv_initial_metadata = true; + batch_data->batch.recv_initial_metadata = true; + grpc_metadata_batch_init(&batch_data->recv_initial_metadata); + batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata = + &batch_data->recv_initial_metadata; + batch_data->batch.payload->recv_initial_metadata.trailing_metadata_available = + &batch_data->trailing_metadata_available; + GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready, + recv_initial_metadata_ready, batch_data, + grpc_schedule_on_exec_ctx); + batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata_ready = + &batch_data->recv_initial_metadata_ready; +} + +// Adds retriable recv_message op to batch_data. +static void add_retriable_recv_message_op( + call_data* calld, subchannel_call_retry_state* retry_state, + subchannel_batch_data* batch_data) { + ++retry_state->started_recv_message_count; + batch_data->batch.recv_message = true; + batch_data->batch.payload->recv_message.recv_message = + &batch_data->recv_message; + GRPC_CLOSURE_INIT(&batch_data->recv_message_ready, recv_message_ready, + batch_data, grpc_schedule_on_exec_ctx); + batch_data->batch.payload->recv_message.recv_message_ready = + &batch_data->recv_message_ready; +} + +// Adds retriable recv_trailing_metadata op to batch_data. +static void add_retriable_recv_trailing_metadata_op( + call_data* calld, subchannel_call_retry_state* retry_state, + subchannel_batch_data* batch_data) { + retry_state->started_recv_trailing_metadata = true; + batch_data->batch.recv_trailing_metadata = true; + grpc_metadata_batch_init(&batch_data->recv_trailing_metadata); + batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata = + &batch_data->recv_trailing_metadata; + batch_data->batch.collect_stats = true; + batch_data->batch.payload->collect_stats.collect_stats = + &batch_data->collect_stats; +} + +// Helper function used to start a recv_trailing_metadata batch. This +// is used in the case where a recv_initial_metadata or recv_message +// op fails in a way that we know the call is over but when the application +// has not yet started its own recv_trailing_metadata op. +static void start_internal_recv_trailing_metadata(grpc_call_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: call failed but recv_trailing_metadata not " + "started; starting it internally", + chand, calld); + } + subchannel_call_retry_state* retry_state = + static_cast( + grpc_connected_subchannel_call_get_parent_data( + calld->subchannel_call)); + // Create batch_data with 2 refs, since this batch will be unreffed twice: + // once when the subchannel batch returns, and again when we actually get + // a recv_trailing_metadata op from the surface. + subchannel_batch_data* batch_data = batch_data_create(elem, 2); + add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data); + retry_state->recv_trailing_metadata_internal_batch = batch_data; + // Note: This will release the call combiner. + grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch); +} + +// If there are any cached send ops that need to be replayed on the +// current subchannel call, creates and returns a new subchannel batch +// to replay those ops. 
Otherwise, returns nullptr. +static subchannel_batch_data* maybe_create_subchannel_batch_for_replay( + grpc_call_element* elem, subchannel_call_retry_state* retry_state) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + subchannel_batch_data* replay_batch_data = nullptr; + // send_initial_metadata. + if (calld->seen_send_initial_metadata && + !retry_state->started_send_initial_metadata && + !calld->pending_send_initial_metadata) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: replaying previously completed " + "send_initial_metadata op", + chand, calld); + } + replay_batch_data = batch_data_create(elem, 1); + add_retriable_send_initial_metadata_op(calld, retry_state, + replay_batch_data); + } + // send_message. + // Note that we can only have one send_message op in flight at a time. + if (retry_state->started_send_message_count < calld->send_messages->size() && + retry_state->started_send_message_count == + retry_state->completed_send_message_count && + !calld->pending_send_message) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: replaying previously completed " + "send_message op", + chand, calld); + } + if (replay_batch_data == nullptr) { + replay_batch_data = batch_data_create(elem, 1); + } + add_retriable_send_message_op(elem, retry_state, replay_batch_data); + } + // send_trailing_metadata. + // Note that we only add this op if we have no more send_message ops + // to start, since we can't send down any more send_message ops after + // send_trailing_metadata. + if (calld->seen_send_trailing_metadata && + retry_state->started_send_message_count == calld->send_messages->size() && + !retry_state->started_send_trailing_metadata && + !calld->pending_send_trailing_metadata) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: replaying previously completed " + "send_trailing_metadata op", + chand, calld); + } + if (replay_batch_data == nullptr) { + replay_batch_data = batch_data_create(elem, 1); + } + add_retriable_send_trailing_metadata_op(calld, retry_state, + replay_batch_data); + } + return replay_batch_data; +} + +// Adds subchannel batches for pending batches to batches, updating +// *num_batches as needed. +static void add_subchannel_batches_for_pending_batches( + grpc_call_element* elem, subchannel_call_retry_state* retry_state, + closure_to_execute* closures, size_t* num_closures) { + call_data* calld = static_cast(elem->call_data); + for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { + pending_batch* pending = &calld->pending_batches[i]; + grpc_transport_stream_op_batch* batch = pending->batch; + if (batch == nullptr) continue; + // Skip any batch that either (a) has already been started on this + // subchannel call or (b) we can't start yet because we're still + // replaying send ops that need to be completed first. + // TODO(roth): Note that if any one op in the batch can't be sent + // yet due to ops that we're replaying, we don't start any of the ops + // in the batch. This is probably okay, but it could conceivably + // lead to increased latency in some cases -- e.g., we could delay + // starting a recv op due to it being in the same batch with a send + // op. If/when we revamp the callback protocol in + // transport_stream_op_batch, we may be able to fix this. 
+ if (batch->send_initial_metadata && + retry_state->started_send_initial_metadata) { + continue; + } + if (batch->send_message && retry_state->completed_send_message_count < + retry_state->started_send_message_count) { + continue; + } + // Note that we only start send_trailing_metadata if we have no more + // send_message ops to start, since we can't send down any more + // send_message ops after send_trailing_metadata. + if (batch->send_trailing_metadata && + (retry_state->started_send_message_count + batch->send_message < + calld->send_messages->size() || + retry_state->started_send_trailing_metadata)) { + continue; + } + if (batch->recv_initial_metadata && + retry_state->started_recv_initial_metadata) { + continue; + } + if (batch->recv_message && retry_state->completed_recv_message_count < + retry_state->started_recv_message_count) { + continue; + } + if (batch->recv_trailing_metadata && + retry_state->started_recv_trailing_metadata) { + // If we previously completed a recv_trailing_metadata op + // initiated by start_internal_recv_trailing_metadata(), use the + // result of that instead of trying to re-start this op. + if (retry_state->recv_trailing_metadata_internal_batch != nullptr) { + // If the batch completed, then trigger the completion callback + // directly, so that we return the previously returned results to + // the application. Otherwise, just unref the internally + // started subchannel batch, since we'll propagate the + // completion when it completes. + if (retry_state->completed_recv_trailing_metadata) { + subchannel_batch_data* batch_data = + retry_state->recv_trailing_metadata_internal_batch; + closure_to_execute* closure = &closures[(*num_closures)++]; + closure->closure = &batch_data->on_complete; + // Batches containing recv_trailing_metadata always succeed. + closure->error = GRPC_ERROR_NONE; + closure->reason = + "re-executing on_complete for recv_trailing_metadata " + "to propagate internally triggered result"; + } else { + batch_data_unref(retry_state->recv_trailing_metadata_internal_batch); + } + retry_state->recv_trailing_metadata_internal_batch = nullptr; + } + continue; + } + // If we're not retrying, just send the batch as-is. + if (calld->method_params == nullptr || + calld->method_params->retry_policy() == nullptr || + calld->retry_committed) { + add_closure_for_subchannel_batch(calld, batch, closures, num_closures); + pending_batch_clear(calld, pending); + continue; + } + // Create batch with the right number of callbacks. + const int num_callbacks = + 1 + batch->recv_initial_metadata + batch->recv_message; + subchannel_batch_data* batch_data = batch_data_create(elem, num_callbacks); + // Cache send ops if needed. + maybe_cache_send_ops_for_batch(calld, pending); + // send_initial_metadata. + if (batch->send_initial_metadata) { + add_retriable_send_initial_metadata_op(calld, retry_state, batch_data); + } + // send_message. + if (batch->send_message) { + add_retriable_send_message_op(elem, retry_state, batch_data); + } + // send_trailing_metadata. + if (batch->send_trailing_metadata) { + add_retriable_send_trailing_metadata_op(calld, retry_state, batch_data); + } + // recv_initial_metadata. + if (batch->recv_initial_metadata) { + // recv_flags is only used on the server side. + GPR_ASSERT(batch->payload->recv_initial_metadata.recv_flags == nullptr); + add_retriable_recv_initial_metadata_op(calld, retry_state, batch_data); + } + // recv_message. 
+ if (batch->recv_message) { + add_retriable_recv_message_op(calld, retry_state, batch_data); + } + // recv_trailing_metadata. + if (batch->recv_trailing_metadata) { + GPR_ASSERT(batch->collect_stats); + add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data); + } + add_closure_for_subchannel_batch(calld, &batch_data->batch, closures, + num_closures); + } +} + +// Constructs and starts whatever subchannel batches are needed on the +// subchannel call. +static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) { + grpc_call_element* elem = static_cast(arg); + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: constructing retriable batches", + chand, calld); + } + subchannel_call_retry_state* retry_state = + static_cast( + grpc_connected_subchannel_call_get_parent_data( + calld->subchannel_call)); + // Construct list of closures to execute, one for each pending batch. + // We can start up to 6 batches. + closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches)]; + size_t num_closures = 0; + // Replay previously-returned send_* ops if needed. + subchannel_batch_data* replay_batch_data = + maybe_create_subchannel_batch_for_replay(elem, retry_state); + if (replay_batch_data != nullptr) { + add_closure_for_subchannel_batch(calld, &replay_batch_data->batch, closures, + &num_closures); + } + // Now add pending batches. + add_subchannel_batches_for_pending_batches(elem, retry_state, closures, + &num_closures); + // Start batches on subchannel call. + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: starting %" PRIuPTR + " retriable batches on subchannel_call=%p", + chand, calld, num_closures, calld->subchannel_call); + } + execute_closures_in_call_combiner(elem, "start_retriable_subchannel_batches", + closures, num_closures); +} + +// +// LB pick +// + +static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + const size_t parent_data_size = + calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0; + const grpc_core::ConnectedSubchannel::CallArgs call_args = { + calld->pollent, // pollent + calld->path, // path + calld->call_start_time, // start_time + calld->deadline, // deadline + calld->arena, // arena + calld->pick.subchannel_call_context, // context + calld->call_combiner, // call_combiner + parent_data_size // parent_data_size + }; + grpc_error* new_error = calld->pick.connected_subchannel->CreateCall( + call_args, &calld->subchannel_call); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s", + chand, calld, calld->subchannel_call, grpc_error_string(new_error)); + } + if (new_error != GRPC_ERROR_NONE) { + new_error = grpc_error_add_child(new_error, error); + pending_batches_fail(elem, new_error, true /* yield_call_combiner */); + } else { + if (parent_data_size > 0) { + subchannel_call_retry_state* retry_state = + static_cast( + grpc_connected_subchannel_call_get_parent_data( + calld->subchannel_call)); + retry_state->batch_payload.context = calld->pick.subchannel_call_context; + } + pending_batches_resume(elem); + } + GRPC_ERROR_UNREF(error); +} + +// Invoked when a pick is completed, on both success or failure. 
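
create_subchannel_call() above asks the connected subchannel to co-allocate parent_data_size extra bytes with the call; that region is what grpc_connected_subchannel_call_get_parent_data() returns throughout this file, and it is where the per-attempt subchannel_call_retry_state lives. A standalone sketch of the layout with hypothetical types (not part of the patch; alignment and destruction are glossed over here):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct FakeSubchannelCall { int some_call_state = 0; };
    struct FakeRetryState { int started_send_message_count = 0; };

    // Allocate the call object and the caller-requested parent data in one
    // block, mirroring how parent_data_size flows through CallArgs above.
    void* CreateCallWithParentData(size_t parent_data_size) {
      void* block = malloc(sizeof(FakeSubchannelCall) + parent_data_size);
      new (block) FakeSubchannelCall{};
      if (parent_data_size > 0) {
        new (static_cast<char*>(block) + sizeof(FakeSubchannelCall))
            FakeRetryState{};
      }
      return block;
    }

    // Analogue of grpc_connected_subchannel_call_get_parent_data().
    void* GetParentData(void* call) {
      return static_cast<char*>(call) + sizeof(FakeSubchannelCall);
    }
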
+static void pick_done(void* arg, grpc_error* error) {
+  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (calld->pick.connected_subchannel == nullptr) {
+    // Failed to create subchannel.
+    // If there was no error, this is an LB policy drop, in which case
+    // we return an error; otherwise, we may retry.
+    grpc_status_code status = GRPC_STATUS_OK;
+    grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
+                          nullptr);
+    if (error == GRPC_ERROR_NONE || !calld->enable_retries ||
+        !maybe_retry(elem, nullptr /* batch_data */, status,
+                     nullptr /* server_pushback_md */)) {
+      grpc_error* new_error =
+          error == GRPC_ERROR_NONE
+              ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                    "Call dropped by load balancing policy")
+              : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                    "Failed to create subchannel", &error, 1);
+      if (grpc_client_channel_trace.enabled()) {
+        gpr_log(GPR_INFO,
+                "chand=%p calld=%p: failed to create subchannel: error=%s",
+                chand, calld, grpc_error_string(new_error));
+      }
+      pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
+    }
+  } else {
+    /* Create call on subchannel. */
+    create_subchannel_call(elem, GRPC_ERROR_REF(error));
+  }
+}
+
+// Invoked when a pick is completed to leave the client_channel combiner
+// and continue processing in the call combiner.
+static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
+                    grpc_schedule_on_exec_ctx);
+  GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
+}
+
+// A wrapper around pick_done_locked() that is used in cases where
+// either (a) the pick was deferred pending a resolver result or (b) the
+// pick was done asynchronously.  Removes the call's polling entity from
+// chand->interested_parties before invoking pick_done_locked().
+static void async_pick_done_locked(grpc_call_element* elem,
+                                   grpc_error* error) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  grpc_polling_entity_del_from_pollset_set(calld->pollent,
+                                           chand->interested_parties);
+  pick_done_locked(elem, error);
+}
+
+// Note: This runs under the client_channel combiner, but will NOT be
+// holding the call combiner.
+static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
+  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  // Note: chand->lb_policy may have changed since we started our pick,
+  // in which case we will be cancelling the pick on a policy other than
+  // the one we started it on.  However, this will just be a no-op.
+  if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling pick from LB policy %p",
+              chand, calld, chand->lb_policy.get());
+    }
+    chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
+  }
+  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
+}
+
+// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
+// Unrefs the LB policy and invokes async_pick_done_locked().
+static void pick_callback_done_locked(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously", chand, + calld); + } + async_pick_done_locked(elem, GRPC_ERROR_REF(error)); + GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback"); +} + +// Applies service config to the call. Must be invoked once we know +// that the resolver has returned results to the channel. +static void apply_service_config_to_call_locked(grpc_call_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call", + chand, calld); + } + if (chand->retry_throttle_data != nullptr) { + calld->retry_throttle_data = chand->retry_throttle_data->Ref(); + } + if (chand->method_params_table != nullptr) { + calld->method_params = grpc_core::ServiceConfig::MethodConfigTableLookup( + *chand->method_params_table, calld->path); + if (calld->method_params != nullptr) { + // If the deadline from the service config is shorter than the one + // from the client API, reset the deadline timer. + if (chand->deadline_checking_enabled && + calld->method_params->timeout() != 0) { + const grpc_millis per_method_deadline = + grpc_timespec_to_millis_round_up(calld->call_start_time) + + calld->method_params->timeout(); + if (per_method_deadline < calld->deadline) { + calld->deadline = per_method_deadline; + grpc_deadline_state_reset(elem, calld->deadline); + } + } + } + } + // If no retry policy, disable retries. + // TODO(roth): Remove this when adding support for transparent retries. + if (calld->method_params == nullptr || + calld->method_params->retry_policy() == nullptr) { + calld->enable_retries = false; + } +} + +// Starts a pick on chand->lb_policy. +// Returns true if pick is completed synchronously. +static bool pick_callback_start_locked(grpc_call_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p", chand, + calld, chand->lb_policy.get()); + } + // Only get service config data on the first attempt. + if (calld->num_attempts_completed == 0) { + apply_service_config_to_call_locked(elem); + } + // If the application explicitly set wait_for_ready, use that. + // Otherwise, if the service config specified a value for this + // method, use that. + // + // The send_initial_metadata batch will be the first one in the list, + // as set by get_batch_index() above. + calld->pick.initial_metadata = + calld->seen_send_initial_metadata + ? &calld->send_initial_metadata + : calld->pending_batches[0] + .batch->payload->send_initial_metadata.send_initial_metadata; + uint32_t send_initial_metadata_flags = + calld->seen_send_initial_metadata + ? 
calld->send_initial_metadata_flags + : calld->pending_batches[0] + .batch->payload->send_initial_metadata + .send_initial_metadata_flags; + const bool wait_for_ready_set_from_api = + send_initial_metadata_flags & + GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET; + const bool wait_for_ready_set_from_service_config = + calld->method_params != nullptr && + calld->method_params->wait_for_ready() != + ClientChannelMethodParams::WAIT_FOR_READY_UNSET; + if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) { + if (calld->method_params->wait_for_ready() == + ClientChannelMethodParams::WAIT_FOR_READY_TRUE) { + send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY; + } else { + send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY; + } + } + calld->pick.initial_metadata_flags = send_initial_metadata_flags; + GRPC_CLOSURE_INIT(&calld->pick_closure, pick_callback_done_locked, elem, + grpc_combiner_scheduler(chand->combiner)); + calld->pick.on_complete = &calld->pick_closure; + GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback"); + const bool pick_done = chand->lb_policy->PickLocked(&calld->pick); + if (pick_done) { + // Pick completed synchronously. + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously", + chand, calld); + } + GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback"); + } else { + GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel"); + grpc_call_combiner_set_notify_on_cancel( + calld->call_combiner, + GRPC_CLOSURE_INIT(&calld->pick_cancel_closure, + pick_callback_cancel_locked, elem, + grpc_combiner_scheduler(chand->combiner))); + } + return pick_done; +} + +typedef struct { + grpc_call_element* elem; + bool finished; + grpc_closure closure; + grpc_closure cancel_closure; +} pick_after_resolver_result_args; + +// Note: This runs under the client_channel combiner, but will NOT be +// holding the call combiner. +static void pick_after_resolver_result_cancel_locked(void* arg, + grpc_error* error) { + pick_after_resolver_result_args* args = + static_cast(arg); + if (args->finished) { + gpr_free(args); + return; + } + // If we don't yet have a resolver result, then a closure for + // pick_after_resolver_result_done_locked() will have been added to + // chand->waiting_for_resolver_result_closures, and it may not be invoked + // until after this call has been destroyed. We mark the operation as + // finished, so that when pick_after_resolver_result_done_locked() + // is called, it will be a no-op. We also immediately invoke + // async_pick_done_locked() to propagate the error back to the caller. + args->finished = true; + grpc_call_element* elem = args->elem; + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: cancelling pick waiting for resolver result", + chand, calld); + } + // Note: Although we are not in the call combiner here, we are + // basically stealing the call combiner from the pending pick, so + // it's safe to call async_pick_done_locked() here -- we are + // essentially calling it here instead of calling it in + // pick_after_resolver_result_done_locked(). 
+ async_pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick cancelled", &error, 1)); +} + +static void pick_after_resolver_result_done_locked(void* arg, + grpc_error* error) { + pick_after_resolver_result_args* args = + static_cast(arg); + if (args->finished) { + /* cancelled, do nothing */ + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "call cancelled before resolver result"); + } + gpr_free(args); + return; + } + args->finished = true; + grpc_call_element* elem = args->elem; + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (error != GRPC_ERROR_NONE) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data", + chand, calld); + } + async_pick_done_locked(elem, GRPC_ERROR_REF(error)); + } else if (chand->resolver == nullptr) { + // Shutting down. + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand, + calld); + } + async_pick_done_locked( + elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected")); + } else if (chand->lb_policy == nullptr) { + // Transient resolver failure. + // If call has wait_for_ready=true, try again; otherwise, fail. + uint32_t send_initial_metadata_flags = + calld->seen_send_initial_metadata + ? calld->send_initial_metadata_flags + : calld->pending_batches[0] + .batch->payload->send_initial_metadata + .send_initial_metadata_flags; + if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: resolver returned but no LB policy; " + "wait_for_ready=true; trying again", + chand, calld); + } + pick_after_resolver_result_start_locked(elem); + } else { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: resolver returned but no LB policy; " + "wait_for_ready=false; failing", + chand, calld); + } + async_pick_done_locked( + elem, + grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); + } + } else { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing pick", + chand, calld); + } + if (pick_callback_start_locked(elem)) { + // Even if the LB policy returns a result synchronously, we have + // already added our polling entity to chand->interested_parties + // in order to wait for the resolver result, so we need to + // remove it here. Therefore, we call async_pick_done_locked() + // instead of pick_done_locked(). 
+ async_pick_done_locked(elem, GRPC_ERROR_NONE); + } + } +} + +static void pick_after_resolver_result_start_locked(grpc_call_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, + "chand=%p calld=%p: deferring pick pending resolver result", chand, + calld); + } + pick_after_resolver_result_args* args = + static_cast(gpr_zalloc(sizeof(*args))); + args->elem = elem; + GRPC_CLOSURE_INIT(&args->closure, pick_after_resolver_result_done_locked, + args, grpc_combiner_scheduler(chand->combiner)); + grpc_closure_list_append(&chand->waiting_for_resolver_result_closures, + &args->closure, GRPC_ERROR_NONE); + grpc_call_combiner_set_notify_on_cancel( + calld->call_combiner, + GRPC_CLOSURE_INIT(&args->cancel_closure, + pick_after_resolver_result_cancel_locked, args, + grpc_combiner_scheduler(chand->combiner))); +} + +static void start_pick_locked(void* arg, grpc_error* ignored) { + grpc_call_element* elem = static_cast(arg); + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); + GPR_ASSERT(calld->pick.connected_subchannel == nullptr); + GPR_ASSERT(calld->subchannel_call == nullptr); + if (chand->lb_policy != nullptr) { + // We already have an LB policy, so ask it for a pick. + if (pick_callback_start_locked(elem)) { + // Pick completed synchronously. + pick_done_locked(elem, GRPC_ERROR_NONE); + return; + } + } else { + // We do not yet have an LB policy, so wait for a resolver result. + if (chand->resolver == nullptr) { + pick_done_locked(elem, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected")); + return; + } + if (!chand->started_resolving) { + start_resolving_locked(chand); + } + pick_after_resolver_result_start_locked(elem); + } + // We need to wait for either a resolver result or for an async result + // from the LB policy. Add the polling entity from call_data to the + // channel_data's interested_parties, so that the I/O of the LB policy + // and resolver can be done under it. The polling entity will be + // removed in async_pick_done_locked(). + grpc_polling_entity_add_to_pollset_set(calld->pollent, + chand->interested_parties); +} + +// +// filter call vtable functions +// + +static void cc_start_transport_stream_op_batch( + grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { + GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0); + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); + if (chand->deadline_checking_enabled) { + grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch); + } + // If we've previously been cancelled, immediately fail any new batches. + if (calld->cancel_error != GRPC_ERROR_NONE) { + if (grpc_client_channel_trace.enabled()) { + gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s", + chand, calld, grpc_error_string(calld->cancel_error)); + } + // Note: This will release the call combiner. + grpc_transport_stream_op_batch_finish_with_failure( + batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner); + return; + } + // Handle cancellation. + if (batch->cancel_stream) { + // Stash a copy of cancel_error in our call data, so that we can use + // it for subsequent operations. 
This ensures that if the call is
+    // cancelled before any batches are passed down (e.g., if the deadline
+    // is in the past when the call starts), we can return the right
+    // error to the caller when the first batch does get passed down.
+    GRPC_ERROR_UNREF(calld->cancel_error);
+    calld->cancel_error =
+        GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
+              calld, grpc_error_string(calld->cancel_error));
+    }
+    // If we do not have a subchannel call (i.e., a pick has not yet
+    // been started), fail all pending batches. Otherwise, send the
+    // cancellation down to the subchannel call.
+    if (calld->subchannel_call == nullptr) {
+      pending_batches_fail(elem, GRPC_ERROR_REF(calld->cancel_error),
+                           false /* yield_call_combiner */);
+      // Note: This will release the call combiner.
+      grpc_transport_stream_op_batch_finish_with_failure(
+          batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
+    } else {
+      // Note: This will release the call combiner.
+      grpc_subchannel_call_process_op(calld->subchannel_call, batch);
+    }
+    return;
+  }
+  // Add the batch to the pending list.
+  pending_batches_add(elem, batch);
+  // Check if we've already gotten a subchannel call.
+  // Note that once we have completed the pick, we do not need to enter
+  // the channel combiner, which is more efficient (especially for
+  // streaming calls).
+  if (calld->subchannel_call != nullptr) {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO,
+              "chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
+              calld, calld->subchannel_call);
+    }
+    pending_batches_resume(elem);
+    return;
+  }
+  // We do not yet have a subchannel call.
+  // For batches containing a send_initial_metadata op, enter the channel
+  // combiner to start a pick.
+  if (batch->send_initial_metadata) {
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO, "chand=%p calld=%p: entering client_channel combiner",
+              chand, calld);
+    }
+    GRPC_CLOSURE_SCHED(
+        GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
+                          elem, grpc_combiner_scheduler(chand->combiner)),
+        GRPC_ERROR_NONE);
+  } else {
+    // For all other batches, release the call combiner.
+    if (grpc_client_channel_trace.enabled()) {
+      gpr_log(GPR_INFO,
+              "chand=%p calld=%p: saved batch, yielding call combiner", chand,
+              calld);
+    }
+    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
+                            "batch does not include send_initial_metadata");
+  }
+}
+
+/* Constructor for call_data */
+static grpc_error* cc_init_call_elem(grpc_call_element* elem,
+                                     const grpc_call_element_args* args) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  // Initialize data members.
+  calld->path = grpc_slice_ref_internal(args->path);
+  calld->call_start_time = args->start_time;
+  calld->deadline = args->deadline;
+  calld->arena = args->arena;
+  calld->owning_call = args->call_stack;
+  calld->call_combiner = args->call_combiner;
+  if (chand->deadline_checking_enabled) {
+    grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
+                             calld->deadline);
+  }
+  calld->enable_retries = chand->enable_retries;
+  calld->send_messages.Init();
+  return GRPC_ERROR_NONE;
+}
+
+/* Destructor for call_data */
+static void cc_destroy_call_elem(grpc_call_element* elem,
+                                 const grpc_call_final_info* final_info,
+                                 grpc_closure* then_schedule_closure) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  if (chand->deadline_checking_enabled) {
+    grpc_deadline_state_destroy(elem);
+  }
+  grpc_slice_unref_internal(calld->path);
+  calld->retry_throttle_data.reset();
+  calld->method_params.reset();
+  GRPC_ERROR_UNREF(calld->cancel_error);
+  if (calld->subchannel_call != nullptr) {
+    grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
+                                             then_schedule_closure);
+    then_schedule_closure = nullptr;
+    GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
+                               "client_channel_destroy_call");
+  }
+  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
+    GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
+  }
+  if (calld->pick.connected_subchannel != nullptr) {
+    calld->pick.connected_subchannel.reset();
+  }
+  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
+    if (calld->pick.subchannel_call_context[i].value != nullptr) {
+      calld->pick.subchannel_call_context[i].destroy(
+          calld->pick.subchannel_call_context[i].value);
+    }
+  }
+  calld->send_messages.Destroy();
+  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
+}
+
+static void cc_set_pollset_or_pollset_set(grpc_call_element* elem,
+                                          grpc_polling_entity* pollent) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  calld->pollent = pollent;
+}
+
+/*************************************************************************
+ * EXPORTED SYMBOLS
+ */
+
+const grpc_channel_filter grpc_client_channel_filter = {
+    cc_start_transport_stream_op_batch,
+    cc_start_transport_op,
+    sizeof(call_data),
+    cc_init_call_elem,
+    cc_set_pollset_or_pollset_set,
+    cc_destroy_call_elem,
+    sizeof(channel_data),
+    cc_init_channel_elem,
+    cc_destroy_channel_elem,
+    cc_get_channel_info,
+    "client-channel",
+};
+
+static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
+  channel_data* chand = static_cast<channel_data*>(arg);
+  if (chand->lb_policy != nullptr) {
+    chand->lb_policy->ExitIdleLocked();
+  } else {
+    chand->exit_idle_when_lb_policy_arrives = true;
+    if (!chand->started_resolving && chand->resolver != nullptr) {
+      start_resolving_locked(chand);
+    }
+  }
+  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
+}
+
+grpc_connectivity_state grpc_client_channel_check_connectivity_state(
+    grpc_channel_element* elem, int try_to_connect) {
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  grpc_connectivity_state out =
+      grpc_connectivity_state_check(&chand->state_tracker);
+  if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
+    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
+    GRPC_CLOSURE_SCHED(
+        GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
+                            grpc_combiner_scheduler(chand->combiner)),
+        GRPC_ERROR_NONE);
+  }
+  return out;
+}
+
+typedef struct external_connectivity_watcher {
+  channel_data* chand;
+  grpc_polling_entity pollent;
+ grpc_closure* on_complete; + grpc_closure* watcher_timer_init; + grpc_connectivity_state* state; + grpc_closure my_closure; + struct external_connectivity_watcher* next; +} external_connectivity_watcher; + +static external_connectivity_watcher* lookup_external_connectivity_watcher( + channel_data* chand, grpc_closure* on_complete) { + gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); + external_connectivity_watcher* w = + chand->external_connectivity_watcher_list_head; + while (w != nullptr && w->on_complete != on_complete) { + w = w->next; + } + gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); + return w; +} + +static void external_connectivity_watcher_list_append( + channel_data* chand, external_connectivity_watcher* w) { + GPR_ASSERT(!lookup_external_connectivity_watcher(chand, w->on_complete)); + + gpr_mu_lock(&w->chand->external_connectivity_watcher_list_mu); + GPR_ASSERT(!w->next); + w->next = chand->external_connectivity_watcher_list_head; + chand->external_connectivity_watcher_list_head = w; + gpr_mu_unlock(&w->chand->external_connectivity_watcher_list_mu); +} + +static void external_connectivity_watcher_list_remove( + channel_data* chand, external_connectivity_watcher* too_remove) { + GPR_ASSERT( + lookup_external_connectivity_watcher(chand, too_remove->on_complete)); + gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); + if (too_remove == chand->external_connectivity_watcher_list_head) { + chand->external_connectivity_watcher_list_head = too_remove->next; + gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); + return; + } + external_connectivity_watcher* w = + chand->external_connectivity_watcher_list_head; + while (w != nullptr) { + if (w->next == too_remove) { + w->next = w->next->next; + gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); + return; + } + w = w->next; + } + GPR_UNREACHABLE_CODE(return ); +} + +int grpc_client_channel_num_external_connectivity_watchers( + grpc_channel_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + int count = 0; + + gpr_mu_lock(&chand->external_connectivity_watcher_list_mu); + external_connectivity_watcher* w = + chand->external_connectivity_watcher_list_head; + while (w != nullptr) { + count++; + w = w->next; + } + gpr_mu_unlock(&chand->external_connectivity_watcher_list_mu); + + return count; +} + +static void on_external_watch_complete_locked(void* arg, grpc_error* error) { + external_connectivity_watcher* w = + static_cast(arg); + grpc_closure* follow_up = w->on_complete; + grpc_polling_entity_del_from_pollset_set(&w->pollent, + w->chand->interested_parties); + GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, + "external_connectivity_watcher"); + external_connectivity_watcher_list_remove(w->chand, w); + gpr_free(w); + GRPC_CLOSURE_RUN(follow_up, GRPC_ERROR_REF(error)); +} + +static void watch_connectivity_state_locked(void* arg, + grpc_error* error_ignored) { + external_connectivity_watcher* w = + static_cast(arg); + external_connectivity_watcher* found = nullptr; + if (w->state != nullptr) { + external_connectivity_watcher_list_append(w->chand, w); + // An assumption is being made that the closure is scheduled on the exec ctx + // scheduler and that GRPC_CLOSURE_RUN would run the closure immediately. 
+ GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE); + GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w, + grpc_combiner_scheduler(w->chand->combiner)); + grpc_connectivity_state_notify_on_state_change(&w->chand->state_tracker, + w->state, &w->my_closure); + } else { + GPR_ASSERT(w->watcher_timer_init == nullptr); + found = lookup_external_connectivity_watcher(w->chand, w->on_complete); + if (found) { + GPR_ASSERT(found->on_complete == w->on_complete); + grpc_connectivity_state_notify_on_state_change( + &found->chand->state_tracker, nullptr, &found->my_closure); + } + grpc_polling_entity_del_from_pollset_set(&w->pollent, + w->chand->interested_parties); + GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, + "external_connectivity_watcher"); + gpr_free(w); + } +} + +void grpc_client_channel_watch_connectivity_state( + grpc_channel_element* elem, grpc_polling_entity pollent, + grpc_connectivity_state* state, grpc_closure* closure, + grpc_closure* watcher_timer_init) { + channel_data* chand = static_cast(elem->channel_data); + external_connectivity_watcher* w = + static_cast(gpr_zalloc(sizeof(*w))); + w->chand = chand; + w->pollent = pollent; + w->on_complete = closure; + w->state = state; + w->watcher_timer_init = watcher_timer_init; + grpc_polling_entity_add_to_pollset_set(&w->pollent, + chand->interested_parties); + GRPC_CHANNEL_STACK_REF(w->chand->owning_stack, + "external_connectivity_watcher"); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_INIT(&w->my_closure, watch_connectivity_state_locked, w, + grpc_combiner_scheduler(chand->combiner)), + GRPC_ERROR_NONE); +} + +grpc_subchannel_call* grpc_client_channel_get_subchannel_call( + grpc_call_element* elem) { + call_data* calld = static_cast(elem->call_data); + return calld->subchannel_call; +} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.h b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.h index c99f0092e..a21e5623a 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel.h @@ -19,11 +19,13 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H +#include + #include "src/core/ext/filters/client_channel/client_channel_factory.h" #include "src/core/ext/filters/client_channel/resolver.h" #include "src/core/lib/channel/channel_stack.h" -extern grpc_tracer_flag grpc_client_channel_trace; +extern grpc_core::TraceFlag grpc_client_channel_trace; // Channel arg key for server URI string. 
#define GRPC_ARG_SERVER_URI "grpc.server_uri" @@ -38,18 +40,18 @@ extern grpc_tracer_flag grpc_client_channel_trace; extern const grpc_channel_filter grpc_client_channel_filter; grpc_connectivity_state grpc_client_channel_check_connectivity_state( - grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect); + grpc_channel_element* elem, int try_to_connect); int grpc_client_channel_num_external_connectivity_watchers( - grpc_channel_element *elem); + grpc_channel_element* elem); void grpc_client_channel_watch_connectivity_state( - grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_polling_entity pollent, grpc_connectivity_state *state, - grpc_closure *on_complete, grpc_closure *watcher_timer_init); + grpc_channel_element* elem, grpc_polling_entity pollent, + grpc_connectivity_state* state, grpc_closure* on_complete, + grpc_closure* watcher_timer_init); /* Debug helper: pull the subchannel call from a call stack element */ -grpc_subchannel_call *grpc_client_channel_get_subchannel_call( - grpc_call_element *elem); +grpc_subchannel_call* grpc_client_channel_get_subchannel_call( + grpc_call_element* elem); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.c b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.cc similarity index 63% rename from Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.cc index 57eac8f87..172e9f03c 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/filters/client_channel/client_channel_factory.h" #include "src/core/lib/channel/channel_args.h" @@ -23,33 +25,30 @@ void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory) { factory->vtable->ref(factory); } -void grpc_client_channel_factory_unref(grpc_exec_ctx* exec_ctx, - grpc_client_channel_factory* factory) { - factory->vtable->unref(exec_ctx, factory); +void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory) { + factory->vtable->unref(factory); } grpc_subchannel* grpc_client_channel_factory_create_subchannel( - grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory, - const grpc_subchannel_args* args) { - return factory->vtable->create_subchannel(exec_ctx, factory, args); + grpc_client_channel_factory* factory, const grpc_subchannel_args* args) { + return factory->vtable->create_subchannel(factory, args); } grpc_channel* grpc_client_channel_factory_create_channel( - grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory, - const char* target, grpc_client_channel_type type, - const grpc_channel_args* args) { - return factory->vtable->create_client_channel(exec_ctx, factory, target, type, - args); + grpc_client_channel_factory* factory, const char* target, + grpc_client_channel_type type, const grpc_channel_args* args) { + return factory->vtable->create_client_channel(factory, target, type, args); } static void* factory_arg_copy(void* factory) { - grpc_client_channel_factory_ref((grpc_client_channel_factory*)factory); + grpc_client_channel_factory_ref( + static_cast(factory)); return factory; } -static void factory_arg_destroy(grpc_exec_ctx* exec_ctx, void* factory) { - grpc_client_channel_factory_unref(exec_ctx, - 
(grpc_client_channel_factory*)factory); +static void factory_arg_destroy(void* factory) { + grpc_client_channel_factory_unref( + static_cast(factory)); } static int factory_arg_cmp(void* factory1, void* factory2) { diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.h b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.h index ce6266c76..601ec46b2 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_factory.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H +#include + #include #include "src/core/ext/filters/client_channel/subchannel.h" @@ -40,38 +42,33 @@ typedef enum { /** Constructor for new configured channels. Creating decorators around this type is encouraged to adapt behavior. */ struct grpc_client_channel_factory { - const grpc_client_channel_factory_vtable *vtable; + const grpc_client_channel_factory_vtable* vtable; }; struct grpc_client_channel_factory_vtable { - void (*ref)(grpc_client_channel_factory *factory); - void (*unref)(grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory); - grpc_subchannel *(*create_subchannel)(grpc_exec_ctx *exec_ctx, - grpc_client_channel_factory *factory, - const grpc_subchannel_args *args); - grpc_channel *(*create_client_channel)(grpc_exec_ctx *exec_ctx, - grpc_client_channel_factory *factory, - const char *target, + void (*ref)(grpc_client_channel_factory* factory); + void (*unref)(grpc_client_channel_factory* factory); + grpc_subchannel* (*create_subchannel)(grpc_client_channel_factory* factory, + const grpc_subchannel_args* args); + grpc_channel* (*create_client_channel)(grpc_client_channel_factory* factory, + const char* target, grpc_client_channel_type type, - const grpc_channel_args *args); + const grpc_channel_args* args); }; -void grpc_client_channel_factory_ref(grpc_client_channel_factory *factory); -void grpc_client_channel_factory_unref(grpc_exec_ctx *exec_ctx, - grpc_client_channel_factory *factory); +void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory); +void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory); /** Create a new grpc_subchannel */ -grpc_subchannel *grpc_client_channel_factory_create_subchannel( - grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory, - const grpc_subchannel_args *args); +grpc_subchannel* grpc_client_channel_factory_create_subchannel( + grpc_client_channel_factory* factory, const grpc_subchannel_args* args); /** Create a new grpc_channel */ -grpc_channel *grpc_client_channel_factory_create_channel( - grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *factory, - const char *target, grpc_client_channel_type type, - const grpc_channel_args *args); +grpc_channel* grpc_client_channel_factory_create_channel( + grpc_client_channel_factory* factory, const char* target, + grpc_client_channel_type type, const grpc_channel_args* args); grpc_arg grpc_client_channel_factory_create_channel_arg( - grpc_client_channel_factory *factory); + grpc_client_channel_factory* factory); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_plugin.c b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_plugin.cc similarity index 50% rename from 
Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_plugin.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_plugin.cc index 1f71c5a7f..8385852d1 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_plugin.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/client_channel_plugin.cc @@ -34,61 +34,29 @@ #include "src/core/ext/filters/client_channel/subchannel_index.h" #include "src/core/lib/surface/channel_init.h" -static bool append_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, void *arg) { +static bool append_filter(grpc_channel_stack_builder* builder, void* arg) { return grpc_channel_stack_builder_append_filter( - builder, (const grpc_channel_filter *)arg, NULL, NULL); -} - -static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - void *unused) { - const grpc_channel_args *args = - grpc_channel_stack_builder_get_channel_arguments(builder); - for (size_t i = 0; i < args->num_args; i++) { - if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY) || - 0 == strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) { - return true; - } - } - char *default_authority = grpc_get_default_authority( - exec_ctx, grpc_channel_stack_builder_get_target(builder)); - if (default_authority != NULL) { - grpc_arg arg = grpc_channel_arg_string_create( - (char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority); - grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1); - grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder, - new_args); - gpr_free(default_authority); - grpc_channel_args_destroy(exec_ctx, new_args); - } - return true; + builder, static_cast(arg), nullptr, nullptr); } void grpc_client_channel_init(void) { - grpc_lb_policy_registry_init(); - grpc_resolver_registry_init(); - grpc_retry_throttle_map_init(); + grpc_core::LoadBalancingPolicyRegistry::Builder::InitRegistry(); + grpc_core::ResolverRegistry::Builder::InitRegistry(); + grpc_core::internal::ServerRetryThrottleMap::Init(); grpc_proxy_mapper_registry_init(); grpc_register_http_proxy_mapper(); grpc_subchannel_index_init(); - grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MIN, - set_default_host_if_unset, NULL); grpc_channel_init_register_stage( GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter, - (void *)&grpc_client_channel_filter); + (void*)&grpc_client_channel_filter); grpc_http_connect_register_handshaker_factory(); - grpc_register_tracer(&grpc_client_channel_trace); -#ifndef NDEBUG - grpc_register_tracer(&grpc_trace_resolver_refcount); -#endif } void grpc_client_channel_shutdown(void) { grpc_subchannel_index_shutdown(); grpc_channel_init_shutdown(); grpc_proxy_mapper_registry_shutdown(); - grpc_retry_throttle_map_shutdown(); - grpc_resolver_registry_shutdown(); - grpc_lb_policy_registry_shutdown(); + grpc_core::internal::ServerRetryThrottleMap::Shutdown(); + grpc_core::ResolverRegistry::Builder::ShutdownRegistry(); + grpc_core::LoadBalancingPolicyRegistry::Builder::ShutdownRegistry(); } diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/connector.c b/Sources/CgRPC/src/core/ext/filters/client_channel/connector.cc similarity index 67% rename from Sources/CgRPC/src/core/ext/filters/client_channel/connector.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/connector.cc index c258468e5..5e04b3b45 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/connector.c +++ 
b/Sources/CgRPC/src/core/ext/filters/client_channel/connector.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/filters/client_channel/connector.h" grpc_connector* grpc_connector_ref(grpc_connector* connector) { @@ -23,18 +25,17 @@ grpc_connector* grpc_connector_ref(grpc_connector* connector) { return connector; } -void grpc_connector_unref(grpc_exec_ctx* exec_ctx, grpc_connector* connector) { - connector->vtable->unref(exec_ctx, connector); +void grpc_connector_unref(grpc_connector* connector) { + connector->vtable->unref(connector); } -void grpc_connector_connect(grpc_exec_ctx* exec_ctx, grpc_connector* connector, +void grpc_connector_connect(grpc_connector* connector, const grpc_connect_in_args* in_args, grpc_connect_out_args* out_args, grpc_closure* notify) { - connector->vtable->connect(exec_ctx, connector, in_args, out_args, notify); + connector->vtable->connect(connector, in_args, out_args, notify); } -void grpc_connector_shutdown(grpc_exec_ctx* exec_ctx, grpc_connector* connector, - grpc_error* why) { - connector->vtable->shutdown(exec_ctx, connector, why); +void grpc_connector_shutdown(grpc_connector* connector, grpc_error* why) { + connector->vtable->shutdown(connector, why); } diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/connector.h b/Sources/CgRPC/src/core/ext/filters/client_channel/connector.h index 7f3d4a1cc..556594929 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/connector.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/connector.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H +#include + #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/transport/transport.h" @@ -27,47 +29,45 @@ typedef struct grpc_connector grpc_connector; typedef struct grpc_connector_vtable grpc_connector_vtable; struct grpc_connector { - const grpc_connector_vtable *vtable; + const grpc_connector_vtable* vtable; }; typedef struct { /** set of pollsets interested in this connection */ - grpc_pollset_set *interested_parties; + grpc_pollset_set* interested_parties; /** deadline for connection */ - gpr_timespec deadline; + grpc_millis deadline; /** channel arguments (to be passed to transport) */ - const grpc_channel_args *channel_args; + const grpc_channel_args* channel_args; } grpc_connect_in_args; typedef struct { /** the connected transport */ - grpc_transport *transport; + grpc_transport* transport; /** channel arguments (to be passed to the filters) */ - grpc_channel_args *channel_args; + grpc_channel_args* channel_args; } grpc_connect_out_args; struct grpc_connector_vtable { - void (*ref)(grpc_connector *connector); - void (*unref)(grpc_exec_ctx *exec_ctx, grpc_connector *connector); + void (*ref)(grpc_connector* connector); + void (*unref)(grpc_connector* connector); /** Implementation of grpc_connector_shutdown */ - void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_connector *connector, - grpc_error *why); + void (*shutdown)(grpc_connector* connector, grpc_error* why); /** Implementation of grpc_connector_connect */ - void (*connect)(grpc_exec_ctx *exec_ctx, grpc_connector *connector, - const grpc_connect_in_args *in_args, - grpc_connect_out_args *out_args, grpc_closure *notify); + void (*connect)(grpc_connector* connector, + const grpc_connect_in_args* in_args, + grpc_connect_out_args* out_args, grpc_closure* notify); }; -grpc_connector *grpc_connector_ref(grpc_connector *connector); -void 
grpc_connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *connector); +grpc_connector* grpc_connector_ref(grpc_connector* connector); +void grpc_connector_unref(grpc_connector* connector); /** Connect using the connector: max one outstanding call at a time */ -void grpc_connector_connect(grpc_exec_ctx *exec_ctx, grpc_connector *connector, - const grpc_connect_in_args *in_args, - grpc_connect_out_args *out_args, - grpc_closure *notify); +void grpc_connector_connect(grpc_connector* connector, + const grpc_connect_in_args* in_args, + grpc_connect_out_args* out_args, + grpc_closure* notify); /** Cancel any pending connection */ -void grpc_connector_shutdown(grpc_exec_ctx *exec_ctx, grpc_connector *connector, - grpc_error *why); +void grpc_connector_shutdown(grpc_connector* connector, grpc_error* why); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/http_connect_handshaker.c b/Sources/CgRPC/src/core/ext/filters/client_channel/http_connect_handshaker.cc similarity index 72% rename from Sources/CgRPC/src/core/ext/filters/client_channel/http_connect_handshaker.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/http_connect_handshaker.cc index 418bb41ef..4e8b8b71d 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/http_connect_handshaker.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/http_connect_handshaker.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/filters/client_channel/http_connect_handshaker.h" #include @@ -30,11 +32,11 @@ #include "src/core/ext/filters/client_channel/uri_parser.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/handshaker_registry.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/http/format_request.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" typedef struct http_connect_handshaker { // Base class. Must be first. @@ -61,41 +63,38 @@ typedef struct http_connect_handshaker { } http_connect_handshaker; // Unref and clean up handshaker. -static void http_connect_handshaker_unref(grpc_exec_ctx* exec_ctx, - http_connect_handshaker* handshaker) { +static void http_connect_handshaker_unref(http_connect_handshaker* handshaker) { if (gpr_unref(&handshaker->refcount)) { gpr_mu_destroy(&handshaker->mu); - if (handshaker->endpoint_to_destroy != NULL) { - grpc_endpoint_destroy(exec_ctx, handshaker->endpoint_to_destroy); + if (handshaker->endpoint_to_destroy != nullptr) { + grpc_endpoint_destroy(handshaker->endpoint_to_destroy); } - if (handshaker->read_buffer_to_destroy != NULL) { - grpc_slice_buffer_destroy_internal(exec_ctx, - handshaker->read_buffer_to_destroy); + if (handshaker->read_buffer_to_destroy != nullptr) { + grpc_slice_buffer_destroy_internal(handshaker->read_buffer_to_destroy); gpr_free(handshaker->read_buffer_to_destroy); } - grpc_slice_buffer_destroy_internal(exec_ctx, &handshaker->write_buffer); + grpc_slice_buffer_destroy_internal(&handshaker->write_buffer); grpc_http_parser_destroy(&handshaker->http_parser); grpc_http_response_destroy(&handshaker->http_response); gpr_free(handshaker); } } -// Set args fields to NULL, saving the endpoint and read buffer for +// Set args fields to nullptr, saving the endpoint and read buffer for // later destruction. 
static void cleanup_args_for_failure_locked( - grpc_exec_ctx* exec_ctx, http_connect_handshaker* handshaker) { + http_connect_handshaker* handshaker) { handshaker->endpoint_to_destroy = handshaker->args->endpoint; - handshaker->args->endpoint = NULL; + handshaker->args->endpoint = nullptr; handshaker->read_buffer_to_destroy = handshaker->args->read_buffer; - handshaker->args->read_buffer = NULL; - grpc_channel_args_destroy(exec_ctx, handshaker->args->args); - handshaker->args->args = NULL; + handshaker->args->read_buffer = nullptr; + grpc_channel_args_destroy(handshaker->args->args); + handshaker->args->args = nullptr; } // If the handshake failed or we're shutting down, clean up and invoke the // callback with the error. -static void handshake_failed_locked(grpc_exec_ctx* exec_ctx, - http_connect_handshaker* handshaker, +static void handshake_failed_locked(http_connect_handshaker* handshaker, grpc_error* error) { if (error == GRPC_ERROR_NONE) { // If we were shut down after an endpoint operation succeeded but @@ -108,34 +107,33 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx, // before destroying them, even if we know that there are no // pending read/write callbacks. This should be fixed, at which // point this can be removed. - grpc_endpoint_shutdown(exec_ctx, handshaker->args->endpoint, - GRPC_ERROR_REF(error)); + grpc_endpoint_shutdown(handshaker->args->endpoint, GRPC_ERROR_REF(error)); // Not shutting down, so the handshake failed. Clean up before // invoking the callback. - cleanup_args_for_failure_locked(exec_ctx, handshaker); + cleanup_args_for_failure_locked(handshaker); // Set shutdown to true so that subsequent calls to // http_connect_handshaker_shutdown() do nothing. handshaker->shutdown = true; } // Invoke callback. - GRPC_CLOSURE_SCHED(exec_ctx, handshaker->on_handshake_done, error); + GRPC_CLOSURE_SCHED(handshaker->on_handshake_done, error); } // Callback invoked when finished writing HTTP CONNECT request. -static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - http_connect_handshaker* handshaker = (http_connect_handshaker*)arg; +static void on_write_done(void* arg, grpc_error* error) { + http_connect_handshaker* handshaker = + static_cast(arg); gpr_mu_lock(&handshaker->mu); if (error != GRPC_ERROR_NONE || handshaker->shutdown) { // If the write failed or we're shutting down, clean up and invoke the // callback with the error. - handshake_failed_locked(exec_ctx, handshaker, GRPC_ERROR_REF(error)); + handshake_failed_locked(handshaker, GRPC_ERROR_REF(error)); gpr_mu_unlock(&handshaker->mu); - http_connect_handshaker_unref(exec_ctx, handshaker); + http_connect_handshaker_unref(handshaker); } else { // Otherwise, read the response. // The read callback inherits our ref to the handshaker. - grpc_endpoint_read(exec_ctx, handshaker->args->endpoint, + grpc_endpoint_read(handshaker->args->endpoint, handshaker->args->read_buffer, &handshaker->response_read_closure); gpr_mu_unlock(&handshaker->mu); @@ -143,14 +141,14 @@ static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg, } // Callback invoked for reading HTTP CONNECT response. 
-static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - http_connect_handshaker* handshaker = (http_connect_handshaker*)arg; +static void on_read_done(void* arg, grpc_error* error) { + http_connect_handshaker* handshaker = + static_cast(arg); gpr_mu_lock(&handshaker->mu); if (error != GRPC_ERROR_NONE || handshaker->shutdown) { // If the read failed or we're shutting down, clean up and invoke the // callback with the error. - handshake_failed_locked(exec_ctx, handshaker, GRPC_ERROR_REF(error)); + handshake_failed_locked(handshaker, GRPC_ERROR_REF(error)); goto done; } // Add buffer to parser. @@ -161,7 +159,7 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg, handshaker->args->read_buffer->slices[i], &body_start_offset); if (error != GRPC_ERROR_NONE) { - handshake_failed_locked(exec_ctx, handshaker, error); + handshake_failed_locked(handshaker, error); goto done; } if (handshaker->http_parser.state == GRPC_HTTP_BODY) { @@ -180,7 +178,7 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg, &handshaker->args->read_buffer->slices[i + 1], handshaker->args->read_buffer->count - i - 1); grpc_slice_buffer_swap(handshaker->args->read_buffer, &tmp_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &tmp_buffer); + grpc_slice_buffer_destroy_internal(&tmp_buffer); break; } } @@ -197,9 +195,8 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg, // complete (e.g., handling chunked transfer encoding or looking // at the Content-Length: header). if (handshaker->http_parser.state != GRPC_HTTP_BODY) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, - handshaker->args->read_buffer); - grpc_endpoint_read(exec_ctx, handshaker->args->endpoint, + grpc_slice_buffer_reset_and_unref_internal(handshaker->args->read_buffer); + grpc_endpoint_read(handshaker->args->endpoint, handshaker->args->read_buffer, &handshaker->response_read_closure); gpr_mu_unlock(&handshaker->mu); @@ -213,79 +210,77 @@ static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg, handshaker->http_response.status); error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); - handshake_failed_locked(exec_ctx, handshaker, error); + handshake_failed_locked(handshaker, error); goto done; } // Success. Invoke handshake-done callback. - GRPC_CLOSURE_SCHED(exec_ctx, handshaker->on_handshake_done, error); + GRPC_CLOSURE_SCHED(handshaker->on_handshake_done, error); done: // Set shutdown to true so that subsequent calls to // http_connect_handshaker_shutdown() do nothing. 
handshaker->shutdown = true; gpr_mu_unlock(&handshaker->mu); - http_connect_handshaker_unref(exec_ctx, handshaker); + http_connect_handshaker_unref(handshaker); } // // Public handshaker methods // -static void http_connect_handshaker_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker_in) { - http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in; - http_connect_handshaker_unref(exec_ctx, handshaker); +static void http_connect_handshaker_destroy(grpc_handshaker* handshaker_in) { + http_connect_handshaker* handshaker = + reinterpret_cast(handshaker_in); + http_connect_handshaker_unref(handshaker); } -static void http_connect_handshaker_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker_in, +static void http_connect_handshaker_shutdown(grpc_handshaker* handshaker_in, grpc_error* why) { - http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in; + http_connect_handshaker* handshaker = + reinterpret_cast(handshaker_in); gpr_mu_lock(&handshaker->mu); if (!handshaker->shutdown) { handshaker->shutdown = true; - grpc_endpoint_shutdown(exec_ctx, handshaker->args->endpoint, - GRPC_ERROR_REF(why)); - cleanup_args_for_failure_locked(exec_ctx, handshaker); + grpc_endpoint_shutdown(handshaker->args->endpoint, GRPC_ERROR_REF(why)); + cleanup_args_for_failure_locked(handshaker); } gpr_mu_unlock(&handshaker->mu); GRPC_ERROR_UNREF(why); } static void http_connect_handshaker_do_handshake( - grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker_in, - grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done, - grpc_handshaker_args* args) { - http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in; + grpc_handshaker* handshaker_in, grpc_tcp_server_acceptor* acceptor, + grpc_closure* on_handshake_done, grpc_handshaker_args* args) { + http_connect_handshaker* handshaker = + reinterpret_cast(handshaker_in); // Check for HTTP CONNECT channel arg. // If not found, invoke on_handshake_done without doing anything. const grpc_arg* arg = grpc_channel_args_find(args->args, GRPC_ARG_HTTP_CONNECT_SERVER); - if (arg == NULL) { + char* server_name = grpc_channel_arg_get_string(arg); + if (server_name == nullptr) { // Set shutdown to true so that subsequent calls to // http_connect_handshaker_shutdown() do nothing. gpr_mu_lock(&handshaker->mu); handshaker->shutdown = true; gpr_mu_unlock(&handshaker->mu); - GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(on_handshake_done, GRPC_ERROR_NONE); return; } - GPR_ASSERT(arg->type == GRPC_ARG_STRING); - char* server_name = arg->value.string; // Get headers from channel args. 
arg = grpc_channel_args_find(args->args, GRPC_ARG_HTTP_CONNECT_HEADERS); - grpc_http_header* headers = NULL; + char* arg_header_string = grpc_channel_arg_get_string(arg); + grpc_http_header* headers = nullptr; size_t num_headers = 0; - char** header_strings = NULL; + char** header_strings = nullptr; size_t num_header_strings = 0; - if (arg != NULL) { - GPR_ASSERT(arg->type == GRPC_ARG_STRING); - gpr_string_split(arg->value.string, "\n", &header_strings, + if (arg_header_string != nullptr) { + gpr_string_split(arg_header_string, "\n", &header_strings, &num_header_strings); - headers = (grpc_http_header*)gpr_malloc(sizeof(grpc_http_header) * - num_header_strings); + headers = static_cast( + gpr_malloc(sizeof(grpc_http_header) * num_header_strings)); for (size_t i = 0; i < num_header_strings; ++i) { char* sep = strchr(header_strings[i], ':'); - if (sep == NULL) { + if (sep == nullptr) { gpr_log(GPR_ERROR, "skipping unparseable HTTP CONNECT header: %s", header_strings[i]); continue; @@ -324,18 +319,18 @@ static void http_connect_handshaker_do_handshake( gpr_free(header_strings); // Take a new ref to be held by the write callback. gpr_ref(&handshaker->refcount); - grpc_endpoint_write(exec_ctx, args->endpoint, &handshaker->write_buffer, + grpc_endpoint_write(args->endpoint, &handshaker->write_buffer, &handshaker->request_done_closure); gpr_mu_unlock(&handshaker->mu); } static const grpc_handshaker_vtable http_connect_handshaker_vtable = { http_connect_handshaker_destroy, http_connect_handshaker_shutdown, - http_connect_handshaker_do_handshake}; + http_connect_handshaker_do_handshake, "http_connect"}; static grpc_handshaker* grpc_http_connect_handshaker_create() { http_connect_handshaker* handshaker = - (http_connect_handshaker*)gpr_malloc(sizeof(*handshaker)); + static_cast(gpr_malloc(sizeof(*handshaker))); memset(handshaker, 0, sizeof(*handshaker)); grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base); gpr_mu_init(&handshaker->mu); @@ -355,14 +350,13 @@ static grpc_handshaker* grpc_http_connect_handshaker_create() { // static void handshaker_factory_add_handshakers( - grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* factory, - const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) { + grpc_handshaker_factory* factory, const grpc_channel_args* args, + grpc_handshake_manager* handshake_mgr) { grpc_handshake_manager_add(handshake_mgr, grpc_http_connect_handshaker_create()); } -static void handshaker_factory_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshaker_factory* factory) {} +static void handshaker_factory_destroy(grpc_handshaker_factory* factory) {} static const grpc_handshaker_factory_vtable handshaker_factory_vtable = { handshaker_factory_add_handshakers, handshaker_factory_destroy}; diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/http_proxy.c b/Sources/CgRPC/src/core/ext/filters/client_channel/http_proxy.cc similarity index 82% rename from Sources/CgRPC/src/core/ext/filters/client_channel/http_proxy.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/http_proxy.cc index a16b44d3d..29a6c0e36 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/http_proxy.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/http_proxy.cc @@ -16,13 +16,14 @@ * */ +#include + #include "src/core/ext/filters/client_channel/http_proxy.h" #include #include #include -#include #include #include @@ -30,26 +31,26 @@ #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" #include "src/core/ext/filters/client_channel/uri_parser.h" 
#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/slice/b64.h" -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" /** * Parses the 'http_proxy' env var and returns the proxy hostname to resolve or - * NULL on error. Also sets 'user_cred' to user credentials if present in the + * nullptr on error. Also sets 'user_cred' to user credentials if present in the * 'http_proxy' env var, otherwise leaves it unchanged. It is caller's * responsibility to gpr_free user_cred. */ -static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) { - GPR_ASSERT(user_cred != NULL); - char* proxy_name = NULL; +static char* get_http_proxy_server(char** user_cred) { + GPR_ASSERT(user_cred != nullptr); + char* proxy_name = nullptr; char* uri_str = gpr_getenv("http_proxy"); - char** authority_strs = NULL; + char** authority_strs = nullptr; size_t authority_nstrs; - if (uri_str == NULL) return NULL; - grpc_uri* uri = - grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */); - if (uri == NULL || uri->authority == NULL) { + if (uri_str == nullptr) return nullptr; + grpc_uri* uri = grpc_uri_parse(uri_str, false /* suppress_errors */); + if (uri == nullptr || uri->authority == nullptr) { gpr_log(GPR_ERROR, "cannot parse value of 'http_proxy' env var"); goto done; } @@ -73,7 +74,7 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) { for (size_t i = 0; i < authority_nstrs; i++) { gpr_free(authority_strs[i]); } - proxy_name = NULL; + proxy_name = nullptr; } gpr_free(authority_strs); done: @@ -82,19 +83,17 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) { return proxy_name; } -static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, - grpc_proxy_mapper* mapper, +static bool proxy_mapper_map_name(grpc_proxy_mapper* mapper, const char* server_uri, const grpc_channel_args* args, char** name_to_resolve, grpc_channel_args** new_args) { - char* user_cred = NULL; - *name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred); - if (*name_to_resolve == NULL) return false; - char* no_proxy_str = NULL; - grpc_uri* uri = - grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */); - if (uri == NULL || uri->path[0] == '\0') { + char* user_cred = nullptr; + *name_to_resolve = get_http_proxy_server(&user_cred); + if (*name_to_resolve == nullptr) return false; + char* no_proxy_str = nullptr; + grpc_uri* uri = grpc_uri_parse(server_uri, false /* suppress_errors */); + if (uri == nullptr || uri->path[0] == '\0') { gpr_log(GPR_ERROR, "'http_proxy' environment variable set, but cannot " "parse server URI '%s' -- not using proxy", @@ -107,7 +106,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, goto no_use_proxy; } no_proxy_str = gpr_getenv("no_proxy"); - if (no_proxy_str != NULL) { + if (no_proxy_str != nullptr) { static const char* NO_PROXY_SEPARATOR = ","; bool use_proxy = true; char* server_host; @@ -118,6 +117,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, "unable to split host and port, not checking no_proxy list for " "host '%s'", server_uri); + gpr_free(no_proxy_str); } else { size_t uri_len = strlen(server_host); char** no_proxy_hosts; @@ -142,6 +142,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, gpr_free(no_proxy_hosts); gpr_free(server_host); gpr_free(server_port); + gpr_free(no_proxy_str); if (!use_proxy) goto no_use_proxy; } 
} @@ -149,7 +150,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, args_to_add[0] = grpc_channel_arg_string_create( (char*)GRPC_ARG_HTTP_CONNECT_SERVER, uri->path[0] == '/' ? uri->path + 1 : uri->path); - if (user_cred != NULL) { + if (user_cred != nullptr) { /* Use base64 encoding for user credentials as stated in RFC 7617 */ char* encoded_user_cred = grpc_base64_encode(user_cred, strlen(user_cred), 0, 0); @@ -167,15 +168,14 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, gpr_free(user_cred); return true; no_use_proxy: - if (uri != NULL) grpc_uri_destroy(uri); + if (uri != nullptr) grpc_uri_destroy(uri); gpr_free(*name_to_resolve); - *name_to_resolve = NULL; + *name_to_resolve = nullptr; gpr_free(user_cred); return false; } -static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx, - grpc_proxy_mapper* mapper, +static bool proxy_mapper_map_address(grpc_proxy_mapper* mapper, const grpc_resolved_address* address, const grpc_channel_args* args, grpc_resolved_address** new_address, diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.c deleted file mode 100644 index 8e6673d73..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/filters/client_channel/lb_policy.h" -#include "src/core/lib/iomgr/combiner.h" - -#define WEAK_REF_BITS 16 - -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_lb_policy_refcount = - GRPC_TRACER_INITIALIZER(false, "lb_policy_refcount"); -#endif - -void grpc_lb_policy_init(grpc_lb_policy *policy, - const grpc_lb_policy_vtable *vtable, - grpc_combiner *combiner) { - policy->vtable = vtable; - gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS); - policy->interested_parties = grpc_pollset_set_create(); - policy->combiner = GRPC_COMBINER_REF(combiner, "lb_policy"); -} - -#ifndef NDEBUG -#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason -#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose -#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason -#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose -#else -#define REF_FUNC_EXTRA_ARGS -#define REF_MUTATE_EXTRA_ARGS -#define REF_FUNC_PASS_ARGS(new_reason) -#define REF_MUTATE_PASS_ARGS(x) -#endif - -static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta, - int barrier REF_MUTATE_EXTRA_ARGS) { - gpr_atm old_val = barrier ? 
gpr_atm_full_fetch_add(&c->ref_pair, delta) - : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta); -#ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_lb_policy_refcount)) { - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "LB_POLICY: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c, - purpose, old_val, old_val + delta, reason); - } -#endif - return old_val; -} - -void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) { - ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF")); -} - -static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_lb_policy *policy = (grpc_lb_policy *)arg; - policy->vtable->shutdown_locked(exec_ctx, policy); - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref"); -} - -void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) { - gpr_atm old_val = - ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS), - 1 REF_MUTATE_PASS_ARGS("STRONG_UNREF")); - gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1); - gpr_atm check = 1 << WEAK_REF_BITS; - if ((old_val & mask) == check) { - GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE( - shutdown_locked, policy, - grpc_combiner_scheduler(policy->combiner)), - GRPC_ERROR_NONE); - } else { - grpc_lb_policy_weak_unref(exec_ctx, - policy REF_FUNC_PASS_ARGS("strong-unref")); - } -} - -void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) { - ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF")); -} - -void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) { - gpr_atm old_val = - ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF")); - if (old_val == 1) { - grpc_pollset_set_destroy(exec_ctx, policy->interested_parties); - grpc_combiner *combiner = policy->combiner; - policy->vtable->destroy(exec_ctx, policy); - GRPC_COMBINER_UNREF(exec_ctx, combiner, "lb_policy"); - } -} - -int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - const grpc_lb_policy_pick_args *pick_args, - grpc_connected_subchannel **target, - grpc_call_context_element *context, - void **user_data, grpc_closure *on_complete) { - return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target, - context, user_data, on_complete); -} - -void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy, - grpc_connected_subchannel **target, - grpc_error *error) { - policy->vtable->cancel_pick_locked(exec_ctx, policy, target, error); -} - -void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy, - uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq, - grpc_error *error) { - policy->vtable->cancel_picks_locked(exec_ctx, policy, - initial_metadata_flags_mask, - initial_metadata_flags_eq, error); -} - -void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy) { - policy->vtable->exit_idle_locked(exec_ctx, policy); -} - -void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy, - grpc_closure *closure) { - policy->vtable->ping_one_locked(exec_ctx, policy, closure); -} - -void grpc_lb_policy_notify_on_state_change_locked( - grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - grpc_connectivity_state *state, grpc_closure *closure) { - policy->vtable->notify_on_state_change_locked(exec_ctx, policy, state, - closure); -} - -grpc_connectivity_state grpc_lb_policy_check_connectivity_locked( - grpc_exec_ctx 
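The deleted lb_policy.c keeps strong and weak references in a single gpr_atm: the strong count lives above WEAK_REF_BITS and the weak count in the low 16 bits, so a strong unref can atomically convert itself into a weak reference that is released only after shutdown completes. A simplified sketch of the same packing with std::atomic (PackedRefCount is an illustrative name, not part of gRPC):

#include <atomic>
#include <cstdint>

// Simplified model of the packed ref count: strong refs are counted in the
// bits above kWeakRefBits, weak refs in the low bits, all in one atomic word.
class PackedRefCount {
 public:
  static constexpr int kWeakRefBits = 16;  // WEAK_REF_BITS
  static constexpr uint64_t kStrongOne = uint64_t{1} << kWeakRefBits;
  static constexpr uint64_t kWeakMask = kStrongOne - 1;

  // One strong ref, zero weak refs, matching the initial
  // gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS).
  PackedRefCount() : ref_pair_(kStrongOne) {}

  void StrongRef() { ref_pair_.fetch_add(kStrongOne, std::memory_order_relaxed); }
  void WeakRef() { ref_pair_.fetch_add(1, std::memory_order_relaxed); }

  // Drops a strong ref and simultaneously takes a weak ref; unsigned
  // wrap-around makes "+1 - kStrongOne" a single atomic adjustment, like
  // ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS), 1).
  // Returns true when this was the last strong ref, i.e. shutdown should run
  // and then release the weak ref taken here.
  bool StrongUnref() {
    const uint64_t old = ref_pair_.fetch_add(uint64_t{1} - kStrongOne,
                                             std::memory_order_acq_rel);
    return (old & ~kWeakMask) == kStrongOne;
  }

  // Returns true when the last weak ref is gone and the object can be freed.
  bool WeakUnref() {
    return ref_pair_.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }

 private:
  std::atomic<uint64_t> ref_pair_;
};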
*exec_ctx, grpc_lb_policy *policy, - grpc_error **connectivity_error) { - return policy->vtable->check_connectivity_locked(exec_ctx, policy, - connectivity_error); -} - -void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy, - const grpc_lb_policy_args *lb_policy_args) { - policy->vtable->update_locked(exec_ctx, policy, lb_policy_args); -} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.cc new file mode 100644 index 000000000..e065f4563 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.cc @@ -0,0 +1,59 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/filters/client_channel/lb_policy.h" +#include "src/core/lib/iomgr/combiner.h" + +grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount( + false, "lb_policy_refcount"); + +namespace grpc_core { + +LoadBalancingPolicy::LoadBalancingPolicy(const Args& args) + : InternallyRefCountedWithTracing(&grpc_trace_lb_policy_refcount), + combiner_(GRPC_COMBINER_REF(args.combiner, "lb_policy")), + client_channel_factory_(args.client_channel_factory), + interested_parties_(grpc_pollset_set_create()), + request_reresolution_(nullptr) {} + +LoadBalancingPolicy::~LoadBalancingPolicy() { + grpc_pollset_set_destroy(interested_parties_); + GRPC_COMBINER_UNREF(combiner_, "lb_policy"); +} + +void LoadBalancingPolicy::TryReresolutionLocked( + grpc_core::TraceFlag* grpc_lb_trace, grpc_error* error) { + if (request_reresolution_ != nullptr) { + GRPC_CLOSURE_SCHED(request_reresolution_, error); + request_reresolution_ = nullptr; + if (grpc_lb_trace->enabled()) { + gpr_log(GPR_INFO, + "%s %p: scheduling re-resolution closure with error=%s.", + grpc_lb_trace->name(), this, grpc_error_string(error)); + } + } else { + if (grpc_lb_trace->enabled()) { + gpr_log(GPR_INFO, "%s %p: no available re-resolution closure.", + grpc_lb_trace->name(), this); + } + } +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.h index 645d51e13..454e00a69 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy.h @@ -19,189 +19,187 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H +#include + +#include "src/core/ext/filters/client_channel/client_channel_factory.h" #include "src/core/ext/filters/client_channel/subchannel.h" +#include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/transport/connectivity_state.h" -/** A load balancing policy: specified by a vtable and a struct (which - is expected to be 
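The new lb_policy.cc replaces that hand-rolled machinery with an InternallyRefCountedWithTracing base class and keeps only a little state of its own, including the one-shot re-resolution closure consumed by TryReresolutionLocked(). A sketch of that one-shot callback pattern, assuming std::function in place of grpc_closure (ReresolutionRequester is an illustrative name):

#include <functional>
#include <utility>

// Sketch of the one-shot re-resolution callback: the stored callback is run
// at most once and then cleared, so repeated triggers are harmless.
class ReresolutionRequester {
 public:
  // Mirrors SetReresolutionClosureLocked(); the real code asserts the slot
  // is empty before storing.
  void SetCallback(std::function<void()> cb) { callback_ = std::move(cb); }

  // Mirrors TryReresolutionLocked(): run and clear the callback if present.
  void TryReresolution() {
    if (callback_) {
      auto cb = std::move(callback_);
      callback_ = nullptr;
      cb();  // request re-resolution
    }
    // else: nothing to do; the real code merely logs that no closure is set.
  }

 private:
  std::function<void()> callback_;
};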
extended to contain some parameters) */ -typedef struct grpc_lb_policy grpc_lb_policy; -typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable; -typedef struct grpc_lb_policy_args grpc_lb_policy_args; - -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_lb_policy_refcount; -#endif - -struct grpc_lb_policy { - const grpc_lb_policy_vtable *vtable; - gpr_atm ref_pair; - /* owned pointer to interested parties in load balancing decisions */ - grpc_pollset_set *interested_parties; - /* combiner under which lb_policy actions take place */ - grpc_combiner *combiner; -}; - -/** Extra arguments for an LB pick */ -typedef struct grpc_lb_policy_pick_args { - /** Initial metadata associated with the picking call. */ - grpc_metadata_batch *initial_metadata; - /** Bitmask used for selective cancelling. See \a - * grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in - * grpc_types.h */ - uint32_t initial_metadata_flags; - /** Storage for LB token in \a initial_metadata, or NULL if not used */ - grpc_linked_mdelem *lb_token_mdelem_storage; -} grpc_lb_policy_pick_args; - -struct grpc_lb_policy_vtable { - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy); - void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy); - - /** \see grpc_lb_policy_pick */ - int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - const grpc_lb_policy_pick_args *pick_args, - grpc_connected_subchannel **target, - grpc_call_context_element *context, void **user_data, - grpc_closure *on_complete); - - /** \see grpc_lb_policy_cancel_pick */ - void (*cancel_pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - grpc_connected_subchannel **target, - grpc_error *error); - - /** \see grpc_lb_policy_cancel_picks */ - void (*cancel_picks_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq, - grpc_error *error); - - /** \see grpc_lb_policy_ping_one */ - void (*ping_one_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - grpc_closure *closure); - - /** Try to enter a READY connectivity state */ - void (*exit_idle_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy); - - /** check the current connectivity of the lb_policy */ - grpc_connectivity_state (*check_connectivity_locked)( - grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - grpc_error **connectivity_error); - - /** call notify when the connectivity state of a channel changes from *state. - Updates *state with the new state of the policy. Calling with a NULL \a - state cancels the subscription. */ - void (*notify_on_state_change_locked)(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy, - grpc_connectivity_state *state, - grpc_closure *closure); - - void (*update_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - const grpc_lb_policy_args *args); +extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount; + +namespace grpc_core { + +/// Interface for load balancing policies. +/// +/// Note: All methods with a "Locked" suffix must be called from the +/// combiner passed to the constructor. +/// +/// Any I/O done by the LB policy should be done under the pollset_set +/// returned by \a interested_parties(). +class LoadBalancingPolicy + : public InternallyRefCountedWithTracing { + public: + struct Args { + /// The combiner under which all LB policy calls will be run. + /// Policy does NOT take ownership of the reference to the combiner. 
+ // TODO(roth): Once we have a C++-like interface for combiners, this + // API should change to take a smart pointer that does pass ownership + // of a reference. + grpc_combiner* combiner = nullptr; + /// Used to create channels and subchannels. + grpc_client_channel_factory* client_channel_factory = nullptr; + /// Channel args from the resolver. + /// Note that the LB policy gets the set of addresses from the + /// GRPC_ARG_LB_ADDRESSES channel arg. + grpc_channel_args* args = nullptr; + }; + + /// State used for an LB pick. + struct PickState { + /// Initial metadata associated with the picking call. + grpc_metadata_batch* initial_metadata; + /// Bitmask used for selective cancelling. See + /// \a CancelMatchingPicksLocked() and \a GRPC_INITIAL_METADATA_* in + /// grpc_types.h. + uint32_t initial_metadata_flags; + /// Storage for LB token in \a initial_metadata, or nullptr if not used. + grpc_linked_mdelem lb_token_mdelem_storage; + /// Closure to run when pick is complete, if not completed synchronously. + grpc_closure* on_complete; + /// Will be set to the selected subchannel, or nullptr on failure or when + /// the LB policy decides to drop the call. + RefCountedPtr connected_subchannel; + /// Will be populated with context to pass to the subchannel call, if + /// needed. + grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT]; + /// Upon success, \a *user_data will be set to whatever opaque information + /// may need to be propagated from the LB policy, or nullptr if not needed. + // TODO(roth): As part of revamping our metadata APIs, try to find a + // way to clean this up and C++-ify it. + void** user_data; + /// Next pointer. For internal use by LB policy. + PickState* next; + }; + + // Not copyable nor movable. + LoadBalancingPolicy(const LoadBalancingPolicy&) = delete; + LoadBalancingPolicy& operator=(const LoadBalancingPolicy&) = delete; + + /// Updates the policy with a new set of \a args from the resolver. + /// Note that the LB policy gets the set of addresses from the + /// GRPC_ARG_LB_ADDRESSES channel arg. + virtual void UpdateLocked(const grpc_channel_args& args) GRPC_ABSTRACT; + + /// Finds an appropriate subchannel for a call, based on data in \a pick. + /// \a pick must remain alive until the pick is complete. + /// + /// If the pick succeeds and a result is known immediately, returns true. + /// Otherwise, \a pick->on_complete will be invoked once the pick is + /// complete with its error argument set to indicate success or failure. + virtual bool PickLocked(PickState* pick) GRPC_ABSTRACT; + + /// Cancels \a pick. + /// The \a on_complete callback of the pending pick will be invoked with + /// \a pick->connected_subchannel set to null. + virtual void CancelPickLocked(PickState* pick, + grpc_error* error) GRPC_ABSTRACT; + + /// Cancels all pending picks for which their \a initial_metadata_flags (as + /// given in the call to \a PickLocked()) matches + /// \a initial_metadata_flags_eq when ANDed with + /// \a initial_metadata_flags_mask. + virtual void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask, + uint32_t initial_metadata_flags_eq, + grpc_error* error) GRPC_ABSTRACT; + + /// Requests a notification when the connectivity state of the policy + /// changes from \a *state. When that happens, sets \a *state to the + /// new state and schedules \a closure. + virtual void NotifyOnStateChangeLocked(grpc_connectivity_state* state, + grpc_closure* closure) GRPC_ABSTRACT; + + /// Returns the policy's current connectivity state. 
Sets \a error to + /// the associated error, if any. + virtual grpc_connectivity_state CheckConnectivityLocked( + grpc_error** connectivity_error) GRPC_ABSTRACT; + + /// Hands off pending picks to \a new_policy. + virtual void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) + GRPC_ABSTRACT; + + /// Performs a connected subchannel ping via \a ConnectedSubchannel::Ping() + /// against one of the connected subchannels managed by the policy. + /// Note: This is intended only for use in tests. + virtual void PingOneLocked(grpc_closure* on_initiate, + grpc_closure* on_ack) GRPC_ABSTRACT; + + /// Tries to enter a READY connectivity state. + /// TODO(roth): As part of restructuring how we handle IDLE state, + /// consider whether this method is still needed. + virtual void ExitIdleLocked() GRPC_ABSTRACT; + + void Orphan() override { + // Invoke ShutdownAndUnrefLocked() inside of the combiner. + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_CREATE(&LoadBalancingPolicy::ShutdownAndUnrefLocked, this, + grpc_combiner_scheduler(combiner_)), + GRPC_ERROR_NONE); + } + + /// Sets the re-resolution closure to \a request_reresolution. + void SetReresolutionClosureLocked(grpc_closure* request_reresolution) { + GPR_ASSERT(request_reresolution_ == nullptr); + request_reresolution_ = request_reresolution; + } + + grpc_pollset_set* interested_parties() const { return interested_parties_; } + + GRPC_ABSTRACT_BASE_CLASS + + protected: + // So Delete() can access our protected dtor. + template + friend void Delete(T*); + + explicit LoadBalancingPolicy(const Args& args); + virtual ~LoadBalancingPolicy(); + + grpc_combiner* combiner() const { return combiner_; } + grpc_client_channel_factory* client_channel_factory() const { + return client_channel_factory_; + } + + /// Shuts down the policy. Any pending picks that have not been + /// handed off to a new policy via HandOffPendingPicksLocked() will be + /// failed. + virtual void ShutdownLocked() GRPC_ABSTRACT; + + /// Tries to request a re-resolution. + void TryReresolutionLocked(grpc_core::TraceFlag* grpc_lb_trace, + grpc_error* error); + + private: + static void ShutdownAndUnrefLocked(void* arg, grpc_error* ignored) { + LoadBalancingPolicy* policy = static_cast(arg); + policy->ShutdownLocked(); + policy->Unref(); + } + + /// Combiner under which LB policy actions take place. + grpc_combiner* combiner_; + /// Client channel factory, used to create channels and subchannels. + grpc_client_channel_factory* client_channel_factory_; + /// Owned pointer to interested parties in load balancing decisions. + grpc_pollset_set* interested_parties_; + /// Callback to force a re-resolution. + grpc_closure* request_reresolution_; }; -#ifndef NDEBUG - -/* Strong references: the policy will shutdown when they reach zero */ -#define GRPC_LB_POLICY_REF(p, r) \ - grpc_lb_policy_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \ - grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r)) - -/* Weak references: they don't prevent the shutdown of the LB policy. When no - * strong references are left but there are still weak ones, shutdown is called. - * Once the weak reference also reaches zero, the LB policy is destroyed. 
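The class above preserves the old pick contract: PickLocked() returns true when a result is known immediately, and otherwise the policy invokes pick->on_complete once the pick resolves. A simplified sketch of how a caller honors that contract, with stand-in types instead of grpc_connected_subchannel and grpc_closure:

#include <functional>
#include <string>

// Stand-ins for PickState and the connected subchannel; not gRPC's types.
struct SimplePick {
  std::string selected_backend;              // stands in for connected_subchannel
  std::function<void(bool ok)> on_complete;  // stands in for grpc_closure* on_complete
};

class SimpleLbPolicy {
 public:
  virtual ~SimpleLbPolicy() = default;
  // Returns true if pick->selected_backend was filled in synchronously.
  virtual bool PickLocked(SimplePick* pick) = 0;
};

// How a caller honors the contract:
inline void DoPick(SimpleLbPolicy* policy, SimplePick* pick) {
  if (policy->PickLocked(pick)) {
    // Result known immediately: the policy does NOT run on_complete in this
    // case, so the caller continues inline (here we reuse the same path).
    pick->on_complete(/*ok=*/true);
  }
  // Otherwise the policy will invoke pick->on_complete itself once the pick
  // resolves, e.g. after a subchannel becomes READY.
}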
*/ -#define GRPC_LB_POLICY_WEAK_REF(p, r) \ - grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \ - grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r)) -void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line, - const char *reason); -void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - const char *file, int line, const char *reason); -void grpc_lb_policy_weak_ref(grpc_lb_policy *policy, const char *file, int line, - const char *reason); -void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - const char *file, int line, const char *reason); -#else -#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p)) -#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p)) -#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p)) -#define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p)) -void grpc_lb_policy_ref(grpc_lb_policy *policy); -void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy); -void grpc_lb_policy_weak_ref(grpc_lb_policy *policy); -void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy); -#endif - -/** called by concrete implementations to initialize the base struct */ -void grpc_lb_policy_init(grpc_lb_policy *policy, - const grpc_lb_policy_vtable *vtable, - grpc_combiner *combiner); - -/** Finds an appropriate subchannel for a call, based on \a pick_args. - - \a target will be set to the selected subchannel, or NULL on failure - or when the LB policy decides to drop the call. - - Upon success, \a user_data will be set to whatever opaque information - may need to be propagated from the LB policy, or NULL if not needed. - \a context will be populated with context to pass to the subchannel - call, if needed. - - If the pick succeeds and a result is known immediately, a non-zero - value will be returned. Otherwise, \a on_complete will be invoked - once the pick is complete with its error argument set to indicate - success or failure. - - Any IO should be done under the \a interested_parties \a grpc_pollset_set - in the \a grpc_lb_policy struct. */ -int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - const grpc_lb_policy_pick_args *pick_args, - grpc_connected_subchannel **target, - grpc_call_context_element *context, - void **user_data, grpc_closure *on_complete); - -/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping) - against one of the connected subchannels managed by \a policy. */ -void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy, - grpc_closure *closure); - -/** Cancel picks for \a target. - The \a on_complete callback of the pending picks will be invoked with \a - *target set to NULL. 
*/ -void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy, - grpc_connected_subchannel **target, - grpc_error *error); - -/** Cancel all pending picks for which their \a initial_metadata_flags (as given - in the call to \a grpc_lb_policy_pick) matches \a initial_metadata_flags_eq - when AND'd with \a initial_metadata_flags_mask */ -void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy, - uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq, - grpc_error *error); - -/** Try to enter a READY connectivity state */ -void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy); - -/* Call notify when the connectivity state of a channel changes from \a *state. - * Updates \a *state with the new state of the policy */ -void grpc_lb_policy_notify_on_state_change_locked( - grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - grpc_connectivity_state *state, grpc_closure *closure); - -grpc_connectivity_state grpc_lb_policy_check_connectivity_locked( - grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - grpc_error **connectivity_error); - -/** Update \a policy with \a lb_policy_args. */ -void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *policy, - const grpc_lb_policy_args *lb_policy_args); +} // namespace grpc_core #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c deleted file mode 100644 index 7ad322902..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h" - -#include -#include - -#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h" -#include "src/core/lib/iomgr/error.h" -#include "src/core/lib/profiling/timers.h" - -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - return GRPC_ERROR_NONE; -} - -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) {} - -typedef struct { - // Stats object to update. - grpc_grpclb_client_stats *client_stats; - // State for intercepting send_initial_metadata. - grpc_closure on_complete_for_send; - grpc_closure *original_on_complete_for_send; - bool send_initial_metadata_succeeded; - // State for intercepting recv_initial_metadata. 
- grpc_closure recv_initial_metadata_ready; - grpc_closure *original_recv_initial_metadata_ready; - bool recv_initial_metadata_succeeded; -} call_data; - -static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - call_data *calld = (call_data *)arg; - if (error == GRPC_ERROR_NONE) { - calld->send_initial_metadata_succeeded = true; - } - GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete_for_send, - GRPC_ERROR_REF(error)); -} - -static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - call_data *calld = (call_data *)arg; - if (error == GRPC_ERROR_NONE) { - calld->recv_initial_metadata_succeeded = true; - } - GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready, - GRPC_ERROR_REF(error)); -} - -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = (call_data *)elem->call_data; - // Get stats object from context and take a ref. - GPR_ASSERT(args->context != NULL); - GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL); - calld->client_stats = grpc_grpclb_client_stats_ref( - (grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS] - .value); - // Record call started. - grpc_grpclb_client_stats_add_call_started(calld->client_stats); - return GRPC_ERROR_NONE; -} - -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *ignored) { - call_data *calld = (call_data *)elem->call_data; - // Record call finished, optionally setting client_failed_to_send and - // received. - grpc_grpclb_client_stats_add_call_finished( - !calld->send_initial_metadata_succeeded /* client_failed_to_send */, - calld->recv_initial_metadata_succeeded /* known_received */, - calld->client_stats); - // All done, so unref the stats object. - grpc_grpclb_client_stats_unref(calld->client_stats); -} - -static void start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { - call_data *calld = (call_data *)elem->call_data; - GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0); - // Intercept send_initial_metadata. - if (batch->send_initial_metadata) { - calld->original_on_complete_for_send = batch->on_complete; - GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send, calld, - grpc_schedule_on_exec_ctx); - batch->on_complete = &calld->on_complete_for_send; - } - // Intercept recv_initial_metadata. - if (batch->recv_initial_metadata) { - calld->original_recv_initial_metadata_ready = - batch->payload->recv_initial_metadata.recv_initial_metadata_ready; - GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, - recv_initial_metadata_ready, calld, - grpc_schedule_on_exec_ctx); - batch->payload->recv_initial_metadata.recv_initial_metadata_ready = - &calld->recv_initial_metadata_ready; - } - // Chain to next filter. 
- grpc_call_next_op(exec_ctx, elem, batch); - GPR_TIMER_END("clr_start_transport_stream_op_batch", 0); -} - -const grpc_channel_filter grpc_client_load_reporting_filter = { - start_transport_stream_op_batch, - grpc_channel_next_op, - sizeof(call_data), - init_call_elem, - grpc_call_stack_ignore_set_pollset_or_pollset_set, - destroy_call_elem, - 0, // sizeof(channel_data) - init_channel_elem, - destroy_channel_elem, - grpc_channel_next_get_info, - "client_load_reporting"}; diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc new file mode 100644 index 000000000..18ef1f6ff --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc @@ -0,0 +1,138 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h" + +#include +#include + +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h" +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/profiling/timers.h" + +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + return GRPC_ERROR_NONE; +} + +static void destroy_channel_elem(grpc_channel_element* elem) {} + +namespace { +struct call_data { + // Stats object to update. + grpc_grpclb_client_stats* client_stats; + // State for intercepting send_initial_metadata. + grpc_closure on_complete_for_send; + grpc_closure* original_on_complete_for_send; + bool send_initial_metadata_succeeded; + // State for intercepting recv_initial_metadata. + grpc_closure recv_initial_metadata_ready; + grpc_closure* original_recv_initial_metadata_ready; + bool recv_initial_metadata_succeeded; +}; +} // namespace + +static void on_complete_for_send(void* arg, grpc_error* error) { + call_data* calld = static_cast(arg); + if (error == GRPC_ERROR_NONE) { + calld->send_initial_metadata_succeeded = true; + } + GRPC_CLOSURE_RUN(calld->original_on_complete_for_send, GRPC_ERROR_REF(error)); +} + +static void recv_initial_metadata_ready(void* arg, grpc_error* error) { + call_data* calld = static_cast(arg); + if (error == GRPC_ERROR_NONE) { + calld->recv_initial_metadata_succeeded = true; + } + GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready, + GRPC_ERROR_REF(error)); +} + +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); + // Get stats object from context and take a ref. + GPR_ASSERT(args->context != nullptr); + if (args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) { + calld->client_stats = + grpc_grpclb_client_stats_ref(static_cast( + args->context[GRPC_GRPCLB_CLIENT_STATS].value)); + // Record call started. 
+ grpc_grpclb_client_stats_add_call_started(calld->client_stats); + } + return GRPC_ERROR_NONE; +} + +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) { + call_data* calld = static_cast(elem->call_data); + if (calld->client_stats != nullptr) { + // Record call finished, optionally setting client_failed_to_send and + // received. + grpc_grpclb_client_stats_add_call_finished( + !calld->send_initial_metadata_succeeded /* client_failed_to_send */, + calld->recv_initial_metadata_succeeded /* known_received */, + calld->client_stats); + // All done, so unref the stats object. + grpc_grpclb_client_stats_unref(calld->client_stats); + } +} + +static void start_transport_stream_op_batch( + grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { + call_data* calld = static_cast(elem->call_data); + GPR_TIMER_SCOPE("clr_start_transport_stream_op_batch", 0); + if (calld->client_stats != nullptr) { + // Intercept send_initial_metadata. + if (batch->send_initial_metadata) { + calld->original_on_complete_for_send = batch->on_complete; + GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send, + calld, grpc_schedule_on_exec_ctx); + batch->on_complete = &calld->on_complete_for_send; + } + // Intercept recv_initial_metadata. + if (batch->recv_initial_metadata) { + calld->original_recv_initial_metadata_ready = + batch->payload->recv_initial_metadata.recv_initial_metadata_ready; + GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, + recv_initial_metadata_ready, calld, + grpc_schedule_on_exec_ctx); + batch->payload->recv_initial_metadata.recv_initial_metadata_ready = + &calld->recv_initial_metadata_ready; + } + } + // Chain to next filter. + grpc_call_next_op(elem, batch); +} + +const grpc_channel_filter grpc_client_load_reporting_filter = { + start_transport_stream_op_batch, + grpc_channel_next_op, + sizeof(call_data), + init_call_elem, + grpc_call_stack_ignore_set_pollset_or_pollset_set, + destroy_call_elem, + 0, // sizeof(channel_data) + init_channel_elem, + destroy_channel_elem, + grpc_channel_next_get_info, + "client_load_reporting"}; diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h index 51e30b20b..838e2ef1c 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h @@ -19,9 +19,11 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H +#include + #include "src/core/lib/channel/channel_stack.h" extern const grpc_channel_filter grpc_client_load_reporting_filter; #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c deleted file mode 100644 index 8dc81b46d..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +++ /dev/null @@ -1,2021 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. 
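The rewritten filter intercepts the batch by swapping its own closures in for on_complete and recv_initial_metadata_ready, recording whether each step succeeded before chaining to the original callback. A standalone sketch of that interception pattern, assuming std::function instead of grpc_closure (FakeBatch and SendInterceptor are illustrative names):

#include <functional>
#include <utility>

// Stand-in for grpc_transport_stream_op_batch's completion callback.
struct FakeBatch {
  std::function<void(bool ok)> on_complete;
};

class SendInterceptor {
 public:
  void Intercept(FakeBatch* batch) {
    original_on_complete_ = std::move(batch->on_complete);
    batch->on_complete = [this](bool ok) {
      if (ok) send_initial_metadata_succeeded_ = true;  // record the outcome
      original_on_complete_(ok);                        // chain to the original
    };
  }

  bool send_initial_metadata_succeeded() const {
    return send_initial_metadata_succeeded_;
  }

 private:
  std::function<void(bool ok)> original_on_complete_;
  bool send_initial_metadata_succeeded_ = false;
};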
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** Implementation of the gRPC LB policy. - * - * This policy takes as input a set of resolved addresses {a1..an} for which the - * LB set was set (it's the resolver's responsibility to ensure this). That is - * to say, {a1..an} represent a collection of LB servers. - * - * An internal channel (\a glb_lb_policy.lb_channel) is created over {a1..an}. - * This channel behaves just like a regular channel. In particular, the - * constructed URI over the addresses a1..an will use the default pick first - * policy to select from this list of LB server backends. - * - * The first time the policy gets a request for a pick, a ping, or to exit the - * idle state, \a query_for_backends_locked() is called. This function sets up - * and initiates the internal communication with the LB server. In particular, - * it's responsible for instantiating the internal *streaming* call to the LB - * server (whichever address from {a1..an} pick-first chose). This call is - * serviced by two callbacks, \a lb_on_server_status_received and \a - * lb_on_response_received. The former will be called when the call to the LB - * server completes. This can happen if the LB server closes the connection or - * if this policy itself cancels the call (for example because it's shutting - * down). If the internal call times out, the usual behavior of pick-first - * applies, continuing to pick from the list {a1..an}. - * - * Upon sucesss, the incoming \a LoadBalancingResponse is processed by \a - * res_recv. An invalid one results in the termination of the streaming call. A - * new streaming call should be created if possible, failing the original call - * otherwise. For a valid \a LoadBalancingResponse, the server list of actual - * backends is extracted. A Round Robin policy will be created from this list. - * There are two possible scenarios: - * - * 1. This is the first server list received. There was no previous instance of - * the Round Robin policy. \a rr_handover_locked() will instantiate the RR - * policy and perform all the pending operations over it. - * 2. There's already a RR policy instance active. We need to introduce the new - * one build from the new serverlist, but taking care not to disrupt the - * operations in progress over the old RR instance. This is done by - * decreasing the reference count on the old policy. The moment no more - * references are held on the old RR policy, it'll be destroyed and \a - * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN - * state. At this point we can transition to a new RR instance safely, which - * is done once again via \a rr_handover_locked(). - * - * - * Once a RR policy instance is in place (and getting updated as described), - * calls to for a pick, a ping or a cancellation will be serviced right away by - * forwarding them to the RR instance. 
Any time there's no RR policy available - * (ie, right after the creation of the gRPCLB policy, if an empty serverlist is - * received, etc), pick/ping requests are added to a list of pending picks/pings - * to be flushed and serviced as part of \a rr_handover_locked() the moment the - * RR policy instance becomes available. - * - * \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the - * high level design and details. */ - -/* TODO(dgq): - * - Implement LB service forwarding (point 2c. in the doc's diagram). - */ - -/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when - using that endpoint. Because of various transitive includes in uv.h, - including windows.h on Windows, uv.h must be included before other system - headers. Therefore, sockaddr.h must always be included first */ -#include "src/core/lib/iomgr/sockaddr.h" - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "src/core/ext/filters/client_channel/client_channel.h" -#include "src/core/ext/filters/client_channel/client_channel_factory.h" -#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h" -#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h" -#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h" -#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h" -#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h" -#include "src/core/ext/filters/client_channel/lb_policy_factory.h" -#include "src/core/ext/filters/client_channel/lb_policy_registry.h" -#include "src/core/ext/filters/client_channel/parse_address.h" -#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" -#include "src/core/ext/filters/client_channel/subchannel_index.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/channel/channel_stack.h" -#include "src/core/lib/iomgr/combiner.h" -#include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/slice/slice_hash_table.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/backoff.h" -#include "src/core/lib/support/string.h" -#include "src/core/lib/surface/call.h" -#include "src/core/lib/surface/channel.h" -#include "src/core/lib/surface/channel_init.h" -#include "src/core/lib/transport/static_metadata.h" - -#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20 -#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1 -#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6 -#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120 -#define GRPC_GRPCLB_RECONNECT_JITTER 0.2 -#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000 - -grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb"); - -/* add lb_token of selected subchannel (address) to the call's initial - * metadata */ -static grpc_error *initial_metadata_add_lb_token( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata, - grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) { - GPR_ASSERT(lb_token_mdelem_storage != NULL); - GPR_ASSERT(!GRPC_MDISNULL(lb_token)); - return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata, - lb_token_mdelem_storage, lb_token); -} - -static void destroy_client_stats(void *arg) { - grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg); -} - 
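The GRPC_GRPCLB_* constants above configure the LB call retry backoff: an initial 1 s delay, a 1.6x multiplier, a 120 s cap, and 20% jitter. A hedged sketch of the schedule those numbers imply (the real code delegates to gpr_backoff; LbCallBackoff is only illustrative):

#include <random>

// Retry schedule: start at 1s, multiply by 1.6 per failure, cap at 120s,
// and apply +/-20% jitter to each delay.
class LbCallBackoff {
 public:
  double NextDelaySeconds() {
    std::uniform_real_distribution<double> jitter(1.0 - kJitter, 1.0 + kJitter);
    const double delay = current_ * jitter(rng_);
    current_ *= kMultiplier;
    if (current_ > kMaxSeconds) current_ = kMaxSeconds;
    return delay;
  }

  void Reset() { current_ = kInitialSeconds; }

 private:
  static constexpr double kInitialSeconds = 1.0;  // INITIAL_CONNECT_BACKOFF_SECONDS
  static constexpr double kMultiplier = 1.6;      // RECONNECT_BACKOFF_MULTIPLIER
  static constexpr double kMaxSeconds = 120.0;    // RECONNECT_MAX_BACKOFF_SECONDS
  static constexpr double kJitter = 0.2;          // RECONNECT_JITTER

  double current_ = kInitialSeconds;
  std::mt19937 rng_{std::random_device{}()};
};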
-typedef struct wrapped_rr_closure_arg { - /* the closure instance using this struct as argument */ - grpc_closure wrapper_closure; - - /* the original closure. Usually a on_complete/notify cb for pick() and ping() - * calls against the internal RR instance, respectively. */ - grpc_closure *wrapped_closure; - - /* the pick's initial metadata, kept in order to append the LB token for the - * pick */ - grpc_metadata_batch *initial_metadata; - - /* the picked target, used to determine which LB token to add to the pick's - * initial metadata */ - grpc_connected_subchannel **target; - - /* the context to be populated for the subchannel call */ - grpc_call_context_element *context; - - /* Stats for client-side load reporting. Note that this holds a - * reference, which must be either passed on via context or unreffed. */ - grpc_grpclb_client_stats *client_stats; - - /* the LB token associated with the pick */ - grpc_mdelem lb_token; - - /* storage for the lb token initial metadata mdelem */ - grpc_linked_mdelem *lb_token_mdelem_storage; - - /* The RR instance related to the closure */ - grpc_lb_policy *rr_policy; - - /* heap memory to be freed upon closure execution. */ - void *free_when_done; -} wrapped_rr_closure_arg; - -/* The \a on_complete closure passed as part of the pick requires keeping a - * reference to its associated round robin instance. We wrap this closure in - * order to unref the round robin instance upon its invocation */ -static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg; - - GPR_ASSERT(wc_arg->wrapped_closure != NULL); - GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error)); - - if (wc_arg->rr_policy != NULL) { - /* if *target is NULL, no pick has been made by the RR policy (eg, all - * addresses failed to connect). There won't be any user_data/token - * available */ - if (*wc_arg->target != NULL) { - if (!GRPC_MDISNULL(wc_arg->lb_token)) { - initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata, - wc_arg->lb_token_mdelem_storage, - GRPC_MDELEM_REF(wc_arg->lb_token)); - } else { - gpr_log(GPR_ERROR, - "No LB token for connected subchannel pick %p (from RR " - "instance %p).", - (void *)*wc_arg->target, (void *)wc_arg->rr_policy); - abort(); - } - // Pass on client stats via context. Passes ownership of the reference. - GPR_ASSERT(wc_arg->client_stats != NULL); - wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats; - wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats; - } else { - grpc_grpclb_client_stats_unref(wc_arg->client_stats); - } - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy); - } - GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure"); - } - GPR_ASSERT(wc_arg->free_when_done != NULL); - gpr_free(wc_arg->free_when_done); -} - -/* Linked list of pending pick requests. It stores all information needed to - * eventually call (Round Robin's) pick() on them. They mainly stay pending - * waiting for the RR policy to be created/updated. - * - * One particularity is the wrapping of the user-provided \a on_complete closure - * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in - * order to correctly unref the RR policy instance upon completion of the pick. - * See \a wrapped_rr_closure for details. 
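wrapped_rr_closure() above wraps the caller's on_complete so that, when the pick finishes, the LB token and client stats are attached and the RR policy reference is dropped before control returns to the caller. A generic sketch of that wrapper-closure idea, assuming std::function (WrapWithCleanup is an illustrative name, not a gRPC helper):

#include <functional>
#include <utility>

// Build a new callback that performs per-pick bookkeeping and then invokes
// the original completion. The cleanup lambda stands in for attaching the LB
// token to the initial metadata and unreffing the RR policy.
inline std::function<void(bool ok)> WrapWithCleanup(
    std::function<void(bool ok)> original,
    std::function<void(bool ok)> cleanup) {
  return [original = std::move(original),
          cleanup = std::move(cleanup)](bool ok) {
    cleanup(ok);   // bookkeeping tied to this pick
    original(ok);  // then run the caller's on_complete
  };
}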
*/ -typedef struct pending_pick { - struct pending_pick *next; - - /* original pick()'s arguments */ - grpc_lb_policy_pick_args pick_args; - - /* output argument where to store the pick()ed connected subchannel, or NULL - * upon error. */ - grpc_connected_subchannel **target; - - /* args for wrapped_on_complete */ - wrapped_rr_closure_arg wrapped_on_complete_arg; -} pending_pick; - -static void add_pending_pick(pending_pick **root, - const grpc_lb_policy_pick_args *pick_args, - grpc_connected_subchannel **target, - grpc_call_context_element *context, - grpc_closure *on_complete) { - pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp)); - pp->next = *root; - pp->pick_args = *pick_args; - pp->target = target; - pp->wrapped_on_complete_arg.wrapped_closure = on_complete; - pp->wrapped_on_complete_arg.target = target; - pp->wrapped_on_complete_arg.context = context; - pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata; - pp->wrapped_on_complete_arg.lb_token_mdelem_storage = - pick_args->lb_token_mdelem_storage; - pp->wrapped_on_complete_arg.free_when_done = pp; - GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure, - wrapped_rr_closure, &pp->wrapped_on_complete_arg, - grpc_schedule_on_exec_ctx); - *root = pp; -} - -/* Same as the \a pending_pick struct but for ping operations */ -typedef struct pending_ping { - struct pending_ping *next; - - /* args for wrapped_notify */ - wrapped_rr_closure_arg wrapped_notify_arg; -} pending_ping; - -static void add_pending_ping(pending_ping **root, grpc_closure *notify) { - pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping)); - pping->wrapped_notify_arg.wrapped_closure = notify; - pping->wrapped_notify_arg.free_when_done = pping; - pping->next = *root; - GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure, - wrapped_rr_closure, &pping->wrapped_notify_arg, - grpc_schedule_on_exec_ctx); - *root = pping; -} - -/* - * glb_lb_policy - */ -typedef struct rr_connectivity_data rr_connectivity_data; - -typedef struct glb_lb_policy { - /** base policy: must be first */ - grpc_lb_policy base; - - /** who the client is trying to communicate with */ - const char *server_name; - grpc_client_channel_factory *cc_factory; - grpc_channel_args *args; - - /** timeout in milliseconds for the LB call. 0 means no deadline. */ - int lb_call_timeout_ms; - - /** timeout in milliseconds for before using fallback backend addresses. - * 0 means not using fallback. */ - int lb_fallback_timeout_ms; - - /** for communicating with the LB server */ - grpc_channel *lb_channel; - - /** response generator to inject address updates into \a lb_channel */ - grpc_fake_resolver_response_generator *response_generator; - - /** the RR policy to use of the backend servers returned by the LB server */ - grpc_lb_policy *rr_policy; - - bool started_picking; - - /** our connectivity state tracker */ - grpc_connectivity_state_tracker state_tracker; - - /** connectivity state of the LB channel */ - grpc_connectivity_state lb_channel_connectivity; - - /** stores the deserialized response from the LB. May be NULL until one such - * response has arrived. */ - grpc_grpclb_serverlist *serverlist; - - /** Index into serverlist for next pick. - * If the server at this index is a drop, we return a drop. - * Otherwise, we delegate to the RR policy. 
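add_pending_pick() and add_pending_ping() above simply prepend to an intrusive singly linked list; the entries are replayed against the RR policy once rr_handover_locked() installs one. A minimal sketch of that prepend-and-flush pattern (this PendingPick is a stand-in, not the struct in the hunk):

#include <functional>

// Picks queued while no RR policy exists yet.
struct PendingPick {
  PendingPick* next = nullptr;
  std::function<void()> replay;  // stands in for re-issuing the pick
};

inline void AddPendingPick(PendingPick** root, PendingPick* pp) {
  pp->next = *root;  // prepend, exactly like add_pending_pick()
  *root = pp;
}

inline void FlushPendingPicks(PendingPick** root) {
  while (PendingPick* pp = *root) {
    *root = pp->next;
    pp->replay();  // hand the pick to the newly created RR policy
    delete pp;     // the real code frees via free_when_done
  }
}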
*/ - size_t serverlist_index; - - /** stores the backend addresses from the resolver */ - grpc_lb_addresses *fallback_backend_addresses; - - /** list of picks that are waiting on RR's policy connectivity */ - pending_pick *pending_picks; - - /** list of pings that are waiting on RR's policy connectivity */ - pending_ping *pending_pings; - - bool shutting_down; - - /** are we currently updating lb_call? */ - bool updating_lb_call; - - /** are we currently updating lb_channel? */ - bool updating_lb_channel; - - /** are we already watching the LB channel's connectivity? */ - bool watching_lb_channel; - - /** is \a lb_call_retry_timer active? */ - bool retry_timer_active; - - /** is \a lb_fallback_timer active? */ - bool fallback_timer_active; - - /** called upon changes to the LB channel's connectivity. */ - grpc_closure lb_channel_on_connectivity_changed; - - /** args from the latest update received while already updating, or NULL */ - grpc_lb_policy_args *pending_update_args; - - /************************************************************/ - /* client data associated with the LB server communication */ - /************************************************************/ - /* Status from the LB server has been received. This signals the end of the LB - * call. */ - grpc_closure lb_on_server_status_received; - - /* A response from the LB server has been received. Process it */ - grpc_closure lb_on_response_received; - - /* LB call retry timer callback. */ - grpc_closure lb_on_call_retry; - - /* LB fallback timer callback. */ - grpc_closure lb_on_fallback; - - grpc_call *lb_call; /* streaming call to the LB server, */ - - grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */ - grpc_metadata_array - lb_trailing_metadata_recv; /* trailing MD from LB server */ - - /* what's being sent to the LB server. Note that its value may vary if the LB - * server indicates a redirect. */ - grpc_byte_buffer *lb_request_payload; - - /* response the LB server, if any. Processed in lb_on_response_received() */ - grpc_byte_buffer *lb_response_payload; - - /* call status code and details, set in lb_on_server_status_received() */ - grpc_status_code lb_call_status; - grpc_slice lb_call_status_details; - - /** LB call retry backoff state */ - gpr_backoff lb_call_backoff_state; - - /** LB call retry timer */ - grpc_timer lb_call_retry_timer; - - /** LB fallback timer */ - grpc_timer lb_fallback_timer; - - bool seen_initial_response; - - /* Stats for client-side load reporting. Should be unreffed and - * recreated whenever lb_call is replaced. */ - grpc_grpclb_client_stats *client_stats; - /* Interval and timer for next client load report. */ - gpr_timespec client_stats_report_interval; - grpc_timer client_load_report_timer; - bool client_load_report_timer_pending; - bool last_client_load_report_counters_were_zero; - /* Closure used for either the load report timer or the callback for - * completion of sending the load report. */ - grpc_closure client_load_report_closure; - /* Client load report message payload. 
*/ - grpc_byte_buffer *client_load_report_payload; -} glb_lb_policy; - -/* Keeps track and reacts to changes in connectivity of the RR instance */ -struct rr_connectivity_data { - grpc_closure on_change; - grpc_connectivity_state state; - glb_lb_policy *glb_policy; -}; - -static bool is_server_valid(const grpc_grpclb_server *server, size_t idx, - bool log) { - if (server->drop) return false; - const grpc_grpclb_ip_address *ip = &server->ip_address; - if (server->port >> 16 != 0) { - if (log) { - gpr_log(GPR_ERROR, - "Invalid port '%d' at index %lu of serverlist. Ignoring.", - server->port, (unsigned long)idx); - } - return false; - } - if (ip->size != 4 && ip->size != 16) { - if (log) { - gpr_log(GPR_ERROR, - "Expected IP to be 4 or 16 bytes, got %d at index %lu of " - "serverlist. Ignoring", - ip->size, (unsigned long)idx); - } - return false; - } - return true; -} - -/* vtable for LB tokens in grpc_lb_addresses. */ -static void *lb_token_copy(void *token) { - return token == NULL - ? NULL - : (void *)GRPC_MDELEM_REF((grpc_mdelem){(uintptr_t)token}).payload; -} -static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) { - if (token != NULL) { - GRPC_MDELEM_UNREF(exec_ctx, (grpc_mdelem){(uintptr_t)token}); - } -} -static int lb_token_cmp(void *token1, void *token2) { - if (token1 > token2) return 1; - if (token1 < token2) return -1; - return 0; -} -static const grpc_lb_user_data_vtable lb_token_vtable = { - lb_token_copy, lb_token_destroy, lb_token_cmp}; - -static void parse_server(const grpc_grpclb_server *server, - grpc_resolved_address *addr) { - memset(addr, 0, sizeof(*addr)); - if (server->drop) return; - const uint16_t netorder_port = htons((uint16_t)server->port); - /* the addresses are given in binary format (a in(6)_addr struct) in - * server->ip_address.bytes. */ - const grpc_grpclb_ip_address *ip = &server->ip_address; - if (ip->size == 4) { - addr->len = sizeof(struct sockaddr_in); - struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr; - addr4->sin_family = AF_INET; - memcpy(&addr4->sin_addr, ip->bytes, ip->size); - addr4->sin_port = netorder_port; - } else if (ip->size == 16) { - addr->len = sizeof(struct sockaddr_in6); - struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr; - addr6->sin6_family = AF_INET6; - memcpy(&addr6->sin6_addr, ip->bytes, ip->size); - addr6->sin6_port = netorder_port; - } -} - -/* Returns addresses extracted from \a serverlist. */ -static grpc_lb_addresses *process_serverlist_locked( - grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) { - size_t num_valid = 0; - /* first pass: count how many are valid in order to allocate the necessary - * memory in a single block */ - for (size_t i = 0; i < serverlist->num_servers; ++i) { - if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid; - } - grpc_lb_addresses *lb_addresses = - grpc_lb_addresses_create(num_valid, &lb_token_vtable); - /* second pass: actually populate the addresses and LB tokens (aka user data - * to the outside world) to be read by the RR policy during its creation. 
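parse_server() above converts a serverlist entry, whose address arrives as 4 or 16 raw bytes plus a host-order port, into a sockaddr_in or sockaddr_in6. A POSIX-only sketch of the same conversion (BuildSockaddr is an illustrative name; the real code writes into a grpc_resolved_address):

#include <cstdint>
#include <cstring>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

// Build a sockaddr from the balancer's raw address bytes and port.
inline bool BuildSockaddr(const uint8_t* ip_bytes, size_t ip_size,
                          uint16_t port_host_order, sockaddr_storage* out,
                          socklen_t* out_len) {
  std::memset(out, 0, sizeof(*out));
  if (ip_size == 4) {
    auto* addr4 = reinterpret_cast<sockaddr_in*>(out);
    addr4->sin_family = AF_INET;
    std::memcpy(&addr4->sin_addr, ip_bytes, 4);
    addr4->sin_port = htons(port_host_order);
    *out_len = sizeof(sockaddr_in);
    return true;
  }
  if (ip_size == 16) {
    auto* addr6 = reinterpret_cast<sockaddr_in6*>(out);
    addr6->sin6_family = AF_INET6;
    std::memcpy(&addr6->sin6_addr, ip_bytes, 16);
    addr6->sin6_port = htons(port_host_order);
    *out_len = sizeof(sockaddr_in6);
    return true;
  }
  return false;  // is_server_valid() rejects anything else
}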
- * Given that the validity tests are very cheap, they are performed again - * instead of marking the valid ones during the first pass, as this would - * incurr in an allocation due to the arbitrary number of server */ - size_t addr_idx = 0; - for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) { - const grpc_grpclb_server *server = serverlist->servers[sl_idx]; - if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue; - GPR_ASSERT(addr_idx < num_valid); - /* address processing */ - grpc_resolved_address addr; - parse_server(server, &addr); - /* lb token processing */ - void *user_data; - if (server->has_load_balance_token) { - const size_t lb_token_max_length = - GPR_ARRAY_SIZE(server->load_balance_token); - const size_t lb_token_length = - strnlen(server->load_balance_token, lb_token_max_length); - grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer( - server->load_balance_token, lb_token_length); - user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN, - lb_token_mdstr) - .payload; - } else { - char *uri = grpc_sockaddr_to_uri(&addr); - gpr_log(GPR_INFO, - "Missing LB token for backend address '%s'. The empty token will " - "be used instead", - uri); - gpr_free(uri); - user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload; - } - - grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len, - false /* is_balancer */, - NULL /* balancer_name */, user_data); - ++addr_idx; - } - GPR_ASSERT(addr_idx == num_valid); - return lb_addresses; -} - -/* Returns the backend addresses extracted from the given addresses */ -static grpc_lb_addresses *extract_backend_addresses_locked( - grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) { - /* first pass: count the number of backend addresses */ - size_t num_backends = 0; - for (size_t i = 0; i < addresses->num_addresses; ++i) { - if (!addresses->addresses[i].is_balancer) { - ++num_backends; - } - } - /* second pass: actually populate the addresses and (empty) LB tokens */ - grpc_lb_addresses *backend_addresses = - grpc_lb_addresses_create(num_backends, &lb_token_vtable); - size_t num_copied = 0; - for (size_t i = 0; i < addresses->num_addresses; ++i) { - if (addresses->addresses[i].is_balancer) continue; - const grpc_resolved_address *addr = &addresses->addresses[i].address; - grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr, - addr->len, false /* is_balancer */, - NULL /* balancer_name */, - (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload); - ++num_copied; - } - return backend_addresses; -} - -static void update_lb_connectivity_status_locked( - grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, - grpc_connectivity_state rr_state, grpc_error *rr_state_error) { - const grpc_connectivity_state curr_glb_state = - grpc_connectivity_state_check(&glb_policy->state_tracker); - - /* The new connectivity status is a function of the previous one and the new - * input coming from the status of the RR policy. 
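process_serverlist_locked() and extract_backend_addresses_locked() both use a two-pass scan: count the qualifying entries so the destination can be allocated in a single block, then populate it. A trivial sketch of that pattern with std::vector (FakeServer and ExtractBackends are stand-ins):

#include <string>
#include <vector>

struct FakeServer {
  std::string address;
  bool is_balancer = false;
};

// Two passes: count the backends first so storage is allocated once,
// then copy them, mirroring extract_backend_addresses_locked().
inline std::vector<std::string> ExtractBackends(
    const std::vector<FakeServer>& all) {
  size_t num_backends = 0;
  for (const FakeServer& s : all) {
    if (!s.is_balancer) ++num_backends;  // first pass: count
  }
  std::vector<std::string> backends;
  backends.reserve(num_backends);
  for (const FakeServer& s : all) {
    if (!s.is_balancer) backends.push_back(s.address);  // second pass: copy
  }
  return backends;
}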
- * - * current state (grpclb's) - * | - * v || I | C | R | TF | SD | <- new state (RR's) - * ===++====+=====+=====+======+======+ - * I || I | C | R | [I] | [I] | - * ---++----+-----+-----+------+------+ - * C || I | C | R | [C] | [C] | - * ---++----+-----+-----+------+------+ - * R || I | C | R | [R] | [R] | - * ---++----+-----+-----+------+------+ - * TF || I | C | R | [TF] | [TF] | - * ---++----+-----+-----+------+------+ - * SD || NA | NA | NA | NA | NA | (*) - * ---++----+-----+-----+------+------+ - * - * A [STATE] indicates that the old RR policy is kept. In those cases, STATE - * is the current state of grpclb, which is left untouched. - * - * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to - * the previous RR instance. - * - * Note that the status is never updated to SHUTDOWN as a result of calling - * this function. Only glb_shutdown() has the power to set that state. - * - * (*) This function mustn't be called during shutting down. */ - GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN); - - switch (rr_state) { - case GRPC_CHANNEL_TRANSIENT_FAILURE: - case GRPC_CHANNEL_SHUTDOWN: - GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE); - break; - case GRPC_CHANNEL_INIT: - case GRPC_CHANNEL_IDLE: - case GRPC_CHANNEL_CONNECTING: - case GRPC_CHANNEL_READY: - GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE); - } - - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log( - GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.", - grpc_connectivity_state_name(rr_state), (void *)glb_policy->rr_policy); - } - grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state, - rr_state_error, - "update_lb_connectivity_status_locked"); -} - -/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return - * immediately (ignoring its completion callback), we need to perform the - * cleanups this callback would otherwise be resposible for. - * If \a force_async is true, then we will manually schedule the - * completion callback even if the pick is available immediately. */ -static bool pick_from_internal_rr_locked( - grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, - const grpc_lb_policy_pick_args *pick_args, bool force_async, - grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) { - // Check for drops if we are not using fallback backend addresses. - if (glb_policy->serverlist != NULL) { - // Look at the index into the serverlist to see if we should drop this call. - grpc_grpclb_server *server = - glb_policy->serverlist->servers[glb_policy->serverlist_index++]; - if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) { - glb_policy->serverlist_index = 0; // Wrap-around. - } - if (server->drop) { - // Not using the RR policy, so unref it. - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")", - (intptr_t)wc_arg->rr_policy); - } - GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync"); - // Update client load reporting stats to indicate the number of - // dropped calls. Note that we have to do this here instead of in - // the client_load_reporting filter, because we do not create a - // subchannel call (and therefore no client_load_reporting filter) - // for dropped calls. 
- grpc_grpclb_client_stats_add_call_dropped_locked( - server->load_balance_token, wc_arg->client_stats); - grpc_grpclb_client_stats_unref(wc_arg->client_stats); - if (force_async) { - GPR_ASSERT(wc_arg->wrapped_closure != NULL); - GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE); - gpr_free(wc_arg->free_when_done); - return false; - } - gpr_free(wc_arg->free_when_done); - return true; - } - } - // Pick via the RR policy. - const bool pick_done = grpc_lb_policy_pick_locked( - exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context, - (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure); - if (pick_done) { - /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */ - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")", - (intptr_t)wc_arg->rr_policy); - } - GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync"); - /* add the load reporting initial metadata */ - initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata, - pick_args->lb_token_mdelem_storage, - GRPC_MDELEM_REF(wc_arg->lb_token)); - // Pass on client stats via context. Passes ownership of the reference. - GPR_ASSERT(wc_arg->client_stats != NULL); - wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats; - wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats; - if (force_async) { - GPR_ASSERT(wc_arg->wrapped_closure != NULL); - GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE); - gpr_free(wc_arg->free_when_done); - return false; - } - gpr_free(wc_arg->free_when_done); - } - /* else, the pending pick will be registered and taken care of by the - * pending pick list inside the RR policy (glb_policy->rr_policy). - * Eventually, wrapped_on_complete will be called, which will -among other - * things- add the LB token to the call's initial metadata */ - return pick_done; -} - -static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - grpc_lb_addresses *addresses; - if (glb_policy->serverlist != NULL) { - GPR_ASSERT(glb_policy->serverlist->num_servers > 0); - addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist); - } else { - // If rr_handover_locked() is invoked when we haven't received any - // serverlist from the balancer, we use the fallback backends returned by - // the resolver. Note that the fallback backend list may be empty, in which - // case the new round_robin policy will keep the requested picks pending. - GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL); - addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses); - } - GPR_ASSERT(addresses != NULL); - grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args)); - args->client_channel_factory = glb_policy->cc_factory; - args->combiner = glb_policy->base.combiner; - // Replace the LB addresses in the channel args that we pass down to - // the subchannel. 
- static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES}; - const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses); - args->args = grpc_channel_args_copy_and_add_and_remove( - glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg, - 1); - grpc_lb_addresses_destroy(exec_ctx, addresses); - return args; -} - -static void lb_policy_args_destroy(grpc_exec_ctx *exec_ctx, - grpc_lb_policy_args *args) { - grpc_channel_args_destroy(exec_ctx, args->args); - gpr_free(args); -} - -static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error); -static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, - grpc_lb_policy_args *args) { - GPR_ASSERT(glb_policy->rr_policy == NULL); - - grpc_lb_policy *new_rr_policy = - grpc_lb_policy_create(exec_ctx, "round_robin", args); - if (new_rr_policy == NULL) { - gpr_log(GPR_ERROR, - "Failure creating a RoundRobin policy for serverlist update with " - "%lu entries. The previous RR instance (%p), if any, will continue " - "to be used. Future updates from the LB will attempt to create new " - "instances.", - (unsigned long)glb_policy->serverlist->num_servers, - (void *)glb_policy->rr_policy); - return; - } - glb_policy->rr_policy = new_rr_policy; - grpc_error *rr_state_error = NULL; - const grpc_connectivity_state rr_state = - grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy, - &rr_state_error); - /* Connectivity state is a function of the RR policy updated/created */ - update_lb_connectivity_status_locked(exec_ctx, glb_policy, rr_state, - rr_state_error); - /* Add the gRPC LB's interested_parties pollset_set to that of the newly - * created RR policy. This will make the RR policy progress upon activity on - * gRPC LB, which in turn is tied to the application's call */ - grpc_pollset_set_add_pollset_set(exec_ctx, - glb_policy->rr_policy->interested_parties, - glb_policy->base.interested_parties); - - /* Allocate the data for the tracking of the new RR policy's connectivity. 
- * It'll be deallocated in glb_rr_connectivity_changed() */ - rr_connectivity_data *rr_connectivity = - (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data)); - GRPC_CLOSURE_INIT(&rr_connectivity->on_change, - glb_rr_connectivity_changed_locked, rr_connectivity, - grpc_combiner_scheduler(glb_policy->base.combiner)); - rr_connectivity->glb_policy = glb_policy; - rr_connectivity->state = rr_state; - - /* Subscribe to changes to the connectivity of the new RR */ - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb"); - grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy, - &rr_connectivity->state, - &rr_connectivity->on_change); - grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy); - - /* Update picks and pings in wait */ - pending_pick *pp; - while ((pp = glb_policy->pending_picks)) { - glb_policy->pending_picks = pp->next; - GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick"); - pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy; - pp->wrapped_on_complete_arg.client_stats = - grpc_grpclb_client_stats_ref(glb_policy->client_stats); - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p", - (void *)glb_policy->rr_policy); - } - pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args, - true /* force_async */, pp->target, - &pp->wrapped_on_complete_arg); - } - - pending_ping *pping; - while ((pping = glb_policy->pending_pings)) { - glb_policy->pending_pings = pping->next; - GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping"); - pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy; - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "", - (intptr_t)glb_policy->rr_policy); - } - grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, - &pping->wrapped_notify_arg.wrapper_closure); - } -} - -/* glb_policy->rr_policy may be NULL (initial handover) */ -static void rr_handover_locked(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - if (glb_policy->shutting_down) return; - grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy); - GPR_ASSERT(args != NULL); - if (glb_policy->rr_policy != NULL) { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)", - (void *)glb_policy->rr_policy); - } - grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args); - } else { - create_rr_locked(exec_ctx, glb_policy, args); - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)", - (void *)glb_policy->rr_policy); - } - } - lb_policy_args_destroy(exec_ctx, args); -} - -static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg; - glb_lb_policy *glb_policy = rr_connectivity->glb_policy; - if (glb_policy->shutting_down) { - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "glb_rr_connectivity_cb"); - gpr_free(rr_connectivity); - return; - } - if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) { - /* An RR policy that has transitioned into the SHUTDOWN connectivity state - * should not be considered for picks or updates: the SHUTDOWN state is a - * sink, policies can't transition back from it. 
.*/ - GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, - "rr_connectivity_shutdown"); - glb_policy->rr_policy = NULL; - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "glb_rr_connectivity_cb"); - gpr_free(rr_connectivity); - return; - } - /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */ - update_lb_connectivity_status_locked( - exec_ctx, glb_policy, rr_connectivity->state, GRPC_ERROR_REF(error)); - /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */ - grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy, - &rr_connectivity->state, - &rr_connectivity->on_change); -} - -static void destroy_balancer_name(grpc_exec_ctx *exec_ctx, - void *balancer_name) { - gpr_free(balancer_name); -} - -static grpc_slice_hash_table_entry targets_info_entry_create( - const char *address, const char *balancer_name) { - grpc_slice_hash_table_entry entry; - entry.key = grpc_slice_from_copied_string(address); - entry.value = gpr_strdup(balancer_name); - return entry; -} - -static int balancer_name_cmp_fn(void *a, void *b) { - const char *a_str = (const char *)a; - const char *b_str = (const char *)b; - return strcmp(a_str, b_str); -} - -/* Returns the channel args for the LB channel, used to create a bidirectional - * stream for the reception of load balancing updates. - * - * Inputs: - * - \a addresses: corresponding to the balancers. - * - \a response_generator: in order to propagate updates from the resolver - * above the grpclb policy. - * - \a args: other args inherited from the grpclb policy. */ -static grpc_channel_args *build_lb_channel_args( - grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses, - grpc_fake_resolver_response_generator *response_generator, - const grpc_channel_args *args) { - size_t num_grpclb_addrs = 0; - for (size_t i = 0; i < addresses->num_addresses; ++i) { - if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs; - } - /* All input addresses come from a resolver that claims they are LB services. - * It's the resolver's responsibility to make sure this policy is only - * instantiated and used in that case. Otherwise, something has gone wrong. */ - GPR_ASSERT(num_grpclb_addrs > 0); - grpc_lb_addresses *lb_addresses = - grpc_lb_addresses_create(num_grpclb_addrs, NULL); - grpc_slice_hash_table_entry *targets_info_entries = - (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) * - num_grpclb_addrs); - - size_t lb_addresses_idx = 0; - for (size_t i = 0; i < addresses->num_addresses; ++i) { - if (!addresses->addresses[i].is_balancer) continue; - if (addresses->addresses[i].user_data != NULL) { - gpr_log(GPR_ERROR, - "This LB policy doesn't support user data. 
It will be ignored"); - } - char *addr_str; - GPR_ASSERT(grpc_sockaddr_to_string( - &addr_str, &addresses->addresses[i].address, true) > 0); - targets_info_entries[lb_addresses_idx] = targets_info_entry_create( - addr_str, addresses->addresses[i].balancer_name); - gpr_free(addr_str); - - grpc_lb_addresses_set_address( - lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr, - addresses->addresses[i].address.len, false /* is balancer */, - addresses->addresses[i].balancer_name, NULL /* user data */); - } - GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx); - grpc_slice_hash_table *targets_info = - grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries, - destroy_balancer_name, balancer_name_cmp_fn); - gpr_free(targets_info_entries); - - grpc_channel_args *lb_channel_args = - grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info, - response_generator, args); - - grpc_arg lb_channel_addresses_arg = - grpc_lb_addresses_create_channel_arg(lb_addresses); - - grpc_channel_args *result = grpc_channel_args_copy_and_add( - lb_channel_args, &lb_channel_addresses_arg, 1); - grpc_slice_hash_table_unref(exec_ctx, targets_info); - grpc_channel_args_destroy(exec_ctx, lb_channel_args); - grpc_lb_addresses_destroy(exec_ctx, lb_addresses); - return result; -} - -static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { - glb_lb_policy *glb_policy = (glb_lb_policy *)pol; - GPR_ASSERT(glb_policy->pending_picks == NULL); - GPR_ASSERT(glb_policy->pending_pings == NULL); - gpr_free((void *)glb_policy->server_name); - grpc_channel_args_destroy(exec_ctx, glb_policy->args); - if (glb_policy->client_stats != NULL) { - grpc_grpclb_client_stats_unref(glb_policy->client_stats); - } - grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker); - if (glb_policy->serverlist != NULL) { - grpc_grpclb_destroy_serverlist(glb_policy->serverlist); - } - if (glb_policy->fallback_backend_addresses != NULL) { - grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses); - } - grpc_fake_resolver_response_generator_unref(glb_policy->response_generator); - grpc_subchannel_index_unref(); - if (glb_policy->pending_update_args != NULL) { - grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args); - gpr_free(glb_policy->pending_update_args); - } - gpr_free(glb_policy); -} - -static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { - glb_lb_policy *glb_policy = (glb_lb_policy *)pol; - glb_policy->shutting_down = true; - - /* We need a copy of the lb_call pointer because we can't cancell the call - * while holding glb_policy->mu: lb_on_server_status_received, invoked due to - * the cancel, needs to acquire that same lock */ - grpc_call *lb_call = glb_policy->lb_call; - - /* glb_policy->lb_call and this local lb_call must be consistent at this point - * because glb_policy->lb_call is only assigned in lb_call_init_locked as part - * of query_for_backends_locked, which can only be invoked while - * glb_policy->shutting_down is false. 
*/ - if (lb_call != NULL) { - grpc_call_cancel(lb_call, NULL); - /* lb_on_server_status_received will pick up the cancel and clean up */ - } - if (glb_policy->retry_timer_active) { - grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer); - glb_policy->retry_timer_active = false; - } - - pending_pick *pp = glb_policy->pending_picks; - glb_policy->pending_picks = NULL; - pending_ping *pping = glb_policy->pending_pings; - glb_policy->pending_pings = NULL; - if (glb_policy->rr_policy != NULL) { - GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown"); - } - // We destroy the LB channel here because - // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy - // instance. Destroying the lb channel in glb_destroy would likely result in - // a callback invocation without a valid glb_policy arg. - if (glb_policy->lb_channel != NULL) { - grpc_channel_destroy(glb_policy->lb_channel); - glb_policy->lb_channel = NULL; - } - grpc_connectivity_state_set( - exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown"); - - while (pp != NULL) { - pending_pick *next = pp->next; - *pp->target = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure, - GRPC_ERROR_NONE); - pp = next; - } - - while (pping != NULL) { - pending_ping *next = pping->next; - GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure, - GRPC_ERROR_NONE); - pping = next; - } -} - -// Cancel a specific pending pick. -// -// A grpclb pick progresses as follows: -// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be -// handed over to the RR policy (in create_rr_locked()). From that point -// onwards, it'll be RR's responsibility. For cancellations, that implies the -// pick needs also be cancelled by the RR instance. -// - Otherwise, without an RR instance, picks stay pending at this policy's -// level (grpclb), inside the glb_policy->pending_picks list. To cancel these, -// we invoke the completion closure and set *target to NULL right here. -static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_connected_subchannel **target, - grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)pol; - pending_pick *pp = glb_policy->pending_picks; - glb_policy->pending_picks = NULL; - while (pp != NULL) { - pending_pick *next = pp->next; - if (pp->target == target) { - *target = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick Cancelled", &error, 1)); - } else { - pp->next = glb_policy->pending_picks; - glb_policy->pending_picks = pp; - } - pp = next; - } - if (glb_policy->rr_policy != NULL) { - grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target, - GRPC_ERROR_REF(error)); - } - GRPC_ERROR_UNREF(error); -} - -// Cancel all pending picks. -// -// A grpclb pick progresses as follows: -// - If there's a Round Robin policy (glb_policy->rr_policy) available, it'll be -// handed over to the RR policy (in create_rr_locked()). From that point -// onwards, it'll be RR's responsibility. For cancellations, that implies the -// pick needs also be cancelled by the RR instance. -// - Otherwise, without an RR instance, picks stay pending at this policy's -// level (grpclb), inside the glb_policy->pending_picks list. To cancel these, -// we invoke the completion closure and set *target to NULL right here. 
-static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *pol, - uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq, - grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)pol; - pending_pick *pp = glb_policy->pending_picks; - glb_policy->pending_picks = NULL; - while (pp != NULL) { - pending_pick *next = pp->next; - if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) == - initial_metadata_flags_eq) { - GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick Cancelled", &error, 1)); - } else { - pp->next = glb_policy->pending_picks; - glb_policy->pending_picks = pp; - } - pp = next; - } - if (glb_policy->rr_policy != NULL) { - grpc_lb_policy_cancel_picks_locked( - exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask, - initial_metadata_flags_eq, GRPC_ERROR_REF(error)); - } - GRPC_ERROR_UNREF(error); -} - -static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); -static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy); -static void start_picking_locked(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - /* start a timer to fall back */ - if (glb_policy->lb_fallback_timeout_ms > 0 && - glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) { - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec deadline = gpr_time_add( - now, - gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN)); - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer"); - GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked, - glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - glb_policy->fallback_timer_active = true; - grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline, - &glb_policy->lb_on_fallback, now); - } - - glb_policy->started_picking = true; - gpr_backoff_reset(&glb_policy->lb_call_backoff_state); - query_for_backends_locked(exec_ctx, glb_policy); -} - -static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { - glb_lb_policy *glb_policy = (glb_lb_policy *)pol; - if (!glb_policy->started_picking) { - start_picking_locked(exec_ctx, glb_policy); - } -} - -static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - const grpc_lb_policy_pick_args *pick_args, - grpc_connected_subchannel **target, - grpc_call_context_element *context, void **user_data, - grpc_closure *on_complete) { - if (pick_args->lb_token_mdelem_storage == NULL) { - *target = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, on_complete, - GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "No mdelem storage for the LB token. Load reporting " - "won't work without it. 
Failing")); - return 0; - } - - glb_lb_policy *glb_policy = (glb_lb_policy *)pol; - bool pick_done; - - if (glb_policy->rr_policy != NULL) { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p", - (void *)glb_policy, (void *)glb_policy->rr_policy); - } - GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick"); - - wrapped_rr_closure_arg *wc_arg = - (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg)); - - GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg, - grpc_schedule_on_exec_ctx); - wc_arg->rr_policy = glb_policy->rr_policy; - wc_arg->target = target; - wc_arg->context = context; - GPR_ASSERT(glb_policy->client_stats != NULL); - wc_arg->client_stats = - grpc_grpclb_client_stats_ref(glb_policy->client_stats); - wc_arg->wrapped_closure = on_complete; - wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage; - wc_arg->initial_metadata = pick_args->initial_metadata; - wc_arg->free_when_done = wc_arg; - pick_done = - pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args, - false /* force_async */, target, wc_arg); - } else { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_DEBUG, - "No RR policy in grpclb instance %p. Adding to grpclb's pending " - "picks", - (void *)(glb_policy)); - } - add_pending_pick(&glb_policy->pending_picks, pick_args, target, context, - on_complete); - - if (!glb_policy->started_picking) { - start_picking_locked(exec_ctx, glb_policy); - } - pick_done = false; - } - return pick_done; -} - -static grpc_connectivity_state glb_check_connectivity_locked( - grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_error **connectivity_error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)pol; - return grpc_connectivity_state_get(&glb_policy->state_tracker, - connectivity_error); -} - -static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_closure *closure) { - glb_lb_policy *glb_policy = (glb_lb_policy *)pol; - if (glb_policy->rr_policy) { - grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure); - } else { - add_pending_ping(&glb_policy->pending_pings, closure); - if (!glb_policy->started_picking) { - start_picking_locked(exec_ctx, glb_policy); - } - } -} - -static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *pol, - grpc_connectivity_state *current, - grpc_closure *notify) { - glb_lb_policy *glb_policy = (glb_lb_policy *)pol; - grpc_connectivity_state_notify_on_state_change( - exec_ctx, &glb_policy->state_tracker, current, notify); -} - -static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - glb_policy->retry_timer_active = false; - if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)", - (void *)glb_policy); - } - GPR_ASSERT(glb_policy->lb_call == NULL); - query_for_backends_locked(exec_ctx, glb_policy); - } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer"); -} - -static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - if (glb_policy->started_picking && glb_policy->updating_lb_call) { - if (glb_policy->retry_timer_active) { - grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer); - } - if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy); - glb_policy->updating_lb_call = false; 
- } else if (!glb_policy->shutting_down) { - /* if we aren't shutting down, restart the LB client call after some time */ - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_try = - gpr_backoff_step(&glb_policy->lb_call_backoff_state, now); - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...", - (void *)glb_policy); - gpr_timespec timeout = gpr_time_sub(next_try, now); - if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) { - gpr_log(GPR_DEBUG, - "... retry_timer_active in %" PRId64 ".%09d seconds.", - timeout.tv_sec, timeout.tv_nsec); - } else { - gpr_log(GPR_DEBUG, "... retry_timer_active immediately."); - } - } - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer"); - GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry, - lb_call_on_retry_timer_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - glb_policy->retry_timer_active = true; - grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try, - &glb_policy->lb_on_call_retry, now); - } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "lb_on_server_status_received_locked"); -} - -static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); - -static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - const gpr_timespec next_client_load_report_time = - gpr_time_add(now, glb_policy->client_stats_report_interval); - GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure, - send_client_load_report_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer, - next_client_load_report_time, - &glb_policy->client_load_report_closure, now); -} - -static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - grpc_byte_buffer_destroy(glb_policy->client_load_report_payload); - glb_policy->client_load_report_payload = NULL; - if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) { - glb_policy->client_load_report_timer_pending = false; - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "client_load_report"); - return; - } - schedule_next_client_load_report(exec_ctx, glb_policy); -} - -static bool load_report_counters_are_zero(grpc_grpclb_request *request) { - grpc_grpclb_dropped_call_counts *drop_entries = - (grpc_grpclb_dropped_call_counts *) - request->client_stats.calls_finished_with_drop.arg; - return request->client_stats.num_calls_started == 0 && - request->client_stats.num_calls_finished == 0 && - request->client_stats.num_calls_finished_with_client_failed_to_send == - 0 && - request->client_stats.num_calls_finished_known_received == 0 && - (drop_entries == NULL || drop_entries->num_entries == 0); -} - -static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) { - glb_policy->client_load_report_timer_pending = false; - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "client_load_report"); - if (glb_policy->lb_call == NULL) { - maybe_restart_lb_call(exec_ctx, glb_policy); - } - return; - } - // Construct message payload. 
- GPR_ASSERT(glb_policy->client_load_report_payload == NULL); - grpc_grpclb_request *request = - grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats); - // Skip client load report if the counters were all zero in the last - // report and they are still zero in this one. - if (load_report_counters_are_zero(request)) { - if (glb_policy->last_client_load_report_counters_were_zero) { - grpc_grpclb_request_destroy(request); - schedule_next_client_load_report(exec_ctx, glb_policy); - return; - } - glb_policy->last_client_load_report_counters_were_zero = true; - } else { - glb_policy->last_client_load_report_counters_were_zero = false; - } - grpc_slice request_payload_slice = grpc_grpclb_request_encode(request); - glb_policy->client_load_report_payload = - grpc_raw_byte_buffer_create(&request_payload_slice, 1); - grpc_slice_unref_internal(exec_ctx, request_payload_slice); - grpc_grpclb_request_destroy(request); - // Send load report message. - grpc_op op; - memset(&op, 0, sizeof(op)); - op.op = GRPC_OP_SEND_MESSAGE; - op.data.send_message.send_message = glb_policy->client_load_report_payload; - GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure, - client_load_report_done_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - grpc_call_error call_error = grpc_call_start_batch_and_execute( - exec_ctx, glb_policy->lb_call, &op, 1, - &glb_policy->client_load_report_closure); - if (call_error != GRPC_CALL_OK) { - gpr_log(GPR_ERROR, "call_error=%d", call_error); - GPR_ASSERT(GRPC_CALL_OK == call_error); - } -} - -static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error); -static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); -static void lb_call_init_locked(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - GPR_ASSERT(glb_policy->server_name != NULL); - GPR_ASSERT(glb_policy->server_name[0] != '\0'); - GPR_ASSERT(glb_policy->lb_call == NULL); - GPR_ASSERT(!glb_policy->shutting_down); - - /* Note the following LB call progresses every time there's activity in \a - * glb_policy->base.interested_parties, which is comprised of the polling - * entities from \a client_channel. */ - grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name); - gpr_timespec deadline = - glb_policy->lb_call_timeout_ms == 0 - ? 
gpr_inf_future(GPR_CLOCK_MONOTONIC) - : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_millis(glb_policy->lb_call_timeout_ms, - GPR_TIMESPAN)); - glb_policy->lb_call = grpc_channel_create_pollset_set_call( - exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS, - glb_policy->base.interested_parties, - GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD, - &host, deadline, NULL); - grpc_slice_unref_internal(exec_ctx, host); - - if (glb_policy->client_stats != NULL) { - grpc_grpclb_client_stats_unref(glb_policy->client_stats); - } - glb_policy->client_stats = grpc_grpclb_client_stats_create(); - - grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv); - grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv); - - grpc_grpclb_request *request = - grpc_grpclb_request_create(glb_policy->server_name); - grpc_slice request_payload_slice = grpc_grpclb_request_encode(request); - glb_policy->lb_request_payload = - grpc_raw_byte_buffer_create(&request_payload_slice, 1); - grpc_slice_unref_internal(exec_ctx, request_payload_slice); - grpc_grpclb_request_destroy(request); - - GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received, - lb_on_server_status_received_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - GRPC_CLOSURE_INIT(&glb_policy->lb_on_response_received, - lb_on_response_received_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - - gpr_backoff_init(&glb_policy->lb_call_backoff_state, - GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS, - GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER, - GRPC_GRPCLB_RECONNECT_JITTER, - GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000, - GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000); - - glb_policy->seen_initial_response = false; - glb_policy->last_client_load_report_counters_were_zero = false; -} - -static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - GPR_ASSERT(glb_policy->lb_call != NULL); - grpc_call_unref(glb_policy->lb_call); - glb_policy->lb_call = NULL; - - grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv); - grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv); - - grpc_byte_buffer_destroy(glb_policy->lb_request_payload); - grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details); - - if (glb_policy->client_load_report_timer_pending) { - grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer); - } -} - -/* - * Auxiliary functions and LB client callbacks. 
- */ -static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - GPR_ASSERT(glb_policy->lb_channel != NULL); - if (glb_policy->shutting_down) return; - - lb_call_init_locked(exec_ctx, glb_policy); - - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)", - (void *)glb_policy, (void *)glb_policy->lb_channel, - (void *)glb_policy->lb_call); - } - GPR_ASSERT(glb_policy->lb_call != NULL); - - grpc_call_error call_error; - grpc_op ops[3]; - memset(ops, 0, sizeof(ops)); - - grpc_op *op = ops; - op->op = GRPC_OP_SEND_INITIAL_METADATA; - op->data.send_initial_metadata.count = 0; - op->flags = 0; - op->reserved = NULL; - op++; - op->op = GRPC_OP_RECV_INITIAL_METADATA; - op->data.recv_initial_metadata.recv_initial_metadata = - &glb_policy->lb_initial_metadata_recv; - op->flags = 0; - op->reserved = NULL; - op++; - GPR_ASSERT(glb_policy->lb_request_payload != NULL); - op->op = GRPC_OP_SEND_MESSAGE; - op->data.send_message.send_message = glb_policy->lb_request_payload; - op->flags = 0; - op->reserved = NULL; - op++; - call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call, - ops, (size_t)(op - ops), NULL); - GPR_ASSERT(GRPC_CALL_OK == call_error); - - op = ops; - op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; - op->data.recv_status_on_client.trailing_metadata = - &glb_policy->lb_trailing_metadata_recv; - op->data.recv_status_on_client.status = &glb_policy->lb_call_status; - op->data.recv_status_on_client.status_details = - &glb_policy->lb_call_status_details; - op->flags = 0; - op->reserved = NULL; - op++; - /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref - * count goes to zero) to be unref'd in lb_on_server_status_received_locked */ - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, - "lb_on_server_status_received_locked"); - call_error = grpc_call_start_batch_and_execute( - exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), - &glb_policy->lb_on_server_status_received); - GPR_ASSERT(GRPC_CALL_OK == call_error); - - op = ops; - op->op = GRPC_OP_RECV_MESSAGE; - op->data.recv_message.recv_message = &glb_policy->lb_response_payload; - op->flags = 0; - op->reserved = NULL; - op++; - /* take another weak ref to be unref'd/reused in - * lb_on_response_received_locked */ - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked"); - call_error = grpc_call_start_batch_and_execute( - exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), - &glb_policy->lb_on_response_received); - GPR_ASSERT(GRPC_CALL_OK == call_error); -} - -static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - grpc_op ops[2]; - memset(ops, 0, sizeof(ops)); - grpc_op *op = ops; - if (glb_policy->lb_response_payload != NULL) { - gpr_backoff_reset(&glb_policy->lb_call_backoff_state); - /* Received data from the LB server. Look inside - * glb_policy->lb_response_payload, for a serverlist. 
*/ - grpc_byte_buffer_reader bbr; - grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload); - grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr); - grpc_byte_buffer_reader_destroy(&bbr); - grpc_byte_buffer_destroy(glb_policy->lb_response_payload); - - grpc_grpclb_initial_response *response = NULL; - if (!glb_policy->seen_initial_response && - (response = grpc_grpclb_initial_response_parse(response_slice)) != - NULL) { - if (response->has_client_stats_report_interval) { - glb_policy->client_stats_report_interval = - gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN), - grpc_grpclb_duration_to_timespec( - &response->client_stats_report_interval)); - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "received initial LB response message; " - "client load reporting interval = %" PRId64 ".%09d sec", - glb_policy->client_stats_report_interval.tv_sec, - glb_policy->client_stats_report_interval.tv_nsec); - } - /* take a weak ref (won't prevent calling of \a glb_shutdown() if the - * strong ref count goes to zero) to be unref'd in - * send_client_load_report_locked() */ - glb_policy->client_load_report_timer_pending = true; - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report"); - schedule_next_client_load_report(exec_ctx, glb_policy); - } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "received initial LB response message; " - "client load reporting NOT enabled"); - } - grpc_grpclb_initial_response_destroy(response); - glb_policy->seen_initial_response = true; - } else { - grpc_grpclb_serverlist *serverlist = - grpc_grpclb_response_parse_serverlist(response_slice); - if (serverlist != NULL) { - GPR_ASSERT(glb_policy->lb_call != NULL); - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Serverlist with %lu servers received", - (unsigned long)serverlist->num_servers); - for (size_t i = 0; i < serverlist->num_servers; ++i) { - grpc_resolved_address addr; - parse_server(serverlist->servers[i], &addr); - char *ipport; - grpc_sockaddr_to_string(&ipport, &addr, false); - gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport); - gpr_free(ipport); - } - } - /* update serverlist */ - if (serverlist->num_servers > 0) { - if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, - serverlist)) { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "Incoming server list identical to current, ignoring."); - } - grpc_grpclb_destroy_serverlist(serverlist); - } else { /* new serverlist */ - if (glb_policy->serverlist != NULL) { - /* dispose of the old serverlist */ - grpc_grpclb_destroy_serverlist(glb_policy->serverlist); - } else { - /* or dispose of the fallback */ - grpc_lb_addresses_destroy(exec_ctx, - glb_policy->fallback_backend_addresses); - glb_policy->fallback_backend_addresses = NULL; - if (glb_policy->fallback_timer_active) { - grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer); - glb_policy->fallback_timer_active = false; - } - } - /* and update the copy in the glb_lb_policy instance. This - * serverlist instance will be destroyed either upon the next - * update or in glb_destroy() */ - glb_policy->serverlist = serverlist; - glb_policy->serverlist_index = 0; - rr_handover_locked(exec_ctx, glb_policy); - } - } else { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Received empty server list, ignoring."); - } - grpc_grpclb_destroy_serverlist(serverlist); - } - } else { /* serverlist == NULL */ - gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. 
Ignoring.", - grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX)); - } - } - grpc_slice_unref_internal(exec_ctx, response_slice); - if (!glb_policy->shutting_down) { - /* keep listening for serverlist updates */ - op->op = GRPC_OP_RECV_MESSAGE; - op->data.recv_message.recv_message = &glb_policy->lb_response_payload; - op->flags = 0; - op->reserved = NULL; - op++; - /* reuse the "lb_on_response_received_locked" weak ref taken in - * query_for_backends_locked() */ - const grpc_call_error call_error = grpc_call_start_batch_and_execute( - exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), - &glb_policy->lb_on_response_received); /* loop */ - GPR_ASSERT(GRPC_CALL_OK == call_error); - } else { - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "lb_on_response_received_locked_shutdown"); - } - } else { /* empty payload: call cancelled. */ - /* dispose of the "lb_on_response_received_locked" weak ref taken in - * query_for_backends_locked() and reused in every reception loop */ - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "lb_on_response_received_locked_empty_payload"); - } -} - -static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - glb_policy->fallback_timer_active = false; - /* If we receive a serverlist after the timer fires but before this callback - * actually runs, don't fall back. */ - if (glb_policy->serverlist == NULL) { - if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "Falling back to use backends from resolver (grpclb %p)", - (void *)glb_policy); - } - GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL); - rr_handover_locked(exec_ctx, glb_policy); - } - } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "grpclb_fallback_timer"); -} - -static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - GPR_ASSERT(glb_policy->lb_call != NULL); - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - char *status_details = - grpc_slice_to_c_string(glb_policy->lb_call_status_details); - gpr_log(GPR_INFO, - "Status from LB server received. Status = %d, Details = '%s', " - "(call: %p), error %p", - glb_policy->lb_call_status, status_details, - (void *)glb_policy->lb_call, (void *)error); - gpr_free(status_details); - } - /* We need to perform cleanups no matter what. */ - lb_call_destroy_locked(exec_ctx, glb_policy); - // If the load report timer is still pending, we wait for it to be - // called before restarting the call. Otherwise, we restart the call - // here. 
- if (!glb_policy->client_load_report_timer_pending) { - maybe_restart_lb_call(exec_ctx, glb_policy); - } -} - -static void fallback_update_locked(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy, - const grpc_lb_addresses *addresses) { - GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL); - grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses); - glb_policy->fallback_backend_addresses = - extract_backend_addresses_locked(exec_ctx, addresses); - if (glb_policy->lb_fallback_timeout_ms > 0 && - !glb_policy->fallback_timer_active) { - rr_handover_locked(exec_ctx, glb_policy); - } -} - -static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - const grpc_lb_policy_args *args) { - glb_lb_policy *glb_policy = (glb_lb_policy *)policy; - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); - if (arg == NULL || arg->type != GRPC_ARG_POINTER) { - if (glb_policy->lb_channel == NULL) { - // If we don't have a current channel to the LB, go into TRANSIENT - // FAILURE. - grpc_connectivity_state_set( - exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"), - "glb_update_missing"); - } else { - // otherwise, keep using the current LB channel (ignore this update). - gpr_log(GPR_ERROR, - "No valid LB addresses channel arg for grpclb %p update, " - "ignoring.", - (void *)glb_policy); - } - return; - } - const grpc_lb_addresses *addresses = - (const grpc_lb_addresses *)arg->value.pointer.p; - - if (glb_policy->serverlist == NULL) { - // If a non-empty serverlist hasn't been received from the balancer, - // propagate the update to fallback_backend_addresses. - fallback_update_locked(exec_ctx, glb_policy, addresses); - } else if (glb_policy->updating_lb_channel) { - // If we have recieved serverlist from the balancer, we need to defer update - // when there is an in-progress one. - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "Update already in progress for grpclb %p. Deferring update.", - (void *)glb_policy); - } - if (glb_policy->pending_update_args != NULL) { - grpc_channel_args_destroy(exec_ctx, - glb_policy->pending_update_args->args); - gpr_free(glb_policy->pending_update_args); - } - glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc( - sizeof(*glb_policy->pending_update_args)); - glb_policy->pending_update_args->client_channel_factory = - args->client_channel_factory; - glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args); - glb_policy->pending_update_args->combiner = args->combiner; - return; - } - - glb_policy->updating_lb_channel = true; - GPR_ASSERT(glb_policy->lb_channel != NULL); - grpc_channel_args *lb_channel_args = build_lb_channel_args( - exec_ctx, addresses, glb_policy->response_generator, args->args); - /* Propagate updates to the LB channel (pick first) through the fake resolver - */ - grpc_fake_resolver_response_generator_set_response( - exec_ctx, glb_policy->response_generator, lb_channel_args); - grpc_channel_args_destroy(exec_ctx, lb_channel_args); - - if (!glb_policy->watching_lb_channel) { - // Watch the LB channel connectivity for connection. 
- glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state( - glb_policy->lb_channel, true /* try to connect */); - grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element( - grpc_channel_get_channel_stack(glb_policy->lb_channel)); - GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); - glb_policy->watching_lb_channel = true; - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity"); - grpc_client_channel_watch_connectivity_state( - exec_ctx, client_channel_elem, - grpc_polling_entity_create_from_pollset_set( - glb_policy->base.interested_parties), - &glb_policy->lb_channel_connectivity, - &glb_policy->lb_channel_on_connectivity_changed, NULL); - } -} - -// Invoked as part of the update process. It continues watching the LB channel -// until it shuts down or becomes READY. It's invoked even if the LB channel -// stayed READY throughout the update (for example if the update is identical). -static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - if (glb_policy->shutting_down) goto done; - // Re-initialize the lb_call. This should also take care of updating the - // embedded RR policy. Note that the current RR policy, if any, will stay in - // effect until an update from the new lb_call is received. - switch (glb_policy->lb_channel_connectivity) { - case GRPC_CHANNEL_INIT: - case GRPC_CHANNEL_CONNECTING: - case GRPC_CHANNEL_TRANSIENT_FAILURE: { - /* resub. */ - grpc_channel_element *client_channel_elem = - grpc_channel_stack_last_element( - grpc_channel_get_channel_stack(glb_policy->lb_channel)); - GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); - grpc_client_channel_watch_connectivity_state( - exec_ctx, client_channel_elem, - grpc_polling_entity_create_from_pollset_set( - glb_policy->base.interested_parties), - &glb_policy->lb_channel_connectivity, - &glb_policy->lb_channel_on_connectivity_changed, NULL); - break; - } - case GRPC_CHANNEL_IDLE: - // lb channel inactive (probably shutdown prior to update). Restart lb - // call to kick the lb channel into gear. - GPR_ASSERT(glb_policy->lb_call == NULL); - /* fallthrough */ - case GRPC_CHANNEL_READY: - if (glb_policy->lb_call != NULL) { - glb_policy->updating_lb_channel = false; - glb_policy->updating_lb_call = true; - grpc_call_cancel(glb_policy->lb_call, NULL); - // lb_on_server_status_received will pick up the cancel and reinit - // lb_call. 
- if (glb_policy->pending_update_args != NULL) { - grpc_lb_policy_args *args = glb_policy->pending_update_args; - glb_policy->pending_update_args = NULL; - glb_update_locked(exec_ctx, &glb_policy->base, args); - grpc_channel_args_destroy(exec_ctx, args->args); - gpr_free(args); - } - } else if (glb_policy->started_picking && !glb_policy->shutting_down) { - if (glb_policy->retry_timer_active) { - grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer); - glb_policy->retry_timer_active = false; - } - start_picking_locked(exec_ctx, glb_policy); - } - /* fallthrough */ - case GRPC_CHANNEL_SHUTDOWN: - done: - glb_policy->watching_lb_channel = false; - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "watch_lb_channel_connectivity_cb_shutdown"); - break; - } -} - -/* Code wiring the policy with the rest of the core */ -static const grpc_lb_policy_vtable glb_lb_policy_vtable = { - glb_destroy, - glb_shutdown_locked, - glb_pick_locked, - glb_cancel_pick_locked, - glb_cancel_picks_locked, - glb_ping_one_locked, - glb_exit_idle_locked, - glb_check_connectivity_locked, - glb_notify_on_state_change_locked, - glb_update_locked}; - -static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, - grpc_lb_policy_factory *factory, - grpc_lb_policy_args *args) { - /* Count the number of gRPC-LB addresses. There must be at least one. */ - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); - if (arg == NULL || arg->type != GRPC_ARG_POINTER) { - return NULL; - } - grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p; - size_t num_grpclb_addrs = 0; - for (size_t i = 0; i < addresses->num_addresses; ++i) { - if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs; - } - if (num_grpclb_addrs == 0) return NULL; - - glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy)); - - /* Get server name. */ - arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI); - GPR_ASSERT(arg != NULL); - GPR_ASSERT(arg->type == GRPC_ARG_STRING); - grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true); - GPR_ASSERT(uri->path[0] != '\0'); - glb_policy->server_name = - gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path); - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.", - glb_policy->server_name); - } - grpc_uri_destroy(uri); - - glb_policy->cc_factory = args->client_channel_factory; - GPR_ASSERT(glb_policy->cc_factory != NULL); - - arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS); - glb_policy->lb_call_timeout_ms = - grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX}); - - arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS); - glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer( - arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, - INT_MAX}); - - // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args, - // since we use this to trigger the client_load_reporting filter. - grpc_arg new_arg = grpc_channel_arg_string_create( - (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb"); - static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME}; - glb_policy->args = grpc_channel_args_copy_and_add_and_remove( - args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1); - - /* Extract the backend addresses (may be empty) from the resolver for - * fallback. 
*/ - glb_policy->fallback_backend_addresses = - extract_backend_addresses_locked(exec_ctx, addresses); - - /* Create a client channel over them to communicate with a LB service */ - glb_policy->response_generator = - grpc_fake_resolver_response_generator_create(); - grpc_channel_args *lb_channel_args = build_lb_channel_args( - exec_ctx, addresses, glb_policy->response_generator, args->args); - char *uri_str; - gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name); - glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel( - exec_ctx, uri_str, args->client_channel_factory, lb_channel_args); - - /* Propagate initial resolution */ - grpc_fake_resolver_response_generator_set_response( - exec_ctx, glb_policy->response_generator, lb_channel_args); - grpc_channel_args_destroy(exec_ctx, lb_channel_args); - gpr_free(uri_str); - if (glb_policy->lb_channel == NULL) { - gpr_free((void *)glb_policy->server_name); - grpc_channel_args_destroy(exec_ctx, glb_policy->args); - gpr_free(glb_policy); - return NULL; - } - grpc_subchannel_index_ref(); - GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed, - glb_lb_channel_on_connectivity_changed_cb, glb_policy, - grpc_combiner_scheduler(args->combiner)); - grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner); - grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE, - "grpclb"); - return &glb_policy->base; -} - -static void glb_factory_ref(grpc_lb_policy_factory *factory) {} - -static void glb_factory_unref(grpc_lb_policy_factory *factory) {} - -static const grpc_lb_policy_factory_vtable glb_factory_vtable = { - glb_factory_ref, glb_factory_unref, glb_create, "grpclb"}; - -static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable}; - -grpc_lb_policy_factory *grpc_glb_lb_factory_create() { - return &glb_lb_policy_factory; -} - -/* Plugin registration */ - -// Only add client_load_reporting filter if the grpclb LB policy is used. -static bool maybe_add_client_load_reporting_filter( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) { - const grpc_channel_args *args = - grpc_channel_stack_builder_get_channel_arguments(builder); - const grpc_arg *channel_arg = - grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME); - if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING && - strcmp(channel_arg->value.string, "grpclb") == 0) { - return grpc_channel_stack_builder_append_filter( - builder, (const grpc_channel_filter *)arg, NULL, NULL); - } - return true; -} - -void grpc_lb_policy_grpclb_init() { - grpc_register_lb_policy(grpc_glb_lb_factory_create()); - grpc_register_tracer(&grpc_lb_glb_trace); -#ifndef NDEBUG - grpc_register_tracer(&grpc_trace_lb_policy_refcount); -#endif - grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, - GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - maybe_add_client_load_reporting_filter, - (void *)&grpc_client_load_reporting_filter); -} - -void grpc_lb_policy_grpclb_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc new file mode 100644 index 000000000..1a675476f --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc @@ -0,0 +1,1909 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/// Implementation of the gRPC LB policy.
+///
+/// This policy takes as input a list of resolved addresses, which must
+/// include at least one balancer address.
+///
+/// An internal channel (\a lb_channel_) is created for the addresses
+/// that are balancers. This channel behaves just like a regular
+/// channel that uses pick_first to select from the list of balancer
+/// addresses.
+///
+/// The first time the policy gets a request for a pick, a ping, or to exit
+/// the idle state, \a StartPickingLocked() is called. This method is
+/// responsible for instantiating the internal *streaming* call to the LB
+/// server (whichever address pick_first chose). The call will be complete
+/// when either the balancer sends status or when we cancel the call (e.g.,
+/// because we are shutting down). If needed, we retry the call. If we
+/// received at least one valid message from the server, a new call attempt
+/// will be made immediately; otherwise, we apply back-off delays between
+/// attempts.
+///
+/// We maintain an internal round_robin policy instance for distributing
+/// requests across backends. Whenever we receive a new serverlist from
+/// the balancer, we update the round_robin policy with the new list of
+/// addresses. If we cannot communicate with the balancer on startup,
+/// however, we may enter fallback mode, in which case we will populate
+/// the RR policy's addresses from the backend addresses returned by the
+/// resolver.
+///
+/// Once an RR policy instance is in place (and getting updated as described),
+/// calls for a pick, a ping, or a cancellation will be serviced right
+/// away by forwarding them to the RR instance. Any time there's no RR
+/// policy available (i.e., right after the creation of the gRPCLB policy),
+/// pick and ping requests are added to a list of pending picks and pings
+/// to be flushed and serviced when the RR policy instance becomes available.
+///
+/// \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
+/// high level design and details.
+
+// With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
+// using that endpoint. Because of various transitive includes in uv.h,
+// including windows.h on Windows, uv.h must be included before other system
+// headers. Therefore, sockaddr.h must always be included first. 
+#include + +#include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/socket_utils.h" + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "src/core/ext/filters/client_channel/client_channel.h" +#include "src/core/ext/filters/client_channel/client_channel_factory.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h" +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h" +#include "src/core/ext/filters/client_channel/lb_policy_factory.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/parse_address.h" +#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" +#include "src/core/ext/filters/client_channel/subchannel_index.h" +#include "src/core/lib/backoff/backoff.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/channel_stack.h" +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/slice/slice_hash_table.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" +#include "src/core/lib/surface/call.h" +#include "src/core/lib/surface/channel.h" +#include "src/core/lib/surface/channel_init.h" +#include "src/core/lib/transport/static_metadata.h" + +#define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1 +#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6 +#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120 +#define GRPC_GRPCLB_RECONNECT_JITTER 0.2 +#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000 + +namespace grpc_core { + +TraceFlag grpc_lb_glb_trace(false, "glb"); + +namespace { + +class GrpcLb : public LoadBalancingPolicy { + public: + GrpcLb(const grpc_lb_addresses* addresses, const Args& args); + + void UpdateLocked(const grpc_channel_args& args) override; + bool PickLocked(PickState* pick) override; + void CancelPickLocked(PickState* pick, grpc_error* error) override; + void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask, + uint32_t initial_metadata_flags_eq, + grpc_error* error) override; + void NotifyOnStateChangeLocked(grpc_connectivity_state* state, + grpc_closure* closure) override; + grpc_connectivity_state CheckConnectivityLocked( + grpc_error** connectivity_error) override; + void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override; + void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override; + void ExitIdleLocked() override; + + private: + /// Linked list of pending pick requests. It stores all information needed to + /// eventually call (Round Robin's) pick() on them. They mainly stay pending + /// waiting for the RR policy to be created. + /// + /// Note that when a pick is sent to the RR policy, we inject our own + /// on_complete callback, so that we can intercept the result before + /// invoking the original on_complete callback. 
This allows us to set the + /// LB token metadata and add client_stats to the call context. + /// See \a pending_pick_complete() for details. + struct PendingPick { + // The grpclb instance that created the wrapping. This instance is not + // owned; reference counts are untouched. It's used only for logging + // purposes. + GrpcLb* grpclb_policy; + // The original pick. + PickState* pick; + // Our on_complete closure and the original one. + grpc_closure on_complete; + grpc_closure* original_on_complete; + // The LB token associated with the pick. This is set via user_data in + // the pick. + grpc_mdelem lb_token; + // Stats for client-side load reporting. Note that this holds a + // reference, which must be either passed on via context or unreffed. + grpc_grpclb_client_stats* client_stats = nullptr; + // Next pending pick. + PendingPick* next = nullptr; + }; + + /// A linked list of pending pings waiting for the RR policy to be created. + struct PendingPing { + grpc_closure* on_initiate; + grpc_closure* on_ack; + PendingPing* next = nullptr; + }; + + /// Contains a call to the LB server and all the data related to the call. + class BalancerCallState + : public InternallyRefCountedWithTracing { + public: + explicit BalancerCallState( + RefCountedPtr parent_grpclb_policy); + + // It's the caller's responsibility to ensure that Orphan() is called from + // inside the combiner. + void Orphan() override; + + void StartQuery(); + + grpc_grpclb_client_stats* client_stats() const { return client_stats_; } + bool seen_initial_response() const { return seen_initial_response_; } + + private: + // So Delete() can access our private dtor. + template + friend void grpc_core::Delete(T*); + + ~BalancerCallState(); + + GrpcLb* grpclb_policy() const { + return static_cast(grpclb_policy_.get()); + } + + void ScheduleNextClientLoadReportLocked(); + void SendClientLoadReportLocked(); + + static bool LoadReportCountersAreZero(grpc_grpclb_request* request); + + static void MaybeSendClientLoadReportLocked(void* arg, grpc_error* error); + static void ClientLoadReportDoneLocked(void* arg, grpc_error* error); + static void OnInitialRequestSentLocked(void* arg, grpc_error* error); + static void OnBalancerMessageReceivedLocked(void* arg, grpc_error* error); + static void OnBalancerStatusReceivedLocked(void* arg, grpc_error* error); + + // The owning LB policy. + RefCountedPtr grpclb_policy_; + + // The streaming call to the LB server. Always non-NULL. + grpc_call* lb_call_ = nullptr; + + // recv_initial_metadata + grpc_metadata_array lb_initial_metadata_recv_; + + // send_message + grpc_byte_buffer* send_message_payload_ = nullptr; + grpc_closure lb_on_initial_request_sent_; + + // recv_message + grpc_byte_buffer* recv_message_payload_ = nullptr; + grpc_closure lb_on_balancer_message_received_; + bool seen_initial_response_ = false; + + // recv_trailing_metadata + grpc_closure lb_on_balancer_status_received_; + grpc_metadata_array lb_trailing_metadata_recv_; + grpc_status_code lb_call_status_; + grpc_slice lb_call_status_details_; + + // The stats for client-side load reporting associated with this LB call. + // Created after the first serverlist is received. 
+ grpc_grpclb_client_stats* client_stats_ = nullptr; + grpc_millis client_stats_report_interval_ = 0; + grpc_timer client_load_report_timer_; + bool client_load_report_timer_callback_pending_ = false; + bool last_client_load_report_counters_were_zero_ = false; + bool client_load_report_is_due_ = false; + // The closure used for either the load report timer or the callback for + // completion of sending the load report. + grpc_closure client_load_report_closure_; + }; + + ~GrpcLb(); + + void ShutdownLocked() override; + + // Helper function used in ctor and UpdateLocked(). + void ProcessChannelArgsLocked(const grpc_channel_args& args); + + // Methods for dealing with the balancer channel and call. + void StartPickingLocked(); + void StartBalancerCallLocked(); + static void OnFallbackTimerLocked(void* arg, grpc_error* error); + void StartBalancerCallRetryTimerLocked(); + static void OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error); + static void OnBalancerChannelConnectivityChangedLocked(void* arg, + grpc_error* error); + + // Pending pick methods. + static void PendingPickSetMetadataAndContext(PendingPick* pp); + PendingPick* PendingPickCreate(PickState* pick); + void AddPendingPick(PendingPick* pp); + static void OnPendingPickComplete(void* arg, grpc_error* error); + + // Pending ping methods. + void AddPendingPing(grpc_closure* on_initiate, grpc_closure* on_ack); + + // Methods for dealing with the RR policy. + void CreateOrUpdateRoundRobinPolicyLocked(); + grpc_channel_args* CreateRoundRobinPolicyArgsLocked(); + void CreateRoundRobinPolicyLocked(const Args& args); + bool PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp); + void UpdateConnectivityStateFromRoundRobinPolicyLocked( + grpc_error* rr_state_error); + static void OnRoundRobinConnectivityChangedLocked(void* arg, + grpc_error* error); + static void OnRoundRobinRequestReresolutionLocked(void* arg, + grpc_error* error); + + // Who the client is trying to communicate with. + const char* server_name_ = nullptr; + + // Current channel args from the resolver. + grpc_channel_args* args_ = nullptr; + + // Internal state. + bool started_picking_ = false; + bool shutting_down_ = false; + grpc_connectivity_state_tracker state_tracker_; + + // The channel for communicating with the LB server. + grpc_channel* lb_channel_ = nullptr; + grpc_connectivity_state lb_channel_connectivity_; + grpc_closure lb_channel_on_connectivity_changed_; + // Are we already watching the LB channel's connectivity? + bool watching_lb_channel_ = false; + // Response generator to inject address updates into lb_channel_. + RefCountedPtr response_generator_; + + // The data associated with the current LB call. It holds a ref to this LB + // policy. It's initialized every time we query for backends. It's reset to + // NULL whenever the current LB call is no longer needed (e.g., the LB policy + // is shutting down, or the LB call has ended). A non-NULL lb_calld_ always + // contains a non-NULL lb_call_. + OrphanablePtr lb_calld_; + // Timeout in milliseconds for the LB call. 0 means no deadline. + int lb_call_timeout_ms_ = 0; + // Balancer call retry state. + BackOff lb_call_backoff_; + bool retry_timer_callback_pending_ = false; + grpc_timer lb_call_retry_timer_; + grpc_closure lb_on_call_retry_; + + // The deserialized response from the balancer. May be nullptr until one + // such response has arrived. + grpc_grpclb_serverlist* serverlist_ = nullptr; + // Index into serverlist for next pick. 
+ // If the server at this index is a drop, we return a drop. + // Otherwise, we delegate to the RR policy. + size_t serverlist_index_ = 0; + + // Timeout in milliseconds for before using fallback backend addresses. + // 0 means not using fallback. + int lb_fallback_timeout_ms_ = 0; + // The backend addresses from the resolver. + grpc_lb_addresses* fallback_backend_addresses_ = nullptr; + // Fallback timer. + bool fallback_timer_callback_pending_ = false; + grpc_timer lb_fallback_timer_; + grpc_closure lb_on_fallback_; + + // Pending picks and pings that are waiting on the RR policy's connectivity. + PendingPick* pending_picks_ = nullptr; + PendingPing* pending_pings_ = nullptr; + + // The RR policy to use for the backends. + OrphanablePtr rr_policy_; + grpc_connectivity_state rr_connectivity_state_; + grpc_closure on_rr_connectivity_changed_; + grpc_closure on_rr_request_reresolution_; +}; + +// +// serverlist parsing code +// + +// vtable for LB tokens in grpc_lb_addresses +void* lb_token_copy(void* token) { + return token == nullptr + ? nullptr + : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload; +} +void lb_token_destroy(void* token) { + if (token != nullptr) { + GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token}); + } +} +int lb_token_cmp(void* token1, void* token2) { + if (token1 > token2) return 1; + if (token1 < token2) return -1; + return 0; +} +const grpc_lb_user_data_vtable lb_token_vtable = { + lb_token_copy, lb_token_destroy, lb_token_cmp}; + +// Returns the backend addresses extracted from the given addresses. +grpc_lb_addresses* ExtractBackendAddresses(const grpc_lb_addresses* addresses) { + // First pass: count the number of backend addresses. + size_t num_backends = 0; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (!addresses->addresses[i].is_balancer) { + ++num_backends; + } + } + // Second pass: actually populate the addresses and (empty) LB tokens. + grpc_lb_addresses* backend_addresses = + grpc_lb_addresses_create(num_backends, &lb_token_vtable); + size_t num_copied = 0; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (addresses->addresses[i].is_balancer) continue; + const grpc_resolved_address* addr = &addresses->addresses[i].address; + grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr, + addr->len, false /* is_balancer */, + nullptr /* balancer_name */, + (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload); + ++num_copied; + } + return backend_addresses; +} + +bool IsServerValid(const grpc_grpclb_server* server, size_t idx, bool log) { + if (server->drop) return false; + const grpc_grpclb_ip_address* ip = &server->ip_address; + if (server->port >> 16 != 0) { + if (log) { + gpr_log(GPR_ERROR, + "Invalid port '%d' at index %lu of serverlist. Ignoring.", + server->port, (unsigned long)idx); + } + return false; + } + if (ip->size != 4 && ip->size != 16) { + if (log) { + gpr_log(GPR_ERROR, + "Expected IP to be 4 or 16 bytes, got %d at index %lu of " + "serverlist. Ignoring", + ip->size, (unsigned long)idx); + } + return false; + } + return true; +} + +void ParseServer(const grpc_grpclb_server* server, + grpc_resolved_address* addr) { + memset(addr, 0, sizeof(*addr)); + if (server->drop) return; + const uint16_t netorder_port = grpc_htons((uint16_t)server->port); + /* the addresses are given in binary format (a in(6)_addr struct) in + * server->ip_address.bytes. 
*/ + const grpc_grpclb_ip_address* ip = &server->ip_address; + if (ip->size == 4) { + addr->len = static_cast(sizeof(grpc_sockaddr_in)); + grpc_sockaddr_in* addr4 = reinterpret_cast(&addr->addr); + addr4->sin_family = GRPC_AF_INET; + memcpy(&addr4->sin_addr, ip->bytes, ip->size); + addr4->sin_port = netorder_port; + } else if (ip->size == 16) { + addr->len = static_cast(sizeof(grpc_sockaddr_in6)); + grpc_sockaddr_in6* addr6 = (grpc_sockaddr_in6*)&addr->addr; + addr6->sin6_family = GRPC_AF_INET6; + memcpy(&addr6->sin6_addr, ip->bytes, ip->size); + addr6->sin6_port = netorder_port; + } +} + +// Returns addresses extracted from \a serverlist. +grpc_lb_addresses* ProcessServerlist(const grpc_grpclb_serverlist* serverlist) { + size_t num_valid = 0; + /* first pass: count how many are valid in order to allocate the necessary + * memory in a single block */ + for (size_t i = 0; i < serverlist->num_servers; ++i) { + if (IsServerValid(serverlist->servers[i], i, true)) ++num_valid; + } + grpc_lb_addresses* lb_addresses = + grpc_lb_addresses_create(num_valid, &lb_token_vtable); + /* second pass: actually populate the addresses and LB tokens (aka user data + * to the outside world) to be read by the RR policy during its creation. + * Given that the validity tests are very cheap, they are performed again + * instead of marking the valid ones during the first pass, as this would + * incurr in an allocation due to the arbitrary number of server */ + size_t addr_idx = 0; + for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) { + const grpc_grpclb_server* server = serverlist->servers[sl_idx]; + if (!IsServerValid(serverlist->servers[sl_idx], sl_idx, false)) continue; + GPR_ASSERT(addr_idx < num_valid); + /* address processing */ + grpc_resolved_address addr; + ParseServer(server, &addr); + /* lb token processing */ + void* user_data; + if (server->has_load_balance_token) { + const size_t lb_token_max_length = + GPR_ARRAY_SIZE(server->load_balance_token); + const size_t lb_token_length = + strnlen(server->load_balance_token, lb_token_max_length); + grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer( + server->load_balance_token, lb_token_length); + user_data = + (void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr) + .payload; + } else { + char* uri = grpc_sockaddr_to_uri(&addr); + gpr_log(GPR_INFO, + "Missing LB token for backend address '%s'. The empty token will " + "be used instead", + uri); + gpr_free(uri); + user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload; + } + grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len, + false /* is_balancer */, + nullptr /* balancer_name */, user_data); + ++addr_idx; + } + GPR_ASSERT(addr_idx == num_valid); + return lb_addresses; +} + +// +// GrpcLb::BalancerCallState +// + +GrpcLb::BalancerCallState::BalancerCallState( + RefCountedPtr parent_grpclb_policy) + : InternallyRefCountedWithTracing(&grpc_lb_glb_trace), + grpclb_policy_(std::move(parent_grpclb_policy)) { + GPR_ASSERT(grpclb_policy_ != nullptr); + GPR_ASSERT(!grpclb_policy()->shutting_down_); + // Init the LB call. Note that the LB call will progress every time there's + // activity in grpclb_policy_->interested_parties(), which is comprised of + // the polling entities from client_channel. + GPR_ASSERT(grpclb_policy()->server_name_ != nullptr); + GPR_ASSERT(grpclb_policy()->server_name_[0] != '\0'); + const grpc_millis deadline = + grpclb_policy()->lb_call_timeout_ms_ == 0 + ? 
GRPC_MILLIS_INF_FUTURE + : ExecCtx::Get()->Now() + grpclb_policy()->lb_call_timeout_ms_; + lb_call_ = grpc_channel_create_pollset_set_call( + grpclb_policy()->lb_channel_, nullptr, GRPC_PROPAGATE_DEFAULTS, + grpclb_policy_->interested_parties(), + GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD, + nullptr, deadline, nullptr); + // Init the LB call request payload. + grpc_grpclb_request* request = + grpc_grpclb_request_create(grpclb_policy()->server_name_); + grpc_slice request_payload_slice = grpc_grpclb_request_encode(request); + send_message_payload_ = + grpc_raw_byte_buffer_create(&request_payload_slice, 1); + grpc_slice_unref_internal(request_payload_slice); + grpc_grpclb_request_destroy(request); + // Init other data associated with the LB call. + grpc_metadata_array_init(&lb_initial_metadata_recv_); + grpc_metadata_array_init(&lb_trailing_metadata_recv_); + GRPC_CLOSURE_INIT(&lb_on_initial_request_sent_, OnInitialRequestSentLocked, + this, grpc_combiner_scheduler(grpclb_policy()->combiner())); + GRPC_CLOSURE_INIT(&lb_on_balancer_message_received_, + OnBalancerMessageReceivedLocked, this, + grpc_combiner_scheduler(grpclb_policy()->combiner())); + GRPC_CLOSURE_INIT(&lb_on_balancer_status_received_, + OnBalancerStatusReceivedLocked, this, + grpc_combiner_scheduler(grpclb_policy()->combiner())); +} + +GrpcLb::BalancerCallState::~BalancerCallState() { + GPR_ASSERT(lb_call_ != nullptr); + grpc_call_unref(lb_call_); + grpc_metadata_array_destroy(&lb_initial_metadata_recv_); + grpc_metadata_array_destroy(&lb_trailing_metadata_recv_); + grpc_byte_buffer_destroy(send_message_payload_); + grpc_byte_buffer_destroy(recv_message_payload_); + grpc_slice_unref_internal(lb_call_status_details_); + if (client_stats_ != nullptr) { + grpc_grpclb_client_stats_unref(client_stats_); + } +} + +void GrpcLb::BalancerCallState::Orphan() { + GPR_ASSERT(lb_call_ != nullptr); + // If we are here because grpclb_policy wants to cancel the call, + // lb_on_balancer_status_received_ will complete the cancellation and clean + // up. Otherwise, we are here because grpclb_policy has to orphan a failed + // call, then the following cancellation will be a no-op. + grpc_call_cancel(lb_call_, nullptr); + if (client_load_report_timer_callback_pending_) { + grpc_timer_cancel(&client_load_report_timer_); + } + // Note that the initial ref is hold by lb_on_balancer_status_received_ + // instead of the caller of this function. So the corresponding unref happens + // in lb_on_balancer_status_received_ instead of here. +} + +void GrpcLb::BalancerCallState::StartQuery() { + GPR_ASSERT(lb_call_ != nullptr); + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] Starting LB call (lb_calld: %p, lb_call: %p)", + grpclb_policy_.get(), this, lb_call_); + } + // Create the ops. + grpc_call_error call_error; + grpc_op ops[3]; + memset(ops, 0, sizeof(ops)); + // Op: send initial metadata. + grpc_op* op = ops; + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 0; + op->flags = 0; + op->reserved = nullptr; + op++; + // Op: send request message. + GPR_ASSERT(send_message_payload_ != nullptr); + op->op = GRPC_OP_SEND_MESSAGE; + op->data.send_message.send_message = send_message_payload_; + op->flags = 0; + op->reserved = nullptr; + op++; + // TODO(roth): We currently track this ref manually. Once the + // ClosureRef API is ready, we should pass the RefCountedPtr<> along + // with the callback. 
+ auto self = Ref(DEBUG_LOCATION, "on_initial_request_sent"); + self.release(); + call_error = grpc_call_start_batch_and_execute( + lb_call_, ops, (size_t)(op - ops), &lb_on_initial_request_sent_); + GPR_ASSERT(GRPC_CALL_OK == call_error); + // Op: recv initial metadata. + op = ops; + op->op = GRPC_OP_RECV_INITIAL_METADATA; + op->data.recv_initial_metadata.recv_initial_metadata = + &lb_initial_metadata_recv_; + op->flags = 0; + op->reserved = nullptr; + op++; + // Op: recv response. + op->op = GRPC_OP_RECV_MESSAGE; + op->data.recv_message.recv_message = &recv_message_payload_; + op->flags = 0; + op->reserved = nullptr; + op++; + // TODO(roth): We currently track this ref manually. Once the + // ClosureRef API is ready, we should pass the RefCountedPtr<> along + // with the callback. + self = Ref(DEBUG_LOCATION, "on_message_received"); + self.release(); + call_error = grpc_call_start_batch_and_execute( + lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_message_received_); + GPR_ASSERT(GRPC_CALL_OK == call_error); + // Op: recv server status. + op = ops; + op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; + op->data.recv_status_on_client.trailing_metadata = + &lb_trailing_metadata_recv_; + op->data.recv_status_on_client.status = &lb_call_status_; + op->data.recv_status_on_client.status_details = &lb_call_status_details_; + op->flags = 0; + op->reserved = nullptr; + op++; + // This callback signals the end of the LB call, so it relies on the initial + // ref instead of a new ref. When it's invoked, it's the initial ref that is + // unreffed. + call_error = grpc_call_start_batch_and_execute( + lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_status_received_); + GPR_ASSERT(GRPC_CALL_OK == call_error); +}; + +void GrpcLb::BalancerCallState::ScheduleNextClientLoadReportLocked() { + const grpc_millis next_client_load_report_time = + ExecCtx::Get()->Now() + client_stats_report_interval_; + GRPC_CLOSURE_INIT(&client_load_report_closure_, + MaybeSendClientLoadReportLocked, this, + grpc_combiner_scheduler(grpclb_policy()->combiner())); + grpc_timer_init(&client_load_report_timer_, next_client_load_report_time, + &client_load_report_closure_); + client_load_report_timer_callback_pending_ = true; +} + +void GrpcLb::BalancerCallState::MaybeSendClientLoadReportLocked( + void* arg, grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + GrpcLb* grpclb_policy = lb_calld->grpclb_policy(); + lb_calld->client_load_report_timer_callback_pending_ = false; + if (error != GRPC_ERROR_NONE || lb_calld != grpclb_policy->lb_calld_.get()) { + lb_calld->Unref(DEBUG_LOCATION, "client_load_report"); + return; + } + // If we've already sent the initial request, then we can go ahead and send + // the load report. Otherwise, we need to wait until the initial request has + // been sent to send this (see OnInitialRequestSentLocked()). 
+ if (lb_calld->send_message_payload_ == nullptr) { + lb_calld->SendClientLoadReportLocked(); + } else { + lb_calld->client_load_report_is_due_ = true; + } +} + +bool GrpcLb::BalancerCallState::LoadReportCountersAreZero( + grpc_grpclb_request* request) { + grpc_grpclb_dropped_call_counts* drop_entries = + static_cast( + request->client_stats.calls_finished_with_drop.arg); + return request->client_stats.num_calls_started == 0 && + request->client_stats.num_calls_finished == 0 && + request->client_stats.num_calls_finished_with_client_failed_to_send == + 0 && + request->client_stats.num_calls_finished_known_received == 0 && + (drop_entries == nullptr || drop_entries->num_entries == 0); +} + +void GrpcLb::BalancerCallState::SendClientLoadReportLocked() { + // Construct message payload. + GPR_ASSERT(send_message_payload_ == nullptr); + grpc_grpclb_request* request = + grpc_grpclb_load_report_request_create_locked(client_stats_); + // Skip client load report if the counters were all zero in the last + // report and they are still zero in this one. + if (LoadReportCountersAreZero(request)) { + if (last_client_load_report_counters_were_zero_) { + grpc_grpclb_request_destroy(request); + ScheduleNextClientLoadReportLocked(); + return; + } + last_client_load_report_counters_were_zero_ = true; + } else { + last_client_load_report_counters_were_zero_ = false; + } + grpc_slice request_payload_slice = grpc_grpclb_request_encode(request); + send_message_payload_ = + grpc_raw_byte_buffer_create(&request_payload_slice, 1); + grpc_slice_unref_internal(request_payload_slice); + grpc_grpclb_request_destroy(request); + // Send the report. + grpc_op op; + memset(&op, 0, sizeof(op)); + op.op = GRPC_OP_SEND_MESSAGE; + op.data.send_message.send_message = send_message_payload_; + GRPC_CLOSURE_INIT(&client_load_report_closure_, ClientLoadReportDoneLocked, + this, grpc_combiner_scheduler(grpclb_policy()->combiner())); + grpc_call_error call_error = grpc_call_start_batch_and_execute( + lb_call_, &op, 1, &client_load_report_closure_); + if (call_error != GRPC_CALL_OK) { + gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", grpclb_policy_.get(), + call_error); + GPR_ASSERT(GRPC_CALL_OK == call_error); + } +} + +void GrpcLb::BalancerCallState::ClientLoadReportDoneLocked(void* arg, + grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + GrpcLb* grpclb_policy = lb_calld->grpclb_policy(); + grpc_byte_buffer_destroy(lb_calld->send_message_payload_); + lb_calld->send_message_payload_ = nullptr; + if (error != GRPC_ERROR_NONE || lb_calld != grpclb_policy->lb_calld_.get()) { + lb_calld->Unref(DEBUG_LOCATION, "client_load_report"); + return; + } + lb_calld->ScheduleNextClientLoadReportLocked(); +} + +void GrpcLb::BalancerCallState::OnInitialRequestSentLocked(void* arg, + grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + grpc_byte_buffer_destroy(lb_calld->send_message_payload_); + lb_calld->send_message_payload_ = nullptr; + // If we attempted to send a client load report before the initial request was + // sent (and this lb_calld is still in use), send the load report now. 
+ if (lb_calld->client_load_report_is_due_ && + lb_calld == lb_calld->grpclb_policy()->lb_calld_.get()) { + lb_calld->SendClientLoadReportLocked(); + lb_calld->client_load_report_is_due_ = false; + } + lb_calld->Unref(DEBUG_LOCATION, "on_initial_request_sent"); +} + +void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked( + void* arg, grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + GrpcLb* grpclb_policy = lb_calld->grpclb_policy(); + // Empty payload means the LB call was cancelled. + if (lb_calld != grpclb_policy->lb_calld_.get() || + lb_calld->recv_message_payload_ == nullptr) { + lb_calld->Unref(DEBUG_LOCATION, "on_message_received"); + return; + } + grpc_byte_buffer_reader bbr; + grpc_byte_buffer_reader_init(&bbr, lb_calld->recv_message_payload_); + grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr); + grpc_byte_buffer_reader_destroy(&bbr); + grpc_byte_buffer_destroy(lb_calld->recv_message_payload_); + lb_calld->recv_message_payload_ = nullptr; + grpc_grpclb_initial_response* initial_response; + grpc_grpclb_serverlist* serverlist; + if (!lb_calld->seen_initial_response_ && + (initial_response = grpc_grpclb_initial_response_parse(response_slice)) != + nullptr) { + // Have NOT seen initial response, look for initial response. + if (initial_response->has_client_stats_report_interval) { + lb_calld->client_stats_report_interval_ = GPR_MAX( + GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis( + &initial_response->client_stats_report_interval)); + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] Received initial LB response message; " + "client load reporting interval = %" PRIdPTR " milliseconds", + grpclb_policy, lb_calld->client_stats_report_interval_); + } + } else if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] Received initial LB response message; client load " + "reporting NOT enabled", + grpclb_policy); + } + grpc_grpclb_initial_response_destroy(initial_response); + lb_calld->seen_initial_response_ = true; + } else if ((serverlist = grpc_grpclb_response_parse_serverlist( + response_slice)) != nullptr) { + // Have seen initial response, look for serverlist. + GPR_ASSERT(lb_calld->lb_call_ != nullptr); + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] Serverlist with %" PRIuPTR " servers received", + grpclb_policy, serverlist->num_servers); + for (size_t i = 0; i < serverlist->num_servers; ++i) { + grpc_resolved_address addr; + ParseServer(serverlist->servers[i], &addr); + char* ipport; + grpc_sockaddr_to_string(&ipport, &addr, false); + gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s", + grpclb_policy, i, ipport); + gpr_free(ipport); + } + } + /* update serverlist */ + if (serverlist->num_servers > 0) { + // Start sending client load report only after we start using the + // serverlist returned from the current LB call. + if (lb_calld->client_stats_report_interval_ > 0 && + lb_calld->client_stats_ == nullptr) { + lb_calld->client_stats_ = grpc_grpclb_client_stats_create(); + // TODO(roth): We currently track this ref manually. Once the + // ClosureRef API is ready, we should pass the RefCountedPtr<> along + // with the callback. 
+ auto self = lb_calld->Ref(DEBUG_LOCATION, "client_load_report"); + self.release(); + lb_calld->ScheduleNextClientLoadReportLocked(); + } + if (grpc_grpclb_serverlist_equals(grpclb_policy->serverlist_, + serverlist)) { + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] Incoming server list identical to current, " + "ignoring.", + grpclb_policy); + } + grpc_grpclb_destroy_serverlist(serverlist); + } else { /* new serverlist */ + if (grpclb_policy->serverlist_ != nullptr) { + /* dispose of the old serverlist */ + grpc_grpclb_destroy_serverlist(grpclb_policy->serverlist_); + } else { + /* or dispose of the fallback */ + grpc_lb_addresses_destroy(grpclb_policy->fallback_backend_addresses_); + grpclb_policy->fallback_backend_addresses_ = nullptr; + if (grpclb_policy->fallback_timer_callback_pending_) { + grpc_timer_cancel(&grpclb_policy->lb_fallback_timer_); + } + } + // and update the copy in the GrpcLb instance. This + // serverlist instance will be destroyed either upon the next + // update or when the GrpcLb instance is destroyed. + grpclb_policy->serverlist_ = serverlist; + grpclb_policy->serverlist_index_ = 0; + grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked(); + } + } else { + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, "[grpclb %p] Received empty server list, ignoring.", + grpclb_policy); + } + grpc_grpclb_destroy_serverlist(serverlist); + } + } else { + // No valid initial response or serverlist found. + gpr_log(GPR_ERROR, + "[grpclb %p] Invalid LB response received: '%s'. Ignoring.", + grpclb_policy, + grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX)); + } + grpc_slice_unref_internal(response_slice); + if (!grpclb_policy->shutting_down_) { + // Keep listening for serverlist updates. + grpc_op op; + memset(&op, 0, sizeof(op)); + op.op = GRPC_OP_RECV_MESSAGE; + op.data.recv_message.recv_message = &lb_calld->recv_message_payload_; + op.flags = 0; + op.reserved = nullptr; + // Reuse the "OnBalancerMessageReceivedLocked" ref taken in StartQuery(). + const grpc_call_error call_error = grpc_call_start_batch_and_execute( + lb_calld->lb_call_, &op, 1, + &lb_calld->lb_on_balancer_message_received_); + GPR_ASSERT(GRPC_CALL_OK == call_error); + } else { + lb_calld->Unref(DEBUG_LOCATION, "on_message_received+grpclb_shutdown"); + } +} + +void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked( + void* arg, grpc_error* error) { + BalancerCallState* lb_calld = static_cast(arg); + GrpcLb* grpclb_policy = lb_calld->grpclb_policy(); + GPR_ASSERT(lb_calld->lb_call_ != nullptr); + if (grpc_lb_glb_trace.enabled()) { + char* status_details = + grpc_slice_to_c_string(lb_calld->lb_call_status_details_); + gpr_log(GPR_INFO, + "[grpclb %p] Status from LB server received. Status = %d, details " + "= '%s', (lb_calld: %p, lb_call: %p), error '%s'", + grpclb_policy, lb_calld->lb_call_status_, status_details, lb_calld, + lb_calld->lb_call_, grpc_error_string(error)); + gpr_free(status_details); + } + grpclb_policy->TryReresolutionLocked(&grpc_lb_glb_trace, GRPC_ERROR_NONE); + // If this lb_calld is still in use, this call ended because of a failure so + // we want to retry connecting. Otherwise, we have deliberately ended this + // call and no further action is required. + if (lb_calld == grpclb_policy->lb_calld_.get()) { + grpclb_policy->lb_calld_.reset(); + GPR_ASSERT(!grpclb_policy->shutting_down_); + if (lb_calld->seen_initial_response_) { + // If we lose connection to the LB server, reset the backoff and restart + // the LB call immediately. 
+ grpclb_policy->lb_call_backoff_.Reset(); + grpclb_policy->StartBalancerCallLocked(); + } else { + // If this LB call fails establishing any connection to the LB server, + // retry later. + grpclb_policy->StartBalancerCallRetryTimerLocked(); + } + } + lb_calld->Unref(DEBUG_LOCATION, "lb_call_ended"); +} + +// +// helper code for creating balancer channel +// + +grpc_lb_addresses* ExtractBalancerAddresses( + const grpc_lb_addresses* addresses) { + size_t num_grpclb_addrs = 0; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs; + } + // There must be at least one balancer address, or else the + // client_channel would not have chosen this LB policy. + GPR_ASSERT(num_grpclb_addrs > 0); + grpc_lb_addresses* lb_addresses = + grpc_lb_addresses_create(num_grpclb_addrs, nullptr); + size_t lb_addresses_idx = 0; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (!addresses->addresses[i].is_balancer) continue; + if (addresses->addresses[i].user_data != nullptr) { + gpr_log(GPR_ERROR, + "This LB policy doesn't support user data. It will be ignored"); + } + grpc_lb_addresses_set_address( + lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr, + addresses->addresses[i].address.len, false /* is balancer */, + addresses->addresses[i].balancer_name, nullptr /* user data */); + } + GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx); + return lb_addresses; +} + +/* Returns the channel args for the LB channel, used to create a bidirectional + * stream for the reception of load balancing updates. + * + * Inputs: + * - \a addresses: corresponding to the balancers. + * - \a response_generator: in order to propagate updates from the resolver + * above the grpclb policy. + * - \a args: other args inherited from the grpclb policy. */ +grpc_channel_args* BuildBalancerChannelArgs( + const grpc_lb_addresses* addresses, + FakeResolverResponseGenerator* response_generator, + const grpc_channel_args* args) { + grpc_lb_addresses* lb_addresses = ExtractBalancerAddresses(addresses); + // Channel args to remove. + static const char* args_to_remove[] = { + // LB policy name, since we want to use the default (pick_first) in + // the LB channel. + GRPC_ARG_LB_POLICY_NAME, + // The channel arg for the server URI, since that will be different for + // the LB channel than for the parent channel. The client channel + // factory will re-add this arg with the right value. + GRPC_ARG_SERVER_URI, + // The resolved addresses, which will be generated by the name resolver + // used in the LB channel. Note that the LB channel will use the fake + // resolver, so this won't actually generate a query to DNS (or some + // other name service). However, the addresses returned by the fake + // resolver will have is_balancer=false, whereas our own addresses have + // is_balancer=true. We need the LB channel to return addresses with + // is_balancer=false so that it does not wind up recursively using the + // grpclb LB policy, as per the special case logic in client_channel.c. + GRPC_ARG_LB_ADDRESSES, + // The fake resolver response generator, because we are replacing it + // with the one from the grpclb policy, used to propagate updates to + // the LB channel. + GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR, + // The LB channel should use the authority indicated by the target + // authority table (see \a grpc_lb_policy_grpclb_modify_lb_channel_args), + // as opposed to the authority from the parent channel. 
+ GRPC_ARG_DEFAULT_AUTHORITY, + // Just as for \a GRPC_ARG_DEFAULT_AUTHORITY, the LB channel should be + // treated as a stand-alone channel and not inherit this argument from the + // args of the parent channel. + GRPC_SSL_TARGET_NAME_OVERRIDE_ARG, + }; + // Channel args to add. + const grpc_arg args_to_add[] = { + // New LB addresses. + // Note that we pass these in both when creating the LB channel + // and via the fake resolver. The latter is what actually gets used. + grpc_lb_addresses_create_channel_arg(lb_addresses), + // The fake resolver response generator, which we use to inject + // address updates into the LB channel. + grpc_core::FakeResolverResponseGenerator::MakeChannelArg( + response_generator), + }; + // Construct channel args. + grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove( + args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), args_to_add, + GPR_ARRAY_SIZE(args_to_add)); + // Make any necessary modifications for security. + new_args = grpc_lb_policy_grpclb_modify_lb_channel_args(new_args); + // Clean up. + grpc_lb_addresses_destroy(lb_addresses); + return new_args; +} + +// +// ctor and dtor +// + +GrpcLb::GrpcLb(const grpc_lb_addresses* addresses, + const LoadBalancingPolicy::Args& args) + : LoadBalancingPolicy(args), + response_generator_(MakeRefCounted()), + lb_call_backoff_( + BackOff::Options() + .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * + 1000) + .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER) + .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER) + .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * + 1000)) { + // Initialization. + grpc_subchannel_index_ref(); + GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_, + &GrpcLb::OnBalancerChannelConnectivityChangedLocked, this, + grpc_combiner_scheduler(args.combiner)); + GRPC_CLOSURE_INIT(&on_rr_connectivity_changed_, + &GrpcLb::OnRoundRobinConnectivityChangedLocked, this, + grpc_combiner_scheduler(args.combiner)); + GRPC_CLOSURE_INIT(&on_rr_request_reresolution_, + &GrpcLb::OnRoundRobinRequestReresolutionLocked, this, + grpc_combiner_scheduler(args.combiner)); + grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE, "grpclb"); + // Record server name. + const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI); + const char* server_uri = grpc_channel_arg_get_string(arg); + GPR_ASSERT(server_uri != nullptr); + grpc_uri* uri = grpc_uri_parse(server_uri, true); + GPR_ASSERT(uri->path[0] != '\0'); + server_name_ = gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path); + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] Will use '%s' as the server name for LB request.", + this, server_name_); + } + grpc_uri_destroy(uri); + // Record LB call timeout. + arg = grpc_channel_args_find(args.args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS); + lb_call_timeout_ms_ = grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX}); + // Record fallback timeout. + arg = grpc_channel_args_find(args.args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS); + lb_fallback_timeout_ms_ = grpc_channel_arg_get_integer( + arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX}); + // Process channel args. 
+ ProcessChannelArgsLocked(*args.args); +} + +GrpcLb::~GrpcLb() { + GPR_ASSERT(pending_picks_ == nullptr); + GPR_ASSERT(pending_pings_ == nullptr); + gpr_free((void*)server_name_); + grpc_channel_args_destroy(args_); + grpc_connectivity_state_destroy(&state_tracker_); + if (serverlist_ != nullptr) { + grpc_grpclb_destroy_serverlist(serverlist_); + } + if (fallback_backend_addresses_ != nullptr) { + grpc_lb_addresses_destroy(fallback_backend_addresses_); + } + grpc_subchannel_index_unref(); +} + +void GrpcLb::ShutdownLocked() { + grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"); + shutting_down_ = true; + lb_calld_.reset(); + if (retry_timer_callback_pending_) { + grpc_timer_cancel(&lb_call_retry_timer_); + } + if (fallback_timer_callback_pending_) { + grpc_timer_cancel(&lb_fallback_timer_); + } + rr_policy_.reset(); + TryReresolutionLocked(&grpc_lb_glb_trace, GRPC_ERROR_CANCELLED); + // We destroy the LB channel here instead of in our destructor because + // destroying the channel triggers a last callback to + // OnBalancerChannelConnectivityChangedLocked(), and we need to be + // alive when that callback is invoked. + if (lb_channel_ != nullptr) { + grpc_channel_destroy(lb_channel_); + lb_channel_ = nullptr; + } + grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN, + GRPC_ERROR_REF(error), "grpclb_shutdown"); + // Clear pending picks. + PendingPick* pp; + while ((pp = pending_picks_) != nullptr) { + pending_picks_ = pp->next; + pp->pick->connected_subchannel.reset(); + // Note: pp is deleted in this callback. + GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error)); + } + // Clear pending pings. + PendingPing* pping; + while ((pping = pending_pings_) != nullptr) { + pending_pings_ = pping->next; + GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error)); + Delete(pping); + } + GRPC_ERROR_UNREF(error); +} + +// +// public methods +// + +void GrpcLb::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) { + PendingPick* pp; + while ((pp = pending_picks_) != nullptr) { + pending_picks_ = pp->next; + pp->pick->on_complete = pp->original_on_complete; + pp->pick->user_data = nullptr; + if (new_policy->PickLocked(pp->pick)) { + // Synchronous return; schedule closure. + GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE); + } + Delete(pp); + } +} + +// Cancel a specific pending pick. +// +// A grpclb pick progresses as follows: +// - If there's a Round Robin policy (rr_policy_) available, it'll be +// handed over to the RR policy (in CreateRoundRobinPolicyLocked()). From +// that point onwards, it'll be RR's responsibility. For cancellations, that +// implies the pick needs also be cancelled by the RR instance. +// - Otherwise, without an RR instance, picks stay pending at this policy's +// level (grpclb), inside the pending_picks_ list. To cancel these, +// we invoke the completion closure and set the pick's connected +// subchannel to nullptr right here. +void GrpcLb::CancelPickLocked(PickState* pick, grpc_error* error) { + PendingPick* pp = pending_picks_; + pending_picks_ = nullptr; + while (pp != nullptr) { + PendingPick* next = pp->next; + if (pp->pick == pick) { + pick->connected_subchannel.reset(); + // Note: pp is deleted in this callback. 
+ GRPC_CLOSURE_SCHED(&pp->on_complete, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick Cancelled", &error, 1)); + } else { + pp->next = pending_picks_; + pending_picks_ = pp; + } + pp = next; + } + if (rr_policy_ != nullptr) { + rr_policy_->CancelPickLocked(pick, GRPC_ERROR_REF(error)); + } + GRPC_ERROR_UNREF(error); +} + +// Cancel all pending picks. +// +// A grpclb pick progresses as follows: +// - If there's a Round Robin policy (rr_policy_) available, it'll be +// handed over to the RR policy (in CreateRoundRobinPolicyLocked()). From +// that point onwards, it'll be RR's responsibility. For cancellations, that +// implies the pick needs also be cancelled by the RR instance. +// - Otherwise, without an RR instance, picks stay pending at this policy's +// level (grpclb), inside the pending_picks_ list. To cancel these, +// we invoke the completion closure and set the pick's connected +// subchannel to nullptr right here. +void GrpcLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask, + uint32_t initial_metadata_flags_eq, + grpc_error* error) { + PendingPick* pp = pending_picks_; + pending_picks_ = nullptr; + while (pp != nullptr) { + PendingPick* next = pp->next; + if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) == + initial_metadata_flags_eq) { + // Note: pp is deleted in this callback. + GRPC_CLOSURE_SCHED(&pp->on_complete, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick Cancelled", &error, 1)); + } else { + pp->next = pending_picks_; + pending_picks_ = pp; + } + pp = next; + } + if (rr_policy_ != nullptr) { + rr_policy_->CancelMatchingPicksLocked(initial_metadata_flags_mask, + initial_metadata_flags_eq, + GRPC_ERROR_REF(error)); + } + GRPC_ERROR_UNREF(error); +} + +void GrpcLb::ExitIdleLocked() { + if (!started_picking_) { + StartPickingLocked(); + } +} + +bool GrpcLb::PickLocked(PickState* pick) { + PendingPick* pp = PendingPickCreate(pick); + bool pick_done = false; + if (rr_policy_ != nullptr) { + const grpc_connectivity_state rr_connectivity_state = + rr_policy_->CheckConnectivityLocked(nullptr); + // The RR policy may have transitioned to SHUTDOWN but the callback + // registered to capture this event (on_rr_connectivity_changed_) may not + // have been invoked yet. We need to make sure we aren't trying to pick + // from an RR policy instance that's in shutdown. + if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) { + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] NOT picking from from RR %p: RR conn state=%s", + this, rr_policy_.get(), + grpc_connectivity_state_name(rr_connectivity_state)); + } + AddPendingPick(pp); + pick_done = false; + } else { // RR not in shutdown + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", this, + rr_policy_.get()); + } + pick_done = PickFromRoundRobinPolicyLocked(false /* force_async */, pp); + } + } else { // rr_policy_ == NULL + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] No RR policy. 
Adding to grpclb's pending picks", + this); + } + AddPendingPick(pp); + if (!started_picking_) { + StartPickingLocked(); + } + pick_done = false; + } + return pick_done; +} + +void GrpcLb::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) { + if (rr_policy_ != nullptr) { + rr_policy_->PingOneLocked(on_initiate, on_ack); + } else { + AddPendingPing(on_initiate, on_ack); + if (!started_picking_) { + StartPickingLocked(); + } + } +} + +grpc_connectivity_state GrpcLb::CheckConnectivityLocked( + grpc_error** connectivity_error) { + return grpc_connectivity_state_get(&state_tracker_, connectivity_error); +} + +void GrpcLb::NotifyOnStateChangeLocked(grpc_connectivity_state* current, + grpc_closure* notify) { + grpc_connectivity_state_notify_on_state_change(&state_tracker_, current, + notify); +} + +void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) { + const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES); + if (arg == nullptr || arg->type != GRPC_ARG_POINTER) { + // Ignore this update. + gpr_log( + GPR_ERROR, + "[grpclb %p] No valid LB addresses channel arg in update, ignoring.", + this); + return; + } + const grpc_lb_addresses* addresses = + static_cast(arg->value.pointer.p); + // Update fallback address list. + if (fallback_backend_addresses_ != nullptr) { + grpc_lb_addresses_destroy(fallback_backend_addresses_); + } + fallback_backend_addresses_ = ExtractBackendAddresses(addresses); + // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args, + // since we use this to trigger the client_load_reporting filter. + static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME}; + grpc_arg new_arg = grpc_channel_arg_string_create( + (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb"); + grpc_channel_args_destroy(args_); + args_ = grpc_channel_args_copy_and_add_and_remove( + &args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1); + // Construct args for balancer channel. + grpc_channel_args* lb_channel_args = + BuildBalancerChannelArgs(addresses, response_generator_.get(), &args); + // Create balancer channel if needed. + if (lb_channel_ == nullptr) { + char* uri_str; + gpr_asprintf(&uri_str, "fake:///%s", server_name_); + lb_channel_ = grpc_client_channel_factory_create_channel( + client_channel_factory(), uri_str, + GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, lb_channel_args); + GPR_ASSERT(lb_channel_ != nullptr); + gpr_free(uri_str); + } + // Propagate updates to the LB channel (pick_first) through the fake + // resolver. + response_generator_->SetResponse(lb_channel_args); + grpc_channel_args_destroy(lb_channel_args); +} + +void GrpcLb::UpdateLocked(const grpc_channel_args& args) { + ProcessChannelArgsLocked(args); + // If fallback is configured and the RR policy already exists, update + // it with the new fallback addresses. + if (lb_fallback_timeout_ms_ > 0 && rr_policy_ != nullptr) { + CreateOrUpdateRoundRobinPolicyLocked(); + } + // Start watching the LB channel connectivity for connection, if not + // already doing so. + if (!watching_lb_channel_) { + lb_channel_connectivity_ = grpc_channel_check_connectivity_state( + lb_channel_, true /* try to connect */); + grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element( + grpc_channel_get_channel_stack(lb_channel_)); + GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); + watching_lb_channel_ = true; + // TODO(roth): We currently track this ref manually. 
Once the + // ClosureRef API is ready, we should pass the RefCountedPtr<> along + // with the callback. + auto self = Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity"); + self.release(); + grpc_client_channel_watch_connectivity_state( + client_channel_elem, + grpc_polling_entity_create_from_pollset_set(interested_parties()), + &lb_channel_connectivity_, &lb_channel_on_connectivity_changed_, + nullptr); + } +} + +// +// code for balancer channel and call +// + +void GrpcLb::StartPickingLocked() { + // Start a timer to fall back. + if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr && + !fallback_timer_callback_pending_) { + grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_; + // TODO(roth): We currently track this ref manually. Once the + // ClosureRef API is ready, we should pass the RefCountedPtr<> along + // with the callback. + auto self = Ref(DEBUG_LOCATION, "on_fallback_timer"); + self.release(); + GRPC_CLOSURE_INIT(&lb_on_fallback_, &GrpcLb::OnFallbackTimerLocked, this, + grpc_combiner_scheduler(combiner())); + fallback_timer_callback_pending_ = true; + grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_); + } + started_picking_ = true; + StartBalancerCallLocked(); +} + +void GrpcLb::StartBalancerCallLocked() { + GPR_ASSERT(lb_channel_ != nullptr); + if (shutting_down_) return; + // Init the LB call data. + GPR_ASSERT(lb_calld_ == nullptr); + lb_calld_ = MakeOrphanable(Ref()); + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] Query for backends (lb_channel: %p, lb_calld: %p)", + this, lb_channel_, lb_calld_.get()); + } + lb_calld_->StartQuery(); +} + +void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) { + GrpcLb* grpclb_policy = static_cast(arg); + grpclb_policy->fallback_timer_callback_pending_ = false; + // If we receive a serverlist after the timer fires but before this callback + // actually runs, don't fall back. + if (grpclb_policy->serverlist_ == nullptr && !grpclb_policy->shutting_down_ && + error == GRPC_ERROR_NONE) { + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] Falling back to use backends from resolver", + grpclb_policy); + } + GPR_ASSERT(grpclb_policy->fallback_backend_addresses_ != nullptr); + grpclb_policy->CreateOrUpdateRoundRobinPolicyLocked(); + } + grpclb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer"); +} + +void GrpcLb::StartBalancerCallRetryTimerLocked() { + grpc_millis next_try = lb_call_backoff_.NextAttemptTime(); + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, "[grpclb %p] Connection to LB server lost...", this); + grpc_millis timeout = next_try - ExecCtx::Get()->Now(); + if (timeout > 0) { + gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.", + this, timeout); + } else { + gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active immediately.", + this); + } + } + // TODO(roth): We currently track this ref manually. Once the + // ClosureRef API is ready, we should pass the RefCountedPtr<> along + // with the callback. 
+ auto self = Ref(DEBUG_LOCATION, "on_balancer_call_retry_timer"); + self.release(); + GRPC_CLOSURE_INIT(&lb_on_call_retry_, &GrpcLb::OnBalancerCallRetryTimerLocked, + this, grpc_combiner_scheduler(combiner())); + retry_timer_callback_pending_ = true; + grpc_timer_init(&lb_call_retry_timer_, next_try, &lb_on_call_retry_); +} + +void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) { + GrpcLb* grpclb_policy = static_cast(arg); + grpclb_policy->retry_timer_callback_pending_ = false; + if (!grpclb_policy->shutting_down_ && error == GRPC_ERROR_NONE && + grpclb_policy->lb_calld_ == nullptr) { + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", + grpclb_policy); + } + grpclb_policy->StartBalancerCallLocked(); + } + grpclb_policy->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer"); +} + +// Invoked as part of the update process. It continues watching the LB channel +// until it shuts down or becomes READY. It's invoked even if the LB channel +// stayed READY throughout the update (for example if the update is identical). +void GrpcLb::OnBalancerChannelConnectivityChangedLocked(void* arg, + grpc_error* error) { + GrpcLb* grpclb_policy = static_cast(arg); + if (grpclb_policy->shutting_down_) goto done; + // Re-initialize the lb_call. This should also take care of updating the + // embedded RR policy. Note that the current RR policy, if any, will stay in + // effect until an update from the new lb_call is received. + switch (grpclb_policy->lb_channel_connectivity_) { + case GRPC_CHANNEL_CONNECTING: + case GRPC_CHANNEL_TRANSIENT_FAILURE: { + // Keep watching the LB channel. + grpc_channel_element* client_channel_elem = + grpc_channel_stack_last_element( + grpc_channel_get_channel_stack(grpclb_policy->lb_channel_)); + GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); + grpc_client_channel_watch_connectivity_state( + client_channel_elem, + grpc_polling_entity_create_from_pollset_set( + grpclb_policy->interested_parties()), + &grpclb_policy->lb_channel_connectivity_, + &grpclb_policy->lb_channel_on_connectivity_changed_, nullptr); + break; + } + // The LB channel may be IDLE because it's shut down before the update. + // Restart the LB call to kick the LB channel into gear. + case GRPC_CHANNEL_IDLE: + case GRPC_CHANNEL_READY: + grpclb_policy->lb_calld_.reset(); + if (grpclb_policy->started_picking_) { + if (grpclb_policy->retry_timer_callback_pending_) { + grpc_timer_cancel(&grpclb_policy->lb_call_retry_timer_); + } + grpclb_policy->lb_call_backoff_.Reset(); + grpclb_policy->StartBalancerCallLocked(); + } + // Fall through. + case GRPC_CHANNEL_SHUTDOWN: + done: + grpclb_policy->watching_lb_channel_ = false; + grpclb_policy->Unref(DEBUG_LOCATION, + "watch_lb_channel_connectivity_cb_shutdown"); + } +} + +// +// PendingPick +// + +// Adds lb_token of selected subchannel (address) to the call's initial +// metadata. +grpc_error* AddLbTokenToInitialMetadata( + grpc_mdelem lb_token, grpc_linked_mdelem* lb_token_mdelem_storage, + grpc_metadata_batch* initial_metadata) { + GPR_ASSERT(lb_token_mdelem_storage != nullptr); + GPR_ASSERT(!GRPC_MDISNULL(lb_token)); + return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage, + lb_token); +} + +// Destroy function used when embedding client stats in call context. 
+void DestroyClientStats(void* arg) { + grpc_grpclb_client_stats_unref(static_cast(arg)); +} + +void GrpcLb::PendingPickSetMetadataAndContext(PendingPick* pp) { + /* if connected_subchannel is nullptr, no pick has been made by the RR + * policy (e.g., all addresses failed to connect). There won't be any + * user_data/token available */ + if (pp->pick->connected_subchannel != nullptr) { + if (!GRPC_MDISNULL(pp->lb_token)) { + AddLbTokenToInitialMetadata(GRPC_MDELEM_REF(pp->lb_token), + &pp->pick->lb_token_mdelem_storage, + pp->pick->initial_metadata); + } else { + gpr_log(GPR_ERROR, + "[grpclb %p] No LB token for connected subchannel pick %p", + pp->grpclb_policy, pp->pick); + abort(); + } + // Pass on client stats via context. Passes ownership of the reference. + if (pp->client_stats != nullptr) { + pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value = + pp->client_stats; + pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy = + DestroyClientStats; + } + } else { + if (pp->client_stats != nullptr) { + grpc_grpclb_client_stats_unref(pp->client_stats); + } + } +} + +/* The \a on_complete closure passed as part of the pick requires keeping a + * reference to its associated round robin instance. We wrap this closure in + * order to unref the round robin instance upon its invocation */ +void GrpcLb::OnPendingPickComplete(void* arg, grpc_error* error) { + PendingPick* pp = static_cast(arg); + PendingPickSetMetadataAndContext(pp); + GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error)); + Delete(pp); +} + +GrpcLb::PendingPick* GrpcLb::PendingPickCreate(PickState* pick) { + PendingPick* pp = New(); + pp->grpclb_policy = this; + pp->pick = pick; + GRPC_CLOSURE_INIT(&pp->on_complete, &GrpcLb::OnPendingPickComplete, pp, + grpc_schedule_on_exec_ctx); + pp->original_on_complete = pick->on_complete; + pick->on_complete = &pp->on_complete; + return pp; +} + +void GrpcLb::AddPendingPick(PendingPick* pp) { + pp->next = pending_picks_; + pending_picks_ = pp; +} + +// +// PendingPing +// + +void GrpcLb::AddPendingPing(grpc_closure* on_initiate, grpc_closure* on_ack) { + PendingPing* pping = New(); + pping->on_initiate = on_initiate; + pping->on_ack = on_ack; + pping->next = pending_pings_; + pending_pings_ = pping; +} + +// +// code for interacting with the RR policy +// + +// Performs a pick over \a rr_policy_. Given that a pick can return +// immediately (ignoring its completion callback), we need to perform the +// cleanups this callback would otherwise be responsible for. +// If \a force_async is true, then we will manually schedule the +// completion callback even if the pick is available immediately. +bool GrpcLb::PickFromRoundRobinPolicyLocked(bool force_async, PendingPick* pp) { + // Check for drops if we are not using fallback backend addresses. + if (serverlist_ != nullptr) { + // Look at the index into the serverlist to see if we should drop this call. + grpc_grpclb_server* server = serverlist_->servers[serverlist_index_++]; + if (serverlist_index_ == serverlist_->num_servers) { + serverlist_index_ = 0; // Wrap-around. + } + if (server->drop) { + // Update client load reporting stats to indicate the number of + // dropped calls. Note that we have to do this here instead of in + // the client_load_reporting filter, because we do not create a + // subchannel call (and therefore no client_load_reporting filter) + // for dropped calls. 
+ if (lb_calld_ != nullptr && lb_calld_->client_stats() != nullptr) { + grpc_grpclb_client_stats_add_call_dropped_locked( + server->load_balance_token, lb_calld_->client_stats()); + } + if (force_async) { + GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE); + Delete(pp); + return false; + } + Delete(pp); + return true; + } + } + // Set client_stats and user_data. + if (lb_calld_ != nullptr && lb_calld_->client_stats() != nullptr) { + pp->client_stats = grpc_grpclb_client_stats_ref(lb_calld_->client_stats()); + } + GPR_ASSERT(pp->pick->user_data == nullptr); + pp->pick->user_data = (void**)&pp->lb_token; + // Pick via the RR policy. + bool pick_done = rr_policy_->PickLocked(pp->pick); + if (pick_done) { + PendingPickSetMetadataAndContext(pp); + if (force_async) { + GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE); + pick_done = false; + } + Delete(pp); + } + // else, the pending pick will be registered and taken care of by the + // pending pick list inside the RR policy. Eventually, + // OnPendingPickComplete() will be called, which will (among other + // things) add the LB token to the call's initial metadata. + return pick_done; +} + +void GrpcLb::CreateRoundRobinPolicyLocked(const Args& args) { + GPR_ASSERT(rr_policy_ == nullptr); + rr_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( + "round_robin", args); + if (rr_policy_ == nullptr) { + gpr_log(GPR_ERROR, "[grpclb %p] Failure creating a RoundRobin policy", + this); + return; + } + // TODO(roth): We currently track this ref manually. Once the new + // ClosureRef API is done, pass the RefCountedPtr<> along with the closure. + auto self = Ref(DEBUG_LOCATION, "on_rr_reresolution_requested"); + self.release(); + rr_policy_->SetReresolutionClosureLocked(&on_rr_request_reresolution_); + grpc_error* rr_state_error = nullptr; + rr_connectivity_state_ = rr_policy_->CheckConnectivityLocked(&rr_state_error); + // Connectivity state is a function of the RR policy updated/created. + UpdateConnectivityStateFromRoundRobinPolicyLocked(rr_state_error); + // Add the gRPC LB's interested_parties pollset_set to that of the newly + // created RR policy. This will make the RR policy progress upon activity on + // gRPC LB, which in turn is tied to the application's call. + grpc_pollset_set_add_pollset_set(rr_policy_->interested_parties(), + interested_parties()); + // Subscribe to changes to the connectivity of the new RR. + // TODO(roth): We currently track this ref manually. Once the new + // ClosureRef API is done, pass the RefCountedPtr<> along with the closure. + self = Ref(DEBUG_LOCATION, "on_rr_connectivity_changed"); + self.release(); + rr_policy_->NotifyOnStateChangeLocked(&rr_connectivity_state_, + &on_rr_connectivity_changed_); + rr_policy_->ExitIdleLocked(); + // Send pending picks to RR policy. + PendingPick* pp; + while ((pp = pending_picks_)) { + pending_picks_ = pp->next; + if (grpc_lb_glb_trace.enabled()) { + gpr_log(GPR_INFO, + "[grpclb %p] Pending pick about to (async) PICK from RR %p", this, + rr_policy_.get()); + } + PickFromRoundRobinPolicyLocked(true /* force_async */, pp); + } + // Send pending pings to RR policy. 
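A minimal self-contained sketch of the drop decision implemented in PickFromRoundRobinPolicyLocked() above: the policy walks the balancer-provided serverlist with a wrapping index and treats entries marked "drop" as calls to fail locally. FakeServer and ShouldDropNextCall are hypothetical names used only for illustration, not part of this patch or the gRPC API.

    #include <cstddef>
    #include <vector>

    struct FakeServer {
      bool drop;  // Mirrors grpc_grpclb_server::drop.
    };

    // Advances the wrapping index and reports whether the selected
    // serverlist entry asks for the call to be dropped.
    bool ShouldDropNextCall(const std::vector<FakeServer>& serverlist,
                            size_t* serverlist_index) {
      if (serverlist.empty()) return false;
      const FakeServer& server = serverlist[(*serverlist_index)++];
      if (*serverlist_index == serverlist.size()) {
        *serverlist_index = 0;  // Wrap around, like serverlist_index_ above.
      }
      return server.drop;
    }

As the comment in the hunk notes, the drop has to be accounted for at pick time because no subchannel call (and hence no client_load_reporting filter) is ever created for a dropped call.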
+  PendingPing* pping;
+  while ((pping = pending_pings_)) {
+    pending_pings_ = pping->next;
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
+              this, rr_policy_.get());
+    }
+    rr_policy_->PingOneLocked(pping->on_initiate, pping->on_ack);
+    Delete(pping);
+  }
+}
+
+grpc_channel_args* GrpcLb::CreateRoundRobinPolicyArgsLocked() {
+  grpc_lb_addresses* addresses;
+  if (serverlist_ != nullptr) {
+    GPR_ASSERT(serverlist_->num_servers > 0);
+    addresses = ProcessServerlist(serverlist_);
+  } else {
+    // If CreateOrUpdateRoundRobinPolicyLocked() is invoked when we haven't
+    // received any serverlist from the balancer, we use the fallback backends
+    // returned by the resolver. Note that the fallback backend list may be
+    // empty, in which case the new round_robin policy will keep the requested
+    // picks pending.
+    GPR_ASSERT(fallback_backend_addresses_ != nullptr);
+    addresses = grpc_lb_addresses_copy(fallback_backend_addresses_);
+  }
+  GPR_ASSERT(addresses != nullptr);
+  // Replace the LB addresses in the channel args that we pass down to
+  // the subchannel.
+  static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
+  const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
+  grpc_channel_args* args = grpc_channel_args_copy_and_add_and_remove(
+      args_, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg, 1);
+  grpc_lb_addresses_destroy(addresses);
+  return args;
+}
+
+void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
+  if (shutting_down_) return;
+  grpc_channel_args* args = CreateRoundRobinPolicyArgsLocked();
+  GPR_ASSERT(args != nullptr);
+  if (rr_policy_ != nullptr) {
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_INFO, "[grpclb %p] Updating RR policy %p", this,
+              rr_policy_.get());
+    }
+    rr_policy_->UpdateLocked(*args);
+  } else {
+    LoadBalancingPolicy::Args lb_policy_args;
+    lb_policy_args.combiner = combiner();
+    lb_policy_args.client_channel_factory = client_channel_factory();
+    lb_policy_args.args = args;
+    CreateRoundRobinPolicyLocked(lb_policy_args);
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_INFO, "[grpclb %p] Created new RR policy %p", this,
+              rr_policy_.get());
+    }
+  }
+  grpc_channel_args_destroy(args);
+}
+
+void GrpcLb::OnRoundRobinRequestReresolutionLocked(void* arg,
+                                                   grpc_error* error) {
+  GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
+  if (grpclb_policy->shutting_down_ || error != GRPC_ERROR_NONE) {
+    grpclb_policy->Unref(DEBUG_LOCATION, "on_rr_reresolution_requested");
+    return;
+  }
+  if (grpc_lb_glb_trace.enabled()) {
+    gpr_log(
+        GPR_INFO,
+        "[grpclb %p] Re-resolution requested from the internal RR policy (%p).",
+        grpclb_policy, grpclb_policy->rr_policy_.get());
+  }
+  // If we are talking to a balancer, we expect to get updated addresses from
+  // the balancer, so we can ignore the re-resolution request from the RR
+  // policy. Otherwise, handle the re-resolution request using the
+  // grpclb policy's original re-resolution closure.
+  if (grpclb_policy->lb_calld_ == nullptr ||
+      !grpclb_policy->lb_calld_->seen_initial_response()) {
+    grpclb_policy->TryReresolutionLocked(&grpc_lb_glb_trace, GRPC_ERROR_NONE);
+  }
+  // Give back the wrapper closure to the RR policy.
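CreateRoundRobinPolicyArgsLocked() above rebuilds the channel args passed to the embedded round_robin policy: it copies the existing args, removes the resolver-provided GRPC_ARG_LB_ADDRESSES entry, and adds the serverlist- (or fallback-) derived addresses instead. A minimal model of that copy/remove/add pattern, using a hypothetical FakeChannelArgs map rather than the real grpc_channel_args type:

    #include <map>
    #include <string>

    using FakeChannelArgs = std::map<std::string, std::string>;

    FakeChannelArgs CopyAndAddAndRemove(const FakeChannelArgs& base,
                                        const std::string& key_to_remove,
                                        const std::string& key_to_add,
                                        const std::string& value_to_add) {
      FakeChannelArgs result = base;      // copy the existing args
      result.erase(key_to_remove);        // drop the old entry, if present
      result[key_to_add] = value_to_add;  // add the replacement entry
      return result;
    }

The real code likewise builds a new grpc_channel_args object rather than mutating the existing one in place, which is what the copy in this sketch mirrors.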
+ grpclb_policy->rr_policy_->SetReresolutionClosureLocked( + &grpclb_policy->on_rr_request_reresolution_); +} + +void GrpcLb::UpdateConnectivityStateFromRoundRobinPolicyLocked( + grpc_error* rr_state_error) { + const grpc_connectivity_state curr_glb_state = + grpc_connectivity_state_check(&state_tracker_); + /* The new connectivity status is a function of the previous one and the new + * input coming from the status of the RR policy. + * + * current state (grpclb's) + * | + * v || I | C | R | TF | SD | <- new state (RR's) + * ===++====+=====+=====+======+======+ + * I || I | C | R | [I] | [I] | + * ---++----+-----+-----+------+------+ + * C || I | C | R | [C] | [C] | + * ---++----+-----+-----+------+------+ + * R || I | C | R | [R] | [R] | + * ---++----+-----+-----+------+------+ + * TF || I | C | R | [TF] | [TF] | + * ---++----+-----+-----+------+------+ + * SD || NA | NA | NA | NA | NA | (*) + * ---++----+-----+-----+------+------+ + * + * A [STATE] indicates that the old RR policy is kept. In those cases, STATE + * is the current state of grpclb, which is left untouched. + * + * In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to + * the previous RR instance. + * + * Note that the status is never updated to SHUTDOWN as a result of calling + * this function. Only glb_shutdown() has the power to set that state. + * + * (*) This function mustn't be called during shutting down. */ + GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN); + switch (rr_connectivity_state_) { + case GRPC_CHANNEL_TRANSIENT_FAILURE: + case GRPC_CHANNEL_SHUTDOWN: + GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE); + break; + case GRPC_CHANNEL_IDLE: + case GRPC_CHANNEL_CONNECTING: + case GRPC_CHANNEL_READY: + GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE); + } + if (grpc_lb_glb_trace.enabled()) { + gpr_log( + GPR_INFO, + "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.", + this, grpc_connectivity_state_name(rr_connectivity_state_), + rr_policy_.get()); + } + grpc_connectivity_state_set(&state_tracker_, rr_connectivity_state_, + rr_state_error, + "update_lb_connectivity_status_locked"); +} + +void GrpcLb::OnRoundRobinConnectivityChangedLocked(void* arg, + grpc_error* error) { + GrpcLb* grpclb_policy = static_cast(arg); + if (grpclb_policy->shutting_down_) { + grpclb_policy->Unref(DEBUG_LOCATION, "on_rr_connectivity_changed"); + return; + } + grpclb_policy->UpdateConnectivityStateFromRoundRobinPolicyLocked( + GRPC_ERROR_REF(error)); + // Resubscribe. Reuse the "on_rr_connectivity_changed" ref. + grpclb_policy->rr_policy_->NotifyOnStateChangeLocked( + &grpclb_policy->rr_connectivity_state_, + &grpclb_policy->on_rr_connectivity_changed_); +} + +// +// factory +// + +class GrpcLbFactory : public LoadBalancingPolicyFactory { + public: + OrphanablePtr CreateLoadBalancingPolicy( + const LoadBalancingPolicy::Args& args) const override { + /* Count the number of gRPC-LB addresses. There must be at least one. 
*/ + const grpc_arg* arg = + grpc_channel_args_find(args.args, GRPC_ARG_LB_ADDRESSES); + if (arg == nullptr || arg->type != GRPC_ARG_POINTER) { + return nullptr; + } + grpc_lb_addresses* addresses = + static_cast(arg->value.pointer.p); + size_t num_grpclb_addrs = 0; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs; + } + if (num_grpclb_addrs == 0) return nullptr; + return OrphanablePtr(New(addresses, args)); + } + + const char* name() const override { return "grpclb"; } +}; + +} // namespace + +} // namespace grpc_core + +// +// Plugin registration +// + +namespace { + +// Only add client_load_reporting filter if the grpclb LB policy is used. +bool maybe_add_client_load_reporting_filter(grpc_channel_stack_builder* builder, + void* arg) { + const grpc_channel_args* args = + grpc_channel_stack_builder_get_channel_arguments(builder); + const grpc_arg* channel_arg = + grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME); + if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING && + strcmp(channel_arg->value.string, "grpclb") == 0) { + return grpc_channel_stack_builder_append_filter( + builder, (const grpc_channel_filter*)arg, nullptr, nullptr); + } + return true; +} + +} // namespace + +void grpc_lb_policy_grpclb_init() { + grpc_core::LoadBalancingPolicyRegistry::Builder:: + RegisterLoadBalancingPolicyFactory( + grpc_core::UniquePtr( + grpc_core::New())); + grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, + GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, + maybe_add_client_load_reporting_filter, + (void*)&grpc_client_load_reporting_filter); +} + +void grpc_lb_policy_grpclb_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h deleted file mode 100644 index 63ad66c5e..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H -#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H - -#include "src/core/ext/filters/client_channel/lb_policy_factory.h" - -/** Returns a load balancing factory for the glb policy, which tries to connect - * to a load balancing server to decide the next successfully connected - * subchannel to pick. 
*/ -grpc_lb_policy_factory *grpc_glb_lb_factory_create(); - -#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h index 6120bf53f..825065a9c 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h @@ -19,26 +19,18 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H -#include "src/core/ext/filters/client_channel/lb_policy_factory.h" -#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" -#include "src/core/lib/slice/slice_hash_table.h" +#include -/** Create the channel used for communicating with an LB service. - * Note that an LB *service* may be comprised of several LB *servers*. - * - * \a lb_service_target_addresses is the target URI containing the addresses - * from resolving the LB service's name (eg, ipv4:10.0.0.1:1234,10.2.3.4:9876). - * \a client_channel_factory will be used for the creation of the LB channel, - * alongside the channel args passed in \a args. */ -grpc_channel *grpc_lb_policy_grpclb_create_lb_channel( - grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses, - grpc_client_channel_factory *client_channel_factory, - grpc_channel_args *args); +#include "src/core/ext/filters/client_channel/lb_policy_factory.h" -grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args( - grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info, - grpc_fake_resolver_response_generator *response_generator, - const grpc_channel_args *args); +/// Makes any necessary modifications to \a args for use in the grpclb +/// balancer channel. +/// +/// Takes ownership of \a args. +/// +/// Caller takes ownership of the returned args. +grpc_channel_args* grpc_lb_policy_grpclb_modify_lb_channel_args( + grpc_channel_args* args); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c deleted file mode 100644 index 2681b2a07..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include -#include - -#include "src/core/ext/filters/client_channel/client_channel.h" -#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/security/credentials/credentials.h" -#include "src/core/lib/security/transport/lb_targets_info.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/string.h" - -grpc_channel *grpc_lb_policy_grpclb_create_lb_channel( - grpc_exec_ctx *exec_ctx, const char *lb_service_target_addresses, - grpc_client_channel_factory *client_channel_factory, - grpc_channel_args *args) { - grpc_channel_args *new_args = args; - grpc_channel_credentials *channel_credentials = - grpc_channel_credentials_find_in_args(args); - if (channel_credentials != NULL) { - /* Substitute the channel credentials with a version without call - * credentials: the load balancer is not necessarily trusted to handle - * bearer token credentials */ - static const char *keys_to_remove[] = {GRPC_ARG_CHANNEL_CREDENTIALS}; - grpc_channel_credentials *creds_sans_call_creds = - grpc_channel_credentials_duplicate_without_call_credentials( - channel_credentials); - GPR_ASSERT(creds_sans_call_creds != NULL); - grpc_arg args_to_add[] = { - grpc_channel_credentials_to_arg(creds_sans_call_creds)}; - /* Create the new set of channel args */ - new_args = grpc_channel_args_copy_and_add_and_remove( - args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add, - GPR_ARRAY_SIZE(args_to_add)); - grpc_channel_credentials_unref(exec_ctx, creds_sans_call_creds); - } - grpc_channel *lb_channel = grpc_client_channel_factory_create_channel( - exec_ctx, client_channel_factory, lb_service_target_addresses, - GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, new_args); - if (channel_credentials != NULL) { - grpc_channel_args_destroy(exec_ctx, new_args); - } - return lb_channel; -} - -grpc_channel_args *grpc_lb_policy_grpclb_build_lb_channel_args( - grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *targets_info, - grpc_fake_resolver_response_generator *response_generator, - const grpc_channel_args *args) { - const grpc_arg to_add[] = { - grpc_lb_targets_info_create_channel_arg(targets_info), - grpc_fake_resolver_response_generator_arg(response_generator)}; - /* We remove: - * - * - The channel arg for the LB policy name, since we want to use the default - * (pick_first) in this case. - * - * - The channel arg for the resolved addresses, since that will be generated - * by the name resolver used in the LB channel. Note that the LB channel - * will use the fake resolver, so this won't actually generate a query - * to DNS (or some other name service). However, the addresses returned by - * the fake resolver will have is_balancer=false, whereas our own - * addresses have is_balancer=true. We need the LB channel to return - * addresses with is_balancer=false so that it does not wind up recursively - * using the grpclb LB policy, as per the special case logic in - * client_channel.c. - * - * - The channel arg for the server URI, since that will be different for the - * LB channel than for the parent channel (the client channel factory will - * re-add this arg with the right value). - * - * - The fake resolver generator, because we are replacing it with the one - * from the grpclb policy, used to propagate updates to the LB channel. 
*/ - static const char *keys_to_remove[] = { - GRPC_ARG_LB_POLICY_NAME, GRPC_ARG_LB_ADDRESSES, GRPC_ARG_SERVER_URI, - GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR}; - /* Add the targets info table to be used for secure naming */ - return grpc_channel_args_copy_and_add_and_remove( - args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), to_add, - GPR_ARRAY_SIZE(to_add)); -} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc new file mode 100644 index 000000000..441efd5e2 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc @@ -0,0 +1,108 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h" + +#include + +#include +#include + +#include "src/core/ext/filters/client_channel/client_channel.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/security/credentials/credentials.h" +#include "src/core/lib/security/transport/target_authority_table.h" +#include "src/core/lib/slice/slice_internal.h" + +namespace grpc_core { +namespace { + +int BalancerNameCmp(const grpc_core::UniquePtr& a, + const grpc_core::UniquePtr& b) { + return strcmp(a.get(), b.get()); +} + +RefCountedPtr CreateTargetAuthorityTable( + grpc_lb_addresses* addresses) { + TargetAuthorityTable::Entry* target_authority_entries = + static_cast(gpr_zalloc( + sizeof(*target_authority_entries) * addresses->num_addresses)); + for (size_t i = 0; i < addresses->num_addresses; ++i) { + char* addr_str; + GPR_ASSERT(grpc_sockaddr_to_string( + &addr_str, &addresses->addresses[i].address, true) > 0); + target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str); + target_authority_entries[i].value.reset( + gpr_strdup(addresses->addresses[i].balancer_name)); + gpr_free(addr_str); + } + RefCountedPtr target_authority_table = + TargetAuthorityTable::Create(addresses->num_addresses, + target_authority_entries, BalancerNameCmp); + gpr_free(target_authority_entries); + return target_authority_table; +} + +} // namespace +} // namespace grpc_core + +grpc_channel_args* grpc_lb_policy_grpclb_modify_lb_channel_args( + grpc_channel_args* args) { + const char* args_to_remove[1]; + size_t num_args_to_remove = 0; + grpc_arg args_to_add[2]; + size_t num_args_to_add = 0; + // Add arg for targets info table. 
+ const grpc_arg* arg = grpc_channel_args_find(args, GRPC_ARG_LB_ADDRESSES); + GPR_ASSERT(arg != nullptr); + GPR_ASSERT(arg->type == GRPC_ARG_POINTER); + grpc_lb_addresses* addresses = + static_cast(arg->value.pointer.p); + grpc_core::RefCountedPtr + target_authority_table = grpc_core::CreateTargetAuthorityTable(addresses); + args_to_add[num_args_to_add++] = + grpc_core::CreateTargetAuthorityTableChannelArg( + target_authority_table.get()); + // Substitute the channel credentials with a version without call + // credentials: the load balancer is not necessarily trusted to handle + // bearer token credentials. + grpc_channel_credentials* channel_credentials = + grpc_channel_credentials_find_in_args(args); + grpc_channel_credentials* creds_sans_call_creds = nullptr; + if (channel_credentials != nullptr) { + creds_sans_call_creds = + grpc_channel_credentials_duplicate_without_call_credentials( + channel_credentials); + GPR_ASSERT(creds_sans_call_creds != nullptr); + args_to_remove[num_args_to_remove++] = GRPC_ARG_CHANNEL_CREDENTIALS; + args_to_add[num_args_to_add++] = + grpc_channel_credentials_to_arg(creds_sans_call_creds); + } + grpc_channel_args* result = grpc_channel_args_copy_and_add_and_remove( + args, args_to_remove, num_args_to_remove, args_to_add, num_args_to_add); + // Clean up. + grpc_channel_args_destroy(args); + if (creds_sans_call_creds != nullptr) { + grpc_channel_credentials_unref(creds_sans_call_creds); + } + return result; +} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc similarity index 88% rename from Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc index 903120ca7..dfbaead7d 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h" #include @@ -24,7 +26,6 @@ #include #include #include -#include #include "src/core/lib/channel/channel_args.h" @@ -43,7 +44,7 @@ struct grpc_grpclb_client_stats { grpc_grpclb_client_stats* grpc_grpclb_client_stats_create() { grpc_grpclb_client_stats* client_stats = - (grpc_grpclb_client_stats*)gpr_zalloc(sizeof(*client_stats)); + static_cast(gpr_zalloc(sizeof(*client_stats))); gpr_ref_init(&client_stats->refs, 1); return client_stats; } @@ -87,10 +88,10 @@ void grpc_grpclb_client_stats_add_call_dropped_locked( gpr_atm_full_fetch_add(&client_stats->num_calls_started, (gpr_atm)1); gpr_atm_full_fetch_add(&client_stats->num_calls_finished, (gpr_atm)1); // Record the drop. 
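The grpc_grpclb_client_stats_add_call_dropped_locked() hunk around this point records one dropped call per load-balance token, growing the token_counts array with a doubling reallocation when it runs out of space. The same bookkeeping, sketched with hypothetical C++ types (DropTokenCount, AddCallDropped) instead of the C structs in this file:

    #include <cstdint>
    #include <string>
    #include <vector>

    struct DropTokenCount {
      std::string token;
      int64_t count;
    };

    void AddCallDropped(const std::string& token,
                        std::vector<DropTokenCount>* counts) {
      for (DropTokenCount& entry : *counts) {
        if (entry.token == token) {
          ++entry.count;  // Existing token: just bump the counter.
          return;
        }
      }
      // New token: append an entry. std::vector grows geometrically,
      // analogous to the doubling gpr_realloc in the C code.
      counts->push_back({token, 1});
    }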
- if (client_stats->drop_token_counts == NULL) { + if (client_stats->drop_token_counts == nullptr) { client_stats->drop_token_counts = - (grpc_grpclb_dropped_call_counts*)gpr_zalloc( - sizeof(grpc_grpclb_dropped_call_counts)); + static_cast( + gpr_zalloc(sizeof(grpc_grpclb_dropped_call_counts))); } grpc_grpclb_dropped_call_counts* drop_token_counts = client_stats->drop_token_counts; @@ -105,9 +106,9 @@ void grpc_grpclb_client_stats_add_call_dropped_locked( while (new_num_entries < drop_token_counts->num_entries + 1) { new_num_entries *= 2; } - drop_token_counts->token_counts = (grpc_grpclb_drop_token_count*)gpr_realloc( - drop_token_counts->token_counts, - new_num_entries * sizeof(grpc_grpclb_drop_token_count)); + drop_token_counts->token_counts = static_cast( + gpr_realloc(drop_token_counts->token_counts, + new_num_entries * sizeof(grpc_grpclb_drop_token_count))); grpc_grpclb_drop_token_count* new_entry = &drop_token_counts->token_counts[drop_token_counts->num_entries++]; new_entry->token = gpr_strdup(token); @@ -115,7 +116,7 @@ void grpc_grpclb_client_stats_add_call_dropped_locked( } static void atomic_get_and_reset_counter(int64_t* value, gpr_atm* counter) { - *value = (int64_t)gpr_atm_acq_load(counter); + *value = static_cast(gpr_atm_acq_load(counter)); gpr_atm_full_fetch_add(counter, (gpr_atm)(-*value)); } @@ -136,12 +137,12 @@ void grpc_grpclb_client_stats_get_locked( num_calls_finished_known_received, &client_stats->num_calls_finished_known_received); *drop_token_counts = client_stats->drop_token_counts; - client_stats->drop_token_counts = NULL; + client_stats->drop_token_counts = nullptr; } void grpc_grpclb_dropped_call_counts_destroy( grpc_grpclb_dropped_call_counts* drop_entries) { - if (drop_entries != NULL) { + if (drop_entries != nullptr) { for (size_t i = 0; i < drop_entries->num_entries; ++i) { gpr_free(drop_entries->token_counts[i].token); } diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h index c51e2a431..c971e5688 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H +#include + #include #include @@ -62,4 +64,4 @@ void grpc_grpclb_dropped_call_counts_destroy( grpc_grpclb_dropped_call_counts* drop_entries); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc similarity index 65% rename from Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc index 8ef6dfc6f..7ef3bcf24 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h" #include "third_party/nanopb/pb_decode.h" #include 
"third_party/nanopb/pb_encode.h" @@ -23,9 +25,9 @@ #include /* invoked once for every Server in ServerList */ -static bool count_serverlist(pb_istream_t *stream, const pb_field_t *field, - void **arg) { - grpc_grpclb_serverlist *sl = (grpc_grpclb_serverlist *)*arg; +static bool count_serverlist(pb_istream_t* stream, const pb_field_t* field, + void** arg) { + grpc_grpclb_serverlist* sl = static_cast(*arg); grpc_grpclb_server server; if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) { gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream)); @@ -40,16 +42,16 @@ typedef struct decode_serverlist_arg { * which index of the serverlist are we currently decoding */ size_t decoding_idx; /* The decoded serverlist */ - grpc_grpclb_serverlist *serverlist; + grpc_grpclb_serverlist* serverlist; } decode_serverlist_arg; /* invoked once for every Server in ServerList */ -static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field, - void **arg) { - decode_serverlist_arg *dec_arg = (decode_serverlist_arg *)*arg; +static bool decode_serverlist(pb_istream_t* stream, const pb_field_t* field, + void** arg) { + decode_serverlist_arg* dec_arg = static_cast(*arg); GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx); - grpc_grpclb_server *server = - (grpc_grpclb_server *)gpr_zalloc(sizeof(grpc_grpclb_server)); + grpc_grpclb_server* server = + static_cast(gpr_zalloc(sizeof(grpc_grpclb_server))); if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) { gpr_free(server); gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream)); @@ -59,9 +61,9 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field, return true; } -grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) { - grpc_grpclb_request *req = - (grpc_grpclb_request *)gpr_malloc(sizeof(grpc_grpclb_request)); +grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name) { + grpc_grpclb_request* req = static_cast( + gpr_malloc(sizeof(grpc_grpclb_request))); req->has_client_stats = false; req->has_initial_request = true; req->initial_request.has_name = true; @@ -71,25 +73,25 @@ grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) { } static void populate_timestamp(gpr_timespec timestamp, - struct _grpc_lb_v1_Timestamp *timestamp_pb) { + struct _grpc_lb_v1_Timestamp* timestamp_pb) { timestamp_pb->has_seconds = true; timestamp_pb->seconds = timestamp.tv_sec; timestamp_pb->has_nanos = true; timestamp_pb->nanos = timestamp.tv_nsec; } -static bool encode_string(pb_ostream_t *stream, const pb_field_t *field, - void *const *arg) { - char *str = (char *)*arg; +static bool encode_string(pb_ostream_t* stream, const pb_field_t* field, + void* const* arg) { + char* str = static_cast(*arg); if (!pb_encode_tag_for_field(stream, field)) return false; - return pb_encode_string(stream, (uint8_t *)str, strlen(str)); + return pb_encode_string(stream, reinterpret_cast(str), strlen(str)); } -static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field, - void *const *arg) { - grpc_grpclb_dropped_call_counts *drop_entries = - (grpc_grpclb_dropped_call_counts *)*arg; - if (drop_entries == NULL) return true; +static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field, + void* const* arg) { + grpc_grpclb_dropped_call_counts* drop_entries = + static_cast(*arg); + if (drop_entries == nullptr) return true; for (size_t i = 0; i < drop_entries->num_entries; ++i) { if (!pb_encode_tag_for_field(stream, field)) return false; 
grpc_lb_v1_ClientStatsPerToken drop_message; @@ -105,10 +107,10 @@ static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field, return true; } -grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked( - grpc_grpclb_client_stats *client_stats) { - grpc_grpclb_request *req = - (grpc_grpclb_request *)gpr_zalloc(sizeof(grpc_grpclb_request)); +grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked( + grpc_grpclb_client_stats* client_stats) { + grpc_grpclb_request* req = static_cast( + gpr_zalloc(sizeof(grpc_grpclb_request))); req->has_client_stats = true; req->client_stats.has_timestamp = true; populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp); @@ -123,12 +125,12 @@ grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked( &req->client_stats.num_calls_finished, &req->client_stats.num_calls_finished_with_client_failed_to_send, &req->client_stats.num_calls_finished_known_received, - (grpc_grpclb_dropped_call_counts **)&req->client_stats - .calls_finished_with_drop.arg); + reinterpret_cast( + &req->client_stats.calls_finished_with_drop.arg)); return req; } -grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) { +grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request) { size_t encoded_length; pb_ostream_t sizestream; pb_ostream_t outputstream; @@ -145,18 +147,18 @@ grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request) { return slice; } -void grpc_grpclb_request_destroy(grpc_grpclb_request *request) { +void grpc_grpclb_request_destroy(grpc_grpclb_request* request) { if (request->has_client_stats) { - grpc_grpclb_dropped_call_counts *drop_entries = - (grpc_grpclb_dropped_call_counts *) - request->client_stats.calls_finished_with_drop.arg; + grpc_grpclb_dropped_call_counts* drop_entries = + static_cast( + request->client_stats.calls_finished_with_drop.arg); grpc_grpclb_dropped_call_counts_destroy(drop_entries); } gpr_free(request); } typedef grpc_lb_v1_LoadBalanceResponse grpc_grpclb_response; -grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse( +grpc_grpclb_initial_response* grpc_grpclb_initial_response_parse( grpc_slice encoded_grpc_grpclb_response) { pb_istream_t stream = pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response), @@ -165,28 +167,28 @@ grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse( memset(&res, 0, sizeof(grpc_grpclb_response)); if (!pb_decode(&stream, grpc_lb_v1_LoadBalanceResponse_fields, &res)) { gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream)); - return NULL; + return nullptr; } - if (!res.has_initial_response) return NULL; + if (!res.has_initial_response) return nullptr; - grpc_grpclb_initial_response *initial_res = - (grpc_grpclb_initial_response *)gpr_malloc( - sizeof(grpc_grpclb_initial_response)); + grpc_grpclb_initial_response* initial_res = + static_cast( + gpr_malloc(sizeof(grpc_grpclb_initial_response))); memcpy(initial_res, &res.initial_response, sizeof(grpc_grpclb_initial_response)); return initial_res; } -grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist( +grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist( grpc_slice encoded_grpc_grpclb_response) { pb_istream_t stream = pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response), GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response)); pb_istream_t stream_at_start = stream; - grpc_grpclb_serverlist *sl = - (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist)); + 
grpc_grpclb_serverlist* sl = static_cast( + gpr_zalloc(sizeof(grpc_grpclb_serverlist))); grpc_grpclb_response res; memset(&res, 0, sizeof(grpc_grpclb_response)); // First pass: count number of servers. @@ -196,12 +198,12 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist( if (!status) { gpr_free(sl); gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream)); - return NULL; + return nullptr; } // Second pass: populate servers. if (sl->num_servers > 0) { - sl->servers = (grpc_grpclb_server **)gpr_zalloc( - sizeof(grpc_grpclb_server *) * sl->num_servers); + sl->servers = static_cast( + gpr_zalloc(sizeof(grpc_grpclb_server*) * sl->num_servers)); decode_serverlist_arg decode_arg; memset(&decode_arg, 0, sizeof(decode_arg)); decode_arg.serverlist = sl; @@ -212,17 +214,14 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist( if (!status) { grpc_grpclb_destroy_serverlist(sl); gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream)); - return NULL; + return nullptr; } } - if (res.server_list.has_expiration_interval) { - sl->expiration_interval = res.server_list.expiration_interval; - } return sl; } -void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) { - if (serverlist == NULL) { +void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist) { + if (serverlist == nullptr) { return; } for (size_t i = 0; i < serverlist->num_servers; i++) { @@ -232,35 +231,29 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) { gpr_free(serverlist); } -grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy( - const grpc_grpclb_serverlist *sl) { - grpc_grpclb_serverlist *copy = - (grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist)); +grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy( + const grpc_grpclb_serverlist* sl) { + grpc_grpclb_serverlist* copy = static_cast( + gpr_zalloc(sizeof(grpc_grpclb_serverlist))); copy->num_servers = sl->num_servers; - memcpy(©->expiration_interval, &sl->expiration_interval, - sizeof(grpc_grpclb_duration)); - copy->servers = (grpc_grpclb_server **)gpr_malloc( - sizeof(grpc_grpclb_server *) * sl->num_servers); + copy->servers = static_cast( + gpr_malloc(sizeof(grpc_grpclb_server*) * sl->num_servers)); for (size_t i = 0; i < sl->num_servers; i++) { - copy->servers[i] = - (grpc_grpclb_server *)gpr_malloc(sizeof(grpc_grpclb_server)); + copy->servers[i] = static_cast( + gpr_malloc(sizeof(grpc_grpclb_server))); memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server)); } return copy; } -bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs, - const grpc_grpclb_serverlist *rhs) { - if (lhs == NULL || rhs == NULL) { +bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs, + const grpc_grpclb_serverlist* rhs) { + if (lhs == nullptr || rhs == nullptr) { return false; } if (lhs->num_servers != rhs->num_servers) { return false; } - if (grpc_grpclb_duration_compare(&lhs->expiration_interval, - &rhs->expiration_interval) != 0) { - return false; - } for (size_t i = 0; i < lhs->num_servers; i++) { if (!grpc_grpclb_server_equals(lhs->servers[i], rhs->servers[i])) { return false; @@ -269,13 +262,13 @@ bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs, return true; } -bool grpc_grpclb_server_equals(const grpc_grpclb_server *lhs, - const grpc_grpclb_server *rhs) { +bool grpc_grpclb_server_equals(const grpc_grpclb_server* lhs, + const grpc_grpclb_server* rhs) { return memcmp(lhs, rhs, sizeof(grpc_grpclb_server)) == 0; } -int 
grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs, - const grpc_grpclb_duration *rhs) { +int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs, + const grpc_grpclb_duration* rhs) { GPR_ASSERT(lhs && rhs); if (lhs->has_seconds && rhs->has_seconds) { if (lhs->seconds < rhs->seconds) return -1; @@ -299,16 +292,13 @@ int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs, return 0; } -gpr_timespec grpc_grpclb_duration_to_timespec( - grpc_grpclb_duration *duration_pb) { - gpr_timespec duration; - duration.tv_sec = duration_pb->has_seconds ? duration_pb->seconds : 0; - duration.tv_nsec = duration_pb->has_nanos ? duration_pb->nanos : 0; - duration.clock_type = GPR_TIMESPAN; - return duration; +grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb) { + return static_cast( + (duration_pb->has_seconds ? duration_pb->seconds : 0) * GPR_MS_PER_SEC + + (duration_pb->has_nanos ? duration_pb->nanos : 0) / GPR_NS_PER_MS); } void grpc_grpclb_initial_response_destroy( - grpc_grpclb_initial_response *response) { + grpc_grpclb_initial_response* response) { gpr_free(response); } diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h index c4a98492c..d4270f253 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h @@ -19,16 +19,14 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H +#include + #include #include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h" #include "src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h" #include "src/core/ext/filters/client_channel/lb_policy_factory.h" -#ifdef __cplusplus -extern "C" { -#endif - #define GRPC_GRPCLB_SERVICE_NAME_MAX_LENGTH 128 typedef grpc_lb_v1_Server_ip_address_t grpc_grpclb_ip_address; @@ -37,60 +35,54 @@ typedef grpc_lb_v1_InitialLoadBalanceResponse grpc_grpclb_initial_response; typedef grpc_lb_v1_Server grpc_grpclb_server; typedef grpc_lb_v1_Duration grpc_grpclb_duration; typedef struct { - grpc_grpclb_server **servers; + grpc_grpclb_server** servers; size_t num_servers; - grpc_grpclb_duration expiration_interval; } grpc_grpclb_serverlist; /** Create a request for a gRPC LB service under \a lb_service_name */ -grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name); -grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked( - grpc_grpclb_client_stats *client_stats); +grpc_grpclb_request* grpc_grpclb_request_create(const char* lb_service_name); +grpc_grpclb_request* grpc_grpclb_load_report_request_create_locked( + grpc_grpclb_client_stats* client_stats); /** Protocol Buffers v3-encode \a request */ -grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request *request); +grpc_slice grpc_grpclb_request_encode(const grpc_grpclb_request* request); /** Destroy \a request */ -void grpc_grpclb_request_destroy(grpc_grpclb_request *request); +void grpc_grpclb_request_destroy(grpc_grpclb_request* request); /** Parse (ie, decode) the bytes in \a encoded_grpc_grpclb_response as a \a * grpc_grpclb_initial_response */ -grpc_grpclb_initial_response *grpc_grpclb_initial_response_parse( +grpc_grpclb_initial_response* 
grpc_grpclb_initial_response_parse( grpc_slice encoded_grpc_grpclb_response); /** Parse the list of servers from an encoded \a grpc_grpclb_response */ -grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist( +grpc_grpclb_serverlist* grpc_grpclb_response_parse_serverlist( grpc_slice encoded_grpc_grpclb_response); /** Return a copy of \a sl. The caller is responsible for calling \a * grpc_grpclb_destroy_serverlist on the returned copy. */ -grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy( - const grpc_grpclb_serverlist *sl); +grpc_grpclb_serverlist* grpc_grpclb_serverlist_copy( + const grpc_grpclb_serverlist* sl); -bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist *lhs, - const grpc_grpclb_serverlist *rhs); +bool grpc_grpclb_serverlist_equals(const grpc_grpclb_serverlist* lhs, + const grpc_grpclb_serverlist* rhs); -bool grpc_grpclb_server_equals(const grpc_grpclb_server *lhs, - const grpc_grpclb_server *rhs); +bool grpc_grpclb_server_equals(const grpc_grpclb_server* lhs, + const grpc_grpclb_server* rhs); /** Destroy \a serverlist */ -void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist); +void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist* serverlist); /** Compare \a lhs against \a rhs and return 0 if \a lhs and \a rhs are equal, * < 0 if \a lhs represents a duration shorter than \a rhs and > 0 otherwise */ -int grpc_grpclb_duration_compare(const grpc_grpclb_duration *lhs, - const grpc_grpclb_duration *rhs); +int grpc_grpclb_duration_compare(const grpc_grpclb_duration* lhs, + const grpc_grpclb_duration* rhs); -gpr_timespec grpc_grpclb_duration_to_timespec( - grpc_grpclb_duration *duration_pb); +grpc_millis grpc_grpclb_duration_to_millis(grpc_grpclb_duration* duration_pb); /** Destroy \a initial_response */ void grpc_grpclb_initial_response_destroy( - grpc_grpclb_initial_response *response); - -#ifdef __cplusplus -} -#endif + grpc_grpclb_initial_response* response); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c index 6a5d54c82..4e6c5cc83 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c @@ -61,9 +61,8 @@ const pb_field_t grpc_lb_v1_InitialLoadBalanceResponse_fields[3] = { PB_LAST_FIELD }; -const pb_field_t grpc_lb_v1_ServerList_fields[3] = { +const pb_field_t grpc_lb_v1_ServerList_fields[2] = { PB_FIELD( 1, MESSAGE , REPEATED, CALLBACK, FIRST, grpc_lb_v1_ServerList, servers, servers, &grpc_lb_v1_Server_fields), - PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_lb_v1_ServerList, expiration_interval, servers, &grpc_lb_v1_Duration_fields), PB_LAST_FIELD }; @@ -85,7 +84,7 @@ const pb_field_t grpc_lb_v1_Server_fields[5] = { * numbers or field sizes that are larger than what can fit in 8 or 16 bit * field descriptors. 
*/ -PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 65536 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 65536 && pb_membersize(grpc_lb_v1_ServerList, servers) < 65536 && pb_membersize(grpc_lb_v1_ServerList, expiration_interval) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server) +PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 65536 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 65536 && pb_membersize(grpc_lb_v1_ServerList, servers) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server) #endif #if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT) @@ -96,7 +95,7 @@ PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) * numbers or field sizes that are larger than what can fit in the default * 8 bit descriptors. 
*/ -PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 256 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 256 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 256 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 256 && pb_membersize(grpc_lb_v1_ServerList, servers) < 256 && pb_membersize(grpc_lb_v1_ServerList, expiration_interval) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server) +PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 256 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 256 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 256 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 256 && pb_membersize(grpc_lb_v1_ServerList, servers) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server) #endif diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h index 93333d1ae..066c07620 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h @@ -14,6 +14,11 @@ extern "C" { #endif /* Struct definitions */ +typedef struct _grpc_lb_v1_ServerList { + pb_callback_t servers; +/* @@protoc_insertion_point(struct:grpc_lb_v1_ServerList) */ +} grpc_lb_v1_ServerList; + typedef struct _grpc_lb_v1_ClientStatsPerToken { pb_callback_t load_balance_token; bool has_num_calls; @@ -79,13 +84,6 @@ typedef struct _grpc_lb_v1_InitialLoadBalanceResponse { /* @@protoc_insertion_point(struct:grpc_lb_v1_InitialLoadBalanceResponse) */ } grpc_lb_v1_InitialLoadBalanceResponse; -typedef struct _grpc_lb_v1_ServerList { - pb_callback_t servers; - bool has_expiration_interval; - grpc_lb_v1_Duration expiration_interval; -/* @@protoc_insertion_point(struct:grpc_lb_v1_ServerList) */ -} grpc_lb_v1_ServerList; - typedef struct _grpc_lb_v1_LoadBalanceRequest { bool has_initial_request; grpc_lb_v1_InitialLoadBalanceRequest initial_request; @@ -113,7 +111,7 @@ typedef struct _grpc_lb_v1_LoadBalanceResponse { #define grpc_lb_v1_ClientStats_init_default {false, grpc_lb_v1_Timestamp_init_default, false, 0, false, 0, false, 0, false, 0, {{NULL}, NULL}} #define grpc_lb_v1_LoadBalanceResponse_init_default {false, grpc_lb_v1_InitialLoadBalanceResponse_init_default, false, 
grpc_lb_v1_ServerList_init_default} #define grpc_lb_v1_InitialLoadBalanceResponse_init_default {false, "", false, grpc_lb_v1_Duration_init_default} -#define grpc_lb_v1_ServerList_init_default {{{NULL}, NULL}, false, grpc_lb_v1_Duration_init_default} +#define grpc_lb_v1_ServerList_init_default {{{NULL}, NULL}} #define grpc_lb_v1_Server_init_default {false, {0, {0}}, false, 0, false, "", false, 0} #define grpc_lb_v1_Duration_init_zero {false, 0, false, 0} #define grpc_lb_v1_Timestamp_init_zero {false, 0, false, 0} @@ -123,10 +121,11 @@ typedef struct _grpc_lb_v1_LoadBalanceResponse { #define grpc_lb_v1_ClientStats_init_zero {false, grpc_lb_v1_Timestamp_init_zero, false, 0, false, 0, false, 0, false, 0, {{NULL}, NULL}} #define grpc_lb_v1_LoadBalanceResponse_init_zero {false, grpc_lb_v1_InitialLoadBalanceResponse_init_zero, false, grpc_lb_v1_ServerList_init_zero} #define grpc_lb_v1_InitialLoadBalanceResponse_init_zero {false, "", false, grpc_lb_v1_Duration_init_zero} -#define grpc_lb_v1_ServerList_init_zero {{{NULL}, NULL}, false, grpc_lb_v1_Duration_init_zero} +#define grpc_lb_v1_ServerList_init_zero {{{NULL}, NULL}} #define grpc_lb_v1_Server_init_zero {false, {0, {0}}, false, 0, false, "", false, 0} /* Field tags (for use in manual encoding/decoding) */ +#define grpc_lb_v1_ServerList_servers_tag 1 #define grpc_lb_v1_ClientStatsPerToken_load_balance_token_tag 1 #define grpc_lb_v1_ClientStatsPerToken_num_calls_tag 2 #define grpc_lb_v1_Duration_seconds_tag 1 @@ -146,8 +145,6 @@ typedef struct _grpc_lb_v1_LoadBalanceResponse { #define grpc_lb_v1_ClientStats_calls_finished_with_drop_tag 8 #define grpc_lb_v1_InitialLoadBalanceResponse_load_balancer_delegate_tag 1 #define grpc_lb_v1_InitialLoadBalanceResponse_client_stats_report_interval_tag 2 -#define grpc_lb_v1_ServerList_servers_tag 1 -#define grpc_lb_v1_ServerList_expiration_interval_tag 3 #define grpc_lb_v1_LoadBalanceRequest_initial_request_tag 1 #define grpc_lb_v1_LoadBalanceRequest_client_stats_tag 2 #define grpc_lb_v1_LoadBalanceResponse_initial_response_tag 1 @@ -162,7 +159,7 @@ extern const pb_field_t grpc_lb_v1_ClientStatsPerToken_fields[3]; extern const pb_field_t grpc_lb_v1_ClientStats_fields[7]; extern const pb_field_t grpc_lb_v1_LoadBalanceResponse_fields[3]; extern const pb_field_t grpc_lb_v1_InitialLoadBalanceResponse_fields[3]; -extern const pb_field_t grpc_lb_v1_ServerList_fields[3]; +extern const pb_field_t grpc_lb_v1_ServerList_fields[2]; extern const pb_field_t grpc_lb_v1_Server_fields[5]; /* Maximum encoded size of messages (where known) */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c deleted file mode 100644 index d20cbb838..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +++ /dev/null @@ -1,714 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include - -#include - -#include "src/core/ext/filters/client_channel/lb_policy_registry.h" -#include "src/core/ext/filters/client_channel/subchannel.h" -#include "src/core/ext/filters/client_channel/subchannel_index.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/iomgr/combiner.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/transport/connectivity_state.h" - -grpc_tracer_flag grpc_lb_pick_first_trace = - GRPC_TRACER_INITIALIZER(false, "pick_first"); - -typedef struct pending_pick { - struct pending_pick *next; - uint32_t initial_metadata_flags; - grpc_connected_subchannel **target; - grpc_closure *on_complete; -} pending_pick; - -typedef struct { - /** base policy: must be first */ - grpc_lb_policy base; - /** all our subchannels */ - grpc_subchannel **subchannels; - grpc_subchannel **new_subchannels; - size_t num_subchannels; - size_t num_new_subchannels; - - grpc_closure connectivity_changed; - - /** remaining members are protected by the combiner */ - - /** the selected channel */ - grpc_connected_subchannel *selected; - - /** the subchannel key for \a selected, or NULL if \a selected not set */ - const grpc_subchannel_key *selected_key; - - /** have we started picking? */ - bool started_picking; - /** are we shut down? */ - bool shutdown; - /** are we updating the selected subchannel? */ - bool updating_selected; - /** are we updating the subchannel candidates? */ - bool updating_subchannels; - /** args from the latest update received while already updating, or NULL */ - grpc_lb_policy_args *pending_update_args; - /** which subchannel are we watching? */ - size_t checking_subchannel; - /** what is the connectivity of that channel? */ - grpc_connectivity_state checking_connectivity; - /** list of picks that are waiting on connectivity */ - pending_pick *pending_picks; - - /** our connectivity state tracker */ - grpc_connectivity_state_tracker state_tracker; -} pick_first_lb_policy; - -static void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { - pick_first_lb_policy *p = (pick_first_lb_policy *)pol; - GPR_ASSERT(p->pending_picks == NULL); - for (size_t i = 0; i < p->num_subchannels; i++) { - GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first_destroy"); - } - if (p->selected != NULL) { - GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected, - "picked_first_destroy"); - } - grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker); - grpc_subchannel_index_unref(); - if (p->pending_update_args != NULL) { - grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args); - gpr_free(p->pending_update_args); - } - gpr_free(p->subchannels); - gpr_free(p->new_subchannels); - gpr_free(p); - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_DEBUG, "Pick First %p destroyed.", (void *)p); - } -} - -static void pf_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { - pick_first_lb_policy *p = (pick_first_lb_policy *)pol; - pending_pick *pp; - p->shutdown = true; - pp = p->pending_picks; - p->pending_picks = NULL; - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"), "shutdown"); - /* cancel subscription */ - if (p->selected != NULL) { - grpc_connected_subchannel_notify_on_state_change( - exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed); - } else if (p->num_subchannels > 0 && p->started_picking) { - grpc_subchannel_notify_on_state_change( - exec_ctx, 
p->subchannels[p->checking_subchannel], NULL, NULL, - &p->connectivity_changed); - } - while (pp != NULL) { - pending_pick *next = pp->next; - *pp->target = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); - gpr_free(pp); - pp = next; - } -} - -static void pf_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_connected_subchannel **target, - grpc_error *error) { - pick_first_lb_policy *p = (pick_first_lb_policy *)pol; - pending_pick *pp; - pp = p->pending_picks; - p->pending_picks = NULL; - while (pp != NULL) { - pending_pick *next = pp->next; - if (pp->target == target) { - *target = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick Cancelled", &error, 1)); - gpr_free(pp); - } else { - pp->next = p->pending_picks; - p->pending_picks = pp; - } - pp = next; - } - GRPC_ERROR_UNREF(error); -} - -static void pf_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq, - grpc_error *error) { - pick_first_lb_policy *p = (pick_first_lb_policy *)pol; - pending_pick *pp; - pp = p->pending_picks; - p->pending_picks = NULL; - while (pp != NULL) { - pending_pick *next = pp->next; - if ((pp->initial_metadata_flags & initial_metadata_flags_mask) == - initial_metadata_flags_eq) { - GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick Cancelled", &error, 1)); - gpr_free(pp); - } else { - pp->next = p->pending_picks; - p->pending_picks = pp; - } - pp = next; - } - GRPC_ERROR_UNREF(error); -} - -static void start_picking_locked(grpc_exec_ctx *exec_ctx, - pick_first_lb_policy *p) { - p->started_picking = true; - if (p->subchannels != NULL) { - GPR_ASSERT(p->num_subchannels > 0); - p->checking_subchannel = 0; - p->checking_connectivity = GRPC_CHANNEL_IDLE; - GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity"); - grpc_subchannel_notify_on_state_change( - exec_ctx, p->subchannels[p->checking_subchannel], - p->base.interested_parties, &p->checking_connectivity, - &p->connectivity_changed); - } -} - -static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { - pick_first_lb_policy *p = (pick_first_lb_policy *)pol; - if (!p->started_picking) { - start_picking_locked(exec_ctx, p); - } -} - -static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - const grpc_lb_policy_pick_args *pick_args, - grpc_connected_subchannel **target, - grpc_call_context_element *context, void **user_data, - grpc_closure *on_complete) { - pick_first_lb_policy *p = (pick_first_lb_policy *)pol; - pending_pick *pp; - - /* Check atomically for a selected channel */ - if (p->selected != NULL) { - *target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked"); - return 1; - } - - /* No subchannel selected yet, so try again */ - if (!p->started_picking) { - start_picking_locked(exec_ctx, p); - } - pp = (pending_pick *)gpr_malloc(sizeof(*pp)); - pp->next = p->pending_picks; - pp->target = target; - pp->initial_metadata_flags = pick_args->initial_metadata_flags; - pp->on_complete = on_complete; - p->pending_picks = pp; - return 0; -} - -static void destroy_subchannels_locked(grpc_exec_ctx *exec_ctx, - pick_first_lb_policy *p) { - size_t num_subchannels = p->num_subchannels; - grpc_subchannel **subchannels = p->subchannels; - - p->num_subchannels = 0; - p->subchannels = NULL; - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels"); - - for (size_t 
i = 0; i < num_subchannels; i++) { - GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first"); - } - gpr_free(subchannels); -} - -static grpc_connectivity_state pf_check_connectivity_locked( - grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) { - pick_first_lb_policy *p = (pick_first_lb_policy *)pol; - return grpc_connectivity_state_get(&p->state_tracker, error); -} - -static void pf_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *pol, - grpc_connectivity_state *current, - grpc_closure *notify) { - pick_first_lb_policy *p = (pick_first_lb_policy *)pol; - grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker, - current, notify); -} - -static void pf_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_closure *closure) { - pick_first_lb_policy *p = (pick_first_lb_policy *)pol; - if (p->selected) { - grpc_connected_subchannel_ping(exec_ctx, p->selected, closure); - } else { - GRPC_CLOSURE_SCHED(exec_ctx, closure, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected")); - } -} - -/* unsubscribe all subchannels */ -static void stop_connectivity_watchers(grpc_exec_ctx *exec_ctx, - pick_first_lb_policy *p) { - if (p->num_subchannels > 0) { - GPR_ASSERT(p->selected == NULL); - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_DEBUG, "Pick First %p unsubscribing from subchannel %p", - (void *)p, (void *)p->subchannels[p->checking_subchannel]); - } - grpc_subchannel_notify_on_state_change( - exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL, - &p->connectivity_changed); - p->updating_subchannels = true; - } else if (p->selected != NULL) { - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_DEBUG, - "Pick First %p unsubscribing from selected subchannel %p", - (void *)p, (void *)p->selected); - } - grpc_connected_subchannel_notify_on_state_change( - exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed); - p->updating_selected = true; - } -} - -/* true upon success */ -static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - const grpc_lb_policy_args *args) { - pick_first_lb_policy *p = (pick_first_lb_policy *)policy; - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); - if (arg == NULL || arg->type != GRPC_ARG_POINTER) { - if (p->subchannels == NULL) { - // If we don't have a current subchannel list, go into TRANSIENT FAILURE. - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"), - "pf_update_missing"); - } else { - // otherwise, keep using the current subchannel list (ignore this update). - gpr_log(GPR_ERROR, - "No valid LB addresses channel arg for Pick First %p update, " - "ignoring.", - (void *)p); - } - return; - } - const grpc_lb_addresses *addresses = - (const grpc_lb_addresses *)arg->value.pointer.p; - if (addresses->num_addresses == 0) { - // Empty update. Unsubscribe from all current subchannels and put the - // channel in TRANSIENT_FAILURE. 
- grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"), - "pf_update_empty"); - stop_connectivity_watchers(exec_ctx, p); - return; - } - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_INFO, "Pick First %p received update with %lu addresses", - (void *)p, (unsigned long)addresses->num_addresses); - } - grpc_subchannel_args *sc_args = (grpc_subchannel_args *)gpr_zalloc( - sizeof(*sc_args) * addresses->num_addresses); - /* We remove the following keys in order for subchannel keys belonging to - * subchannels point to the same address to match. */ - static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS, - GRPC_ARG_LB_ADDRESSES}; - size_t sc_args_count = 0; - - /* Create list of subchannel args for new addresses in \a args. */ - for (size_t i = 0; i < addresses->num_addresses; i++) { - // If there were any balancer, we would have chosen grpclb policy instead. - GPR_ASSERT(!addresses->addresses[i].is_balancer); - if (addresses->addresses[i].user_data != NULL) { - gpr_log(GPR_ERROR, - "This LB policy doesn't support user data. It will be ignored"); - } - grpc_arg addr_arg = - grpc_create_subchannel_address_arg(&addresses->addresses[i].address); - grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove( - args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg, - 1); - gpr_free(addr_arg.value.string); - sc_args[sc_args_count++].args = new_args; - } - - /* Check if p->selected is amongst them. If so, we are done. */ - if (p->selected != NULL) { - GPR_ASSERT(p->selected_key != NULL); - for (size_t i = 0; i < sc_args_count; i++) { - grpc_subchannel_key *ith_sc_key = grpc_subchannel_key_create(&sc_args[i]); - const bool found_selected = - grpc_subchannel_key_compare(p->selected_key, ith_sc_key) == 0; - grpc_subchannel_key_destroy(exec_ctx, ith_sc_key); - if (found_selected) { - // The currently selected subchannel is in the update: we are done. - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_INFO, - "Pick First %p found already selected subchannel %p amongst " - "updates. Update done.", - (void *)p, (void *)p->selected); - } - for (size_t j = 0; j < sc_args_count; j++) { - grpc_channel_args_destroy(exec_ctx, - (grpc_channel_args *)sc_args[j].args); - } - gpr_free(sc_args); - return; - } - } - } - // We only check for already running updates here because if the previous - // steps were successful, the update can be considered done without any - // interference (ie, no callbacks were scheduled). - if (p->updating_selected || p->updating_subchannels) { - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_INFO, - "Update already in progress for pick first %p. Deferring update.", - (void *)p); - } - if (p->pending_update_args != NULL) { - grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args); - gpr_free(p->pending_update_args); - } - p->pending_update_args = - (grpc_lb_policy_args *)gpr_zalloc(sizeof(*p->pending_update_args)); - p->pending_update_args->client_channel_factory = - args->client_channel_factory; - p->pending_update_args->args = grpc_channel_args_copy(args->args); - p->pending_update_args->combiner = args->combiner; - return; - } - /* Create the subchannels for the new subchannel args/addresses. 
*/ - grpc_subchannel **new_subchannels = - (grpc_subchannel **)gpr_zalloc(sizeof(*new_subchannels) * sc_args_count); - size_t num_new_subchannels = 0; - for (size_t i = 0; i < sc_args_count; i++) { - grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel( - exec_ctx, args->client_channel_factory, &sc_args[i]); - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - char *address_uri = - grpc_sockaddr_to_uri(&addresses->addresses[i].address); - gpr_log(GPR_INFO, - "Pick First %p created subchannel %p for address uri %s", - (void *)p, (void *)subchannel, address_uri); - gpr_free(address_uri); - } - grpc_channel_args_destroy(exec_ctx, (grpc_channel_args *)sc_args[i].args); - if (subchannel != NULL) new_subchannels[num_new_subchannels++] = subchannel; - } - gpr_free(sc_args); - if (num_new_subchannels == 0) { - gpr_free(new_subchannels); - // Empty update. Unsubscribe from all current subchannels and put the - // channel in TRANSIENT_FAILURE. - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("No valid addresses in update"), - "pf_update_no_valid_addresses"); - stop_connectivity_watchers(exec_ctx, p); - return; - } - - /* Destroy the current subchannels. Repurpose pf_shutdown/destroy. */ - stop_connectivity_watchers(exec_ctx, p); - - /* Save new subchannels. The switch over will happen in - * pf_connectivity_changed_locked */ - if (p->updating_selected || p->updating_subchannels) { - p->num_new_subchannels = num_new_subchannels; - p->new_subchannels = new_subchannels; - } else { /* nothing is updating. Get things moving from here */ - p->num_subchannels = num_new_subchannels; - p->subchannels = new_subchannels; - p->new_subchannels = NULL; - p->num_new_subchannels = 0; - if (p->started_picking) { - p->checking_subchannel = 0; - p->checking_connectivity = GRPC_CHANNEL_IDLE; - grpc_subchannel_notify_on_state_change( - exec_ctx, p->subchannels[p->checking_subchannel], - p->base.interested_parties, &p->checking_connectivity, - &p->connectivity_changed); - } - } -} - -static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - pick_first_lb_policy *p = (pick_first_lb_policy *)arg; - grpc_subchannel *selected_subchannel; - pending_pick *pp; - - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log( - GPR_DEBUG, - "Pick First %p connectivity changed. 
Updating selected: %d; Updating " - "subchannels: %d; Checking %lu index (%lu total); State: %d; ", - (void *)p, p->updating_selected, p->updating_subchannels, - (unsigned long)p->checking_subchannel, - (unsigned long)p->num_subchannels, p->checking_connectivity); - } - bool restart = false; - if (p->updating_selected && error != GRPC_ERROR_NONE) { - /* Captured the unsubscription for p->selected */ - GPR_ASSERT(p->selected != NULL); - GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected, - "pf_update_connectivity"); - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_DEBUG, "Pick First %p unreffing selected subchannel %p", - (void *)p, (void *)p->selected); - } - p->updating_selected = false; - if (p->num_new_subchannels == 0) { - p->selected = NULL; - return; - } - restart = true; - } - if (p->updating_subchannels && error != GRPC_ERROR_NONE) { - /* Captured the unsubscription for the checking subchannel */ - GPR_ASSERT(p->selected == NULL); - for (size_t i = 0; i < p->num_subchannels; i++) { - GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], - "pf_update_connectivity"); - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_DEBUG, "Pick First %p unreffing subchannel %p", (void *)p, - (void *)p->subchannels[i]); - } - } - gpr_free(p->subchannels); - p->subchannels = NULL; - p->num_subchannels = 0; - p->updating_subchannels = false; - if (p->num_new_subchannels == 0) return; - restart = true; - } - if (restart) { - p->selected = NULL; - p->selected_key = NULL; - GPR_ASSERT(p->new_subchannels != NULL); - GPR_ASSERT(p->num_new_subchannels > 0); - p->num_subchannels = p->num_new_subchannels; - p->subchannels = p->new_subchannels; - p->num_new_subchannels = 0; - p->new_subchannels = NULL; - if (p->started_picking) { - /* If we were picking, continue to do so over the new subchannels, - * starting from the 0th index. 
*/ - p->checking_subchannel = 0; - p->checking_connectivity = GRPC_CHANNEL_IDLE; - /* reuses the weak ref from start_picking_locked */ - grpc_subchannel_notify_on_state_change( - exec_ctx, p->subchannels[p->checking_subchannel], - p->base.interested_parties, &p->checking_connectivity, - &p->connectivity_changed); - } - if (p->pending_update_args != NULL) { - const grpc_lb_policy_args *args = p->pending_update_args; - p->pending_update_args = NULL; - pf_update_locked(exec_ctx, &p->base, args); - } - return; - } - GRPC_ERROR_REF(error); - if (p->shutdown) { - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity"); - GRPC_ERROR_UNREF(error); - return; - } else if (p->selected != NULL) { - if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) { - /* if the selected channel goes bad, we're done */ - p->checking_connectivity = GRPC_CHANNEL_SHUTDOWN; - } - grpc_connectivity_state_set(exec_ctx, &p->state_tracker, - p->checking_connectivity, GRPC_ERROR_REF(error), - "selected_changed"); - if (p->checking_connectivity != GRPC_CHANNEL_SHUTDOWN) { - grpc_connected_subchannel_notify_on_state_change( - exec_ctx, p->selected, p->base.interested_parties, - &p->checking_connectivity, &p->connectivity_changed); - } else { - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity"); - } - } else { - loop: - switch (p->checking_connectivity) { - case GRPC_CHANNEL_INIT: - GPR_UNREACHABLE_CODE(return ); - case GRPC_CHANNEL_READY: - grpc_connectivity_state_set(exec_ctx, &p->state_tracker, - GRPC_CHANNEL_READY, GRPC_ERROR_NONE, - "connecting_ready"); - selected_subchannel = p->subchannels[p->checking_subchannel]; - p->selected = GRPC_CONNECTED_SUBCHANNEL_REF( - grpc_subchannel_get_connected_subchannel(selected_subchannel), - "picked_first"); - - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_INFO, - "Pick First %p selected subchannel %p (connected %p)", - (void *)p, (void *)selected_subchannel, (void *)p->selected); - } - p->selected_key = grpc_subchannel_get_key(selected_subchannel); - /* drop the pick list: we are connected now */ - GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels"); - destroy_subchannels_locked(exec_ctx, p); - /* update any calls that were waiting for a pick */ - while ((pp = p->pending_picks)) { - p->pending_picks = pp->next; - *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked"); - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_INFO, - "Servicing pending pick with selected subchannel %p", - (void *)p->selected); - } - GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); - gpr_free(pp); - } - grpc_connected_subchannel_notify_on_state_change( - exec_ctx, p->selected, p->base.interested_parties, - &p->checking_connectivity, &p->connectivity_changed); - break; - case GRPC_CHANNEL_TRANSIENT_FAILURE: - p->checking_subchannel = - (p->checking_subchannel + 1) % p->num_subchannels; - if (p->checking_subchannel == 0) { - /* only trigger transient failure when we've tried all alternatives - */ - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_REF(error), "connecting_transient_failure"); - } - GRPC_ERROR_UNREF(error); - p->checking_connectivity = grpc_subchannel_check_connectivity( - p->subchannels[p->checking_subchannel], &error); - if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) { - grpc_subchannel_notify_on_state_change( - exec_ctx, p->subchannels[p->checking_subchannel], - p->base.interested_parties, 
&p->checking_connectivity, - &p->connectivity_changed); - } else { - goto loop; - } - break; - case GRPC_CHANNEL_CONNECTING: - case GRPC_CHANNEL_IDLE: - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING, - GRPC_ERROR_REF(error), "connecting_changed"); - grpc_subchannel_notify_on_state_change( - exec_ctx, p->subchannels[p->checking_subchannel], - p->base.interested_parties, &p->checking_connectivity, - &p->connectivity_changed); - break; - case GRPC_CHANNEL_SHUTDOWN: - p->num_subchannels--; - GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel], - p->subchannels[p->num_subchannels]); - GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels], - "pick_first"); - if (p->num_subchannels == 0) { - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick first exhausted channels", &error, 1), - "no_more_channels"); - while ((pp = p->pending_picks)) { - p->pending_picks = pp->next; - *pp->target = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); - gpr_free(pp); - } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, - "pick_first_connectivity"); - } else { - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_REF(error), "subchannel_failed"); - p->checking_subchannel %= p->num_subchannels; - GRPC_ERROR_UNREF(error); - p->checking_connectivity = grpc_subchannel_check_connectivity( - p->subchannels[p->checking_subchannel], &error); - goto loop; - } - } - } - - GRPC_ERROR_UNREF(error); -} - -static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = { - pf_destroy, - pf_shutdown_locked, - pf_pick_locked, - pf_cancel_pick_locked, - pf_cancel_picks_locked, - pf_ping_one_locked, - pf_exit_idle_locked, - pf_check_connectivity_locked, - pf_notify_on_state_change_locked, - pf_update_locked}; - -static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {} - -static void pick_first_factory_unref(grpc_lb_policy_factory *factory) {} - -static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx, - grpc_lb_policy_factory *factory, - grpc_lb_policy_args *args) { - GPR_ASSERT(args->client_channel_factory != NULL); - pick_first_lb_policy *p = (pick_first_lb_policy *)gpr_zalloc(sizeof(*p)); - if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) { - gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p); - } - pf_update_locked(exec_ctx, &p->base, args); - grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner); - grpc_subchannel_index_ref(); - GRPC_CLOSURE_INIT(&p->connectivity_changed, pf_connectivity_changed_locked, p, - grpc_combiner_scheduler(args->combiner)); - return &p->base; -} - -static const grpc_lb_policy_factory_vtable pick_first_factory_vtable = { - pick_first_factory_ref, pick_first_factory_unref, create_pick_first, - "pick_first"}; - -static grpc_lb_policy_factory pick_first_lb_policy_factory = { - &pick_first_factory_vtable}; - -static grpc_lb_policy_factory *pick_first_lb_factory_create() { - return &pick_first_lb_policy_factory; -} - -/* Plugin registration */ - -void grpc_lb_policy_pick_first_init() { - grpc_register_lb_policy(pick_first_lb_factory_create()); - grpc_register_tracer(&grpc_lb_pick_first_trace); -} - -void grpc_lb_policy_pick_first_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc new file 
mode 100644 index 000000000..76df97669 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc @@ -0,0 +1,562 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include + +#include + +#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/subchannel.h" +#include "src/core/ext/filters/client_channel/subchannel_index.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/transport/connectivity_state.h" + +namespace grpc_core { + +TraceFlag grpc_lb_pick_first_trace(false, "pick_first"); + +namespace { + +// +// pick_first LB policy +// + +class PickFirst : public LoadBalancingPolicy { + public: + explicit PickFirst(const Args& args); + + void UpdateLocked(const grpc_channel_args& args) override; + bool PickLocked(PickState* pick) override; + void CancelPickLocked(PickState* pick, grpc_error* error) override; + void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask, + uint32_t initial_metadata_flags_eq, + grpc_error* error) override; + void NotifyOnStateChangeLocked(grpc_connectivity_state* state, + grpc_closure* closure) override; + grpc_connectivity_state CheckConnectivityLocked( + grpc_error** connectivity_error) override; + void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override; + void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override; + void ExitIdleLocked() override; + + private: + ~PickFirst(); + + class PickFirstSubchannelList; + + class PickFirstSubchannelData + : public SubchannelData { + public: + PickFirstSubchannelData(PickFirstSubchannelList* subchannel_list, + const grpc_lb_user_data_vtable* user_data_vtable, + const grpc_lb_address& address, + grpc_subchannel* subchannel, + grpc_combiner* combiner) + : SubchannelData(subchannel_list, user_data_vtable, address, subchannel, + combiner) {} + + void ProcessConnectivityChangeLocked( + grpc_connectivity_state connectivity_state, grpc_error* error) override; + }; + + class PickFirstSubchannelList + : public SubchannelList { + public: + PickFirstSubchannelList(PickFirst* policy, TraceFlag* tracer, + const grpc_lb_addresses* addresses, + grpc_combiner* combiner, + grpc_client_channel_factory* client_channel_factory, + const grpc_channel_args& args) + : SubchannelList(policy, tracer, addresses, combiner, + client_channel_factory, args) { + // Need to maintain a ref to the LB policy as long as we maintain + // any references to subchannels, since the subchannels' + // pollset_sets will include the LB policy's pollset_set. 
+ policy->Ref(DEBUG_LOCATION, "subchannel_list").release(); + } + + ~PickFirstSubchannelList() { + PickFirst* p = static_cast(policy()); + p->Unref(DEBUG_LOCATION, "subchannel_list"); + } + }; + + void ShutdownLocked() override; + + void StartPickingLocked(); + void DestroyUnselectedSubchannelsLocked(); + + // All our subchannels. + OrphanablePtr subchannel_list_; + // Latest pending subchannel list. + OrphanablePtr latest_pending_subchannel_list_; + // Selected subchannel in \a subchannel_list_. + PickFirstSubchannelData* selected_ = nullptr; + // Have we started picking? + bool started_picking_ = false; + // Are we shut down? + bool shutdown_ = false; + // List of picks that are waiting on connectivity. + PickState* pending_picks_ = nullptr; + // Our connectivity state tracker. + grpc_connectivity_state_tracker state_tracker_; +}; + +PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) { + GPR_ASSERT(args.client_channel_factory != nullptr); + grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE, + "pick_first"); + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, "Pick First %p created.", this); + } + UpdateLocked(*args.args); + grpc_subchannel_index_ref(); +} + +PickFirst::~PickFirst() { + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, "Destroying Pick First %p", this); + } + GPR_ASSERT(subchannel_list_ == nullptr); + GPR_ASSERT(latest_pending_subchannel_list_ == nullptr); + GPR_ASSERT(pending_picks_ == nullptr); + grpc_connectivity_state_destroy(&state_tracker_); + grpc_subchannel_index_unref(); +} + +void PickFirst::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) { + PickState* pick; + while ((pick = pending_picks_) != nullptr) { + pending_picks_ = pick->next; + if (new_policy->PickLocked(pick)) { + // Synchronous return, schedule closure. 
+ GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE); + } + } +} + +void PickFirst::ShutdownLocked() { + grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"); + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, "Pick First %p Shutting down", this); + } + shutdown_ = true; + PickState* pick; + while ((pick = pending_picks_) != nullptr) { + pending_picks_ = pick->next; + pick->connected_subchannel.reset(); + GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error)); + } + grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN, + GRPC_ERROR_REF(error), "shutdown"); + subchannel_list_.reset(); + latest_pending_subchannel_list_.reset(); + TryReresolutionLocked(&grpc_lb_pick_first_trace, GRPC_ERROR_CANCELLED); + GRPC_ERROR_UNREF(error); +} + +void PickFirst::CancelPickLocked(PickState* pick, grpc_error* error) { + PickState* pp = pending_picks_; + pending_picks_ = nullptr; + while (pp != nullptr) { + PickState* next = pp->next; + if (pp == pick) { + pick->connected_subchannel.reset(); + GRPC_CLOSURE_SCHED(pick->on_complete, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick Cancelled", &error, 1)); + } else { + pp->next = pending_picks_; + pending_picks_ = pp; + } + pp = next; + } + GRPC_ERROR_UNREF(error); +} + +void PickFirst::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask, + uint32_t initial_metadata_flags_eq, + grpc_error* error) { + PickState* pick = pending_picks_; + pending_picks_ = nullptr; + while (pick != nullptr) { + PickState* next = pick->next; + if ((pick->initial_metadata_flags & initial_metadata_flags_mask) == + initial_metadata_flags_eq) { + GRPC_CLOSURE_SCHED(pick->on_complete, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick Cancelled", &error, 1)); + } else { + pick->next = pending_picks_; + pending_picks_ = pick; + } + pick = next; + } + GRPC_ERROR_UNREF(error); +} + +void PickFirst::StartPickingLocked() { + started_picking_ = true; + if (subchannel_list_ != nullptr) { + for (size_t i = 0; i < subchannel_list_->num_subchannels(); ++i) { + if (subchannel_list_->subchannel(i)->subchannel() != nullptr) { + subchannel_list_->subchannel(i)->StartConnectivityWatchLocked(); + break; + } + } + } +} + +void PickFirst::ExitIdleLocked() { + if (!started_picking_) { + StartPickingLocked(); + } +} + +bool PickFirst::PickLocked(PickState* pick) { + // If we have a selected subchannel already, return synchronously. + if (selected_ != nullptr) { + pick->connected_subchannel = selected_->connected_subchannel()->Ref(); + return true; + } + // No subchannel selected yet, so handle asynchronously. 
+ if (!started_picking_) { + StartPickingLocked(); + } + pick->next = pending_picks_; + pending_picks_ = pick; + return false; +} + +void PickFirst::DestroyUnselectedSubchannelsLocked() { + for (size_t i = 0; i < subchannel_list_->num_subchannels(); ++i) { + PickFirstSubchannelData* sd = subchannel_list_->subchannel(i); + if (selected_ != sd) { + sd->UnrefSubchannelLocked("selected_different_subchannel"); + } + } +} + +grpc_connectivity_state PickFirst::CheckConnectivityLocked(grpc_error** error) { + return grpc_connectivity_state_get(&state_tracker_, error); +} + +void PickFirst::NotifyOnStateChangeLocked(grpc_connectivity_state* current, + grpc_closure* notify) { + grpc_connectivity_state_notify_on_state_change(&state_tracker_, current, + notify); +} + +void PickFirst::PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) { + if (selected_ != nullptr) { + selected_->connected_subchannel()->Ping(on_initiate, on_ack); + } else { + GRPC_CLOSURE_SCHED(on_initiate, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected")); + GRPC_CLOSURE_SCHED(on_ack, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected")); + } +} + +void PickFirst::UpdateLocked(const grpc_channel_args& args) { + const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES); + if (arg == nullptr || arg->type != GRPC_ARG_POINTER) { + if (subchannel_list_ == nullptr) { + // If we don't have a current subchannel list, go into TRANSIENT FAILURE. + grpc_connectivity_state_set( + &state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"), + "pf_update_missing"); + } else { + // otherwise, keep using the current subchannel list (ignore this update). + gpr_log(GPR_ERROR, + "No valid LB addresses channel arg for Pick First %p update, " + "ignoring.", + this); + } + return; + } + const grpc_lb_addresses* addresses = + static_cast(arg->value.pointer.p); + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, + "Pick First %p received update with %" PRIuPTR " addresses", this, + addresses->num_addresses); + } + auto subchannel_list = MakeOrphanable( + this, &grpc_lb_pick_first_trace, addresses, combiner(), + client_channel_factory(), args); + if (subchannel_list->num_subchannels() == 0) { + // Empty update or no valid subchannels. Unsubscribe from all current + // subchannels and put the channel in TRANSIENT_FAILURE. + grpc_connectivity_state_set( + &state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"), + "pf_update_empty"); + subchannel_list_ = std::move(subchannel_list); // Empty list. + selected_ = nullptr; + return; + } + if (selected_ == nullptr) { + // We don't yet have a selected subchannel, so replace the current + // subchannel list immediately. + subchannel_list_ = std::move(subchannel_list); + // If we've started picking, start trying to connect to the first + // subchannel in the new list. + if (started_picking_) { + subchannel_list_->subchannel(0)->StartConnectivityWatchLocked(); + } + } else { + // We do have a selected subchannel. + // Check if it's present in the new list. If so, we're done. + for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) { + PickFirstSubchannelData* sd = subchannel_list->subchannel(i); + if (sd->subchannel() == selected_->subchannel()) { + // The currently selected subchannel is in the update: we are done. 
+ if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, + "Pick First %p found already selected subchannel %p " + "at update index %" PRIuPTR " of %" PRIuPTR "; update done", + this, selected_->subchannel(), i, + subchannel_list->num_subchannels()); + } + // Make sure it's in state READY. It might not be if we grabbed + // the combiner while a connectivity state notification + // informing us otherwise is pending. + // Note that CheckConnectivityStateLocked() also takes a ref to + // the connected subchannel. + grpc_error* error = GRPC_ERROR_NONE; + if (sd->CheckConnectivityStateLocked(&error) == GRPC_CHANNEL_READY) { + selected_ = sd; + subchannel_list_ = std::move(subchannel_list); + DestroyUnselectedSubchannelsLocked(); + sd->StartConnectivityWatchLocked(); + // If there was a previously pending update (which may or may + // not have contained the currently selected subchannel), drop + // it, so that it doesn't override what we've done here. + latest_pending_subchannel_list_.reset(); + return; + } + GRPC_ERROR_UNREF(error); + } + } + // Not keeping the previous selected subchannel, so set the latest + // pending subchannel list to the new subchannel list. We will wait + // for it to report READY before swapping it into the current + // subchannel list. + if (latest_pending_subchannel_list_ != nullptr) { + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, + "Pick First %p Shutting down latest pending subchannel list " + "%p, about to be replaced by newer latest %p", + this, latest_pending_subchannel_list_.get(), + subchannel_list.get()); + } + } + latest_pending_subchannel_list_ = std::move(subchannel_list); + // If we've started picking, start trying to connect to the first + // subchannel in the new list. + if (started_picking_) { + latest_pending_subchannel_list_->subchannel(0) + ->StartConnectivityWatchLocked(); + } + } +} + +void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked( + grpc_connectivity_state connectivity_state, grpc_error* error) { + PickFirst* p = static_cast(subchannel_list()->policy()); + // The notification must be for a subchannel in either the current or + // latest pending subchannel lists. + GPR_ASSERT(subchannel_list() == p->subchannel_list_.get() || + subchannel_list() == p->latest_pending_subchannel_list_.get()); + // Handle updates for the currently selected subchannel. + if (p->selected_ == this) { + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, + "Pick First %p connectivity changed for selected subchannel", p); + } + // If the new state is anything other than READY and there is a + // pending update, switch to the pending update. + if (connectivity_state != GRPC_CHANNEL_READY && + p->latest_pending_subchannel_list_ != nullptr) { + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, + "Pick First %p promoting pending subchannel list %p to " + "replace %p", + p, p->latest_pending_subchannel_list_.get(), + p->subchannel_list_.get()); + } + p->selected_ = nullptr; + StopConnectivityWatchLocked(); + p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_); + grpc_connectivity_state_set( + &p->state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, + error != GRPC_ERROR_NONE + ? 
GRPC_ERROR_REF(error) + : GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "selected subchannel not ready; switching to pending " + "update"), + "selected_not_ready+switch_to_update"); + } else { + // TODO(juanlishen): we re-resolve when the selected subchannel goes to + // TRANSIENT_FAILURE because we used to shut down in this case before + // re-resolution is introduced. But we need to investigate whether we + // really want to take any action instead of waiting for the selected + // subchannel reconnecting. + GPR_ASSERT(connectivity_state != GRPC_CHANNEL_SHUTDOWN); + if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { + // If the selected channel goes bad, request a re-resolution. + grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_IDLE, + GRPC_ERROR_NONE, + "selected_changed+reresolve"); + p->started_picking_ = false; + p->TryReresolutionLocked(&grpc_lb_pick_first_trace, GRPC_ERROR_NONE); + // In transient failure. Rely on re-resolution to recover. + p->selected_ = nullptr; + UnrefSubchannelLocked("pf_selected_shutdown"); + StopConnectivityWatchLocked(); + } else { + grpc_connectivity_state_set(&p->state_tracker_, connectivity_state, + GRPC_ERROR_REF(error), "selected_changed"); + // Renew notification. + RenewConnectivityWatchLocked(); + } + } + GRPC_ERROR_UNREF(error); + return; + } + // If we get here, there are two possible cases: + // 1. We do not currently have a selected subchannel, and the update is + // for a subchannel in p->subchannel_list_ that we're trying to + // connect to. The goal here is to find a subchannel that we can + // select. + // 2. We do currently have a selected subchannel, and the update is + // for a subchannel in p->latest_pending_subchannel_list_. The + // goal here is to find a subchannel from the update that we can + // select in place of the current one. + switch (connectivity_state) { + case GRPC_CHANNEL_READY: { + // Case 2. Promote p->latest_pending_subchannel_list_ to + // p->subchannel_list_. + if (subchannel_list() == p->latest_pending_subchannel_list_.get()) { + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, + "Pick First %p promoting pending subchannel list %p to " + "replace %p", + p, p->latest_pending_subchannel_list_.get(), + p->subchannel_list_.get()); + } + p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_); + } + // Cases 1 and 2. + grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_READY, + GRPC_ERROR_NONE, "connecting_ready"); + p->selected_ = this; + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", p, + subchannel()); + } + // Drop all other subchannels, since we are now connected. + p->DestroyUnselectedSubchannelsLocked(); + // Update any calls that were waiting for a pick. + PickState* pick; + while ((pick = p->pending_picks_)) { + p->pending_picks_ = pick->next; + pick->connected_subchannel = + p->selected_->connected_subchannel()->Ref(); + if (grpc_lb_pick_first_trace.enabled()) { + gpr_log(GPR_INFO, + "Servicing pending pick with selected subchannel %p", + p->selected_); + } + GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE); + } + // Renew notification. 
+ RenewConnectivityWatchLocked(); + break; + } + case GRPC_CHANNEL_TRANSIENT_FAILURE: { + StopConnectivityWatchLocked(); + PickFirstSubchannelData* sd = this; + do { + size_t next_index = + (sd->Index() + 1) % subchannel_list()->num_subchannels(); + sd = subchannel_list()->subchannel(next_index); + } while (sd->subchannel() == nullptr); + // Case 1: Only set state to TRANSIENT_FAILURE if we've tried + // all subchannels. + if (sd->Index() == 0 && subchannel_list() == p->subchannel_list_.get()) { + grpc_connectivity_state_set( + &p->state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, + GRPC_ERROR_REF(error), "connecting_transient_failure"); + } + sd->StartConnectivityWatchLocked(); + break; + } + case GRPC_CHANNEL_CONNECTING: + case GRPC_CHANNEL_IDLE: { + // Only update connectivity state in case 1. + if (subchannel_list() == p->subchannel_list_.get()) { + grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_CONNECTING, + GRPC_ERROR_REF(error), + "connecting_changed"); + } + // Renew notification. + RenewConnectivityWatchLocked(); + break; + } + case GRPC_CHANNEL_SHUTDOWN: + GPR_UNREACHABLE_CODE(break); + } + GRPC_ERROR_UNREF(error); +} + +// +// factory +// + +class PickFirstFactory : public LoadBalancingPolicyFactory { + public: + OrphanablePtr CreateLoadBalancingPolicy( + const LoadBalancingPolicy::Args& args) const override { + return OrphanablePtr(New(args)); + } + + const char* name() const override { return "pick_first"; } +}; + +} // namespace + +} // namespace grpc_core + +void grpc_lb_policy_pick_first_init() { + grpc_core::LoadBalancingPolicyRegistry::Builder:: + RegisterLoadBalancingPolicyFactory( + grpc_core::UniquePtr( + grpc_core::New())); +} + +void grpc_lb_policy_pick_first_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c deleted file mode 100644 index a3a62e9f3..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +++ /dev/null @@ -1,924 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** Round Robin Policy. - * - * Before every pick, the \a get_next_ready_subchannel_index_locked function - * returns the p->subchannel_list->subchannels index for next subchannel, - * respecting the relative - * order of the addresses provided upon creation or updates. Note however that - * updates will start picking from the beginning of the updated list. 
*/ - -#include - -#include - -#include "src/core/ext/filters/client_channel/lb_policy_registry.h" -#include "src/core/ext/filters/client_channel/subchannel.h" -#include "src/core/ext/filters/client_channel/subchannel_index.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/combiner.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/transport/connectivity_state.h" -#include "src/core/lib/transport/static_metadata.h" - -grpc_tracer_flag grpc_lb_round_robin_trace = - GRPC_TRACER_INITIALIZER(false, "round_robin"); - -/** List of entities waiting for a pick. - * - * Once a pick is available, \a target is updated and \a on_complete called. */ -typedef struct pending_pick { - struct pending_pick *next; - - /* output argument where to store the pick()ed user_data. It'll be NULL if no - * such data is present or there's an error (the definite test for errors is - * \a target being NULL). */ - void **user_data; - - /* bitmask passed to pick() and used for selective cancelling. See - * grpc_lb_policy_cancel_picks() */ - uint32_t initial_metadata_flags; - - /* output argument where to store the pick()ed connected subchannel, or NULL - * upon error. */ - grpc_connected_subchannel **target; - - /* to be invoked once the pick() has completed (regardless of success) */ - grpc_closure *on_complete; -} pending_pick; - -typedef struct rr_subchannel_list rr_subchannel_list; -typedef struct round_robin_lb_policy { - /** base policy: must be first */ - grpc_lb_policy base; - - rr_subchannel_list *subchannel_list; - - /** have we started picking? */ - bool started_picking; - /** are we shutting down? */ - bool shutdown; - /** List of picks that are waiting on connectivity */ - pending_pick *pending_picks; - - /** our connectivity state tracker */ - grpc_connectivity_state_tracker state_tracker; - - /** Index into subchannels for last pick. */ - size_t last_ready_subchannel_index; - - /** Latest version of the subchannel list. - * Subchannel connectivity callbacks will only promote updated subchannel - * lists if they equal \a latest_pending_subchannel_list. In other words, - * racing callbacks that reference outdated subchannel lists won't perform any - * update. */ - rr_subchannel_list *latest_pending_subchannel_list; -} round_robin_lb_policy; - -typedef struct { - /** backpointer to owning subchannel list */ - rr_subchannel_list *subchannel_list; - /** subchannel itself */ - grpc_subchannel *subchannel; - /** notification that connectivity has changed on subchannel */ - grpc_closure connectivity_changed_closure; - /** last observed connectivity. Not updated by - * \a grpc_subchannel_notify_on_state_change. Used to determine the previous - * state while processing the new state in \a rr_connectivity_changed */ - grpc_connectivity_state prev_connectivity_state; - /** current connectivity state. Updated by \a - * grpc_subchannel_notify_on_state_change */ - grpc_connectivity_state curr_connectivity_state; - /** connectivity state to be updated by the watcher, not guarded by - * the combiner. Will be moved to curr_connectivity_state inside of - * the combiner by rr_connectivity_changed_locked(). 
*/ - grpc_connectivity_state pending_connectivity_state_unsafe; - /** the subchannel's target user data */ - void *user_data; - /** vtable to operate over \a user_data */ - const grpc_lb_user_data_vtable *user_data_vtable; -} subchannel_data; - -struct rr_subchannel_list { - /** backpointer to owning policy */ - round_robin_lb_policy *policy; - - /** all our subchannels */ - size_t num_subchannels; - subchannel_data *subchannels; - - /** how many subchannels are in state READY */ - size_t num_ready; - /** how many subchannels are in state TRANSIENT_FAILURE */ - size_t num_transient_failures; - /** how many subchannels are in state SHUTDOWN */ - size_t num_shutdown; - /** how many subchannels are in state IDLE */ - size_t num_idle; - - /** There will be one ref for each entry in subchannels for which there is a - * pending connectivity state watcher callback. */ - gpr_refcount refcount; - - /** Is this list shutting down? This may be true due to the shutdown of the - * policy itself or because a newer update has arrived while this one hadn't - * finished processing. */ - bool shutting_down; -}; - -static rr_subchannel_list *rr_subchannel_list_create(round_robin_lb_policy *p, - size_t num_subchannels) { - rr_subchannel_list *subchannel_list = - (rr_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list)); - subchannel_list->policy = p; - subchannel_list->subchannels = - (subchannel_data *)gpr_zalloc(sizeof(subchannel_data) * num_subchannels); - subchannel_list->num_subchannels = num_subchannels; - gpr_ref_init(&subchannel_list->refcount, 1); - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_INFO, "[RR %p] Created subchannel list %p for %lu subchannels", - (void *)p, (void *)subchannel_list, (unsigned long)num_subchannels); - } - return subchannel_list; -} - -static void rr_subchannel_list_destroy(grpc_exec_ctx *exec_ctx, - rr_subchannel_list *subchannel_list) { - GPR_ASSERT(subchannel_list->shutting_down); - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_INFO, "[RR %p] Destroying subchannel_list %p", - (void *)subchannel_list->policy, (void *)subchannel_list); - } - for (size_t i = 0; i < subchannel_list->num_subchannels; i++) { - subchannel_data *sd = &subchannel_list->subchannels[i]; - if (sd->subchannel != NULL) { - GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, - "rr_subchannel_list_destroy"); - } - sd->subchannel = NULL; - if (sd->user_data != NULL) { - GPR_ASSERT(sd->user_data_vtable != NULL); - sd->user_data_vtable->destroy(exec_ctx, sd->user_data); - sd->user_data = NULL; - } - } - gpr_free(subchannel_list->subchannels); - gpr_free(subchannel_list); -} - -static void rr_subchannel_list_ref(rr_subchannel_list *subchannel_list, - const char *reason) { - gpr_ref_non_zero(&subchannel_list->refcount); - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count); - gpr_log(GPR_INFO, "[RR %p] subchannel_list %p REF %lu->%lu (%s)", - (void *)subchannel_list->policy, (void *)subchannel_list, - (unsigned long)(count - 1), (unsigned long)count, reason); - } -} - -static void rr_subchannel_list_unref(grpc_exec_ctx *exec_ctx, - rr_subchannel_list *subchannel_list, - const char *reason) { - const bool done = gpr_unref(&subchannel_list->refcount); - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - const gpr_atm count = gpr_atm_acq_load(&subchannel_list->refcount.count); - gpr_log(GPR_INFO, "[RR %p] subchannel_list %p UNREF %lu->%lu (%s)", - (void *)subchannel_list->policy, (void *)subchannel_list, - 
(unsigned long)(count + 1), (unsigned long)count, reason); - } - if (done) { - rr_subchannel_list_destroy(exec_ctx, subchannel_list); - } -} - -/** Mark \a subchannel_list as discarded. Unsubscribes all its subchannels. The - * watcher's callback will ultimately unref \a subchannel_list. */ -static void rr_subchannel_list_shutdown_and_unref( - grpc_exec_ctx *exec_ctx, rr_subchannel_list *subchannel_list, - const char *reason) { - GPR_ASSERT(!subchannel_list->shutting_down); - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_DEBUG, "[RR %p] Shutting down subchannel_list %p (%s)", - (void *)subchannel_list->policy, (void *)subchannel_list, reason); - } - GPR_ASSERT(!subchannel_list->shutting_down); - subchannel_list->shutting_down = true; - for (size_t i = 0; i < subchannel_list->num_subchannels; i++) { - subchannel_data *sd = &subchannel_list->subchannels[i]; - if (sd->subchannel != NULL) { // if subchannel isn't shutdown, unsubscribe. - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log( - GPR_DEBUG, - "[RR %p] Unsubscribing from subchannel %p as part of shutting down " - "subchannel_list %p", - (void *)subchannel_list->policy, (void *)sd->subchannel, - (void *)subchannel_list); - } - grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, - NULL, - &sd->connectivity_changed_closure); - } - } - rr_subchannel_list_unref(exec_ctx, subchannel_list, reason); -} - -/** Returns the index into p->subchannel_list->subchannels of the next - * subchannel in READY state, or p->subchannel_list->num_subchannels if no - * subchannel is READY. - * - * Note that this function does *not* update p->last_ready_subchannel_index. - * The caller must do that if it returns a pick. */ -static size_t get_next_ready_subchannel_index_locked( - const round_robin_lb_policy *p) { - GPR_ASSERT(p->subchannel_list != NULL); - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_INFO, - "[RR %p] getting next ready subchannel (out of %lu), " - "last_ready_subchannel_index=%lu", - (void *)p, (unsigned long)p->subchannel_list->num_subchannels, - (unsigned long)p->last_ready_subchannel_index); - } - for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) { - const size_t index = (i + p->last_ready_subchannel_index + 1) % - p->subchannel_list->num_subchannels; - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log( - GPR_DEBUG, - "[RR %p] checking subchannel %p, subchannel_list %p, index %lu: " - "state=%s", - (void *)p, (void *)p->subchannel_list->subchannels[index].subchannel, - (void *)p->subchannel_list, (unsigned long)index, - grpc_connectivity_state_name( - p->subchannel_list->subchannels[index].curr_connectivity_state)); - } - if (p->subchannel_list->subchannels[index].curr_connectivity_state == - GRPC_CHANNEL_READY) { - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_DEBUG, - "[RR %p] found next ready subchannel (%p) at index %lu of " - "subchannel_list %p", - (void *)p, - (void *)p->subchannel_list->subchannels[index].subchannel, - (unsigned long)index, (void *)p->subchannel_list); - } - return index; - } - } - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", (void *)p); - } - return p->subchannel_list->num_subchannels; -} - -// Sets p->last_ready_subchannel_index to last_ready_index. 
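For reference, the selection logic documented above for get_next_ready_subchannel_index_locked reduces to a modular scan: start one slot past the last pick, walk the list with wrap-around, and return the first READY subchannel (or the list length if none is READY). Below is a minimal standalone sketch of that scan in plain C++ with stand-in types and a hypothetical NextReadyIndex name; it is illustrative only and not part of the vendored gRPC sources in this patch.

// Illustrative sketch only -- stand-in types, not gRPC internals.
#include <cstddef>
#include <vector>

enum class State { IDLE, CONNECTING, READY, TRANSIENT_FAILURE, SHUTDOWN };

// Returns the index of the next READY entry after last_ready_index,
// wrapping around, or subchannels.size() if nothing is READY.
size_t NextReadyIndex(const std::vector<State>& subchannels,
                      size_t last_ready_index) {
  const size_t n = subchannels.size();
  for (size_t i = 0; i < n; ++i) {
    const size_t index = (i + last_ready_index + 1) % n;  // wrap around
    if (subchannels[index] == State::READY) return index;
  }
  return n;  // sentinel: no subchannel is READY
}

The caller is expected to update its "last ready" marker only when the returned index is actually used for a pick, which is why the scan itself leaves that state untouched.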
-static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p, - size_t last_ready_index) { - GPR_ASSERT(last_ready_index < p->subchannel_list->num_subchannels); - p->last_ready_subchannel_index = last_ready_index; - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log( - GPR_DEBUG, - "[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)", - (void *)p, (unsigned long)last_ready_index, - (void *)p->subchannel_list->subchannels[last_ready_index].subchannel, - (void *)grpc_subchannel_get_connected_subchannel( - p->subchannel_list->subchannels[last_ready_index].subchannel)); - } -} - -static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { - round_robin_lb_policy *p = (round_robin_lb_policy *)pol; - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p", - (void *)pol, (void *)pol); - } - grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker); - grpc_subchannel_index_unref(); - gpr_free(p); -} - -static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { - round_robin_lb_policy *p = (round_robin_lb_policy *)pol; - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_DEBUG, "[RR %p] Shutting down Round Robin policy at %p", - (void *)pol, (void *)pol); - } - p->shutdown = true; - pending_pick *pp; - while ((pp = p->pending_picks)) { - p->pending_picks = pp->next; - *pp->target = NULL; - GRPC_CLOSURE_SCHED( - exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown")); - gpr_free(pp); - } - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown"); - const bool latest_is_current = - p->subchannel_list == p->latest_pending_subchannel_list; - rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list, - "sl_shutdown_rr_shutdown"); - p->subchannel_list = NULL; - if (!latest_is_current && p->latest_pending_subchannel_list != NULL && - !p->latest_pending_subchannel_list->shutting_down) { - rr_subchannel_list_shutdown_and_unref(exec_ctx, - p->latest_pending_subchannel_list, - "sl_shutdown_pending_rr_shutdown"); - p->latest_pending_subchannel_list = NULL; - } -} - -static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_connected_subchannel **target, - grpc_error *error) { - round_robin_lb_policy *p = (round_robin_lb_policy *)pol; - pending_pick *pp = p->pending_picks; - p->pending_picks = NULL; - while (pp != NULL) { - pending_pick *next = pp->next; - if (pp->target == target) { - *target = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick cancelled", &error, 1)); - gpr_free(pp); - } else { - pp->next = p->pending_picks; - p->pending_picks = pp; - } - pp = next; - } - GRPC_ERROR_UNREF(error); -} - -static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - uint32_t initial_metadata_flags_mask, - uint32_t initial_metadata_flags_eq, - grpc_error *error) { - round_robin_lb_policy *p = (round_robin_lb_policy *)pol; - pending_pick *pp = p->pending_picks; - p->pending_picks = NULL; - while (pp != NULL) { - pending_pick *next = pp->next; - if ((pp->initial_metadata_flags & initial_metadata_flags_mask) == - initial_metadata_flags_eq) { - *pp->target = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Pick cancelled", &error, 1)); - gpr_free(pp); - } else { - 
pp->next = p->pending_picks; - p->pending_picks = pp; - } - pp = next; - } - GRPC_ERROR_UNREF(error); -} - -static void start_picking_locked(grpc_exec_ctx *exec_ctx, - round_robin_lb_policy *p) { - p->started_picking = true; - for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) { - subchannel_data *sd = &p->subchannel_list->subchannels[i]; - GRPC_LB_POLICY_WEAK_REF(&p->base, "start_picking_locked"); - rr_subchannel_list_ref(sd->subchannel_list, "started_picking"); - grpc_subchannel_notify_on_state_change( - exec_ctx, sd->subchannel, p->base.interested_parties, - &sd->pending_connectivity_state_unsafe, - &sd->connectivity_changed_closure); - } -} - -static void rr_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { - round_robin_lb_policy *p = (round_robin_lb_policy *)pol; - if (!p->started_picking) { - start_picking_locked(exec_ctx, p); - } -} - -static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - const grpc_lb_policy_pick_args *pick_args, - grpc_connected_subchannel **target, - grpc_call_context_element *context, void **user_data, - grpc_closure *on_complete) { - round_robin_lb_policy *p = (round_robin_lb_policy *)pol; - GPR_ASSERT(!p->shutdown); - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_INFO, "[RR %p] Trying to pick", (void *)pol); - } - if (p->subchannel_list != NULL) { - const size_t next_ready_index = get_next_ready_subchannel_index_locked(p); - if (next_ready_index < p->subchannel_list->num_subchannels) { - /* readily available, report right away */ - subchannel_data *sd = &p->subchannel_list->subchannels[next_ready_index]; - *target = GRPC_CONNECTED_SUBCHANNEL_REF( - grpc_subchannel_get_connected_subchannel(sd->subchannel), - "rr_picked"); - if (user_data != NULL) { - *user_data = sd->user_data; - } - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log( - GPR_DEBUG, - "[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, " - "index %lu)", - (void *)p, (void *)sd->subchannel, (void *)*target, - (void *)sd->subchannel_list, (unsigned long)next_ready_index); - } - /* only advance the last picked pointer if the selection was used */ - update_last_ready_subchannel_index_locked(p, next_ready_index); - return 1; - } - } - /* no pick currently available. 
Save for later in list of pending picks */ - if (!p->started_picking) { - start_picking_locked(exec_ctx, p); - } - pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp)); - pp->next = p->pending_picks; - pp->target = target; - pp->on_complete = on_complete; - pp->initial_metadata_flags = pick_args->initial_metadata_flags; - pp->user_data = user_data; - p->pending_picks = pp; - return 0; -} - -static void update_state_counters_locked(subchannel_data *sd) { - rr_subchannel_list *subchannel_list = sd->subchannel_list; - if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) { - GPR_ASSERT(subchannel_list->num_ready > 0); - --subchannel_list->num_ready; - } else if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { - GPR_ASSERT(subchannel_list->num_transient_failures > 0); - --subchannel_list->num_transient_failures; - } else if (sd->prev_connectivity_state == GRPC_CHANNEL_SHUTDOWN) { - GPR_ASSERT(subchannel_list->num_shutdown > 0); - --subchannel_list->num_shutdown; - } else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) { - GPR_ASSERT(subchannel_list->num_idle > 0); - --subchannel_list->num_idle; - } - if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) { - ++subchannel_list->num_ready; - } else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { - ++subchannel_list->num_transient_failures; - } else if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) { - ++subchannel_list->num_shutdown; - } else if (sd->curr_connectivity_state == GRPC_CHANNEL_IDLE) { - ++subchannel_list->num_idle; - } -} - -/** Sets the policy's connectivity status based on that of the passed-in \a sd - * (the subchannel_data associted with the updated subchannel) and the - * subchannel list \a sd belongs to (sd->subchannel_list). \a error will only be - * used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the - * connectivity status set. */ -static grpc_connectivity_state update_lb_connectivity_status_locked( - grpc_exec_ctx *exec_ctx, subchannel_data *sd, grpc_error *error) { - /* In priority order. The first rule to match terminates the search (ie, if we - * are on rule n, all previous rules were unfulfilled). - * - * 1) RULE: ANY subchannel is READY => policy is READY. - * CHECK: At least one subchannel is ready iff p->ready_list is NOT empty. - * - * 2) RULE: ANY subchannel is CONNECTING => policy is CONNECTING. - * CHECK: sd->curr_connectivity_state == CONNECTING. - * - * 3) RULE: ALL subchannels are SHUTDOWN => policy is SHUTDOWN. - * CHECK: p->subchannel_list->num_shutdown == - * p->subchannel_list->num_subchannels. - * - * 4) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is - * TRANSIENT_FAILURE. - * CHECK: p->num_transient_failures == p->subchannel_list->num_subchannels. - * - * 5) RULE: ALL subchannels are IDLE => policy is IDLE. - * CHECK: p->num_idle == p->subchannel_list->num_subchannels. 
- */ - grpc_connectivity_state new_state = sd->curr_connectivity_state; - rr_subchannel_list *subchannel_list = sd->subchannel_list; - round_robin_lb_policy *p = subchannel_list->policy; - if (subchannel_list->num_ready > 0) { /* 1) READY */ - grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY, - GRPC_ERROR_NONE, "rr_ready"); - new_state = GRPC_CHANNEL_READY; - } else if (sd->curr_connectivity_state == - GRPC_CHANNEL_CONNECTING) { /* 2) CONNECTING */ - grpc_connectivity_state_set(exec_ctx, &p->state_tracker, - GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE, - "rr_connecting"); - new_state = GRPC_CHANNEL_CONNECTING; - } else if (p->subchannel_list->num_shutdown == - p->subchannel_list->num_subchannels) { /* 3) SHUTDOWN */ - grpc_connectivity_state_set(exec_ctx, &p->state_tracker, - GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error), - "rr_shutdown"); - p->shutdown = true; - new_state = GRPC_CHANNEL_SHUTDOWN; - } else if (subchannel_list->num_transient_failures == - p->subchannel_list->num_subchannels) { /* 4) TRANSIENT_FAILURE */ - grpc_connectivity_state_set(exec_ctx, &p->state_tracker, - GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_REF(error), "rr_transient_failure"); - new_state = GRPC_CHANNEL_TRANSIENT_FAILURE; - } else if (subchannel_list->num_idle == - p->subchannel_list->num_subchannels) { /* 5) IDLE */ - grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_IDLE, - GRPC_ERROR_NONE, "rr_idle"); - new_state = GRPC_CHANNEL_IDLE; - } - GRPC_ERROR_UNREF(error); - return new_state; -} - -static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - subchannel_data *sd = (subchannel_data *)arg; - round_robin_lb_policy *p = sd->subchannel_list->policy; - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log( - GPR_DEBUG, - "[RR %p] connectivity changed for subchannel %p, subchannel_list %p: " - "prev_state=%s new_state=%s p->shutdown=%d " - "sd->subchannel_list->shutting_down=%d error=%s", - (void *)p, (void *)sd->subchannel, (void *)sd->subchannel_list, - grpc_connectivity_state_name(sd->prev_connectivity_state), - grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe), - p->shutdown, sd->subchannel_list->shutting_down, - grpc_error_string(error)); - } - // If the policy is shutting down, unref and return. - if (p->shutdown) { - rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, - "pol_shutdown+started_picking"); - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pol_shutdown"); - return; - } - if (sd->subchannel_list->shutting_down && error == GRPC_ERROR_CANCELLED) { - // the subchannel list associated with sd has been discarded. This callback - // corresponds to the unsubscription. The unrefs correspond to the picking - // ref (start_picking_locked or update_started_picking). - rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, - "sl_shutdown+started_picking"); - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "sl_shutdown+picking"); - return; - } - // Dispose of outdated subchannel lists. 
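
[Editorial aside] As context for update_state_counters_locked above (and for UpdateStateCountersLocked in the C++ rewrite later in this patch): every connectivity transition decrements the tally of the subchannel's previous state and increments the tally of its new state, so the per-list counters always sum to the number of tracked subchannels. A minimal, self-contained C++ sketch of that invariant follows; StateCounters and apply_transition are illustrative names, not gRPC types.

#include <array>
#include <cassert>
#include <cstddef>

// Simplified stand-in for grpc_connectivity_state.
enum class State { kIdle, kConnecting, kReady, kTransientFailure, kShutdown };

// Per-subchannel-list tallies, one counter per state.
struct StateCounters {
  std::array<size_t, 5> counts{};  // indexed by State

  size_t& at(State s) { return counts[static_cast<size_t>(s)]; }

  // Mirrors the decrement-then-increment pattern used by the LB policy:
  // the previous state loses one subchannel, the new state gains one.
  void apply_transition(State prev, State next) {
    assert(at(prev) > 0);
    --at(prev);
    ++at(next);
  }
};

int main() {
  StateCounters counters;
  counters.at(State::kIdle) = 3;  // three subchannels start in IDLE
  counters.apply_transition(State::kIdle, State::kConnecting);
  counters.apply_transition(State::kConnecting, State::kReady);
  assert(counters.at(State::kReady) == 1);
  assert(counters.at(State::kIdle) == 2);
  return 0;
}
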
- if (sd->subchannel_list != p->subchannel_list && - sd->subchannel_list != p->latest_pending_subchannel_list) { - const char *reason = NULL; - if (sd->subchannel_list->shutting_down) { - reason = "sl_outdated_straggler"; - rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, reason); - } else { - reason = "sl_outdated"; - rr_subchannel_list_shutdown_and_unref(exec_ctx, sd->subchannel_list, - reason); - } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, reason); - return; - } - // Now that we're inside the combiner, copy the pending connectivity - // state (which was set by the connectivity state watcher) to - // curr_connectivity_state, which is what we use inside of the combiner. - sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe; - // Update state counters and determine new overall state. - update_state_counters_locked(sd); - sd->prev_connectivity_state = sd->curr_connectivity_state; - const grpc_connectivity_state new_policy_connectivity_state = - update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error)); - // If the sd's new state is SHUTDOWN, unref the subchannel, and if the new - // policy's state is SHUTDOWN, clean up. - if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) { - GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown"); - sd->subchannel = NULL; - if (sd->user_data != NULL) { - GPR_ASSERT(sd->user_data_vtable != NULL); - sd->user_data_vtable->destroy(exec_ctx, sd->user_data); - sd->user_data = NULL; - } - if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) { - // the policy is shutting down. Flush all the pending picks... - pending_pick *pp; - while ((pp = p->pending_picks)) { - p->pending_picks = pp->next; - *pp->target = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); - gpr_free(pp); - } - } - rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, - "sd_shutdown+started_picking"); - // unref the "rr_connectivity_update" weak ref from start_picking. - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, - "rr_connectivity_sd_shutdown"); - } else { // sd not in SHUTDOWN - if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) { - if (sd->subchannel_list != p->subchannel_list) { - // promote sd->subchannel_list to p->subchannel_list. - // sd->subchannel_list must be equal to - // p->latest_pending_subchannel_list because we have already filtered - // for sds belonging to outdated subchannel lists. - GPR_ASSERT(sd->subchannel_list == p->latest_pending_subchannel_list); - GPR_ASSERT(!sd->subchannel_list->shutting_down); - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - const unsigned long num_subchannels = - p->subchannel_list != NULL - ? (unsigned long)p->subchannel_list->num_subchannels - : 0; - gpr_log(GPR_DEBUG, - "[RR %p] phasing out subchannel list %p (size %lu) in favor " - "of %p (size %lu)", - (void *)p, (void *)p->subchannel_list, num_subchannels, - (void *)sd->subchannel_list, num_subchannels); - } - if (p->subchannel_list != NULL) { - // dispose of the current subchannel_list - rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list, - "sl_phase_out_shutdown"); - } - p->subchannel_list = p->latest_pending_subchannel_list; - p->latest_pending_subchannel_list = NULL; - } - /* at this point we know there's at least one suitable subchannel. Go - * ahead and pick one and notify the pending suitors in - * p->pending_picks. This preemtively replicates rr_pick()'s actions. 
*/ - const size_t next_ready_index = get_next_ready_subchannel_index_locked(p); - GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels); - subchannel_data *selected = - &p->subchannel_list->subchannels[next_ready_index]; - if (p->pending_picks != NULL) { - // if the selected subchannel is going to be used for the pending - // picks, update the last picked pointer - update_last_ready_subchannel_index_locked(p, next_ready_index); - } - pending_pick *pp; - while ((pp = p->pending_picks)) { - p->pending_picks = pp->next; - *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF( - grpc_subchannel_get_connected_subchannel(selected->subchannel), - "rr_picked"); - if (pp->user_data != NULL) { - *pp->user_data = selected->user_data; - } - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_DEBUG, - "[RR %p] Fulfilling pending pick. Target <-- subchannel %p " - "(subchannel_list %p, index %lu)", - (void *)p, (void *)selected->subchannel, - (void *)p->subchannel_list, (unsigned long)next_ready_index); - } - GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE); - gpr_free(pp); - } - } - /* renew notification: reuses the "rr_connectivity_update" weak ref on the - * policy as well as the sd->subchannel_list ref. */ - grpc_subchannel_notify_on_state_change( - exec_ctx, sd->subchannel, p->base.interested_parties, - &sd->pending_connectivity_state_unsafe, - &sd->connectivity_changed_closure); - } -} - -static grpc_connectivity_state rr_check_connectivity_locked( - grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_error **error) { - round_robin_lb_policy *p = (round_robin_lb_policy *)pol; - return grpc_connectivity_state_get(&p->state_tracker, error); -} - -static void rr_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx, - grpc_lb_policy *pol, - grpc_connectivity_state *current, - grpc_closure *notify) { - round_robin_lb_policy *p = (round_robin_lb_policy *)pol; - grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker, - current, notify); -} - -static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, - grpc_closure *closure) { - round_robin_lb_policy *p = (round_robin_lb_policy *)pol; - const size_t next_ready_index = get_next_ready_subchannel_index_locked(p); - if (next_ready_index < p->subchannel_list->num_subchannels) { - subchannel_data *selected = - &p->subchannel_list->subchannels[next_ready_index]; - grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF( - grpc_subchannel_get_connected_subchannel(selected->subchannel), - "rr_picked"); - grpc_connected_subchannel_ping(exec_ctx, target, closure); - GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked"); - } else { - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Round Robin not connected")); - } -} - -static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, - const grpc_lb_policy_args *args) { - round_robin_lb_policy *p = (round_robin_lb_policy *)policy; - const grpc_arg *arg = - grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); - if (arg == NULL || arg->type != GRPC_ARG_POINTER) { - if (p->subchannel_list == NULL) { - // If we don't have a current subchannel list, go into TRANSIENT FAILURE. - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"), - "rr_update_missing"); - } else { - // otherwise, keep using the current subchannel list (ignore this update). 
- gpr_log(GPR_ERROR, - "[RR %p] No valid LB addresses channel arg for update, ignoring.", - (void *)p); - } - return; - } - grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p; - rr_subchannel_list *subchannel_list = - rr_subchannel_list_create(p, addresses->num_addresses); - if (addresses->num_addresses == 0) { - grpc_connectivity_state_set( - exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"), - "rr_update_empty"); - if (p->subchannel_list != NULL) { - rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list, - "sl_shutdown_empty_update"); - } - p->subchannel_list = subchannel_list; // empty list - return; - } - size_t subchannel_index = 0; - if (p->latest_pending_subchannel_list != NULL && p->started_picking) { - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_DEBUG, - "[RR %p] Shutting down latest pending subchannel list %p, about " - "to be replaced by newer latest %p", - (void *)p, (void *)p->latest_pending_subchannel_list, - (void *)subchannel_list); - } - rr_subchannel_list_shutdown_and_unref( - exec_ctx, p->latest_pending_subchannel_list, "sl_outdated_dont_smash"); - } - p->latest_pending_subchannel_list = subchannel_list; - grpc_subchannel_args sc_args; - /* We need to remove the LB addresses in order to be able to compare the - * subchannel keys of subchannels from a different batch of addresses. */ - static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS, - GRPC_ARG_LB_ADDRESSES}; - /* Create subchannels for addresses in the update. */ - for (size_t i = 0; i < addresses->num_addresses; i++) { - // If there were any balancer, we would have chosen grpclb policy instead. - GPR_ASSERT(!addresses->addresses[i].is_balancer); - memset(&sc_args, 0, sizeof(grpc_subchannel_args)); - grpc_arg addr_arg = - grpc_create_subchannel_address_arg(&addresses->addresses[i].address); - grpc_channel_args *new_args = grpc_channel_args_copy_and_add_and_remove( - args->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg, - 1); - gpr_free(addr_arg.value.string); - sc_args.args = new_args; - grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel( - exec_ctx, args->client_channel_factory, &sc_args); - grpc_channel_args_destroy(exec_ctx, new_args); - grpc_error *error; - // Get the connectivity state of the subchannel. Already existing ones may - // be in a state other than INIT. - const grpc_connectivity_state subchannel_connectivity_state = - grpc_subchannel_check_connectivity(subchannel, &error); - if (error != GRPC_ERROR_NONE) { - // The subchannel is in error (e.g. shutting down). Ignore it. - GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannel, "new_sc_connectivity_error"); - GRPC_ERROR_UNREF(error); - continue; - } - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - char *address_uri = - grpc_sockaddr_to_uri(&addresses->addresses[i].address); - gpr_log( - GPR_DEBUG, - "[RR %p] index %lu: Created subchannel %p for address uri %s into " - "subchannel_list %p. 
Connectivity state %s", - (void *)p, (unsigned long)subchannel_index, (void *)subchannel, - address_uri, (void *)subchannel_list, - grpc_connectivity_state_name(subchannel_connectivity_state)); - gpr_free(address_uri); - } - subchannel_data *sd = &subchannel_list->subchannels[subchannel_index++]; - sd->subchannel_list = subchannel_list; - sd->subchannel = subchannel; - GRPC_CLOSURE_INIT(&sd->connectivity_changed_closure, - rr_connectivity_changed_locked, sd, - grpc_combiner_scheduler(args->combiner)); - /* use some sentinel value outside of the range of - * grpc_connectivity_state to signal an undefined previous state. We - * won't be referring to this value again and it'll be overwritten after - * the first call to rr_connectivity_changed_locked */ - sd->prev_connectivity_state = GRPC_CHANNEL_INIT; - sd->curr_connectivity_state = subchannel_connectivity_state; - sd->user_data_vtable = addresses->user_data_vtable; - if (sd->user_data_vtable != NULL) { - sd->user_data = - sd->user_data_vtable->copy(addresses->addresses[i].user_data); - } - if (p->started_picking) { - rr_subchannel_list_ref(sd->subchannel_list, "update_started_picking"); - GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity_update"); - /* 2. Watch every new subchannel. A subchannel list becomes active the - * moment one of its subchannels is READY. At that moment, we swap - * p->subchannel_list for sd->subchannel_list, provided the subchannel - * list is still valid (ie, isn't shutting down) */ - grpc_subchannel_notify_on_state_change( - exec_ctx, sd->subchannel, p->base.interested_parties, - &sd->pending_connectivity_state_unsafe, - &sd->connectivity_changed_closure); - } - } - if (!p->started_picking) { - // The policy isn't picking yet. Save the update for later, disposing of - // previous version if any. 
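
[Editorial aside] The loop above strips the LB-address args before building each subchannel's args so that subchannel keys remain comparable across address batches, then adds the concrete per-address arg. The stand-alone sketch below models that copy-remove-add step with plain std::vector pairs; CopyAddRemove and the key strings are illustrative placeholders, not the actual grpc_channel_args API.

#include <iostream>
#include <string>
#include <utility>
#include <vector>

using Arg = std::pair<std::string, std::string>;  // key -> value (simplified)

// Roughly models the copy/add/remove step: copy the original args, skip any
// key listed in keys_to_remove, then append the new per-address arg.
std::vector<Arg> CopyAddRemove(const std::vector<Arg>& args,
                               const std::vector<std::string>& keys_to_remove,
                               const Arg& to_add) {
  std::vector<Arg> result;
  for (const Arg& a : args) {
    bool removed = false;
    for (const std::string& k : keys_to_remove) {
      if (a.first == k) { removed = true; break; }
    }
    if (!removed) result.push_back(a);
  }
  result.push_back(to_add);
  return result;
}

int main() {
  // Placeholder keys and values, for illustration only.
  std::vector<Arg> channel_args = {
      {"lb_addresses", "<address list>"},
      {"some_other_setting", "42"},
  };
  std::vector<Arg> sc_args = CopyAddRemove(
      channel_args, {"lb_addresses", "subchannel_address"},
      {"subchannel_address", "ipv4:10.0.0.1:443"});
  for (const Arg& a : sc_args) std::cout << a.first << "=" << a.second << "\n";
  return 0;
}
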
- if (p->subchannel_list != NULL) { - rr_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list, - "rr_update_before_started_picking"); - } - p->subchannel_list = subchannel_list; - p->latest_pending_subchannel_list = NULL; - } -} - -static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = { - rr_destroy, - rr_shutdown_locked, - rr_pick_locked, - rr_cancel_pick_locked, - rr_cancel_picks_locked, - rr_ping_one_locked, - rr_exit_idle_locked, - rr_check_connectivity_locked, - rr_notify_on_state_change_locked, - rr_update_locked}; - -static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {} - -static void round_robin_factory_unref(grpc_lb_policy_factory *factory) {} - -static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx, - grpc_lb_policy_factory *factory, - grpc_lb_policy_args *args) { - GPR_ASSERT(args->client_channel_factory != NULL); - round_robin_lb_policy *p = (round_robin_lb_policy *)gpr_zalloc(sizeof(*p)); - grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner); - grpc_subchannel_index_ref(); - grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE, - "round_robin"); - rr_update_locked(exec_ctx, &p->base, args); - if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) { - gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void *)p, - (unsigned long)p->subchannel_list->num_subchannels); - } - return &p->base; -} - -static const grpc_lb_policy_factory_vtable round_robin_factory_vtable = { - round_robin_factory_ref, round_robin_factory_unref, round_robin_create, - "round_robin"}; - -static grpc_lb_policy_factory round_robin_lb_policy_factory = { - &round_robin_factory_vtable}; - -static grpc_lb_policy_factory *round_robin_lb_factory_create() { - return &round_robin_lb_policy_factory; -} - -/* Plugin registration */ - -void grpc_lb_policy_round_robin_init() { - grpc_register_lb_policy(round_robin_lb_factory_create()); - grpc_register_tracer(&grpc_lb_round_robin_trace); -} - -void grpc_lb_policy_round_robin_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc new file mode 100644 index 000000000..79e8ad566 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc @@ -0,0 +1,682 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/** Round Robin Policy. + * + * Before every pick, the \a get_next_ready_subchannel_index_locked function + * returns the p->subchannel_list->subchannels index for next subchannel, + * respecting the relative order of the addresses provided upon creation or + * updates. Note however that updates will start picking from the beginning of + * the updated list. 
*/ + +#include + +#include + +#include + +#include "src/core/ext/filters/client_channel/lb_policy/subchannel_list.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/subchannel.h" +#include "src/core/ext/filters/client_channel/subchannel_index.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/transport/connectivity_state.h" +#include "src/core/lib/transport/static_metadata.h" + +namespace grpc_core { + +TraceFlag grpc_lb_round_robin_trace(false, "round_robin"); + +namespace { + +// +// round_robin LB policy +// + +class RoundRobin : public LoadBalancingPolicy { + public: + explicit RoundRobin(const Args& args); + + void UpdateLocked(const grpc_channel_args& args) override; + bool PickLocked(PickState* pick) override; + void CancelPickLocked(PickState* pick, grpc_error* error) override; + void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask, + uint32_t initial_metadata_flags_eq, + grpc_error* error) override; + void NotifyOnStateChangeLocked(grpc_connectivity_state* state, + grpc_closure* closure) override; + grpc_connectivity_state CheckConnectivityLocked( + grpc_error** connectivity_error) override; + void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override; + void PingOneLocked(grpc_closure* on_initiate, grpc_closure* on_ack) override; + void ExitIdleLocked() override; + + private: + ~RoundRobin(); + + // Forward declaration. + class RoundRobinSubchannelList; + + // Data for a particular subchannel in a subchannel list. + // This subclass adds the following functionality: + // - Tracks user_data associated with each address, which will be + // returned along with picks that select the subchannel. + // - Tracks the previous connectivity state of the subchannel, so that + // we know how many subchannels are in each state. + class RoundRobinSubchannelData + : public SubchannelData { + public: + RoundRobinSubchannelData(RoundRobinSubchannelList* subchannel_list, + const grpc_lb_user_data_vtable* user_data_vtable, + const grpc_lb_address& address, + grpc_subchannel* subchannel, + grpc_combiner* combiner) + : SubchannelData(subchannel_list, user_data_vtable, address, subchannel, + combiner), + user_data_vtable_(user_data_vtable), + user_data_(user_data_vtable_ != nullptr + ? user_data_vtable_->copy(address.user_data) + : nullptr) {} + + void UnrefSubchannelLocked(const char* reason) override { + SubchannelData::UnrefSubchannelLocked(reason); + if (user_data_ != nullptr) { + GPR_ASSERT(user_data_vtable_ != nullptr); + user_data_vtable_->destroy(user_data_); + user_data_ = nullptr; + } + } + + void* user_data() const { return user_data_; } + + grpc_connectivity_state connectivity_state() const { + return last_connectivity_state_; + } + + void UpdateConnectivityStateLocked( + grpc_connectivity_state connectivity_state, grpc_error* error); + + private: + void ProcessConnectivityChangeLocked( + grpc_connectivity_state connectivity_state, grpc_error* error) override; + + const grpc_lb_user_data_vtable* user_data_vtable_; + void* user_data_ = nullptr; + grpc_connectivity_state last_connectivity_state_ = GRPC_CHANNEL_IDLE; + }; + + // A list of subchannels. 
+ class RoundRobinSubchannelList + : public SubchannelList { + public: + RoundRobinSubchannelList( + RoundRobin* policy, TraceFlag* tracer, + const grpc_lb_addresses* addresses, grpc_combiner* combiner, + grpc_client_channel_factory* client_channel_factory, + const grpc_channel_args& args) + : SubchannelList(policy, tracer, addresses, combiner, + client_channel_factory, args) { + // Need to maintain a ref to the LB policy as long as we maintain + // any references to subchannels, since the subchannels' + // pollset_sets will include the LB policy's pollset_set. + policy->Ref(DEBUG_LOCATION, "subchannel_list").release(); + } + + ~RoundRobinSubchannelList() { + GRPC_ERROR_UNREF(last_transient_failure_error_); + RoundRobin* p = static_cast(policy()); + p->Unref(DEBUG_LOCATION, "subchannel_list"); + } + + // Starts watching the subchannels in this list. + void StartWatchingLocked(); + + // Updates the counters of subchannels in each state when a + // subchannel transitions from old_state to new_state. + // transient_failure_error is the error that is reported when + // new_state is TRANSIENT_FAILURE. + void UpdateStateCountersLocked(grpc_connectivity_state old_state, + grpc_connectivity_state new_state, + grpc_error* transient_failure_error); + + // If this subchannel list is the RR policy's current subchannel + // list, updates the RR policy's connectivity state based on the + // subchannel list's state counters. + void MaybeUpdateRoundRobinConnectivityStateLocked(); + + // Updates the RR policy's overall state based on the counters of + // subchannels in each state. + void UpdateRoundRobinStateFromSubchannelStateCountsLocked(); + + size_t GetNextReadySubchannelIndexLocked(); + void UpdateLastReadySubchannelIndexLocked(size_t last_ready_index); + + private: + size_t num_ready_ = 0; + size_t num_connecting_ = 0; + size_t num_transient_failure_ = 0; + grpc_error* last_transient_failure_error_ = GRPC_ERROR_NONE; + size_t last_ready_index_ = -1; // Index into list of last pick. + }; + + void ShutdownLocked() override; + + void StartPickingLocked(); + bool DoPickLocked(PickState* pick); + void DrainPendingPicksLocked(); + + /** list of subchannels */ + OrphanablePtr subchannel_list_; + /** Latest version of the subchannel list. + * Subchannel connectivity callbacks will only promote updated subchannel + * lists if they equal \a latest_pending_subchannel_list. In other words, + * racing callbacks that reference outdated subchannel lists won't perform any + * update. */ + OrphanablePtr latest_pending_subchannel_list_; + /** have we started picking? */ + bool started_picking_ = false; + /** are we shutting down? 
*/ + bool shutdown_ = false; + /** List of picks that are waiting on connectivity */ + PickState* pending_picks_ = nullptr; + /** our connectivity state tracker */ + grpc_connectivity_state_tracker state_tracker_; +}; + +RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) { + GPR_ASSERT(args.client_channel_factory != nullptr); + grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE, + "round_robin"); + UpdateLocked(*args.args); + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, "[RR %p] Created with %" PRIuPTR " subchannels", this, + subchannel_list_->num_subchannels()); + } + grpc_subchannel_index_ref(); +} + +RoundRobin::~RoundRobin() { + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this); + } + GPR_ASSERT(subchannel_list_ == nullptr); + GPR_ASSERT(latest_pending_subchannel_list_ == nullptr); + GPR_ASSERT(pending_picks_ == nullptr); + grpc_connectivity_state_destroy(&state_tracker_); + grpc_subchannel_index_unref(); +} + +void RoundRobin::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) { + PickState* pick; + while ((pick = pending_picks_) != nullptr) { + pending_picks_ = pick->next; + if (new_policy->PickLocked(pick)) { + // Synchronous return, schedule closure. + GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE); + } + } +} + +void RoundRobin::ShutdownLocked() { + grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"); + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, "[RR %p] Shutting down", this); + } + shutdown_ = true; + PickState* pick; + while ((pick = pending_picks_) != nullptr) { + pending_picks_ = pick->next; + pick->connected_subchannel.reset(); + GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error)); + } + grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN, + GRPC_ERROR_REF(error), "rr_shutdown"); + subchannel_list_.reset(); + latest_pending_subchannel_list_.reset(); + TryReresolutionLocked(&grpc_lb_round_robin_trace, GRPC_ERROR_CANCELLED); + GRPC_ERROR_UNREF(error); +} + +void RoundRobin::CancelPickLocked(PickState* pick, grpc_error* error) { + PickState* pp = pending_picks_; + pending_picks_ = nullptr; + while (pp != nullptr) { + PickState* next = pp->next; + if (pp == pick) { + pick->connected_subchannel.reset(); + GRPC_CLOSURE_SCHED(pick->on_complete, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick Cancelled", &error, 1)); + } else { + pp->next = pending_picks_; + pending_picks_ = pp; + } + pp = next; + } + GRPC_ERROR_UNREF(error); +} + +void RoundRobin::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask, + uint32_t initial_metadata_flags_eq, + grpc_error* error) { + PickState* pick = pending_picks_; + pending_picks_ = nullptr; + while (pick != nullptr) { + PickState* next = pick->next; + if ((pick->initial_metadata_flags & initial_metadata_flags_mask) == + initial_metadata_flags_eq) { + pick->connected_subchannel.reset(); + GRPC_CLOSURE_SCHED(pick->on_complete, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Pick Cancelled", &error, 1)); + } else { + pick->next = pending_picks_; + pending_picks_ = pick; + } + pick = next; + } + GRPC_ERROR_UNREF(error); +} + +void RoundRobin::StartPickingLocked() { + started_picking_ = true; + subchannel_list_->StartWatchingLocked(); +} + +void RoundRobin::ExitIdleLocked() { + if (!started_picking_) { + StartPickingLocked(); + } +} + +bool RoundRobin::DoPickLocked(PickState* pick) { + const size_t next_ready_index = + 
subchannel_list_->GetNextReadySubchannelIndexLocked(); + if (next_ready_index < subchannel_list_->num_subchannels()) { + /* readily available, report right away */ + RoundRobinSubchannelData* sd = + subchannel_list_->subchannel(next_ready_index); + GPR_ASSERT(sd->connected_subchannel() != nullptr); + pick->connected_subchannel = sd->connected_subchannel()->Ref(); + if (pick->user_data != nullptr) { + *pick->user_data = sd->user_data(); + } + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, + "[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, " + "index %" PRIuPTR ")", + this, sd->subchannel(), pick->connected_subchannel.get(), + sd->subchannel_list(), next_ready_index); + } + /* only advance the last picked pointer if the selection was used */ + subchannel_list_->UpdateLastReadySubchannelIndexLocked(next_ready_index); + return true; + } + return false; +} + +void RoundRobin::DrainPendingPicksLocked() { + PickState* pick; + while ((pick = pending_picks_)) { + pending_picks_ = pick->next; + GPR_ASSERT(DoPickLocked(pick)); + GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE); + } +} + +bool RoundRobin::PickLocked(PickState* pick) { + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", this, shutdown_); + } + GPR_ASSERT(!shutdown_); + if (subchannel_list_ != nullptr) { + if (DoPickLocked(pick)) return true; + } + /* no pick currently available. Save for later in list of pending picks */ + if (!started_picking_) { + StartPickingLocked(); + } + pick->next = pending_picks_; + pending_picks_ = pick; + return false; +} + +void RoundRobin::RoundRobinSubchannelList::StartWatchingLocked() { + if (num_subchannels() == 0) return; + // Check current state of each subchannel synchronously, since any + // subchannel already used by some other channel may have a non-IDLE + // state. + for (size_t i = 0; i < num_subchannels(); ++i) { + grpc_error* error = GRPC_ERROR_NONE; + grpc_connectivity_state state = + subchannel(i)->CheckConnectivityStateLocked(&error); + if (state != GRPC_CHANNEL_IDLE) { + subchannel(i)->UpdateConnectivityStateLocked(state, error); + } + } + // Now set the LB policy's state based on the subchannels' states. + UpdateRoundRobinStateFromSubchannelStateCountsLocked(); + // Start connectivity watch for each subchannel. + for (size_t i = 0; i < num_subchannels(); i++) { + if (subchannel(i)->subchannel() != nullptr) { + subchannel(i)->StartConnectivityWatchLocked(); + } + } +} + +void RoundRobin::RoundRobinSubchannelList::UpdateStateCountersLocked( + grpc_connectivity_state old_state, grpc_connectivity_state new_state, + grpc_error* transient_failure_error) { + GPR_ASSERT(old_state != GRPC_CHANNEL_SHUTDOWN); + GPR_ASSERT(new_state != GRPC_CHANNEL_SHUTDOWN); + if (old_state == GRPC_CHANNEL_READY) { + GPR_ASSERT(num_ready_ > 0); + --num_ready_; + } else if (old_state == GRPC_CHANNEL_CONNECTING) { + GPR_ASSERT(num_connecting_ > 0); + --num_connecting_; + } else if (old_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { + GPR_ASSERT(num_transient_failure_ > 0); + --num_transient_failure_; + } + if (new_state == GRPC_CHANNEL_READY) { + ++num_ready_; + } else if (new_state == GRPC_CHANNEL_CONNECTING) { + ++num_connecting_; + } else if (new_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { + ++num_transient_failure_; + } + GRPC_ERROR_UNREF(last_transient_failure_error_); + last_transient_failure_error_ = transient_failure_error; +} + +// Sets the RR policy's connectivity state based on the current +// subchannel list. 
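
[Editorial aside] PickLocked()/DoPickLocked()/DrainPendingPicksLocked() above implement a pick-or-queue flow: a pick completes synchronously when a READY subchannel exists, otherwise it is parked on pending_picks_ and completed later once a list with a READY subchannel is promoted. A toy, self-contained model of that flow follows; PickQueue, OnBackendReady and the address strings are illustrative, not gRPC API.

#include <deque>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Toy model: if a READY backend exists, picks complete synchronously;
// otherwise they wait until one becomes READY.
class PickQueue {
 public:
  // Returns a backend immediately if one is available (cf. DoPickLocked()).
  std::optional<std::string> Pick(const std::string& tag) {
    if (!ready_backends_.empty()) return ready_backends_.front();
    pending_.push_back(tag);  // cf. adding to pending_picks_
    return std::nullopt;
  }

  // Called when a backend becomes READY (cf. DrainPendingPicksLocked()).
  void OnBackendReady(const std::string& backend) {
    ready_backends_.push_back(backend);
    while (!pending_.empty()) {
      std::cout << "completing pending pick '" << pending_.front()
                << "' with " << backend << "\n";
      pending_.pop_front();
    }
  }

 private:
  std::vector<std::string> ready_backends_;
  std::deque<std::string> pending_;
};

int main() {
  PickQueue q;
  if (!q.Pick("rpc-1")) std::cout << "rpc-1 queued\n";
  q.OnBackendReady("10.0.0.1:443");  // rpc-1 completes here
  return 0;
}
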
+void RoundRobin::RoundRobinSubchannelList:: + MaybeUpdateRoundRobinConnectivityStateLocked() { + RoundRobin* p = static_cast(policy()); + // Only set connectivity state if this is the current subchannel list. + if (p->subchannel_list_.get() != this) return; + /* In priority order. The first rule to match terminates the search (ie, if we + * are on rule n, all previous rules were unfulfilled). + * + * 1) RULE: ANY subchannel is READY => policy is READY. + * CHECK: subchannel_list->num_ready > 0. + * + * 2) RULE: ANY subchannel is CONNECTING => policy is CONNECTING. + * CHECK: sd->curr_connectivity_state == CONNECTING. + * + * 3) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is + * TRANSIENT_FAILURE. + * CHECK: subchannel_list->num_transient_failures == + * subchannel_list->num_subchannels. + */ + if (num_ready_ > 0) { + /* 1) READY */ + grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_READY, + GRPC_ERROR_NONE, "rr_ready"); + } else if (num_connecting_ > 0) { + /* 2) CONNECTING */ + grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_CONNECTING, + GRPC_ERROR_NONE, "rr_connecting"); + } else if (num_transient_failure_ == num_subchannels()) { + /* 3) TRANSIENT_FAILURE */ + grpc_connectivity_state_set(&p->state_tracker_, + GRPC_CHANNEL_TRANSIENT_FAILURE, + GRPC_ERROR_REF(last_transient_failure_error_), + "rr_exhausted_subchannels"); + } +} + +void RoundRobin::RoundRobinSubchannelList:: + UpdateRoundRobinStateFromSubchannelStateCountsLocked() { + RoundRobin* p = static_cast(policy()); + if (num_ready_ > 0) { + if (p->subchannel_list_.get() != this) { + // Promote this list to p->subchannel_list_. + // This list must be p->latest_pending_subchannel_list_, because + // any previous update would have been shut down already and + // therefore we would not be receiving a notification for them. + GPR_ASSERT(p->latest_pending_subchannel_list_.get() == this); + GPR_ASSERT(!shutting_down()); + if (grpc_lb_round_robin_trace.enabled()) { + const size_t old_num_subchannels = + p->subchannel_list_ != nullptr + ? p->subchannel_list_->num_subchannels() + : 0; + gpr_log(GPR_INFO, + "[RR %p] phasing out subchannel list %p (size %" PRIuPTR + ") in favor of %p (size %" PRIuPTR ")", + p, p->subchannel_list_.get(), old_num_subchannels, this, + num_subchannels()); + } + p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_); + } + // Drain pending picks. + p->DrainPendingPicksLocked(); + } + // Update the RR policy's connectivity state if needed. 
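
[Editorial aside] To make the aggregation rules above concrete, here is a small stand-alone function applying the same priority order (any READY beats any CONNECTING, which beats all-TRANSIENT_FAILURE). Returning kIdle in the fall-through case is a simplification: the real code simply leaves the connectivity tracker unchanged when no rule matches. AggregateState and the State enum are illustrative names only.

#include <cassert>
#include <cstddef>

// Simplified stand-in for grpc_connectivity_state.
enum class State { kIdle, kConnecting, kReady, kTransientFailure };

// Mirrors the documented priority rules: any READY subchannel makes the
// policy READY; otherwise any CONNECTING subchannel makes it CONNECTING;
// only when *all* subchannels are in TRANSIENT_FAILURE does the policy
// report TRANSIENT_FAILURE.
State AggregateState(size_t num_ready, size_t num_connecting,
                     size_t num_transient_failure, size_t num_subchannels) {
  if (num_ready > 0) return State::kReady;
  if (num_connecting > 0) return State::kConnecting;
  if (num_transient_failure == num_subchannels) {
    return State::kTransientFailure;
  }
  return State::kIdle;  // simplification: real code reports nothing here
}

int main() {
  assert(AggregateState(1, 2, 0, 3) == State::kReady);
  assert(AggregateState(0, 1, 2, 3) == State::kConnecting);
  assert(AggregateState(0, 0, 3, 3) == State::kTransientFailure);
  return 0;
}
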
+ MaybeUpdateRoundRobinConnectivityStateLocked(); +} + +void RoundRobin::RoundRobinSubchannelData::UpdateConnectivityStateLocked( + grpc_connectivity_state connectivity_state, grpc_error* error) { + RoundRobin* p = static_cast(subchannel_list()->policy()); + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log( + GPR_INFO, + "[RR %p] connectivity changed for subchannel %p, subchannel_list %p " + "(index %" PRIuPTR " of %" PRIuPTR "): prev_state=%s new_state=%s", + p, subchannel(), subchannel_list(), Index(), + subchannel_list()->num_subchannels(), + grpc_connectivity_state_name(last_connectivity_state_), + grpc_connectivity_state_name(connectivity_state)); + } + subchannel_list()->UpdateStateCountersLocked(last_connectivity_state_, + connectivity_state, error); + last_connectivity_state_ = connectivity_state; +} + +void RoundRobin::RoundRobinSubchannelData::ProcessConnectivityChangeLocked( + grpc_connectivity_state connectivity_state, grpc_error* error) { + RoundRobin* p = static_cast(subchannel_list()->policy()); + GPR_ASSERT(subchannel() != nullptr); + // If the new state is TRANSIENT_FAILURE, re-resolve. + // Only do this if we've started watching, not at startup time. + // Otherwise, if the subchannel was already in state TRANSIENT_FAILURE + // when the subchannel list was created, we'd wind up in a constant + // loop of re-resolution. + if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, + "[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. " + "Requesting re-resolution", + p, subchannel()); + } + p->TryReresolutionLocked(&grpc_lb_round_robin_trace, GRPC_ERROR_NONE); + } + // Update state counters. + UpdateConnectivityStateLocked(connectivity_state, error); + // Update overall state and renew notification. + subchannel_list()->UpdateRoundRobinStateFromSubchannelStateCountsLocked(); + RenewConnectivityWatchLocked(); +} + +/** Returns the index into p->subchannel_list->subchannels of the next + * subchannel in READY state, or p->subchannel_list->num_subchannels if no + * subchannel is READY. + * + * Note that this function does *not* update p->last_ready_subchannel_index. + * The caller must do that if it returns a pick. */ +size_t +RoundRobin::RoundRobinSubchannelList::GetNextReadySubchannelIndexLocked() { + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, + "[RR %p] getting next ready subchannel (out of %" PRIuPTR + "), last_ready_index=%" PRIuPTR, + policy(), num_subchannels(), last_ready_index_); + } + for (size_t i = 0; i < num_subchannels(); ++i) { + const size_t index = (i + last_ready_index_ + 1) % num_subchannels(); + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log( + GPR_INFO, + "[RR %p] checking subchannel %p, subchannel_list %p, index %" PRIuPTR + ": state=%s", + policy(), subchannel(index)->subchannel(), this, index, + grpc_connectivity_state_name( + subchannel(index)->connectivity_state())); + } + if (subchannel(index)->connectivity_state() == GRPC_CHANNEL_READY) { + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, + "[RR %p] found next ready subchannel (%p) at index %" PRIuPTR + " of subchannel_list %p", + policy(), subchannel(index)->subchannel(), index, this); + } + return index; + } + } + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, "[RR %p] no subchannels in ready state", this); + } + return num_subchannels(); +} + +// Sets last_ready_index_ to last_ready_index. 
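
[Editorial aside] The index walk in GetNextReadySubchannelIndexLocked() above, i.e. (i + last_ready_index + 1) % num_subchannels, is what gives the policy its round-robin order while skipping subchannels that are not READY. A minimal stand-alone version of the same arithmetic, with a vector<bool> standing in for per-subchannel state:

#include <cstddef>
#include <iostream>
#include <vector>

// Walks the list starting just after the last index handed out, wrapping
// around, and returns the first READY entry; returns size() when nothing
// is READY.
size_t NextReadyIndex(const std::vector<bool>& ready, size_t last_ready_index) {
  const size_t n = ready.size();
  for (size_t i = 0; i < n; ++i) {
    const size_t index = (i + last_ready_index + 1) % n;
    if (ready[index]) return index;
  }
  return n;  // no READY subchannel
}

int main() {
  // Subchannels 0 and 2 are READY, 1 is not.
  std::vector<bool> ready = {true, false, true};
  // Start "before" index 0, equivalent (mod n) to the -1 initial value above.
  size_t last = ready.size() - 1;
  for (int pick = 0; pick < 4; ++pick) {
    last = NextReadyIndex(ready, last);
    std::cout << "pick " << pick << " -> subchannel " << last << "\n";
  }
  // Prints 0, 2, 0, 2: index 1 is skipped until it becomes READY.
  return 0;
}
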
+void RoundRobin::RoundRobinSubchannelList::UpdateLastReadySubchannelIndexLocked( + size_t last_ready_index) { + GPR_ASSERT(last_ready_index < num_subchannels()); + last_ready_index_ = last_ready_index; + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, + "[RR %p] setting last_ready_subchannel_index=%" PRIuPTR + " (SC %p, CSC %p)", + policy(), last_ready_index, + subchannel(last_ready_index)->subchannel(), + subchannel(last_ready_index)->connected_subchannel()); + } +} + +grpc_connectivity_state RoundRobin::CheckConnectivityLocked( + grpc_error** error) { + return grpc_connectivity_state_get(&state_tracker_, error); +} + +void RoundRobin::NotifyOnStateChangeLocked(grpc_connectivity_state* current, + grpc_closure* notify) { + grpc_connectivity_state_notify_on_state_change(&state_tracker_, current, + notify); +} + +void RoundRobin::PingOneLocked(grpc_closure* on_initiate, + grpc_closure* on_ack) { + const size_t next_ready_index = + subchannel_list_->GetNextReadySubchannelIndexLocked(); + if (next_ready_index < subchannel_list_->num_subchannels()) { + RoundRobinSubchannelData* selected = + subchannel_list_->subchannel(next_ready_index); + selected->connected_subchannel()->Ping(on_initiate, on_ack); + } else { + GRPC_CLOSURE_SCHED(on_initiate, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Round Robin not connected")); + GRPC_CLOSURE_SCHED(on_ack, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Round Robin not connected")); + } +} + +void RoundRobin::UpdateLocked(const grpc_channel_args& args) { + const grpc_arg* arg = grpc_channel_args_find(&args, GRPC_ARG_LB_ADDRESSES); + if (arg == nullptr || arg->type != GRPC_ARG_POINTER) { + gpr_log(GPR_ERROR, "[RR %p] update provided no addresses; ignoring", this); + // If we don't have a current subchannel list, go into TRANSIENT_FAILURE. + // Otherwise, keep using the current subchannel list (ignore this update). + if (subchannel_list_ == nullptr) { + grpc_connectivity_state_set( + &state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"), + "rr_update_missing"); + } + return; + } + grpc_lb_addresses* addresses = + static_cast(arg->value.pointer.p); + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, "[RR %p] received update with %" PRIuPTR " addresses", + this, addresses->num_addresses); + } + // Replace latest_pending_subchannel_list_. + if (latest_pending_subchannel_list_ != nullptr) { + if (grpc_lb_round_robin_trace.enabled()) { + gpr_log(GPR_INFO, + "[RR %p] Shutting down previous pending subchannel list %p", this, + latest_pending_subchannel_list_.get()); + } + } + latest_pending_subchannel_list_ = MakeOrphanable( + this, &grpc_lb_round_robin_trace, addresses, combiner(), + client_channel_factory(), args); + // If we haven't started picking yet or the new list is empty, + // immediately promote the new list to the current list. + if (!started_picking_ || + latest_pending_subchannel_list_->num_subchannels() == 0) { + if (latest_pending_subchannel_list_->num_subchannels() == 0) { + grpc_connectivity_state_set( + &state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"), + "rr_update_empty"); + } + subchannel_list_ = std::move(latest_pending_subchannel_list_); + } else { + // If we've started picking, start watching the new list. 
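
[Editorial aside] UpdateLocked() above keeps two slots: the current subchannel list keeps serving picks while latest_pending_subchannel_list_ connects, and the pending list is promoted either immediately (not picking yet, or empty update) or once one of its subchannels reports READY. A toy sketch of that two-slot scheme under those assumptions; Policy, BackendList and OnPendingListReady are illustrative names, not the gRPC classes.

#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct BackendList {
  std::vector<std::string> addresses;
};

class Policy {
 public:
  void Update(std::vector<std::string> addresses) {
    auto incoming = std::make_unique<BackendList>();
    incoming->addresses = std::move(addresses);
    if (current_ == nullptr || incoming->addresses.empty()) {
      current_ = std::move(incoming);  // nothing to protect, swap right away
    } else {
      pending_ = std::move(incoming);  // keep serving from current_ for now
    }
  }

  // Called when some backend in the pending list becomes READY.
  void OnPendingListReady() {
    if (pending_ != nullptr) current_ = std::move(pending_);  // promote
  }

  size_t current_size() const {
    return current_ != nullptr ? current_->addresses.size() : 0;
  }

 private:
  std::unique_ptr<BackendList> current_;
  std::unique_ptr<BackendList> pending_;
};

int main() {
  Policy p;
  p.Update({"10.0.0.1:443"});
  p.Update({"10.0.0.2:443", "10.0.0.3:443"});        // buffered as pending
  std::cout << p.current_size() << " address(es)\n";  // still 1
  p.OnPendingListReady();
  std::cout << p.current_size() << " address(es)\n";  // now 2
  return 0;
}
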
+ latest_pending_subchannel_list_->StartWatchingLocked(); + } +} + +// +// factory +// + +class RoundRobinFactory : public LoadBalancingPolicyFactory { + public: + OrphanablePtr CreateLoadBalancingPolicy( + const LoadBalancingPolicy::Args& args) const override { + return OrphanablePtr(New(args)); + } + + const char* name() const override { return "round_robin"; } +}; + +} // namespace + +} // namespace grpc_core + +void grpc_lb_policy_round_robin_init() { + grpc_core::LoadBalancingPolicyRegistry::Builder:: + RegisterLoadBalancingPolicyFactory( + grpc_core::UniquePtr( + grpc_core::New())); +} + +void grpc_lb_policy_round_robin_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h new file mode 100644 index 000000000..7e2046bcd --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h @@ -0,0 +1,536 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H +#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H + +#include + +#include + +#include + +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/subchannel.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/inlined_vector.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/gprpp/ref_counted.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/closure.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/transport/connectivity_state.h" + +// Code for maintaining a list of subchannels within an LB policy. +// +// To use this, callers must create their own subclasses, like so: +/* + +class MySubchannelList; // Forward declaration. + +class MySubchannelData + : public SubchannelData { + public: + void ProcessConnectivityChangeLocked( + grpc_connectivity_state connectivity_state, grpc_error* error) override { + // ...code to handle connectivity changes... + } +}; + +class MySubchannelList + : public SubchannelList { +}; + +*/ +// All methods with a Locked() suffix must be called from within the +// client_channel combiner. + +namespace grpc_core { + +// Stores data for a particular subchannel in a subchannel list. +// Callers must create a subclass that implements the +// ProcessConnectivityChangeLocked() method. +template +class SubchannelData { + public: + // Returns a pointer to the subchannel list containing this object. + SubchannelListType* subchannel_list() const { return subchannel_list_; } + + // Returns the index into the subchannel list of this object. 
+ size_t Index() const { + return static_cast(static_cast(this) - + subchannel_list_->subchannel(0)); + } + + // Returns a pointer to the subchannel. + grpc_subchannel* subchannel() const { return subchannel_; } + + // Returns the connected subchannel. Will be null if the subchannel + // is not connected. + ConnectedSubchannel* connected_subchannel() const { + return connected_subchannel_.get(); + } + + // Synchronously checks the subchannel's connectivity state. + // Must not be called while there is a connectivity notification + // pending (i.e., between calling StartConnectivityWatchLocked() or + // RenewConnectivityWatchLocked() and the resulting invocation of + // ProcessConnectivityChangeLocked()). + grpc_connectivity_state CheckConnectivityStateLocked(grpc_error** error) { + GPR_ASSERT(!connectivity_notification_pending_); + pending_connectivity_state_unsafe_ = + grpc_subchannel_check_connectivity(subchannel(), error); + UpdateConnectedSubchannelLocked(); + return pending_connectivity_state_unsafe_; + } + + // Unrefs the subchannel. May be used if an individual subchannel is + // no longer needed even though the subchannel list as a whole is not + // being unreffed. + virtual void UnrefSubchannelLocked(const char* reason); + + // Starts watching the connectivity state of the subchannel. + // ProcessConnectivityChangeLocked() will be called when the + // connectivity state changes. + void StartConnectivityWatchLocked(); + + // Renews watching the connectivity state of the subchannel. + void RenewConnectivityWatchLocked(); + + // Stops watching the connectivity state of the subchannel. + void StopConnectivityWatchLocked(); + + // Cancels watching the connectivity state of the subchannel. + // Must be called only while there is a connectivity notification + // pending (i.e., between calling StartConnectivityWatchLocked() or + // RenewConnectivityWatchLocked() and the resulting invocation of + // ProcessConnectivityChangeLocked()). + // From within ProcessConnectivityChangeLocked(), use + // StopConnectivityWatchLocked() instead. + void CancelConnectivityWatchLocked(const char* reason); + + // Cancels any pending connectivity watch and unrefs the subchannel. + void ShutdownLocked(); + + GRPC_ABSTRACT_BASE_CLASS + + protected: + SubchannelData(SubchannelListType* subchannel_list, + const grpc_lb_user_data_vtable* user_data_vtable, + const grpc_lb_address& address, grpc_subchannel* subchannel, + grpc_combiner* combiner); + + virtual ~SubchannelData(); + + // After StartConnectivityWatchLocked() or RenewConnectivityWatchLocked() + // is called, this method will be invoked when the subchannel's connectivity + // state changes. + // Implementations must invoke either RenewConnectivityWatchLocked() or + // StopConnectivityWatchLocked() before returning. + virtual void ProcessConnectivityChangeLocked( + grpc_connectivity_state connectivity_state, + grpc_error* error) GRPC_ABSTRACT; + + private: + // Updates connected_subchannel_ based on pending_connectivity_state_unsafe_. + // Returns true if the connectivity state should be reported. + bool UpdateConnectedSubchannelLocked(); + + static void OnConnectivityChangedLocked(void* arg, grpc_error* error); + + // Backpointer to owning subchannel list. Not owned. + SubchannelListType* subchannel_list_; + + // The subchannel and connected subchannel. + grpc_subchannel* subchannel_; + RefCountedPtr connected_subchannel_; + + // Notification that connectivity has changed on subchannel. 
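
[Editorial aside] Index() above recovers an element's position purely from pointer arithmetic, which works because the subchannel list stores its SubchannelData objects contiguously. The same idea in a stand-alone sketch; Entry and Registry are illustrative names.

#include <cassert>
#include <cstddef>
#include <vector>

struct Registry;  // forward declaration

struct Entry {
  Registry* owner = nullptr;
  size_t Index() const;  // defined once Registry is complete
};

struct Registry {
  std::vector<Entry> entries;  // contiguous storage
};

size_t Entry::Index() const {
  // Pointer difference between this element and the first element of the
  // contiguous vector yields this element's index.
  return static_cast<size_t>(this - &owner->entries[0]);
}

int main() {
  Registry r;
  r.entries.resize(4);
  for (Entry& e : r.entries) e.owner = &r;
  assert(r.entries[2].Index() == 2);
  return 0;
}
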
+ grpc_closure connectivity_changed_closure_; + // Is a connectivity notification pending? + bool connectivity_notification_pending_ = false; + // Connectivity state to be updated by + // grpc_subchannel_notify_on_state_change(), not guarded by + // the combiner. + grpc_connectivity_state pending_connectivity_state_unsafe_; +}; + +// A list of subchannels. +template +class SubchannelList + : public InternallyRefCountedWithTracing { + public: + typedef InlinedVector SubchannelVector; + + // The number of subchannels in the list. + size_t num_subchannels() const { return subchannels_.size(); } + + // The data for the subchannel at a particular index. + SubchannelDataType* subchannel(size_t index) { return &subchannels_[index]; } + + // Returns true if the subchannel list is shutting down. + bool shutting_down() const { return shutting_down_; } + + // Accessors. + LoadBalancingPolicy* policy() const { return policy_; } + TraceFlag* tracer() const { return tracer_; } + + // Note: Caller must ensure that this is invoked inside of the combiner. + void Orphan() override { + ShutdownLocked(); + InternallyRefCountedWithTracing::Unref(DEBUG_LOCATION, + "shutdown"); + } + + GRPC_ABSTRACT_BASE_CLASS + + protected: + SubchannelList(LoadBalancingPolicy* policy, TraceFlag* tracer, + const grpc_lb_addresses* addresses, grpc_combiner* combiner, + grpc_client_channel_factory* client_channel_factory, + const grpc_channel_args& args); + + virtual ~SubchannelList(); + + private: + // So New() can call our private ctor. + template + friend T* New(Args&&... args); + + // For accessing Ref() and Unref(). + friend class SubchannelData; + + void ShutdownLocked(); + + // Backpointer to owning policy. + LoadBalancingPolicy* policy_; + + TraceFlag* tracer_; + + grpc_combiner* combiner_; + + // The list of subchannels. + SubchannelVector subchannels_; + + // Is this list shutting down? This may be true due to the shutdown of the + // policy itself or because a newer update has arrived while this one hadn't + // finished processing. + bool shutting_down_ = false; +}; + +// +// implementation -- no user-servicable parts below +// + +// +// SubchannelData +// + +template +SubchannelData::SubchannelData( + SubchannelListType* subchannel_list, + const grpc_lb_user_data_vtable* user_data_vtable, + const grpc_lb_address& address, grpc_subchannel* subchannel, + grpc_combiner* combiner) + : subchannel_list_(subchannel_list), + subchannel_(subchannel), + // We assume that the current state is IDLE. If not, we'll get a + // callback telling us that. 
+ pending_connectivity_state_unsafe_(GRPC_CHANNEL_IDLE) { + GRPC_CLOSURE_INIT( + &connectivity_changed_closure_, + (&SubchannelData::OnConnectivityChangedLocked), + this, grpc_combiner_scheduler(combiner)); +} + +template +SubchannelData::~SubchannelData() { + UnrefSubchannelLocked("subchannel_data_destroy"); +} + +template +void SubchannelData:: + UnrefSubchannelLocked(const char* reason) { + if (subchannel_ != nullptr) { + if (subchannel_list_->tracer()->enabled()) { + gpr_log(GPR_INFO, + "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR + " (subchannel %p): unreffing subchannel", + subchannel_list_->tracer()->name(), subchannel_list_->policy(), + subchannel_list_, Index(), subchannel_list_->num_subchannels(), + subchannel_); + } + GRPC_SUBCHANNEL_UNREF(subchannel_, reason); + subchannel_ = nullptr; + connected_subchannel_.reset(); + } +} + +template +void SubchannelData::StartConnectivityWatchLocked() { + if (subchannel_list_->tracer()->enabled()) { + gpr_log(GPR_INFO, + "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR + " (subchannel %p): starting watch: requesting connectivity change " + "notification (from %s)", + subchannel_list_->tracer()->name(), subchannel_list_->policy(), + subchannel_list_, Index(), subchannel_list_->num_subchannels(), + subchannel_, + grpc_connectivity_state_name(pending_connectivity_state_unsafe_)); + } + GPR_ASSERT(!connectivity_notification_pending_); + connectivity_notification_pending_ = true; + subchannel_list()->Ref(DEBUG_LOCATION, "connectivity_watch").release(); + grpc_subchannel_notify_on_state_change( + subchannel_, subchannel_list_->policy()->interested_parties(), + &pending_connectivity_state_unsafe_, &connectivity_changed_closure_); +} + +template +void SubchannelData::RenewConnectivityWatchLocked() { + if (subchannel_list_->tracer()->enabled()) { + gpr_log(GPR_INFO, + "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR + " (subchannel %p): renewing watch: requesting connectivity change " + "notification (from %s)", + subchannel_list_->tracer()->name(), subchannel_list_->policy(), + subchannel_list_, Index(), subchannel_list_->num_subchannels(), + subchannel_, + grpc_connectivity_state_name(pending_connectivity_state_unsafe_)); + } + GPR_ASSERT(connectivity_notification_pending_); + grpc_subchannel_notify_on_state_change( + subchannel_, subchannel_list_->policy()->interested_parties(), + &pending_connectivity_state_unsafe_, &connectivity_changed_closure_); +} + +template +void SubchannelData::StopConnectivityWatchLocked() { + if (subchannel_list_->tracer()->enabled()) { + gpr_log(GPR_INFO, + "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR + " (subchannel %p): stopping connectivity watch", + subchannel_list_->tracer()->name(), subchannel_list_->policy(), + subchannel_list_, Index(), subchannel_list_->num_subchannels(), + subchannel_); + } + GPR_ASSERT(connectivity_notification_pending_); + connectivity_notification_pending_ = false; + subchannel_list()->Unref(DEBUG_LOCATION, "connectivity_watch"); +} + +template +void SubchannelData:: + CancelConnectivityWatchLocked(const char* reason) { + if (subchannel_list_->tracer()->enabled()) { + gpr_log(GPR_INFO, + "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR + " (subchannel %p): canceling connectivity watch (%s)", + subchannel_list_->tracer()->name(), subchannel_list_->policy(), + subchannel_list_, Index(), subchannel_list_->num_subchannels(), + subchannel_, reason); + } + GPR_ASSERT(connectivity_notification_pending_); + 
grpc_subchannel_notify_on_state_change(subchannel_, nullptr, nullptr, + &connectivity_changed_closure_); +} + +template +bool SubchannelData::UpdateConnectedSubchannelLocked() { + // If the subchannel is READY, take a ref to the connected subchannel. + if (pending_connectivity_state_unsafe_ == GRPC_CHANNEL_READY) { + connected_subchannel_ = + grpc_subchannel_get_connected_subchannel(subchannel_); + // If the subchannel became disconnected between the time that READY + // was reported and the time we got here (e.g., between when a + // notification callback is scheduled and when it was actually run in + // the combiner), then the connected subchannel may have disappeared out + // from under us. In that case, we don't actually want to consider the + // subchannel to be in state READY. Instead, we use IDLE as the + // basis for any future connectivity watch; this is the one state that + // the subchannel will never transition back into, so this ensures + // that we will get a notification for the next state, even if that state + // is READY again (e.g., if the subchannel has transitioned back to + // READY before the next watch gets requested). + if (connected_subchannel_ == nullptr) { + if (subchannel_list_->tracer()->enabled()) { + gpr_log(GPR_INFO, + "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR + " (subchannel %p): state is READY but connected subchannel is " + "null; moving to state IDLE", + subchannel_list_->tracer()->name(), subchannel_list_->policy(), + subchannel_list_, Index(), subchannel_list_->num_subchannels(), + subchannel_); + } + pending_connectivity_state_unsafe_ = GRPC_CHANNEL_IDLE; + return false; + } + } else { + // For any state other than READY, unref the connected subchannel. + connected_subchannel_.reset(); + } + return true; +} + +template +void SubchannelData:: + OnConnectivityChangedLocked(void* arg, grpc_error* error) { + SubchannelData* sd = static_cast(arg); + if (sd->subchannel_list_->tracer()->enabled()) { + gpr_log( + GPR_INFO, + "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR + " (subchannel %p): connectivity changed: state=%s, error=%s, " + "shutting_down=%d", + sd->subchannel_list_->tracer()->name(), sd->subchannel_list_->policy(), + sd->subchannel_list_, sd->Index(), + sd->subchannel_list_->num_subchannels(), sd->subchannel_, + grpc_connectivity_state_name(sd->pending_connectivity_state_unsafe_), + grpc_error_string(error), sd->subchannel_list_->shutting_down()); + } + // If shutting down, unref subchannel and stop watching. + if (sd->subchannel_list_->shutting_down() || error == GRPC_ERROR_CANCELLED) { + sd->UnrefSubchannelLocked("connectivity_shutdown"); + sd->StopConnectivityWatchLocked(); + return; + } + // Get or release ref to connected subchannel. + if (!sd->UpdateConnectedSubchannelLocked()) { + // We don't want to report this connectivity state, so renew the watch. + sd->RenewConnectivityWatchLocked(); + return; + } + // Call the subclass's ProcessConnectivityChangeLocked() method. + sd->ProcessConnectivityChangeLocked(sd->pending_connectivity_state_unsafe_, + GRPC_ERROR_REF(error)); +} + +template +void SubchannelData::ShutdownLocked() { + // If there's a pending notification for this subchannel, cancel it; + // the callback is responsible for unreffing the subchannel. + // Otherwise, unref the subchannel directly. 
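
[Editorial aside] UpdateConnectedSubchannelLocked() above guards against a race: READY may have been reported, yet the connected subchannel can already be gone by the time the notification runs in the combiner, in which case the state is treated as IDLE and not reported. A small stand-alone model of that decision; Resolve, Connection and EffectiveState are illustrative names.

#include <cassert>
#include <memory>

// Simplified stand-in for grpc_connectivity_state.
enum class State { kIdle, kConnecting, kReady, kTransientFailure };

struct Connection {};  // stand-in for a connected-subchannel handle

struct EffectiveState {
  State state;
  bool report;  // false => renew the watch instead of reporting
};

// A READY notification is only trusted if the connected handle still
// exists; if it vanished in the meantime, downgrade to IDLE and skip the
// report, so the next transition is still observed.
EffectiveState Resolve(State reported,
                       const std::shared_ptr<Connection>& conn) {
  if (reported == State::kReady && conn == nullptr) {
    return {State::kIdle, /*report=*/false};
  }
  return {reported, /*report=*/true};
}

int main() {
  auto live = std::make_shared<Connection>();
  assert(Resolve(State::kReady, live).report);
  // Connection dropped between the notification being scheduled and run:
  EffectiveState e = Resolve(State::kReady, nullptr);
  assert(!e.report && e.state == State::kIdle);
  return 0;
}
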
+  if (connectivity_notification_pending_) {
+    CancelConnectivityWatchLocked("shutdown");
+  } else if (subchannel_ != nullptr) {
+    UnrefSubchannelLocked("shutdown");
+  }
+}
+
+//
+// SubchannelList
+//
+
+template <typename SubchannelListType, typename SubchannelDataType>
+SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
+    LoadBalancingPolicy* policy, TraceFlag* tracer,
+    const grpc_lb_addresses* addresses, grpc_combiner* combiner,
+    grpc_client_channel_factory* client_channel_factory,
+    const grpc_channel_args& args)
+    : InternallyRefCountedWithTracing<SubchannelListType>(tracer),
+      policy_(policy),
+      tracer_(tracer),
+      combiner_(GRPC_COMBINER_REF(combiner, "subchannel_list")) {
+  if (tracer_->enabled()) {
+    gpr_log(GPR_INFO,
+            "[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
+            tracer_->name(), policy, this, addresses->num_addresses);
+  }
+  subchannels_.reserve(addresses->num_addresses);
+  // We need to remove the LB addresses in order to be able to compare the
+  // subchannel keys of subchannels from a different batch of addresses.
+  static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS,
+                                         GRPC_ARG_LB_ADDRESSES};
+  // Create a subchannel for each address.
+  grpc_subchannel_args sc_args;
+  for (size_t i = 0; i < addresses->num_addresses; i++) {
+    // If there were any balancer, we would have chosen grpclb policy instead.
+    GPR_ASSERT(!addresses->addresses[i].is_balancer);
+    memset(&sc_args, 0, sizeof(grpc_subchannel_args));
+    grpc_arg addr_arg =
+        grpc_create_subchannel_address_arg(&addresses->addresses[i].address);
+    grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
+        &args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &addr_arg, 1);
+    gpr_free(addr_arg.value.string);
+    sc_args.args = new_args;
+    grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
+        client_channel_factory, &sc_args);
+    grpc_channel_args_destroy(new_args);
+    if (subchannel == nullptr) {
+      // Subchannel could not be created.
+ if (tracer_->enabled()) { + char* address_uri = + grpc_sockaddr_to_uri(&addresses->addresses[i].address); + gpr_log(GPR_INFO, + "[%s %p] could not create subchannel for address uri %s, " + "ignoring", + tracer_->name(), policy_, address_uri); + gpr_free(address_uri); + } + continue; + } + if (tracer_->enabled()) { + char* address_uri = + grpc_sockaddr_to_uri(&addresses->addresses[i].address); + gpr_log(GPR_INFO, + "[%s %p] subchannel list %p index %" PRIuPTR + ": Created subchannel %p for address uri %s", + tracer_->name(), policy_, this, subchannels_.size(), subchannel, + address_uri); + gpr_free(address_uri); + } + subchannels_.emplace_back(static_cast(this), + addresses->user_data_vtable, + addresses->addresses[i], subchannel, combiner); + } +} + +template +SubchannelList::~SubchannelList() { + if (tracer_->enabled()) { + gpr_log(GPR_INFO, "[%s %p] Destroying subchannel_list %p", tracer_->name(), + policy_, this); + } + GRPC_COMBINER_UNREF(combiner_, "subchannel_list"); +} + +template +void SubchannelList::ShutdownLocked() { + if (tracer_->enabled()) { + gpr_log(GPR_INFO, "[%s %p] Shutting down subchannel_list %p", + tracer_->name(), policy_, this); + } + GPR_ASSERT(!shutting_down_); + shutting_down_ = true; + for (size_t i = 0; i < subchannels_.size(); i++) { + SubchannelDataType* sd = &subchannels_[i]; + sd->ShutdownLocked(); + } +} + +} // namespace grpc_core + +#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.cc similarity index 74% rename from Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.cc index 05ab43d0b..7c8cba55b 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -29,11 +31,12 @@ grpc_lb_addresses* grpc_lb_addresses_create( size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable) { grpc_lb_addresses* addresses = - (grpc_lb_addresses*)gpr_zalloc(sizeof(grpc_lb_addresses)); + static_cast(gpr_zalloc(sizeof(grpc_lb_addresses))); addresses->num_addresses = num_addresses; addresses->user_data_vtable = user_data_vtable; const size_t addresses_size = sizeof(grpc_lb_address) * num_addresses; - addresses->addresses = (grpc_lb_address*)gpr_zalloc(addresses_size); + addresses->addresses = + static_cast(gpr_zalloc(addresses_size)); return addresses; } @@ -43,11 +46,11 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) { memcpy(new_addresses->addresses, addresses->addresses, sizeof(grpc_lb_address) * addresses->num_addresses); for (size_t i = 0; i < addresses->num_addresses; ++i) { - if (new_addresses->addresses[i].balancer_name != NULL) { + if (new_addresses->addresses[i].balancer_name != nullptr) { new_addresses->addresses[i].balancer_name = gpr_strdup(new_addresses->addresses[i].balancer_name); } - if (new_addresses->addresses[i].user_data != NULL) { + if (new_addresses->addresses[i].user_data != nullptr) { new_addresses->addresses[i].user_data = addresses->user_data_vtable->copy( new_addresses->addresses[i].user_data); } @@ -60,10 +63,10 @@ void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index, bool is_balancer, const char* balancer_name, void* user_data) { GPR_ASSERT(index < 
addresses->num_addresses); - if (user_data != NULL) GPR_ASSERT(addresses->user_data_vtable != NULL); + if (user_data != nullptr) GPR_ASSERT(addresses->user_data_vtable != nullptr); grpc_lb_address* target = &addresses->addresses[index]; memcpy(target->address.addr, address, address_len); - target->address.len = address_len; + target->address.len = static_cast(address_len); target->is_balancer = is_balancer; target->balancer_name = gpr_strdup(balancer_name); target->user_data = user_data; @@ -98,12 +101,12 @@ int grpc_lb_addresses_cmp(const grpc_lb_addresses* addresses1, if (target1->is_balancer > target2->is_balancer) return 1; if (target1->is_balancer < target2->is_balancer) return -1; const char* balancer_name1 = - target1->balancer_name != NULL ? target1->balancer_name : ""; + target1->balancer_name != nullptr ? target1->balancer_name : ""; const char* balancer_name2 = - target2->balancer_name != NULL ? target2->balancer_name : ""; + target2->balancer_name != nullptr ? target2->balancer_name : ""; retval = strcmp(balancer_name1, balancer_name2); if (retval != 0) return retval; - if (addresses1->user_data_vtable != NULL) { + if (addresses1->user_data_vtable != nullptr) { retval = addresses1->user_data_vtable->cmp(target1->user_data, target2->user_data); if (retval != 0) return retval; @@ -112,13 +115,11 @@ int grpc_lb_addresses_cmp(const grpc_lb_addresses* addresses1, return 0; } -void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx, - grpc_lb_addresses* addresses) { +void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses) { for (size_t i = 0; i < addresses->num_addresses; ++i) { gpr_free(addresses->addresses[i].balancer_name); - if (addresses->addresses[i].user_data != NULL) { - addresses->user_data_vtable->destroy(exec_ctx, - addresses->addresses[i].user_data); + if (addresses->addresses[i].user_data != nullptr) { + addresses->user_data_vtable->destroy(addresses->addresses[i].user_data); } } gpr_free(addresses->addresses); @@ -126,14 +127,14 @@ void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx, } static void* lb_addresses_copy(void* addresses) { - return grpc_lb_addresses_copy((grpc_lb_addresses*)addresses); + return grpc_lb_addresses_copy(static_cast(addresses)); } -static void lb_addresses_destroy(grpc_exec_ctx* exec_ctx, void* addresses) { - grpc_lb_addresses_destroy(exec_ctx, (grpc_lb_addresses*)addresses); +static void lb_addresses_destroy(void* addresses) { + grpc_lb_addresses_destroy(static_cast(addresses)); } static int lb_addresses_cmp(void* addresses1, void* addresses2) { - return grpc_lb_addresses_cmp((grpc_lb_addresses*)addresses1, - (grpc_lb_addresses*)addresses2); + return grpc_lb_addresses_cmp(static_cast(addresses1), + static_cast(addresses2)); } static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = { lb_addresses_copy, lb_addresses_destroy, lb_addresses_cmp}; @@ -148,22 +149,7 @@ grpc_lb_addresses* grpc_lb_addresses_find_channel_arg( const grpc_channel_args* channel_args) { const grpc_arg* lb_addresses_arg = grpc_channel_args_find(channel_args, GRPC_ARG_LB_ADDRESSES); - if (lb_addresses_arg == NULL || lb_addresses_arg->type != GRPC_ARG_POINTER) - return NULL; - return (grpc_lb_addresses*)lb_addresses_arg->value.pointer.p; -} - -void grpc_lb_policy_factory_ref(grpc_lb_policy_factory* factory) { - factory->vtable->ref(factory); -} - -void grpc_lb_policy_factory_unref(grpc_lb_policy_factory* factory) { - factory->vtable->unref(factory); -} - -grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy( - grpc_exec_ctx* exec_ctx, grpc_lb_policy_factory* 
factory, - grpc_lb_policy_args* args) { - if (factory == NULL) return NULL; - return factory->vtable->create_lb_policy(exec_ctx, factory, args); + if (lb_addresses_arg == nullptr || lb_addresses_arg->type != GRPC_ARG_POINTER) + return nullptr; + return static_cast(lb_addresses_arg->value.pointer.p); } diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.h index cf0f8cb61..644025815 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_factory.h @@ -19,115 +19,109 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H -#include "src/core/lib/iomgr/exec_ctx.h" +#include + #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/ext/filters/client_channel/client_channel_factory.h" #include "src/core/ext/filters/client_channel/lb_policy.h" #include "src/core/ext/filters/client_channel/uri_parser.h" +// +// representation of an LB address +// + // Channel arg key for grpc_lb_addresses. #define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses" -typedef struct grpc_lb_policy_factory grpc_lb_policy_factory; -typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable; - -struct grpc_lb_policy_factory { - const grpc_lb_policy_factory_vtable *vtable; -}; - /** A resolved address alongside any LB related information associated with it. * \a user_data, if not NULL, contains opaque data meant to be consumed by the * gRPC LB policy. Note that no all LB policies support \a user_data as input. * Those who don't will simply ignore it and will correspondingly return NULL in * their namesake pick() output argument. */ +// TODO(roth): Once we figure out a better way of handling user_data in +// LB policies, convert these structs to C++ classes. typedef struct grpc_lb_address { grpc_resolved_address address; bool is_balancer; - char *balancer_name; /* For secure naming. */ - void *user_data; + char* balancer_name; /* For secure naming. */ + void* user_data; } grpc_lb_address; typedef struct grpc_lb_user_data_vtable { - void *(*copy)(void *); - void (*destroy)(grpc_exec_ctx *exec_ctx, void *); - int (*cmp)(void *, void *); + void* (*copy)(void*); + void (*destroy)(void*); + int (*cmp)(void*, void*); } grpc_lb_user_data_vtable; typedef struct grpc_lb_addresses { size_t num_addresses; - grpc_lb_address *addresses; - const grpc_lb_user_data_vtable *user_data_vtable; + grpc_lb_address* addresses; + const grpc_lb_user_data_vtable* user_data_vtable; } grpc_lb_addresses; /** Returns a grpc_addresses struct with enough space for \a num_addresses addresses. The \a user_data_vtable argument may be NULL if no user data will be added. */ -grpc_lb_addresses *grpc_lb_addresses_create( - size_t num_addresses, const grpc_lb_user_data_vtable *user_data_vtable); +grpc_lb_addresses* grpc_lb_addresses_create( + size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable); /** Creates a copy of \a addresses. */ -grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses); +grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses); /** Sets the value of the address at index \a index of \a addresses. * \a address is a socket address of length \a address_len. * Takes ownership of \a balancer_name. 
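
Taken together, the C-style helpers kept in this header form a small lifecycle: create a grpc_lb_addresses, fill each slot, hand the whole thing to the channel as a pointer arg (which copies it through the vtable defined in the .cc above), and destroy the original. A minimal sketch under those assumptions, using only functions whose post-exec_ctx signatures appear in this patch plus grpc_channel_args_copy_and_add; the wrapper function name is illustrative.

#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/lib/channel/channel_args.h"

// Builds a one-entry grpc_lb_addresses from a literal IPv4 "host:port",
// wraps it in a channel arg, then frees the local copy.
static grpc_channel_args* make_lb_addresses_arg() {
  grpc_resolved_address resolved;
  // grpc_parse_ipv4_hostport() (also updated in this patch) fills in a
  // grpc_resolved_address from a bare "IP:port" string.
  if (!grpc_parse_ipv4_hostport("127.0.0.1:50051", &resolved,
                                true /* log_errors */)) {
    return nullptr;
  }
  grpc_lb_addresses* addresses =
      grpc_lb_addresses_create(1, nullptr /* user_data_vtable */);
  grpc_lb_addresses_set_address(addresses, 0, resolved.addr, resolved.len,
                                false /* is_balancer */,
                                nullptr /* balancer_name */,
                                nullptr /* user_data */);
  // The channel arg only borrows the addresses; adding it to a
  // grpc_channel_args copies them via lb_addresses_arg_vtable, so the
  // original can be destroyed immediately afterwards.
  grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
  grpc_channel_args* args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
  grpc_lb_addresses_destroy(addresses);
  return args;  // caller frees with grpc_channel_args_destroy()
}
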
*/ -void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index, - const void *address, size_t address_len, - bool is_balancer, const char *balancer_name, - void *user_data); +void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index, + const void* address, size_t address_len, + bool is_balancer, const char* balancer_name, + void* user_data); /** Sets the value of the address at index \a index of \a addresses from \a uri. * Returns true upon success, false otherwise. Takes ownership of \a * balancer_name. */ -bool grpc_lb_addresses_set_address_from_uri(grpc_lb_addresses *addresses, - size_t index, const grpc_uri *uri, +bool grpc_lb_addresses_set_address_from_uri(grpc_lb_addresses* addresses, + size_t index, const grpc_uri* uri, bool is_balancer, - const char *balancer_name, - void *user_data); + const char* balancer_name, + void* user_data); /** Compares \a addresses1 and \a addresses2. */ -int grpc_lb_addresses_cmp(const grpc_lb_addresses *addresses1, - const grpc_lb_addresses *addresses2); +int grpc_lb_addresses_cmp(const grpc_lb_addresses* addresses1, + const grpc_lb_addresses* addresses2); /** Destroys \a addresses. */ -void grpc_lb_addresses_destroy(grpc_exec_ctx *exec_ctx, - grpc_lb_addresses *addresses); +void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses); /** Returns a channel arg containing \a addresses. */ grpc_arg grpc_lb_addresses_create_channel_arg( - const grpc_lb_addresses *addresses); + const grpc_lb_addresses* addresses); /** Returns the \a grpc_lb_addresses instance in \a channel_args or NULL */ -grpc_lb_addresses *grpc_lb_addresses_find_channel_arg( - const grpc_channel_args *channel_args); - -/** Arguments passed to LB policies. */ -struct grpc_lb_policy_args { - grpc_client_channel_factory *client_channel_factory; - grpc_channel_args *args; - grpc_combiner *combiner; -}; +grpc_lb_addresses* grpc_lb_addresses_find_channel_arg( + const grpc_channel_args* channel_args); -struct grpc_lb_policy_factory_vtable { - void (*ref)(grpc_lb_policy_factory *factory); - void (*unref)(grpc_lb_policy_factory *factory); +// +// LB policy factory +// - /** Implementation of grpc_lb_policy_factory_create_lb_policy */ - grpc_lb_policy *(*create_lb_policy)(grpc_exec_ctx *exec_ctx, - grpc_lb_policy_factory *factory, - grpc_lb_policy_args *args); +namespace grpc_core { - /** Name for the LB policy this factory implements */ - const char *name; -}; +class LoadBalancingPolicyFactory { + public: + /// Returns a new LB policy instance. + virtual OrphanablePtr CreateLoadBalancingPolicy( + const LoadBalancingPolicy::Args& args) const GRPC_ABSTRACT; + + /// Returns the LB policy name that this factory provides. + /// Caller does NOT take ownership of result. + virtual const char* name() const GRPC_ABSTRACT; -void grpc_lb_policy_factory_ref(grpc_lb_policy_factory *factory); -void grpc_lb_policy_factory_unref(grpc_lb_policy_factory *factory); + virtual ~LoadBalancingPolicyFactory() {} + + GRPC_ABSTRACT_BASE_CLASS +}; -/** Create a lb_policy instance. 
*/ -grpc_lb_policy *grpc_lb_policy_factory_create_lb_policy( - grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory, - grpc_lb_policy_args *args); +} // namespace grpc_core #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.c b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.c deleted file mode 100644 index f2460f830..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/filters/client_channel/lb_policy_registry.h" - -#include - -#include "src/core/lib/support/string.h" - -#define MAX_POLICIES 10 - -static grpc_lb_policy_factory *g_all_of_the_lb_policies[MAX_POLICIES]; -static int g_number_of_lb_policies = 0; - -void grpc_lb_policy_registry_init(void) { g_number_of_lb_policies = 0; } - -void grpc_lb_policy_registry_shutdown(void) { - int i; - for (i = 0; i < g_number_of_lb_policies; i++) { - grpc_lb_policy_factory_unref(g_all_of_the_lb_policies[i]); - } -} - -void grpc_register_lb_policy(grpc_lb_policy_factory *factory) { - int i; - for (i = 0; i < g_number_of_lb_policies; i++) { - GPR_ASSERT(0 != gpr_stricmp(factory->vtable->name, - g_all_of_the_lb_policies[i]->vtable->name)); - } - GPR_ASSERT(g_number_of_lb_policies != MAX_POLICIES); - grpc_lb_policy_factory_ref(factory); - g_all_of_the_lb_policies[g_number_of_lb_policies++] = factory; -} - -static grpc_lb_policy_factory *lookup_factory(const char *name) { - int i; - - if (name == NULL) return NULL; - - for (i = 0; i < g_number_of_lb_policies; i++) { - if (0 == gpr_stricmp(name, g_all_of_the_lb_policies[i]->vtable->name)) { - return g_all_of_the_lb_policies[i]; - } - } - - return NULL; -} - -grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name, - grpc_lb_policy_args *args) { - grpc_lb_policy_factory *factory = lookup_factory(name); - grpc_lb_policy *lb_policy = - grpc_lb_policy_factory_create_lb_policy(exec_ctx, factory, args); - return lb_policy; -} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.cc new file mode 100644 index 000000000..d651b1120 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.cc @@ -0,0 +1,97 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" + +#include + +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/inlined_vector.h" + +namespace grpc_core { + +namespace { + +class RegistryState { + public: + RegistryState() {} + + void RegisterLoadBalancingPolicyFactory( + UniquePtr factory) { + for (size_t i = 0; i < factories_.size(); ++i) { + GPR_ASSERT(strcmp(factories_[i]->name(), factory->name()) != 0); + } + factories_.push_back(std::move(factory)); + } + + LoadBalancingPolicyFactory* GetLoadBalancingPolicyFactory( + const char* name) const { + for (size_t i = 0; i < factories_.size(); ++i) { + if (strcmp(name, factories_[i]->name()) == 0) { + return factories_[i].get(); + } + } + return nullptr; + } + + private: + InlinedVector, 10> factories_; +}; + +RegistryState* g_state = nullptr; + +} // namespace + +// +// LoadBalancingPolicyRegistry::Builder +// + +void LoadBalancingPolicyRegistry::Builder::InitRegistry() { + if (g_state == nullptr) g_state = New(); +} + +void LoadBalancingPolicyRegistry::Builder::ShutdownRegistry() { + Delete(g_state); + g_state = nullptr; +} + +void LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory( + UniquePtr factory) { + InitRegistry(); + g_state->RegisterLoadBalancingPolicyFactory(std::move(factory)); +} + +// +// LoadBalancingPolicyRegistry +// + +OrphanablePtr +LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( + const char* name, const LoadBalancingPolicy::Args& args) { + GPR_ASSERT(g_state != nullptr); + // Find factory. + LoadBalancingPolicyFactory* factory = + g_state->GetLoadBalancingPolicyFactory(name); + if (factory == nullptr) return nullptr; // Specified name not found. + // Create policy via factory. + return factory->CreateLoadBalancingPolicy(args); +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.h b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.h index f5995687c..2e9bb061e 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/lb_policy_registry.h @@ -19,22 +19,36 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H +#include + #include "src/core/ext/filters/client_channel/lb_policy_factory.h" -#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/orphanable.h" -/** Initialize the registry and set \a default_factory as the factory to be - * returned when no name is provided in a lookup */ -void grpc_lb_policy_registry_init(void); -void grpc_lb_policy_registry_shutdown(void); +namespace grpc_core { -/** Register a LB policy factory. */ -void grpc_register_lb_policy(grpc_lb_policy_factory *factory); +class LoadBalancingPolicyRegistry { + public: + /// Methods used to create and populate the LoadBalancingPolicyRegistry. + /// NOT THREAD SAFE -- to be used only during global gRPC + /// initialization and shutdown. + class Builder { + public: + /// Global initialization and shutdown hooks. + static void InitRegistry(); + static void ShutdownRegistry(); -/** Create a \a grpc_lb_policy instance. - * - * If \a name is NULL, the default factory from \a grpc_lb_policy_registry_init - * will be returned. 
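
The registry above replaces the old ref-counted factory vtable with a simpler ownership model: factories are registered once during global initialization and owned by the registry until shutdown. A hedged sketch of what a registration site looks like under this API; MyPolicyFactory and "my_policy" are placeholders, and a real factory would construct an actual policy instead of returning nullptr.

#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/lib/gprpp/memory.h"

namespace grpc_core {

class MyPolicyFactory : public LoadBalancingPolicyFactory {
 public:
  OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
      const LoadBalancingPolicy::Args& args) const override {
    // A real factory would return MakeOrphanable<MyPolicy>(args) here.
    return nullptr;
  }
  const char* name() const override { return "my_policy"; }
};

// Typically called from a plugin init hook during grpc_init(); the registry
// takes ownership of the factory.
void RegisterMyPolicy() {
  LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory(
      UniquePtr<LoadBalancingPolicyFactory>(New<MyPolicyFactory>()));
}

}  // namespace grpc_core
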
*/ -grpc_lb_policy *grpc_lb_policy_create(grpc_exec_ctx *exec_ctx, const char *name, - grpc_lb_policy_args *args); + /// Registers an LB policy factory. The factory will be used to create an + /// LB policy whose name matches that of the factory. + static void RegisterLoadBalancingPolicyFactory( + UniquePtr factory); + }; + + /// Creates an LB policy of the type specified by \a name. + static OrphanablePtr CreateLoadBalancingPolicy( + const char* name, const LoadBalancingPolicy::Args& args); +}; + +} // namespace grpc_core #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/method_params.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/method_params.cc new file mode 100644 index 000000000..1f116bb67 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/method_params.cc @@ -0,0 +1,178 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include +#include + +#include +#include +#include + +#include "src/core/ext/filters/client_channel/method_params.h" +#include "src/core/lib/channel/status_util.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/memory.h" + +// As per the retry design, we do not allow more than 5 retry attempts. +#define MAX_MAX_RETRY_ATTEMPTS 5 + +namespace grpc_core { +namespace internal { + +namespace { + +bool ParseWaitForReady( + grpc_json* field, ClientChannelMethodParams::WaitForReady* wait_for_ready) { + if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) { + return false; + } + *wait_for_ready = field->type == GRPC_JSON_TRUE + ? ClientChannelMethodParams::WAIT_FOR_READY_TRUE + : ClientChannelMethodParams::WAIT_FOR_READY_FALSE; + return true; +} + +// Parses a JSON field of the form generated for a google.proto.Duration +// proto message, as per: +// https://developers.google.com/protocol-buffers/docs/proto3#json +bool ParseDuration(grpc_json* field, grpc_millis* duration) { + if (field->type != GRPC_JSON_STRING) return false; + size_t len = strlen(field->value); + if (field->value[len - 1] != 's') return false; + UniquePtr buf(gpr_strdup(field->value)); + *(buf.get() + len - 1) = '\0'; // Remove trailing 's'. + char* decimal_point = strchr(buf.get(), '.'); + int nanos = 0; + if (decimal_point != nullptr) { + *decimal_point = '\0'; + nanos = gpr_parse_nonnegative_int(decimal_point + 1); + if (nanos == -1) { + return false; + } + int num_digits = static_cast(strlen(decimal_point + 1)); + if (num_digits > 9) { // We don't accept greater precision than nanos. + return false; + } + for (int i = 0; i < (9 - num_digits); ++i) { + nanos *= 10; + } + } + int seconds = + decimal_point == buf.get() ? 
0 : gpr_parse_nonnegative_int(buf.get()); + if (seconds == -1) return false; + *duration = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS; + return true; +} + +UniquePtr ParseRetryPolicy( + grpc_json* field) { + auto retry_policy = MakeUnique(); + if (field->type != GRPC_JSON_OBJECT) return nullptr; + for (grpc_json* sub_field = field->child; sub_field != nullptr; + sub_field = sub_field->next) { + if (sub_field->key == nullptr) return nullptr; + if (strcmp(sub_field->key, "maxAttempts") == 0) { + if (retry_policy->max_attempts != 0) return nullptr; // Duplicate. + if (sub_field->type != GRPC_JSON_NUMBER) return nullptr; + retry_policy->max_attempts = gpr_parse_nonnegative_int(sub_field->value); + if (retry_policy->max_attempts <= 1) return nullptr; + if (retry_policy->max_attempts > MAX_MAX_RETRY_ATTEMPTS) { + gpr_log(GPR_ERROR, + "service config: clamped retryPolicy.maxAttempts at %d", + MAX_MAX_RETRY_ATTEMPTS); + retry_policy->max_attempts = MAX_MAX_RETRY_ATTEMPTS; + } + } else if (strcmp(sub_field->key, "initialBackoff") == 0) { + if (retry_policy->initial_backoff > 0) return nullptr; // Duplicate. + if (!ParseDuration(sub_field, &retry_policy->initial_backoff)) { + return nullptr; + } + if (retry_policy->initial_backoff == 0) return nullptr; + } else if (strcmp(sub_field->key, "maxBackoff") == 0) { + if (retry_policy->max_backoff > 0) return nullptr; // Duplicate. + if (!ParseDuration(sub_field, &retry_policy->max_backoff)) { + return nullptr; + } + if (retry_policy->max_backoff == 0) return nullptr; + } else if (strcmp(sub_field->key, "backoffMultiplier") == 0) { + if (retry_policy->backoff_multiplier != 0) return nullptr; // Duplicate. + if (sub_field->type != GRPC_JSON_NUMBER) return nullptr; + if (sscanf(sub_field->value, "%f", &retry_policy->backoff_multiplier) != + 1) { + return nullptr; + } + if (retry_policy->backoff_multiplier <= 0) return nullptr; + } else if (strcmp(sub_field->key, "retryableStatusCodes") == 0) { + if (!retry_policy->retryable_status_codes.Empty()) { + return nullptr; // Duplicate. + } + if (sub_field->type != GRPC_JSON_ARRAY) return nullptr; + for (grpc_json* element = sub_field->child; element != nullptr; + element = element->next) { + if (element->type != GRPC_JSON_STRING) return nullptr; + grpc_status_code status; + if (!grpc_status_code_from_string(element->value, &status)) { + return nullptr; + } + retry_policy->retryable_status_codes.Add(status); + } + if (retry_policy->retryable_status_codes.Empty()) return nullptr; + } + } + // Make sure required fields are set. + if (retry_policy->max_attempts == 0 || retry_policy->initial_backoff == 0 || + retry_policy->max_backoff == 0 || retry_policy->backoff_multiplier == 0 || + retry_policy->retryable_status_codes.Empty()) { + return nullptr; + } + return retry_policy; +} + +} // namespace + +RefCountedPtr +ClientChannelMethodParams::CreateFromJson(const grpc_json* json) { + RefCountedPtr method_params = + MakeRefCounted(); + for (grpc_json* field = json->child; field != nullptr; field = field->next) { + if (field->key == nullptr) continue; + if (strcmp(field->key, "waitForReady") == 0) { + if (method_params->wait_for_ready_ != WAIT_FOR_READY_UNSET) { + return nullptr; // Duplicate. + } + if (!ParseWaitForReady(field, &method_params->wait_for_ready_)) { + return nullptr; + } + } else if (strcmp(field->key, "timeout") == 0) { + if (method_params->timeout_ > 0) return nullptr; // Duplicate. 
+ if (!ParseDuration(field, &method_params->timeout_)) return nullptr; + } else if (strcmp(field->key, "retryPolicy") == 0) { + if (method_params->retry_policy_ != nullptr) { + return nullptr; // Duplicate. + } + method_params->retry_policy_ = ParseRetryPolicy(field); + if (method_params->retry_policy_ == nullptr) return nullptr; + } + } + return method_params; +} + +} // namespace internal +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/method_params.h b/Sources/CgRPC/src/core/ext/filters/client_channel/method_params.h new file mode 100644 index 000000000..a31d360f1 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/method_params.h @@ -0,0 +1,78 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H +#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H + +#include + +#include "src/core/lib/channel/status_util.h" +#include "src/core/lib/gprpp/ref_counted.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/exec_ctx.h" // for grpc_millis +#include "src/core/lib/json/json.h" + +namespace grpc_core { +namespace internal { + +class ClientChannelMethodParams : public RefCounted { + public: + enum WaitForReady { + WAIT_FOR_READY_UNSET = 0, + WAIT_FOR_READY_FALSE, + WAIT_FOR_READY_TRUE + }; + + struct RetryPolicy { + int max_attempts = 0; + grpc_millis initial_backoff = 0; + grpc_millis max_backoff = 0; + float backoff_multiplier = 0; + StatusCodeSet retryable_status_codes; + }; + + /// Creates a method_parameters object from \a json. + /// Intended for use with ServiceConfig::CreateMethodConfigTable(). + static RefCountedPtr CreateFromJson( + const grpc_json* json); + + grpc_millis timeout() const { return timeout_; } + WaitForReady wait_for_ready() const { return wait_for_ready_; } + const RetryPolicy* retry_policy() const { return retry_policy_.get(); } + + private: + // So New() can call our private ctor. + template + friend T* grpc_core::New(Args&&... args); + + // So Delete() can call our private dtor. 
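
For reference, this is the shape of JSON that the parsing code above accepts for a single method entry. The field names come directly from CreateFromJson() and ParseRetryPolicy(); the surrounding service-config wrapper that normally carries this object is omitted, and the expected values are worked out from ParseDuration() as a sanity check.

// A method-params object as consumed by ClientChannelMethodParams::CreateFromJson().
constexpr char kMethodParamsJson[] = R"json({
  "waitForReady": true,
  "timeout": "1.500s",
  "retryPolicy": {
    "maxAttempts": 3,
    "initialBackoff": "0.1s",
    "maxBackoff": "1s",
    "backoffMultiplier": 2.0,
    "retryableStatusCodes": ["UNAVAILABLE"]
  }
})json";
// Expected result, per the parsers above:
//   wait_for_ready() == WAIT_FOR_READY_TRUE
//   timeout()        == 1500 ms
//                       (1 * GPR_MS_PER_SEC + 500000000 / GPR_NS_PER_MS)
//   retry_policy()->max_attempts       == 3   (values above 5 are clamped)
//   retry_policy()->initial_backoff    == 100 ms
//   retry_policy()->max_backoff        == 1000 ms
//   retry_policy()->backoff_multiplier == 2.0
//   retry_policy()->retryable_status_codes contains GRPC_STATUS_UNAVAILABLE
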
+ template + friend void grpc_core::Delete(T*); + + ClientChannelMethodParams() {} + virtual ~ClientChannelMethodParams() {} + + grpc_millis timeout_ = 0; + WaitForReady wait_for_ready_ = WAIT_FOR_READY_UNSET; + UniquePtr retry_policy_; +}; + +} // namespace internal +} // namespace grpc_core + +#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_METHOD_PARAMS_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.c b/Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.cc similarity index 68% rename from Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.cc index 2152b5a1e..b3900114a 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.cc @@ -16,8 +16,11 @@ * */ +#include + #include "src/core/ext/filters/client_channel/parse_address.h" #include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/socket_utils.h" #include #include @@ -26,56 +29,58 @@ #endif #include -#include #include #include -#include "src/core/lib/support/string.h" + +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" #ifdef GRPC_HAVE_UNIX_SOCKET -bool grpc_parse_unix(const grpc_uri *uri, - grpc_resolved_address *resolved_addr) { +bool grpc_parse_unix(const grpc_uri* uri, + grpc_resolved_address* resolved_addr) { if (strcmp("unix", uri->scheme) != 0) { gpr_log(GPR_ERROR, "Expected 'unix' scheme, got '%s'", uri->scheme); return false; } - struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr; + struct sockaddr_un* un = + reinterpret_cast(resolved_addr->addr); const size_t maxlen = sizeof(un->sun_path); const size_t path_len = strnlen(uri->path, maxlen); if (path_len == maxlen) return false; un->sun_family = AF_UNIX; strcpy(un->sun_path, uri->path); - resolved_addr->len = sizeof(*un); + resolved_addr->len = static_cast(sizeof(*un)); return true; } #else /* GRPC_HAVE_UNIX_SOCKET */ -bool grpc_parse_unix(const grpc_uri *uri, - grpc_resolved_address *resolved_addr) { +bool grpc_parse_unix(const grpc_uri* uri, + grpc_resolved_address* resolved_addr) { abort(); } #endif /* GRPC_HAVE_UNIX_SOCKET */ -bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr, +bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr, bool log_errors) { bool success = false; // Split host and port. - char *host; - char *port; + char* host; + char* port; if (!gpr_split_host_port(hostport, &host, &port)) return false; // Parse IP address. memset(addr, 0, sizeof(*addr)); - addr->len = sizeof(struct sockaddr_in); - struct sockaddr_in *in = (struct sockaddr_in *)addr->addr; - in->sin_family = AF_INET; - if (inet_pton(AF_INET, host, &in->sin_addr) == 0) { + addr->len = static_cast(sizeof(grpc_sockaddr_in)); + grpc_sockaddr_in* in = reinterpret_cast(addr->addr); + in->sin_family = GRPC_AF_INET; + if (grpc_inet_pton(GRPC_AF_INET, host, &in->sin_addr) == 0) { if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 address: '%s'", host); goto done; } // Parse port. 
- if (port == NULL) { + if (port == nullptr) { if (log_errors) gpr_log(GPR_ERROR, "no port given for ipv4 scheme"); goto done; } @@ -84,7 +89,7 @@ bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr, if (log_errors) gpr_log(GPR_ERROR, "invalid ipv4 port: '%s'", port); goto done; } - in->sin_port = htons((uint16_t)port_num); + in->sin_port = grpc_htons(static_cast(port_num)); success = true; done: gpr_free(host); @@ -92,40 +97,41 @@ bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr, return success; } -bool grpc_parse_ipv4(const grpc_uri *uri, - grpc_resolved_address *resolved_addr) { +bool grpc_parse_ipv4(const grpc_uri* uri, + grpc_resolved_address* resolved_addr) { if (strcmp("ipv4", uri->scheme) != 0) { gpr_log(GPR_ERROR, "Expected 'ipv4' scheme, got '%s'", uri->scheme); return false; } - const char *host_port = uri->path; + const char* host_port = uri->path; if (*host_port == '/') ++host_port; return grpc_parse_ipv4_hostport(host_port, resolved_addr, true /* log_errors */); } -bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr, +bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr, bool log_errors) { bool success = false; // Split host and port. - char *host; - char *port; + char* host; + char* port; if (!gpr_split_host_port(hostport, &host, &port)) return false; // Parse IP address. memset(addr, 0, sizeof(*addr)); - addr->len = sizeof(struct sockaddr_in6); - struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr->addr; - in6->sin6_family = AF_INET6; + addr->len = static_cast(sizeof(grpc_sockaddr_in6)); + grpc_sockaddr_in6* in6 = reinterpret_cast(addr->addr); + in6->sin6_family = GRPC_AF_INET6; // Handle the RFC6874 syntax for IPv6 zone identifiers. - char *host_end = (char *)gpr_memrchr(host, '%', strlen(host)); - if (host_end != NULL) { + char* host_end = static_cast(gpr_memrchr(host, '%', strlen(host))); + if (host_end != nullptr) { GPR_ASSERT(host_end >= host); - char host_without_scope[INET6_ADDRSTRLEN]; - size_t host_without_scope_len = (size_t)(host_end - host); + char host_without_scope[GRPC_INET6_ADDRSTRLEN]; + size_t host_without_scope_len = static_cast(host_end - host); uint32_t sin6_scope_id = 0; strncpy(host_without_scope, host, host_without_scope_len); host_without_scope[host_without_scope_len] = '\0'; - if (inet_pton(AF_INET6, host_without_scope, &in6->sin6_addr) == 0) { + if (grpc_inet_pton(GRPC_AF_INET6, host_without_scope, &in6->sin6_addr) == + 0) { gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host_without_scope); goto done; } @@ -138,13 +144,13 @@ bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr, // Handle "sin6_scope_id" being type "u_long". See grpc issue #10027. in6->sin6_scope_id = sin6_scope_id; } else { - if (inet_pton(AF_INET6, host, &in6->sin6_addr) == 0) { + if (grpc_inet_pton(GRPC_AF_INET6, host, &in6->sin6_addr) == 0) { gpr_log(GPR_ERROR, "invalid ipv6 address: '%s'", host); goto done; } } // Parse port. 
- if (port == NULL) { + if (port == nullptr) { if (log_errors) gpr_log(GPR_ERROR, "no port given for ipv6 scheme"); goto done; } @@ -153,7 +159,7 @@ bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr, if (log_errors) gpr_log(GPR_ERROR, "invalid ipv6 port: '%s'", port); goto done; } - in6->sin6_port = htons((uint16_t)port_num); + in6->sin6_port = grpc_htons(static_cast(port_num)); success = true; done: gpr_free(host); @@ -161,19 +167,19 @@ bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr, return success; } -bool grpc_parse_ipv6(const grpc_uri *uri, - grpc_resolved_address *resolved_addr) { +bool grpc_parse_ipv6(const grpc_uri* uri, + grpc_resolved_address* resolved_addr) { if (strcmp("ipv6", uri->scheme) != 0) { gpr_log(GPR_ERROR, "Expected 'ipv6' scheme, got '%s'", uri->scheme); return false; } - const char *host_port = uri->path; + const char* host_port = uri->path; if (*host_port == '/') ++host_port; return grpc_parse_ipv6_hostport(host_port, resolved_addr, true /* log_errors */); } -bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr) { +bool grpc_parse_uri(const grpc_uri* uri, grpc_resolved_address* resolved_addr) { if (strcmp("unix", uri->scheme) == 0) { return grpc_parse_unix(uri, resolved_addr); } else if (strcmp("ipv4", uri->scheme) == 0) { diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.h b/Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.h index c90a827da..9a88b66ed 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/parse_address.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H +#include + #include #include "src/core/ext/filters/client_channel/uri_parser.h" @@ -26,23 +28,23 @@ /** Populate \a resolved_addr from \a uri, whose path is expected to contain a * unix socket path. Returns true upon success. */ -bool grpc_parse_unix(const grpc_uri *uri, grpc_resolved_address *resolved_addr); +bool grpc_parse_unix(const grpc_uri* uri, grpc_resolved_address* resolved_addr); /** Populate \a resolved_addr from \a uri, whose path is expected to contain an * IPv4 host:port pair. Returns true upon success. */ -bool grpc_parse_ipv4(const grpc_uri *uri, grpc_resolved_address *resolved_addr); +bool grpc_parse_ipv4(const grpc_uri* uri, grpc_resolved_address* resolved_addr); /** Populate \a resolved_addr from \a uri, whose path is expected to contain an * IPv6 host:port pair. Returns true upon success. */ -bool grpc_parse_ipv6(const grpc_uri *uri, grpc_resolved_address *resolved_addr); +bool grpc_parse_ipv6(const grpc_uri* uri, grpc_resolved_address* resolved_addr); /** Populate \a resolved_addr from \a uri. Returns true upon success. */ -bool grpc_parse_uri(const grpc_uri *uri, grpc_resolved_address *resolved_addr); +bool grpc_parse_uri(const grpc_uri* uri, grpc_resolved_address* resolved_addr); /** Parse bare IPv4 or IPv6 "IP:port" strings. 
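
The prototypes above are the whole public surface of parse_address after this change: const-correct pointers and no exec_ctx. A small sketch of the usual call pattern through grpc_parse_uri(); it assumes the exec_ctx-free uri_parser signatures (grpc_uri_parse(const char*, bool) and grpc_uri_destroy()), which are not shown in this hunk, and resolve_target is an illustrative name.

#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"

// Parses a client target string into a grpc_resolved_address.
static bool resolve_target(const char* target, grpc_resolved_address* addr) {
  grpc_uri* uri = grpc_uri_parse(target, false /* suppress_errors */);
  if (uri == nullptr) return false;
  // grpc_parse_uri() dispatches to grpc_parse_unix / grpc_parse_ipv4 /
  // grpc_parse_ipv6 based on uri->scheme.
  bool ok = grpc_parse_uri(uri, addr);
  grpc_uri_destroy(uri);
  return ok;
}

// Example targets:
//   "ipv4:127.0.0.1:50051"
//   "ipv6:[::1]:50051"
//   "unix:/tmp/grpc.sock"   (only where GRPC_HAVE_UNIX_SOCKET is defined)
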
*/ -bool grpc_parse_ipv4_hostport(const char *hostport, grpc_resolved_address *addr, +bool grpc_parse_ipv4_hostport(const char* hostport, grpc_resolved_address* addr, bool log_errors); -bool grpc_parse_ipv6_hostport(const char *hostport, grpc_resolved_address *addr, +bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr, bool log_errors); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.c b/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.cc similarity index 74% rename from Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.cc index c6ea5fc68..c4da06778 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/filters/client_channel/proxy_mapper.h" void grpc_proxy_mapper_init(const grpc_proxy_mapper_vtable* vtable, @@ -23,24 +25,22 @@ void grpc_proxy_mapper_init(const grpc_proxy_mapper_vtable* vtable, mapper->vtable = vtable; } -bool grpc_proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, - grpc_proxy_mapper* mapper, +bool grpc_proxy_mapper_map_name(grpc_proxy_mapper* mapper, const char* server_uri, const grpc_channel_args* args, char** name_to_resolve, grpc_channel_args** new_args) { - return mapper->vtable->map_name(exec_ctx, mapper, server_uri, args, - name_to_resolve, new_args); + return mapper->vtable->map_name(mapper, server_uri, args, name_to_resolve, + new_args); } -bool grpc_proxy_mapper_map_address(grpc_exec_ctx* exec_ctx, - grpc_proxy_mapper* mapper, +bool grpc_proxy_mapper_map_address(grpc_proxy_mapper* mapper, const grpc_resolved_address* address, const grpc_channel_args* args, grpc_resolved_address** new_address, grpc_channel_args** new_args) { - return mapper->vtable->map_address(exec_ctx, mapper, address, args, - new_address, new_args); + return mapper->vtable->map_address(mapper, address, args, new_address, + new_args); } void grpc_proxy_mapper_destroy(grpc_proxy_mapper* mapper) { diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.h b/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.h index a13861cca..634b0ed7b 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H +#include + #include #include @@ -32,14 +34,14 @@ typedef struct { /// If no proxy is needed, returns false. /// Otherwise, sets \a name_to_resolve, optionally sets \a new_args, /// and returns true. - bool (*map_name)(grpc_exec_ctx* exec_ctx, grpc_proxy_mapper* mapper, - const char* server_uri, const grpc_channel_args* args, - char** name_to_resolve, grpc_channel_args** new_args); + bool (*map_name)(grpc_proxy_mapper* mapper, const char* server_uri, + const grpc_channel_args* args, char** name_to_resolve, + grpc_channel_args** new_args); /// Determines the proxy address to use to contact \a address. /// If no proxy is needed, returns false. /// Otherwise, sets \a new_address, optionally sets \a new_args, and /// returns true. 
- bool (*map_address)(grpc_exec_ctx* exec_ctx, grpc_proxy_mapper* mapper, + bool (*map_address)(grpc_proxy_mapper* mapper, const grpc_resolved_address* address, const grpc_channel_args* args, grpc_resolved_address** new_address, @@ -55,15 +57,13 @@ struct grpc_proxy_mapper { void grpc_proxy_mapper_init(const grpc_proxy_mapper_vtable* vtable, grpc_proxy_mapper* mapper); -bool grpc_proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, - grpc_proxy_mapper* mapper, +bool grpc_proxy_mapper_map_name(grpc_proxy_mapper* mapper, const char* server_uri, const grpc_channel_args* args, char** name_to_resolve, grpc_channel_args** new_args); -bool grpc_proxy_mapper_map_address(grpc_exec_ctx* exec_ctx, - grpc_proxy_mapper* mapper, +bool grpc_proxy_mapper_map_address(grpc_proxy_mapper* mapper, const grpc_resolved_address* address, const grpc_channel_args* args, grpc_resolved_address** new_address, diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.c b/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.cc similarity index 74% rename from Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.cc index 09967eea3..a02a5f5e2 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" #include @@ -34,8 +36,8 @@ typedef struct { static void grpc_proxy_mapper_list_register(grpc_proxy_mapper_list* list, bool at_start, grpc_proxy_mapper* mapper) { - list->list = (grpc_proxy_mapper**)gpr_realloc( - list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*)); + list->list = static_cast(gpr_realloc( + list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*))); if (at_start) { memmove(list->list + 1, list->list, sizeof(grpc_proxy_mapper*) * list->num_mappers); @@ -46,14 +48,13 @@ static void grpc_proxy_mapper_list_register(grpc_proxy_mapper_list* list, ++list->num_mappers; } -static bool grpc_proxy_mapper_list_map_name(grpc_exec_ctx* exec_ctx, - grpc_proxy_mapper_list* list, +static bool grpc_proxy_mapper_list_map_name(grpc_proxy_mapper_list* list, const char* server_uri, const grpc_channel_args* args, char** name_to_resolve, grpc_channel_args** new_args) { for (size_t i = 0; i < list->num_mappers; ++i) { - if (grpc_proxy_mapper_map_name(exec_ctx, list->list[i], server_uri, args, + if (grpc_proxy_mapper_map_name(list->list[i], server_uri, args, name_to_resolve, new_args)) { return true; } @@ -62,12 +63,12 @@ static bool grpc_proxy_mapper_list_map_name(grpc_exec_ctx* exec_ctx, } static bool grpc_proxy_mapper_list_map_address( - grpc_exec_ctx* exec_ctx, grpc_proxy_mapper_list* list, - const grpc_resolved_address* address, const grpc_channel_args* args, - grpc_resolved_address** new_address, grpc_channel_args** new_args) { + grpc_proxy_mapper_list* list, const grpc_resolved_address* address, + const grpc_channel_args* args, grpc_resolved_address** new_address, + grpc_channel_args** new_args) { for (size_t i = 0; i < list->num_mappers; ++i) { - if (grpc_proxy_mapper_map_address(exec_ctx, list->list[i], address, args, - new_address, new_args)) { + if (grpc_proxy_mapper_map_address(list->list[i], address, args, new_address, + new_args)) { return true; } } @@ -105,20 +106,17 @@ void grpc_proxy_mapper_register(bool at_start, grpc_proxy_mapper* mapper) { 
grpc_proxy_mapper_list_register(&g_proxy_mapper_list, at_start, mapper); } -bool grpc_proxy_mappers_map_name(grpc_exec_ctx* exec_ctx, - const char* server_uri, +bool grpc_proxy_mappers_map_name(const char* server_uri, const grpc_channel_args* args, char** name_to_resolve, grpc_channel_args** new_args) { - return grpc_proxy_mapper_list_map_name(exec_ctx, &g_proxy_mapper_list, - server_uri, args, name_to_resolve, - new_args); + return grpc_proxy_mapper_list_map_name(&g_proxy_mapper_list, server_uri, args, + name_to_resolve, new_args); } -bool grpc_proxy_mappers_map_address(grpc_exec_ctx* exec_ctx, - const grpc_resolved_address* address, +bool grpc_proxy_mappers_map_address(const grpc_resolved_address* address, const grpc_channel_args* args, grpc_resolved_address** new_address, grpc_channel_args** new_args) { - return grpc_proxy_mapper_list_map_address( - exec_ctx, &g_proxy_mapper_list, address, args, new_address, new_args); + return grpc_proxy_mapper_list_map_address(&g_proxy_mapper_list, address, args, + new_address, new_args); } diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.h b/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.h index 99e54d1a7..326b582b9 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/proxy_mapper_registry.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H +#include + #include "src/core/ext/filters/client_channel/proxy_mapper.h" void grpc_proxy_mapper_registry_init(); @@ -29,14 +31,12 @@ void grpc_proxy_mapper_registry_shutdown(); /// the list. Otherwise, it will be added to the end. void grpc_proxy_mapper_register(bool at_start, grpc_proxy_mapper* mapper); -bool grpc_proxy_mappers_map_name(grpc_exec_ctx* exec_ctx, - const char* server_uri, +bool grpc_proxy_mappers_map_name(const char* server_uri, const grpc_channel_args* args, char** name_to_resolve, grpc_channel_args** new_args); -bool grpc_proxy_mappers_map_address(grpc_exec_ctx* exec_ctx, - const grpc_resolved_address* address, +bool grpc_proxy_mappers_map_address(const grpc_resolved_address* address, const grpc_channel_args* args, grpc_resolved_address** new_address, grpc_channel_args** new_args); diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.c deleted file mode 100644 index 8401504fc..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.c +++ /dev/null @@ -1,83 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
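
With exec_ctx gone from the proxy-mapper vtable, an implementation is just plain function pointers plus init and register. A sketch of a mapper that opts out of proxying for every target; the noop_* names are illustrative, and the vtable field order (map_name, map_address, destroy) is assumed rather than shown in full in these hunks.

#include <grpc/support/alloc.h>
#include <grpc/support/port_platform.h>

#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"

// Returning false means "no proxy needed", per the header comments above.
static bool noop_map_name(grpc_proxy_mapper* /*mapper*/,
                          const char* /*server_uri*/,
                          const grpc_channel_args* /*args*/,
                          char** /*name_to_resolve*/,
                          grpc_channel_args** /*new_args*/) {
  return false;
}

static bool noop_map_address(grpc_proxy_mapper* /*mapper*/,
                             const grpc_resolved_address* /*address*/,
                             const grpc_channel_args* /*args*/,
                             grpc_resolved_address** /*new_address*/,
                             grpc_channel_args** /*new_args*/) {
  return false;
}

static void noop_destroy(grpc_proxy_mapper* mapper) { gpr_free(mapper); }

static const grpc_proxy_mapper_vtable noop_vtable = {
    noop_map_name, noop_map_address, noop_destroy};

// Registers the mapper at the end of the global list.
void register_noop_proxy_mapper() {
  grpc_proxy_mapper* mapper =
      static_cast<grpc_proxy_mapper*>(gpr_malloc(sizeof(*mapper)));
  grpc_proxy_mapper_init(&noop_vtable, mapper);
  grpc_proxy_mapper_register(false /* at_start */, mapper);
}
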
- * - */ - -#include "src/core/ext/filters/client_channel/resolver.h" -#include "src/core/lib/iomgr/combiner.h" - -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_resolver_refcount = - GRPC_TRACER_INITIALIZER(false, "resolver_refcount"); -#endif - -void grpc_resolver_init(grpc_resolver *resolver, - const grpc_resolver_vtable *vtable, - grpc_combiner *combiner) { - resolver->vtable = vtable; - resolver->combiner = GRPC_COMBINER_REF(combiner, "resolver"); - gpr_ref_init(&resolver->refs, 1); -} - -#ifndef NDEBUG -void grpc_resolver_ref(grpc_resolver *resolver, const char *file, int line, - const char *reason) { - if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) { - gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "RESOLVER:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", resolver, - old_refs, old_refs + 1, reason); - } -#else -void grpc_resolver_ref(grpc_resolver *resolver) { -#endif - gpr_ref(&resolver->refs); -} - -#ifndef NDEBUG -void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver, - const char *file, int line, const char *reason) { - if (GRPC_TRACER_ON(grpc_trace_resolver_refcount)) { - gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "RESOLVER:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", resolver, - old_refs, old_refs - 1, reason); - } -#else -void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) { -#endif - if (gpr_unref(&resolver->refs)) { - grpc_combiner *combiner = resolver->combiner; - resolver->vtable->destroy(exec_ctx, resolver); - GRPC_COMBINER_UNREF(exec_ctx, combiner, "resolver"); - } -} - -void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver) { - resolver->vtable->shutdown_locked(exec_ctx, resolver); -} - -void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver) { - resolver->vtable->channel_saw_error_locked(exec_ctx, resolver); -} - -void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver, - grpc_channel_args **result, - grpc_closure *on_complete) { - resolver->vtable->next_locked(exec_ctx, resolver, result, on_complete); -} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.cc new file mode 100644 index 000000000..cd11eeb9e --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.cc @@ -0,0 +1,35 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/ext/filters/client_channel/resolver.h" +#include "src/core/lib/iomgr/combiner.h" + +grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount(false, + "resolver_refcount"); + +namespace grpc_core { + +Resolver::Resolver(grpc_combiner* combiner) + : InternallyRefCountedWithTracing(&grpc_trace_resolver_refcount), + combiner_(GRPC_COMBINER_REF(combiner, "resolver")) {} + +Resolver::~Resolver() { GRPC_COMBINER_UNREF(combiner_, "resolver"); } + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.h b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.h index ae9c8f66f..02380314d 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver.h @@ -19,72 +19,120 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H -#include "src/core/ext/filters/client_channel/subchannel.h" +#include + +#include + +#include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/iomgr.h" -typedef struct grpc_resolver grpc_resolver; -typedef struct grpc_resolver_vtable grpc_resolver_vtable; +extern grpc_core::DebugOnlyTraceFlag grpc_trace_resolver_refcount; -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_resolver_refcount; -#endif +namespace grpc_core { -/** \a grpc_resolver provides \a grpc_channel_args objects to its caller */ -struct grpc_resolver { - const grpc_resolver_vtable *vtable; - gpr_refcount refs; - grpc_combiner *combiner; -}; +/// Interface for name resolution. +/// +/// This interface is designed to support both push-based and pull-based +/// mechanisms. A push-based mechanism is one where the resolver will +/// subscribe to updates for a given name, and the name service will +/// proactively send new data to the resolver whenever the data associated +/// with the name changes. A pull-based mechanism is one where the resolver +/// needs to query the name service again to get updated information (e.g., +/// DNS). +/// +/// Note: All methods with a "Locked" suffix must be called from the +/// combiner passed to the constructor. +class Resolver : public InternallyRefCountedWithTracing { + public: + // Not copyable nor movable. + Resolver(const Resolver&) = delete; + Resolver& operator=(const Resolver&) = delete; + + /// Requests a callback when a new result becomes available. + /// When the new result is available, sets \a *result to the new result + /// and schedules \a on_complete for execution. + /// Upon transient failure, sets \a *result to nullptr and schedules + /// \a on_complete with no error. + /// If resolution is fatally broken, sets \a *result to nullptr and + /// schedules \a on_complete with an error. + /// TODO(roth): When we have time, improve the way this API represents + /// transient failure vs. shutdown. + /// + /// Note that the client channel will almost always have a request + /// to \a NextLocked() pending. When it gets the callback, it will + /// process the new result and then immediately make another call to + /// \a NextLocked(). This allows push-based resolvers to provide new + /// data as soon as it becomes available. + virtual void NextLocked(grpc_channel_args** result, + grpc_closure* on_complete) GRPC_ABSTRACT; + + /// Asks the resolver to obtain an updated resolver result, if + /// applicable. 
+ /// + /// This is useful for pull-based implementations to decide when to + /// re-resolve. However, the implementation is not required to + /// re-resolve immediately upon receiving this call; it may instead + /// elect to delay based on some configured minimum time between + /// queries, to avoid hammering the name service with queries. + /// + /// For push-based implementations, this may be a no-op. + /// + /// If this causes new data to become available, then the currently + /// pending call to \a NextLocked() will return the new result. + /// + /// Note: Currently, all resolvers are required to return a new result + /// shortly after this method is called. For pull-based mechanisms, if + /// the implementation decides to delay querying the name service, it + /// should immediately return a new copy of the previously returned + /// result (and it can then return the updated data later, when it + /// actually does query the name service). For push-based mechanisms, + /// the implementation should immediately return a new copy of the + /// last-seen result. + /// TODO(roth): Remove this requirement once we fix pick_first to not + /// throw away unselected subchannels. + virtual void RequestReresolutionLocked() GRPC_ABSTRACT; + + void Orphan() override { + // Invoke ShutdownAndUnrefLocked() inside of the combiner. + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_CREATE(&Resolver::ShutdownAndUnrefLocked, this, + grpc_combiner_scheduler(combiner_)), + GRPC_ERROR_NONE); + } + + GRPC_ABSTRACT_BASE_CLASS + + protected: + // So Delete() can access our protected dtor. + template + friend void Delete(T*); + + /// Does NOT take ownership of the reference to \a combiner. + // TODO(roth): Once we have a C++-like interface for combiners, this + // API should change to take a RefCountedPtr<>, so that we always take + // ownership of a new ref. + explicit Resolver(grpc_combiner* combiner); + + virtual ~Resolver(); + + /// Shuts down the resolver. If there is a pending call to + /// NextLocked(), the callback will be scheduled with an error. 
+ virtual void ShutdownLocked() GRPC_ABSTRACT; + + grpc_combiner* combiner() const { return combiner_; } + + private: + static void ShutdownAndUnrefLocked(void* arg, grpc_error* ignored) { + Resolver* resolver = static_cast(arg); + resolver->ShutdownLocked(); + resolver->Unref(); + } -struct grpc_resolver_vtable { - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver); - void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver); - void (*channel_saw_error_locked)(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver); - void (*next_locked)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver, - grpc_channel_args **result, grpc_closure *on_complete); + grpc_combiner* combiner_; }; -#ifndef NDEBUG -#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_RESOLVER_UNREF(e, p, r) \ - grpc_resolver_unref((e), (p), __FILE__, __LINE__, (r)) -void grpc_resolver_ref(grpc_resolver *policy, const char *file, int line, - const char *reason); -void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy, - const char *file, int line, const char *reason); -#else -#define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p)) -#define GRPC_RESOLVER_UNREF(e, p, r) grpc_resolver_unref((e), (p)) -void grpc_resolver_ref(grpc_resolver *policy); -void grpc_resolver_unref(grpc_exec_ctx *exec_ctx, grpc_resolver *policy); -#endif - -void grpc_resolver_init(grpc_resolver *resolver, - const grpc_resolver_vtable *vtable, - grpc_combiner *combiner); - -void grpc_resolver_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver); - -/** Notification that the channel has seen an error on some address. - Can be used as a hint that re-resolution is desirable soon. - - Must be called from the combiner passed as a resolver_arg at construction - time.*/ -void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver); - -/** Get the next result from the resolver. Expected to set \a *result with - new channel args and then schedule \a on_complete for execution. - - If resolution is fatally broken, set \a *result to NULL and - schedule \a on_complete. - - Must be called from the combiner passed as a resolver_arg at construction - time.*/ -void grpc_resolver_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver, - grpc_channel_args **result, - grpc_closure *on_complete); +} // namespace grpc_core #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c deleted file mode 100644 index 9bb229ad9..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +++ /dev/null @@ -1,458 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include -#if GRPC_ARES == 1 && !defined(GRPC_UV) - -#include -#include -#include -#include - -#include -#include -#include - -#include "src/core/ext/filters/client_channel/http_connect_handshaker.h" -#include "src/core/ext/filters/client_channel/lb_policy_registry.h" -#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" -#include "src/core/ext/filters/client_channel/resolver_registry.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/iomgr/combiner.h" -#include "src/core/lib/iomgr/gethostname.h" -#include "src/core/lib/iomgr/resolve_address.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/json/json.h" -#include "src/core/lib/support/backoff.h" -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" -#include "src/core/lib/transport/service_config.h" - -#define GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS 1 -#define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1 -#define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6 -#define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120 -#define GRPC_DNS_RECONNECT_JITTER 0.2 - -typedef struct { - /** base class: must be first */ - grpc_resolver base; - /** DNS server to use (if not system default) */ - char *dns_server; - /** name to resolve (usually the same as target_name) */ - char *name_to_resolve; - /** default port to use */ - char *default_port; - /** channel args. */ - grpc_channel_args *channel_args; - /** whether to request the service config */ - bool request_service_config; - /** pollset_set to drive the name resolution process */ - grpc_pollset_set *interested_parties; - - /** Closures used by the combiner */ - grpc_closure dns_ares_on_retry_timer_locked; - grpc_closure dns_ares_on_resolved_locked; - - /** Combiner guarding the rest of the state */ - grpc_combiner *combiner; - /** are we currently resolving? */ - bool resolving; - /** the pending resolving request */ - grpc_ares_request *pending_request; - /** which version of the result have we published? */ - int published_version; - /** which version of the result is current? 
*/ - int resolved_version; - /** pending next completion, or NULL */ - grpc_closure *next_completion; - /** target result address for next completion */ - grpc_channel_args **target_result; - /** current (fully resolved) result */ - grpc_channel_args *resolved_result; - /** retry timer */ - bool have_retry_timer; - grpc_timer retry_timer; - /** retry backoff state */ - gpr_backoff backoff_state; - - /** currently resolving addresses */ - grpc_lb_addresses *lb_addresses; - /** currently resolving service config */ - char *service_config_json; -} ares_dns_resolver; - -static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r); - -static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx, - ares_dns_resolver *r); -static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx, - ares_dns_resolver *r); - -static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r); -static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *r); -static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r, - grpc_channel_args **target_result, - grpc_closure *on_complete); - -static const grpc_resolver_vtable dns_ares_resolver_vtable = { - dns_ares_destroy, dns_ares_shutdown_locked, - dns_ares_channel_saw_error_locked, dns_ares_next_locked}; - -static void dns_ares_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver) { - ares_dns_resolver *r = (ares_dns_resolver *)resolver; - if (r->have_retry_timer) { - grpc_timer_cancel(exec_ctx, &r->retry_timer); - } - if (r->pending_request != NULL) { - grpc_cancel_ares_request(exec_ctx, r->pending_request); - } - if (r->next_completion != NULL) { - *r->target_result = NULL; - GRPC_CLOSURE_SCHED( - exec_ctx, r->next_completion, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown")); - r->next_completion = NULL; - } -} - -static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver) { - ares_dns_resolver *r = (ares_dns_resolver *)resolver; - if (!r->resolving) { - gpr_backoff_reset(&r->backoff_state); - dns_ares_start_resolving_locked(exec_ctx, r); - } -} - -static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - ares_dns_resolver *r = (ares_dns_resolver *)arg; - r->have_retry_timer = false; - if (error == GRPC_ERROR_NONE) { - if (!r->resolving) { - dns_ares_start_resolving_locked(exec_ctx, r); - } - } - GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer"); -} - -static bool value_in_json_array(grpc_json *array, const char *value) { - for (grpc_json *entry = array->child; entry != NULL; entry = entry->next) { - if (entry->type == GRPC_JSON_STRING && strcmp(entry->value, value) == 0) { - return true; - } - } - return false; -} - -static char *choose_service_config(char *service_config_choice_json) { - grpc_json *choices_json = grpc_json_parse_string(service_config_choice_json); - if (choices_json == NULL || choices_json->type != GRPC_JSON_ARRAY) { - gpr_log(GPR_ERROR, "cannot parse service config JSON string"); - return NULL; - } - char *service_config = NULL; - for (grpc_json *choice = choices_json->child; choice != NULL; - choice = choice->next) { - if (choice->type != GRPC_JSON_OBJECT) { - gpr_log(GPR_ERROR, "cannot parse service config JSON string"); - break; - } - grpc_json *service_config_json = NULL; - for (grpc_json *field = choice->child; field != NULL; field = field->next) { - // Check client language, if specified. 
- if (strcmp(field->key, "clientLanguage") == 0) { - if (field->type != GRPC_JSON_ARRAY || - !value_in_json_array(field, "c++")) { - service_config_json = NULL; - break; - } - } - // Check client hostname, if specified. - if (strcmp(field->key, "clientHostname") == 0) { - char *hostname = grpc_gethostname(); - if (hostname == NULL || field->type != GRPC_JSON_ARRAY || - !value_in_json_array(field, hostname)) { - service_config_json = NULL; - break; - } - } - // Check percentage, if specified. - if (strcmp(field->key, "percentage") == 0) { - if (field->type != GRPC_JSON_NUMBER) { - service_config_json = NULL; - break; - } - int random_pct = rand() % 100; - int percentage; - if (sscanf(field->value, "%d", &percentage) != 1 || - random_pct > percentage || percentage == 0) { - service_config_json = NULL; - break; - } - } - // Save service config. - if (strcmp(field->key, "serviceConfig") == 0) { - if (field->type == GRPC_JSON_OBJECT) { - service_config_json = field; - } - } - } - if (service_config_json != NULL) { - service_config = grpc_json_dump_to_string(service_config_json, 0); - break; - } - } - grpc_json_destroy(choices_json); - return service_config; -} - -static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - ares_dns_resolver *r = (ares_dns_resolver *)arg; - grpc_channel_args *result = NULL; - GPR_ASSERT(r->resolving); - r->resolving = false; - r->pending_request = NULL; - if (r->lb_addresses != NULL) { - static const char *args_to_remove[2]; - size_t num_args_to_remove = 0; - grpc_arg new_args[3]; - size_t num_args_to_add = 0; - new_args[num_args_to_add++] = - grpc_lb_addresses_create_channel_arg(r->lb_addresses); - grpc_service_config *service_config = NULL; - char *service_config_string = NULL; - if (r->service_config_json != NULL) { - service_config_string = choose_service_config(r->service_config_json); - gpr_free(r->service_config_json); - if (service_config_string != NULL) { - gpr_log(GPR_INFO, "selected service config choice: %s", - service_config_string); - args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG; - new_args[num_args_to_add++] = grpc_channel_arg_string_create( - (char *)GRPC_ARG_SERVICE_CONFIG, service_config_string); - service_config = grpc_service_config_create(service_config_string); - if (service_config != NULL) { - const char *lb_policy_name = - grpc_service_config_get_lb_policy_name(service_config); - if (lb_policy_name != NULL) { - args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME; - new_args[num_args_to_add++] = grpc_channel_arg_string_create( - (char *)GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name); - } - } - } - } - result = grpc_channel_args_copy_and_add_and_remove( - r->channel_args, args_to_remove, num_args_to_remove, new_args, - num_args_to_add); - if (service_config != NULL) grpc_service_config_destroy(service_config); - gpr_free(service_config_string); - grpc_lb_addresses_destroy(exec_ctx, r->lb_addresses); - } else { - const char *msg = grpc_error_string(error); - gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg); - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now); - gpr_timespec timeout = gpr_time_sub(next_try, now); - gpr_log(GPR_INFO, "dns resolution failed (will retry): %s", - grpc_error_string(error)); - GPR_ASSERT(!r->have_retry_timer); - r->have_retry_timer = true; - GRPC_RESOLVER_REF(&r->base, "retry-timer"); - if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) { - gpr_log(GPR_DEBUG, 
"retrying in %" PRId64 ".%09d seconds", timeout.tv_sec, - timeout.tv_nsec); - } else { - gpr_log(GPR_DEBUG, "retrying immediately"); - } - grpc_timer_init(exec_ctx, &r->retry_timer, next_try, - &r->dns_ares_on_retry_timer_locked, now); - } - if (r->resolved_result != NULL) { - grpc_channel_args_destroy(exec_ctx, r->resolved_result); - } - r->resolved_result = result; - r->resolved_version++; - dns_ares_maybe_finish_next_locked(exec_ctx, r); - GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving"); -} - -static void dns_ares_next_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver, - grpc_channel_args **target_result, - grpc_closure *on_complete) { - gpr_log(GPR_DEBUG, "dns_ares_next is called."); - ares_dns_resolver *r = (ares_dns_resolver *)resolver; - GPR_ASSERT(!r->next_completion); - r->next_completion = on_complete; - r->target_result = target_result; - if (r->resolved_version == 0 && !r->resolving) { - gpr_backoff_reset(&r->backoff_state); - dns_ares_start_resolving_locked(exec_ctx, r); - } else { - dns_ares_maybe_finish_next_locked(exec_ctx, r); - } -} - -static void dns_ares_start_resolving_locked(grpc_exec_ctx *exec_ctx, - ares_dns_resolver *r) { - GRPC_RESOLVER_REF(&r->base, "dns-resolving"); - GPR_ASSERT(!r->resolving); - r->resolving = true; - r->lb_addresses = NULL; - r->service_config_json = NULL; - r->pending_request = grpc_dns_lookup_ares( - exec_ctx, r->dns_server, r->name_to_resolve, r->default_port, - r->interested_parties, &r->dns_ares_on_resolved_locked, &r->lb_addresses, - true /* check_grpclb */, - r->request_service_config ? &r->service_config_json : NULL); -} - -static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx, - ares_dns_resolver *r) { - if (r->next_completion != NULL && - r->resolved_version != r->published_version) { - *r->target_result = r->resolved_result == NULL - ? NULL - : grpc_channel_args_copy(r->resolved_result); - gpr_log(GPR_DEBUG, "dns_ares_maybe_finish_next_locked"); - GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE); - r->next_completion = NULL; - r->published_version = r->resolved_version; - } -} - -static void dns_ares_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) { - gpr_log(GPR_DEBUG, "dns_ares_destroy"); - ares_dns_resolver *r = (ares_dns_resolver *)gr; - if (r->resolved_result != NULL) { - grpc_channel_args_destroy(exec_ctx, r->resolved_result); - } - grpc_pollset_set_destroy(exec_ctx, r->interested_parties); - gpr_free(r->dns_server); - gpr_free(r->name_to_resolve); - gpr_free(r->default_port); - grpc_channel_args_destroy(exec_ctx, r->channel_args); - gpr_free(r); -} - -static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx, - grpc_resolver_args *args, - const char *default_port) { - /* Get name from args. */ - const char *path = args->uri->path; - if (path[0] == '/') ++path; - /* Create resolver. 
*/ - ares_dns_resolver *r = - (ares_dns_resolver *)gpr_zalloc(sizeof(ares_dns_resolver)); - grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner); - if (0 != strcmp(args->uri->authority, "")) { - r->dns_server = gpr_strdup(args->uri->authority); - } - r->name_to_resolve = gpr_strdup(path); - r->default_port = gpr_strdup(default_port); - r->channel_args = grpc_channel_args_copy(args->args); - const grpc_arg *arg = grpc_channel_args_find( - r->channel_args, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION); - r->request_service_config = !grpc_channel_arg_get_integer( - arg, (grpc_integer_options){false, false, true}); - r->interested_parties = grpc_pollset_set_create(); - if (args->pollset_set != NULL) { - grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties, - args->pollset_set); - } - gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS, - GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, - GRPC_DNS_RECONNECT_JITTER, - GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000, - GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000); - GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked, - dns_ares_on_retry_timer_locked, r, - grpc_combiner_scheduler(r->base.combiner)); - GRPC_CLOSURE_INIT(&r->dns_ares_on_resolved_locked, - dns_ares_on_resolved_locked, r, - grpc_combiner_scheduler(r->base.combiner)); - return &r->base; -} - -/* - * FACTORY - */ - -static void dns_ares_factory_ref(grpc_resolver_factory *factory) {} - -static void dns_ares_factory_unref(grpc_resolver_factory *factory) {} - -static grpc_resolver *dns_factory_create_resolver( - grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, - grpc_resolver_args *args) { - return dns_ares_create(exec_ctx, args, "https"); -} - -static char *dns_ares_factory_get_default_host_name( - grpc_resolver_factory *factory, grpc_uri *uri) { - const char *path = uri->path; - if (path[0] == '/') ++path; - return gpr_strdup(path); -} - -static const grpc_resolver_factory_vtable dns_ares_factory_vtable = { - dns_ares_factory_ref, dns_ares_factory_unref, dns_factory_create_resolver, - dns_ares_factory_get_default_host_name, "dns"}; -static grpc_resolver_factory dns_resolver_factory = {&dns_ares_factory_vtable}; - -static grpc_resolver_factory *dns_ares_resolver_factory_create() { - return &dns_resolver_factory; -} - -void grpc_resolver_dns_ares_init(void) { - char *resolver = gpr_getenv("GRPC_DNS_RESOLVER"); - /* TODO(zyc): Turn on c-ares based resolver by default after the address - sorter and the CNAME support are added. 
*/ - if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) { - grpc_error *error = grpc_ares_init(); - if (error != GRPC_ERROR_NONE) { - GRPC_LOG_IF_ERROR("ares_library_init() failed", error); - return; - } - grpc_resolve_address = grpc_resolve_address_ares; - grpc_register_resolver_type(dns_ares_resolver_factory_create()); - } - gpr_free(resolver); -} - -void grpc_resolver_dns_ares_shutdown(void) { - char *resolver = gpr_getenv("GRPC_DNS_RESOLVER"); - if (resolver != NULL && gpr_stricmp(resolver, "ares") == 0) { - grpc_ares_cleanup(); - } - gpr_free(resolver); -} - -#else /* GRPC_ARES == 1 && !defined(GRPC_UV) */ - -void grpc_resolver_dns_ares_init(void) {} - -void grpc_resolver_dns_ares_shutdown(void) {} - -#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc new file mode 100644 index 000000000..c3c62b60b --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc @@ -0,0 +1,502 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#if GRPC_ARES == 1 && !defined(GRPC_UV) + +#include +#include +#include +#include + +#include +#include + +#include + +#include "src/core/ext/filters/client_channel/http_connect_handshaker.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" +#include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/lib/backoff/backoff.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/gethostname.h" +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/json/json.h" +#include "src/core/lib/transport/service_config.h" + +#define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1 +#define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6 +#define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120 +#define GRPC_DNS_RECONNECT_JITTER 0.2 + +namespace grpc_core { + +namespace { + +const char kDefaultPort[] = "https"; + +class AresDnsResolver : public Resolver { + public: + explicit AresDnsResolver(const ResolverArgs& args); + + void NextLocked(grpc_channel_args** result, + grpc_closure* on_complete) override; + + void RequestReresolutionLocked() override; + + void ShutdownLocked() override; + + private: + virtual ~AresDnsResolver(); + + void MaybeStartResolvingLocked(); + void StartResolvingLocked(); + void MaybeFinishNextLocked(); + + static void OnNextResolutionLocked(void* arg, grpc_error* error); + static void OnResolvedLocked(void* arg, grpc_error* error); + + 
/// DNS server to use (if not system default) + char* dns_server_; + /// name to resolve (usually the same as target_name) + char* name_to_resolve_; + /// channel args + grpc_channel_args* channel_args_; + /// whether to request the service config + bool request_service_config_; + /// pollset_set to drive the name resolution process + grpc_pollset_set* interested_parties_; + /// closures used by the combiner + grpc_closure on_next_resolution_; + grpc_closure on_resolved_; + /// are we currently resolving? + bool resolving_ = false; + /// the pending resolving request + grpc_ares_request* pending_request_ = nullptr; + /// which version of the result have we published? + int published_version_ = 0; + /// which version of the result is current? + int resolved_version_ = 0; + /// pending next completion, or NULL + grpc_closure* next_completion_ = nullptr; + /// target result address for next completion + grpc_channel_args** target_result_ = nullptr; + /// current (fully resolved) result + grpc_channel_args* resolved_result_ = nullptr; + /// next resolution timer + bool have_next_resolution_timer_ = false; + grpc_timer next_resolution_timer_; + /// min interval between DNS requests + grpc_millis min_time_between_resolutions_; + /// timestamp of last DNS request + grpc_millis last_resolution_timestamp_ = -1; + /// retry backoff state + BackOff backoff_; + /// currently resolving addresses + grpc_lb_addresses* lb_addresses_ = nullptr; + /// currently resolving service config + char* service_config_json_ = nullptr; +}; + +AresDnsResolver::AresDnsResolver(const ResolverArgs& args) + : Resolver(args.combiner), + backoff_( + BackOff::Options() + .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * + 1000) + .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER) + .set_jitter(GRPC_DNS_RECONNECT_JITTER) + .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) { + // Get name to resolve from URI path. + const char* path = args.uri->path; + if (path[0] == '/') ++path; + name_to_resolve_ = gpr_strdup(path); + // Get DNS server from URI authority. 
+ dns_server_ = nullptr; + if (0 != strcmp(args.uri->authority, "")) { + dns_server_ = gpr_strdup(args.uri->authority); + } + channel_args_ = grpc_channel_args_copy(args.args); + const grpc_arg* arg = grpc_channel_args_find( + channel_args_, GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION); + request_service_config_ = !grpc_channel_arg_get_integer( + arg, (grpc_integer_options){false, false, true}); + arg = grpc_channel_args_find(channel_args_, + GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS); + min_time_between_resolutions_ = + grpc_channel_arg_get_integer(arg, {1000, 0, INT_MAX}); + interested_parties_ = grpc_pollset_set_create(); + if (args.pollset_set != nullptr) { + grpc_pollset_set_add_pollset_set(interested_parties_, args.pollset_set); + } + GRPC_CLOSURE_INIT(&on_next_resolution_, OnNextResolutionLocked, this, + grpc_combiner_scheduler(combiner())); + GRPC_CLOSURE_INIT(&on_resolved_, OnResolvedLocked, this, + grpc_combiner_scheduler(combiner())); +} + +AresDnsResolver::~AresDnsResolver() { + gpr_log(GPR_DEBUG, "destroying AresDnsResolver"); + if (resolved_result_ != nullptr) { + grpc_channel_args_destroy(resolved_result_); + } + grpc_pollset_set_destroy(interested_parties_); + gpr_free(dns_server_); + gpr_free(name_to_resolve_); + grpc_channel_args_destroy(channel_args_); +} + +void AresDnsResolver::NextLocked(grpc_channel_args** target_result, + grpc_closure* on_complete) { + gpr_log(GPR_DEBUG, "AresDnsResolver::NextLocked() is called."); + GPR_ASSERT(next_completion_ == nullptr); + next_completion_ = on_complete; + target_result_ = target_result; + if (resolved_version_ == 0 && !resolving_) { + MaybeStartResolvingLocked(); + } else { + MaybeFinishNextLocked(); + } +} + +void AresDnsResolver::RequestReresolutionLocked() { + if (!resolving_) { + MaybeStartResolvingLocked(); + } +} + +void AresDnsResolver::ShutdownLocked() { + if (have_next_resolution_timer_) { + grpc_timer_cancel(&next_resolution_timer_); + } + if (pending_request_ != nullptr) { + grpc_cancel_ares_request(pending_request_); + } + if (next_completion_ != nullptr) { + *target_result_ = nullptr; + GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Resolver Shutdown")); + next_completion_ = nullptr; + } +} + +void AresDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) { + AresDnsResolver* r = static_cast(arg); + r->have_next_resolution_timer_ = false; + if (error == GRPC_ERROR_NONE) { + if (!r->resolving_) { + r->StartResolvingLocked(); + } + } + r->Unref(DEBUG_LOCATION, "next_resolution_timer"); +} + +bool ValueInJsonArray(grpc_json* array, const char* value) { + for (grpc_json* entry = array->child; entry != nullptr; entry = entry->next) { + if (entry->type == GRPC_JSON_STRING && strcmp(entry->value, value) == 0) { + return true; + } + } + return false; +} + +char* ChooseServiceConfig(char* service_config_choice_json) { + grpc_json* choices_json = grpc_json_parse_string(service_config_choice_json); + if (choices_json == nullptr || choices_json->type != GRPC_JSON_ARRAY) { + gpr_log(GPR_ERROR, "cannot parse service config JSON string"); + return nullptr; + } + char* service_config = nullptr; + for (grpc_json* choice = choices_json->child; choice != nullptr; + choice = choice->next) { + if (choice->type != GRPC_JSON_OBJECT) { + gpr_log(GPR_ERROR, "cannot parse service config JSON string"); + break; + } + grpc_json* service_config_json = nullptr; + for (grpc_json* field = choice->child; field != nullptr; + field = field->next) { + // Check client language, if specified. 
+ if (strcmp(field->key, "clientLanguage") == 0) { + if (field->type != GRPC_JSON_ARRAY || !ValueInJsonArray(field, "c++")) { + service_config_json = nullptr; + break; + } + } + // Check client hostname, if specified. + if (strcmp(field->key, "clientHostname") == 0) { + char* hostname = grpc_gethostname(); + if (hostname == nullptr || field->type != GRPC_JSON_ARRAY || + !ValueInJsonArray(field, hostname)) { + service_config_json = nullptr; + break; + } + } + // Check percentage, if specified. + if (strcmp(field->key, "percentage") == 0) { + if (field->type != GRPC_JSON_NUMBER) { + service_config_json = nullptr; + break; + } + int random_pct = rand() % 100; + int percentage; + if (sscanf(field->value, "%d", &percentage) != 1 || + random_pct > percentage || percentage == 0) { + service_config_json = nullptr; + break; + } + } + // Save service config. + if (strcmp(field->key, "serviceConfig") == 0) { + if (field->type == GRPC_JSON_OBJECT) { + service_config_json = field; + } + } + } + if (service_config_json != nullptr) { + service_config = grpc_json_dump_to_string(service_config_json, 0); + break; + } + } + grpc_json_destroy(choices_json); + return service_config; +} + +void AresDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) { + AresDnsResolver* r = static_cast(arg); + grpc_channel_args* result = nullptr; + GPR_ASSERT(r->resolving_); + r->resolving_ = false; + r->pending_request_ = nullptr; + if (r->lb_addresses_ != nullptr) { + static const char* args_to_remove[2]; + size_t num_args_to_remove = 0; + grpc_arg new_args[3]; + size_t num_args_to_add = 0; + new_args[num_args_to_add++] = + grpc_lb_addresses_create_channel_arg(r->lb_addresses_); + grpc_core::UniquePtr service_config; + char* service_config_string = nullptr; + if (r->service_config_json_ != nullptr) { + service_config_string = ChooseServiceConfig(r->service_config_json_); + gpr_free(r->service_config_json_); + if (service_config_string != nullptr) { + gpr_log(GPR_INFO, "selected service config choice: %s", + service_config_string); + args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG; + new_args[num_args_to_add++] = grpc_channel_arg_string_create( + (char*)GRPC_ARG_SERVICE_CONFIG, service_config_string); + service_config = + grpc_core::ServiceConfig::Create(service_config_string); + if (service_config != nullptr) { + const char* lb_policy_name = + service_config->GetLoadBalancingPolicyName(); + if (lb_policy_name != nullptr) { + args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME; + new_args[num_args_to_add++] = grpc_channel_arg_string_create( + (char*)GRPC_ARG_LB_POLICY_NAME, + const_cast(lb_policy_name)); + } + } + } + } + result = grpc_channel_args_copy_and_add_and_remove( + r->channel_args_, args_to_remove, num_args_to_remove, new_args, + num_args_to_add); + gpr_free(service_config_string); + grpc_lb_addresses_destroy(r->lb_addresses_); + // Reset backoff state so that we start from the beginning when the + // next request gets triggered. + r->backoff_.Reset(); + } else { + const char* msg = grpc_error_string(error); + gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg); + grpc_millis next_try = r->backoff_.NextAttemptTime(); + grpc_millis timeout = next_try - ExecCtx::Get()->Now(); + gpr_log(GPR_INFO, "dns resolution failed (will retry): %s", + grpc_error_string(error)); + GPR_ASSERT(!r->have_next_resolution_timer_); + r->have_next_resolution_timer_ = true; + // TODO(roth): We currently deal with this ref manually. 
Once the + // new closure API is done, find a way to track this ref with the timer + // callback as part of the type system. + RefCountedPtr self = r->Ref(DEBUG_LOCATION, "retry-timer"); + self.release(); + if (timeout > 0) { + gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout); + } else { + gpr_log(GPR_DEBUG, "retrying immediately"); + } + grpc_timer_init(&r->next_resolution_timer_, next_try, + &r->on_next_resolution_); + } + if (r->resolved_result_ != nullptr) { + grpc_channel_args_destroy(r->resolved_result_); + } + r->resolved_result_ = result; + ++r->resolved_version_; + r->MaybeFinishNextLocked(); + r->Unref(DEBUG_LOCATION, "dns-resolving"); +} + +void AresDnsResolver::MaybeStartResolvingLocked() { + // If there is an existing timer, the time it fires is the earliest time we + // can start the next resolution. + if (have_next_resolution_timer_) { + // TODO(dgq): remove the following two lines once Pick First stops + // discarding subchannels after selecting. + ++resolved_version_; + MaybeFinishNextLocked(); + return; + } + if (last_resolution_timestamp_ >= 0) { + const grpc_millis earliest_next_resolution = + last_resolution_timestamp_ + min_time_between_resolutions_; + const grpc_millis ms_until_next_resolution = + earliest_next_resolution - grpc_core::ExecCtx::Get()->Now(); + if (ms_until_next_resolution > 0) { + const grpc_millis last_resolution_ago = + grpc_core::ExecCtx::Get()->Now() - last_resolution_timestamp_; + gpr_log(GPR_DEBUG, + "In cooldown from last resolution (from %" PRIdPTR + " ms ago). Will resolve again in %" PRIdPTR " ms", + last_resolution_ago, ms_until_next_resolution); + have_next_resolution_timer_ = true; + // TODO(roth): We currently deal with this ref manually. Once the + // new closure API is done, find a way to track this ref with the timer + // callback as part of the type system. + RefCountedPtr self = + Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown"); + self.release(); + grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution, + &on_next_resolution_); + // TODO(dgq): remove the following two lines once Pick First stops + // discarding subchannels after selecting. + ++resolved_version_; + MaybeFinishNextLocked(); + return; + } + } + StartResolvingLocked(); +} + +void AresDnsResolver::StartResolvingLocked() { + gpr_log(GPR_DEBUG, "Start resolving."); + // TODO(roth): We currently deal with this ref manually. Once the + // new closure API is done, find a way to track this ref with the timer + // callback as part of the type system. + RefCountedPtr self = Ref(DEBUG_LOCATION, "dns-resolving"); + self.release(); + GPR_ASSERT(!resolving_); + resolving_ = true; + lb_addresses_ = nullptr; + service_config_json_ = nullptr; + pending_request_ = grpc_dns_lookup_ares( + dns_server_, name_to_resolve_, kDefaultPort, interested_parties_, + &on_resolved_, &lb_addresses_, true /* check_grpclb */, + request_service_config_ ? &service_config_json_ : nullptr); + last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now(); +} + +void AresDnsResolver::MaybeFinishNextLocked() { + if (next_completion_ != nullptr && resolved_version_ != published_version_) { + *target_result_ = resolved_result_ == nullptr + ? 
nullptr + : grpc_channel_args_copy(resolved_result_); + gpr_log(GPR_DEBUG, "AresDnsResolver::MaybeFinishNextLocked()"); + GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_NONE); + next_completion_ = nullptr; + published_version_ = resolved_version_; + } +} + +// +// Factory +// + +class AresDnsResolverFactory : public ResolverFactory { + public: + OrphanablePtr CreateResolver( + const ResolverArgs& args) const override { + return OrphanablePtr(New(args)); + } + + const char* scheme() const override { return "dns"; } +}; + +} // namespace + +} // namespace grpc_core + +extern grpc_address_resolver_vtable* grpc_resolve_address_impl; +static grpc_address_resolver_vtable* default_resolver; + +static grpc_error* blocking_resolve_address_ares( + const char* name, const char* default_port, + grpc_resolved_addresses** addresses) { + return default_resolver->blocking_resolve_address(name, default_port, + addresses); +} + +static grpc_address_resolver_vtable ares_resolver = { + grpc_resolve_address_ares, blocking_resolve_address_ares}; + +void grpc_resolver_dns_ares_init() { + char* resolver_env = gpr_getenv("GRPC_DNS_RESOLVER"); + /* TODO(zyc): Turn on c-ares based resolver by default after the address + sorter and the CNAME support are added. */ + if (resolver_env != nullptr && gpr_stricmp(resolver_env, "ares") == 0) { + address_sorting_init(); + grpc_error* error = grpc_ares_init(); + if (error != GRPC_ERROR_NONE) { + GRPC_LOG_IF_ERROR("ares_library_init() failed", error); + return; + } + default_resolver = grpc_resolve_address_impl; + grpc_set_resolver_impl(&ares_resolver); + grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( + grpc_core::UniquePtr( + grpc_core::New())); + } + gpr_free(resolver_env); +} + +void grpc_resolver_dns_ares_shutdown() { + char* resolver_env = gpr_getenv("GRPC_DNS_RESOLVER"); + if (resolver_env != nullptr && gpr_stricmp(resolver_env, "ares") == 0) { + address_sorting_shutdown(); + grpc_ares_cleanup(); + } + gpr_free(resolver_env); +} + +#else /* GRPC_ARES == 1 && !defined(GRPC_UV) */ + +void grpc_resolver_dns_ares_init(void) {} + +void grpc_resolver_dns_ares_shutdown(void) {} + +#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h index 386012d2e..623954953 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h @@ -19,7 +19,9 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H -#include "src/core/lib/iomgr/exec_ctx.h" +#include + +#include #include "src/core/lib/iomgr/pollset_set.h" typedef struct grpc_ares_ev_driver grpc_ares_ev_driver; @@ -27,27 +29,25 @@ typedef struct grpc_ares_ev_driver grpc_ares_ev_driver; /* Start \a ev_driver. It will keep working until all IO on its ares_channel is done, or grpc_ares_ev_driver_destroy() is called. It may notify the callbacks bound to its ares_channel when necessary. */ -void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx, - grpc_ares_ev_driver *ev_driver); +void grpc_ares_ev_driver_start(grpc_ares_ev_driver* ev_driver); /* Returns the ares_channel owned by \a ev_driver. 
To bind a c-ares query to \a ev_driver, use the ares_channel owned by \a ev_driver as the arg of the query. */ -ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver); +ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver); /* Creates a new grpc_ares_ev_driver. Returns GRPC_ERROR_NONE if \a ev_driver is created successfully. */ -grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver, - grpc_pollset_set *pollset_set); +grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver, + grpc_pollset_set* pollset_set); /* Destroys \a ev_driver asynchronously. Pending lookups made on \a ev_driver will be cancelled and their on_done callbacks will be invoked with a status of ARES_ECANCELLED. */ -void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver); +void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver); /* Shutdown all the grpc_fds used by \a ev_driver */ -void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx, - grpc_ares_ev_driver *ev_driver); +void grpc_ares_ev_driver_shutdown(grpc_ares_ev_driver* ev_driver); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc similarity index 74% rename from Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc index c30cc93b6..b604f2bf1 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc @@ -16,6 +16,7 @@ * */ #include + #include "src/core/lib/iomgr/port.h" #if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET) @@ -28,16 +29,15 @@ #include #include #include -#include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/support/string.h" typedef struct fd_node { /** the owner of this fd node */ - grpc_ares_ev_driver *ev_driver; + grpc_ares_ev_driver* ev_driver; /** a closure wrapping on_readable_cb, which should be invoked when the grpc_fd in this node becomes readable. */ grpc_closure read_closure; @@ -45,12 +45,12 @@ typedef struct fd_node { grpc_fd in this node becomes writable. */ grpc_closure write_closure; /** next fd node in the list */ - struct fd_node *next; + struct fd_node* next; /** mutex guarding the rest of the state */ gpr_mu mu; /** the grpc_fd owned by this fd node */ - grpc_fd *fd; + grpc_fd* fd; /** if the readable closure has been registered */ bool readable_registered; /** if the writable closure has been registered */ @@ -63,42 +63,41 @@ struct grpc_ares_ev_driver { /** the ares_channel owned by this event driver */ ares_channel channel; /** pollset set for driving the IO events of the channel */ - grpc_pollset_set *pollset_set; + grpc_pollset_set* pollset_set; /** refcount of the event driver */ gpr_refcount refs; /** mutex guarding the rest of the state */ gpr_mu mu; /** a list of grpc_fd that this event driver is currently using. 
*/ - fd_node *fds; + fd_node* fds; /** is this event driver currently working? */ bool working; /** is this event driver being shut down */ bool shutting_down; }; -static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, - grpc_ares_ev_driver *ev_driver); +static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver); -static grpc_ares_ev_driver *grpc_ares_ev_driver_ref( - grpc_ares_ev_driver *ev_driver) { +static grpc_ares_ev_driver* grpc_ares_ev_driver_ref( + grpc_ares_ev_driver* ev_driver) { gpr_log(GPR_DEBUG, "Ref ev_driver %" PRIuPTR, (uintptr_t)ev_driver); gpr_ref(&ev_driver->refs); return ev_driver; } -static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver *ev_driver) { +static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver) { gpr_log(GPR_DEBUG, "Unref ev_driver %" PRIuPTR, (uintptr_t)ev_driver); if (gpr_unref(&ev_driver->refs)) { gpr_log(GPR_DEBUG, "destroy ev_driver %" PRIuPTR, (uintptr_t)ev_driver); - GPR_ASSERT(ev_driver->fds == NULL); + GPR_ASSERT(ev_driver->fds == nullptr); gpr_mu_destroy(&ev_driver->mu); ares_destroy(ev_driver->channel); gpr_free(ev_driver); } } -static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) { +static void fd_node_destroy(fd_node* fdn) { gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->fd)); GPR_ASSERT(!fdn->readable_registered); GPR_ASSERT(!fdn->writable_registered); @@ -106,34 +105,35 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) { /* c-ares library has closed the fd inside grpc_fd. This fd may be picked up immediately by another thread, and should not be closed by the following grpc_fd_orphan. */ - grpc_fd_orphan(exec_ctx, fdn->fd, NULL, NULL, true /* already_closed */, + grpc_fd_orphan(fdn->fd, nullptr, nullptr, true /* already_closed */, "c-ares query finished"); gpr_free(fdn); } -static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) { +static void fd_node_shutdown(fd_node* fdn) { gpr_mu_lock(&fdn->mu); fdn->shutting_down = true; if (!fdn->readable_registered && !fdn->writable_registered) { gpr_mu_unlock(&fdn->mu); - fd_node_destroy(exec_ctx, fdn); + fd_node_destroy(fdn); } else { - grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "c-ares fd shutdown")); + grpc_fd_shutdown( + fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown")); gpr_mu_unlock(&fdn->mu); } } -grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver, - grpc_pollset_set *pollset_set) { - *ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver)); +grpc_error* grpc_ares_ev_driver_create(grpc_ares_ev_driver** ev_driver, + grpc_pollset_set* pollset_set) { + *ev_driver = static_cast( + gpr_malloc(sizeof(grpc_ares_ev_driver))); int status = ares_init(&(*ev_driver)->channel); gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create"); if (status != ARES_SUCCESS) { - char *err_msg; + char* err_msg; gpr_asprintf(&err_msg, "Failed to init ares channel. 
C-ares error: %s", ares_strerror(status)); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_msg); gpr_free(err_msg); gpr_free(*ev_driver); return err; @@ -141,13 +141,13 @@ grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver, gpr_mu_init(&(*ev_driver)->mu); gpr_ref_init(&(*ev_driver)->refs, 1); (*ev_driver)->pollset_set = pollset_set; - (*ev_driver)->fds = NULL; + (*ev_driver)->fds = nullptr; (*ev_driver)->working = false; (*ev_driver)->shutting_down = false; return GRPC_ERROR_NONE; } -void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) { +void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver) { // It's not safe to shut down remaining fds here directly, becauses // ares_host_callback does not provide an exec_ctx. We mark the event driver // as being shut down. If the event driver is working, @@ -159,14 +159,13 @@ void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver *ev_driver) { grpc_ares_ev_driver_unref(ev_driver); } -void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx, - grpc_ares_ev_driver *ev_driver) { +void grpc_ares_ev_driver_shutdown(grpc_ares_ev_driver* ev_driver) { gpr_mu_lock(&ev_driver->mu); ev_driver->shutting_down = true; - fd_node *fn = ev_driver->fds; - while (fn != NULL) { - grpc_fd_shutdown(exec_ctx, fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "grpc_ares_ev_driver_shutdown")); + fd_node* fn = ev_driver->fds; + while (fn != nullptr) { + grpc_fd_shutdown(fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "grpc_ares_ev_driver_shutdown")); fn = fn->next; } gpr_mu_unlock(&ev_driver->mu); @@ -174,39 +173,38 @@ void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx, // Search fd in the fd_node list head. This is an O(n) search, the max possible // value of n is ARES_GETSOCK_MAXNUM (16). n is typically 1 - 2 in our tests. 
-static fd_node *pop_fd_node(fd_node **head, int fd) { +static fd_node* pop_fd_node(fd_node** head, int fd) { fd_node dummy_head; dummy_head.next = *head; - fd_node *node = &dummy_head; - while (node->next != NULL) { + fd_node* node = &dummy_head; + while (node->next != nullptr) { if (grpc_fd_wrapped_fd(node->next->fd) == fd) { - fd_node *ret = node->next; + fd_node* ret = node->next; node->next = node->next->next; *head = dummy_head.next; return ret; } node = node->next; } - return NULL; + return nullptr; } /* Check if \a fd is still readable */ -static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver, +static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver* ev_driver, int fd) { size_t bytes_available = 0; return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0; } -static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - fd_node *fdn = (fd_node *)arg; - grpc_ares_ev_driver *ev_driver = fdn->ev_driver; +static void on_readable_cb(void* arg, grpc_error* error) { + fd_node* fdn = static_cast(arg); + grpc_ares_ev_driver* ev_driver = fdn->ev_driver; gpr_mu_lock(&fdn->mu); const int fd = grpc_fd_wrapped_fd(fdn->fd); fdn->readable_registered = false; if (fdn->shutting_down && !fdn->writable_registered) { gpr_mu_unlock(&fdn->mu); - fd_node_destroy(exec_ctx, fdn); + fd_node_destroy(fdn); grpc_ares_ev_driver_unref(ev_driver); return; } @@ -227,21 +225,20 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg, ares_cancel(ev_driver->channel); } gpr_mu_lock(&ev_driver->mu); - grpc_ares_notify_on_event_locked(exec_ctx, ev_driver); + grpc_ares_notify_on_event_locked(ev_driver); gpr_mu_unlock(&ev_driver->mu); grpc_ares_ev_driver_unref(ev_driver); } -static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - fd_node *fdn = (fd_node *)arg; - grpc_ares_ev_driver *ev_driver = fdn->ev_driver; +static void on_writable_cb(void* arg, grpc_error* error) { + fd_node* fdn = static_cast(arg); + grpc_ares_ev_driver* ev_driver = fdn->ev_driver; gpr_mu_lock(&fdn->mu); const int fd = grpc_fd_wrapped_fd(fdn->fd); fdn->writable_registered = false; if (fdn->shutting_down && !fdn->readable_registered) { gpr_mu_unlock(&fdn->mu); - fd_node_destroy(exec_ctx, fdn); + fd_node_destroy(fdn); grpc_ares_ev_driver_unref(ev_driver); return; } @@ -260,20 +257,19 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg, ares_cancel(ev_driver->channel); } gpr_mu_lock(&ev_driver->mu); - grpc_ares_notify_on_event_locked(exec_ctx, ev_driver); + grpc_ares_notify_on_event_locked(ev_driver); gpr_mu_unlock(&ev_driver->mu); grpc_ares_ev_driver_unref(ev_driver); } -ares_channel *grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver *ev_driver) { +ares_channel* grpc_ares_ev_driver_get_channel(grpc_ares_ev_driver* ev_driver) { return &ev_driver->channel; } // Get the file descriptors used by the ev_driver's ares channel, register // driver_closure with these filedescriptors. 
-static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, - grpc_ares_ev_driver *ev_driver) { - fd_node *new_list = NULL; +static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) { + fd_node* new_list = nullptr; if (!ev_driver->shutting_down) { ares_socket_t socks[ARES_GETSOCK_MAXNUM]; int socks_bitmask = @@ -281,12 +277,12 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, for (size_t i = 0; i < ARES_GETSOCK_MAXNUM; i++) { if (ARES_GETSOCK_READABLE(socks_bitmask, i) || ARES_GETSOCK_WRITABLE(socks_bitmask, i)) { - fd_node *fdn = pop_fd_node(&ev_driver->fds, socks[i]); + fd_node* fdn = pop_fd_node(&ev_driver->fds, socks[i]); // Create a new fd_node if sock[i] is not in the fd_node list. - if (fdn == NULL) { - char *fd_name; + if (fdn == nullptr) { + char* fd_name; gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i); - fdn = (fd_node *)gpr_malloc(sizeof(fd_node)); + fdn = static_cast(gpr_malloc(sizeof(fd_node))); gpr_log(GPR_DEBUG, "new fd: %d", socks[i]); fdn->fd = grpc_fd_create(socks[i], fd_name); fdn->ev_driver = ev_driver; @@ -298,7 +294,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_cb, fdn, grpc_schedule_on_exec_ctx); - grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set, fdn->fd); + grpc_pollset_set_add_fd(ev_driver->pollset_set, fdn->fd); gpr_free(fd_name); } fdn->next = new_list; @@ -310,7 +306,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, !fdn->readable_registered) { grpc_ares_ev_driver_ref(ev_driver); gpr_log(GPR_DEBUG, "notify read on: %d", grpc_fd_wrapped_fd(fdn->fd)); - grpc_fd_notify_on_read(exec_ctx, fdn->fd, &fdn->read_closure); + grpc_fd_notify_on_read(fdn->fd, &fdn->read_closure); fdn->readable_registered = true; } // Register write_closure if the socket is writable and write_closure @@ -320,7 +316,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, gpr_log(GPR_DEBUG, "notify write on: %d", grpc_fd_wrapped_fd(fdn->fd)); grpc_ares_ev_driver_ref(ev_driver); - grpc_fd_notify_on_write(exec_ctx, fdn->fd, &fdn->write_closure); + grpc_fd_notify_on_write(fdn->fd, &fdn->write_closure); fdn->writable_registered = true; } gpr_mu_unlock(&fdn->mu); @@ -330,25 +326,24 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, // Any remaining fds in ev_driver->fds were not returned by ares_getsock() and // are therefore no longer in use, so they can be shut down and removed from // the list. - while (ev_driver->fds != NULL) { - fd_node *cur = ev_driver->fds; + while (ev_driver->fds != nullptr) { + fd_node* cur = ev_driver->fds; ev_driver->fds = ev_driver->fds->next; - fd_node_shutdown(exec_ctx, cur); + fd_node_shutdown(cur); } ev_driver->fds = new_list; // If the ev driver has no working fd, all the tasks are done. 
- if (new_list == NULL) { + if (new_list == nullptr) { ev_driver->working = false; gpr_log(GPR_DEBUG, "ev driver stop working"); } } -void grpc_ares_ev_driver_start(grpc_exec_ctx *exec_ctx, - grpc_ares_ev_driver *ev_driver) { +void grpc_ares_ev_driver_start(grpc_ares_ev_driver* ev_driver) { gpr_mu_lock(&ev_driver->mu); if (!ev_driver->working) { ev_driver->working = true; - grpc_ares_notify_on_event_locked(exec_ctx, ev_driver); + grpc_ares_notify_on_event_locked(ev_driver); } gpr_mu_unlock(&ev_driver->mu); } diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc similarity index 59% rename from Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc index 04379975e..e86ab5a37 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc @@ -17,6 +17,7 @@ */ #include + #if GRPC_ARES == 1 && !defined(GRPC_UV) #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" @@ -28,36 +29,39 @@ #include #include -#include #include #include #include -#include +#include #include "src/core/ext/filters/client_channel/parse_address.h" #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/iomgr/error.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/nameser.h" #include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/support/string.h" static gpr_once g_basic_init = GPR_ONCE_INIT; static gpr_mu g_init_mu; +grpc_core::TraceFlag grpc_trace_cares_address_sorting(false, + "cares_address_sorting"); + struct grpc_ares_request { /** indicates the DNS server to use, if specified */ struct ares_addr_port_node dns_server_addr; /** following members are set in grpc_resolve_address_ares_impl */ /** closure to call when the request completes */ - grpc_closure *on_done; + grpc_closure* on_done; /** the pointer to receive the resolved addresses */ - grpc_lb_addresses **lb_addrs_out; + grpc_lb_addresses** lb_addrs_out; /** the pointer to receive the service config in JSON */ - char **service_config_json_out; + char** service_config_json_out; /** the evernt driver used by this request */ - grpc_ares_ev_driver *ev_driver; + grpc_ares_ev_driver* ev_driver; /** number of ongoing queries */ gpr_refcount pending_queries; @@ -66,15 +70,15 @@ struct grpc_ares_request { /** is there at least one successful query, set in on_done_cb */ bool success; /** the errors explaining the request failure, set in on_done_cb */ - grpc_error *error; + grpc_error* error; }; typedef struct grpc_ares_hostbyname_request { /** following members are set in create_hostbyname_request */ /** the top-level request instance */ - grpc_ares_request *parent_request; + grpc_ares_request* parent_request; /** host to resolve, parsed from the name to resolve */ - char *host; + char* host; /** port to fill in sockaddr_in, parsed from the name to resolve */ uint16_t port; /** is it a grpclb address */ @@ -83,48 +87,88 @@ typedef struct grpc_ares_hostbyname_request { static void do_basic_init(void) { 
gpr_mu_init(&g_init_mu); } -static uint16_t strhtons(const char *port) { +static uint16_t strhtons(const char* port) { if (strcmp(port, "http") == 0) { return htons(80); } else if (strcmp(port, "https") == 0) { return htons(443); } - return htons((unsigned short)atoi(port)); + return htons(static_cast(atoi(port))); } -static void grpc_ares_request_ref(grpc_ares_request *r) { +static void grpc_ares_request_ref(grpc_ares_request* r) { gpr_ref(&r->pending_queries); } -static void grpc_ares_request_unref(grpc_exec_ctx *exec_ctx, - grpc_ares_request *r) { +static void log_address_sorting_list(grpc_lb_addresses* lb_addrs, + const char* input_output_str) { + for (size_t i = 0; i < lb_addrs->num_addresses; i++) { + char* addr_str; + if (grpc_sockaddr_to_string(&addr_str, &lb_addrs->addresses[i].address, + true)) { + gpr_log(GPR_DEBUG, "c-ares address sorting: %s[%" PRIuPTR "]=%s", + input_output_str, i, addr_str); + gpr_free(addr_str); + } else { + gpr_log(GPR_DEBUG, + "c-ares address sorting: %s[%" PRIuPTR "]=", + input_output_str, i); + } + } +} + +void grpc_cares_wrapper_address_sorting_sort(grpc_lb_addresses* lb_addrs) { + if (grpc_trace_cares_address_sorting.enabled()) { + log_address_sorting_list(lb_addrs, "input"); + } + address_sorting_sortable* sortables = (address_sorting_sortable*)gpr_zalloc( + sizeof(address_sorting_sortable) * lb_addrs->num_addresses); + for (size_t i = 0; i < lb_addrs->num_addresses; i++) { + sortables[i].user_data = &lb_addrs->addresses[i]; + memcpy(&sortables[i].dest_addr.addr, &lb_addrs->addresses[i].address.addr, + lb_addrs->addresses[i].address.len); + sortables[i].dest_addr.len = lb_addrs->addresses[i].address.len; + } + address_sorting_rfc_6724_sort(sortables, lb_addrs->num_addresses); + grpc_lb_address* sorted_lb_addrs = (grpc_lb_address*)gpr_zalloc( + sizeof(grpc_lb_address) * lb_addrs->num_addresses); + for (size_t i = 0; i < lb_addrs->num_addresses; i++) { + sorted_lb_addrs[i] = *(grpc_lb_address*)sortables[i].user_data; + } + gpr_free(sortables); + gpr_free(lb_addrs->addresses); + lb_addrs->addresses = sorted_lb_addrs; + if (grpc_trace_cares_address_sorting.enabled()) { + log_address_sorting_list(lb_addrs, "output"); + } +} + +/* Allow tests to access grpc_ares_wrapper_address_sorting_sort */ +void grpc_cares_wrapper_test_only_address_sorting_sort( + grpc_lb_addresses* lb_addrs) { + grpc_cares_wrapper_address_sorting_sort(lb_addrs); +} + +static void grpc_ares_request_unref(grpc_ares_request* r) { /* If there are no pending queries, invoke on_done callback and destroy the request */ if (gpr_unref(&r->pending_queries)) { - /* TODO(zyc): Sort results with RFC6724 before invoking on_done. */ - if (exec_ctx == NULL) { - /* A new exec_ctx is created here, as the c-ares interface does not - provide one in ares_host_callback. It's safe to schedule on_done with - the newly created exec_ctx, since the caller has been warned not to - acquire locks in on_done. ares_dns_resolver is using combiner to - protect resources needed by on_done. 
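
The grpc_cares_wrapper_address_sorting_sort function added earlier in this hunk follows a copy-sort-copy-back shape: each grpc_lb_address is wrapped in an address_sorting_sortable carrying a user_data back-pointer, the wrappers are ordered by address_sorting_rfc_6724_sort, and the permuted back-pointers are used to rebuild the address array. A minimal sketch of that shape, with invented Address/Sortable types and a toy comparator standing in for the real RFC 6724 destination-address selection rules:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Illustrative stand-ins for grpc_lb_address / address_sorting_sortable.
    struct Address { const char* text; int preference; };
    struct Sortable { void* user_data; int key; };

    // Stand-in for address_sorting_rfc_6724_sort; the real function orders by
    // the RFC 6724 rules, not by this toy preference key.
    static void toy_sort(std::vector<Sortable>& s) {
      std::stable_sort(s.begin(), s.end(),
                       [](const Sortable& a, const Sortable& b) { return a.key > b.key; });
    }

    static void sort_addresses(std::vector<Address>& addrs) {
      std::vector<Sortable> sortables(addrs.size());
      for (size_t i = 0; i < addrs.size(); i++) {   // wrap: keep a back-pointer
        sortables[i].user_data = &addrs[i];
        sortables[i].key = addrs[i].preference;
      }
      toy_sort(sortables);                          // sort the wrappers only
      std::vector<Address> sorted(addrs.size());
      for (size_t i = 0; i < addrs.size(); i++) {   // rebuild in sorted order
        sorted[i] = *static_cast<Address*>(sortables[i].user_data);
      }
      addrs.swap(sorted);                           // replace the array, as the patch does
    }

    int main() {
      std::vector<Address> addrs = {{"10.0.0.1", 1}, {"[::1]", 3}, {"192.168.0.5", 2}};
      sort_addresses(addrs);
      for (const auto& a : addrs) std::printf("%s\n", a.text);
    }
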
*/ - grpc_exec_ctx new_exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_CLOSURE_SCHED(&new_exec_ctx, r->on_done, r->error); - grpc_exec_ctx_finish(&new_exec_ctx); - } else { - GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, r->error); + grpc_lb_addresses* lb_addrs = *(r->lb_addrs_out); + if (lb_addrs != nullptr) { + grpc_cares_wrapper_address_sorting_sort(lb_addrs); } + GRPC_CLOSURE_SCHED(r->on_done, r->error); gpr_mu_destroy(&r->mu); grpc_ares_ev_driver_destroy(r->ev_driver); gpr_free(r); } } -static grpc_ares_hostbyname_request *create_hostbyname_request( - grpc_ares_request *parent_request, char *host, uint16_t port, +static grpc_ares_hostbyname_request* create_hostbyname_request( + grpc_ares_request* parent_request, char* host, uint16_t port, bool is_balancer) { - grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)gpr_zalloc( - sizeof(grpc_ares_hostbyname_request)); + grpc_ares_hostbyname_request* hr = static_cast( + gpr_zalloc(sizeof(grpc_ares_hostbyname_request))); hr->parent_request = parent_request; hr->host = gpr_strdup(host); hr->port = port; @@ -133,34 +177,34 @@ static grpc_ares_hostbyname_request *create_hostbyname_request( return hr; } -static void destroy_hostbyname_request(grpc_exec_ctx *exec_ctx, - grpc_ares_hostbyname_request *hr) { - grpc_ares_request_unref(exec_ctx, hr->parent_request); +static void destroy_hostbyname_request(grpc_ares_hostbyname_request* hr) { + grpc_ares_request_unref(hr->parent_request); gpr_free(hr->host); gpr_free(hr); } -static void on_hostbyname_done_cb(void *arg, int status, int timeouts, - struct hostent *hostent) { - grpc_ares_hostbyname_request *hr = (grpc_ares_hostbyname_request *)arg; - grpc_ares_request *r = hr->parent_request; +static void on_hostbyname_done_cb(void* arg, int status, int timeouts, + struct hostent* hostent) { + grpc_ares_hostbyname_request* hr = + static_cast(arg); + grpc_ares_request* r = hr->parent_request; gpr_mu_lock(&r->mu); if (status == ARES_SUCCESS) { GRPC_ERROR_UNREF(r->error); r->error = GRPC_ERROR_NONE; r->success = true; - grpc_lb_addresses **lb_addresses = r->lb_addrs_out; - if (*lb_addresses == NULL) { - *lb_addresses = grpc_lb_addresses_create(0, NULL); + grpc_lb_addresses** lb_addresses = r->lb_addrs_out; + if (*lb_addresses == nullptr) { + *lb_addresses = grpc_lb_addresses_create(0, nullptr); } size_t prev_naddr = (*lb_addresses)->num_addresses; size_t i; - for (i = 0; hostent->h_addr_list[i] != NULL; i++) { + for (i = 0; hostent->h_addr_list[i] != nullptr; i++) { } (*lb_addresses)->num_addresses += i; - (*lb_addresses)->addresses = (grpc_lb_address *)gpr_realloc( - (*lb_addresses)->addresses, - sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses); + (*lb_addresses)->addresses = static_cast( + gpr_realloc((*lb_addresses)->addresses, + sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses)); for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) { switch (hostent->h_addrtype) { case AF_INET6: { @@ -169,13 +213,13 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts, memset(&addr, 0, addr_len); memcpy(&addr.sin6_addr, hostent->h_addr_list[i - prev_naddr], sizeof(struct in6_addr)); - addr.sin6_family = (sa_family_t)hostent->h_addrtype; + addr.sin6_family = static_cast(hostent->h_addrtype); addr.sin6_port = hr->port; grpc_lb_addresses_set_address( *lb_addresses, i, &addr, addr_len, hr->is_balancer /* is_balancer */, - hr->is_balancer ? hr->host : NULL /* balancer_name */, - NULL /* user_data */); + hr->is_balancer ? 
hr->host : nullptr /* balancer_name */, + nullptr /* user_data */); char output[INET6_ADDRSTRLEN]; ares_inet_ntop(AF_INET6, &addr.sin6_addr, output, INET6_ADDRSTRLEN); gpr_log(GPR_DEBUG, @@ -190,13 +234,13 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts, memset(&addr, 0, addr_len); memcpy(&addr.sin_addr, hostent->h_addr_list[i - prev_naddr], sizeof(struct in_addr)); - addr.sin_family = (sa_family_t)hostent->h_addrtype; + addr.sin_family = static_cast(hostent->h_addrtype); addr.sin_port = hr->port; grpc_lb_addresses_set_address( *lb_addresses, i, &addr, addr_len, hr->is_balancer /* is_balancer */, - hr->is_balancer ? hr->host : NULL /* balancer_name */, - NULL /* user_data */); + hr->is_balancer ? hr->host : nullptr /* balancer_name */, + nullptr /* user_data */); char output[INET_ADDRSTRLEN]; ares_inet_ntop(AF_INET, &addr.sin_addr, output, INET_ADDRSTRLEN); gpr_log(GPR_DEBUG, @@ -208,10 +252,10 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts, } } } else if (!r->success) { - char *error_msg; + char* error_msg; gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s", ares_strerror(status)); - grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg); + grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg); gpr_free(error_msg); if (r->error == GRPC_ERROR_NONE) { r->error = error; @@ -220,43 +264,43 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts, } } gpr_mu_unlock(&r->mu); - destroy_hostbyname_request(NULL, hr); + destroy_hostbyname_request(hr); } -static void on_srv_query_done_cb(void *arg, int status, int timeouts, - unsigned char *abuf, int alen) { - grpc_ares_request *r = (grpc_ares_request *)arg; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +static void on_srv_query_done_cb(void* arg, int status, int timeouts, + unsigned char* abuf, int alen) { + grpc_ares_request* r = static_cast(arg); + grpc_core::ExecCtx exec_ctx; gpr_log(GPR_DEBUG, "on_query_srv_done_cb"); if (status == ARES_SUCCESS) { gpr_log(GPR_DEBUG, "on_query_srv_done_cb ARES_SUCCESS"); - struct ares_srv_reply *reply; + struct ares_srv_reply* reply; const int parse_status = ares_parse_srv_reply(abuf, alen, &reply); if (parse_status == ARES_SUCCESS) { - ares_channel *channel = grpc_ares_ev_driver_get_channel(r->ev_driver); - for (struct ares_srv_reply *srv_it = reply; srv_it != NULL; + ares_channel* channel = grpc_ares_ev_driver_get_channel(r->ev_driver); + for (struct ares_srv_reply* srv_it = reply; srv_it != nullptr; srv_it = srv_it->next) { if (grpc_ipv6_loopback_available()) { - grpc_ares_hostbyname_request *hr = create_hostbyname_request( + grpc_ares_hostbyname_request* hr = create_hostbyname_request( r, srv_it->host, htons(srv_it->port), true /* is_balancer */); ares_gethostbyname(*channel, hr->host, AF_INET6, on_hostbyname_done_cb, hr); } - grpc_ares_hostbyname_request *hr = create_hostbyname_request( + grpc_ares_hostbyname_request* hr = create_hostbyname_request( r, srv_it->host, htons(srv_it->port), true /* is_balancer */); ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb, hr); - grpc_ares_ev_driver_start(&exec_ctx, r->ev_driver); + grpc_ares_ev_driver_start(r->ev_driver); } } - if (reply != NULL) { + if (reply != nullptr) { ares_free_data(reply); } } else if (!r->success) { - char *error_msg; + char* error_msg; gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s", ares_strerror(status)); - grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg); + grpc_error* error = 
GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg); gpr_free(error_msg); if (r->error == GRPC_ERROR_NONE) { r->error = error; @@ -264,27 +308,26 @@ static void on_srv_query_done_cb(void *arg, int status, int timeouts, r->error = grpc_error_add_child(error, r->error); } } - grpc_ares_request_unref(&exec_ctx, r); - grpc_exec_ctx_finish(&exec_ctx); + grpc_ares_request_unref(r); } static const char g_service_config_attribute_prefix[] = "grpc_config="; -static void on_txt_done_cb(void *arg, int status, int timeouts, - unsigned char *buf, int len) { +static void on_txt_done_cb(void* arg, int status, int timeouts, + unsigned char* buf, int len) { gpr_log(GPR_DEBUG, "on_txt_done_cb"); - char *error_msg; - grpc_ares_request *r = (grpc_ares_request *)arg; + char* error_msg; + grpc_ares_request* r = static_cast(arg); const size_t prefix_len = sizeof(g_service_config_attribute_prefix) - 1; - struct ares_txt_ext *result = NULL; - struct ares_txt_ext *reply = NULL; - grpc_error *error = GRPC_ERROR_NONE; + struct ares_txt_ext* result = nullptr; + struct ares_txt_ext* reply = nullptr; + grpc_error* error = GRPC_ERROR_NONE; gpr_mu_lock(&r->mu); if (status != ARES_SUCCESS) goto fail; status = ares_parse_txt_reply_ext(buf, len, &reply); if (status != ARES_SUCCESS) goto fail; // Find service config in TXT record. - for (result = reply; result != NULL; result = result->next) { + for (result = reply; result != nullptr; result = result->next) { if (result->record_start && memcmp(result->txt, g_service_config_attribute_prefix, prefix_len) == 0) { @@ -292,15 +335,17 @@ static void on_txt_done_cb(void *arg, int status, int timeouts, } } // Found a service config record. - if (result != NULL) { + if (result != nullptr) { size_t service_config_len = result->length - prefix_len; - *r->service_config_json_out = (char *)gpr_malloc(service_config_len + 1); + *r->service_config_json_out = + static_cast(gpr_malloc(service_config_len + 1)); memcpy(*r->service_config_json_out, result->txt + prefix_len, service_config_len); - for (result = result->next; result != NULL && !result->record_start; + for (result = result->next; result != nullptr && !result->record_start; result = result->next) { - *r->service_config_json_out = (char *)gpr_realloc( - *r->service_config_json_out, service_config_len + result->length + 1); + *r->service_config_json_out = static_cast( + gpr_realloc(*r->service_config_json_out, + service_config_len + result->length + 1)); memcpy(*r->service_config_json_out + service_config_len, result->txt, result->length); service_config_len += result->length; @@ -323,18 +368,17 @@ static void on_txt_done_cb(void *arg, int status, int timeouts, } done: gpr_mu_unlock(&r->mu); - grpc_ares_request_unref(NULL, r); + grpc_ares_request_unref(r); } -static grpc_ares_request *grpc_dns_lookup_ares_impl( - grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name, - const char *default_port, grpc_pollset_set *interested_parties, - grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb, - char **service_config_json) { - grpc_error *error = GRPC_ERROR_NONE; - grpc_ares_hostbyname_request *hr = NULL; - grpc_ares_request *r = NULL; - ares_channel *channel = NULL; +static grpc_ares_request* grpc_dns_lookup_ares_impl( + const char* dns_server, const char* name, const char* default_port, + grpc_pollset_set* interested_parties, grpc_closure* on_done, + grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json) { + grpc_error* error = GRPC_ERROR_NONE; + grpc_ares_hostbyname_request* hr = nullptr; + 
grpc_ares_request* r = nullptr; + ares_channel* channel = nullptr; /* TODO(zyc): Enable tracing after #9603 is checked in */ /* if (grpc_dns_trace) { gpr_log(GPR_DEBUG, "resolve_address (blocking): name=%s, default_port=%s", @@ -342,16 +386,16 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl( } */ /* parse name, splitting it into host and port parts */ - char *host; - char *port; + char* host; + char* port; gpr_split_host_port(name, &host, &port); - if (host == NULL) { + if (host == nullptr) { error = grpc_error_set_str( GRPC_ERROR_CREATE_FROM_STATIC_STRING("unparseable host:port"), GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name)); goto error_cleanup; - } else if (port == NULL) { - if (default_port == NULL) { + } else if (port == nullptr) { + if (default_port == nullptr) { error = grpc_error_set_str( GRPC_ERROR_CREATE_FROM_STATIC_STRING("no port in name"), GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name)); @@ -360,11 +404,11 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl( port = gpr_strdup(default_port); } - grpc_ares_ev_driver *ev_driver; + grpc_ares_ev_driver* ev_driver; error = grpc_ares_ev_driver_create(&ev_driver, interested_parties); if (error != GRPC_ERROR_NONE) goto error_cleanup; - r = (grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request)); + r = static_cast(gpr_zalloc(sizeof(grpc_ares_request))); gpr_mu_init(&r->mu); r->ev_driver = ev_driver; r->on_done = on_done; @@ -375,12 +419,12 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl( channel = grpc_ares_ev_driver_get_channel(r->ev_driver); // If dns_server is specified, use it. - if (dns_server != NULL) { + if (dns_server != nullptr) { gpr_log(GPR_INFO, "Using DNS server %s", dns_server); grpc_resolved_address addr; if (grpc_parse_ipv4_hostport(dns_server, &addr, false /* log_errors */)) { r->dns_server_addr.family = AF_INET; - struct sockaddr_in *in = (struct sockaddr_in *)addr.addr; + struct sockaddr_in* in = reinterpret_cast(addr.addr); memcpy(&r->dns_server_addr.addr.addr4, &in->sin_addr, sizeof(struct in_addr)); r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr); @@ -388,7 +432,8 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl( } else if (grpc_parse_ipv6_hostport(dns_server, &addr, false /* log_errors */)) { r->dns_server_addr.family = AF_INET6; - struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr.addr; + struct sockaddr_in6* in6 = + reinterpret_cast(addr.addr); memcpy(&r->dns_server_addr.addr.addr6, &in6->sin6_addr, sizeof(struct in6_addr)); r->dns_server_addr.tcp_port = grpc_sockaddr_get_port(&addr); @@ -402,7 +447,7 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl( } int status = ares_set_servers_ports(*channel, &r->dns_server_addr); if (status != ARES_SUCCESS) { - char *error_msg; + char* error_msg; gpr_asprintf(&error_msg, "C-ares status is not ARES_SUCCESS: %s", ares_strerror(status)); error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg); @@ -423,53 +468,56 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl( if (check_grpclb) { /* Query the SRV record */ grpc_ares_request_ref(r); - char *service_name; + char* service_name; gpr_asprintf(&service_name, "_grpclb._tcp.%s", host); ares_query(*channel, service_name, ns_c_in, ns_t_srv, on_srv_query_done_cb, r); gpr_free(service_name); } - if (service_config_json != NULL) { + if (service_config_json != nullptr) { grpc_ares_request_ref(r); - ares_search(*channel, hr->host, ns_c_in, ns_t_txt, on_txt_done_cb, r); + char* config_name; + gpr_asprintf(&config_name, "_grpc_config.%s", host); + 
ares_search(*channel, config_name, ns_c_in, ns_t_txt, on_txt_done_cb, r); + gpr_free(config_name); } /* TODO(zyc): Handle CNAME records here. */ - grpc_ares_ev_driver_start(exec_ctx, r->ev_driver); - grpc_ares_request_unref(exec_ctx, r); + grpc_ares_ev_driver_start(r->ev_driver); + grpc_ares_request_unref(r); gpr_free(host); gpr_free(port); return r; error_cleanup: - GRPC_CLOSURE_SCHED(exec_ctx, on_done, error); + GRPC_CLOSURE_SCHED(on_done, error); gpr_free(host); gpr_free(port); - return NULL; + return nullptr; } -grpc_ares_request *(*grpc_dns_lookup_ares)( - grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name, - const char *default_port, grpc_pollset_set *interested_parties, - grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb, - char **service_config_json) = grpc_dns_lookup_ares_impl; +grpc_ares_request* (*grpc_dns_lookup_ares)( + const char* dns_server, const char* name, const char* default_port, + grpc_pollset_set* interested_parties, grpc_closure* on_done, + grpc_lb_addresses** addrs, bool check_grpclb, + char** service_config_json) = grpc_dns_lookup_ares_impl; -void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) { +void grpc_cancel_ares_request(grpc_ares_request* r) { if (grpc_dns_lookup_ares == grpc_dns_lookup_ares_impl) { - grpc_ares_ev_driver_shutdown(exec_ctx, r->ev_driver); + grpc_ares_ev_driver_shutdown(r->ev_driver); } } -grpc_error *grpc_ares_init(void) { +grpc_error* grpc_ares_init(void) { gpr_once_init(&g_basic_init, do_basic_init); gpr_mu_lock(&g_init_mu); int status = ares_library_init(ARES_LIB_INIT_ALL); gpr_mu_unlock(&g_init_mu); if (status != ARES_SUCCESS) { - char *error_msg; + char* error_msg; gpr_asprintf(&error_msg, "ares_library_init failed: %s", ares_strerror(status)); - grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg); + grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg); gpr_free(error_msg); return error; } @@ -488,63 +536,61 @@ void grpc_ares_cleanup(void) { typedef struct grpc_resolve_address_ares_request { /** the pointer to receive the resolved addresses */ - grpc_resolved_addresses **addrs_out; + grpc_resolved_addresses** addrs_out; /** currently resolving lb addresses */ - grpc_lb_addresses *lb_addrs; + grpc_lb_addresses* lb_addrs; /** closure to call when the resolve_address_ares request completes */ - grpc_closure *on_resolve_address_done; + grpc_closure* on_resolve_address_done; /** a closure wrapping on_dns_lookup_done_cb, which should be invoked when the grpc_dns_lookup_ares operation is done. 
*/ grpc_closure on_dns_lookup_done; } grpc_resolve_address_ares_request; -static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_resolve_address_ares_request *r = - (grpc_resolve_address_ares_request *)arg; - grpc_resolved_addresses **resolved_addresses = r->addrs_out; - if (r->lb_addrs == NULL || r->lb_addrs->num_addresses == 0) { - *resolved_addresses = NULL; +static void on_dns_lookup_done_cb(void* arg, grpc_error* error) { + grpc_resolve_address_ares_request* r = + static_cast(arg); + grpc_resolved_addresses** resolved_addresses = r->addrs_out; + if (r->lb_addrs == nullptr || r->lb_addrs->num_addresses == 0) { + *resolved_addresses = nullptr; } else { - *resolved_addresses = - (grpc_resolved_addresses *)gpr_zalloc(sizeof(grpc_resolved_addresses)); + *resolved_addresses = static_cast( + gpr_zalloc(sizeof(grpc_resolved_addresses))); (*resolved_addresses)->naddrs = r->lb_addrs->num_addresses; - (*resolved_addresses)->addrs = (grpc_resolved_address *)gpr_zalloc( - sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs); + (*resolved_addresses)->addrs = + static_cast(gpr_zalloc( + sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs)); for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) { GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer); memcpy(&(*resolved_addresses)->addrs[i], &r->lb_addrs->addresses[i].address, sizeof(grpc_resolved_address)); } } - GRPC_CLOSURE_SCHED(exec_ctx, r->on_resolve_address_done, - GRPC_ERROR_REF(error)); - grpc_lb_addresses_destroy(exec_ctx, r->lb_addrs); + GRPC_CLOSURE_SCHED(r->on_resolve_address_done, GRPC_ERROR_REF(error)); + if (r->lb_addrs != nullptr) grpc_lb_addresses_destroy(r->lb_addrs); gpr_free(r); } -static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, - const char *name, - const char *default_port, - grpc_pollset_set *interested_parties, - grpc_closure *on_done, - grpc_resolved_addresses **addrs) { - grpc_resolve_address_ares_request *r = - (grpc_resolve_address_ares_request *)gpr_zalloc( - sizeof(grpc_resolve_address_ares_request)); +static void grpc_resolve_address_ares_impl(const char* name, + const char* default_port, + grpc_pollset_set* interested_parties, + grpc_closure* on_done, + grpc_resolved_addresses** addrs) { + grpc_resolve_address_ares_request* r = + static_cast( + gpr_zalloc(sizeof(grpc_resolve_address_ares_request))); r->addrs_out = addrs; r->on_resolve_address_done = on_done; GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r, grpc_schedule_on_exec_ctx); - grpc_dns_lookup_ares(exec_ctx, NULL /* dns_server */, name, default_port, + grpc_dns_lookup_ares(nullptr /* dns_server */, name, default_port, interested_parties, &r->on_dns_lookup_done, &r->lb_addrs, false /* check_grpclb */, - NULL /* service_config_json */); + nullptr /* service_config_json */); } void (*grpc_resolve_address_ares)( - grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, - grpc_pollset_set *interested_parties, grpc_closure *on_done, - grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl; + const char* name, const char* default_port, + grpc_pollset_set* interested_parties, grpc_closure* on_done, + grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl; #endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h index 108333047..2d84a038d 100644 
--- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h @@ -19,12 +19,15 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H +#include + #include "src/core/ext/filters/client_channel/lb_policy_factory.h" -#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/resolve_address.h" +extern grpc_core::TraceFlag grpc_trace_cares_address_sorting; + typedef struct grpc_ares_request grpc_ares_request; /* Asynchronously resolve \a name. Use \a default_port if a port isn't @@ -32,12 +35,11 @@ typedef struct grpc_ares_request grpc_ares_request; must be called at least once before this function. \a on_done may be called directly in this function without being scheduled with \a exec_ctx, so it must not try to acquire locks that are being held by the caller. */ -extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx, - const char *name, - const char *default_port, - grpc_pollset_set *interested_parties, - grpc_closure *on_done, - grpc_resolved_addresses **addresses); +extern void (*grpc_resolve_address_ares)(const char* name, + const char* default_port, + grpc_pollset_set* interested_parties, + grpc_closure* on_done, + grpc_resolved_addresses** addresses); /* Asynchronously resolve \a name. It will try to resolve grpclb SRV records in addition to the normal address records. For normal address records, it uses @@ -46,24 +48,27 @@ extern void (*grpc_resolve_address_ares)(grpc_exec_ctx *exec_ctx, function. \a on_done may be called directly in this function without being scheduled with \a exec_ctx, so it must not try to acquire locks that are being held by the caller. */ -extern grpc_ares_request *(*grpc_dns_lookup_ares)( - grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name, - const char *default_port, grpc_pollset_set *interested_parties, - grpc_closure *on_done, grpc_lb_addresses **addresses, bool check_grpclb, - char **service_config_json); +extern grpc_ares_request* (*grpc_dns_lookup_ares)( + const char* dns_server, const char* name, const char* default_port, + grpc_pollset_set* interested_parties, grpc_closure* on_done, + grpc_lb_addresses** addresses, bool check_grpclb, + char** service_config_json); /* Cancel the pending grpc_ares_request \a request */ -void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, - grpc_ares_request *request); +void grpc_cancel_ares_request(grpc_ares_request* request); /* Initialize gRPC ares wrapper. Must be called at least once before grpc_resolve_address_ares(). */ -grpc_error *grpc_ares_init(void); +grpc_error* grpc_ares_init(void); /* Uninitialized gRPC ares wrapper. If there was more than one previous call to grpc_ares_init(), this function uninitializes the gRPC ares wrapper only if it has been called the same number of times as grpc_ares_init(). 
*/ void grpc_ares_cleanup(void); +/* Exposed only for testing */ +void grpc_cares_wrapper_test_only_address_sorting_sort( + grpc_lb_addresses* lb_addrs); + #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c deleted file mode 100644 index f2587c452..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c +++ /dev/null @@ -1,60 +0,0 @@ -/* - * - * Copyright 2016-2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include -#if GRPC_ARES != 1 || defined(GRPC_UV) - -#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" - -struct grpc_ares_request { - char val; -}; - -static grpc_ares_request *grpc_dns_lookup_ares_impl( - grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name, - const char *default_port, grpc_pollset_set *interested_parties, - grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb, - char **service_config_json) { - return NULL; -} - -grpc_ares_request *(*grpc_dns_lookup_ares)( - grpc_exec_ctx *exec_ctx, const char *dns_server, const char *name, - const char *default_port, grpc_pollset_set *interested_parties, - grpc_closure *on_done, grpc_lb_addresses **addrs, bool check_grpclb, - char **service_config_json) = grpc_dns_lookup_ares_impl; - -void grpc_cancel_ares_request(grpc_exec_ctx *exec_ctx, grpc_ares_request *r) {} - -grpc_error *grpc_ares_init(void) { return GRPC_ERROR_NONE; } - -void grpc_ares_cleanup(void) {} - -static void grpc_resolve_address_ares_impl(grpc_exec_ctx *exec_ctx, - const char *name, - const char *default_port, - grpc_pollset_set *interested_parties, - grpc_closure *on_done, - grpc_resolved_addresses **addrs) {} - -void (*grpc_resolve_address_ares)( - grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, - grpc_pollset_set *interested_parties, grpc_closure *on_done, - grpc_resolved_addresses **addrs) = grpc_resolve_address_ares_impl; - -#endif /* GRPC_ARES != 1 || defined(GRPC_UV) */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc new file mode 100644 index 000000000..5096e480b --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc @@ -0,0 +1,59 @@ +/* + * + * Copyright 2016-2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#if GRPC_ARES != 1 || defined(GRPC_UV) + +#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" + +struct grpc_ares_request { + char val; +}; + +static grpc_ares_request* grpc_dns_lookup_ares_impl( + const char* dns_server, const char* name, const char* default_port, + grpc_pollset_set* interested_parties, grpc_closure* on_done, + grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json) { + return NULL; +} + +grpc_ares_request* (*grpc_dns_lookup_ares)( + const char* dns_server, const char* name, const char* default_port, + grpc_pollset_set* interested_parties, grpc_closure* on_done, + grpc_lb_addresses** addrs, bool check_grpclb, + char** service_config_json) = grpc_dns_lookup_ares_impl; + +void grpc_cancel_ares_request(grpc_ares_request* r) {} + +grpc_error* grpc_ares_init(void) { return GRPC_ERROR_NONE; } + +void grpc_ares_cleanup(void) {} + +static void grpc_resolve_address_ares_impl(const char* name, + const char* default_port, + grpc_pollset_set* interested_parties, + grpc_closure* on_done, + grpc_resolved_addresses** addrs) {} + +void (*grpc_resolve_address_ares)( + const char* name, const char* default_port, + grpc_pollset_set* interested_parties, grpc_closure* on_done, + grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl; + +#endif /* GRPC_ARES != 1 || defined(GRPC_UV) */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c deleted file mode 100644 index 5ea75f055..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c +++ /dev/null @@ -1,310 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
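
Both grpc_ares_wrapper.cc and the fallback translation unit above publish the resolver entry points as mutable function pointers (grpc_dns_lookup_ares, grpc_resolve_address_ares) initialized to a file-static *_impl function; that indirection is what lets tests or platform builds swap the whole resolver. A self-contained sketch of the pattern, using invented names rather than the gRPC symbols:

    #include <cstdio>

    // Default implementation, analogous to grpc_dns_lookup_ares_impl.
    static int lookup_impl(const char* name) {
      std::printf("real lookup for %s\n", name);
      return 0;
    }

    // The public entry point is a pointer, so it can be re-targeted at runtime.
    int (*lookup)(const char* name) = lookup_impl;

    // A test (or a fallback build) can install its own implementation.
    static int fake_lookup(const char* name) {
      std::printf("fake lookup for %s\n", name);
      return 0;
    }

    int main() {
      lookup("example.com");   // uses the default implementation
      lookup = fake_lookup;    // swap it, as tests do with grpc_dns_lookup_ares
      lookup("example.com");
    }
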
- * - */ - -#include - -#include -#include -#include - -#include "src/core/ext/filters/client_channel/lb_policy_registry.h" -#include "src/core/ext/filters/client_channel/resolver_registry.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/iomgr/combiner.h" -#include "src/core/lib/iomgr/resolve_address.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/support/backoff.h" -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" - -#define GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS 1 -#define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1 -#define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6 -#define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120 -#define GRPC_DNS_RECONNECT_JITTER 0.2 - -typedef struct { - /** base class: must be first */ - grpc_resolver base; - /** name to resolve */ - char *name_to_resolve; - /** default port to use */ - char *default_port; - /** channel args. */ - grpc_channel_args *channel_args; - /** pollset_set to drive the name resolution process */ - grpc_pollset_set *interested_parties; - - /** are we currently resolving? */ - bool resolving; - /** which version of the result have we published? */ - int published_version; - /** which version of the result is current? */ - int resolved_version; - /** pending next completion, or NULL */ - grpc_closure *next_completion; - /** target result address for next completion */ - grpc_channel_args **target_result; - /** current (fully resolved) result */ - grpc_channel_args *resolved_result; - /** retry timer */ - bool have_retry_timer; - grpc_timer retry_timer; - grpc_closure on_retry; - /** retry backoff state */ - gpr_backoff backoff_state; - - /** currently resolving addresses */ - grpc_resolved_addresses *addresses; -} dns_resolver; - -static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r); - -static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx, - dns_resolver *r); -static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx, - dns_resolver *r); - -static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r); -static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *r); -static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r, - grpc_channel_args **target_result, - grpc_closure *on_complete); - -static const grpc_resolver_vtable dns_resolver_vtable = { - dns_destroy, dns_shutdown_locked, dns_channel_saw_error_locked, - dns_next_locked}; - -static void dns_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver) { - dns_resolver *r = (dns_resolver *)resolver; - if (r->have_retry_timer) { - grpc_timer_cancel(exec_ctx, &r->retry_timer); - } - if (r->next_completion != NULL) { - *r->target_result = NULL; - GRPC_CLOSURE_SCHED( - exec_ctx, r->next_completion, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown")); - r->next_completion = NULL; - } -} - -static void dns_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver) { - dns_resolver *r = (dns_resolver *)resolver; - if (!r->resolving) { - gpr_backoff_reset(&r->backoff_state); - dns_start_resolving_locked(exec_ctx, r); - } -} - -static void dns_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver, - grpc_channel_args **target_result, - grpc_closure *on_complete) { - dns_resolver *r = (dns_resolver *)resolver; - GPR_ASSERT(!r->next_completion); - r->next_completion = on_complete; - r->target_result = target_result; - if (r->resolved_version == 0 && !r->resolving) { - 
gpr_backoff_reset(&r->backoff_state); - dns_start_resolving_locked(exec_ctx, r); - } else { - dns_maybe_finish_next_locked(exec_ctx, r); - } -} - -static void dns_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - dns_resolver *r = (dns_resolver *)arg; - - r->have_retry_timer = false; - if (error == GRPC_ERROR_NONE) { - if (!r->resolving) { - dns_start_resolving_locked(exec_ctx, r); - } - } - - GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer"); -} - -static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - dns_resolver *r = (dns_resolver *)arg; - grpc_channel_args *result = NULL; - GPR_ASSERT(r->resolving); - r->resolving = false; - if (r->addresses != NULL) { - grpc_lb_addresses *addresses = grpc_lb_addresses_create( - r->addresses->naddrs, NULL /* user_data_vtable */); - for (size_t i = 0; i < r->addresses->naddrs; ++i) { - grpc_lb_addresses_set_address( - addresses, i, &r->addresses->addrs[i].addr, - r->addresses->addrs[i].len, false /* is_balancer */, - NULL /* balancer_name */, NULL /* user_data */); - } - grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(addresses); - result = grpc_channel_args_copy_and_add(r->channel_args, &new_arg, 1); - grpc_resolved_addresses_destroy(r->addresses); - grpc_lb_addresses_destroy(exec_ctx, addresses); - } else { - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_try = gpr_backoff_step(&r->backoff_state, now); - gpr_timespec timeout = gpr_time_sub(next_try, now); - gpr_log(GPR_INFO, "dns resolution failed (will retry): %s", - grpc_error_string(error)); - GPR_ASSERT(!r->have_retry_timer); - r->have_retry_timer = true; - GRPC_RESOLVER_REF(&r->base, "retry-timer"); - if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) { - gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec, - timeout.tv_nsec); - } else { - gpr_log(GPR_DEBUG, "retrying immediately"); - } - GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r, - grpc_combiner_scheduler(r->base.combiner)); - grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry, now); - } - if (r->resolved_result != NULL) { - grpc_channel_args_destroy(exec_ctx, r->resolved_result); - } - r->resolved_result = result; - r->resolved_version++; - dns_maybe_finish_next_locked(exec_ctx, r); - - GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving"); -} - -static void dns_start_resolving_locked(grpc_exec_ctx *exec_ctx, - dns_resolver *r) { - GRPC_RESOLVER_REF(&r->base, "dns-resolving"); - GPR_ASSERT(!r->resolving); - r->resolving = true; - r->addresses = NULL; - grpc_resolve_address( - exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties, - GRPC_CLOSURE_CREATE(dns_on_resolved_locked, r, - grpc_combiner_scheduler(r->base.combiner)), - &r->addresses); -} - -static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx, - dns_resolver *r) { - if (r->next_completion != NULL && - r->resolved_version != r->published_version) { - *r->target_result = r->resolved_result == NULL - ? 
NULL - : grpc_channel_args_copy(r->resolved_result); - GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE); - r->next_completion = NULL; - r->published_version = r->resolved_version; - } -} - -static void dns_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) { - dns_resolver *r = (dns_resolver *)gr; - if (r->resolved_result != NULL) { - grpc_channel_args_destroy(exec_ctx, r->resolved_result); - } - grpc_pollset_set_destroy(exec_ctx, r->interested_parties); - gpr_free(r->name_to_resolve); - gpr_free(r->default_port); - grpc_channel_args_destroy(exec_ctx, r->channel_args); - gpr_free(r); -} - -static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx, - grpc_resolver_args *args, - const char *default_port) { - if (0 != strcmp(args->uri->authority, "")) { - gpr_log(GPR_ERROR, "authority based dns uri's not supported"); - return NULL; - } - // Get name from args. - char *path = args->uri->path; - if (path[0] == '/') ++path; - // Create resolver. - dns_resolver *r = (dns_resolver *)gpr_zalloc(sizeof(dns_resolver)); - grpc_resolver_init(&r->base, &dns_resolver_vtable, args->combiner); - r->name_to_resolve = gpr_strdup(path); - r->default_port = gpr_strdup(default_port); - r->channel_args = grpc_channel_args_copy(args->args); - r->interested_parties = grpc_pollset_set_create(); - if (args->pollset_set != NULL) { - grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties, - args->pollset_set); - } - gpr_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS, - GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, - GRPC_DNS_RECONNECT_JITTER, - GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000, - GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000); - return &r->base; -} - -/* - * FACTORY - */ - -static void dns_factory_ref(grpc_resolver_factory *factory) {} - -static void dns_factory_unref(grpc_resolver_factory *factory) {} - -static grpc_resolver *dns_factory_create_resolver( - grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, - grpc_resolver_args *args) { - return dns_create(exec_ctx, args, "https"); -} - -static char *dns_factory_get_default_host_name(grpc_resolver_factory *factory, - grpc_uri *uri) { - const char *path = uri->path; - if (path[0] == '/') ++path; - return gpr_strdup(path); -} - -static const grpc_resolver_factory_vtable dns_factory_vtable = { - dns_factory_ref, dns_factory_unref, dns_factory_create_resolver, - dns_factory_get_default_host_name, "dns"}; -static grpc_resolver_factory dns_resolver_factory = {&dns_factory_vtable}; - -static grpc_resolver_factory *dns_resolver_factory_create() { - return &dns_resolver_factory; -} - -void grpc_resolver_dns_native_init(void) { - char *resolver = gpr_getenv("GRPC_DNS_RESOLVER"); - if (resolver != NULL && gpr_stricmp(resolver, "native") == 0) { - gpr_log(GPR_DEBUG, "Using native dns resolver"); - grpc_register_resolver_type(dns_resolver_factory_create()); - } else { - grpc_resolver_factory *existing_factory = - grpc_resolver_factory_lookup("dns"); - if (existing_factory == NULL) { - gpr_log(GPR_DEBUG, "Using native dns resolver"); - grpc_register_resolver_type(dns_resolver_factory_create()); - } else { - grpc_resolver_factory_unref(existing_factory); - } - } - gpr_free(resolver); -} - -void grpc_resolver_dns_native_shutdown(void) {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc new file mode 100644 index 000000000..e7842a795 --- /dev/null +++ 
b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc @@ -0,0 +1,348 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include +#include +#include + +#include +#include +#include + +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/lib/backoff/backoff.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/timer.h" + +#define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1 +#define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6 +#define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120 +#define GRPC_DNS_RECONNECT_JITTER 0.2 + +namespace grpc_core { + +namespace { + +const char kDefaultPort[] = "https"; + +class NativeDnsResolver : public Resolver { + public: + explicit NativeDnsResolver(const ResolverArgs& args); + + void NextLocked(grpc_channel_args** result, + grpc_closure* on_complete) override; + + void RequestReresolutionLocked() override; + + void ShutdownLocked() override; + + private: + virtual ~NativeDnsResolver(); + + void MaybeStartResolvingLocked(); + void StartResolvingLocked(); + void MaybeFinishNextLocked(); + + static void OnNextResolutionLocked(void* arg, grpc_error* error); + static void OnResolvedLocked(void* arg, grpc_error* error); + + /// name to resolve + char* name_to_resolve_ = nullptr; + /// channel args + grpc_channel_args* channel_args_ = nullptr; + /// pollset_set to drive the name resolution process + grpc_pollset_set* interested_parties_ = nullptr; + /// are we currently resolving? + bool resolving_ = false; + grpc_closure on_resolved_; + /// which version of the result have we published? + int published_version_ = 0; + /// which version of the result is current? 
+ int resolved_version_ = 0; + /// pending next completion, or nullptr + grpc_closure* next_completion_ = nullptr; + /// target result address for next completion + grpc_channel_args** target_result_ = nullptr; + /// current (fully resolved) result + grpc_channel_args* resolved_result_ = nullptr; + /// next resolution timer + bool have_next_resolution_timer_ = false; + grpc_timer next_resolution_timer_; + grpc_closure on_next_resolution_; + /// min time between DNS requests + grpc_millis min_time_between_resolutions_; + /// timestamp of last DNS request + grpc_millis last_resolution_timestamp_ = -1; + /// retry backoff state + BackOff backoff_; + /// currently resolving addresses + grpc_resolved_addresses* addresses_ = nullptr; +}; + +NativeDnsResolver::NativeDnsResolver(const ResolverArgs& args) + : Resolver(args.combiner), + backoff_( + BackOff::Options() + .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * + 1000) + .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER) + .set_jitter(GRPC_DNS_RECONNECT_JITTER) + .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) { + char* path = args.uri->path; + if (path[0] == '/') ++path; + name_to_resolve_ = gpr_strdup(path); + channel_args_ = grpc_channel_args_copy(args.args); + const grpc_arg* arg = grpc_channel_args_find( + args.args, GRPC_ARG_DNS_MIN_TIME_BETWEEN_RESOLUTIONS_MS); + min_time_between_resolutions_ = + grpc_channel_arg_get_integer(arg, {1000, 0, INT_MAX}); + interested_parties_ = grpc_pollset_set_create(); + if (args.pollset_set != nullptr) { + grpc_pollset_set_add_pollset_set(interested_parties_, args.pollset_set); + } + GRPC_CLOSURE_INIT(&on_next_resolution_, + NativeDnsResolver::OnNextResolutionLocked, this, + grpc_combiner_scheduler(args.combiner)); + GRPC_CLOSURE_INIT(&on_resolved_, NativeDnsResolver::OnResolvedLocked, this, + grpc_combiner_scheduler(args.combiner)); +} + +NativeDnsResolver::~NativeDnsResolver() { + if (resolved_result_ != nullptr) { + grpc_channel_args_destroy(resolved_result_); + } + grpc_pollset_set_destroy(interested_parties_); + gpr_free(name_to_resolve_); + grpc_channel_args_destroy(channel_args_); +} + +void NativeDnsResolver::NextLocked(grpc_channel_args** result, + grpc_closure* on_complete) { + GPR_ASSERT(next_completion_ == nullptr); + next_completion_ = on_complete; + target_result_ = result; + if (resolved_version_ == 0 && !resolving_) { + MaybeStartResolvingLocked(); + } else { + MaybeFinishNextLocked(); + } +} + +void NativeDnsResolver::RequestReresolutionLocked() { + if (!resolving_) { + MaybeStartResolvingLocked(); + } +} + +void NativeDnsResolver::ShutdownLocked() { + if (have_next_resolution_timer_) { + grpc_timer_cancel(&next_resolution_timer_); + } + if (next_completion_ != nullptr) { + *target_result_ = nullptr; + GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Resolver Shutdown")); + next_completion_ = nullptr; + } +} + +void NativeDnsResolver::OnNextResolutionLocked(void* arg, grpc_error* error) { + NativeDnsResolver* r = static_cast(arg); + r->have_next_resolution_timer_ = false; + if (error == GRPC_ERROR_NONE && !r->resolving_) { + r->StartResolvingLocked(); + } + r->Unref(DEBUG_LOCATION, "retry-timer"); +} + +void NativeDnsResolver::OnResolvedLocked(void* arg, grpc_error* error) { + NativeDnsResolver* r = static_cast(arg); + grpc_channel_args* result = nullptr; + GPR_ASSERT(r->resolving_); + r->resolving_ = false; + GRPC_ERROR_REF(error); + error = + grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, + 
grpc_slice_from_copied_string(r->name_to_resolve_)); + if (r->addresses_ != nullptr) { + grpc_lb_addresses* addresses = grpc_lb_addresses_create( + r->addresses_->naddrs, nullptr /* user_data_vtable */); + for (size_t i = 0; i < r->addresses_->naddrs; ++i) { + grpc_lb_addresses_set_address( + addresses, i, &r->addresses_->addrs[i].addr, + r->addresses_->addrs[i].len, false /* is_balancer */, + nullptr /* balancer_name */, nullptr /* user_data */); + } + grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(addresses); + result = grpc_channel_args_copy_and_add(r->channel_args_, &new_arg, 1); + grpc_resolved_addresses_destroy(r->addresses_); + grpc_lb_addresses_destroy(addresses); + // Reset backoff state so that we start from the beginning when the + // next request gets triggered. + r->backoff_.Reset(); + } else { + grpc_millis next_try = r->backoff_.NextAttemptTime(); + grpc_millis timeout = next_try - ExecCtx::Get()->Now(); + gpr_log(GPR_INFO, "dns resolution failed (will retry): %s", + grpc_error_string(error)); + GPR_ASSERT(!r->have_next_resolution_timer_); + r->have_next_resolution_timer_ = true; + // TODO(roth): We currently deal with this ref manually. Once the + // new closure API is done, find a way to track this ref with the timer + // callback as part of the type system. + RefCountedPtr self = + r->Ref(DEBUG_LOCATION, "next_resolution_timer"); + self.release(); + if (timeout > 0) { + gpr_log(GPR_DEBUG, "retrying in %" PRIdPTR " milliseconds", timeout); + } else { + gpr_log(GPR_DEBUG, "retrying immediately"); + } + grpc_timer_init(&r->next_resolution_timer_, next_try, + &r->on_next_resolution_); + } + if (r->resolved_result_ != nullptr) { + grpc_channel_args_destroy(r->resolved_result_); + } + r->resolved_result_ = result; + ++r->resolved_version_; + r->MaybeFinishNextLocked(); + GRPC_ERROR_UNREF(error); + r->Unref(DEBUG_LOCATION, "dns-resolving"); +} + +void NativeDnsResolver::MaybeStartResolvingLocked() { + // If there is an existing timer, the time it fires is the earliest time we + // can start the next resolution. + if (have_next_resolution_timer_) { + // TODO(dgq): remove the following two lines once Pick First stops + // discarding subchannels after selecting. + ++resolved_version_; + MaybeFinishNextLocked(); + return; + } + if (last_resolution_timestamp_ >= 0) { + const grpc_millis earliest_next_resolution = + last_resolution_timestamp_ + min_time_between_resolutions_; + const grpc_millis ms_until_next_resolution = + earliest_next_resolution - grpc_core::ExecCtx::Get()->Now(); + if (ms_until_next_resolution > 0) { + const grpc_millis last_resolution_ago = + grpc_core::ExecCtx::Get()->Now() - last_resolution_timestamp_; + gpr_log(GPR_DEBUG, + "In cooldown from last resolution (from %" PRIdPTR + " ms ago). Will resolve again in %" PRIdPTR " ms", + last_resolution_ago, ms_until_next_resolution); + have_next_resolution_timer_ = true; + // TODO(roth): We currently deal with this ref manually. Once the + // new closure API is done, find a way to track this ref with the timer + // callback as part of the type system. + RefCountedPtr self = + Ref(DEBUG_LOCATION, "next_resolution_timer_cooldown"); + self.release(); + grpc_timer_init(&next_resolution_timer_, ms_until_next_resolution, + &on_next_resolution_); + // TODO(dgq): remove the following two lines once Pick First stops + // discarding subchannels after selecting. 
+ ++resolved_version_; + MaybeFinishNextLocked(); + return; + } + } + StartResolvingLocked(); +} + +void NativeDnsResolver::StartResolvingLocked() { + gpr_log(GPR_DEBUG, "Start resolving."); + // TODO(roth): We currently deal with this ref manually. Once the + // new closure API is done, find a way to track this ref with the timer + // callback as part of the type system. + RefCountedPtr self = Ref(DEBUG_LOCATION, "dns-resolving"); + self.release(); + GPR_ASSERT(!resolving_); + resolving_ = true; + addresses_ = nullptr; + grpc_resolve_address(name_to_resolve_, kDefaultPort, interested_parties_, + &on_resolved_, &addresses_); + last_resolution_timestamp_ = grpc_core::ExecCtx::Get()->Now(); +} + +void NativeDnsResolver::MaybeFinishNextLocked() { + if (next_completion_ != nullptr && resolved_version_ != published_version_) { + *target_result_ = resolved_result_ == nullptr + ? nullptr + : grpc_channel_args_copy(resolved_result_); + GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_NONE); + next_completion_ = nullptr; + published_version_ = resolved_version_; + } +} + +// +// Factory +// + +class NativeDnsResolverFactory : public ResolverFactory { + public: + OrphanablePtr CreateResolver( + const ResolverArgs& args) const override { + if (0 != strcmp(args.uri->authority, "")) { + gpr_log(GPR_ERROR, "authority based dns uri's not supported"); + return OrphanablePtr(nullptr); + } + return OrphanablePtr(New(args)); + } + + const char* scheme() const override { return "dns"; } +}; + +} // namespace + +} // namespace grpc_core + +void grpc_resolver_dns_native_init() { + char* resolver_env = gpr_getenv("GRPC_DNS_RESOLVER"); + if (resolver_env != nullptr && gpr_stricmp(resolver_env, "native") == 0) { + gpr_log(GPR_DEBUG, "Using native dns resolver"); + grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( + grpc_core::UniquePtr( + grpc_core::New())); + } else { + grpc_core::ResolverRegistry::Builder::InitRegistry(); + grpc_core::ResolverFactory* existing_factory = + grpc_core::ResolverRegistry::LookupResolverFactory("dns"); + if (existing_factory == nullptr) { + gpr_log(GPR_DEBUG, "Using native dns resolver"); + grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( + grpc_core::UniquePtr( + grpc_core::New())); + } + } + gpr_free(resolver_env); +} + +void grpc_resolver_dns_native_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c deleted file mode 100644 index 69ea440ae..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c +++ /dev/null @@ -1,265 +0,0 @@ -// -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// This is similar to the sockaddr resolver, except that it supports a -// bunch of query args that are useful for dependency injection in tests. 
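
The MaybeStartResolvingLocked logic above introduces a cooldown: a new resolution may start only once min_time_between_resolutions_ (a channel arg that appears to default to 1000 ms in the hunk above) has elapsed since last_resolution_timestamp_; otherwise a timer is armed for the remaining interval, and failed attempts are additionally spaced out by the BackOff helper. A small sketch of just the cooldown arithmetic, with millis_now() standing in for grpc_core::ExecCtx::Get()->Now() and all names invented for illustration:

    #include <cstdint>
    #include <cstdio>

    static int64_t g_fake_now_ms = 0;                 // stand-in clock
    static int64_t millis_now() { return g_fake_now_ms; }

    struct Cooldown {
      int64_t min_time_between_ms;
      int64_t last_resolution_ms;                     // -1 means "never resolved"
    };

    // Returns 0 if resolution may start immediately, otherwise the wait in ms.
    static int64_t delay_before_next_resolution(const Cooldown& c) {
      if (c.last_resolution_ms < 0) return 0;
      const int64_t earliest = c.last_resolution_ms + c.min_time_between_ms;
      const int64_t wait = earliest - millis_now();
      return wait > 0 ? wait : 0;
    }

    int main() {
      Cooldown c{1000, -1};                           // 1000 ms between resolutions
      std::printf("first request waits %lld ms\n",
                  (long long)delay_before_next_resolution(c));
      c.last_resolution_ms = millis_now();            // record the resolution
      g_fake_now_ms += 250;                           // 250 ms later: still cooling down
      std::printf("second request waits %lld ms\n",
                  (long long)delay_before_next_resolution(c));
    }
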
- -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "src/core/ext/filters/client_channel/lb_policy_factory.h" -#include "src/core/ext/filters/client_channel/parse_address.h" -#include "src/core/ext/filters/client_channel/resolver_registry.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/iomgr/closure.h" -#include "src/core/lib/iomgr/combiner.h" -#include "src/core/lib/iomgr/resolve_address.h" -#include "src/core/lib/iomgr/unix_sockets_posix.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" - -#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" - -// -// fake_resolver -// - -typedef struct { - // base class -- must be first - grpc_resolver base; - - // passed-in parameters - grpc_channel_args* channel_args; - - // If not NULL, the next set of resolution results to be returned to - // grpc_resolver_next_locked()'s closure. - grpc_channel_args* next_results; - - // Results to use for the pretended re-resolution in - // fake_resolver_channel_saw_error_locked(). - grpc_channel_args* results_upon_error; - - // pending next completion, or NULL - grpc_closure* next_completion; - // target result address for next completion - grpc_channel_args** target_result; -} fake_resolver; - -static void fake_resolver_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) { - fake_resolver* r = (fake_resolver*)gr; - grpc_channel_args_destroy(exec_ctx, r->next_results); - grpc_channel_args_destroy(exec_ctx, r->results_upon_error); - grpc_channel_args_destroy(exec_ctx, r->channel_args); - gpr_free(r); -} - -static void fake_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx, - grpc_resolver* resolver) { - fake_resolver* r = (fake_resolver*)resolver; - if (r->next_completion != NULL) { - *r->target_result = NULL; - GRPC_CLOSURE_SCHED( - exec_ctx, r->next_completion, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown")); - r->next_completion = NULL; - } -} - -static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx, - fake_resolver* r) { - if (r->next_completion != NULL && r->next_results != NULL) { - *r->target_result = - grpc_channel_args_union(r->next_results, r->channel_args); - grpc_channel_args_destroy(exec_ctx, r->next_results); - r->next_results = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE); - r->next_completion = NULL; - } -} - -static void fake_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx, - grpc_resolver* resolver) { - fake_resolver* r = (fake_resolver*)resolver; - if (r->next_results == NULL && r->results_upon_error != NULL) { - // Pretend we re-resolved. 
- r->next_results = grpc_channel_args_copy(r->results_upon_error); - } - fake_resolver_maybe_finish_next_locked(exec_ctx, r); -} - -static void fake_resolver_next_locked(grpc_exec_ctx* exec_ctx, - grpc_resolver* resolver, - grpc_channel_args** target_result, - grpc_closure* on_complete) { - fake_resolver* r = (fake_resolver*)resolver; - GPR_ASSERT(!r->next_completion); - r->next_completion = on_complete; - r->target_result = target_result; - fake_resolver_maybe_finish_next_locked(exec_ctx, r); -} - -static const grpc_resolver_vtable fake_resolver_vtable = { - fake_resolver_destroy, fake_resolver_shutdown_locked, - fake_resolver_channel_saw_error_locked, fake_resolver_next_locked}; - -struct grpc_fake_resolver_response_generator { - fake_resolver* resolver; // Set by the fake_resolver constructor to itself. - gpr_refcount refcount; -}; - -grpc_fake_resolver_response_generator* -grpc_fake_resolver_response_generator_create() { - grpc_fake_resolver_response_generator* generator = - (grpc_fake_resolver_response_generator*)gpr_zalloc(sizeof(*generator)); - gpr_ref_init(&generator->refcount, 1); - return generator; -} - -grpc_fake_resolver_response_generator* -grpc_fake_resolver_response_generator_ref( - grpc_fake_resolver_response_generator* generator) { - gpr_ref(&generator->refcount); - return generator; -} - -void grpc_fake_resolver_response_generator_unref( - grpc_fake_resolver_response_generator* generator) { - if (gpr_unref(&generator->refcount)) { - gpr_free(generator); - } -} - -typedef struct set_response_closure_arg { - grpc_closure set_response_closure; - grpc_fake_resolver_response_generator* generator; - grpc_channel_args* next_response; -} set_response_closure_arg; - -static void set_response_closure_fn(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - set_response_closure_arg* closure_arg = (set_response_closure_arg*)arg; - grpc_fake_resolver_response_generator* generator = closure_arg->generator; - fake_resolver* r = generator->resolver; - if (r->next_results != NULL) { - grpc_channel_args_destroy(exec_ctx, r->next_results); - } - r->next_results = closure_arg->next_response; - if (r->results_upon_error != NULL) { - grpc_channel_args_destroy(exec_ctx, r->results_upon_error); - } - r->results_upon_error = grpc_channel_args_copy(closure_arg->next_response); - gpr_free(closure_arg); - fake_resolver_maybe_finish_next_locked(exec_ctx, r); -} - -void grpc_fake_resolver_response_generator_set_response( - grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator, - grpc_channel_args* next_response) { - GPR_ASSERT(generator->resolver != NULL); - set_response_closure_arg* closure_arg = - (set_response_closure_arg*)gpr_zalloc(sizeof(*closure_arg)); - closure_arg->generator = generator; - closure_arg->next_response = grpc_channel_args_copy(next_response); - GRPC_CLOSURE_SCHED(exec_ctx, - GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, - set_response_closure_fn, closure_arg, - grpc_combiner_scheduler( - generator->resolver->base.combiner)), - GRPC_ERROR_NONE); -} - -static void* response_generator_arg_copy(void* p) { - return grpc_fake_resolver_response_generator_ref( - (grpc_fake_resolver_response_generator*)p); -} - -static void response_generator_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) { - grpc_fake_resolver_response_generator_unref( - (grpc_fake_resolver_response_generator*)p); -} - -static int response_generator_cmp(void* a, void* b) { return GPR_ICMP(a, b); } - -static const grpc_arg_pointer_vtable response_generator_arg_vtable = { - 
response_generator_arg_copy, response_generator_arg_destroy, - response_generator_cmp}; - -grpc_arg grpc_fake_resolver_response_generator_arg( - grpc_fake_resolver_response_generator* generator) { - grpc_arg arg; - arg.type = GRPC_ARG_POINTER; - arg.key = (char*)GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR; - arg.value.pointer.p = generator; - arg.value.pointer.vtable = &response_generator_arg_vtable; - return arg; -} - -grpc_fake_resolver_response_generator* -grpc_fake_resolver_get_response_generator(const grpc_channel_args* args) { - const grpc_arg* arg = - grpc_channel_args_find(args, GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR); - if (arg == NULL || arg->type != GRPC_ARG_POINTER) return NULL; - return (grpc_fake_resolver_response_generator*)arg->value.pointer.p; -} - -// -// fake_resolver_factory -// - -static void fake_resolver_factory_ref(grpc_resolver_factory* factory) {} - -static void fake_resolver_factory_unref(grpc_resolver_factory* factory) {} - -static grpc_resolver* fake_resolver_create(grpc_exec_ctx* exec_ctx, - grpc_resolver_factory* factory, - grpc_resolver_args* args) { - fake_resolver* r = (fake_resolver*)gpr_zalloc(sizeof(*r)); - r->channel_args = grpc_channel_args_copy(args->args); - grpc_resolver_init(&r->base, &fake_resolver_vtable, args->combiner); - grpc_fake_resolver_response_generator* response_generator = - grpc_fake_resolver_get_response_generator(args->args); - if (response_generator != NULL) response_generator->resolver = r; - return &r->base; -} - -static char* fake_resolver_get_default_authority(grpc_resolver_factory* factory, - grpc_uri* uri) { - const char* path = uri->path; - if (path[0] == '/') ++path; - return gpr_strdup(path); -} - -static const grpc_resolver_factory_vtable fake_resolver_factory_vtable = { - fake_resolver_factory_ref, fake_resolver_factory_unref, - fake_resolver_create, fake_resolver_get_default_authority, "fake"}; - -static grpc_resolver_factory fake_resolver_factory = { - &fake_resolver_factory_vtable}; - -void grpc_resolver_fake_init(void) { - grpc_register_resolver_type(&fake_resolver_factory); -} - -void grpc_resolver_fake_shutdown(void) {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc new file mode 100644 index 000000000..99a33f227 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc @@ -0,0 +1,297 @@ +// +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// This is similar to the sockaddr resolver, except that it supports a +// bunch of query args that are useful for dependency injection in tests. 
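// A hedged test-side sketch of how the FakeResolverResponseGenerator defined in
// this file is typically wired up: the generator reaches the FakeResolver through
// a channel arg, and the test then injects resolution results on demand. The
// target string, function name, and the prebuilt `response` args are
// illustrative only.

#include <grpc/grpc.h>

#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gprpp/memory.h"

void FakeResolutionSketch(grpc_channel_args* response /* carries LB addresses */) {
  grpc_core::FakeResolverResponseGenerator* generator =
      grpc_core::New<grpc_core::FakeResolverResponseGenerator>();
  grpc_arg arg =
      grpc_core::FakeResolverResponseGenerator::MakeChannelArg(generator);
  grpc_channel_args* args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
  // "fake:///..." targets are handled by the FakeResolverFactory registered below.
  grpc_channel* channel =
      grpc_insecure_channel_create("fake:///test.servers", args, nullptr);
  generator->SetResponse(response);  // hand `response` to the pending NextLocked()
  // generator->SetFailure();        // or force a transient resolution failure
  grpc_channel_destroy(channel);
  grpc_channel_args_destroy(args);
  generator->Unref();
}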
+ +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include "src/core/ext/filters/client_channel/lb_policy_factory.h" +#include "src/core/ext/filters/client_channel/parse_address.h" +#include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/closure.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/unix_sockets_posix.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" + +#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h" + +namespace grpc_core { + +// This cannot be in an anonymous namespace, because it is a friend of +// FakeResolverResponseGenerator. +class FakeResolver : public Resolver { + public: + explicit FakeResolver(const ResolverArgs& args); + + void NextLocked(grpc_channel_args** result, + grpc_closure* on_complete) override; + + void RequestReresolutionLocked() override; + + private: + friend class FakeResolverResponseGenerator; + + virtual ~FakeResolver(); + + void MaybeFinishNextLocked(); + + void ShutdownLocked() override; + + // passed-in parameters + grpc_channel_args* channel_args_ = nullptr; + // If not NULL, the next set of resolution results to be returned to + // NextLocked()'s closure. + grpc_channel_args* next_results_ = nullptr; + // Results to use for the pretended re-resolution in + // RequestReresolutionLocked(). + grpc_channel_args* reresolution_results_ = nullptr; + // TODO(juanlishen): This can go away once pick_first is changed to not throw + // away its subchannels, since that will eliminate its dependence on + // channel_saw_error_locked() causing an immediate resolver return. + // A copy of the most-recently used resolution results. + grpc_channel_args* last_used_results_ = nullptr; + // pending next completion, or NULL + grpc_closure* next_completion_ = nullptr; + // target result address for next completion + grpc_channel_args** target_result_ = nullptr; + // if true, return failure + bool return_failure_ = false; +}; + +FakeResolver::FakeResolver(const ResolverArgs& args) : Resolver(args.combiner) { + channel_args_ = grpc_channel_args_copy(args.args); + FakeResolverResponseGenerator* response_generator = + FakeResolverResponseGenerator::GetFromArgs(args.args); + if (response_generator != nullptr) response_generator->resolver_ = this; +} + +FakeResolver::~FakeResolver() { + grpc_channel_args_destroy(next_results_); + grpc_channel_args_destroy(reresolution_results_); + grpc_channel_args_destroy(last_used_results_); + grpc_channel_args_destroy(channel_args_); +} + +void FakeResolver::NextLocked(grpc_channel_args** target_result, + grpc_closure* on_complete) { + GPR_ASSERT(next_completion_ == nullptr); + next_completion_ = on_complete; + target_result_ = target_result; + MaybeFinishNextLocked(); +} + +void FakeResolver::RequestReresolutionLocked() { + // A resolution must have been returned before an error is seen. + GPR_ASSERT(last_used_results_ != nullptr); + grpc_channel_args_destroy(next_results_); + if (reresolution_results_ != nullptr) { + next_results_ = grpc_channel_args_copy(reresolution_results_); + } else { + // If reresolution_results is unavailable, re-resolve with the most-recently + // used results to avoid a no-op re-resolution. 
+ next_results_ = grpc_channel_args_copy(last_used_results_); + } + MaybeFinishNextLocked(); +} + +void FakeResolver::MaybeFinishNextLocked() { + if (next_completion_ != nullptr && + (next_results_ != nullptr || return_failure_)) { + *target_result_ = + return_failure_ ? nullptr + : grpc_channel_args_union(next_results_, channel_args_); + grpc_channel_args_destroy(next_results_); + next_results_ = nullptr; + GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_NONE); + next_completion_ = nullptr; + return_failure_ = false; + } +} + +void FakeResolver::ShutdownLocked() { + if (next_completion_ != nullptr) { + *target_result_ = nullptr; + GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Resolver Shutdown")); + next_completion_ = nullptr; + } +} + +// +// FakeResolverResponseGenerator +// + +struct SetResponseClosureArg { + grpc_closure set_response_closure; + FakeResolverResponseGenerator* generator; + grpc_channel_args* response; +}; + +void FakeResolverResponseGenerator::SetResponseLocked(void* arg, + grpc_error* error) { + SetResponseClosureArg* closure_arg = static_cast(arg); + FakeResolver* resolver = closure_arg->generator->resolver_; + grpc_channel_args_destroy(resolver->next_results_); + resolver->next_results_ = closure_arg->response; + grpc_channel_args_destroy(resolver->last_used_results_); + resolver->last_used_results_ = grpc_channel_args_copy(closure_arg->response); + resolver->MaybeFinishNextLocked(); + Delete(closure_arg); +} + +void FakeResolverResponseGenerator::SetResponse(grpc_channel_args* response) { + GPR_ASSERT(response != nullptr); + GPR_ASSERT(resolver_ != nullptr); + SetResponseClosureArg* closure_arg = New(); + closure_arg->generator = this; + closure_arg->response = grpc_channel_args_copy(response); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetResponseLocked, + closure_arg, + grpc_combiner_scheduler(resolver_->combiner())), + GRPC_ERROR_NONE); +} + +void FakeResolverResponseGenerator::SetReresolutionResponseLocked( + void* arg, grpc_error* error) { + SetResponseClosureArg* closure_arg = static_cast(arg); + FakeResolver* resolver = closure_arg->generator->resolver_; + grpc_channel_args_destroy(resolver->reresolution_results_); + resolver->reresolution_results_ = closure_arg->response; + Delete(closure_arg); +} + +void FakeResolverResponseGenerator::SetReresolutionResponse( + grpc_channel_args* response) { + GPR_ASSERT(resolver_ != nullptr); + SetResponseClosureArg* closure_arg = New(); + closure_arg->generator = this; + closure_arg->response = + response != nullptr ? 
grpc_channel_args_copy(response) : nullptr; + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, + SetReresolutionResponseLocked, closure_arg, + grpc_combiner_scheduler(resolver_->combiner())), + GRPC_ERROR_NONE); +} + +void FakeResolverResponseGenerator::SetFailureLocked(void* arg, + grpc_error* error) { + SetResponseClosureArg* closure_arg = static_cast(arg); + FakeResolver* resolver = closure_arg->generator->resolver_; + resolver->return_failure_ = true; + resolver->MaybeFinishNextLocked(); + Delete(closure_arg); +} + +void FakeResolverResponseGenerator::SetFailure() { + GPR_ASSERT(resolver_ != nullptr); + SetResponseClosureArg* closure_arg = New(); + closure_arg->generator = this; + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_INIT(&closure_arg->set_response_closure, SetFailureLocked, + closure_arg, + grpc_combiner_scheduler(resolver_->combiner())), + GRPC_ERROR_NONE); +} + +namespace { + +static void* response_generator_arg_copy(void* p) { + FakeResolverResponseGenerator* generator = + static_cast(p); + // TODO(roth): We currently deal with this ref manually. Once the + // new channel args code is converted to C++, find a way to track this ref + // in a cleaner way. + RefCountedPtr copy = generator->Ref(); + copy.release(); + return p; +} + +static void response_generator_arg_destroy(void* p) { + FakeResolverResponseGenerator* generator = + static_cast(p); + generator->Unref(); +} + +static int response_generator_cmp(void* a, void* b) { return GPR_ICMP(a, b); } + +static const grpc_arg_pointer_vtable response_generator_arg_vtable = { + response_generator_arg_copy, response_generator_arg_destroy, + response_generator_cmp}; + +} // namespace + +grpc_arg FakeResolverResponseGenerator::MakeChannelArg( + FakeResolverResponseGenerator* generator) { + grpc_arg arg; + arg.type = GRPC_ARG_POINTER; + arg.key = (char*)GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR; + arg.value.pointer.p = generator; + arg.value.pointer.vtable = &response_generator_arg_vtable; + return arg; +} + +FakeResolverResponseGenerator* FakeResolverResponseGenerator::GetFromArgs( + const grpc_channel_args* args) { + const grpc_arg* arg = + grpc_channel_args_find(args, GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR); + if (arg == nullptr || arg->type != GRPC_ARG_POINTER) return nullptr; + return static_cast(arg->value.pointer.p); +} + +// +// Factory +// + +namespace { + +class FakeResolverFactory : public ResolverFactory { + public: + OrphanablePtr CreateResolver( + const ResolverArgs& args) const override { + return OrphanablePtr(New(args)); + } + + const char* scheme() const override { return "fake"; } +}; + +} // namespace + +} // namespace grpc_core + +void grpc_resolver_fake_init() { + grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( + grpc_core::UniquePtr( + grpc_core::New())); +} + +void grpc_resolver_fake_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h index c084ef2a5..e5175f9b7 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h @@ -17,44 +17,67 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H +#include + #include "src/core/ext/filters/client_channel/lb_policy_factory.h" #include "src/core/ext/filters/client_channel/uri_parser.h" 
#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gprpp/ref_counted.h" #define GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR \ "grpc.fake_resolver.response_generator" -void grpc_resolver_fake_init(); - -// Instances of \a grpc_fake_resolver_response_generator are passed to the -// fake resolver in a channel argument (see \a -// grpc_fake_resolver_response_generator_arg) in order to inject and trigger -// custom resolutions. See also \a -// grpc_fake_resolver_response_generator_set_response. -typedef struct grpc_fake_resolver_response_generator - grpc_fake_resolver_response_generator; -grpc_fake_resolver_response_generator* -grpc_fake_resolver_response_generator_create(); - -// Instruct the fake resolver associated with the \a response_generator instance -// to trigger a new resolution for \a uri and \a args. -void grpc_fake_resolver_response_generator_set_response( - grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator, - grpc_channel_args* next_response); - -// Return a \a grpc_arg for a \a grpc_fake_resolver_response_generator instance. -grpc_arg grpc_fake_resolver_response_generator_arg( - grpc_fake_resolver_response_generator* generator); -// Return the \a grpc_fake_resolver_response_generator instance in \a args or -// NULL. -grpc_fake_resolver_response_generator* -grpc_fake_resolver_get_response_generator(const grpc_channel_args* args); - -grpc_fake_resolver_response_generator* -grpc_fake_resolver_response_generator_ref( - grpc_fake_resolver_response_generator* generator); -void grpc_fake_resolver_response_generator_unref( - grpc_fake_resolver_response_generator* generator); +namespace grpc_core { + +class FakeResolver; + +/// A mechanism for generating responses for the fake resolver. +/// An instance of this class is passed to the fake resolver via a channel +/// argument (see \a MakeChannelArg()) and used to inject and trigger custom +/// resolutions. +// TODO(roth): I would ideally like this to be InternallyRefCounted +// instead of RefCounted, but external refs are currently needed to +// encode this in channel args. Once channel_args are converted to C++, +// see if we can find a way to fix this. +class FakeResolverResponseGenerator + : public RefCounted { + public: + FakeResolverResponseGenerator() {} + + // Instructs the fake resolver associated with the response generator + // instance to trigger a new resolution with the specified response. + void SetResponse(grpc_channel_args* next_response); + + // Sets the re-resolution response, which is returned by the fake resolver + // when re-resolution is requested (via \a RequestReresolutionLocked()). + // The new re-resolution response replaces any previous re-resolution + // response that may have been set by a previous call. + // If the re-resolution response is set to NULL, then the fake + // resolver will return the last value set via \a SetResponse(). + void SetReresolutionResponse(grpc_channel_args* response); + + // Tells the resolver to return a transient failure (signalled by + // returning a null result with no error). + void SetFailure(); + + // Returns a channel arg containing \a generator. + static grpc_arg MakeChannelArg(FakeResolverResponseGenerator* generator); + + // Returns the response generator in \a args, or null if not found. 
+ static FakeResolverResponseGenerator* GetFromArgs( + const grpc_channel_args* args); + + private: + friend class FakeResolver; + + static void SetResponseLocked(void* arg, grpc_error* error); + static void SetReresolutionResponseLocked(void* arg, grpc_error* error); + static void SetFailureLocked(void* arg, grpc_error* error); + + FakeResolver* resolver_ = nullptr; // Do not own. +}; + +} // namespace grpc_core #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c deleted file mode 100644 index 7ceb8f40a..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c +++ /dev/null @@ -1,222 +0,0 @@ -/* - * - * Copyright 2015-2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "src/core/ext/filters/client_channel/lb_policy_factory.h" -#include "src/core/ext/filters/client_channel/parse_address.h" -#include "src/core/ext/filters/client_channel/resolver_registry.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/iomgr/combiner.h" -#include "src/core/lib/iomgr/resolve_address.h" -#include "src/core/lib/iomgr/unix_sockets_posix.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" - -typedef struct { - /** base class: must be first */ - grpc_resolver base; - /** the addresses that we've 'resolved' */ - grpc_lb_addresses *addresses; - /** channel args */ - grpc_channel_args *channel_args; - /** have we published? 
*/ - bool published; - /** pending next completion, or NULL */ - grpc_closure *next_completion; - /** target result address for next completion */ - grpc_channel_args **target_result; -} sockaddr_resolver; - -static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r); - -static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx, - sockaddr_resolver *r); - -static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r); -static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *r); -static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx, grpc_resolver *r, - grpc_channel_args **target_result, - grpc_closure *on_complete); - -static const grpc_resolver_vtable sockaddr_resolver_vtable = { - sockaddr_destroy, sockaddr_shutdown_locked, - sockaddr_channel_saw_error_locked, sockaddr_next_locked}; - -static void sockaddr_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver) { - sockaddr_resolver *r = (sockaddr_resolver *)resolver; - if (r->next_completion != NULL) { - *r->target_result = NULL; - GRPC_CLOSURE_SCHED( - exec_ctx, r->next_completion, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown")); - r->next_completion = NULL; - } -} - -static void sockaddr_channel_saw_error_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver) { - sockaddr_resolver *r = (sockaddr_resolver *)resolver; - r->published = false; - sockaddr_maybe_finish_next_locked(exec_ctx, r); -} - -static void sockaddr_next_locked(grpc_exec_ctx *exec_ctx, - grpc_resolver *resolver, - grpc_channel_args **target_result, - grpc_closure *on_complete) { - sockaddr_resolver *r = (sockaddr_resolver *)resolver; - GPR_ASSERT(!r->next_completion); - r->next_completion = on_complete; - r->target_result = target_result; - sockaddr_maybe_finish_next_locked(exec_ctx, r); -} - -static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx, - sockaddr_resolver *r) { - if (r->next_completion != NULL && !r->published) { - r->published = true; - grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses); - *r->target_result = - grpc_channel_args_copy_and_add(r->channel_args, &arg, 1); - GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE); - r->next_completion = NULL; - } -} - -static void sockaddr_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *gr) { - sockaddr_resolver *r = (sockaddr_resolver *)gr; - grpc_lb_addresses_destroy(exec_ctx, r->addresses); - grpc_channel_args_destroy(exec_ctx, r->channel_args); - gpr_free(r); -} - -static char *ip_get_default_authority(grpc_uri *uri) { - const char *path = uri->path; - if (path[0] == '/') ++path; - return gpr_strdup(path); -} - -static char *ipv4_get_default_authority(grpc_resolver_factory *factory, - grpc_uri *uri) { - return ip_get_default_authority(uri); -} - -static char *ipv6_get_default_authority(grpc_resolver_factory *factory, - grpc_uri *uri) { - return ip_get_default_authority(uri); -} - -#ifdef GRPC_HAVE_UNIX_SOCKET -char *unix_get_default_authority(grpc_resolver_factory *factory, - grpc_uri *uri) { - return gpr_strdup("localhost"); -} -#endif - -static void do_nothing(void *ignored) {} - -static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx, - grpc_resolver_args *args, - bool parse(const grpc_uri *uri, - grpc_resolved_address *dst)) { - if (0 != strcmp(args->uri->authority, "")) { - gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme", - args->uri->scheme); - return NULL; - } - /* Construct addresses. 
*/ - grpc_slice path_slice = - grpc_slice_new(args->uri->path, strlen(args->uri->path), do_nothing); - grpc_slice_buffer path_parts; - grpc_slice_buffer_init(&path_parts); - grpc_slice_split(path_slice, ",", &path_parts); - grpc_lb_addresses *addresses = - grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */); - bool errors_found = false; - for (size_t i = 0; i < addresses->num_addresses; i++) { - grpc_uri ith_uri = *args->uri; - char *part_str = grpc_slice_to_c_string(path_parts.slices[i]); - ith_uri.path = part_str; - if (!parse(&ith_uri, &addresses->addresses[i].address)) { - errors_found = true; /* GPR_TRUE */ - } - gpr_free(part_str); - if (errors_found) break; - } - grpc_slice_buffer_destroy_internal(exec_ctx, &path_parts); - grpc_slice_unref_internal(exec_ctx, path_slice); - if (errors_found) { - grpc_lb_addresses_destroy(exec_ctx, addresses); - return NULL; - } - /* Instantiate resolver. */ - sockaddr_resolver *r = - (sockaddr_resolver *)gpr_zalloc(sizeof(sockaddr_resolver)); - r->addresses = addresses; - r->channel_args = grpc_channel_args_copy(args->args); - grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner); - return &r->base; -} - -/* - * FACTORY - */ - -static void sockaddr_factory_ref(grpc_resolver_factory *factory) {} - -static void sockaddr_factory_unref(grpc_resolver_factory *factory) {} - -#define DECL_FACTORY(name) \ - static grpc_resolver *name##_factory_create_resolver( \ - grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, \ - grpc_resolver_args *args) { \ - return sockaddr_create(exec_ctx, args, grpc_parse_##name); \ - } \ - static const grpc_resolver_factory_vtable name##_factory_vtable = { \ - sockaddr_factory_ref, sockaddr_factory_unref, \ - name##_factory_create_resolver, name##_get_default_authority, #name}; \ - static grpc_resolver_factory name##_resolver_factory = { \ - &name##_factory_vtable} - -#ifdef GRPC_HAVE_UNIX_SOCKET -DECL_FACTORY(unix); -#endif -DECL_FACTORY(ipv4); -DECL_FACTORY(ipv6); - -void grpc_resolver_sockaddr_init(void) { - grpc_register_resolver_type(&ipv4_resolver_factory); - grpc_register_resolver_type(&ipv6_resolver_factory); -#ifdef GRPC_HAVE_UNIX_SOCKET - grpc_register_resolver_type(&unix_resolver_factory); -#endif -} - -void grpc_resolver_sockaddr_shutdown(void) {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc new file mode 100644 index 000000000..f74ac5aeb --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc @@ -0,0 +1,214 @@ +/* + * + * Copyright 2015-2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include +#include +#include +#include + +#include +#include + +#include "src/core/ext/filters/client_channel/lb_policy_factory.h" +#include "src/core/ext/filters/client_channel/parse_address.h" +#include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/unix_sockets_posix.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" + +namespace grpc_core { + +namespace { + +class SockaddrResolver : public Resolver { + public: + /// Takes ownership of \a addresses. + SockaddrResolver(const ResolverArgs& args, grpc_lb_addresses* addresses); + + void NextLocked(grpc_channel_args** result, + grpc_closure* on_complete) override; + + void RequestReresolutionLocked() override; + + void ShutdownLocked() override; + + private: + virtual ~SockaddrResolver(); + + void MaybeFinishNextLocked(); + + /// the addresses that we've "resolved" + grpc_lb_addresses* addresses_ = nullptr; + /// channel args + grpc_channel_args* channel_args_ = nullptr; + /// have we published? + bool published_ = false; + /// pending next completion, or NULL + grpc_closure* next_completion_ = nullptr; + /// target result address for next completion + grpc_channel_args** target_result_ = nullptr; +}; + +SockaddrResolver::SockaddrResolver(const ResolverArgs& args, + grpc_lb_addresses* addresses) + : Resolver(args.combiner), + addresses_(addresses), + channel_args_(grpc_channel_args_copy(args.args)) {} + +SockaddrResolver::~SockaddrResolver() { + grpc_lb_addresses_destroy(addresses_); + grpc_channel_args_destroy(channel_args_); +} + +void SockaddrResolver::NextLocked(grpc_channel_args** target_result, + grpc_closure* on_complete) { + GPR_ASSERT(!next_completion_); + next_completion_ = on_complete; + target_result_ = target_result; + MaybeFinishNextLocked(); +} + +void SockaddrResolver::RequestReresolutionLocked() { + published_ = false; + MaybeFinishNextLocked(); +} + +void SockaddrResolver::ShutdownLocked() { + if (next_completion_ != nullptr) { + *target_result_ = nullptr; + GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Resolver Shutdown")); + next_completion_ = nullptr; + } +} + +void SockaddrResolver::MaybeFinishNextLocked() { + if (next_completion_ != nullptr && !published_) { + published_ = true; + grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses_); + *target_result_ = grpc_channel_args_copy_and_add(channel_args_, &arg, 1); + GRPC_CLOSURE_SCHED(next_completion_, GRPC_ERROR_NONE); + next_completion_ = nullptr; + } +} + +// +// Factory +// + +void DoNothing(void* ignored) {} + +OrphanablePtr CreateSockaddrResolver( + const ResolverArgs& args, + bool parse(const grpc_uri* uri, grpc_resolved_address* dst)) { + if (0 != strcmp(args.uri->authority, "")) { + gpr_log(GPR_ERROR, "authority-based URIs not supported by the %s scheme", + args.uri->scheme); + return OrphanablePtr(nullptr); + } + // Construct addresses. 
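  // Illustrative note (addresses are hypothetical): a target such as
  //   ipv4:127.0.0.1:50051,127.0.0.2:50052
  // arrives here with scheme "ipv4" and path "127.0.0.1:50051,127.0.0.2:50052".
  // The path is split on "," and each piece is handed to the scheme's `parse`
  // callback (grpc_parse_ipv4, grpc_parse_ipv6, or grpc_parse_unix, see the
  // factories below); any piece that fails to parse aborts resolver creation.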
+ grpc_slice path_slice = + grpc_slice_new(args.uri->path, strlen(args.uri->path), DoNothing); + grpc_slice_buffer path_parts; + grpc_slice_buffer_init(&path_parts); + grpc_slice_split(path_slice, ",", &path_parts); + grpc_lb_addresses* addresses = grpc_lb_addresses_create( + path_parts.count, nullptr /* user_data_vtable */); + bool errors_found = false; + for (size_t i = 0; i < addresses->num_addresses; i++) { + grpc_uri ith_uri = *args.uri; + char* part_str = grpc_slice_to_c_string(path_parts.slices[i]); + ith_uri.path = part_str; + if (!parse(&ith_uri, &addresses->addresses[i].address)) { + errors_found = true; /* GPR_TRUE */ + } + gpr_free(part_str); + if (errors_found) break; + } + grpc_slice_buffer_destroy_internal(&path_parts); + grpc_slice_unref_internal(path_slice); + if (errors_found) { + grpc_lb_addresses_destroy(addresses); + return OrphanablePtr(nullptr); + } + // Instantiate resolver. + return OrphanablePtr(New(args, addresses)); +} + +class IPv4ResolverFactory : public ResolverFactory { + public: + OrphanablePtr CreateResolver( + const ResolverArgs& args) const override { + return CreateSockaddrResolver(args, grpc_parse_ipv4); + } + + const char* scheme() const override { return "ipv4"; } +}; + +class IPv6ResolverFactory : public ResolverFactory { + public: + OrphanablePtr CreateResolver( + const ResolverArgs& args) const override { + return CreateSockaddrResolver(args, grpc_parse_ipv6); + } + + const char* scheme() const override { return "ipv6"; } +}; + +#ifdef GRPC_HAVE_UNIX_SOCKET +class UnixResolverFactory : public ResolverFactory { + public: + OrphanablePtr CreateResolver( + const ResolverArgs& args) const override { + return CreateSockaddrResolver(args, grpc_parse_unix); + } + + UniquePtr GetDefaultAuthority(grpc_uri* uri) const override { + return UniquePtr(gpr_strdup("localhost")); + } + + const char* scheme() const override { return "unix"; } +}; +#endif // GRPC_HAVE_UNIX_SOCKET + +} // namespace + +} // namespace grpc_core + +void grpc_resolver_sockaddr_init() { + grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( + grpc_core::UniquePtr( + grpc_core::New())); + grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( + grpc_core::UniquePtr( + grpc_core::New())); +#ifdef GRPC_HAVE_UNIX_SOCKET + grpc_core::ResolverRegistry::Builder::RegisterResolverFactory( + grpc_core::UniquePtr( + grpc_core::New())); +#endif +} + +void grpc_resolver_sockaddr_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_factory.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_factory.c deleted file mode 100644 index 6f0a7c1e3..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_factory.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/ext/filters/client_channel/resolver_factory.h" - -void grpc_resolver_factory_ref(grpc_resolver_factory* factory) { - factory->vtable->ref(factory); -} - -void grpc_resolver_factory_unref(grpc_resolver_factory* factory) { - factory->vtable->unref(factory); -} - -/** Create a resolver instance for a name */ -grpc_resolver* grpc_resolver_factory_create_resolver( - grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory, - grpc_resolver_args* args) { - if (factory == NULL) return NULL; - return factory->vtable->create_resolver(exec_ctx, factory, args); -} - -char* grpc_resolver_factory_get_default_authority( - grpc_resolver_factory* factory, grpc_uri* uri) { - if (factory == NULL) return NULL; - return factory->vtable->get_default_authority(factory, uri); -} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_factory.h b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_factory.h index 6bd7929d4..ee3cfeeb9 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_factory.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_factory.h @@ -19,52 +19,53 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H -#include "src/core/ext/filters/client_channel/client_channel_factory.h" +#include + +#include + #include "src/core/ext/filters/client_channel/resolver.h" #include "src/core/ext/filters/client_channel/uri_parser.h" +#include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/orphanable.h" #include "src/core/lib/iomgr/pollset_set.h" -typedef struct grpc_resolver_factory grpc_resolver_factory; -typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable; +namespace grpc_core { -struct grpc_resolver_factory { - const grpc_resolver_factory_vtable *vtable; +struct ResolverArgs { + /// The parsed URI to resolve. + grpc_uri* uri = nullptr; + /// Channel args to be included in resolver results. + const grpc_channel_args* args = nullptr; + /// Used to drive I/O in the name resolution process. + grpc_pollset_set* pollset_set = nullptr; + /// The combiner under which all resolver calls will be run. + grpc_combiner* combiner = nullptr; }; -typedef struct grpc_resolver_args { - grpc_uri *uri; - const grpc_channel_args *args; - grpc_pollset_set *pollset_set; - grpc_combiner *combiner; -} grpc_resolver_args; +class ResolverFactory { + public: + /// Returns a new resolver instance. + virtual OrphanablePtr CreateResolver(const ResolverArgs& args) const + GRPC_ABSTRACT; -struct grpc_resolver_factory_vtable { - void (*ref)(grpc_resolver_factory *factory); - void (*unref)(grpc_resolver_factory *factory); + /// Returns a string representing the default authority to use for this + /// scheme. + virtual UniquePtr GetDefaultAuthority(grpc_uri* uri) const { + const char* path = uri->path; + if (path[0] == '/') ++path; + return UniquePtr(gpr_strdup(path)); + } - /** Implementation of grpc_resolver_factory_create_resolver */ - grpc_resolver *(*create_resolver)(grpc_exec_ctx *exec_ctx, - grpc_resolver_factory *factory, - grpc_resolver_args *args); + /// Returns the URI scheme that this factory implements. + /// Caller does NOT take ownership of result. 
+ virtual const char* scheme() const GRPC_ABSTRACT; - /** Implementation of grpc_resolver_factory_get_default_authority */ - char *(*get_default_authority)(grpc_resolver_factory *factory, grpc_uri *uri); + virtual ~ResolverFactory() {} - /** URI scheme that this factory implements */ - const char *scheme; + GRPC_ABSTRACT_BASE_CLASS }; -void grpc_resolver_factory_ref(grpc_resolver_factory *resolver); -void grpc_resolver_factory_unref(grpc_resolver_factory *resolver); - -/** Create a resolver instance for a name */ -grpc_resolver *grpc_resolver_factory_create_resolver( - grpc_exec_ctx *exec_ctx, grpc_resolver_factory *factory, - grpc_resolver_args *args); - -/** Return a (freshly allocated with gpr_malloc) string representing - the default authority to use for this scheme. */ -char *grpc_resolver_factory_get_default_authority( - grpc_resolver_factory *factory, grpc_uri *uri); +} // namespace grpc_core #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.c b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.c deleted file mode 100644 index 1a0fb0bc3..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/filters/client_channel/resolver_registry.h" - -#include - -#include -#include -#include - -#define MAX_RESOLVERS 10 -#define DEFAULT_RESOLVER_PREFIX_MAX_LENGTH 32 - -static grpc_resolver_factory *g_all_of_the_resolvers[MAX_RESOLVERS]; -static int g_number_of_resolvers = 0; - -static char g_default_resolver_prefix[DEFAULT_RESOLVER_PREFIX_MAX_LENGTH] = - "dns:///"; - -void grpc_resolver_registry_init() {} - -void grpc_resolver_registry_shutdown(void) { - for (int i = 0; i < g_number_of_resolvers; i++) { - grpc_resolver_factory_unref(g_all_of_the_resolvers[i]); - } - // FIXME(ctiller): this should live in grpc_resolver_registry_init, - // however that would have the client_channel plugin call this AFTER we start - // registering resolvers from third party plugins, and so they'd never show - // up. - // We likely need some kind of dependency system for plugins.... what form - // that takes is TBD. - g_number_of_resolvers = 0; -} - -void grpc_resolver_registry_set_default_prefix( - const char *default_resolver_prefix) { - const size_t len = strlen(default_resolver_prefix); - GPR_ASSERT(len < DEFAULT_RESOLVER_PREFIX_MAX_LENGTH && - "default resolver prefix too long"); - GPR_ASSERT(len > 0 && "default resolver prefix can't be empty"); - // By the previous assert, default_resolver_prefix is safe to be copied with a - // plain strcpy. 
- strcpy(g_default_resolver_prefix, default_resolver_prefix); -} - -void grpc_register_resolver_type(grpc_resolver_factory *factory) { - int i; - for (i = 0; i < g_number_of_resolvers; i++) { - GPR_ASSERT(0 != strcmp(factory->vtable->scheme, - g_all_of_the_resolvers[i]->vtable->scheme)); - } - GPR_ASSERT(g_number_of_resolvers != MAX_RESOLVERS); - grpc_resolver_factory_ref(factory); - g_all_of_the_resolvers[g_number_of_resolvers++] = factory; -} - -static grpc_resolver_factory *lookup_factory(const char *name) { - int i; - - for (i = 0; i < g_number_of_resolvers; i++) { - if (0 == strcmp(name, g_all_of_the_resolvers[i]->vtable->scheme)) { - return g_all_of_the_resolvers[i]; - } - } - return NULL; -} - -grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name) { - grpc_resolver_factory *f = lookup_factory(name); - if (f) grpc_resolver_factory_ref(f); - return f; -} - -static grpc_resolver_factory *lookup_factory_by_uri(grpc_uri *uri) { - if (!uri) return NULL; - return lookup_factory(uri->scheme); -} - -static grpc_resolver_factory *resolve_factory(grpc_exec_ctx *exec_ctx, - const char *target, - grpc_uri **uri, - char **canonical_target) { - grpc_resolver_factory *factory = NULL; - - GPR_ASSERT(uri != NULL); - *uri = grpc_uri_parse(exec_ctx, target, 1); - factory = lookup_factory_by_uri(*uri); - if (factory == NULL) { - grpc_uri_destroy(*uri); - gpr_asprintf(canonical_target, "%s%s", g_default_resolver_prefix, target); - *uri = grpc_uri_parse(exec_ctx, *canonical_target, 1); - factory = lookup_factory_by_uri(*uri); - if (factory == NULL) { - grpc_uri_destroy(grpc_uri_parse(exec_ctx, target, 0)); - grpc_uri_destroy(grpc_uri_parse(exec_ctx, *canonical_target, 0)); - gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target, - *canonical_target); - } - } - return factory; -} - -grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target, - const grpc_channel_args *args, - grpc_pollset_set *pollset_set, - grpc_combiner *combiner) { - grpc_uri *uri = NULL; - char *canonical_target = NULL; - grpc_resolver_factory *factory = - resolve_factory(exec_ctx, target, &uri, &canonical_target); - grpc_resolver *resolver; - grpc_resolver_args resolver_args; - memset(&resolver_args, 0, sizeof(resolver_args)); - resolver_args.uri = uri; - resolver_args.args = args; - resolver_args.pollset_set = pollset_set; - resolver_args.combiner = combiner; - resolver = - grpc_resolver_factory_create_resolver(exec_ctx, factory, &resolver_args); - grpc_uri_destroy(uri); - gpr_free(canonical_target); - return resolver; -} - -char *grpc_get_default_authority(grpc_exec_ctx *exec_ctx, const char *target) { - grpc_uri *uri = NULL; - char *canonical_target = NULL; - grpc_resolver_factory *factory = - resolve_factory(exec_ctx, target, &uri, &canonical_target); - char *authority = grpc_resolver_factory_get_default_authority(factory, uri); - grpc_uri_destroy(uri); - gpr_free(canonical_target); - return authority; -} - -char *grpc_resolver_factory_add_default_prefix_if_needed( - grpc_exec_ctx *exec_ctx, const char *target) { - grpc_uri *uri = NULL; - char *canonical_target = NULL; - resolve_factory(exec_ctx, target, &uri, &canonical_target); - grpc_uri_destroy(uri); - return canonical_target == NULL ? 
gpr_strdup(target) : canonical_target; -} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.cc new file mode 100644 index 000000000..91c0267f9 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.cc @@ -0,0 +1,178 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/filters/client_channel/resolver_registry.h" + +#include + +#include +#include +#include + +namespace grpc_core { + +namespace { + +class RegistryState { + public: + RegistryState() : default_prefix_(gpr_strdup("dns:///")) {} + + void SetDefaultPrefix(const char* default_resolver_prefix) { + GPR_ASSERT(default_resolver_prefix != nullptr); + GPR_ASSERT(*default_resolver_prefix != '\0'); + default_prefix_.reset(gpr_strdup(default_resolver_prefix)); + } + + void RegisterResolverFactory(UniquePtr factory) { + for (size_t i = 0; i < factories_.size(); ++i) { + GPR_ASSERT(strcmp(factories_[i]->scheme(), factory->scheme()) != 0); + } + factories_.push_back(std::move(factory)); + } + + ResolverFactory* LookupResolverFactory(const char* scheme) const { + for (size_t i = 0; i < factories_.size(); ++i) { + if (strcmp(scheme, factories_[i]->scheme()) == 0) { + return factories_[i].get(); + } + } + return nullptr; + } + + // Returns the factory for the scheme of \a target. If \a target does + // not parse as a URI, prepends \a default_prefix_ and tries again. + // If URI parsing is successful (in either attempt), sets \a uri to + // point to the parsed URI. + // If \a default_prefix_ needs to be prepended, sets \a canonical_target + // to the canonical target string. + ResolverFactory* FindResolverFactory(const char* target, grpc_uri** uri, + char** canonical_target) const { + GPR_ASSERT(uri != nullptr); + *uri = grpc_uri_parse(target, 1); + ResolverFactory* factory = + *uri == nullptr ? nullptr : LookupResolverFactory((*uri)->scheme); + if (factory == nullptr) { + grpc_uri_destroy(*uri); + gpr_asprintf(canonical_target, "%s%s", default_prefix_.get(), target); + *uri = grpc_uri_parse(*canonical_target, 1); + factory = + *uri == nullptr ? nullptr : LookupResolverFactory((*uri)->scheme); + if (factory == nullptr) { + grpc_uri_destroy(grpc_uri_parse(target, 0)); + grpc_uri_destroy(grpc_uri_parse(*canonical_target, 0)); + gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target, + *canonical_target); + } + } + return factory; + } + + private: + // We currently support 10 factories without doing additional + // allocation. This number could be raised if there is a case where + // more factories are needed and the additional allocations are + // hurting performance (which is unlikely, since these allocations + // only occur at gRPC initialization time). 
+ InlinedVector, 10> factories_; + UniquePtr default_prefix_; +}; + +static RegistryState* g_state = nullptr; + +} // namespace + +// +// ResolverRegistry::Builder +// + +void ResolverRegistry::Builder::InitRegistry() { + if (g_state == nullptr) g_state = New(); +} + +void ResolverRegistry::Builder::ShutdownRegistry() { + Delete(g_state); + g_state = nullptr; +} + +void ResolverRegistry::Builder::SetDefaultPrefix( + const char* default_resolver_prefix) { + InitRegistry(); + g_state->SetDefaultPrefix(default_resolver_prefix); +} + +void ResolverRegistry::Builder::RegisterResolverFactory( + UniquePtr factory) { + InitRegistry(); + g_state->RegisterResolverFactory(std::move(factory)); +} + +// +// ResolverRegistry +// + +ResolverFactory* ResolverRegistry::LookupResolverFactory(const char* scheme) { + GPR_ASSERT(g_state != nullptr); + return g_state->LookupResolverFactory(scheme); +} + +OrphanablePtr ResolverRegistry::CreateResolver( + const char* target, const grpc_channel_args* args, + grpc_pollset_set* pollset_set, grpc_combiner* combiner) { + GPR_ASSERT(g_state != nullptr); + grpc_uri* uri = nullptr; + char* canonical_target = nullptr; + ResolverFactory* factory = + g_state->FindResolverFactory(target, &uri, &canonical_target); + ResolverArgs resolver_args; + resolver_args.uri = uri; + resolver_args.args = args; + resolver_args.pollset_set = pollset_set; + resolver_args.combiner = combiner; + OrphanablePtr resolver = + factory == nullptr ? nullptr : factory->CreateResolver(resolver_args); + grpc_uri_destroy(uri); + gpr_free(canonical_target); + return resolver; +} + +UniquePtr ResolverRegistry::GetDefaultAuthority(const char* target) { + GPR_ASSERT(g_state != nullptr); + grpc_uri* uri = nullptr; + char* canonical_target = nullptr; + ResolverFactory* factory = + g_state->FindResolverFactory(target, &uri, &canonical_target); + UniquePtr authority = + factory == nullptr ? nullptr : factory->GetDefaultAuthority(uri); + grpc_uri_destroy(uri); + gpr_free(canonical_target); + return authority; +} + +UniquePtr ResolverRegistry::AddDefaultPrefixIfNeeded(const char* target) { + GPR_ASSERT(g_state != nullptr); + grpc_uri* uri = nullptr; + char* canonical_target = nullptr; + g_state->FindResolverFactory(target, &uri, &canonical_target); + grpc_uri_destroy(uri); + return UniquePtr(canonical_target == nullptr ? gpr_strdup(target) + : canonical_target); +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.h b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.h index 692490543..d6ec6811b 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/resolver_registry.h @@ -19,51 +19,65 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H +#include + #include "src/core/ext/filters/client_channel/resolver_factory.h" +#include "src/core/lib/gprpp/inlined_vector.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/orphanable.h" #include "src/core/lib/iomgr/pollset_set.h" -void grpc_resolver_registry_init(); -void grpc_resolver_registry_shutdown(void); +namespace grpc_core { + +class ResolverRegistry { + public: + /// Methods used to create and populate the ResolverRegistry. + /// NOT THREAD SAFE -- to be used only during global gRPC + /// initialization and shutdown. + class Builder { + public: + /// Global initialization and shutdown hooks. 
+ static void InitRegistry(); + static void ShutdownRegistry(); + + /// Sets the default URI prefix to \a default_prefix. + /// Calls InitRegistry() if it has not already been called. + static void SetDefaultPrefix(const char* default_prefix); -/** Set the default URI prefix to \a default_prefix. */ -void grpc_resolver_registry_set_default_prefix(const char *default_prefix); + /// Registers a resolver factory. The factory will be used to create a + /// resolver for any URI whose scheme matches that of the factory. + /// Calls InitRegistry() if it has not already been called. + static void RegisterResolverFactory(UniquePtr factory); + }; -/** Register a resolver type. - URI's of \a scheme will be resolved with the given resolver. - If \a priority is greater than zero, then the resolver will be eligible - to resolve names that are passed in with no scheme. Higher priority - resolvers will be tried before lower priority schemes. */ -void grpc_register_resolver_type(grpc_resolver_factory *factory); + /// Creates a resolver given \a target. + /// First tries to parse \a target as a URI. If this succeeds, tries + /// to locate a registered resolver factory based on the URI scheme. + /// If parsing fails or there is no factory for the URI's scheme, + /// prepends default_prefix to target and tries again. + /// If a resolver factory is found, uses it to instantiate a resolver and + /// returns it; otherwise, returns nullptr. + /// \a args, \a pollset_set, and \a combiner are passed to the factory's + /// \a CreateResolver() method. + /// \a args are the channel args to be included in resolver results. + /// \a pollset_set is used to drive I/O in the name resolution process. + /// \a combiner is the combiner under which all resolver calls will be run. + static OrphanablePtr CreateResolver(const char* target, + const grpc_channel_args* args, + grpc_pollset_set* pollset_set, + grpc_combiner* combiner); -/** Create a resolver given \a target. - First tries to parse \a target as a URI. If this succeeds, tries - to locate a registered resolver factory based on the URI scheme. - If parsing or location fails, prefixes default_prefix from - grpc_resolver_registry_init to target, and tries again (if default_prefix - was not NULL). - If a resolver factory was found, use it to instantiate a resolver and - return it. - If a resolver factory was not found, return NULL. - \a args is a set of channel arguments to be included in the result - (typically the set of arguments passed in from the client API). - \a pollset_set is used to drive IO in the name resolution process, it - should not be NULL. */ -grpc_resolver *grpc_resolver_create(grpc_exec_ctx *exec_ctx, const char *target, - const grpc_channel_args *args, - grpc_pollset_set *pollset_set, - grpc_combiner *combiner); + /// Returns the default authority to pass from a client for \a target. + static UniquePtr GetDefaultAuthority(const char* target); -/** Find a resolver factory given a name and return an (owned-by-the-caller) - * reference to it */ -grpc_resolver_factory *grpc_resolver_factory_lookup(const char *name); + /// Returns \a target with the default prefix prepended, if needed. + static UniquePtr AddDefaultPrefixIfNeeded(const char* target); -/** Given a target, return a (freshly allocated with gpr_malloc) string - representing the default authority to pass from a client. */ -char *grpc_get_default_authority(grpc_exec_ctx *exec_ctx, const char *target); + /// Returns the resolver factory for \a scheme. + /// Caller does NOT own the return value. 
+ static ResolverFactory* LookupResolverFactory(const char* scheme); +}; -/** Returns a newly allocated string containing \a target, adding the - default prefix if needed. */ -char *grpc_resolver_factory_add_default_prefix_if_needed( - grpc_exec_ctx *exec_ctx, const char *target); +} // namespace grpc_core #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.c b/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.c deleted file mode 100644 index 09dcade08..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.c +++ /dev/null @@ -1,202 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/filters/client_channel/retry_throttle.h" - -#include -#include - -#include -#include -#include -#include -#include - -// -// server_retry_throttle_data -// - -struct grpc_server_retry_throttle_data { - gpr_refcount refs; - int max_milli_tokens; - int milli_token_ratio; - gpr_atm milli_tokens; - // A pointer to the replacement for this grpc_server_retry_throttle_data - // entry. If non-NULL, then this entry is stale and must not be used. - // We hold a reference to the replacement. - gpr_atm replacement; -}; - -static void get_replacement_throttle_data_if_needed( - grpc_server_retry_throttle_data** throttle_data) { - while (true) { - grpc_server_retry_throttle_data* new_throttle_data = - (grpc_server_retry_throttle_data*)gpr_atm_acq_load( - &(*throttle_data)->replacement); - if (new_throttle_data == NULL) return; - *throttle_data = new_throttle_data; - } -} - -bool grpc_server_retry_throttle_data_record_failure( - grpc_server_retry_throttle_data* throttle_data) { - // First, check if we are stale and need to be replaced. - get_replacement_throttle_data_if_needed(&throttle_data); - // We decrement milli_tokens by 1000 (1 token) for each failure. - const int new_value = (int)gpr_atm_no_barrier_clamped_add( - &throttle_data->milli_tokens, (gpr_atm)-1000, (gpr_atm)0, - (gpr_atm)throttle_data->max_milli_tokens); - // Retries are allowed as long as the new value is above the threshold - // (max_milli_tokens / 2). - return new_value > throttle_data->max_milli_tokens / 2; -} - -void grpc_server_retry_throttle_data_record_success( - grpc_server_retry_throttle_data* throttle_data) { - // First, check if we are stale and need to be replaced. - get_replacement_throttle_data_if_needed(&throttle_data); - // We increment milli_tokens by milli_token_ratio for each success. 
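  // Worked example (numbers are illustrative): with max_milli_tokens = 10000 and
  // milli_token_ratio = 100, the bucket starts at 10000 milli-tokens (10 tokens).
  // Each failure subtracts 1000 (one token) and each success adds back 100
  // (0.1 token), both clamped to [0, 10000]. Retries remain allowed only while
  // the balance stays above max_milli_tokens / 2 = 5000, so five consecutive
  // failures with no intervening successes drop the balance to 5000, at which
  // point further retries are throttled.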
- gpr_atm_no_barrier_clamped_add( - &throttle_data->milli_tokens, (gpr_atm)throttle_data->milli_token_ratio, - (gpr_atm)0, (gpr_atm)throttle_data->max_milli_tokens); -} - -grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_ref( - grpc_server_retry_throttle_data* throttle_data) { - gpr_ref(&throttle_data->refs); - return throttle_data; -} - -void grpc_server_retry_throttle_data_unref( - grpc_server_retry_throttle_data* throttle_data) { - if (gpr_unref(&throttle_data->refs)) { - grpc_server_retry_throttle_data* replacement = - (grpc_server_retry_throttle_data*)gpr_atm_acq_load( - &throttle_data->replacement); - if (replacement != NULL) { - grpc_server_retry_throttle_data_unref(replacement); - } - gpr_free(throttle_data); - } -} - -static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create( - int max_milli_tokens, int milli_token_ratio, - grpc_server_retry_throttle_data* old_throttle_data) { - grpc_server_retry_throttle_data* throttle_data = - (grpc_server_retry_throttle_data*)gpr_malloc(sizeof(*throttle_data)); - memset(throttle_data, 0, sizeof(*throttle_data)); - gpr_ref_init(&throttle_data->refs, 1); - throttle_data->max_milli_tokens = max_milli_tokens; - throttle_data->milli_token_ratio = milli_token_ratio; - int initial_milli_tokens = max_milli_tokens; - // If there was a pre-existing entry for this server name, initialize - // the token count by scaling proportionately to the old data. This - // ensures that if we're already throttling retries on the old scale, - // we will start out doing the same thing on the new one. - if (old_throttle_data != NULL) { - double token_fraction = - (int)gpr_atm_acq_load(&old_throttle_data->milli_tokens) / - (double)old_throttle_data->max_milli_tokens; - initial_milli_tokens = (int)(token_fraction * max_milli_tokens); - } - gpr_atm_rel_store(&throttle_data->milli_tokens, - (gpr_atm)initial_milli_tokens); - // If there was a pre-existing entry, mark it as stale and give it a - // pointer to the new entry, which is its replacement. 
- if (old_throttle_data != NULL) { - grpc_server_retry_throttle_data_ref(throttle_data); - gpr_atm_rel_store(&old_throttle_data->replacement, (gpr_atm)throttle_data); - } - return throttle_data; -} - -// -// avl vtable for string -> server_retry_throttle_data map -// - -static void* copy_server_name(void* key, void* unused) { - return gpr_strdup((const char*)key); -} - -static long compare_server_name(void* key1, void* key2, void* unused) { - return strcmp((const char*)key1, (const char*)key2); -} - -static void destroy_server_retry_throttle_data(void* value, void* unused) { - grpc_server_retry_throttle_data* throttle_data = - (grpc_server_retry_throttle_data*)value; - grpc_server_retry_throttle_data_unref(throttle_data); -} - -static void* copy_server_retry_throttle_data(void* value, void* unused) { - grpc_server_retry_throttle_data* throttle_data = - (grpc_server_retry_throttle_data*)value; - return grpc_server_retry_throttle_data_ref(throttle_data); -} - -static void destroy_server_name(void* key, void* unused) { gpr_free(key); } - -static const gpr_avl_vtable avl_vtable = { - destroy_server_name, copy_server_name, compare_server_name, - destroy_server_retry_throttle_data, copy_server_retry_throttle_data}; - -// -// server_retry_throttle_map -// - -static gpr_mu g_mu; -static gpr_avl g_avl; - -void grpc_retry_throttle_map_init() { - gpr_mu_init(&g_mu); - g_avl = gpr_avl_create(&avl_vtable); -} - -void grpc_retry_throttle_map_shutdown() { - gpr_mu_destroy(&g_mu); - gpr_avl_unref(g_avl, NULL); -} - -grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server( - const char* server_name, int max_milli_tokens, int milli_token_ratio) { - gpr_mu_lock(&g_mu); - grpc_server_retry_throttle_data* throttle_data = - (grpc_server_retry_throttle_data*)gpr_avl_get(g_avl, (char*)server_name, - NULL); - if (throttle_data == NULL) { - // Entry not found. Create a new one. - throttle_data = grpc_server_retry_throttle_data_create( - max_milli_tokens, milli_token_ratio, NULL); - g_avl = gpr_avl_add(g_avl, (char*)server_name, throttle_data, NULL); - } else { - if (throttle_data->max_milli_tokens != max_milli_tokens || - throttle_data->milli_token_ratio != milli_token_ratio) { - // Entry found but with old parameters. Create a new one based on - // the original one. - throttle_data = grpc_server_retry_throttle_data_create( - max_milli_tokens, milli_token_ratio, throttle_data); - g_avl = gpr_avl_add(g_avl, (char*)server_name, throttle_data, NULL); - } else { - // Entry found. Increase refcount. - grpc_server_retry_throttle_data_ref(throttle_data); - } - } - gpr_mu_unlock(&g_mu); - return throttle_data; -} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.cc new file mode 100644 index 000000000..bdeb7e4ca --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.cc @@ -0,0 +1,191 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/filters/client_channel/retry_throttle.h" + +#include +#include + +#include +#include +#include +#include + +#include "src/core/lib/avl/avl.h" + +namespace grpc_core { +namespace internal { + +// +// ServerRetryThrottleData +// + +ServerRetryThrottleData::ServerRetryThrottleData( + intptr_t max_milli_tokens, intptr_t milli_token_ratio, + ServerRetryThrottleData* old_throttle_data) + : max_milli_tokens_(max_milli_tokens), + milli_token_ratio_(milli_token_ratio) { + intptr_t initial_milli_tokens = max_milli_tokens; + // If there was a pre-existing entry for this server name, initialize + // the token count by scaling proportionately to the old data. This + // ensures that if we're already throttling retries on the old scale, + // we will start out doing the same thing on the new one. + if (old_throttle_data != nullptr) { + double token_fraction = + static_cast( + gpr_atm_acq_load(&old_throttle_data->milli_tokens_)) / + static_cast(old_throttle_data->max_milli_tokens_); + initial_milli_tokens = + static_cast(token_fraction * max_milli_tokens); + } + gpr_atm_rel_store(&milli_tokens_, static_cast(initial_milli_tokens)); + // If there was a pre-existing entry, mark it as stale and give it a + // pointer to the new entry, which is its replacement. + if (old_throttle_data != nullptr) { + Ref().release(); // Ref held by pre-existing entry. + gpr_atm_rel_store(&old_throttle_data->replacement_, + reinterpret_cast(this)); + } +} + +ServerRetryThrottleData::~ServerRetryThrottleData() { + ServerRetryThrottleData* replacement = + reinterpret_cast( + gpr_atm_acq_load(&replacement_)); + if (replacement != nullptr) { + replacement->Unref(); + } +} + +void ServerRetryThrottleData::GetReplacementThrottleDataIfNeeded( + ServerRetryThrottleData** throttle_data) { + while (true) { + ServerRetryThrottleData* new_throttle_data = + reinterpret_cast( + gpr_atm_acq_load(&(*throttle_data)->replacement_)); + if (new_throttle_data == nullptr) return; + *throttle_data = new_throttle_data; + } +} + +bool ServerRetryThrottleData::RecordFailure() { + // First, check if we are stale and need to be replaced. + ServerRetryThrottleData* throttle_data = this; + GetReplacementThrottleDataIfNeeded(&throttle_data); + // We decrement milli_tokens by 1000 (1 token) for each failure. + const intptr_t new_value = + static_cast(gpr_atm_no_barrier_clamped_add( + &throttle_data->milli_tokens_, static_cast(-1000), + static_cast(0), + static_cast(throttle_data->max_milli_tokens_))); + // Retries are allowed as long as the new value is above the threshold + // (max_milli_tokens / 2). + return new_value > throttle_data->max_milli_tokens_ / 2; +} + +void ServerRetryThrottleData::RecordSuccess() { + // First, check if we are stale and need to be replaced. + ServerRetryThrottleData* throttle_data = this; + GetReplacementThrottleDataIfNeeded(&throttle_data); + // We increment milli_tokens by milli_token_ratio for each success. 
+ gpr_atm_no_barrier_clamped_add( + &throttle_data->milli_tokens_, + static_cast(throttle_data->milli_token_ratio_), + static_cast(0), + static_cast(throttle_data->max_milli_tokens_)); +} + +// +// avl vtable for string -> server_retry_throttle_data map +// + +namespace { + +void* copy_server_name(void* key, void* unused) { + return gpr_strdup(static_cast(key)); +} + +long compare_server_name(void* key1, void* key2, void* unused) { + return strcmp(static_cast(key1), static_cast(key2)); +} + +void destroy_server_retry_throttle_data(void* value, void* unused) { + ServerRetryThrottleData* throttle_data = + static_cast(value); + throttle_data->Unref(); +} + +void* copy_server_retry_throttle_data(void* value, void* unused) { + ServerRetryThrottleData* throttle_data = + static_cast(value); + return throttle_data->Ref().release(); +} + +void destroy_server_name(void* key, void* unused) { gpr_free(key); } + +const grpc_avl_vtable avl_vtable = { + destroy_server_name, copy_server_name, compare_server_name, + destroy_server_retry_throttle_data, copy_server_retry_throttle_data}; + +} // namespace + +// +// ServerRetryThrottleMap +// + +static gpr_mu g_mu; +static grpc_avl g_avl; + +void ServerRetryThrottleMap::Init() { + gpr_mu_init(&g_mu); + g_avl = grpc_avl_create(&avl_vtable); +} + +void ServerRetryThrottleMap::Shutdown() { + gpr_mu_destroy(&g_mu); + grpc_avl_unref(g_avl, nullptr); +} + +RefCountedPtr ServerRetryThrottleMap::GetDataForServer( + const char* server_name, intptr_t max_milli_tokens, + intptr_t milli_token_ratio) { + RefCountedPtr result; + gpr_mu_lock(&g_mu); + ServerRetryThrottleData* throttle_data = + static_cast( + grpc_avl_get(g_avl, const_cast(server_name), nullptr)); + if (throttle_data == nullptr || + throttle_data->max_milli_tokens() != max_milli_tokens || + throttle_data->milli_token_ratio() != milli_token_ratio) { + // Entry not found, or found with old parameters. Create a new one. + result = MakeRefCounted( + max_milli_tokens, milli_token_ratio, throttle_data); + g_avl = grpc_avl_add(g_avl, gpr_strdup(server_name), + result->Ref().release(), nullptr); + } else { + // Entry found. Return a new ref to it. + result = throttle_data->Ref(); + } + gpr_mu_unlock(&g_mu); + return result; +} + +} // namespace internal +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.h b/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.h index bf99297e9..fddafcd90 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/retry_throttle.h @@ -19,32 +19,63 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H -#include +#include + +#include "src/core/lib/gprpp/ref_counted.h" + +namespace grpc_core { +namespace internal { /// Tracks retry throttling data for an individual server name. -typedef struct grpc_server_retry_throttle_data grpc_server_retry_throttle_data; - -/// Records a failure. Returns true if it's okay to send a retry. -bool grpc_server_retry_throttle_data_record_failure( - grpc_server_retry_throttle_data* throttle_data); -/// Records a success. 
-void grpc_server_retry_throttle_data_record_success( - grpc_server_retry_throttle_data* throttle_data); - -grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_ref( - grpc_server_retry_throttle_data* throttle_data); -void grpc_server_retry_throttle_data_unref( - grpc_server_retry_throttle_data* throttle_data); - -/// Initializes global map of failure data for each server name. -void grpc_retry_throttle_map_init(); -/// Shuts down global map of failure data for each server name. -void grpc_retry_throttle_map_shutdown(); - -/// Returns a reference to the failure data for \a server_name, creating -/// a new entry if needed. -/// Caller must eventually unref via \a grpc_server_retry_throttle_data_unref(). -grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server( - const char* server_name, int max_milli_tokens, int milli_token_ratio); +class ServerRetryThrottleData : public RefCounted { + public: + ServerRetryThrottleData(intptr_t max_milli_tokens, intptr_t milli_token_ratio, + ServerRetryThrottleData* old_throttle_data); + + /// Records a failure. Returns true if it's okay to send a retry. + bool RecordFailure(); + + /// Records a success. + void RecordSuccess(); + + intptr_t max_milli_tokens() const { return max_milli_tokens_; } + intptr_t milli_token_ratio() const { return milli_token_ratio_; } + + private: + // So Delete() can call our private dtor. + template + friend void grpc_core::Delete(T*); + + ~ServerRetryThrottleData(); + + void GetReplacementThrottleDataIfNeeded( + ServerRetryThrottleData** throttle_data); + + const intptr_t max_milli_tokens_; + const intptr_t milli_token_ratio_; + gpr_atm milli_tokens_; + // A pointer to the replacement for this ServerRetryThrottleData entry. + // If non-nullptr, then this entry is stale and must not be used. + // We hold a reference to the replacement. + gpr_atm replacement_ = 0; +}; + +/// Global map of server name to retry throttle data. +class ServerRetryThrottleMap { + public: + /// Initializes global map of failure data for each server name. + static void Init(); + /// Shuts down global map of failure data for each server name. + static void Shutdown(); + + /// Returns the failure data for \a server_name, creating a new entry if + /// needed. + static RefCountedPtr GetDataForServer( + const char* server_name, intptr_t max_milli_tokens, + intptr_t milli_token_ratio); +}; + +} // namespace internal +} // namespace grpc_core #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.c b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.c deleted file mode 100644 index 40a51c72d..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.c +++ /dev/null @@ -1,816 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
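A note on the token-bucket semantics that the new ServerRetryThrottleData class above carries over from the deleted C implementation: each server name owns a bucket measured in milli-tokens, a failure costs 1000 milli-tokens (one token), a success refunds milli_token_ratio milli-tokens, and retries stay allowed only while the bucket holds more than half of max_milli_tokens. The single-threaded sketch below is illustrative only; SimpleRetryThrottle is not part of this patch or of gRPC, and the real class additionally uses gpr_atm clamped adds and a replacement pointer for stale entries.

#include <stdint.h>
#include <algorithm>

// Minimal single-threaded analogue of ServerRetryThrottleData's token bucket.
class SimpleRetryThrottle {
 public:
  SimpleRetryThrottle(intptr_t max_milli_tokens, intptr_t milli_token_ratio)
      : max_milli_tokens_(max_milli_tokens),
        milli_token_ratio_(milli_token_ratio),
        milli_tokens_(max_milli_tokens) {}

  // A failure costs one full token (1000 milli-tokens); retries remain
  // allowed only while the bucket holds more than half of its capacity.
  bool RecordFailure() {
    milli_tokens_ = std::max<intptr_t>(milli_tokens_ - 1000, 0);
    return milli_tokens_ > max_milli_tokens_ / 2;
  }

  // A success refunds milli_token_ratio milli-tokens, capped at capacity.
  void RecordSuccess() {
    milli_tokens_ =
        std::min(milli_tokens_ + milli_token_ratio_, max_milli_tokens_);
  }

 private:
  const intptr_t max_milli_tokens_;
  const intptr_t milli_token_ratio_;
  intptr_t milli_tokens_;
};

For example, with max_milli_tokens = 10000 and milli_token_ratio = 100, five consecutive failures drain the bucket to the halfway mark and stop further retries, and each spent token then takes ten successes to earn back.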
- * - */ - -#include "src/core/ext/filters/client_channel/subchannel.h" - -#include -#include - -#include -#include -#include - -#include "src/core/ext/filters/client_channel/client_channel.h" -#include "src/core/ext/filters/client_channel/parse_address.h" -#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" -#include "src/core/ext/filters/client_channel/subchannel_index.h" -#include "src/core/ext/filters/client_channel/uri_parser.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/channel/connected_channel.h" -#include "src/core/lib/debug/stats.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/backoff.h" -#include "src/core/lib/surface/channel.h" -#include "src/core/lib/surface/channel_init.h" -#include "src/core/lib/transport/connectivity_state.h" - -#define INTERNAL_REF_BITS 16 -#define STRONG_REF_MASK (~(gpr_atm)((1 << INTERNAL_REF_BITS) - 1)) - -#define GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS 1 -#define GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER 1.6 -#define GRPC_SUBCHANNEL_RECONNECT_MIN_BACKOFF_SECONDS 20 -#define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120 -#define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2 - -#define GET_CONNECTED_SUBCHANNEL(subchannel, barrier) \ - ((grpc_connected_subchannel *)(gpr_atm_##barrier##_load( \ - &(subchannel)->connected_subchannel))) - -typedef struct { - grpc_closure closure; - grpc_subchannel *subchannel; - grpc_connectivity_state connectivity_state; -} state_watcher; - -typedef struct external_state_watcher { - grpc_subchannel *subchannel; - grpc_pollset_set *pollset_set; - grpc_closure *notify; - grpc_closure closure; - struct external_state_watcher *next; - struct external_state_watcher *prev; -} external_state_watcher; - -struct grpc_subchannel { - grpc_connector *connector; - - /** refcount - - lower INTERNAL_REF_BITS bits are for internal references: - these do not keep the subchannel open. - - upper remaining bits are for public references: these do - keep the subchannel open */ - gpr_atm ref_pair; - - /** non-transport related channel filters */ - const grpc_channel_filter **filters; - size_t num_filters; - /** channel arguments */ - grpc_channel_args *args; - - grpc_subchannel_key *key; - - /** set during connection */ - grpc_connect_out_args connecting_result; - - /** callback for connection finishing */ - grpc_closure connected; - - /** callback for our alarm */ - grpc_closure on_alarm; - - /** pollset_set tracking who's interested in a connection - being setup */ - grpc_pollset_set *pollset_set; - - /** active connection, or null; of type grpc_connected_subchannel */ - gpr_atm connected_subchannel; - - /** mutex protecting remaining elements */ - gpr_mu mu; - - /** have we seen a disconnection? */ - bool disconnected; - /** are we connecting */ - bool connecting; - /** connectivity state tracking */ - grpc_connectivity_state_tracker state_tracker; - - external_state_watcher root_external_state_watcher; - - /** next connect attempt time */ - gpr_timespec next_attempt; - /** backoff state */ - gpr_backoff backoff_state; - /** do we have an active alarm? 
*/ - bool have_alarm; - /** have we started the backoff loop */ - bool backoff_begun; - /** our alarm */ - grpc_timer alarm; -}; - -struct grpc_subchannel_call { - grpc_connected_subchannel *connection; - grpc_closure *schedule_closure_after_destroy; -}; - -#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack *)((call) + 1)) -#define CHANNEL_STACK_FROM_CONNECTION(con) ((grpc_channel_stack *)(con)) -#define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \ - (((grpc_subchannel_call *)(callstack)) - 1) - -static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel, - grpc_error *error); - -#ifndef NDEBUG -#define REF_REASON reason -#define REF_MUTATE_EXTRA_ARGS \ - GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char *purpose -#define REF_MUTATE_PURPOSE(x) , file, line, reason, x -#else -#define REF_REASON "" -#define REF_MUTATE_EXTRA_ARGS -#define REF_MUTATE_PURPOSE(x) -#endif - -/* - * connection implementation - */ - -static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_connected_subchannel *c = (grpc_connected_subchannel *)arg; - grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c)); - gpr_free(c); -} - -grpc_connected_subchannel *grpc_connected_subchannel_ref( - grpc_connected_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { - GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON); - return c; -} - -void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx, - grpc_connected_subchannel *c - GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { - GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c), - REF_REASON); -} - -/* - * grpc_subchannel implementation - */ - -static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_subchannel *c = (grpc_subchannel *)arg; - gpr_free((void *)c->filters); - grpc_channel_args_destroy(exec_ctx, c->args); - grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker); - grpc_connector_unref(exec_ctx, c->connector); - grpc_pollset_set_destroy(exec_ctx, c->pollset_set); - grpc_subchannel_key_destroy(exec_ctx, c->key); - gpr_mu_destroy(&c->mu); - gpr_free(c); -} - -static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta, - int barrier REF_MUTATE_EXTRA_ARGS) { - gpr_atm old_val = barrier ? 
gpr_atm_full_fetch_add(&c->ref_pair, delta) - : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta); -#ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) { - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "SUBCHANNEL: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c, - purpose, old_val, old_val + delta, reason); - } -#endif - return old_val; -} - -grpc_subchannel *grpc_subchannel_ref( - grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { - gpr_atm old_refs; - old_refs = ref_mutate(c, (1 << INTERNAL_REF_BITS), - 0 REF_MUTATE_PURPOSE("STRONG_REF")); - GPR_ASSERT((old_refs & STRONG_REF_MASK) != 0); - return c; -} - -grpc_subchannel *grpc_subchannel_weak_ref( - grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { - gpr_atm old_refs; - old_refs = ref_mutate(c, 1, 0 REF_MUTATE_PURPOSE("WEAK_REF")); - GPR_ASSERT(old_refs != 0); - return c; -} - -grpc_subchannel *grpc_subchannel_ref_from_weak_ref( - grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { - if (!c) return NULL; - for (;;) { - gpr_atm old_refs = gpr_atm_acq_load(&c->ref_pair); - if (old_refs >= (1 << INTERNAL_REF_BITS)) { - gpr_atm new_refs = old_refs + (1 << INTERNAL_REF_BITS); - if (gpr_atm_rel_cas(&c->ref_pair, old_refs, new_refs)) { - return c; - } - } else { - return NULL; - } - } -} - -static void disconnect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) { - grpc_connected_subchannel *con; - grpc_subchannel_index_unregister(exec_ctx, c->key, c); - gpr_mu_lock(&c->mu); - GPR_ASSERT(!c->disconnected); - c->disconnected = true; - grpc_connector_shutdown( - exec_ctx, c->connector, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Subchannel disconnected")); - con = GET_CONNECTED_SUBCHANNEL(c, no_barrier); - if (con != NULL) { - GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, con, "connection"); - gpr_atm_no_barrier_store(&c->connected_subchannel, (gpr_atm)0xdeadbeef); - } - gpr_mu_unlock(&c->mu); -} - -void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx, - grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { - gpr_atm old_refs; - // add a weak ref and subtract a strong ref (atomically) - old_refs = ref_mutate(c, (gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS), - 1 REF_MUTATE_PURPOSE("STRONG_UNREF")); - if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) { - disconnect(exec_ctx, c); - } - GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "strong-unref"); -} - -void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx, - grpc_subchannel *c - GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { - gpr_atm old_refs; - old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF")); - if (old_refs == 1) { - GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(subchannel_destroy, c, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); - } -} - -grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx, - grpc_connector *connector, - const grpc_subchannel_args *args) { - grpc_subchannel_key *key = grpc_subchannel_key_create(args); - grpc_subchannel *c = grpc_subchannel_index_find(exec_ctx, key); - if (c) { - grpc_subchannel_key_destroy(exec_ctx, key); - return c; - } - - GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx); - c = (grpc_subchannel *)gpr_zalloc(sizeof(*c)); - c->key = key; - gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS); - c->connector = connector; - grpc_connector_ref(c->connector); - c->num_filters = args->filter_count; - if (c->num_filters > 0) { - c->filters = (const grpc_channel_filter **)gpr_malloc( - sizeof(grpc_channel_filter *) * c->num_filters); - memcpy((void *)c->filters, args->filters, - sizeof(grpc_channel_filter *) 
* c->num_filters); - } else { - c->filters = NULL; - } - c->pollset_set = grpc_pollset_set_create(); - grpc_resolved_address *addr = - (grpc_resolved_address *)gpr_malloc(sizeof(*addr)); - grpc_get_subchannel_address_arg(exec_ctx, args->args, addr); - grpc_resolved_address *new_address = NULL; - grpc_channel_args *new_args = NULL; - if (grpc_proxy_mappers_map_address(exec_ctx, addr, args->args, &new_address, - &new_args)) { - GPR_ASSERT(new_address != NULL); - gpr_free(addr); - addr = new_address; - } - static const char *keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS}; - grpc_arg new_arg = grpc_create_subchannel_address_arg(addr); - gpr_free(addr); - c->args = grpc_channel_args_copy_and_add_and_remove( - new_args != NULL ? new_args : args->args, keys_to_remove, - GPR_ARRAY_SIZE(keys_to_remove), &new_arg, 1); - gpr_free(new_arg.value.string); - if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args); - c->root_external_state_watcher.next = c->root_external_state_watcher.prev = - &c->root_external_state_watcher; - GRPC_CLOSURE_INIT(&c->connected, subchannel_connected, c, - grpc_schedule_on_exec_ctx); - grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE, - "subchannel"); - int initial_backoff_ms = - GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS * 1000; - int min_backoff_ms = GRPC_SUBCHANNEL_RECONNECT_MIN_BACKOFF_SECONDS * 1000; - int max_backoff_ms = GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000; - bool fixed_reconnect_backoff = false; - if (c->args) { - for (size_t i = 0; i < c->args->num_args; i++) { - if (0 == strcmp(c->args->args[i].key, - "grpc.testing.fixed_reconnect_backoff_ms")) { - fixed_reconnect_backoff = true; - initial_backoff_ms = min_backoff_ms = max_backoff_ms = - grpc_channel_arg_get_integer( - &c->args->args[i], - (grpc_integer_options){initial_backoff_ms, 100, INT_MAX}); - } else if (0 == strcmp(c->args->args[i].key, - GRPC_ARG_MIN_RECONNECT_BACKOFF_MS)) { - fixed_reconnect_backoff = false; - min_backoff_ms = grpc_channel_arg_get_integer( - &c->args->args[i], - (grpc_integer_options){min_backoff_ms, 100, INT_MAX}); - } else if (0 == strcmp(c->args->args[i].key, - GRPC_ARG_MAX_RECONNECT_BACKOFF_MS)) { - fixed_reconnect_backoff = false; - max_backoff_ms = grpc_channel_arg_get_integer( - &c->args->args[i], - (grpc_integer_options){max_backoff_ms, 100, INT_MAX}); - } else if (0 == strcmp(c->args->args[i].key, - GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS)) { - fixed_reconnect_backoff = false; - initial_backoff_ms = grpc_channel_arg_get_integer( - &c->args->args[i], - (grpc_integer_options){initial_backoff_ms, 100, INT_MAX}); - } - } - } - gpr_backoff_init( - &c->backoff_state, initial_backoff_ms, - fixed_reconnect_backoff ? 1.0 - : GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER, - fixed_reconnect_backoff ? 
0.0 : GRPC_SUBCHANNEL_RECONNECT_JITTER, - min_backoff_ms, max_backoff_ms); - gpr_mu_init(&c->mu); - - return grpc_subchannel_index_register(exec_ctx, key, c); -} - -static void continue_connect_locked(grpc_exec_ctx *exec_ctx, - grpc_subchannel *c) { - grpc_connect_in_args args; - - args.interested_parties = c->pollset_set; - args.deadline = c->next_attempt; - args.channel_args = c->args; - - grpc_connectivity_state_set(exec_ctx, &c->state_tracker, - GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE, - "state_change"); - grpc_connector_connect(exec_ctx, c->connector, &args, &c->connecting_result, - &c->connected); -} - -grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c, - grpc_error **error) { - grpc_connectivity_state state; - gpr_mu_lock(&c->mu); - state = grpc_connectivity_state_get(&c->state_tracker, error); - gpr_mu_unlock(&c->mu); - return state; -} - -static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - external_state_watcher *w = (external_state_watcher *)arg; - grpc_closure *follow_up = w->notify; - if (w->pollset_set != NULL) { - grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set, - w->pollset_set); - } - gpr_mu_lock(&w->subchannel->mu); - w->next->prev = w->prev; - w->prev->next = w->next; - gpr_mu_unlock(&w->subchannel->mu); - GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher"); - gpr_free(w); - GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error)); -} - -static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_subchannel *c = (grpc_subchannel *)arg; - gpr_mu_lock(&c->mu); - c->have_alarm = false; - if (c->disconnected) { - error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected", - &error, 1); - } else { - GRPC_ERROR_REF(error); - } - if (error == GRPC_ERROR_NONE) { - gpr_log(GPR_INFO, "Failed to connect to channel, retrying"); - c->next_attempt = - gpr_backoff_step(&c->backoff_state, gpr_now(GPR_CLOCK_MONOTONIC)); - continue_connect_locked(exec_ctx, c); - gpr_mu_unlock(&c->mu); - } else { - gpr_mu_unlock(&c->mu); - GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting"); - } - GRPC_ERROR_UNREF(error); -} - -static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx, - grpc_subchannel *c) { - if (c->disconnected) { - /* Don't try to connect if we're already disconnected */ - return; - } - - if (c->connecting) { - /* Already connecting: don't restart */ - return; - } - - if (GET_CONNECTED_SUBCHANNEL(c, no_barrier) != NULL) { - /* Already connected: don't restart */ - return; - } - - if (!grpc_connectivity_state_has_watchers(&c->state_tracker)) { - /* Nobody is interested in connecting: so don't just yet */ - return; - } - - c->connecting = true; - GRPC_SUBCHANNEL_WEAK_REF(c, "connecting"); - - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - if (!c->backoff_begun) { - c->backoff_begun = true; - c->next_attempt = gpr_backoff_begin(&c->backoff_state, now); - continue_connect_locked(exec_ctx, c); - } else { - GPR_ASSERT(!c->have_alarm); - c->have_alarm = true; - gpr_timespec time_til_next = gpr_time_sub(c->next_attempt, now); - if (gpr_time_cmp(time_til_next, gpr_time_0(time_til_next.clock_type)) <= - 0) { - gpr_log(GPR_INFO, "Retry immediately"); - } else { - gpr_log(GPR_INFO, "Retry in %" PRId64 ".%09d seconds", - time_til_next.tv_sec, time_til_next.tv_nsec); - } - GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm, now); 
- } -} - -void grpc_subchannel_notify_on_state_change( - grpc_exec_ctx *exec_ctx, grpc_subchannel *c, - grpc_pollset_set *interested_parties, grpc_connectivity_state *state, - grpc_closure *notify) { - external_state_watcher *w; - - if (state == NULL) { - gpr_mu_lock(&c->mu); - for (w = c->root_external_state_watcher.next; - w != &c->root_external_state_watcher; w = w->next) { - if (w->notify == notify) { - grpc_connectivity_state_notify_on_state_change( - exec_ctx, &c->state_tracker, NULL, &w->closure); - } - } - gpr_mu_unlock(&c->mu); - } else { - w = (external_state_watcher *)gpr_malloc(sizeof(*w)); - w->subchannel = c; - w->pollset_set = interested_parties; - w->notify = notify; - GRPC_CLOSURE_INIT(&w->closure, on_external_state_watcher_done, w, - grpc_schedule_on_exec_ctx); - if (interested_parties != NULL) { - grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set, - interested_parties); - } - GRPC_SUBCHANNEL_WEAK_REF(c, "external_state_watcher"); - gpr_mu_lock(&c->mu); - w->next = &c->root_external_state_watcher; - w->prev = w->next->prev; - w->next->prev = w->prev->next = w; - grpc_connectivity_state_notify_on_state_change(exec_ctx, &c->state_tracker, - state, &w->closure); - maybe_start_connecting_locked(exec_ctx, c); - gpr_mu_unlock(&c->mu); - } -} - -void grpc_connected_subchannel_process_transport_op( - grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con, - grpc_transport_op *op) { - grpc_channel_stack *channel_stack = CHANNEL_STACK_FROM_CONNECTION(con); - grpc_channel_element *top_elem = grpc_channel_stack_element(channel_stack, 0); - top_elem->filter->start_transport_op(exec_ctx, top_elem, op); -} - -static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p, - grpc_error *error) { - state_watcher *sw = (state_watcher *)p; - grpc_subchannel *c = sw->subchannel; - gpr_mu *mu = &c->mu; - - gpr_mu_lock(mu); - - /* if we failed just leave this closure */ - if (sw->connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { - /* any errors on a subchannel ==> we're done, create a new one */ - sw->connectivity_state = GRPC_CHANNEL_SHUTDOWN; - } - grpc_connectivity_state_set(exec_ctx, &c->state_tracker, - sw->connectivity_state, GRPC_ERROR_REF(error), - "reflect_child"); - if (sw->connectivity_state != GRPC_CHANNEL_SHUTDOWN) { - grpc_connected_subchannel_notify_on_state_change( - exec_ctx, GET_CONNECTED_SUBCHANNEL(c, no_barrier), NULL, - &sw->connectivity_state, &sw->closure); - GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher"); - sw = NULL; - } - - gpr_mu_unlock(mu); - GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "state_watcher"); - gpr_free(sw); -} - -static void connected_subchannel_state_op(grpc_exec_ctx *exec_ctx, - grpc_connected_subchannel *con, - grpc_pollset_set *interested_parties, - grpc_connectivity_state *state, - grpc_closure *closure) { - grpc_transport_op *op = grpc_make_transport_op(NULL); - grpc_channel_element *elem; - op->connectivity_state = state; - op->on_connectivity_state_change = closure; - op->bind_pollset_set = interested_parties; - elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0); - elem->filter->start_transport_op(exec_ctx, elem, op); -} - -void grpc_connected_subchannel_notify_on_state_change( - grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con, - grpc_pollset_set *interested_parties, grpc_connectivity_state *state, - grpc_closure *closure) { - connected_subchannel_state_op(exec_ctx, con, interested_parties, state, - closure); -} - -void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx, - 
grpc_connected_subchannel *con, - grpc_closure *closure) { - grpc_transport_op *op = grpc_make_transport_op(NULL); - grpc_channel_element *elem; - op->send_ping = closure; - elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0); - elem->filter->start_transport_op(exec_ctx, elem, op); -} - -static bool publish_transport_locked(grpc_exec_ctx *exec_ctx, - grpc_subchannel *c) { - grpc_connected_subchannel *con; - grpc_channel_stack *stk; - state_watcher *sw_subchannel; - - /* construct channel stack */ - grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create(); - grpc_channel_stack_builder_set_channel_arguments( - exec_ctx, builder, c->connecting_result.channel_args); - grpc_channel_stack_builder_set_transport(builder, - c->connecting_result.transport); - - if (!grpc_channel_init_create_stack(exec_ctx, builder, - GRPC_CLIENT_SUBCHANNEL)) { - grpc_channel_stack_builder_destroy(exec_ctx, builder); - return false; - } - grpc_error *error = grpc_channel_stack_builder_finish( - exec_ctx, builder, 0, 1, connection_destroy, NULL, (void **)&con); - if (error != GRPC_ERROR_NONE) { - grpc_transport_destroy(exec_ctx, c->connecting_result.transport); - gpr_log(GPR_ERROR, "error initializing subchannel stack: %s", - grpc_error_string(error)); - GRPC_ERROR_UNREF(error); - return false; - } - stk = CHANNEL_STACK_FROM_CONNECTION(con); - memset(&c->connecting_result, 0, sizeof(c->connecting_result)); - - /* initialize state watcher */ - sw_subchannel = (state_watcher *)gpr_malloc(sizeof(*sw_subchannel)); - sw_subchannel->subchannel = c; - sw_subchannel->connectivity_state = GRPC_CHANNEL_READY; - GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed, - sw_subchannel, grpc_schedule_on_exec_ctx); - - if (c->disconnected) { - gpr_free(sw_subchannel); - grpc_channel_stack_destroy(exec_ctx, stk); - gpr_free(con); - return false; - } - - /* publish */ - /* TODO(ctiller): this full barrier seems to clear up a TSAN failure. - I'd have expected the rel_cas below to be enough, but - seemingly it's not. - Re-evaluate if we really need this. 
*/ - gpr_atm_full_barrier(); - GPR_ASSERT(gpr_atm_rel_cas(&c->connected_subchannel, 0, (gpr_atm)con)); - - /* setup subchannel watching connected subchannel for changes; subchannel - ref for connecting is donated to the state watcher */ - GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher"); - GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting"); - grpc_connected_subchannel_notify_on_state_change( - exec_ctx, con, c->pollset_set, &sw_subchannel->connectivity_state, - &sw_subchannel->closure); - - /* signal completion */ - grpc_connectivity_state_set(exec_ctx, &c->state_tracker, GRPC_CHANNEL_READY, - GRPC_ERROR_NONE, "connected"); - return true; -} - -static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_subchannel *c = (grpc_subchannel *)arg; - grpc_channel_args *delete_channel_args = c->connecting_result.channel_args; - - GRPC_SUBCHANNEL_WEAK_REF(c, "connected"); - gpr_mu_lock(&c->mu); - c->connecting = false; - if (c->connecting_result.transport != NULL && - publish_transport_locked(exec_ctx, c)) { - /* do nothing, transport was published */ - } else if (c->disconnected) { - GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting"); - } else { - grpc_connectivity_state_set( - exec_ctx, &c->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, - grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Connect Failed", &error, 1), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE), - "connect_failed"); - - const char *errmsg = grpc_error_string(error); - gpr_log(GPR_INFO, "Connect failed: %s", errmsg); - - maybe_start_connecting_locked(exec_ctx, c); - GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting"); - } - gpr_mu_unlock(&c->mu); - GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connected"); - grpc_channel_args_destroy(exec_ctx, delete_channel_args); -} - -/* - * grpc_subchannel_call implementation - */ - -static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call, - grpc_error *error) { - grpc_subchannel_call *c = (grpc_subchannel_call *)call; - GPR_ASSERT(c->schedule_closure_after_destroy != NULL); - GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0); - grpc_connected_subchannel *connection = c->connection; - grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), NULL, - c->schedule_closure_after_destroy); - GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, connection, "subchannel_call"); - GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0); -} - -void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call *call, - grpc_closure *closure) { - GPR_ASSERT(call->schedule_closure_after_destroy == NULL); - GPR_ASSERT(closure != NULL); - call->schedule_closure_after_destroy = closure; -} - -void grpc_subchannel_call_ref( - grpc_subchannel_call *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { - GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON); -} - -void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx, - grpc_subchannel_call *c - GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { - GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON); -} - -void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx, - grpc_subchannel_call *call, - grpc_transport_stream_op_batch *batch) { - GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0); - grpc_call_stack *call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call); - grpc_call_element *top_elem = grpc_call_stack_element(call_stack, 0); - GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch); - top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, batch); - 
GPR_TIMER_END("grpc_subchannel_call_process_op", 0); -} - -grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel( - grpc_subchannel *c) { - return GET_CONNECTED_SUBCHANNEL(c, acq); -} - -const grpc_subchannel_key *grpc_subchannel_get_key( - const grpc_subchannel *subchannel) { - return subchannel->key; -} - -grpc_error *grpc_connected_subchannel_create_call( - grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *con, - const grpc_connected_subchannel_call_args *args, - grpc_subchannel_call **call) { - grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con); - *call = (grpc_subchannel_call *)gpr_arena_alloc( - args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size); - grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call); - (*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call"); - const grpc_call_element_args call_args = { - .call_stack = callstk, - .server_transport_data = NULL, - .context = args->context, - .path = args->path, - .start_time = args->start_time, - .deadline = args->deadline, - .arena = args->arena, - .call_combiner = args->call_combiner}; - grpc_error *error = grpc_call_stack_init( - exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args); - if (error != GRPC_ERROR_NONE) { - const char *error_string = grpc_error_string(error); - gpr_log(GPR_ERROR, "error: %s", error_string); - return error; - } - grpc_call_stack_set_pollset_or_pollset_set(exec_ctx, callstk, args->pollent); - return GRPC_ERROR_NONE; -} - -grpc_call_stack *grpc_subchannel_call_get_call_stack( - grpc_subchannel_call *subchannel_call) { - return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call); -} - -static void grpc_uri_to_sockaddr(grpc_exec_ctx *exec_ctx, const char *uri_str, - grpc_resolved_address *addr) { - grpc_uri *uri = grpc_uri_parse(exec_ctx, uri_str, 0 /* suppress_errors */); - GPR_ASSERT(uri != NULL); - if (!grpc_parse_uri(uri, addr)) memset(addr, 0, sizeof(*addr)); - grpc_uri_destroy(uri); -} - -void grpc_get_subchannel_address_arg(grpc_exec_ctx *exec_ctx, - const grpc_channel_args *args, - grpc_resolved_address *addr) { - const char *addr_uri_str = grpc_get_subchannel_address_uri_arg(args); - memset(addr, 0, sizeof(*addr)); - if (*addr_uri_str != '\0') { - grpc_uri_to_sockaddr(exec_ctx, addr_uri_str, addr); - } -} - -const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args) { - const grpc_arg *addr_arg = - grpc_channel_args_find(args, GRPC_ARG_SUBCHANNEL_ADDRESS); - GPR_ASSERT(addr_arg != NULL); // Should have been set by LB policy. - GPR_ASSERT(addr_arg->type == GRPC_ARG_STRING); - return addr_arg->value.string; -} - -grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) { - return grpc_channel_arg_string_create( - (char *)GRPC_ARG_SUBCHANNEL_ADDRESS, - addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup("")); -} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.cc new file mode 100644 index 000000000..d7815fb7e --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.cc @@ -0,0 +1,815 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
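A note on the reference counting that grpc_subchannel uses in both the deleted subchannel.c above and the subchannel.cc that replaces it: a single gpr_atm (ref_pair) packs two counters, with weak/internal references in the low INTERNAL_REF_BITS bits and strong/public references in the bits above them, so dropping a strong reference can atomically convert it into a weak one before deciding whether to disconnect. The std::atomic analogue below is a rough sketch only; RefPair, Disconnect() and Destroy() are illustrative stand-ins rather than gRPC APIs, and the barrier choices and debug tracing of ref_mutate() are omitted.

#include <stdint.h>
#include <atomic>

// Strong refs live in the high bits, weak refs in the low kInternalRefBits
// bits of a single word, mirroring grpc_subchannel's ref_pair.
class RefPair {
 public:
  static constexpr int kInternalRefBits = 16;
  static constexpr uintptr_t kStrongMask =
      ~((uintptr_t(1) << kInternalRefBits) - 1);

  void StrongRef() { pair_.fetch_add(uintptr_t(1) << kInternalRefBits); }
  void WeakRef() { pair_.fetch_add(1); }

  // Mirrors grpc_subchannel_unref(): atomically trade the strong ref for a
  // weak ref, disconnect when the last strong ref is gone, then drop that
  // freshly minted weak ref (which may in turn destroy the object).
  void StrongUnref() {
    const uintptr_t old_val =
        pair_.fetch_sub((uintptr_t(1) << kInternalRefBits) - 1);
    if ((old_val & kStrongMask) == (uintptr_t(1) << kInternalRefBits)) {
      Disconnect();
    }
    WeakUnref();
  }

  // Mirrors grpc_subchannel_weak_unref(): destroy on the last weak ref.
  void WeakUnref() {
    if (pair_.fetch_sub(1) == 1) Destroy();
  }

 private:
  void Disconnect() {}  // stand-in for disconnect() in the diff above
  void Destroy() {}     // stand-in for subchannel_destroy()

  // Born holding one strong reference, like grpc_subchannel_create().
  std::atomic<uintptr_t> pair_{uintptr_t(1) << kInternalRefBits};
};

Packing both counts into one word is what lets grpc_subchannel_unref() detect that the last strong reference is gone and hand the object over to the weak count without taking a lock.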
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/filters/client_channel/subchannel.h" + +#include +#include + +#include +#include + +#include +#include + +#include "src/core/ext/filters/client_channel/client_channel.h" +#include "src/core/ext/filters/client_channel/parse_address.h" +#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" +#include "src/core/ext/filters/client_channel/subchannel_index.h" +#include "src/core/ext/filters/client_channel/uri_parser.h" +#include "src/core/lib/backoff/backoff.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/connected_channel.h" +#include "src/core/lib/debug/stats.h" +#include "src/core/lib/gprpp/debug_location.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/profiling/timers.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/surface/channel.h" +#include "src/core/lib/surface/channel_init.h" +#include "src/core/lib/transport/connectivity_state.h" + +#define INTERNAL_REF_BITS 16 +#define STRONG_REF_MASK (~(gpr_atm)((1 << INTERNAL_REF_BITS) - 1)) + +#define GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS 1 +#define GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER 1.6 +#define GRPC_SUBCHANNEL_RECONNECT_MIN_TIMEOUT_SECONDS 20 +#define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120 +#define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2 + +namespace { +struct state_watcher { + grpc_closure closure; + grpc_subchannel* subchannel; + grpc_connectivity_state connectivity_state; +}; +} // namespace + +typedef struct external_state_watcher { + grpc_subchannel* subchannel; + grpc_pollset_set* pollset_set; + grpc_closure* notify; + grpc_closure closure; + struct external_state_watcher* next; + struct external_state_watcher* prev; +} external_state_watcher; + +struct grpc_subchannel { + grpc_connector* connector; + + /** refcount + - lower INTERNAL_REF_BITS bits are for internal references: + these do not keep the subchannel open. + - upper remaining bits are for public references: these do + keep the subchannel open */ + gpr_atm ref_pair; + + /** non-transport related channel filters */ + const grpc_channel_filter** filters; + size_t num_filters; + /** channel arguments */ + grpc_channel_args* args; + + grpc_subchannel_key* key; + + /** set during connection */ + grpc_connect_out_args connecting_result; + + /** callback for connection finishing */ + grpc_closure on_connected; + + /** callback for our alarm */ + grpc_closure on_alarm; + + /** pollset_set tracking who's interested in a connection + being setup */ + grpc_pollset_set* pollset_set; + + /** mutex protecting remaining elements */ + gpr_mu mu; + + /** active connection, or null; of type grpc_core::ConnectedSubchannel + */ + grpc_core::RefCountedPtr connected_subchannel; + + /** have we seen a disconnection? 
*/ + bool disconnected; + /** are we connecting */ + bool connecting; + /** connectivity state tracking */ + grpc_connectivity_state_tracker state_tracker; + + external_state_watcher root_external_state_watcher; + + /** backoff state */ + grpc_core::ManualConstructor backoff; + grpc_millis next_attempt_deadline; + grpc_millis min_connect_timeout_ms; + + /** do we have an active alarm? */ + bool have_alarm; + /** have we started the backoff loop */ + bool backoff_begun; + /** our alarm */ + grpc_timer alarm; +}; + +struct grpc_subchannel_call { + grpc_core::ConnectedSubchannel* connection; + grpc_closure* schedule_closure_after_destroy; +}; + +#define SUBCHANNEL_CALL_TO_CALL_STACK(call) ((grpc_call_stack*)((call) + 1)) +#define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \ + (((grpc_subchannel_call*)(callstack)) - 1) + +static void on_subchannel_connected(void* subchannel, grpc_error* error); + +#ifndef NDEBUG +#define REF_REASON reason +#define REF_MUTATE_EXTRA_ARGS \ + GRPC_SUBCHANNEL_REF_EXTRA_ARGS, const char* purpose +#define REF_MUTATE_PURPOSE(x) , file, line, reason, x +#else +#define REF_REASON "" +#define REF_MUTATE_EXTRA_ARGS +#define REF_MUTATE_PURPOSE(x) +#endif + +/* + * connection implementation + */ + +static void connection_destroy(void* arg, grpc_error* error) { + grpc_channel_stack* stk = static_cast(arg); + grpc_channel_stack_destroy(stk); + gpr_free(stk); +} + +/* + * grpc_subchannel implementation + */ + +static void subchannel_destroy(void* arg, grpc_error* error) { + grpc_subchannel* c = static_cast(arg); + gpr_free((void*)c->filters); + grpc_channel_args_destroy(c->args); + grpc_connectivity_state_destroy(&c->state_tracker); + grpc_connector_unref(c->connector); + grpc_pollset_set_destroy(c->pollset_set); + grpc_subchannel_key_destroy(c->key); + gpr_mu_destroy(&c->mu); + gpr_free(c); +} + +static gpr_atm ref_mutate(grpc_subchannel* c, gpr_atm delta, + int barrier REF_MUTATE_EXTRA_ARGS) { + gpr_atm old_val = barrier ? 
gpr_atm_full_fetch_add(&c->ref_pair, delta) + : gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta); +#ifndef NDEBUG + if (grpc_trace_stream_refcount.enabled()) { + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, + "SUBCHANNEL: %p %12s 0x%" PRIxPTR " -> 0x%" PRIxPTR " [%s]", c, + purpose, old_val, old_val + delta, reason); + } +#endif + return old_val; +} + +grpc_subchannel* grpc_subchannel_ref( + grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { + gpr_atm old_refs; + old_refs = ref_mutate(c, (1 << INTERNAL_REF_BITS), + 0 REF_MUTATE_PURPOSE("STRONG_REF")); + GPR_ASSERT((old_refs & STRONG_REF_MASK) != 0); + return c; +} + +grpc_subchannel* grpc_subchannel_weak_ref( + grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { + gpr_atm old_refs; + old_refs = ref_mutate(c, 1, 0 REF_MUTATE_PURPOSE("WEAK_REF")); + GPR_ASSERT(old_refs != 0); + return c; +} + +grpc_subchannel* grpc_subchannel_ref_from_weak_ref( + grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { + if (!c) return nullptr; + for (;;) { + gpr_atm old_refs = gpr_atm_acq_load(&c->ref_pair); + if (old_refs >= (1 << INTERNAL_REF_BITS)) { + gpr_atm new_refs = old_refs + (1 << INTERNAL_REF_BITS); + if (gpr_atm_rel_cas(&c->ref_pair, old_refs, new_refs)) { + return c; + } + } else { + return nullptr; + } + } +} + +static void disconnect(grpc_subchannel* c) { + grpc_subchannel_index_unregister(c->key, c); + gpr_mu_lock(&c->mu); + GPR_ASSERT(!c->disconnected); + c->disconnected = true; + grpc_connector_shutdown(c->connector, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Subchannel disconnected")); + c->connected_subchannel.reset(); + gpr_mu_unlock(&c->mu); +} + +void grpc_subchannel_unref(grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { + gpr_atm old_refs; + // add a weak ref and subtract a strong ref (atomically) + old_refs = ref_mutate( + c, static_cast(1) - static_cast(1 << INTERNAL_REF_BITS), + 1 REF_MUTATE_PURPOSE("STRONG_UNREF")); + if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) { + disconnect(c); + } + GRPC_SUBCHANNEL_WEAK_UNREF(c, "strong-unref"); +} + +void grpc_subchannel_weak_unref( + grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { + gpr_atm old_refs; + old_refs = ref_mutate(c, -static_cast(1), + 1 REF_MUTATE_PURPOSE("WEAK_UNREF")); + if (old_refs == 1) { + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_CREATE(subchannel_destroy, c, grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); + } +} + +static void parse_args_for_backoff_values( + const grpc_channel_args* args, grpc_core::BackOff::Options* backoff_options, + grpc_millis* min_connect_timeout_ms) { + grpc_millis initial_backoff_ms = + GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS * 1000; + *min_connect_timeout_ms = + GRPC_SUBCHANNEL_RECONNECT_MIN_TIMEOUT_SECONDS * 1000; + grpc_millis max_backoff_ms = + GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000; + bool fixed_reconnect_backoff = false; + if (args != nullptr) { + for (size_t i = 0; i < args->num_args; i++) { + if (0 == strcmp(args->args[i].key, + "grpc.testing.fixed_reconnect_backoff_ms")) { + fixed_reconnect_backoff = true; + initial_backoff_ms = *min_connect_timeout_ms = max_backoff_ms = + grpc_channel_arg_get_integer( + &args->args[i], + {static_cast(initial_backoff_ms), 100, INT_MAX}); + } else if (0 == + strcmp(args->args[i].key, GRPC_ARG_MIN_RECONNECT_BACKOFF_MS)) { + fixed_reconnect_backoff = false; + *min_connect_timeout_ms = grpc_channel_arg_get_integer( + &args->args[i], + {static_cast(*min_connect_timeout_ms), 100, INT_MAX}); + } else if (0 == + strcmp(args->args[i].key, GRPC_ARG_MAX_RECONNECT_BACKOFF_MS)) { 
+ fixed_reconnect_backoff = false; + max_backoff_ms = grpc_channel_arg_get_integer( + &args->args[i], {static_cast(max_backoff_ms), 100, INT_MAX}); + } else if (0 == strcmp(args->args[i].key, + GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS)) { + fixed_reconnect_backoff = false; + initial_backoff_ms = grpc_channel_arg_get_integer( + &args->args[i], + {static_cast(initial_backoff_ms), 100, INT_MAX}); + } + } + } + backoff_options->set_initial_backoff(initial_backoff_ms) + .set_multiplier(fixed_reconnect_backoff + ? 1.0 + : GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER) + .set_jitter(fixed_reconnect_backoff ? 0.0 + : GRPC_SUBCHANNEL_RECONNECT_JITTER) + .set_max_backoff(max_backoff_ms); +} + +grpc_subchannel* grpc_subchannel_create(grpc_connector* connector, + const grpc_subchannel_args* args) { + grpc_subchannel_key* key = grpc_subchannel_key_create(args); + grpc_subchannel* c = grpc_subchannel_index_find(key); + if (c) { + grpc_subchannel_key_destroy(key); + return c; + } + + GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(); + c = static_cast(gpr_zalloc(sizeof(*c))); + c->key = key; + gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS); + c->connector = connector; + grpc_connector_ref(c->connector); + c->num_filters = args->filter_count; + if (c->num_filters > 0) { + c->filters = static_cast( + gpr_malloc(sizeof(grpc_channel_filter*) * c->num_filters)); + memcpy((void*)c->filters, args->filters, + sizeof(grpc_channel_filter*) * c->num_filters); + } else { + c->filters = nullptr; + } + c->pollset_set = grpc_pollset_set_create(); + grpc_resolved_address* addr = + static_cast(gpr_malloc(sizeof(*addr))); + grpc_get_subchannel_address_arg(args->args, addr); + grpc_resolved_address* new_address = nullptr; + grpc_channel_args* new_args = nullptr; + if (grpc_proxy_mappers_map_address(addr, args->args, &new_address, + &new_args)) { + GPR_ASSERT(new_address != nullptr); + gpr_free(addr); + addr = new_address; + } + static const char* keys_to_remove[] = {GRPC_ARG_SUBCHANNEL_ADDRESS}; + grpc_arg new_arg = grpc_create_subchannel_address_arg(addr); + gpr_free(addr); + c->args = grpc_channel_args_copy_and_add_and_remove( + new_args != nullptr ? 
new_args : args->args, keys_to_remove, + GPR_ARRAY_SIZE(keys_to_remove), &new_arg, 1); + gpr_free(new_arg.value.string); + if (new_args != nullptr) grpc_channel_args_destroy(new_args); + c->root_external_state_watcher.next = c->root_external_state_watcher.prev = + &c->root_external_state_watcher; + GRPC_CLOSURE_INIT(&c->on_connected, on_subchannel_connected, c, + grpc_schedule_on_exec_ctx); + grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE, + "subchannel"); + grpc_core::BackOff::Options backoff_options; + parse_args_for_backoff_values(args->args, &backoff_options, + &c->min_connect_timeout_ms); + c->backoff.Init(backoff_options); + gpr_mu_init(&c->mu); + + return grpc_subchannel_index_register(key, c); +} + +static void continue_connect_locked(grpc_subchannel* c) { + grpc_connect_in_args args; + args.interested_parties = c->pollset_set; + const grpc_millis min_deadline = + c->min_connect_timeout_ms + grpc_core::ExecCtx::Get()->Now(); + c->next_attempt_deadline = c->backoff->NextAttemptTime(); + args.deadline = std::max(c->next_attempt_deadline, min_deadline); + args.channel_args = c->args; + grpc_connectivity_state_set(&c->state_tracker, GRPC_CHANNEL_CONNECTING, + GRPC_ERROR_NONE, "state_change"); + grpc_connector_connect(c->connector, &args, &c->connecting_result, + &c->on_connected); +} + +grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel* c, + grpc_error** error) { + grpc_connectivity_state state; + gpr_mu_lock(&c->mu); + state = grpc_connectivity_state_get(&c->state_tracker, error); + gpr_mu_unlock(&c->mu); + return state; +} + +static void on_external_state_watcher_done(void* arg, grpc_error* error) { + external_state_watcher* w = static_cast(arg); + grpc_closure* follow_up = w->notify; + if (w->pollset_set != nullptr) { + grpc_pollset_set_del_pollset_set(w->subchannel->pollset_set, + w->pollset_set); + } + gpr_mu_lock(&w->subchannel->mu); + w->next->prev = w->prev; + w->prev->next = w->next; + gpr_mu_unlock(&w->subchannel->mu); + GRPC_SUBCHANNEL_WEAK_UNREF(w->subchannel, "external_state_watcher"); + gpr_free(w); + GRPC_CLOSURE_RUN(follow_up, GRPC_ERROR_REF(error)); +} + +static void on_alarm(void* arg, grpc_error* error) { + grpc_subchannel* c = static_cast(arg); + gpr_mu_lock(&c->mu); + c->have_alarm = false; + if (c->disconnected) { + error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Disconnected", + &error, 1); + } else { + GRPC_ERROR_REF(error); + } + if (error == GRPC_ERROR_NONE) { + gpr_log(GPR_INFO, "Failed to connect to channel, retrying"); + continue_connect_locked(c); + gpr_mu_unlock(&c->mu); + } else { + gpr_mu_unlock(&c->mu); + GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting"); + } + GRPC_ERROR_UNREF(error); +} + +static void maybe_start_connecting_locked(grpc_subchannel* c) { + if (c->disconnected) { + /* Don't try to connect if we're already disconnected */ + return; + } + + if (c->connecting) { + /* Already connecting: don't restart */ + return; + } + + if (c->connected_subchannel != nullptr) { + /* Already connected: don't restart */ + return; + } + + if (!grpc_connectivity_state_has_watchers(&c->state_tracker)) { + /* Nobody is interested in connecting: so don't just yet */ + return; + } + + c->connecting = true; + GRPC_SUBCHANNEL_WEAK_REF(c, "connecting"); + + if (!c->backoff_begun) { + c->backoff_begun = true; + continue_connect_locked(c); + } else { + GPR_ASSERT(!c->have_alarm); + c->have_alarm = true; + const grpc_millis time_til_next = + c->next_attempt_deadline - grpc_core::ExecCtx::Get()->Now(); + if (time_til_next 
<= 0) { + gpr_log(GPR_INFO, "Subchannel %p: Retry immediately", c); + } else { + gpr_log(GPR_INFO, "Subchannel %p: Retry in %" PRIdPTR " milliseconds", c, + time_til_next); + } + GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx); + grpc_timer_init(&c->alarm, c->next_attempt_deadline, &c->on_alarm); + } +} + +void grpc_subchannel_notify_on_state_change( + grpc_subchannel* c, grpc_pollset_set* interested_parties, + grpc_connectivity_state* state, grpc_closure* notify) { + external_state_watcher* w; + + if (state == nullptr) { + gpr_mu_lock(&c->mu); + for (w = c->root_external_state_watcher.next; + w != &c->root_external_state_watcher; w = w->next) { + if (w->notify == notify) { + grpc_connectivity_state_notify_on_state_change(&c->state_tracker, + nullptr, &w->closure); + } + } + gpr_mu_unlock(&c->mu); + } else { + w = static_cast(gpr_malloc(sizeof(*w))); + w->subchannel = c; + w->pollset_set = interested_parties; + w->notify = notify; + GRPC_CLOSURE_INIT(&w->closure, on_external_state_watcher_done, w, + grpc_schedule_on_exec_ctx); + if (interested_parties != nullptr) { + grpc_pollset_set_add_pollset_set(c->pollset_set, interested_parties); + } + GRPC_SUBCHANNEL_WEAK_REF(c, "external_state_watcher"); + gpr_mu_lock(&c->mu); + w->next = &c->root_external_state_watcher; + w->prev = w->next->prev; + w->next->prev = w->prev->next = w; + grpc_connectivity_state_notify_on_state_change(&c->state_tracker, state, + &w->closure); + maybe_start_connecting_locked(c); + gpr_mu_unlock(&c->mu); + } +} + +static void on_connected_subchannel_connectivity_changed(void* p, + grpc_error* error) { + state_watcher* connected_subchannel_watcher = static_cast(p); + grpc_subchannel* c = connected_subchannel_watcher->subchannel; + gpr_mu* mu = &c->mu; + + gpr_mu_lock(mu); + + switch (connected_subchannel_watcher->connectivity_state) { + case GRPC_CHANNEL_TRANSIENT_FAILURE: + case GRPC_CHANNEL_SHUTDOWN: { + if (!c->disconnected && c->connected_subchannel != nullptr) { + if (grpc_trace_stream_refcount.enabled()) { + gpr_log(GPR_INFO, + "Connected subchannel %p of subchannel %p has gone into %s. 
" + "Attempting to reconnect.", + c->connected_subchannel.get(), c, + grpc_connectivity_state_name( + connected_subchannel_watcher->connectivity_state)); + } + c->connected_subchannel.reset(); + grpc_connectivity_state_set(&c->state_tracker, + GRPC_CHANNEL_TRANSIENT_FAILURE, + GRPC_ERROR_REF(error), "reflect_child"); + c->backoff_begun = false; + c->backoff->Reset(); + maybe_start_connecting_locked(c); + } else { + connected_subchannel_watcher->connectivity_state = + GRPC_CHANNEL_SHUTDOWN; + } + break; + } + default: { + grpc_connectivity_state_set( + &c->state_tracker, connected_subchannel_watcher->connectivity_state, + GRPC_ERROR_REF(error), "reflect_child"); + GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher"); + c->connected_subchannel->NotifyOnStateChange( + nullptr, &connected_subchannel_watcher->connectivity_state, + &connected_subchannel_watcher->closure); + connected_subchannel_watcher = nullptr; + } + } + gpr_mu_unlock(mu); + GRPC_SUBCHANNEL_WEAK_UNREF(c, "state_watcher"); + gpr_free(connected_subchannel_watcher); +} + +static bool publish_transport_locked(grpc_subchannel* c) { + /* construct channel stack */ + grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create(); + grpc_channel_stack_builder_set_channel_arguments( + builder, c->connecting_result.channel_args); + grpc_channel_stack_builder_set_transport(builder, + c->connecting_result.transport); + + if (!grpc_channel_init_create_stack(builder, GRPC_CLIENT_SUBCHANNEL)) { + grpc_channel_stack_builder_destroy(builder); + return false; + } + grpc_channel_stack* stk; + grpc_error* error = grpc_channel_stack_builder_finish( + builder, 0, 1, connection_destroy, nullptr, + reinterpret_cast(&stk)); + if (error != GRPC_ERROR_NONE) { + grpc_transport_destroy(c->connecting_result.transport); + gpr_log(GPR_ERROR, "error initializing subchannel stack: %s", + grpc_error_string(error)); + GRPC_ERROR_UNREF(error); + return false; + } + memset(&c->connecting_result, 0, sizeof(c->connecting_result)); + + /* initialize state watcher */ + state_watcher* connected_subchannel_watcher = static_cast( + gpr_zalloc(sizeof(*connected_subchannel_watcher))); + connected_subchannel_watcher->subchannel = c; + connected_subchannel_watcher->connectivity_state = GRPC_CHANNEL_READY; + GRPC_CLOSURE_INIT(&connected_subchannel_watcher->closure, + on_connected_subchannel_connectivity_changed, + connected_subchannel_watcher, grpc_schedule_on_exec_ctx); + + if (c->disconnected) { + gpr_free(connected_subchannel_watcher); + grpc_channel_stack_destroy(stk); + gpr_free(stk); + return false; + } + + /* publish */ + c->connected_subchannel.reset( + grpc_core::New(stk)); + gpr_log(GPR_INFO, "New connected subchannel at %p for subchannel %p", + c->connected_subchannel.get(), c); + + /* setup subchannel watching connected subchannel for changes; subchannel + ref for connecting is donated to the state watcher */ + GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher"); + GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting"); + c->connected_subchannel->NotifyOnStateChange( + c->pollset_set, &connected_subchannel_watcher->connectivity_state, + &connected_subchannel_watcher->closure); + + /* signal completion */ + grpc_connectivity_state_set(&c->state_tracker, GRPC_CHANNEL_READY, + GRPC_ERROR_NONE, "connected"); + return true; +} + +static void on_subchannel_connected(void* arg, grpc_error* error) { + grpc_subchannel* c = static_cast(arg); + grpc_channel_args* delete_channel_args = c->connecting_result.channel_args; + + GRPC_SUBCHANNEL_WEAK_REF(c, "on_subchannel_connected"); + 
gpr_mu_lock(&c->mu); + c->connecting = false; + if (c->connecting_result.transport != nullptr && + publish_transport_locked(c)) { + /* do nothing, transport was published */ + } else if (c->disconnected) { + GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting"); + } else { + grpc_connectivity_state_set( + &c->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE, + grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Connect Failed", &error, 1), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE), + "connect_failed"); + + const char* errmsg = grpc_error_string(error); + gpr_log(GPR_INFO, "Connect failed: %s", errmsg); + + maybe_start_connecting_locked(c); + GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting"); + } + gpr_mu_unlock(&c->mu); + GRPC_SUBCHANNEL_WEAK_UNREF(c, "connected"); + grpc_channel_args_destroy(delete_channel_args); +} + +/* + * grpc_subchannel_call implementation + */ + +static void subchannel_call_destroy(void* call, grpc_error* error) { + GPR_TIMER_SCOPE("grpc_subchannel_call_unref.destroy", 0); + grpc_subchannel_call* c = static_cast(call); + grpc_core::ConnectedSubchannel* connection = c->connection; + grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr, + c->schedule_closure_after_destroy); + connection->Unref(DEBUG_LOCATION, "subchannel_call"); +} + +void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call, + grpc_closure* closure) { + GPR_ASSERT(call->schedule_closure_after_destroy == nullptr); + GPR_ASSERT(closure != nullptr); + call->schedule_closure_after_destroy = closure; +} + +grpc_subchannel_call* grpc_subchannel_call_ref( + grpc_subchannel_call* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { + GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON); + return c; +} + +void grpc_subchannel_call_unref( + grpc_subchannel_call* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) { + GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON); +} + +void grpc_subchannel_call_process_op(grpc_subchannel_call* call, + grpc_transport_stream_op_batch* batch) { + GPR_TIMER_SCOPE("grpc_subchannel_call_process_op", 0); + grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call); + grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0); + GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch); + top_elem->filter->start_transport_stream_op_batch(top_elem, batch); +} + +grpc_core::RefCountedPtr +grpc_subchannel_get_connected_subchannel(grpc_subchannel* c) { + gpr_mu_lock(&c->mu); + auto copy = c->connected_subchannel; + gpr_mu_unlock(&c->mu); + return copy; +} + +const grpc_subchannel_key* grpc_subchannel_get_key( + const grpc_subchannel* subchannel) { + return subchannel->key; +} + +void* grpc_connected_subchannel_call_get_parent_data( + grpc_subchannel_call* subchannel_call) { + grpc_channel_stack* chanstk = subchannel_call->connection->channel_stack(); + return (char*)subchannel_call + sizeof(grpc_subchannel_call) + + chanstk->call_stack_size; +} + +grpc_call_stack* grpc_subchannel_call_get_call_stack( + grpc_subchannel_call* subchannel_call) { + return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call); +} + +static void grpc_uri_to_sockaddr(const char* uri_str, + grpc_resolved_address* addr) { + grpc_uri* uri = grpc_uri_parse(uri_str, 0 /* suppress_errors */); + GPR_ASSERT(uri != nullptr); + if (!grpc_parse_uri(uri, addr)) memset(addr, 0, sizeof(*addr)); + grpc_uri_destroy(uri); +} + +void grpc_get_subchannel_address_arg(const grpc_channel_args* args, + grpc_resolved_address* addr) { + const char* addr_uri_str = 
grpc_get_subchannel_address_uri_arg(args); + memset(addr, 0, sizeof(*addr)); + if (*addr_uri_str != '\0') { + grpc_uri_to_sockaddr(addr_uri_str, addr); + } +} + +const char* grpc_get_subchannel_address_uri_arg(const grpc_channel_args* args) { + const grpc_arg* addr_arg = + grpc_channel_args_find(args, GRPC_ARG_SUBCHANNEL_ADDRESS); + const char* addr_str = grpc_channel_arg_get_string(addr_arg); + GPR_ASSERT(addr_str != nullptr); // Should have been set by LB policy. + return addr_str; +} + +grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr) { + return grpc_channel_arg_string_create( + (char*)GRPC_ARG_SUBCHANNEL_ADDRESS, + addr->len > 0 ? grpc_sockaddr_to_uri(addr) : gpr_strdup("")); +} + +namespace grpc_core { + +ConnectedSubchannel::ConnectedSubchannel(grpc_channel_stack* channel_stack) + : RefCountedWithTracing(&grpc_trace_stream_refcount), + channel_stack_(channel_stack) {} + +ConnectedSubchannel::~ConnectedSubchannel() { + GRPC_CHANNEL_STACK_UNREF(channel_stack_, "connected_subchannel_dtor"); +} + +void ConnectedSubchannel::NotifyOnStateChange( + grpc_pollset_set* interested_parties, grpc_connectivity_state* state, + grpc_closure* closure) { + grpc_transport_op* op = grpc_make_transport_op(nullptr); + grpc_channel_element* elem; + op->connectivity_state = state; + op->on_connectivity_state_change = closure; + op->bind_pollset_set = interested_parties; + elem = grpc_channel_stack_element(channel_stack_, 0); + elem->filter->start_transport_op(elem, op); +} + +void ConnectedSubchannel::Ping(grpc_closure* on_initiate, + grpc_closure* on_ack) { + grpc_transport_op* op = grpc_make_transport_op(nullptr); + grpc_channel_element* elem; + op->send_ping.on_initiate = on_initiate; + op->send_ping.on_ack = on_ack; + elem = grpc_channel_stack_element(channel_stack_, 0); + elem->filter->start_transport_op(elem, op); +} + +grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args, + grpc_subchannel_call** call) { + *call = static_cast<grpc_subchannel_call*>(gpr_arena_alloc( + args.arena, sizeof(grpc_subchannel_call) + + channel_stack_->call_stack_size + args.parent_data_size)); + grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call); + RefCountedPtr<ConnectedSubchannel> connection = + Ref(DEBUG_LOCATION, "subchannel_call"); + connection.release(); // Ref is passed to the grpc_subchannel_call object.
+ (*call)->connection = this; + const grpc_call_element_args call_args = { + callstk, /* call_stack */ + nullptr, /* server_transport_data */ + args.context, /* context */ + args.path, /* path */ + args.start_time, /* start_time */ + args.deadline, /* deadline */ + args.arena, /* arena */ + args.call_combiner /* call_combiner */ + }; + grpc_error* error = grpc_call_stack_init( + channel_stack_, 1, subchannel_call_destroy, *call, &call_args); + if (error != GRPC_ERROR_NONE) { + const char* error_string = grpc_error_string(error); + gpr_log(GPR_ERROR, "error: %s", error_string); + return error; + } + grpc_call_stack_set_pollset_or_pollset_set(callstk, args.pollent); + return GRPC_ERROR_NONE; +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.h b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.h index 51d712f6a..e23aec12d 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel.h @@ -19,10 +19,14 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_H +#include + #include "src/core/ext/filters/client_channel/connector.h" #include "src/core/lib/channel/channel_stack.h" +#include "src/core/lib/gpr/arena.h" +#include "src/core/lib/gprpp/ref_counted.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/iomgr/polling_entity.h" -#include "src/core/lib/support/arena.h" #include "src/core/lib/transport/connectivity_state.h" #include "src/core/lib/transport/metadata.h" @@ -32,7 +36,6 @@ /** A (sub-)channel that knows how to connect to exactly one target address. Provides a target for load balancing. */ typedef struct grpc_subchannel grpc_subchannel; -typedef struct grpc_connected_subchannel grpc_connected_subchannel; typedef struct grpc_subchannel_call grpc_subchannel_call; typedef struct grpc_subchannel_args grpc_subchannel_args; typedef struct grpc_subchannel_key grpc_subchannel_key; @@ -42,150 +45,139 @@ typedef struct grpc_subchannel_key grpc_subchannel_key; grpc_subchannel_ref((p), __FILE__, __LINE__, (r)) #define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \ grpc_subchannel_ref_from_weak_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_SUBCHANNEL_UNREF(cl, p, r) \ - grpc_subchannel_unref((cl), (p), __FILE__, __LINE__, (r)) +#define GRPC_SUBCHANNEL_UNREF(p, r) \ + grpc_subchannel_unref((p), __FILE__, __LINE__, (r)) #define GRPC_SUBCHANNEL_WEAK_REF(p, r) \ grpc_subchannel_weak_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \ - grpc_subchannel_weak_unref((cl), (p), __FILE__, __LINE__, (r)) -#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) \ - grpc_connected_subchannel_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \ - grpc_connected_subchannel_unref((cl), (p), __FILE__, __LINE__, (r)) +#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) \ + grpc_subchannel_weak_unref((p), __FILE__, __LINE__, (r)) #define GRPC_SUBCHANNEL_CALL_REF(p, r) \ grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \ - grpc_subchannel_call_unref((cl), (p), __FILE__, __LINE__, (r)) +#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) \ + grpc_subchannel_call_unref((p), __FILE__, __LINE__, (r)) #define GRPC_SUBCHANNEL_REF_EXTRA_ARGS \ , const char *file, int line, const char *reason #else #define GRPC_SUBCHANNEL_REF(p, r) grpc_subchannel_ref((p)) #define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \ 
grpc_subchannel_ref_from_weak_ref((p)) -#define GRPC_SUBCHANNEL_UNREF(cl, p, r) grpc_subchannel_unref((cl), (p)) +#define GRPC_SUBCHANNEL_UNREF(p, r) grpc_subchannel_unref((p)) #define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p)) -#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \ - grpc_subchannel_weak_unref((cl), (p)) -#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) grpc_connected_subchannel_ref((p)) -#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \ - grpc_connected_subchannel_unref((cl), (p)) +#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) grpc_subchannel_weak_unref((p)) #define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p)) -#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \ - grpc_subchannel_call_unref((cl), (p)) +#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) grpc_subchannel_call_unref((p)) #define GRPC_SUBCHANNEL_REF_EXTRA_ARGS #endif -grpc_subchannel *grpc_subchannel_ref( - grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); -grpc_subchannel *grpc_subchannel_ref_from_weak_ref( - grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); -void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx, - grpc_subchannel *channel - GRPC_SUBCHANNEL_REF_EXTRA_ARGS); -grpc_subchannel *grpc_subchannel_weak_ref( - grpc_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); -void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx, - grpc_subchannel *channel - GRPC_SUBCHANNEL_REF_EXTRA_ARGS); -grpc_connected_subchannel *grpc_connected_subchannel_ref( - grpc_connected_subchannel *channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); -void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx, - grpc_connected_subchannel *channel - GRPC_SUBCHANNEL_REF_EXTRA_ARGS); -void grpc_subchannel_call_ref( - grpc_subchannel_call *call GRPC_SUBCHANNEL_REF_EXTRA_ARGS); -void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx, - grpc_subchannel_call *call - GRPC_SUBCHANNEL_REF_EXTRA_ARGS); - -/** construct a subchannel call */ -typedef struct { - grpc_polling_entity *pollent; - grpc_slice path; - gpr_timespec start_time; - gpr_timespec deadline; - gpr_arena *arena; - grpc_call_context_element *context; - grpc_call_combiner *call_combiner; -} grpc_connected_subchannel_call_args; - -grpc_error *grpc_connected_subchannel_create_call( - grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel, - const grpc_connected_subchannel_call_args *args, - grpc_subchannel_call **subchannel_call); - -/** process a transport level op */ -void grpc_connected_subchannel_process_transport_op( - grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *subchannel, - grpc_transport_op *op); +namespace grpc_core { + +class ConnectedSubchannel : public RefCountedWithTracing { + public: + struct CallArgs { + grpc_polling_entity* pollent; + grpc_slice path; + gpr_timespec start_time; + grpc_millis deadline; + gpr_arena* arena; + grpc_call_context_element* context; + grpc_call_combiner* call_combiner; + size_t parent_data_size; + }; + + explicit ConnectedSubchannel(grpc_channel_stack* channel_stack); + ~ConnectedSubchannel(); + + grpc_channel_stack* channel_stack() { return channel_stack_; } + void NotifyOnStateChange(grpc_pollset_set* interested_parties, + grpc_connectivity_state* state, + grpc_closure* closure); + void Ping(grpc_closure* on_initiate, grpc_closure* on_ack); + grpc_error* CreateCall(const CallArgs& args, grpc_subchannel_call** call); + + private: + grpc_channel_stack* channel_stack_; +}; + +} // namespace grpc_core + +grpc_subchannel* grpc_subchannel_ref( + grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); 
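Editor's note (not part of the upstream patch): the grpc_core::ConnectedSubchannel class declared above replaces the old grpc_connected_subchannel C interface and drops the explicit grpc_exec_ctx parameter throughout. The sketch below illustrates how a caller might drive the new CallArgs/CreateCall flow against this header; the helper name create_call_sketch, the method path, the 5-second deadline, and the caller-supplied pollent/arena/combiner objects are illustrative assumptions, not code taken from gRPC.

#include "src/core/ext/filters/client_channel/subchannel.h"

// Illustrative sketch only (assumes a grpc_core::ExecCtx is active on this
// thread and that the caller owns pollent, arena, and combiner).
static grpc_subchannel_call* create_call_sketch(grpc_subchannel* subchannel,
                                                grpc_polling_entity* pollent,
                                                gpr_arena* arena,
                                                grpc_call_combiner* combiner) {
  // Returns a grpc_core::RefCountedPtr<grpc_core::ConnectedSubchannel>, or a
  // null pointer if the subchannel is not currently connected.
  auto connected = grpc_subchannel_get_connected_subchannel(subchannel);
  if (connected == nullptr) return nullptr;
  grpc_core::ConnectedSubchannel::CallArgs call_args;
  call_args.pollent = pollent;
  call_args.path = grpc_slice_from_static_string("/pkg.Service/Method");  // hypothetical method
  call_args.start_time = gpr_now(GPR_CLOCK_MONOTONIC);
  call_args.deadline = grpc_core::ExecCtx::Get()->Now() + 5000;  // grpc_millis, about 5s from now
  call_args.arena = arena;
  call_args.context = nullptr;
  call_args.call_combiner = combiner;
  call_args.parent_data_size = 0;
  grpc_subchannel_call* call = nullptr;
  grpc_error* error = connected->CreateCall(call_args, &call);
  if (error != GRPC_ERROR_NONE) {
    gpr_log(GPR_ERROR, "CreateCall failed: %s", grpc_error_string(error));
    GRPC_ERROR_UNREF(error);
    return nullptr;
  }
  return call;
}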
+grpc_subchannel* grpc_subchannel_ref_from_weak_ref( + grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); +void grpc_subchannel_unref( + grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); +grpc_subchannel* grpc_subchannel_weak_ref( + grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); +void grpc_subchannel_weak_unref( + grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS); +grpc_subchannel_call* grpc_subchannel_call_ref( + grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS); +void grpc_subchannel_call_unref( + grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS); + +/** Returns a pointer to the parent data associated with \a subchannel_call. + The data will be of the size specified in \a parent_data_size + field of the args passed to \a grpc_connected_subchannel_create_call(). */ +void* grpc_connected_subchannel_call_get_parent_data( + grpc_subchannel_call* subchannel_call); /** poll the current connectivity state of a channel */ grpc_connectivity_state grpc_subchannel_check_connectivity( - grpc_subchannel *channel, grpc_error **error); + grpc_subchannel* channel, grpc_error** error); -/** call notify when the connectivity state of a channel changes from *state. - Updates *state with the new state of the channel */ +/** Calls notify when the connectivity state of a channel becomes different + from *state. Updates *state with the new state of the channel. */ void grpc_subchannel_notify_on_state_change( - grpc_exec_ctx *exec_ctx, grpc_subchannel *channel, - grpc_pollset_set *interested_parties, grpc_connectivity_state *state, - grpc_closure *notify); -void grpc_connected_subchannel_notify_on_state_change( - grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *channel, - grpc_pollset_set *interested_parties, grpc_connectivity_state *state, - grpc_closure *notify); -void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx, - grpc_connected_subchannel *channel, - grpc_closure *notify); - -/** retrieve the grpc_connected_subchannel - or NULL if called before - the subchannel becomes connected */ -grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel( - grpc_subchannel *subchannel); + grpc_subchannel* channel, grpc_pollset_set* interested_parties, + grpc_connectivity_state* state, grpc_closure* notify); + +/** retrieve the grpc_core::ConnectedSubchannel - or nullptr if not connected + * (which may happen before it initially connects or during transient failures) + * */ +grpc_core::RefCountedPtr +grpc_subchannel_get_connected_subchannel(grpc_subchannel* c); /** return the subchannel index key for \a subchannel */ -const grpc_subchannel_key *grpc_subchannel_get_key( - const grpc_subchannel *subchannel); +const grpc_subchannel_key* grpc_subchannel_get_key( + const grpc_subchannel* subchannel); /** continue processing a transport op */ -void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx, - grpc_subchannel_call *subchannel_call, - grpc_transport_stream_op_batch *op); +void grpc_subchannel_call_process_op(grpc_subchannel_call* subchannel_call, + grpc_transport_stream_op_batch* op); /** Must be called once per call. Sets the 'then_schedule_closure' argument for call stack destruction. 
*/ void grpc_subchannel_call_set_cleanup_closure( - grpc_subchannel_call *subchannel_call, grpc_closure *closure); + grpc_subchannel_call* subchannel_call, grpc_closure* closure); -grpc_call_stack *grpc_subchannel_call_get_call_stack( - grpc_subchannel_call *subchannel_call); +grpc_call_stack* grpc_subchannel_call_get_call_stack( + grpc_subchannel_call* subchannel_call); struct grpc_subchannel_args { /* When updating this struct, also update subchannel_index.c */ /** Channel filters for this channel - wrapped factories will likely want to mutate this */ - const grpc_channel_filter **filters; + const grpc_channel_filter** filters; /** The number of filters in the above array */ size_t filter_count; /** Channel arguments to be supplied to the newly created channel */ - const grpc_channel_args *args; + const grpc_channel_args* args; }; /** create a subchannel given a connector */ -grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx, - grpc_connector *connector, - const grpc_subchannel_args *args); +grpc_subchannel* grpc_subchannel_create(grpc_connector* connector, + const grpc_subchannel_args* args); /// Sets \a addr from \a args. -void grpc_get_subchannel_address_arg(grpc_exec_ctx *exec_ctx, - const grpc_channel_args *args, - grpc_resolved_address *addr); +void grpc_get_subchannel_address_arg(const grpc_channel_args* args, + grpc_resolved_address* addr); /// Returns the URI string for the address to connect to. -const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args); +const char* grpc_get_subchannel_address_uri_arg(const grpc_channel_args* args); /// Returns a new channel arg encoding the subchannel address as a string. /// Caller is responsible for freeing the string. -grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr); +grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.c b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.c deleted file mode 100644 index d7a51f389..000000000 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.c +++ /dev/null @@ -1,251 +0,0 @@ -// -// -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// - -#include "src/core/ext/filters/client_channel/subchannel_index.h" - -#include -#include - -#include -#include -#include -#include - -#include "src/core/lib/channel/channel_args.h" - -// a map of subchannel_key --> subchannel, used for detecting connections -// to the same destination in order to share them -static gpr_avl g_subchannel_index; - -static gpr_mu g_mu; - -static gpr_refcount g_refcount; - -struct grpc_subchannel_key { - grpc_subchannel_args args; -}; - -static bool g_force_creation = false; - -static grpc_subchannel_key *create_key( - const grpc_subchannel_args *args, - grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) { - grpc_subchannel_key *k = (grpc_subchannel_key *)gpr_malloc(sizeof(*k)); - k->args.filter_count = args->filter_count; - if (k->args.filter_count > 0) { - k->args.filters = (const grpc_channel_filter **)gpr_malloc( - sizeof(*k->args.filters) * k->args.filter_count); - memcpy((grpc_channel_filter *)k->args.filters, args->filters, - sizeof(*k->args.filters) * k->args.filter_count); - } else { - k->args.filters = NULL; - } - k->args.args = copy_channel_args(args->args); - return k; -} - -grpc_subchannel_key *grpc_subchannel_key_create( - const grpc_subchannel_args *args) { - return create_key(args, grpc_channel_args_normalize); -} - -static grpc_subchannel_key *subchannel_key_copy(grpc_subchannel_key *k) { - return create_key(&k->args, grpc_channel_args_copy); -} - -int grpc_subchannel_key_compare(const grpc_subchannel_key *a, - const grpc_subchannel_key *b) { - if (g_force_creation) return false; - int c = GPR_ICMP(a->args.filter_count, b->args.filter_count); - if (c != 0) return c; - if (a->args.filter_count > 0) { - c = memcmp(a->args.filters, b->args.filters, - a->args.filter_count * sizeof(*a->args.filters)); - if (c != 0) return c; - } - return grpc_channel_args_compare(a->args.args, b->args.args); -} - -void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx, - grpc_subchannel_key *k) { - gpr_free((grpc_channel_args *)k->args.filters); - grpc_channel_args_destroy(exec_ctx, (grpc_channel_args *)k->args.args); - gpr_free(k); -} - -static void sck_avl_destroy(void *p, void *user_data) { - grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data; - grpc_subchannel_key_destroy(exec_ctx, (grpc_subchannel_key *)p); -} - -static void *sck_avl_copy(void *p, void *unused) { - return subchannel_key_copy((grpc_subchannel_key *)p); -} - -static long sck_avl_compare(void *a, void *b, void *unused) { - return grpc_subchannel_key_compare((grpc_subchannel_key *)a, - (grpc_subchannel_key *)b); -} - -static void scv_avl_destroy(void *p, void *user_data) { - grpc_exec_ctx *exec_ctx = (grpc_exec_ctx *)user_data; - GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel *)p, - "subchannel_index"); -} - -static void *scv_avl_copy(void *p, void *unused) { - GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel *)p, "subchannel_index"); - return p; -} - -static const gpr_avl_vtable subchannel_avl_vtable = { - .destroy_key = sck_avl_destroy, - .copy_key = sck_avl_copy, - .compare_keys = sck_avl_compare, - .destroy_value = scv_avl_destroy, - .copy_value = scv_avl_copy}; - -void grpc_subchannel_index_init(void) { - g_subchannel_index = gpr_avl_create(&subchannel_avl_vtable); - gpr_mu_init(&g_mu); - gpr_ref_init(&g_refcount, 1); -} - -void grpc_subchannel_index_shutdown(void) { - // TODO(juanlishen): This refcounting mechanism may lead to memory leackage. - // To solve that, we should force polling to flush any pending callbacks, then - // shutdown safely. 
- grpc_subchannel_index_unref(); -} - -void grpc_subchannel_index_unref(void) { - if (gpr_unref(&g_refcount)) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - gpr_mu_destroy(&g_mu); - gpr_avl_unref(g_subchannel_index, &exec_ctx); - grpc_exec_ctx_finish(&exec_ctx); - } -} - -void grpc_subchannel_index_ref(void) { gpr_ref_non_zero(&g_refcount); } - -grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx, - grpc_subchannel_key *key) { - // Lock, and take a reference to the subchannel index. - // We don't need to do the search under a lock as avl's are immutable. - gpr_mu_lock(&g_mu); - gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx); - gpr_mu_unlock(&g_mu); - - grpc_subchannel *c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF( - (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx), "index_find"); - gpr_avl_unref(index, exec_ctx); - - return c; -} - -grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx, - grpc_subchannel_key *key, - grpc_subchannel *constructed) { - grpc_subchannel *c = NULL; - bool need_to_unref_constructed; - - while (c == NULL) { - need_to_unref_constructed = false; - - // Compare and swap loop: - // - take a reference to the current index - gpr_mu_lock(&g_mu); - gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx); - gpr_mu_unlock(&g_mu); - - // - Check to see if a subchannel already exists - c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx); - if (c != NULL) { - c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register"); - } - if (c != NULL) { - // yes -> we're done - need_to_unref_constructed = true; - } else { - // no -> update the avl and compare/swap - gpr_avl updated = gpr_avl_add( - gpr_avl_ref(index, exec_ctx), subchannel_key_copy(key), - GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"), exec_ctx); - - // it may happen (but it's expected to be unlikely) - // that some other thread has changed the index: - // compare/swap here to check that, and retry as necessary - gpr_mu_lock(&g_mu); - if (index.root == g_subchannel_index.root) { - GPR_SWAP(gpr_avl, updated, g_subchannel_index); - c = constructed; - } - gpr_mu_unlock(&g_mu); - - gpr_avl_unref(updated, exec_ctx); - } - gpr_avl_unref(index, exec_ctx); - } - - if (need_to_unref_constructed) { - GRPC_SUBCHANNEL_UNREF(exec_ctx, constructed, "index_register"); - } - - return c; -} - -void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx, - grpc_subchannel_key *key, - grpc_subchannel *constructed) { - bool done = false; - while (!done) { - // Compare and swap loop: - // - take a reference to the current index - gpr_mu_lock(&g_mu); - gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx); - gpr_mu_unlock(&g_mu); - - // Check to see if this key still refers to the previously - // registered subchannel - grpc_subchannel *c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx); - if (c != constructed) { - gpr_avl_unref(index, exec_ctx); - break; - } - - // compare and swap the update (some other thread may have - // mutated the index behind us) - gpr_avl updated = - gpr_avl_remove(gpr_avl_ref(index, exec_ctx), key, exec_ctx); - - gpr_mu_lock(&g_mu); - if (index.root == g_subchannel_index.root) { - GPR_SWAP(gpr_avl, updated, g_subchannel_index); - done = true; - } - gpr_mu_unlock(&g_mu); - - gpr_avl_unref(updated, exec_ctx); - gpr_avl_unref(index, exec_ctx); - } -} - -void grpc_subchannel_index_test_only_set_force_creation(bool force_creation) { - g_force_creation = force_creation; -} diff --git 
a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.cc b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.cc new file mode 100644 index 000000000..cb02b1a74 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.cc @@ -0,0 +1,254 @@ +// +// +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// + +#include + +#include "src/core/ext/filters/client_channel/subchannel_index.h" + +#include +#include + +#include +#include + +#include "src/core/lib/avl/avl.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/tls.h" + +// a map of subchannel_key --> subchannel, used for detecting connections +// to the same destination in order to share them +static grpc_avl g_subchannel_index; + +static gpr_mu g_mu; + +static gpr_refcount g_refcount; + +struct grpc_subchannel_key { + grpc_subchannel_args args; +}; + +static bool g_force_creation = false; + +static grpc_subchannel_key* create_key( + const grpc_subchannel_args* args, + grpc_channel_args* (*copy_channel_args)(const grpc_channel_args* args)) { + grpc_subchannel_key* k = + static_cast(gpr_malloc(sizeof(*k))); + k->args.filter_count = args->filter_count; + if (k->args.filter_count > 0) { + k->args.filters = static_cast( + gpr_malloc(sizeof(*k->args.filters) * k->args.filter_count)); + memcpy(reinterpret_cast(k->args.filters), + args->filters, sizeof(*k->args.filters) * k->args.filter_count); + } else { + k->args.filters = nullptr; + } + k->args.args = copy_channel_args(args->args); + return k; +} + +grpc_subchannel_key* grpc_subchannel_key_create( + const grpc_subchannel_args* args) { + return create_key(args, grpc_channel_args_normalize); +} + +static grpc_subchannel_key* subchannel_key_copy(grpc_subchannel_key* k) { + return create_key(&k->args, grpc_channel_args_copy); +} + +int grpc_subchannel_key_compare(const grpc_subchannel_key* a, + const grpc_subchannel_key* b) { + if (g_force_creation) return false; + int c = GPR_ICMP(a->args.filter_count, b->args.filter_count); + if (c != 0) return c; + if (a->args.filter_count > 0) { + c = memcmp(a->args.filters, b->args.filters, + a->args.filter_count * sizeof(*a->args.filters)); + if (c != 0) return c; + } + return grpc_channel_args_compare(a->args.args, b->args.args); +} + +void grpc_subchannel_key_destroy(grpc_subchannel_key* k) { + gpr_free(reinterpret_cast(k->args.filters)); + grpc_channel_args_destroy(const_cast(k->args.args)); + gpr_free(k); +} + +static void sck_avl_destroy(void* p, void* user_data) { + grpc_subchannel_key_destroy(static_cast(p)); +} + +static void* sck_avl_copy(void* p, void* unused) { + return subchannel_key_copy(static_cast(p)); +} + +static long sck_avl_compare(void* a, void* b, void* unused) { + return grpc_subchannel_key_compare(static_cast(a), + static_cast(b)); +} + +static void scv_avl_destroy(void* p, void* user_data) { + GRPC_SUBCHANNEL_WEAK_UNREF((grpc_subchannel*)p, "subchannel_index"); +} + +static void* scv_avl_copy(void* p, void* 
unused) { + GRPC_SUBCHANNEL_WEAK_REF((grpc_subchannel*)p, "subchannel_index"); + return p; +} + +static const grpc_avl_vtable subchannel_avl_vtable = { + sck_avl_destroy, // destroy_key + sck_avl_copy, // copy_key + sck_avl_compare, // compare_keys + scv_avl_destroy, // destroy_value + scv_avl_copy // copy_value +}; + +void grpc_subchannel_index_init(void) { + g_subchannel_index = grpc_avl_create(&subchannel_avl_vtable); + gpr_mu_init(&g_mu); + gpr_ref_init(&g_refcount, 1); +} + +void grpc_subchannel_index_shutdown(void) { + // TODO(juanlishen): This refcounting mechanism may lead to memory leackage. + // To solve that, we should force polling to flush any pending callbacks, then + // shutdown safely. + grpc_subchannel_index_unref(); +} + +void grpc_subchannel_index_unref(void) { + if (gpr_unref(&g_refcount)) { + gpr_mu_destroy(&g_mu); + grpc_avl_unref(g_subchannel_index, grpc_core::ExecCtx::Get()); + } +} + +void grpc_subchannel_index_ref(void) { gpr_ref_non_zero(&g_refcount); } + +grpc_subchannel* grpc_subchannel_index_find(grpc_subchannel_key* key) { + // Lock, and take a reference to the subchannel index. + // We don't need to do the search under a lock as avl's are immutable. + gpr_mu_lock(&g_mu); + grpc_avl index = grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get()); + gpr_mu_unlock(&g_mu); + + grpc_subchannel* c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF( + (grpc_subchannel*)grpc_avl_get(index, key, grpc_core::ExecCtx::Get()), + "index_find"); + grpc_avl_unref(index, grpc_core::ExecCtx::Get()); + + return c; +} + +grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key, + grpc_subchannel* constructed) { + grpc_subchannel* c = nullptr; + bool need_to_unref_constructed = false; + + while (c == nullptr) { + need_to_unref_constructed = false; + + // Compare and swap loop: + // - take a reference to the current index + gpr_mu_lock(&g_mu); + grpc_avl index = + grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get()); + gpr_mu_unlock(&g_mu); + + // - Check to see if a subchannel already exists + c = static_cast( + grpc_avl_get(index, key, grpc_core::ExecCtx::Get())); + if (c != nullptr) { + c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register"); + } + if (c != nullptr) { + // yes -> we're done + need_to_unref_constructed = true; + } else { + // no -> update the avl and compare/swap + grpc_avl updated = + grpc_avl_add(grpc_avl_ref(index, grpc_core::ExecCtx::Get()), + subchannel_key_copy(key), + GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"), + grpc_core::ExecCtx::Get()); + + // it may happen (but it's expected to be unlikely) + // that some other thread has changed the index: + // compare/swap here to check that, and retry as necessary + gpr_mu_lock(&g_mu); + if (index.root == g_subchannel_index.root) { + GPR_SWAP(grpc_avl, updated, g_subchannel_index); + c = constructed; + } + gpr_mu_unlock(&g_mu); + + grpc_avl_unref(updated, grpc_core::ExecCtx::Get()); + } + grpc_avl_unref(index, grpc_core::ExecCtx::Get()); + } + + if (need_to_unref_constructed) { + GRPC_SUBCHANNEL_UNREF(constructed, "index_register"); + } + + return c; +} + +void grpc_subchannel_index_unregister(grpc_subchannel_key* key, + grpc_subchannel* constructed) { + bool done = false; + while (!done) { + // Compare and swap loop: + // - take a reference to the current index + gpr_mu_lock(&g_mu); + grpc_avl index = + grpc_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get()); + gpr_mu_unlock(&g_mu); + + // Check to see if this key still refers to the previously + // registered subchannel + 
grpc_subchannel* c = static_cast( + grpc_avl_get(index, key, grpc_core::ExecCtx::Get())); + if (c != constructed) { + grpc_avl_unref(index, grpc_core::ExecCtx::Get()); + break; + } + + // compare and swap the update (some other thread may have + // mutated the index behind us) + grpc_avl updated = + grpc_avl_remove(grpc_avl_ref(index, grpc_core::ExecCtx::Get()), key, + grpc_core::ExecCtx::Get()); + + gpr_mu_lock(&g_mu); + if (index.root == g_subchannel_index.root) { + GPR_SWAP(grpc_avl, updated, g_subchannel_index); + done = true; + } + gpr_mu_unlock(&g_mu); + + grpc_avl_unref(updated, grpc_core::ExecCtx::Get()); + grpc_avl_unref(index, grpc_core::ExecCtx::Get()); + } +} + +void grpc_subchannel_index_test_only_set_force_creation(bool force_creation) { + g_force_creation = force_creation; +} diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.h b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.h index 92e36d528..a7dae9d47 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/subchannel_index.h @@ -19,40 +19,38 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H +#include + #include "src/core/ext/filters/client_channel/subchannel.h" /** \file Provides an index of active subchannels so that they can be shared amongst channels */ /** Create a key that can be used to uniquely identify a subchannel */ -grpc_subchannel_key *grpc_subchannel_key_create( - const grpc_subchannel_args *args); +grpc_subchannel_key* grpc_subchannel_key_create( + const grpc_subchannel_args* args); /** Destroy a subchannel key */ -void grpc_subchannel_key_destroy(grpc_exec_ctx *exec_ctx, - grpc_subchannel_key *key); +void grpc_subchannel_key_destroy(grpc_subchannel_key* key); /** Given a subchannel key, find the subchannel registered for it. Returns NULL if no such channel exists. Thread-safe. */ -grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx, - grpc_subchannel_key *key); +grpc_subchannel* grpc_subchannel_index_find(grpc_subchannel_key* key); /** Register a subchannel against a key. Takes ownership of \a constructed. Returns the registered subchannel. This may be different from \a constructed in the case of a registration race. */ -grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx, - grpc_subchannel_key *key, - grpc_subchannel *constructed); +grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key, + grpc_subchannel* constructed); /** Remove \a constructed as the registered subchannel for \a key. 
*/ -void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx, - grpc_subchannel_key *key, - grpc_subchannel *constructed); +void grpc_subchannel_index_unregister(grpc_subchannel_key* key, + grpc_subchannel* constructed); -int grpc_subchannel_key_compare(const grpc_subchannel_key *a, - const grpc_subchannel_key *b); +int grpc_subchannel_key_compare(const grpc_subchannel_key* a, + const grpc_subchannel_key* b); /** Initialize the subchannel index (global) */ void grpc_subchannel_index_init(void); diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.c b/Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.cc similarity index 79% rename from Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.c rename to Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.cc index fb4fb8e69..0572034a9 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.c +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/filters/client_channel/uri_parser.h" #include @@ -23,20 +25,19 @@ #include #include #include -#include #include +#include "src/core/lib/gpr/string.h" #include "src/core/lib/slice/percent_encoding.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" /** a size_t default value... maps to all 1's */ #define NOT_SET (~(size_t)0) -static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section, +static grpc_uri* bad_uri(const char* uri_text, size_t pos, const char* section, bool suppress_errors) { - char *line_prefix; + char* line_prefix; size_t pfx_len; if (!suppress_errors) { @@ -45,26 +46,28 @@ static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section, gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text); gpr_free(line_prefix); - line_prefix = (char *)gpr_malloc(pfx_len + 1); + line_prefix = static_cast(gpr_malloc(pfx_len + 1)); memset(line_prefix, ' ', pfx_len); line_prefix[pfx_len] = 0; gpr_log(GPR_ERROR, "%s^ here", line_prefix); gpr_free(line_prefix); } - return NULL; + return nullptr; } /** Returns a copy of percent decoded \a src[begin, end) */ -static char *decode_and_copy_component(grpc_exec_ctx *exec_ctx, const char *src, - size_t begin, size_t end) { +static char* decode_and_copy_component(const char* src, size_t begin, + size_t end) { grpc_slice component = - grpc_slice_from_copied_buffer(src + begin, end - begin); + (begin == NOT_SET || end == NOT_SET) + ? grpc_empty_slice() + : grpc_slice_from_copied_buffer(src + begin, end - begin); grpc_slice decoded_component = grpc_permissive_percent_decode_slice(component); - char *out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII); - grpc_slice_unref_internal(exec_ctx, component); - grpc_slice_unref_internal(exec_ctx, decoded_component); + char* out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII); + grpc_slice_unref_internal(component); + grpc_slice_unref_internal(decoded_component); return out; } @@ -76,7 +79,7 @@ static bool valid_hex(char c) { /** Returns how many chars to advance if \a uri_text[i] begins a valid \a pchar * production. If \a uri_text[i] introduces an invalid \a pchar (such as percent * sign not followed by two hex digits), NOT_SET is returned. 
*/ -static size_t parse_pchar(const char *uri_text, size_t i) { +static size_t parse_pchar(const char* uri_text, size_t i) { /* pchar = unreserved / pct-encoded / sub-delims / ":" / "@" * unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" * pct-encoded = "%" HEXDIG HEXDIG @@ -118,7 +121,7 @@ static size_t parse_pchar(const char *uri_text, size_t i) { } /* *( pchar / "?" / "/" ) */ -static int parse_fragment_or_query(const char *uri_text, size_t *i) { +static int parse_fragment_or_query(const char* uri_text, size_t* i) { char c; while ((c = uri_text[*i]) != 0) { const size_t advance = parse_pchar(uri_text, *i); /* pchar */ @@ -143,13 +146,13 @@ static int parse_fragment_or_query(const char *uri_text, size_t *i) { return 1; } -static void parse_query_parts(grpc_uri *uri) { - static const char *QUERY_PARTS_SEPARATOR = "&"; - static const char *QUERY_PARTS_VALUE_SEPARATOR = "="; - GPR_ASSERT(uri->query != NULL); +static void parse_query_parts(grpc_uri* uri) { + static const char* QUERY_PARTS_SEPARATOR = "&"; + static const char* QUERY_PARTS_VALUE_SEPARATOR = "="; + GPR_ASSERT(uri->query != nullptr); if (uri->query[0] == '\0') { - uri->query_parts = NULL; - uri->query_parts_values = NULL; + uri->query_parts = nullptr; + uri->query_parts_values = nullptr; uri->num_query_parts = 0; return; } @@ -157,11 +160,11 @@ static void parse_query_parts(grpc_uri *uri) { gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts, &uri->num_query_parts); uri->query_parts_values = - (char **)gpr_malloc(uri->num_query_parts * sizeof(char **)); + static_cast(gpr_malloc(uri->num_query_parts * sizeof(char**))); for (size_t i = 0; i < uri->num_query_parts; i++) { - char **query_param_parts; + char** query_param_parts; size_t num_query_param_parts; - char *full = uri->query_parts[i]; + char* full = uri->query_parts[i]; gpr_string_split(full, QUERY_PARTS_VALUE_SEPARATOR, &query_param_parts, &num_query_param_parts); GPR_ASSERT(num_query_param_parts > 0); @@ -172,7 +175,7 @@ static void parse_query_parts(grpc_uri *uri) { * be included, even if they include the separator. 
*/ uri->query_parts_values[i] = query_param_parts[1]; } else { - uri->query_parts_values[i] = NULL; + uri->query_parts_values[i] = nullptr; } for (size_t j = 2; j < num_query_param_parts; j++) { gpr_free(query_param_parts[j]); @@ -182,9 +185,8 @@ static void parse_query_parts(grpc_uri *uri) { } } -grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text, - bool suppress_errors) { - grpc_uri *uri; +grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors) { + grpc_uri* uri; size_t scheme_begin = 0; size_t scheme_end = NOT_SET; size_t authority_begin = NOT_SET; @@ -270,35 +272,32 @@ grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text, fragment_end = i; } - uri = (grpc_uri *)gpr_zalloc(sizeof(*uri)); - uri->scheme = - decode_and_copy_component(exec_ctx, uri_text, scheme_begin, scheme_end); - uri->authority = decode_and_copy_component(exec_ctx, uri_text, - authority_begin, authority_end); - uri->path = - decode_and_copy_component(exec_ctx, uri_text, path_begin, path_end); - uri->query = - decode_and_copy_component(exec_ctx, uri_text, query_begin, query_end); - uri->fragment = decode_and_copy_component(exec_ctx, uri_text, fragment_begin, - fragment_end); + uri = static_cast(gpr_zalloc(sizeof(*uri))); + uri->scheme = decode_and_copy_component(uri_text, scheme_begin, scheme_end); + uri->authority = + decode_and_copy_component(uri_text, authority_begin, authority_end); + uri->path = decode_and_copy_component(uri_text, path_begin, path_end); + uri->query = decode_and_copy_component(uri_text, query_begin, query_end); + uri->fragment = + decode_and_copy_component(uri_text, fragment_begin, fragment_end); parse_query_parts(uri); return uri; } -const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key) { - GPR_ASSERT(key != NULL); - if (key[0] == '\0') return NULL; +const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key) { + GPR_ASSERT(key != nullptr); + if (key[0] == '\0') return nullptr; for (size_t i = 0; i < uri->num_query_parts; ++i) { if (0 == strcmp(key, uri->query_parts[i])) { return uri->query_parts_values[i]; } } - return NULL; + return nullptr; } -void grpc_uri_destroy(grpc_uri *uri) { +void grpc_uri_destroy(grpc_uri* uri) { if (!uri) return; gpr_free(uri->scheme); gpr_free(uri->authority); diff --git a/Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.h b/Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.h index 05ca2e00e..d749f2330 100644 --- a/Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.h +++ b/Sources/CgRPC/src/core/ext/filters/client_channel/uri_parser.h @@ -19,32 +19,32 @@ #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H +#include + #include -#include "src/core/lib/iomgr/exec_ctx.h" typedef struct { - char *scheme; - char *authority; - char *path; - char *query; + char* scheme; + char* authority; + char* path; + char* query; /** Query substrings separated by '&' */ - char **query_parts; + char** query_parts; /** Number of elements in \a query_parts and \a query_parts_values */ size_t num_query_parts; /** Split each query part by '='. NULL if not present. 
*/ - char **query_parts_values; - char *fragment; + char** query_parts_values; + char* fragment; } grpc_uri; /** parse a uri, return NULL on failure */ -grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text, - bool suppress_errors); +grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors); /** return the part of a query string after the '=' in "?key=xxx&...", or NULL * if key is not present */ -const char *grpc_uri_get_query_arg(const grpc_uri *uri, const char *key); +const char* grpc_uri_get_query_arg(const grpc_uri* uri, const char* key); /** destroy a uri */ -void grpc_uri_destroy(grpc_uri *uri); +void grpc_uri_destroy(grpc_uri* uri); #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.c b/Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.cc similarity index 65% rename from Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.c rename to Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.cc index 1aed48807..27d3eac8d 100644 --- a/Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.c +++ b/Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.cc @@ -14,6 +14,8 @@ // limitations under the License. // +#include + #include "src/core/ext/filters/deadline/deadline_filter.h" #include @@ -25,7 +27,6 @@ #include #include "src/core/lib/channel/channel_stack_builder.h" -#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/surface/channel_init.h" @@ -36,63 +37,60 @@ // The on_complete callback used when sending a cancel_error batch down the // filter stack. Yields the call combiner when the batch returns. -static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* ignored) { - grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg; - GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner, +static void yield_call_combiner(void* arg, grpc_error* ignored) { + grpc_deadline_state* deadline_state = static_cast(arg); + GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner, "got on_complete from cancel_stream batch"); - GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer"); + GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer"); } // This is called via the call combiner, so access to deadline_state is // synchronized. -static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - grpc_call_element* elem = (grpc_call_element*)arg; - grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; +static void send_cancel_op_in_call_combiner(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + grpc_deadline_state* deadline_state = + static_cast(elem->call_data); grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op( GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner, deadline_state, grpc_schedule_on_exec_ctx)); batch->cancel_stream = true; batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error); - elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch); + elem->filter->start_transport_stream_op_batch(elem, batch); } // Timer callback. 
-static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - grpc_call_element* elem = (grpc_call_element*)arg; - grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; +static void timer_callback(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + grpc_deadline_state* deadline_state = + static_cast(elem->call_data); if (error != GRPC_ERROR_CANCELLED) { error = grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED); - grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner, + grpc_call_combiner_cancel(deadline_state->call_combiner, GRPC_ERROR_REF(error)); GRPC_CLOSURE_INIT(&deadline_state->timer_callback, send_cancel_op_in_call_combiner, elem, grpc_schedule_on_exec_ctx); - GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner, + GRPC_CALL_COMBINER_START(deadline_state->call_combiner, &deadline_state->timer_callback, error, "deadline exceeded -- sending cancel_stream op"); } else { - GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, - "deadline_timer"); + GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer"); } } // Starts the deadline timer. // This is called via the call combiner, so access to deadline_state is // synchronized. -static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, - grpc_call_element* elem, - gpr_timespec deadline) { - deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0) { +static void start_timer_if_needed(grpc_call_element* elem, + grpc_millis deadline) { + if (deadline == GRPC_MILLIS_INF_FUTURE) { return; } - grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; - grpc_closure* closure = NULL; + grpc_deadline_state* deadline_state = + static_cast(elem->call_data); + grpc_closure* closure = nullptr; switch (deadline_state->timer_state) { case GRPC_DEADLINE_STATE_PENDING: // Note: We do not start the timer if there is already a timer @@ -112,20 +110,18 @@ static void start_timer_if_needed(grpc_exec_ctx* exec_ctx, elem, grpc_schedule_on_exec_ctx); break; } - GPR_ASSERT(closure != NULL); + GPR_ASSERT(closure != nullptr); GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer"); - grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure, - gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&deadline_state->timer, deadline, closure); } // Cancels the deadline timer. // This is called via the call combiner, so access to deadline_state is // synchronized. -static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx, - grpc_deadline_state* deadline_state) { +static void cancel_timer_if_needed(grpc_deadline_state* deadline_state) { if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) { deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED; - grpc_timer_cancel(exec_ctx, &deadline_state->timer); + grpc_timer_cancel(&deadline_state->timer); } else { // timer was either in STATE_INITAL (nothing to cancel) // OR in STATE_FINISHED (again nothing to cancel) @@ -133,12 +129,11 @@ static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx, } // Callback run when the call is complete. 
-static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) { - grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg; - cancel_timer_if_needed(exec_ctx, deadline_state); +static void on_complete(void* arg, grpc_error* error) { + grpc_deadline_state* deadline_state = static_cast(arg); + cancel_timer_if_needed(deadline_state); // Invoke the next callback. - GRPC_CLOSURE_RUN(exec_ctx, deadline_state->next_on_complete, - GRPC_ERROR_REF(error)); + GRPC_CLOSURE_RUN(deadline_state->next_on_complete, GRPC_ERROR_REF(error)); } // Inject our own on_complete callback into op. @@ -155,41 +150,40 @@ static void inject_on_complete_cb(grpc_deadline_state* deadline_state, struct start_timer_after_init_state { bool in_call_combiner; grpc_call_element* elem; - gpr_timespec deadline; + grpc_millis deadline; grpc_closure closure; }; -static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { +static void start_timer_after_init(void* arg, grpc_error* error) { struct start_timer_after_init_state* state = - (struct start_timer_after_init_state*)arg; + static_cast(arg); grpc_deadline_state* deadline_state = - (grpc_deadline_state*)state->elem->call_data; + static_cast(state->elem->call_data); if (!state->in_call_combiner) { // We are initially called without holding the call combiner, so we // need to bounce ourselves into it. state->in_call_combiner = true; - GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner, - &state->closure, GRPC_ERROR_REF(error), + GRPC_CALL_COMBINER_START(deadline_state->call_combiner, &state->closure, + GRPC_ERROR_REF(error), "scheduling deadline timer"); return; } - start_timer_if_needed(exec_ctx, state->elem, state->deadline); + start_timer_if_needed(state->elem, state->deadline); gpr_free(state); - GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner, + GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner, "done scheduling deadline timer"); } -void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, +void grpc_deadline_state_init(grpc_call_element* elem, grpc_call_stack* call_stack, grpc_call_combiner* call_combiner, - gpr_timespec deadline) { - grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; + grpc_millis deadline) { + grpc_deadline_state* deadline_state = + static_cast(elem->call_data); deadline_state->call_stack = call_stack; deadline_state->call_combiner = call_combiner; // Deadline will always be infinite on servers, so the timer will only be // set on clients with a finite deadline. - deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) { + if (deadline != GRPC_MILLIS_INF_FUTURE) { // When the deadline passes, we indicate the failure by sending down // an op with cancel_error set. However, we can't send down any ops // until after the call stack is fully initialized. If we start the @@ -198,34 +192,36 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, // create a closure to start the timer, and we schedule that closure // to be run after call stack initialization is done. 
struct start_timer_after_init_state* state = - (struct start_timer_after_init_state*)gpr_zalloc(sizeof(*state)); + static_cast( + gpr_zalloc(sizeof(*state))); state->elem = elem; state->deadline = deadline; GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state, grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &state->closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&state->closure, GRPC_ERROR_NONE); } } -void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx, - grpc_call_element* elem) { - grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; - cancel_timer_if_needed(exec_ctx, deadline_state); +void grpc_deadline_state_destroy(grpc_call_element* elem) { + grpc_deadline_state* deadline_state = + static_cast(elem->call_data); + cancel_timer_if_needed(deadline_state); } -void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, - gpr_timespec new_deadline) { - grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; - cancel_timer_if_needed(exec_ctx, deadline_state); - start_timer_if_needed(exec_ctx, elem, new_deadline); +void grpc_deadline_state_reset(grpc_call_element* elem, + grpc_millis new_deadline) { + grpc_deadline_state* deadline_state = + static_cast(elem->call_data); + cancel_timer_if_needed(deadline_state); + start_timer_if_needed(elem, new_deadline); } void grpc_deadline_state_client_start_transport_stream_op_batch( - grpc_exec_ctx* exec_ctx, grpc_call_element* elem, - grpc_transport_stream_op_batch* op) { - grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data; + grpc_call_element* elem, grpc_transport_stream_op_batch* op) { + grpc_deadline_state* deadline_state = + static_cast(elem->call_data); if (op->cancel_stream) { - cancel_timer_if_needed(exec_ctx, deadline_state); + cancel_timer_if_needed(deadline_state); } else { // Make sure we know when the call is complete, so that we can cancel // the timer. @@ -240,16 +236,14 @@ void grpc_deadline_state_client_start_transport_stream_op_batch( // // Constructor for channel_data. Used for both client and server filters. -static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem, +static grpc_error* init_channel_elem(grpc_channel_element* elem, grpc_channel_element_args* args) { GPR_ASSERT(!args->is_last); return GRPC_ERROR_NONE; } // Destructor for channel_data. Used for both client and server filters. -static void destroy_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem) {} +static void destroy_channel_elem(grpc_channel_element* elem) {} // Call data used for both client and server filter. typedef struct base_call_data { @@ -269,50 +263,45 @@ typedef struct server_call_data { } server_call_data; // Constructor for call_data. Used for both client and server filters. -static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, - grpc_call_element* elem, +static grpc_error* init_call_elem(grpc_call_element* elem, const grpc_call_element_args* args) { - grpc_deadline_state_init(exec_ctx, elem, args->call_stack, - args->call_combiner, args->deadline); + grpc_deadline_state_init(elem, args->call_stack, args->call_combiner, + args->deadline); return GRPC_ERROR_NONE; } // Destructor for call_data. Used for both client and server filters. 
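grpc_deadline_state_init above defers starting the timer by scheduling a closure, which shows the other recurring change in this patch: closure callbacks now take only (arg, error), and an exec ctx is established on the stack at each entry point instead of being threaded through every signature. A rough sketch of that pattern, assuming the 1.12-era grpc_core::ExecCtx and closure macros (my_cb and schedule_example are invented names):

    #include <grpc/support/log.h>
    #include "src/core/lib/iomgr/closure.h"
    #include "src/core/lib/iomgr/error.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    // Sketch only: the new two-argument closure shape.
    static void my_cb(void* arg, grpc_error* error) {
      gpr_log(GPR_INFO, "closure ran for %p (%s)", arg, grpc_error_string(error));
    }

    static void schedule_example(void* arg) {
      grpc_core::ExecCtx exec_ctx;  // replaces the explicit grpc_exec_ctx* argument
      grpc_closure closure;
      GRPC_CLOSURE_INIT(&closure, my_cb, arg, grpc_schedule_on_exec_ctx);
      GRPC_CLOSURE_SCHED(&closure, GRPC_ERROR_NONE);
      // Pending closures are flushed when exec_ctx goes out of scope.
    }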
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
-  grpc_deadline_state_destroy(exec_ctx, elem);
+  grpc_deadline_state_destroy(elem);
 }
 
 // Method for starting a call op for client filter.
 static void client_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
-  grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
-                                                             op);
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
+  grpc_deadline_state_client_start_transport_stream_op_batch(elem, op);
   // Chain to next filter.
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 }
 
 // Callback for receiving initial metadata on the server.
-static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
-  grpc_call_element* elem = (grpc_call_element*)arg;
-  server_call_data* calld = (server_call_data*)elem->call_data;
+static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
+  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
+  server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
   // Get deadline from metadata and start the timer if needed.
-  start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
+  start_timer_if_needed(elem, calld->recv_initial_metadata->deadline);
   // Invoke the next callback.
   calld->next_recv_initial_metadata_ready->cb(
-      exec_ctx, calld->next_recv_initial_metadata_ready->cb_arg, error);
+      calld->next_recv_initial_metadata_ready->cb_arg, error);
 }
 
 // Method for starting a call op for server filter.
 static void server_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
-  server_call_data* calld = (server_call_data*)elem->call_data;
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
+  server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
   if (op->cancel_stream) {
-    cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
+    cancel_timer_if_needed(&calld->base.deadline_state);
   } else {
     // If we're receiving initial metadata, we need to get the deadline
     // from the recv_initial_metadata_ready callback.  So we inject our
@@ -338,7 +327,7 @@ static void server_start_transport_stream_op_batch(
     }
   }
   // Chain to next filter.
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 }
 
 const grpc_channel_filter grpc_client_deadline_filter = {
@@ -375,13 +364,13 @@ bool grpc_deadline_checking_enabled(const grpc_channel_args* channel_args) {
          !grpc_channel_args_want_minimal_stack(channel_args));
 }
 
-static bool maybe_add_deadline_filter(grpc_exec_ctx* exec_ctx,
-                                      grpc_channel_stack_builder* builder,
+static bool maybe_add_deadline_filter(grpc_channel_stack_builder* builder,
                                       void* arg) {
   return grpc_deadline_checking_enabled(
              grpc_channel_stack_builder_get_channel_arguments(builder))
             ? grpc_channel_stack_builder_prepend_filter(
-                  builder, (const grpc_channel_filter*)arg, NULL, NULL)
+                  builder, static_cast<const grpc_channel_filter*>(arg),
+                  nullptr, nullptr)
             : true;
 }
diff --git a/Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.h b/Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.h
index 3eb102ad2..13207cbd6 100644
--- a/Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.h
+++ b/Sources/CgRPC/src/core/ext/filters/deadline/deadline_filter.h
@@ -17,6 +17,8 @@
 #ifndef GRPC_CORE_EXT_FILTERS_DEADLINE_DEADLINE_FILTER_H
 #define GRPC_CORE_EXT_FILTERS_DEADLINE_DEADLINE_FILTER_H
 
+#include
+
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/iomgr/timer.h"
 
@@ -49,12 +51,12 @@ typedef struct grpc_deadline_state {
 //
 // assumes elem->call_data is zero'd
-void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+void grpc_deadline_state_init(grpc_call_element* elem,
                               grpc_call_stack* call_stack,
                               grpc_call_combiner* call_combiner,
-                              gpr_timespec deadline);
-void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
-                                 grpc_call_element* elem);
+                              grpc_millis deadline);
+
+void grpc_deadline_state_destroy(grpc_call_element* elem);
 
 // Cancels the existing timer and starts a new one with new_deadline.
 //
@@ -65,8 +67,8 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
 // deadline may result in the timer being called twice.
 //
 // Note: Must be called while holding the call combiner.
-void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-                               gpr_timespec new_deadline);
+void grpc_deadline_state_reset(grpc_call_element* elem,
+                               grpc_millis new_deadline);
 
 // To be called from the client-side filter's start_transport_stream_op_batch()
 // method.  Ensures that the deadline timer is cancelled when the call
@@ -77,8 +79,7 @@ void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
 //
 // Note: Must be called while holding the call combiner.
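For reference, this is how a filter is expected to use the header above after the signature change: grpc_deadline_state sits at the start of the element's zero-initialized call_data, and init/destroy are forwarded from the element vtable, exactly as the client and server filters in this file do. A short sketch (the my_filter_* names are invented):

    #include "src/core/ext/filters/deadline/deadline_filter.h"

    struct my_filter_call_data {
      grpc_deadline_state deadline_state;  // must be the first field
      // ... filter-specific per-call state ...
    };

    static grpc_error* my_filter_init_call_elem(
        grpc_call_element* elem, const grpc_call_element_args* args) {
      // The channel stack zeroes call_data before this runs.
      grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
                               args->deadline);
      return GRPC_ERROR_NONE;
    }

    static void my_filter_destroy_call_elem(grpc_call_element* elem,
                                            const grpc_call_final_info* final_info,
                                            grpc_closure* ignored) {
      grpc_deadline_state_destroy(elem);
    }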
void grpc_deadline_state_client_start_transport_stream_op_batch( - grpc_exec_ctx* exec_ctx, grpc_call_element* elem, - grpc_transport_stream_op_batch* op); + grpc_call_element* elem, grpc_transport_stream_op_batch* op); // Should deadline checking be performed (according to channel args) bool grpc_deadline_checking_enabled(const grpc_channel_args* args); diff --git a/Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.c b/Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.cc similarity index 63% rename from Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.c rename to Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.cc index 6208089f2..ae94ce47b 100644 --- a/Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.c +++ b/Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.cc @@ -15,17 +15,21 @@ * */ -#include "src/core/ext/filters/http/client/http_client_filter.h" +#include + #include #include #include +#include #include +#include "src/core/ext/filters/http/client/http_client_filter.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/b64.h" #include "src/core/lib/slice/percent_encoding.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/transport/static_metadata.h" #include "src/core/lib/transport/transport_impl.h" @@ -35,8 +39,9 @@ /* default maximum size of payload eligable for GET request */ static const size_t kMaxPayloadSizeForGet = 2048; -typedef struct call_data { - grpc_call_combiner *call_combiner; +namespace { +struct call_data { + grpc_call_combiner* call_combiner; // State for handling send_initial_metadata ops. grpc_linked_mdelem method; grpc_linked_mdelem scheme; @@ -45,41 +50,42 @@ typedef struct call_data { grpc_linked_mdelem content_type; grpc_linked_mdelem user_agent; // State for handling recv_initial_metadata ops. - grpc_metadata_batch *recv_initial_metadata; - grpc_closure *original_recv_initial_metadata_ready; + grpc_metadata_batch* recv_initial_metadata; + grpc_closure* original_recv_initial_metadata_ready; grpc_closure recv_initial_metadata_ready; // State for handling recv_trailing_metadata ops. - grpc_metadata_batch *recv_trailing_metadata; - grpc_closure *original_recv_trailing_metadata_on_complete; + grpc_metadata_batch* recv_trailing_metadata; + grpc_closure* original_recv_trailing_metadata_on_complete; grpc_closure recv_trailing_metadata_on_complete; // State for handling send_message ops. 
- grpc_transport_stream_op_batch *send_message_batch; + grpc_transport_stream_op_batch* send_message_batch; size_t send_message_bytes_read; - grpc_byte_stream_cache send_message_cache; - grpc_caching_byte_stream send_message_caching_stream; + grpc_core::ManualConstructor send_message_cache; + grpc_core::ManualConstructor + send_message_caching_stream; grpc_closure on_send_message_next_done; - grpc_closure *original_send_message_on_complete; + grpc_closure* original_send_message_on_complete; grpc_closure send_message_on_complete; -} call_data; +}; -typedef struct channel_data { +struct channel_data { grpc_mdelem static_scheme; grpc_mdelem user_agent; size_t max_payload_size_for_get; -} channel_data; +}; +} // namespace -static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_metadata_batch *b) { - if (b->idx.named.status != NULL) { +static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem, + grpc_metadata_batch* b) { + if (b->idx.named.status != nullptr) { if (grpc_mdelem_eq(b->idx.named.status->md, GRPC_MDELEM_STATUS_200)) { - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.status); + grpc_metadata_batch_remove(b, b->idx.named.status); } else { - char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.status->md), + char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.status->md), GPR_DUMP_ASCII); - char *msg; + char* msg; gpr_asprintf(&msg, "Received http2 header with status: %s", val); - grpc_error *e = grpc_error_set_str( + grpc_error* e = grpc_error_set_str( grpc_error_set_int( grpc_error_set_str( GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -93,19 +99,18 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, } } - if (b->idx.named.grpc_message != NULL) { + if (b->idx.named.grpc_message != nullptr) { grpc_slice pct_decoded_msg = grpc_permissive_percent_decode_slice( GRPC_MDVALUE(b->idx.named.grpc_message->md)); if (grpc_slice_is_equivalent(pct_decoded_msg, GRPC_MDVALUE(b->idx.named.grpc_message->md))) { - grpc_slice_unref_internal(exec_ctx, pct_decoded_msg); + grpc_slice_unref_internal(pct_decoded_msg); } else { - grpc_metadata_batch_set_value(exec_ctx, b->idx.named.grpc_message, - pct_decoded_msg); + grpc_metadata_batch_set_value(b->idx.named.grpc_message, pct_decoded_msg); } } - if (b->idx.named.content_type != NULL) { + if (b->idx.named.content_type != nullptr) { if (!grpc_mdelem_eq(b->idx.named.content_type->md, GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC)) { if (grpc_slice_buf_start_eq(GRPC_MDVALUE(b->idx.named.content_type->md), @@ -125,116 +130,106 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, } else { /* TODO(klempner): We're currently allowing this, but we shouldn't see it without a proxy so log for now. 
*/ - char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md), + char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md), GPR_DUMP_ASCII); gpr_log(GPR_INFO, "Unexpected content-type '%s'", val); gpr_free(val); } } - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_type); + grpc_metadata_batch_remove(b, b->idx.named.content_type); } return GRPC_ERROR_NONE; } -static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, - void *user_data, grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)user_data; - call_data *calld = (call_data *)elem->call_data; +static void recv_initial_metadata_ready(void* user_data, grpc_error* error) { + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); if (error == GRPC_ERROR_NONE) { - error = client_filter_incoming_metadata(exec_ctx, elem, - calld->recv_initial_metadata); + error = client_filter_incoming_metadata(elem, calld->recv_initial_metadata); } else { GRPC_ERROR_REF(error); } - GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready, - error); + GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready, error); } -static void recv_trailing_metadata_on_complete(grpc_exec_ctx *exec_ctx, - void *user_data, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)user_data; - call_data *calld = (call_data *)elem->call_data; +static void recv_trailing_metadata_on_complete(void* user_data, + grpc_error* error) { + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); if (error == GRPC_ERROR_NONE) { - error = client_filter_incoming_metadata(exec_ctx, elem, - calld->recv_trailing_metadata); + error = + client_filter_incoming_metadata(elem, calld->recv_trailing_metadata); } else { GRPC_ERROR_REF(error); } - GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_trailing_metadata_on_complete, - error); + GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_on_complete, error); } -static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; - grpc_byte_stream_cache_destroy(exec_ctx, &calld->send_message_cache); - GRPC_CLOSURE_RUN(exec_ctx, calld->original_send_message_on_complete, +static void send_message_on_complete(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + call_data* calld = static_cast(elem->call_data); + calld->send_message_cache.Destroy(); + GRPC_CLOSURE_RUN(calld->original_send_message_on_complete, GRPC_ERROR_REF(error)); } // Pulls a slice from the send_message byte stream, updating // calld->send_message_bytes_read. -static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx, - call_data *calld) { +static grpc_error* pull_slice_from_send_message(call_data* calld) { grpc_slice incoming_slice; - grpc_error *error = grpc_byte_stream_pull( - exec_ctx, &calld->send_message_caching_stream.base, &incoming_slice); + grpc_error* error = calld->send_message_caching_stream->Pull(&incoming_slice); if (error == GRPC_ERROR_NONE) { calld->send_message_bytes_read += GRPC_SLICE_LENGTH(incoming_slice); - grpc_slice_unref_internal(exec_ctx, incoming_slice); + grpc_slice_unref_internal(incoming_slice); } return error; } // Reads as many slices as possible from the send_message byte stream. 
// Upon successful return, if calld->send_message_bytes_read == -// calld->send_message_caching_stream.base.length, then we have completed +// calld->send_message_caching_stream->length(), then we have completed // reading from the byte stream; otherwise, an async read has been dispatched // and on_send_message_next_done() will be invoked when it is complete. -static grpc_error *read_all_available_send_message_data(grpc_exec_ctx *exec_ctx, - call_data *calld) { - while (grpc_byte_stream_next(exec_ctx, - &calld->send_message_caching_stream.base, - ~(size_t)0, &calld->on_send_message_next_done)) { - grpc_error *error = pull_slice_from_send_message(exec_ctx, calld); +static grpc_error* read_all_available_send_message_data(call_data* calld) { + while (calld->send_message_caching_stream->Next( + SIZE_MAX, &calld->on_send_message_next_done)) { + grpc_error* error = pull_slice_from_send_message(calld); if (error != GRPC_ERROR_NONE) return error; if (calld->send_message_bytes_read == - calld->send_message_caching_stream.base.length) { + calld->send_message_caching_stream->length()) { break; } } return GRPC_ERROR_NONE; } -// Async callback for grpc_byte_stream_next(). -static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; +// Async callback for ByteStream::Next(). +static void on_send_message_next_done(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + call_data* calld = static_cast(elem->call_data); if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error, calld->call_combiner); + calld->send_message_batch, error, calld->call_combiner); return; } - error = pull_slice_from_send_message(exec_ctx, calld); + error = pull_slice_from_send_message(calld); if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error, calld->call_combiner); + calld->send_message_batch, error, calld->call_combiner); return; } // There may or may not be more to read, but we don't care. If we got // here, then we know that all of the data was not available // synchronously, so we were not able to do a cached call. Instead, // we just reset the byte stream and then send down the batch as-is. - grpc_caching_byte_stream_reset(&calld->send_message_caching_stream); - grpc_call_next_op(exec_ctx, elem, calld->send_message_batch); + calld->send_message_caching_stream->Reset(); + grpc_call_next_op(elem, calld->send_message_batch); } -static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) { - char *payload_bytes = (char *)gpr_malloc(slice_buffer->length + 1); +static char* slice_buffer_to_string(grpc_slice_buffer* slice_buffer) { + char* payload_bytes = + static_cast(gpr_malloc(slice_buffer->length + 1)); size_t offset = 0; for (size_t i = 0; i < slice_buffer->count; ++i) { memcpy(payload_bytes + offset, @@ -248,10 +243,9 @@ static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) { // Modifies the path entry in the batch's send_initial_metadata to // append the base64-encoded query for a GET request. 
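update_path_for_get below rewrites :path for cacheable requests by appending '?' plus the url-safe base64 encoding of the cached payload, so the whole message rides in the query string of a GET. A standalone sketch of that transformation, assuming grpc_base64_encode from src/core/lib/slice/b64.h (path_with_query and its arguments are invented for illustration):

    #include <stdint.h>
    #include <grpc/support/alloc.h>
    #include <grpc/support/string_util.h>
    #include "src/core/lib/slice/b64.h"

    // Sketch only: build "/pkg.Svc/Method?<base64url(payload)>".
    static char* path_with_query(const char* path, const uint8_t* payload,
                                 size_t payload_len) {
      char* encoded = grpc_base64_encode(payload, payload_len,
                                         true /* url_safe */,
                                         false /* multi_line */);
      char* result;
      gpr_asprintf(&result, "%s?%s", path, encoded);
      gpr_free(encoded);
      return result;  // caller frees with gpr_free()
    }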
-static grpc_error *update_path_for_get(grpc_exec_ctx *exec_ctx,
-                                       grpc_call_element *elem,
-                                       grpc_transport_stream_op_batch *batch) {
-  call_data *calld = (call_data *)elem->call_data;
+static grpc_error* update_path_for_get(grpc_call_element* elem,
+                                       grpc_transport_stream_op_batch* batch) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   grpc_slice path_slice =
       GRPC_MDVALUE(batch->payload->send_initial_metadata.send_initial_metadata
                        ->idx.named.path->md);
@@ -260,49 +254,49 @@ static grpc_error *update_path_for_get(grpc_exec_ctx *exec_ctx,
   size_t estimated_len = GRPC_SLICE_LENGTH(path_slice);
   estimated_len++; /* for the '?' */
   estimated_len += grpc_base64_estimate_encoded_size(
-      batch->payload->send_message.send_message->length, true /* url_safe */,
+      batch->payload->send_message.send_message->length(), true /* url_safe */,
       false /* multi_line */);
   grpc_slice path_with_query_slice = GRPC_SLICE_MALLOC(estimated_len);
 
   /* memcopy individual pieces into this slice */
-  char *write_ptr = (char *)GRPC_SLICE_START_PTR(path_with_query_slice);
-  char *original_path = (char *)GRPC_SLICE_START_PTR(path_slice);
+  char* write_ptr =
+      reinterpret_cast<char*> GRPC_SLICE_START_PTR(path_with_query_slice);
+  char* original_path =
+      reinterpret_cast<char*> GRPC_SLICE_START_PTR(path_slice);
   memcpy(write_ptr, original_path, GRPC_SLICE_LENGTH(path_slice));
   write_ptr += GRPC_SLICE_LENGTH(path_slice);
   *write_ptr++ = '?';
 
-  char *payload_bytes =
-      slice_buffer_to_string(&calld->send_message_cache.cache_buffer);
-  grpc_base64_encode_core((char *)write_ptr, payload_bytes,
-                          batch->payload->send_message.send_message->length,
+  char* payload_bytes =
+      slice_buffer_to_string(calld->send_message_cache->cache_buffer());
+  grpc_base64_encode_core(write_ptr, payload_bytes,
+                          batch->payload->send_message.send_message->length(),
                           true /* url_safe */, false /* multi_line */);
   gpr_free(payload_bytes);
 
   /* remove trailing unused memory and add trailing 0 to terminate string */
-  char *t = (char *)GRPC_SLICE_START_PTR(path_with_query_slice);
+  char* t = reinterpret_cast<char*> GRPC_SLICE_START_PTR(path_with_query_slice);
   /* safe to use strlen since base64_encode will always add '\0' */
   path_with_query_slice =
       grpc_slice_sub_no_ref(path_with_query_slice, 0, strlen(t));
 
   /* substitute previous path with the new path+query */
   grpc_mdelem mdelem_path_and_query =
-      grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH, path_with_query_slice);
-  grpc_metadata_batch *b =
+      grpc_mdelem_from_slices(GRPC_MDSTR_PATH, path_with_query_slice);
+  grpc_metadata_batch* b =
       batch->payload->send_initial_metadata.send_initial_metadata;
-  return grpc_metadata_batch_substitute(exec_ctx, b, b->idx.named.path,
+  return grpc_metadata_batch_substitute(b, b->idx.named.path,
                                         mdelem_path_and_query);
 }
 
-static void remove_if_present(grpc_exec_ctx *exec_ctx,
-                              grpc_metadata_batch *batch,
+static void remove_if_present(grpc_metadata_batch* batch,
                               grpc_metadata_batch_callouts_index idx) {
-  if (batch->idx.array[idx] != NULL) {
-    grpc_metadata_batch_remove(exec_ctx, batch, batch->idx.array[idx]);
+  if (batch->idx.array[idx] != nullptr) {
+    grpc_metadata_batch_remove(batch, batch->idx.array[idx]);
  }
 }
 
 static void hc_start_transport_stream_op_batch(
-    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
-    grpc_transport_stream_op_batch *batch) {
-  call_data *calld = (call_data *)elem->call_data;
-  channel_data *channeld = (channel_data *)elem->channel_data;
-  GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
+
call_data* calld = static_cast(elem->call_data); + channel_data* channeld = static_cast(elem->channel_data); + GPR_TIMER_SCOPE("hc_start_transport_stream_op_batch", 0); if (batch->recv_initial_metadata) { /* substitute our callback for the higher callback */ @@ -322,7 +316,7 @@ static void hc_start_transport_stream_op_batch( batch->on_complete = &calld->recv_trailing_metadata_on_complete; } - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; bool batch_will_be_handled_asynchronously = false; if (batch->send_initial_metadata) { // Decide which HTTP VERB to use. We use GET if the request is marked @@ -333,29 +327,27 @@ static void hc_start_transport_stream_op_batch( if (batch->send_message && (batch->payload->send_initial_metadata.send_initial_metadata_flags & GRPC_INITIAL_METADATA_CACHEABLE_REQUEST) && - batch->payload->send_message.send_message->length < + batch->payload->send_message.send_message->length() < channeld->max_payload_size_for_get) { calld->send_message_bytes_read = 0; - grpc_byte_stream_cache_init(&calld->send_message_cache, - batch->payload->send_message.send_message); - grpc_caching_byte_stream_init(&calld->send_message_caching_stream, - &calld->send_message_cache); - batch->payload->send_message.send_message = - &calld->send_message_caching_stream.base; + calld->send_message_cache.Init( + std::move(batch->payload->send_message.send_message)); + calld->send_message_caching_stream.Init(calld->send_message_cache.get()); + batch->payload->send_message.send_message.reset( + calld->send_message_caching_stream.get()); calld->original_send_message_on_complete = batch->on_complete; batch->on_complete = &calld->send_message_on_complete; calld->send_message_batch = batch; - error = read_all_available_send_message_data(exec_ctx, calld); + error = read_all_available_send_message_data(calld); if (error != GRPC_ERROR_NONE) goto done; // If all the data has been read, then we can use GET. if (calld->send_message_bytes_read == - calld->send_message_caching_stream.base.length) { + calld->send_message_caching_stream->length()) { method = GRPC_MDELEM_METHOD_GET; - error = update_path_for_get(exec_ctx, elem, batch); + error = update_path_for_get(elem, batch); if (error != GRPC_ERROR_NONE) goto done; batch->send_message = false; - grpc_byte_stream_destroy(exec_ctx, - &calld->send_message_caching_stream.base); + calld->send_message_caching_stream->Orphan(); } else { // Not all data is available. The batch will be sent down // asynchronously in on_send_message_next_done(). 
@@ -372,41 +364,41 @@ static void hc_start_transport_stream_op_batch( } remove_if_present( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, GRPC_BATCH_METHOD); remove_if_present( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, GRPC_BATCH_SCHEME); remove_if_present( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, GRPC_BATCH_TE); remove_if_present( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, GRPC_BATCH_CONTENT_TYPE); remove_if_present( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, GRPC_BATCH_USER_AGENT); /* Send : prefixed headers, which have to be before any application layer headers. */ error = grpc_metadata_batch_add_head( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, &calld->method, method); if (error != GRPC_ERROR_NONE) goto done; error = grpc_metadata_batch_add_head( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, &calld->scheme, channeld->static_scheme); if (error != GRPC_ERROR_NONE) goto done; error = grpc_metadata_batch_add_tail( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, &calld->te_trailers, GRPC_MDELEM_TE_TRAILERS); if (error != GRPC_ERROR_NONE) goto done; error = grpc_metadata_batch_add_tail( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, &calld->content_type, GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC); if (error != GRPC_ERROR_NONE) goto done; error = grpc_metadata_batch_add_tail( - exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata, + batch->payload->send_initial_metadata.send_initial_metadata, &calld->user_agent, GRPC_MDELEM_REF(channeld->user_agent)); if (error != GRPC_ERROR_NONE) goto done; } @@ -414,18 +406,16 @@ static void hc_start_transport_stream_op_batch( done: if (error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, error, calld->call_combiner); + calld->send_message_batch, error, calld->call_combiner); } else if (!batch_will_be_handled_asynchronously) { - grpc_call_next_op(exec_ctx, elem, batch); + grpc_call_next_op(elem, batch); } - GPR_TIMER_END("hc_start_transport_stream_op_batch", 0); } /* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = (call_data *)elem->call_data; +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); calld->call_combiner = args->call_combiner; GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, recv_initial_metadata_ready, elem, @@ -441,16 +431,16 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, } /* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const 
grpc_call_final_info *final_info, - grpc_closure *ignored) {} +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) {} -static grpc_mdelem scheme_from_args(const grpc_channel_args *args) { +static grpc_mdelem scheme_from_args(const grpc_channel_args* args) { unsigned i; size_t j; grpc_mdelem valid_schemes[] = {GRPC_MDELEM_SCHEME_HTTP, GRPC_MDELEM_SCHEME_HTTPS}; - if (args != NULL) { + if (args != nullptr) { for (i = 0; i < args->num_args; ++i) { if (args->args[i].type == GRPC_ARG_STRING && strcmp(args->args[i].key, GRPC_ARG_HTTP2_SCHEME) == 0) { @@ -466,15 +456,15 @@ static grpc_mdelem scheme_from_args(const grpc_channel_args *args) { return GRPC_MDELEM_SCHEME_HTTP; } -static size_t max_payload_size_from_args(const grpc_channel_args *args) { - if (args != NULL) { +static size_t max_payload_size_from_args(const grpc_channel_args* args) { + if (args != nullptr) { for (size_t i = 0; i < args->num_args; ++i) { if (0 == strcmp(args->args[i].key, GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET)) { if (args->args[i].type != GRPC_ARG_INTEGER) { gpr_log(GPR_ERROR, "%s: must be an integer", GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET); } else { - return (size_t)args->args[i].value.integer; + return static_cast(args->args[i].value.integer); } } } @@ -482,12 +472,12 @@ static size_t max_payload_size_from_args(const grpc_channel_args *args) { return kMaxPayloadSizeForGet; } -static grpc_slice user_agent_from_args(const grpc_channel_args *args, - const char *transport_name) { +static grpc_slice user_agent_from_args(const grpc_channel_args* args, + const char* transport_name) { gpr_strvec v; size_t i; int is_first = 1; - char *tmp; + char* tmp; grpc_slice result; gpr_strvec_init(&v); @@ -524,7 +514,7 @@ static grpc_slice user_agent_from_args(const grpc_channel_args *args, } } - tmp = gpr_strvec_flatten(&v, NULL); + tmp = gpr_strvec_flatten(&v, nullptr); gpr_strvec_destroy(&v); result = grpc_slice_intern(grpc_slice_from_static_string(tmp)); gpr_free(tmp); @@ -533,27 +523,25 @@ static grpc_slice user_agent_from_args(const grpc_channel_args *args, } /* Constructor for channel_data */ -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - channel_data *chand = (channel_data *)elem->channel_data; +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + channel_data* chand = static_cast(elem->channel_data); GPR_ASSERT(!args->is_last); - GPR_ASSERT(args->optional_transport != NULL); + GPR_ASSERT(args->optional_transport != nullptr); chand->static_scheme = scheme_from_args(args->channel_args); chand->max_payload_size_for_get = max_payload_size_from_args(args->channel_args); chand->user_agent = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_USER_AGENT, + GRPC_MDSTR_USER_AGENT, user_agent_from_args(args->channel_args, args->optional_transport->vtable->name)); return GRPC_ERROR_NONE; } /* Destructor for channel data */ -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { - channel_data *chand = (channel_data *)elem->channel_data; - GRPC_MDELEM_UNREF(exec_ctx, chand->user_agent); +static void destroy_channel_elem(grpc_channel_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + GRPC_MDELEM_UNREF(chand->user_agent); } const grpc_channel_filter grpc_http_client_filter = { diff --git a/Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.h 
b/Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.h index ec8177c43..b7cef33f5 100644 --- a/Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.h +++ b/Sources/CgRPC/src/core/ext/filters/http/client/http_client_filter.h @@ -18,6 +18,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_HTTP_CLIENT_HTTP_CLIENT_FILTER_H #define GRPC_CORE_EXT_FILTERS_HTTP_CLIENT_HTTP_CLIENT_FILTER_H +#include + #include "src/core/lib/channel/channel_stack.h" /* Processes metadata on the client side for HTTP2 transports */ diff --git a/Sources/CgRPC/src/core/ext/filters/http/client_authority_filter.cc b/Sources/CgRPC/src/core/ext/filters/http/client_authority_filter.cc new file mode 100644 index 000000000..1f57ab5ce --- /dev/null +++ b/Sources/CgRPC/src/core/ext/filters/http/client_authority_filter.cc @@ -0,0 +1,156 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include +#include +#include + +#include +#include +#include + +#include "src/core/ext/filters/http/client_authority_filter.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" +#include "src/core/lib/surface/call.h" +#include "src/core/lib/surface/channel_init.h" +#include "src/core/lib/surface/channel_stack_type.h" +#include "src/core/lib/transport/static_metadata.h" + +namespace { + +struct call_data { + grpc_linked_mdelem authority_storage; + grpc_call_combiner* call_combiner; +}; + +struct channel_data { + grpc_slice default_authority; +}; + +void authority_start_transport_stream_op_batch( + grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + // Handle send_initial_metadata. + auto* initial_metadata = + batch->payload->send_initial_metadata.send_initial_metadata; + // If the initial metadata doesn't already contain :authority, add it. + if (batch->send_initial_metadata && + initial_metadata->idx.named.authority == nullptr) { + grpc_error* error = grpc_metadata_batch_add_head( + initial_metadata, &calld->authority_storage, + grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY, + grpc_slice_ref(chand->default_authority))); + if (error != GRPC_ERROR_NONE) { + grpc_transport_stream_op_batch_finish_with_failure(batch, error, + calld->call_combiner); + return; + } + } + // Pass control down the stack. 
+ grpc_call_next_op(elem, batch); +} + +/* Constructor for call_data */ +grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); + calld->call_combiner = args->call_combiner; + return GRPC_ERROR_NONE; +} + +/* Destructor for call_data */ +void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) {} + +/* Constructor for channel_data */ +grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + channel_data* chand = static_cast(elem->channel_data); + const grpc_arg* default_authority_arg = + grpc_channel_args_find(args->channel_args, GRPC_ARG_DEFAULT_AUTHORITY); + if (default_authority_arg == nullptr) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "GRPC_ARG_DEFAULT_AUTHORITY channel arg. not found. Note that direct " + "channels must explicity specify a value for this argument."); + } + const char* default_authority_str = + grpc_channel_arg_get_string(default_authority_arg); + if (default_authority_str == nullptr) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "GRPC_ARG_DEFAULT_AUTHORITY channel arg. must be a string"); + } + chand->default_authority = + grpc_slice_from_copied_string(default_authority_str); + GPR_ASSERT(!args->is_last); + return GRPC_ERROR_NONE; +} + +/* Destructor for channel data */ +void destroy_channel_elem(grpc_channel_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + grpc_slice_unref(chand->default_authority); +} +} // namespace + +const grpc_channel_filter grpc_client_authority_filter = { + authority_start_transport_stream_op_batch, + grpc_channel_next_op, + sizeof(call_data), + init_call_elem, + grpc_call_stack_ignore_set_pollset_or_pollset_set, + destroy_call_elem, + sizeof(channel_data), + init_channel_elem, + destroy_channel_elem, + grpc_channel_next_get_info, + "authority"}; + +static bool add_client_authority_filter(grpc_channel_stack_builder* builder, + void* arg) { + const grpc_channel_args* channel_args = + grpc_channel_stack_builder_get_channel_arguments(builder); + const grpc_arg* disable_client_authority_filter_arg = grpc_channel_args_find( + channel_args, GRPC_ARG_DISABLE_CLIENT_AUTHORITY_FILTER); + if (disable_client_authority_filter_arg != nullptr) { + const bool is_client_authority_filter_disabled = + grpc_channel_arg_get_bool(disable_client_authority_filter_arg, false); + if (is_client_authority_filter_disabled) { + return true; + } + } + return grpc_channel_stack_builder_prepend_filter( + builder, static_cast(arg), nullptr, nullptr); +} + +void grpc_client_authority_filter_init(void) { + grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX, + add_client_authority_filter, + (void*)&grpc_client_authority_filter); + grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX, + add_client_authority_filter, + (void*)&grpc_client_authority_filter); +} + +void grpc_client_authority_filter_shutdown(void) {} diff --git a/Sources/CgRPC/src/core/ext/census/grpc_filter.h b/Sources/CgRPC/src/core/ext/filters/http/client_authority_filter.h similarity index 51% rename from Sources/CgRPC/src/core/ext/census/grpc_filter.h rename to Sources/CgRPC/src/core/ext/filters/http/client_authority_filter.h index baa7bb931..5824e91ff 100644 --- a/Sources/CgRPC/src/core/ext/census/grpc_filter.h +++ b/Sources/CgRPC/src/core/ext/filters/http/client_authority_filter.h @@ -1,6 +1,6 @@ /* * - * Copyright 2015 gRPC authors. 
+ * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,19 @@ * */ -#ifndef GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H -#define GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H +#ifndef GRPC_CORE_EXT_FILTERS_HTTP_CLIENT_AUTHORITY_FILTER_H +#define GRPC_CORE_EXT_FILTERS_HTTP_CLIENT_AUTHORITY_FILTER_H + +#include + +#include #include "src/core/lib/channel/channel_stack.h" -/* Census filters: provides tracing and stats collection functionalities. It - needs to reside right below the surface filter in the channel stack. */ -extern const grpc_channel_filter grpc_client_census_filter; -extern const grpc_channel_filter grpc_server_census_filter; +/// Filter responsible for setting the authority header, if not already set. It +/// uses the value of the GRPC_ARG_DEFAULT_AUTHORITY channel arg if the initial +/// metadata doesn't already contain an authority value. + +extern const grpc_channel_filter grpc_client_authority_filter; -#endif /* GRPC_CORE_EXT_CENSUS_GRPC_FILTER_H */ +#endif /* GRPC_CORE_EXT_FILTERS_HTTP_CLIENT_AUTHORITY_FILTER_H */ diff --git a/Sources/CgRPC/src/core/ext/filters/http/http_filters_plugin.c b/Sources/CgRPC/src/core/ext/filters/http/http_filters_plugin.cc similarity index 71% rename from Sources/CgRPC/src/core/ext/filters/http/http_filters_plugin.c rename to Sources/CgRPC/src/core/ext/filters/http/http_filters_plugin.cc index 88bd2250f..f03fa0141 100644 --- a/Sources/CgRPC/src/core/ext/filters/http/http_filters_plugin.c +++ b/Sources/CgRPC/src/core/ext/filters/http/http_filters_plugin.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include "src/core/ext/filters/http/client/http_client_filter.h" @@ -27,45 +29,43 @@ #include "src/core/lib/transport/transport_impl.h" typedef struct { - const grpc_channel_filter *filter; - const char *control_channel_arg; + const grpc_channel_filter* filter; + const char* control_channel_arg; } optional_filter; static optional_filter compress_filter = { &grpc_message_compress_filter, GRPC_ARG_ENABLE_PER_MESSAGE_COMPRESSION}; static bool is_building_http_like_transport( - grpc_channel_stack_builder *builder) { - grpc_transport *t = grpc_channel_stack_builder_get_transport(builder); - return t != NULL && strstr(t->vtable->name, "http"); + grpc_channel_stack_builder* builder) { + grpc_transport* t = grpc_channel_stack_builder_get_transport(builder); + return t != nullptr && strstr(t->vtable->name, "http"); } -static bool maybe_add_optional_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - void *arg) { +static bool maybe_add_optional_filter(grpc_channel_stack_builder* builder, + void* arg) { if (!is_building_http_like_transport(builder)) return true; - optional_filter *filtarg = (optional_filter *)arg; - const grpc_channel_args *channel_args = + optional_filter* filtarg = static_cast(arg); + const grpc_channel_args* channel_args = grpc_channel_stack_builder_get_channel_arguments(builder); bool enable = grpc_channel_arg_get_bool( grpc_channel_args_find(channel_args, filtarg->control_channel_arg), !grpc_channel_args_want_minimal_stack(channel_args)); return enable ? 
grpc_channel_stack_builder_prepend_filter( - builder, filtarg->filter, NULL, NULL) + builder, filtarg->filter, nullptr, nullptr) : true; } -static bool maybe_add_required_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - void *arg) { +static bool maybe_add_required_filter(grpc_channel_stack_builder* builder, + void* arg) { return is_building_http_like_transport(builder) ? grpc_channel_stack_builder_prepend_filter( - builder, (const grpc_channel_filter *)arg, NULL, NULL) + builder, static_cast(arg), + nullptr, nullptr) : true; } void grpc_http_filters_init(void) { - grpc_register_tracer(&grpc_compression_trace); grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, maybe_add_optional_filter, &compress_filter); @@ -77,13 +77,13 @@ void grpc_http_filters_init(void) { maybe_add_optional_filter, &compress_filter); grpc_channel_init_register_stage( GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - maybe_add_required_filter, (void *)&grpc_http_client_filter); + maybe_add_required_filter, (void*)&grpc_http_client_filter); grpc_channel_init_register_stage( GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - maybe_add_required_filter, (void *)&grpc_http_client_filter); + maybe_add_required_filter, (void*)&grpc_http_client_filter); grpc_channel_init_register_stage( GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - maybe_add_required_filter, (void *)&grpc_http_server_filter); + maybe_add_required_filter, (void*)&grpc_http_server_filter); } void grpc_http_filters_shutdown(void) {} diff --git a/Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.c b/Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.cc similarity index 50% rename from Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.c rename to Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.cc index f785e1355..f8f478b6c 100644 --- a/Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.c +++ b/Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -27,69 +29,69 @@ #include "src/core/ext/filters/http/message_compress/message_compress_filter.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/compression/algorithm_metadata.h" +#include "src/core/lib/compression/compression_internal.h" #include "src/core/lib/compression/message_compress.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/surface/call.h" #include "src/core/lib/transport/static_metadata.h" -typedef enum { +namespace { +enum initial_metadata_state { // Initial metadata not yet seen. INITIAL_METADATA_UNSEEN = 0, // Initial metadata seen; compression algorithm set. HAS_COMPRESSION_ALGORITHM, // Initial metadata seen; no compression algorithm set. 
NO_COMPRESSION_ALGORITHM, -} initial_metadata_state; +}; -typedef struct call_data { - grpc_call_combiner *call_combiner; +struct call_data { + grpc_call_combiner* call_combiner; grpc_linked_mdelem compression_algorithm_storage; grpc_linked_mdelem stream_compression_algorithm_storage; grpc_linked_mdelem accept_encoding_storage; grpc_linked_mdelem accept_stream_encoding_storage; /** Compression algorithm we'll try to use. It may be given by incoming * metadata, or by the channel's default compression settings. */ - grpc_compression_algorithm compression_algorithm; + grpc_message_compression_algorithm message_compression_algorithm; initial_metadata_state send_initial_metadata_state; - grpc_error *cancel_error; + grpc_error* cancel_error; grpc_closure start_send_message_batch_in_call_combiner; - grpc_transport_stream_op_batch *send_message_batch; + grpc_transport_stream_op_batch* send_message_batch; grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */ - grpc_slice_buffer_stream replacement_stream; - grpc_closure *original_send_message_on_complete; + grpc_core::ManualConstructor + replacement_stream; + grpc_closure* original_send_message_on_complete; grpc_closure send_message_on_complete; grpc_closure on_send_message_next_done; -} call_data; +}; -typedef struct channel_data { +struct channel_data { /** The default, channel-level, compression algorithm */ grpc_compression_algorithm default_compression_algorithm; - /** Bitset of enabled algorithms */ + /** Bitset of enabled compression algorithms */ uint32_t enabled_algorithms_bitset; /** Supported compression algorithms */ - uint32_t supported_compression_algorithms; - - /** The default, channel-level, stream compression algorithm */ - grpc_stream_compression_algorithm default_stream_compression_algorithm; - /** Bitset of enabled stream compression algorithms */ - uint32_t enabled_stream_compression_algorithms_bitset; + uint32_t supported_message_compression_algorithms; /** Supported stream compression algorithms */ uint32_t supported_stream_compression_algorithms; -} channel_data; +}; +} // namespace -static bool skip_compression(grpc_call_element *elem, uint32_t flags, +static bool skip_compression(grpc_call_element* elem, uint32_t flags, bool has_compression_algorithm) { - call_data *calld = (call_data *)elem->call_data; - channel_data *channeld = (channel_data *)elem->channel_data; + call_data* calld = static_cast(elem->call_data); + channel_data* channeld = static_cast(elem->channel_data); if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) { return true; } if (has_compression_algorithm) { - if (calld->compression_algorithm == GRPC_COMPRESS_NONE) { + if (calld->message_compression_algorithm == GRPC_MESSAGE_COMPRESS_NONE) { return true; } return false; /* we have an actual call-specific algorithm */ @@ -99,111 +101,95 @@ static bool skip_compression(grpc_call_element *elem, uint32_t flags, } /** Filter initial metadata */ -static grpc_error *process_send_initial_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_metadata_batch *initial_metadata, - bool *has_compression_algorithm) GRPC_MUST_USE_RESULT; -static grpc_error *process_send_initial_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_metadata_batch *initial_metadata, bool *has_compression_algorithm) { - call_data *calld = (call_data *)elem->call_data; - channel_data *channeld = (channel_data *)elem->channel_data; +static grpc_error* process_send_initial_metadata( + grpc_call_element* elem, 
grpc_metadata_batch* initial_metadata, + bool* has_compression_algorithm) GRPC_MUST_USE_RESULT; +static grpc_error* process_send_initial_metadata( + grpc_call_element* elem, grpc_metadata_batch* initial_metadata, + bool* has_compression_algorithm) { + call_data* calld = static_cast(elem->call_data); + channel_data* channeld = static_cast(elem->channel_data); *has_compression_algorithm = false; + grpc_compression_algorithm compression_algorithm; grpc_stream_compression_algorithm stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE; - if (initial_metadata->idx.named.grpc_internal_stream_encoding_request != - NULL) { + if (initial_metadata->idx.named.grpc_internal_encoding_request != nullptr) { grpc_mdelem md = - initial_metadata->idx.named.grpc_internal_stream_encoding_request->md; - if (!grpc_stream_compression_algorithm_parse( - GRPC_MDVALUE(md), &stream_compression_algorithm)) { - char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); + initial_metadata->idx.named.grpc_internal_encoding_request->md; + if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md), + &compression_algorithm)) { + char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); gpr_log(GPR_ERROR, - "Invalid stream compression algorithm: '%s' (unknown). Ignoring.", - val); - gpr_free(val); - stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE; - } - if (!GPR_BITGET(channeld->enabled_stream_compression_algorithms_bitset, - stream_compression_algorithm)) { - char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); - gpr_log( - GPR_ERROR, - "Invalid stream compression algorithm: '%s' (previously disabled). " - "Ignoring.", - val); + "Invalid compression algorithm: '%s' (unknown). Ignoring.", val); gpr_free(val); + calld->message_compression_algorithm = GRPC_MESSAGE_COMPRESS_NONE; stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE; } - *has_compression_algorithm = true; - grpc_metadata_batch_remove( - exec_ctx, initial_metadata, - initial_metadata->idx.named.grpc_internal_stream_encoding_request); - /* Disable message-wise compression */ - calld->compression_algorithm = GRPC_COMPRESS_NONE; - if (initial_metadata->idx.named.grpc_internal_encoding_request != NULL) { - grpc_metadata_batch_remove( - exec_ctx, initial_metadata, - initial_metadata->idx.named.grpc_internal_encoding_request); - } - } else if (initial_metadata->idx.named.grpc_internal_encoding_request != - NULL) { - grpc_mdelem md = - initial_metadata->idx.named.grpc_internal_encoding_request->md; - if (!grpc_compression_algorithm_parse(GRPC_MDVALUE(md), - &calld->compression_algorithm)) { - char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); + if (!GPR_BITGET(channeld->enabled_algorithms_bitset, + compression_algorithm)) { + char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); gpr_log(GPR_ERROR, - "Invalid compression algorithm: '%s' (unknown). Ignoring.", val); + "Invalid compression algorithm: '%s' (previously disabled). 
" + "Ignoring.", + val); gpr_free(val); - calld->compression_algorithm = GRPC_COMPRESS_NONE; + calld->message_compression_algorithm = GRPC_MESSAGE_COMPRESS_NONE; + stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE; } *has_compression_algorithm = true; grpc_metadata_batch_remove( - exec_ctx, initial_metadata, + initial_metadata, initial_metadata->idx.named.grpc_internal_encoding_request); + calld->message_compression_algorithm = + grpc_compression_algorithm_to_message_compression_algorithm( + compression_algorithm); + stream_compression_algorithm = + grpc_compression_algorithm_to_stream_compression_algorithm( + compression_algorithm); } else { /* If no algorithm was found in the metadata and we aren't * exceptionally skipping compression, fall back to the channel * default */ - if (channeld->default_stream_compression_algorithm != - GRPC_STREAM_COMPRESS_NONE) { + if (channeld->default_compression_algorithm != GRPC_COMPRESS_NONE) { + calld->message_compression_algorithm = + grpc_compression_algorithm_to_message_compression_algorithm( + channeld->default_compression_algorithm); stream_compression_algorithm = - channeld->default_stream_compression_algorithm; - calld->compression_algorithm = GRPC_COMPRESS_NONE; - } else { - calld->compression_algorithm = channeld->default_compression_algorithm; + grpc_compression_algorithm_to_stream_compression_algorithm( + channeld->default_compression_algorithm); } *has_compression_algorithm = true; } - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; /* hint compression algorithm */ if (stream_compression_algorithm != GRPC_STREAM_COMPRESS_NONE) { error = grpc_metadata_batch_add_tail( - exec_ctx, initial_metadata, - &calld->stream_compression_algorithm_storage, + initial_metadata, &calld->stream_compression_algorithm_storage, grpc_stream_compression_encoding_mdelem(stream_compression_algorithm)); - } else if (calld->compression_algorithm != GRPC_COMPRESS_NONE) { + } else if (calld->message_compression_algorithm != + GRPC_MESSAGE_COMPRESS_NONE) { error = grpc_metadata_batch_add_tail( - exec_ctx, initial_metadata, &calld->compression_algorithm_storage, - grpc_compression_encoding_mdelem(calld->compression_algorithm)); + initial_metadata, &calld->compression_algorithm_storage, + grpc_message_compression_encoding_mdelem( + calld->message_compression_algorithm)); } if (error != GRPC_ERROR_NONE) return error; /* convey supported compression algorithms */ error = grpc_metadata_batch_add_tail( - exec_ctx, initial_metadata, &calld->accept_encoding_storage, + initial_metadata, &calld->accept_encoding_storage, GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS( - channeld->supported_compression_algorithms)); + channeld->supported_message_compression_algorithms)); if (error != GRPC_ERROR_NONE) return error; - /* Do not overwrite accept-encoding header if it already presents. */ + /* Do not overwrite accept-encoding header if it already presents (e.g. added + * by some proxy). 
*/ if (!initial_metadata->idx.named.accept_encoding) { error = grpc_metadata_batch_add_tail( - exec_ctx, initial_metadata, &calld->accept_stream_encoding_storage, + initial_metadata, &calld->accept_stream_encoding_storage, GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS( channeld->supported_stream_compression_algorithms)); } @@ -211,96 +197,89 @@ static grpc_error *process_send_initial_metadata( return error; } -static void send_message_on_complete(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &calld->slices); - GRPC_CLOSURE_RUN(exec_ctx, calld->original_send_message_on_complete, +static void send_message_on_complete(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + call_data* calld = static_cast(elem->call_data); + grpc_slice_buffer_reset_and_unref_internal(&calld->slices); + GRPC_CLOSURE_RUN(calld->original_send_message_on_complete, GRPC_ERROR_REF(error)); } -static void send_message_batch_continue(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - call_data *calld = (call_data *)elem->call_data; +static void send_message_batch_continue(grpc_call_element* elem) { + call_data* calld = static_cast(elem->call_data); // Note: The call to grpc_call_next_op() results in yielding the // call combiner, so we need to clear calld->send_message_batch // before we do that. - grpc_transport_stream_op_batch *send_message_batch = + grpc_transport_stream_op_batch* send_message_batch = calld->send_message_batch; - calld->send_message_batch = NULL; - grpc_call_next_op(exec_ctx, elem, send_message_batch); + calld->send_message_batch = nullptr; + grpc_call_next_op(elem, send_message_batch); } -static void finish_send_message(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - call_data *calld = (call_data *)elem->call_data; +static void finish_send_message(grpc_call_element* elem) { + call_data* calld = static_cast(elem->call_data); // Compress the data if appropriate. grpc_slice_buffer tmp; grpc_slice_buffer_init(&tmp); uint32_t send_flags = - calld->send_message_batch->payload->send_message.send_message->flags; - bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm, + calld->send_message_batch->payload->send_message.send_message->flags(); + bool did_compress = grpc_msg_compress(calld->message_compression_algorithm, &calld->slices, &tmp); if (did_compress) { - if (GRPC_TRACER_ON(grpc_compression_trace)) { - const char *algo_name; + if (grpc_compression_trace.enabled()) { + const char* algo_name; const size_t before_size = calld->slices.length; const size_t after_size = tmp.length; - const float savings_ratio = 1.0f - (float)after_size / (float)before_size; - GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm, - &algo_name)); - gpr_log(GPR_DEBUG, "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR - " bytes (%.2f%% savings)", + const float savings_ratio = 1.0f - static_cast(after_size) / + static_cast(before_size); + GPR_ASSERT(grpc_message_compression_algorithm_name( + calld->message_compression_algorithm, &algo_name)); + gpr_log(GPR_INFO, + "Compressed[%s] %" PRIuPTR " bytes vs. 
%" PRIuPTR + " bytes (%.2f%% savings)", algo_name, before_size, after_size, 100 * savings_ratio); } grpc_slice_buffer_swap(&calld->slices, &tmp); send_flags |= GRPC_WRITE_INTERNAL_COMPRESS; } else { - if (GRPC_TRACER_ON(grpc_compression_trace)) { - const char *algo_name; - GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm, - &algo_name)); - gpr_log(GPR_DEBUG, + if (grpc_compression_trace.enabled()) { + const char* algo_name; + GPR_ASSERT(grpc_message_compression_algorithm_name( + calld->message_compression_algorithm, &algo_name)); + gpr_log(GPR_INFO, "Algorithm '%s' enabled but decided not to compress. Input size: " "%" PRIuPTR, algo_name, calld->slices.length); } } - grpc_slice_buffer_destroy_internal(exec_ctx, &tmp); + grpc_slice_buffer_destroy_internal(&tmp); // Swap out the original byte stream with our new one and send the // batch down. - grpc_byte_stream_destroy( - exec_ctx, calld->send_message_batch->payload->send_message.send_message); - grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices, - send_flags); - calld->send_message_batch->payload->send_message.send_message = - &calld->replacement_stream.base; + calld->replacement_stream.Init(&calld->slices, send_flags); + calld->send_message_batch->payload->send_message.send_message.reset( + calld->replacement_stream.get()); calld->original_send_message_on_complete = calld->send_message_batch->on_complete; calld->send_message_batch->on_complete = &calld->send_message_on_complete; - send_message_batch_continue(exec_ctx, elem); + send_message_batch_continue(elem); } -static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_error *error) { - call_data *calld = (call_data *)arg; - if (calld->send_message_batch != NULL) { +static void fail_send_message_batch_in_call_combiner(void* arg, + grpc_error* error) { + call_data* calld = static_cast(arg); + if (calld->send_message_batch != nullptr) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error), - calld->call_combiner); - calld->send_message_batch = NULL; + calld->send_message_batch, GRPC_ERROR_REF(error), calld->call_combiner); + calld->send_message_batch = nullptr; } } // Pulls a slice from the send_message byte stream and adds it to calld->slices. -static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx, - call_data *calld) { +static grpc_error* pull_slice_from_send_message(call_data* calld) { grpc_slice incoming_slice; - grpc_error *error = grpc_byte_stream_pull( - exec_ctx, calld->send_message_batch->payload->send_message.send_message, - &incoming_slice); + grpc_error* error = + calld->send_message_batch->payload->send_message.send_message->Pull( + &incoming_slice); if (error == GRPC_ERROR_NONE) { grpc_slice_buffer_add(&calld->slices, incoming_slice); } @@ -309,110 +288,102 @@ static grpc_error *pull_slice_from_send_message(grpc_exec_ctx *exec_ctx, // Reads as many slices as possible from the send_message byte stream. // If all data has been read, invokes finish_send_message(). Otherwise, -// an async call to grpc_byte_stream_next() has been started, which will +// an async call to ByteStream::Next() has been started, which will // eventually result in calling on_send_message_next_done(). 
-static void continue_reading_send_message(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem) { - call_data *calld = (call_data *)elem->call_data; - while (grpc_byte_stream_next( - exec_ctx, calld->send_message_batch->payload->send_message.send_message, - ~(size_t)0, &calld->on_send_message_next_done)) { - grpc_error *error = pull_slice_from_send_message(exec_ctx, calld); +static void continue_reading_send_message(grpc_call_element* elem) { + call_data* calld = static_cast(elem->call_data); + while (calld->send_message_batch->payload->send_message.send_message->Next( + ~static_cast(0), &calld->on_send_message_next_done)) { + grpc_error* error = pull_slice_from_send_message(calld); if (error != GRPC_ERROR_NONE) { // Closure callback; does not take ownership of error. - fail_send_message_batch_in_call_combiner(exec_ctx, calld, error); + fail_send_message_batch_in_call_combiner(calld, error); GRPC_ERROR_UNREF(error); return; } - if (calld->slices.length == - calld->send_message_batch->payload->send_message.send_message->length) { - finish_send_message(exec_ctx, elem); + if (calld->slices.length == calld->send_message_batch->payload->send_message + .send_message->length()) { + finish_send_message(elem); break; } } } -// Async callback for grpc_byte_stream_next(). -static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; +// Async callback for ByteStream::Next(). +static void on_send_message_next_done(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + call_data* calld = static_cast(elem->call_data); if (error != GRPC_ERROR_NONE) { // Closure callback; does not take ownership of error. - fail_send_message_batch_in_call_combiner(exec_ctx, calld, error); + fail_send_message_batch_in_call_combiner(calld, error); return; } - error = pull_slice_from_send_message(exec_ctx, calld); + error = pull_slice_from_send_message(calld); if (error != GRPC_ERROR_NONE) { // Closure callback; does not take ownership of error. 
- fail_send_message_batch_in_call_combiner(exec_ctx, calld, error); + fail_send_message_batch_in_call_combiner(calld, error); GRPC_ERROR_UNREF(error); return; } if (calld->slices.length == - calld->send_message_batch->payload->send_message.send_message->length) { - finish_send_message(exec_ctx, elem); + calld->send_message_batch->payload->send_message.send_message->length()) { + finish_send_message(elem); } else { - continue_reading_send_message(exec_ctx, elem); + continue_reading_send_message(elem); } } -static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *unused) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; +static void start_send_message_batch(void* arg, grpc_error* unused) { + grpc_call_element* elem = static_cast(arg); + call_data* calld = static_cast(elem->call_data); if (skip_compression( elem, - calld->send_message_batch->payload->send_message.send_message->flags, + calld->send_message_batch->payload->send_message.send_message + ->flags(), calld->send_initial_metadata_state == HAS_COMPRESSION_ALGORITHM)) { - send_message_batch_continue(exec_ctx, elem); + send_message_batch_continue(elem); } else { - continue_reading_send_message(exec_ctx, elem); + continue_reading_send_message(elem); } } static void compress_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { - call_data *calld = (call_data *)elem->call_data; - GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0); + grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { + GPR_TIMER_SCOPE("compress_start_transport_stream_op_batch", 0); + call_data* calld = static_cast(elem->call_data); // Handle cancel_stream. if (batch->cancel_stream) { GRPC_ERROR_UNREF(calld->cancel_error); calld->cancel_error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); - if (calld->send_message_batch != NULL) { + if (calld->send_message_batch != nullptr) { if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) { GRPC_CALL_COMBINER_START( - exec_ctx, calld->call_combiner, + calld->call_combiner, GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld, grpc_schedule_on_exec_ctx), GRPC_ERROR_REF(calld->cancel_error), "failing send_message op"); } else { - grpc_byte_stream_shutdown( - exec_ctx, - calld->send_message_batch->payload->send_message.send_message, + calld->send_message_batch->payload->send_message.send_message->Shutdown( GRPC_ERROR_REF(calld->cancel_error)); } } } else if (calld->cancel_error != GRPC_ERROR_NONE) { grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, GRPC_ERROR_REF(calld->cancel_error), - calld->call_combiner); - goto done; + batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner); + return; } // Handle send_initial_metadata. 
if (batch->send_initial_metadata) { GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN); bool has_compression_algorithm; - grpc_error *error = process_send_initial_metadata( - exec_ctx, elem, - batch->payload->send_initial_metadata.send_initial_metadata, + grpc_error* error = process_send_initial_metadata( + elem, batch->payload->send_initial_metadata.send_initial_metadata, &has_compression_algorithm); if (error != GRPC_ERROR_NONE) { - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error, + grpc_transport_stream_op_batch_finish_with_failure(batch, error, calld->call_combiner); - goto done; + return; } calld->send_initial_metadata_state = has_compression_algorithm ? HAS_COMPRESSION_ALGORITHM @@ -422,16 +393,16 @@ static void compress_start_transport_stream_op_batch( // for this, since we can't send two batches down while holding the // call combiner, since the connected_channel filter (at the bottom of // the call stack) will release the call combiner for each batch it sees. - if (calld->send_message_batch != NULL) { + if (calld->send_message_batch != nullptr) { GRPC_CALL_COMBINER_START( - exec_ctx, calld->call_combiner, + calld->call_combiner, &calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE, "starting send_message after send_initial_metadata"); } } // Handle send_message. if (batch->send_message) { - GPR_ASSERT(calld->send_message_batch == NULL); + GPR_ASSERT(calld->send_message_batch == nullptr); calld->send_message_batch = batch; // If we have not yet seen send_initial_metadata, then we have to // wait. We save the batch in calld and then drop the call @@ -439,24 +410,21 @@ static void compress_start_transport_stream_op_batch( // send_initial_metadata. if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) { GRPC_CALL_COMBINER_STOP( - exec_ctx, calld->call_combiner, + calld->call_combiner, "send_message batch pending send_initial_metadata"); - goto done; + return; } - start_send_message_batch(exec_ctx, elem, GRPC_ERROR_NONE); + start_send_message_batch(elem, GRPC_ERROR_NONE); } else { // Pass control down the stack. 
- grpc_call_next_op(exec_ctx, elem, batch); + grpc_call_next_op(elem, batch); } -done: - GPR_TIMER_END("compress_start_transport_stream_op_batch", 0); } /* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = (call_data *)elem->call_data; +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); calld->call_combiner = args->call_combiner; calld->cancel_error = GRPC_ERROR_NONE; grpc_slice_buffer_init(&calld->slices); @@ -470,26 +438,24 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, } /* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *ignored) { - call_data *calld = (call_data *)elem->call_data; - grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices); +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) { + call_data* calld = static_cast(elem->call_data); + grpc_slice_buffer_destroy_internal(&calld->slices); GRPC_ERROR_UNREF(calld->cancel_error); } /* Constructor for channel_data */ -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - channel_data *channeld = (channel_data *)elem->channel_data; +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + channel_data* channeld = static_cast(elem->channel_data); - /* Configuration for message compression */ channeld->enabled_algorithms_bitset = grpc_channel_args_compression_algorithm_get_states(args->channel_args); - channeld->default_compression_algorithm = grpc_channel_args_get_compression_algorithm(args->channel_args); + /* Make sure the default isn't disabled. 
*/ if (!GPR_BITGET(channeld->enabled_algorithms_bitset, channeld->default_compression_algorithm)) { @@ -499,39 +465,25 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, channeld->default_compression_algorithm = GRPC_COMPRESS_NONE; } - channeld->supported_compression_algorithms = + uint32_t supported_compression_algorithms = (((1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1) & channeld->enabled_algorithms_bitset) | 1u; - /* Configuration for stream compression */ - channeld->enabled_stream_compression_algorithms_bitset = - grpc_channel_args_stream_compression_algorithm_get_states( - args->channel_args); - - channeld->default_stream_compression_algorithm = - grpc_channel_args_get_stream_compression_algorithm(args->channel_args); - - if (!GPR_BITGET(channeld->enabled_stream_compression_algorithms_bitset, - channeld->default_stream_compression_algorithm)) { - gpr_log(GPR_DEBUG, - "stream compression algorithm %d not enabled: switching to none", - channeld->default_stream_compression_algorithm); - channeld->default_stream_compression_algorithm = GRPC_STREAM_COMPRESS_NONE; - } + channeld->supported_message_compression_algorithms = + grpc_compression_bitset_to_message_bitset( + supported_compression_algorithms); channeld->supported_stream_compression_algorithms = - (((1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1) & - channeld->enabled_stream_compression_algorithms_bitset) | - 1u; + grpc_compression_bitset_to_stream_bitset( + supported_compression_algorithms); GPR_ASSERT(!args->is_last); return GRPC_ERROR_NONE; } /* Destructor for channel data */ -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) {} +static void destroy_channel_elem(grpc_channel_element* elem) {} const grpc_channel_filter grpc_message_compress_filter = { compress_start_transport_stream_op_batch, diff --git a/Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.h b/Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.h index c121a391e..e163e3cf9 100644 --- a/Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.h +++ b/Sources/CgRPC/src/core/ext/filters/http/message_compress/message_compress_filter.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_HTTP_MESSAGE_COMPRESS_MESSAGE_COMPRESS_FILTER_H #define GRPC_CORE_EXT_FILTERS_HTTP_MESSAGE_COMPRESS_MESSAGE_COMPRESS_FILTER_H +#include + #include #include "src/core/lib/channel/channel_stack.h" @@ -48,4 +50,4 @@ extern const grpc_channel_filter grpc_message_compress_filter; #endif /* GRPC_CORE_EXT_FILTERS_HTTP_MESSAGE_COMPRESS_MESSAGE_COMPRESS_FILTER_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.c b/Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.cc similarity index 63% rename from Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.c rename to Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.cc index 03958136b..c20201587 100644 --- a/Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.c +++ b/Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.cc @@ -16,11 +16,14 @@ * */ +#include + #include "src/core/ext/filters/http/server/http_server_filter.h" #include #include #include +#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/b64.h" #include "src/core/lib/slice/percent_encoding.h" @@ -31,8 +34,9 @@ #define EXPECTED_CONTENT_TYPE "application/grpc" #define 
EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1 -typedef struct call_data { - grpc_call_combiner *call_combiner; +namespace { +struct call_data { + grpc_call_combiner* call_combiner; grpc_linked_mdelem status; grpc_linked_mdelem content_type; @@ -42,17 +46,16 @@ typedef struct call_data { /* flag to ensure payload_bin is delivered only once */ bool payload_bin_delivered; - grpc_metadata_batch *recv_initial_metadata; - uint32_t *recv_initial_metadata_flags; + grpc_metadata_batch* recv_initial_metadata; + uint32_t* recv_initial_metadata_flags; /** Closure to call when finished with the hs_on_recv hook */ - grpc_closure *on_done_recv; + grpc_closure* on_done_recv; /** Closure to call when we retrieve read message from the path URI */ - grpc_closure *recv_message_ready; - grpc_closure *on_complete; - grpc_byte_stream **pp_recv_message; - grpc_slice_buffer read_slice_buffer; - grpc_slice_buffer_stream read_stream; + grpc_closure* recv_message_ready; + grpc_closure* on_complete; + grpc_core::OrphanablePtr* pp_recv_message; + grpc_core::ManualConstructor read_stream; /** Receive closures are chained: we inject this closure as the on_done_recv up-call on transport_op, and remember to call our on_done_recv member @@ -60,30 +63,31 @@ typedef struct call_data { grpc_closure hs_on_recv; grpc_closure hs_on_complete; grpc_closure hs_recv_message_ready; -} call_data; +}; -typedef struct channel_data { uint8_t unused; } channel_data; +struct channel_data { + uint8_t unused; +}; +} // namespace -static grpc_error *server_filter_outgoing_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_metadata_batch *b) { - if (b->idx.named.grpc_message != NULL) { +static grpc_error* server_filter_outgoing_metadata(grpc_call_element* elem, + grpc_metadata_batch* b) { + if (b->idx.named.grpc_message != nullptr) { grpc_slice pct_encoded_msg = grpc_percent_encode_slice( GRPC_MDVALUE(b->idx.named.grpc_message->md), grpc_compatible_percent_encoding_unreserved_bytes); if (grpc_slice_is_equivalent(pct_encoded_msg, GRPC_MDVALUE(b->idx.named.grpc_message->md))) { - grpc_slice_unref_internal(exec_ctx, pct_encoded_msg); + grpc_slice_unref_internal(pct_encoded_msg); } else { - grpc_metadata_batch_set_value(exec_ctx, b->idx.named.grpc_message, - pct_encoded_msg); + grpc_metadata_batch_set_value(b->idx.named.grpc_message, pct_encoded_msg); } } return GRPC_ERROR_NONE; } -static void add_error(const char *error_name, grpc_error **cumulative, - grpc_error *new_err) { +static void add_error(const char* error_name, grpc_error** cumulative, + grpc_error* new_err) { if (new_err == GRPC_ERROR_NONE) return; if (*cumulative == GRPC_ERROR_NONE) { *cumulative = GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_name); @@ -91,14 +95,13 @@ static void add_error(const char *error_name, grpc_error **cumulative, *cumulative = grpc_error_add_child(*cumulative, new_err); } -static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_metadata_batch *b) { - call_data *calld = (call_data *)elem->call_data; - grpc_error *error = GRPC_ERROR_NONE; - static const char *error_name = "Failed processing incoming headers"; +static grpc_error* server_filter_incoming_metadata(grpc_call_element* elem, + grpc_metadata_batch* b) { + call_data* calld = static_cast(elem->call_data); + grpc_error* error = GRPC_ERROR_NONE; + static const char* error_name = "Failed processing incoming headers"; - if (b->idx.named.method != NULL) { + if (b->idx.named.method != nullptr) { if 
(grpc_mdelem_eq(b->idx.named.method->md, GRPC_MDELEM_METHOD_POST)) { *calld->recv_initial_metadata_flags &= ~(GRPC_INITIAL_METADATA_CACHEABLE_REQUEST | @@ -121,7 +124,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad header"), b->idx.named.method->md)); } - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.method); + grpc_metadata_batch_remove(b, b->idx.named.method); } else { add_error( error_name, &error, @@ -130,14 +133,14 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, GRPC_ERROR_STR_KEY, grpc_slice_from_static_string(":method"))); } - if (b->idx.named.te != NULL) { + if (b->idx.named.te != nullptr) { if (!grpc_mdelem_eq(b->idx.named.te->md, GRPC_MDELEM_TE_TRAILERS)) { add_error(error_name, &error, grpc_attach_md_to_error( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad header"), b->idx.named.te->md)); } - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.te); + grpc_metadata_batch_remove(b, b->idx.named.te); } else { add_error(error_name, &error, grpc_error_set_str( @@ -145,7 +148,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, GRPC_ERROR_STR_KEY, grpc_slice_from_static_string("te"))); } - if (b->idx.named.scheme != NULL) { + if (b->idx.named.scheme != nullptr) { if (!grpc_mdelem_eq(b->idx.named.scheme->md, GRPC_MDELEM_SCHEME_HTTP) && !grpc_mdelem_eq(b->idx.named.scheme->md, GRPC_MDELEM_SCHEME_HTTPS) && !grpc_mdelem_eq(b->idx.named.scheme->md, GRPC_MDELEM_SCHEME_GRPC)) { @@ -154,7 +157,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad header"), b->idx.named.scheme->md)); } - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.scheme); + grpc_metadata_batch_remove(b, b->idx.named.scheme); } else { add_error( error_name, &error, @@ -163,7 +166,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, GRPC_ERROR_STR_KEY, grpc_slice_from_static_string(":scheme"))); } - if (b->idx.named.content_type != NULL) { + if (b->idx.named.content_type != nullptr) { if (!grpc_mdelem_eq(b->idx.named.content_type->md, GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC)) { if (grpc_slice_buf_start_eq(GRPC_MDVALUE(b->idx.named.content_type->md), @@ -183,16 +186,16 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, } else { /* TODO(klempner): We're currently allowing this, but we shouldn't see it without a proxy so log for now. */ - char *val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md), + char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.content_type->md), GPR_DUMP_ASCII); gpr_log(GPR_INFO, "Unexpected content-type '%s'", val); gpr_free(val); } } - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_type); + grpc_metadata_batch_remove(b, b->idx.named.content_type); } - if (b->idx.named.path == NULL) { + if (b->idx.named.path == nullptr) { add_error(error_name, &error, grpc_error_set_str( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing header"), @@ -203,7 +206,7 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, * query parameter which is base64 encoded request payload. */ const char k_query_separator = '?'; grpc_slice path_slice = GRPC_MDVALUE(b->idx.named.path->md); - uint8_t *path_ptr = (uint8_t *)GRPC_SLICE_START_PTR(path_slice); + uint8_t* path_ptr = GRPC_SLICE_START_PTR(path_slice); size_t path_length = GRPC_SLICE_LENGTH(path_slice); /* offset of the character '?' 
*/ size_t offset = 0; @@ -216,41 +219,43 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, /* substitute path metadata with just the path (not query) */ grpc_mdelem mdelem_path_without_query = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_PATH, grpc_slice_sub(path_slice, 0, offset)); + GRPC_MDSTR_PATH, grpc_slice_sub(path_slice, 0, offset)); - grpc_metadata_batch_substitute(exec_ctx, b, b->idx.named.path, + grpc_metadata_batch_substitute(b, b->idx.named.path, mdelem_path_without_query); /* decode payload from query and add to the slice buffer to be returned */ const int k_url_safe = 1; + grpc_slice_buffer read_slice_buffer; + grpc_slice_buffer_init(&read_slice_buffer); grpc_slice_buffer_add( - &calld->read_slice_buffer, + &read_slice_buffer, grpc_base64_decode_with_len( - exec_ctx, (const char *)GRPC_SLICE_START_PTR(query_slice), + reinterpret_cast GRPC_SLICE_START_PTR(query_slice), GRPC_SLICE_LENGTH(query_slice), k_url_safe)); - grpc_slice_buffer_stream_init(&calld->read_stream, - &calld->read_slice_buffer, 0); + calld->read_stream.Init(&read_slice_buffer, 0); + grpc_slice_buffer_destroy_internal(&read_slice_buffer); calld->seen_path_with_query = true; - grpc_slice_unref_internal(exec_ctx, query_slice); + grpc_slice_unref_internal(query_slice); } else { gpr_log(GPR_ERROR, "GET request without QUERY"); } } - if (b->idx.named.host != NULL && b->idx.named.authority == NULL) { - grpc_linked_mdelem *el = b->idx.named.host; + if (b->idx.named.host != nullptr && b->idx.named.authority == nullptr) { + grpc_linked_mdelem* el = b->idx.named.host; grpc_mdelem md = GRPC_MDELEM_REF(el->md); - grpc_metadata_batch_remove(exec_ctx, b, el); - add_error( - error_name, &error, - grpc_metadata_batch_add_head( - exec_ctx, b, el, grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_AUTHORITY, - grpc_slice_ref_internal(GRPC_MDVALUE(md))))); - GRPC_MDELEM_UNREF(exec_ctx, md); + grpc_metadata_batch_remove(b, el); + add_error(error_name, &error, + grpc_metadata_batch_add_head( + b, el, + grpc_mdelem_from_slices( + GRPC_MDSTR_AUTHORITY, + grpc_slice_ref_internal(GRPC_MDVALUE(md))))); + GRPC_MDELEM_UNREF(md); } - if (b->idx.named.authority == NULL) { + if (b->idx.named.authority == nullptr) { add_error( error_name, &error, grpc_error_set_str( @@ -261,85 +266,79 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx, return error; } -static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *err) { - grpc_call_element *elem = (grpc_call_element *)user_data; - call_data *calld = (call_data *)elem->call_data; +static void hs_on_recv(void* user_data, grpc_error* err) { + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); if (err == GRPC_ERROR_NONE) { - err = server_filter_incoming_metadata(exec_ctx, elem, - calld->recv_initial_metadata); + err = server_filter_incoming_metadata(elem, calld->recv_initial_metadata); } else { GRPC_ERROR_REF(err); } - GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv, err); + GRPC_CLOSURE_RUN(calld->on_done_recv, err); } -static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *err) { - grpc_call_element *elem = (grpc_call_element *)user_data; - call_data *calld = (call_data *)elem->call_data; +static void hs_on_complete(void* user_data, grpc_error* err) { + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); /* Call recv_message_ready if we got the payload via the path field */ - if 
(calld->seen_path_with_query && calld->recv_message_ready != NULL) { - *calld->pp_recv_message = calld->payload_bin_delivered - ? NULL - : (grpc_byte_stream *)&calld->read_stream; + if (calld->seen_path_with_query && calld->recv_message_ready != nullptr) { + calld->pp_recv_message->reset( + calld->payload_bin_delivered ? nullptr + : reinterpret_cast( + calld->read_stream.get())); // Re-enter call combiner for recv_message_ready, since the surface // code will release the call combiner for each callback it receives. - GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner, - calld->recv_message_ready, GRPC_ERROR_REF(err), + GRPC_CALL_COMBINER_START(calld->call_combiner, calld->recv_message_ready, + GRPC_ERROR_REF(err), "resuming recv_message_ready from on_complete"); - calld->recv_message_ready = NULL; + calld->recv_message_ready = nullptr; calld->payload_bin_delivered = true; } - GRPC_CLOSURE_RUN(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err)); + GRPC_CLOSURE_RUN(calld->on_complete, GRPC_ERROR_REF(err)); } -static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *err) { - grpc_call_element *elem = (grpc_call_element *)user_data; - call_data *calld = (call_data *)elem->call_data; +static void hs_recv_message_ready(void* user_data, grpc_error* err) { + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); if (calld->seen_path_with_query) { // Do nothing. This is probably a GET request, and payload will be // returned in hs_on_complete callback. // Note that we release the call combiner here, so that other // callbacks can run. - GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, + GRPC_CALL_COMBINER_STOP(calld->call_combiner, "pausing recv_message_ready until on_complete"); } else { - GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err)); + GRPC_CLOSURE_RUN(calld->recv_message_ready, GRPC_ERROR_REF(err)); } } -static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { +static grpc_error* hs_mutate_op(grpc_call_element* elem, + grpc_transport_stream_op_batch* op) { /* grab pointers to our data from the call element */ - call_data *calld = (call_data *)elem->call_data; + call_data* calld = static_cast(elem->call_data); if (op->send_initial_metadata) { - grpc_error *error = GRPC_ERROR_NONE; - static const char *error_name = "Failed sending initial metadata"; - add_error( - error_name, &error, - grpc_metadata_batch_add_head( - exec_ctx, op->payload->send_initial_metadata.send_initial_metadata, - &calld->status, GRPC_MDELEM_STATUS_200)); + grpc_error* error = GRPC_ERROR_NONE; + static const char* error_name = "Failed sending initial metadata"; + add_error(error_name, &error, + grpc_metadata_batch_add_head( + op->payload->send_initial_metadata.send_initial_metadata, + &calld->status, GRPC_MDELEM_STATUS_200)); + add_error(error_name, &error, + grpc_metadata_batch_add_tail( + op->payload->send_initial_metadata.send_initial_metadata, + &calld->content_type, + GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC)); add_error( error_name, &error, - grpc_metadata_batch_add_tail( - exec_ctx, op->payload->send_initial_metadata.send_initial_metadata, - &calld->content_type, - GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC)); - add_error(error_name, &error, - server_filter_outgoing_metadata( - exec_ctx, elem, - op->payload->send_initial_metadata.send_initial_metadata)); + server_filter_outgoing_metadata( + elem, 
op->payload->send_initial_metadata.send_initial_metadata)); if (error != GRPC_ERROR_NONE) return error; } if (op->recv_initial_metadata) { /* substitute our callback for the higher callback */ - GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags != NULL); + GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags != nullptr); calld->recv_initial_metadata = op->payload->recv_initial_metadata.recv_initial_metadata; calld->recv_initial_metadata_flags = @@ -364,9 +363,8 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx, } if (op->send_trailing_metadata) { - grpc_error *error = server_filter_outgoing_metadata( - exec_ctx, elem, - op->payload->send_trailing_metadata.send_trailing_metadata); + grpc_error* error = server_filter_outgoing_metadata( + elem, op->payload->send_trailing_metadata.send_trailing_metadata); if (error != GRPC_ERROR_NONE) return error; } @@ -374,26 +372,23 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx, } static void hs_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - call_data *calld = (call_data *)elem->call_data; - GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0); - grpc_error *error = hs_mutate_op(exec_ctx, elem, op); + grpc_call_element* elem, grpc_transport_stream_op_batch* op) { + GPR_TIMER_SCOPE("hs_start_transport_stream_op_batch", 0); + call_data* calld = static_cast(elem->call_data); + grpc_error* error = hs_mutate_op(elem, op); if (error != GRPC_ERROR_NONE) { - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error, + grpc_transport_stream_op_batch_finish_with_failure(op, error, calld->call_combiner); } else { - grpc_call_next_op(exec_ctx, elem, op); + grpc_call_next_op(elem, op); } - GPR_TIMER_END("hs_start_transport_stream_op_batch", 0); } /* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { /* grab pointers to our data from the call element */ - call_data *calld = (call_data *)elem->call_data; + call_data* calld = static_cast(elem->call_data); /* initialize members */ calld->call_combiner = args->call_combiner; GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem, @@ -402,29 +397,28 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&calld->hs_recv_message_ready, hs_recv_message_ready, elem, grpc_schedule_on_exec_ctx); - grpc_slice_buffer_init(&calld->read_slice_buffer); return GRPC_ERROR_NONE; } /* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *ignored) { - call_data *calld = (call_data *)elem->call_data; - grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer); +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) { + call_data* calld = static_cast(elem->call_data); + if (calld->seen_path_with_query && !calld->payload_bin_delivered) { + calld->read_stream->Orphan(); + } } /* Constructor for channel_data */ -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { GPR_ASSERT(!args->is_last); return 
GRPC_ERROR_NONE; } /* Destructor for channel data */ -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) {} +static void destroy_channel_elem(grpc_channel_element* elem) {} const grpc_channel_filter grpc_http_server_filter = { hs_start_transport_stream_op_batch, diff --git a/Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.h b/Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.h index c0f678a32..4eb130b1f 100644 --- a/Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.h +++ b/Sources/CgRPC/src/core/ext/filters/http/server/http_server_filter.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_HTTP_SERVER_HTTP_SERVER_FILTER_H #define GRPC_CORE_EXT_FILTERS_HTTP_SERVER_HTTP_SERVER_FILTER_H +#include + #include "src/core/lib/channel/channel_stack.h" /* Processes metadata on the client side for HTTP2 transports */ diff --git a/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.c b/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc similarity index 73% rename from Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.c rename to Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc index ca8a3b2a1..0d349e2a8 100644 --- a/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.c +++ b/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -31,7 +33,8 @@ #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/transport/static_metadata.h" -typedef struct call_data { +namespace { +struct call_data { intptr_t id; /**< an id unique to the call */ bool have_trailing_md_string; grpc_slice trailing_md_string; @@ -43,24 +46,24 @@ typedef struct call_data { /* stores the recv_initial_metadata op's ready closure, which we wrap with our * own (on_initial_md_ready) in order to capture the incoming initial metadata * */ - grpc_closure *ops_recv_initial_metadata_ready; + grpc_closure* ops_recv_initial_metadata_ready; /* to get notified of the availability of the incoming initial metadata. 
*/ grpc_closure on_initial_md_ready; - grpc_metadata_batch *recv_initial_metadata; -} call_data; + grpc_metadata_batch* recv_initial_metadata; +}; -typedef struct channel_data { +struct channel_data { intptr_t id; /**< an id unique to the channel */ -} channel_data; +}; +} // namespace -static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *err) { - grpc_call_element *elem = (grpc_call_element *)user_data; - call_data *calld = (call_data *)elem->call_data; +static void on_initial_md_ready(void* user_data, grpc_error* err) { + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); if (err == GRPC_ERROR_NONE) { - if (calld->recv_initial_metadata->idx.named.path != NULL) { + if (calld->recv_initial_metadata->idx.named.path != nullptr) { calld->service_method = grpc_slice_ref_internal( GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.path->md)); calld->have_service_method = true; @@ -68,27 +71,26 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data, err = grpc_error_add_child( err, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing :path header")); } - if (calld->recv_initial_metadata->idx.named.lb_token != NULL) { + if (calld->recv_initial_metadata->idx.named.lb_token != nullptr) { calld->initial_md_string = grpc_slice_ref_internal( GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.lb_token->md)); calld->have_initial_md_string = true; grpc_metadata_batch_remove( - exec_ctx, calld->recv_initial_metadata, + calld->recv_initial_metadata, calld->recv_initial_metadata->idx.named.lb_token); } } else { GRPC_ERROR_REF(err); } calld->ops_recv_initial_metadata_ready->cb( - exec_ctx, calld->ops_recv_initial_metadata_ready->cb_arg, err); + calld->ops_recv_initial_metadata_ready->cb_arg, err); GRPC_ERROR_UNREF(err); } /* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = (call_data *)elem->call_data; +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); calld->id = (intptr_t)args->call_stack; GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem, grpc_schedule_on_exec_ctx); @@ -108,10 +110,10 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, } /* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *ignored) { - call_data *calld = (call_data *)elem->call_data; +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) { + call_data* calld = static_cast(elem->call_data); /* TODO(dgq): do something with the data channel_data *chand = elem->channel_data; @@ -125,23 +127,22 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, */ if (calld->have_initial_md_string) { - grpc_slice_unref_internal(exec_ctx, calld->initial_md_string); + grpc_slice_unref_internal(calld->initial_md_string); } if (calld->have_trailing_md_string) { - grpc_slice_unref_internal(exec_ctx, calld->trailing_md_string); + grpc_slice_unref_internal(calld->trailing_md_string); } if (calld->have_service_method) { - grpc_slice_unref_internal(exec_ctx, calld->service_method); + grpc_slice_unref_internal(calld->service_method); } } /* Constructor for channel_data */ -static grpc_error 
*init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { GPR_ASSERT(!args->is_last); - channel_data *chand = (channel_data *)elem->channel_data; + channel_data* chand = static_cast(elem->channel_data); chand->id = (intptr_t)args->channel_stack; /* TODO(dgq): do something with the data @@ -158,8 +159,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, } /* Destructor for channel data */ -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { +static void destroy_channel_elem(grpc_channel_element* elem) { /* TODO(dgq): do something with the data channel_data *chand = elem->channel_data; grpc_load_reporting_call_data lr_call_data = { @@ -173,11 +173,10 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, */ } -static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx, - void *user_data, +static grpc_filtered_mdelem lr_trailing_md_filter(void* user_data, grpc_mdelem md) { - grpc_call_element *elem = (grpc_call_element *)user_data; - call_data *calld = (call_data *)elem->call_data; + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) { calld->trailing_md_string = GRPC_MDVALUE(md); return GRPC_FILTERED_REMOVE(); @@ -186,10 +185,9 @@ static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx, } static void lr_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - GPR_TIMER_BEGIN("lr_start_transport_stream_op_batch", 0); - call_data *calld = (call_data *)elem->call_data; + grpc_call_element* elem, grpc_transport_stream_op_batch* op) { + GPR_TIMER_SCOPE("lr_start_transport_stream_op_batch", 0); + call_data* calld = static_cast(elem->call_data); if (op->recv_initial_metadata) { /* substitute our callback for the higher callback */ @@ -203,14 +201,11 @@ static void lr_start_transport_stream_op_batch( GRPC_LOG_IF_ERROR( "grpc_metadata_batch_filter", grpc_metadata_batch_filter( - exec_ctx, op->payload->send_trailing_metadata.send_trailing_metadata, lr_trailing_md_filter, elem, "LR trailing metadata filtering error")); } - grpc_call_next_op(exec_ctx, elem, op); - - GPR_TIMER_END("lr_start_transport_stream_op_batch", 0); + grpc_call_next_op(elem, op); } const grpc_channel_filter grpc_server_load_reporting_filter = { diff --git a/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.h b/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.h index 9527868c9..b459a8ec5 100644 --- a/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.h +++ b/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_filter.h @@ -19,10 +19,12 @@ #ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H #define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H +#include + #include "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h" #include "src/core/lib/channel/channel_stack.h" extern const grpc_channel_filter grpc_server_load_reporting_filter; #endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c 
b/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc similarity index 80% rename from Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c rename to Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc index 2486ead42..667c0c56e 100644 --- a/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c +++ b/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc @@ -32,30 +32,31 @@ #include "src/core/lib/surface/call.h" #include "src/core/lib/surface/channel_init.h" -static bool is_load_reporting_enabled(const grpc_channel_args *a) { +static bool is_load_reporting_enabled(const grpc_channel_args* a) { return grpc_channel_arg_get_bool( grpc_channel_args_find(a, GRPC_ARG_ENABLE_LOAD_REPORTING), false); } static bool maybe_add_server_load_reporting_filter( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) { - const grpc_channel_args *args = + grpc_channel_stack_builder* builder, void* arg) { + const grpc_channel_args* args = grpc_channel_stack_builder_get_channel_arguments(builder); - const grpc_channel_filter *filter = (const grpc_channel_filter *)arg; - grpc_channel_stack_builder_iterator *it = + const grpc_channel_filter* filter = + static_cast(arg); + grpc_channel_stack_builder_iterator* it = grpc_channel_stack_builder_iterator_find(builder, filter->name); const bool already_has_load_reporting_filter = !grpc_channel_stack_builder_iterator_is_end(it); grpc_channel_stack_builder_iterator_destroy(it); if (is_load_reporting_enabled(args) && !already_has_load_reporting_filter) { - return grpc_channel_stack_builder_prepend_filter(builder, filter, NULL, - NULL); + return grpc_channel_stack_builder_prepend_filter(builder, filter, nullptr, + nullptr); } return true; } grpc_arg grpc_load_reporting_enable_arg() { - return grpc_channel_arg_integer_create((char *)GRPC_ARG_ENABLE_LOAD_REPORTING, + return grpc_channel_arg_integer_create((char*)GRPC_ARG_ENABLE_LOAD_REPORTING, 1); } @@ -64,7 +65,7 @@ grpc_arg grpc_load_reporting_enable_arg() { void grpc_server_load_reporting_plugin_init(void) { grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, maybe_add_server_load_reporting_filter, - (void *)&grpc_server_load_reporting_filter); + (void*)&grpc_server_load_reporting_filter); } void grpc_server_load_reporting_plugin_shutdown() {} diff --git a/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h b/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h index 65a6d0900..c20aaa744 100644 --- a/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +++ b/Sources/CgRPC/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H #define GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H +#include + #include #include "src/core/lib/channel/channel_stack.h" @@ -45,15 +47,15 @@ typedef struct grpc_load_reporting_call_data { /** Only valid when \a source is \a GRPC_LR_POINT_CALL_DESTRUCTION, that is, * once the call has completed */ - const grpc_call_final_info *final_info; + const grpc_call_final_info* final_info; - const char *initial_md_string; /**< value string for LR's initial md key */ - const char *trailing_md_string; /**< value string for LR's trailing md key */ - const char *method_name; /**< Corresponds to :path header */ + const char* 
initial_md_string; /**< value string for LR's initial md key */ + const char* trailing_md_string; /**< value string for LR's trailing md key */ + const char* method_name; /**< Corresponds to :path header */ } grpc_load_reporting_call_data; /** Return a \a grpc_arg enabling load reporting */ grpc_arg grpc_load_reporting_enable_arg(); #endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.c b/Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.cc similarity index 50% rename from Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.c rename to Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.cc index 0ac803ed4..1fe8288bd 100644 --- a/Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.c +++ b/Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/filters/max_age/max_age_filter.h" #include @@ -33,11 +35,18 @@ #define MAX_CONNECTION_AGE_JITTER 0.1 #define MAX_CONNECTION_AGE_INTEGER_OPTIONS \ - (grpc_integer_options) { DEFAULT_MAX_CONNECTION_AGE_MS, 1, INT_MAX } + { DEFAULT_MAX_CONNECTION_AGE_MS, 1, INT_MAX } #define MAX_CONNECTION_IDLE_INTEGER_OPTIONS \ - (grpc_integer_options) { DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX } + { DEFAULT_MAX_CONNECTION_IDLE_MS, 1, INT_MAX } + +/* States for idle_state in channel_data */ +#define MAX_IDLE_STATE_INIT ((gpr_atm)0) +#define MAX_IDLE_STATE_SEEN_EXIT_IDLE ((gpr_atm)1) +#define MAX_IDLE_STATE_SEEN_ENTER_IDLE ((gpr_atm)2) +#define MAX_IDLE_STATE_TIMER_SET ((gpr_atm)3) -typedef struct channel_data { +namespace { +struct channel_data { /* We take a reference to the channel stack for the timer callback */ grpc_channel_stack* channel_stack; /* Guards access to max_age_timer, max_age_timer_pending, max_age_grace_timer @@ -56,14 +65,14 @@ typedef struct channel_data { max_connection_idle */ grpc_timer max_idle_timer; /* Allowed max time a channel may have no outstanding rpcs */ - gpr_timespec max_connection_idle; + grpc_millis max_connection_idle; /* Allowed max time a channel may exist */ - gpr_timespec max_connection_age; + grpc_millis max_connection_age; /* Allowed grace period after the channel reaches its max age */ - gpr_timespec max_connection_age_grace; + grpc_millis max_connection_age_grace; /* Closure to run when the channel's idle duration reaches max_connection_idle and should be closed gracefully */ - grpc_closure close_max_idle_channel; + grpc_closure max_idle_timer_cb; /* Closure to run when the channel reaches its max age and should be closed gracefully */ grpc_closure close_max_age_channel; @@ -84,99 +93,222 @@ typedef struct channel_data { grpc_connectivity_state connectivity_state; /* Number of active calls */ gpr_atm call_count; -} channel_data; + /* TODO(zyc): C++lize this state machine */ + /* 'idle_state' holds the states of max_idle_timer and channel idleness. + It can contain one of the following values: + +--------------------------------+----------------+---------+ + | idle_state | max_idle_timer | channel | + +--------------------------------+----------------+---------+ + | MAX_IDLE_STATE_INIT | unset | busy | + | MAX_IDLE_STATE_TIMER_SET | set, valid | idle | + | MAX_IDLE_STATE_SEEN_EXIT_IDLE | set, invalid | busy | + | MAX_IDLE_STATE_SEEN_ENTER_IDLE | set, invalid | idle | + +--------------------------------+----------------+---------+ + + MAX_IDLE_STATE_INIT: The initial and final state of 'idle_state'. 
The + channel has 1 or 1+ active calls, and the the timer is not set. Note that + we may put a virtual call to hold this state at channel initialization or + shutdown, so that the channel won't enter other states. + + MAX_IDLE_STATE_TIMER_SET: The state after the timer is set and no calls + have arrived after the timer is set. The channel must have 0 active call in + this state. If the timer is fired in this state, we will close the channel + due to idleness. + + MAX_IDLE_STATE_SEEN_EXIT_IDLE: The state after the timer is set and at + least one call has arrived after the timer is set. The channel must have 1 + or 1+ active calls in this state. If the timer is fired in this state, we + won't reschudle it. + + MAX_IDLE_STATE_SEEN_ENTER_IDLE: The state after the timer is set and the at + least one call has arrived after the timer is set, BUT the channel + currently has 1 or 1+ active calls. If the timer is fired in this state, we + will reschudle it. + + max_idle_timer will not be cancelled (unless the channel is shutting down). + If the timer callback is called when the max_idle_timer is valid (i.e. + idle_state is MAX_IDLE_STATE_TIMER_SET), the channel will be closed due to + idleness, otherwise the channel won't be changed. + + State transitions: + MAX_IDLE_STATE_INIT <-------3------ MAX_IDLE_STATE_SEEN_EXIT_IDLE + ^ | ^ ^ | + | | | | | + 1 2 +-----------4------------+ 6 7 + | | | | | + | v | | v + MAX_IDLE_STATE_TIMER_SET <----5------ MAX_IDLE_STATE_SEEN_ENTER_IDLE + + For 1, 3, 5 : See max_idle_timer_cb() function + For 2, 7 : See decrease_call_count() function + For 4, 6 : See increase_call_count() function */ + gpr_atm idle_state; + /* Time when the channel finished its last outstanding call, in grpc_millis */ + gpr_atm last_enter_idle_time_millis; +}; +} // namespace /* Increase the nubmer of active calls. Before the increasement, if there are no calls, the max_idle_timer should be cancelled. */ -static void increase_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) { +static void increase_call_count(channel_data* chand) { + /* Exit idle */ if (gpr_atm_full_fetch_add(&chand->call_count, 1) == 0) { - grpc_timer_cancel(exec_ctx, &chand->max_idle_timer); + while (true) { + gpr_atm idle_state = gpr_atm_acq_load(&chand->idle_state); + switch (idle_state) { + case MAX_IDLE_STATE_TIMER_SET: + /* max_idle_timer_cb may have already set idle_state to + MAX_IDLE_STATE_INIT, in this case, we don't need to set it to + MAX_IDLE_STATE_SEEN_EXIT_IDLE */ + gpr_atm_rel_cas(&chand->idle_state, MAX_IDLE_STATE_TIMER_SET, + MAX_IDLE_STATE_SEEN_EXIT_IDLE); + return; + case MAX_IDLE_STATE_SEEN_ENTER_IDLE: + gpr_atm_rel_store(&chand->idle_state, MAX_IDLE_STATE_SEEN_EXIT_IDLE); + return; + default: + /* try again */ + break; + } + } } } /* Decrease the nubmer of active calls. After the decrement, if there are no calls, the max_idle_timer should be started. 
*/ -static void decrease_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) { +static void decrease_call_count(channel_data* chand) { + /* Enter idle */ if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) { - GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_idle_timer"); - grpc_timer_init( - exec_ctx, &chand->max_idle_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_idle), - &chand->close_max_idle_channel, gpr_now(GPR_CLOCK_MONOTONIC)); + gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis, + (gpr_atm)grpc_core::ExecCtx::Get()->Now()); + while (true) { + gpr_atm idle_state = gpr_atm_acq_load(&chand->idle_state); + switch (idle_state) { + case MAX_IDLE_STATE_INIT: + GRPC_CHANNEL_STACK_REF(chand->channel_stack, + "max_age max_idle_timer"); + grpc_timer_init( + &chand->max_idle_timer, + grpc_core::ExecCtx::Get()->Now() + chand->max_connection_idle, + &chand->max_idle_timer_cb); + gpr_atm_rel_store(&chand->idle_state, MAX_IDLE_STATE_TIMER_SET); + return; + case MAX_IDLE_STATE_SEEN_EXIT_IDLE: + if (gpr_atm_rel_cas(&chand->idle_state, MAX_IDLE_STATE_SEEN_EXIT_IDLE, + MAX_IDLE_STATE_SEEN_ENTER_IDLE)) { + return; + } + break; + default: + /* try again */ + break; + } + } } } -static void start_max_idle_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - channel_data* chand = (channel_data*)arg; +static void start_max_idle_timer_after_init(void* arg, grpc_error* error) { + channel_data* chand = static_cast(arg); /* Decrease call_count. If there are no active calls at this time, max_idle_timer will start here. If the number of active calls is not 0, max_idle_timer will start after all the active calls end. */ - decrease_call_count(exec_ctx, chand); - GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack, + decrease_call_count(chand); + GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age start_max_idle_timer_after_init"); } -static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - channel_data* chand = (channel_data*)arg; +static void start_max_age_timer_after_init(void* arg, grpc_error* error) { + channel_data* chand = static_cast(arg); gpr_mu_lock(&chand->max_age_timer_mu); chand->max_age_timer_pending = true; GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer"); - grpc_timer_init( - exec_ctx, &chand->max_age_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), chand->max_connection_age), - &chand->close_max_age_channel, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&chand->max_age_timer, + grpc_core::ExecCtx::Get()->Now() + chand->max_connection_age, + &chand->close_max_age_channel); gpr_mu_unlock(&chand->max_age_timer_mu); - grpc_transport_op* op = grpc_make_transport_op(NULL); - op->on_connectivity_state_change = &chand->channel_connectivity_changed, + grpc_transport_op* op = grpc_make_transport_op(nullptr); + op->on_connectivity_state_change = &chand->channel_connectivity_changed; op->connectivity_state = &chand->connectivity_state; - grpc_channel_next_op(exec_ctx, - grpc_channel_stack_element(chand->channel_stack, 0), op); - GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack, + grpc_channel_next_op(grpc_channel_stack_element(chand->channel_stack, 0), op); + GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age start_max_age_timer_after_init"); } -static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx, - void* arg, +static void start_max_age_grace_timer_after_goaway_op(void* arg, grpc_error* error) { - channel_data* 
chand = (channel_data*)arg; + channel_data* chand = static_cast(arg); gpr_mu_lock(&chand->max_age_timer_mu); chand->max_age_grace_timer_pending = true; GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer"); - grpc_timer_init(exec_ctx, &chand->max_age_grace_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - chand->max_connection_age_grace), - &chand->force_close_max_age_channel, - gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init( + &chand->max_age_grace_timer, + chand->max_connection_age_grace == GRPC_MILLIS_INF_FUTURE + ? GRPC_MILLIS_INF_FUTURE + : grpc_core::ExecCtx::Get()->Now() + chand->max_connection_age_grace, + &chand->force_close_max_age_channel); gpr_mu_unlock(&chand->max_age_timer_mu); - GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack, + GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age start_max_age_grace_timer_after_goaway_op"); } -static void close_max_idle_channel(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - channel_data* chand = (channel_data*)arg; +static void close_max_idle_channel(channel_data* chand) { + /* Prevent the max idle timer from being set again */ + gpr_atm_no_barrier_fetch_add(&chand->call_count, 1); + grpc_transport_op* op = grpc_make_transport_op(nullptr); + op->goaway_error = + grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("max_idle"), + GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_NO_ERROR); + grpc_channel_element* elem = + grpc_channel_stack_element(chand->channel_stack, 0); + elem->filter->start_transport_op(elem, op); +} + +static void max_idle_timer_cb(void* arg, grpc_error* error) { + channel_data* chand = static_cast(arg); if (error == GRPC_ERROR_NONE) { - /* Prevent the max idle timer from being set again */ - gpr_atm_no_barrier_fetch_add(&chand->call_count, 1); - grpc_transport_op* op = grpc_make_transport_op(NULL); - op->goaway_error = - grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("max_idle"), - GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_NO_ERROR); - grpc_channel_element* elem = - grpc_channel_stack_element(chand->channel_stack, 0); - elem->filter->start_transport_op(exec_ctx, elem, op); - } else if (error != GRPC_ERROR_CANCELLED) { - GRPC_LOG_IF_ERROR("close_max_idle_channel", error); + bool try_again = true; + while (try_again) { + gpr_atm idle_state = gpr_atm_acq_load(&chand->idle_state); + switch (idle_state) { + case MAX_IDLE_STATE_TIMER_SET: + close_max_idle_channel(chand); + /* This MAX_IDLE_STATE_INIT is a final state, we don't have to check + * if idle_state has been changed */ + gpr_atm_rel_store(&chand->idle_state, MAX_IDLE_STATE_INIT); + try_again = false; + break; + case MAX_IDLE_STATE_SEEN_EXIT_IDLE: + if (gpr_atm_rel_cas(&chand->idle_state, MAX_IDLE_STATE_SEEN_EXIT_IDLE, + MAX_IDLE_STATE_INIT)) { + try_again = false; + } + break; + case MAX_IDLE_STATE_SEEN_ENTER_IDLE: + GRPC_CHANNEL_STACK_REF(chand->channel_stack, + "max_age max_idle_timer"); + grpc_timer_init(&chand->max_idle_timer, + static_cast(gpr_atm_no_barrier_load( + &chand->last_enter_idle_time_millis)) + + chand->max_connection_idle, + &chand->max_idle_timer_cb); + /* idle_state may have already been set to + MAX_IDLE_STATE_SEEN_EXIT_IDLE by increase_call_count(), in this + case, we don't need to set it to MAX_IDLE_STATE_TIMER_SET */ + gpr_atm_rel_cas(&chand->idle_state, MAX_IDLE_STATE_SEEN_ENTER_IDLE, + MAX_IDLE_STATE_TIMER_SET); + try_again = false; + break; + default: + /* try again */ + break; + } + } } - GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack, - "max_age max_idle_timer"); + 
GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age max_idle_timer"); } -static void close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - channel_data* chand = (channel_data*)arg; +static void close_max_age_channel(void* arg, grpc_error* error) { + channel_data* chand = static_cast(arg); gpr_mu_lock(&chand->max_age_timer_mu); chand->max_age_timer_pending = false; gpr_mu_unlock(&chand->max_age_timer_mu); @@ -190,58 +322,57 @@ static void close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg, GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_NO_ERROR); grpc_channel_element* elem = grpc_channel_stack_element(chand->channel_stack, 0); - elem->filter->start_transport_op(exec_ctx, elem, op); + elem->filter->start_transport_op(elem, op); } else if (error != GRPC_ERROR_CANCELLED) { GRPC_LOG_IF_ERROR("close_max_age_channel", error); } - GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack, - "max_age max_age_timer"); + GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age max_age_timer"); } -static void force_close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - channel_data* chand = (channel_data*)arg; +static void force_close_max_age_channel(void* arg, grpc_error* error) { + channel_data* chand = static_cast(arg); gpr_mu_lock(&chand->max_age_timer_mu); chand->max_age_grace_timer_pending = false; gpr_mu_unlock(&chand->max_age_timer_mu); if (error == GRPC_ERROR_NONE) { - grpc_transport_op* op = grpc_make_transport_op(NULL); + grpc_transport_op* op = grpc_make_transport_op(nullptr); op->disconnect_with_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel reaches max age"); grpc_channel_element* elem = grpc_channel_stack_element(chand->channel_stack, 0); - elem->filter->start_transport_op(exec_ctx, elem, op); + elem->filter->start_transport_op(elem, op); } else if (error != GRPC_ERROR_CANCELLED) { GRPC_LOG_IF_ERROR("force_close_max_age_channel", error); } - GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack, - "max_age max_age_grace_timer"); + GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age max_age_grace_timer"); } -static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg, - grpc_error* error) { - channel_data* chand = (channel_data*)arg; +static void channel_connectivity_changed(void* arg, grpc_error* error) { + channel_data* chand = static_cast(arg); if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) { - grpc_transport_op* op = grpc_make_transport_op(NULL); - op->on_connectivity_state_change = &chand->channel_connectivity_changed, + grpc_transport_op* op = grpc_make_transport_op(nullptr); + op->on_connectivity_state_change = &chand->channel_connectivity_changed; op->connectivity_state = &chand->connectivity_state; - grpc_channel_next_op( - exec_ctx, grpc_channel_stack_element(chand->channel_stack, 0), op); + grpc_channel_next_op(grpc_channel_stack_element(chand->channel_stack, 0), + op); } else { gpr_mu_lock(&chand->max_age_timer_mu); if (chand->max_age_timer_pending) { - grpc_timer_cancel(exec_ctx, &chand->max_age_timer); + grpc_timer_cancel(&chand->max_age_timer); chand->max_age_timer_pending = false; } if (chand->max_age_grace_timer_pending) { - grpc_timer_cancel(exec_ctx, &chand->max_age_grace_timer); + grpc_timer_cancel(&chand->max_age_grace_timer); chand->max_age_grace_timer_pending = false; } gpr_mu_unlock(&chand->max_age_timer_mu); /* If there are no active calls, this increasement will cancel max_idle_timer, and prevent max_idle_timer from being started in the future. 
*/ - increase_call_count(exec_ctx, chand); + increase_call_count(chand); + if (gpr_atm_acq_load(&chand->idle_state) == MAX_IDLE_STATE_SEEN_EXIT_IDLE) { + grpc_timer_cancel(&chand->max_idle_timer); + } } } @@ -249,7 +380,8 @@ static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg, connection storms. Note that the MAX_CONNECTION_AGE option without jitter would not create connection storms by itself, but if there happened to be a connection storm it could cause it to repeat at a fixed period. */ -static int add_random_max_connection_age_jitter(int value) { +static grpc_millis +add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) { /* generate a random number between 1 - MAX_CONNECTION_AGE_JITTER and 1 + MAX_CONNECTION_AGE_JITTER */ double multiplier = rand() * MAX_CONNECTION_AGE_JITTER * 2.0 / RAND_MAX + @@ -257,80 +389,73 @@ static int add_random_max_connection_age_jitter(int value) { double result = multiplier * value; /* INT_MAX - 0.5 converts the value to float, so that result will not be cast to int implicitly before the comparison. */ - return result > INT_MAX - 0.5 ? INT_MAX : (int)result; + return result > (static_cast(GRPC_MILLIS_INF_FUTURE)) - 0.5 + ? GRPC_MILLIS_INF_FUTURE + : static_cast(result); } /* Constructor for call_data. */ -static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, - grpc_call_element* elem, +static grpc_error* init_call_elem(grpc_call_element* elem, const grpc_call_element_args* args) { - channel_data* chand = (channel_data*)elem->channel_data; - increase_call_count(exec_ctx, chand); + channel_data* chand = static_cast(elem->channel_data); + increase_call_count(chand); return GRPC_ERROR_NONE; } /* Destructor for call_data. */ -static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, +static void destroy_call_elem(grpc_call_element* elem, const grpc_call_final_info* final_info, grpc_closure* ignored) { - channel_data* chand = (channel_data*)elem->channel_data; - decrease_call_count(exec_ctx, chand); + channel_data* chand = static_cast(elem->channel_data); + decrease_call_count(chand); } /* Constructor for channel_data. */ -static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem, +static grpc_error* init_channel_elem(grpc_channel_element* elem, grpc_channel_element_args* args) { - channel_data* chand = (channel_data*)elem->channel_data; + channel_data* chand = static_cast(elem->channel_data); gpr_mu_init(&chand->max_age_timer_mu); chand->max_age_timer_pending = false; chand->max_age_grace_timer_pending = false; chand->channel_stack = args->channel_stack; chand->max_connection_age = - DEFAULT_MAX_CONNECTION_AGE_MS == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(add_random_max_connection_age_jitter( - DEFAULT_MAX_CONNECTION_AGE_MS), - GPR_TIMESPAN); + add_random_max_connection_age_jitter_and_convert_to_grpc_millis( + DEFAULT_MAX_CONNECTION_AGE_MS); chand->max_connection_age_grace = DEFAULT_MAX_CONNECTION_AGE_GRACE_MS == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, - GPR_TIMESPAN); - chand->max_connection_idle = - DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(DEFAULT_MAX_CONNECTION_IDLE_MS, GPR_TIMESPAN); + ? GRPC_MILLIS_INF_FUTURE + : DEFAULT_MAX_CONNECTION_AGE_GRACE_MS; + chand->max_connection_idle = DEFAULT_MAX_CONNECTION_IDLE_MS == INT_MAX + ? 
GRPC_MILLIS_INF_FUTURE + : DEFAULT_MAX_CONNECTION_IDLE_MS; + chand->idle_state = MAX_IDLE_STATE_INIT; + gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis, + GRPC_MILLIS_INF_PAST); for (size_t i = 0; i < args->channel_args->num_args; ++i) { if (0 == strcmp(args->channel_args->args[i].key, GRPC_ARG_MAX_CONNECTION_AGE_MS)) { const int value = grpc_channel_arg_get_integer( &args->channel_args->args[i], MAX_CONNECTION_AGE_INTEGER_OPTIONS); chand->max_connection_age = - value == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis( - add_random_max_connection_age_jitter(value), GPR_TIMESPAN); + add_random_max_connection_age_jitter_and_convert_to_grpc_millis( + value); } else if (0 == strcmp(args->channel_args->args[i].key, GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS)) { const int value = grpc_channel_arg_get_integer( &args->channel_args->args[i], - (grpc_integer_options){DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0, - INT_MAX}); + {DEFAULT_MAX_CONNECTION_AGE_GRACE_MS, 0, INT_MAX}); chand->max_connection_age_grace = - value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(value, GPR_TIMESPAN); + value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value; } else if (0 == strcmp(args->channel_args->args[i].key, GRPC_ARG_MAX_CONNECTION_IDLE_MS)) { const int value = grpc_channel_arg_get_integer( &args->channel_args->args[i], MAX_CONNECTION_IDLE_INTEGER_OPTIONS); chand->max_connection_idle = - value == INT_MAX ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(value, GPR_TIMESPAN); + value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value; } } - GRPC_CLOSURE_INIT(&chand->close_max_idle_channel, close_max_idle_channel, - chand, grpc_schedule_on_exec_ctx); + GRPC_CLOSURE_INIT(&chand->max_idle_timer_cb, max_idle_timer_cb, chand, + grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&chand->close_max_age_channel, close_max_age_channel, chand, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&chand->force_close_max_age_channel, @@ -349,8 +474,7 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, channel_connectivity_changed, chand, grpc_schedule_on_exec_ctx); - if (gpr_time_cmp(chand->max_connection_age, gpr_inf_future(GPR_TIMESPAN)) != - 0) { + if (chand->max_connection_age != GRPC_MILLIS_INF_FUTURE) { /* When the channel reaches its max age, we send down an op with goaway_error set. However, we can't send down any ops until after the channel stack is fully initialized. If we start the timer here, we have @@ -360,26 +484,23 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, initialization is done. */ GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age start_max_age_timer_after_init"); - GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_age_timer_after_init, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&chand->start_max_age_timer_after_init, GRPC_ERROR_NONE); } /* Initialize the number of calls as 1, so that the max_idle_timer will not start until start_max_idle_timer_after_init is invoked. */ gpr_atm_rel_store(&chand->call_count, 1); - if (gpr_time_cmp(chand->max_connection_idle, gpr_inf_future(GPR_TIMESPAN)) != - 0) { + if (chand->max_connection_idle != GRPC_MILLIS_INF_FUTURE) { GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age start_max_idle_timer_after_init"); - GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_idle_timer_after_init, + GRPC_CLOSURE_SCHED(&chand->start_max_idle_timer_after_init, GRPC_ERROR_NONE); } return GRPC_ERROR_NONE; } /* Destructor for channel_data. 
*/
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 const grpc_channel_filter grpc_max_age_filter = {
     grpc_call_next_op,
@@ -394,8 +515,7 @@ const grpc_channel_filter grpc_max_age_filter = {
     grpc_channel_next_get_info,
     "max_age"};
 
-static bool maybe_add_max_age_filter(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_stack_builder* builder,
+static bool maybe_add_max_age_filter(grpc_channel_stack_builder* builder,
                                      void* arg) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
@@ -408,7 +528,7 @@ static bool maybe_add_max_age_filter(grpc_exec_ctx* exec_ctx,
                        MAX_CONNECTION_IDLE_INTEGER_OPTIONS) != INT_MAX;
   if (enable) {
     return grpc_channel_stack_builder_prepend_filter(
-        builder, &grpc_max_age_filter, NULL, NULL);
+        builder, &grpc_max_age_filter, nullptr, nullptr);
   } else {
     return true;
   }
@@ -417,7 +537,7 @@ static bool maybe_add_max_age_filter(grpc_exec_ctx* exec_ctx,
 void grpc_max_age_filter_init(void) {
   grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL,
                                    GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
-                                   maybe_add_max_age_filter, NULL);
+                                   maybe_add_max_age_filter, nullptr);
 }
 
 void grpc_max_age_filter_shutdown(void) {}
diff --git a/Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.h b/Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.h
index 68fb4a4ca..989322244 100644
--- a/Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.h
+++ b/Sources/CgRPC/src/core/ext/filters/max_age/max_age_filter.h
@@ -17,6 +17,8 @@
 #ifndef GRPC_CORE_EXT_FILTERS_MAX_AGE_MAX_AGE_FILTER_H
 #define GRPC_CORE_EXT_FILTERS_MAX_AGE_MAX_AGE_FILTER_H
 
+#include <grpc/support/port_platform.h>
+
 #include "src/core/lib/channel/channel_stack.h"
 
 extern const grpc_channel_filter grpc_max_age_filter;
diff --git a/Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.c b/Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.cc
similarity index 63%
rename from Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.c
rename to Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.cc
index 47763b1de..c7fc3f2e6 100644
--- a/Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.c
+++ b/Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.cc
@@ -14,6 +14,8 @@
 // limitations under the License.
 //
 
+#include <grpc/support/port_platform.h>
+
 #include "src/core/ext/filters/message_size/message_size_filter.h"
 
 #include <limits.h>
@@ -26,48 +28,71 @@
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/channel/channel_stack_builder.h"
-#include "src/core/lib/support/string.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
 #include "src/core/lib/surface/channel_init.h"
 #include "src/core/lib/transport/service_config.h"
 
-typedef struct message_size_limits {
+typedef struct {
   int max_send_size;
   int max_recv_size;
 } message_size_limits;
 
-static void message_size_limits_free(grpc_exec_ctx* exec_ctx, void* value) {
-  gpr_free(value);
-}
+namespace grpc_core {
+namespace {
+
+class MessageSizeLimits : public RefCounted<MessageSizeLimits> {
+ public:
+  static RefCountedPtr<MessageSizeLimits> CreateFromJson(const grpc_json* json);
 
-static void* message_size_limits_create_from_json(const grpc_json* json) {
+  const message_size_limits& limits() const { return limits_; }
+
+ private:
+  // So New() can call our private ctor.
+  template <typename T, typename... Args>
+  friend T* grpc_core::New(Args&&... args);
+
+  MessageSizeLimits(int max_send_size, int max_recv_size) {
+    limits_.max_send_size = max_send_size;
+    limits_.max_recv_size = max_recv_size;
+  }
+
+  message_size_limits limits_;
+};
+
+RefCountedPtr<MessageSizeLimits> MessageSizeLimits::CreateFromJson(
+    const grpc_json* json) {
   int max_request_message_bytes = -1;
   int max_response_message_bytes = -1;
-  for (grpc_json* field = json->child; field != NULL; field = field->next) {
-    if (field->key == NULL) continue;
+  for (grpc_json* field = json->child; field != nullptr; field = field->next) {
+    if (field->key == nullptr) continue;
     if (strcmp(field->key, "maxRequestMessageBytes") == 0) {
-      if (max_request_message_bytes >= 0) return NULL;  // Duplicate.
+      if (max_request_message_bytes >= 0) return nullptr;  // Duplicate.
       if (field->type != GRPC_JSON_STRING && field->type != GRPC_JSON_NUMBER) {
-        return NULL;
+        return nullptr;
       }
       max_request_message_bytes = gpr_parse_nonnegative_int(field->value);
-      if (max_request_message_bytes == -1) return NULL;
+      if (max_request_message_bytes == -1) return nullptr;
     } else if (strcmp(field->key, "maxResponseMessageBytes") == 0) {
-      if (max_response_message_bytes >= 0) return NULL;  // Duplicate.
+      if (max_response_message_bytes >= 0) return nullptr;  // Duplicate.
       if (field->type != GRPC_JSON_STRING && field->type != GRPC_JSON_NUMBER) {
-        return NULL;
+        return nullptr;
       }
       max_response_message_bytes = gpr_parse_nonnegative_int(field->value);
-      if (max_response_message_bytes == -1) return NULL;
    }
  }
-  message_size_limits* value =
-      (message_size_limits*)gpr_malloc(sizeof(message_size_limits));
-  value->max_send_size = max_request_message_bytes;
-  value->max_recv_size = max_response_message_bytes;
-  return value;
+  return MakeRefCounted<MessageSizeLimits>(max_request_message_bytes,
+                                           max_response_message_bytes);
 }
 
-typedef struct call_data {
+}  // namespace
+}  // namespace grpc_core
+
+namespace {
+
+struct call_data {
   grpc_call_combiner* call_combiner;
   message_size_limits limits;
   // Receive closures are chained: we inject this closure as the
@@ -75,29 +100,33 @@ typedef struct call_data {
   // call our next_recv_message_ready member after handling it.
   grpc_closure recv_message_ready;
   // Used by recv_message_ready.
-  grpc_byte_stream** recv_message;
+  grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
   // Original recv_message_ready callback, invoked after our own.
   grpc_closure* next_recv_message_ready;
-} call_data;
+};
 
-typedef struct channel_data {
+struct channel_data {
   message_size_limits limits;
-  // Maps path names to message_size_limits structs.
-  grpc_slice_hash_table* method_limit_table;
-} channel_data;
+  // Maps path names to refcounted_message_size_limits structs.
+  grpc_core::RefCountedPtr<grpc_core::SliceHashTable<
+      grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits>>>
+      method_limit_table;
+};
+
+}  // namespace
 
 // Callback invoked when we receive a message.  Here we check the max
 // receive message size.
-static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
-                               grpc_error* error) {
-  grpc_call_element* elem = (grpc_call_element*)user_data;
-  call_data* calld = (call_data*)elem->call_data;
-  if (*calld->recv_message != NULL && calld->limits.max_recv_size >= 0 &&
-      (*calld->recv_message)->length > (size_t)calld->limits.max_recv_size) {
+static void recv_message_ready(void* user_data, grpc_error* error) {
+  grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  if (*calld->recv_message != nullptr && calld->limits.max_recv_size >= 0 &&
+      (*calld->recv_message)->length() >
+          static_cast<size_t>(calld->limits.max_recv_size)) {
     char* message_string;
     gpr_asprintf(&message_string,
                  "Received message larger than max (%u vs. %d)",
-                 (*calld->recv_message)->length, calld->limits.max_recv_size);
+                 (*calld->recv_message)->length(), calld->limits.max_recv_size);
     grpc_error* new_error = grpc_error_set_int(
         GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
         GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED);
@@ -112,24 +141,23 @@ static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
     GRPC_ERROR_REF(error);
   }
   // Invoke the next callback.
-  GRPC_CLOSURE_RUN(exec_ctx, calld->next_recv_message_ready, error);
+  GRPC_CLOSURE_RUN(calld->next_recv_message_ready, error);
 }
 
 // Start transport stream op.
 static void start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
-  call_data* calld = (call_data*)elem->call_data;
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   // Check max send message size.
   if (op->send_message && calld->limits.max_send_size >= 0 &&
-      op->payload->send_message.send_message->length >
-          (size_t)calld->limits.max_send_size) {
+      op->payload->send_message.send_message->length() >
+          static_cast<size_t>(calld->limits.max_send_size)) {
     char* message_string;
     gpr_asprintf(&message_string, "Sent message larger than max (%u vs. %d)",
-                 op->payload->send_message.send_message->length,
+                 op->payload->send_message.send_message->length(),
                  calld->limits.max_send_size);
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, op,
+        op,
         grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
                            GRPC_ERROR_INT_GRPC_STATUS,
                            GRPC_STATUS_RESOURCE_EXHAUSTED),
@@ -145,17 +173,16 @@ static void start_transport_stream_op_batch(
     op->payload->recv_message.recv_message_ready = &calld->recv_message_ready;
   }
   // Chain to the next filter.
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 }
 
 // Constructor for call_data.
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  channel_data* chand = (channel_data*)elem->channel_data;
-  call_data* calld = (call_data*)elem->call_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+  call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->call_combiner = args->call_combiner;
-  calld->next_recv_message_ready = NULL;
+  calld->next_recv_message_ready = nullptr;
   GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
                     grpc_schedule_on_exec_ctx);
   // Get max sizes from channel data, then merge in per-method config values.
@@ -163,20 +190,20 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
   // apply the max request size to the send limit and the max response
   // size to the receive limit.
   calld->limits = chand->limits;
-  if (chand->method_limit_table != NULL) {
-    message_size_limits* limits =
-        (message_size_limits*)grpc_method_config_table_get(
-            exec_ctx, chand->method_limit_table, args->path);
-    if (limits != NULL) {
-      if (limits->max_send_size >= 0 &&
-          (limits->max_send_size < calld->limits.max_send_size ||
+  if (chand->method_limit_table != nullptr) {
+    grpc_core::RefCountedPtr<MessageSizeLimits> limits =
+        grpc_core::ServiceConfig::MethodConfigTableLookup(
+            *chand->method_limit_table, args->path);
+    if (limits != nullptr) {
+      if (limits->limits().max_send_size >= 0 &&
+          (limits->limits().max_send_size < calld->limits.max_send_size ||
           calld->limits.max_send_size < 0)) {
-        calld->limits.max_send_size = limits->max_send_size;
+        calld->limits.max_send_size = limits->limits().max_send_size;
      }
-      if (limits->max_recv_size >= 0 &&
-          (limits->max_recv_size < calld->limits.max_recv_size ||
+      if (limits->limits().max_recv_size >= 0 &&
+          (limits->limits().max_recv_size < calld->limits.max_recv_size ||
           calld->limits.max_recv_size < 0)) {
-        calld->limits.max_recv_size = limits->max_recv_size;
+        calld->limits.max_recv_size = limits->limits().max_recv_size;
      }
    }
  }
@@ -184,7 +211,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
 }
 
 // Destructor for call_data.
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {}
 
@@ -221,35 +248,30 @@ message_size_limits get_message_size_limits(
 }
 
 // Constructor for channel_data.
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   GPR_ASSERT(!args->is_last);
-  channel_data* chand = (channel_data*)elem->channel_data;
+  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   chand->limits = get_message_size_limits(args->channel_args);
   // Get method config table from channel args.
   const grpc_arg* channel_arg =
       grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG);
-  if (channel_arg != NULL) {
-    GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
-    grpc_service_config* service_config =
-        grpc_service_config_create(channel_arg->value.string);
-    if (service_config != NULL) {
-      chand->method_limit_table =
-          grpc_service_config_create_method_config_table(
-              exec_ctx, service_config, message_size_limits_create_from_json,
-              message_size_limits_free);
-      grpc_service_config_destroy(service_config);
+  const char* service_config_str = grpc_channel_arg_get_string(channel_arg);
+  if (service_config_str != nullptr) {
+    grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
+        grpc_core::ServiceConfig::Create(service_config_str);
+    if (service_config != nullptr) {
+      chand->method_limit_table = service_config->CreateMethodConfigTable(
+          grpc_core::MessageSizeLimits::CreateFromJson);
    }
  }
  return GRPC_ERROR_NONE;
 }
 
 // Destructor for channel_data.
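Before the destructor hunk that follows, a brief aside on where the limits enforced by this filter come from. The sketch below is illustrative only and is not part of the vendored patch; it uses the public C-core API and assumes the stock GRPC_ARG_MAX_SEND_MESSAGE_LENGTH / GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH integer channel args that get_message_size_limits() consumes, with hypothetical limit values. Per-method overrides come from the GRPC_ARG_SERVICE_CONFIG JSON, whose maxRequestMessageBytes / maxResponseMessageBytes keys are exactly what MessageSizeLimits::CreateFromJson parses above.

// Illustrative sketch (not part of this patch): channel-level message size
// limits are plain integer channel args passed at channel creation time.
#include <grpc/grpc.h>

static grpc_channel* create_channel_with_limits(const char* target) {
  grpc_arg limit_args[2];
  limit_args[0].type = GRPC_ARG_INTEGER;
  limit_args[0].key = const_cast<char*>(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH);
  limit_args[0].value.integer = 1 * 1024 * 1024;  // hypothetical 1 MiB send cap
  limit_args[1].type = GRPC_ARG_INTEGER;
  limit_args[1].key = const_cast<char*>(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH);
  limit_args[1].value.integer = 4 * 1024 * 1024;  // hypothetical 4 MiB receive cap
  grpc_channel_args channel_args = {2, limit_args};
  return grpc_insecure_channel_create(target, &channel_args, nullptr);
}
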
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem) { - channel_data* chand = (channel_data*)elem->channel_data; - grpc_slice_hash_table_unref(exec_ctx, chand->method_limit_table); +static void destroy_channel_elem(grpc_channel_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + chand->method_limit_table.reset(); } const grpc_channel_filter grpc_message_size_filter = { @@ -265,8 +287,7 @@ const grpc_channel_filter grpc_message_size_filter = { grpc_channel_next_get_info, "message_size"}; -static bool maybe_add_message_size_filter(grpc_exec_ctx* exec_ctx, - grpc_channel_stack_builder* builder, +static bool maybe_add_message_size_filter(grpc_channel_stack_builder* builder, void* arg) { const grpc_channel_args* channel_args = grpc_channel_stack_builder_get_channel_arguments(builder); @@ -277,12 +298,12 @@ static bool maybe_add_message_size_filter(grpc_exec_ctx* exec_ctx, } const grpc_arg* a = grpc_channel_args_find(channel_args, GRPC_ARG_SERVICE_CONFIG); - if (a != NULL) { + if (a != nullptr) { enable = true; } if (enable) { return grpc_channel_stack_builder_prepend_filter( - builder, &grpc_message_size_filter, NULL, NULL); + builder, &grpc_message_size_filter, nullptr, nullptr); } else { return true; } @@ -291,13 +312,13 @@ static bool maybe_add_message_size_filter(grpc_exec_ctx* exec_ctx, void grpc_message_size_filter_init(void) { grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - maybe_add_message_size_filter, NULL); + maybe_add_message_size_filter, nullptr); grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - maybe_add_message_size_filter, NULL); + maybe_add_message_size_filter, nullptr); grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - maybe_add_message_size_filter, NULL); + maybe_add_message_size_filter, nullptr); } void grpc_message_size_filter_shutdown(void) {} diff --git a/Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.h b/Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.h index d3667f700..f66636e58 100644 --- a/Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.h +++ b/Sources/CgRPC/src/core/ext/filters/message_size/message_size_filter.h @@ -17,6 +17,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_MESSAGE_SIZE_MESSAGE_SIZE_FILTER_H #define GRPC_CORE_EXT_FILTERS_MESSAGE_SIZE_MESSAGE_SIZE_FILTER_H +#include + #include "src/core/lib/channel/channel_stack.h" extern const grpc_channel_filter grpc_message_size_filter; diff --git a/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c b/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc similarity index 79% rename from Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c rename to Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc index c8b2fe5f9..c7070d4d9 100644 --- a/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c +++ b/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc @@ -14,6 +14,8 @@ // limitations under the License. 
// +#include + #include "src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h" #include @@ -25,7 +27,8 @@ #include "src/core/lib/surface/channel_init.h" #include "src/core/lib/transport/metadata.h" -typedef struct call_data { +namespace { +struct call_data { // Receive closures are chained: we inject this closure as the // recv_initial_metadata_ready up-call on transport_stream_op, and remember to // call our next_recv_initial_metadata_ready member after handling it. @@ -37,12 +40,13 @@ typedef struct call_data { // Marks whether the workaround is active bool workaround_active; -} call_data; +}; +} // namespace // Find the user agent metadata element in the batch static bool get_user_agent_mdelem(const grpc_metadata_batch* batch, grpc_mdelem* md) { - if (batch->idx.named.user_agent != NULL) { + if (batch->idx.named.user_agent != nullptr) { *md = batch->idx.named.user_agent->md; return true; } @@ -50,10 +54,9 @@ static bool get_user_agent_mdelem(const grpc_metadata_batch* batch, } // Callback invoked when we receive an initial metadata. -static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, - void* user_data, grpc_error* error) { - grpc_call_element* elem = (grpc_call_element*)user_data; - call_data* calld = (call_data*)elem->call_data; +static void recv_initial_metadata_ready(void* user_data, grpc_error* error) { + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); if (GRPC_ERROR_NONE == error) { grpc_mdelem md; @@ -67,15 +70,14 @@ static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, } // Invoke the next callback. - GRPC_CLOSURE_RUN(exec_ctx, calld->next_recv_initial_metadata_ready, + GRPC_CLOSURE_RUN(calld->next_recv_initial_metadata_ready, GRPC_ERROR_REF(error)); } // Start transport stream op. static void start_transport_stream_op_batch( - grpc_exec_ctx* exec_ctx, grpc_call_element* elem, - grpc_transport_stream_op_batch* op) { - call_data* calld = (call_data*)elem->call_data; + grpc_call_element* elem, grpc_transport_stream_op_batch* op) { + call_data* calld = static_cast(elem->call_data); // Inject callback for receiving initial metadata if (op->recv_initial_metadata) { @@ -91,20 +93,21 @@ static void start_transport_stream_op_batch( /* Send message happens after client's user-agent (initial metadata) is * received, so workaround_active must be set already */ if (calld->workaround_active) { - op->payload->send_message.send_message->flags |= GRPC_WRITE_NO_COMPRESS; + op->payload->send_message.send_message->set_flags( + op->payload->send_message.send_message->flags() | + GRPC_WRITE_NO_COMPRESS); } } // Chain to the next filter. - grpc_call_next_op(exec_ctx, elem, op); + grpc_call_next_op(elem, op); } // Constructor for call_data. -static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, - grpc_call_element* elem, +static grpc_error* init_call_elem(grpc_call_element* elem, const grpc_call_element_args* args) { - call_data* calld = (call_data*)elem->call_data; - calld->next_recv_initial_metadata_ready = NULL; + call_data* calld = static_cast(elem->call_data); + calld->next_recv_initial_metadata_ready = nullptr; calld->workaround_active = false; GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, recv_initial_metadata_ready, elem, @@ -113,20 +116,18 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx, } // Destructor for call_data. 
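Before the remaining hunks of this filter, a short note on how the workaround gets switched on. The sketch below is illustrative and not part of the vendored patch: register_workaround_cronet_compression() (further down in this file's diff) only prepends the filter when the server's channel args carry GRPC_ARG_WORKAROUND_CRONET_COMPRESSION with a true value, read via grpc_channel_arg_get_bool().

// Illustrative sketch (not part of this patch): enable the Cronet
// compression workaround on a server via its channel args.
#include <grpc/grpc.h>

static grpc_server* create_server_with_cronet_workaround(void) {
  grpc_arg workaround_arg;
  workaround_arg.type = GRPC_ARG_INTEGER;
  workaround_arg.key = const_cast<char*>(GRPC_ARG_WORKAROUND_CRONET_COMPRESSION);
  workaround_arg.value.integer = 1;  // read as a boolean by grpc_channel_arg_get_bool()
  grpc_channel_args server_args = {1, &workaround_arg};
  return grpc_server_create(&server_args, nullptr);
}
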
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem, +static void destroy_call_elem(grpc_call_element* elem, const grpc_call_final_info* final_info, grpc_closure* ignored) {} // Constructor for channel_data. -static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem, +static grpc_error* init_channel_elem(grpc_channel_element* elem, grpc_channel_element_args* args) { return GRPC_ERROR_NONE; } // Destructor for channel_data. -static void destroy_channel_elem(grpc_exec_ctx* exec_ctx, - grpc_channel_element* elem) {} +static void destroy_channel_elem(grpc_channel_element* elem) {} // Parse the user agent static bool parse_user_agent(grpc_mdelem md) { @@ -139,10 +140,10 @@ static bool parse_user_agent(grpc_mdelem md) { bool grpc_objc_specifier_seen = false; bool cronet_specifier_seen = false; char *major_version_str = user_agent_str, *minor_version_str; - long major_version, minor_version; + long major_version = 0, minor_version = 0; char* head = strtok(user_agent_str, " "); - while (head != NULL) { + while (head != nullptr) { if (!grpc_objc_specifier_seen && 0 == strncmp(head, grpc_objc_specifier, grpc_objc_specifier_len)) { major_version_str = head + grpc_objc_specifier_len; @@ -153,11 +154,11 @@ static bool parse_user_agent(grpc_mdelem md) { break; } - head = strtok(NULL, " "); + head = strtok(nullptr, " "); } if (grpc_objc_specifier_seen) { major_version_str = strtok(major_version_str, "."); - minor_version_str = strtok(NULL, "."); + minor_version_str = strtok(nullptr, "."); major_version = atol(major_version_str); minor_version = atol(minor_version_str); } @@ -181,25 +182,25 @@ const grpc_channel_filter grpc_workaround_cronet_compression_filter = { "workaround_cronet_compression"}; static bool register_workaround_cronet_compression( - grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) { + grpc_channel_stack_builder* builder, void* arg) { const grpc_channel_args* channel_args = grpc_channel_stack_builder_get_channel_arguments(builder); const grpc_arg* a = grpc_channel_args_find( channel_args, GRPC_ARG_WORKAROUND_CRONET_COMPRESSION); - if (a == NULL) { + if (a == nullptr) { return true; } if (grpc_channel_arg_get_bool(a, false) == false) { return true; } return grpc_channel_stack_builder_prepend_filter( - builder, &grpc_workaround_cronet_compression_filter, NULL, NULL); + builder, &grpc_workaround_cronet_compression_filter, nullptr, nullptr); } void grpc_workaround_cronet_compression_filter_init(void) { grpc_channel_init_register_stage( GRPC_SERVER_CHANNEL, GRPC_WORKAROUND_PRIORITY_HIGH, - register_workaround_cronet_compression, NULL); + register_workaround_cronet_compression, nullptr); grpc_register_workaround(GRPC_WORKAROUND_ID_CRONET_COMPRESSION, parse_user_agent); } diff --git a/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h b/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h index 9dae4f073..94d20f0c4 100644 --- a/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +++ b/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h @@ -17,6 +17,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_CRONET_COMPRESSION_FILTER_H #define GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_CRONET_COMPRESSION_FILTER_H +#include + #include "src/core/lib/channel/channel_stack.h" extern const grpc_channel_filter grpc_workaround_cronet_compression_filter; diff --git 
a/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.c b/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.cc similarity index 67% rename from Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.c rename to Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.cc index e600fbee6..4dabe896d 100644 --- a/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.c +++ b/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.cc @@ -14,6 +14,8 @@ // limitations under the License. // +#include + #include "src/core/ext/filters/workarounds/workaround_utils.h" #include @@ -21,26 +23,26 @@ user_agent_parser ua_parser[GRPC_MAX_WORKAROUND_ID]; -static void destroy_user_agent_md(void *user_agent_md) { +static void destroy_user_agent_md(void* user_agent_md) { gpr_free(user_agent_md); } -grpc_workaround_user_agent_md *grpc_parse_user_agent(grpc_mdelem md) { - grpc_workaround_user_agent_md *user_agent_md = - (grpc_workaround_user_agent_md *)grpc_mdelem_get_user_data( - md, destroy_user_agent_md); +grpc_workaround_user_agent_md* grpc_parse_user_agent(grpc_mdelem md) { + grpc_workaround_user_agent_md* user_agent_md = + static_cast( + grpc_mdelem_get_user_data(md, destroy_user_agent_md)); - if (NULL != user_agent_md) { + if (nullptr != user_agent_md) { return user_agent_md; } - user_agent_md = (grpc_workaround_user_agent_md *)gpr_malloc( - sizeof(grpc_workaround_user_agent_md)); + user_agent_md = static_cast( + gpr_malloc(sizeof(grpc_workaround_user_agent_md))); for (int i = 0; i < GRPC_MAX_WORKAROUND_ID; i++) { if (ua_parser[i]) { user_agent_md->workaround_active[i] = ua_parser[i](md); } } - grpc_mdelem_set_user_data(md, destroy_user_agent_md, (void *)user_agent_md); + grpc_mdelem_set_user_data(md, destroy_user_agent_md, (void*)user_agent_md); return user_agent_md; } diff --git a/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.h b/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.h index 2ad7a876d..f172ccc07 100644 --- a/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.h +++ b/Sources/CgRPC/src/core/ext/filters/workarounds/workaround_utils.h @@ -17,6 +17,8 @@ #ifndef GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_UTILS_H #define GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_UTILS_H +#include + #include #include "src/core/lib/transport/metadata.h" @@ -28,7 +30,7 @@ typedef struct grpc_workaround_user_agent_md { bool workaround_active[GRPC_MAX_WORKAROUND_ID]; } grpc_workaround_user_agent_md; -grpc_workaround_user_agent_md *grpc_parse_user_agent(grpc_mdelem md); +grpc_workaround_user_agent_md* grpc_parse_user_agent(grpc_mdelem md); typedef bool (*user_agent_parser)(grpc_mdelem); diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.c b/Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.cc similarity index 80% rename from Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.cc index ca2e801ec..1fdab76db 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.cc @@ -16,14 +16,17 @@ * */ -#include "src/core/ext/transport/chttp2/alpn/alpn.h" +#include + #include -#include +#include "src/core/ext/transport/chttp2/alpn/alpn.h" + +#include "src/core/lib/gpr/useful.h" /* in order of preference */ -static const char *const supported_versions[] = {"grpc-exp", "h2"}; +static const char* const supported_versions[] = {"grpc-exp", "h2"}; -int 
grpc_chttp2_is_alpn_version_supported(const char *version, size_t size) { +int grpc_chttp2_is_alpn_version_supported(const char* version, size_t size) { size_t i; for (i = 0; i < GPR_ARRAY_SIZE(supported_versions); i++) { if (!strncmp(version, supported_versions[i], size)) return 1; @@ -35,7 +38,7 @@ size_t grpc_chttp2_num_alpn_versions(void) { return GPR_ARRAY_SIZE(supported_versions); } -const char *grpc_chttp2_get_alpn_version_index(size_t i) { +const char* grpc_chttp2_get_alpn_version_index(size_t i) { GPR_ASSERT(i < GPR_ARRAY_SIZE(supported_versions)); return supported_versions[i]; } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.h b/Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.h index 379af4b24..0042eafd9 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/alpn/alpn.h @@ -19,16 +19,18 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_ALPN_ALPN_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_ALPN_ALPN_H +#include + #include /* Retuns 1 if the version is supported, 0 otherwise. */ -int grpc_chttp2_is_alpn_version_supported(const char *version, size_t size); +int grpc_chttp2_is_alpn_version_supported(const char* version, size_t size); /* Returns the number of protocol versions to advertise */ size_t grpc_chttp2_num_alpn_versions(void); /* Returns the protocol version at index i (0 <= i < * grpc_chttp2_num_alpn_versions()) */ -const char *grpc_chttp2_get_alpn_version_index(size_t i); +const char* grpc_chttp2_get_alpn_version_index(size_t i); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_ALPN_ALPN_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/client/authority.cc b/Sources/CgRPC/src/core/ext/transport/chttp2/client/authority.cc new file mode 100644 index 000000000..bad3153b0 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/client/authority.cc @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/ext/transport/chttp2/client/authority.h" + +grpc_channel_args* grpc_default_authority_add_if_not_present( + const grpc_channel_args* args) { + const bool has_default_authority = + grpc_channel_args_find(args, GRPC_ARG_DEFAULT_AUTHORITY) != nullptr; + grpc_arg new_args[1]; + size_t num_new_args = 0; + grpc_core::UniquePtr default_authority; + if (!has_default_authority) { + const grpc_arg* server_uri_arg = + grpc_channel_args_find(args, GRPC_ARG_SERVER_URI); + const char* server_uri_str = grpc_channel_arg_get_string(server_uri_arg); + GPR_ASSERT(server_uri_str != nullptr); + default_authority = + grpc_core::ResolverRegistry::GetDefaultAuthority(server_uri_str); + GPR_ASSERT(default_authority != nullptr); + new_args[num_new_args++] = grpc_channel_arg_string_create( + const_cast(GRPC_ARG_DEFAULT_AUTHORITY), default_authority.get()); + } + return grpc_channel_args_copy_and_add(args, new_args, num_new_args); +} diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/client/authority.h b/Sources/CgRPC/src/core/ext/transport/chttp2/client/authority.h new file mode 100644 index 000000000..642584ef5 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/client/authority.h @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_AUTHORITY_H +#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_AUTHORITY_H + +#include + +#include + +#include "src/core/ext/filters/client_channel/client_channel.h" +#include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gprpp/memory.h" + +/// Returns a copy of \a args with the default authority channel arg set if it +/// wasn't already present. +grpc_channel_args* grpc_default_authority_add_if_not_present( + const grpc_channel_args* args); + +#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_AUTHORITY_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.c b/Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.cc similarity index 59% rename from Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.cc index a459a1a83..e7522ffba 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/client/chttp2_connector.h" #include @@ -45,56 +47,52 @@ typedef struct { bool shutdown; bool connecting; - grpc_closure *notify; + grpc_closure* notify; grpc_connect_in_args args; - grpc_connect_out_args *result; + grpc_connect_out_args* result; - grpc_endpoint *endpoint; // Non-NULL until handshaking starts. + grpc_endpoint* endpoint; // Non-NULL until handshaking starts. 
grpc_closure connected; - grpc_handshake_manager *handshake_mgr; + grpc_handshake_manager* handshake_mgr; } chttp2_connector; -static void chttp2_connector_ref(grpc_connector *con) { - chttp2_connector *c = (chttp2_connector *)con; +static void chttp2_connector_ref(grpc_connector* con) { + chttp2_connector* c = reinterpret_cast(con); gpr_ref(&c->refs); } -static void chttp2_connector_unref(grpc_exec_ctx *exec_ctx, - grpc_connector *con) { - chttp2_connector *c = (chttp2_connector *)con; +static void chttp2_connector_unref(grpc_connector* con) { + chttp2_connector* c = reinterpret_cast(con); if (gpr_unref(&c->refs)) { gpr_mu_destroy(&c->mu); // If handshaking is not yet in progress, destroy the endpoint. // Otherwise, the handshaker will do this for us. - if (c->endpoint != NULL) grpc_endpoint_destroy(exec_ctx, c->endpoint); + if (c->endpoint != nullptr) grpc_endpoint_destroy(c->endpoint); gpr_free(c); } } -static void chttp2_connector_shutdown(grpc_exec_ctx *exec_ctx, - grpc_connector *con, grpc_error *why) { - chttp2_connector *c = (chttp2_connector *)con; +static void chttp2_connector_shutdown(grpc_connector* con, grpc_error* why) { + chttp2_connector* c = reinterpret_cast(con); gpr_mu_lock(&c->mu); c->shutdown = true; - if (c->handshake_mgr != NULL) { - grpc_handshake_manager_shutdown(exec_ctx, c->handshake_mgr, - GRPC_ERROR_REF(why)); + if (c->handshake_mgr != nullptr) { + grpc_handshake_manager_shutdown(c->handshake_mgr, GRPC_ERROR_REF(why)); } // If handshaking is not yet in progress, shutdown the endpoint. // Otherwise, the handshaker will do this for us. - if (!c->connecting && c->endpoint != NULL) { - grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(why)); + if (!c->connecting && c->endpoint != nullptr) { + grpc_endpoint_shutdown(c->endpoint, GRPC_ERROR_REF(why)); } gpr_mu_unlock(&c->mu); GRPC_ERROR_UNREF(why); } -static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_handshaker_args *args = (grpc_handshaker_args *)arg; - chttp2_connector *c = (chttp2_connector *)args->user_data; +static void on_handshake_done(void* arg, grpc_error* error) { + grpc_handshaker_args* args = static_cast(arg); + chttp2_connector* c = static_cast(args->user_data); gpr_mu_lock(&c->mu); if (error != GRPC_ERROR_NONE || c->shutdown) { if (error == GRPC_ERROR_NONE) { @@ -105,18 +103,20 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, // before destroying them, even if we know that there are no // pending read/write callbacks. This should be fixed, at which // point this can be removed. 
- grpc_endpoint_shutdown(exec_ctx, args->endpoint, GRPC_ERROR_REF(error)); - grpc_endpoint_destroy(exec_ctx, args->endpoint); - grpc_channel_args_destroy(exec_ctx, args->args); - grpc_slice_buffer_destroy_internal(exec_ctx, args->read_buffer); + grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_REF(error)); + grpc_endpoint_destroy(args->endpoint); + grpc_channel_args_destroy(args->args); + grpc_slice_buffer_destroy_internal(args->read_buffer); gpr_free(args->read_buffer); } else { error = GRPC_ERROR_REF(error); } memset(c->result, 0, sizeof(*c->result)); } else { - c->result->transport = grpc_create_chttp2_transport(exec_ctx, args->args, - args->endpoint, true); + grpc_endpoint_delete_from_pollset_set(args->endpoint, + c->args.interested_parties); + c->result->transport = + grpc_create_chttp2_transport(args->args, args->endpoint, true); GPR_ASSERT(c->result->transport); // TODO(roth): We ideally want to wait until we receive HTTP/2 // settings from the server before we consider the connection @@ -142,32 +142,33 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, // so until after transparent retries is implemented. Otherwise, any // RPC that we attempt to send on the connection before the timeout // would fail instead of being retried on a subsequent attempt. - grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, - args->read_buffer, NULL); + grpc_chttp2_transport_start_reading(c->result->transport, args->read_buffer, + nullptr); c->result->channel_args = args->args; } - grpc_closure *notify = c->notify; - c->notify = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, notify, error); - grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr); - c->handshake_mgr = NULL; + grpc_closure* notify = c->notify; + c->notify = nullptr; + GRPC_CLOSURE_SCHED(notify, error); + grpc_handshake_manager_destroy(c->handshake_mgr); + c->handshake_mgr = nullptr; gpr_mu_unlock(&c->mu); - chttp2_connector_unref(exec_ctx, (grpc_connector *)c); + chttp2_connector_unref(reinterpret_cast(c)); } -static void start_handshake_locked(grpc_exec_ctx *exec_ctx, - chttp2_connector *c) { +static void start_handshake_locked(chttp2_connector* c) { c->handshake_mgr = grpc_handshake_manager_create(); - grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, c->args.channel_args, + grpc_handshakers_add(HANDSHAKER_CLIENT, c->args.channel_args, c->handshake_mgr); + grpc_endpoint_add_to_pollset_set(c->endpoint, c->args.interested_parties); grpc_handshake_manager_do_handshake( - exec_ctx, c->handshake_mgr, c->endpoint, c->args.channel_args, - c->args.deadline, NULL /* acceptor */, on_handshake_done, c); - c->endpoint = NULL; // Endpoint handed off to handshake manager. + c->handshake_mgr, c->args.interested_parties, c->endpoint, + c->args.channel_args, c->args.deadline, nullptr /* acceptor */, + on_handshake_done, c); + c->endpoint = nullptr; // Endpoint handed off to handshake manager. 
} -static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - chttp2_connector *c = (chttp2_connector *)arg; +static void connected(void* arg, grpc_error* error) { + chttp2_connector* c = static_cast(arg); gpr_mu_lock(&c->mu); GPR_ASSERT(c->connecting); c->connecting = false; @@ -178,42 +179,40 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { error = GRPC_ERROR_REF(error); } memset(c->result, 0, sizeof(*c->result)); - grpc_closure *notify = c->notify; - c->notify = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, notify, error); - if (c->endpoint != NULL) { - grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error)); + grpc_closure* notify = c->notify; + c->notify = nullptr; + GRPC_CLOSURE_SCHED(notify, error); + if (c->endpoint != nullptr) { + grpc_endpoint_shutdown(c->endpoint, GRPC_ERROR_REF(error)); } gpr_mu_unlock(&c->mu); - chttp2_connector_unref(exec_ctx, (grpc_connector *)arg); + chttp2_connector_unref(static_cast(arg)); } else { - GPR_ASSERT(c->endpoint != NULL); - start_handshake_locked(exec_ctx, c); + GPR_ASSERT(c->endpoint != nullptr); + start_handshake_locked(c); gpr_mu_unlock(&c->mu); } } -static void chttp2_connector_connect(grpc_exec_ctx *exec_ctx, - grpc_connector *con, - const grpc_connect_in_args *args, - grpc_connect_out_args *result, - grpc_closure *notify) { - chttp2_connector *c = (chttp2_connector *)con; +static void chttp2_connector_connect(grpc_connector* con, + const grpc_connect_in_args* args, + grpc_connect_out_args* result, + grpc_closure* notify) { + chttp2_connector* c = reinterpret_cast(con); grpc_resolved_address addr; - grpc_get_subchannel_address_arg(exec_ctx, args->channel_args, &addr); + grpc_get_subchannel_address_arg(args->channel_args, &addr); gpr_mu_lock(&c->mu); - GPR_ASSERT(c->notify == NULL); + GPR_ASSERT(c->notify == nullptr); c->notify = notify; c->args = *args; c->result = result; - GPR_ASSERT(c->endpoint == NULL); + GPR_ASSERT(c->endpoint == nullptr); chttp2_connector_ref(con); // Ref taken for callback. 
GRPC_CLOSURE_INIT(&c->connected, connected, c, grpc_schedule_on_exec_ctx); GPR_ASSERT(!c->connecting); c->connecting = true; - grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint, - args->interested_parties, args->channel_args, &addr, - args->deadline); + grpc_tcp_client_connect(&c->connected, &c->endpoint, args->interested_parties, + args->channel_args, &addr, args->deadline); gpr_mu_unlock(&c->mu); } @@ -221,8 +220,8 @@ static const grpc_connector_vtable chttp2_connector_vtable = { chttp2_connector_ref, chttp2_connector_unref, chttp2_connector_shutdown, chttp2_connector_connect}; -grpc_connector *grpc_chttp2_connector_create() { - chttp2_connector *c = (chttp2_connector *)gpr_zalloc(sizeof(*c)); +grpc_connector* grpc_chttp2_connector_create() { + chttp2_connector* c = static_cast(gpr_zalloc(sizeof(*c))); c->base.vtable = &chttp2_connector_vtable; gpr_mu_init(&c->mu); gpr_ref_init(&c->refs, 1); diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.h b/Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.h index e258892cf..04da44130 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/client/chttp2_connector.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H +#include + #include "src/core/ext/filters/client_channel/connector.h" grpc_connector* grpc_chttp2_connector_create(); diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create.cc similarity index 52% rename from Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create.cc index 6410a6043..e6c8c3826 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -25,45 +27,48 @@ #include "src/core/ext/filters/client_channel/client_channel.h" #include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/ext/transport/chttp2/client/authority.h" #include "src/core/ext/transport/chttp2/client/chttp2_connector.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/channel.h" static void client_channel_factory_ref( - grpc_client_channel_factory *cc_factory) {} + grpc_client_channel_factory* cc_factory) {} static void client_channel_factory_unref( - grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory) {} + grpc_client_channel_factory* cc_factory) {} -static grpc_subchannel *client_channel_factory_create_subchannel( - grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, - const grpc_subchannel_args *args) { - grpc_connector *connector = grpc_chttp2_connector_create(); - grpc_subchannel *s = grpc_subchannel_create(exec_ctx, connector, args); - grpc_connector_unref(exec_ctx, connector); +static grpc_subchannel* client_channel_factory_create_subchannel( + grpc_client_channel_factory* cc_factory, const grpc_subchannel_args* args) { + grpc_subchannel_args final_sc_args; + memcpy(&final_sc_args, args, sizeof(*args)); + final_sc_args.args = grpc_default_authority_add_if_not_present(args->args); + grpc_connector* connector = 
grpc_chttp2_connector_create(); + grpc_subchannel* s = grpc_subchannel_create(connector, &final_sc_args); + grpc_connector_unref(connector); + grpc_channel_args_destroy(const_cast(final_sc_args.args)); return s; } -static grpc_channel *client_channel_factory_create_channel( - grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, - const char *target, grpc_client_channel_type type, - const grpc_channel_args *args) { - if (target == NULL) { +static grpc_channel* client_channel_factory_create_channel( + grpc_client_channel_factory* cc_factory, const char* target, + grpc_client_channel_type type, const grpc_channel_args* args) { + if (target == nullptr) { gpr_log(GPR_ERROR, "cannot create channel with NULL target name"); - return NULL; + return nullptr; } // Add channel arg containing the server URI. + grpc_core::UniquePtr canonical_target = + grpc_core::ResolverRegistry::AddDefaultPrefixIfNeeded(target); grpc_arg arg = grpc_channel_arg_string_create( - (char *)GRPC_ARG_SERVER_URI, - grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target)); - const char *to_remove[] = {GRPC_ARG_SERVER_URI}; - grpc_channel_args *new_args = + const_cast(GRPC_ARG_SERVER_URI), canonical_target.get()); + const char* to_remove[] = {GRPC_ARG_SERVER_URI}; + grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1); - gpr_free(arg.value.string); - grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args, - GRPC_CLIENT_CHANNEL, NULL); - grpc_channel_args_destroy(exec_ctx, new_args); + grpc_channel* channel = + grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr); + grpc_channel_args_destroy(new_args); return channel; } @@ -79,26 +84,27 @@ static grpc_client_channel_factory client_channel_factory = { Asynchronously: - resolve target - connect to it (trying alternatives as presented) - perform handshakes */ -grpc_channel *grpc_insecure_channel_create(const char *target, - const grpc_channel_args *args, - void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +grpc_channel* grpc_insecure_channel_create(const char* target, + const grpc_channel_args* args, + void* reserved) { + grpc_core::ExecCtx exec_ctx; GRPC_API_TRACE( "grpc_insecure_channel_create(target=%s, args=%p, reserved=%p)", 3, (target, args, reserved)); - GPR_ASSERT(reserved == NULL); + GPR_ASSERT(reserved == nullptr); // Add channel arg containing the client channel factory. grpc_arg arg = grpc_client_channel_factory_create_channel_arg(&client_channel_factory); - grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1); + grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1); // Create channel. - grpc_channel *channel = client_channel_factory_create_channel( - &exec_ctx, &client_channel_factory, target, - GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args); + grpc_channel* channel = client_channel_factory_create_channel( + &client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, + new_args); // Clean up. - grpc_channel_args_destroy(&exec_ctx, new_args); - grpc_exec_ctx_finish(&exec_ctx); - return channel != NULL ? channel : grpc_lame_client_channel_create( - target, GRPC_STATUS_INTERNAL, - "Failed to create client channel"); + grpc_channel_args_destroy(new_args); + + return channel != nullptr ? 
channel + : grpc_lame_client_channel_create( + target, GRPC_STATUS_INTERNAL, + "Failed to create client channel"); } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c b/Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc similarity index 58% rename from Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc index f150aa7dc..b95c9dae5 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc @@ -16,10 +16,11 @@ * */ +#include + #include #include #include -#include #ifdef GPR_SUPPORT_CHANNELS_FROM_FD @@ -28,51 +29,51 @@ #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/iomgr/endpoint.h" -#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/tcp_client_posix.h" #include "src/core/lib/iomgr/tcp_posix.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/channel.h" #include "src/core/lib/transport/transport.h" -grpc_channel *grpc_insecure_channel_create_from_fd( - const char *target, int fd, const grpc_channel_args *args) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +grpc_channel* grpc_insecure_channel_create_from_fd( + const char* target, int fd, const grpc_channel_args* args) { + grpc_core::ExecCtx exec_ctx; GRPC_API_TRACE("grpc_insecure_channel_create(target=%p, fd=%d, args=%p)", 3, (target, fd, args)); grpc_arg default_authority_arg = grpc_channel_arg_string_create( - (char *)GRPC_ARG_DEFAULT_AUTHORITY, (char *)"test.authority"); - grpc_channel_args *final_args = + (char*)GRPC_ARG_DEFAULT_AUTHORITY, (char*)"test.authority"); + grpc_channel_args* final_args = grpc_channel_args_copy_and_add(args, &default_authority_arg, 1); int flags = fcntl(fd, F_GETFL, 0); GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0); - grpc_endpoint *client = grpc_tcp_client_create_from_fd( - &exec_ctx, grpc_fd_create(fd, "client"), args, "fd-client"); + grpc_endpoint* client = grpc_tcp_client_create_from_fd( + grpc_fd_create(fd, "client"), args, "fd-client"); - grpc_transport *transport = - grpc_create_chttp2_transport(&exec_ctx, final_args, client, true); + grpc_transport* transport = + grpc_create_chttp2_transport(final_args, client, true); GPR_ASSERT(transport); - grpc_channel *channel = grpc_channel_create( - &exec_ctx, target, final_args, GRPC_CLIENT_DIRECT_CHANNEL, transport); - grpc_channel_args_destroy(&exec_ctx, final_args); - grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL, NULL); + grpc_channel* channel = grpc_channel_create( + target, final_args, GRPC_CLIENT_DIRECT_CHANNEL, transport); + grpc_channel_args_destroy(final_args); + grpc_chttp2_transport_start_reading(transport, nullptr, nullptr); - grpc_exec_ctx_finish(&exec_ctx); + grpc_core::ExecCtx::Get()->Flush(); - return channel != NULL ? channel : grpc_lame_client_channel_create( - target, GRPC_STATUS_INTERNAL, - "Failed to create client channel"); + return channel != nullptr ? 
channel + : grpc_lame_client_channel_create( + target, GRPC_STATUS_INTERNAL, + "Failed to create client channel"); } #else // !GPR_SUPPORT_CHANNELS_FROM_FD -grpc_channel *grpc_insecure_channel_create_from_fd( - const char *target, int fd, const grpc_channel_args *args) { +grpc_channel* grpc_insecure_channel_create_from_fd( + const char* target, int fd, const grpc_channel_args* args) { GPR_ASSERT(0); - return NULL; + return nullptr; } #endif // GPR_SUPPORT_CHANNELS_FROM_FD diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c b/Sources/CgRPC/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c deleted file mode 100644 index d4580f15f..000000000 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c +++ /dev/null @@ -1,222 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -#include - -#include -#include - -#include "src/core/ext/filters/client_channel/client_channel.h" -#include "src/core/ext/filters/client_channel/resolver_registry.h" -#include "src/core/ext/filters/client_channel/uri_parser.h" -#include "src/core/ext/transport/chttp2/client/chttp2_connector.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/security/credentials/credentials.h" -#include "src/core/lib/security/transport/lb_targets_info.h" -#include "src/core/lib/security/transport/security_connector.h" -#include "src/core/lib/slice/slice_hash_table.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/surface/api_trace.h" -#include "src/core/lib/surface/channel.h" - -static void client_channel_factory_ref( - grpc_client_channel_factory *cc_factory) {} - -static void client_channel_factory_unref( - grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory) {} - -static grpc_subchannel_args *get_secure_naming_subchannel_args( - grpc_exec_ctx *exec_ctx, const grpc_subchannel_args *args) { - grpc_channel_credentials *channel_credentials = - grpc_channel_credentials_find_in_args(args->args); - if (channel_credentials == NULL) { - gpr_log(GPR_ERROR, - "Can't create subchannel: channel credentials missing for secure " - "channel."); - return NULL; - } - // Make sure security connector does not already exist in args. - if (grpc_security_connector_find_in_args(args->args) != NULL) { - gpr_log(GPR_ERROR, - "Can't create subchannel: security connector already present in " - "channel args."); - return NULL; - } - // To which address are we connecting? By default, use the server URI. 
- const grpc_arg *server_uri_arg = - grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI); - GPR_ASSERT(server_uri_arg != NULL); - GPR_ASSERT(server_uri_arg->type == GRPC_ARG_STRING); - const char *server_uri_str = server_uri_arg->value.string; - GPR_ASSERT(server_uri_str != NULL); - grpc_uri *server_uri = - grpc_uri_parse(exec_ctx, server_uri_str, true /* supress errors */); - GPR_ASSERT(server_uri != NULL); - const char *server_uri_path; - server_uri_path = - server_uri->path[0] == '/' ? server_uri->path + 1 : server_uri->path; - const grpc_slice_hash_table *targets_info = - grpc_lb_targets_info_find_in_args(args->args); - char *target_name_to_check = NULL; - if (targets_info != NULL) { // LB channel - // Find the balancer name for the target. - const char *target_uri_str = - grpc_get_subchannel_address_uri_arg(args->args); - grpc_uri *target_uri = - grpc_uri_parse(exec_ctx, target_uri_str, false /* suppress errors */); - GPR_ASSERT(target_uri != NULL); - if (target_uri->path[0] != '\0') { // "path" may be empty - const grpc_slice key = grpc_slice_from_static_string( - target_uri->path[0] == '/' ? target_uri->path + 1 : target_uri->path); - const char *value = grpc_slice_hash_table_get(targets_info, key); - if (value != NULL) target_name_to_check = gpr_strdup(value); - grpc_slice_unref_internal(exec_ctx, key); - } - if (target_name_to_check == NULL) { - // If the target name to check hasn't already been set, fall back to using - // SERVER_URI - target_name_to_check = gpr_strdup(server_uri_path); - } - grpc_uri_destroy(target_uri); - } else { // regular channel: the secure name is the original server URI. - target_name_to_check = gpr_strdup(server_uri_path); - } - grpc_uri_destroy(server_uri); - GPR_ASSERT(target_name_to_check != NULL); - grpc_channel_security_connector *subchannel_security_connector = NULL; - // Create the security connector using the credentials and target name. - grpc_channel_args *new_args_from_connector = NULL; - const grpc_security_status security_status = - grpc_channel_credentials_create_security_connector( - exec_ctx, channel_credentials, target_name_to_check, args->args, - &subchannel_security_connector, &new_args_from_connector); - if (security_status != GRPC_SECURITY_OK) { - gpr_log(GPR_ERROR, - "Failed to create secure subchannel for secure name '%s'", - target_name_to_check); - gpr_free(target_name_to_check); - return NULL; - } - gpr_free(target_name_to_check); - grpc_arg new_security_connector_arg = - grpc_security_connector_to_arg(&subchannel_security_connector->base); - - grpc_channel_args *new_args = grpc_channel_args_copy_and_add( - new_args_from_connector != NULL ? 
new_args_from_connector : args->args, - &new_security_connector_arg, 1); - GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &subchannel_security_connector->base, - "lb_channel_create"); - if (new_args_from_connector != NULL) { - grpc_channel_args_destroy(exec_ctx, new_args_from_connector); - } - grpc_subchannel_args *final_sc_args = gpr_malloc(sizeof(*final_sc_args)); - memcpy(final_sc_args, args, sizeof(*args)); - final_sc_args->args = new_args; - return final_sc_args; -} - -static grpc_subchannel *client_channel_factory_create_subchannel( - grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, - const grpc_subchannel_args *args) { - grpc_subchannel_args *subchannel_args = - get_secure_naming_subchannel_args(exec_ctx, args); - if (subchannel_args == NULL) { - gpr_log( - GPR_ERROR, - "Failed to create subchannel arguments during subchannel creation."); - return NULL; - } - grpc_connector *connector = grpc_chttp2_connector_create(); - grpc_subchannel *s = - grpc_subchannel_create(exec_ctx, connector, subchannel_args); - grpc_connector_unref(exec_ctx, connector); - grpc_channel_args_destroy(exec_ctx, - (grpc_channel_args *)subchannel_args->args); - gpr_free(subchannel_args); - return s; -} - -static grpc_channel *client_channel_factory_create_channel( - grpc_exec_ctx *exec_ctx, grpc_client_channel_factory *cc_factory, - const char *target, grpc_client_channel_type type, - const grpc_channel_args *args) { - if (target == NULL) { - gpr_log(GPR_ERROR, "cannot create channel with NULL target name"); - return NULL; - } - // Add channel arg containing the server URI. - grpc_arg arg = grpc_channel_arg_string_create( - GRPC_ARG_SERVER_URI, - grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target)); - const char *to_remove[] = {GRPC_ARG_SERVER_URI}; - grpc_channel_args *new_args = - grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1); - gpr_free(arg.value.string); - grpc_channel *channel = grpc_channel_create(exec_ctx, target, new_args, - GRPC_CLIENT_CHANNEL, NULL); - grpc_channel_args_destroy(exec_ctx, new_args); - return channel; -} - -static const grpc_client_channel_factory_vtable client_channel_factory_vtable = - {client_channel_factory_ref, client_channel_factory_unref, - client_channel_factory_create_subchannel, - client_channel_factory_create_channel}; - -static grpc_client_channel_factory client_channel_factory = { - &client_channel_factory_vtable}; - -// Create a secure client channel: -// Asynchronously: - resolve target -// - connect to it (trying alternatives as presented) -// - perform handshakes -grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds, - const char *target, - const grpc_channel_args *args, - void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_API_TRACE( - "grpc_secure_channel_create(creds=%p, target=%s, args=%p, " - "reserved=%p)", - 4, ((void *)creds, target, (void *)args, (void *)reserved)); - GPR_ASSERT(reserved == NULL); - grpc_channel *channel = NULL; - if (creds != NULL) { - // Add channel args containing the client channel factory and channel - // credentials. - grpc_arg args_to_add[] = { - grpc_client_channel_factory_create_channel_arg(&client_channel_factory), - grpc_channel_credentials_to_arg(creds)}; - grpc_channel_args *new_args = grpc_channel_args_copy_and_add( - args, args_to_add, GPR_ARRAY_SIZE(args_to_add)); - // Create channel. 
- channel = client_channel_factory_create_channel( - &exec_ctx, &client_channel_factory, target, - GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args); - // Clean up. - grpc_channel_args_destroy(&exec_ctx, new_args); - grpc_exec_ctx_finish(&exec_ctx); - } - return channel != NULL ? channel - : grpc_lame_client_channel_create( - target, GRPC_STATUS_INTERNAL, - "Failed to create secure client channel"); -} diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc b/Sources/CgRPC/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc new file mode 100644 index 000000000..5ce73a95d --- /dev/null +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc @@ -0,0 +1,230 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include + +#include + +#include +#include + +#include "src/core/ext/filters/client_channel/client_channel.h" +#include "src/core/ext/filters/client_channel/resolver_registry.h" +#include "src/core/ext/filters/client_channel/uri_parser.h" +#include "src/core/ext/transport/chttp2/client/chttp2_connector.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/security/credentials/credentials.h" +#include "src/core/lib/security/security_connector/security_connector.h" +#include "src/core/lib/security/transport/target_authority_table.h" +#include "src/core/lib/slice/slice_hash_table.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/surface/api_trace.h" +#include "src/core/lib/surface/channel.h" + +static void client_channel_factory_ref( + grpc_client_channel_factory* cc_factory) {} + +static void client_channel_factory_unref( + grpc_client_channel_factory* cc_factory) {} + +static grpc_subchannel_args* get_secure_naming_subchannel_args( + const grpc_subchannel_args* args) { + grpc_channel_credentials* channel_credentials = + grpc_channel_credentials_find_in_args(args->args); + if (channel_credentials == nullptr) { + gpr_log(GPR_ERROR, + "Can't create subchannel: channel credentials missing for secure " + "channel."); + return nullptr; + } + // Make sure security connector does not already exist in args. + if (grpc_security_connector_find_in_args(args->args) != nullptr) { + gpr_log(GPR_ERROR, + "Can't create subchannel: security connector already present in " + "channel args."); + return nullptr; + } + // To which address are we connecting? By default, use the server URI. 
+ const grpc_arg* server_uri_arg = + grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI); + const char* server_uri_str = grpc_channel_arg_get_string(server_uri_arg); + GPR_ASSERT(server_uri_str != nullptr); + grpc_uri* server_uri = + grpc_uri_parse(server_uri_str, true /* supress errors */); + GPR_ASSERT(server_uri != nullptr); + const grpc_core::TargetAuthorityTable* target_authority_table = + grpc_core::FindTargetAuthorityTableInArgs(args->args); + grpc_core::UniquePtr authority; + if (target_authority_table != nullptr) { + // Find the authority for the target. + const char* target_uri_str = + grpc_get_subchannel_address_uri_arg(args->args); + grpc_uri* target_uri = + grpc_uri_parse(target_uri_str, false /* suppress errors */); + GPR_ASSERT(target_uri != nullptr); + if (target_uri->path[0] != '\0') { // "path" may be empty + const grpc_slice key = grpc_slice_from_static_string( + target_uri->path[0] == '/' ? target_uri->path + 1 : target_uri->path); + const grpc_core::UniquePtr* value = + target_authority_table->Get(key); + if (value != nullptr) authority.reset(gpr_strdup(value->get())); + grpc_slice_unref_internal(key); + } + grpc_uri_destroy(target_uri); + } + // If the authority hasn't already been set (either because no target + // authority table was present or because the target was not present + // in the table), fall back to using the original server URI. + if (authority == nullptr) { + authority = + grpc_core::ResolverRegistry::GetDefaultAuthority(server_uri_str); + } + grpc_arg args_to_add[2]; + size_t num_args_to_add = 0; + if (grpc_channel_args_find(args->args, GRPC_ARG_DEFAULT_AUTHORITY) == + nullptr) { + // If the channel args don't already contain GRPC_ARG_DEFAULT_AUTHORITY, add + // the arg, setting it to the value just obtained. + args_to_add[num_args_to_add++] = grpc_channel_arg_string_create( + const_cast(GRPC_ARG_DEFAULT_AUTHORITY), authority.get()); + } + grpc_channel_args* args_with_authority = + grpc_channel_args_copy_and_add(args->args, args_to_add, num_args_to_add); + grpc_uri_destroy(server_uri); + grpc_channel_security_connector* subchannel_security_connector = nullptr; + // Create the security connector using the credentials and target name. + grpc_channel_args* new_args_from_connector = nullptr; + const grpc_security_status security_status = + grpc_channel_credentials_create_security_connector( + channel_credentials, authority.get(), args_with_authority, + &subchannel_security_connector, &new_args_from_connector); + if (security_status != GRPC_SECURITY_OK) { + gpr_log(GPR_ERROR, + "Failed to create secure subchannel for secure name '%s'", + authority.get()); + grpc_channel_args_destroy(args_with_authority); + return nullptr; + } + grpc_arg new_security_connector_arg = + grpc_security_connector_to_arg(&subchannel_security_connector->base); + + grpc_channel_args* new_args = grpc_channel_args_copy_and_add( + new_args_from_connector != nullptr ? 
new_args_from_connector + : args_with_authority, + &new_security_connector_arg, 1); + + GRPC_SECURITY_CONNECTOR_UNREF(&subchannel_security_connector->base, + "lb_channel_create"); + if (new_args_from_connector != nullptr) { + grpc_channel_args_destroy(new_args_from_connector); + } + grpc_channel_args_destroy(args_with_authority); + grpc_subchannel_args* final_sc_args = + static_cast(gpr_malloc(sizeof(*final_sc_args))); + memcpy(final_sc_args, args, sizeof(*args)); + final_sc_args->args = new_args; + return final_sc_args; +} + +static grpc_subchannel* client_channel_factory_create_subchannel( + grpc_client_channel_factory* cc_factory, const grpc_subchannel_args* args) { + grpc_subchannel_args* subchannel_args = + get_secure_naming_subchannel_args(args); + if (subchannel_args == nullptr) { + gpr_log( + GPR_ERROR, + "Failed to create subchannel arguments during subchannel creation."); + return nullptr; + } + grpc_connector* connector = grpc_chttp2_connector_create(); + grpc_subchannel* s = grpc_subchannel_create(connector, subchannel_args); + grpc_connector_unref(connector); + grpc_channel_args_destroy( + const_cast(subchannel_args->args)); + gpr_free(subchannel_args); + return s; +} + +static grpc_channel* client_channel_factory_create_channel( + grpc_client_channel_factory* cc_factory, const char* target, + grpc_client_channel_type type, const grpc_channel_args* args) { + if (target == nullptr) { + gpr_log(GPR_ERROR, "cannot create channel with NULL target name"); + return nullptr; + } + // Add channel arg containing the server URI. + grpc_core::UniquePtr canonical_target = + grpc_core::ResolverRegistry::AddDefaultPrefixIfNeeded(target); + grpc_arg arg = grpc_channel_arg_string_create((char*)GRPC_ARG_SERVER_URI, + canonical_target.get()); + const char* to_remove[] = {GRPC_ARG_SERVER_URI}; + grpc_channel_args* new_args = + grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1); + grpc_channel* channel = + grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr); + grpc_channel_args_destroy(new_args); + return channel; +} + +static const grpc_client_channel_factory_vtable client_channel_factory_vtable = + {client_channel_factory_ref, client_channel_factory_unref, + client_channel_factory_create_subchannel, + client_channel_factory_create_channel}; + +static grpc_client_channel_factory client_channel_factory = { + &client_channel_factory_vtable}; + +// Create a secure client channel: +// Asynchronously: - resolve target +// - connect to it (trying alternatives as presented) +// - perform handshakes +grpc_channel* grpc_secure_channel_create(grpc_channel_credentials* creds, + const char* target, + const grpc_channel_args* args, + void* reserved) { + grpc_core::ExecCtx exec_ctx; + GRPC_API_TRACE( + "grpc_secure_channel_create(creds=%p, target=%s, args=%p, " + "reserved=%p)", + 4, ((void*)creds, target, (void*)args, (void*)reserved)); + GPR_ASSERT(reserved == nullptr); + grpc_channel* channel = nullptr; + if (creds != nullptr) { + // Add channel args containing the client channel factory and channel + // credentials. + grpc_arg args_to_add[] = { + grpc_client_channel_factory_create_channel_arg(&client_channel_factory), + grpc_channel_credentials_to_arg(creds)}; + grpc_channel_args* new_args = grpc_channel_args_copy_and_add( + args, args_to_add, GPR_ARRAY_SIZE(args_to_add)); + // Create channel. + channel = client_channel_factory_create_channel( + &client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, + new_args); + // Clean up. 
+ grpc_channel_args_destroy(new_args); + } + return channel != nullptr ? channel + : grpc_lame_client_channel_create( + target, GRPC_STATUS_INTERNAL, + "Failed to create secure client channel"); +} diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.c b/Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.cc similarity index 54% rename from Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.cc index 57360785a..687cc483f 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.cc @@ -16,10 +16,13 @@ * */ +#include + #include "src/core/ext/transport/chttp2/server/chttp2_server.h" #include +#include #include #include @@ -27,7 +30,6 @@ #include #include #include -#include #include "src/core/ext/filters/http/server/http_server_filter.h" #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" @@ -43,228 +45,222 @@ #include "src/core/lib/surface/server.h" typedef struct { - grpc_server *server; - grpc_tcp_server *tcp_server; - grpc_channel_args *args; + grpc_server* server; + grpc_tcp_server* tcp_server; + grpc_channel_args* args; gpr_mu mu; bool shutdown; grpc_closure tcp_server_shutdown_complete; - grpc_closure *server_destroy_listener_done; - grpc_handshake_manager *pending_handshake_mgrs; + grpc_closure* server_destroy_listener_done; + grpc_handshake_manager* pending_handshake_mgrs; } server_state; typedef struct { gpr_refcount refs; - server_state *svr_state; - grpc_pollset *accepting_pollset; - grpc_tcp_server_acceptor *acceptor; - grpc_handshake_manager *handshake_mgr; + server_state* svr_state; + grpc_pollset* accepting_pollset; + grpc_tcp_server_acceptor* acceptor; + grpc_handshake_manager* handshake_mgr; // State for enforcing handshake timeout on receiving HTTP/2 settings. - grpc_chttp2_transport *transport; - gpr_timespec deadline; + grpc_chttp2_transport* transport; + grpc_millis deadline; grpc_timer timer; grpc_closure on_timeout; grpc_closure on_receive_settings; } server_connection_state; static void server_connection_state_unref( - grpc_exec_ctx *exec_ctx, server_connection_state *connection_state) { + server_connection_state* connection_state) { if (gpr_unref(&connection_state->refs)) { - if (connection_state->transport != NULL) { - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, connection_state->transport, + if (connection_state->transport != nullptr) { + GRPC_CHTTP2_UNREF_TRANSPORT(connection_state->transport, "receive settings timeout"); } gpr_free(connection_state); } } -static void on_timeout(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - server_connection_state *connection_state = (server_connection_state *)arg; +static void on_timeout(void* arg, grpc_error* error) { + server_connection_state* connection_state = + static_cast(arg); // Note that we may be called with GRPC_ERROR_NONE when the timer fires // or with an error indicating that the timer system is being shut down. 
if (error != GRPC_ERROR_CANCELLED) { - grpc_transport_op *op = grpc_make_transport_op(NULL); + grpc_transport_op* op = grpc_make_transport_op(nullptr); op->disconnect_with_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Did not receive HTTP/2 settings before handshake timeout"); - grpc_transport_perform_op(exec_ctx, &connection_state->transport->base, op); + grpc_transport_perform_op(&connection_state->transport->base, op); } - server_connection_state_unref(exec_ctx, connection_state); + server_connection_state_unref(connection_state); } -static void on_receive_settings(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - server_connection_state *connection_state = (server_connection_state *)arg; +static void on_receive_settings(void* arg, grpc_error* error) { + server_connection_state* connection_state = + static_cast(arg); if (error == GRPC_ERROR_NONE) { - grpc_timer_cancel(exec_ctx, &connection_state->timer); + grpc_timer_cancel(&connection_state->timer); } - server_connection_state_unref(exec_ctx, connection_state); + server_connection_state_unref(connection_state); } -static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_handshaker_args *args = (grpc_handshaker_args *)arg; - server_connection_state *connection_state = - (server_connection_state *)args->user_data; +static void on_handshake_done(void* arg, grpc_error* error) { + grpc_handshaker_args* args = static_cast(arg); + server_connection_state* connection_state = + static_cast(args->user_data); gpr_mu_lock(&connection_state->svr_state->mu); if (error != GRPC_ERROR_NONE || connection_state->svr_state->shutdown) { - const char *error_str = grpc_error_string(error); + const char* error_str = grpc_error_string(error); gpr_log(GPR_DEBUG, "Handshaking failed: %s", error_str); - if (error == GRPC_ERROR_NONE && args->endpoint != NULL) { + if (error == GRPC_ERROR_NONE && args->endpoint != nullptr) { // We were shut down after handshaking completed successfully, so // destroy the endpoint here. // TODO(ctiller): It is currently necessary to shutdown endpoints // before destroying them, even if we know that there are no // pending read/write callbacks. This should be fixed, at which // point this can be removed. - grpc_endpoint_shutdown(exec_ctx, args->endpoint, GRPC_ERROR_NONE); - grpc_endpoint_destroy(exec_ctx, args->endpoint); - grpc_channel_args_destroy(exec_ctx, args->args); - grpc_slice_buffer_destroy_internal(exec_ctx, args->read_buffer); + grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_NONE); + grpc_endpoint_destroy(args->endpoint); + grpc_channel_args_destroy(args->args); + grpc_slice_buffer_destroy_internal(args->read_buffer); gpr_free(args->read_buffer); } } else { // If the handshaking succeeded but there is no endpoint, then the // handshaker may have handed off the connection to some external // code, so we can just clean up here without creating a transport. - if (args->endpoint != NULL) { - grpc_transport *transport = grpc_create_chttp2_transport( - exec_ctx, args->args, args->endpoint, false); + if (args->endpoint != nullptr) { + grpc_transport* transport = + grpc_create_chttp2_transport(args->args, args->endpoint, false); grpc_server_setup_transport( - exec_ctx, connection_state->svr_state->server, transport, + connection_state->svr_state->server, transport, connection_state->accepting_pollset, args->args); // Use notify_on_receive_settings callback to enforce the // handshake deadline. 
- connection_state->transport = (grpc_chttp2_transport *)transport; + connection_state->transport = + reinterpret_cast(transport); gpr_ref(&connection_state->refs); GRPC_CLOSURE_INIT(&connection_state->on_receive_settings, on_receive_settings, connection_state, grpc_schedule_on_exec_ctx); grpc_chttp2_transport_start_reading( - exec_ctx, transport, args->read_buffer, - &connection_state->on_receive_settings); - grpc_channel_args_destroy(exec_ctx, args->args); + transport, args->read_buffer, &connection_state->on_receive_settings); + grpc_channel_args_destroy(args->args); gpr_ref(&connection_state->refs); - GRPC_CHTTP2_REF_TRANSPORT((grpc_chttp2_transport *)transport, + GRPC_CHTTP2_REF_TRANSPORT((grpc_chttp2_transport*)transport, "receive settings timeout"); GRPC_CLOSURE_INIT(&connection_state->on_timeout, on_timeout, connection_state, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &connection_state->timer, - connection_state->deadline, &connection_state->on_timeout, - gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&connection_state->timer, connection_state->deadline, + &connection_state->on_timeout); } } grpc_handshake_manager_pending_list_remove( &connection_state->svr_state->pending_handshake_mgrs, connection_state->handshake_mgr); gpr_mu_unlock(&connection_state->svr_state->mu); - grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr); + grpc_handshake_manager_destroy(connection_state->handshake_mgr); gpr_free(connection_state->acceptor); - grpc_tcp_server_unref(exec_ctx, connection_state->svr_state->tcp_server); - server_connection_state_unref(exec_ctx, connection_state); + grpc_tcp_server_unref(connection_state->svr_state->tcp_server); + server_connection_state_unref(connection_state); } -static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp, - grpc_pollset *accepting_pollset, - grpc_tcp_server_acceptor *acceptor) { - server_state *state = (server_state *)arg; +static void on_accept(void* arg, grpc_endpoint* tcp, + grpc_pollset* accepting_pollset, + grpc_tcp_server_acceptor* acceptor) { + server_state* state = static_cast(arg); gpr_mu_lock(&state->mu); if (state->shutdown) { gpr_mu_unlock(&state->mu); - grpc_endpoint_shutdown(exec_ctx, tcp, GRPC_ERROR_NONE); - grpc_endpoint_destroy(exec_ctx, tcp); + grpc_endpoint_shutdown(tcp, GRPC_ERROR_NONE); + grpc_endpoint_destroy(tcp); gpr_free(acceptor); return; } - grpc_handshake_manager *handshake_mgr = grpc_handshake_manager_create(); + grpc_handshake_manager* handshake_mgr = grpc_handshake_manager_create(); grpc_handshake_manager_pending_list_add(&state->pending_handshake_mgrs, handshake_mgr); gpr_mu_unlock(&state->mu); grpc_tcp_server_ref(state->tcp_server); - server_connection_state *connection_state = - (server_connection_state *)gpr_zalloc(sizeof(*connection_state)); + server_connection_state* connection_state = + static_cast( + gpr_zalloc(sizeof(*connection_state))); gpr_ref_init(&connection_state->refs, 1); connection_state->svr_state = state; connection_state->accepting_pollset = accepting_pollset; connection_state->acceptor = acceptor; connection_state->handshake_mgr = handshake_mgr; - grpc_handshakers_add(exec_ctx, HANDSHAKER_SERVER, state->args, + grpc_handshakers_add(HANDSHAKER_SERVER, state->args, connection_state->handshake_mgr); - const grpc_arg *timeout_arg = + const grpc_arg* timeout_arg = grpc_channel_args_find(state->args, GRPC_ARG_SERVER_HANDSHAKE_TIMEOUT_MS); - connection_state->deadline = gpr_time_add( - gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_millis( - 
grpc_channel_arg_get_integer( - timeout_arg, - (grpc_integer_options){120 * GPR_MS_PER_SEC, 1, INT_MAX}), - GPR_TIMESPAN)); - grpc_handshake_manager_do_handshake(exec_ctx, connection_state->handshake_mgr, - tcp, state->args, - connection_state->deadline, acceptor, - on_handshake_done, connection_state); + connection_state->deadline = + grpc_core::ExecCtx::Get()->Now() + + grpc_channel_arg_get_integer(timeout_arg, + {120 * GPR_MS_PER_SEC, 1, INT_MAX}); + grpc_handshake_manager_do_handshake( + connection_state->handshake_mgr, nullptr /* interested_parties */, tcp, + state->args, connection_state->deadline, acceptor, on_handshake_done, + connection_state); } /* Server callback: start listening on our ports */ -static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server, - void *arg, grpc_pollset **pollsets, +static void server_start_listener(grpc_server* server, void* arg, + grpc_pollset** pollsets, size_t pollset_count) { - server_state *state = (server_state *)arg; + server_state* state = static_cast(arg); gpr_mu_lock(&state->mu); state->shutdown = false; gpr_mu_unlock(&state->mu); - grpc_tcp_server_start(exec_ctx, state->tcp_server, pollsets, pollset_count, - on_accept, state); + grpc_tcp_server_start(state->tcp_server, pollsets, pollset_count, on_accept, + state); } -static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - server_state *state = (server_state *)arg; +static void tcp_server_shutdown_complete(void* arg, grpc_error* error) { + server_state* state = static_cast(arg); /* ensure all threads have unlocked */ gpr_mu_lock(&state->mu); - grpc_closure *destroy_done = state->server_destroy_listener_done; + grpc_closure* destroy_done = state->server_destroy_listener_done; GPR_ASSERT(state->shutdown); grpc_handshake_manager_pending_list_shutdown_all( - exec_ctx, state->pending_handshake_mgrs, GRPC_ERROR_REF(error)); + state->pending_handshake_mgrs, GRPC_ERROR_REF(error)); gpr_mu_unlock(&state->mu); // Flush queued work before destroying handshaker factory, since that // may do a synchronous unref. 
- grpc_exec_ctx_flush(exec_ctx); - if (destroy_done != NULL) { - destroy_done->cb(exec_ctx, destroy_done->cb_arg, GRPC_ERROR_REF(error)); - grpc_exec_ctx_flush(exec_ctx); + grpc_core::ExecCtx::Get()->Flush(); + if (destroy_done != nullptr) { + destroy_done->cb(destroy_done->cb_arg, GRPC_ERROR_REF(error)); + grpc_core::ExecCtx::Get()->Flush(); } - grpc_channel_args_destroy(exec_ctx, state->args); + grpc_channel_args_destroy(state->args); gpr_mu_destroy(&state->mu); gpr_free(state); } /* Server callback: destroy the tcp listener (so we don't generate further callbacks) */ -static void server_destroy_listener(grpc_exec_ctx *exec_ctx, - grpc_server *server, void *arg, - grpc_closure *destroy_done) { - server_state *state = (server_state *)arg; +static void server_destroy_listener(grpc_server* server, void* arg, + grpc_closure* destroy_done) { + server_state* state = static_cast(arg); gpr_mu_lock(&state->mu); state->shutdown = true; state->server_destroy_listener_done = destroy_done; - grpc_tcp_server *tcp_server = state->tcp_server; + grpc_tcp_server* tcp_server = state->tcp_server; gpr_mu_unlock(&state->mu); - grpc_tcp_server_shutdown_listeners(exec_ctx, tcp_server); - grpc_tcp_server_unref(exec_ctx, tcp_server); + grpc_tcp_server_shutdown_listeners(tcp_server); + grpc_tcp_server_unref(tcp_server); } -grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx, - grpc_server *server, const char *addr, - grpc_channel_args *args, - int *port_num) { - grpc_resolved_addresses *resolved = NULL; - grpc_tcp_server *tcp_server = NULL; +grpc_error* grpc_chttp2_server_add_port(grpc_server* server, const char* addr, + grpc_channel_args* args, + int* port_num) { + grpc_resolved_addresses* resolved = nullptr; + grpc_tcp_server* tcp_server = nullptr; size_t i; size_t count = 0; int port_temp; - grpc_error *err = GRPC_ERROR_NONE; - server_state *state = NULL; - grpc_error **errors = NULL; + grpc_error* err = GRPC_ERROR_NONE; + server_state* state = nullptr; + grpc_error** errors = nullptr; size_t naddrs = 0; *port_num = -1; @@ -274,12 +270,12 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx, if (err != GRPC_ERROR_NONE) { goto error; } - state = (server_state *)gpr_zalloc(sizeof(*state)); + state = static_cast(gpr_zalloc(sizeof(*state))); GRPC_CLOSURE_INIT(&state->tcp_server_shutdown_complete, tcp_server_shutdown_complete, state, grpc_schedule_on_exec_ctx); - err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete, - args, &tcp_server); + err = grpc_tcp_server_create(&state->tcp_server_shutdown_complete, args, + &tcp_server); if (err != GRPC_ERROR_NONE) { goto error; } @@ -291,7 +287,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx, gpr_mu_init(&state->mu); naddrs = resolved->naddrs; - errors = (grpc_error **)gpr_malloc(sizeof(*errors) * naddrs); + errors = static_cast(gpr_malloc(sizeof(*errors) * naddrs)); for (i = 0; i < naddrs; i++) { errors[i] = grpc_tcp_server_add_port(tcp_server, &resolved->addrs[i], &port_temp); @@ -305,21 +301,22 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx, } } if (count == 0) { - char *msg; + char* msg; gpr_asprintf(&msg, "No address added out of total %" PRIuPTR " resolved", naddrs); err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs); gpr_free(msg); goto error; } else if (count != naddrs) { - char *msg; - gpr_asprintf(&msg, "Only %" PRIuPTR - " addresses added out of total %" PRIuPTR " resolved", + char* msg; + gpr_asprintf(&msg, + "Only %" PRIuPTR " addresses added out of 
total %" PRIuPTR + " resolved", count, naddrs); err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs); gpr_free(msg); - const char *warning_message = grpc_error_string(err); + const char* warning_message = grpc_error_string(err); gpr_log(GPR_INFO, "WARNING: %s", warning_message); /* we managed to bind some addresses: continue */ @@ -327,7 +324,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx, grpc_resolved_addresses_destroy(resolved); /* Register with the server only upon success */ - grpc_server_add_listener(exec_ctx, server, state, server_start_listener, + grpc_server_add_listener(server, state, server_start_listener, server_destroy_listener); goto done; @@ -338,15 +335,15 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx, grpc_resolved_addresses_destroy(resolved); } if (tcp_server) { - grpc_tcp_server_unref(exec_ctx, tcp_server); + grpc_tcp_server_unref(tcp_server); } else { - grpc_channel_args_destroy(exec_ctx, args); + grpc_channel_args_destroy(args); gpr_free(state); } *port_num = 0; done: - if (errors != NULL) { + if (errors != nullptr) { for (i = 0; i < naddrs; i++) { GRPC_ERROR_UNREF(errors[i]); } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.h b/Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.h index ed968496f..6e51001b5 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/server/chttp2_server.h @@ -19,14 +19,15 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H +#include + #include -#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/error.h" /// Adds a port to \a server. Sets \a port_num to the port number. /// Takes ownership of \a args. 
-grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx, - grpc_server *server, const char *addr, - grpc_channel_args *args, int *port_num); +grpc_error* grpc_chttp2_server_add_port(grpc_server* server, const char* addr, + grpc_channel_args* args, int* port_num); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c b/Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc similarity index 80% rename from Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc index d42b2d123..99f18cdf3 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -25,20 +27,19 @@ #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/server.h" -int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +int grpc_server_add_insecure_http2_port(grpc_server* server, const char* addr) { + grpc_core::ExecCtx exec_ctx; int port_num = 0; GRPC_API_TRACE("grpc_server_add_insecure_http2_port(server=%p, addr=%s)", 2, (server, addr)); - grpc_error *err = grpc_chttp2_server_add_port( - &exec_ctx, server, addr, + grpc_error* err = grpc_chttp2_server_add_port( + server, addr, grpc_channel_args_copy(grpc_server_get_channel_args(server)), &port_num); if (err != GRPC_ERROR_NONE) { - const char *msg = grpc_error_string(err); + const char* msg = grpc_error_string(err); gpr_log(GPR_ERROR, "%s", msg); GRPC_ERROR_UNREF(err); } - grpc_exec_ctx_finish(&exec_ctx); return port_num; } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c b/Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc similarity index 60% rename from Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc index 98b15bcb4..371e46381 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc @@ -16,10 +16,11 @@ * */ +#include + #include #include #include -#include #ifdef GPR_SUPPORT_CHANNELS_FROM_FD @@ -34,41 +35,39 @@ #include "src/core/lib/surface/completion_queue.h" #include "src/core/lib/surface/server.h" -void grpc_server_add_insecure_channel_from_fd(grpc_server *server, - void *reserved, int fd) { - GPR_ASSERT(reserved == NULL); +void grpc_server_add_insecure_channel_from_fd(grpc_server* server, + void* reserved, int fd) { + GPR_ASSERT(reserved == nullptr); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - char *name; + grpc_core::ExecCtx exec_ctx; + char* name; gpr_asprintf(&name, "fd:%d", fd); - grpc_endpoint *server_endpoint = - grpc_tcp_create(&exec_ctx, grpc_fd_create(fd, name), - grpc_server_get_channel_args(server), name); + grpc_endpoint* server_endpoint = grpc_tcp_create( + grpc_fd_create(fd, name), grpc_server_get_channel_args(server), name); gpr_free(name); - const grpc_channel_args *server_args = grpc_server_get_channel_args(server); - grpc_transport *transport = grpc_create_chttp2_transport( - &exec_ctx, server_args, server_endpoint, false /* 
is_client */); + const grpc_channel_args* server_args = grpc_server_get_channel_args(server); + grpc_transport* transport = grpc_create_chttp2_transport( + server_args, server_endpoint, false /* is_client */); - grpc_pollset **pollsets; + grpc_pollset** pollsets; size_t num_pollsets = 0; grpc_server_get_pollsets(server, &pollsets, &num_pollsets); for (size_t i = 0; i < num_pollsets; i++) { - grpc_endpoint_add_to_pollset(&exec_ctx, server_endpoint, pollsets[i]); + grpc_endpoint_add_to_pollset(server_endpoint, pollsets[i]); } - grpc_server_setup_transport(&exec_ctx, server, transport, NULL, server_args); - grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL, NULL); - grpc_exec_ctx_finish(&exec_ctx); + grpc_server_setup_transport(server, transport, nullptr, server_args); + grpc_chttp2_transport_start_reading(transport, nullptr, nullptr); } #else // !GPR_SUPPORT_CHANNELS_FROM_FD -void grpc_server_add_insecure_channel_from_fd(grpc_server *server, - void *reserved, int fd) { +void grpc_server_add_insecure_channel_from_fd(grpc_server* server, + void* reserved, int fd) { GPR_ASSERT(0); } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c b/Sources/CgRPC/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc similarity index 77% rename from Sources/CgRPC/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc index 5ad63aaf1..6689a17da 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -34,26 +36,27 @@ #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/server.h" -int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr, - grpc_server_credentials *creds) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_error *err = GRPC_ERROR_NONE; - grpc_server_security_connector *sc = NULL; +int grpc_server_add_secure_http2_port(grpc_server* server, const char* addr, + grpc_server_credentials* creds) { + grpc_core::ExecCtx exec_ctx; + grpc_error* err = GRPC_ERROR_NONE; + grpc_server_security_connector* sc = nullptr; int port_num = 0; + grpc_security_status status; + grpc_channel_args* args = nullptr; GRPC_API_TRACE( "grpc_server_add_secure_http2_port(" "server=%p, addr=%s, creds=%p)", 3, (server, addr, creds)); // Create security context. - if (creds == NULL) { + if (creds == nullptr) { err = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "No credentials specified for secure server port (creds==NULL)"); goto done; } - grpc_security_status status = - grpc_server_credentials_create_security_connector(&exec_ctx, creds, &sc); + status = grpc_server_credentials_create_security_connector(creds, &sc); if (status != GRPC_SECURITY_OK) { - char *msg; + char* msg; gpr_asprintf(&msg, "Unable to create secure server with credentials of type %s.", creds->type); @@ -66,18 +69,18 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr, grpc_arg args_to_add[2]; args_to_add[0] = grpc_server_credentials_to_arg(creds); args_to_add[1] = grpc_security_connector_to_arg(&sc->base); - grpc_channel_args *args = + args = grpc_channel_args_copy_and_add(grpc_server_get_channel_args(server), args_to_add, GPR_ARRAY_SIZE(args_to_add)); // Add server port. 
- err = grpc_chttp2_server_add_port(&exec_ctx, server, addr, args, &port_num); + err = grpc_chttp2_server_add_port(server, addr, args, &port_num); done: - if (sc != NULL) { - GRPC_SECURITY_CONNECTOR_UNREF(&exec_ctx, &sc->base, "server"); + if (sc != nullptr) { + GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "server"); } - grpc_exec_ctx_finish(&exec_ctx); + if (err != GRPC_ERROR_NONE) { - const char *msg = grpc_error_string(err); + const char* msg = grpc_error_string(err); gpr_log(GPR_ERROR, "%s", msg); GRPC_ERROR_UNREF(err); diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.cc similarity index 79% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.cc index 5a99cbeff..f0f32da02 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.cc @@ -16,12 +16,14 @@ * */ -#include "src/core/ext/transport/chttp2/transport/bin_decoder.h" +#include + #include #include +#include "src/core/ext/transport/chttp2/transport/bin_decoder.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" static uint8_t decode_table[] = { 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, @@ -49,7 +51,7 @@ static uint8_t decode_table[] = { static const uint8_t tail_xtra[4] = {0, 0, 1, 2}; -static bool input_is_valid(uint8_t *input_ptr, size_t length) { +static bool input_is_valid(uint8_t* input_ptr, size_t length) { size_t i; for (i = 0; i < length; ++i) { @@ -57,7 +59,7 @@ static bool input_is_valid(uint8_t *input_ptr, size_t length) { gpr_log(GPR_ERROR, "Base64 decoding failed, invalid character '%c' in base64 " "input.\n", - (char)(*input_ptr)); + static_cast(*input_ptr)); return false; } } @@ -75,7 +77,33 @@ static bool input_is_valid(uint8_t *input_ptr, size_t length) { #define COMPOSE_OUTPUT_BYTE_2(input_ptr) \ (uint8_t)((decode_table[input_ptr[2]] << 6) | decode_table[input_ptr[3]]) -bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx) { +// By RFC 4648, if the length of the encoded string without padding is 4n+r, +// the length of decoded string is: 1) 3n if r = 0, 2) 3n + 1 if r = 2, 3, or +// 3) invalid if r = 1. +size_t grpc_chttp2_base64_infer_length_after_decode(const grpc_slice& slice) { + size_t len = GRPC_SLICE_LENGTH(slice); + const uint8_t* bytes = GRPC_SLICE_START_PTR(slice); + while (len > 0 && bytes[len - 1] == '=') { + len--; + } + if (GRPC_SLICE_LENGTH(slice) - len > 2) { + gpr_log(GPR_ERROR, + "Base64 decoding failed. Input has more than 2 paddings."); + return 0; + } + size_t tuples = len / 4; + size_t tail_case = len % 4; + if (tail_case == 1) { + gpr_log(GPR_ERROR, + "Base64 decoding failed. 
Input has a length of %zu (without" + " padding), which is invalid.\n", + len); + return 0; + } + return tuples * 3 + tail_xtra[tail_case]; +} + +bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx) { size_t input_tail; if (ctx->input_cur > ctx->input_end || ctx->output_cur > ctx->output_end) { @@ -94,7 +122,7 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx) { } // Process the tail of input data - input_tail = (size_t)(ctx->input_end - ctx->input_cur); + input_tail = static_cast(ctx->input_end - ctx->input_cur); if (input_tail == 4) { // Process the input data with pad chars if (ctx->input_cur[3] == '=') { @@ -130,8 +158,7 @@ bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx) { return true; } -grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx, - grpc_slice input) { +grpc_slice grpc_chttp2_base64_decode(grpc_slice input) { size_t input_length = GRPC_SLICE_LENGTH(input); size_t output_length = input_length / 4 * 3; struct grpc_base64_decode_context ctx; @@ -142,12 +169,12 @@ grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx, "Base64 decoding failed, input of " "grpc_chttp2_base64_decode has a length of %d, which is not a " "multiple of 4.\n", - (int)input_length); + static_cast(input_length)); return grpc_empty_slice(); } if (input_length > 0) { - uint8_t *input_end = GRPC_SLICE_END_PTR(input); + uint8_t* input_end = GRPC_SLICE_END_PTR(input); if (*(--input_end) == '=') { output_length--; if (*(--input_end) == '=') { @@ -164,10 +191,10 @@ grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx, ctx.contains_tail = false; if (!grpc_base64_decode_partial(&ctx)) { - char *s = grpc_slice_to_c_string(input); + char* s = grpc_slice_to_c_string(input); gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s); gpr_free(s); - grpc_slice_unref_internal(exec_ctx, output); + grpc_slice_unref_internal(output); return grpc_empty_slice(); } GPR_ASSERT(ctx.output_cur == GRPC_SLICE_END_PTR(output)); @@ -175,8 +202,7 @@ grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx, return output; } -grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx, - grpc_slice input, +grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input, size_t output_length) { size_t input_length = GRPC_SLICE_LENGTH(input); grpc_slice output = GRPC_SLICE_MALLOC(output_length); @@ -188,18 +214,19 @@ grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx, "Base64 decoding failed, input of " "grpc_chttp2_base64_decode_with_length has a length of %d, which " "has a tail of 1 byte.\n", - (int)input_length); - grpc_slice_unref_internal(exec_ctx, output); + static_cast(input_length)); + grpc_slice_unref_internal(output); return grpc_empty_slice(); } if (output_length > input_length / 4 * 3 + tail_xtra[input_length % 4]) { - gpr_log(GPR_ERROR, - "Base64 decoding failed, output_length %d is longer " - "than the max possible output length %d.\n", - (int)output_length, - (int)(input_length / 4 * 3 + tail_xtra[input_length % 4])); - grpc_slice_unref_internal(exec_ctx, output); + gpr_log( + GPR_ERROR, + "Base64 decoding failed, output_length %d is longer " + "than the max possible output length %d.\n", + static_cast(output_length), + static_cast(input_length / 4 * 3 + tail_xtra[input_length % 4])); + grpc_slice_unref_internal(output); return grpc_empty_slice(); } @@ -210,10 +237,10 @@ grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx, ctx.contains_tail = true; if 
(!grpc_base64_decode_partial(&ctx)) { - char *s = grpc_slice_to_c_string(input); + char* s = grpc_slice_to_c_string(input); gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s); gpr_free(s); - grpc_slice_unref_internal(exec_ctx, output); + grpc_slice_unref_internal(output); return grpc_empty_slice(); } GPR_ASSERT(ctx.output_cur == GRPC_SLICE_END_PTR(output)); diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.h index 047b33d58..8a4d4a717 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_decoder.h @@ -19,15 +19,17 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H +#include + #include #include struct grpc_base64_decode_context { /* input/output: */ - uint8_t *input_cur; - uint8_t *input_end; - uint8_t *output_cur; - uint8_t *output_end; + uint8_t* input_cur; + uint8_t* input_end; + uint8_t* output_cur; + uint8_t* output_end; /* Indicate if the decoder should handle the tail of input data*/ bool contains_tail; }; @@ -36,17 +38,19 @@ struct grpc_base64_decode_context { or output_end is reached. When input_end is reached, (input_end - input_cur) is less than 4. When output_end is reached, (output_end - output_cur) is less than 3. Returns false if decoding is failed. */ -bool grpc_base64_decode_partial(struct grpc_base64_decode_context *ctx); +bool grpc_base64_decode_partial(struct grpc_base64_decode_context* ctx); /* base64 decode a slice with pad chars. Returns a new slice, does not take ownership of the input. Returns an empty slice if decoding is failed. */ -grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx *exec_ctx, grpc_slice input); +grpc_slice grpc_chttp2_base64_decode(grpc_slice input); /* base64 decode a slice without pad chars, data length is needed. Returns a new slice, does not take ownership of the input. Returns an empty slice if decoding is failed. */ -grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx *exec_ctx, - grpc_slice input, +grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input, size_t output_length); +/* Infer the length of decoded data from encoded data. 
*/ +size_t grpc_chttp2_base64_infer_length_after_decode(const grpc_slice& slice); + #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.cc similarity index 80% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.cc index 42d481b3c..bad29e342 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/bin_encoder.h" #include @@ -52,8 +54,8 @@ grpc_slice grpc_chttp2_base64_encode(grpc_slice input) { size_t tail_case = input_length % 3; size_t output_length = input_triplets * 4 + tail_xtra[tail_case]; grpc_slice output = GRPC_SLICE_MALLOC(output_length); - uint8_t *in = GRPC_SLICE_START_PTR(input); - char *out = (char *)GRPC_SLICE_START_PTR(output); + uint8_t* in = GRPC_SLICE_START_PTR(input); + char* out = reinterpret_cast GRPC_SLICE_START_PTR(output); size_t i; /* encode full triplets */ @@ -85,15 +87,15 @@ grpc_slice grpc_chttp2_base64_encode(grpc_slice input) { break; } - GPR_ASSERT(out == (char *)GRPC_SLICE_END_PTR(output)); + GPR_ASSERT(out == (char*)GRPC_SLICE_END_PTR(output)); GPR_ASSERT(in == GRPC_SLICE_END_PTR(input)); return output; } grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) { size_t nbits; - uint8_t *in; - uint8_t *out; + uint8_t* in; + uint8_t* out; grpc_slice output; uint32_t temp = 0; uint32_t temp_length = 0; @@ -115,7 +117,7 @@ grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) { while (temp_length > 8) { temp_length -= 8; - *out++ = (uint8_t)(temp >> temp_length); + *out++ = static_cast(temp >> temp_length); } } @@ -124,8 +126,9 @@ grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) { * expanded form due to the "integral promotion" performed (see section * 3.2.1.1 of the C89 draft standard). 
A cast to the smaller container type * is then required to avoid the compiler warning */ - *out++ = (uint8_t)((uint8_t)(temp << (8u - temp_length)) | - (uint8_t)(0xffu >> temp_length)); + *out++ = + static_cast(static_cast(temp << (8u - temp_length)) | + static_cast(0xffu >> temp_length)); } GPR_ASSERT(out == GRPC_SLICE_END_PTR(output)); @@ -136,26 +139,27 @@ grpc_slice grpc_chttp2_huffman_compress(grpc_slice input) { typedef struct { uint32_t temp; uint32_t temp_length; - uint8_t *out; + uint8_t* out; } huff_out; -static void enc_flush_some(huff_out *out) { +static void enc_flush_some(huff_out* out) { while (out->temp_length > 8) { out->temp_length -= 8; - *out->out++ = (uint8_t)(out->temp >> out->temp_length); + *out->out++ = static_cast(out->temp >> out->temp_length); } } -static void enc_add2(huff_out *out, uint8_t a, uint8_t b) { +static void enc_add2(huff_out* out, uint8_t a, uint8_t b) { b64_huff_sym sa = huff_alphabet[a]; b64_huff_sym sb = huff_alphabet[b]; out->temp = (out->temp << (sa.length + sb.length)) | - ((uint32_t)sa.bits << sb.length) | sb.bits; - out->temp_length += (uint32_t)sa.length + (uint32_t)sb.length; + (static_cast(sa.bits) << sb.length) | sb.bits; + out->temp_length += + static_cast(sa.length) + static_cast(sb.length); enc_flush_some(out); } -static void enc_add1(huff_out *out, uint8_t a) { +static void enc_add1(huff_out* out, uint8_t a) { b64_huff_sym sa = huff_alphabet[a]; out->temp = (out->temp << sa.length) | sa.bits; out->temp_length += sa.length; @@ -170,8 +174,8 @@ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) { size_t max_output_bits = 11 * output_syms; size_t max_output_length = max_output_bits / 8 + (max_output_bits % 8 != 0); grpc_slice output = GRPC_SLICE_MALLOC(max_output_length); - uint8_t *in = GRPC_SLICE_START_PTR(input); - uint8_t *start_out = GRPC_SLICE_START_PTR(output); + uint8_t* in = GRPC_SLICE_START_PTR(input); + uint8_t* start_out = GRPC_SLICE_START_PTR(output); huff_out out; size_t i; @@ -181,11 +185,11 @@ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) { /* encode full triplets */ for (i = 0; i < input_triplets; i++) { - const uint8_t low_to_high = (uint8_t)((in[0] & 0x3) << 4); + const uint8_t low_to_high = static_cast((in[0] & 0x3) << 4); const uint8_t high_to_low = in[1] >> 4; enc_add2(&out, in[0] >> 2, low_to_high | high_to_low); - const uint8_t a = (uint8_t)((in[1] & 0xf) << 2); + const uint8_t a = static_cast((in[1] & 0xf) << 2); const uint8_t b = (in[2] >> 6); enc_add2(&out, a | b, in[2] & 0x3f); in += 3; @@ -196,14 +200,14 @@ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) { case 0: break; case 1: - enc_add2(&out, in[0] >> 2, (uint8_t)((in[0] & 0x3) << 4)); + enc_add2(&out, in[0] >> 2, static_cast((in[0] & 0x3) << 4)); in += 1; break; case 2: { - const uint8_t low_to_high = (uint8_t)((in[0] & 0x3) << 4); + const uint8_t low_to_high = static_cast((in[0] & 0x3) << 4); const uint8_t high_to_low = in[1] >> 4; enc_add2(&out, in[0] >> 2, low_to_high | high_to_low); - enc_add1(&out, (uint8_t)((in[1] & 0xf) << 2)); + enc_add1(&out, static_cast((in[1] & 0xf) << 2)); in += 2; break; } @@ -214,8 +218,9 @@ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input) { * expanded form due to the "integral promotion" performed (see section * 3.2.1.1 of the C89 draft standard). 
A cast to the smaller container type * is then required to avoid the compiler warning */ - *out.out++ = (uint8_t)((uint8_t)(out.temp << (8u - out.temp_length)) | - (uint8_t)(0xffu >> out.temp_length)); + *out.out++ = static_cast( + static_cast(out.temp << (8u - out.temp_length)) | + static_cast(0xffu >> out.temp_length)); } GPR_ASSERT(out.out <= GRPC_SLICE_END_PTR(output)); diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.h index a8f36a345..1b7bb1574 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/bin_encoder.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H +#include + #include /* base64 encode a slice. Returns a new slice, does not take ownership of the @@ -32,7 +34,7 @@ grpc_slice grpc_chttp2_huffman_compress(grpc_slice input); /* equivalent to: grpc_slice x = grpc_chttp2_base64_encode(input); grpc_slice y = grpc_chttp2_huffman_compress(x); - grpc_slice_unref_internal(exec_ctx, x); + grpc_slice_unref_internal( x); return y; */ grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input); diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc similarity index 74% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_plugin.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc index 6d0995383..531ea73e9 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_plugin.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc @@ -16,17 +16,20 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/env.h" #include "src/core/lib/transport/metadata.h" void grpc_chttp2_plugin_init(void) { - grpc_register_tracer(&grpc_http_trace); - grpc_register_tracer(&grpc_flowctl_trace); - grpc_register_tracer(&grpc_trace_http2_stream_state); -#ifndef NDEBUG - grpc_register_tracer(&grpc_trace_chttp2_refcount); -#endif + g_flow_control_enabled = true; + char* env_variable = gpr_getenv("GRPC_EXPERIMENTAL_DISABLE_FLOW_CONTROL"); + if (env_variable != nullptr) { + g_flow_control_enabled = false; + gpr_free(env_variable); + } } void grpc_chttp2_plugin_shutdown(void) {} diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.cc similarity index 50% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.cc index 2ba671732..0ef73961a 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.cc @@ -16,8 +16,11 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" +#include #include #include #include @@ -27,7 +30,6 @@ #include #include #include -#include #include "src/core/ext/transport/chttp2/transport/frame_data.h" #include "src/core/ext/transport/chttp2/transport/internal.h" @@ -35,14 +37,15 @@ #include "src/core/lib/channel/channel_args.h" #include 
"src/core/lib/compression/stream_compression.h" #include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/memory.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/transport/error_utils.h" #include "src/core/lib/transport/http2_errors.h" #include "src/core/lib/transport/static_metadata.h" @@ -51,7 +54,6 @@ #include "src/core/lib/transport/transport.h" #include "src/core/lib/transport/transport_impl.h" -#define DEFAULT_WINDOW 65535 #define DEFAULT_CONNECTION_WINDOW_TARGET (1024 * 1024) #define MAX_WINDOW 0x7fffffffu #define MAX_WRITE_BUFFER_SIZE (64 * 1024 * 1024) @@ -66,7 +68,7 @@ #define DEFAULT_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */ #define DEFAULT_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS 300000 /* 5 minutes */ -#define DEFAULT_MAX_PINGS_BETWEEN_DATA 0 /* unlimited */ +#define DEFAULT_MAX_PINGS_BETWEEN_DATA 2 #define DEFAULT_MAX_PING_STRIKES 2 static int g_default_client_keepalive_time_ms = @@ -77,7 +79,9 @@ static int g_default_server_keepalive_time_ms = DEFAULT_SERVER_KEEPALIVE_TIME_MS; static int g_default_server_keepalive_timeout_ms = DEFAULT_SERVER_KEEPALIVE_TIMEOUT_MS; -static bool g_default_keepalive_permit_without_calls = +static bool g_default_client_keepalive_permit_without_calls = + DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS; +static bool g_default_server_keepalive_permit_without_calls = DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS; static int g_default_min_sent_ping_interval_without_data_ms = @@ -88,156 +92,127 @@ static int g_default_max_pings_without_data = DEFAULT_MAX_PINGS_BETWEEN_DATA; static int g_default_max_ping_strikes = DEFAULT_MAX_PING_STRIKES; #define MAX_CLIENT_STREAM_ID 0x7fffffffu -grpc_tracer_flag grpc_http_trace = GRPC_TRACER_INITIALIZER(false, "http"); -grpc_tracer_flag grpc_flowctl_trace = GRPC_TRACER_INITIALIZER(false, "flowctl"); - -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_chttp2_refcount = - GRPC_TRACER_INITIALIZER(false, "chttp2_refcount"); -#endif +grpc_core::TraceFlag grpc_http_trace(false, "http"); +grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount(false, + "chttp2_refcount"); /* forward declarations of various callbacks that we'll build closures around */ -static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t, - grpc_error *error); -static void write_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error); -static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *t, - grpc_error *error); +static void write_action_begin_locked(void* t, grpc_error* error); +static void write_action(void* t, grpc_error* error); +static void write_action_end_locked(void* t, grpc_error* error); -static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t, - grpc_error *error); +static void read_action_locked(void* t, grpc_error* error); -static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs, - grpc_error *error); +static void complete_fetch_locked(void* gs, grpc_error* error); /** Set a transport level setting, and push it to our peer */ -static void queue_setting_update(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +static void queue_setting_update(grpc_chttp2_transport* t, grpc_chttp2_setting_id id, 
uint32_t value); -static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_stream *s, grpc_error *error); +static void close_from_api(grpc_chttp2_transport* t, grpc_chttp2_stream* s, + grpc_error* error); /** Start new streams that have been created if we can */ -static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); +static void maybe_start_some_streams(grpc_chttp2_transport* t); -static void connectivity_state_set(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +static void connectivity_state_set(grpc_chttp2_transport* t, grpc_connectivity_state state, - grpc_error *error, const char *reason); - -static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx, - void *byte_stream, - grpc_error *error_ignored); -static void incoming_byte_stream_publish_error( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, - grpc_error *error); -static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx, - grpc_chttp2_incoming_byte_stream *bs); - -static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t, - grpc_error *error); -static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t, - grpc_error *error); - -static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); -static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); - -static void close_transport_locked(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, grpc_error *error); -static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_error *error); - -static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error); -static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error); - -static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_error *error); -static void send_ping_locked( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate, - grpc_closure *on_complete, - grpc_chttp2_initiate_write_reason initiate_write_reason); -static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error); + grpc_error* error, const char* reason); + +static void benign_reclaimer_locked(void* t, grpc_error* error); +static void destructive_reclaimer_locked(void* t, grpc_error* error); + +static void post_benign_reclaimer(grpc_chttp2_transport* t); +static void post_destructive_reclaimer(grpc_chttp2_transport* t); + +static void close_transport_locked(grpc_chttp2_transport* t, grpc_error* error); +static void end_all_the_calls(grpc_chttp2_transport* t, grpc_error* error); + +static void schedule_bdp_ping_locked(grpc_chttp2_transport* t); +static void start_bdp_ping_locked(void* tp, grpc_error* error); +static void finish_bdp_ping_locked(void* tp, grpc_error* error); +static void next_bdp_ping_timer_expired_locked(void* tp, grpc_error* error); + +static void cancel_pings(grpc_chttp2_transport* t, grpc_error* error); +static void send_ping_locked(grpc_chttp2_transport* t, + grpc_closure* on_initiate, + grpc_closure* on_complete); +static void retry_initiate_ping_locked(void* tp, grpc_error* error); /** keepalive-relevant functions */ -static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); -static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); -static void 
finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); -static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); - -static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); +static void init_keepalive_ping_locked(void* arg, grpc_error* error); +static void start_keepalive_ping_locked(void* arg, grpc_error* error); +static void finish_keepalive_ping_locked(void* arg, grpc_error* error); +static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error); + +static void reset_byte_stream(void* arg, grpc_error* error); + +// Flow control default enabled. Can be disabled by setting +// GRPC_EXPERIMENTAL_DISABLE_FLOW_CONTROL +bool g_flow_control_enabled = true; /******************************************************************************* * CONSTRUCTION/DESTRUCTION/REFCOUNTING */ -static void destruct_transport(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { +static void destruct_transport(grpc_chttp2_transport* t) { size_t i; - grpc_endpoint_destroy(exec_ctx, t->ep); + grpc_endpoint_destroy(t->ep); - grpc_slice_buffer_destroy_internal(exec_ctx, &t->qbuf); + grpc_slice_buffer_destroy_internal(&t->qbuf); - grpc_slice_buffer_destroy_internal(exec_ctx, &t->outbuf); - grpc_chttp2_hpack_compressor_destroy(exec_ctx, &t->hpack_compressor); + grpc_slice_buffer_destroy_internal(&t->outbuf); + grpc_chttp2_hpack_compressor_destroy(&t->hpack_compressor); - grpc_slice_buffer_destroy_internal(exec_ctx, &t->read_buffer); - grpc_chttp2_hpack_parser_destroy(exec_ctx, &t->hpack_parser); + grpc_slice_buffer_destroy_internal(&t->read_buffer); + grpc_chttp2_hpack_parser_destroy(&t->hpack_parser); grpc_chttp2_goaway_parser_destroy(&t->goaway_parser); for (i = 0; i < STREAM_LIST_COUNT; i++) { - GPR_ASSERT(t->lists[i].head == NULL); - GPR_ASSERT(t->lists[i].tail == NULL); + GPR_ASSERT(t->lists[i].head == nullptr); + GPR_ASSERT(t->lists[i].tail == nullptr); } + GRPC_ERROR_UNREF(t->goaway_error); + GPR_ASSERT(grpc_chttp2_stream_map_size(&t->stream_map) == 0); grpc_chttp2_stream_map_destroy(&t->stream_map); - grpc_connectivity_state_destroy(exec_ctx, &t->channel_callback.state_tracker); + grpc_connectivity_state_destroy(&t->channel_callback.state_tracker); - GRPC_COMBINER_UNREF(exec_ctx, t->combiner, "chttp2_transport"); + GRPC_COMBINER_UNREF(t->combiner, "chttp2_transport"); - cancel_pings(exec_ctx, t, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed")); + cancel_pings(t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed")); while (t->write_cb_pool) { - grpc_chttp2_write_cb *next = t->write_cb_pool->next; + grpc_chttp2_write_cb* next = t->write_cb_pool->next; gpr_free(t->write_cb_pool); t->write_cb_pool = next; } + t->flow_control.Destroy(); + + GRPC_ERROR_UNREF(t->closed_with_error); gpr_free(t->ping_acks); gpr_free(t->peer_string); gpr_free(t); } #ifndef NDEBUG -void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, const char *reason, - const char *file, int line) { - if (GRPC_TRACER_ON(grpc_trace_chttp2_refcount)) { +void grpc_chttp2_unref_transport(grpc_chttp2_transport* t, const char* reason, + const char* file, int line) { + if (grpc_trace_chttp2_refcount.enabled()) { gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count); gpr_log(GPR_DEBUG, "chttp2:unref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t, val, val - 1, reason, file, line); } if (!gpr_unref(&t->refs)) return; - destruct_transport(exec_ctx, t); + destruct_transport(t); } -void 
grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason, - const char *file, int line) { - if (GRPC_TRACER_ON(grpc_trace_chttp2_refcount)) { +void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason, + const char* file, int line) { + if (grpc_trace_chttp2_refcount.enabled()) { gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count); gpr_log(GPR_DEBUG, "chttp2: ref:%p %" PRIdPTR "->%" PRIdPTR " %s [%s:%d]", t, val, val + 1, reason, file, line); @@ -245,20 +220,19 @@ void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason, gpr_ref(&t->refs); } #else -void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { +void grpc_chttp2_unref_transport(grpc_chttp2_transport* t) { if (!gpr_unref(&t->refs)) return; - destruct_transport(exec_ctx, t); + destruct_transport(t); } -void grpc_chttp2_ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); } +void grpc_chttp2_ref_transport(grpc_chttp2_transport* t) { gpr_ref(&t->refs); } #endif -static const grpc_transport_vtable *get_vtable(void); +static const grpc_transport_vtable* get_vtable(void); -static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - const grpc_channel_args *channel_args, - grpc_endpoint *ep, bool is_client) { +static void init_transport(grpc_chttp2_transport* t, + const grpc_channel_args* channel_args, + grpc_endpoint* ep, bool is_client) { size_t i; int j; @@ -274,9 +248,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, t->endpoint_reading = 1; t->next_stream_id = is_client ? 1 : 2; t->is_client = is_client; - t->flow_control.remote_window = DEFAULT_WINDOW; - t->flow_control.announced_window = DEFAULT_WINDOW; - t->flow_control.t = t; t->deframe_state = is_client ? 
GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0; t->is_first_frame = true; grpc_connectivity_state_init( @@ -301,6 +272,9 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_combiner_scheduler(t->combiner)); GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t, grpc_combiner_scheduler(t->combiner)); + GRPC_CLOSURE_INIT(&t->next_bdp_ping_timer_expired_locked, + next_bdp_ping_timer_expired_locked, t, + grpc_combiner_scheduler(t->combiner)); GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping_locked, t, grpc_combiner_scheduler(t->combiner)); GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked, @@ -313,20 +287,9 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, keepalive_watchdog_fired_locked, t, grpc_combiner_scheduler(t->combiner)); - grpc_bdp_estimator_init(&t->flow_control.bdp_estimator, t->peer_string); - t->flow_control.last_pid_update = gpr_now(GPR_CLOCK_MONOTONIC); - grpc_pid_controller_init( - &t->flow_control.pid_controller, - (grpc_pid_controller_args){.gain_p = 4, - .gain_i = 8, - .gain_d = 0, - .initial_control_value = log2(DEFAULT_WINDOW), - .min_control_value = -1, - .max_control_value = 25, - .integral_range = 10}); - + t->goaway_error = GRPC_ERROR_NONE; grpc_chttp2_goaway_parser_init(&t->goaway_parser); - grpc_chttp2_hpack_parser_init(exec_ctx, &t->hpack_parser); + grpc_chttp2_hpack_parser_init(&t->hpack_parser); grpc_slice_buffer_init(&t->read_buffer); @@ -348,8 +311,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, window -- this should by rights be 0 */ t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE; t->sent_local_settings = 0; - t->write_buffer_size = DEFAULT_WINDOW; - t->flow_control.enable_bdp_probe = true; + t->write_buffer_size = grpc_core::chttp2::kDefaultWindow; if (is_client) { grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string( @@ -358,52 +320,46 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, /* configure http2 the way we like it */ if (is_client) { - queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0); - queue_setting_update(exec_ctx, t, - GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0); + queue_setting_update(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0); + queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0); } - queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, - DEFAULT_WINDOW); - queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, + queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, DEFAULT_MAX_HEADER_LIST_SIZE); - queue_setting_update(exec_ctx, t, - GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1); + queue_setting_update(t, GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, + 1); t->ping_policy.max_pings_without_data = g_default_max_pings_without_data; - t->ping_policy.min_sent_ping_interval_without_data = gpr_time_from_millis( - g_default_min_sent_ping_interval_without_data_ms, GPR_TIMESPAN); + t->ping_policy.min_sent_ping_interval_without_data = + g_default_min_sent_ping_interval_without_data_ms; t->ping_policy.max_ping_strikes = g_default_max_ping_strikes; - t->ping_policy.min_recv_ping_interval_without_data = gpr_time_from_millis( - g_default_min_recv_ping_interval_without_data_ms, GPR_TIMESPAN); + t->ping_policy.min_recv_ping_interval_without_data = + g_default_min_recv_ping_interval_without_data_ms; /* Keepalive setting */ if (t->is_client) { - 
t->keepalive_time = - g_default_client_keepalive_time_ms == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(g_default_client_keepalive_time_ms, - GPR_TIMESPAN); - t->keepalive_timeout = - g_default_client_keepalive_timeout_ms == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(g_default_client_keepalive_timeout_ms, - GPR_TIMESPAN); + t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX + ? GRPC_MILLIS_INF_FUTURE + : g_default_client_keepalive_time_ms; + t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX + ? GRPC_MILLIS_INF_FUTURE + : g_default_client_keepalive_timeout_ms; + t->keepalive_permit_without_calls = + g_default_client_keepalive_permit_without_calls; } else { - t->keepalive_time = - g_default_server_keepalive_time_ms == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(g_default_server_keepalive_time_ms, - GPR_TIMESPAN); - t->keepalive_timeout = - g_default_server_keepalive_timeout_ms == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(g_default_server_keepalive_timeout_ms, - GPR_TIMESPAN); - } - t->keepalive_permit_without_calls = g_default_keepalive_permit_without_calls; + t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX + ? GRPC_MILLIS_INF_FUTURE + : g_default_server_keepalive_time_ms; + t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX + ? GRPC_MILLIS_INF_FUTURE + : g_default_server_keepalive_timeout_ms; + t->keepalive_permit_without_calls = + g_default_server_keepalive_permit_without_calls; + } t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY; + bool enable_bdp = true; + if (channel_args) { for (i = 0; i < channel_args->num_args; i++) { if (0 == strcmp(channel_args->args[i].key, @@ -417,7 +373,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER, t->next_stream_id & 1, is_client ? 
"client" : "server"); } else { - t->next_stream_id = (uint32_t)value; + t->next_stream_id = static_cast(value); } } } else if (0 == strcmp(channel_args->args[i].key, @@ -426,80 +382,69 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, const int value = grpc_channel_arg_get_integer(&channel_args->args[i], options); if (value >= 0) { - grpc_chttp2_hpack_compressor_set_max_usable_size(&t->hpack_compressor, - (uint32_t)value); + grpc_chttp2_hpack_compressor_set_max_usable_size( + &t->hpack_compressor, static_cast(value)); } } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) { t->ping_policy.max_pings_without_data = grpc_channel_arg_get_integer( &channel_args->args[i], - (grpc_integer_options){g_default_max_pings_without_data, 0, - INT_MAX}); + {g_default_max_pings_without_data, 0, INT_MAX}); } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_MAX_PING_STRIKES)) { t->ping_policy.max_ping_strikes = grpc_channel_arg_get_integer( - &channel_args->args[i], - (grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX}); + &channel_args->args[i], {g_default_max_ping_strikes, 0, INT_MAX}); } else if (0 == strcmp( channel_args->args[i].key, GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) { t->ping_policy.min_sent_ping_interval_without_data = - gpr_time_from_millis( - grpc_channel_arg_get_integer( - &channel_args->args[i], - (grpc_integer_options){ - g_default_min_sent_ping_interval_without_data_ms, 0, - INT_MAX}), - GPR_TIMESPAN); + grpc_channel_arg_get_integer( + &channel_args->args[i], + grpc_integer_options{ + g_default_min_sent_ping_interval_without_data_ms, 0, + INT_MAX}); } else if (0 == strcmp( channel_args->args[i].key, GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) { t->ping_policy.min_recv_ping_interval_without_data = - gpr_time_from_millis( - grpc_channel_arg_get_integer( - &channel_args->args[i], - (grpc_integer_options){ - g_default_min_recv_ping_interval_without_data_ms, 0, - INT_MAX}), - GPR_TIMESPAN); + grpc_channel_arg_get_integer( + &channel_args->args[i], + grpc_integer_options{ + g_default_min_recv_ping_interval_without_data_ms, 0, + INT_MAX}); } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) { - t->write_buffer_size = (uint32_t)grpc_channel_arg_get_integer( - &channel_args->args[i], - (grpc_integer_options){0, 0, MAX_WRITE_BUFFER_SIZE}); + t->write_buffer_size = + static_cast(grpc_channel_arg_get_integer( + &channel_args->args[i], {0, 0, MAX_WRITE_BUFFER_SIZE})); } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_BDP_PROBE)) { - t->flow_control.enable_bdp_probe = grpc_channel_arg_get_integer( - &channel_args->args[i], (grpc_integer_options){1, 0, 1}); + enable_bdp = grpc_channel_arg_get_bool(&channel_args->args[i], true); } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_KEEPALIVE_TIME_MS)) { const int value = grpc_channel_arg_get_integer( &channel_args->args[i], - (grpc_integer_options){t->is_client - ? g_default_client_keepalive_time_ms - : g_default_server_keepalive_time_ms, - 1, INT_MAX}); - t->keepalive_time = value == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(value, GPR_TIMESPAN); + grpc_integer_options{t->is_client + ? g_default_client_keepalive_time_ms + : g_default_server_keepalive_time_ms, + 1, INT_MAX}); + t->keepalive_time = value == INT_MAX ? 
GRPC_MILLIS_INF_FUTURE : value; } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) { const int value = grpc_channel_arg_get_integer( &channel_args->args[i], - (grpc_integer_options){t->is_client - ? g_default_client_keepalive_timeout_ms - : g_default_server_keepalive_timeout_ms, - 0, INT_MAX}); - t->keepalive_timeout = value == INT_MAX - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis(value, GPR_TIMESPAN); + grpc_integer_options{t->is_client + ? g_default_client_keepalive_timeout_ms + : g_default_server_keepalive_timeout_ms, + 0, INT_MAX}); + t->keepalive_timeout = + value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value; } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) { - t->keepalive_permit_without_calls = - (uint32_t)grpc_channel_arg_get_integer( - &channel_args->args[i], (grpc_integer_options){0, 0, 1}); + t->keepalive_permit_without_calls = static_cast( + grpc_channel_arg_get_integer(&channel_args->args[i], {0, 0, 1})); } else if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_OPTIMIZATION_TARGET)) { if (channel_args->args[i].type != GRPC_ARG_STRING) { @@ -519,7 +464,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, } } else { static const struct { - const char *channel_arg_name; + const char* channel_arg_name; grpc_chttp2_setting_id setting_id; grpc_integer_options integer_options; bool availability[2] /* server, client */; @@ -548,7 +493,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, {-1, 5, INT32_MAX}, {true, true}}}; - for (j = 0; j < (int)GPR_ARRAY_SIZE(settings_map); j++) { + for (j = 0; j < static_cast GPR_ARRAY_SIZE(settings_map); j++) { if (0 == strcmp(channel_args->args[i].key, settings_map[j].channel_arg_name)) { if (!settings_map[j].availability[is_client]) { @@ -559,8 +504,8 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, int value = grpc_channel_arg_get_integer( &channel_args->args[i], settings_map[j].integer_options); if (value >= 0) { - queue_setting_update(exec_ctx, t, settings_map[j].setting_id, - (uint32_t)value); + queue_setting_update(t, settings_map[j].setting_id, + static_cast(value)); } } break; @@ -570,62 +515,75 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, } } + if (g_flow_control_enabled) { + t->flow_control.Init(t, + enable_bdp); + } else { + t->flow_control.Init(t); + enable_bdp = false; + } + /* No pings allowed before receiving a header or data frame. 
*/ t->ping_state.pings_before_data_required = 0; t->ping_state.is_delayed_ping_timer_set = false; + t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST; - t->ping_recv_state.last_ping_recv_time = gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST; t->ping_recv_state.ping_strikes = 0; /* Start keepalive pings */ - if (gpr_time_cmp(t->keepalive_time, gpr_inf_future(GPR_TIMESPAN)) != 0) { + if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) { t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING; GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping"); - grpc_timer_init( - exec_ctx, &t->keepalive_ping_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time), - &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&t->keepalive_ping_timer, + grpc_core::ExecCtx::Get()->Now() + t->keepalive_time, + &t->init_keepalive_ping_locked); } else { /* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no inflight keeaplive timers */ t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED; } - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE); - post_benign_reclaimer(exec_ctx, t); + if (enable_bdp) { + GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping"); + schedule_bdp_ping_locked(t); + + grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t, + nullptr); + } + + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE); + post_benign_reclaimer(t); } -static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp; +static void destroy_transport_locked(void* tp, grpc_error* error) { + grpc_chttp2_transport* t = static_cast(tp); t->destroying = 1; close_transport_locked( - exec_ctx, t, - grpc_error_set_int( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"), - GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state)); - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destroy"); + t, grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"), + GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state)); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "destroy"); } -static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; - GRPC_CLOSURE_SCHED(exec_ctx, - GRPC_CLOSURE_CREATE(destroy_transport_locked, t, +static void destroy_transport(grpc_transport* gt) { + grpc_chttp2_transport* t = reinterpret_cast(gt); + GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(destroy_transport_locked, t, grpc_combiner_scheduler(t->combiner)), GRPC_ERROR_NONE); } -static void close_transport_locked(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_error *error) { - if (!t->closed) { +static void close_transport_locked(grpc_chttp2_transport* t, + grpc_error* error) { + end_all_the_calls(t, GRPC_ERROR_REF(error)); + cancel_pings(t, GRPC_ERROR_REF(error)); + if (t->closed_with_error == GRPC_ERROR_NONE) { if (!grpc_error_has_clear_grpc_status(error)) { error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); } if (t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE) { - if (t->close_transport_on_writes_finished == NULL) { + if (t->close_transport_on_writes_finished == nullptr) { t->close_transport_on_writes_finished = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Delayed close due to in-progress write"); @@ -634,20 +592,23 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx, 
grpc_error_add_child(t->close_transport_on_writes_finished, error); return; } - t->closed = 1; - connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN, - GRPC_ERROR_REF(error), "close_transport"); - grpc_endpoint_shutdown(exec_ctx, t->ep, GRPC_ERROR_REF(error)); + GPR_ASSERT(error != GRPC_ERROR_NONE); + t->closed_with_error = GRPC_ERROR_REF(error); + connectivity_state_set(t, GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error), + "close_transport"); if (t->ping_state.is_delayed_ping_timer_set) { - grpc_timer_cancel(exec_ctx, &t->ping_state.delayed_ping_timer); + grpc_timer_cancel(&t->ping_state.delayed_ping_timer); + } + if (t->have_next_bdp_ping_timer) { + grpc_timer_cancel(&t->next_bdp_ping_timer); } switch (t->keepalive_state) { case GRPC_CHTTP2_KEEPALIVE_STATE_WAITING: - grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer); + grpc_timer_cancel(&t->keepalive_ping_timer); break; case GRPC_CHTTP2_KEEPALIVE_STATE_PINGING: - grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer); - grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer); + grpc_timer_cancel(&t->keepalive_ping_timer); + grpc_timer_cancel(&t->keepalive_watchdog_timer); break; case GRPC_CHTTP2_KEEPALIVE_STATE_DYING: case GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED: @@ -656,60 +617,59 @@ static void close_transport_locked(grpc_exec_ctx *exec_ctx, } /* flush writable stream list to avoid dangling references */ - grpc_chttp2_stream *s; + grpc_chttp2_stream* s; while (grpc_chttp2_list_pop_writable_stream(t, &s)) { - GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:close"); + GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:close"); } - end_all_the_calls(exec_ctx, t, GRPC_ERROR_REF(error)); - cancel_pings(exec_ctx, t, GRPC_ERROR_REF(error)); + GPR_ASSERT(t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE); + grpc_endpoint_shutdown(t->ep, GRPC_ERROR_REF(error)); } - if (t->notify_on_receive_settings != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, t->notify_on_receive_settings, - GRPC_ERROR_CANCELLED); - t->notify_on_receive_settings = NULL; + if (t->notify_on_receive_settings != nullptr) { + GRPC_CLOSURE_SCHED(t->notify_on_receive_settings, GRPC_ERROR_CANCELLED); + t->notify_on_receive_settings = nullptr; } GRPC_ERROR_UNREF(error); } #ifndef NDEBUG -void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason) { +void grpc_chttp2_stream_ref(grpc_chttp2_stream* s, const char* reason) { grpc_stream_ref(s->refcount, reason); } -void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s, - const char *reason) { - grpc_stream_unref(exec_ctx, s->refcount, reason); +void grpc_chttp2_stream_unref(grpc_chttp2_stream* s, const char* reason) { + grpc_stream_unref(s->refcount, reason); } #else -void grpc_chttp2_stream_ref(grpc_chttp2_stream *s) { +void grpc_chttp2_stream_ref(grpc_chttp2_stream* s) { grpc_stream_ref(s->refcount); } -void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s) { - grpc_stream_unref(exec_ctx, s->refcount); +void grpc_chttp2_stream_unref(grpc_chttp2_stream* s) { + grpc_stream_unref(s->refcount); } #endif -static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, grpc_stream_refcount *refcount, - const void *server_data, gpr_arena *arena) { - GPR_TIMER_BEGIN("init_stream", 0); - grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; - grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs; +static int init_stream(grpc_transport* gt, grpc_stream* gs, + grpc_stream_refcount* refcount, const void* server_data, + gpr_arena* arena) { + GPR_TIMER_SCOPE("init_stream", 0); + 
grpc_chttp2_transport* t = reinterpret_cast(gt); + grpc_chttp2_stream* s = reinterpret_cast(gs); s->t = t; s->refcount = refcount; /* We reserve one 'active stream' that's dropped when the stream is - read-closed. The others are for incoming_byte_streams that are actively - reading */ + read-closed. The others are for Chttp2IncomingByteStreams that are + actively reading */ GRPC_CHTTP2_STREAM_REF(s, "chttp2"); grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0], arena); grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena); grpc_chttp2_data_parser_init(&s->data_parser); grpc_slice_buffer_init(&s->flow_controlled_buffer); - s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + s->deadline = GRPC_MILLIS_INF_FUTURE; GRPC_CLOSURE_INIT(&s->complete_fetch_locked, complete_fetch_locked, s, grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer); + s->unprocessed_incoming_frames_buffer_cached_length = 0; grpc_slice_buffer_init(&s->frame_storage); grpc_slice_buffer_init(&s->compressed_data_buffer); grpc_slice_buffer_init(&s->decompressed_data_buffer); @@ -721,35 +681,38 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, GRPC_CHTTP2_REF_TRANSPORT(t, "stream"); if (server_data) { - s->id = (uint32_t)(uintptr_t)server_data; + s->id = static_cast((uintptr_t)server_data); *t->accepting_stream = s; grpc_chttp2_stream_map_add(&t->stream_map, s->id, s); - post_destructive_reclaimer(exec_ctx, t); + post_destructive_reclaimer(t); } - s->flow_control.s = s; - GPR_TIMER_END("init_stream", 0); + if (t->flow_control->flow_control_enabled()) { + s->flow_control.Init( + static_cast( + t->flow_control.get()), + s); + } else { + s->flow_control.Init(); + } return 0; } -static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp, - grpc_error *error) { - grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp; - grpc_chttp2_transport *t = s->t; - - GPR_TIMER_BEGIN("destroy_stream", 0); +static void destroy_stream_locked(void* sp, grpc_error* error) { + GPR_TIMER_SCOPE("destroy_stream", 0); + grpc_chttp2_stream* s = static_cast(sp); + grpc_chttp2_transport* t = s->t; GPR_ASSERT((s->write_closed && s->read_closed) || s->id == 0); if (s->id != 0) { - GPR_ASSERT(grpc_chttp2_stream_map_find(&t->stream_map, s->id) == NULL); + GPR_ASSERT(grpc_chttp2_stream_map_find(&t->stream_map, s->id) == nullptr); } - grpc_slice_buffer_destroy_internal(exec_ctx, - &s->unprocessed_incoming_frames_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &s->frame_storage); - grpc_slice_buffer_destroy_internal(exec_ctx, &s->compressed_data_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &s->decompressed_data_buffer); + grpc_slice_buffer_destroy_internal(&s->unprocessed_incoming_frames_buffer); + grpc_slice_buffer_destroy_internal(&s->frame_storage); + grpc_slice_buffer_destroy_internal(&s->compressed_data_buffer); + grpc_slice_buffer_destroy_internal(&s->decompressed_data_buffer); grpc_chttp2_list_remove_stalled_by_transport(t, s); grpc_chttp2_list_remove_stalled_by_stream(t, s); @@ -762,73 +725,67 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp, } } - GPR_ASSERT(s->send_initial_metadata_finished == NULL); - GPR_ASSERT(s->fetching_send_message == NULL); - GPR_ASSERT(s->send_trailing_metadata_finished == NULL); - GPR_ASSERT(s->recv_initial_metadata_ready == NULL); - GPR_ASSERT(s->recv_message_ready == NULL); - GPR_ASSERT(s->recv_trailing_metadata_finished == NULL); - grpc_chttp2_data_parser_destroy(exec_ctx, &s->data_parser); 
- grpc_chttp2_incoming_metadata_buffer_destroy(exec_ctx, - &s->metadata_buffer[0]); - grpc_chttp2_incoming_metadata_buffer_destroy(exec_ctx, - &s->metadata_buffer[1]); - grpc_slice_buffer_destroy_internal(exec_ctx, &s->flow_controlled_buffer); + GPR_ASSERT(s->send_initial_metadata_finished == nullptr); + GPR_ASSERT(s->fetching_send_message == nullptr); + GPR_ASSERT(s->send_trailing_metadata_finished == nullptr); + GPR_ASSERT(s->recv_initial_metadata_ready == nullptr); + GPR_ASSERT(s->recv_message_ready == nullptr); + GPR_ASSERT(s->recv_trailing_metadata_finished == nullptr); + grpc_chttp2_data_parser_destroy(&s->data_parser); + grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[0]); + grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[1]); + grpc_slice_buffer_destroy_internal(&s->flow_controlled_buffer); GRPC_ERROR_UNREF(s->read_closed_error); GRPC_ERROR_UNREF(s->write_closed_error); GRPC_ERROR_UNREF(s->byte_stream_error); - grpc_chttp2_flowctl_destroy_stream(&t->flow_control, &s->flow_control); + s->flow_control.Destroy(); - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "stream"); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "stream"); - GPR_TIMER_END("destroy_stream", 0); - - GRPC_CLOSURE_SCHED(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(s->destroy_stream_arg, GRPC_ERROR_NONE); } -static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, - grpc_closure *then_schedule_closure) { - GPR_TIMER_BEGIN("destroy_stream", 0); - grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; - grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs; +static void destroy_stream(grpc_transport* gt, grpc_stream* gs, + grpc_closure* then_schedule_closure) { + GPR_TIMER_SCOPE("destroy_stream", 0); + grpc_chttp2_transport* t = reinterpret_cast(gt); + grpc_chttp2_stream* s = reinterpret_cast(gs); - if (s->stream_compression_ctx != NULL) { + if (s->stream_compression_ctx != nullptr) { grpc_stream_compression_context_destroy(s->stream_compression_ctx); - s->stream_compression_ctx = NULL; + s->stream_compression_ctx = nullptr; } - if (s->stream_decompression_ctx != NULL) { + if (s->stream_decompression_ctx != nullptr) { grpc_stream_compression_context_destroy(s->stream_decompression_ctx); - s->stream_decompression_ctx = NULL; + s->stream_decompression_ctx = nullptr; } s->destroy_stream_arg = then_schedule_closure; GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s, - grpc_combiner_scheduler(t->combiner)), + GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s, + grpc_combiner_scheduler(t->combiner)), GRPC_ERROR_NONE); - GPR_TIMER_END("destroy_stream", 0); } -grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t, +grpc_chttp2_stream* grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport* t, uint32_t id) { - return (grpc_chttp2_stream *)grpc_chttp2_stream_map_find(&t->stream_map, id); + return static_cast( + grpc_chttp2_stream_map_find(&t->stream_map, id)); } -grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t, uint32_t id) { - if (t->channel_callback.accept_stream == NULL) { - return NULL; + if (t->channel_callback.accept_stream == nullptr) { + return nullptr; } - grpc_chttp2_stream *accepting; - GPR_ASSERT(t->accepting_stream == NULL); + grpc_chttp2_stream* accepting; + GPR_ASSERT(t->accepting_stream == nullptr); t->accepting_stream = 
&accepting; - t->channel_callback.accept_stream(exec_ctx, - t->channel_callback.accept_stream_user_data, - &t->base, (void *)(uintptr_t)id); - t->accepting_stream = NULL; + t->channel_callback.accept_stream(t->channel_callback.accept_stream_user_data, + &t->base, + (void*)static_cast(id)); + t->accepting_stream = nullptr; return accepting; } @@ -836,7 +793,7 @@ grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx, * OUTPUT PROCESSING */ -static const char *write_state_name(grpc_chttp2_write_state st) { +static const char* write_state_name(grpc_chttp2_write_state st) { switch (st) { case GRPC_CHTTP2_WRITE_STATE_IDLE: return "IDLE"; @@ -848,136 +805,124 @@ static const char *write_state_name(grpc_chttp2_write_state st) { GPR_UNREACHABLE_CODE(return "UNKNOWN"); } -static void set_write_state(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_write_state st, const char *reason) { - GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_DEBUG, "W:%p %s state %s -> %s [%s]", t, +static void set_write_state(grpc_chttp2_transport* t, + grpc_chttp2_write_state st, const char* reason) { + GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "W:%p %s state %s -> %s [%s]", t, t->is_client ? "CLIENT" : "SERVER", write_state_name(t->write_state), write_state_name(st), reason)); t->write_state = st; if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) { - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &t->run_after_write); - if (t->close_transport_on_writes_finished != NULL) { - grpc_error *err = t->close_transport_on_writes_finished; - t->close_transport_on_writes_finished = NULL; - close_transport_locked(exec_ctx, t, err); + GRPC_CLOSURE_LIST_SCHED(&t->run_after_write); + if (t->close_transport_on_writes_finished != nullptr) { + grpc_error* err = t->close_transport_on_writes_finished; + t->close_transport_on_writes_finished = nullptr; + close_transport_locked(t, err); } } } static void inc_initiate_write_reason( - grpc_exec_ctx *exec_ctx, grpc_chttp2_initiate_write_reason reason) { + grpc_chttp2_initiate_write_reason reason) { switch (reason) { case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(); break; case GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(); break; case GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(); break; case GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA( - exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA(); break; case GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA( - exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA(); break; case GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(); break; case GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(); break; case GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx); + 
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(); break; case GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(); break; case GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(); break; case GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL(); break; case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL( - exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL(); break; case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx); - break; - case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(); break; case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING( - exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING(); break; case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE( - exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE(); break; case GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(); break; case GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(); break; case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED( - exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED(); break; case GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(); break; case GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM: - GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx); + GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(); break; } } -void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +void grpc_chttp2_initiate_write(grpc_chttp2_transport* t, grpc_chttp2_initiate_write_reason reason) { - GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0); + GPR_TIMER_SCOPE("grpc_chttp2_initiate_write", 0); switch (t->write_state) { case GRPC_CHTTP2_WRITE_STATE_IDLE: - inc_initiate_write_reason(exec_ctx, reason); - set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, + inc_initiate_write_reason(reason); + set_write_state(t, GRPC_CHTTP2_WRITE_STATE_WRITING, grpc_chttp2_initiate_write_reason_string(reason)); t->is_first_write_in_batch = true; GRPC_CHTTP2_REF_TRANSPORT(t, "writing"); GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_INIT(&t->write_action_begin_locked, write_action_begin_locked, t, grpc_combiner_finally_scheduler(t->combiner)), GRPC_ERROR_NONE); break; 
case GRPC_CHTTP2_WRITE_STATE_WRITING: - set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE, + set_write_state(t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE, grpc_chttp2_initiate_write_reason_string(reason)); break; case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE: break; } - GPR_TIMER_END("grpc_chttp2_initiate_write", 0); } -void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { - if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) { +void grpc_chttp2_mark_stream_writable(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { + if (t->closed_with_error == GRPC_ERROR_NONE && + grpc_chttp2_list_add_writable_stream(t, s)) { GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become"); } } -static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t, +static grpc_closure_scheduler* write_scheduler(grpc_chttp2_transport* t, bool early_results_scheduled, bool partial_write) { /* if it's not the first write in a batch, always offload to the executor: @@ -1001,11 +946,11 @@ static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t, case GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY: return grpc_schedule_on_exec_ctx; } - GPR_UNREACHABLE_CODE(return NULL); + GPR_UNREACHABLE_CODE(return nullptr); } #define WRITE_STATE_TUPLE_TO_INT(p, i) (2 * (int)(p) + (int)(i)) -static const char *begin_writing_desc(bool partial, bool inlined) { +static const char* begin_writing_desc(bool partial, bool inlined) { switch (WRITE_STATE_TUPLE_TO_INT(partial, inlined)) { case WRITE_STATE_TUPLE_TO_INT(false, false): return "begin write in background"; @@ -1019,68 +964,65 @@ static const char *begin_writing_desc(bool partial, bool inlined) { GPR_UNREACHABLE_CODE(return "bad state tuple"); } -static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt, - grpc_error *error_ignored) { - GPR_TIMER_BEGIN("write_action_begin_locked", 0); - grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; +static void write_action_begin_locked(void* gt, grpc_error* error_ignored) { + GPR_TIMER_SCOPE("write_action_begin_locked", 0); + grpc_chttp2_transport* t = static_cast(gt); GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE); grpc_chttp2_begin_write_result r; - if (t->closed) { + if (t->closed_with_error != GRPC_ERROR_NONE) { r.writing = false; } else { - r = grpc_chttp2_begin_write(exec_ctx, t); + r = grpc_chttp2_begin_write(t); } if (r.writing) { if (r.partial) { - GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx); + GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(); } if (!t->is_first_write_in_batch) { - GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx); + GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(); } - grpc_closure_scheduler *scheduler = + grpc_closure_scheduler* scheduler = write_scheduler(t, r.early_results_scheduled, r.partial); if (scheduler != grpc_schedule_on_exec_ctx) { - GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx); + GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(); } set_write_state( - exec_ctx, t, r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE - : GRPC_CHTTP2_WRITE_STATE_WRITING, + t, + r.partial ? 
GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE + : GRPC_CHTTP2_WRITE_STATE_WRITING, begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx)); - GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_INIT(&t->write_action, - write_action, t, scheduler), - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_INIT(&t->write_action, write_action, t, scheduler), + GRPC_ERROR_NONE); } else { - set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE, - "begin writing nothing"); - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing"); + GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(); + set_write_state(t, GRPC_CHTTP2_WRITE_STATE_IDLE, "begin writing nothing"); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "writing"); } - GPR_TIMER_END("write_action_begin_locked", 0); } -static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; - GPR_TIMER_BEGIN("write_action", 0); +static void write_action(void* gt, grpc_error* error) { + GPR_TIMER_SCOPE("write_action", 0); + grpc_chttp2_transport* t = static_cast(gt); grpc_endpoint_write( - exec_ctx, t->ep, &t->outbuf, + t->ep, &t->outbuf, GRPC_CLOSURE_INIT(&t->write_action_end_locked, write_action_end_locked, t, grpc_combiner_scheduler(t->combiner))); - GPR_TIMER_END("write_action", 0); } -static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error) { - GPR_TIMER_BEGIN("terminate_writing_with_lock", 0); - grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp; +static void write_action_end_locked(void* tp, grpc_error* error) { + GPR_TIMER_SCOPE("terminate_writing_with_lock", 0); + grpc_chttp2_transport* t = static_cast(tp); if (error != GRPC_ERROR_NONE) { - close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error)); + close_transport_locked(t, GRPC_ERROR_REF(error)); } if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED) { t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SENT; if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) { close_transport_locked( - exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("goaway sent")); + t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("goaway sent")); } } @@ -1089,17 +1031,14 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp, GPR_UNREACHABLE_CODE(break); case GRPC_CHTTP2_WRITE_STATE_WRITING: GPR_TIMER_MARK("state=writing", 0); - set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE, - "finish writing"); + set_write_state(t, GRPC_CHTTP2_WRITE_STATE_IDLE, "finish writing"); break; case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE: GPR_TIMER_MARK("state=writing_stale_no_poller", 0); - set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, - "continue writing"); + set_write_state(t, GRPC_CHTTP2_WRITE_STATE_WRITING, "continue writing"); t->is_first_write_in_batch = false; GRPC_CHTTP2_REF_TRANSPORT(t, "writing"); GRPC_CLOSURE_RUN( - exec_ctx, GRPC_CLOSURE_INIT(&t->write_action_begin_locked, write_action_begin_locked, t, grpc_combiner_finally_scheduler(t->combiner)), @@ -1107,18 +1046,16 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp, break; } - grpc_chttp2_end_write(exec_ctx, t, GRPC_ERROR_REF(error)); + grpc_chttp2_end_write(t, GRPC_ERROR_REF(error)); - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing"); - GPR_TIMER_END("terminate_writing_with_lock", 0); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "writing"); } // Dirties an HTTP2 setting to be sent out next time a writing path occurs. // If the change needs to occur immediately, manually initiate a write. 
-static void queue_setting_update(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +static void queue_setting_update(grpc_chttp2_transport* t, grpc_chttp2_setting_id id, uint32_t value) { - const grpc_chttp2_setting_parameters *sp = + const grpc_chttp2_setting_parameters* sp = &grpc_chttp2_settings_parameters[id]; uint32_t use_value = GPR_CLAMP(value, sp->min_value, sp->max_value); if (use_value != value) { @@ -1131,13 +1068,21 @@ static void queue_setting_update(grpc_exec_ctx *exec_ctx, } } -void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t, uint32_t goaway_error, grpc_slice goaway_text) { // GRPC_CHTTP2_IF_TRACING( - // gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg)); - t->seen_goaway = 1; + // gpr_log(GPR_INFO, "got goaway [%d]: %s", goaway_error, msg)); + + // Discard the error from a previous goaway frame (if any) + if (t->goaway_error != GRPC_ERROR_NONE) { + GRPC_ERROR_UNREF(t->goaway_error); + } + t->goaway_error = grpc_error_set_str( + grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("GOAWAY received"), + GRPC_ERROR_INT_HTTP2_ERROR, static_cast(goaway_error)), + GRPC_ERROR_STR_RAW_BYTES, goaway_text); /* When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug * data equal to "too_many_pings", it should log the occurrence at a log level @@ -1148,31 +1093,22 @@ void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx, gpr_log(GPR_ERROR, "Received a GOAWAY with error code ENHANCE_YOUR_CALM and debug " "data equal to \"too_many_pings\""); - double current_keepalive_time_ms = - gpr_timespec_to_micros(t->keepalive_time) / 1000; + double current_keepalive_time_ms = static_cast(t->keepalive_time); t->keepalive_time = current_keepalive_time_ms > INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER - ? gpr_inf_future(GPR_TIMESPAN) - : gpr_time_from_millis((int64_t)(current_keepalive_time_ms * - KEEPALIVE_TIME_BACKOFF_MULTIPLIER), - GPR_TIMESPAN); + ? GRPC_MILLIS_INF_FUTURE + : static_cast(current_keepalive_time_ms * + KEEPALIVE_TIME_BACKOFF_MULTIPLIER); } /* lie: use transient failure from the transport to indicate goaway has been * received */ - connectivity_state_set( - exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE, - grpc_error_set_str( - grpc_error_set_int( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("GOAWAY received"), - GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)goaway_error), - GRPC_ERROR_STR_RAW_BYTES, goaway_text), - "got_goaway"); + connectivity_state_set(t, GRPC_CHANNEL_TRANSIENT_FAILURE, + GRPC_ERROR_REF(t->goaway_error), "got_goaway"); } -static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { - grpc_chttp2_stream *s; +static void maybe_start_some_streams(grpc_chttp2_transport* t) { + grpc_chttp2_stream* s; /* start streams where we have free grpc_chttp2_stream ids and free * concurrency */ while (t->next_stream_id <= MAX_CLIENT_STREAM_ID && @@ -1182,7 +1118,7 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx, grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) { /* safe since we can't (legally) be parsing this stream yet */ GRPC_CHTTP2_IF_TRACING(gpr_log( - GPR_DEBUG, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d", + GPR_INFO, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d", t->is_client ? 
"CLI" : "SVR", s, t->next_stream_id)); GPR_ASSERT(s->id == 0); @@ -1191,22 +1127,21 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx, if (t->next_stream_id >= MAX_CLIENT_STREAM_ID) { connectivity_state_set( - exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE, + t, GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream IDs exhausted"), "no_more_stream_ids"); } grpc_chttp2_stream_map_add(&t->stream_map, s->id, s); - post_destructive_reclaimer(exec_ctx, t); - grpc_chttp2_mark_stream_writable(exec_ctx, t, s); - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM); + post_destructive_reclaimer(t); + grpc_chttp2_mark_stream_writable(t, s); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM); } /* cancel out streams that will never be started */ while (t->next_stream_id >= MAX_CLIENT_STREAM_ID && grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) { grpc_chttp2_cancel_stream( - exec_ctx, t, s, + t, s, grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream IDs exhausted"), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); @@ -1223,40 +1158,40 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx, bits being used for flags defined above) */ #define CLOSURE_BARRIER_FIRST_REF_BIT (1 << 16) -static grpc_closure *add_closure_barrier(grpc_closure *closure) { +static grpc_closure* add_closure_barrier(grpc_closure* closure) { closure->next_data.scratch += CLOSURE_BARRIER_FIRST_REF_BIT; return closure; } -static void null_then_run_closure(grpc_exec_ctx *exec_ctx, - grpc_closure **closure, grpc_error *error) { - grpc_closure *c = *closure; - *closure = NULL; - GRPC_CLOSURE_RUN(exec_ctx, c, error); +static void null_then_run_closure(grpc_closure** closure, grpc_error* error) { + grpc_closure* c = *closure; + *closure = nullptr; + GRPC_CLOSURE_RUN(c, error); } -void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, - grpc_closure **pclosure, - grpc_error *error, const char *desc) { - grpc_closure *closure = *pclosure; - *pclosure = NULL; - if (closure == NULL) { +void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t, + grpc_chttp2_stream* s, + grpc_closure** pclosure, + grpc_error* error, const char* desc) { + grpc_closure* closure = *pclosure; + *pclosure = nullptr; + if (closure == nullptr) { GRPC_ERROR_UNREF(error); return; } closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT; - if (GRPC_TRACER_ON(grpc_http_trace)) { - const char *errstr = grpc_error_string(error); + if (grpc_http_trace.enabled()) { + const char* errstr = grpc_error_string(error); gpr_log( - GPR_DEBUG, + GPR_INFO, "complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s " "write_state=%s", t, closure, - (int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT), - (int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT), desc, - errstr, write_state_name(t->write_state)); + static_cast(closure->next_data.scratch / + CLOSURE_BARRIER_FIRST_REF_BIT), + static_cast(closure->next_data.scratch % + CLOSURE_BARRIER_FIRST_REF_BIT), + desc, errstr, write_state_name(t->write_state)); } if (error != GRPC_ERROR_NONE) { if (closure->error_data.error == GRPC_ERROR_NONE) { @@ -1272,11 +1207,11 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx, if (closure->next_data.scratch < CLOSURE_BARRIER_FIRST_REF_BIT) { if (closure->next_data.scratch & CLOSURE_BARRIER_STATS_BIT) { grpc_transport_move_stats(&s->stats, 
s->collecting_stats); - s->collecting_stats = NULL; + s->collecting_stats = nullptr; } if ((t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE) || !(closure->next_data.scratch & CLOSURE_BARRIER_MAY_COVER_WRITE)) { - GRPC_CLOSURE_RUN(exec_ctx, closure, closure->error_data.error); + GRPC_CLOSURE_RUN(closure, closure->error_data.error); } else { grpc_closure_list_append(&t->run_after_write, closure, closure->error_data.error); @@ -1284,109 +1219,101 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx, } } -static bool contains_non_ok_status(grpc_metadata_batch *batch) { - if (batch->idx.named.grpc_status != NULL) { +static bool contains_non_ok_status(grpc_metadata_batch* batch) { + if (batch->idx.named.grpc_status != nullptr) { return !grpc_mdelem_eq(batch->idx.named.grpc_status->md, GRPC_MDELEM_GRPC_STATUS_0); } return false; } -static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +static void maybe_become_writable_due_to_send_msg(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { if (s->id != 0 && (!s->write_buffering || s->flow_controlled_buffer.length > t->write_buffer_size)) { - grpc_chttp2_mark_stream_writable(exec_ctx, t, s); - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE); + grpc_chttp2_mark_stream_writable(t, s); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE); } } -static void add_fetched_slice_locked(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +static void add_fetched_slice_locked(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { s->fetched_send_message_length += - (uint32_t)GRPC_SLICE_LENGTH(s->fetching_slice); + static_cast GRPC_SLICE_LENGTH(s->fetching_slice); grpc_slice_buffer_add(&s->flow_controlled_buffer, s->fetching_slice); - maybe_become_writable_due_to_send_msg(exec_ctx, t, s); + maybe_become_writable_due_to_send_msg(t, s); } -static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +static void continue_fetching_send_locked(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { for (;;) { - if (s->fetching_send_message == NULL) { + if (s->fetching_send_message == nullptr) { /* Stream was cancelled before message fetch completed */ abort(); /* TODO(ctiller): what cleanup here? 
*/ return; /* early out */ } - if (s->fetched_send_message_length == s->fetching_send_message->length) { - grpc_byte_stream_destroy(exec_ctx, s->fetching_send_message); + if (s->fetched_send_message_length == s->fetching_send_message->length()) { int64_t notify_offset = s->next_message_end_offset; if (notify_offset <= s->flow_controlled_bytes_written) { grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_NONE, + t, s, &s->fetching_send_message_finished, GRPC_ERROR_NONE, "fetching_send_message_finished"); } else { - grpc_chttp2_write_cb *cb = t->write_cb_pool; - if (cb == NULL) { - cb = (grpc_chttp2_write_cb *)gpr_malloc(sizeof(*cb)); + grpc_chttp2_write_cb* cb = t->write_cb_pool; + if (cb == nullptr) { + cb = static_cast(gpr_malloc(sizeof(*cb))); } else { t->write_cb_pool = cb->next; } cb->call_at_byte = notify_offset; cb->closure = s->fetching_send_message_finished; - s->fetching_send_message_finished = NULL; - grpc_chttp2_write_cb **list = - s->fetching_send_message->flags & GRPC_WRITE_THROUGH + s->fetching_send_message_finished = nullptr; + grpc_chttp2_write_cb** list = + s->fetching_send_message->flags() & GRPC_WRITE_THROUGH ? &s->on_write_finished_cbs : &s->on_flow_controlled_cbs; cb->next = *list; *list = cb; } - s->fetching_send_message = NULL; + s->fetching_send_message.reset(); return; /* early out */ - } else if (grpc_byte_stream_next(exec_ctx, s->fetching_send_message, - UINT32_MAX, &s->complete_fetch_locked)) { - grpc_error *error = grpc_byte_stream_pull( - exec_ctx, s->fetching_send_message, &s->fetching_slice); + } else if (s->fetching_send_message->Next(UINT32_MAX, + &s->complete_fetch_locked)) { + grpc_error* error = s->fetching_send_message->Pull(&s->fetching_slice); if (error != GRPC_ERROR_NONE) { - grpc_byte_stream_destroy(exec_ctx, s->fetching_send_message); - grpc_chttp2_cancel_stream(exec_ctx, t, s, error); + s->fetching_send_message.reset(); + grpc_chttp2_cancel_stream(t, s, error); } else { - add_fetched_slice_locked(exec_ctx, t, s); + add_fetched_slice_locked(t, s); } } } } -static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs, - grpc_error *error) { - grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs; - grpc_chttp2_transport *t = s->t; +static void complete_fetch_locked(void* gs, grpc_error* error) { + grpc_chttp2_stream* s = static_cast(gs); + grpc_chttp2_transport* t = s->t; if (error == GRPC_ERROR_NONE) { - error = grpc_byte_stream_pull(exec_ctx, s->fetching_send_message, - &s->fetching_slice); + error = s->fetching_send_message->Pull(&s->fetching_slice); if (error == GRPC_ERROR_NONE) { - add_fetched_slice_locked(exec_ctx, t, s); - continue_fetching_send_locked(exec_ctx, t, s); + add_fetched_slice_locked(t, s); + continue_fetching_send_locked(t, s); } } if (error != GRPC_ERROR_NONE) { - grpc_byte_stream_destroy(exec_ctx, s->fetching_send_message); - grpc_chttp2_cancel_stream(exec_ctx, t, s, error); + s->fetching_send_message.reset(); + grpc_chttp2_cancel_stream(t, s, error); } } -static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {} +static void do_nothing(void* arg, grpc_error* error) {} -static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id, +static void log_metadata(const grpc_metadata_batch* md_batch, uint32_t id, bool is_client, bool is_initial) { - for (grpc_linked_mdelem *md = md_batch->list.head; md != NULL; + for (grpc_linked_mdelem* md = md_batch->list.head; md != nullptr; md = md->next) { - char *key = 
grpc_slice_to_c_string(GRPC_MDKEY(md->md)); - char *value = grpc_slice_to_c_string(GRPC_MDVALUE(md->md)); + char* key = grpc_slice_to_c_string(GRPC_MDKEY(md->md)); + char* value = grpc_slice_to_c_string(GRPC_MDVALUE(md->md)); gpr_log(GPR_INFO, "HTTP:%d:%s:%s: %s: %s", id, is_initial ? "HDR" : "TRL", is_client ? "CLI" : "SVR", key, value); gpr_free(key); @@ -1394,21 +1321,22 @@ static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id, } } -static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, - grpc_error *error_ignored) { - GPR_TIMER_BEGIN("perform_stream_op_locked", 0); +static void perform_stream_op_locked(void* stream_op, + grpc_error* error_ignored) { + GPR_TIMER_SCOPE("perform_stream_op_locked", 0); - grpc_transport_stream_op_batch *op = - (grpc_transport_stream_op_batch *)stream_op; - grpc_chttp2_stream *s = (grpc_chttp2_stream *)op->handler_private.extra_arg; - grpc_transport_stream_op_batch_payload *op_payload = op->payload; - grpc_chttp2_transport *t = s->t; + grpc_transport_stream_op_batch* op = + static_cast(stream_op); + grpc_chttp2_stream* s = + static_cast(op->handler_private.extra_arg); + grpc_transport_stream_op_batch_payload* op_payload = op->payload; + grpc_chttp2_transport* t = s->t; - GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx); + GRPC_STATS_INC_HTTP2_OP_BATCHES(); - if (GRPC_TRACER_ON(grpc_http_trace)) { - char *str = grpc_transport_stream_op_batch_string(op); - gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str, + if (grpc_http_trace.enabled()) { + char* str = grpc_transport_stream_op_batch_string(op); + gpr_log(GPR_INFO, "perform_stream_op_locked: %s; on_complete = %p", str, op->on_complete); gpr_free(str); if (op->send_initial_metadata) { @@ -1421,10 +1349,10 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } } - grpc_closure *on_complete = op->on_complete; - if (on_complete == NULL) { + grpc_closure* on_complete = op->on_complete; + if (on_complete == nullptr) { on_complete = - GRPC_CLOSURE_CREATE(do_nothing, NULL, grpc_schedule_on_exec_ctx); + GRPC_CLOSURE_CREATE(do_nothing, nullptr, grpc_schedule_on_exec_ctx); } /* use final_data as a barrier until enqueue time; the inital counter is @@ -1433,25 +1361,24 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, on_complete->error_data.error = GRPC_ERROR_NONE; if (op->collect_stats) { - GPR_ASSERT(s->collecting_stats == NULL); + GPR_ASSERT(s->collecting_stats == nullptr); s->collecting_stats = op_payload->collect_stats.collect_stats; on_complete->next_data.scratch |= CLOSURE_BARRIER_STATS_BIT; } if (op->cancel_stream) { - GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx); - grpc_chttp2_cancel_stream(exec_ctx, t, s, - op_payload->cancel_stream.cancel_error); + GRPC_STATS_INC_HTTP2_OP_CANCEL(); + grpc_chttp2_cancel_stream(t, s, op_payload->cancel_stream.cancel_error); } if (op->send_initial_metadata) { - GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx); - GPR_ASSERT(s->send_initial_metadata_finished == NULL); + GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(); + GPR_ASSERT(s->send_initial_metadata_finished == nullptr); on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; /* Identify stream compression */ if (op_payload->send_initial_metadata.send_initial_metadata->idx.named - .content_encoding == NULL || + .content_encoding == nullptr || grpc_stream_compression_method_parse( GRPC_MDVALUE( op_payload->send_initial_metadata.send_initial_metadata->idx @@ -1469,20 +1396,20 @@ static void 
perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, t->settings[GRPC_PEER_SETTINGS] [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE]; if (t->is_client) { - s->deadline = - gpr_time_min(s->deadline, s->send_initial_metadata->deadline); + s->deadline = GPR_MIN(s->deadline, s->send_initial_metadata->deadline); } if (metadata_size > metadata_peer_limit) { grpc_chttp2_cancel_stream( - exec_ctx, t, s, + t, s, grpc_error_set_int( grpc_error_set_int( grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "to-be-sent initial metadata size " "exceeds peer limit"), GRPC_ERROR_INT_SIZE, - (intptr_t)metadata_size), - GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit), + static_cast(metadata_size)), + GRPC_ERROR_INT_LIMIT, + static_cast(metadata_peer_limit)), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED)); } else { if (contains_non_ok_status(s->send_initial_metadata)) { @@ -1490,47 +1417,50 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } if (!s->write_closed) { if (t->is_client) { - if (!t->closed) { + if (t->closed_with_error == GRPC_ERROR_NONE) { GPR_ASSERT(s->id == 0); grpc_chttp2_list_add_waiting_for_concurrency(t, s); - maybe_start_some_streams(exec_ctx, t); + maybe_start_some_streams(t); } else { grpc_chttp2_cancel_stream( - exec_ctx, t, s, + t, s, grpc_error_set_int( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed"), + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Transport closed", &t->closed_with_error, 1), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); } } else { GPR_ASSERT(s->id != 0); - grpc_chttp2_mark_stream_writable(exec_ctx, t, s); + grpc_chttp2_mark_stream_writable(t, s); if (!(op->send_message && - (op->payload->send_message.send_message->flags & + (op->payload->send_message.send_message->flags() & GRPC_WRITE_BUFFER_HINT))) { grpc_chttp2_initiate_write( - exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA); + t, GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA); } } } else { - s->send_initial_metadata = NULL; + s->send_initial_metadata = nullptr; grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->send_initial_metadata_finished, + t, s, &s->send_initial_metadata_finished, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Attempt to send initial metadata after stream was closed", &s->write_closed_error, 1), "send_initial_metadata_finished"); } } - if (op_payload->send_initial_metadata.peer_string != NULL) { - gpr_atm_rel_store(op_payload->send_initial_metadata.peer_string, - (gpr_atm)gpr_strdup(t->peer_string)); + if (op_payload->send_initial_metadata.peer_string != nullptr) { + char* old_peer_string = (char*)gpr_atm_full_xchg( + op_payload->send_initial_metadata.peer_string, + (gpr_atm)gpr_strdup(t->peer_string)); + gpr_free(old_peer_string); } } if (op->send_message) { - GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx); + GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(); GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE( - exec_ctx, op->payload->send_message.send_message->length); + op->payload->send_message.send_message->length()); on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; s->fetching_send_message_finished = add_closure_barrier(op->on_complete); if (s->write_closed) { @@ -1539,8 +1469,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, // streaming call might send another message before getting a // recv_message failure, breaking out of its loop, and then // starting recv_trailing_metadata. 
+ op->payload->send_message.send_message.reset(); grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->fetching_send_message_finished, + t, s, &s->fetching_send_message_finished, t->is_client && s->received_trailing_metadata ? GRPC_ERROR_NONE : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( @@ -1548,35 +1479,37 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, &s->write_closed_error, 1), "fetching_send_message_finished"); } else { - GPR_ASSERT(s->fetching_send_message == NULL); - uint8_t *frame_hdr = grpc_slice_buffer_tiny_add( + GPR_ASSERT(s->fetching_send_message == nullptr); + uint8_t* frame_hdr = grpc_slice_buffer_tiny_add( &s->flow_controlled_buffer, GRPC_HEADER_SIZE_IN_BYTES); - uint32_t flags = op_payload->send_message.send_message->flags; + uint32_t flags = op_payload->send_message.send_message->flags(); frame_hdr[0] = (flags & GRPC_WRITE_INTERNAL_COMPRESS) != 0; - size_t len = op_payload->send_message.send_message->length; - frame_hdr[1] = (uint8_t)(len >> 24); - frame_hdr[2] = (uint8_t)(len >> 16); - frame_hdr[3] = (uint8_t)(len >> 8); - frame_hdr[4] = (uint8_t)(len); - s->fetching_send_message = op_payload->send_message.send_message; + size_t len = op_payload->send_message.send_message->length(); + frame_hdr[1] = static_cast(len >> 24); + frame_hdr[2] = static_cast(len >> 16); + frame_hdr[3] = static_cast(len >> 8); + frame_hdr[4] = static_cast(len); + s->fetching_send_message = + std::move(op_payload->send_message.send_message); s->fetched_send_message_length = 0; - s->next_message_end_offset = s->flow_controlled_bytes_written + - (int64_t)s->flow_controlled_buffer.length + - (int64_t)len; + s->next_message_end_offset = + s->flow_controlled_bytes_written + + static_cast(s->flow_controlled_buffer.length) + + static_cast(len); if (flags & GRPC_WRITE_BUFFER_HINT) { s->next_message_end_offset -= t->write_buffer_size; s->write_buffering = true; } else { s->write_buffering = false; } - continue_fetching_send_locked(exec_ctx, t, s); - maybe_become_writable_due_to_send_msg(exec_ctx, t, s); + continue_fetching_send_locked(t, s); + maybe_become_writable_due_to_send_msg(t, s); } } if (op->send_trailing_metadata) { - GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx); - GPR_ASSERT(s->send_trailing_metadata_finished == NULL); + GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(); + GPR_ASSERT(s->send_trailing_metadata_finished == nullptr); on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; s->send_trailing_metadata_finished = add_closure_barrier(on_complete); s->send_trailing_metadata = @@ -1589,24 +1522,25 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE]; if (metadata_size > metadata_peer_limit) { grpc_chttp2_cancel_stream( - exec_ctx, t, s, + t, s, grpc_error_set_int( grpc_error_set_int( grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "to-be-sent trailing metadata size " "exceeds peer limit"), GRPC_ERROR_INT_SIZE, - (intptr_t)metadata_size), - GRPC_ERROR_INT_LIMIT, (intptr_t)metadata_peer_limit), + static_cast(metadata_size)), + GRPC_ERROR_INT_LIMIT, + static_cast(metadata_peer_limit)), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED)); } else { if (contains_non_ok_status(s->send_trailing_metadata)) { s->seen_error = true; } if (s->write_closed) { - s->send_trailing_metadata = NULL; + s->send_trailing_metadata = nullptr; grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->send_trailing_metadata_finished, + t, s, 
&s->send_trailing_metadata_finished, grpc_metadata_batch_is_empty( op->payload->send_trailing_metadata.send_trailing_metadata) ? GRPC_ERROR_NONE @@ -1617,199 +1551,217 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, } else if (s->id != 0) { /* TODO(ctiller): check if there's flow control for any outstanding bytes before going writable */ - grpc_chttp2_mark_stream_writable(exec_ctx, t, s); + grpc_chttp2_mark_stream_writable(t, s); grpc_chttp2_initiate_write( - exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA); + t, GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA); } } } if (op->recv_initial_metadata) { - GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx); - GPR_ASSERT(s->recv_initial_metadata_ready == NULL); + GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(); + GPR_ASSERT(s->recv_initial_metadata_ready == nullptr); s->recv_initial_metadata_ready = op_payload->recv_initial_metadata.recv_initial_metadata_ready; s->recv_initial_metadata = op_payload->recv_initial_metadata.recv_initial_metadata; s->trailing_metadata_available = op_payload->recv_initial_metadata.trailing_metadata_available; - if (op_payload->recv_initial_metadata.peer_string != NULL) { - gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string, - (gpr_atm)gpr_strdup(t->peer_string)); + if (op_payload->recv_initial_metadata.peer_string != nullptr) { + char* old_peer_string = (char*)gpr_atm_full_xchg( + op_payload->recv_initial_metadata.peer_string, + (gpr_atm)gpr_strdup(t->peer_string)); + gpr_free(old_peer_string); } - grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s); + grpc_chttp2_maybe_complete_recv_initial_metadata(t, s); } if (op->recv_message) { - GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx); - size_t already_received; - GPR_ASSERT(s->recv_message_ready == NULL); + GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(); + size_t before = 0; + GPR_ASSERT(s->recv_message_ready == nullptr); GPR_ASSERT(!s->pending_byte_stream); s->recv_message_ready = op_payload->recv_message.recv_message_ready; s->recv_message = op_payload->recv_message.recv_message; if (s->id != 0) { if (!s->read_closed) { - already_received = s->frame_storage.length; - grpc_chttp2_flowctl_incoming_bs_update( - &t->flow_control, &s->flow_control, GRPC_HEADER_SIZE_IN_BYTES, - already_received); - grpc_chttp2_act_on_flowctl_action( - exec_ctx, - grpc_chttp2_flowctl_get_action(&t->flow_control, &s->flow_control), - t, s); + before = s->frame_storage.length + + s->unprocessed_incoming_frames_buffer.length; + } + } + grpc_chttp2_maybe_complete_recv_message(t, s); + if (s->id != 0) { + if (!s->read_closed && s->frame_storage.length == 0) { + size_t after = s->frame_storage.length + + s->unprocessed_incoming_frames_buffer_cached_length; + s->flow_control->IncomingByteStreamUpdate(GRPC_HEADER_SIZE_IN_BYTES, + before - after); + grpc_chttp2_act_on_flowctl_action(s->flow_control->MakeAction(), t, s); } } - grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s); } if (op->recv_trailing_metadata) { - GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx); - GPR_ASSERT(s->recv_trailing_metadata_finished == NULL); + GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(); + GPR_ASSERT(s->recv_trailing_metadata_finished == nullptr); s->recv_trailing_metadata_finished = add_closure_barrier(on_complete); s->recv_trailing_metadata = op_payload->recv_trailing_metadata.recv_trailing_metadata; s->final_metadata_requested = true; - grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s); + 
grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s); } - grpc_chttp2_complete_closure_step(exec_ctx, t, s, &on_complete, - GRPC_ERROR_NONE, "op->on_complete"); + grpc_chttp2_complete_closure_step(t, s, &on_complete, GRPC_ERROR_NONE, + "op->on_complete"); - GPR_TIMER_END("perform_stream_op_locked", 0); - GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "perform_stream_op"); + GRPC_CHTTP2_STREAM_UNREF(s, "perform_stream_op"); } -static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, - grpc_transport_stream_op_batch *op) { - GPR_TIMER_BEGIN("perform_stream_op", 0); - grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; - grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs; +static void perform_stream_op(grpc_transport* gt, grpc_stream* gs, + grpc_transport_stream_op_batch* op) { + GPR_TIMER_SCOPE("perform_stream_op", 0); + grpc_chttp2_transport* t = reinterpret_cast(gt); + grpc_chttp2_stream* s = reinterpret_cast(gs); if (!t->is_client) { if (op->send_initial_metadata) { - gpr_timespec deadline = + grpc_millis deadline = op->payload->send_initial_metadata.send_initial_metadata->deadline; - GPR_ASSERT(0 == - gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline)); + GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE); } if (op->send_trailing_metadata) { - gpr_timespec deadline = + grpc_millis deadline = op->payload->send_trailing_metadata.send_trailing_metadata->deadline; - GPR_ASSERT(0 == - gpr_time_cmp(gpr_inf_future(deadline.clock_type), deadline)); + GPR_ASSERT(deadline == GRPC_MILLIS_INF_FUTURE); } } - if (GRPC_TRACER_ON(grpc_http_trace)) { - char *str = grpc_transport_stream_op_batch_string(op); - gpr_log(GPR_DEBUG, "perform_stream_op[s=%p]: %s", s, str); + if (grpc_http_trace.enabled()) { + char* str = grpc_transport_stream_op_batch_string(op); + gpr_log(GPR_INFO, "perform_stream_op[s=%p]: %s", s, str); gpr_free(str); } op->handler_private.extra_arg = gs; GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op"); GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_INIT(&op->handler_private.closure, perform_stream_op_locked, op, grpc_combiner_scheduler(t->combiner)), GRPC_ERROR_NONE); - GPR_TIMER_END("perform_stream_op", 0); } -static void cancel_pings(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_error *error) { +static void cancel_pings(grpc_chttp2_transport* t, grpc_error* error) { /* callback remaining pings: they're not allowed to call into the transpot, and maybe they hold resources that need to be freed */ - for (size_t i = 0; i < GRPC_CHTTP2_PING_TYPE_COUNT; i++) { - grpc_chttp2_ping_queue *pq = &t->ping_queues[i]; - for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) { - grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error)); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]); - } + grpc_chttp2_ping_queue* pq = &t->ping_queue; + GPR_ASSERT(error != GRPC_ERROR_NONE); + for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) { + grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error)); + GRPC_CLOSURE_LIST_SCHED(&pq->lists[j]); } GRPC_ERROR_UNREF(error); } -static void send_ping_locked( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_ping_type ping_type, grpc_closure *on_initiate, - grpc_closure *on_ack, - grpc_chttp2_initiate_write_reason initiate_write_reason) { - grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type]; +static void send_ping_locked(grpc_chttp2_transport* t, + grpc_closure* on_initiate, grpc_closure* on_ack) { + if (t->closed_with_error != GRPC_ERROR_NONE) { + GRPC_CLOSURE_SCHED(on_initiate, 
GRPC_ERROR_REF(t->closed_with_error)); + GRPC_CLOSURE_SCHED(on_ack, GRPC_ERROR_REF(t->closed_with_error)); + return; + } + grpc_chttp2_ping_queue* pq = &t->ping_queue; grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE], on_initiate, GRPC_ERROR_NONE); - if (grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack, - GRPC_ERROR_NONE)) { - grpc_chttp2_initiate_write(exec_ctx, t, initiate_write_reason); + grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], on_ack, + GRPC_ERROR_NONE); +} + +/* + * Specialized form of send_ping_locked for keepalive ping. If there is already + * a ping in progress, the keepalive ping would piggyback onto that ping, + * instead of waiting for that ping to complete and then starting a new ping. + */ +static void send_keepalive_ping_locked(grpc_chttp2_transport* t) { + if (t->closed_with_error != GRPC_ERROR_NONE) { + GRPC_CLOSURE_SCHED(&t->start_keepalive_ping_locked, + GRPC_ERROR_REF(t->closed_with_error)); + GRPC_CLOSURE_SCHED(&t->finish_keepalive_ping_locked, + GRPC_ERROR_REF(t->closed_with_error)); + return; } + grpc_chttp2_ping_queue* pq = &t->ping_queue; + if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) { + /* There is a ping in flight. Add yourself to the inflight closure list. */ + GRPC_CLOSURE_SCHED(&t->start_keepalive_ping_locked, GRPC_ERROR_NONE); + grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT], + &t->finish_keepalive_ping_locked, GRPC_ERROR_NONE); + return; + } + grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_INITIATE], + &t->start_keepalive_ping_locked, GRPC_ERROR_NONE); + grpc_closure_list_append(&pq->lists[GRPC_CHTTP2_PCL_NEXT], + &t->finish_keepalive_ping_locked, GRPC_ERROR_NONE); } -static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp; +static void retry_initiate_ping_locked(void* tp, grpc_error* error) { + grpc_chttp2_transport* t = static_cast(tp); t->ping_state.is_delayed_ping_timer_set = false; if (error == GRPC_ERROR_NONE) { - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING); } + GRPC_CHTTP2_UNREF_TRANSPORT(t, "retry_initiate_ping_locked"); } -void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - uint64_t id) { - grpc_chttp2_ping_queue *pq = - &t->ping_queues[id % GRPC_CHTTP2_PING_TYPE_COUNT]; +void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id) { + grpc_chttp2_ping_queue* pq = &t->ping_queue; if (pq->inflight_id != id) { - char *from = grpc_endpoint_get_peer(t->ep); + char* from = grpc_endpoint_get_peer(t->ep); gpr_log(GPR_DEBUG, "Unknown ping response from %s: %" PRIx64, from, id); gpr_free(from); return; } - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]); + GRPC_CLOSURE_LIST_SCHED(&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]); if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) { - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS); } } -static void send_goaway(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_error *error) { +static void send_goaway(grpc_chttp2_transport* t, grpc_error* error) { t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED; grpc_http2_error_code http_error; grpc_slice slice; - grpc_error_get_status(error, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL, - 
&slice, &http_error); - grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error, + grpc_error_get_status(error, GRPC_MILLIS_INF_FUTURE, nullptr, &slice, + &http_error, nullptr); + grpc_chttp2_goaway_append(t->last_new_stream_id, + static_cast(http_error), grpc_slice_ref_internal(slice), &t->qbuf); - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT); GRPC_ERROR_UNREF(error); } -void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { - gpr_log(GPR_DEBUG, "PING strike"); +void grpc_chttp2_add_ping_strike(grpc_chttp2_transport* t) { if (++t->ping_recv_state.ping_strikes > t->ping_policy.max_ping_strikes && t->ping_policy.max_ping_strikes != 0) { - send_goaway(exec_ctx, t, + send_goaway(t, grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING("too_many_pings"), GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM)); /*The transport will be closed after the write is done */ close_transport_locked( - exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings")); + t, grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); } } -static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx, - void *stream_op, - grpc_error *error_ignored) { - grpc_transport_op *op = (grpc_transport_op *)stream_op; - grpc_chttp2_transport *t = - (grpc_chttp2_transport *)op->handler_private.extra_arg; +static void perform_transport_op_locked(void* stream_op, + grpc_error* error_ignored) { + grpc_transport_op* op = static_cast(stream_op); + grpc_chttp2_transport* t = + static_cast(op->handler_private.extra_arg); if (op->goaway_error) { - send_goaway(exec_ctx, t, op->goaway_error); + send_goaway(t, op->goaway_error); } if (op->set_accept_stream) { @@ -1819,43 +1771,40 @@ static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx, } if (op->bind_pollset) { - grpc_endpoint_add_to_pollset(exec_ctx, t->ep, op->bind_pollset); + grpc_endpoint_add_to_pollset(t->ep, op->bind_pollset); } if (op->bind_pollset_set) { - grpc_endpoint_add_to_pollset_set(exec_ctx, t->ep, op->bind_pollset_set); + grpc_endpoint_add_to_pollset_set(t->ep, op->bind_pollset_set); } - if (op->send_ping) { - send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, NULL, - op->send_ping, - GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING); + if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) { + send_ping_locked(t, op->send_ping.on_initiate, op->send_ping.on_ack); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING); } - if (op->on_connectivity_state_change != NULL) { + if (op->on_connectivity_state_change != nullptr) { grpc_connectivity_state_notify_on_state_change( - exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state, + &t->channel_callback.state_tracker, op->connectivity_state, op->on_connectivity_state_change); } if (op->disconnect_with_error != GRPC_ERROR_NONE) { - close_transport_locked(exec_ctx, t, op->disconnect_with_error); + close_transport_locked(t, op->disconnect_with_error); } - GRPC_CLOSURE_RUN(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); + GRPC_CLOSURE_RUN(op->on_consumed, GRPC_ERROR_NONE); - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "transport_op"); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "transport_op"); } -static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_transport_op *op) { - grpc_chttp2_transport *t 
= (grpc_chttp2_transport *)gt; - char *msg = grpc_transport_op_string(op); +static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) { + grpc_chttp2_transport* t = reinterpret_cast(gt); + char* msg = grpc_transport_op_string(op); gpr_free(msg); op->handler_private.extra_arg = gt; GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op"); - GRPC_CLOSURE_SCHED(exec_ctx, - GRPC_CLOSURE_INIT(&op->handler_private.closure, + GRPC_CLOSURE_SCHED(GRPC_CLOSURE_INIT(&op->handler_private.closure, perform_transport_op_locked, op, grpc_combiner_scheduler(t->combiner)), GRPC_ERROR_NONE); @@ -1865,36 +1814,33 @@ static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, * INPUT PROCESSING - GENERAL */ -void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { - if (s->recv_initial_metadata_ready != NULL && +void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { + if (s->recv_initial_metadata_ready != nullptr && s->published_metadata[0] != GRPC_METADATA_NOT_PUBLISHED) { if (s->seen_error) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage); + grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage); if (!s->pending_byte_stream) { grpc_slice_buffer_reset_and_unref_internal( - exec_ctx, &s->unprocessed_incoming_frames_buffer); + &s->unprocessed_incoming_frames_buffer); } } - grpc_chttp2_incoming_metadata_buffer_publish( - exec_ctx, &s->metadata_buffer[0], s->recv_initial_metadata); - null_then_run_closure(exec_ctx, &s->recv_initial_metadata_ready, - GRPC_ERROR_NONE); + grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[0], + s->recv_initial_metadata); + null_then_run_closure(&s->recv_initial_metadata_ready, GRPC_ERROR_NONE); } } -void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { - grpc_error *error = GRPC_ERROR_NONE; - if (s->recv_message_ready != NULL) { - *s->recv_message = NULL; +void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { + grpc_error* error = GRPC_ERROR_NONE; + if (s->recv_message_ready != nullptr) { + *s->recv_message = nullptr; if (s->final_metadata_requested && s->seen_error) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage); + grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage); if (!s->pending_byte_stream) { grpc_slice_buffer_reset_and_unref_internal( - exec_ctx, &s->unprocessed_incoming_frames_buffer); + &s->unprocessed_incoming_frames_buffer); } } if (!s->pending_byte_stream) { @@ -1905,7 +1851,9 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, &s->frame_storage); s->unprocessed_incoming_frames_decompressed = false; } - if (!s->unprocessed_incoming_frames_decompressed) { + if (!s->unprocessed_incoming_frames_decompressed && + s->stream_decompression_method != + GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS) { GPR_ASSERT(s->decompressed_data_buffer.length == 0); bool end_of_context; if (!s->stream_decompression_ctx) { @@ -1916,13 +1864,12 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, if (!grpc_stream_decompress( s->stream_decompression_ctx, &s->unprocessed_incoming_frames_buffer, - &s->decompressed_data_buffer, NULL, + &s->decompressed_data_buffer, nullptr, GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes, &end_of_context)) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, - 
&s->frame_storage); + grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage); grpc_slice_buffer_reset_and_unref_internal( - exec_ctx, &s->unprocessed_incoming_frames_buffer); + &s->unprocessed_incoming_frames_buffer); error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Stream decompression error."); } else { @@ -1931,58 +1878,60 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, s->decompressed_header_bytes = 0; } error = grpc_deframe_unprocessed_incoming_frames( - exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer, - NULL, s->recv_message); + &s->data_parser, s, &s->decompressed_data_buffer, nullptr, + s->recv_message); if (end_of_context) { grpc_stream_compression_context_destroy( s->stream_decompression_ctx); - s->stream_decompression_ctx = NULL; + s->stream_decompression_ctx = nullptr; } } } else { error = grpc_deframe_unprocessed_incoming_frames( - exec_ctx, &s->data_parser, s, - &s->unprocessed_incoming_frames_buffer, NULL, s->recv_message); + &s->data_parser, s, &s->unprocessed_incoming_frames_buffer, + nullptr, s->recv_message); } if (error != GRPC_ERROR_NONE) { s->seen_error = true; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, - &s->frame_storage); + grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage); grpc_slice_buffer_reset_and_unref_internal( - exec_ctx, &s->unprocessed_incoming_frames_buffer); + &s->unprocessed_incoming_frames_buffer); break; - } else if (*s->recv_message != NULL) { + } else if (*s->recv_message != nullptr) { break; } } } - if (error == GRPC_ERROR_NONE && *s->recv_message != NULL) { - null_then_run_closure(exec_ctx, &s->recv_message_ready, GRPC_ERROR_NONE); + // save the length of the buffer before handing control back to application + // threads. Needed to support correct flow control bookkeeping + s->unprocessed_incoming_frames_buffer_cached_length = + s->unprocessed_incoming_frames_buffer.length; + if (error == GRPC_ERROR_NONE && *s->recv_message != nullptr) { + null_then_run_closure(&s->recv_message_ready, GRPC_ERROR_NONE); } else if (s->published_metadata[1] != GRPC_METADATA_NOT_PUBLISHED) { - *s->recv_message = NULL; - null_then_run_closure(exec_ctx, &s->recv_message_ready, GRPC_ERROR_NONE); + *s->recv_message = nullptr; + null_then_run_closure(&s->recv_message_ready, GRPC_ERROR_NONE); } GRPC_ERROR_UNREF(error); } } -void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { - grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s); - if (s->recv_trailing_metadata_finished != NULL && s->read_closed && +void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { + grpc_chttp2_maybe_complete_recv_message(t, s); + if (s->recv_trailing_metadata_finished != nullptr && s->read_closed && s->write_closed) { - if (s->seen_error) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage); + if (s->seen_error || !t->is_client) { + grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage); if (!s->pending_byte_stream) { grpc_slice_buffer_reset_and_unref_internal( - exec_ctx, &s->unprocessed_incoming_frames_buffer); + &s->unprocessed_incoming_frames_buffer); } } bool pending_data = s->pending_byte_stream || s->unprocessed_incoming_frames_buffer.length > 0; if (s->read_closed && s->frame_storage.length > 0 && !pending_data && - !s->seen_error && s->recv_trailing_metadata_finished != NULL) { + !s->seen_error && s->recv_trailing_metadata_finished != nullptr) { /* Maybe some 
SYNC_FLUSH data is left in frame_storage. Consume them and * maybe decompress the next 5 bytes in the stream. */ bool end_of_context; @@ -1990,13 +1939,13 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, s->stream_decompression_ctx = grpc_stream_compression_context_create( s->stream_decompression_method); } - if (!grpc_stream_decompress(s->stream_decompression_ctx, - &s->frame_storage, - &s->unprocessed_incoming_frames_buffer, NULL, - GRPC_HEADER_SIZE_IN_BYTES, &end_of_context)) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage); + if (!grpc_stream_decompress( + s->stream_decompression_ctx, &s->frame_storage, + &s->unprocessed_incoming_frames_buffer, nullptr, + GRPC_HEADER_SIZE_IN_BYTES, &end_of_context)) { + grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage); grpc_slice_buffer_reset_and_unref_internal( - exec_ctx, &s->unprocessed_incoming_frames_buffer); + &s->unprocessed_incoming_frames_buffer); s->seen_error = true; } else { if (s->unprocessed_incoming_frames_buffer.length > 0) { @@ -2005,39 +1954,39 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, } if (end_of_context) { grpc_stream_compression_context_destroy(s->stream_decompression_ctx); - s->stream_decompression_ctx = NULL; + s->stream_decompression_ctx = nullptr; } } } if (s->read_closed && s->frame_storage.length == 0 && !pending_data && - s->recv_trailing_metadata_finished != NULL) { - grpc_chttp2_incoming_metadata_buffer_publish( - exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata); + s->recv_trailing_metadata_finished != nullptr) { + grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[1], + s->recv_trailing_metadata); grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->recv_trailing_metadata_finished, GRPC_ERROR_NONE, + t, s, &s->recv_trailing_metadata_finished, GRPC_ERROR_NONE, "recv_trailing_metadata_finished"); } } } -static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - uint32_t id, grpc_error *error) { - grpc_chttp2_stream *s = - (grpc_chttp2_stream *)grpc_chttp2_stream_map_delete(&t->stream_map, id); +static void remove_stream(grpc_chttp2_transport* t, uint32_t id, + grpc_error* error) { + grpc_chttp2_stream* s = static_cast( + grpc_chttp2_stream_map_delete(&t->stream_map, id)); GPR_ASSERT(s); if (t->incoming_stream == s) { - t->incoming_stream = NULL; - grpc_chttp2_parsing_become_skip_parser(exec_ctx, t); + t->incoming_stream = nullptr; + grpc_chttp2_parsing_become_skip_parser(t); } if (s->pending_byte_stream) { - if (s->on_next != NULL) { - grpc_chttp2_incoming_byte_stream *bs = s->data_parser.parsing_frame; + if (s->on_next != nullptr) { + grpc_core::Chttp2IncomingByteStream* bs = s->data_parser.parsing_frame; if (error == GRPC_ERROR_NONE) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message"); } - incoming_byte_stream_publish_error(exec_ctx, bs, error); - incoming_byte_stream_unref(exec_ctx, bs); - s->data_parser.parsing_frame = NULL; + bs->PublishError(error); + bs->Unref(); + s->data_parser.parsing_frame = nullptr; } else { GRPC_ERROR_UNREF(s->byte_stream_error); s->byte_stream_error = GRPC_ERROR_REF(error); @@ -2045,55 +1994,53 @@ static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, } if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) { - post_benign_reclaimer(exec_ctx, t); + post_benign_reclaimer(t); if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SENT) { close_transport_locked( - exec_ctx, t, - 
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Last stream closed after sending GOAWAY", &error, 1)); + t, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Last stream closed after sending GOAWAY", &error, 1)); } } if (grpc_chttp2_list_remove_writable_stream(t, s)) { - GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:remove_stream"); + GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:remove_stream"); } GRPC_ERROR_UNREF(error); - maybe_start_some_streams(exec_ctx, t); + maybe_start_some_streams(t); } -void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, grpc_chttp2_stream *s, - grpc_error *due_to_error) { +void grpc_chttp2_cancel_stream(grpc_chttp2_transport* t, grpc_chttp2_stream* s, + grpc_error* due_to_error) { if (!t->is_client && !s->sent_trailing_metadata && grpc_error_has_clear_grpc_status(due_to_error)) { - close_from_api(exec_ctx, t, s, due_to_error); + close_from_api(t, s, due_to_error); return; } if (!s->read_closed || !s->write_closed) { if (s->id != 0) { grpc_http2_error_code http_error; - grpc_error_get_status(due_to_error, s->deadline, NULL, NULL, &http_error); + grpc_error_get_status(due_to_error, s->deadline, nullptr, nullptr, + &http_error, nullptr); grpc_slice_buffer_add( - &t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error, - &s->stats.outgoing)); - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM); + &t->qbuf, + grpc_chttp2_rst_stream_create( + s->id, static_cast(http_error), &s->stats.outgoing)); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM); } } if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) { s->seen_error = true; } - grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, due_to_error); + grpc_chttp2_mark_stream_closed(t, s, 1, 1, due_to_error); } -void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_stream *s, grpc_error *error) { +void grpc_chttp2_fake_status(grpc_chttp2_transport* t, grpc_chttp2_stream* s, + grpc_error* error) { grpc_status_code status; grpc_slice slice; - grpc_error_get_status(error, s->deadline, &status, &slice, NULL); - + grpc_error_get_status(error, s->deadline, &status, &slice, nullptr, nullptr); if (status != GRPC_STATUS_OK) { s->seen_error = true; } @@ -2104,31 +2051,31 @@ void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, what we want - which is safe because we haven't told anyone about the metadata yet */ if (s->published_metadata[1] == GRPC_METADATA_NOT_PUBLISHED || - s->recv_trailing_metadata_finished != NULL) { + s->recv_trailing_metadata_finished != nullptr) { char status_string[GPR_LTOA_MIN_BUFSIZE]; gpr_ltoa(status, status_string); GRPC_LOG_IF_ERROR("add_status", grpc_chttp2_incoming_metadata_buffer_replace_or_add( - exec_ctx, &s->metadata_buffer[1], + &s->metadata_buffer[1], grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_GRPC_STATUS, + GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(status_string)))); if (!GRPC_SLICE_IS_EMPTY(slice)) { GRPC_LOG_IF_ERROR( "add_status_message", grpc_chttp2_incoming_metadata_buffer_replace_or_add( - exec_ctx, &s->metadata_buffer[1], - grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_MESSAGE, + &s->metadata_buffer[1], + grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_MESSAGE, grpc_slice_ref_internal(slice)))); } s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE; - grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s); + grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s); } 
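grpc_chttp2_fake_status above synthesizes grpc-status, and grpc-message when the error carried one, as trailing metadata whenever a stream closes without real trailers from the peer. A standalone model of that mapping, with illustrative names; the real code stores grpc_mdelem pairs in the stream's metadata buffer rather than strings:

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// One synthesized trailing-metadata entry: key and value as plain strings.
using Metadata = std::vector<std::pair<std::string, std::string>>;

// Always adds grpc-status; adds grpc-message only when the error had a message.
Metadata SynthesizeTrailers(int grpc_status, const std::string& message) {
  Metadata trailers;
  trailers.emplace_back("grpc-status", std::to_string(grpc_status));
  if (!message.empty()) {
    trailers.emplace_back("grpc-message", message);
  }
  return trailers;
}

int main() {
  // 14 is UNAVAILABLE in the gRPC status-code space.
  for (const auto& kv : SynthesizeTrailers(14, "Transport closed")) {
    std::printf("%s: %s\n", kv.first.c_str(), kv.second.c_str());
  }
  return 0;
}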
GRPC_ERROR_UNREF(error); } -static void add_error(grpc_error *error, grpc_error **refs, size_t *nrefs) { +static void add_error(grpc_error* error, grpc_error** refs, size_t* nrefs) { if (error == GRPC_ERROR_NONE) return; for (size_t i = 0; i < *nrefs; i++) { if (error == refs[i]) { @@ -2139,14 +2086,14 @@ static void add_error(grpc_error *error, grpc_error **refs, size_t *nrefs) { ++*nrefs; } -static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s, - const char *master_error_msg) { - grpc_error *refs[3]; +static grpc_error* removal_error(grpc_error* extra_error, grpc_chttp2_stream* s, + const char* master_error_msg) { + grpc_error* refs[3]; size_t nrefs = 0; add_error(s->read_closed_error, refs, &nrefs); add_error(s->write_closed_error, refs, &nrefs); add_error(extra_error, refs, &nrefs); - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; if (nrefs > 0) { error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(master_error_msg, refs, nrefs); @@ -2155,14 +2102,12 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s, return error; } -static void flush_write_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_stream *s, grpc_chttp2_write_cb **list, - grpc_error *error) { +static void flush_write_list(grpc_chttp2_transport* t, grpc_chttp2_stream* s, + grpc_chttp2_write_cb** list, grpc_error* error) { while (*list) { - grpc_chttp2_write_cb *cb = *list; + grpc_chttp2_write_cb* cb = *list; *list = cb->next; - grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, - GRPC_ERROR_REF(error), + grpc_chttp2_complete_closure_step(t, s, &cb->closure, GRPC_ERROR_REF(error), "on_write_finished_cb"); cb->next = t->write_cb_pool; t->write_cb_pool = cb; @@ -2170,37 +2115,34 @@ static void flush_write_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, GRPC_ERROR_UNREF(error); } -void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, grpc_error *error) { +void grpc_chttp2_fail_pending_writes(grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_error* error) { error = removal_error(error, s, "Pending writes failed due to stream closure"); - s->send_initial_metadata = NULL; - grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_REF(error), - "send_initial_metadata_finished"); - - s->send_trailing_metadata = NULL; - grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->send_trailing_metadata_finished, - GRPC_ERROR_REF(error), "send_trailing_metadata_finished"); - - s->fetching_send_message = NULL; - grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error), - "fetching_send_message_finished"); - flush_write_list(exec_ctx, t, s, &s->on_write_finished_cbs, - GRPC_ERROR_REF(error)); - flush_write_list(exec_ctx, t, s, &s->on_flow_controlled_cbs, error); -} - -void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, int close_reads, - int close_writes, grpc_error *error) { + s->send_initial_metadata = nullptr; + grpc_chttp2_complete_closure_step(t, s, &s->send_initial_metadata_finished, + GRPC_ERROR_REF(error), + "send_initial_metadata_finished"); + + s->send_trailing_metadata = nullptr; + grpc_chttp2_complete_closure_step(t, s, &s->send_trailing_metadata_finished, + GRPC_ERROR_REF(error), + "send_trailing_metadata_finished"); + + s->fetching_send_message.reset(); + 
grpc_chttp2_complete_closure_step(t, s, &s->fetching_send_message_finished, + GRPC_ERROR_REF(error), + "fetching_send_message_finished"); + flush_write_list(t, s, &s->on_write_finished_cbs, GRPC_ERROR_REF(error)); + flush_write_list(t, s, &s->on_flow_controlled_cbs, error); +} + +void grpc_chttp2_mark_stream_closed(grpc_chttp2_transport* t, + grpc_chttp2_stream* s, int close_reads, + int close_writes, grpc_error* error) { if (s->read_closed && s->write_closed) { /* already closed */ - grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s); + grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s); GRPC_ERROR_UNREF(error); return; } @@ -2214,20 +2156,20 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx, if (close_writes && !s->write_closed) { s->write_closed_error = GRPC_ERROR_REF(error); s->write_closed = true; - grpc_chttp2_fail_pending_writes(exec_ctx, t, s, GRPC_ERROR_REF(error)); + grpc_chttp2_fail_pending_writes(t, s, GRPC_ERROR_REF(error)); } if (s->read_closed && s->write_closed) { became_closed = true; - grpc_error *overall_error = + grpc_error* overall_error = removal_error(GRPC_ERROR_REF(error), s, "Stream removed"); if (s->id != 0) { - remove_stream(exec_ctx, t, s->id, GRPC_ERROR_REF(overall_error)); + remove_stream(t, s->id, GRPC_ERROR_REF(overall_error)); } else { /* Purge streams waiting on concurrency still waiting for id assignment */ grpc_chttp2_list_remove_waiting_for_concurrency(t, s); } if (overall_error != GRPC_ERROR_NONE) { - grpc_chttp2_fake_status(exec_ctx, t, s, overall_error); + grpc_chttp2_fake_status(t, s, overall_error); } } if (closed_read) { @@ -2236,28 +2178,29 @@ void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx, s->published_metadata[i] = GPRC_METADATA_PUBLISHED_AT_CLOSE; } } - grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s); - grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s); + grpc_chttp2_maybe_complete_recv_initial_metadata(t, s); + grpc_chttp2_maybe_complete_recv_message(t, s); } if (became_closed) { - grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s); - GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2"); + grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s); + GRPC_CHTTP2_STREAM_UNREF(s, "chttp2"); } GRPC_ERROR_UNREF(error); } -static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_stream *s, grpc_error *error) { +static void close_from_api(grpc_chttp2_transport* t, grpc_chttp2_stream* s, + grpc_error* error) { grpc_slice hdr; grpc_slice status_hdr; grpc_slice http_status_hdr; grpc_slice content_type_hdr; grpc_slice message_pfx; - uint8_t *p; + uint8_t* p; uint32_t len = 0; grpc_status_code grpc_status; grpc_slice slice; - grpc_error_get_status(error, s->deadline, &grpc_status, &slice, NULL); + grpc_error_get_status(error, s->deadline, &grpc_status, &slice, nullptr, + nullptr); GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100); @@ -2284,7 +2227,7 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, *p++ = '0'; *p++ = '0'; GPR_ASSERT(p == GRPC_SLICE_END_PTR(http_status_hdr)); - len += (uint32_t)GRPC_SLICE_LENGTH(http_status_hdr); + len += static_cast GRPC_SLICE_LENGTH(http_status_hdr); content_type_hdr = GRPC_SLICE_MALLOC(31); p = GRPC_SLICE_START_PTR(content_type_hdr); @@ -2320,7 +2263,7 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, *p++ = 'p'; *p++ = 'c'; GPR_ASSERT(p == GRPC_SLICE_END_PTR(content_type_hdr)); - len += (uint32_t)GRPC_SLICE_LENGTH(content_type_hdr); + len += 
static_cast<uint32_t>(GRPC_SLICE_LENGTH(content_type_hdr)); } status_hdr = GRPC_SLICE_MALLOC(15 + (grpc_status >= 10)); @@ -2340,14 +2283,14 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, *p++ = 's'; if (grpc_status < 10) { *p++ = 1; - *p++ = (uint8_t)('0' + grpc_status); + *p++ = static_cast<uint8_t>('0' + grpc_status); } else { *p++ = 2; - *p++ = (uint8_t)('0' + (grpc_status / 10)); - *p++ = (uint8_t)('0' + (grpc_status % 10)); + *p++ = static_cast<uint8_t>('0' + (grpc_status / 10)); + *p++ = static_cast<uint8_t>('0' + (grpc_status % 10)); } GPR_ASSERT(p == GRPC_SLICE_END_PTR(status_hdr)); - len += (uint32_t)GRPC_SLICE_LENGTH(status_hdr); + len += static_cast<uint32_t>(GRPC_SLICE_LENGTH(status_hdr)); size_t msg_len = GRPC_SLICE_LENGTH(slice); GPR_ASSERT(msg_len <= UINT32_MAX); @@ -2371,20 +2314,20 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, GRPC_CHTTP2_WRITE_VARINT((uint32_t)msg_len, 1, 0, p, (uint32_t)msg_len_len); p += msg_len_len; GPR_ASSERT(p == GRPC_SLICE_END_PTR(message_pfx)); - len += (uint32_t)GRPC_SLICE_LENGTH(message_pfx); - len += (uint32_t)msg_len; + len += static_cast<uint32_t>(GRPC_SLICE_LENGTH(message_pfx)); + len += static_cast<uint32_t>(msg_len); hdr = GRPC_SLICE_MALLOC(9); p = GRPC_SLICE_START_PTR(hdr); - *p++ = (uint8_t)(len >> 16); - *p++ = (uint8_t)(len >> 8); - *p++ = (uint8_t)(len); + *p++ = static_cast<uint8_t>(len >> 16); + *p++ = static_cast<uint8_t>(len >> 8); + *p++ = static_cast<uint8_t>(len); *p++ = GRPC_CHTTP2_FRAME_HEADER; *p++ = GRPC_CHTTP2_DATA_FLAG_END_STREAM | GRPC_CHTTP2_DATA_FLAG_END_HEADERS; - *p++ = (uint8_t)(s->id >> 24); - *p++ = (uint8_t)(s->id >> 16); - *p++ = (uint8_t)(s->id >> 8); - *p++ = (uint8_t)(s->id); + *p++ = static_cast<uint8_t>(s->id >> 24); + *p++ = static_cast<uint8_t>(s->id >> 16); + *p++ = static_cast<uint8_t>(s->id >> 8); + *p++ = static_cast<uint8_t>(s->id); GPR_ASSERT(p == GRPC_SLICE_END_PTR(hdr)); grpc_slice_buffer_add(&t->qbuf, hdr); @@ -2399,27 +2342,23 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing)); - grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error); - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API); + grpc_chttp2_mark_stream_closed(t, s, 1, 1, error); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API); } typedef struct { - grpc_exec_ctx *exec_ctx; - grpc_error *error; - grpc_chttp2_transport *t; + grpc_error* error; + grpc_chttp2_transport* t; } cancel_stream_cb_args; -static void cancel_stream_cb(void *user_data, uint32_t key, void *stream) { - cancel_stream_cb_args *args = (cancel_stream_cb_args *)user_data; - grpc_chttp2_stream *s = (grpc_chttp2_stream *)stream; - grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s, - GRPC_ERROR_REF(args->error)); +static void cancel_stream_cb(void* user_data, uint32_t key, void* stream) { + cancel_stream_cb_args* args = static_cast<cancel_stream_cb_args*>(user_data); + grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(stream); + grpc_chttp2_cancel_stream(args->t, s, GRPC_ERROR_REF(args->error)); } -static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_error *error) { - cancel_stream_cb_args args = {exec_ctx, error, t}; +static void end_all_the_calls(grpc_chttp2_transport* t, grpc_error* error) { + cancel_stream_cb_args args = {error, t}; grpc_chttp2_stream_map_for_each(&t->stream_map, cancel_stream_cb, &args); GRPC_ERROR_UNREF(error); } @@ -2428,73 +2367,56 @@ static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, * INPUT
PROCESSING - PARSING */ -void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx, - grpc_chttp2_flowctl_action action, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { - switch (action.send_stream_update) { - case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED: +template <class F> +static void WithUrgency(grpc_chttp2_transport* t, + grpc_core::chttp2::FlowControlAction::Urgency urgency, + grpc_chttp2_initiate_write_reason reason, F action) { + switch (urgency) { + case grpc_core::chttp2::FlowControlAction::Urgency::NO_ACTION_NEEDED: break; - case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY: - grpc_chttp2_mark_stream_writable(exec_ctx, t, s); - grpc_chttp2_initiate_write( - exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL); + case grpc_core::chttp2::FlowControlAction::Urgency::UPDATE_IMMEDIATELY: + grpc_chttp2_initiate_write(t, reason); + // fallthrough + case grpc_core::chttp2::FlowControlAction::Urgency::QUEUE_UPDATE: + action(); break; - case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE: - grpc_chttp2_mark_stream_writable(exec_ctx, t, s); - break; - } - switch (action.send_transport_update) { - case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED: - break; - case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY: - grpc_chttp2_initiate_write( - exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL); - break; - // this is the same as no action b/c every time the transport enters the - // writing path it will maybe do an update - case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE: - break; - } - if (action.send_setting_update != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) { - if (action.initial_window_size > 0) { - queue_setting_update(exec_ctx, t, - GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, - (uint32_t)action.initial_window_size); - } - if (action.max_frame_size > 0) { - queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE, - (uint32_t)action.max_frame_size); - } - if (action.send_setting_update == GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY) { - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS); - } - } - if (action.need_ping) { - GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping"); - grpc_bdp_estimator_schedule_ping(&t->flow_control.bdp_estimator); - send_ping_locked(exec_ctx, t, - GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE, - &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked, - GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING); } } -static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { +void grpc_chttp2_act_on_flowctl_action( + const grpc_core::chttp2::FlowControlAction& action, + grpc_chttp2_transport* t, grpc_chttp2_stream* s) { + WithUrgency(t, action.send_stream_update(), + GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL, + [t, s]() { grpc_chttp2_mark_stream_writable(t, s); }); + WithUrgency(t, action.send_transport_update(), + GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL, []() {}); + WithUrgency(t, action.send_initial_window_update(), + GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, [t, &action]() { + queue_setting_update(t, + GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, + action.initial_window_size()); + }); + WithUrgency(t, action.send_max_frame_size_update(), + GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, [t, &action]() { + queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE, + action.max_frame_size()); + }); +} + +static grpc_error* try_http_parsing(grpc_chttp2_transport* t) { grpc_http_parser parser; size_t i = 0; - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; grpc_http_response response; memset(&response, 0,
sizeof(response)); grpc_http_parser_init(&parser, GRPC_HTTP_RESPONSE, &response); - grpc_error *parse_error = GRPC_ERROR_NONE; + grpc_error* parse_error = GRPC_ERROR_NONE; for (; i < t->read_buffer.count && parse_error == GRPC_ERROR_NONE; i++) { parse_error = - grpc_http_parser_parse(&parser, t->read_buffer.slices[i], NULL); + grpc_http_parser_parse(&parser, t->read_buffer.slices[i], nullptr); } if (parse_error == GRPC_ERROR_NONE && (parse_error = grpc_http_parser_eof(&parser)) == GRPC_ERROR_NONE) { @@ -2511,37 +2433,37 @@ static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx, return error; } -static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error) { - GPR_TIMER_BEGIN("reading_action_locked", 0); +static void read_action_locked(void* tp, grpc_error* error) { + GPR_TIMER_SCOPE("reading_action_locked", 0); - grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp; + grpc_chttp2_transport* t = static_cast(tp); GRPC_ERROR_REF(error); - grpc_error *err = error; + grpc_error* err = error; if (err != GRPC_ERROR_NONE) { err = grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Endpoint read failed", &err, 1), GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state); } - GPR_SWAP(grpc_error *, err, error); + GPR_SWAP(grpc_error*, err, error); GRPC_ERROR_UNREF(err); - if (!t->closed) { - GPR_TIMER_BEGIN("reading_action.parse", 0); + if (t->closed_with_error == GRPC_ERROR_NONE) { + GPR_TIMER_SCOPE("reading_action.parse", 0); size_t i = 0; - grpc_error *errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE, + grpc_error* errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE, GRPC_ERROR_NONE}; for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) { - grpc_bdp_estimator_add_incoming_bytes( - &t->flow_control.bdp_estimator, - (int64_t)GRPC_SLICE_LENGTH(t->read_buffer.slices[i])); - errors[1] = - grpc_chttp2_perform_read(exec_ctx, t, t->read_buffer.slices[i]); + grpc_core::BdpEstimator* bdp_est = t->flow_control->bdp_estimator(); + if (bdp_est) { + bdp_est->AddIncomingBytes( + static_cast GRPC_SLICE_LENGTH(t->read_buffer.slices[i])); + } + errors[1] = grpc_chttp2_perform_read(t, t->read_buffer.slices[i]); } if (errors[1] != GRPC_ERROR_NONE) { - errors[2] = try_http_parsing(exec_ctx, t); + errors[2] = try_http_parsing(t); GRPC_ERROR_UNREF(error); error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Failed parsing HTTP/2", errors, GPR_ARRAY_SIZE(errors)); @@ -2549,90 +2471,114 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp, for (i = 0; i < GPR_ARRAY_SIZE(errors); i++) { GRPC_ERROR_UNREF(errors[i]); } - GPR_TIMER_END("reading_action.parse", 0); - GPR_TIMER_BEGIN("post_parse_locked", 0); - if (t->flow_control.initial_window_update != 0) { - if (t->flow_control.initial_window_update > 0) { - grpc_chttp2_stream *s; + GPR_TIMER_SCOPE("post_parse_locked", 0); + if (t->initial_window_update != 0) { + if (t->initial_window_update > 0) { + grpc_chttp2_stream* s; while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) { - grpc_chttp2_mark_stream_writable(exec_ctx, t, s); + grpc_chttp2_mark_stream_writable(t, s); grpc_chttp2_initiate_write( - exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING); + t, GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING); } } - t->flow_control.initial_window_update = 0; + t->initial_window_update = 0; } - GPR_TIMER_END("post_parse_locked", 0); } - GPR_TIMER_BEGIN("post_reading_action_locked", 0); + GPR_TIMER_SCOPE("post_reading_action_locked", 0); bool keep_reading = 
false; - if (error == GRPC_ERROR_NONE && t->closed) { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed"); + if (error == GRPC_ERROR_NONE && t->closed_with_error != GRPC_ERROR_NONE) { + error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Transport closed", &t->closed_with_error, 1); } if (error != GRPC_ERROR_NONE) { - close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error)); + /* If a goaway frame was received, this might be the reason why the read + * failed. Add this info to the error */ + if (t->goaway_error != GRPC_ERROR_NONE) { + error = grpc_error_add_child(error, GRPC_ERROR_REF(t->goaway_error)); + } + + close_transport_locked(t, GRPC_ERROR_REF(error)); t->endpoint_reading = 0; - } else if (!t->closed) { + } else if (t->closed_with_error == GRPC_ERROR_NONE) { keep_reading = true; GRPC_CHTTP2_REF_TRANSPORT(t, "keep_reading"); } - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &t->read_buffer); + grpc_slice_buffer_reset_and_unref_internal(&t->read_buffer); if (keep_reading) { - grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, - &t->read_action_locked); - grpc_chttp2_act_on_flowctl_action( - exec_ctx, grpc_chttp2_flowctl_get_bdp_action(&t->flow_control), t, - NULL); - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading"); + grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked); + grpc_chttp2_act_on_flowctl_action(t->flow_control->MakeAction(), t, + nullptr); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "keep_reading"); } else { - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action"); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "reading_action"); } - GPR_TIMER_END("post_reading_action_locked", 0); - GRPC_ERROR_UNREF(error); +} - GPR_TIMER_END("reading_action_locked", 0); +// t is reffed prior to calling the first time, and once the callback chain +// that kicks off finishes, it's unreffed +static void schedule_bdp_ping_locked(grpc_chttp2_transport* t) { + t->flow_control->bdp_estimator()->SchedulePing(); + send_ping_locked(t, &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked); } -static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp; - if (GRPC_TRACER_ON(grpc_http_trace)) { - gpr_log(GPR_DEBUG, "%s: Start BDP ping", t->peer_string); +static void start_bdp_ping_locked(void* tp, grpc_error* error) { + grpc_chttp2_transport* t = static_cast(tp); + if (grpc_http_trace.enabled()) { + gpr_log(GPR_INFO, "%s: Start BDP ping err=%s", t->peer_string, + grpc_error_string(error)); } /* Reset the keepalive ping timer */ if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING) { - grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer); + grpc_timer_cancel(&t->keepalive_ping_timer); } - grpc_bdp_estimator_start_ping(&t->flow_control.bdp_estimator); + t->flow_control->bdp_estimator()->StartPing(); } -static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp; - if (GRPC_TRACER_ON(grpc_http_trace)) { - gpr_log(GPR_DEBUG, "%s: Complete BDP ping", t->peer_string); +static void finish_bdp_ping_locked(void* tp, grpc_error* error) { + grpc_chttp2_transport* t = static_cast(tp); + if (grpc_http_trace.enabled()) { + gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s", t->peer_string, + grpc_error_string(error)); } - grpc_bdp_estimator_complete_ping(&t->flow_control.bdp_estimator); + if (error != GRPC_ERROR_NONE) { + GRPC_CHTTP2_UNREF_TRANSPORT(t, "bdp_ping"); + return; + } + grpc_millis 
next_ping = t->flow_control->bdp_estimator()->CompletePing(); + grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t, + nullptr); + GPR_ASSERT(!t->have_next_bdp_ping_timer); + t->have_next_bdp_ping_timer = true; + grpc_timer_init(&t->next_bdp_ping_timer, next_ping, + &t->next_bdp_ping_timer_expired_locked); +} - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping"); +static void next_bdp_ping_timer_expired_locked(void* tp, grpc_error* error) { + grpc_chttp2_transport* t = static_cast(tp); + GPR_ASSERT(t->have_next_bdp_ping_timer); + t->have_next_bdp_ping_timer = false; + if (error != GRPC_ERROR_NONE) { + GRPC_CHTTP2_UNREF_TRANSPORT(t, "bdp_ping"); + return; + } + schedule_bdp_ping_locked(t); } -void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args, +void grpc_chttp2_config_default_keepalive_args(grpc_channel_args* args, bool is_client) { size_t i; if (args) { for (i = 0; i < args->num_args; i++) { if (0 == strcmp(args->args[i].key, GRPC_ARG_KEEPALIVE_TIME_MS)) { const int value = grpc_channel_arg_get_integer( - &args->args[i], - (grpc_integer_options){g_default_client_keepalive_time_ms, 1, - INT_MAX}); + &args->args[i], {is_client ? g_default_client_keepalive_time_ms + : g_default_server_keepalive_time_ms, + 1, INT_MAX}); if (is_client) { g_default_client_keepalive_time_ms = value; } else { @@ -2641,9 +2587,9 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args, } else if (0 == strcmp(args->args[i].key, GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) { const int value = grpc_channel_arg_get_integer( - &args->args[i], - (grpc_integer_options){g_default_client_keepalive_timeout_ms, 0, - INT_MAX}); + &args->args[i], {is_client ? g_default_client_keepalive_timeout_ms + : g_default_server_keepalive_timeout_ms, + 0, INT_MAX}); if (is_client) { g_default_client_keepalive_timeout_ms = value; } else { @@ -2651,21 +2597,24 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args, } } else if (0 == strcmp(args->args[i].key, GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) { - g_default_keepalive_permit_without_calls = - (uint32_t)grpc_channel_arg_get_integer( - &args->args[i], - (grpc_integer_options){g_default_keepalive_permit_without_calls, - 0, 1}); + const bool value = static_cast(grpc_channel_arg_get_integer( + &args->args[i], + {is_client ? 
g_default_client_keepalive_permit_without_calls + : g_default_server_keepalive_timeout_ms, + 0, 1})); + if (is_client) { + g_default_client_keepalive_permit_without_calls = value; + } else { + g_default_server_keepalive_permit_without_calls = value; + } } else if (0 == strcmp(args->args[i].key, GRPC_ARG_HTTP2_MAX_PING_STRIKES)) { g_default_max_ping_strikes = grpc_channel_arg_get_integer( - &args->args[i], - (grpc_integer_options){g_default_max_ping_strikes, 0, INT_MAX}); + &args->args[i], {g_default_max_ping_strikes, 0, INT_MAX}); } else if (0 == strcmp(args->args[i].key, GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA)) { g_default_max_pings_without_data = grpc_channel_arg_get_integer( - &args->args[i], (grpc_integer_options){ - g_default_max_pings_without_data, 0, INT_MAX}); + &args->args[i], {g_default_max_pings_without_data, 0, INT_MAX}); } else if (0 == strcmp( args->args[i].key, @@ -2673,9 +2622,7 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args, g_default_min_sent_ping_interval_without_data_ms = grpc_channel_arg_get_integer( &args->args[i], - (grpc_integer_options){ - g_default_min_sent_ping_interval_without_data_ms, 0, - INT_MAX}); + {g_default_min_sent_ping_interval_without_data_ms, 0, INT_MAX}); } else if (0 == strcmp( args->args[i].key, @@ -2683,82 +2630,73 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args, g_default_min_recv_ping_interval_without_data_ms = grpc_channel_arg_get_integer( &args->args[i], - (grpc_integer_options){ - g_default_min_recv_ping_interval_without_data_ms, 0, - INT_MAX}); + {g_default_min_recv_ping_interval_without_data_ms, 0, INT_MAX}); } } } } -static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg; +static void init_keepalive_ping_locked(void* arg, grpc_error* error) { + grpc_chttp2_transport* t = static_cast(arg); GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING); - if (t->destroying || t->closed) { + if (t->destroying || t->closed_with_error != GRPC_ERROR_NONE) { t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING; } else if (error == GRPC_ERROR_NONE) { if (t->keepalive_permit_without_calls || grpc_chttp2_stream_map_size(&t->stream_map) > 0) { t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING; GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end"); - send_ping_locked(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, - &t->start_keepalive_ping_locked, - &t->finish_keepalive_ping_locked, - GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING); + send_keepalive_ping_locked(t); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING); } else { GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping"); - grpc_timer_init( - exec_ctx, &t->keepalive_ping_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time), - &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&t->keepalive_ping_timer, + grpc_core::ExecCtx::Get()->Now() + t->keepalive_time, + &t->init_keepalive_ping_locked); } } else if (error == GRPC_ERROR_CANCELLED) { /* The keepalive ping timer may be cancelled by bdp */ GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping"); - grpc_timer_init( - exec_ctx, &t->keepalive_ping_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time), - &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&t->keepalive_ping_timer, + grpc_core::ExecCtx::Get()->Now() + t->keepalive_time, + &t->init_keepalive_ping_locked); } - 
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "init keepalive ping"); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "init keepalive ping"); } -static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg; +static void start_keepalive_ping_locked(void* arg, grpc_error* error) { + grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg); GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog"); - grpc_timer_init( - exec_ctx, &t->keepalive_watchdog_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_timeout), - &t->keepalive_watchdog_fired_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&t->keepalive_watchdog_timer, + grpc_core::ExecCtx::Get()->Now() + t->keepalive_timeout, + &t->keepalive_watchdog_fired_locked); } -static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg; +static void finish_keepalive_ping_locked(void* arg, grpc_error* error) { + grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg); if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) { if (error == GRPC_ERROR_NONE) { t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING; - grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer); + grpc_timer_cancel(&t->keepalive_watchdog_timer); GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping"); - grpc_timer_init( - exec_ctx, &t->keepalive_ping_timer, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), t->keepalive_time), - &t->init_keepalive_ping_locked, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&t->keepalive_ping_timer, + grpc_core::ExecCtx::Get()->Now() + t->keepalive_time, + &t->init_keepalive_ping_locked); } } - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive ping end"); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "keepalive ping end"); } -static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg; +static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error) { + grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg); if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) { if (error == GRPC_ERROR_NONE) { t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING; - close_transport_locked(exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "keepalive watchdog timeout")); + close_transport_locked( + t, + grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "keepalive watchdog timeout"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL)); } } else { /* The watchdog timer should have been cancelled by @@ -2768,356 +2706,292 @@ static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg, t->keepalive_state, GRPC_CHTTP2_KEEPALIVE_STATE_PINGING); } } - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive watchdog"); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "keepalive watchdog"); } /******************************************************************************* * CALLBACK LOOP */ -static void connectivity_state_set(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +static void connectivity_state_set(grpc_chttp2_transport* t, grpc_connectivity_state state, - grpc_error *error, const char *reason) { - GRPC_CHTTP2_IF_TRACING( - gpr_log(GPR_DEBUG, "set connectivity_state=%d", state)); - grpc_connectivity_state_set(exec_ctx, &t->channel_callback.state_tracker, - state, error, reason); + grpc_error* error, const char* reason) { + GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "set 
connectivity_state=%d", state)); + grpc_connectivity_state_set(&t->channel_callback.state_tracker, state, error, + reason); } /******************************************************************************* * POLLSET STUFF */ -static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, grpc_pollset *pollset) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; - grpc_endpoint_add_to_pollset(exec_ctx, t->ep, pollset); +static void set_pollset(grpc_transport* gt, grpc_stream* gs, + grpc_pollset* pollset) { + grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt); + grpc_endpoint_add_to_pollset(t->ep, pollset); } -static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, grpc_pollset_set *pollset_set) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt; - grpc_endpoint_add_to_pollset_set(exec_ctx, t->ep, pollset_set); +static void set_pollset_set(grpc_transport* gt, grpc_stream* gs, + grpc_pollset_set* pollset_set) { + grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt); + grpc_endpoint_add_to_pollset_set(t->ep, pollset_set); } /******************************************************************************* * BYTE STREAM */ -static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_chttp2_stream *s = (grpc_chttp2_stream *)arg; - +static void reset_byte_stream(void* arg, grpc_error* error) { + grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(arg); s->pending_byte_stream = false; if (error == GRPC_ERROR_NONE) { - grpc_chttp2_maybe_complete_recv_message(exec_ctx, s->t, s); - grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, s->t, s); + grpc_chttp2_maybe_complete_recv_message(s->t, s); + grpc_chttp2_maybe_complete_recv_trailing_metadata(s->t, s); } else { GPR_ASSERT(error != GRPC_ERROR_NONE); - GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_REF(error)); - s->on_next = NULL; + GRPC_CLOSURE_SCHED(s->on_next, GRPC_ERROR_REF(error)); + s->on_next = nullptr; GRPC_ERROR_UNREF(s->byte_stream_error); s->byte_stream_error = GRPC_ERROR_NONE; - grpc_chttp2_cancel_stream(exec_ctx, s->t, s, GRPC_ERROR_REF(error)); + grpc_chttp2_cancel_stream(s->t, s, GRPC_ERROR_REF(error)); s->byte_stream_error = GRPC_ERROR_REF(error); } } -static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx, - grpc_chttp2_incoming_byte_stream *bs) { - if (gpr_unref(&bs->refs)) { - gpr_free(bs); +namespace grpc_core { + +Chttp2IncomingByteStream::Chttp2IncomingByteStream( + grpc_chttp2_transport* transport, grpc_chttp2_stream* stream, + uint32_t frame_size, uint32_t flags) + : ByteStream(frame_size, flags), + transport_(transport), + stream_(stream), + remaining_bytes_(frame_size) { + gpr_ref_init(&refs_, 2); + GRPC_ERROR_UNREF(stream->byte_stream_error); + stream->byte_stream_error = GRPC_ERROR_NONE; +} + +void Chttp2IncomingByteStream::OrphanLocked(void* arg, + grpc_error* error_ignored) { + Chttp2IncomingByteStream* bs = static_cast<Chttp2IncomingByteStream*>(arg); + grpc_chttp2_stream* s = bs->stream_; + grpc_chttp2_transport* t = s->t; + bs->Unref(); + s->pending_byte_stream = false; + grpc_chttp2_maybe_complete_recv_message(t, s); + grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s); +} + +void Chttp2IncomingByteStream::Orphan() { + GPR_TIMER_SCOPE("incoming_byte_stream_destroy", 0); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_INIT(&destroy_action_, + &Chttp2IncomingByteStream::OrphanLocked, this, + grpc_combiner_scheduler(transport_->combiner)), + GRPC_ERROR_NONE); +} + +void Chttp2IncomingByteStream::Unref() { + if (gpr_unref(&refs_)) { + 
Delete(this); } } -static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx, - void *argp, - grpc_error *error_ignored) { - grpc_chttp2_incoming_byte_stream *bs = - (grpc_chttp2_incoming_byte_stream *)argp; - grpc_chttp2_transport *t = bs->transport; - grpc_chttp2_stream *s = bs->stream; +void Chttp2IncomingByteStream::Ref() { gpr_ref(&refs_); } +void Chttp2IncomingByteStream::NextLocked(void* arg, + grpc_error* error_ignored) { + Chttp2IncomingByteStream* bs = static_cast(arg); + grpc_chttp2_transport* t = bs->transport_; + grpc_chttp2_stream* s = bs->stream_; size_t cur_length = s->frame_storage.length; if (!s->read_closed) { - grpc_chttp2_flowctl_incoming_bs_update(&t->flow_control, &s->flow_control, - bs->next_action.max_size_hint, - cur_length); - grpc_chttp2_act_on_flowctl_action( - exec_ctx, - grpc_chttp2_flowctl_get_action(&t->flow_control, &s->flow_control), t, - s); + s->flow_control->IncomingByteStreamUpdate(bs->next_action_.max_size_hint, + cur_length); + grpc_chttp2_act_on_flowctl_action(s->flow_control->MakeAction(), t, s); } GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0); if (s->frame_storage.length > 0) { grpc_slice_buffer_swap(&s->frame_storage, &s->unprocessed_incoming_frames_buffer); s->unprocessed_incoming_frames_decompressed = false; - GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(bs->next_action_.on_complete, GRPC_ERROR_NONE); } else if (s->byte_stream_error != GRPC_ERROR_NONE) { - GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete, + GRPC_CLOSURE_SCHED(bs->next_action_.on_complete, GRPC_ERROR_REF(s->byte_stream_error)); - if (s->data_parser.parsing_frame != NULL) { - incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame); - s->data_parser.parsing_frame = NULL; + if (s->data_parser.parsing_frame != nullptr) { + s->data_parser.parsing_frame->Unref(); + s->data_parser.parsing_frame = nullptr; } } else if (s->read_closed) { - if (bs->remaining_bytes != 0) { + if (bs->remaining_bytes_ != 0) { s->byte_stream_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message"); - GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete, + GRPC_CLOSURE_SCHED(bs->next_action_.on_complete, GRPC_ERROR_REF(s->byte_stream_error)); - if (s->data_parser.parsing_frame != NULL) { - incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame); - s->data_parser.parsing_frame = NULL; + if (s->data_parser.parsing_frame != nullptr) { + s->data_parser.parsing_frame->Unref(); + s->data_parser.parsing_frame = nullptr; } } else { /* Should never reach here. 
*/ GPR_ASSERT(false); } } else { - s->on_next = bs->next_action.on_complete; + s->on_next = bs->next_action_.on_complete; } - incoming_byte_stream_unref(exec_ctx, bs); + bs->Unref(); } -static bool incoming_byte_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - size_t max_size_hint, - grpc_closure *on_complete) { - GPR_TIMER_BEGIN("incoming_byte_stream_next", 0); - grpc_chttp2_incoming_byte_stream *bs = - (grpc_chttp2_incoming_byte_stream *)byte_stream; - grpc_chttp2_stream *s = bs->stream; - if (s->unprocessed_incoming_frames_buffer.length > 0) { - GPR_TIMER_END("incoming_byte_stream_next", 0); +bool Chttp2IncomingByteStream::Next(size_t max_size_hint, + grpc_closure* on_complete) { + GPR_TIMER_SCOPE("incoming_byte_stream_next", 0); + if (stream_->unprocessed_incoming_frames_buffer.length > 0) { return true; } else { - gpr_ref(&bs->refs); - bs->next_action.max_size_hint = max_size_hint; - bs->next_action.on_complete = on_complete; + Ref(); + next_action_.max_size_hint = max_size_hint; + next_action_.on_complete = on_complete; GRPC_CLOSURE_SCHED( - exec_ctx, - GRPC_CLOSURE_INIT(&bs->next_action.closure, - incoming_byte_stream_next_locked, bs, - grpc_combiner_scheduler(bs->transport->combiner)), + GRPC_CLOSURE_INIT(&next_action_.closure, + &Chttp2IncomingByteStream::NextLocked, this, + grpc_combiner_scheduler(transport_->combiner)), GRPC_ERROR_NONE); - GPR_TIMER_END("incoming_byte_stream_next", 0); return false; } } -static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_slice *slice) { - GPR_TIMER_BEGIN("incoming_byte_stream_pull", 0); - grpc_chttp2_incoming_byte_stream *bs = - (grpc_chttp2_incoming_byte_stream *)byte_stream; - grpc_chttp2_stream *s = bs->stream; - grpc_error *error; - - if (s->unprocessed_incoming_frames_buffer.length > 0) { - if (!s->unprocessed_incoming_frames_decompressed) { +grpc_error* Chttp2IncomingByteStream::Pull(grpc_slice* slice) { + GPR_TIMER_SCOPE("incoming_byte_stream_pull", 0); + grpc_error* error; + if (stream_->unprocessed_incoming_frames_buffer.length > 0) { + if (!stream_->unprocessed_incoming_frames_decompressed) { bool end_of_context; - if (!s->stream_decompression_ctx) { - s->stream_decompression_ctx = grpc_stream_compression_context_create( - s->stream_decompression_method); + if (!stream_->stream_decompression_ctx) { + stream_->stream_decompression_ctx = + grpc_stream_compression_context_create( + stream_->stream_decompression_method); } - if (!grpc_stream_decompress(s->stream_decompression_ctx, - &s->unprocessed_incoming_frames_buffer, - &s->decompressed_data_buffer, NULL, + if (!grpc_stream_decompress(stream_->stream_decompression_ctx, + &stream_->unprocessed_incoming_frames_buffer, + &stream_->decompressed_data_buffer, nullptr, MAX_SIZE_T, &end_of_context)) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream decompression error."); return error; } - GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0); - grpc_slice_buffer_swap(&s->unprocessed_incoming_frames_buffer, - &s->decompressed_data_buffer); - s->unprocessed_incoming_frames_decompressed = true; + GPR_ASSERT(stream_->unprocessed_incoming_frames_buffer.length == 0); + grpc_slice_buffer_swap(&stream_->unprocessed_incoming_frames_buffer, + &stream_->decompressed_data_buffer); + stream_->unprocessed_incoming_frames_decompressed = true; if (end_of_context) { - grpc_stream_compression_context_destroy(s->stream_decompression_ctx); - s->stream_decompression_ctx = NULL; + grpc_stream_compression_context_destroy( 
+ stream_->stream_decompression_ctx); + stream_->stream_decompression_ctx = nullptr; } - if (s->unprocessed_incoming_frames_buffer.length == 0) { + if (stream_->unprocessed_incoming_frames_buffer.length == 0) { *slice = grpc_empty_slice(); } } error = grpc_deframe_unprocessed_incoming_frames( - exec_ctx, &s->data_parser, s, &s->unprocessed_incoming_frames_buffer, - slice, NULL); + &stream_->data_parser, stream_, + &stream_->unprocessed_incoming_frames_buffer, slice, nullptr); if (error != GRPC_ERROR_NONE) { return error; } } else { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message"); - GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(&stream_->reset_byte_stream, GRPC_ERROR_REF(error)); return error; } - GPR_TIMER_END("incoming_byte_stream_pull", 0); return GRPC_ERROR_NONE; } -static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx, - void *byte_stream, - grpc_error *error_ignored); - -static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream) { - GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0); - grpc_chttp2_incoming_byte_stream *bs = - (grpc_chttp2_incoming_byte_stream *)byte_stream; - GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_INIT( - &bs->destroy_action, incoming_byte_stream_destroy_locked, - bs, grpc_combiner_scheduler(bs->transport->combiner)), - GRPC_ERROR_NONE); - GPR_TIMER_END("incoming_byte_stream_destroy", 0); -} - -static void incoming_byte_stream_publish_error( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, - grpc_error *error) { - grpc_chttp2_stream *s = bs->stream; - +void Chttp2IncomingByteStream::PublishError(grpc_error* error) { GPR_ASSERT(error != GRPC_ERROR_NONE); - GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_REF(error)); - s->on_next = NULL; - GRPC_ERROR_UNREF(s->byte_stream_error); - s->byte_stream_error = GRPC_ERROR_REF(error); - grpc_chttp2_cancel_stream(exec_ctx, bs->transport, bs->stream, - GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(stream_->on_next, GRPC_ERROR_REF(error)); + stream_->on_next = nullptr; + GRPC_ERROR_UNREF(stream_->byte_stream_error); + stream_->byte_stream_error = GRPC_ERROR_REF(error); + grpc_chttp2_cancel_stream(transport_, stream_, GRPC_ERROR_REF(error)); } -grpc_error *grpc_chttp2_incoming_byte_stream_push( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, - grpc_slice slice, grpc_slice *slice_out) { - grpc_chttp2_stream *s = bs->stream; - - if (bs->remaining_bytes < GRPC_SLICE_LENGTH(slice)) { - grpc_error *error = +grpc_error* Chttp2IncomingByteStream::Push(grpc_slice slice, + grpc_slice* slice_out) { + if (remaining_bytes_ < GRPC_SLICE_LENGTH(slice)) { + grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many bytes in stream"); - - GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error)); - grpc_slice_unref_internal(exec_ctx, slice); + GRPC_CLOSURE_SCHED(&stream_->reset_byte_stream, GRPC_ERROR_REF(error)); + grpc_slice_unref_internal(slice); return error; } else { - bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice); - if (slice_out != NULL) { + remaining_bytes_ -= static_cast GRPC_SLICE_LENGTH(slice); + if (slice_out != nullptr) { *slice_out = slice; } return GRPC_ERROR_NONE; } } -grpc_error *grpc_chttp2_incoming_byte_stream_finished( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, - grpc_error *error, bool reset_on_error) { - grpc_chttp2_stream *s = bs->stream; - +grpc_error* Chttp2IncomingByteStream::Finished(grpc_error* 
error, + bool reset_on_error) { if (error == GRPC_ERROR_NONE) { - if (bs->remaining_bytes != 0) { + if (remaining_bytes_ != 0) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message"); } } if (error != GRPC_ERROR_NONE && reset_on_error) { - GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(&stream_->reset_byte_stream, GRPC_ERROR_REF(error)); } - incoming_byte_stream_unref(exec_ctx, bs); + Unref(); return error; } -static void incoming_byte_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_error *error) { - grpc_chttp2_incoming_byte_stream *bs = - (grpc_chttp2_incoming_byte_stream *)byte_stream; - GRPC_ERROR_UNREF(grpc_chttp2_incoming_byte_stream_finished( - exec_ctx, bs, error, true /* reset_on_error */)); +void Chttp2IncomingByteStream::Shutdown(grpc_error* error) { + GRPC_ERROR_UNREF(Finished(error, true /* reset_on_error */)); } -static const grpc_byte_stream_vtable grpc_chttp2_incoming_byte_stream_vtable = { - incoming_byte_stream_next, incoming_byte_stream_pull, - incoming_byte_stream_shutdown, incoming_byte_stream_destroy}; - -static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx, - void *byte_stream, - grpc_error *error_ignored) { - grpc_chttp2_incoming_byte_stream *bs = - (grpc_chttp2_incoming_byte_stream *)byte_stream; - grpc_chttp2_stream *s = bs->stream; - grpc_chttp2_transport *t = s->t; - - GPR_ASSERT(bs->base.vtable == &grpc_chttp2_incoming_byte_stream_vtable); - incoming_byte_stream_unref(exec_ctx, bs); - s->pending_byte_stream = false; - grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s); - grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s); -} - -grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s, - uint32_t frame_size, uint32_t flags) { - grpc_chttp2_incoming_byte_stream *incoming_byte_stream = - (grpc_chttp2_incoming_byte_stream *)gpr_malloc( - sizeof(*incoming_byte_stream)); - incoming_byte_stream->base.length = frame_size; - incoming_byte_stream->remaining_bytes = frame_size; - incoming_byte_stream->base.flags = flags; - incoming_byte_stream->base.vtable = &grpc_chttp2_incoming_byte_stream_vtable; - gpr_ref_init(&incoming_byte_stream->refs, 2); - incoming_byte_stream->transport = t; - incoming_byte_stream->stream = s; - GRPC_ERROR_UNREF(s->byte_stream_error); - s->byte_stream_error = GRPC_ERROR_NONE; - return incoming_byte_stream; -} +} // namespace grpc_core /******************************************************************************* * RESOURCE QUOTAS */ -static void post_benign_reclaimer(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { +static void post_benign_reclaimer(grpc_chttp2_transport* t) { if (!t->benign_reclaimer_registered) { t->benign_reclaimer_registered = true; GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer"); - grpc_resource_user_post_reclaimer(exec_ctx, - grpc_endpoint_get_resource_user(t->ep), + grpc_resource_user_post_reclaimer(grpc_endpoint_get_resource_user(t->ep), false, &t->benign_reclaimer_locked); } } -static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { +static void post_destructive_reclaimer(grpc_chttp2_transport* t) { if (!t->destructive_reclaimer_registered) { t->destructive_reclaimer_registered = true; GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer"); - grpc_resource_user_post_reclaimer(exec_ctx, - grpc_endpoint_get_resource_user(t->ep), + 
grpc_resource_user_post_reclaimer(grpc_endpoint_get_resource_user(t->ep), true, &t->destructive_reclaimer_locked); } } -static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg; +static void benign_reclaimer_locked(void* arg, grpc_error* error) { + grpc_chttp2_transport* t = static_cast(arg); if (error == GRPC_ERROR_NONE && grpc_chttp2_stream_map_size(&t->stream_map) == 0) { /* Channel with no active streams: send a goaway to try and make it * disconnect cleanly */ - if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { - gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory", + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory", t->peer_string); } - send_goaway(exec_ctx, t, + send_goaway(t, grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"), GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM)); - } else if (error == GRPC_ERROR_NONE && - GRPC_TRACER_ON(grpc_resource_quota_trace)) { - gpr_log(GPR_DEBUG, + } else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, "HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR " streams", t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map)); @@ -3125,25 +2999,24 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg, t->benign_reclaimer_registered = false; if (error != GRPC_ERROR_CANCELLED) { grpc_resource_user_finish_reclamation( - exec_ctx, grpc_endpoint_get_resource_user(t->ep)); + grpc_endpoint_get_resource_user(t->ep)); } - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "benign_reclaimer"); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "benign_reclaimer"); } -static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg; +static void destructive_reclaimer_locked(void* arg, grpc_error* error) { + grpc_chttp2_transport* t = static_cast(arg); size_t n = grpc_chttp2_stream_map_size(&t->stream_map); t->destructive_reclaimer_registered = false; if (error == GRPC_ERROR_NONE && n > 0) { - grpc_chttp2_stream *s = - (grpc_chttp2_stream *)grpc_chttp2_stream_map_rand(&t->stream_map); - if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { - gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string, + grpc_chttp2_stream* s = static_cast( + grpc_chttp2_stream_map_rand(&t->stream_map)); + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d", t->peer_string, s->id); } grpc_chttp2_cancel_stream( - exec_ctx, t, s, + t, s, grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"), GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM)); @@ -3152,21 +3025,21 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg, there are more streams left, we can immediately post a new reclaimer in case the resource quota needs to free more memory */ - post_destructive_reclaimer(exec_ctx, t); + post_destructive_reclaimer(t); } } if (error != GRPC_ERROR_CANCELLED) { grpc_resource_user_finish_reclamation( - exec_ctx, grpc_endpoint_get_resource_user(t->ep)); + grpc_endpoint_get_resource_user(t->ep)); } - GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destructive_reclaimer"); + GRPC_CHTTP2_UNREF_TRANSPORT(t, "destructive_reclaimer"); } /******************************************************************************* * MONITORING */ -const char 
*grpc_chttp2_initiate_write_reason_string( +const char* grpc_chttp2_initiate_write_reason_string( grpc_chttp2_initiate_write_reason reason) { switch (reason) { case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE: @@ -3195,8 +3068,6 @@ const char *grpc_chttp2_initiate_write_reason_string( return "TRANSPORT_FLOW_CONTROL"; case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS: return "SEND_SETTINGS"; - case GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING: - return "BDP_ESTIMATOR_PING"; case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING: return "FLOW_CONTROL_UNSTALLED_BY_SETTING"; case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE: @@ -3215,9 +3086,8 @@ const char *grpc_chttp2_initiate_write_reason_string( GPR_UNREACHABLE_CODE(return "unknown"); } -static grpc_endpoint *chttp2_get_endpoint(grpc_exec_ctx *exec_ctx, - grpc_transport *t) { - return ((grpc_chttp2_transport *)t)->ep; +static grpc_endpoint* chttp2_get_endpoint(grpc_transport* t) { + return (reinterpret_cast<grpc_chttp2_transport*>(t))->ep; } static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream), @@ -3231,27 +3101,27 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream), destroy_transport, chttp2_get_endpoint}; -static const grpc_transport_vtable *get_vtable(void) { return &vtable; } +static const grpc_transport_vtable* get_vtable(void) { return &vtable; } -grpc_transport *grpc_create_chttp2_transport( - grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args, - grpc_endpoint *ep, bool is_client) { - grpc_chttp2_transport *t = - (grpc_chttp2_transport *)gpr_zalloc(sizeof(grpc_chttp2_transport)); - init_transport(exec_ctx, t, channel_args, ep, is_client); +grpc_transport* grpc_create_chttp2_transport( + const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client) { + grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>( + gpr_zalloc(sizeof(grpc_chttp2_transport))); + init_transport(t, channel_args, ep, is_client); return &t->base; } void grpc_chttp2_transport_start_reading( - grpc_exec_ctx *exec_ctx, grpc_transport *transport, - grpc_slice_buffer *read_buffer, grpc_closure *notify_on_receive_settings) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport; + grpc_transport* transport, grpc_slice_buffer* read_buffer, + grpc_closure* notify_on_receive_settings) { + grpc_chttp2_transport* t = + reinterpret_cast<grpc_chttp2_transport*>(transport); GRPC_CHTTP2_REF_TRANSPORT( t, "reading_action"); /* matches unref inside reading_action */ - if (read_buffer != NULL) { + if (read_buffer != nullptr) { grpc_slice_buffer_move_into(read_buffer, &t->read_buffer); gpr_free(read_buffer); } t->notify_on_receive_settings = notify_on_receive_settings; - GRPC_CLOSURE_SCHED(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&t->read_action_locked, GRPC_ERROR_NONE); } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.h index 1f9d38426..9d55b3f4b 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/chttp2_transport.h @@ -19,28 +19,27 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CHTTP2_TRANSPORT_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CHTTP2_TRANSPORT_H +#include <grpc/support/port_platform.h> + #include "src/core/lib/debug/trace.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/transport/transport.h" -extern grpc_tracer_flag grpc_http_trace; -extern grpc_tracer_flag grpc_flowctl_trace; -extern grpc_tracer_flag 
grpc_trace_http2_stream_state; +extern grpc_core::TraceFlag grpc_http_trace; +extern grpc_core::TraceFlag grpc_trace_http2_stream_state; +extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount; -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_chttp2_refcount; -#endif +extern bool g_flow_control_enabled; -grpc_transport *grpc_create_chttp2_transport( - grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args, - grpc_endpoint *ep, bool is_client); +grpc_transport* grpc_create_chttp2_transport( + const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client); /// Takes ownership of \a read_buffer, which (if non-NULL) contains /// leftover bytes previously read from the endpoint (e.g., by handshakers). /// If non-null, \a notify_on_receive_settings will be scheduled when /// HTTP/2 settings are received from the peer. void grpc_chttp2_transport_start_reading( - grpc_exec_ctx *exec_ctx, grpc_transport *transport, - grpc_slice_buffer *read_buffer, grpc_closure *notify_on_receive_settings); + grpc_transport* transport, grpc_slice_buffer* read_buffer, + grpc_closure* notify_on_receive_settings); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CHTTP2_TRANSPORT_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.c deleted file mode 100644 index 569a6349d..000000000 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.c +++ /dev/null @@ -1,502 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/ext/transport/chttp2/transport/internal.h" - -#include -#include -#include - -#include -#include -#include -#include - -#include "src/core/lib/support/string.h" - -static uint32_t grpc_chttp2_target_announced_window( - const grpc_chttp2_transport_flowctl* tfc); - -#ifndef NDEBUG - -typedef struct { - int64_t remote_window; - int64_t target_window; - int64_t announced_window; - int64_t remote_window_delta; - int64_t local_window_delta; - int64_t announced_window_delta; - uint32_t local_init_window; - uint32_t local_max_frame; -} shadow_flow_control; - -static void pretrace(shadow_flow_control* shadow_fc, - grpc_chttp2_transport_flowctl* tfc, - grpc_chttp2_stream_flowctl* sfc) { - shadow_fc->remote_window = tfc->remote_window; - shadow_fc->target_window = grpc_chttp2_target_announced_window(tfc); - shadow_fc->announced_window = tfc->announced_window; - if (sfc != NULL) { - shadow_fc->remote_window_delta = sfc->remote_window_delta; - shadow_fc->local_window_delta = sfc->local_window_delta; - shadow_fc->announced_window_delta = sfc->announced_window_delta; - } -} - -#define TRACE_PADDING 30 - -static char* fmt_int64_diff_str(int64_t old_val, int64_t new_val) { - char* str; - if (old_val != new_val) { - gpr_asprintf(&str, "%" PRId64 " -> %" PRId64 "", old_val, new_val); - } else { - gpr_asprintf(&str, "%" PRId64 "", old_val); - } - char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING); - gpr_free(str); - return str_lp; -} - -static char* fmt_uint32_diff_str(uint32_t old_val, uint32_t new_val) { - char* str; - if (new_val > 0 && old_val != new_val) { - gpr_asprintf(&str, "%" PRIu32 " -> %" PRIu32 "", old_val, new_val); - } else { - gpr_asprintf(&str, "%" PRIu32 "", old_val); - } - char* str_lp = gpr_leftpad(str, ' ', TRACE_PADDING); - gpr_free(str); - return str_lp; -} - -static void posttrace(shadow_flow_control* shadow_fc, - grpc_chttp2_transport_flowctl* tfc, - grpc_chttp2_stream_flowctl* sfc, char* reason) { - uint32_t acked_local_window = - tfc->t->settings[GRPC_SENT_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; - uint32_t remote_window = - tfc->t->settings[GRPC_PEER_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; - char* trw_str = - fmt_int64_diff_str(shadow_fc->remote_window, tfc->remote_window); - char* tlw_str = fmt_int64_diff_str(shadow_fc->target_window, - grpc_chttp2_target_announced_window(tfc)); - char* taw_str = - fmt_int64_diff_str(shadow_fc->announced_window, tfc->announced_window); - char* srw_str; - char* slw_str; - char* saw_str; - if (sfc != NULL) { - srw_str = fmt_int64_diff_str(shadow_fc->remote_window_delta + remote_window, - sfc->remote_window_delta + remote_window); - slw_str = - fmt_int64_diff_str(shadow_fc->local_window_delta + acked_local_window, - sfc->local_window_delta + acked_local_window); - saw_str = fmt_int64_diff_str( - shadow_fc->announced_window_delta + acked_local_window, - sfc->announced_window_delta + acked_local_window); - } else { - srw_str = gpr_leftpad("", ' ', TRACE_PADDING); - slw_str = gpr_leftpad("", ' ', TRACE_PADDING); - saw_str = gpr_leftpad("", ' ', TRACE_PADDING); - } - gpr_log(GPR_DEBUG, - "%p[%u][%s] | %s | trw:%s, ttw:%s, taw:%s, srw:%s, slw:%s, saw:%s", - tfc, sfc != NULL ? sfc->s->id : 0, tfc->t->is_client ? 
"cli" : "svr", - reason, trw_str, tlw_str, taw_str, srw_str, slw_str, saw_str); - gpr_free(trw_str); - gpr_free(tlw_str); - gpr_free(taw_str); - gpr_free(srw_str); - gpr_free(slw_str); - gpr_free(saw_str); -} - -static char* urgency_to_string(grpc_chttp2_flowctl_urgency urgency) { - switch (urgency) { - case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED: - return "no action"; - case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY: - return "update immediately"; - case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE: - return "queue update"; - default: - GPR_UNREACHABLE_CODE(return "unknown"); - } - GPR_UNREACHABLE_CODE(return "unknown"); -} - -static void trace_action(grpc_chttp2_transport_flowctl* tfc, - grpc_chttp2_flowctl_action action) { - char* iw_str = fmt_uint32_diff_str( - tfc->t->settings[GRPC_SENT_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE], - action.initial_window_size); - char* mf_str = fmt_uint32_diff_str( - tfc->t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], - action.max_frame_size); - gpr_log(GPR_DEBUG, "t[%s], s[%s], settings[%s] iw:%s mf:%s", - urgency_to_string(action.send_transport_update), - urgency_to_string(action.send_stream_update), - urgency_to_string(action.send_setting_update), iw_str, mf_str); - gpr_free(iw_str); - gpr_free(mf_str); -} - -#define PRETRACE(tfc, sfc) \ - shadow_flow_control shadow_fc; \ - GRPC_FLOW_CONTROL_IF_TRACING(pretrace(&shadow_fc, tfc, sfc)) -#define POSTTRACE(tfc, sfc, reason) \ - GRPC_FLOW_CONTROL_IF_TRACING(posttrace(&shadow_fc, tfc, sfc, reason)) -#define TRACEACTION(tfc, action) \ - GRPC_FLOW_CONTROL_IF_TRACING(trace_action(tfc, action)) -#else -#define PRETRACE(tfc, sfc) -#define POSTTRACE(tfc, sfc, reason) -#define TRACEACTION(tfc, action) -#endif - -/* How many bytes of incoming flow control would we like to advertise */ -static uint32_t grpc_chttp2_target_announced_window( - const grpc_chttp2_transport_flowctl* tfc) { - return (uint32_t)GPR_MIN( - (int64_t)((1u << 31) - 1), - tfc->announced_stream_total_over_incoming_window + - tfc->t->settings[GRPC_SENT_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]); -} - -// we have sent data on the wire, we must track this in our bookkeeping for the -// remote peer's flow control. -void grpc_chttp2_flowctl_sent_data(grpc_chttp2_transport_flowctl* tfc, - grpc_chttp2_stream_flowctl* sfc, - int64_t size) { - PRETRACE(tfc, sfc); - tfc->remote_window -= size; - sfc->remote_window_delta -= size; - POSTTRACE(tfc, sfc, " data sent"); -} - -static void announced_window_delta_preupdate(grpc_chttp2_transport_flowctl* tfc, - grpc_chttp2_stream_flowctl* sfc) { - if (sfc->announced_window_delta > 0) { - tfc->announced_stream_total_over_incoming_window -= - sfc->announced_window_delta; - } else { - tfc->announced_stream_total_under_incoming_window += - -sfc->announced_window_delta; - } -} - -static void announced_window_delta_postupdate( - grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) { - if (sfc->announced_window_delta > 0) { - tfc->announced_stream_total_over_incoming_window += - sfc->announced_window_delta; - } else { - tfc->announced_stream_total_under_incoming_window -= - -sfc->announced_window_delta; - } -} - -// We have received data from the wire. We must track this in our own flow -// control bookkeeping. -// Returns an error if the incoming frame violates our flow control. 
-grpc_error* grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl* tfc, - grpc_chttp2_stream_flowctl* sfc, - int64_t incoming_frame_size) { - uint32_t sent_init_window = - tfc->t->settings[GRPC_SENT_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; - uint32_t acked_init_window = - tfc->t->settings[GRPC_ACKED_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; - PRETRACE(tfc, sfc); - if (incoming_frame_size > tfc->announced_window) { - char* msg; - gpr_asprintf(&msg, - "frame of size %" PRId64 " overflows local window of %" PRId64, - incoming_frame_size, tfc->announced_window); - grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); - gpr_free(msg); - return err; - } - - if (sfc != NULL) { - int64_t acked_stream_window = - sfc->announced_window_delta + acked_init_window; - int64_t sent_stream_window = sfc->announced_window_delta + sent_init_window; - if (incoming_frame_size > acked_stream_window) { - if (incoming_frame_size <= sent_stream_window) { - gpr_log( - GPR_ERROR, - "Incoming frame of size %" PRId64 - " exceeds local window size of %" PRId64 - ".\n" - "The (un-acked, future) window size would be %" PRId64 - " which is not exceeded.\n" - "This would usually cause a disconnection, but allowing it due to" - "broken HTTP2 implementations in the wild.\n" - "See (for example) https://github.com/netty/netty/issues/6520.", - incoming_frame_size, acked_stream_window, sent_stream_window); - } else { - char* msg; - gpr_asprintf(&msg, "frame of size %" PRId64 - " overflows local window of %" PRId64, - incoming_frame_size, acked_stream_window); - grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); - gpr_free(msg); - return err; - } - } - - announced_window_delta_preupdate(tfc, sfc); - sfc->announced_window_delta -= incoming_frame_size; - announced_window_delta_postupdate(tfc, sfc); - sfc->local_window_delta -= incoming_frame_size; - } - - tfc->announced_window -= incoming_frame_size; - - POSTTRACE(tfc, sfc, " data recv"); - return GRPC_ERROR_NONE; -} - -// Returns a non zero announce integer if we should send a transport window -// update -uint32_t grpc_chttp2_flowctl_maybe_send_transport_update( - grpc_chttp2_transport_flowctl* tfc) { - PRETRACE(tfc, NULL); - uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc); - uint32_t threshold_to_send_transport_window_update = - tfc->t->outbuf.count > 0 ? 3 * target_announced_window / 4 - : target_announced_window / 2; - if (tfc->announced_window <= threshold_to_send_transport_window_update && - tfc->announced_window != target_announced_window) { - uint32_t announce = (uint32_t)GPR_CLAMP( - target_announced_window - tfc->announced_window, 0, UINT32_MAX); - tfc->announced_window += announce; - POSTTRACE(tfc, NULL, "t updt sent"); - return announce; - } - GRPC_FLOW_CONTROL_IF_TRACING( - gpr_log(GPR_DEBUG, "%p[0][%s] will not send transport update", tfc, - tfc->t->is_client ? 
"cli" : "svr")); - return 0; -} - -// Returns a non zero announce integer if we should send a stream window update -uint32_t grpc_chttp2_flowctl_maybe_send_stream_update( - grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) { - PRETRACE(tfc, sfc); - if (sfc->local_window_delta > sfc->announced_window_delta) { - uint32_t announce = (uint32_t)GPR_CLAMP( - sfc->local_window_delta - sfc->announced_window_delta, 0, UINT32_MAX); - announced_window_delta_preupdate(tfc, sfc); - sfc->announced_window_delta += announce; - announced_window_delta_postupdate(tfc, sfc); - POSTTRACE(tfc, sfc, "s updt sent"); - return announce; - } - GRPC_FLOW_CONTROL_IF_TRACING( - gpr_log(GPR_DEBUG, "%p[%u][%s] will not send stream update", tfc, - sfc->s->id, tfc->t->is_client ? "cli" : "svr")); - return 0; -} - -// we have received a WINDOW_UPDATE frame for a transport -void grpc_chttp2_flowctl_recv_transport_update( - grpc_chttp2_transport_flowctl* tfc, uint32_t size) { - PRETRACE(tfc, NULL); - tfc->remote_window += size; - POSTTRACE(tfc, NULL, "t updt recv"); -} - -// we have received a WINDOW_UPDATE frame for a stream -void grpc_chttp2_flowctl_recv_stream_update(grpc_chttp2_transport_flowctl* tfc, - grpc_chttp2_stream_flowctl* sfc, - uint32_t size) { - PRETRACE(tfc, sfc); - sfc->remote_window_delta += size; - POSTTRACE(tfc, sfc, "s updt recv"); -} - -void grpc_chttp2_flowctl_incoming_bs_update(grpc_chttp2_transport_flowctl* tfc, - grpc_chttp2_stream_flowctl* sfc, - size_t max_size_hint, - size_t have_already) { - PRETRACE(tfc, sfc); - uint32_t max_recv_bytes; - uint32_t sent_init_window = - tfc->t->settings[GRPC_SENT_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; - - /* clamp max recv hint to an allowable size */ - if (max_size_hint >= UINT32_MAX - sent_init_window) { - max_recv_bytes = UINT32_MAX - sent_init_window; - } else { - max_recv_bytes = (uint32_t)max_size_hint; - } - - /* account for bytes already received but unknown to higher layers */ - if (max_recv_bytes >= have_already) { - max_recv_bytes -= (uint32_t)have_already; - } else { - max_recv_bytes = 0; - } - - /* add some small lookahead to keep pipelines flowing */ - GPR_ASSERT(max_recv_bytes <= UINT32_MAX - sent_init_window); - if (sfc->local_window_delta < max_recv_bytes) { - uint32_t add_max_recv_bytes = - (uint32_t)(max_recv_bytes - sfc->local_window_delta); - sfc->local_window_delta += add_max_recv_bytes; - } - POSTTRACE(tfc, sfc, "app st recv"); -} - -void grpc_chttp2_flowctl_destroy_stream(grpc_chttp2_transport_flowctl* tfc, - grpc_chttp2_stream_flowctl* sfc) { - announced_window_delta_preupdate(tfc, sfc); -} - -// Returns an urgency with which to make an update -static grpc_chttp2_flowctl_urgency delta_is_significant( - const grpc_chttp2_transport_flowctl* tfc, int32_t value, - grpc_chttp2_setting_id setting_id) { - int64_t delta = (int64_t)value - - (int64_t)tfc->t->settings[GRPC_LOCAL_SETTINGS][setting_id]; - // TODO(ncteisen): tune this - if (delta != 0 && (delta <= -value / 5 || delta >= value / 5)) { - return GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE; - } else { - return GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED; - } -} - -// Takes in a target and uses the pid controller to return a stabilized -// guess at the new bdp. 
-static double get_pid_controller_guess(grpc_chttp2_transport_flowctl* tfc, - double target) { - double bdp_error = target - grpc_pid_controller_last(&tfc->pid_controller); - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec dt_timespec = gpr_time_sub(now, tfc->last_pid_update); - double dt = (double)dt_timespec.tv_sec + dt_timespec.tv_nsec * 1e-9; - if (dt > 0.1) { - dt = 0.1; - } - double log2_bdp_guess = - grpc_pid_controller_update(&tfc->pid_controller, bdp_error, dt); - tfc->last_pid_update = now; - return pow(2, log2_bdp_guess); -} - -// Take in a target and modifies it based on the memory pressure of the system -static double get_target_under_memory_pressure( - grpc_chttp2_transport_flowctl* tfc, double target) { - // do not increase window under heavy memory pressure. - double memory_pressure = grpc_resource_quota_get_memory_pressure( - grpc_resource_user_quota(grpc_endpoint_get_resource_user(tfc->t->ep))); - if (memory_pressure > 0.8) { - target *= 1 - GPR_MIN(1, (memory_pressure - 0.8) / 0.1); - } - return target; -} - -grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action( - grpc_chttp2_transport_flowctl* tfc, grpc_chttp2_stream_flowctl* sfc) { - grpc_chttp2_flowctl_action action; - memset(&action, 0, sizeof(action)); - uint32_t target_announced_window = grpc_chttp2_target_announced_window(tfc); - if (tfc->announced_window < target_announced_window / 2) { - action.send_transport_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY; - } - // TODO(ncteisen): tune this - if (sfc != NULL && !sfc->s->read_closed) { - uint32_t sent_init_window = - tfc->t->settings[GRPC_SENT_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; - if ((int64_t)sfc->local_window_delta > - (int64_t)sfc->announced_window_delta && - (int64_t)sfc->announced_window_delta + sent_init_window <= - sent_init_window / 2) { - action.send_stream_update = GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY; - } else if (sfc->local_window_delta > sfc->announced_window_delta) { - action.send_stream_update = GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE; - } - } - TRACEACTION(tfc, action); - return action; -} - -grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action( - grpc_chttp2_transport_flowctl* tfc) { - grpc_chttp2_flowctl_action action; - memset(&action, 0, sizeof(action)); - if (tfc->enable_bdp_probe) { - action.need_ping = grpc_bdp_estimator_need_ping(&tfc->bdp_estimator); - - // get bdp estimate and update initial_window accordingly. - int64_t estimate = -1; - int32_t bdp = -1; - if (grpc_bdp_estimator_get_estimate(&tfc->bdp_estimator, &estimate)) { - double target = 1 + log2((double)estimate); - - // target might change based on how much memory pressure we are under - // TODO(ncteisen): experiment with setting target to be huge under low - // memory pressure. - target = get_target_under_memory_pressure(tfc, target); - - // run our target through the pid controller to stabilize change. - // TODO(ncteisen): experiment with other controllers here. - double bdp_guess = get_pid_controller_guess(tfc, target); - - // Though initial window 'could' drop to 0, we keep the floor at 128 - bdp = GPR_MAX((int32_t)bdp_guess, 128); - - grpc_chttp2_flowctl_urgency init_window_update_urgency = - delta_is_significant(tfc, bdp, - GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE); - if (init_window_update_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) { - action.send_setting_update = init_window_update_urgency; - action.initial_window_size = (uint32_t)bdp; - } - } - - // get bandwidth estimate and update max_frame accordingly. 
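Both the removed C path and the new SmoothLogBdp/PeriodicUpdate pair work in log2 space: the BDP estimate becomes 1 + log2(estimate), that value is smoothed with the elapsed time capped at 100ms, and the result is converted back with pow(2, ...), floored at 128 bytes and capped at INT32_MAX. A rough sketch of that round trip; the smoother below is a trivial stand-in for grpc_core::PidController and all names are illustrative:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Stand-in smoother: nudges the previous control value toward the target.
// Purely illustrative; the real code runs a PID controller, also with dt
// capped at 100ms.
double SmoothTowards(double previous, double target, double dt_seconds) {
  const double kMaxDt = 0.1;  // limit dt to 100ms
  dt_seconds = std::min(dt_seconds, kMaxDt);
  return previous + (target - previous) * dt_seconds / kMaxDt;
}

int32_t InitialWindowFromBdp(double bdp_estimate_bytes, double previous_log2,
                             double dt_seconds) {
  const double target_log2 = 1 + log2(bdp_estimate_bytes);
  const double smoothed = SmoothTowards(previous_log2, target_log2, dt_seconds);
  // Convert back to bytes, keep the floor at 128 and the cap at INT32_MAX.
  const double window = pow(2, smoothed);
  return static_cast<int32_t>(std::max(128.0, std::min(window, 2147483647.0)));
}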
- double bw_dbl = -1; - if (grpc_bdp_estimator_get_bw(&tfc->bdp_estimator, &bw_dbl)) { - // we target the max of BDP or bandwidth in microseconds. - int32_t frame_size = (int32_t)GPR_CLAMP( - GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000, bdp), 16384, - 16777215); - grpc_chttp2_flowctl_urgency frame_size_urgency = delta_is_significant( - tfc, frame_size, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE); - if (frame_size_urgency != GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED) { - if (frame_size_urgency > action.send_setting_update) { - action.send_setting_update = frame_size_urgency; - } - action.max_frame_size = (uint32_t)frame_size; - } - } - } - - TRACEACTION(tfc, action); - return action; -} diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.cc b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.cc new file mode 100644 index 000000000..e89c36320 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.cc @@ -0,0 +1,405 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/transport/chttp2/transport/flow_control.h" + +#include +#include +#include +#include + +#include +#include +#include + +#include "src/core/ext/transport/chttp2/transport/internal.h" +#include "src/core/lib/gpr/string.h" + +grpc_core::TraceFlag grpc_flowctl_trace(false, "flowctl"); + +namespace grpc_core { +namespace chttp2 { + +namespace { + +static constexpr const int kTracePadding = 30; + +static char* fmt_int64_diff_str(int64_t old_val, int64_t new_val) { + char* str; + if (old_val != new_val) { + gpr_asprintf(&str, "%" PRId64 " -> %" PRId64 "", old_val, new_val); + } else { + gpr_asprintf(&str, "%" PRId64 "", old_val); + } + char* str_lp = gpr_leftpad(str, ' ', kTracePadding); + gpr_free(str); + return str_lp; +} + +static char* fmt_uint32_diff_str(uint32_t old_val, uint32_t new_val) { + char* str; + if (new_val > 0 && old_val != new_val) { + gpr_asprintf(&str, "%" PRIu32 " -> %" PRIu32 "", old_val, new_val); + } else { + gpr_asprintf(&str, "%" PRIu32 "", old_val); + } + char* str_lp = gpr_leftpad(str, ' ', kTracePadding); + gpr_free(str); + return str_lp; +} +} // namespace + +void FlowControlTrace::Init(const char* reason, TransportFlowControl* tfc, + StreamFlowControl* sfc) { + tfc_ = tfc; + sfc_ = sfc; + reason_ = reason; + remote_window_ = tfc->remote_window(); + target_window_ = tfc->target_window(); + announced_window_ = tfc->announced_window(); + if (sfc != nullptr) { + remote_window_delta_ = sfc->remote_window_delta(); + local_window_delta_ = sfc->local_window_delta(); + announced_window_delta_ = sfc->announced_window_delta(); + } +} + +void FlowControlTrace::Finish() { + uint32_t acked_local_window = + tfc_->transport()->settings[GRPC_SENT_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; + uint32_t remote_window = + tfc_->transport()->settings[GRPC_PEER_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; + char* trw_str = 
fmt_int64_diff_str(remote_window_, tfc_->remote_window()); + char* tlw_str = fmt_int64_diff_str(target_window_, tfc_->target_window()); + char* taw_str = + fmt_int64_diff_str(announced_window_, tfc_->announced_window()); + char* srw_str; + char* slw_str; + char* saw_str; + if (sfc_ != nullptr) { + srw_str = fmt_int64_diff_str(remote_window_delta_ + remote_window, + sfc_->remote_window_delta() + remote_window); + slw_str = fmt_int64_diff_str(local_window_delta_ + acked_local_window, + local_window_delta_ + acked_local_window); + saw_str = fmt_int64_diff_str(announced_window_delta_ + acked_local_window, + announced_window_delta_ + acked_local_window); + } else { + srw_str = gpr_leftpad("", ' ', kTracePadding); + slw_str = gpr_leftpad("", ' ', kTracePadding); + saw_str = gpr_leftpad("", ' ', kTracePadding); + } + gpr_log(GPR_DEBUG, + "%p[%u][%s] | %s | trw:%s, ttw:%s, taw:%s, srw:%s, slw:%s, saw:%s", + tfc_, sfc_ != nullptr ? sfc_->stream()->id : 0, + tfc_->transport()->is_client ? "cli" : "svr", reason_, trw_str, + tlw_str, taw_str, srw_str, slw_str, saw_str); + gpr_free(trw_str); + gpr_free(tlw_str); + gpr_free(taw_str); + gpr_free(srw_str); + gpr_free(slw_str); + gpr_free(saw_str); +} + +const char* FlowControlAction::UrgencyString(Urgency u) { + switch (u) { + case Urgency::NO_ACTION_NEEDED: + return "no action"; + case Urgency::UPDATE_IMMEDIATELY: + return "update immediately"; + case Urgency::QUEUE_UPDATE: + return "queue update"; + default: + GPR_UNREACHABLE_CODE(return "unknown"); + } + GPR_UNREACHABLE_CODE(return "unknown"); +} + +void FlowControlAction::Trace(grpc_chttp2_transport* t) const { + char* iw_str = fmt_uint32_diff_str( + t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE], + initial_window_size_); + char* mf_str = fmt_uint32_diff_str( + t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], + max_frame_size_); + gpr_log(GPR_DEBUG, "t[%s], s[%s], iw:%s:%s mf:%s:%s", + UrgencyString(send_transport_update_), + UrgencyString(send_stream_update_), + UrgencyString(send_initial_window_update_), iw_str, + UrgencyString(send_max_frame_size_update_), mf_str); + gpr_free(iw_str); + gpr_free(mf_str); +} + +TransportFlowControlDisabled::TransportFlowControlDisabled( + grpc_chttp2_transport* t) { + remote_window_ = kMaxWindow; + target_initial_window_size_ = kMaxWindow; + announced_window_ = kMaxWindow; + t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE] = + kFrameSize; + t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE] = + kFrameSize; + t->settings[GRPC_ACKED_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE] = + kFrameSize; + t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] = + kMaxWindow; + t->settings[GRPC_SENT_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] = + kMaxWindow; + t->settings[GRPC_ACKED_SETTINGS][GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] = + kMaxWindow; +} + +TransportFlowControl::TransportFlowControl(const grpc_chttp2_transport* t, + bool enable_bdp_probe) + : t_(t), + enable_bdp_probe_(enable_bdp_probe), + bdp_estimator_(t->peer_string), + pid_controller_(grpc_core::PidController::Args() + .set_gain_p(4) + .set_gain_i(8) + .set_gain_d(0) + .set_initial_control_value(TargetLogBdp()) + .set_min_control_value(-1) + .set_max_control_value(25) + .set_integral_range(10)), + last_pid_update_(grpc_core::ExecCtx::Get()->Now()) {} + +uint32_t TransportFlowControl::MaybeSendUpdate(bool writing_anyway) { + FlowControlTrace trace("t updt sent", this, nullptr); + const uint32_t 
target_announced_window = + static_cast(target_window()); + if ((writing_anyway || announced_window_ <= target_announced_window / 2) && + announced_window_ != target_announced_window) { + const uint32_t announce = static_cast GPR_CLAMP( + target_announced_window - announced_window_, 0, UINT32_MAX); + announced_window_ += announce; + return announce; + } + return 0; +} + +grpc_error* TransportFlowControl::ValidateRecvData( + int64_t incoming_frame_size) { + if (incoming_frame_size > announced_window_) { + char* msg; + gpr_asprintf(&msg, + "frame of size %" PRId64 " overflows local window of %" PRId64, + incoming_frame_size, announced_window_); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + return err; + } + return GRPC_ERROR_NONE; +} + +StreamFlowControl::StreamFlowControl(TransportFlowControl* tfc, + const grpc_chttp2_stream* s) + : tfc_(tfc), s_(s) {} + +grpc_error* StreamFlowControl::RecvData(int64_t incoming_frame_size) { + FlowControlTrace trace(" data recv", tfc_, this); + + grpc_error* error = GRPC_ERROR_NONE; + error = tfc_->ValidateRecvData(incoming_frame_size); + if (error != GRPC_ERROR_NONE) return error; + + uint32_t sent_init_window = + tfc_->transport()->settings[GRPC_SENT_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; + uint32_t acked_init_window = + tfc_->transport()->settings[GRPC_ACKED_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; + + int64_t acked_stream_window = announced_window_delta_ + acked_init_window; + int64_t sent_stream_window = announced_window_delta_ + sent_init_window; + if (incoming_frame_size > acked_stream_window) { + if (incoming_frame_size <= sent_stream_window) { + gpr_log(GPR_ERROR, + "Incoming frame of size %" PRId64 + " exceeds local window size of %" PRId64 + ".\n" + "The (un-acked, future) window size would be %" PRId64 + " which is not exceeded.\n" + "This would usually cause a disconnection, but allowing it due to" + "broken HTTP2 implementations in the wild.\n" + "See (for example) https://github.com/netty/netty/issues/6520.", + incoming_frame_size, acked_stream_window, sent_stream_window); + } else { + char* msg; + gpr_asprintf( + &msg, "frame of size %" PRId64 " overflows local window of %" PRId64, + incoming_frame_size, acked_stream_window); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + return err; + } + } + + UpdateAnnouncedWindowDelta(tfc_, -incoming_frame_size); + local_window_delta_ -= incoming_frame_size; + tfc_->CommitRecvData(incoming_frame_size); + return GRPC_ERROR_NONE; +} + +uint32_t StreamFlowControl::MaybeSendUpdate() { + FlowControlTrace trace("s updt sent", tfc_, this); + if (local_window_delta_ > announced_window_delta_) { + uint32_t announce = static_cast GPR_CLAMP( + local_window_delta_ - announced_window_delta_, 0, UINT32_MAX); + UpdateAnnouncedWindowDelta(tfc_, announce); + return announce; + } + return 0; +} + +void StreamFlowControl::IncomingByteStreamUpdate(size_t max_size_hint, + size_t have_already) { + FlowControlTrace trace("app st recv", tfc_, this); + uint32_t max_recv_bytes; + uint32_t sent_init_window = + tfc_->transport()->settings[GRPC_SENT_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; + + /* clamp max recv hint to an allowable size */ + if (max_size_hint >= UINT32_MAX - sent_init_window) { + max_recv_bytes = UINT32_MAX - sent_init_window; + } else { + max_recv_bytes = static_cast(max_size_hint); + } + + /* account for bytes already received but unknown to higher layers */ + if (max_recv_bytes >= have_already) 
{ + max_recv_bytes -= static_cast(have_already); + } else { + max_recv_bytes = 0; + } + + /* add some small lookahead to keep pipelines flowing */ + GPR_ASSERT(max_recv_bytes <= UINT32_MAX - sent_init_window); + if (local_window_delta_ < max_recv_bytes) { + uint32_t add_max_recv_bytes = + static_cast(max_recv_bytes - local_window_delta_); + local_window_delta_ += add_max_recv_bytes; + } +} + +// Take in a target and modifies it based on the memory pressure of the system +static double AdjustForMemoryPressure(grpc_resource_quota* quota, + double target) { + // do not increase window under heavy memory pressure. + double memory_pressure = grpc_resource_quota_get_memory_pressure(quota); + static const double kLowMemPressure = 0.1; + static const double kZeroTarget = 22; + static const double kHighMemPressure = 0.8; + static const double kMaxMemPressure = 0.9; + if (memory_pressure < kLowMemPressure && target < kZeroTarget) { + target = (target - kZeroTarget) * memory_pressure / kLowMemPressure + + kZeroTarget; + } else if (memory_pressure > kHighMemPressure) { + target *= 1 - GPR_MIN(1, (memory_pressure - kHighMemPressure) / + (kMaxMemPressure - kHighMemPressure)); + } + return target; +} + +double TransportFlowControl::TargetLogBdp() { + return AdjustForMemoryPressure( + grpc_resource_user_quota(grpc_endpoint_get_resource_user(t_->ep)), + 1 + log2(bdp_estimator_.EstimateBdp())); +} + +double TransportFlowControl::SmoothLogBdp(double value) { + grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + double bdp_error = value - pid_controller_.last_control_value(); + const double dt = static_cast(now - last_pid_update_) * 1e-3; + last_pid_update_ = now; + // Limit dt to 100ms + const double kMaxDt = 0.1; + return pid_controller_.Update(bdp_error, dt > kMaxDt ? kMaxDt : dt); +} + +FlowControlAction::Urgency TransportFlowControl::DeltaUrgency( + int64_t value, grpc_chttp2_setting_id setting_id) { + int64_t delta = value - static_cast( + t_->settings[GRPC_LOCAL_SETTINGS][setting_id]); + // TODO(ncteisen): tune this + if (delta != 0 && (delta <= -value / 5 || delta >= value / 5)) { + return FlowControlAction::Urgency::QUEUE_UPDATE; + } else { + return FlowControlAction::Urgency::NO_ACTION_NEEDED; + } +} + +FlowControlAction TransportFlowControl::PeriodicUpdate() { + FlowControlAction action; + if (enable_bdp_probe_) { + // get bdp estimate and update initial_window accordingly. + // target might change based on how much memory pressure we are under + // TODO(ncteisen): experiment with setting target to be huge under low + // memory pressure. + const double target = pow(2, SmoothLogBdp(TargetLogBdp())); + + // Though initial window 'could' drop to 0, we keep the floor at 128 + target_initial_window_size_ = + static_cast GPR_CLAMP(target, 128, INT32_MAX); + + action.set_send_initial_window_update( + DeltaUrgency(target_initial_window_size_, + GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE), + static_cast(target_initial_window_size_)); + + // get bandwidth estimate and update max_frame accordingly. + double bw_dbl = bdp_estimator_.EstimateBandwidth(); + // we target the max of BDP or bandwidth in microseconds. 
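The new AdjustForMemoryPressure above shapes the log2(bytes) target in two regimes: under very low pressure a small target is pulled up toward 2^22 (roughly 4MB), and between 80% and 90% pressure the target is scaled linearly down to zero. A condensed restatement of that piecewise rule with the same constants; the function and parameter names are illustrative:

#include <algorithm>

// 'target' is a log2(bytes) value, 'pressure' is in [0, 1]. Constants match
// the patch; the wrapper itself is illustrative only.
double AdjustTargetForMemoryPressure(double target, double pressure) {
  const double kLow = 0.1, kZeroTarget = 22, kHigh = 0.8, kMax = 0.9;
  if (pressure < kLow && target < kZeroTarget) {
    // Plenty of headroom: lift small targets toward 2^22 as pressure drops.
    target = (target - kZeroTarget) * pressure / kLow + kZeroTarget;
  } else if (pressure > kHigh) {
    // Between 80% and 90% pressure, scale the target linearly toward zero.
    target *= 1 - std::min(1.0, (pressure - kHigh) / (kMax - kHigh));
  }
  return target;
}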
+ int32_t frame_size = static_cast GPR_CLAMP( + GPR_MAX((int32_t)GPR_CLAMP(bw_dbl, 0, INT_MAX) / 1000, + target_initial_window_size_), + 16384, 16777215); + action.set_send_max_frame_size_update( + DeltaUrgency(static_cast(frame_size), + GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE), + frame_size); + } + return UpdateAction(action); +} + +FlowControlAction StreamFlowControl::UpdateAction(FlowControlAction action) { + // TODO(ncteisen): tune this + if (!s_->read_closed) { + uint32_t sent_init_window = + tfc_->transport()->settings[GRPC_SENT_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; + if (local_window_delta_ > announced_window_delta_ && + announced_window_delta_ + sent_init_window <= sent_init_window / 2) { + action.set_send_stream_update( + FlowControlAction::Urgency::UPDATE_IMMEDIATELY); + } else if (local_window_delta_ > announced_window_delta_) { + action.set_send_stream_update(FlowControlAction::Urgency::QUEUE_UPDATE); + } + } + + return action; +} + +} // namespace chttp2 +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.h new file mode 100644 index 000000000..120fefc8b --- /dev/null +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/flow_control.h @@ -0,0 +1,482 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FLOW_CONTROL_H +#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FLOW_CONTROL_H + +#include + +#include + +#include "src/core/ext/transport/chttp2/transport/http2_settings.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/transport/bdp_estimator.h" +#include "src/core/lib/transport/pid_controller.h" + +struct grpc_chttp2_transport; +struct grpc_chttp2_stream; + +extern grpc_core::TraceFlag grpc_flowctl_trace; + +namespace grpc { +namespace testing { +class TrickledCHTTP2; // to make this a friend +} // namespace testing +} // namespace grpc + +namespace grpc_core { +namespace chttp2 { + +static constexpr uint32_t kDefaultWindow = 65535; +static constexpr int64_t kMaxWindow = static_cast((1u << 31) - 1); +// TODO(ncteisen): Tune this +static constexpr uint32_t kFrameSize = 1024 * 1024; + +class TransportFlowControl; +class StreamFlowControl; + +// Encapsulates a collections of actions the transport needs to take with +// regard to flow control. Each action comes with urgencies that tell the +// transport how quickly the action must take place. +class FlowControlAction { + public: + enum class Urgency : uint8_t { + // Nothing to be done. + NO_ACTION_NEEDED = 0, + // Initiate a write to update the initial window immediately. + UPDATE_IMMEDIATELY, + // Push the flow control update into a send buffer, to be sent + // out the next time a write is initiated. 
+ QUEUE_UPDATE, + }; + + Urgency send_stream_update() const { return send_stream_update_; } + Urgency send_transport_update() const { return send_transport_update_; } + Urgency send_initial_window_update() const { + return send_initial_window_update_; + } + Urgency send_max_frame_size_update() const { + return send_max_frame_size_update_; + } + uint32_t initial_window_size() const { return initial_window_size_; } + uint32_t max_frame_size() const { return max_frame_size_; } + + FlowControlAction& set_send_stream_update(Urgency u) { + send_stream_update_ = u; + return *this; + } + FlowControlAction& set_send_transport_update(Urgency u) { + send_transport_update_ = u; + return *this; + } + FlowControlAction& set_send_initial_window_update(Urgency u, + uint32_t update) { + send_initial_window_update_ = u; + initial_window_size_ = update; + return *this; + } + FlowControlAction& set_send_max_frame_size_update(Urgency u, + uint32_t update) { + send_max_frame_size_update_ = u; + max_frame_size_ = update; + return *this; + } + + static const char* UrgencyString(Urgency u); + void Trace(grpc_chttp2_transport* t) const; + + private: + Urgency send_stream_update_ = Urgency::NO_ACTION_NEEDED; + Urgency send_transport_update_ = Urgency::NO_ACTION_NEEDED; + Urgency send_initial_window_update_ = Urgency::NO_ACTION_NEEDED; + Urgency send_max_frame_size_update_ = Urgency::NO_ACTION_NEEDED; + uint32_t initial_window_size_ = 0; + uint32_t max_frame_size_ = 0; +}; + +class FlowControlTrace { + public: + FlowControlTrace(const char* reason, TransportFlowControl* tfc, + StreamFlowControl* sfc) { + if (enabled_) Init(reason, tfc, sfc); + } + + ~FlowControlTrace() { + if (enabled_) Finish(); + } + + private: + void Init(const char* reason, TransportFlowControl* tfc, + StreamFlowControl* sfc); + void Finish(); + + const bool enabled_ = grpc_flowctl_trace.enabled(); + + TransportFlowControl* tfc_; + StreamFlowControl* sfc_; + const char* reason_; + int64_t remote_window_; + int64_t target_window_; + int64_t announced_window_; + int64_t remote_window_delta_; + int64_t local_window_delta_; + int64_t announced_window_delta_; +}; + +// Fat interface with all methods a flow control implementation needs to +// support. gRPC C Core does not support pure virtual functions, so instead +// we abort in any methods which require implementation in the base class. +class TransportFlowControlBase { + public: + TransportFlowControlBase() {} + virtual ~TransportFlowControlBase() {} + + // Is flow control enabled? This is needed in other codepaths like the checks + // in parsing and in writing. + virtual bool flow_control_enabled() const { abort(); } + + // Called to check if the transport needs to send a WINDOW_UPDATE frame + virtual uint32_t MaybeSendUpdate(bool writing_anyway) { abort(); } + + // Using the protected members, returns and Action to be taken by the + // tranport. + virtual FlowControlAction MakeAction() { abort(); } + + // Using the protected members, returns and Action to be taken by the + // tranport. Also checks for updates to our BDP estimate and acts + // accordingly. + virtual FlowControlAction PeriodicUpdate() { abort(); } + + // Called to do bookkeeping when a stream owned by this transport sends + // data on the wire + virtual void StreamSentData(int64_t size) { abort(); } + + // Called to do bookkeeping when a stream owned by this transport receives + // data from the wire. Also does error checking for frame size. 
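FlowControlAction's setters return *this, so a call site can compose an action fluently and the transport can then branch on each urgency field separately. A hedged usage sketch, assuming it is compiled inside the gRPC core tree; ApplyAction, StartWrite and ScheduleWindowUpdate are placeholders, not gRPC APIs:

#include "src/core/ext/transport/chttp2/transport/flow_control.h"

using grpc_core::chttp2::FlowControlAction;

// Placeholder hooks standing in for the transport's write path.
static void StartWrite() {}
static void ScheduleWindowUpdate() {}

static void ApplyAction(const FlowControlAction& action) {
  switch (action.send_transport_update()) {
    case FlowControlAction::Urgency::UPDATE_IMMEDIATELY:
      StartWrite();            // kick off a write carrying the update now
      break;
    case FlowControlAction::Urgency::QUEUE_UPDATE:
      ScheduleWindowUpdate();  // piggyback the update on the next write
      break;
    case FlowControlAction::Urgency::NO_ACTION_NEEDED:
      break;
  }
}

// Composing an action with the chained setters declared in the header:
static FlowControlAction ExampleAction() {
  return FlowControlAction()
      .set_send_stream_update(FlowControlAction::Urgency::QUEUE_UPDATE)
      .set_send_initial_window_update(
          FlowControlAction::Urgency::UPDATE_IMMEDIATELY, 65535);
}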
+ virtual grpc_error* RecvData(int64_t incoming_frame_size) { abort(); } + + // Called to do bookkeeping when we receive a WINDOW_UPDATE frame. + virtual void RecvUpdate(uint32_t size) { abort(); } + + // Returns the BdpEstimator held by this object. Caller is responsible for + // checking for nullptr. TODO(ncteisen): consider fully encapsulating all + // bdp estimator actions inside TransportFlowControl + virtual BdpEstimator* bdp_estimator() { return nullptr; } + + // Getters + int64_t remote_window() const { return remote_window_; } + virtual int64_t target_window() const { return target_initial_window_size_; } + int64_t announced_window() const { return announced_window_; } + + // Used in certain benchmarks in which we don't want FlowControl to be a + // factor + virtual void TestOnlyForceHugeWindow() {} + + GRPC_ABSTRACT_BASE_CLASS + + protected: + friend class ::grpc::testing::TrickledCHTTP2; + int64_t remote_window_ = kDefaultWindow; + int64_t target_initial_window_size_ = kDefaultWindow; + int64_t announced_window_ = kDefaultWindow; +}; + +// Implementation of flow control that does NOTHING. Always returns maximum +// values, never initiates writes, and assumes that the remote peer is doing +// the same. To be used to narrow down on flow control as the cause of negative +// performance. +class TransportFlowControlDisabled final : public TransportFlowControlBase { + public: + // Maxes out all values + TransportFlowControlDisabled(grpc_chttp2_transport* t); + + bool flow_control_enabled() const override { return false; } + + // Never do anything. + uint32_t MaybeSendUpdate(bool writing_anyway) override { return 0; } + FlowControlAction MakeAction() override { return FlowControlAction(); } + FlowControlAction PeriodicUpdate() override { return FlowControlAction(); } + void StreamSentData(int64_t size) override {} + grpc_error* RecvData(int64_t incoming_frame_size) override { + return GRPC_ERROR_NONE; + } + void RecvUpdate(uint32_t size) override {} +}; + +// Implementation of flow control that abides to HTTP/2 spec and attempts +// to be as performant as possible. 
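Because TransportFlowControlBase is a fat interface with virtual methods rather than a pure-abstract class, the transport can hold one base pointer and swap in the disabled variant to rule flow control in or out as a performance factor. A hedged sketch of that selection, assuming compilation inside the gRPC core tree; the factory function and its flag are illustrative and not how the transport actually wires this up:

#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include "src/core/lib/gprpp/memory.h"

// Illustrative only: pick an implementation behind the common interface.
grpc_core::chttp2::TransportFlowControlBase* MakeTransportFlowControl(
    grpc_chttp2_transport* t, bool disable_flow_control,
    bool enable_bdp_probe) {
  if (disable_flow_control) {
    // Maxes out every window and never initiates writes.
    return grpc_core::New<grpc_core::chttp2::TransportFlowControlDisabled>(t);
  }
  return grpc_core::New<grpc_core::chttp2::TransportFlowControl>(
      t, enable_bdp_probe);
}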
+class TransportFlowControl final : public TransportFlowControlBase { + public: + TransportFlowControl(const grpc_chttp2_transport* t, bool enable_bdp_probe); + ~TransportFlowControl() {} + + bool flow_control_enabled() const override { return true; } + + bool bdp_probe() const { return enable_bdp_probe_; } + + // returns an announce if we should send a transport update to our peer, + // else returns zero; writing_anyway indicates if a write would happen + // regardless of the send - if it is false and this function returns non-zero, + // this announce will cause a write to occur + uint32_t MaybeSendUpdate(bool writing_anyway) override; + + // Reads the flow control data and returns and actionable struct that will + // tell chttp2 exactly what it needs to do + FlowControlAction MakeAction() override { + return UpdateAction(FlowControlAction()); + } + + // Call periodically (at a low-ish rate, 100ms - 10s makes sense) + // to perform more complex flow control calculations and return an action + // to let chttp2 change its parameters + FlowControlAction PeriodicUpdate() override; + + void StreamSentData(int64_t size) override { remote_window_ -= size; } + + grpc_error* ValidateRecvData(int64_t incoming_frame_size); + void CommitRecvData(int64_t incoming_frame_size) { + announced_window_ -= incoming_frame_size; + } + + grpc_error* RecvData(int64_t incoming_frame_size) override { + FlowControlTrace trace(" data recv", this, nullptr); + grpc_error* error = ValidateRecvData(incoming_frame_size); + if (error != GRPC_ERROR_NONE) return error; + CommitRecvData(incoming_frame_size); + return GRPC_ERROR_NONE; + } + + // we have received a WINDOW_UPDATE frame for a transport + void RecvUpdate(uint32_t size) override { + FlowControlTrace trace("t updt recv", this, nullptr); + remote_window_ += size; + } + + // See comment above announced_stream_total_over_incoming_window_ for the + // logic behind this decision. 
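TransportFlowControl::MaybeSendUpdate above only announces once the announced window has fallen to half of the target, or when a write is going out anyway and the WINDOW_UPDATE is effectively free. A worked sketch of that decision with illustrative names:

#include <cstdint>

// Returns how many bytes to announce in a transport WINDOW_UPDATE, or 0.
// Mirrors the threshold logic in TransportFlowControl::MaybeSendUpdate.
uint32_t MaybeAnnounce(int64_t announced, int64_t target, bool writing_anyway) {
  if ((writing_anyway || announced <= target / 2) && announced != target) {
    int64_t announce = target - announced;
    if (announce < 0) announce = 0;
    return static_cast<uint32_t>(announce);
  }
  return 0;
}

// e.g. with target = 65535 and no write pending: at announced = 40000 nothing
// is sent (above the 32767 threshold); at announced = 30000 we announce 35535
// to top the window back up to the target.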
+ int64_t target_window() const override { + return static_cast GPR_MIN( + (int64_t)((1u << 31) - 1), + announced_stream_total_over_incoming_window_ + + target_initial_window_size_); + } + + const grpc_chttp2_transport* transport() const { return t_; } + + void PreUpdateAnnouncedWindowOverIncomingWindow(int64_t delta) { + if (delta > 0) { + announced_stream_total_over_incoming_window_ -= delta; + } else { + announced_stream_total_under_incoming_window_ += -delta; + } + } + + void PostUpdateAnnouncedWindowOverIncomingWindow(int64_t delta) { + if (delta > 0) { + announced_stream_total_over_incoming_window_ += delta; + } else { + announced_stream_total_under_incoming_window_ -= -delta; + } + } + + BdpEstimator* bdp_estimator() override { return &bdp_estimator_; } + + void TestOnlyForceHugeWindow() override { + announced_window_ = 1024 * 1024 * 1024; + remote_window_ = 1024 * 1024 * 1024; + } + + private: + double TargetLogBdp(); + double SmoothLogBdp(double value); + FlowControlAction::Urgency DeltaUrgency(int64_t value, + grpc_chttp2_setting_id setting_id); + + FlowControlAction UpdateAction(FlowControlAction action) { + if (announced_window_ < target_window() / 2) { + action.set_send_transport_update( + FlowControlAction::Urgency::UPDATE_IMMEDIATELY); + } + return action; + } + + const grpc_chttp2_transport* const t_; + + /** calculating what we should give for local window: + we track the total amount of flow control over initial window size + across all streams: this is data that we want to receive right now (it + has an outstanding read) + and the total amount of flow control under initial window size across all + streams: this is data we've read early + we want to adjust incoming_window such that: + incoming_window = total_over - max(bdp - total_under, 0) */ + int64_t announced_stream_total_over_incoming_window_ = 0; + int64_t announced_stream_total_under_incoming_window_ = 0; + + /** should we probe bdp? */ + const bool enable_bdp_probe_; + + /* bdp estimation */ + grpc_core::BdpEstimator bdp_estimator_; + + /* pid controller */ + grpc_core::PidController pid_controller_; + grpc_millis last_pid_update_ = 0; +}; + +// Fat interface with all methods a stream flow control implementation needs +// to support. gRPC C Core does not support pure virtual functions, so instead +// we abort in any methods which require implementation in the base class. +class StreamFlowControlBase { + public: + StreamFlowControlBase() {} + virtual ~StreamFlowControlBase() {} + + // Updates an action using the protected members. + virtual FlowControlAction UpdateAction(FlowControlAction action) { abort(); } + + // Using the protected members, returns an Action for this stream to be + // taken by the tranport. + virtual FlowControlAction MakeAction() { abort(); } + + // Bookkeeping for when data is sent on this stream. + virtual void SentData(int64_t outgoing_frame_size) { abort(); } + + // Bookkeeping and error checking for when data is received by this stream. + virtual grpc_error* RecvData(int64_t incoming_frame_size) { abort(); } + + // Called to check if this stream needs to send a WINDOW_UPDATE frame. + virtual uint32_t MaybeSendUpdate() { abort(); } + + // Bookkeeping for receiving a WINDOW_UPDATE from for this stream. + virtual void RecvUpdate(uint32_t size) { abort(); } + + // Bookkeeping for when a call pulls bytes out of the transport. At this + // point we consider the data 'used' and can thus let out peer know we are + // ready for more data. 
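Whenever a stream's announced window delta changes, the transport's over/under totals above have to be rebalanced: the old delta is subtracted out via the Pre... hook, the delta is mutated, and the new delta is added back via the Post... hook, which is exactly what StreamFlowControl::UpdateAnnouncedWindowDelta does further down. A condensed sketch of that sequence over illustrative scalar state:

#include <cstdint>

struct TransportTotals {  // illustrative stand-in for the two totals
  int64_t over_incoming_window = 0;
  int64_t under_incoming_window = 0;
};

void PreUpdate(TransportTotals* t, int64_t delta) {
  if (delta > 0) t->over_incoming_window -= delta;
  else t->under_incoming_window += -delta;
}

void PostUpdate(TransportTotals* t, int64_t delta) {
  if (delta > 0) t->over_incoming_window += delta;
  else t->under_incoming_window -= -delta;
}

void UpdateAnnouncedWindowDelta(TransportTotals* t, int64_t* delta,
                                int64_t change) {
  PreUpdate(t, *delta);   // remove the stream's old contribution
  *delta += change;
  PostUpdate(t, *delta);  // add back the new contribution
}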
+ virtual void IncomingByteStreamUpdate(size_t max_size_hint, + size_t have_already) { + abort(); + } + + // Used in certain benchmarks in which we don't want FlowControl to be a + // factor + virtual void TestOnlyForceHugeWindow() {} + + // Getters + int64_t remote_window_delta() { return remote_window_delta_; } + int64_t local_window_delta() { return local_window_delta_; } + int64_t announced_window_delta() { return announced_window_delta_; } + + GRPC_ABSTRACT_BASE_CLASS + + protected: + friend class ::grpc::testing::TrickledCHTTP2; + int64_t remote_window_delta_ = 0; + int64_t local_window_delta_ = 0; + int64_t announced_window_delta_ = 0; +}; + +// Implementation of flow control that does NOTHING. Always returns maximum +// values, never initiates writes, and assumes that the remote peer is doing +// the same. To be used to narrow down on flow control as the cause of negative +// performance. +class StreamFlowControlDisabled : public StreamFlowControlBase { + public: + FlowControlAction UpdateAction(FlowControlAction action) override { + return action; + } + FlowControlAction MakeAction() override { return FlowControlAction(); } + void SentData(int64_t outgoing_frame_size) override {} + grpc_error* RecvData(int64_t incoming_frame_size) override { + return GRPC_ERROR_NONE; + } + uint32_t MaybeSendUpdate() override { return 0; } + void RecvUpdate(uint32_t size) override {} + void IncomingByteStreamUpdate(size_t max_size_hint, + size_t have_already) override {} +}; + +// Implementation of flow control that abides to HTTP/2 spec and attempts +// to be as performant as possible. +class StreamFlowControl final : public StreamFlowControlBase { + public: + StreamFlowControl(TransportFlowControl* tfc, const grpc_chttp2_stream* s); + ~StreamFlowControl() { + tfc_->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_); + } + + FlowControlAction UpdateAction(FlowControlAction action) override; + FlowControlAction MakeAction() override { + return UpdateAction(tfc_->MakeAction()); + } + + // we have sent data on the wire, we must track this in our bookkeeping for + // the remote peer's flow control. 
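Sending a DATA frame debits two ledgers at once: the transport-level remote window and the stream's delta against the peer's initial window size, which is what StreamFlowControl::SentData below does by delegating to TransportFlowControl::StreamSentData and then adjusting its own delta. A minimal sketch of the two-ledger idea with illustrative fields:

#include <cstdint>

// Illustrative stand-ins for the transport- and stream-level counters.
struct PeerWindows {
  int64_t transport_remote_window = 65535;  // what the peer will accept overall
  int64_t stream_remote_window_delta = 0;   // stream window relative to the
                                            // peer's initial window setting
};

void OnDataSent(PeerWindows* w, int64_t frame_size) {
  w->transport_remote_window -= frame_size;    // transport ledger
  w->stream_remote_window_delta -= frame_size; // stream ledger
}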
+ void SentData(int64_t outgoing_frame_size) override { + FlowControlTrace tracer(" data sent", tfc_, this); + tfc_->StreamSentData(outgoing_frame_size); + remote_window_delta_ -= outgoing_frame_size; + } + + // we have received data from the wire + grpc_error* RecvData(int64_t incoming_frame_size) override; + + // returns an announce if we should send a stream update to our peer, else + // returns zero + uint32_t MaybeSendUpdate() override; + + // we have received a WINDOW_UPDATE frame for a stream + void RecvUpdate(uint32_t size) override { + FlowControlTrace trace("s updt recv", tfc_, this); + remote_window_delta_ += size; + } + + // the application is asking for a certain amount of bytes + void IncomingByteStreamUpdate(size_t max_size_hint, + size_t have_already) override; + + int64_t remote_window_delta() const { return remote_window_delta_; } + int64_t local_window_delta() const { return local_window_delta_; } + int64_t announced_window_delta() const { return announced_window_delta_; } + + const grpc_chttp2_stream* stream() const { return s_; } + + void TestOnlyForceHugeWindow() override { + announced_window_delta_ = 1024 * 1024 * 1024; + local_window_delta_ = 1024 * 1024 * 1024; + remote_window_delta_ = 1024 * 1024 * 1024; + } + + private: + TransportFlowControl* const tfc_; + const grpc_chttp2_stream* const s_; + + void UpdateAnnouncedWindowDelta(TransportFlowControl* tfc, int64_t change) { + tfc->PreUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_); + announced_window_delta_ += change; + tfc->PostUpdateAnnouncedWindowOverIncomingWindow(announced_window_delta_); + } +}; + +} // namespace chttp2 +} // namespace grpc_core + +#endif diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame.h index dba4c004e..083c0076e 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame.h @@ -19,9 +19,10 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_H -#include #include +#include + #include "src/core/lib/iomgr/error.h" /* defined in internal.h */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.cc similarity index 55% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.cc index 73aaab180..f8f06f678 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/frame_data.h" #include @@ -23,39 +25,37 @@ #include #include #include -#include #include "src/core/ext/transport/chttp2/transport/internal.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/memory.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/transport/transport.h" -grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser) { +grpc_error* grpc_chttp2_data_parser_init(grpc_chttp2_data_parser* parser) { parser->state = GRPC_CHTTP2_DATA_FH_0; - parser->parsing_frame = NULL; + parser->parsing_frame = nullptr; return GRPC_ERROR_NONE; } -void 
grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx, - grpc_chttp2_data_parser *parser) { - if (parser->parsing_frame != NULL) { - GRPC_ERROR_UNREF(grpc_chttp2_incoming_byte_stream_finished( - exec_ctx, parser->parsing_frame, +void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser* parser) { + if (parser->parsing_frame != nullptr) { + GRPC_ERROR_UNREF(parser->parsing_frame->Finished( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Parser destroyed"), false)); } GRPC_ERROR_UNREF(parser->error); } -grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser, +grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser, uint8_t flags, uint32_t stream_id, - grpc_chttp2_stream *s) { + grpc_chttp2_stream* s) { if (flags & ~GRPC_CHTTP2_DATA_FLAG_END_STREAM) { - char *msg; + char* msg; gpr_asprintf(&msg, "unsupported data flags: 0x%02x", flags); - grpc_error *err = - grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg), - GRPC_ERROR_INT_STREAM_ID, (intptr_t)stream_id); + grpc_error* err = grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg), GRPC_ERROR_INT_STREAM_ID, + static_cast(stream_id)); gpr_free(msg); return err; } @@ -69,26 +69,26 @@ grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser, return GRPC_ERROR_NONE; } -void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer *inbuf, +void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf, uint32_t write_bytes, int is_eof, - grpc_transport_one_way_stats *stats, - grpc_slice_buffer *outbuf) { + grpc_transport_one_way_stats* stats, + grpc_slice_buffer* outbuf) { grpc_slice hdr; - uint8_t *p; + uint8_t* p; static const size_t header_size = 9; hdr = GRPC_SLICE_MALLOC(header_size); p = GRPC_SLICE_START_PTR(hdr); GPR_ASSERT(write_bytes < (1 << 24)); - *p++ = (uint8_t)(write_bytes >> 16); - *p++ = (uint8_t)(write_bytes >> 8); - *p++ = (uint8_t)(write_bytes); + *p++ = static_cast(write_bytes >> 16); + *p++ = static_cast(write_bytes >> 8); + *p++ = static_cast(write_bytes); *p++ = GRPC_CHTTP2_FRAME_DATA; *p++ = is_eof ? 
GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0; - *p++ = (uint8_t)(id >> 24); - *p++ = (uint8_t)(id >> 16); - *p++ = (uint8_t)(id >> 8); - *p++ = (uint8_t)(id); + *p++ = static_cast(id >> 24); + *p++ = static_cast(id >> 16); + *p++ = static_cast(id >> 8); + *p++ = static_cast(id); grpc_slice_buffer_add(outbuf, hdr); grpc_slice_buffer_move_first_no_ref(inbuf, write_bytes, outbuf); @@ -97,17 +97,17 @@ void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer *inbuf, stats->data_bytes += write_bytes; } -grpc_error *grpc_deframe_unprocessed_incoming_frames( - grpc_exec_ctx *exec_ctx, grpc_chttp2_data_parser *p, grpc_chttp2_stream *s, - grpc_slice_buffer *slices, grpc_slice *slice_out, - grpc_byte_stream **stream_out) { - grpc_error *error = GRPC_ERROR_NONE; - grpc_chttp2_transport *t = s->t; +grpc_error* grpc_deframe_unprocessed_incoming_frames( + grpc_chttp2_data_parser* p, grpc_chttp2_stream* s, + grpc_slice_buffer* slices, grpc_slice* slice_out, + grpc_core::OrphanablePtr* stream_out) { + grpc_error* error = GRPC_ERROR_NONE; + grpc_chttp2_transport* t = s->t; while (slices->count > 0) { - uint8_t *beg = NULL; - uint8_t *end = NULL; - uint8_t *cur = NULL; + uint8_t* beg = nullptr; + uint8_t* end = nullptr; + uint8_t* cur = nullptr; grpc_slice slice = grpc_slice_buffer_take_first(slices); @@ -115,17 +115,17 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames( end = GRPC_SLICE_END_PTR(slice); cur = beg; uint32_t message_flags; - char *msg; + char* msg; if (cur == end) { - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); continue; } switch (p->state) { case GRPC_CHTTP2_DATA_ERROR: p->state = GRPC_CHTTP2_DATA_ERROR; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); return GRPC_ERROR_REF(p->error); case GRPC_CHTTP2_DATA_FH_0: s->stats.incoming.framing_bytes++; @@ -141,7 +141,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames( gpr_asprintf(&msg, "Bad GRPC frame type 0x%02x", p->frame_type); p->error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_STREAM_ID, - (intptr_t)s->id); + static_cast(s->id)); gpr_free(msg); msg = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII); p->error = grpc_error_set_str(p->error, GRPC_ERROR_STR_RAW_BYTES, @@ -150,136 +150,133 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames( p->error = grpc_error_set_int(p->error, GRPC_ERROR_INT_OFFSET, cur - beg); p->state = GRPC_CHTTP2_DATA_ERROR; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); return GRPC_ERROR_REF(p->error); } if (++cur == end) { p->state = GRPC_CHTTP2_DATA_FH_1; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); continue; } /* fallthrough */ case GRPC_CHTTP2_DATA_FH_1: s->stats.incoming.framing_bytes++; - p->frame_size = ((uint32_t)*cur) << 24; + p->frame_size = (static_cast(*cur)) << 24; if (++cur == end) { p->state = GRPC_CHTTP2_DATA_FH_2; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); continue; } /* fallthrough */ case GRPC_CHTTP2_DATA_FH_2: s->stats.incoming.framing_bytes++; - p->frame_size |= ((uint32_t)*cur) << 16; + p->frame_size |= (static_cast(*cur)) << 16; if (++cur == end) { p->state = GRPC_CHTTP2_DATA_FH_3; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); continue; } /* fallthrough */ case GRPC_CHTTP2_DATA_FH_3: s->stats.incoming.framing_bytes++; - p->frame_size |= ((uint32_t)*cur) << 8; + p->frame_size |= (static_cast(*cur)) << 8; if (++cur == 
end) { p->state = GRPC_CHTTP2_DATA_FH_4; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); continue; } /* fallthrough */ case GRPC_CHTTP2_DATA_FH_4: s->stats.incoming.framing_bytes++; - GPR_ASSERT(stream_out != NULL); - GPR_ASSERT(p->parsing_frame == NULL); - p->frame_size |= ((uint32_t)*cur); + GPR_ASSERT(stream_out != nullptr); + GPR_ASSERT(p->parsing_frame == nullptr); + p->frame_size |= (static_cast(*cur)); p->state = GRPC_CHTTP2_DATA_FRAME; ++cur; message_flags = 0; if (p->is_frame_compressed) { message_flags |= GRPC_WRITE_INTERNAL_COMPRESS; } - p->parsing_frame = grpc_chttp2_incoming_byte_stream_create( - exec_ctx, t, s, p->frame_size, message_flags); - *stream_out = &p->parsing_frame->base; - if (p->parsing_frame->remaining_bytes == 0) { - GRPC_ERROR_UNREF(grpc_chttp2_incoming_byte_stream_finished( - exec_ctx, p->parsing_frame, GRPC_ERROR_NONE, true)); - p->parsing_frame = NULL; + p->parsing_frame = grpc_core::New( + t, s, p->frame_size, message_flags); + stream_out->reset(p->parsing_frame); + if (p->parsing_frame->remaining_bytes() == 0) { + GRPC_ERROR_UNREF(p->parsing_frame->Finished(GRPC_ERROR_NONE, true)); + p->parsing_frame = nullptr; p->state = GRPC_CHTTP2_DATA_FH_0; } s->pending_byte_stream = true; if (cur != end) { grpc_slice_buffer_undo_take_first( - slices, - grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg))); + slices, grpc_slice_sub(slice, static_cast(cur - beg), + static_cast(end - beg))); } - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); return GRPC_ERROR_NONE; case GRPC_CHTTP2_DATA_FRAME: { - GPR_ASSERT(p->parsing_frame != NULL); - GPR_ASSERT(slice_out != NULL); + GPR_ASSERT(p->parsing_frame != nullptr); + GPR_ASSERT(slice_out != nullptr); if (cur == end) { - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); continue; } - uint32_t remaining = (uint32_t)(end - cur); + uint32_t remaining = static_cast(end - cur); if (remaining == p->frame_size) { s->stats.incoming.data_bytes += remaining; - if (GRPC_ERROR_NONE != (error = grpc_chttp2_incoming_byte_stream_push( - exec_ctx, p->parsing_frame, - grpc_slice_sub(slice, (size_t)(cur - beg), - (size_t)(end - beg)), - slice_out))) { - grpc_slice_unref_internal(exec_ctx, slice); + if (GRPC_ERROR_NONE != + (error = p->parsing_frame->Push( + grpc_slice_sub(slice, static_cast(cur - beg), + static_cast(end - beg)), + slice_out))) { + grpc_slice_unref_internal(slice); return error; } if (GRPC_ERROR_NONE != - (error = grpc_chttp2_incoming_byte_stream_finished( - exec_ctx, p->parsing_frame, GRPC_ERROR_NONE, true))) { - grpc_slice_unref_internal(exec_ctx, slice); + (error = p->parsing_frame->Finished(GRPC_ERROR_NONE, true))) { + grpc_slice_unref_internal(slice); return error; } - p->parsing_frame = NULL; + p->parsing_frame = nullptr; p->state = GRPC_CHTTP2_DATA_FH_0; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); return GRPC_ERROR_NONE; } else if (remaining < p->frame_size) { s->stats.incoming.data_bytes += remaining; - if (GRPC_ERROR_NONE != (error = grpc_chttp2_incoming_byte_stream_push( - exec_ctx, p->parsing_frame, - grpc_slice_sub(slice, (size_t)(cur - beg), - (size_t)(end - beg)), - slice_out))) { + if (GRPC_ERROR_NONE != + (error = p->parsing_frame->Push( + grpc_slice_sub(slice, static_cast(cur - beg), + static_cast(end - beg)), + slice_out))) { return error; } p->frame_size -= remaining; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); return 
GRPC_ERROR_NONE; } else { GPR_ASSERT(remaining > p->frame_size); s->stats.incoming.data_bytes += p->frame_size; if (GRPC_ERROR_NONE != - (grpc_chttp2_incoming_byte_stream_push( - exec_ctx, p->parsing_frame, - grpc_slice_sub(slice, (size_t)(cur - beg), - (size_t)(cur + p->frame_size - beg)), - slice_out))) { - grpc_slice_unref_internal(exec_ctx, slice); + p->parsing_frame->Push( + grpc_slice_sub( + slice, static_cast(cur - beg), + static_cast(cur + p->frame_size - beg)), + slice_out)) { + grpc_slice_unref_internal(slice); return error; } if (GRPC_ERROR_NONE != - (error = grpc_chttp2_incoming_byte_stream_finished( - exec_ctx, p->parsing_frame, GRPC_ERROR_NONE, true))) { - grpc_slice_unref_internal(exec_ctx, slice); + (error = p->parsing_frame->Finished(GRPC_ERROR_NONE, true))) { + grpc_slice_unref_internal(slice); return error; } - p->parsing_frame = NULL; + p->parsing_frame = nullptr; p->state = GRPC_CHTTP2_DATA_FH_0; cur += p->frame_size; grpc_slice_buffer_undo_take_first( - slices, - grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg))); - grpc_slice_unref_internal(exec_ctx, slice); + slices, grpc_slice_sub(slice, static_cast(cur - beg), + static_cast(end - beg))); + grpc_slice_unref_internal(slice); return GRPC_ERROR_NONE; } } @@ -289,20 +286,20 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames( return GRPC_ERROR_NONE; } -grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_data_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last) { if (!s->pending_byte_stream) { grpc_slice_ref_internal(slice); grpc_slice_buffer_add(&s->frame_storage, slice); - grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s); + grpc_chttp2_maybe_complete_recv_message(t, s); } else if (s->on_next) { GPR_ASSERT(s->frame_storage.length == 0); grpc_slice_ref_internal(slice); grpc_slice_buffer_add(&s->unprocessed_incoming_frames_buffer, slice); - GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_NONE); - s->on_next = NULL; + GRPC_CLOSURE_SCHED(s->on_next, GRPC_ERROR_NONE); + s->on_next = nullptr; s->unprocessed_incoming_frames_decompressed = false; } else { grpc_slice_ref_internal(slice); @@ -310,8 +307,7 @@ grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, } if (is_last && s->received_last_frame) { - grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false, - GRPC_ERROR_NONE); + grpc_chttp2_mark_stream_closed(t, s, true, false, GRPC_ERROR_NONE); } return GRPC_ERROR_NONE; diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.h index 3f1c78784..e5d01f764 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_data.h @@ -21,10 +21,11 @@ /* Parser for GRPC streams embedded in DATA frames */ +#include + #include #include #include "src/core/ext/transport/chttp2/transport/frame.h" -#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/transport/byte_stream.h" #include "src/core/lib/transport/transport.h" @@ -38,46 +39,46 @@ typedef enum { GRPC_CHTTP2_DATA_ERROR } grpc_chttp2_stream_state; -typedef struct grpc_chttp2_incoming_byte_stream - grpc_chttp2_incoming_byte_stream; +namespace grpc_core { +class Chttp2IncomingByteStream; +} // namespace grpc_core typedef struct { grpc_chttp2_stream_state state; uint8_t frame_type; 
uint32_t frame_size; - grpc_error *error; + grpc_error* error; bool is_frame_compressed; - grpc_chttp2_incoming_byte_stream *parsing_frame; + grpc_core::Chttp2IncomingByteStream* parsing_frame; } grpc_chttp2_data_parser; /* initialize per-stream state for data frame parsing */ -grpc_error *grpc_chttp2_data_parser_init(grpc_chttp2_data_parser *parser); +grpc_error* grpc_chttp2_data_parser_init(grpc_chttp2_data_parser* parser); -void grpc_chttp2_data_parser_destroy(grpc_exec_ctx *exec_ctx, - grpc_chttp2_data_parser *parser); +void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser* parser); /* start processing a new data frame */ -grpc_error *grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser *parser, +grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser, uint8_t flags, uint32_t stream_id, - grpc_chttp2_stream *s); + grpc_chttp2_stream* s); /* handle a slice of a data frame - is_last indicates the last slice of a frame */ -grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_data_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last); -void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer *inbuf, +void grpc_chttp2_encode_data(uint32_t id, grpc_slice_buffer* inbuf, uint32_t write_bytes, int is_eof, - grpc_transport_one_way_stats *stats, - grpc_slice_buffer *outbuf); + grpc_transport_one_way_stats* stats, + grpc_slice_buffer* outbuf); -grpc_error *grpc_deframe_unprocessed_incoming_frames( - grpc_exec_ctx *exec_ctx, grpc_chttp2_data_parser *p, grpc_chttp2_stream *s, - grpc_slice_buffer *slices, grpc_slice *slice_out, - grpc_byte_stream **stream_out); +grpc_error* grpc_deframe_unprocessed_incoming_frames( + grpc_chttp2_data_parser* p, grpc_chttp2_stream* s, + grpc_slice_buffer* slices, grpc_slice* slice_out, + grpc_core::OrphanablePtr* stream_out); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_DATA_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.cc similarity index 65% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.cc index 78ec08e17..2a1dd3c31 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/frame_goaway.h" #include "src/core/ext/transport/chttp2/transport/internal.h" @@ -25,42 +27,42 @@ #include #include -void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p) { - p->debug_data = NULL; +void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser* p) { + p->debug_data = nullptr; } -void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p) { +void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser* p) { gpr_free(p->debug_data); } -grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p, +grpc_error* grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser* p, uint32_t length, uint8_t flags) { if (length < 8) { - char *msg; + char* msg; gpr_asprintf(&msg, "goaway frame too short (%d bytes)", length); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = 
GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } gpr_free(p->debug_data); p->debug_length = length - 8; - p->debug_data = (char *)gpr_malloc(p->debug_length); + p->debug_data = static_cast(gpr_malloc(p->debug_length)); p->debug_pos = 0; p->state = GRPC_CHTTP2_GOAWAY_LSI0; return GRPC_ERROR_NONE; } -grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, - void *parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_goaway_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last) { - uint8_t *const beg = GRPC_SLICE_START_PTR(slice); - uint8_t *const end = GRPC_SLICE_END_PTR(slice); - uint8_t *cur = beg; - grpc_chttp2_goaway_parser *p = (grpc_chttp2_goaway_parser *)parser; + uint8_t* const beg = GRPC_SLICE_START_PTR(slice); + uint8_t* const end = GRPC_SLICE_END_PTR(slice); + uint8_t* cur = beg; + grpc_chttp2_goaway_parser* p = + static_cast(parser); switch (p->state) { case GRPC_CHTTP2_GOAWAY_LSI0: @@ -68,7 +70,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, p->state = GRPC_CHTTP2_GOAWAY_LSI0; return GRPC_ERROR_NONE; } - p->last_stream_id = ((uint32_t)*cur) << 24; + p->last_stream_id = (static_cast(*cur)) << 24; ++cur; /* fallthrough */ case GRPC_CHTTP2_GOAWAY_LSI1: @@ -76,7 +78,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, p->state = GRPC_CHTTP2_GOAWAY_LSI1; return GRPC_ERROR_NONE; } - p->last_stream_id |= ((uint32_t)*cur) << 16; + p->last_stream_id |= (static_cast(*cur)) << 16; ++cur; /* fallthrough */ case GRPC_CHTTP2_GOAWAY_LSI2: @@ -84,7 +86,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, p->state = GRPC_CHTTP2_GOAWAY_LSI2; return GRPC_ERROR_NONE; } - p->last_stream_id |= ((uint32_t)*cur) << 8; + p->last_stream_id |= (static_cast(*cur)) << 8; ++cur; /* fallthrough */ case GRPC_CHTTP2_GOAWAY_LSI3: @@ -92,7 +94,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, p->state = GRPC_CHTTP2_GOAWAY_LSI3; return GRPC_ERROR_NONE; } - p->last_stream_id |= ((uint32_t)*cur); + p->last_stream_id |= (static_cast(*cur)); ++cur; /* fallthrough */ case GRPC_CHTTP2_GOAWAY_ERR0: @@ -100,7 +102,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, p->state = GRPC_CHTTP2_GOAWAY_ERR0; return GRPC_ERROR_NONE; } - p->error_code = ((uint32_t)*cur) << 24; + p->error_code = (static_cast(*cur)) << 24; ++cur; /* fallthrough */ case GRPC_CHTTP2_GOAWAY_ERR1: @@ -108,7 +110,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, p->state = GRPC_CHTTP2_GOAWAY_ERR1; return GRPC_ERROR_NONE; } - p->error_code |= ((uint32_t)*cur) << 16; + p->error_code |= (static_cast(*cur)) << 16; ++cur; /* fallthrough */ case GRPC_CHTTP2_GOAWAY_ERR2: @@ -116,7 +118,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, p->state = GRPC_CHTTP2_GOAWAY_ERR2; return GRPC_ERROR_NONE; } - p->error_code |= ((uint32_t)*cur) << 8; + p->error_code |= (static_cast(*cur)) << 8; ++cur; /* fallthrough */ case GRPC_CHTTP2_GOAWAY_ERR3: @@ -124,20 +126,21 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, p->state = GRPC_CHTTP2_GOAWAY_ERR3; return GRPC_ERROR_NONE; } - p->error_code |= ((uint32_t)*cur); + p->error_code |= (static_cast(*cur)); ++cur; /* fallthrough */ case GRPC_CHTTP2_GOAWAY_DEBUG: if (end != cur) - memcpy(p->debug_data + p->debug_pos, cur, (size_t)(end - cur)); + memcpy(p->debug_data + p->debug_pos, cur, + static_cast(end - cur)); 
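        /* The GOAWAY debug payload may arrive split across several slices;
           debug_pos tracks how many bytes have already been copied into the
           debug_data buffer allocated in begin_frame (frame length minus the
           8 fixed payload bytes). */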
      GPR_ASSERT((size_t)(end - cur) < UINT32_MAX - p->debug_pos);
-     p->debug_pos += (uint32_t)(end - cur);
+     p->debug_pos += static_cast<uint32_t>(end - cur);
      p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
      if (is_last) {
        grpc_chttp2_add_incoming_goaway(
-           exec_ctx, t, (uint32_t)p->error_code,
+           t, p->error_code,
            grpc_slice_new(p->debug_data, p->debug_length, gpr_free));
-       p->debug_data = NULL;
+       p->debug_data = nullptr;
      }
      return GRPC_ERROR_NONE;
  }
@@ -147,17 +150,17 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
 void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
                                grpc_slice debug_data,
-                               grpc_slice_buffer *slice_buffer) {
+                               grpc_slice_buffer* slice_buffer) {
   grpc_slice header = GRPC_SLICE_MALLOC(9 + 4 + 4);
-  uint8_t *p = GRPC_SLICE_START_PTR(header);
+  uint8_t* p = GRPC_SLICE_START_PTR(header);
   uint32_t frame_length;
   GPR_ASSERT(GRPC_SLICE_LENGTH(debug_data) < UINT32_MAX - 4 - 4);
-  frame_length = 4 + 4 + (uint32_t)GRPC_SLICE_LENGTH(debug_data);
+  frame_length = 4 + 4 + static_cast<uint32_t>(GRPC_SLICE_LENGTH(debug_data));
   /* frame header: length */
-  *p++ = (uint8_t)(frame_length >> 16);
-  *p++ = (uint8_t)(frame_length >> 8);
-  *p++ = (uint8_t)(frame_length);
+  *p++ = static_cast<uint8_t>(frame_length >> 16);
+  *p++ = static_cast<uint8_t>(frame_length >> 8);
+  *p++ = static_cast<uint8_t>(frame_length);
   /* frame header: type */
   *p++ = GRPC_CHTTP2_FRAME_GOAWAY;
   /* frame header: flags */
@@ -168,15 +171,15 @@ void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code,
   *p++ = 0;
   *p++ = 0;
   /* payload: last stream id */
-  *p++ = (uint8_t)(last_stream_id >> 24);
-  *p++ = (uint8_t)(last_stream_id >> 16);
-  *p++ = (uint8_t)(last_stream_id >> 8);
-  *p++ = (uint8_t)(last_stream_id);
+  *p++ = static_cast<uint8_t>(last_stream_id >> 24);
+  *p++ = static_cast<uint8_t>(last_stream_id >> 16);
+  *p++ = static_cast<uint8_t>(last_stream_id >> 8);
+  *p++ = static_cast<uint8_t>(last_stream_id);
   /* payload: error code */
-  *p++ = (uint8_t)(error_code >> 24);
-  *p++ = (uint8_t)(error_code >> 16);
-  *p++ = (uint8_t)(error_code >> 8);
-  *p++ = (uint8_t)(error_code);
+  *p++ = static_cast<uint8_t>(error_code >> 24);
+  *p++ = static_cast<uint8_t>(error_code >> 16);
+  *p++ = static_cast<uint8_t>(error_code >> 8);
+  *p++ = static_cast<uint8_t>(error_code);
   GPR_ASSERT(p == GRPC_SLICE_END_PTR(header));
   grpc_slice_buffer_add(slice_buffer, header);
   grpc_slice_buffer_add(slice_buffer, debug_data);
diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.h
index abc48f30c..66c7a68be 100644
--- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.h
+++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_goaway.h
@@ -19,11 +19,11 @@
 #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H
 #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H
+#include
+
 #include
 #include
-#include
 #include "src/core/ext/transport/chttp2/transport/frame.h"
-#include "src/core/lib/iomgr/exec_ctx.h"
 typedef enum {
   GRPC_CHTTP2_GOAWAY_LSI0,
@@ -41,23 +41,22 @@ typedef struct {
   grpc_chttp2_goaway_parse_state state;
   uint32_t last_stream_id;
   uint32_t error_code;
-  char *debug_data;
+  char* debug_data;
   uint32_t debug_length;
   uint32_t debug_pos;
 } grpc_chttp2_goaway_parser;
-void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser *p);
-void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser *p);
-grpc_error *grpc_chttp2_goaway_parser_begin_frame(
-    grpc_chttp2_goaway_parser *parser, uint32_t length, uint8_t flags);
-grpc_error
*grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx, - void *parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +void grpc_chttp2_goaway_parser_init(grpc_chttp2_goaway_parser* p); +void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser* p); +grpc_error* grpc_chttp2_goaway_parser_begin_frame( + grpc_chttp2_goaway_parser* parser, uint32_t length, uint8_t flags); +grpc_error* grpc_chttp2_goaway_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last); void grpc_chttp2_goaway_append(uint32_t last_stream_id, uint32_t error_code, grpc_slice debug_data, - grpc_slice_buffer *slice_buffer); + grpc_slice_buffer* slice_buffer); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.cc similarity index 61% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.cc index d431d6b2d..205826b77 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/frame_ping.h" #include "src/core/ext/transport/chttp2/transport/internal.h" @@ -29,7 +31,7 @@ static bool g_disable_ping_ack = false; grpc_slice grpc_chttp2_ping_create(uint8_t ack, uint64_t opaque_8bytes) { grpc_slice slice = GRPC_SLICE_MALLOC(9 + 8); - uint8_t *p = GRPC_SLICE_START_PTR(slice); + uint8_t* p = GRPC_SLICE_START_PTR(slice); *p++ = 0; *p++ = 0; @@ -40,25 +42,25 @@ grpc_slice grpc_chttp2_ping_create(uint8_t ack, uint64_t opaque_8bytes) { *p++ = 0; *p++ = 0; *p++ = 0; - *p++ = (uint8_t)(opaque_8bytes >> 56); - *p++ = (uint8_t)(opaque_8bytes >> 48); - *p++ = (uint8_t)(opaque_8bytes >> 40); - *p++ = (uint8_t)(opaque_8bytes >> 32); - *p++ = (uint8_t)(opaque_8bytes >> 24); - *p++ = (uint8_t)(opaque_8bytes >> 16); - *p++ = (uint8_t)(opaque_8bytes >> 8); - *p++ = (uint8_t)(opaque_8bytes); + *p++ = static_cast(opaque_8bytes >> 56); + *p++ = static_cast(opaque_8bytes >> 48); + *p++ = static_cast(opaque_8bytes >> 40); + *p++ = static_cast(opaque_8bytes >> 32); + *p++ = static_cast(opaque_8bytes >> 24); + *p++ = static_cast(opaque_8bytes >> 16); + *p++ = static_cast(opaque_8bytes >> 8); + *p++ = static_cast(opaque_8bytes); return slice; } -grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser, +grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser, uint32_t length, uint8_t flags) { if (flags & 0xfe || length != 8) { - char *msg; + char* msg; gpr_asprintf(&msg, "invalid ping: length=%d, flags=%02x", length, flags); - grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return error; } @@ -68,17 +70,17 @@ grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser, return GRPC_ERROR_NONE; } -grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_ping_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last) { - uint8_t *const beg = GRPC_SLICE_START_PTR(slice); - uint8_t *const end = GRPC_SLICE_END_PTR(slice); - uint8_t *cur = beg; - 
grpc_chttp2_ping_parser *p = (grpc_chttp2_ping_parser *)parser; + uint8_t* const beg = GRPC_SLICE_START_PTR(slice); + uint8_t* const end = GRPC_SLICE_END_PTR(slice); + uint8_t* cur = beg; + grpc_chttp2_ping_parser* p = static_cast(parser); while (p->byte != 8 && cur != end) { - p->opaque_8bytes |= (((uint64_t)*cur) << (56 - 8 * p->byte)); + p->opaque_8bytes |= ((static_cast(*cur)) << (56 - 8 * p->byte)); cur++; p->byte++; } @@ -86,13 +88,13 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, if (p->byte == 8) { GPR_ASSERT(is_last); if (p->is_ack) { - grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes); + grpc_chttp2_ack_ping(t, p->opaque_8bytes); } else { if (!t->is_client) { - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_allowed_ping = - gpr_time_add(t->ping_recv_state.last_ping_recv_time, - t->ping_policy.min_recv_ping_interval_without_data); + grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + grpc_millis next_allowed_ping = + t->ping_recv_state.last_ping_recv_time + + t->ping_policy.min_recv_ping_interval_without_data; if (t->keepalive_permit_without_calls == 0 && grpc_chttp2_stream_map_size(&t->stream_map) == 0) { @@ -100,12 +102,11 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, no less than two hours. When there is no outstanding streams, we restrict the number of PINGS equivalent to TCP Keep-Alive. */ next_allowed_ping = - gpr_time_add(t->ping_recv_state.last_ping_recv_time, - gpr_time_from_seconds(7200, GPR_TIMESPAN)); + t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC; } - if (gpr_time_cmp(next_allowed_ping, now) > 0) { - grpc_chttp2_add_ping_strike(exec_ctx, t); + if (next_allowed_ping > now) { + grpc_chttp2_add_ping_strike(t); } t->ping_recv_state.last_ping_recv_time = now; @@ -113,12 +114,11 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, if (!g_disable_ping_ack) { if (t->ping_ack_count == t->ping_ack_capacity) { t->ping_ack_capacity = GPR_MAX(t->ping_ack_capacity * 3 / 2, 3); - t->ping_acks = (uint64_t *)gpr_realloc( - t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks)); + t->ping_acks = static_cast(gpr_realloc( + t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks))); } t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes; - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE); } } } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.h index 5969ace9b..55a4499ad 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_ping.h @@ -19,9 +19,10 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H +#include + #include #include "src/core/ext/transport/chttp2/transport/frame.h" -#include "src/core/lib/iomgr/exec_ctx.h" typedef struct { uint8_t byte; @@ -31,11 +32,11 @@ typedef struct { grpc_slice grpc_chttp2_ping_create(uint8_t ack, uint64_t opaque_8bytes); -grpc_error *grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser *parser, +grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser, uint32_t length, uint8_t flags); -grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser, - grpc_chttp2_transport *t, - 
grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_ping_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last); /* Test-only function for disabling ping ack */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc similarity index 62% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc index 0133b6efa..4bdd4309a 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/frame_rst_stream.h" #include "src/core/ext/transport/chttp2/transport/internal.h" @@ -27,11 +29,11 @@ #include "src/core/lib/transport/http2_errors.h" grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code, - grpc_transport_one_way_stats *stats) { + grpc_transport_one_way_stats* stats) { static const size_t frame_size = 13; grpc_slice slice = GRPC_SLICE_MALLOC(frame_size); stats->framing_bytes += frame_size; - uint8_t *p = GRPC_SLICE_START_PTR(slice); + uint8_t* p = GRPC_SLICE_START_PTR(slice); // Frame size. *p++ = 0; @@ -42,26 +44,26 @@ grpc_slice grpc_chttp2_rst_stream_create(uint32_t id, uint32_t code, // Flags. *p++ = 0; // Stream ID. - *p++ = (uint8_t)(id >> 24); - *p++ = (uint8_t)(id >> 16); - *p++ = (uint8_t)(id >> 8); - *p++ = (uint8_t)(id); + *p++ = static_cast(id >> 24); + *p++ = static_cast(id >> 16); + *p++ = static_cast(id >> 8); + *p++ = static_cast(id); // Error code. - *p++ = (uint8_t)(code >> 24); - *p++ = (uint8_t)(code >> 16); - *p++ = (uint8_t)(code >> 8); - *p++ = (uint8_t)(code); + *p++ = static_cast(code >> 24); + *p++ = static_cast(code >> 16); + *p++ = static_cast(code >> 8); + *p++ = static_cast(code); return slice; } -grpc_error *grpc_chttp2_rst_stream_parser_begin_frame( - grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags) { +grpc_error* grpc_chttp2_rst_stream_parser_begin_frame( + grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags) { if (length != 4) { - char *msg; + char* msg; gpr_asprintf(&msg, "invalid rst_stream: length=%d, flags=%02x", length, flags); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } @@ -69,41 +71,41 @@ grpc_error *grpc_chttp2_rst_stream_parser_begin_frame( return GRPC_ERROR_NONE; } -grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx, - void *parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last) { - uint8_t *const beg = GRPC_SLICE_START_PTR(slice); - uint8_t *const end = GRPC_SLICE_END_PTR(slice); - uint8_t *cur = beg; - grpc_chttp2_rst_stream_parser *p = (grpc_chttp2_rst_stream_parser *)parser; + uint8_t* const beg = GRPC_SLICE_START_PTR(slice); + uint8_t* const end = GRPC_SLICE_END_PTR(slice); + uint8_t* cur = beg; + grpc_chttp2_rst_stream_parser* p = + static_cast(parser); while (p->byte != 4 && cur != end) { p->reason_bytes[p->byte] = *cur; cur++; p->byte++; } - s->stats.incoming.framing_bytes += (uint64_t)(end - cur); + s->stats.incoming.framing_bytes += static_cast(end - cur); if 
(p->byte == 4) { GPR_ASSERT(is_last); - uint32_t reason = (((uint32_t)p->reason_bytes[0]) << 24) | - (((uint32_t)p->reason_bytes[1]) << 16) | - (((uint32_t)p->reason_bytes[2]) << 8) | - (((uint32_t)p->reason_bytes[3])); - grpc_error *error = GRPC_ERROR_NONE; + uint32_t reason = ((static_cast(p->reason_bytes[0])) << 24) | + ((static_cast(p->reason_bytes[1])) << 16) | + ((static_cast(p->reason_bytes[2])) << 8) | + ((static_cast(p->reason_bytes[3]))); + grpc_error* error = GRPC_ERROR_NONE; if (reason != GRPC_HTTP2_NO_ERROR || s->metadata_buffer[1].size == 0) { - char *message; + char* message; gpr_asprintf(&message, "Received RST_STREAM with error code %d", reason); error = grpc_error_set_int( grpc_error_set_str(GRPC_ERROR_CREATE_FROM_STATIC_STRING("RST_STREAM"), GRPC_ERROR_STR_GRPC_MESSAGE, grpc_slice_from_copied_string(message)), - GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)reason); + GRPC_ERROR_INT_HTTP2_ERROR, static_cast(reason)); gpr_free(message); } - grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, error); + grpc_chttp2_mark_stream_closed(t, s, true, true, error); } return GRPC_ERROR_NONE; diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.h index d088221b5..6bcf9c447 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_rst_stream.h @@ -19,9 +19,10 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H +#include + #include #include "src/core/ext/transport/chttp2/transport/frame.h" -#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/transport/transport.h" typedef struct { @@ -30,14 +31,13 @@ typedef struct { } grpc_chttp2_rst_stream_parser; grpc_slice grpc_chttp2_rst_stream_create(uint32_t stream_id, uint32_t code, - grpc_transport_one_way_stats *stats); + grpc_transport_one_way_stats* stats); -grpc_error *grpc_chttp2_rst_stream_parser_begin_frame( - grpc_chttp2_rst_stream_parser *parser, uint32_t length, uint8_t flags); -grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx, - void *parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_rst_stream_parser_begin_frame( + grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags); +grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.cc similarity index 69% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.cc index 637f3059a..987ac0e79 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/frame_settings.h" #include "src/core/ext/transport/chttp2/transport/internal.h" @@ -24,17 +26,16 @@ #include #include #include -#include #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/ext/transport/chttp2/transport/frame.h" #include 
"src/core/lib/debug/trace.h" #include "src/core/lib/transport/http2_errors.h" -static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) { - *out++ = (uint8_t)(length >> 16); - *out++ = (uint8_t)(length >> 8); - *out++ = (uint8_t)(length); +static uint8_t* fill_header(uint8_t* out, uint32_t length, uint8_t flags) { + *out++ = static_cast(length >> 16); + *out++ = static_cast(length >> 8); + *out++ = static_cast(length); *out++ = GRPC_CHTTP2_FRAME_SETTINGS; *out++ = flags; *out++ = 0; @@ -44,13 +45,13 @@ static uint8_t *fill_header(uint8_t *out, uint32_t length, uint8_t flags) { return out; } -grpc_slice grpc_chttp2_settings_create(uint32_t *old_settings, - const uint32_t *new_settings, +grpc_slice grpc_chttp2_settings_create(uint32_t* old_settings, + const uint32_t* new_settings, uint32_t force_mask, size_t count) { size_t i; uint32_t n = 0; grpc_slice output; - uint8_t *p; + uint8_t* p; for (i = 0; i < count; i++) { n += (new_settings[i] != old_settings[i] || (force_mask & (1u << i)) != 0); @@ -61,12 +62,12 @@ grpc_slice grpc_chttp2_settings_create(uint32_t *old_settings, for (i = 0; i < count; i++) { if (new_settings[i] != old_settings[i] || (force_mask & (1u << i)) != 0) { - *p++ = (uint8_t)(grpc_setting_id_to_wire_id[i] >> 8); - *p++ = (uint8_t)(grpc_setting_id_to_wire_id[i]); - *p++ = (uint8_t)(new_settings[i] >> 24); - *p++ = (uint8_t)(new_settings[i] >> 16); - *p++ = (uint8_t)(new_settings[i] >> 8); - *p++ = (uint8_t)(new_settings[i]); + *p++ = static_cast(grpc_setting_id_to_wire_id[i] >> 8); + *p++ = static_cast(grpc_setting_id_to_wire_id[i]); + *p++ = static_cast(new_settings[i] >> 24); + *p++ = static_cast(new_settings[i] >> 16); + *p++ = static_cast(new_settings[i] >> 8); + *p++ = static_cast(new_settings[i]); old_settings[i] = new_settings[i]; } } @@ -82,9 +83,9 @@ grpc_slice grpc_chttp2_settings_ack_create(void) { return output; } -grpc_error *grpc_chttp2_settings_parser_begin_frame( - grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags, - uint32_t *settings) { +grpc_error* grpc_chttp2_settings_parser_begin_frame( + grpc_chttp2_settings_parser* parser, uint32_t length, uint8_t flags, + uint32_t* settings) { parser->target_settings = settings; memcpy(parser->incoming_settings, settings, GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t)); @@ -108,14 +109,14 @@ grpc_error *grpc_chttp2_settings_parser_begin_frame( } } -grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last) { - grpc_chttp2_settings_parser *parser = (grpc_chttp2_settings_parser *)p; - const uint8_t *cur = GRPC_SLICE_START_PTR(slice); - const uint8_t *end = GRPC_SLICE_END_PTR(slice); - char *msg; + grpc_chttp2_settings_parser* parser = + static_cast(p); + const uint8_t* cur = GRPC_SLICE_START_PTR(slice); + const uint8_t* end = GRPC_SLICE_END_PTR(slice); + char* msg; grpc_chttp2_setting_id id; if (parser->is_ack) { @@ -131,15 +132,15 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p, memcpy(parser->target_settings, parser->incoming_settings, GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t)); grpc_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create()); - if (t->notify_on_receive_settings != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, t->notify_on_receive_settings, + if (t->notify_on_receive_settings != nullptr) { + 
GRPC_CLOSURE_SCHED(t->notify_on_receive_settings, GRPC_ERROR_NONE); - t->notify_on_receive_settings = NULL; + t->notify_on_receive_settings = nullptr; } } return GRPC_ERROR_NONE; } - parser->id = (uint16_t)(((uint16_t)*cur) << 8); + parser->id = static_cast((static_cast(*cur)) << 8); cur++; /* fallthrough */ case GRPC_CHTTP2_SPS_ID1: @@ -147,7 +148,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p, parser->state = GRPC_CHTTP2_SPS_ID1; return GRPC_ERROR_NONE; } - parser->id = (uint16_t)(parser->id | (*cur)); + parser->id = static_cast(parser->id | (*cur)); cur++; /* fallthrough */ case GRPC_CHTTP2_SPS_VAL0: @@ -155,7 +156,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p, parser->state = GRPC_CHTTP2_SPS_VAL0; return GRPC_ERROR_NONE; } - parser->value = ((uint32_t)*cur) << 24; + parser->value = (static_cast(*cur)) << 24; cur++; /* fallthrough */ case GRPC_CHTTP2_SPS_VAL1: @@ -163,7 +164,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p, parser->state = GRPC_CHTTP2_SPS_VAL1; return GRPC_ERROR_NONE; } - parser->value |= ((uint32_t)*cur) << 16; + parser->value |= (static_cast(*cur)) << 16; cur++; /* fallthrough */ case GRPC_CHTTP2_SPS_VAL2: @@ -171,7 +172,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p, parser->state = GRPC_CHTTP2_SPS_VAL2; return GRPC_ERROR_NONE; } - parser->value |= ((uint32_t)*cur) << 8; + parser->value |= (static_cast(*cur)) << 8; cur++; /* fallthrough */ case GRPC_CHTTP2_SPS_VAL3: @@ -185,8 +186,14 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p, cur++; if (grpc_wire_id_to_setting_id(parser->id, &id)) { - const grpc_chttp2_setting_parameters *sp = + const grpc_chttp2_setting_parameters* sp = &grpc_chttp2_settings_parameters[id]; + // If flow control is disabled we skip these. + if (!t->flow_control->flow_control_enabled() && + (id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE || + id == GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE)) { + continue; + } if (parser->value < sp->min_value || parser->value > sp->max_value) { switch (sp->invalid_value_behavior) { case GRPC_CHTTP2_CLAMP_INVALID_VALUE: @@ -200,29 +207,28 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p, &t->qbuf); gpr_asprintf(&msg, "invalid value %u passed for %s", parser->value, sp->name); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } } if (id == GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE && parser->incoming_settings[id] != parser->value) { - t->flow_control.initial_window_update += - (int64_t)parser->value - parser->incoming_settings[id]; - if (GRPC_TRACER_ON(grpc_http_trace) || - GRPC_TRACER_ON(grpc_flowctl_trace)) { - gpr_log(GPR_DEBUG, "%p[%s] adding %d for initial_window change", - t, t->is_client ? "cli" : "svr", - (int)t->flow_control.initial_window_update); + t->initial_window_update += static_cast(parser->value) - + parser->incoming_settings[id]; + if (grpc_http_trace.enabled() || grpc_flowctl_trace.enabled()) { + gpr_log(GPR_INFO, "%p[%s] adding %d for initial_window change", t, + t->is_client ? "cli" : "svr", + static_cast(t->initial_window_update)); } } parser->incoming_settings[id] = parser->value; - if (GRPC_TRACER_ON(grpc_http_trace)) { - gpr_log(GPR_DEBUG, "CHTTP2:%s:%s: got setting %s = %d", + if (grpc_http_trace.enabled()) { + gpr_log(GPR_INFO, "CHTTP2:%s:%s: got setting %s = %d", t->is_client ? 
"CLI" : "SVR", t->peer_string, sp->name, parser->value); } - } else if (GRPC_TRACER_ON(grpc_http_trace)) { + } else if (grpc_http_trace.enabled()) { gpr_log(GPR_ERROR, "CHTTP2: Ignoring unknown setting %d (value %d)", parser->id, parser->value); } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.h index 47479d675..8d8d9b1a9 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_settings.h @@ -19,11 +19,11 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H -#include #include + +#include #include "src/core/ext/transport/chttp2/transport/frame.h" #include "src/core/ext/transport/chttp2/transport/http2_settings.h" -#include "src/core/lib/iomgr/exec_ctx.h" typedef enum { GRPC_CHTTP2_SPS_ID0, @@ -36,7 +36,7 @@ typedef enum { typedef struct { grpc_chttp2_settings_parse_state state; - uint32_t *target_settings; + uint32_t* target_settings; uint8_t is_ack; uint16_t id; uint32_t value; @@ -44,18 +44,17 @@ typedef struct { } grpc_chttp2_settings_parser; /* Create a settings frame by diffing old & new, and updating old to be new */ -grpc_slice grpc_chttp2_settings_create(uint32_t *old, const uint32_t *newval, +grpc_slice grpc_chttp2_settings_create(uint32_t* old, const uint32_t* newval, uint32_t force_mask, size_t count); /* Create an ack settings frame */ grpc_slice grpc_chttp2_settings_ack_create(void); -grpc_error *grpc_chttp2_settings_parser_begin_frame( - grpc_chttp2_settings_parser *parser, uint32_t length, uint8_t flags, - uint32_t *settings); -grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, - void *parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_settings_parser_begin_frame( + grpc_chttp2_settings_parser* parser, uint32_t length, uint8_t flags, + uint32_t* settings); +grpc_error* grpc_chttp2_settings_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.cc similarity index 52% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.cc index c9ab8d1b5..4b586dc3e 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/frame_window_update.h" #include "src/core/ext/transport/chttp2/transport/internal.h" @@ -24,11 +26,11 @@ #include grpc_slice grpc_chttp2_window_update_create( - uint32_t id, uint32_t window_update, grpc_transport_one_way_stats *stats) { + uint32_t id, uint32_t window_update, grpc_transport_one_way_stats* stats) { static const size_t frame_size = 13; grpc_slice slice = GRPC_SLICE_MALLOC(frame_size); stats->header_bytes += frame_size; - uint8_t *p = GRPC_SLICE_START_PTR(slice); + uint8_t* p = GRPC_SLICE_START_PTR(slice); GPR_ASSERT(window_update); @@ -37,25 +39,25 @@ grpc_slice grpc_chttp2_window_update_create( *p++ = 4; *p++ = 
GRPC_CHTTP2_FRAME_WINDOW_UPDATE; *p++ = 0; - *p++ = (uint8_t)(id >> 24); - *p++ = (uint8_t)(id >> 16); - *p++ = (uint8_t)(id >> 8); - *p++ = (uint8_t)(id); - *p++ = (uint8_t)(window_update >> 24); - *p++ = (uint8_t)(window_update >> 16); - *p++ = (uint8_t)(window_update >> 8); - *p++ = (uint8_t)(window_update); + *p++ = static_cast(id >> 24); + *p++ = static_cast(id >> 16); + *p++ = static_cast(id >> 8); + *p++ = static_cast(id); + *p++ = static_cast(window_update >> 24); + *p++ = static_cast(window_update >> 16); + *p++ = static_cast(window_update >> 8); + *p++ = static_cast(window_update); return slice; } -grpc_error *grpc_chttp2_window_update_parser_begin_frame( - grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags) { +grpc_error* grpc_chttp2_window_update_parser_begin_frame( + grpc_chttp2_window_update_parser* parser, uint32_t length, uint8_t flags) { if (flags || length != 4) { - char *msg; + char* msg; gpr_asprintf(&msg, "invalid window update: length=%d, flags=%02x", length, flags); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } @@ -64,56 +66,54 @@ grpc_error *grpc_chttp2_window_update_parser_begin_frame( return GRPC_ERROR_NONE; } -grpc_error *grpc_chttp2_window_update_parser_parse( - grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t, - grpc_chttp2_stream *s, grpc_slice slice, int is_last) { - uint8_t *const beg = GRPC_SLICE_START_PTR(slice); - uint8_t *const end = GRPC_SLICE_END_PTR(slice); - uint8_t *cur = beg; - grpc_chttp2_window_update_parser *p = - (grpc_chttp2_window_update_parser *)parser; +grpc_error* grpc_chttp2_window_update_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, + grpc_slice slice, + int is_last) { + uint8_t* const beg = GRPC_SLICE_START_PTR(slice); + uint8_t* const end = GRPC_SLICE_END_PTR(slice); + uint8_t* cur = beg; + grpc_chttp2_window_update_parser* p = + static_cast(parser); while (p->byte != 4 && cur != end) { - p->amount |= ((uint32_t)*cur) << (8 * (3 - p->byte)); + p->amount |= (static_cast(*cur)) << (8 * (3 - p->byte)); cur++; p->byte++; } - if (s != NULL) { - s->stats.incoming.framing_bytes += (uint32_t)(end - cur); + if (s != nullptr) { + s->stats.incoming.framing_bytes += static_cast(end - cur); } if (p->byte == 4) { uint32_t received_update = p->amount; if (received_update == 0 || (received_update & 0x80000000u)) { - char *msg; + char* msg; gpr_asprintf(&msg, "invalid window update bytes: %d", p->amount); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } GPR_ASSERT(is_last); if (t->incoming_stream_id != 0) { - if (s != NULL) { - grpc_chttp2_flowctl_recv_stream_update( - &t->flow_control, &s->flow_control, received_update); + if (s != nullptr) { + s->flow_control->RecvUpdate(received_update); if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) { - grpc_chttp2_mark_stream_writable(exec_ctx, t, s); + grpc_chttp2_mark_stream_writable(t, s); grpc_chttp2_initiate_write( - exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE); + t, GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE); } } } else { - bool was_zero = t->flow_control.remote_window <= 0; - grpc_chttp2_flowctl_recv_transport_update(&t->flow_control, - received_update); - bool is_zero = t->flow_control.remote_window <= 0; + bool was_zero = t->flow_control->remote_window() <= 0; + 
t->flow_control->RecvUpdate(received_update); + bool is_zero = t->flow_control->remote_window() <= 0; if (was_zero && !is_zero) { grpc_chttp2_initiate_write( - exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED); + t, GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED); } } } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.h index 698da4e35..3d2391f63 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/frame_window_update.h @@ -19,9 +19,10 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H +#include + #include #include "src/core/ext/transport/chttp2/transport/frame.h" -#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/transport/transport.h" typedef struct { @@ -31,12 +32,14 @@ typedef struct { } grpc_chttp2_window_update_parser; grpc_slice grpc_chttp2_window_update_create( - uint32_t id, uint32_t window_delta, grpc_transport_one_way_stats *stats); + uint32_t id, uint32_t window_delta, grpc_transport_one_way_stats* stats); -grpc_error *grpc_chttp2_window_update_parser_begin_frame( - grpc_chttp2_window_update_parser *parser, uint32_t length, uint8_t flags); -grpc_error *grpc_chttp2_window_update_parser_parse( - grpc_exec_ctx *exec_ctx, void *parser, grpc_chttp2_transport *t, - grpc_chttp2_stream *s, grpc_slice slice, int is_last); +grpc_error* grpc_chttp2_window_update_parser_begin_frame( + grpc_chttp2_window_update_parser* parser, uint32_t length, uint8_t flags); +grpc_error* grpc_chttp2_window_update_parser_parse(void* parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, + grpc_slice slice, + int is_last); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.cc similarity index 63% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.cc index a404b664e..d5ef06388 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/hpack_encoder.h" #include @@ -28,7 +30,6 @@ #include #include -#include #include "src/core/ext/transport/chttp2/transport/bin_encoder.h" #include "src/core/ext/transport/chttp2/transport/hpack_table.h" @@ -51,14 +52,12 @@ /* don't consider adding anything bigger than this to the hpack table */ #define MAX_DECODER_SPACE_USAGE 512 -static grpc_slice_refcount terminal_slice_refcount = {NULL, NULL}; +static grpc_slice_refcount terminal_slice_refcount = {nullptr, nullptr}; static const grpc_slice terminal_slice = { &terminal_slice_refcount, /* refcount */ - {{0, 0}} /* data.refcounted */ + {{nullptr, 0}} /* data.refcounted */ }; -extern grpc_tracer_flag grpc_http_trace; - typedef struct { int is_first_frame; /* number of bytes in 'output' when we started the frame - used to calculate @@ -70,30 +69,30 @@ typedef struct { uint8_t seen_regular_header; /* output stream id */ uint32_t stream_id; - grpc_slice_buffer *output; - grpc_transport_one_way_stats *stats; + 
grpc_slice_buffer* output; + grpc_transport_one_way_stats* stats; /* maximum size of a frame */ size_t max_frame_size; bool use_true_binary_metadata; } framer_state; /* fills p (which is expected to be 9 bytes long) with a data frame header */ -static void fill_header(uint8_t *p, uint8_t type, uint32_t id, size_t len, +static void fill_header(uint8_t* p, uint8_t type, uint32_t id, size_t len, uint8_t flags) { GPR_ASSERT(len < 16777316); - *p++ = (uint8_t)(len >> 16); - *p++ = (uint8_t)(len >> 8); - *p++ = (uint8_t)(len); + *p++ = static_cast(len >> 16); + *p++ = static_cast(len >> 8); + *p++ = static_cast(len); *p++ = type; *p++ = flags; - *p++ = (uint8_t)(id >> 24); - *p++ = (uint8_t)(id >> 16); - *p++ = (uint8_t)(id >> 8); - *p++ = (uint8_t)(id); + *p++ = static_cast(id >> 24); + *p++ = static_cast(id >> 16); + *p++ = static_cast(id >> 8); + *p++ = static_cast(id); } /* finish a frame - fill in the previously reserved header */ -static void finish_frame(framer_state *st, int is_header_boundary, +static void finish_frame(framer_state* st, int is_header_boundary, int is_last_in_stream) { uint8_t type = 0xff; type = st->is_first_frame ? GRPC_CHTTP2_FRAME_HEADER @@ -101,15 +100,16 @@ static void finish_frame(framer_state *st, int is_header_boundary, fill_header( GRPC_SLICE_START_PTR(st->output->slices[st->header_idx]), type, st->stream_id, st->output->length - st->output_length_at_start_of_frame, - (uint8_t)((is_last_in_stream ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0) | - (is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0))); + static_cast( + (is_last_in_stream ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0) | + (is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0))); st->stats->framing_bytes += 9; st->is_first_frame = 0; } /* begin a new frame: reserve off header space, remember how many bytes we'd output before beginning */ -static void begin_frame(framer_state *st) { +static void begin_frame(framer_state* st) { st->header_idx = grpc_slice_buffer_add_indexed(st->output, GRPC_SLICE_MALLOC(9)); st->output_length_at_start_of_frame = st->output->length; @@ -118,7 +118,7 @@ static void begin_frame(framer_state *st) { /* make sure that the current frame is of the type desired, and has sufficient space to add at least about_to_add bytes -- finishes the current frame if needed */ -static void ensure_space(framer_state *st, size_t need_bytes) { +static void ensure_space(framer_state* st, size_t need_bytes) { if (st->output->length - st->output_length_at_start_of_frame + need_bytes <= st->max_frame_size) { return; @@ -128,7 +128,7 @@ static void ensure_space(framer_state *st, size_t need_bytes) { } /* increment a filter count, halve all counts if one element reaches max */ -static void inc_filter(uint8_t idx, uint32_t *sum, uint8_t *elems) { +static void inc_filter(uint8_t idx, uint32_t* sum, uint8_t* elems) { elems[idx]++; if (elems[idx] < 255) { (*sum)++; @@ -142,7 +142,7 @@ static void inc_filter(uint8_t idx, uint32_t *sum, uint8_t *elems) { } } -static void add_header_data(framer_state *st, grpc_slice slice) { +static void add_header_data(framer_state* st, grpc_slice slice) { size_t len = GRPC_SLICE_LENGTH(slice); size_t remaining; if (len == 0) return; @@ -160,42 +160,37 @@ static void add_header_data(framer_state *st, grpc_slice slice) { } } -static uint8_t *add_tiny_header_data(framer_state *st, size_t len) { +static uint8_t* add_tiny_header_data(framer_state* st, size_t len) { ensure_space(st, len); st->stats->header_bytes += len; return grpc_slice_buffer_tiny_add(st->output, len); } 
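For reference while reading fill_header and finish_frame above: every HTTP/2 frame begins with a 9-octet header holding a 24-bit payload length, an 8-bit type, an 8-bit flags byte, and a 31-bit stream id (the top bit is reserved), which is why the encoder reserves 9 bytes up front and patches them in only once the frame's final length and flags are known. A minimal standalone sketch of that packing and its inverse; the struct and helper names below are illustrative and not part of the vendored sources:

#include <stdint.h>

typedef struct {
  uint32_t length;    /* 24-bit payload length */
  uint8_t type;       /* e.g. GRPC_CHTTP2_FRAME_HEADER, GRPC_CHTTP2_FRAME_GOAWAY */
  uint8_t flags;      /* END_STREAM / END_HEADERS etc. */
  uint32_t stream_id; /* 31 bits; callers keep the high bit clear */
} http2_frame_header;

/* Pack a 9-octet HTTP/2 frame header into out[0..8], mirroring fill_header. */
static void pack_frame_header(const http2_frame_header* h, uint8_t* out) {
  out[0] = (uint8_t)(h->length >> 16);
  out[1] = (uint8_t)(h->length >> 8);
  out[2] = (uint8_t)(h->length);
  out[3] = h->type;
  out[4] = h->flags;
  out[5] = (uint8_t)(h->stream_id >> 24);
  out[6] = (uint8_t)(h->stream_id >> 16);
  out[7] = (uint8_t)(h->stream_id >> 8);
  out[8] = (uint8_t)(h->stream_id);
}

/* Inverse: recover the fields from a received 9-octet header. */
static void parse_frame_header(const uint8_t* in, http2_frame_header* h) {
  h->length = ((uint32_t)in[0] << 16) | ((uint32_t)in[1] << 8) | (uint32_t)in[2];
  h->type = in[3];
  h->flags = in[4];
  h->stream_id = (((uint32_t)in[5] & 0x7f) << 24) | ((uint32_t)in[6] << 16) |
                 ((uint32_t)in[7] << 8) | (uint32_t)in[8];
}

Deferring the header bytes until finish_frame is what lets the encoder keep appending header-block fragments and only then decide the frame's length and whether to set END_HEADERS or END_STREAM.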
-static void evict_entry(grpc_chttp2_hpack_compressor *c) { +static void evict_entry(grpc_chttp2_hpack_compressor* c) { c->tail_remote_index++; GPR_ASSERT(c->tail_remote_index > 0); GPR_ASSERT(c->table_size >= c->table_elem_size[c->tail_remote_index % c->cap_table_elems]); GPR_ASSERT(c->table_elems > 0); - c->table_size = - (uint16_t)(c->table_size - - c->table_elem_size[c->tail_remote_index % c->cap_table_elems]); + c->table_size = static_cast( + c->table_size - + c->table_elem_size[c->tail_remote_index % c->cap_table_elems]); c->table_elems--; } -/* add an element to the decoder table */ -static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c, - grpc_mdelem elem) { - GPR_ASSERT(GRPC_MDELEM_IS_INTERNED(elem)); - - uint32_t key_hash = grpc_slice_hash(GRPC_MDKEY(elem)); - uint32_t value_hash = grpc_slice_hash(GRPC_MDVALUE(elem)); - uint32_t elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash); +// Reserve space in table for the new element, evict entries if needed. +// Return the new index of the element. Return 0 to indicate not adding to +// table. +static uint32_t prepare_space_for_new_elem(grpc_chttp2_hpack_compressor* c, + size_t elem_size) { uint32_t new_index = c->tail_remote_index + c->table_elems + 1; - size_t elem_size = grpc_mdelem_get_size_in_hpack_table(elem); - GPR_ASSERT(elem_size < 65536); if (elem_size > c->max_table_size) { while (c->table_size > 0) { evict_entry(c); } - return; + return 0; } /* Reserve space for this element in the remote table: if this overflows @@ -205,41 +200,29 @@ static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c, evict_entry(c); } GPR_ASSERT(c->table_elems < c->max_table_size); - c->table_elem_size[new_index % c->cap_table_elems] = (uint16_t)elem_size; - c->table_size = (uint16_t)(c->table_size + elem_size); + c->table_elem_size[new_index % c->cap_table_elems] = + static_cast(elem_size); + c->table_size = static_cast(c->table_size + elem_size); c->table_elems++; - /* Store this element into {entries,indices}_elem */ - if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem)) { - /* already there: update with new index */ - c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index; - } else if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], - elem)) { - /* already there (cuckoo): update with new index */ - c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index; - } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_2(elem_hash)])) { - /* not there, but a free element: add */ - c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem); - c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index; - } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_3(elem_hash)])) { - /* not there (cuckoo), but a free element: add */ - c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem); - c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index; - } else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] < - c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) { - /* not there: replace oldest */ - GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[HASH_FRAGMENT_2(elem_hash)]); - c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem); - c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index; - } else { - /* not there: replace oldest */ - GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[HASH_FRAGMENT_3(elem_hash)]); - c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem); - c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index; + return 
new_index; +} + +/* dummy function */ +static void add_nothing(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem, + size_t elem_size) {} + +// Add a key to the dynamic table. Both key and value will be added to table at +// the decoder. +static void add_key_with_index(grpc_chttp2_hpack_compressor* c, + grpc_mdelem elem, uint32_t new_index) { + if (new_index == 0) { + return; } - /* do exactly the same for the key (so we can find by that again too) */ + uint32_t key_hash = grpc_slice_hash(GRPC_MDKEY(elem)); + /* Store the key into {entries,indices}_keys */ if (grpc_slice_eq(c->entries_keys[HASH_FRAGMENT_2(key_hash)], GRPC_MDKEY(elem))) { c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index; @@ -258,24 +241,77 @@ static void add_elem(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c, c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index; } else if (c->indices_keys[HASH_FRAGMENT_2(key_hash)] < c->indices_keys[HASH_FRAGMENT_3(key_hash)]) { - grpc_slice_unref_internal(exec_ctx, - c->entries_keys[HASH_FRAGMENT_2(key_hash)]); + grpc_slice_unref_internal(c->entries_keys[HASH_FRAGMENT_2(key_hash)]); c->entries_keys[HASH_FRAGMENT_2(key_hash)] = grpc_slice_ref_internal(GRPC_MDKEY(elem)); c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index; } else { - grpc_slice_unref_internal(exec_ctx, - c->entries_keys[HASH_FRAGMENT_3(key_hash)]); + grpc_slice_unref_internal(c->entries_keys[HASH_FRAGMENT_3(key_hash)]); c->entries_keys[HASH_FRAGMENT_3(key_hash)] = grpc_slice_ref_internal(GRPC_MDKEY(elem)); c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index; } } -static void emit_indexed(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c, uint32_t elem_index, - framer_state *st) { - GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx); +/* add an element to the decoder table */ +static void add_elem_with_index(grpc_chttp2_hpack_compressor* c, + grpc_mdelem elem, uint32_t new_index) { + if (new_index == 0) { + return; + } + GPR_ASSERT(GRPC_MDELEM_IS_INTERNED(elem)); + + uint32_t key_hash = grpc_slice_hash(GRPC_MDKEY(elem)); + uint32_t value_hash = grpc_slice_hash(GRPC_MDVALUE(elem)); + uint32_t elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash); + + /* Store this element into {entries,indices}_elem */ + if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem)) { + /* already there: update with new index */ + c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index; + } else if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], + elem)) { + /* already there (cuckoo): update with new index */ + c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index; + } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_2(elem_hash)])) { + /* not there, but a free element: add */ + c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem); + c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index; + } else if (GRPC_MDISNULL(c->entries_elems[HASH_FRAGMENT_3(elem_hash)])) { + /* not there (cuckoo), but a free element: add */ + c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem); + c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index; + } else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] < + c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) { + /* not there: replace oldest */ + GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_2(elem_hash)]); + c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem); + c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index; + } else { + /* not there: replace oldest */ + 
GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_3(elem_hash)]); + c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem); + c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index; + } + + add_key_with_index(c, elem, new_index); +} + +static void add_elem(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem, + size_t elem_size) { + uint32_t new_index = prepare_space_for_new_elem(c, elem_size); + add_elem_with_index(c, elem, new_index); +} + +static void add_key(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem, + size_t elem_size) { + uint32_t new_index = prepare_space_for_new_elem(c, elem_size); + add_key_with_index(c, elem, new_index); +} + +static void emit_indexed(grpc_chttp2_hpack_compressor* c, uint32_t elem_index, + framer_state* st) { + GRPC_STATS_INC_HPACK_SEND_INDEXED(); uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1); GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len), len); @@ -287,18 +323,17 @@ typedef struct { bool insert_null_before_wire_value; } wire_value; -static wire_value get_wire_value(grpc_exec_ctx *exec_ctx, grpc_mdelem elem, - bool true_binary_enabled) { +static wire_value get_wire_value(grpc_mdelem elem, bool true_binary_enabled) { wire_value wire_val; if (grpc_is_binary_header(GRPC_MDKEY(elem))) { if (true_binary_enabled) { - GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx); + GRPC_STATS_INC_HPACK_SEND_BINARY(); wire_val.huffman_prefix = 0x00; wire_val.insert_null_before_wire_value = true; wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)); } else { - GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx); + GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(); wire_val.huffman_prefix = 0x80; wire_val.insert_null_before_wire_value = false; wire_val.data = @@ -306,7 +341,7 @@ static wire_value get_wire_value(grpc_exec_ctx *exec_ctx, grpc_mdelem elem, } } else { /* TODO(ctiller): opportunistically compress non-binary headers */ - GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx); + GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(); wire_val.huffman_prefix = 0x00; wire_val.insert_null_before_wire_value = false; wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem)); @@ -318,19 +353,17 @@ static size_t wire_value_length(wire_value v) { return GPR_SLICE_LENGTH(v.data) + v.insert_null_before_wire_value; } -static void add_wire_value(framer_state *st, wire_value v) { +static void add_wire_value(framer_state* st, wire_value v) { if (v.insert_null_before_wire_value) *add_tiny_header_data(st, 1) = 0; add_header_data(st, v.data); } -static void emit_lithdr_incidx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c, +static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor* c, uint32_t key_index, grpc_mdelem elem, - framer_state *st) { - GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx); + framer_state* st) { + GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(); uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2); - wire_value value = - get_wire_value(exec_ctx, elem, st->use_true_binary_metadata); + wire_value value = get_wire_value(elem, st->use_true_binary_metadata); size_t len_val = wire_value_length(value); uint32_t len_val_len; GPR_ASSERT(len_val <= UINT32_MAX); @@ -342,14 +375,12 @@ static void emit_lithdr_incidx(grpc_exec_ctx *exec_ctx, add_wire_value(st, value); } -static void emit_lithdr_noidx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c, +static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor* c, uint32_t key_index, grpc_mdelem elem, - framer_state *st) { - GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx); + 
framer_state* st) { + GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(); uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4); - wire_value value = - get_wire_value(exec_ctx, elem, st->use_true_binary_metadata); + wire_value value = get_wire_value(elem, st->use_true_binary_metadata); size_t len_val = wire_value_length(value); uint32_t len_val_len; GPR_ASSERT(len_val <= UINT32_MAX); @@ -361,15 +392,15 @@ static void emit_lithdr_noidx(grpc_exec_ctx *exec_ctx, add_wire_value(st, value); } -static void emit_lithdr_incidx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c, - grpc_mdelem elem, framer_state *st) { - GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx); - GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx); - uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)); - wire_value value = - get_wire_value(exec_ctx, elem, st->use_true_binary_metadata); - uint32_t len_val = (uint32_t)wire_value_length(value); +static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor* c, + uint32_t unused_index, grpc_mdelem elem, + framer_state* st) { + GPR_ASSERT(unused_index == 0); + GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(); + GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(); + uint32_t len_key = static_cast GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)); + wire_value value = get_wire_value(elem, st->use_true_binary_metadata); + uint32_t len_val = static_cast(wire_value_length(value)); uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1); uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1); GPR_ASSERT(len_key <= UINT32_MAX); @@ -383,15 +414,15 @@ static void emit_lithdr_incidx_v(grpc_exec_ctx *exec_ctx, add_wire_value(st, value); } -static void emit_lithdr_noidx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c, - grpc_mdelem elem, framer_state *st) { - GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx); - GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx); - uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)); - wire_value value = - get_wire_value(exec_ctx, elem, st->use_true_binary_metadata); - uint32_t len_val = (uint32_t)wire_value_length(value); +static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor* c, + uint32_t unused_index, grpc_mdelem elem, + framer_state* st) { + GPR_ASSERT(unused_index == 0); + GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(); + GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(); + uint32_t len_key = static_cast GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)); + wire_value value = get_wire_value(elem, st->use_true_binary_metadata); + uint32_t len_val = static_cast(wire_value_length(value)); uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1); uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1); GPR_ASSERT(len_key <= UINT32_MAX); @@ -405,22 +436,22 @@ static void emit_lithdr_noidx_v(grpc_exec_ctx *exec_ctx, add_wire_value(st, value); } -static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor *c, - framer_state *st) { +static void emit_advertise_table_size_change(grpc_chttp2_hpack_compressor* c, + framer_state* st) { uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(c->max_table_size, 3); GRPC_CHTTP2_WRITE_VARINT(c->max_table_size, 3, 0x20, add_tiny_header_data(st, len), len); c->advertise_table_size_change = 0; } -static uint32_t dynidx(grpc_chttp2_hpack_compressor *c, uint32_t elem_index) { +static uint32_t dynidx(grpc_chttp2_hpack_compressor* c, uint32_t elem_index) { return 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY + c->tail_remote_index + c->table_elems - elem_index; } /* encode an mdelem */ -static void hpack_enc(grpc_exec_ctx 
*exec_ctx, grpc_chttp2_hpack_compressor *c, - grpc_mdelem elem, framer_state *st) { +static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem, + framer_state* st) { GPR_ASSERT(GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)) > 0); if (GRPC_SLICE_START_PTR(GRPC_MDKEY(elem))[0] != ':') { /* regular header */ st->seen_regular_header = 1; @@ -430,11 +461,16 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c, "Reserved header (colon-prefixed) happening after regular ones."); } - if (GRPC_TRACER_ON(grpc_http_trace) && !GRPC_MDELEM_IS_INTERNED(elem)) { - char *k = grpc_slice_to_c_string(GRPC_MDKEY(elem)); - char *v = grpc_slice_to_c_string(GRPC_MDVALUE(elem)); + if (grpc_http_trace.enabled()) { + char* k = grpc_slice_to_c_string(GRPC_MDKEY(elem)); + char* v = nullptr; + if (grpc_is_binary_header(GRPC_MDKEY(elem))) { + v = grpc_dump_slice(GRPC_MDVALUE(elem), GPR_DUMP_HEX); + } else { + v = grpc_slice_to_c_string(GRPC_MDVALUE(elem)); + } gpr_log( - GPR_DEBUG, + GPR_INFO, "Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d", k, v, GRPC_MDELEM_IS_INTERNED(elem), GRPC_MDELEM_STORAGE(elem), grpc_slice_is_interned(GRPC_MDKEY(elem)), @@ -442,64 +478,69 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c, gpr_free(k); gpr_free(v); } - if (!GRPC_MDELEM_IS_INTERNED(elem)) { - emit_lithdr_noidx_v(exec_ctx, c, elem, st); + + bool elem_interned = GRPC_MDELEM_IS_INTERNED(elem); + bool key_interned = elem_interned || grpc_slice_is_interned(GRPC_MDKEY(elem)); + + // Key is not interned, emit literals. + if (!key_interned) { + emit_lithdr_noidx_v(c, 0, elem, st); return; } - uint32_t key_hash; - uint32_t value_hash; - uint32_t elem_hash; - size_t decoder_space_usage; - uint32_t indices_key; - int should_add_elem; + uint32_t key_hash = grpc_slice_hash(GRPC_MDKEY(elem)); + uint32_t elem_hash = 0; - key_hash = grpc_slice_hash(GRPC_MDKEY(elem)); - value_hash = grpc_slice_hash(GRPC_MDVALUE(elem)); - elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash); + if (elem_interned) { + uint32_t value_hash = grpc_slice_hash(GRPC_MDVALUE(elem)); + elem_hash = GRPC_MDSTR_KV_HASH(key_hash, value_hash); - inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, c->filter_elems); + inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, + c->filter_elems); - /* is this elem currently in the decoders table? */ + /* is this elem currently in the decoders table? 
*/ - if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem) && - c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) { - /* HIT: complete element (first cuckoo hash) */ - emit_indexed(exec_ctx, c, - dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]), st); - return; - } + if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem) && + c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) { + /* HIT: complete element (first cuckoo hash) */ + emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]), + st); + return; + } - if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], elem) && - c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) { - /* HIT: complete element (second cuckoo hash) */ - emit_indexed(exec_ctx, c, - dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]), st); - return; + if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], elem) && + c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) { + /* HIT: complete element (second cuckoo hash) */ + emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]), + st); + return; + } } + uint32_t indices_key; + /* should this elem be in the table? */ - decoder_space_usage = grpc_mdelem_get_size_in_hpack_table(elem); - should_add_elem = decoder_space_usage < MAX_DECODER_SPACE_USAGE && - c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >= - c->filter_elems_sum / ONE_ON_ADD_PROBABILITY; + size_t decoder_space_usage = + grpc_mdelem_get_size_in_hpack_table(elem, st->use_true_binary_metadata); + bool should_add_elem = elem_interned && + decoder_space_usage < MAX_DECODER_SPACE_USAGE && + c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >= + c->filter_elems_sum / ONE_ON_ADD_PROBABILITY; + void (*maybe_add)(grpc_chttp2_hpack_compressor*, grpc_mdelem, size_t) = + should_add_elem ? add_elem : add_nothing; + void (*emit)(grpc_chttp2_hpack_compressor*, uint32_t, grpc_mdelem, + framer_state*) = + should_add_elem ? emit_lithdr_incidx : emit_lithdr_noidx; /* no hits for the elem... maybe there's a key? */ - indices_key = c->indices_keys[HASH_FRAGMENT_2(key_hash)]; if (grpc_slice_eq(c->entries_keys[HASH_FRAGMENT_2(key_hash)], GRPC_MDKEY(elem)) && indices_key > c->tail_remote_index) { /* HIT: key (first cuckoo hash) */ - if (should_add_elem) { - emit_lithdr_incidx(exec_ctx, c, dynidx(c, indices_key), elem, st); - add_elem(exec_ctx, c, elem); - return; - } else { - emit_lithdr_noidx(exec_ctx, c, dynidx(c, indices_key), elem, st); - return; - } - GPR_UNREACHABLE_CODE(return ); + emit(c, dynidx(c, indices_key), elem, st); + maybe_add(c, elem, decoder_space_usage); + return; } indices_key = c->indices_keys[HASH_FRAGMENT_3(key_hash)]; @@ -507,56 +548,47 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c, GRPC_MDKEY(elem)) && indices_key > c->tail_remote_index) { /* HIT: key (first cuckoo hash) */ - if (should_add_elem) { - emit_lithdr_incidx(exec_ctx, c, dynidx(c, indices_key), elem, st); - add_elem(exec_ctx, c, elem); - return; - } else { - emit_lithdr_noidx(exec_ctx, c, dynidx(c, indices_key), elem, st); - return; - } - GPR_UNREACHABLE_CODE(return ); + emit(c, dynidx(c, indices_key), elem, st); + maybe_add(c, elem, decoder_space_usage); + return; } /* no elem, key in the table... 
fall back to literal emission */ - - if (should_add_elem) { - emit_lithdr_incidx_v(exec_ctx, c, elem, st); - add_elem(exec_ctx, c, elem); - return; - } else { - emit_lithdr_noidx_v(exec_ctx, c, elem, st); - return; - } - GPR_UNREACHABLE_CODE(return ); + bool should_add_key = + !elem_interned && decoder_space_usage < MAX_DECODER_SPACE_USAGE; + emit = (should_add_elem || should_add_key) ? emit_lithdr_incidx_v + : emit_lithdr_noidx_v; + maybe_add = + should_add_elem ? add_elem : (should_add_key ? add_key : add_nothing); + emit(c, 0, elem, st); + maybe_add(c, elem, decoder_space_usage); } #define STRLEN_LIT(x) (sizeof(x) - 1) #define TIMEOUT_KEY "grpc-timeout" -static void deadline_enc(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c, gpr_timespec deadline, - framer_state *st) { +static void deadline_enc(grpc_chttp2_hpack_compressor* c, grpc_millis deadline, + framer_state* st) { char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE]; grpc_mdelem mdelem; - grpc_http2_encode_timeout( - gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str); - mdelem = grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_TIMEOUT, + grpc_http2_encode_timeout(deadline - grpc_core::ExecCtx::Get()->Now(), + timeout_str); + mdelem = grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_TIMEOUT, grpc_slice_from_copied_string(timeout_str)); - hpack_enc(exec_ctx, c, mdelem, st); - GRPC_MDELEM_UNREF(exec_ctx, mdelem); + hpack_enc(c, mdelem, st); + GRPC_MDELEM_UNREF(mdelem); } static uint32_t elems_for_bytes(uint32_t bytes) { return (bytes + 31) / 32; } -void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) { +void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor* c) { memset(c, 0, sizeof(*c)); c->max_table_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE; c->cap_table_elems = elems_for_bytes(c->max_table_size); c->max_table_elems = c->cap_table_elems; c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE; - c->table_elem_size = - (uint16_t *)gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems); + c->table_elem_size = static_cast( + gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems)); memset(c->table_elem_size, 0, sizeof(*c->table_elem_size) * c->cap_table_elems); for (size_t i = 0; i < GPR_ARRAY_SIZE(c->entries_keys); i++) { @@ -564,28 +596,27 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) { } } -void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c) { +void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor* c) { int i; for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) { if (c->entries_keys[i].refcount != &terminal_slice_refcount) { - grpc_slice_unref_internal(exec_ctx, c->entries_keys[i]); + grpc_slice_unref_internal(c->entries_keys[i]); } - GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[i]); + GRPC_MDELEM_UNREF(c->entries_elems[i]); } gpr_free(c->table_elem_size); } void grpc_chttp2_hpack_compressor_set_max_usable_size( - grpc_chttp2_hpack_compressor *c, uint32_t max_table_size) { + grpc_chttp2_hpack_compressor* c, uint32_t max_table_size) { c->max_usable_size = max_table_size; grpc_chttp2_hpack_compressor_set_max_table_size( c, GPR_MIN(c->max_table_size, max_table_size)); } -static void rebuild_elems(grpc_chttp2_hpack_compressor *c, uint32_t new_cap) { - uint16_t *table_elem_size = - (uint16_t *)gpr_malloc(sizeof(*table_elem_size) * new_cap); +static void rebuild_elems(grpc_chttp2_hpack_compressor* c, uint32_t new_cap) { + uint16_t* table_elem_size = + 
static_cast(gpr_malloc(sizeof(*table_elem_size) * new_cap)); uint32_t i; memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap); @@ -603,7 +634,7 @@ static void rebuild_elems(grpc_chttp2_hpack_compressor *c, uint32_t new_cap) { } void grpc_chttp2_hpack_compressor_set_max_table_size( - grpc_chttp2_hpack_compressor *c, uint32_t max_table_size) { + grpc_chttp2_hpack_compressor* c, uint32_t max_table_size) { max_table_size = GPR_MIN(max_table_size, c->max_usable_size); if (max_table_size == c->max_table_size) { return; @@ -622,18 +653,17 @@ void grpc_chttp2_hpack_compressor_set_max_table_size( } } c->advertise_table_size_change = 1; - if (GRPC_TRACER_ON(grpc_http_trace)) { - gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size); + if (grpc_http_trace.enabled()) { + gpr_log(GPR_INFO, "set max table size from encoder to %d", max_table_size); } } -void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c, - grpc_mdelem **extra_headers, +void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor* c, + grpc_mdelem** extra_headers, size_t extra_headers_size, - grpc_metadata_batch *metadata, - const grpc_encode_header_options *options, - grpc_slice_buffer *outbuf) { + grpc_metadata_batch* metadata, + const grpc_encode_header_options* options, + grpc_slice_buffer* outbuf) { GPR_ASSERT(options->stream_id != 0); framer_state st; @@ -654,15 +684,15 @@ void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx, emit_advertise_table_size_change(c, &st); } for (size_t i = 0; i < extra_headers_size; ++i) { - hpack_enc(exec_ctx, c, *extra_headers[i], &st); + hpack_enc(c, *extra_headers[i], &st); } grpc_metadata_batch_assert_ok(metadata); - for (grpc_linked_mdelem *l = metadata->list.head; l; l = l->next) { - hpack_enc(exec_ctx, c, l->md, &st); + for (grpc_linked_mdelem* l = metadata->list.head; l; l = l->next) { + hpack_enc(c, l->md, &st); } - gpr_timespec deadline = metadata->deadline; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) { - deadline_enc(exec_ctx, c, deadline, &st); + grpc_millis deadline = metadata->deadline; + if (deadline != GRPC_MILLIS_INF_FUTURE) { + deadline_enc(c, deadline, &st); } finish_frame(&st, 1, options->is_eof); diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.h index 271192f89..b37093213 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_encoder.h @@ -19,9 +19,10 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H +#include + #include #include -#include #include "src/core/ext/transport/chttp2/transport/frame.h" #include "src/core/lib/transport/metadata.h" #include "src/core/lib/transport/metadata_batch.h" @@ -34,6 +35,8 @@ /* maximum table size we'll actually use */ #define GRPC_CHTTP2_HPACKC_MAX_TABLE_SIZE (1024 * 1024) +extern grpc_core::TraceFlag grpc_http_trace; + typedef struct { uint32_t filter_elems_sum; uint32_t max_table_size; @@ -64,31 +67,29 @@ typedef struct { uint32_t indices_keys[GRPC_CHTTP2_HPACKC_NUM_VALUES]; uint32_t indices_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES]; - uint16_t *table_elem_size; + uint16_t* table_elem_size; } grpc_chttp2_hpack_compressor; -void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c); -void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx *exec_ctx, - 
grpc_chttp2_hpack_compressor *c); +void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor* c); +void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor* c); void grpc_chttp2_hpack_compressor_set_max_table_size( - grpc_chttp2_hpack_compressor *c, uint32_t max_table_size); + grpc_chttp2_hpack_compressor* c, uint32_t max_table_size); void grpc_chttp2_hpack_compressor_set_max_usable_size( - grpc_chttp2_hpack_compressor *c, uint32_t max_table_size); + grpc_chttp2_hpack_compressor* c, uint32_t max_table_size); typedef struct { uint32_t stream_id; bool is_eof; bool use_true_binary_metadata; size_t max_frame_size; - grpc_transport_one_way_stats *stats; + grpc_transport_one_way_stats* stats; } grpc_encode_header_options; -void grpc_chttp2_encode_header(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_compressor *c, - grpc_mdelem **extra_headers, +void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor* c, + grpc_mdelem** extra_headers, size_t extra_headers_size, - grpc_metadata_batch *metadata, - const grpc_encode_header_options *options, - grpc_slice_buffer *outbuf); + grpc_metadata_batch* metadata, + const grpc_encode_header_options* options, + grpc_slice_buffer* outbuf); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.cc similarity index 65% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.cc index 3d1df19bc..907ba7117 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/hpack_parser.h" #include "src/core/ext/transport/chttp2/transport/internal.h" @@ -25,15 +27,14 @@ #include #include -#include #include -#include #include "src/core/ext/transport/chttp2/transport/bin_encoder.h" #include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/string.h" +#include "src/core/lib/slice/slice_string_helpers.h" #include "src/core/lib/transport/http2_errors.h" typedef enum { @@ -60,97 +61,70 @@ typedef enum { a set of indirect jumps, and so not waste stack space. 
*/ /* forward declarations for parsing states */ -static grpc_error *parse_begin(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_error(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end, grpc_error *error); -static grpc_error *still_parse_error(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); -static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); - -static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); -static grpc_error *parse_key_string(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); -static grpc_error *parse_value_string_with_indexed_key( - grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_value_string_with_literal_key( - grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end); - -static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_value1(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_value2(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_value3(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); - -static grpc_error *parse_indexed_field(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); -static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_lithdr_incidx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); -static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_lithdr_incidx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_lithdr_notidx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); -static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_lithdr_notidx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_lithdr_nvridx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); -static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end); -static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end); -static 
grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); -static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end); +static grpc_error* parse_begin(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_error(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end, grpc_error* error); +static grpc_error* still_parse_error(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); +static grpc_error* parse_illegal_op(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); + +static grpc_error* parse_string_prefix(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); +static grpc_error* parse_key_string(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); +static grpc_error* parse_value_string_with_indexed_key( + grpc_chttp2_hpack_parser* p, const uint8_t* cur, const uint8_t* end); +static grpc_error* parse_value_string_with_literal_key( + grpc_chttp2_hpack_parser* p, const uint8_t* cur, const uint8_t* end); + +static grpc_error* parse_value0(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_value1(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_value2(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_value3(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_value4(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_value5up(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); + +static grpc_error* parse_indexed_field(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); +static grpc_error* parse_indexed_field_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_lithdr_incidx(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); +static grpc_error* parse_lithdr_incidx_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_lithdr_incidx_v(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_lithdr_notidx(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); +static grpc_error* parse_lithdr_notidx_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_lithdr_notidx_v(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_lithdr_nvridx(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); +static grpc_error* parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end); +static grpc_error* parse_max_tbl_size(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); +static grpc_error* parse_max_tbl_size_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end); /* we translate the first byte of a hpack field into one of these decoding cases, then use a lookup table to jump directly to the appropriate parser. 
@@ -648,13 +622,18 @@ static const uint8_t inverse_base64[256] = { }; /* emission helpers */ -static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, - grpc_mdelem md, int add_to_table) { - if (GRPC_TRACER_ON(grpc_http_trace) && !GRPC_MDELEM_IS_INTERNED(md)) { - char *k = grpc_slice_to_c_string(GRPC_MDKEY(md)); - char *v = grpc_slice_to_c_string(GRPC_MDVALUE(md)); +static grpc_error* on_hdr(grpc_chttp2_hpack_parser* p, grpc_mdelem md, + int add_to_table) { + if (grpc_http_trace.enabled()) { + char* k = grpc_slice_to_c_string(GRPC_MDKEY(md)); + char* v = nullptr; + if (grpc_is_binary_header(GRPC_MDKEY(md))) { + v = grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX); + } else { + v = grpc_slice_to_c_string(GRPC_MDVALUE(md)); + } gpr_log( - GPR_DEBUG, + GPR_INFO, "Decode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d", k, v, GRPC_MDELEM_IS_INTERNED(md), GRPC_MDELEM_STORAGE(md), grpc_slice_is_interned(GRPC_MDKEY(md)), @@ -665,26 +644,25 @@ static grpc_error *on_hdr(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, if (add_to_table) { GPR_ASSERT(GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_INTERNED || GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_STATIC); - grpc_error *err = grpc_chttp2_hptbl_add(exec_ctx, &p->table, md); + grpc_error* err = grpc_chttp2_hptbl_add(&p->table, md); if (err != GRPC_ERROR_NONE) return err; } - if (p->on_header == NULL) { - GRPC_MDELEM_UNREF(exec_ctx, md); + if (p->on_header == nullptr) { + GRPC_MDELEM_UNREF(md); return GRPC_ERROR_CREATE_FROM_STATIC_STRING("on_header callback not set"); } - p->on_header(exec_ctx, p->on_header_user_data, md); + p->on_header(p->on_header_user_data, md); return GRPC_ERROR_NONE; } -static grpc_slice take_string(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - grpc_chttp2_hpack_parser_string *str, +static grpc_slice take_string(grpc_chttp2_hpack_parser* p, + grpc_chttp2_hpack_parser_string* str, bool intern) { grpc_slice s; if (!str->copied) { if (intern) { s = grpc_slice_intern(str->data.referenced); - grpc_slice_unref_internal(exec_ctx, str->data.referenced); + grpc_slice_unref_internal(str->data.referenced); } else { s = str->data.referenced; } @@ -702,174 +680,161 @@ static grpc_slice take_string(grpc_exec_ctx *exec_ctx, } /* jump to the next state */ -static grpc_error *parse_next(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_next(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end) { p->state = *p->next_state++; - return p->state(exec_ctx, p, cur, end); + return p->state(p, cur, end); } /* begin parsing a header: all functionality is encoded into lookup tables above */ -static grpc_error *parse_begin(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_begin(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end) { if (cur == end) { p->state = parse_begin; return GRPC_ERROR_NONE; } - return first_byte_action[first_byte_lut[*cur]](exec_ctx, p, cur, end); + return first_byte_action[first_byte_lut[*cur]](p, cur, end); } /* stream dependency and prioritization data: we just skip it */ -static grpc_error *parse_stream_weight(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_stream_weight(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { if (cur == end) { p->state = parse_stream_weight; return 
GRPC_ERROR_NONE; } - return p->after_prioritization(exec_ctx, p, cur + 1, end); + return p->after_prioritization(p, cur + 1, end); } -static grpc_error *parse_stream_dep3(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_stream_dep3(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { if (cur == end) { p->state = parse_stream_dep3; return GRPC_ERROR_NONE; } - return parse_stream_weight(exec_ctx, p, cur + 1, end); + return parse_stream_weight(p, cur + 1, end); } -static grpc_error *parse_stream_dep2(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_stream_dep2(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { if (cur == end) { p->state = parse_stream_dep2; return GRPC_ERROR_NONE; } - return parse_stream_dep3(exec_ctx, p, cur + 1, end); + return parse_stream_dep3(p, cur + 1, end); } -static grpc_error *parse_stream_dep1(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_stream_dep1(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { if (cur == end) { p->state = parse_stream_dep1; return GRPC_ERROR_NONE; } - return parse_stream_dep2(exec_ctx, p, cur + 1, end); + return parse_stream_dep2(p, cur + 1, end); } -static grpc_error *parse_stream_dep0(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_stream_dep0(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { if (cur == end) { p->state = parse_stream_dep0; return GRPC_ERROR_NONE; } - return parse_stream_dep1(exec_ctx, p, cur + 1, end); + return parse_stream_dep1(p, cur + 1, end); } /* emit an indexed field; jumps to begin the next field on completion */ -static grpc_error *finish_indexed_field(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* finish_indexed_field(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index); if (GRPC_MDISNULL(md)) { return grpc_error_set_int( grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Invalid HPACK index received"), - GRPC_ERROR_INT_INDEX, (intptr_t)p->index), - GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents); + GRPC_ERROR_INT_INDEX, + static_cast(p->index)), + GRPC_ERROR_INT_SIZE, static_cast(p->table.num_ents)); } GRPC_MDELEM_REF(md); - GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx); - grpc_error *err = on_hdr(exec_ctx, p, md, 0); + GRPC_STATS_INC_HPACK_RECV_INDEXED(); + grpc_error* err = on_hdr(p, md, 0); if (err != GRPC_ERROR_NONE) return err; - return parse_begin(exec_ctx, p, cur, end); + return parse_begin(p, cur, end); } /* parse an indexed field with index < 127 */ -static grpc_error *parse_indexed_field(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_indexed_field(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { p->dynamic_table_update_allowed = 0; p->index = (*cur) & 0x7f; - return finish_indexed_field(exec_ctx, p, cur + 1, end); + return finish_indexed_field(p, cur + 1, end); } /* parse an indexed field with index >= 127 */ -static grpc_error *parse_indexed_field_x(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) 
{ +static grpc_error* parse_indexed_field_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { finish_indexed_field}; p->dynamic_table_update_allowed = 0; p->next_state = and_then; p->index = 0x7f; p->parsing.value = &p->index; - return parse_value0(exec_ctx, p, cur + 1, end); + return parse_value0(p, cur + 1, end); } /* finish a literal header with incremental indexing */ -static grpc_error *finish_lithdr_incidx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* finish_lithdr_incidx(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index); GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */ - GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx); - grpc_error *err = on_hdr( - exec_ctx, p, - grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)), - take_string(exec_ctx, p, &p->value, true)), - 1); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - return parse_begin(exec_ctx, p, cur, end); + GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(); + grpc_error* err = + on_hdr(p, + grpc_mdelem_from_slices(grpc_slice_ref_internal(GRPC_MDKEY(md)), + take_string(p, &p->value, true)), + 1); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + return parse_begin(p, cur, end); } /* finish a literal header with incremental indexing with no index */ -static grpc_error *finish_lithdr_incidx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { - GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx); - grpc_error *err = on_hdr( - exec_ctx, p, - grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true), - take_string(exec_ctx, p, &p->value, true)), - 1); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - return parse_begin(exec_ctx, p, cur, end); +static grpc_error* finish_lithdr_incidx_v(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { + GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(); + grpc_error* err = + on_hdr(p, + grpc_mdelem_from_slices(take_string(p, &p->key, true), + take_string(p, &p->value, true)), + 1); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + return parse_begin(p, cur, end); } /* parse a literal header with incremental indexing; index < 63 */ -static grpc_error *parse_lithdr_incidx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_lithdr_incidx(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { parse_value_string_with_indexed_key, finish_lithdr_incidx}; p->dynamic_table_update_allowed = 0; p->next_state = and_then; p->index = (*cur) & 0x3f; - return parse_string_prefix(exec_ctx, p, cur + 1, end); + return parse_string_prefix(p, cur + 1, end); } /* parse a literal header with incremental indexing; index >= 63 */ -static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_lithdr_incidx_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { parse_string_prefix, parse_value_string_with_indexed_key, finish_lithdr_incidx}; 
@@ -877,71 +842,66 @@ static grpc_error *parse_lithdr_incidx_x(grpc_exec_ctx *exec_ctx, p->next_state = and_then; p->index = 0x3f; p->parsing.value = &p->index; - return parse_value0(exec_ctx, p, cur + 1, end); + return parse_value0(p, cur + 1, end); } /* parse a literal header with incremental indexing; index = 0 */ -static grpc_error *parse_lithdr_incidx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_lithdr_incidx_v(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { parse_key_string, parse_string_prefix, parse_value_string_with_literal_key, finish_lithdr_incidx_v}; p->dynamic_table_update_allowed = 0; p->next_state = and_then; - return parse_string_prefix(exec_ctx, p, cur + 1, end); + return parse_string_prefix(p, cur + 1, end); } /* finish a literal header without incremental indexing */ -static grpc_error *finish_lithdr_notidx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* finish_lithdr_notidx(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index); GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */ - GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx); - grpc_error *err = on_hdr( - exec_ctx, p, - grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)), - take_string(exec_ctx, p, &p->value, false)), - 0); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - return parse_begin(exec_ctx, p, cur, end); + GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(); + grpc_error* err = + on_hdr(p, + grpc_mdelem_from_slices(grpc_slice_ref_internal(GRPC_MDKEY(md)), + take_string(p, &p->value, false)), + 0); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + return parse_begin(p, cur, end); } /* finish a literal header without incremental indexing with index = 0 */ -static grpc_error *finish_lithdr_notidx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { - GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx); - grpc_error *err = on_hdr( - exec_ctx, p, - grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true), - take_string(exec_ctx, p, &p->value, false)), - 0); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - return parse_begin(exec_ctx, p, cur, end); +static grpc_error* finish_lithdr_notidx_v(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { + GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(); + grpc_error* err = + on_hdr(p, + grpc_mdelem_from_slices(take_string(p, &p->key, true), + take_string(p, &p->value, false)), + 0); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + return parse_begin(p, cur, end); } /* parse a literal header without incremental indexing; index < 15 */ -static grpc_error *parse_lithdr_notidx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_lithdr_notidx(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { parse_value_string_with_indexed_key, finish_lithdr_notidx}; p->dynamic_table_update_allowed = 0; p->next_state = and_then; p->index = (*cur) & 0xf; - return parse_string_prefix(exec_ctx, p, cur + 1, end); 
+ return parse_string_prefix(p, cur + 1, end); } /* parse a literal header without incremental indexing; index >= 15 */ -static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_lithdr_notidx_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { parse_string_prefix, parse_value_string_with_indexed_key, finish_lithdr_notidx}; @@ -949,71 +909,66 @@ static grpc_error *parse_lithdr_notidx_x(grpc_exec_ctx *exec_ctx, p->next_state = and_then; p->index = 0xf; p->parsing.value = &p->index; - return parse_value0(exec_ctx, p, cur + 1, end); + return parse_value0(p, cur + 1, end); } /* parse a literal header without incremental indexing; index == 0 */ -static grpc_error *parse_lithdr_notidx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_lithdr_notidx_v(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { parse_key_string, parse_string_prefix, parse_value_string_with_literal_key, finish_lithdr_notidx_v}; p->dynamic_table_update_allowed = 0; p->next_state = and_then; - return parse_string_prefix(exec_ctx, p, cur + 1, end); + return parse_string_prefix(p, cur + 1, end); } /* finish a literal header that is never indexed */ -static grpc_error *finish_lithdr_nvridx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* finish_lithdr_nvridx(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index); GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */ - GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx); - grpc_error *err = on_hdr( - exec_ctx, p, - grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)), - take_string(exec_ctx, p, &p->value, false)), - 0); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - return parse_begin(exec_ctx, p, cur, end); + GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(); + grpc_error* err = + on_hdr(p, + grpc_mdelem_from_slices(grpc_slice_ref_internal(GRPC_MDKEY(md)), + take_string(p, &p->value, false)), + 0); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + return parse_begin(p, cur, end); } /* finish a literal header that is never indexed with an extra value */ -static grpc_error *finish_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { - GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx); - grpc_error *err = on_hdr( - exec_ctx, p, - grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true), - take_string(exec_ctx, p, &p->value, false)), - 0); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - return parse_begin(exec_ctx, p, cur, end); +static grpc_error* finish_lithdr_nvridx_v(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { + GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(); + grpc_error* err = + on_hdr(p, + grpc_mdelem_from_slices(take_string(p, &p->key, true), + take_string(p, &p->value, false)), + 0); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + return parse_begin(p, cur, end); } /* parse a literal header that is never indexed; index < 15 */ -static 
grpc_error *parse_lithdr_nvridx(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_lithdr_nvridx(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { parse_value_string_with_indexed_key, finish_lithdr_nvridx}; p->dynamic_table_update_allowed = 0; p->next_state = and_then; p->index = (*cur) & 0xf; - return parse_string_prefix(exec_ctx, p, cur + 1, end); + return parse_string_prefix(p, cur + 1, end); } /* parse a literal header that is never indexed; index >= 15 */ -static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { parse_string_prefix, parse_value_string_with_indexed_key, finish_lithdr_nvridx}; @@ -1021,60 +976,56 @@ static grpc_error *parse_lithdr_nvridx_x(grpc_exec_ctx *exec_ctx, p->next_state = and_then; p->index = 0xf; p->parsing.value = &p->index; - return parse_value0(exec_ctx, p, cur + 1, end); + return parse_value0(p, cur + 1, end); } /* parse a literal header that is never indexed; index == 0 */ -static grpc_error *parse_lithdr_nvridx_v(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { parse_key_string, parse_string_prefix, parse_value_string_with_literal_key, finish_lithdr_nvridx_v}; p->dynamic_table_update_allowed = 0; p->next_state = and_then; - return parse_string_prefix(exec_ctx, p, cur + 1, end); + return parse_string_prefix(p, cur + 1, end); } /* finish parsing a max table size change */ -static grpc_error *finish_max_tbl_size(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { - if (GRPC_TRACER_ON(grpc_http_trace)) { +static grpc_error* finish_max_tbl_size(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { + if (grpc_http_trace.enabled()) { gpr_log(GPR_INFO, "MAX TABLE SIZE: %d", p->index); } - grpc_error *err = - grpc_chttp2_hptbl_set_current_table_size(exec_ctx, &p->table, p->index); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - return parse_begin(exec_ctx, p, cur, end); + grpc_error* err = + grpc_chttp2_hptbl_set_current_table_size(&p->table, p->index); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + return parse_begin(p, cur, end); } /* parse a max table size change, max size < 15 */ -static grpc_error *parse_max_tbl_size(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_max_tbl_size(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { if (p->dynamic_table_update_allowed == 0) { return parse_error( - exec_ctx, p, cur, end, + p, cur, end, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "More than two max table size changes in a single frame")); } p->dynamic_table_update_allowed--; p->index = (*cur) & 0x1f; - return finish_max_tbl_size(exec_ctx, p, cur + 1, end); + return finish_max_tbl_size(p, cur + 1, end); } /* parse a max table size change, max size >= 15 */ -static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx, 
- grpc_chttp2_hpack_parser *p, - const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_max_tbl_size_x(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, + const uint8_t* end) { static const grpc_chttp2_hpack_parser_state and_then[] = { finish_max_tbl_size}; if (p->dynamic_table_update_allowed == 0) { return parse_error( - exec_ctx, p, cur, end, + p, cur, end, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "More than two max table size changes in a single frame")); } @@ -1082,13 +1033,12 @@ static grpc_error *parse_max_tbl_size_x(grpc_exec_ctx *exec_ctx, p->next_state = and_then; p->index = 0x1f; p->parsing.value = &p->index; - return parse_value0(exec_ctx, p, cur + 1, end); + return parse_value0(p, cur + 1, end); } /* a parse error: jam the parse state into parse_error, and return error */ -static grpc_error *parse_error(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end, grpc_error *err) { +static grpc_error* parse_error(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end, grpc_error* err) { GPR_ASSERT(err != GRPC_ERROR_NONE); if (p->last_error == GRPC_ERROR_NONE) { p->last_error = GRPC_ERROR_REF(err); @@ -1097,28 +1047,25 @@ static grpc_error *parse_error(grpc_exec_ctx *exec_ctx, return err; } -static grpc_error *still_parse_error(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* still_parse_error(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { return GRPC_ERROR_REF(p->last_error); } -static grpc_error *parse_illegal_op(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_illegal_op(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { GPR_ASSERT(cur != end); - char *msg; + char* msg; gpr_asprintf(&msg, "Illegal hpack op code %d", *cur); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); - return parse_error(exec_ctx, p, cur, end, err); + return parse_error(p, cur, end, err); } /* parse the 1st byte of a varint into p->parsing.value no overflow is possible */ -static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_value0(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end) { if (cur == end) { p->state = parse_value0; return GRPC_ERROR_NONE; @@ -1127,78 +1074,74 @@ static grpc_error *parse_value0(grpc_exec_ctx *exec_ctx, *p->parsing.value += (*cur) & 0x7f; if ((*cur) & 0x80) { - return parse_value1(exec_ctx, p, cur + 1, end); + return parse_value1(p, cur + 1, end); } else { - return parse_next(exec_ctx, p, cur + 1, end); + return parse_next(p, cur + 1, end); } } /* parse the 2nd byte of a varint into p->parsing.value no overflow is possible */ -static grpc_error *parse_value1(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_value1(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end) { if (cur == end) { p->state = parse_value1; return GRPC_ERROR_NONE; } - *p->parsing.value += (((uint32_t)*cur) & 0x7f) << 7; + *p->parsing.value += ((static_cast(*cur)) & 0x7f) << 7; if ((*cur) & 0x80) { - return parse_value2(exec_ctx, p, cur + 1, end); + return parse_value2(p, cur + 1, end); } else { - return parse_next(exec_ctx, p, cur + 1, end); + 
return parse_next(p, cur + 1, end); } } /* parse the 3rd byte of a varint into p->parsing.value no overflow is possible */ -static grpc_error *parse_value2(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_value2(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end) { if (cur == end) { p->state = parse_value2; return GRPC_ERROR_NONE; } - *p->parsing.value += (((uint32_t)*cur) & 0x7f) << 14; + *p->parsing.value += ((static_cast<uint32_t>(*cur)) & 0x7f) << 14; if ((*cur) & 0x80) { - return parse_value3(exec_ctx, p, cur + 1, end); + return parse_value3(p, cur + 1, end); } else { - return parse_next(exec_ctx, p, cur + 1, end); + return parse_next(p, cur + 1, end); } } /* parse the 4th byte of a varint into p->parsing.value no overflow is possible */ -static grpc_error *parse_value3(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_value3(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end) { if (cur == end) { p->state = parse_value3; return GRPC_ERROR_NONE; } - *p->parsing.value += (((uint32_t)*cur) & 0x7f) << 21; + *p->parsing.value += ((static_cast<uint32_t>(*cur)) & 0x7f) << 21; if ((*cur) & 0x80) { - return parse_value4(exec_ctx, p, cur + 1, end); + return parse_value4(p, cur + 1, end); } else { - return parse_next(exec_ctx, p, cur + 1, end); + return parse_next(p, cur + 1, end); } } /* parse the 5th byte of a varint into p->parsing.value depending on the byte, we may overflow, and care must be taken */ -static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_value4(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end) { uint8_t c; uint32_t cur_value; uint32_t add_value; - char *msg; + char* msg; if (cur == end) { p->state = parse_value4; @@ -1211,7 +1154,7 @@ static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx, } cur_value = *p->parsing.value; - add_value = ((uint32_t)c) << 28; + add_value = (static_cast<uint32_t>(c)) << 28; if (add_value > 0xffffffffu - cur_value) { goto error; } @@ -1219,9 +1162,9 @@ static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx, *p->parsing.value = cur_value + add_value; if ((*cur) & 0x80) { - return parse_value5up(exec_ctx, p, cur + 1, end); + return parse_value5up(p, cur + 1, end); } else { - return parse_next(exec_ctx, p, cur + 1, end); + return parse_next(p, cur + 1, end); } error: @@ -1229,17 +1172,16 @@ static grpc_error *parse_value4(grpc_exec_ctx *exec_ctx, "integer overflow in hpack integer decoding: have 0x%08x, " "got byte 0x%02x on byte 5", *p->parsing.value, *cur); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); - return parse_error(exec_ctx, p, cur, end, err); + return parse_error(p, cur, end, err); } /* parse any trailing bytes in a varint: it's possible to append an arbitrary number of 0x80's and not affect the value - a zero will terminate - and anything else will overflow */ -static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_value5up(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { while (cur != end && *cur == 0x80) { ++cur; } @@ -1250,23 +1192,22 @@ static grpc_error *parse_value5up(grpc_exec_ctx *exec_ctx, } if (*cur == 0) { - return 
parse_next(exec_ctx, p, cur + 1, end); + return parse_next(p, cur + 1, end); } - char *msg; + char* msg; gpr_asprintf(&msg, "integer overflow in hpack integer decoding: have 0x%08x, " "got byte 0x%02x sometime after byte 5", *p->parsing.value, *cur); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); - return parse_error(exec_ctx, p, cur, end, err); + return parse_error(p, cur, end, err); } /* parse a string prefix */ -static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* parse_string_prefix(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { if (cur == end) { p->state = parse_string_prefix; return GRPC_ERROR_NONE; @@ -1276,36 +1217,36 @@ static grpc_error *parse_string_prefix(grpc_exec_ctx *exec_ctx, p->huff = (*cur) >> 7; if (p->strlen == 0x7f) { p->parsing.value = &p->strlen; - return parse_value0(exec_ctx, p, cur + 1, end); + return parse_value0(p, cur + 1, end); } else { - return parse_next(exec_ctx, p, cur + 1, end); + return parse_next(p, cur + 1, end); } } /* append some bytes to a string */ -static void append_bytes(grpc_chttp2_hpack_parser_string *str, - const uint8_t *data, size_t length) { +static void append_bytes(grpc_chttp2_hpack_parser_string* str, + const uint8_t* data, size_t length) { if (length == 0) return; if (length + str->data.copied.length > str->data.copied.capacity) { GPR_ASSERT(str->data.copied.length + length <= UINT32_MAX); - str->data.copied.capacity = (uint32_t)(str->data.copied.length + length); - str->data.copied.str = - (char *)gpr_realloc(str->data.copied.str, str->data.copied.capacity); + str->data.copied.capacity = + static_cast<uint32_t>(str->data.copied.length + length); + str->data.copied.str = static_cast<char*>( + gpr_realloc(str->data.copied.str, str->data.copied.capacity)); } memcpy(str->data.copied.str + str->data.copied.length, data, length); GPR_ASSERT(length <= UINT32_MAX - str->data.copied.length); - str->data.copied.length += (uint32_t)length; + str->data.copied.length += static_cast<uint32_t>(length); } -static grpc_error *append_string(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { - grpc_chttp2_hpack_parser_string *str = p->parsing.str; +static grpc_error* append_string(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { + grpc_chttp2_hpack_parser_string* str = p->parsing.str; uint32_t bits; uint8_t decoded[3]; - switch ((binary_state)p->binary) { + switch (static_cast<binary_state>(p->binary)) { case NOT_BINARY: - append_bytes(str, cur, (size_t)(end - cur)); + append_bytes(str, cur, static_cast<size_t>(end - cur)); return GRPC_ERROR_NONE; case BINARY_BEGIN: if (cur == end) { @@ -1316,11 +1257,11 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx, /* 'true-binary' case */ ++cur; p->binary = NOT_BINARY; - GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx); - append_bytes(str, cur, (size_t)(end - cur)); + GRPC_STATS_INC_HPACK_RECV_BINARY(); + append_bytes(str, cur, static_cast<size_t>(end - cur)); return GRPC_ERROR_NONE; } - GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx); + GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(); /* fallthrough */ b64_byte0: case B64_BYTE0: @@ -1332,7 +1273,7 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx, ++cur; if (bits == 255) return parse_error( - exec_ctx, p, cur, end, + p, cur, end, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character")); else if (bits == 
64) goto b64_byte0; @@ -1348,7 +1289,7 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx, ++cur; if (bits == 255) return parse_error( - exec_ctx, p, cur, end, + p, cur, end, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character")); else if (bits == 64) goto b64_byte1; @@ -1364,7 +1305,7 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx, ++cur; if (bits == 255) return parse_error( - exec_ctx, p, cur, end, + p, cur, end, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character")); else if (bits == 64) goto b64_byte2; @@ -1380,30 +1321,29 @@ static grpc_error *append_string(grpc_exec_ctx *exec_ctx, ++cur; if (bits == 255) return parse_error( - exec_ctx, p, cur, end, + p, cur, end, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character")); else if (bits == 64) goto b64_byte3; p->base64_buffer |= bits; bits = p->base64_buffer; - decoded[0] = (uint8_t)(bits >> 16); - decoded[1] = (uint8_t)(bits >> 8); - decoded[2] = (uint8_t)(bits); + decoded[0] = static_cast<uint8_t>(bits >> 16); + decoded[1] = static_cast<uint8_t>(bits >> 8); + decoded[2] = static_cast<uint8_t>(bits); append_bytes(str, decoded, 3); goto b64_byte0; } GPR_UNREACHABLE_CODE(return parse_error( - exec_ctx, p, cur, end, + p, cur, end, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here"))); } -static grpc_error *finish_str(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* finish_str(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end) { uint8_t decoded[2]; uint32_t bits; - grpc_chttp2_hpack_parser_string *str = p->parsing.str; - switch ((binary_state)p->binary) { + grpc_chttp2_hpack_parser_string* str = p->parsing.str; + switch (static_cast<binary_state>(p->binary)) { case NOT_BINARY: break; case BINARY_BEGIN: @@ -1411,34 +1351,34 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx, case B64_BYTE0: break; case B64_BYTE1: - return parse_error(exec_ctx, p, cur, end, + return parse_error(p, cur, end, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "illegal base64 encoding")); /* illegal encoding */ case B64_BYTE2: bits = p->base64_buffer; if (bits & 0xffff) { - char *msg; + char* msg; gpr_asprintf(&msg, "trailing bits in base64 encoding: 0x%04x", bits & 0xffff); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); - return parse_error(exec_ctx, p, cur, end, err); + return parse_error(p, cur, end, err); } - decoded[0] = (uint8_t)(bits >> 16); + decoded[0] = static_cast<uint8_t>(bits >> 16); append_bytes(str, decoded, 1); break; case B64_BYTE3: bits = p->base64_buffer; if (bits & 0xff) { - char *msg; + char* msg; gpr_asprintf(&msg, "trailing bits in base64 encoding: 0x%02x", bits & 0xff); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); - return parse_error(exec_ctx, p, cur, end, err); + return parse_error(p, cur, end, err); } - decoded[0] = (uint8_t)(bits >> 16); - decoded[1] = (uint8_t)(bits >> 8); + decoded[0] = static_cast<uint8_t>(bits >> 16); + decoded[1] = static_cast<uint8_t>(bits >> 8); append_bytes(str, decoded, 2); break; } @@ -1446,14 +1386,13 @@ static grpc_error *finish_str(grpc_exec_ctx *exec_ctx, } /* decode a nibble from a huffman encoded stream */ -static grpc_error *huff_nibble(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, uint8_t nibble) { +static grpc_error* huff_nibble(grpc_chttp2_hpack_parser* p, uint8_t nibble) { int16_t emit = emit_sub_tbl[16 * 
emit_tbl[p->huff_state] + nibble]; int16_t next = next_sub_tbl[16 * next_tbl[p->huff_state] + nibble]; if (emit != -1) { if (emit >= 0 && emit < 256) { - uint8_t c = (uint8_t)emit; - grpc_error *err = append_string(exec_ctx, p, &c, (&c) + 1); + uint8_t c = static_cast<uint8_t>(emit); + grpc_error* err = append_string(p, &c, (&c) + 1); if (err != GRPC_ERROR_NONE) return err; } else { assert(emit == 256); @@ -1464,67 +1403,64 @@ static grpc_error *huff_nibble(grpc_exec_ctx *exec_ctx, } /* decode full bytes from a huffman encoded stream */ -static grpc_error *add_huff_bytes(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* add_huff_bytes(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { for (; cur != end; ++cur) { - grpc_error *err = huff_nibble(exec_ctx, p, *cur >> 4); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - err = huff_nibble(exec_ctx, p, *cur & 0xf); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); + grpc_error* err = huff_nibble(p, *cur >> 4); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + err = huff_nibble(p, *cur & 0xf); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); } return GRPC_ERROR_NONE; } /* decode some string bytes based on the current decoding mode (huffman or not) */ -static grpc_error *add_str_bytes(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { +static grpc_error* add_str_bytes(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { if (p->huff) { - return add_huff_bytes(exec_ctx, p, cur, end); + return add_huff_bytes(p, cur, end); } else { - return append_string(exec_ctx, p, cur, end); + return append_string(p, cur, end); } } /* parse a string - tries to do large chunks at a time */ -static grpc_error *parse_string(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_string(grpc_chttp2_hpack_parser* p, const uint8_t* cur, + const uint8_t* end) { size_t remaining = p->strlen - p->strgot; - size_t given = (size_t)(end - cur); + size_t given = static_cast<size_t>(end - cur); if (remaining <= given) { - grpc_error *err = add_str_bytes(exec_ctx, p, cur, cur + remaining); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - err = finish_str(exec_ctx, p, cur + remaining, end); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - return parse_next(exec_ctx, p, cur + remaining, end); + grpc_error* err = add_str_bytes(p, cur, cur + remaining); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + err = finish_str(p, cur + remaining, end); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + return parse_next(p, cur + remaining, end); } else { - grpc_error *err = add_str_bytes(exec_ctx, p, cur, cur + given); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); + grpc_error* err = add_str_bytes(p, cur, cur + given); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); GPR_ASSERT(given <= UINT32_MAX - p->strgot); - p->strgot += (uint32_t)given; + p->strgot += static_cast<uint32_t>(given); p->state = parse_string; return GRPC_ERROR_NONE; } } /* begin parsing a string - performs setup, calls parse_string */ -static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end, +static 
grpc_error* begin_parse_string(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end, uint8_t binary, - grpc_chttp2_hpack_parser_string *str) { - if (!p->huff && binary == NOT_BINARY && (end - cur) >= (intptr_t)p->strlen && - p->current_slice_refcount != NULL) { - GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx); + grpc_chttp2_hpack_parser_string* str) { + if (!p->huff && binary == NOT_BINARY && + (end - cur) >= static_cast<intptr_t>(p->strlen) && + p->current_slice_refcount != nullptr) { + GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(); str->copied = false; str->data.referenced.refcount = p->current_slice_refcount; - str->data.referenced.data.refcounted.bytes = (uint8_t *)cur; + str->data.referenced.data.refcounted.bytes = const_cast<uint8_t*>(cur); str->data.referenced.data.refcounted.length = p->strlen; grpc_slice_ref_internal(str->data.referenced); - return parse_next(exec_ctx, p, cur + p->strlen, end); + return parse_next(p, cur + p->strlen, end); } p->strgot = 0; str->copied = true; @@ -1535,9 +1471,9 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx, switch (p->binary) { case NOT_BINARY: if (p->huff) { - GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx); + GRPC_STATS_INC_HPACK_RECV_HUFFMAN(); } else { - GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx); + GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(); } break; case BINARY_BEGIN: @@ -1546,144 +1482,134 @@ static grpc_error *begin_parse_string(grpc_exec_ctx *exec_ctx, default: abort(); } - return parse_string(exec_ctx, p, cur, end); + return parse_string(p, cur, end); } /* parse the key string */ -static grpc_error *parse_key_string(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end) { - return begin_parse_string(exec_ctx, p, cur, end, NOT_BINARY, &p->key); +static grpc_error* parse_key_string(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end) { + return begin_parse_string(p, cur, end, NOT_BINARY, &p->key); } /* check if a key represents a binary header or not */ -static bool is_binary_literal_header(grpc_chttp2_hpack_parser *p) { +static bool is_binary_literal_header(grpc_chttp2_hpack_parser* p) { return grpc_is_binary_header( p->key.copied ? grpc_slice_from_static_buffer(p->key.data.copied.str, p->key.data.copied.length) : p->key.data.referenced); } -static grpc_error *is_binary_indexed_header(grpc_chttp2_hpack_parser *p, - bool *is) { +static grpc_error* is_binary_indexed_header(grpc_chttp2_hpack_parser* p, + bool* is) { grpc_mdelem elem = grpc_chttp2_hptbl_lookup(&p->table, p->index); if (GRPC_MDISNULL(elem)) { return grpc_error_set_int( grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Invalid HPACK index received"), - GRPC_ERROR_INT_INDEX, (intptr_t)p->index), - GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents); + GRPC_ERROR_INT_INDEX, + static_cast<intptr_t>(p->index)), + GRPC_ERROR_INT_SIZE, static_cast<intptr_t>(p->table.num_ents)); } *is = grpc_is_binary_header(GRPC_MDKEY(elem)); return GRPC_ERROR_NONE; } /* parse the value string */ -static grpc_error *parse_value_string(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, - const uint8_t *cur, const uint8_t *end, +static grpc_error* parse_value_string(grpc_chttp2_hpack_parser* p, + const uint8_t* cur, const uint8_t* end, bool is_binary) { - return begin_parse_string(exec_ctx, p, cur, end, - is_binary ? BINARY_BEGIN : NOT_BINARY, &p->value); + return begin_parse_string(p, cur, end, is_binary ? 
BINARY_BEGIN : NOT_BINARY, + &p->value); } -static grpc_error *parse_value_string_with_indexed_key( - grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { +static grpc_error* parse_value_string_with_indexed_key( + grpc_chttp2_hpack_parser* p, const uint8_t* cur, const uint8_t* end) { bool is_binary = false; - grpc_error *err = is_binary_indexed_header(p, &is_binary); - if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err); - return parse_value_string(exec_ctx, p, cur, end, is_binary); + grpc_error* err = is_binary_indexed_header(p, &is_binary); + if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err); + return parse_value_string(p, cur, end, is_binary); } -static grpc_error *parse_value_string_with_literal_key( - grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *cur, - const uint8_t *end) { - return parse_value_string(exec_ctx, p, cur, end, is_binary_literal_header(p)); +static grpc_error* parse_value_string_with_literal_key( + grpc_chttp2_hpack_parser* p, const uint8_t* cur, const uint8_t* end) { + return parse_value_string(p, cur, end, is_binary_literal_header(p)); } /* PUBLIC INTERFACE */ -void grpc_chttp2_hpack_parser_init(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p) { - p->on_header = NULL; - p->on_header_user_data = NULL; +void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser* p) { + p->on_header = nullptr; + p->on_header_user_data = nullptr; p->state = parse_begin; p->key.data.referenced = grpc_empty_slice(); - p->key.data.copied.str = NULL; + p->key.data.copied.str = nullptr; p->key.data.copied.capacity = 0; p->key.data.copied.length = 0; p->value.data.referenced = grpc_empty_slice(); - p->value.data.copied.str = NULL; + p->value.data.copied.str = nullptr; p->value.data.copied.capacity = 0; p->value.data.copied.length = 0; p->dynamic_table_update_allowed = 2; p->last_error = GRPC_ERROR_NONE; - grpc_chttp2_hptbl_init(exec_ctx, &p->table); + grpc_chttp2_hptbl_init(&p->table); } -void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p) { +void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p) { p->after_prioritization = p->state; p->state = parse_stream_dep0; } -void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p) { - grpc_chttp2_hptbl_destroy(exec_ctx, &p->table); +void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p) { + grpc_chttp2_hptbl_destroy(&p->table); GRPC_ERROR_UNREF(p->last_error); - grpc_slice_unref_internal(exec_ctx, p->key.data.referenced); - grpc_slice_unref_internal(exec_ctx, p->value.data.referenced); + grpc_slice_unref_internal(p->key.data.referenced); + grpc_slice_unref_internal(p->value.data.referenced); gpr_free(p->key.data.copied.str); gpr_free(p->value.data.copied.str); } -grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, +grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p, grpc_slice slice) { /* max number of bytes to parse at a time... 
limits call stack depth on * compilers without TCO */ #define MAX_PARSE_LENGTH 1024 p->current_slice_refcount = slice.refcount; - uint8_t *start = GRPC_SLICE_START_PTR(slice); - uint8_t *end = GRPC_SLICE_END_PTR(slice); - grpc_error *error = GRPC_ERROR_NONE; + uint8_t* start = GRPC_SLICE_START_PTR(slice); + uint8_t* end = GRPC_SLICE_END_PTR(slice); + grpc_error* error = GRPC_ERROR_NONE; while (start != end && error == GRPC_ERROR_NONE) { - uint8_t *target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start); - error = p->state(exec_ctx, p, start, target); + uint8_t* target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start); + error = p->state(p, start, target); start = target; } - p->current_slice_refcount = NULL; + p->current_slice_refcount = nullptr; return error; } -typedef void (*maybe_complete_func_type)(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s); +typedef void (*maybe_complete_func_type)(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); static const maybe_complete_func_type maybe_complete_funcs[] = { grpc_chttp2_maybe_complete_recv_initial_metadata, grpc_chttp2_maybe_complete_recv_trailing_metadata}; -static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp, - grpc_error *error) { - grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp; - grpc_chttp2_transport *t = s->t; +static void force_client_rst_stream(void* sp, grpc_error* error) { + grpc_chttp2_stream* s = static_cast(sp); + grpc_chttp2_transport* t = s->t; if (!s->write_closed) { grpc_slice_buffer_add( &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing)); - grpc_chttp2_initiate_write(exec_ctx, t, - GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM); - grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE); + grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM); + grpc_chttp2_mark_stream_closed(t, s, true, true, GRPC_ERROR_NONE); } - GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst"); + GRPC_CHTTP2_STREAM_UNREF(s, "final_rst"); } -static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, - grpc_metadata_batch *initial_metadata) { - if (initial_metadata->idx.named.content_encoding == NULL || +static void parse_stream_compression_md(grpc_chttp2_transport* t, + grpc_chttp2_stream* s, + grpc_metadata_batch* initial_metadata) { + if (initial_metadata->idx.named.content_encoding == nullptr || grpc_stream_compression_method_parse( GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), false, &s->stream_decompression_method) == 0) { @@ -1692,45 +1618,41 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx, } } -grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx, - void *hpack_parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last) { - grpc_chttp2_hpack_parser *parser = (grpc_chttp2_hpack_parser *)hpack_parser; - GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0); - if (s != NULL) { + GPR_TIMER_SCOPE("grpc_chttp2_hpack_parser_parse", 0); + grpc_chttp2_hpack_parser* parser = + static_cast(hpack_parser); + if (s != nullptr) { s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice); } - grpc_error *error = grpc_chttp2_hpack_parser_parse(exec_ctx, parser, slice); + grpc_error* error = grpc_chttp2_hpack_parser_parse(parser, slice); if (error != GRPC_ERROR_NONE) { - 
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0); return error; } if (is_last) { if (parser->is_boundary && parser->state != parse_begin) { - GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0); return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "end of header frame not aligned with a hpack record boundary"); } /* need to check for null stream: this can occur if we receive an invalid stream id on a header */ - if (s != NULL) { + if (s != nullptr) { if (parser->is_boundary) { if (s->header_frames_received == GPR_ARRAY_SIZE(s->metadata_buffer)) { - GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0); return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Too many trailer frames"); } /* Process stream compression md element if it exists */ if (s->header_frames_received == 0) { /* Only acts on initial metadata */ - parse_stream_compression_md(exec_ctx, t, s, - &s->metadata_buffer[0].batch); + parse_stream_compression_md(t, s, &s->metadata_buffer[0].batch); } s->published_metadata[s->header_frames_received] = GRPC_METADATA_PUBLISHED_FROM_WIRE; - maybe_complete_funcs[s->header_frames_received](exec_ctx, t, s); + maybe_complete_funcs[s->header_frames_received](t, s); s->header_frames_received++; } if (parser->is_eof) { @@ -1741,21 +1663,18 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx, and can avoid the extra write */ GRPC_CHTTP2_STREAM_REF(s, "final_rst"); GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_CREATE(force_client_rst_stream, s, grpc_combiner_finally_scheduler(t->combiner)), GRPC_ERROR_NONE); } - grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false, - GRPC_ERROR_NONE); + grpc_chttp2_mark_stream_closed(t, s, true, false, GRPC_ERROR_NONE); } } - parser->on_header = NULL; - parser->on_header_user_data = NULL; + parser->on_header = nullptr; + parser->on_header_user_data = nullptr; parser->is_boundary = 0xde; parser->is_eof = 0xde; parser->dynamic_table_update_allowed = 2; } - GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0); return GRPC_ERROR_NONE; } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.h index 8fbc6a602..3e05de4b9 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_parser.h @@ -19,26 +19,25 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H +#include + #include -#include #include "src/core/ext/transport/chttp2/transport/frame.h" #include "src/core/ext/transport/chttp2/transport/hpack_table.h" -#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/transport/metadata.h" typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser; -typedef grpc_error *(*grpc_chttp2_hpack_parser_state)( - grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_parser *p, const uint8_t *beg, - const uint8_t *end); +typedef grpc_error* (*grpc_chttp2_hpack_parser_state)( + grpc_chttp2_hpack_parser* p, const uint8_t* beg, const uint8_t* end); typedef struct { bool copied; struct { grpc_slice referenced; struct { - char *str; + char* str; uint32_t length; uint32_t capacity; } copied; @@ -47,23 +46,23 @@ typedef struct { struct grpc_chttp2_hpack_parser { /* user specified callback for each header output */ - void (*on_header)(grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem md); - void *on_header_user_data; + void (*on_header)(void* user_data, grpc_mdelem md); + void* on_header_user_data; - grpc_error 
*last_error; + grpc_error* last_error; /* current parse state - or a function that implements it */ grpc_chttp2_hpack_parser_state state; /* future states dependent on the opening op code */ - const grpc_chttp2_hpack_parser_state *next_state; + const grpc_chttp2_hpack_parser_state* next_state; /* what to do after skipping prioritization data */ grpc_chttp2_hpack_parser_state after_prioritization; /* the refcount of the slice that we're currently parsing */ - grpc_slice_refcount *current_slice_refcount; + grpc_slice_refcount* current_slice_refcount; /* the value we're currently parsing */ union { - uint32_t *value; - grpc_chttp2_hpack_parser_string *str; + uint32_t* value; + grpc_chttp2_hpack_parser_string* str; } parsing; /* string parameters for each chunk */ grpc_chttp2_hpack_parser_string key; @@ -92,23 +91,19 @@ struct grpc_chttp2_hpack_parser { grpc_chttp2_hptbl table; }; -void grpc_chttp2_hpack_parser_init(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p); -void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p); +void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser* p); +void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p); -void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser *p); +void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p); -grpc_error *grpc_chttp2_hpack_parser_parse(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hpack_parser *p, +grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p, grpc_slice slice); /* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for the transport */ -grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx, - void *hpack_parser, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser, + grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.cc similarity index 80% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.cc index bbd135a31..792925835 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/hpack_table.h" #include @@ -26,16 +28,16 @@ #include #include "src/core/lib/debug/trace.h" -#include "src/core/lib/support/murmur_hash.h" +#include "src/core/lib/gpr/murmur_hash.h" -extern grpc_tracer_flag grpc_http_trace; +extern grpc_core::TraceFlag grpc_http_trace; static struct { - const char *key; - const char *value; + const char* key; + const char* value; } static_table[] = { /* 0: */ - {NULL, NULL}, + {nullptr, nullptr}, /* 1: */ {":authority", ""}, /* 2: */ @@ -165,7 +167,7 @@ static uint32_t entries_for_bytes(uint32_t bytes) { GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD; } -void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) { +void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl* tbl) { size_t i; memset(tbl, 0, sizeof(*tbl)); @@ -173,31 +175,29 @@ void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) { GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE; tbl->max_entries = tbl->cap_entries = 
entries_for_bytes(tbl->current_table_bytes); - tbl->ents = (grpc_mdelem *)gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries); + tbl->ents = static_cast( + gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries)); memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries); for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) { tbl->static_ents[i - 1] = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(static_table[i].key)), grpc_slice_intern( grpc_slice_from_static_string(static_table[i].value))); } } -void grpc_chttp2_hptbl_destroy(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hptbl *tbl) { +void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl* tbl) { size_t i; for (i = 0; i < GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) { - GRPC_MDELEM_UNREF(exec_ctx, tbl->static_ents[i]); + GRPC_MDELEM_UNREF(tbl->static_ents[i]); } for (i = 0; i < tbl->num_ents; i++) { - GRPC_MDELEM_UNREF(exec_ctx, - tbl->ents[(tbl->first_ent + i) % tbl->cap_entries]); + GRPC_MDELEM_UNREF(tbl->ents[(tbl->first_ent + i) % tbl->cap_entries]); } gpr_free(tbl->ents); } -grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl, +grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl* tbl, uint32_t tbl_index) { /* Static table comes first, just return an entry from it */ if (tbl_index <= GRPC_CHTTP2_LAST_STATIC_ENTRY) { @@ -215,20 +215,21 @@ grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl, } /* Evict one element from the table */ -static void evict1(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) { +static void evict1(grpc_chttp2_hptbl* tbl) { grpc_mdelem first_ent = tbl->ents[tbl->first_ent]; size_t elem_bytes = GRPC_SLICE_LENGTH(GRPC_MDKEY(first_ent)) + GRPC_SLICE_LENGTH(GRPC_MDVALUE(first_ent)) + GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD; GPR_ASSERT(elem_bytes <= tbl->mem_used); - tbl->mem_used -= (uint32_t)elem_bytes; + tbl->mem_used -= static_cast(elem_bytes); tbl->first_ent = ((tbl->first_ent + 1) % tbl->cap_entries); tbl->num_ents--; - GRPC_MDELEM_UNREF(exec_ctx, first_ent); + GRPC_MDELEM_UNREF(first_ent); } -static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) { - grpc_mdelem *ents = (grpc_mdelem *)gpr_malloc(sizeof(*ents) * new_cap); +static void rebuild_ents(grpc_chttp2_hptbl* tbl, uint32_t new_cap) { + grpc_mdelem* ents = + static_cast(gpr_malloc(sizeof(*ents) * new_cap)); uint32_t i; for (i = 0; i < tbl->num_ents; i++) { @@ -240,41 +241,39 @@ static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) { tbl->first_ent = 0; } -void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hptbl *tbl, +void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl* tbl, uint32_t max_bytes) { if (tbl->max_bytes == max_bytes) { return; } - if (GRPC_TRACER_ON(grpc_http_trace)) { - gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes); + if (grpc_http_trace.enabled()) { + gpr_log(GPR_INFO, "Update hpack parser max size to %d", max_bytes); } while (tbl->mem_used > max_bytes) { - evict1(exec_ctx, tbl); + evict1(tbl); } tbl->max_bytes = max_bytes; } -grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hptbl *tbl, +grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl* tbl, uint32_t bytes) { if (tbl->current_table_bytes == bytes) { return GRPC_ERROR_NONE; } if (bytes > tbl->max_bytes) { - char *msg; + char* msg; gpr_asprintf(&msg, "Attempt to make hpack table %d bytes when max is %d bytes", bytes, tbl->max_bytes); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = 
GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } - if (GRPC_TRACER_ON(grpc_http_trace)) { - gpr_log(GPR_DEBUG, "Update hpack parser table size to %d", bytes); + if (grpc_http_trace.enabled()) { + gpr_log(GPR_INFO, "Update hpack parser table size to %d", bytes); } while (tbl->mem_used > bytes) { - evict1(exec_ctx, tbl); + evict1(tbl); } tbl->current_table_bytes = bytes; tbl->max_entries = entries_for_bytes(bytes); @@ -289,21 +288,20 @@ grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hptbl *tbl, grpc_mdelem md) { +grpc_error* grpc_chttp2_hptbl_add(grpc_chttp2_hptbl* tbl, grpc_mdelem md) { /* determine how many bytes of buffer this entry represents */ size_t elem_bytes = GRPC_SLICE_LENGTH(GRPC_MDKEY(md)) + GRPC_SLICE_LENGTH(GRPC_MDVALUE(md)) + GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD; if (tbl->current_table_bytes > tbl->max_bytes) { - char *msg; + char* msg; gpr_asprintf( &msg, "HPACK max table size reduced to %d but not reflected by hpack " "stream (still at %d)", tbl->max_bytes, tbl->current_table_bytes); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } @@ -320,14 +318,15 @@ grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx, * empty table. */ while (tbl->num_ents) { - evict1(exec_ctx, tbl); + evict1(tbl); } return GRPC_ERROR_NONE; } /* evict entries to ensure no overflow */ - while (elem_bytes > (size_t)tbl->current_table_bytes - tbl->mem_used) { - evict1(exec_ctx, tbl); + while (elem_bytes > + static_cast(tbl->current_table_bytes) - tbl->mem_used) { + evict1(tbl); } /* copy the finalized entry in */ @@ -336,12 +335,12 @@ grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx, /* update accounting values */ tbl->num_ents++; - tbl->mem_used += (uint32_t)elem_bytes; + tbl->mem_used += static_cast(elem_bytes); return GRPC_ERROR_NONE; } grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find( - const grpc_chttp2_hptbl *tbl, grpc_mdelem md) { + const grpc_chttp2_hptbl* tbl, grpc_mdelem md) { grpc_chttp2_hptbl_find_result r = {0, 0}; uint32_t i; @@ -356,8 +355,8 @@ grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find( /* Scan the dynamic table */ for (i = 0; i < tbl->num_ents; i++) { - uint32_t idx = - (uint32_t)(tbl->num_ents - i + GRPC_CHTTP2_LAST_STATIC_ENTRY); + uint32_t idx = static_cast(tbl->num_ents - i + + GRPC_CHTTP2_LAST_STATIC_ENTRY); grpc_mdelem ent = tbl->ents[(tbl->first_ent + i) % tbl->cap_entries]; if (!grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDKEY(ent))) continue; r.index = idx; diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.h index 2cf8f6850..98026a4ba 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/hpack_table.h @@ -19,8 +19,9 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_TABLE_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_TABLE_H -#include #include + +#include #include "src/core/lib/iomgr/error.h" #include "src/core/lib/transport/metadata.h" @@ -64,26 +65,23 @@ typedef struct { /* a circular buffer of headers - this is stored in the opposite order to what hpack specifies, in order to simplify table management a little... 
meaning lookups need to SUBTRACT from the end position */ - grpc_mdelem *ents; + grpc_mdelem* ents; grpc_mdelem static_ents[GRPC_CHTTP2_LAST_STATIC_ENTRY]; } grpc_chttp2_hptbl; /* initialize a hpack table */ -void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl); -void grpc_chttp2_hptbl_destroy(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl); -void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hptbl *tbl, +void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl* tbl); +void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl* tbl); +void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl* tbl, uint32_t max_bytes); -grpc_error *grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hptbl *tbl, +grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl* tbl, uint32_t bytes); /* lookup a table entry based on its hpack index */ -grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl *tbl, +grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl* tbl, uint32_t index); /* add a table entry to the index */ -grpc_error *grpc_chttp2_hptbl_add(grpc_exec_ctx *exec_ctx, - grpc_chttp2_hptbl *tbl, +grpc_error* grpc_chttp2_hptbl_add(grpc_chttp2_hptbl* tbl, grpc_mdelem md) GRPC_MUST_USE_RESULT; /* Find a key/value pair in the table... returns the index in the table of the most similar entry, or 0 if the value was not found */ @@ -92,6 +90,6 @@ typedef struct { int has_value; } grpc_chttp2_hptbl_find_result; grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find( - const grpc_chttp2_hptbl *tbl, grpc_mdelem md); + const grpc_chttp2_hptbl* tbl, grpc_mdelem md); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_TABLE_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.cc similarity index 93% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.cc index 46b7c0c49..294ee8e4a 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.cc @@ -18,14 +18,16 @@ * Automatically generated by tools/codegen/core/gen_settings_ids.py */ +#include + #include "src/core/ext/transport/chttp2/transport/http2_settings.h" -#include +#include "src/core/lib/gpr/useful.h" #include "src/core/lib/transport/http2_errors.h" const uint16_t grpc_setting_id_to_wire_id[] = {1, 2, 3, 4, 5, 6, 65027}; -bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out) { +bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id* out) { uint32_t i = wire_id - 1; uint32_t x = i % 256; uint32_t y = i / 256; @@ -35,7 +37,7 @@ bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out) { h += 4; break; } - *out = (grpc_chttp2_setting_id)h; + *out = static_cast(h); return h < GPR_ARRAY_SIZE(grpc_setting_id_to_wire_id) && grpc_setting_id_to_wire_id[h] == wire_id; } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.h index 706dfc313..07ce0621b 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/http2_settings.h @@ -21,6 +21,8 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H #define 
GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H +#include + #include #include @@ -35,9 +37,10 @@ typedef enum { } grpc_chttp2_setting_id; #define GRPC_CHTTP2_NUM_SETTINGS 7 + extern const uint16_t grpc_setting_id_to_wire_id[]; -bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out); +bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id* out); typedef enum { GRPC_CHTTP2_CLAMP_INVALID_VALUE, @@ -45,7 +48,7 @@ typedef enum { } grpc_chttp2_invalid_value_behavior; typedef struct { - const char *name; + const char* name; uint32_t default_value; uint32_t min_value; uint32_t max_value; diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/huffsyms.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/huffsyms.cc similarity index 99% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/huffsyms.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/huffsyms.cc index f28d8cc30..813e4c91b 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/huffsyms.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/huffsyms.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/huffsyms.h" /* Constants pulled from the HPACK spec, and converted to C using the vim diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.cc similarity index 54% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.cc index ba680a89d..4d7dfd900 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/incoming_metadata.h" #include @@ -26,49 +28,46 @@ #include void grpc_chttp2_incoming_metadata_buffer_init( - grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena) { + grpc_chttp2_incoming_metadata_buffer* buffer, gpr_arena* arena) { buffer->arena = arena; grpc_metadata_batch_init(&buffer->batch); - buffer->batch.deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + buffer->batch.deadline = GRPC_MILLIS_INF_FUTURE; } void grpc_chttp2_incoming_metadata_buffer_destroy( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer) { - grpc_metadata_batch_destroy(exec_ctx, &buffer->batch); + grpc_chttp2_incoming_metadata_buffer* buffer) { + grpc_metadata_batch_destroy(&buffer->batch); } -grpc_error *grpc_chttp2_incoming_metadata_buffer_add( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, - grpc_mdelem elem) { +grpc_error* grpc_chttp2_incoming_metadata_buffer_add( + grpc_chttp2_incoming_metadata_buffer* buffer, grpc_mdelem elem) { buffer->size += GRPC_MDELEM_LENGTH(elem); return grpc_metadata_batch_add_tail( - exec_ctx, &buffer->batch, (grpc_linked_mdelem *)gpr_arena_alloc( - buffer->arena, sizeof(grpc_linked_mdelem)), + &buffer->batch, + static_cast( + gpr_arena_alloc(buffer->arena, sizeof(grpc_linked_mdelem))), elem); } -grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, - grpc_mdelem elem) { - for (grpc_linked_mdelem *l = buffer->batch.list.head; l != NULL; +grpc_error* grpc_chttp2_incoming_metadata_buffer_replace_or_add( + grpc_chttp2_incoming_metadata_buffer* 
buffer, grpc_mdelem elem) { + for (grpc_linked_mdelem* l = buffer->batch.list.head; l != nullptr; l = l->next) { if (grpc_slice_eq(GRPC_MDKEY(l->md), GRPC_MDKEY(elem))) { - GRPC_MDELEM_UNREF(exec_ctx, l->md); + GRPC_MDELEM_UNREF(l->md); l->md = elem; return GRPC_ERROR_NONE; } } - return grpc_chttp2_incoming_metadata_buffer_add(exec_ctx, buffer, elem); + return grpc_chttp2_incoming_metadata_buffer_add(buffer, elem); } void grpc_chttp2_incoming_metadata_buffer_set_deadline( - grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline) { + grpc_chttp2_incoming_metadata_buffer* buffer, grpc_millis deadline) { buffer->batch.deadline = deadline; } void grpc_chttp2_incoming_metadata_buffer_publish( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, - grpc_metadata_batch *batch) { - *batch = buffer->batch; - grpc_metadata_batch_init(&buffer->batch); + grpc_chttp2_incoming_metadata_buffer* buffer, grpc_metadata_batch* batch) { + grpc_metadata_batch_move(&buffer->batch, batch); } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.h index a951d8764..d029cf00d 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/incoming_metadata.h @@ -19,30 +19,31 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INCOMING_METADATA_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INCOMING_METADATA_H +#include + #include "src/core/lib/transport/transport.h" typedef struct { - gpr_arena *arena; + gpr_arena* arena; grpc_metadata_batch batch; size_t size; // total size of metadata } grpc_chttp2_incoming_metadata_buffer; /** assumes everything initially zeroed */ void grpc_chttp2_incoming_metadata_buffer_init( - grpc_chttp2_incoming_metadata_buffer *buffer, gpr_arena *arena); + grpc_chttp2_incoming_metadata_buffer* buffer, gpr_arena* arena); void grpc_chttp2_incoming_metadata_buffer_destroy( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer); + grpc_chttp2_incoming_metadata_buffer* buffer); void grpc_chttp2_incoming_metadata_buffer_publish( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, - grpc_metadata_batch *batch); + grpc_chttp2_incoming_metadata_buffer* buffer, grpc_metadata_batch* batch); -grpc_error *grpc_chttp2_incoming_metadata_buffer_add( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, +grpc_error* grpc_chttp2_incoming_metadata_buffer_add( + grpc_chttp2_incoming_metadata_buffer* buffer, grpc_mdelem elem) GRPC_MUST_USE_RESULT; -grpc_error *grpc_chttp2_incoming_metadata_buffer_replace_or_add( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_metadata_buffer *buffer, +grpc_error* grpc_chttp2_incoming_metadata_buffer_replace_or_add( + grpc_chttp2_incoming_metadata_buffer* buffer, grpc_mdelem elem) GRPC_MUST_USE_RESULT; void grpc_chttp2_incoming_metadata_buffer_set_deadline( - grpc_chttp2_incoming_metadata_buffer *buffer, gpr_timespec deadline); + grpc_chttp2_incoming_metadata_buffer* buffer, grpc_millis deadline); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INCOMING_METADATA_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/internal.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/internal.h index fde46b348..ca6e71597 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/internal.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/internal.h @@ -19,9 
+19,12 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H +#include + #include #include +#include "src/core/ext/transport/chttp2/transport/flow_control.h" #include "src/core/ext/transport/chttp2/transport/frame.h" #include "src/core/ext/transport/chttp2/transport/frame_data.h" #include "src/core/ext/transport/chttp2/transport/frame_goaway.h" @@ -34,12 +37,11 @@ #include "src/core/ext/transport/chttp2/transport/incoming_metadata.h" #include "src/core/ext/transport/chttp2/transport/stream_map.h" #include "src/core/lib/compression/stream_compression.h" +#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/transport/bdp_estimator.h" #include "src/core/lib/transport/connectivity_state.h" -#include "src/core/lib/transport/pid_controller.h" #include "src/core/lib/transport/transport_impl.h" /* streams are kept in various linked lists depending on what things need to @@ -61,12 +63,6 @@ typedef enum { GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE, } grpc_chttp2_write_state; -typedef enum { - GRPC_CHTTP2_PING_ON_NEXT_WRITE = 0, - GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE, - GRPC_CHTTP2_PING_TYPE_COUNT /* must be last */ -} grpc_chttp2_ping_type; - typedef enum { GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY, GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT, @@ -93,7 +89,6 @@ typedef enum { GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL, GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL, GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, - GRPC_CHTTP2_INITIATE_WRITE_BDP_ESTIMATOR_PING, GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING, GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE, GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING, @@ -103,7 +98,7 @@ typedef enum { GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM, } grpc_chttp2_initiate_write_reason; -const char *grpc_chttp2_initiate_write_reason_string( +const char* grpc_chttp2_initiate_write_reason_string( grpc_chttp2_initiate_write_reason reason); typedef struct { @@ -114,19 +109,19 @@ typedef struct { typedef struct { int max_pings_without_data; int max_ping_strikes; - gpr_timespec min_sent_ping_interval_without_data; - gpr_timespec min_recv_ping_interval_without_data; + grpc_millis min_sent_ping_interval_without_data; + grpc_millis min_recv_ping_interval_without_data; } grpc_chttp2_repeated_ping_policy; typedef struct { - gpr_timespec last_ping_sent_time; + grpc_millis last_ping_sent_time; int pings_before_data_required; grpc_timer delayed_ping_timer; bool is_delayed_ping_timer_set; } grpc_chttp2_repeated_ping_state; typedef struct { - gpr_timespec last_ping_recv_time; + grpc_millis last_ping_recv_time; int ping_strikes; } grpc_chttp2_server_ping_recv_state; @@ -174,13 +169,13 @@ typedef enum { } grpc_chttp2_deframe_transport_state; typedef struct { - grpc_chttp2_stream *head; - grpc_chttp2_stream *tail; + grpc_chttp2_stream* head; + grpc_chttp2_stream* tail; } grpc_chttp2_stream_list; typedef struct { - grpc_chttp2_stream *next; - grpc_chttp2_stream *prev; + grpc_chttp2_stream* next; + grpc_chttp2_stream* prev; } grpc_chttp2_stream_link; /* We keep several sets of connection wide parameters */ @@ -204,22 +199,62 @@ typedef enum { typedef struct grpc_chttp2_write_cb { int64_t call_at_byte; - grpc_closure *closure; - struct grpc_chttp2_write_cb *next; + grpc_closure* closure; + struct grpc_chttp2_write_cb* next; } grpc_chttp2_write_cb; -/* forward 
declared in frame_data.h */ -struct grpc_chttp2_incoming_byte_stream { - grpc_byte_stream base; - gpr_refcount refs; +namespace grpc_core { + +class Chttp2IncomingByteStream : public ByteStream { + public: + Chttp2IncomingByteStream(grpc_chttp2_transport* transport, + grpc_chttp2_stream* stream, uint32_t frame_size, + uint32_t flags); + + void Orphan() override; + + bool Next(size_t max_size_hint, grpc_closure* on_complete) override; + grpc_error* Pull(grpc_slice* slice) override; + void Shutdown(grpc_error* error) override; + + // TODO(roth): When I converted this class to C++, I wanted to make it + // inherit from RefCounted or InternallyRefCounted instead of continuing + // to use its own custom ref-counting code. However, that would require + // using multiple inheritence, which sucks in general. And to make matters + // worse, it causes problems with our New<> and Delete<> wrappers. + // Specifically, unless RefCounted is first in the list of parent classes, + // it will see a different value of the address of the object than the one + // we actually allocated, in which case gpr_free() will be called on a + // different address than the one we got from gpr_malloc(), thus causing a + // crash. Given the fragility of depending on that, as well as a desire to + // avoid multiple inheritence in general, I've decided to leave this + // alone for now. We can revisit this once we're able to link against + // libc++, at which point we can eliminate New<> and Delete<> and + // switch to std::shared_ptr<>. + void Ref(); + void Unref(); + + void PublishError(grpc_error* error); + + grpc_error* Push(grpc_slice slice, grpc_slice* slice_out); + + grpc_error* Finished(grpc_error* error, bool reset_on_error); + + uint32_t remaining_bytes() const { return remaining_bytes_; } + + private: + static void NextLocked(void* arg, grpc_error* error_ignored); + static void OrphanLocked(void* arg, grpc_error* error_ignored); + + grpc_chttp2_transport* transport_; // Immutable. + grpc_chttp2_stream* stream_; // Immutable. - grpc_chttp2_transport *transport; /* immutable */ - grpc_chttp2_stream *stream; /* immutable */ + gpr_refcount refs_; /* Accessed only by transport thread when stream->pending_byte_stream == false * Accessed only by application thread when stream->pending_byte_stream == * true */ - uint32_t remaining_bytes; + uint32_t remaining_bytes_; /* Accessed only by transport thread when stream->pending_byte_stream == false * Accessed only by application thread when stream->pending_byte_stream == @@ -227,12 +262,13 @@ struct grpc_chttp2_incoming_byte_stream { struct { grpc_closure closure; size_t max_size_hint; - grpc_closure *on_complete; - } next_action; - grpc_closure destroy_action; - grpc_closure finished_action; + grpc_closure* on_complete; + } next_action_; + grpc_closure destroy_action_; }; +} // namespace grpc_core + typedef enum { GRPC_CHTTP2_KEEPALIVE_STATE_WAITING, GRPC_CHTTP2_KEEPALIVE_STATE_PINGING, @@ -240,54 +276,15 @@ typedef enum { GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED, } grpc_chttp2_keepalive_state; -typedef struct { - /** initial window change. This is tracked as we parse settings frames from - * the remote peer. 
If there is a positive delta, then we will make all - * streams readable since they may have become unstalled */ - int64_t initial_window_update; - - /** Our bookkeeping for the remote peer's available window */ - int64_t remote_window; - - /** calculating what we should give for local window: - we track the total amount of flow control over initial window size - across all streams: this is data that we want to receive right now (it - has an outstanding read) - and the total amount of flow control under initial window size across all - streams: this is data we've read early - we want to adjust incoming_window such that: - incoming_window = total_over - max(bdp - total_under, 0) */ - int64_t announced_stream_total_over_incoming_window; - int64_t announced_stream_total_under_incoming_window; - - /** This is out window according to what we have sent to our remote peer. The - * difference between this and target window is what we use to decide when - * to send WINDOW_UPDATE frames. */ - int64_t announced_window; - - /** should we probe bdp? */ - bool enable_bdp_probe; - - /* bdp estimation */ - grpc_bdp_estimator bdp_estimator; - - /* pid controller */ - grpc_pid_controller pid_controller; - gpr_timespec last_pid_update; - - // pointer back to transport for tracing - const grpc_chttp2_transport *t; -} grpc_chttp2_transport_flowctl; - struct grpc_chttp2_transport { grpc_transport base; /* must be first */ gpr_refcount refs; - grpc_endpoint *ep; - char *peer_string; + grpc_endpoint* ep; + char* peer_string; - grpc_combiner *combiner; + grpc_combiner* combiner; - grpc_closure *notify_on_receive_settings; + grpc_closure* notify_on_receive_settings; /** write execution state of the transport */ grpc_chttp2_write_state write_state; @@ -299,7 +296,7 @@ struct grpc_chttp2_transport { /** is the transport destroying itself? */ uint8_t destroying; /** has the upper layer closed the transport? */ - uint8_t closed; + grpc_error* closed_with_error; /** is there a read request to the endpoint outstanding? */ uint8_t endpoint_reading; @@ -324,13 +321,13 @@ struct grpc_chttp2_transport { /** address to place a newly accepted stream - set and unset by grpc_chttp2_parsing_accept_stream; used by init_stream to publish the accepted server stream */ - grpc_chttp2_stream **accepting_stream; + grpc_chttp2_stream** accepting_stream; struct { /* accept stream callback */ - void (*accept_stream)(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_transport *transport, const void *server_data); - void *accept_stream_user_data; + void (*accept_stream)(void* user_data, grpc_transport* transport, + const void* server_data); + void* accept_stream_user_data; /** connectivity tracking */ grpc_connectivity_state_tracker state_tracker; @@ -341,7 +338,7 @@ struct grpc_chttp2_transport { /** hpack encoding */ grpc_chttp2_hpack_compressor hpack_compressor; /** is this a client? */ - uint8_t is_client; + bool is_client; /** data to write next write */ grpc_slice_buffer qbuf; @@ -350,15 +347,16 @@ struct grpc_chttp2_transport { */ uint32_t write_buffer_size; - /** have we seen a goaway */ - uint8_t seen_goaway; - /** have we sent a goaway */ + /** Set to a grpc_error object if a goaway frame is received. By default, set + * to GRPC_ERROR_NONE */ + grpc_error* goaway_error; + grpc_chttp2_sent_goaway_state sent_goaway_state; /** are the local settings dirty and need to be sent? */ - uint8_t dirtied_local_settings; + bool dirtied_local_settings; /** have local settings been sent? 
*/ - uint8_t sent_local_settings; + bool sent_local_settings; /** bitmask of setting indexes to send out */ uint32_t force_send_settings; /** settings values */ @@ -372,7 +370,7 @@ struct grpc_chttp2_transport { uint32_t last_new_stream_id; /** ping queues for various ping insertion points */ - grpc_chttp2_ping_queue ping_queues[GRPC_CHTTP2_PING_TYPE_COUNT]; + grpc_chttp2_ping_queue ping_queue; grpc_chttp2_repeated_ping_policy ping_policy; grpc_chttp2_repeated_ping_state ping_state; uint64_t ping_ctr; /* unique id for pings */ @@ -381,7 +379,7 @@ struct grpc_chttp2_transport { /** ping acks */ size_t ping_ack_count; size_t ping_ack_capacity; - uint64_t *ping_acks; + uint64_t* ping_acks; grpc_chttp2_server_ping_recv_state ping_recv_state; /** parser for headers */ @@ -396,7 +394,15 @@ struct grpc_chttp2_transport { /** parser for goaway frames */ grpc_chttp2_goaway_parser goaway_parser; - grpc_chttp2_transport_flowctl flow_control; + grpc_core::PolymorphicManualConstructor< + grpc_core::chttp2::TransportFlowControlBase, + grpc_core::chttp2::TransportFlowControl, + grpc_core::chttp2::TransportFlowControlDisabled> + flow_control; + /** initial window change. This is tracked as we parse settings frames from + * the remote peer. If there is a positive delta, then we will make all + * streams readable since they may have become unstalled */ + int64_t initial_window_update = 0; /* deframing */ grpc_chttp2_deframe_transport_state deframe_state; @@ -409,26 +415,21 @@ struct grpc_chttp2_transport { uint32_t incoming_stream_id; /* active parser */ - void *parser_data; - grpc_chttp2_stream *incoming_stream; - grpc_error *(*parser)(grpc_exec_ctx *exec_ctx, void *parser_user_data, - grpc_chttp2_transport *t, grpc_chttp2_stream *s, - grpc_slice slice, int is_last); - - /* goaway data */ - grpc_status_code goaway_error; - uint32_t goaway_last_stream_index; - grpc_slice goaway_text; + void* parser_data; + grpc_chttp2_stream* incoming_stream; + grpc_error* (*parser)(void* parser_user_data, grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, int is_last); - grpc_chttp2_write_cb *write_cb_pool; + grpc_chttp2_write_cb* write_cb_pool; /* bdp estimator */ + grpc_closure next_bdp_ping_timer_expired_locked; grpc_closure start_bdp_ping_locked; grpc_closure finish_bdp_ping_locked; /* if non-NULL, close the transport with this error when writes are finished */ - grpc_error *close_transport_on_writes_finished; + grpc_error* close_transport_on_writes_finished; /* a list of closures to run after writes are finished */ grpc_closure_list run_after_write; @@ -443,6 +444,10 @@ struct grpc_chttp2_transport { /** destructive cleanup closure */ grpc_closure destructive_reclaimer_locked; + /* next bdp ping timer */ + bool have_next_bdp_ping_timer; + grpc_timer next_bdp_ping_timer; + /* keep-alive ping support */ /** Closure to initialize a keepalive ping */ grpc_closure init_keepalive_ping_locked; @@ -457,9 +462,9 @@ struct grpc_chttp2_transport { /** watchdog to kill the transport when waiting for the keepalive ping */ grpc_timer keepalive_watchdog_timer; /** time duration in between pings */ - gpr_timespec keepalive_time; + grpc_millis keepalive_time; /** grace period for a ping to complete before watchdog kicks in */ - gpr_timespec keepalive_timeout; + grpc_millis keepalive_timeout; /** if keepalive pings are allowed when there's no outstanding streams */ bool keepalive_permit_without_calls; /** keep-alive state machine state */ @@ -473,31 +478,12 @@ typedef enum { GPRC_METADATA_PUBLISHED_AT_CLOSE } 
grpc_published_metadata_method; -typedef struct { - /** window available for us to send to peer, over or under the initial window - * size of the transport... ie: - * remote_window = remote_window_delta + transport.initial_window_size */ - int64_t remote_window_delta; - - /** window available for peer to send to us (as a delta on - * transport.initial_window_size) - * local_window = local_window_delta + transport.initial_window_size */ - int64_t local_window_delta; - - /** window available for peer to send to us over this stream that we have - * announced to the peer */ - int64_t announced_window_delta; - - // read only pointer back to stream for data - const grpc_chttp2_stream *s; -} grpc_chttp2_stream_flowctl; - struct grpc_chttp2_stream { - grpc_chttp2_transport *t; - grpc_stream_refcount *refcount; + grpc_chttp2_transport* t; + grpc_stream_refcount* refcount; grpc_closure destroy_stream; - grpc_closure *destroy_stream_arg; + grpc_closure* destroy_stream_arg; grpc_chttp2_stream_link links[STREAM_LIST_COUNT]; uint8_t included[STREAM_LIST_COUNT]; @@ -506,29 +492,29 @@ struct grpc_chttp2_stream { uint32_t id; /** things the upper layers would like to send */ - grpc_metadata_batch *send_initial_metadata; - grpc_closure *send_initial_metadata_finished; - grpc_metadata_batch *send_trailing_metadata; - grpc_closure *send_trailing_metadata_finished; + grpc_metadata_batch* send_initial_metadata; + grpc_closure* send_initial_metadata_finished; + grpc_metadata_batch* send_trailing_metadata; + grpc_closure* send_trailing_metadata_finished; - grpc_byte_stream *fetching_send_message; + grpc_core::OrphanablePtr fetching_send_message; uint32_t fetched_send_message_length; grpc_slice fetching_slice; int64_t next_message_end_offset; int64_t flow_controlled_bytes_written; int64_t flow_controlled_bytes_flowed; grpc_closure complete_fetch_locked; - grpc_closure *fetching_send_message_finished; + grpc_closure* fetching_send_message_finished; - grpc_metadata_batch *recv_initial_metadata; - grpc_closure *recv_initial_metadata_ready; - bool *trailing_metadata_available; - grpc_byte_stream **recv_message; - grpc_closure *recv_message_ready; - grpc_metadata_batch *recv_trailing_metadata; - grpc_closure *recv_trailing_metadata_finished; + grpc_metadata_batch* recv_initial_metadata; + grpc_closure* recv_initial_metadata_ready; + bool* trailing_metadata_available; + grpc_core::OrphanablePtr* recv_message; + grpc_closure* recv_message_ready; + grpc_metadata_batch* recv_trailing_metadata; + grpc_closure* recv_trailing_metadata_finished; - grpc_transport_stream_stats *collecting_stats; + grpc_transport_stream_stats* collecting_stats; grpc_transport_stream_stats stats; /** Is this stream closed for writing. 
*/ @@ -547,9 +533,9 @@ struct grpc_chttp2_stream { bool received_trailing_metadata; /** the error that resulted in this stream being read-closed */ - grpc_error *read_closed_error; + grpc_error* read_closed_error; /** the error that resulted in this stream being write-closed */ - grpc_error *write_closed_error; + grpc_error* write_closed_error; grpc_published_metadata_method published_metadata[2]; bool final_metadata_requested; @@ -562,16 +548,21 @@ struct grpc_chttp2_stream { * Accessed only by application thread when stream->pending_byte_stream == * true */ grpc_slice_buffer unprocessed_incoming_frames_buffer; - grpc_closure *on_next; /* protected by t combiner */ + grpc_closure* on_next; /* protected by t combiner */ bool pending_byte_stream; /* protected by t combiner */ + // cached length of buffer to be used by the transport thread in cases where + // stream->pending_byte_stream == true. The value is saved before + // application threads are allowed to modify + // unprocessed_incoming_frames_buffer + size_t unprocessed_incoming_frames_buffer_cached_length; grpc_closure reset_byte_stream; - grpc_error *byte_stream_error; /* protected by t combiner */ + grpc_error* byte_stream_error; /* protected by t combiner */ bool received_last_frame; /* protected by t combiner */ - gpr_timespec deadline; + grpc_millis deadline; /** saw some stream level error */ - grpc_error *forced_close_error; + grpc_error* forced_close_error; /** how many header frames have we received? */ uint8_t header_frames_received; /** parsing state for data frames */ @@ -585,13 +576,17 @@ struct grpc_chttp2_stream { bool sent_initial_metadata; bool sent_trailing_metadata; - grpc_chttp2_stream_flowctl flow_control; + grpc_core::PolymorphicManualConstructor< + grpc_core::chttp2::StreamFlowControlBase, + grpc_core::chttp2::StreamFlowControl, + grpc_core::chttp2::StreamFlowControlDisabled> + flow_control; grpc_slice_buffer flow_controlled_buffer; - grpc_chttp2_write_cb *on_flow_controlled_cbs; - grpc_chttp2_write_cb *on_write_finished_cbs; - grpc_chttp2_write_cb *finish_after_write; + grpc_chttp2_write_cb* on_flow_controlled_cbs; + grpc_chttp2_write_cb* on_write_finished_cbs; + grpc_chttp2_write_cb* finish_after_write; size_t sending_bytes; /* Stream compression method to be used. */ @@ -599,9 +594,9 @@ struct grpc_chttp2_stream { /* Stream decompression method to be used. */ grpc_stream_compression_method stream_decompression_method; /** Stream compression decompress context */ - grpc_stream_compression_context *stream_decompression_ctx; + grpc_stream_compression_context* stream_decompression_ctx; /** Stream compression compress context */ - grpc_stream_compression_context *stream_compression_ctx; + grpc_stream_compression_context* stream_compression_ctx; /** Buffer storing data that is compressed but not sent */ grpc_slice_buffer compressed_data_buffer; @@ -629,8 +624,7 @@ struct grpc_chttp2_stream { The actual call chain is documented in the implementation of this function. 
*/ -void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +void grpc_chttp2_initiate_write(grpc_chttp2_transport* t, grpc_chttp2_initiate_write_reason reason); typedef struct { @@ -643,151 +637,79 @@ typedef struct { } grpc_chttp2_begin_write_result; grpc_chttp2_begin_write_result grpc_chttp2_begin_write( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t); -void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_error *error); + grpc_chttp2_transport* t); +void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error* error); /** Process one slice of incoming data; return 1 if the connection is still viable after reading, or 0 if the connection should be torn down */ -grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t, grpc_slice slice); -bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s); +bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); /** Get a writable stream returns non-zero if there was a stream available */ -bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream **s); -bool grpc_chttp2_list_remove_writable_stream( - grpc_chttp2_transport *t, grpc_chttp2_stream *s) GRPC_MUST_USE_RESULT; - -bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s); -bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t); -bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream **s); - -void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s); -bool grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream **s); - -void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t, - grpc_chttp2_stream *s); -bool grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t, - grpc_chttp2_stream **s); -void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t, - grpc_chttp2_stream *s); - -void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t, - grpc_chttp2_stream *s); -bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t, - grpc_chttp2_stream **s); -void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t, - grpc_chttp2_stream *s); - -void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s); -bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream **s); -bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s); +bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream** s); +bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); + +bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); +bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport* t); +bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream** s); + +void grpc_chttp2_list_add_written_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); +bool grpc_chttp2_list_pop_written_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream** s); + +void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); +bool 
grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport* t, + grpc_chttp2_stream** s); +void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); + +void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); +bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport* t, + grpc_chttp2_stream** s); +void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); + +void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); +bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream** s); +bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); /********* Flow Control ***************/ -// we have sent data on the wire -void grpc_chttp2_flowctl_sent_data(grpc_chttp2_transport_flowctl *tfc, - grpc_chttp2_stream_flowctl *sfc, - int64_t size); - -// we have received data from the wire -grpc_error *grpc_chttp2_flowctl_recv_data(grpc_chttp2_transport_flowctl *tfc, - grpc_chttp2_stream_flowctl *sfc, - int64_t incoming_frame_size); - -// returns an announce if we should send a transport update to our peer, -// else returns zero -uint32_t grpc_chttp2_flowctl_maybe_send_transport_update( - grpc_chttp2_transport_flowctl *tfc); - -// returns an announce if we should send a stream update to our peer, else -// returns zero -uint32_t grpc_chttp2_flowctl_maybe_send_stream_update( - grpc_chttp2_transport_flowctl *tfc, grpc_chttp2_stream_flowctl *sfc); - -// we have received a WINDOW_UPDATE frame for a transport -void grpc_chttp2_flowctl_recv_transport_update( - grpc_chttp2_transport_flowctl *tfc, uint32_t size); - -// we have received a WINDOW_UPDATE frame for a stream -void grpc_chttp2_flowctl_recv_stream_update(grpc_chttp2_transport_flowctl *tfc, - grpc_chttp2_stream_flowctl *sfc, - uint32_t size); - -// the application is asking for a certain amount of bytes -void grpc_chttp2_flowctl_incoming_bs_update(grpc_chttp2_transport_flowctl *tfc, - grpc_chttp2_stream_flowctl *sfc, - size_t max_size_hint, - size_t have_already); - -void grpc_chttp2_flowctl_destroy_stream(grpc_chttp2_transport_flowctl *tfc, - grpc_chttp2_stream_flowctl *sfc); - -typedef enum { - // Nothing to be done. - GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED = 0, - // Initiate a write to update the initial window immediately. - GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY, - // Push the flow control update into a send buffer, to be sent - // out the next time a write is initiated. - GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE, -} grpc_chttp2_flowctl_urgency; - -typedef struct { - grpc_chttp2_flowctl_urgency send_stream_update; - grpc_chttp2_flowctl_urgency send_transport_update; - grpc_chttp2_flowctl_urgency send_setting_update; - uint32_t initial_window_size; - uint32_t max_frame_size; - bool need_ping; -} grpc_chttp2_flowctl_action; - -// Reads the flow control data and returns and actionable struct that will tell -// chttp2 exactly what it needs to do -grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_action( - grpc_chttp2_transport_flowctl *tfc, grpc_chttp2_stream_flowctl *sfc); - -grpc_chttp2_flowctl_action grpc_chttp2_flowctl_get_bdp_action( - grpc_chttp2_transport_flowctl *tfc); - // Takes in a flow control action and performs all the needed operations. 
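[Illustrative sketch — not part of the patch.] The hunk above deletes the C-style grpc_chttp2_flowctl_* helpers and the grpc_chttp2_flowctl_action struct; later hunks in this same patch (see the rewritten init_data_frame_parser() in parsing.cc below) instead call methods on the new C++ flow-control objects and hand the resulting FlowControlAction to grpc_chttp2_act_on_flowctl_action(). A minimal sketch of that replacement pattern, using only names that appear elsewhere in this diff — the wrapper function name on_data_frame_received is hypothetical:

    // Per-frame flow-control accounting with the new C++ API. t->flow_control and
    // s->flow_control are the flow-control objects this patch introduces on the
    // transport and stream structs.
    static grpc_error* on_data_frame_received(grpc_chttp2_transport* t,
                                              grpc_chttp2_stream* s,
                                              uint32_t frame_size) {
      grpc_core::chttp2::FlowControlAction action;
      grpc_error* err;
      if (s == nullptr) {
        // No stream for this frame: account the bytes at the transport level only.
        err = t->flow_control->RecvData(frame_size);
        action = t->flow_control->MakeAction();
      } else {
        // Stream present: stream-level accounting.
        err = s->flow_control->RecvData(frame_size);
        action = s->flow_control->MakeAction();
      }
      // Replaces the old grpc_chttp2_flowctl_get_action() / action-struct pair.
      grpc_chttp2_act_on_flowctl_action(action, t, s);
      return err;
    }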
-void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx, - grpc_chttp2_flowctl_action action, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s); +void grpc_chttp2_act_on_flowctl_action( + const grpc_core::chttp2::FlowControlAction& action, + grpc_chttp2_transport* t, grpc_chttp2_stream* s); /********* End of Flow Control ***************/ -grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t, +grpc_chttp2_stream* grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport* t, uint32_t id); -grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t, uint32_t id); -void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t, uint32_t goaway_error, grpc_slice goaway_text); -void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); +void grpc_chttp2_parsing_become_skip_parser(grpc_chttp2_transport* t); -void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, - grpc_closure **pclosure, - grpc_error *error, const char *desc); +void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t, + grpc_chttp2_stream* s, + grpc_closure** pclosure, + grpc_error* error, const char* desc); #define GRPC_HEADER_SIZE_IN_BYTES 5 #define MAX_SIZE_T (~(size_t)0) @@ -796,118 +718,82 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx, #define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \ (sizeof(GRPC_CHTTP2_CLIENT_CONNECT_STRING) - 1) -extern grpc_tracer_flag grpc_http_trace; -extern grpc_tracer_flag grpc_flowctl_trace; +// extern grpc_core::TraceFlag grpc_http_trace; +// extern grpc_core::TraceFlag grpc_flowctl_trace; -#ifndef NDEBUG -#define GRPC_FLOW_CONTROL_IF_TRACING(stmt) \ - if (!(GRPC_TRACER_ON(grpc_flowctl_trace))) \ - ; \ - else \ - stmt -#else -#define GRPC_FLOW_CONTROL_IF_TRACING(stmt) -#endif +#define GRPC_CHTTP2_IF_TRACING(stmt) \ + if (!(grpc_http_trace.enabled())) \ + ; \ + else \ + stmt -#define GRPC_CHTTP2_IF_TRACING(stmt) \ - if (!(GRPC_TRACER_ON(grpc_http_trace))) \ - ; \ - else \ - stmt - -void grpc_chttp2_fake_status(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_stream *stream, grpc_error *error); -void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, int close_reads, - int close_writes, grpc_error *error); -void grpc_chttp2_start_writing(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); +void grpc_chttp2_fake_status(grpc_chttp2_transport* t, + grpc_chttp2_stream* stream, grpc_error* error); +void grpc_chttp2_mark_stream_closed(grpc_chttp2_transport* t, + grpc_chttp2_stream* s, int close_reads, + int close_writes, grpc_error* error); +void grpc_chttp2_start_writing(grpc_chttp2_transport* t); #ifndef NDEBUG #define GRPC_CHTTP2_STREAM_REF(stream, reason) \ grpc_chttp2_stream_ref(stream, reason) -#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \ - grpc_chttp2_stream_unref(exec_ctx, stream, reason) -void grpc_chttp2_stream_ref(grpc_chttp2_stream *s, const char *reason); -void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s, - const char *reason); +#define GRPC_CHTTP2_STREAM_UNREF(stream, reason) \ + grpc_chttp2_stream_unref(stream, reason) +void grpc_chttp2_stream_ref(grpc_chttp2_stream* s, const char* reason); +void 
grpc_chttp2_stream_unref(grpc_chttp2_stream* s, const char* reason); #else #define GRPC_CHTTP2_STREAM_REF(stream, reason) grpc_chttp2_stream_ref(stream) -#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \ - grpc_chttp2_stream_unref(exec_ctx, stream) -void grpc_chttp2_stream_ref(grpc_chttp2_stream *s); -void grpc_chttp2_stream_unref(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s); +#define GRPC_CHTTP2_STREAM_UNREF(stream, reason) \ + grpc_chttp2_stream_unref(stream) +void grpc_chttp2_stream_ref(grpc_chttp2_stream* s); +void grpc_chttp2_stream_unref(grpc_chttp2_stream* s); #endif #ifndef NDEBUG #define GRPC_CHTTP2_REF_TRANSPORT(t, r) \ grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__) -#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \ - grpc_chttp2_unref_transport(cl, t, r, __FILE__, __LINE__) -void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, const char *reason, - const char *file, int line); -void grpc_chttp2_ref_transport(grpc_chttp2_transport *t, const char *reason, - const char *file, int line); +#define GRPC_CHTTP2_UNREF_TRANSPORT(t, r) \ + grpc_chttp2_unref_transport(t, r, __FILE__, __LINE__) +void grpc_chttp2_unref_transport(grpc_chttp2_transport* t, const char* reason, + const char* file, int line); +void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason, + const char* file, int line); #else #define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t) -#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) grpc_chttp2_unref_transport(cl, t) -void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); -void grpc_chttp2_ref_transport(grpc_chttp2_transport *t); +#define GRPC_CHTTP2_UNREF_TRANSPORT(t, r) grpc_chttp2_unref_transport(t) +void grpc_chttp2_unref_transport(grpc_chttp2_transport* t); +void grpc_chttp2_ref_transport(grpc_chttp2_transport* t); #endif -grpc_chttp2_incoming_byte_stream *grpc_chttp2_incoming_byte_stream_create( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s, - uint32_t frame_size, uint32_t flags); -grpc_error *grpc_chttp2_incoming_byte_stream_push( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, - grpc_slice slice, grpc_slice *slice_out); -grpc_error *grpc_chttp2_incoming_byte_stream_finished( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, - grpc_error *error, bool reset_on_error); -void grpc_chttp2_incoming_byte_stream_notify( - grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs, - grpc_error *error); - -void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - uint64_t id); +void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id); /** Add a new ping strike to ping_recv_state.ping_strikes. If ping_recv_state.ping_strikes > ping_policy.max_ping_strikes, it sends GOAWAY with error code ENHANCE_YOUR_CALM and additional debug data resembling "too_many_pings" followed by immediately closing the connection. 
*/ -void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); +void grpc_chttp2_add_ping_strike(grpc_chttp2_transport* t); /** add a ref to the stream and add it to the writable list; ref will be dropped in writing.c */ -void grpc_chttp2_mark_stream_writable(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s); - -void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, grpc_chttp2_stream *s, - grpc_error *due_to_error); - -void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s); -void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s); -void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s); - -void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_stream *s, grpc_error *error); +void grpc_chttp2_mark_stream_writable(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); + +void grpc_chttp2_cancel_stream(grpc_chttp2_transport* t, grpc_chttp2_stream* s, + grpc_error* due_to_error); + +void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); +void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); +void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t, + grpc_chttp2_stream* s); + +void grpc_chttp2_fail_pending_writes(grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_error* error); /** Set the default keepalive configurations, must only be called at initialization */ -void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args, +void grpc_chttp2_config_default_keepalive_args(grpc_channel_args* args, bool is_client); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/parsing.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/parsing.cc similarity index 61% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/parsing.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/parsing.cc index 3db1ad412..a10c9ada4 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/parsing.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/parsing.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/internal.h" #include @@ -31,38 +33,27 @@ #include "src/core/lib/transport/status_conversion.h" #include "src/core/lib/transport/timeout_encoding.h" -static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); -static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +static grpc_error* init_frame_parser(grpc_chttp2_transport* t); +static grpc_error* init_header_frame_parser(grpc_chttp2_transport* t, int is_continuation); -static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); -static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); -static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); -static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); -static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); 
-static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t); -static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +static grpc_error* init_data_frame_parser(grpc_chttp2_transport* t); +static grpc_error* init_rst_stream_parser(grpc_chttp2_transport* t); +static grpc_error* init_settings_frame_parser(grpc_chttp2_transport* t); +static grpc_error* init_window_update_frame_parser(grpc_chttp2_transport* t); +static grpc_error* init_ping_parser(grpc_chttp2_transport* t); +static grpc_error* init_goaway_parser(grpc_chttp2_transport* t); +static grpc_error* init_skip_frame_parser(grpc_chttp2_transport* t, int is_header); -static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, grpc_slice slice, +static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice, int is_last); -grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t, grpc_slice slice) { - uint8_t *beg = GRPC_SLICE_START_PTR(slice); - uint8_t *end = GRPC_SLICE_END_PTR(slice); - uint8_t *cur = beg; - grpc_error *err; + uint8_t* beg = GRPC_SLICE_START_PTR(slice); + uint8_t* end = GRPC_SLICE_END_PTR(slice); + uint8_t* cur = beg; + grpc_error* err; if (cur == end) return GRPC_ERROR_NONE; @@ -93,21 +84,22 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, case GRPC_DTS_CLIENT_PREFIX_23: while (cur != end && t->deframe_state != GRPC_DTS_FH_0) { if (*cur != GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state]) { - char *msg; + char* msg; gpr_asprintf( &msg, "Connect string mismatch: expected '%c' (%d) got '%c' (%d) " "at byte %d", GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state], - (int)(uint8_t)GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state], - *cur, (int)*cur, t->deframe_state); + static_cast(static_cast( + GRPC_CHTTP2_CLIENT_CONNECT_STRING[t->deframe_state])), + *cur, static_cast(*cur), t->deframe_state); err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } ++cur; - t->deframe_state = - (grpc_chttp2_deframe_transport_state)(1 + (int)t->deframe_state); + t->deframe_state = static_cast( + 1 + static_cast(t->deframe_state)); } if (cur == end) { return GRPC_ERROR_NONE; @@ -116,7 +108,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, dts_fh_0: case GRPC_DTS_FH_0: GPR_ASSERT(cur < end); - t->incoming_frame_size = ((uint32_t)*cur) << 16; + t->incoming_frame_size = (static_cast(*cur)) << 16; if (++cur == end) { t->deframe_state = GRPC_DTS_FH_1; return GRPC_ERROR_NONE; @@ -124,7 +116,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, /* fallthrough */ case GRPC_DTS_FH_1: GPR_ASSERT(cur < end); - t->incoming_frame_size |= ((uint32_t)*cur) << 8; + t->incoming_frame_size |= (static_cast(*cur)) << 8; if (++cur == end) { t->deframe_state = GRPC_DTS_FH_2; return GRPC_ERROR_NONE; @@ -156,7 +148,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, /* fallthrough */ case GRPC_DTS_FH_5: GPR_ASSERT(cur < end); - t->incoming_stream_id = (((uint32_t)*cur) & 0x7f) << 24; + t->incoming_stream_id = ((static_cast(*cur)) & 0x7f) << 24; if (++cur == end) { t->deframe_state = GRPC_DTS_FH_6; return GRPC_ERROR_NONE; @@ -164,7 +156,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, /* fallthrough */ case GRPC_DTS_FH_6: GPR_ASSERT(cur < end); - t->incoming_stream_id |= ((uint32_t)*cur) << 16; + t->incoming_stream_id |= (static_cast(*cur)) << 16; 
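[Illustrative sketch — not part of the patch.] The GRPC_DTS_FH_0 … GRPC_DTS_FH_8 cases being rewritten here assemble the 9-octet HTTP/2 frame header one byte at a time; these hunks only change the casts, not the layout. The intervening FH_2–FH_4 cases (unchanged, so not shown in the diff) read the last length byte, the frame type, and the flags. For reference, the same layout decoded from a complete 9-byte buffer; FrameHeader and decode_frame_header are illustrative names, not gRPC APIs:

    #include <cstdint>

    struct FrameHeader {
      uint32_t length;     // 24-bit payload length                   (FH_0..FH_2)
      uint8_t type;        // frame type                              (FH_3)
      uint8_t flags;       // frame flags                             (FH_4)
      uint32_t stream_id;  // 31-bit stream id, reserved bit cleared  (FH_5..FH_8)
    };

    static FrameHeader decode_frame_header(const uint8_t hdr[9]) {
      FrameHeader h;
      h.length = (static_cast<uint32_t>(hdr[0]) << 16) |
                 (static_cast<uint32_t>(hdr[1]) << 8) |
                 static_cast<uint32_t>(hdr[2]);
      h.type = hdr[3];
      h.flags = hdr[4];
      // High bit of the stream id is reserved, hence the 0x7f mask on hdr[5],
      // matching the GRPC_DTS_FH_5 case above.
      h.stream_id = ((static_cast<uint32_t>(hdr[5]) & 0x7f) << 24) |
                    (static_cast<uint32_t>(hdr[6]) << 16) |
                    (static_cast<uint32_t>(hdr[7]) << 8) |
                    static_cast<uint32_t>(hdr[8]);
      return h;
    }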
if (++cur == end) { t->deframe_state = GRPC_DTS_FH_7; return GRPC_ERROR_NONE; @@ -172,7 +164,7 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, /* fallthrough */ case GRPC_DTS_FH_7: GPR_ASSERT(cur < end); - t->incoming_stream_id |= ((uint32_t)*cur) << 8; + t->incoming_stream_id |= (static_cast(*cur)) << 8; if (++cur == end) { t->deframe_state = GRPC_DTS_FH_8; return GRPC_ERROR_NONE; @@ -180,27 +172,28 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, /* fallthrough */ case GRPC_DTS_FH_8: GPR_ASSERT(cur < end); - t->incoming_stream_id |= ((uint32_t)*cur); + t->incoming_stream_id |= (static_cast(*cur)); t->deframe_state = GRPC_DTS_FRAME; - err = init_frame_parser(exec_ctx, t); + err = init_frame_parser(t); if (err != GRPC_ERROR_NONE) { return err; } if (t->incoming_frame_size == 0) { - err = parse_frame_slice(exec_ctx, t, grpc_empty_slice(), 1); + err = parse_frame_slice(t, grpc_empty_slice(), 1); if (err != GRPC_ERROR_NONE) { return err; } - t->incoming_stream = NULL; + t->incoming_stream = nullptr; if (++cur == end) { t->deframe_state = GRPC_DTS_FH_0; return GRPC_ERROR_NONE; } goto dts_fh_0; /* loop */ - } else if (t->incoming_frame_size > - t->settings[GRPC_ACKED_SETTINGS] - [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]) { - char *msg; + } else if (t->flow_control->flow_control_enabled() && + t->incoming_frame_size > + t->settings[GRPC_ACKED_SETTINGS] + [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE]) { + char* msg; gpr_asprintf(&msg, "Frame size %d is larger than max frame size %d", t->incoming_frame_size, t->settings[GRPC_ACKED_SETTINGS] @@ -215,21 +208,22 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, /* fallthrough */ case GRPC_DTS_FRAME: GPR_ASSERT(cur < end); - if ((uint32_t)(end - cur) == t->incoming_frame_size) { + if (static_cast(end - cur) == t->incoming_frame_size) { err = parse_frame_slice( - exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg), - (size_t)(end - beg)), + t, + grpc_slice_sub_no_ref(slice, static_cast(cur - beg), + static_cast(end - beg)), 1); if (err != GRPC_ERROR_NONE) { return err; } t->deframe_state = GRPC_DTS_FH_0; - t->incoming_stream = NULL; + t->incoming_stream = nullptr; return GRPC_ERROR_NONE; - } else if ((uint32_t)(end - cur) > t->incoming_frame_size) { - size_t cur_offset = (size_t)(cur - beg); + } else if (static_cast(end - cur) > t->incoming_frame_size) { + size_t cur_offset = static_cast(cur - beg); err = parse_frame_slice( - exec_ctx, t, + t, grpc_slice_sub_no_ref(slice, cur_offset, cur_offset + t->incoming_frame_size), 1); @@ -237,141 +231,137 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx, return err; } cur += t->incoming_frame_size; - t->incoming_stream = NULL; + t->incoming_stream = nullptr; goto dts_fh_0; /* loop */ } else { err = parse_frame_slice( - exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg), - (size_t)(end - beg)), + t, + grpc_slice_sub_no_ref(slice, static_cast(cur - beg), + static_cast(end - beg)), 0); if (err != GRPC_ERROR_NONE) { return err; } - t->incoming_frame_size -= (uint32_t)(end - cur); + t->incoming_frame_size -= static_cast(end - cur); return GRPC_ERROR_NONE; } - GPR_UNREACHABLE_CODE(return 0); + GPR_UNREACHABLE_CODE(return nullptr); } - GPR_UNREACHABLE_CODE(return 0); + GPR_UNREACHABLE_CODE(return nullptr); } -static grpc_error *init_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { +static grpc_error* init_frame_parser(grpc_chttp2_transport* t) { if (t->is_first_frame && t->incoming_frame_type != GRPC_CHTTP2_FRAME_SETTINGS) { - 
char *msg; + char* msg; gpr_asprintf( &msg, "Expected SETTINGS frame as the first frame, got frame type %d", t->incoming_frame_type); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } t->is_first_frame = false; if (t->expect_continuation_stream_id != 0) { if (t->incoming_frame_type != GRPC_CHTTP2_FRAME_CONTINUATION) { - char *msg; + char* msg; gpr_asprintf(&msg, "Expected CONTINUATION frame, got frame type %02x", t->incoming_frame_type); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } if (t->expect_continuation_stream_id != t->incoming_stream_id) { - char *msg; + char* msg; gpr_asprintf( &msg, "Expected CONTINUATION frame for grpc_chttp2_stream %08x, got " "grpc_chttp2_stream %08x", t->expect_continuation_stream_id, t->incoming_stream_id); - grpc_error *err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); return err; } - return init_header_frame_parser(exec_ctx, t, 1); + return init_header_frame_parser(t, 1); } switch (t->incoming_frame_type) { case GRPC_CHTTP2_FRAME_DATA: - return init_data_frame_parser(exec_ctx, t); + return init_data_frame_parser(t); case GRPC_CHTTP2_FRAME_HEADER: - return init_header_frame_parser(exec_ctx, t, 0); + return init_header_frame_parser(t, 0); case GRPC_CHTTP2_FRAME_CONTINUATION: return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Unexpected CONTINUATION frame"); case GRPC_CHTTP2_FRAME_RST_STREAM: - return init_rst_stream_parser(exec_ctx, t); + return init_rst_stream_parser(t); case GRPC_CHTTP2_FRAME_SETTINGS: - return init_settings_frame_parser(exec_ctx, t); + return init_settings_frame_parser(t); case GRPC_CHTTP2_FRAME_WINDOW_UPDATE: - return init_window_update_frame_parser(exec_ctx, t); + return init_window_update_frame_parser(t); case GRPC_CHTTP2_FRAME_PING: - return init_ping_parser(exec_ctx, t); + return init_ping_parser(t); case GRPC_CHTTP2_FRAME_GOAWAY: - return init_goaway_parser(exec_ctx, t); + return init_goaway_parser(t); default: - if (GRPC_TRACER_ON(grpc_http_trace)) { + if (grpc_http_trace.enabled()) { gpr_log(GPR_ERROR, "Unknown frame type %02x", t->incoming_frame_type); } - return init_skip_frame_parser(exec_ctx, t, 0); + return init_skip_frame_parser(t, 0); } } -static grpc_error *skip_parser(grpc_exec_ctx *exec_ctx, void *parser, - grpc_chttp2_transport *t, grpc_chttp2_stream *s, - grpc_slice slice, int is_last) { +static grpc_error* skip_parser(void* parser, grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_slice slice, + int is_last) { return GRPC_ERROR_NONE; } -static void skip_header(grpc_exec_ctx *exec_ctx, void *tp, grpc_mdelem md) { - GRPC_MDELEM_UNREF(exec_ctx, md); -} +static void skip_header(void* tp, grpc_mdelem md) { GRPC_MDELEM_UNREF(md); } -static grpc_error *init_skip_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +static grpc_error* init_skip_frame_parser(grpc_chttp2_transport* t, int is_header) { if (is_header) { uint8_t is_eoh = t->expect_continuation_stream_id != 0; t->parser = grpc_chttp2_header_parser_parse; t->parser_data = &t->hpack_parser; t->hpack_parser.on_header = skip_header; - t->hpack_parser.on_header_user_data = NULL; + t->hpack_parser.on_header_user_data = nullptr; t->hpack_parser.is_boundary = is_eoh; - t->hpack_parser.is_eof = (uint8_t)(is_eoh ? 
t->header_eof : 0); + t->hpack_parser.is_eof = static_cast(is_eoh ? t->header_eof : 0); } else { t->parser = skip_parser; } return GRPC_ERROR_NONE; } -void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { - init_skip_frame_parser(exec_ctx, t, - t->parser == grpc_chttp2_header_parser_parse); +void grpc_chttp2_parsing_become_skip_parser(grpc_chttp2_transport* t) { + init_skip_frame_parser(t, t->parser == grpc_chttp2_header_parser_parse); } -static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { - grpc_chttp2_stream *s = +static grpc_error* init_data_frame_parser(grpc_chttp2_transport* t) { + grpc_chttp2_stream* s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id); - grpc_error *err = GRPC_ERROR_NONE; - err = grpc_chttp2_flowctl_recv_data(&t->flow_control, - s == NULL ? NULL : &s->flow_control, - t->incoming_frame_size); - grpc_chttp2_act_on_flowctl_action( - exec_ctx, grpc_chttp2_flowctl_get_action( - &t->flow_control, s == NULL ? NULL : &s->flow_control), - t, s); + grpc_error* err = GRPC_ERROR_NONE; + grpc_core::chttp2::FlowControlAction action; + if (s == nullptr) { + err = t->flow_control->RecvData(t->incoming_frame_size); + action = t->flow_control->MakeAction(); + } else { + err = s->flow_control->RecvData(t->incoming_frame_size); + action = s->flow_control->MakeAction(); + } + grpc_chttp2_act_on_flowctl_action(action, t, s); if (err != GRPC_ERROR_NONE) { goto error_handler; } - if (s == NULL) { - return init_skip_frame_parser(exec_ctx, t, 0); + if (s == nullptr) { + return init_skip_frame_parser(t, 0); } s->received_bytes += t->incoming_frame_size; s->stats.incoming.framing_bytes += 9; if (err == GRPC_ERROR_NONE && s->read_closed) { - return init_skip_frame_parser(exec_ctx, t, 0); + return init_skip_frame_parser(t, 0); } if (err == GRPC_ERROR_NONE) { err = grpc_chttp2_data_parser_begin_frame( @@ -383,39 +373,35 @@ static grpc_error *init_data_frame_parser(grpc_exec_ctx *exec_ctx, /* t->parser = grpc_chttp2_data_parser_parse;*/ t->parser = grpc_chttp2_data_parser_parse; t->parser_data = &s->data_parser; - t->ping_state.pings_before_data_required = - t->ping_policy.max_pings_without_data; - t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST; return GRPC_ERROR_NONE; - } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) { + } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, nullptr)) { /* handle stream errors by closing the stream */ - if (s != NULL) { - grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false, err); + if (s != nullptr) { + grpc_chttp2_mark_stream_closed(t, s, true, false, err); } grpc_slice_buffer_add( &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id, GRPC_HTTP2_PROTOCOL_ERROR, &s->stats.outgoing)); - return init_skip_frame_parser(exec_ctx, t, 0); + return init_skip_frame_parser(t, 0); } else { return err; } } -static void free_timeout(void *p) { gpr_free(p); } +static void free_timeout(void* p) { gpr_free(p); } -static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp, - grpc_mdelem md) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp; - grpc_chttp2_stream *s = t->incoming_stream; +static void on_initial_header(void* tp, grpc_mdelem md) { + GPR_TIMER_SCOPE("on_initial_header", 0); - GPR_TIMER_BEGIN("on_initial_header", 0); + grpc_chttp2_transport* t = static_cast(tp); + grpc_chttp2_stream* s = t->incoming_stream; + GPR_ASSERT(s != 
nullptr); - GPR_ASSERT(s != NULL); - - if (GRPC_TRACER_ON(grpc_http_trace)) { - char *key = grpc_slice_to_c_string(GRPC_MDKEY(md)); - char *value = + if (grpc_http_trace.enabled()) { + char* key = grpc_slice_to_c_string(GRPC_MDKEY(md)); + char* value = grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", s->id, t->is_client ? "CLI" : "SVR", key, value); @@ -430,27 +416,31 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp, } if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) { - gpr_timespec *cached_timeout = - (gpr_timespec *)grpc_mdelem_get_user_data(md, free_timeout); - gpr_timespec timeout; - if (cached_timeout == NULL) { - /* not already parsed: parse it now, and store the result away */ - cached_timeout = (gpr_timespec *)gpr_malloc(sizeof(gpr_timespec)); - if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) { - char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); + grpc_millis* cached_timeout = + static_cast(grpc_mdelem_get_user_data(md, free_timeout)); + grpc_millis timeout; + if (cached_timeout != nullptr) { + timeout = *cached_timeout; + } else { + if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), &timeout)) { + char* val = grpc_slice_to_c_string(GRPC_MDVALUE(md)); gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val); gpr_free(val); - *cached_timeout = gpr_inf_future(GPR_TIMESPAN); + timeout = GRPC_MILLIS_INF_FUTURE; + } + if (GRPC_MDELEM_IS_INTERNED(md)) { + /* store the result */ + cached_timeout = + static_cast(gpr_malloc(sizeof(grpc_millis))); + *cached_timeout = timeout; + grpc_mdelem_set_user_data(md, free_timeout, cached_timeout); } - timeout = *cached_timeout; - grpc_mdelem_set_user_data(md, free_timeout, cached_timeout); - } else { - timeout = *cached_timeout; } - grpc_chttp2_incoming_metadata_buffer_set_deadline( - &s->metadata_buffer[0], - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), timeout)); - GRPC_MDELEM_UNREF(exec_ctx, md); + if (timeout != GRPC_MILLIS_INF_FUTURE) { + grpc_chttp2_incoming_metadata_buffer_set_deadline( + &s->metadata_buffer[0], grpc_core::ExecCtx::Get()->Now() + timeout); + } + GRPC_MDELEM_UNREF(md); } else { const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md); const size_t metadata_size_limit = @@ -462,41 +452,37 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp, " vs. 
%" PRIuPTR ")", new_size, metadata_size_limit); grpc_chttp2_cancel_stream( - exec_ctx, t, s, + t, s, grpc_error_set_int( GRPC_ERROR_CREATE_FROM_STATIC_STRING( "received initial metadata size exceeds limit"), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED)); - grpc_chttp2_parsing_become_skip_parser(exec_ctx, t); + grpc_chttp2_parsing_become_skip_parser(t); s->seen_error = true; - GRPC_MDELEM_UNREF(exec_ctx, md); + GRPC_MDELEM_UNREF(md); } else { - grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add( - exec_ctx, &s->metadata_buffer[0], md); + grpc_error* error = + grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[0], md); if (error != GRPC_ERROR_NONE) { - grpc_chttp2_cancel_stream(exec_ctx, t, s, error); - grpc_chttp2_parsing_become_skip_parser(exec_ctx, t); + grpc_chttp2_cancel_stream(t, s, error); + grpc_chttp2_parsing_become_skip_parser(t); s->seen_error = true; - GRPC_MDELEM_UNREF(exec_ctx, md); + GRPC_MDELEM_UNREF(md); } } } - - GPR_TIMER_END("on_initial_header", 0); } -static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp, - grpc_mdelem md) { - grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp; - grpc_chttp2_stream *s = t->incoming_stream; +static void on_trailing_header(void* tp, grpc_mdelem md) { + GPR_TIMER_SCOPE("on_trailing_header", 0); - GPR_TIMER_BEGIN("on_trailing_header", 0); + grpc_chttp2_transport* t = static_cast(tp); + grpc_chttp2_stream* s = t->incoming_stream; + GPR_ASSERT(s != nullptr); - GPR_ASSERT(s != NULL); - - if (GRPC_TRACER_ON(grpc_http_trace)) { - char *key = grpc_slice_to_c_string(GRPC_MDKEY(md)); - char *value = + if (grpc_http_trace.enabled()) { + char* key = grpc_slice_to_c_string(GRPC_MDKEY(md)); + char* value = grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_INFO, "HTTP:%d:TRL:%s: %s: %s", s->id, t->is_client ? "CLI" : "SVR", key, value); @@ -520,34 +506,31 @@ static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp, " vs. %" PRIuPTR ")", new_size, metadata_size_limit); grpc_chttp2_cancel_stream( - exec_ctx, t, s, + t, s, grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "received trailing metadata size exceeds limit"), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED)); - grpc_chttp2_parsing_become_skip_parser(exec_ctx, t); + grpc_chttp2_parsing_become_skip_parser(t); s->seen_error = true; - GRPC_MDELEM_UNREF(exec_ctx, md); + GRPC_MDELEM_UNREF(md); } else { - grpc_error *error = grpc_chttp2_incoming_metadata_buffer_add( - exec_ctx, &s->metadata_buffer[1], md); + grpc_error* error = + grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[1], md); if (error != GRPC_ERROR_NONE) { - grpc_chttp2_cancel_stream(exec_ctx, t, s, error); - grpc_chttp2_parsing_become_skip_parser(exec_ctx, t); + grpc_chttp2_cancel_stream(t, s, error); + grpc_chttp2_parsing_become_skip_parser(t); s->seen_error = true; - GRPC_MDELEM_UNREF(exec_ctx, md); + GRPC_MDELEM_UNREF(md); } } - - GPR_TIMER_END("on_trailing_header", 0); } -static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, +static grpc_error* init_header_frame_parser(grpc_chttp2_transport* t, int is_continuation) { uint8_t is_eoh = (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0; - grpc_chttp2_stream *s; + grpc_chttp2_stream* s; /* TODO(ctiller): when to increment header_frames_received? 
*/ @@ -562,18 +545,16 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0; } - t->ping_state.pings_before_data_required = - t->ping_policy.max_pings_without_data; - t->ping_state.last_ping_sent_time = gpr_inf_past(GPR_CLOCK_MONOTONIC); + t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST; /* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */ s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id); - if (s == NULL) { + if (s == nullptr) { if (is_continuation) { GRPC_CHTTP2_IF_TRACING( gpr_log(GPR_ERROR, "grpc_chttp2_stream disbanded before CONTINUATION received")); - return init_skip_frame_parser(exec_ctx, t, 1); + return init_skip_frame_parser(t, 1); } if (t->is_client) { if ((t->incoming_stream_id & 1) && @@ -583,7 +564,11 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, GRPC_CHTTP2_IF_TRACING(gpr_log( GPR_ERROR, "ignoring new grpc_chttp2_stream creation on client")); } - return init_skip_frame_parser(exec_ctx, t, 1); + grpc_error* err = init_skip_frame_parser(t, 1); + if (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_HAS_PRIORITY) { + grpc_chttp2_hpack_parser_set_has_priority(&t->hpack_parser); + } + return err; } else if (t->last_new_stream_id >= t->incoming_stream_id) { GRPC_CHTTP2_IF_TRACING(gpr_log( GPR_ERROR, @@ -591,13 +576,13 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, "last grpc_chttp2_stream " "id=%d, new grpc_chttp2_stream id=%d", t->last_new_stream_id, t->incoming_stream_id)); - return init_skip_frame_parser(exec_ctx, t, 1); + return init_skip_frame_parser(t, 1); } else if ((t->incoming_stream_id & 1) == 0) { GRPC_CHTTP2_IF_TRACING(gpr_log( GPR_ERROR, "ignoring grpc_chttp2_stream with non-client generated index %d", t->incoming_stream_id)); - return init_skip_frame_parser(exec_ctx, t, 1); + return init_skip_frame_parser(t, 1); } else if (grpc_chttp2_stream_map_size(&t->stream_map) >= t->settings[GRPC_ACKED_SETTINGS] [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) { @@ -605,22 +590,22 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, } t->last_new_stream_id = t->incoming_stream_id; s = t->incoming_stream = - grpc_chttp2_parsing_accept_stream(exec_ctx, t, t->incoming_stream_id); - if (s == NULL) { + grpc_chttp2_parsing_accept_stream(t, t->incoming_stream_id); + if (s == nullptr) { GRPC_CHTTP2_IF_TRACING( gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted")); - return init_skip_frame_parser(exec_ctx, t, 1); + return init_skip_frame_parser(t, 1); } } else { t->incoming_stream = s; } - GPR_ASSERT(s != NULL); + GPR_ASSERT(s != nullptr); s->stats.incoming.framing_bytes += 9; if (s->read_closed) { GRPC_CHTTP2_IF_TRACING(gpr_log( GPR_ERROR, "skipping already closed grpc_chttp2_stream header")); - t->incoming_stream = NULL; - return init_skip_frame_parser(exec_ctx, t, 1); + t->incoming_stream = nullptr; + return init_skip_frame_parser(t, 1); } t->parser = grpc_chttp2_header_parser_parse; t->parser_data = &t->hpack_parser; @@ -628,7 +613,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, case 0: if (t->is_client && t->header_eof) { GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "parsing Trailers-Only")); - if (s->trailing_metadata_available != NULL) { + if (s->trailing_metadata_available != nullptr) { *s->trailing_metadata_available = true; } t->hpack_parser.on_header = on_trailing_header; @@ -645,11 +630,11 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, 
break; case 2: gpr_log(GPR_ERROR, "too many header frames received"); - return init_skip_frame_parser(exec_ctx, t, 1); + return init_skip_frame_parser(t, 1); } t->hpack_parser.on_header_user_data = t; t->hpack_parser.is_boundary = is_eoh; - t->hpack_parser.is_eof = (uint8_t)(is_eoh ? t->header_eof : 0); + t->hpack_parser.is_eof = static_cast(is_eoh ? t->header_eof : 0); if (!is_continuation && (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_HAS_PRIORITY)) { grpc_chttp2_hpack_parser_set_has_priority(&t->hpack_parser); @@ -657,17 +642,16 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { - grpc_error *err = grpc_chttp2_window_update_parser_begin_frame( +static grpc_error* init_window_update_frame_parser(grpc_chttp2_transport* t) { + grpc_error* err = grpc_chttp2_window_update_parser_begin_frame( &t->simple.window_update, t->incoming_frame_size, t->incoming_frame_flags); if (err != GRPC_ERROR_NONE) return err; if (t->incoming_stream_id != 0) { - grpc_chttp2_stream *s = t->incoming_stream = + grpc_chttp2_stream* s = t->incoming_stream = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id); - if (s == NULL) { - return init_skip_frame_parser(exec_ctx, t, 0); + if (s == nullptr) { + return init_skip_frame_parser(t, 0); } s->stats.incoming.framing_bytes += 9; } @@ -676,9 +660,8 @@ static grpc_error *init_window_update_frame_parser(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { - grpc_error *err = grpc_chttp2_ping_parser_begin_frame( +static grpc_error* init_ping_parser(grpc_chttp2_transport* t) { + grpc_error* err = grpc_chttp2_ping_parser_begin_frame( &t->simple.ping, t->incoming_frame_size, t->incoming_frame_flags); if (err != GRPC_ERROR_NONE) return err; t->parser = grpc_chttp2_ping_parser_parse; @@ -686,15 +669,14 @@ static grpc_error *init_ping_parser(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { - grpc_error *err = grpc_chttp2_rst_stream_parser_begin_frame( +static grpc_error* init_rst_stream_parser(grpc_chttp2_transport* t) { + grpc_error* err = grpc_chttp2_rst_stream_parser_begin_frame( &t->simple.rst_stream, t->incoming_frame_size, t->incoming_frame_flags); if (err != GRPC_ERROR_NONE) return err; - grpc_chttp2_stream *s = t->incoming_stream = + grpc_chttp2_stream* s = t->incoming_stream = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id); if (!t->incoming_stream) { - return init_skip_frame_parser(exec_ctx, t, 0); + return init_skip_frame_parser(t, 0); } s->stats.incoming.framing_bytes += 9; t->parser = grpc_chttp2_rst_stream_parser_parse; @@ -702,9 +684,8 @@ static grpc_error *init_rst_stream_parser(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { - grpc_error *err = grpc_chttp2_goaway_parser_begin_frame( +static grpc_error* init_goaway_parser(grpc_chttp2_transport* t) { + grpc_error* err = grpc_chttp2_goaway_parser_begin_frame( &t->goaway_parser, t->incoming_frame_size, t->incoming_frame_flags); if (err != GRPC_ERROR_NONE) return err; t->parser = grpc_chttp2_goaway_parser_parse; @@ -712,14 +693,13 @@ static grpc_error *init_goaway_parser(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static grpc_error 
*init_settings_frame_parser(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t) { +static grpc_error* init_settings_frame_parser(grpc_chttp2_transport* t) { if (t->incoming_stream_id != 0) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Settings frame received for grpc_chttp2_stream"); } - grpc_error *err = grpc_chttp2_settings_parser_begin_frame( + grpc_error* err = grpc_chttp2_settings_parser_begin_frame( &t->simple.settings, t->incoming_frame_size, t->incoming_frame_flags, t->settings[GRPC_PEER_SETTINGS]); if (err != GRPC_ERROR_NONE) { @@ -729,7 +709,7 @@ static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx, memcpy(t->settings[GRPC_ACKED_SETTINGS], t->settings[GRPC_SENT_SETTINGS], GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t)); grpc_chttp2_hptbl_set_max_bytes( - exec_ctx, &t->hpack_parser.table, + &t->hpack_parser.table, t->settings[GRPC_ACKED_SETTINGS] [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]); t->sent_local_settings = 0; @@ -739,19 +719,18 @@ static grpc_error *init_settings_frame_parser(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static grpc_error *parse_frame_slice(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, grpc_slice slice, +static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice, int is_last) { - grpc_chttp2_stream *s = t->incoming_stream; - grpc_error *err = t->parser(exec_ctx, t->parser_data, t, s, slice, is_last); + grpc_chttp2_stream* s = t->incoming_stream; + grpc_error* err = t->parser(t->parser_data, t, s, slice, is_last); if (err == GRPC_ERROR_NONE) { return err; - } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, NULL)) { - if (GRPC_TRACER_ON(grpc_http_trace)) { - const char *msg = grpc_error_string(err); + } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, nullptr)) { + if (grpc_http_trace.enabled()) { + const char* msg = grpc_error_string(err); gpr_log(GPR_ERROR, "%s", msg); } - grpc_chttp2_parsing_become_skip_parser(exec_ctx, t); + grpc_chttp2_parsing_become_skip_parser(t); if (s) { s->forced_close_error = err; grpc_slice_buffer_add( diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_lists.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_lists.cc similarity index 67% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_lists.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_lists.cc index 47cd22d17..6626170a7 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_lists.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_lists.cc @@ -16,11 +16,14 @@ * */ +#include + +#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h" #include "src/core/ext/transport/chttp2/transport/internal.h" #include -static char *stream_list_id_string(grpc_chttp2_stream_list_id id) { +static const char* stream_list_id_string(grpc_chttp2_stream_list_id id) { switch (id) { case GRPC_CHTTP2_LIST_WRITABLE: return "writable"; @@ -38,41 +41,40 @@ static char *stream_list_id_string(grpc_chttp2_stream_list_id id) { GPR_UNREACHABLE_CODE(return "unknown"); } -grpc_tracer_flag grpc_trace_http2_stream_state = - GRPC_TRACER_INITIALIZER(false, "http2_stream_state"); +grpc_core::TraceFlag grpc_trace_http2_stream_state(false, "http2_stream_state"); /* core list management */ -static bool stream_list_empty(grpc_chttp2_transport *t, +static bool stream_list_empty(grpc_chttp2_transport* t, grpc_chttp2_stream_list_id id) { - return t->lists[id].head == NULL; + return t->lists[id].head == nullptr; } -static 
bool stream_list_pop(grpc_chttp2_transport *t, - grpc_chttp2_stream **stream, +static bool stream_list_pop(grpc_chttp2_transport* t, + grpc_chttp2_stream** stream, grpc_chttp2_stream_list_id id) { - grpc_chttp2_stream *s = t->lists[id].head; + grpc_chttp2_stream* s = t->lists[id].head; if (s) { - grpc_chttp2_stream *new_head = s->links[id].next; + grpc_chttp2_stream* new_head = s->links[id].next; GPR_ASSERT(s->included[id]); if (new_head) { t->lists[id].head = new_head; - new_head->links[id].prev = NULL; + new_head->links[id].prev = nullptr; } else { - t->lists[id].head = NULL; - t->lists[id].tail = NULL; + t->lists[id].head = nullptr; + t->lists[id].tail = nullptr; } s->included[id] = 0; } *stream = s; - if (s && GRPC_TRACER_ON(grpc_trace_http2_stream_state)) { - gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id, + if (s && grpc_trace_http2_stream_state.enabled()) { + gpr_log(GPR_INFO, "%p[%d][%s]: pop from %s", t, s->id, t->is_client ? "cli" : "svr", stream_list_id_string(id)); } - return s != 0; + return s != nullptr; } -static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s, +static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s, grpc_chttp2_stream_list_id id) { GPR_ASSERT(s->included[id]); s->included[id] = 0; @@ -87,14 +89,14 @@ static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s, } else { t->lists[id].tail = s->links[id].prev; } - if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) { - gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id, + if (grpc_trace_http2_stream_state.enabled()) { + gpr_log(GPR_INFO, "%p[%d][%s]: remove from %s", t, s->id, t->is_client ? "cli" : "svr", stream_list_id_string(id)); } } -static bool stream_list_maybe_remove(grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +static bool stream_list_maybe_remove(grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_chttp2_stream_list_id id) { if (s->included[id]) { stream_list_remove(t, s, id); @@ -104,13 +106,13 @@ static bool stream_list_maybe_remove(grpc_chttp2_transport *t, } } -static void stream_list_add_tail(grpc_chttp2_transport *t, - grpc_chttp2_stream *s, +static void stream_list_add_tail(grpc_chttp2_transport* t, + grpc_chttp2_stream* s, grpc_chttp2_stream_list_id id) { - grpc_chttp2_stream *old_tail; + grpc_chttp2_stream* old_tail; GPR_ASSERT(!s->included[id]); old_tail = t->lists[id].tail; - s->links[id].next = NULL; + s->links[id].next = nullptr; s->links[id].prev = old_tail; if (old_tail) { old_tail->links[id].next = s; @@ -119,13 +121,13 @@ static void stream_list_add_tail(grpc_chttp2_transport *t, } t->lists[id].tail = s; s->included[id] = 1; - if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) { - gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id, + if (grpc_trace_http2_stream_state.enabled()) { + gpr_log(GPR_INFO, "%p[%d][%s]: add to %s", t, s->id, t->is_client ? 
"cli" : "svr", stream_list_id_string(id)); } } -static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s, +static bool stream_list_add(grpc_chttp2_transport* t, grpc_chttp2_stream* s, grpc_chttp2_stream_list_id id) { if (s->included[id]) { return false; @@ -136,77 +138,79 @@ static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s, /* wrappers for specializations */ -bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { GPR_ASSERT(s->id != 0); return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITABLE); } -bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream **s) { +bool grpc_chttp2_list_pop_writable_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream** s) { return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITABLE); } -bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +bool grpc_chttp2_list_remove_writable_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WRITABLE); } -bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +bool grpc_chttp2_list_add_writing_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { return stream_list_add(t, s, GRPC_CHTTP2_LIST_WRITING); } -bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport *t) { +bool grpc_chttp2_list_have_writing_streams(grpc_chttp2_transport* t) { return !stream_list_empty(t, GRPC_CHTTP2_LIST_WRITING); } -bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream **s) { +bool grpc_chttp2_list_pop_writing_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream** s) { return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WRITING); } -void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +void grpc_chttp2_list_add_waiting_for_concurrency(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY); } -bool grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport *t, - grpc_chttp2_stream **s) { +bool grpc_chttp2_list_pop_waiting_for_concurrency(grpc_chttp2_transport* t, + grpc_chttp2_stream** s) { return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY); } -void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY); } -void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { + GPR_ASSERT(t->flow_control->flow_control_enabled()); stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); } -bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t, - grpc_chttp2_stream **s) { +bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport* t, + grpc_chttp2_stream** s) { return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); } -void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { 
stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); } -void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { + GPR_ASSERT(t->flow_control->flow_control_enabled()); stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM); } -bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream **s) { +bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream** s) { return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM); } -bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t, - grpc_chttp2_stream *s) { +bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport* t, + grpc_chttp2_stream* s) { return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM); } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.cc similarity index 58% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.cc index d6079a9a3..f300e2356 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.cc @@ -16,30 +16,33 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/stream_map.h" #include #include #include -#include -void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map *map, +void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map* map, size_t initial_capacity) { GPR_ASSERT(initial_capacity > 1); - map->keys = (uint32_t *)gpr_malloc(sizeof(uint32_t) * initial_capacity); - map->values = (void **)gpr_malloc(sizeof(void *) * initial_capacity); + map->keys = + static_cast(gpr_malloc(sizeof(uint32_t) * initial_capacity)); + map->values = + static_cast(gpr_malloc(sizeof(void*) * initial_capacity)); map->count = 0; map->free = 0; map->capacity = initial_capacity; } -void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map *map) { +void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map* map) { gpr_free(map->keys); gpr_free(map->values); } -static size_t compact(uint32_t *keys, void **values, size_t count) { +static size_t compact(uint32_t* keys, void** values, size_t count) { size_t i, out; for (i = 0, out = 0; i < count; i++) { @@ -53,16 +56,16 @@ static size_t compact(uint32_t *keys, void **values, size_t count) { return out; } -void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key, - void *value) { +void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map* map, uint32_t key, + void* value) { size_t count = map->count; size_t capacity = map->capacity; - uint32_t *keys = map->keys; - void **values = map->values; + uint32_t* keys = map->keys; + void** values = map->values; GPR_ASSERT(count == 0 || keys[count - 1] < key); GPR_ASSERT(value); - GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL); + GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == nullptr); if (count == capacity) { if (map->free > capacity / 4) { @@ -72,10 +75,10 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key, /* resize when less than 25% of the table is free, because compaction won't help much */ map->capacity = capacity = 3 * capacity / 2; - map->keys = keys = - (uint32_t *)gpr_realloc(keys, capacity * sizeof(uint32_t)); + map->keys 
= keys = static_cast( + gpr_realloc(keys, capacity * sizeof(uint32_t))); map->values = values = - (void **)gpr_realloc(values, capacity * sizeof(void *)); + static_cast(gpr_realloc(values, capacity * sizeof(void*))); } } @@ -84,15 +87,15 @@ void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key, map->count = count + 1; } -static void **find(grpc_chttp2_stream_map *map, uint32_t key) { +static void** find(grpc_chttp2_stream_map* map, uint32_t key) { size_t min_idx = 0; size_t max_idx = map->count; size_t mid_idx; - uint32_t *keys = map->keys; - void **values = map->values; + uint32_t* keys = map->keys; + void** values = map->values; uint32_t mid_key; - if (max_idx == 0) return NULL; + if (max_idx == 0) return nullptr; while (min_idx < max_idx) { /* find the midpoint, avoiding overflow */ @@ -109,50 +112,51 @@ static void **find(grpc_chttp2_stream_map *map, uint32_t key) { } } - return NULL; + return nullptr; } -void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key) { - void **pvalue = find(map, key); - void *out = NULL; - if (pvalue != NULL) { +void* grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map* map, uint32_t key) { + void** pvalue = find(map, key); + void* out = nullptr; + if (pvalue != nullptr) { out = *pvalue; - *pvalue = NULL; - map->free += (out != NULL); + *pvalue = nullptr; + map->free += (out != nullptr); /* recognize complete emptyness and ensure we can skip * defragmentation later */ if (map->free == map->count) { map->free = map->count = 0; } - GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == NULL); + GPR_ASSERT(grpc_chttp2_stream_map_find(map, key) == nullptr); } return out; } -void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key) { - void **pvalue = find(map, key); - return pvalue != NULL ? *pvalue : NULL; +void* grpc_chttp2_stream_map_find(grpc_chttp2_stream_map* map, uint32_t key) { + void** pvalue = find(map, key); + return pvalue != nullptr ? *pvalue : nullptr; } -size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map) { +size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map* map) { return map->count - map->free; } -void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map) { +void* grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map* map) { if (map->count == map->free) { - return NULL; + return nullptr; } if (map->free != 0) { map->count = compact(map->keys, map->values, map->count); map->free = 0; + GPR_ASSERT(map->count > 0); } - return map->values[((size_t)rand()) % map->count]; + return map->values[(static_cast(rand())) % map->count]; } -void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map, - void (*f)(void *user_data, uint32_t key, - void *value), - void *user_data) { +void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map* map, + void (*f)(void* user_data, uint32_t key, + void* value), + void* user_data) { size_t i; for (i = 0; i < map->count; i++) { diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.h b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.h index 30c50ba32..9fb8826e8 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.h +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/stream_map.h @@ -30,39 +30,39 @@ Adds are restricted to strictly higher keys than previously seen (this is guaranteed by http2). 
*/ typedef struct { - uint32_t *keys; - void **values; + uint32_t* keys; + void** values; size_t count; size_t free; size_t capacity; } grpc_chttp2_stream_map; -void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map *map, +void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map* map, size_t initial_capacity); -void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map *map); +void grpc_chttp2_stream_map_destroy(grpc_chttp2_stream_map* map); /* Add a new key: given http2 semantics, new keys must always be greater than existing keys - this is asserted */ -void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map *map, uint32_t key, - void *value); +void grpc_chttp2_stream_map_add(grpc_chttp2_stream_map* map, uint32_t key, + void* value); /* Delete an existing key - returns the previous value of the key if it existed, or NULL otherwise */ -void *grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map *map, uint32_t key); +void* grpc_chttp2_stream_map_delete(grpc_chttp2_stream_map* map, uint32_t key); /* Return an existing key, or NULL if it does not exist */ -void *grpc_chttp2_stream_map_find(grpc_chttp2_stream_map *map, uint32_t key); +void* grpc_chttp2_stream_map_find(grpc_chttp2_stream_map* map, uint32_t key); /* Return a random entry */ -void *grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map *map); +void* grpc_chttp2_stream_map_rand(grpc_chttp2_stream_map* map); /* How many (populated) entries are in the stream map? */ -size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map *map); +size_t grpc_chttp2_stream_map_size(grpc_chttp2_stream_map* map); /* Callback on each stream */ -void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map *map, - void (*f)(void *user_data, uint32_t key, - void *value), - void *user_data); +void grpc_chttp2_stream_map_for_each(grpc_chttp2_stream_map* map, + void (*f)(void* user_data, uint32_t key, + void* value), + void* user_data); #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_STREAM_MAP_H */ diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/varint.c b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/varint.cc similarity index 78% rename from Sources/CgRPC/src/core/ext/transport/chttp2/transport/varint.c rename to Sources/CgRPC/src/core/ext/transport/chttp2/transport/varint.cc index 0d94ddcbc..d4b01788b 100644 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/varint.c +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/varint.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/ext/transport/chttp2/transport/varint.h" uint32_t grpc_chttp2_hpack_varint_length(uint32_t tail_value) { @@ -36,19 +38,19 @@ void grpc_chttp2_hpack_write_varint_tail(uint32_t tail_value, uint8_t* target, uint32_t tail_length) { switch (tail_length) { case 5: - target[4] = (uint8_t)((tail_value >> 28) | 0x80); + target[4] = static_cast((tail_value >> 28) | 0x80); /* fallthrough */ case 4: - target[3] = (uint8_t)((tail_value >> 21) | 0x80); + target[3] = static_cast((tail_value >> 21) | 0x80); /* fallthrough */ case 3: - target[2] = (uint8_t)((tail_value >> 14) | 0x80); + target[2] = static_cast((tail_value >> 14) | 0x80); /* fallthrough */ case 2: - target[1] = (uint8_t)((tail_value >> 7) | 0x80); + target[1] = static_cast((tail_value >> 7) | 0x80); /* fallthrough */ case 1: - target[0] = (uint8_t)((tail_value) | 0x80); + target[0] = static_cast((tail_value) | 0x80); } target[tail_length - 1] &= 0x7f; } diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/writing.c 
b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/writing.c deleted file mode 100644 index be1af1601..000000000 --- a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/writing.c +++ /dev/null @@ -1,534 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/transport/chttp2/transport/internal.h" - -#include - -#include - -#include "src/core/lib/debug/stats.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/transport/http2_errors.h" - -static void add_to_write_list(grpc_chttp2_write_cb **list, - grpc_chttp2_write_cb *cb) { - cb->next = *list; - *list = cb; -} - -static void finish_write_cb(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_stream *s, grpc_chttp2_write_cb *cb, - grpc_error *error) { - grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error, - "finish_write_cb"); - cb->next = t->write_cb_pool; - t->write_cb_pool = cb; -} - -static void collapse_pings_from_into(grpc_chttp2_transport *t, - grpc_chttp2_ping_type ping_type, - grpc_chttp2_ping_queue *pq) { - for (size_t i = 0; i < GRPC_CHTTP2_PCL_COUNT; i++) { - grpc_closure_list_move(&t->ping_queues[ping_type].lists[i], &pq->lists[i]); - } -} - -static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx, - grpc_chttp2_transport *t, - grpc_chttp2_ping_type ping_type) { - grpc_chttp2_ping_queue *pq = &t->ping_queues[ping_type]; - if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) { - /* no ping needed: wait */ - return; - } - if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) { - /* ping already in-flight: wait */ - if (GRPC_TRACER_ON(grpc_http_trace) || - GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "Ping delayed [%p]: already pinging", t->peer_string); - } - return; - } - if (t->ping_state.pings_before_data_required == 0 && - t->ping_policy.max_pings_without_data != 0) { - /* need to receive something of substance before sending a ping again */ - if (GRPC_TRACER_ON(grpc_http_trace) || - GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "Ping delayed [%p]: too many recent pings: %d/%d", - t->peer_string, t->ping_state.pings_before_data_required, - t->ping_policy.max_pings_without_data); - } - return; - } - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_allowed_ping = - gpr_time_add(t->ping_state.last_ping_sent_time, - t->ping_policy.min_sent_ping_interval_without_data); - if (t->keepalive_permit_without_calls == 0 && - grpc_chttp2_stream_map_size(&t->stream_map) == 0) { - next_allowed_ping = gpr_time_add(t->ping_recv_state.last_ping_recv_time, - gpr_time_from_seconds(7200, GPR_TIMESPAN)); - } - /* gpr_log(GPR_DEBUG, "next_allowed_ping:%d.%09d now:%d.%09d", - (int)next_allowed_ping.tv_sec, (int)next_allowed_ping.tv_nsec, - (int)now.tv_sec, (int)now.tv_nsec); */ - if (gpr_time_cmp(next_allowed_ping, now) > 0) { - /* not enough elapsed time between successive pings */ 
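Both the gpr_timespec arithmetic being removed here and the grpc_millis version added later in this patch implement the same send-side ping throttle: compute the earliest allowed send time from the last ping and the policy interval (with a 2-hour fallback when keepalives without calls are not permitted and no streams are open), and defer via a timer if that time is still in the future. A minimal compilable sketch of that decision, with simplified made-up names (may_send_ping, PingPolicy stand in for the transport fields and are not gRPC APIs):

#include <cstddef>
#include <cstdint>

// Illustrative only: mirrors the throttle above using plain millisecond values.
struct PingPolicy {
  int64_t min_sent_ping_interval_without_data_ms;
};

// Returns true if a ping may go out at now_ms; otherwise *retry_at_ms is the
// earliest re-attempt time (the real code arms a delayed-ping timer for it).
static bool may_send_ping(int64_t now_ms, int64_t last_ping_sent_ms,
                          bool keepalive_permit_without_calls,
                          size_t open_streams, const PingPolicy& policy,
                          int64_t* retry_at_ms) {
  const int64_t interval_ms =
      (!keepalive_permit_without_calls && open_streams == 0)
          ? INT64_C(7200) * 1000  // same 2-hour fallback as in the code above
          : policy.min_sent_ping_interval_without_data_ms;
  const int64_t next_allowed_ms = last_ping_sent_ms + interval_ms;
  if (next_allowed_ms > now_ms) {
    *retry_at_ms = next_allowed_ms;
    return false;
  }
  return true;
}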
- if (GRPC_TRACER_ON(grpc_http_trace) || - GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, - "Ping delayed [%p]: not enough time elapsed since last ping", - t->peer_string); - } - if (!t->ping_state.is_delayed_ping_timer_set) { - t->ping_state.is_delayed_ping_timer_set = true; - grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer, - next_allowed_ping, &t->retry_initiate_ping_locked, - gpr_now(GPR_CLOCK_MONOTONIC)); - } - return; - } - /* coalesce equivalent pings into this one */ - switch (ping_type) { - case GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE: - collapse_pings_from_into(t, GRPC_CHTTP2_PING_ON_NEXT_WRITE, pq); - break; - case GRPC_CHTTP2_PING_ON_NEXT_WRITE: - break; - case GRPC_CHTTP2_PING_TYPE_COUNT: - GPR_UNREACHABLE_CODE(break); - } - pq->inflight_id = t->ping_ctr * GRPC_CHTTP2_PING_TYPE_COUNT + ping_type; - t->ping_ctr++; - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]); - grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT], - &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]); - grpc_slice_buffer_add(&t->outbuf, - grpc_chttp2_ping_create(false, pq->inflight_id)); - GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx); - t->ping_state.last_ping_sent_time = now; - if (GRPC_TRACER_ON(grpc_http_trace) || - GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "Ping sent [%p]: %d/%d", t->peer_string, - t->ping_state.pings_before_data_required, - t->ping_policy.max_pings_without_data); - } - t->ping_state.pings_before_data_required -= - (t->ping_state.pings_before_data_required != 0); -} - -static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_chttp2_stream *s, int64_t send_bytes, - grpc_chttp2_write_cb **list, int64_t *ctr, - grpc_error *error) { - bool sched_any = false; - grpc_chttp2_write_cb *cb = *list; - *list = NULL; - *ctr += send_bytes; - while (cb) { - grpc_chttp2_write_cb *next = cb->next; - if (cb->call_at_byte <= *ctr) { - sched_any = true; - finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error)); - } else { - add_to_write_list(list, cb); - } - cb = next; - } - GRPC_ERROR_UNREF(error); - return sched_any; -} - -static bool stream_ref_if_not_destroyed(gpr_refcount *r) { - gpr_atm count; - do { - count = gpr_atm_acq_load(&r->count); - if (count == 0) return false; - } while (!gpr_atm_rel_cas(&r->count, count, count + 1)); - return true; -} - -/* How many bytes would we like to put on the wire during a single syscall */ -static uint32_t target_write_size(grpc_chttp2_transport *t) { - return 1024 * 1024; -} - -// Returns true if initial_metadata contains only default headers. 
-static bool is_default_initial_metadata(grpc_metadata_batch *initial_metadata) { - return initial_metadata->list.default_count == initial_metadata->list.count; -} - -grpc_chttp2_begin_write_result grpc_chttp2_begin_write( - grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) { - grpc_chttp2_stream *s; - - /* stats histogram counters: we increment these throughout this function, - and at the end publish to the central stats histograms */ - int flow_control_writes = 0; - int initial_metadata_writes = 0; - int trailing_metadata_writes = 0; - int message_writes = 0; - - GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx); - - GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0); - - if (t->dirtied_local_settings && !t->sent_local_settings) { - grpc_slice_buffer_add( - &t->outbuf, - grpc_chttp2_settings_create( - t->settings[GRPC_SENT_SETTINGS], t->settings[GRPC_LOCAL_SETTINGS], - t->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS)); - t->force_send_settings = 0; - t->dirtied_local_settings = 0; - t->sent_local_settings = 1; - GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx); - } - - /* simple writes are queued to qbuf, and flushed here */ - grpc_slice_buffer_move_into(&t->qbuf, &t->outbuf); - GPR_ASSERT(t->qbuf.count == 0); - - grpc_chttp2_hpack_compressor_set_max_table_size( - &t->hpack_compressor, - t->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]); - - if (t->flow_control.remote_window > 0) { - while (grpc_chttp2_list_pop_stalled_by_transport(t, &s)) { - if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) { - stream_ref_if_not_destroyed(&s->refcount->refs); - } - } - } - - grpc_chttp2_begin_write_result result = {false, false, false}; - - /* for each grpc_chttp2_stream that's become writable, frame it's data - (according to available window sizes) and add to the output buffer */ - while (true) { - if (t->outbuf.length > target_write_size(t)) { - result.partial = true; - break; - } - - if (!grpc_chttp2_list_pop_writable_stream(t, &s)) { - break; - } - - bool sent_initial_metadata = s->sent_initial_metadata; - bool now_writing = false; - - GRPC_CHTTP2_IF_TRACING( - gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t, - t->is_client ? "CLIENT" : "SERVER", s->id, - sent_initial_metadata, s->send_initial_metadata != NULL, - (int)(s->flow_control.local_window_delta - - s->flow_control.announced_window_delta))); - - grpc_mdelem *extra_headers_for_trailing_metadata[2]; - size_t num_extra_headers_for_trailing_metadata = 0; - - /* send initial metadata if it's available */ - if (!sent_initial_metadata && s->send_initial_metadata != NULL) { - // We skip this on the server side if there is no custom initial - // metadata, there are no messages to send, and we are also sending - // trailing metadata. 
This results in a Trailers-Only response, - // which is required for retries, as per: - // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#when-retries-are-valid - if (t->is_client || s->fetching_send_message != NULL || - s->flow_controlled_buffer.length != 0 || - s->send_trailing_metadata == NULL || - !is_default_initial_metadata(s->send_initial_metadata)) { - grpc_encode_header_options hopt = { - .stream_id = s->id, - .is_eof = false, - .use_true_binary_metadata = - t->settings - [GRPC_PEER_SETTINGS] - [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] != 0, - .max_frame_size = t->settings[GRPC_PEER_SETTINGS] - [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], - .stats = &s->stats.outgoing}; - grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0, - s->send_initial_metadata, &hopt, &t->outbuf); - now_writing = true; - if (!t->is_client) { - t->ping_recv_state.last_ping_recv_time = - gpr_inf_past(GPR_CLOCK_MONOTONIC); - t->ping_recv_state.ping_strikes = 0; - } - initial_metadata_writes++; - } else { - GRPC_CHTTP2_IF_TRACING( - gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)")); - // When sending Trailers-Only, we need to move the :status and - // content-type headers to the trailers. - if (s->send_initial_metadata->idx.named.status != NULL) { - extra_headers_for_trailing_metadata - [num_extra_headers_for_trailing_metadata++] = - &s->send_initial_metadata->idx.named.status->md; - } - if (s->send_initial_metadata->idx.named.content_type != NULL) { - extra_headers_for_trailing_metadata - [num_extra_headers_for_trailing_metadata++] = - &s->send_initial_metadata->idx.named.content_type->md; - } - trailing_metadata_writes++; - } - s->send_initial_metadata = NULL; - s->sent_initial_metadata = true; - sent_initial_metadata = true; - result.early_results_scheduled = true; - grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_NONE, - "send_initial_metadata_finished"); - } - /* send any window updates */ - uint32_t stream_announce = grpc_chttp2_flowctl_maybe_send_stream_update( - &t->flow_control, &s->flow_control); - if (stream_announce > 0) { - grpc_slice_buffer_add( - &t->outbuf, grpc_chttp2_window_update_create(s->id, stream_announce, - &s->stats.outgoing)); - if (!t->is_client) { - t->ping_recv_state.last_ping_recv_time = - gpr_inf_past(GPR_CLOCK_MONOTONIC); - t->ping_recv_state.ping_strikes = 0; - } - flow_control_writes++; - } - if (sent_initial_metadata) { - /* send any body bytes, if allowed by flow control */ - if (s->flow_controlled_buffer.length > 0 || - s->compressed_data_buffer.length > 0) { - uint32_t stream_remote_window = (uint32_t)GPR_MAX( - 0, - s->flow_control.remote_window_delta + - (int64_t)t->settings[GRPC_PEER_SETTINGS] - [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]); - uint32_t max_outgoing = (uint32_t)GPR_MIN( - t->settings[GRPC_PEER_SETTINGS] - [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], - GPR_MIN(stream_remote_window, t->flow_control.remote_window)); - if (max_outgoing > 0) { - bool is_last_data_frame = false; - bool is_last_frame = false; - size_t sending_bytes_before = s->sending_bytes; - while ((s->flow_controlled_buffer.length > 0 || - s->compressed_data_buffer.length > 0) && - max_outgoing > 0) { - if (s->compressed_data_buffer.length > 0) { - uint32_t send_bytes = (uint32_t)GPR_MIN( - max_outgoing, s->compressed_data_buffer.length); - is_last_data_frame = - (send_bytes == s->compressed_data_buffer.length && - s->flow_controlled_buffer.length == 0 && - s->fetching_send_message == 
NULL); - if (is_last_data_frame && s->send_trailing_metadata != NULL && - s->stream_compression_ctx != NULL) { - if (!grpc_stream_compress( - s->stream_compression_ctx, &s->flow_controlled_buffer, - &s->compressed_data_buffer, NULL, MAX_SIZE_T, - GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) { - gpr_log(GPR_ERROR, "Stream compression failed."); - } - grpc_stream_compression_context_destroy( - s->stream_compression_ctx); - s->stream_compression_ctx = NULL; - /* After finish, bytes in s->compressed_data_buffer may be - * more than max_outgoing. Start another round of the current - * while loop so that send_bytes and is_last_data_frame are - * recalculated. */ - continue; - } - is_last_frame = - is_last_data_frame && s->send_trailing_metadata != NULL && - grpc_metadata_batch_is_empty(s->send_trailing_metadata); - grpc_chttp2_encode_data(s->id, &s->compressed_data_buffer, - send_bytes, is_last_frame, - &s->stats.outgoing, &t->outbuf); - grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control, - send_bytes); - max_outgoing -= send_bytes; - if (s->compressed_data_buffer.length == 0) { - s->sending_bytes += s->uncompressed_data_size; - } - } else { - if (s->stream_compression_ctx == NULL) { - s->stream_compression_ctx = - grpc_stream_compression_context_create( - s->stream_compression_method); - } - s->uncompressed_data_size = s->flow_controlled_buffer.length; - if (!grpc_stream_compress( - s->stream_compression_ctx, &s->flow_controlled_buffer, - &s->compressed_data_buffer, NULL, MAX_SIZE_T, - GRPC_STREAM_COMPRESSION_FLUSH_SYNC)) { - gpr_log(GPR_ERROR, "Stream compression failed."); - } - } - } - if (!t->is_client) { - t->ping_recv_state.last_ping_recv_time = - gpr_inf_past(GPR_CLOCK_MONOTONIC); - t->ping_recv_state.ping_strikes = 0; - } - if (is_last_frame) { - s->send_trailing_metadata = NULL; - s->sent_trailing_metadata = true; - if (!t->is_client && !s->read_closed) { - grpc_slice_buffer_add(&t->outbuf, grpc_chttp2_rst_stream_create( - s->id, GRPC_HTTP2_NO_ERROR, - &s->stats.outgoing)); - } - grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1, - GRPC_ERROR_NONE); - } - result.early_results_scheduled |= - update_list(exec_ctx, t, s, - (int64_t)(s->sending_bytes - sending_bytes_before), - &s->on_flow_controlled_cbs, - &s->flow_controlled_bytes_flowed, GRPC_ERROR_NONE); - now_writing = true; - if (s->flow_controlled_buffer.length > 0 || - s->compressed_data_buffer.length > 0) { - GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork"); - grpc_chttp2_list_add_writable_stream(t, s); - } - message_writes++; - } else if (t->flow_control.remote_window == 0) { - grpc_chttp2_list_add_stalled_by_transport(t, s); - now_writing = true; - } else if (stream_remote_window == 0) { - grpc_chttp2_list_add_stalled_by_stream(t, s); - now_writing = true; - } - } - if (s->send_trailing_metadata != NULL && - s->fetching_send_message == NULL && - s->flow_controlled_buffer.length == 0 && - s->compressed_data_buffer.length == 0) { - GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata")); - if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) { - grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true, - &s->stats.outgoing, &t->outbuf); - } else { - grpc_encode_header_options hopt = { - .stream_id = s->id, - .is_eof = true, - .use_true_binary_metadata = - t->settings - [GRPC_PEER_SETTINGS] - [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] != - 0, - .max_frame_size = - t->settings[GRPC_PEER_SETTINGS] - [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], - .stats = &s->stats.outgoing}; 
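One mechanical consequence of the .c → .cc renames visible in this hunk: the deleted block above builds grpc_encode_header_options with C99 designated initializers (.stream_id = ..., .is_eof = ...), which are not available in C++ before C++20, so the rewritten writing.cc later in this patch initializes the same struct positionally and carries the field names in comments instead. A small illustration of that pattern with a made-up struct (FrameOptions is not a gRPC type):

#include <cstddef>
#include <cstdint>

struct FrameOptions {
  uint32_t stream_id;
  bool is_eof;
  bool use_true_binary_metadata;
  size_t max_frame_size;
};

// Pre-C++20 aggregates cannot use .field = value, so the order must match the
// declaration and comments document which field each value fills:
FrameOptions hopt = {
    /*stream_id=*/3,
    /*is_eof=*/true,
    /*use_true_binary_metadata=*/false,
    /*max_frame_size=*/16384};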
- grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, - extra_headers_for_trailing_metadata, - num_extra_headers_for_trailing_metadata, - s->send_trailing_metadata, &hopt, - &t->outbuf); - trailing_metadata_writes++; - } - s->send_trailing_metadata = NULL; - s->sent_trailing_metadata = true; - if (!t->is_client && !s->read_closed) { - grpc_slice_buffer_add( - &t->outbuf, grpc_chttp2_rst_stream_create( - s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing)); - } - grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1, - GRPC_ERROR_NONE); - now_writing = true; - result.early_results_scheduled = true; - grpc_chttp2_complete_closure_step( - exec_ctx, t, s, &s->send_trailing_metadata_finished, - GRPC_ERROR_NONE, "send_trailing_metadata_finished"); - } - } - - if (now_writing) { - GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE( - exec_ctx, initial_metadata_writes); - GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes); - GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE( - exec_ctx, trailing_metadata_writes); - GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, - flow_control_writes); - - if (!grpc_chttp2_list_add_writing_stream(t, s)) { - /* already in writing list: drop ref */ - GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing"); - } - } else { - GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write"); - } - } - - uint32_t transport_announce = - grpc_chttp2_flowctl_maybe_send_transport_update(&t->flow_control); - if (transport_announce) { - maybe_initiate_ping(exec_ctx, t, - GRPC_CHTTP2_PING_BEFORE_TRANSPORT_WINDOW_UPDATE); - grpc_transport_one_way_stats throwaway_stats; - grpc_slice_buffer_add( - &t->outbuf, grpc_chttp2_window_update_create(0, transport_announce, - &throwaway_stats)); - if (!t->is_client) { - t->ping_recv_state.last_ping_recv_time = - gpr_inf_past(GPR_CLOCK_MONOTONIC); - t->ping_recv_state.ping_strikes = 0; - } - } - - for (size_t i = 0; i < t->ping_ack_count; i++) { - grpc_slice_buffer_add(&t->outbuf, - grpc_chttp2_ping_create(1, t->ping_acks[i])); - } - t->ping_ack_count = 0; - - maybe_initiate_ping(exec_ctx, t, GRPC_CHTTP2_PING_ON_NEXT_WRITE); - - GPR_TIMER_END("grpc_chttp2_begin_write", 0); - - result.writing = t->outbuf.count > 0; - return result; -} - -void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, - grpc_error *error) { - GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0); - grpc_chttp2_stream *s; - - while (grpc_chttp2_list_pop_writing_stream(t, &s)) { - if (s->sending_bytes != 0) { - update_list(exec_ctx, t, s, (int64_t)s->sending_bytes, - &s->on_write_finished_cbs, &s->flow_controlled_bytes_written, - GRPC_ERROR_REF(error)); - s->sending_bytes = 0; - } - GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end"); - } - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &t->outbuf); - GRPC_ERROR_UNREF(error); - GPR_TIMER_END("grpc_chttp2_end_write", 0); -} diff --git a/Sources/CgRPC/src/core/ext/transport/chttp2/transport/writing.cc b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/writing.cc new file mode 100644 index 000000000..85efe2708 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/transport/chttp2/transport/writing.cc @@ -0,0 +1,641 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/ext/transport/chttp2/transport/internal.h" + +#include + +#include + +#include "src/core/lib/debug/stats.h" +#include "src/core/lib/profiling/timers.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/transport/http2_errors.h" + +static void add_to_write_list(grpc_chttp2_write_cb** list, + grpc_chttp2_write_cb* cb) { + cb->next = *list; + *list = cb; +} + +static void finish_write_cb(grpc_chttp2_transport* t, grpc_chttp2_stream* s, + grpc_chttp2_write_cb* cb, grpc_error* error) { + grpc_chttp2_complete_closure_step(t, s, &cb->closure, error, + "finish_write_cb"); + cb->next = t->write_cb_pool; + t->write_cb_pool = cb; +} + +static void maybe_initiate_ping(grpc_chttp2_transport* t) { + grpc_chttp2_ping_queue* pq = &t->ping_queue; + if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) { + /* no ping needed: wait */ + return; + } + if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) { + /* ping already in-flight: wait */ + if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) { + gpr_log(GPR_INFO, "%s: Ping delayed [%p]: already pinging", + t->is_client ? "CLIENT" : "SERVER", t->peer_string); + } + return; + } + if (t->ping_state.pings_before_data_required == 0 && + t->ping_policy.max_pings_without_data != 0) { + /* need to receive something of substance before sending a ping again */ + if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) { + gpr_log(GPR_INFO, "%s: Ping delayed [%p]: too many recent pings: %d/%d", + t->is_client ? "CLIENT" : "SERVER", t->peer_string, + t->ping_state.pings_before_data_required, + t->ping_policy.max_pings_without_data); + } + return; + } + grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + + grpc_millis next_allowed_ping_interval = + (t->keepalive_permit_without_calls == 0 && + grpc_chttp2_stream_map_size(&t->stream_map) == 0) + ? 7200 * GPR_MS_PER_SEC + : t->ping_policy.min_sent_ping_interval_without_data; + grpc_millis next_allowed_ping = + t->ping_state.last_ping_sent_time + next_allowed_ping_interval; + + if (next_allowed_ping > now) { + /* not enough elapsed time between successive pings */ + if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) { + gpr_log(GPR_INFO, + "%s: Ping delayed [%p]: not enough time elapsed since last ping. " + " Last ping %f: Next ping %f: Now %f", + t->is_client ? 
"CLIENT" : "SERVER", t->peer_string, + static_cast(t->ping_state.last_ping_sent_time), + static_cast(next_allowed_ping), static_cast(now)); + } + if (!t->ping_state.is_delayed_ping_timer_set) { + t->ping_state.is_delayed_ping_timer_set = true; + GRPC_CHTTP2_REF_TRANSPORT(t, "retry_initiate_ping_locked"); + grpc_timer_init(&t->ping_state.delayed_ping_timer, next_allowed_ping, + &t->retry_initiate_ping_locked); + } + return; + } + + pq->inflight_id = t->ping_ctr; + t->ping_ctr++; + GRPC_CLOSURE_LIST_SCHED(&pq->lists[GRPC_CHTTP2_PCL_INITIATE]); + grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT], + &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]); + grpc_slice_buffer_add(&t->outbuf, + grpc_chttp2_ping_create(false, pq->inflight_id)); + GRPC_STATS_INC_HTTP2_PINGS_SENT(); + t->ping_state.last_ping_sent_time = now; + if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) { + gpr_log(GPR_INFO, "%s: Ping sent [%p]: %d/%d", + t->is_client ? "CLIENT" : "SERVER", t->peer_string, + t->ping_state.pings_before_data_required, + t->ping_policy.max_pings_without_data); + } + t->ping_state.pings_before_data_required -= + (t->ping_state.pings_before_data_required != 0); +} + +static bool update_list(grpc_chttp2_transport* t, grpc_chttp2_stream* s, + int64_t send_bytes, grpc_chttp2_write_cb** list, + int64_t* ctr, grpc_error* error) { + bool sched_any = false; + grpc_chttp2_write_cb* cb = *list; + *list = nullptr; + *ctr += send_bytes; + while (cb) { + grpc_chttp2_write_cb* next = cb->next; + if (cb->call_at_byte <= *ctr) { + sched_any = true; + finish_write_cb(t, s, cb, GRPC_ERROR_REF(error)); + } else { + add_to_write_list(list, cb); + } + cb = next; + } + GRPC_ERROR_UNREF(error); + return sched_any; +} + +static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s, + const char* staller) { + gpr_log( + GPR_DEBUG, + "%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR + ":pending-compressed=%" PRIdPTR ":flowed=%" PRId64 + ":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]", + t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length, + s->compressed_data_buffer.length, s->flow_controlled_bytes_flowed, + t->settings[GRPC_ACKED_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE], + t->flow_control->remote_window(), + static_cast GPR_MAX( + 0, + s->flow_control->remote_window_delta() + + (int64_t)t->settings[GRPC_PEER_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]), + s->flow_control->remote_window_delta()); +} + +static bool stream_ref_if_not_destroyed(gpr_refcount* r) { + gpr_atm count; + do { + count = gpr_atm_acq_load(&r->count); + if (count == 0) return false; + } while (!gpr_atm_rel_cas(&r->count, count, count + 1)); + return true; +} + +/* How many bytes would we like to put on the wire during a single syscall */ +static uint32_t target_write_size(grpc_chttp2_transport* t) { + return 1024 * 1024; +} + +// Returns true if initial_metadata contains only default headers. 
+static bool is_default_initial_metadata(grpc_metadata_batch* initial_metadata) { + return initial_metadata->list.default_count == initial_metadata->list.count; +} + +namespace { +class StreamWriteContext; + +class WriteContext { + public: + WriteContext(grpc_chttp2_transport* t) : t_(t) { + GRPC_STATS_INC_HTTP2_WRITES_BEGUN(); + GPR_TIMER_SCOPE("grpc_chttp2_begin_write", 0); + } + + // TODO(ctiller): make this the destructor + void FlushStats() { + GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE( + initial_metadata_writes_); + GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(message_writes_); + GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE( + trailing_metadata_writes_); + GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(flow_control_writes_); + } + + void FlushSettings() { + if (t_->dirtied_local_settings && !t_->sent_local_settings) { + grpc_slice_buffer_add( + &t_->outbuf, grpc_chttp2_settings_create( + t_->settings[GRPC_SENT_SETTINGS], + t_->settings[GRPC_LOCAL_SETTINGS], + t_->force_send_settings, GRPC_CHTTP2_NUM_SETTINGS)); + t_->force_send_settings = false; + t_->dirtied_local_settings = false; + t_->sent_local_settings = true; + GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(); + } + } + + void FlushQueuedBuffers() { + /* simple writes are queued to qbuf, and flushed here */ + grpc_slice_buffer_move_into(&t_->qbuf, &t_->outbuf); + GPR_ASSERT(t_->qbuf.count == 0); + } + + void FlushWindowUpdates() { + uint32_t transport_announce = + t_->flow_control->MaybeSendUpdate(t_->outbuf.count > 0); + if (transport_announce) { + grpc_transport_one_way_stats throwaway_stats; + grpc_slice_buffer_add( + &t_->outbuf, grpc_chttp2_window_update_create(0, transport_announce, + &throwaway_stats)); + ResetPingClock(); + } + } + + void FlushPingAcks() { + for (size_t i = 0; i < t_->ping_ack_count; i++) { + grpc_slice_buffer_add(&t_->outbuf, + grpc_chttp2_ping_create(true, t_->ping_acks[i])); + } + t_->ping_ack_count = 0; + } + + void EnactHpackSettings() { + grpc_chttp2_hpack_compressor_set_max_table_size( + &t_->hpack_compressor, + t_->settings[GRPC_PEER_SETTINGS] + [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]); + } + + void UpdateStreamsNoLongerStalled() { + grpc_chttp2_stream* s; + while (grpc_chttp2_list_pop_stalled_by_transport(t_, &s)) { + if (t_->closed_with_error == GRPC_ERROR_NONE && + grpc_chttp2_list_add_writable_stream(t_, s)) { + if (!stream_ref_if_not_destroyed(&s->refcount->refs)) { + grpc_chttp2_list_remove_writable_stream(t_, s); + } + } + } + } + + grpc_chttp2_stream* NextStream() { + if (t_->outbuf.length > target_write_size(t_)) { + result_.partial = true; + return nullptr; + } + + grpc_chttp2_stream* s; + if (!grpc_chttp2_list_pop_writable_stream(t_, &s)) { + return nullptr; + } + + return s; + } + + void ResetPingClock() { + if (!t_->is_client) { + t_->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST; + t_->ping_recv_state.ping_strikes = 0; + } + t_->ping_state.pings_before_data_required = + t_->ping_policy.max_pings_without_data; + } + + void IncInitialMetadataWrites() { ++initial_metadata_writes_; } + void IncWindowUpdateWrites() { ++flow_control_writes_; } + void IncMessageWrites() { ++message_writes_; } + void IncTrailingMetadataWrites() { ++trailing_metadata_writes_; } + + void NoteScheduledResults() { result_.early_results_scheduled = true; } + + grpc_chttp2_transport* transport() const { return t_; } + + grpc_chttp2_begin_write_result Result() { + result_.writing = t_->outbuf.count > 0; + return result_; + } + + private: + grpc_chttp2_transport* const t_; + + /* stats 
histogram counters: we increment these throughout this function, + and at the end publish to the central stats histograms */ + int flow_control_writes_ = 0; + int initial_metadata_writes_ = 0; + int trailing_metadata_writes_ = 0; + int message_writes_ = 0; + grpc_chttp2_begin_write_result result_ = {false, false, false}; +}; + +class DataSendContext { + public: + DataSendContext(WriteContext* write_context, grpc_chttp2_transport* t, + grpc_chttp2_stream* s) + : write_context_(write_context), + t_(t), + s_(s), + sending_bytes_before_(s_->sending_bytes) {} + + uint32_t stream_remote_window() const { + return static_cast GPR_MAX( + 0, s_->flow_control->remote_window_delta() + + (int64_t)t_->settings[GRPC_PEER_SETTINGS] + [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]); + } + + uint32_t max_outgoing() const { + return static_cast GPR_MIN( + t_->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], + GPR_MIN(stream_remote_window(), t_->flow_control->remote_window())); + } + + bool AnyOutgoing() const { return max_outgoing() > 0; } + + void FlushCompressedBytes() { + uint32_t send_bytes = static_cast GPR_MIN( + max_outgoing(), s_->compressed_data_buffer.length); + bool is_last_data_frame = + (send_bytes == s_->compressed_data_buffer.length && + s_->flow_controlled_buffer.length == 0 && + s_->fetching_send_message == nullptr); + if (is_last_data_frame && s_->send_trailing_metadata != nullptr && + s_->stream_compression_ctx != nullptr) { + if (!grpc_stream_compress( + s_->stream_compression_ctx, &s_->flow_controlled_buffer, + &s_->compressed_data_buffer, nullptr, MAX_SIZE_T, + GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) { + gpr_log(GPR_ERROR, "Stream compression failed."); + } + grpc_stream_compression_context_destroy(s_->stream_compression_ctx); + s_->stream_compression_ctx = nullptr; + /* After finish, bytes in s->compressed_data_buffer may be + * more than max_outgoing. Start another round of the current + * while loop so that send_bytes and is_last_data_frame are + * recalculated. 
*/ + return; + } + is_last_frame_ = is_last_data_frame && + s_->send_trailing_metadata != nullptr && + grpc_metadata_batch_is_empty(s_->send_trailing_metadata); + grpc_chttp2_encode_data(s_->id, &s_->compressed_data_buffer, send_bytes, + is_last_frame_, &s_->stats.outgoing, &t_->outbuf); + s_->flow_control->SentData(send_bytes); + if (s_->compressed_data_buffer.length == 0) { + s_->sending_bytes += s_->uncompressed_data_size; + } + } + + void CompressMoreBytes() { + if (s_->stream_compression_ctx == nullptr) { + s_->stream_compression_ctx = + grpc_stream_compression_context_create(s_->stream_compression_method); + } + s_->uncompressed_data_size = s_->flow_controlled_buffer.length; + if (!grpc_stream_compress(s_->stream_compression_ctx, + &s_->flow_controlled_buffer, + &s_->compressed_data_buffer, nullptr, MAX_SIZE_T, + GRPC_STREAM_COMPRESSION_FLUSH_SYNC)) { + gpr_log(GPR_ERROR, "Stream compression failed."); + } + } + + bool is_last_frame() const { return is_last_frame_; } + + void CallCallbacks() { + if (update_list( + t_, s_, + static_cast(s_->sending_bytes - sending_bytes_before_), + &s_->on_flow_controlled_cbs, &s_->flow_controlled_bytes_flowed, + GRPC_ERROR_NONE)) { + write_context_->NoteScheduledResults(); + } + } + + private: + WriteContext* write_context_; + grpc_chttp2_transport* t_; + grpc_chttp2_stream* s_; + const size_t sending_bytes_before_; + bool is_last_frame_ = false; +}; + +class StreamWriteContext { + public: + StreamWriteContext(WriteContext* write_context, grpc_chttp2_stream* s) + : write_context_(write_context), t_(write_context->transport()), s_(s) { + GRPC_CHTTP2_IF_TRACING( + gpr_log(GPR_INFO, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_, + t_->is_client ? "CLIENT" : "SERVER", s->id, + s->sent_initial_metadata, s->send_initial_metadata != nullptr, + (int)(s->flow_control->local_window_delta() - + s->flow_control->announced_window_delta()))); + } + + void FlushInitialMetadata() { + /* send initial metadata if it's available */ + if (s_->sent_initial_metadata) return; + if (s_->send_initial_metadata == nullptr) return; + + // We skip this on the server side if there is no custom initial + // metadata, there are no messages to send, and we are also sending + // trailing metadata. 
This results in a Trailers-Only response, + // which is required for retries, as per: + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#when-retries-are-valid + if (!t_->is_client && s_->fetching_send_message == nullptr && + s_->flow_controlled_buffer.length == 0 && + s_->compressed_data_buffer.length == 0 && + s_->send_trailing_metadata != nullptr && + is_default_initial_metadata(s_->send_initial_metadata)) { + ConvertInitialMetadataToTrailingMetadata(); + } else { + grpc_encode_header_options hopt = { + s_->id, // stream_id + false, // is_eof + t_->settings[GRPC_PEER_SETTINGS] + [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] != + 0, // use_true_binary_metadata + t_->settings[GRPC_PEER_SETTINGS] + [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], // max_frame_size + &s_->stats.outgoing // stats + }; + grpc_chttp2_encode_header(&t_->hpack_compressor, nullptr, 0, + s_->send_initial_metadata, &hopt, &t_->outbuf); + write_context_->ResetPingClock(); + write_context_->IncInitialMetadataWrites(); + } + + s_->send_initial_metadata = nullptr; + s_->sent_initial_metadata = true; + write_context_->NoteScheduledResults(); + grpc_chttp2_complete_closure_step( + t_, s_, &s_->send_initial_metadata_finished, GRPC_ERROR_NONE, + "send_initial_metadata_finished"); + } + + void FlushWindowUpdates() { + /* send any window updates */ + const uint32_t stream_announce = s_->flow_control->MaybeSendUpdate(); + if (stream_announce == 0) return; + + grpc_slice_buffer_add( + &t_->outbuf, grpc_chttp2_window_update_create(s_->id, stream_announce, + &s_->stats.outgoing)); + write_context_->ResetPingClock(); + write_context_->IncWindowUpdateWrites(); + } + + void FlushData() { + if (!s_->sent_initial_metadata) return; + + if (s_->flow_controlled_buffer.length == 0 && + s_->compressed_data_buffer.length == 0) { + return; // early out: nothing to do + } + + DataSendContext data_send_context(write_context_, t_, s_); + + if (!data_send_context.AnyOutgoing()) { + if (t_->flow_control->remote_window() <= 0) { + report_stall(t_, s_, "transport"); + grpc_chttp2_list_add_stalled_by_transport(t_, s_); + } else if (data_send_context.stream_remote_window() <= 0) { + report_stall(t_, s_, "stream"); + grpc_chttp2_list_add_stalled_by_stream(t_, s_); + } + return; // early out: nothing to do + } + + while ((s_->flow_controlled_buffer.length > 0 || + s_->compressed_data_buffer.length > 0) && + data_send_context.max_outgoing() > 0) { + if (s_->compressed_data_buffer.length > 0) { + data_send_context.FlushCompressedBytes(); + } else { + data_send_context.CompressMoreBytes(); + } + } + write_context_->ResetPingClock(); + if (data_send_context.is_last_frame()) { + SentLastFrame(); + } + data_send_context.CallCallbacks(); + stream_became_writable_ = true; + if (s_->flow_controlled_buffer.length > 0 || + s_->compressed_data_buffer.length > 0) { + GRPC_CHTTP2_STREAM_REF(s_, "chttp2_writing:fork"); + grpc_chttp2_list_add_writable_stream(t_, s_); + } + write_context_->IncMessageWrites(); + } + + void FlushTrailingMetadata() { + if (!s_->sent_initial_metadata) return; + + if (s_->send_trailing_metadata == nullptr) return; + if (s_->fetching_send_message != nullptr) return; + if (s_->flow_controlled_buffer.length != 0) return; + if (s_->compressed_data_buffer.length != 0) return; + + GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata")); + if (grpc_metadata_batch_is_empty(s_->send_trailing_metadata)) { + grpc_chttp2_encode_data(s_->id, &s_->flow_controlled_buffer, 0, true, + &s_->stats.outgoing, &t_->outbuf); 
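When the trailing metadata batch is empty, the branch above closes the stream with a zero-length DATA frame whose END_STREAM flag is set, rather than encoding an empty HEADERS frame. As a rough sketch of what such a frame looks like on the wire (just the 9-octet HTTP/2 frame header; empty_end_stream_data_frame is an illustrative helper, not a gRPC function):

#include <array>
#include <cstdint>

// HTTP/2 frame header: 24-bit length, 8-bit type, 8-bit flags, 1 reserved bit,
// 31-bit stream identifier (RFC 7540, section 4.1).
static std::array<uint8_t, 9> empty_end_stream_data_frame(uint32_t stream_id) {
  return {{
      0x00, 0x00, 0x00,                                // length = 0
      0x00,                                            // type = DATA
      0x01,                                            // flags = END_STREAM
      static_cast<uint8_t>((stream_id >> 24) & 0x7f),  // reserved bit cleared
      static_cast<uint8_t>(stream_id >> 16),
      static_cast<uint8_t>(stream_id >> 8),
      static_cast<uint8_t>(stream_id),
  }};
}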
+ } else { + grpc_encode_header_options hopt = { + s_->id, true, + t_->settings[GRPC_PEER_SETTINGS] + [GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA] != + 0, + + t_->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE], + &s_->stats.outgoing}; + grpc_chttp2_encode_header(&t_->hpack_compressor, + extra_headers_for_trailing_metadata_, + num_extra_headers_for_trailing_metadata_, + s_->send_trailing_metadata, &hopt, &t_->outbuf); + } + write_context_->IncTrailingMetadataWrites(); + write_context_->ResetPingClock(); + SentLastFrame(); + + write_context_->NoteScheduledResults(); + grpc_chttp2_complete_closure_step( + t_, s_, &s_->send_trailing_metadata_finished, GRPC_ERROR_NONE, + "send_trailing_metadata_finished"); + } + + bool stream_became_writable() { return stream_became_writable_; } + + private: + void ConvertInitialMetadataToTrailingMetadata() { + GRPC_CHTTP2_IF_TRACING( + gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)")); + // When sending Trailers-Only, we need to move the :status and + // content-type headers to the trailers. + if (s_->send_initial_metadata->idx.named.status != nullptr) { + extra_headers_for_trailing_metadata_ + [num_extra_headers_for_trailing_metadata_++] = + &s_->send_initial_metadata->idx.named.status->md; + } + if (s_->send_initial_metadata->idx.named.content_type != nullptr) { + extra_headers_for_trailing_metadata_ + [num_extra_headers_for_trailing_metadata_++] = + &s_->send_initial_metadata->idx.named.content_type->md; + } + } + + void SentLastFrame() { + s_->send_trailing_metadata = nullptr; + s_->sent_trailing_metadata = true; + + if (!t_->is_client && !s_->read_closed) { + grpc_slice_buffer_add( + &t_->outbuf, grpc_chttp2_rst_stream_create( + s_->id, GRPC_HTTP2_NO_ERROR, &s_->stats.outgoing)); + } + grpc_chttp2_mark_stream_closed(t_, s_, !t_->is_client, true, + GRPC_ERROR_NONE); + } + + WriteContext* const write_context_; + grpc_chttp2_transport* const t_; + grpc_chttp2_stream* const s_; + bool stream_became_writable_ = false; + grpc_mdelem* extra_headers_for_trailing_metadata_[2]; + size_t num_extra_headers_for_trailing_metadata_ = 0; +}; +} // namespace + +grpc_chttp2_begin_write_result grpc_chttp2_begin_write( + grpc_chttp2_transport* t) { + WriteContext ctx(t); + ctx.FlushSettings(); + ctx.FlushPingAcks(); + ctx.FlushQueuedBuffers(); + ctx.EnactHpackSettings(); + + if (t->flow_control->remote_window() > 0) { + ctx.UpdateStreamsNoLongerStalled(); + } + + /* for each grpc_chttp2_stream that's become writable, frame it's data + (according to available window sizes) and add to the output buffer */ + while (grpc_chttp2_stream* s = ctx.NextStream()) { + StreamWriteContext stream_ctx(&ctx, s); + stream_ctx.FlushInitialMetadata(); + stream_ctx.FlushWindowUpdates(); + stream_ctx.FlushData(); + stream_ctx.FlushTrailingMetadata(); + + if (stream_ctx.stream_became_writable()) { + if (!grpc_chttp2_list_add_writing_stream(t, s)) { + /* already in writing list: drop ref */ + GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:already_writing"); + } else { + /* ref will be dropped at end of write */ + } + } else { + GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:no_write"); + } + } + + ctx.FlushWindowUpdates(); + + maybe_initiate_ping(t); + + return ctx.Result(); +} + +void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error* error) { + GPR_TIMER_SCOPE("grpc_chttp2_end_write", 0); + grpc_chttp2_stream* s; + + while (grpc_chttp2_list_pop_writing_stream(t, &s)) { + if (s->sending_bytes != 0) { + update_list(t, s, 
static_cast(s->sending_bytes), + &s->on_write_finished_cbs, &s->flow_controlled_bytes_written, + GRPC_ERROR_REF(error)); + s->sending_bytes = 0; + } + GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:end"); + } + grpc_slice_buffer_reset_and_unref_internal(&t->outbuf); + GRPC_ERROR_UNREF(error); +} diff --git a/Sources/CgRPC/src/core/ext/transport/inproc/inproc_plugin.c b/Sources/CgRPC/src/core/ext/transport/inproc/inproc_plugin.cc similarity index 80% rename from Sources/CgRPC/src/core/ext/transport/inproc/inproc_plugin.c rename to Sources/CgRPC/src/core/ext/transport/inproc/inproc_plugin.cc index 6a796a0b1..8e251fa2d 100644 --- a/Sources/CgRPC/src/core/ext/transport/inproc/inproc_plugin.c +++ b/Sources/CgRPC/src/core/ext/transport/inproc/inproc_plugin.cc @@ -16,14 +16,13 @@ * */ +#include + #include "src/core/ext/transport/inproc/inproc_transport.h" #include "src/core/lib/debug/trace.h" -grpc_tracer_flag grpc_inproc_trace = GRPC_TRACER_INITIALIZER(false, "inproc"); +grpc_core::TraceFlag grpc_inproc_trace(false, "inproc"); -void grpc_inproc_plugin_init(void) { - grpc_register_tracer(&grpc_inproc_trace); - grpc_inproc_transport_init(); -} +void grpc_inproc_plugin_init(void) { grpc_inproc_transport_init(); } void grpc_inproc_plugin_shutdown(void) { grpc_inproc_transport_shutdown(); } diff --git a/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.c b/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.c deleted file mode 100644 index 31739d07d..000000000 --- a/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.c +++ /dev/null @@ -1,1299 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/ext/transport/inproc/inproc_transport.h" -#include -#include -#include -#include -#include -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/surface/api_trace.h" -#include "src/core/lib/surface/channel.h" -#include "src/core/lib/surface/channel_stack_type.h" -#include "src/core/lib/surface/server.h" -#include "src/core/lib/transport/connectivity_state.h" -#include "src/core/lib/transport/error_utils.h" -#include "src/core/lib/transport/transport_impl.h" - -#define INPROC_LOG(...) 
\ - do { \ - if (GRPC_TRACER_ON(grpc_inproc_trace)) gpr_log(__VA_ARGS__); \ - } while (0) - -static grpc_slice g_empty_slice; -static grpc_slice g_fake_path_key; -static grpc_slice g_fake_path_value; -static grpc_slice g_fake_auth_key; -static grpc_slice g_fake_auth_value; - -typedef struct { - gpr_mu mu; - gpr_refcount refs; -} shared_mu; - -typedef struct inproc_transport { - grpc_transport base; - shared_mu *mu; - gpr_refcount refs; - bool is_client; - grpc_connectivity_state_tracker connectivity; - void (*accept_stream_cb)(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_transport *transport, const void *server_data); - void *accept_stream_data; - bool is_closed; - struct inproc_transport *other_side; - struct inproc_stream *stream_list; -} inproc_transport; - -typedef struct sb_list_entry { - grpc_slice_buffer sb; - struct sb_list_entry *next; -} sb_list_entry; - -// Specialize grpc_byte_stream for our use case -typedef struct { - grpc_byte_stream base; - sb_list_entry *le; - grpc_error *shutdown_error; -} inproc_slice_byte_stream; - -typedef struct { - // TODO (vjpai): Add some inlined elements to avoid alloc in simple cases - sb_list_entry *head; - sb_list_entry *tail; -} slice_buffer_list; - -static void slice_buffer_list_init(slice_buffer_list *l) { - l->head = NULL; - l->tail = NULL; -} - -static void sb_list_entry_destroy(grpc_exec_ctx *exec_ctx, sb_list_entry *le) { - grpc_slice_buffer_destroy_internal(exec_ctx, &le->sb); - gpr_free(le); -} - -static void slice_buffer_list_destroy(grpc_exec_ctx *exec_ctx, - slice_buffer_list *l) { - sb_list_entry *curr = l->head; - while (curr != NULL) { - sb_list_entry *le = curr; - curr = curr->next; - sb_list_entry_destroy(exec_ctx, le); - } - l->head = NULL; - l->tail = NULL; -} - -static bool slice_buffer_list_empty(slice_buffer_list *l) { - return l->head == NULL; -} - -static void slice_buffer_list_append_entry(slice_buffer_list *l, - sb_list_entry *next) { - next->next = NULL; - if (l->tail) { - l->tail->next = next; - l->tail = next; - } else { - l->head = next; - l->tail = next; - } -} - -static grpc_slice_buffer *slice_buffer_list_append(slice_buffer_list *l) { - sb_list_entry *next = (sb_list_entry *)gpr_malloc(sizeof(*next)); - grpc_slice_buffer_init(&next->sb); - slice_buffer_list_append_entry(l, next); - return &next->sb; -} - -static sb_list_entry *slice_buffer_list_pophead(slice_buffer_list *l) { - sb_list_entry *ret = l->head; - l->head = l->head->next; - if (l->head == NULL) { - l->tail = NULL; - } - return ret; -} - -typedef struct inproc_stream { - inproc_transport *t; - grpc_metadata_batch to_read_initial_md; - uint32_t to_read_initial_md_flags; - bool to_read_initial_md_filled; - slice_buffer_list to_read_message; - grpc_metadata_batch to_read_trailing_md; - bool to_read_trailing_md_filled; - bool reads_needed; - bool read_closure_scheduled; - grpc_closure read_closure; - // Write buffer used only during gap at init time when client-side - // stream is set up but server side stream is not yet set up - grpc_metadata_batch write_buffer_initial_md; - bool write_buffer_initial_md_filled; - uint32_t write_buffer_initial_md_flags; - gpr_timespec write_buffer_deadline; - slice_buffer_list write_buffer_message; - grpc_metadata_batch write_buffer_trailing_md; - bool write_buffer_trailing_md_filled; - grpc_error *write_buffer_cancel_error; - - struct inproc_stream *other_side; - bool other_side_closed; // won't talk anymore - bool write_buffer_other_side_closed; // on hold - grpc_stream_refcount *refs; - grpc_closure 
*closure_at_destroy; - - gpr_arena *arena; - - grpc_transport_stream_op_batch *recv_initial_md_op; - grpc_transport_stream_op_batch *recv_message_op; - grpc_transport_stream_op_batch *recv_trailing_md_op; - - inproc_slice_byte_stream recv_message_stream; - - bool initial_md_sent; - bool trailing_md_sent; - bool initial_md_recvd; - bool trailing_md_recvd; - - bool closed; - - grpc_error *cancel_self_error; - grpc_error *cancel_other_error; - - gpr_timespec deadline; - - bool listed; - struct inproc_stream *stream_list_prev; - struct inproc_stream *stream_list_next; -} inproc_stream; - -static bool inproc_slice_byte_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *bs, size_t max, - grpc_closure *on_complete) { - // Because inproc transport always provides the entire message atomically, - // the byte stream always has data available when this function is called. - // Thus, this function always returns true (unlike other transports) and - // there is never any need to schedule a closure - return true; -} - -static grpc_error *inproc_slice_byte_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *bs, - grpc_slice *slice) { - inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs; - if (stream->shutdown_error != GRPC_ERROR_NONE) { - return GRPC_ERROR_REF(stream->shutdown_error); - } - *slice = grpc_slice_buffer_take_first(&stream->le->sb); - return GRPC_ERROR_NONE; -} - -static void inproc_slice_byte_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *bs, - grpc_error *error) { - inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs; - GRPC_ERROR_UNREF(stream->shutdown_error); - stream->shutdown_error = error; -} - -static void inproc_slice_byte_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *bs) { - inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs; - sb_list_entry_destroy(exec_ctx, stream->le); - GRPC_ERROR_UNREF(stream->shutdown_error); -} - -static const grpc_byte_stream_vtable inproc_slice_byte_stream_vtable = { - inproc_slice_byte_stream_next, inproc_slice_byte_stream_pull, - inproc_slice_byte_stream_shutdown, inproc_slice_byte_stream_destroy}; - -void inproc_slice_byte_stream_init(inproc_slice_byte_stream *s, - sb_list_entry *le) { - s->base.length = (uint32_t)le->sb.length; - s->base.flags = 0; - s->base.vtable = &inproc_slice_byte_stream_vtable; - s->le = le; - s->shutdown_error = GRPC_ERROR_NONE; -} - -static void ref_transport(inproc_transport *t) { - INPROC_LOG(GPR_DEBUG, "ref_transport %p", t); - gpr_ref(&t->refs); -} - -static void really_destroy_transport(grpc_exec_ctx *exec_ctx, - inproc_transport *t) { - INPROC_LOG(GPR_DEBUG, "really_destroy_transport %p", t); - grpc_connectivity_state_destroy(exec_ctx, &t->connectivity); - if (gpr_unref(&t->mu->refs)) { - gpr_free(t->mu); - } - gpr_free(t); -} - -static void unref_transport(grpc_exec_ctx *exec_ctx, inproc_transport *t) { - INPROC_LOG(GPR_DEBUG, "unref_transport %p", t); - if (gpr_unref(&t->refs)) { - really_destroy_transport(exec_ctx, t); - } -} - -#ifndef NDEBUG -#define STREAM_REF(refs, reason) grpc_stream_ref(refs, reason) -#define STREAM_UNREF(e, refs, reason) grpc_stream_unref(e, refs, reason) -#else -#define STREAM_REF(refs, reason) grpc_stream_ref(refs) -#define STREAM_UNREF(e, refs, reason) grpc_stream_unref(e, refs) -#endif - -static void ref_stream(inproc_stream *s, const char *reason) { - INPROC_LOG(GPR_DEBUG, "ref_stream %p %s", s, reason); - STREAM_REF(s->refs, reason); -} - -static void unref_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s, - 
const char *reason) { - INPROC_LOG(GPR_DEBUG, "unref_stream %p %s", s, reason); - STREAM_UNREF(exec_ctx, s->refs, reason); -} - -static void really_destroy_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s) { - INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s); - - slice_buffer_list_destroy(exec_ctx, &s->to_read_message); - slice_buffer_list_destroy(exec_ctx, &s->write_buffer_message); - GRPC_ERROR_UNREF(s->write_buffer_cancel_error); - GRPC_ERROR_UNREF(s->cancel_self_error); - GRPC_ERROR_UNREF(s->cancel_other_error); - - unref_transport(exec_ctx, s->t); - - if (s->closure_at_destroy) { - GRPC_CLOSURE_SCHED(exec_ctx, s->closure_at_destroy, GRPC_ERROR_NONE); - } -} - -static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); - -static void log_metadata(const grpc_metadata_batch *md_batch, bool is_client, - bool is_initial) { - for (grpc_linked_mdelem *md = md_batch->list.head; md != NULL; - md = md->next) { - char *key = grpc_slice_to_c_string(GRPC_MDKEY(md->md)); - char *value = grpc_slice_to_c_string(GRPC_MDVALUE(md->md)); - gpr_log(GPR_INFO, "INPROC:%s:%s: %s: %s", is_initial ? "HDR" : "TRL", - is_client ? "CLI" : "SVR", key, value); - gpr_free(key); - gpr_free(value); - } -} - -static grpc_error *fill_in_metadata(grpc_exec_ctx *exec_ctx, inproc_stream *s, - const grpc_metadata_batch *metadata, - uint32_t flags, grpc_metadata_batch *out_md, - uint32_t *outflags, bool *markfilled) { - if (GRPC_TRACER_ON(grpc_inproc_trace)) { - log_metadata(metadata, s->t->is_client, outflags != NULL); - } - - if (outflags != NULL) { - *outflags = flags; - } - if (markfilled != NULL) { - *markfilled = true; - } - grpc_error *error = GRPC_ERROR_NONE; - for (grpc_linked_mdelem *elem = metadata->list.head; - (elem != NULL) && (error == GRPC_ERROR_NONE); elem = elem->next) { - grpc_linked_mdelem *nelem = - (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*nelem)); - nelem->md = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_intern(GRPC_MDKEY(elem->md)), - grpc_slice_intern(GRPC_MDVALUE(elem->md))); - - error = grpc_metadata_batch_link_tail(exec_ctx, out_md, nelem); - } - return error; -} - -static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, grpc_stream_refcount *refcount, - const void *server_data, gpr_arena *arena) { - INPROC_LOG(GPR_DEBUG, "init_stream %p %p %p", gt, gs, server_data); - inproc_transport *t = (inproc_transport *)gt; - inproc_stream *s = (inproc_stream *)gs; - s->arena = arena; - - s->refs = refcount; - // Ref this stream right now - ref_stream(s, "inproc_init_stream:init"); - - grpc_metadata_batch_init(&s->to_read_initial_md); - s->to_read_initial_md_flags = 0; - s->to_read_initial_md_filled = false; - grpc_metadata_batch_init(&s->to_read_trailing_md); - s->to_read_trailing_md_filled = false; - grpc_metadata_batch_init(&s->write_buffer_initial_md); - s->write_buffer_initial_md_flags = 0; - s->write_buffer_initial_md_filled = false; - grpc_metadata_batch_init(&s->write_buffer_trailing_md); - s->write_buffer_trailing_md_filled = false; - slice_buffer_list_init(&s->to_read_message); - slice_buffer_list_init(&s->write_buffer_message); - s->reads_needed = false; - s->read_closure_scheduled = false; - GRPC_CLOSURE_INIT(&s->read_closure, read_state_machine, s, - grpc_schedule_on_exec_ctx); - s->t = t; - s->closure_at_destroy = NULL; - s->other_side_closed = false; - - s->initial_md_sent = s->trailing_md_sent = s->initial_md_recvd = - s->trailing_md_recvd = false; - - s->closed = false; - - s->cancel_self_error = 
GRPC_ERROR_NONE; - s->cancel_other_error = GRPC_ERROR_NONE; - s->write_buffer_cancel_error = GRPC_ERROR_NONE; - s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - s->write_buffer_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - - s->stream_list_prev = NULL; - gpr_mu_lock(&t->mu->mu); - s->listed = true; - ref_stream(s, "inproc_init_stream:list"); - s->stream_list_next = t->stream_list; - if (t->stream_list) { - t->stream_list->stream_list_prev = s; - } - t->stream_list = s; - gpr_mu_unlock(&t->mu->mu); - - if (!server_data) { - ref_transport(t); - inproc_transport *st = t->other_side; - ref_transport(st); - s->other_side = NULL; // will get filled in soon - // Pass the client-side stream address to the server-side for a ref - ref_stream(s, "inproc_init_stream:clt"); // ref it now on behalf of server - // side to avoid destruction - INPROC_LOG(GPR_DEBUG, "calling accept stream cb %p %p", - st->accept_stream_cb, st->accept_stream_data); - (*st->accept_stream_cb)(exec_ctx, st->accept_stream_data, &st->base, - (void *)s); - } else { - // This is the server-side and is being called through accept_stream_cb - inproc_stream *cs = (inproc_stream *)server_data; - s->other_side = cs; - // Ref the server-side stream on behalf of the client now - ref_stream(s, "inproc_init_stream:srv"); - - // Now we are about to affect the other side, so lock the transport - // to make sure that it doesn't get destroyed - gpr_mu_lock(&s->t->mu->mu); - cs->other_side = s; - // Now transfer from the other side's write_buffer if any to the to_read - // buffer - if (cs->write_buffer_initial_md_filled) { - fill_in_metadata(exec_ctx, s, &cs->write_buffer_initial_md, - cs->write_buffer_initial_md_flags, - &s->to_read_initial_md, &s->to_read_initial_md_flags, - &s->to_read_initial_md_filled); - s->deadline = gpr_time_min(s->deadline, cs->write_buffer_deadline); - grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_initial_md); - cs->write_buffer_initial_md_filled = false; - } - while (!slice_buffer_list_empty(&cs->write_buffer_message)) { - slice_buffer_list_append_entry( - &s->to_read_message, - slice_buffer_list_pophead(&cs->write_buffer_message)); - } - if (cs->write_buffer_trailing_md_filled) { - fill_in_metadata(exec_ctx, s, &cs->write_buffer_trailing_md, 0, - &s->to_read_trailing_md, NULL, - &s->to_read_trailing_md_filled); - grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_trailing_md); - cs->write_buffer_trailing_md_filled = false; - } - if (cs->write_buffer_cancel_error != GRPC_ERROR_NONE) { - s->cancel_other_error = cs->write_buffer_cancel_error; - cs->write_buffer_cancel_error = GRPC_ERROR_NONE; - } - - gpr_mu_unlock(&s->t->mu->mu); - } - return 0; // return value is not important -} - -static void close_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s) { - if (!s->closed) { - // Release the metadata that we would have written out - grpc_metadata_batch_destroy(exec_ctx, &s->write_buffer_initial_md); - grpc_metadata_batch_destroy(exec_ctx, &s->write_buffer_trailing_md); - - if (s->listed) { - inproc_stream *p = s->stream_list_prev; - inproc_stream *n = s->stream_list_next; - if (p != NULL) { - p->stream_list_next = n; - } else { - s->t->stream_list = n; - } - if (n != NULL) { - n->stream_list_prev = p; - } - s->listed = false; - unref_stream(exec_ctx, s, "close_stream:list"); - } - s->closed = true; - unref_stream(exec_ctx, s, "close_stream:closing"); - } -} - -// This function means that we are done talking/listening to the other side -static void close_other_side_locked(grpc_exec_ctx *exec_ctx, 
inproc_stream *s, - const char *reason) { - if (s->other_side != NULL) { - // First release the metadata that came from the other side's arena - grpc_metadata_batch_destroy(exec_ctx, &s->to_read_initial_md); - grpc_metadata_batch_destroy(exec_ctx, &s->to_read_trailing_md); - - unref_stream(exec_ctx, s->other_side, reason); - s->other_side_closed = true; - s->other_side = NULL; - } else if (!s->other_side_closed) { - s->write_buffer_other_side_closed = true; - } -} - -static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s, - grpc_error *error) { - INPROC_LOG(GPR_DEBUG, "read_state_machine %p fail_helper", s); - // If we're failing this side, we need to make sure that - // we also send or have already sent trailing metadata - if (!s->trailing_md_sent) { - // Send trailing md to the other side indicating cancellation - s->trailing_md_sent = true; - - grpc_metadata_batch fake_md; - grpc_metadata_batch_init(&fake_md); - - inproc_stream *other = s->other_side; - grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md - : &other->to_read_trailing_md; - bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled - : &other->to_read_trailing_md_filled; - fill_in_metadata(exec_ctx, s, &fake_md, 0, dest, NULL, destfilled); - grpc_metadata_batch_destroy(exec_ctx, &fake_md); - - if (other != NULL) { - if (other->cancel_other_error == GRPC_ERROR_NONE) { - other->cancel_other_error = GRPC_ERROR_REF(error); - } - if (other->reads_needed) { - if (!other->read_closure_scheduled) { - GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure, - GRPC_ERROR_REF(error)); - other->read_closure_scheduled = true; - } - other->reads_needed = false; - } - } else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) { - s->write_buffer_cancel_error = GRPC_ERROR_REF(error); - } - } - if (s->recv_initial_md_op) { - grpc_error *err; - if (!s->t->is_client) { - // If this is a server, provide initial metadata with a path and authority - // since it expects that as well as no error yet - grpc_metadata_batch fake_md; - grpc_metadata_batch_init(&fake_md); - grpc_linked_mdelem *path_md = - (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*path_md)); - path_md->md = - grpc_mdelem_from_slices(exec_ctx, g_fake_path_key, g_fake_path_value); - GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, path_md) == - GRPC_ERROR_NONE); - grpc_linked_mdelem *auth_md = - (grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*auth_md)); - auth_md->md = - grpc_mdelem_from_slices(exec_ctx, g_fake_auth_key, g_fake_auth_value); - GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, auth_md) == - GRPC_ERROR_NONE); - - fill_in_metadata( - exec_ctx, s, &fake_md, 0, - s->recv_initial_md_op->payload->recv_initial_metadata - .recv_initial_metadata, - s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags, - NULL); - grpc_metadata_batch_destroy(exec_ctx, &fake_md); - err = GRPC_ERROR_NONE; - } else { - err = GRPC_ERROR_REF(error); - } - INPROC_LOG(GPR_DEBUG, - "fail_helper %p scheduling initial-metadata-ready %p %p", s, - error, err); - GRPC_CLOSURE_SCHED(exec_ctx, - s->recv_initial_md_op->payload->recv_initial_metadata - .recv_initial_metadata_ready, - err); - // Last use of err so no need to REF and then UNREF it - - if ((s->recv_initial_md_op != s->recv_message_op) && - (s->recv_initial_md_op != s->recv_trailing_md_op)) { - INPROC_LOG(GPR_DEBUG, - "fail_helper %p scheduling initial-metadata-on-complete %p", - error, s); - GRPC_CLOSURE_SCHED(exec_ctx, 
s->recv_initial_md_op->on_complete, - GRPC_ERROR_REF(error)); - } - s->recv_initial_md_op = NULL; - } - if (s->recv_message_op) { - INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-ready %p", s, - error); - GRPC_CLOSURE_SCHED( - exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready, - GRPC_ERROR_REF(error)); - if (s->recv_message_op != s->recv_trailing_md_op) { - INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-on-complete %p", - s, error); - GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete, - GRPC_ERROR_REF(error)); - } - s->recv_message_op = NULL; - } - if (s->recv_trailing_md_op) { - INPROC_LOG(GPR_DEBUG, - "fail_helper %p scheduling trailing-md-on-complete %p", s, - error); - GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete, - GRPC_ERROR_REF(error)); - s->recv_trailing_md_op = NULL; - } - close_other_side_locked(exec_ctx, s, "fail_helper:other_side"); - close_stream_locked(exec_ctx, s); - - GRPC_ERROR_UNREF(error); -} - -static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - // This function gets called when we have contents in the unprocessed reads - // Get what we want based on our ops wanted - // Schedule our appropriate closures - // and then return to reads_needed state if still needed - - // Since this is a closure directly invoked by the combiner, it should not - // unref the error parameter explicitly; the combiner will do that implicitly - grpc_error *new_err = GRPC_ERROR_NONE; - - bool needs_close = false; - - INPROC_LOG(GPR_DEBUG, "read_state_machine %p", arg); - inproc_stream *s = (inproc_stream *)arg; - gpr_mu *mu = &s->t->mu->mu; // keep aside in case s gets closed - gpr_mu_lock(mu); - s->read_closure_scheduled = false; - // cancellation takes precedence - if (s->cancel_self_error != GRPC_ERROR_NONE) { - fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(s->cancel_self_error)); - goto done; - } else if (s->cancel_other_error != GRPC_ERROR_NONE) { - fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(s->cancel_other_error)); - goto done; - } else if (error != GRPC_ERROR_NONE) { - fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(error)); - goto done; - } - - if (s->recv_initial_md_op) { - if (!s->to_read_initial_md_filled) { - // We entered the state machine on some other kind of read even though - // we still haven't satisfied initial md . That's an error. 
- new_err = - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexpected frame sequencing"); - INPROC_LOG(GPR_DEBUG, - "read_state_machine %p scheduling on_complete errors for no " - "initial md %p", - s, new_err); - fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err)); - goto done; - } else if (s->initial_md_recvd) { - new_err = - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md"); - INPROC_LOG( - GPR_DEBUG, - "read_state_machine %p scheduling on_complete errors for already " - "recvd initial md %p", - s, new_err); - fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err)); - goto done; - } - - s->initial_md_recvd = true; - new_err = fill_in_metadata( - exec_ctx, s, &s->to_read_initial_md, s->to_read_initial_md_flags, - s->recv_initial_md_op->payload->recv_initial_metadata - .recv_initial_metadata, - s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags, NULL); - s->recv_initial_md_op->payload->recv_initial_metadata.recv_initial_metadata - ->deadline = s->deadline; - grpc_metadata_batch_clear(exec_ctx, &s->to_read_initial_md); - s->to_read_initial_md_filled = false; - INPROC_LOG(GPR_DEBUG, - "read_state_machine %p scheduling initial-metadata-ready %p", s, - new_err); - GRPC_CLOSURE_SCHED(exec_ctx, - s->recv_initial_md_op->payload->recv_initial_metadata - .recv_initial_metadata_ready, - GRPC_ERROR_REF(new_err)); - if ((s->recv_initial_md_op != s->recv_message_op) && - (s->recv_initial_md_op != s->recv_trailing_md_op)) { - INPROC_LOG( - GPR_DEBUG, - "read_state_machine %p scheduling initial-metadata-on-complete %p", s, - new_err); - GRPC_CLOSURE_SCHED(exec_ctx, s->recv_initial_md_op->on_complete, - GRPC_ERROR_REF(new_err)); - } - s->recv_initial_md_op = NULL; - - if (new_err != GRPC_ERROR_NONE) { - INPROC_LOG(GPR_DEBUG, - "read_state_machine %p scheduling on_complete errors2 %p", s, - new_err); - fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err)); - goto done; - } - } - if (s->to_read_initial_md_filled) { - new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexpected recv frame"); - fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err)); - goto done; - } - if (!slice_buffer_list_empty(&s->to_read_message) && s->recv_message_op) { - inproc_slice_byte_stream_init( - &s->recv_message_stream, - slice_buffer_list_pophead(&s->to_read_message)); - *s->recv_message_op->payload->recv_message.recv_message = - &s->recv_message_stream.base; - INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready", s); - GRPC_CLOSURE_SCHED( - exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready, - GRPC_ERROR_NONE); - if (s->recv_message_op != s->recv_trailing_md_op) { - INPROC_LOG(GPR_DEBUG, - "read_state_machine %p scheduling message-on-complete %p", s, - new_err); - GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete, - GRPC_ERROR_REF(new_err)); - } - s->recv_message_op = NULL; - } - if (s->to_read_trailing_md_filled) { - if (s->trailing_md_recvd) { - new_err = - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md"); - INPROC_LOG( - GPR_DEBUG, - "read_state_machine %p scheduling on_complete errors for already " - "recvd trailing md %p", - s, new_err); - fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err)); - goto done; - } - if (s->recv_message_op != NULL) { - // This message needs to be wrapped up because it will never be - // satisfied - INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready", - s); - GRPC_CLOSURE_SCHED( - exec_ctx, - s->recv_message_op->payload->recv_message.recv_message_ready, - GRPC_ERROR_NONE); - 
if (s->recv_message_op != s->recv_trailing_md_op) { - INPROC_LOG(GPR_DEBUG, - "read_state_machine %p scheduling message-on-complete %p", s, - new_err); - GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete, - GRPC_ERROR_REF(new_err)); - } - s->recv_message_op = NULL; - } - if (s->recv_trailing_md_op != NULL) { - // We wanted trailing metadata and we got it - s->trailing_md_recvd = true; - new_err = - fill_in_metadata(exec_ctx, s, &s->to_read_trailing_md, 0, - s->recv_trailing_md_op->payload - ->recv_trailing_metadata.recv_trailing_metadata, - NULL, NULL); - grpc_metadata_batch_clear(exec_ctx, &s->to_read_trailing_md); - s->to_read_trailing_md_filled = false; - - // We should schedule the recv_trailing_md_op completion if - // 1. this stream is the client-side - // 2. this stream is the server-side AND has already sent its trailing md - // (If the server hasn't already sent its trailing md, it doesn't have - // a final status, so don't mark this op complete) - if (s->t->is_client || s->trailing_md_sent) { - INPROC_LOG( - GPR_DEBUG, - "read_state_machine %p scheduling trailing-md-on-complete %p", s, - new_err); - GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete, - GRPC_ERROR_REF(new_err)); - s->recv_trailing_md_op = NULL; - needs_close = true; - } else { - INPROC_LOG(GPR_DEBUG, - "read_state_machine %p server needs to delay handling " - "trailing-md-on-complete %p", - s, new_err); - } - } else { - INPROC_LOG( - GPR_DEBUG, - "read_state_machine %p has trailing md but not yet waiting for it", - s); - } - } - if (s->trailing_md_recvd && s->recv_message_op) { - // No further message will come on this stream, so finish off the - // recv_message_op - INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready", s); - GRPC_CLOSURE_SCHED( - exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready, - GRPC_ERROR_NONE); - if (s->recv_message_op != s->recv_trailing_md_op) { - INPROC_LOG(GPR_DEBUG, - "read_state_machine %p scheduling message-on-complete %p", s, - new_err); - GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete, - GRPC_ERROR_REF(new_err)); - } - s->recv_message_op = NULL; - } - if (s->recv_message_op || s->recv_trailing_md_op) { - // Didn't get the item we wanted so we still need to get - // rescheduled - INPROC_LOG(GPR_DEBUG, "read_state_machine %p still needs closure %p %p", s, - s->recv_message_op, s->recv_trailing_md_op); - s->reads_needed = true; - } -done: - if (needs_close) { - close_other_side_locked(exec_ctx, s, "read_state_machine"); - close_stream_locked(exec_ctx, s); - } - gpr_mu_unlock(mu); - GRPC_ERROR_UNREF(new_err); -} - -static grpc_closure do_nothing_closure; - -static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s, - grpc_error *error) { - bool ret = false; // was the cancel accepted - INPROC_LOG(GPR_DEBUG, "cancel_stream %p with %s", s, - grpc_error_string(error)); - if (s->cancel_self_error == GRPC_ERROR_NONE) { - ret = true; - s->cancel_self_error = GRPC_ERROR_REF(error); - if (s->reads_needed) { - if (!s->read_closure_scheduled) { - GRPC_CLOSURE_SCHED(exec_ctx, &s->read_closure, - GRPC_ERROR_REF(s->cancel_self_error)); - s->read_closure_scheduled = true; - } - s->reads_needed = false; - } - // Send trailing md to the other side indicating cancellation, even if we - // already have - s->trailing_md_sent = true; - - grpc_metadata_batch cancel_md; - grpc_metadata_batch_init(&cancel_md); - - inproc_stream *other = s->other_side; - grpc_metadata_batch *dest = (other == NULL) ? 
&s->write_buffer_trailing_md - : &other->to_read_trailing_md; - bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled - : &other->to_read_trailing_md_filled; - fill_in_metadata(exec_ctx, s, &cancel_md, 0, dest, NULL, destfilled); - grpc_metadata_batch_destroy(exec_ctx, &cancel_md); - - if (other != NULL) { - if (other->cancel_other_error == GRPC_ERROR_NONE) { - other->cancel_other_error = GRPC_ERROR_REF(s->cancel_self_error); - } - if (other->reads_needed) { - if (!other->read_closure_scheduled) { - GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure, - GRPC_ERROR_REF(other->cancel_other_error)); - other->read_closure_scheduled = true; - } - other->reads_needed = false; - } - } else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) { - s->write_buffer_cancel_error = GRPC_ERROR_REF(s->cancel_self_error); - } - - // if we are a server and already received trailing md but - // couldn't complete that because we hadn't yet sent out trailing - // md, now's the chance - if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) { - INPROC_LOG(GPR_DEBUG, - "cancel_stream %p scheduling trailing-md-on-complete %p", s, - s->cancel_self_error); - GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete, - GRPC_ERROR_REF(s->cancel_self_error)); - s->recv_trailing_md_op = NULL; - } - } - - close_other_side_locked(exec_ctx, s, "cancel_stream:other_side"); - close_stream_locked(exec_ctx, s); - - GRPC_ERROR_UNREF(error); - return ret; -} - -static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, - grpc_transport_stream_op_batch *op) { - INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %p %p", gt, gs, op); - inproc_stream *s = (inproc_stream *)gs; - gpr_mu *mu = &s->t->mu->mu; // save aside in case s gets closed - gpr_mu_lock(mu); - - if (GRPC_TRACER_ON(grpc_inproc_trace)) { - if (op->send_initial_metadata) { - log_metadata(op->payload->send_initial_metadata.send_initial_metadata, - s->t->is_client, true); - } - if (op->send_trailing_metadata) { - log_metadata(op->payload->send_trailing_metadata.send_trailing_metadata, - s->t->is_client, false); - } - } - grpc_error *error = GRPC_ERROR_NONE; - grpc_closure *on_complete = op->on_complete; - if (on_complete == NULL) { - on_complete = &do_nothing_closure; - } - - if (op->cancel_stream) { - // Call cancel_stream_locked without ref'ing the cancel_error because - // this function is responsible to make sure that that field gets unref'ed - cancel_stream_locked(exec_ctx, s, op->payload->cancel_stream.cancel_error); - // this op can complete without an error - } else if (s->cancel_self_error != GRPC_ERROR_NONE) { - // already self-canceled so still give it an error - error = GRPC_ERROR_REF(s->cancel_self_error); - } else { - INPROC_LOG(GPR_DEBUG, "perform_stream_op %p%s%s%s%s%s%s", s, - op->send_initial_metadata ? " send_initial_metadata" : "", - op->send_message ? " send_message" : "", - op->send_trailing_metadata ? " send_trailing_metadata" : "", - op->recv_initial_metadata ? " recv_initial_metadata" : "", - op->recv_message ? " recv_message" : "", - op->recv_trailing_metadata ? 
" recv_trailing_metadata" : ""); - } - - bool needs_close = false; - - if (error == GRPC_ERROR_NONE && - (op->send_initial_metadata || op->send_message || - op->send_trailing_metadata)) { - inproc_stream *other = s->other_side; - if (s->t->is_closed) { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Endpoint already shutdown"); - } - if (error == GRPC_ERROR_NONE && op->send_initial_metadata) { - grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_initial_md - : &other->to_read_initial_md; - uint32_t *destflags = (other == NULL) ? &s->write_buffer_initial_md_flags - : &other->to_read_initial_md_flags; - bool *destfilled = (other == NULL) ? &s->write_buffer_initial_md_filled - : &other->to_read_initial_md_filled; - if (*destfilled || s->initial_md_sent) { - // The buffer is already in use; that's an error! - INPROC_LOG(GPR_DEBUG, "Extra initial metadata %p", s); - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra initial metadata"); - } else { - if (!other->closed) { - fill_in_metadata( - exec_ctx, s, - op->payload->send_initial_metadata.send_initial_metadata, - op->payload->send_initial_metadata.send_initial_metadata_flags, - dest, destflags, destfilled); - } - if (s->t->is_client) { - gpr_timespec *dl = - (other == NULL) ? &s->write_buffer_deadline : &other->deadline; - *dl = gpr_time_min(*dl, op->payload->send_initial_metadata - .send_initial_metadata->deadline); - s->initial_md_sent = true; - } - } - } - if (error == GRPC_ERROR_NONE && op->send_message) { - size_t remaining = op->payload->send_message.send_message->length; - grpc_slice_buffer *dest = slice_buffer_list_append( - (other == NULL) ? &s->write_buffer_message : &other->to_read_message); - do { - grpc_slice message_slice; - grpc_closure unused; - GPR_ASSERT(grpc_byte_stream_next(exec_ctx, - op->payload->send_message.send_message, - SIZE_MAX, &unused)); - error = grpc_byte_stream_pull( - exec_ctx, op->payload->send_message.send_message, &message_slice); - if (error != GRPC_ERROR_NONE) { - cancel_stream_locked(exec_ctx, s, GRPC_ERROR_REF(error)); - break; - } - GPR_ASSERT(error == GRPC_ERROR_NONE); - remaining -= GRPC_SLICE_LENGTH(message_slice); - grpc_slice_buffer_add(dest, message_slice); - } while (remaining != 0); - grpc_byte_stream_destroy(exec_ctx, - op->payload->send_message.send_message); - } - if (error == GRPC_ERROR_NONE && op->send_trailing_metadata) { - grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md - : &other->to_read_trailing_md; - bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled - : &other->to_read_trailing_md_filled; - if (*destfilled || s->trailing_md_sent) { - // The buffer is already in use; that's an error! 
- INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s); - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata"); - } else { - if (!other->closed) { - fill_in_metadata( - exec_ctx, s, - op->payload->send_trailing_metadata.send_trailing_metadata, 0, - dest, NULL, destfilled); - } - s->trailing_md_sent = true; - if (!s->t->is_client && s->trailing_md_recvd && - s->recv_trailing_md_op) { - INPROC_LOG(GPR_DEBUG, - "perform_stream_op %p scheduling trailing-md-on-complete", - s); - GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete, - GRPC_ERROR_NONE); - s->recv_trailing_md_op = NULL; - needs_close = true; - } - } - } - if (other != NULL && other->reads_needed) { - if (!other->read_closure_scheduled) { - GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure, error); - other->read_closure_scheduled = true; - } - other->reads_needed = false; - } - } - if (error == GRPC_ERROR_NONE && - (op->recv_initial_metadata || op->recv_message || - op->recv_trailing_metadata)) { - // If there are any reads, mark it so that the read closure will react to - // them - if (op->recv_initial_metadata) { - s->recv_initial_md_op = op; - } - if (op->recv_message) { - s->recv_message_op = op; - } - if (op->recv_trailing_metadata) { - s->recv_trailing_md_op = op; - } - - // We want to initiate the closure if: - // 1. There is initial metadata and something ready to take that - // 2. There is a message and something ready to take it - // 3. There is trailing metadata, even if nothing specifically wants - // that because that can shut down the message as well - if ((s->to_read_initial_md_filled && op->recv_initial_metadata) || - ((!slice_buffer_list_empty(&s->to_read_message) || - s->trailing_md_recvd) && - op->recv_message) || - (s->to_read_trailing_md_filled)) { - if (!s->read_closure_scheduled) { - GRPC_CLOSURE_SCHED(exec_ctx, &s->read_closure, GRPC_ERROR_NONE); - s->read_closure_scheduled = true; - } - } else { - s->reads_needed = true; - } - } else { - if (error != GRPC_ERROR_NONE) { - // Schedule op's read closures that we didn't push to read state machine - if (op->recv_initial_metadata) { - INPROC_LOG( - GPR_DEBUG, - "perform_stream_op error %p scheduling initial-metadata-ready %p", - s, error); - GRPC_CLOSURE_SCHED( - exec_ctx, - op->payload->recv_initial_metadata.recv_initial_metadata_ready, - GRPC_ERROR_REF(error)); - } - if (op->recv_message) { - INPROC_LOG( - GPR_DEBUG, - "perform_stream_op error %p scheduling recv message-ready %p", s, - error); - GRPC_CLOSURE_SCHED(exec_ctx, - op->payload->recv_message.recv_message_ready, - GRPC_ERROR_REF(error)); - } - } - INPROC_LOG(GPR_DEBUG, "perform_stream_op %p scheduling on_complete %p", s, - error); - GRPC_CLOSURE_SCHED(exec_ctx, on_complete, GRPC_ERROR_REF(error)); - } - if (needs_close) { - close_other_side_locked(exec_ctx, s, "perform_stream_op:other_side"); - close_stream_locked(exec_ctx, s); - } - gpr_mu_unlock(mu); - GRPC_ERROR_UNREF(error); -} - -static void close_transport_locked(grpc_exec_ctx *exec_ctx, - inproc_transport *t) { - INPROC_LOG(GPR_DEBUG, "close_transport %p %d", t, t->is_closed); - grpc_connectivity_state_set( - exec_ctx, &t->connectivity, GRPC_CHANNEL_SHUTDOWN, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Closing transport."), - "close transport"); - if (!t->is_closed) { - t->is_closed = true; - /* Also end all streams on this transport */ - while (t->stream_list != NULL) { - // cancel_stream_locked also adjusts stream list - cancel_stream_locked( - exec_ctx, t->stream_list, - grpc_error_set_int( - 
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed"), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); - } - } -} - -static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_transport_op *op) { - inproc_transport *t = (inproc_transport *)gt; - INPROC_LOG(GPR_DEBUG, "perform_transport_op %p %p", t, op); - gpr_mu_lock(&t->mu->mu); - if (op->on_connectivity_state_change) { - grpc_connectivity_state_notify_on_state_change( - exec_ctx, &t->connectivity, op->connectivity_state, - op->on_connectivity_state_change); - } - if (op->set_accept_stream) { - t->accept_stream_cb = op->set_accept_stream_fn; - t->accept_stream_data = op->set_accept_stream_user_data; - } - if (op->on_consumed) { - GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); - } - - bool do_close = false; - if (op->goaway_error != GRPC_ERROR_NONE) { - do_close = true; - GRPC_ERROR_UNREF(op->goaway_error); - } - if (op->disconnect_with_error != GRPC_ERROR_NONE) { - do_close = true; - GRPC_ERROR_UNREF(op->disconnect_with_error); - } - - if (do_close) { - close_transport_locked(exec_ctx, t); - } - gpr_mu_unlock(&t->mu->mu); -} - -static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, - grpc_closure *then_schedule_closure) { - INPROC_LOG(GPR_DEBUG, "destroy_stream %p %p", gs, then_schedule_closure); - inproc_stream *s = (inproc_stream *)gs; - s->closure_at_destroy = then_schedule_closure; - really_destroy_stream(exec_ctx, s); -} - -static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) { - inproc_transport *t = (inproc_transport *)gt; - INPROC_LOG(GPR_DEBUG, "destroy_transport %p", t); - gpr_mu_lock(&t->mu->mu); - close_transport_locked(exec_ctx, t); - gpr_mu_unlock(&t->mu->mu); - unref_transport(exec_ctx, t->other_side); - unref_transport(exec_ctx, t); -} - -/******************************************************************************* - * INTEGRATION GLUE - */ - -static void set_pollset(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, grpc_pollset *pollset) { - // Nothing to do here -} - -static void set_pollset_set(grpc_exec_ctx *exec_ctx, grpc_transport *gt, - grpc_stream *gs, grpc_pollset_set *pollset_set) { - // Nothing to do here -} - -static grpc_endpoint *get_endpoint(grpc_exec_ctx *exec_ctx, grpc_transport *t) { - return NULL; -} - -/******************************************************************************* - * GLOBAL INIT AND DESTROY - */ -static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {} - -void grpc_inproc_transport_init(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, NULL, - grpc_schedule_on_exec_ctx); - g_empty_slice = grpc_slice_from_static_buffer(NULL, 0); - - grpc_slice key_tmp = grpc_slice_from_static_string(":path"); - g_fake_path_key = grpc_slice_intern(key_tmp); - grpc_slice_unref_internal(&exec_ctx, key_tmp); - - g_fake_path_value = grpc_slice_from_static_string("/"); - - grpc_slice auth_tmp = grpc_slice_from_static_string(":authority"); - g_fake_auth_key = grpc_slice_intern(auth_tmp); - grpc_slice_unref_internal(&exec_ctx, auth_tmp); - - g_fake_auth_value = grpc_slice_from_static_string("inproc-fail"); - grpc_exec_ctx_finish(&exec_ctx); -} - -static const grpc_transport_vtable inproc_vtable = { - sizeof(inproc_stream), "inproc", init_stream, - set_pollset, set_pollset_set, perform_stream_op, - perform_transport_op, destroy_stream, destroy_transport, - get_endpoint}; - 
-/******************************************************************************* - * Main inproc transport functions - */ -static void inproc_transports_create(grpc_exec_ctx *exec_ctx, - grpc_transport **server_transport, - const grpc_channel_args *server_args, - grpc_transport **client_transport, - const grpc_channel_args *client_args) { - INPROC_LOG(GPR_DEBUG, "inproc_transports_create"); - inproc_transport *st = (inproc_transport *)gpr_zalloc(sizeof(*st)); - inproc_transport *ct = (inproc_transport *)gpr_zalloc(sizeof(*ct)); - // Share one lock between both sides since both sides get affected - st->mu = ct->mu = (shared_mu *)gpr_malloc(sizeof(*st->mu)); - gpr_mu_init(&st->mu->mu); - gpr_ref_init(&st->mu->refs, 2); - st->base.vtable = &inproc_vtable; - ct->base.vtable = &inproc_vtable; - // Start each side of transport with 2 refs since they each have a ref - // to the other - gpr_ref_init(&st->refs, 2); - gpr_ref_init(&ct->refs, 2); - st->is_client = false; - ct->is_client = true; - grpc_connectivity_state_init(&st->connectivity, GRPC_CHANNEL_READY, - "inproc_server"); - grpc_connectivity_state_init(&ct->connectivity, GRPC_CHANNEL_READY, - "inproc_client"); - st->other_side = ct; - ct->other_side = st; - st->stream_list = NULL; - ct->stream_list = NULL; - *server_transport = (grpc_transport *)st; - *client_transport = (grpc_transport *)ct; -} - -grpc_channel *grpc_inproc_channel_create(grpc_server *server, - grpc_channel_args *args, - void *reserved) { - GRPC_API_TRACE("grpc_inproc_channel_create(server=%p, args=%p)", 2, - (server, args)); - - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - - const grpc_channel_args *server_args = grpc_server_get_channel_args(server); - - // Add a default authority channel argument for the client - - grpc_arg default_authority_arg; - default_authority_arg.type = GRPC_ARG_STRING; - default_authority_arg.key = (char *)GRPC_ARG_DEFAULT_AUTHORITY; - default_authority_arg.value.string = (char *)"inproc.authority"; - grpc_channel_args *client_args = - grpc_channel_args_copy_and_add(args, &default_authority_arg, 1); - - grpc_transport *server_transport; - grpc_transport *client_transport; - inproc_transports_create(&exec_ctx, &server_transport, server_args, - &client_transport, client_args); - - grpc_server_setup_transport(&exec_ctx, server, server_transport, NULL, - server_args); - grpc_channel *channel = - grpc_channel_create(&exec_ctx, "inproc", client_args, - GRPC_CLIENT_DIRECT_CHANNEL, client_transport); - - // Free up created channel args - grpc_channel_args_destroy(&exec_ctx, client_args); - - // Now finish scheduled operations - grpc_exec_ctx_finish(&exec_ctx); - - return channel; -} - -void grpc_inproc_transport_shutdown(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_unref_internal(&exec_ctx, g_empty_slice); - grpc_slice_unref_internal(&exec_ctx, g_fake_path_key); - grpc_slice_unref_internal(&exec_ctx, g_fake_path_value); - grpc_slice_unref_internal(&exec_ctx, g_fake_auth_key); - grpc_slice_unref_internal(&exec_ctx, g_fake_auth_value); - grpc_exec_ctx_finish(&exec_ctx); -} diff --git a/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.cc b/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.cc new file mode 100644 index 000000000..2c3bff5c1 --- /dev/null +++ b/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.cc @@ -0,0 +1,1238 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include <grpc/support/port_platform.h> + +#include <grpc/support/alloc.h> +#include <grpc/support/string_util.h> +#include <grpc/support/sync.h> +#include <grpc/support/time.h> +#include <string.h> +#include "src/core/ext/transport/inproc/inproc_transport.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/surface/api_trace.h" +#include "src/core/lib/surface/channel.h" +#include "src/core/lib/surface/channel_stack_type.h" +#include "src/core/lib/surface/server.h" +#include "src/core/lib/transport/connectivity_state.h" +#include "src/core/lib/transport/error_utils.h" +#include "src/core/lib/transport/transport_impl.h" + +#define INPROC_LOG(...) \ + do { \ + if (grpc_inproc_trace.enabled()) gpr_log(__VA_ARGS__); \ + } while (0) + +static grpc_slice g_empty_slice; +static grpc_slice g_fake_path_key; +static grpc_slice g_fake_path_value; +static grpc_slice g_fake_auth_key; +static grpc_slice g_fake_auth_value; + +typedef struct { + gpr_mu mu; + gpr_refcount refs; +} shared_mu; + +typedef struct inproc_transport { + grpc_transport base; + shared_mu* mu; + gpr_refcount refs; + bool is_client; + grpc_connectivity_state_tracker connectivity; + void (*accept_stream_cb)(void* user_data, grpc_transport* transport, + const void* server_data); + void* accept_stream_data; + bool is_closed; + struct inproc_transport* other_side; + struct inproc_stream* stream_list; +} inproc_transport; + +typedef struct inproc_stream { + inproc_transport* t; + grpc_metadata_batch to_read_initial_md; + uint32_t to_read_initial_md_flags; + bool to_read_initial_md_filled; + grpc_metadata_batch to_read_trailing_md; + bool to_read_trailing_md_filled; + bool ops_needed; + bool op_closure_scheduled; + grpc_closure op_closure; + // Write buffer used only during gap at init time when client-side + // stream is set up but server side stream is not yet set up + grpc_metadata_batch write_buffer_initial_md; + bool write_buffer_initial_md_filled; + uint32_t write_buffer_initial_md_flags; + grpc_millis write_buffer_deadline; + grpc_metadata_batch write_buffer_trailing_md; + bool write_buffer_trailing_md_filled; + grpc_error* write_buffer_cancel_error; + + struct inproc_stream* other_side; + bool other_side_closed; // won't talk anymore + bool write_buffer_other_side_closed; // on hold + grpc_stream_refcount* refs; + grpc_closure* closure_at_destroy; + + gpr_arena* arena; + + grpc_transport_stream_op_batch* send_message_op; + grpc_transport_stream_op_batch* send_trailing_md_op; + grpc_transport_stream_op_batch* recv_initial_md_op; + grpc_transport_stream_op_batch* recv_message_op; + grpc_transport_stream_op_batch* recv_trailing_md_op; + + grpc_slice_buffer recv_message; + grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> recv_stream; + bool recv_inited; + + bool initial_md_sent; + bool trailing_md_sent; + bool initial_md_recvd; + bool trailing_md_recvd; + + bool closed; + + grpc_error* cancel_self_error; + grpc_error* cancel_other_error; + + grpc_millis deadline; + + bool listed; + struct inproc_stream*
stream_list_prev; + struct inproc_stream* stream_list_next; +} inproc_stream; + +static grpc_closure do_nothing_closure; +static bool cancel_stream_locked(inproc_stream* s, grpc_error* error); +static void op_state_machine(void* arg, grpc_error* error); + +static void ref_transport(inproc_transport* t) { + INPROC_LOG(GPR_INFO, "ref_transport %p", t); + gpr_ref(&t->refs); +} + +static void really_destroy_transport(inproc_transport* t) { + INPROC_LOG(GPR_INFO, "really_destroy_transport %p", t); + grpc_connectivity_state_destroy(&t->connectivity); + if (gpr_unref(&t->mu->refs)) { + gpr_free(t->mu); + } + gpr_free(t); +} + +static void unref_transport(inproc_transport* t) { + INPROC_LOG(GPR_INFO, "unref_transport %p", t); + if (gpr_unref(&t->refs)) { + really_destroy_transport(t); + } +} + +#ifndef NDEBUG +#define STREAM_REF(refs, reason) grpc_stream_ref(refs, reason) +#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs, reason) +#else +#define STREAM_REF(refs, reason) grpc_stream_ref(refs) +#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs) +#endif + +static void ref_stream(inproc_stream* s, const char* reason) { + INPROC_LOG(GPR_INFO, "ref_stream %p %s", s, reason); + STREAM_REF(s->refs, reason); +} + +static void unref_stream(inproc_stream* s, const char* reason) { + INPROC_LOG(GPR_INFO, "unref_stream %p %s", s, reason); + STREAM_UNREF(s->refs, reason); +} + +static void really_destroy_stream(inproc_stream* s) { + INPROC_LOG(GPR_INFO, "really_destroy_stream %p", s); + + GRPC_ERROR_UNREF(s->write_buffer_cancel_error); + GRPC_ERROR_UNREF(s->cancel_self_error); + GRPC_ERROR_UNREF(s->cancel_other_error); + + if (s->recv_inited) { + grpc_slice_buffer_destroy_internal(&s->recv_message); + } + + unref_transport(s->t); + + if (s->closure_at_destroy) { + GRPC_CLOSURE_SCHED(s->closure_at_destroy, GRPC_ERROR_NONE); + } +} + +static void log_metadata(const grpc_metadata_batch* md_batch, bool is_client, + bool is_initial) { + for (grpc_linked_mdelem* md = md_batch->list.head; md != nullptr; + md = md->next) { + char* key = grpc_slice_to_c_string(GRPC_MDKEY(md->md)); + char* value = grpc_slice_to_c_string(GRPC_MDVALUE(md->md)); + gpr_log(GPR_INFO, "INPROC:%s:%s: %s: %s", is_initial ? "HDR" : "TRL", + is_client ? 
"CLI" : "SVR", key, value); + gpr_free(key); + gpr_free(value); + } +} + +static grpc_error* fill_in_metadata(inproc_stream* s, + const grpc_metadata_batch* metadata, + uint32_t flags, grpc_metadata_batch* out_md, + uint32_t* outflags, bool* markfilled) { + if (grpc_inproc_trace.enabled()) { + log_metadata(metadata, s->t->is_client, outflags != nullptr); + } + + if (outflags != nullptr) { + *outflags = flags; + } + if (markfilled != nullptr) { + *markfilled = true; + } + grpc_error* error = GRPC_ERROR_NONE; + for (grpc_linked_mdelem* elem = metadata->list.head; + (elem != nullptr) && (error == GRPC_ERROR_NONE); elem = elem->next) { + grpc_linked_mdelem* nelem = static_cast( + gpr_arena_alloc(s->arena, sizeof(*nelem))); + nelem->md = + grpc_mdelem_from_slices(grpc_slice_intern(GRPC_MDKEY(elem->md)), + grpc_slice_intern(GRPC_MDVALUE(elem->md))); + + error = grpc_metadata_batch_link_tail(out_md, nelem); + } + return error; +} + +static int init_stream(grpc_transport* gt, grpc_stream* gs, + grpc_stream_refcount* refcount, const void* server_data, + gpr_arena* arena) { + INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data); + inproc_transport* t = reinterpret_cast(gt); + inproc_stream* s = reinterpret_cast(gs); + s->arena = arena; + + s->refs = refcount; + // Ref this stream right now + ref_stream(s, "inproc_init_stream:init"); + + grpc_metadata_batch_init(&s->to_read_initial_md); + s->to_read_initial_md_flags = 0; + s->to_read_initial_md_filled = false; + grpc_metadata_batch_init(&s->to_read_trailing_md); + s->to_read_trailing_md_filled = false; + grpc_metadata_batch_init(&s->write_buffer_initial_md); + s->write_buffer_initial_md_flags = 0; + s->write_buffer_initial_md_filled = false; + grpc_metadata_batch_init(&s->write_buffer_trailing_md); + s->write_buffer_trailing_md_filled = false; + s->ops_needed = false; + s->op_closure_scheduled = false; + GRPC_CLOSURE_INIT(&s->op_closure, op_state_machine, s, + grpc_schedule_on_exec_ctx); + s->t = t; + s->closure_at_destroy = nullptr; + s->other_side_closed = false; + + s->initial_md_sent = s->trailing_md_sent = s->initial_md_recvd = + s->trailing_md_recvd = false; + + s->closed = false; + + s->cancel_self_error = GRPC_ERROR_NONE; + s->cancel_other_error = GRPC_ERROR_NONE; + s->write_buffer_cancel_error = GRPC_ERROR_NONE; + s->deadline = GRPC_MILLIS_INF_FUTURE; + s->write_buffer_deadline = GRPC_MILLIS_INF_FUTURE; + + s->stream_list_prev = nullptr; + gpr_mu_lock(&t->mu->mu); + s->listed = true; + ref_stream(s, "inproc_init_stream:list"); + s->stream_list_next = t->stream_list; + if (t->stream_list) { + t->stream_list->stream_list_prev = s; + } + t->stream_list = s; + gpr_mu_unlock(&t->mu->mu); + + if (!server_data) { + ref_transport(t); + inproc_transport* st = t->other_side; + ref_transport(st); + s->other_side = nullptr; // will get filled in soon + // Pass the client-side stream address to the server-side for a ref + ref_stream(s, "inproc_init_stream:clt"); // ref it now on behalf of server + // side to avoid destruction + INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p", st->accept_stream_cb, + st->accept_stream_data); + (*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)s); + } else { + // This is the server-side and is being called through accept_stream_cb + inproc_stream* cs = (inproc_stream*)server_data; + s->other_side = cs; + // Ref the server-side stream on behalf of the client now + ref_stream(s, "inproc_init_stream:srv"); + + // Now we are about to affect the other side, so lock the transport + // to make 
sure that it doesn't get destroyed + gpr_mu_lock(&s->t->mu->mu); + cs->other_side = s; + // Now transfer from the other side's write_buffer if any to the to_read + // buffer + if (cs->write_buffer_initial_md_filled) { + fill_in_metadata(s, &cs->write_buffer_initial_md, + cs->write_buffer_initial_md_flags, + &s->to_read_initial_md, &s->to_read_initial_md_flags, + &s->to_read_initial_md_filled); + s->deadline = GPR_MIN(s->deadline, cs->write_buffer_deadline); + grpc_metadata_batch_clear(&cs->write_buffer_initial_md); + cs->write_buffer_initial_md_filled = false; + } + if (cs->write_buffer_trailing_md_filled) { + fill_in_metadata(s, &cs->write_buffer_trailing_md, 0, + &s->to_read_trailing_md, nullptr, + &s->to_read_trailing_md_filled); + grpc_metadata_batch_clear(&cs->write_buffer_trailing_md); + cs->write_buffer_trailing_md_filled = false; + } + if (cs->write_buffer_cancel_error != GRPC_ERROR_NONE) { + s->cancel_other_error = cs->write_buffer_cancel_error; + cs->write_buffer_cancel_error = GRPC_ERROR_NONE; + } + + gpr_mu_unlock(&s->t->mu->mu); + } + return 0; // return value is not important +} + +static void close_stream_locked(inproc_stream* s) { + if (!s->closed) { + // Release the metadata that we would have written out + grpc_metadata_batch_destroy(&s->write_buffer_initial_md); + grpc_metadata_batch_destroy(&s->write_buffer_trailing_md); + + if (s->listed) { + inproc_stream* p = s->stream_list_prev; + inproc_stream* n = s->stream_list_next; + if (p != nullptr) { + p->stream_list_next = n; + } else { + s->t->stream_list = n; + } + if (n != nullptr) { + n->stream_list_prev = p; + } + s->listed = false; + unref_stream(s, "close_stream:list"); + } + s->closed = true; + unref_stream(s, "close_stream:closing"); + } +} + +// This function means that we are done talking/listening to the other side +static void close_other_side_locked(inproc_stream* s, const char* reason) { + if (s->other_side != nullptr) { + // First release the metadata that came from the other side's arena + grpc_metadata_batch_destroy(&s->to_read_initial_md); + grpc_metadata_batch_destroy(&s->to_read_trailing_md); + + unref_stream(s->other_side, reason); + s->other_side_closed = true; + s->other_side = nullptr; + } else if (!s->other_side_closed) { + s->write_buffer_other_side_closed = true; + } +} + +// Call the on_complete closure associated with this stream_op_batch if +// this stream_op_batch is only one of the pending operations for this +// stream. 
This is called when one of the pending operations for the stream +// is done and about to be NULLed out +static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error, + grpc_transport_stream_op_batch* op, + const char* msg) { + int is_sm = static_cast<int>(op == s->send_message_op); + int is_stm = static_cast<int>(op == s->send_trailing_md_op); + int is_rim = static_cast<int>(op == s->recv_initial_md_op); + int is_rm = static_cast<int>(op == s->recv_message_op); + int is_rtm = static_cast<int>(op == s->recv_trailing_md_op); + + if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) { + INPROC_LOG(GPR_INFO, "%s %p %p %p", msg, s, op, error); + GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_REF(error)); + } +} + +static void maybe_schedule_op_closure_locked(inproc_stream* s, + grpc_error* error) { + if (s && s->ops_needed && !s->op_closure_scheduled) { + GRPC_CLOSURE_SCHED(&s->op_closure, GRPC_ERROR_REF(error)); + s->op_closure_scheduled = true; + s->ops_needed = false; + } +} + +static void fail_helper_locked(inproc_stream* s, grpc_error* error) { + INPROC_LOG(GPR_INFO, "op_state_machine %p fail_helper", s); + // If we're failing this side, we need to make sure that + // we also send or have already sent trailing metadata + if (!s->trailing_md_sent) { + // Send trailing md to the other side indicating cancellation + s->trailing_md_sent = true; + + grpc_metadata_batch fake_md; + grpc_metadata_batch_init(&fake_md); + + inproc_stream* other = s->other_side; + grpc_metadata_batch* dest = (other == nullptr) + ? &s->write_buffer_trailing_md + : &other->to_read_trailing_md; + bool* destfilled = (other == nullptr) ? &s->write_buffer_trailing_md_filled + : &other->to_read_trailing_md_filled; + fill_in_metadata(s, &fake_md, 0, dest, nullptr, destfilled); + grpc_metadata_batch_destroy(&fake_md); + + if (other != nullptr) { + if (other->cancel_other_error == GRPC_ERROR_NONE) { + other->cancel_other_error = GRPC_ERROR_REF(error); + } + maybe_schedule_op_closure_locked(other, error); + } else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) { + s->write_buffer_cancel_error = GRPC_ERROR_REF(error); + } + } + if (s->recv_initial_md_op) { + grpc_error* err; + if (!s->t->is_client) { + // If this is a server, provide initial metadata with a path and authority + // since it expects that as well as no error yet + grpc_metadata_batch fake_md; + grpc_metadata_batch_init(&fake_md); + grpc_linked_mdelem* path_md = static_cast<grpc_linked_mdelem*>( + gpr_arena_alloc(s->arena, sizeof(*path_md))); + path_md->md = grpc_mdelem_from_slices(g_fake_path_key, g_fake_path_value); + GPR_ASSERT(grpc_metadata_batch_link_tail(&fake_md, path_md) == + GRPC_ERROR_NONE); + grpc_linked_mdelem* auth_md = static_cast<grpc_linked_mdelem*>( + gpr_arena_alloc(s->arena, sizeof(*auth_md))); + auth_md->md = grpc_mdelem_from_slices(g_fake_auth_key, g_fake_auth_value); + GPR_ASSERT(grpc_metadata_batch_link_tail(&fake_md, auth_md) == + GRPC_ERROR_NONE); + + fill_in_metadata( + s, &fake_md, 0, + s->recv_initial_md_op->payload->recv_initial_metadata + .recv_initial_metadata, + s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags, + nullptr); + grpc_metadata_batch_destroy(&fake_md); + err = GRPC_ERROR_NONE; + } else { + err = GRPC_ERROR_REF(error); + } + if (s->recv_initial_md_op->payload->recv_initial_metadata + .trailing_metadata_available != nullptr) { + // Set to true unconditionally, because we're failing the call, so even + // if we haven't actually seen the send_trailing_metadata op from the + // other side, we're going to return trailing metadata anyway.
+ *s->recv_initial_md_op->payload->recv_initial_metadata + .trailing_metadata_available = true; + } + INPROC_LOG(GPR_INFO, + "fail_helper %p scheduling initial-metadata-ready %p %p", s, + error, err); + GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata + .recv_initial_metadata_ready, + err); + // Last use of err so no need to REF and then UNREF it + + complete_if_batch_end_locked( + s, error, s->recv_initial_md_op, + "fail_helper scheduling recv-initial-metadata-on-complete"); + s->recv_initial_md_op = nullptr; + } + if (s->recv_message_op) { + INPROC_LOG(GPR_INFO, "fail_helper %p scheduling message-ready %p", s, + error); + GRPC_CLOSURE_SCHED( + s->recv_message_op->payload->recv_message.recv_message_ready, + GRPC_ERROR_REF(error)); + complete_if_batch_end_locked( + s, error, s->recv_message_op, + "fail_helper scheduling recv-message-on-complete"); + s->recv_message_op = nullptr; + } + if (s->send_message_op) { + s->send_message_op->payload->send_message.send_message.reset(); + complete_if_batch_end_locked( + s, error, s->send_message_op, + "fail_helper scheduling send-message-on-complete"); + s->send_message_op = nullptr; + } + if (s->send_trailing_md_op) { + complete_if_batch_end_locked( + s, error, s->send_trailing_md_op, + "fail_helper scheduling send-trailng-md-on-complete"); + s->send_trailing_md_op = nullptr; + } + if (s->recv_trailing_md_op) { + INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-md-on-complete %p", + s, error); + complete_if_batch_end_locked( + s, error, s->recv_trailing_md_op, + "fail_helper scheduling recv-trailing-metadata-on-complete"); + s->recv_trailing_md_op = nullptr; + } + close_other_side_locked(s, "fail_helper:other_side"); + close_stream_locked(s); + + GRPC_ERROR_UNREF(error); +} + +// TODO(vjpai): It should not be necessary to drain the incoming byte +// stream and create a new one; instead, we should simply pass the byte +// stream from the sender directly to the receiver as-is. +// +// Note that fixing this will also avoid the assumption in this code +// that the incoming byte stream's next() call will always return +// synchronously. That assumption is true today but may not always be +// true in the future. 
+static void message_transfer_locked(inproc_stream* sender, + inproc_stream* receiver) { + size_t remaining = + sender->send_message_op->payload->send_message.send_message->length(); + if (receiver->recv_inited) { + grpc_slice_buffer_destroy_internal(&receiver->recv_message); + } + grpc_slice_buffer_init(&receiver->recv_message); + receiver->recv_inited = true; + do { + grpc_slice message_slice; + grpc_closure unused; + GPR_ASSERT( + sender->send_message_op->payload->send_message.send_message->Next( + SIZE_MAX, &unused)); + grpc_error* error = + sender->send_message_op->payload->send_message.send_message->Pull( + &message_slice); + if (error != GRPC_ERROR_NONE) { + cancel_stream_locked(sender, GRPC_ERROR_REF(error)); + break; + } + GPR_ASSERT(error == GRPC_ERROR_NONE); + remaining -= GRPC_SLICE_LENGTH(message_slice); + grpc_slice_buffer_add(&receiver->recv_message, message_slice); + } while (remaining > 0); + sender->send_message_op->payload->send_message.send_message.reset(); + + receiver->recv_stream.Init(&receiver->recv_message, 0); + receiver->recv_message_op->payload->recv_message.recv_message->reset( + receiver->recv_stream.get()); + INPROC_LOG(GPR_INFO, "message_transfer_locked %p scheduling message-ready", + receiver); + GRPC_CLOSURE_SCHED( + receiver->recv_message_op->payload->recv_message.recv_message_ready, + GRPC_ERROR_NONE); + complete_if_batch_end_locked( + sender, GRPC_ERROR_NONE, sender->send_message_op, + "message_transfer scheduling sender on_complete"); + complete_if_batch_end_locked( + receiver, GRPC_ERROR_NONE, receiver->recv_message_op, + "message_transfer scheduling receiver on_complete"); + + receiver->recv_message_op = nullptr; + sender->send_message_op = nullptr; +} + +static void op_state_machine(void* arg, grpc_error* error) { + // This function gets called when we have contents in the unprocessed reads + // Get what we want based on our ops wanted + // Schedule our appropriate closures + // and then return to ops_needed state if still needed + + // Since this is a closure directly invoked by the combiner, it should not + // unref the error parameter explicitly; the combiner will do that implicitly + grpc_error* new_err = GRPC_ERROR_NONE; + + bool needs_close = false; + + INPROC_LOG(GPR_INFO, "op_state_machine %p", arg); + inproc_stream* s = static_cast<inproc_stream*>(arg); + gpr_mu* mu = &s->t->mu->mu; // keep aside in case s gets closed + gpr_mu_lock(mu); + s->op_closure_scheduled = false; + // cancellation takes precedence + inproc_stream* other = s->other_side; + + if (s->cancel_self_error != GRPC_ERROR_NONE) { + fail_helper_locked(s, GRPC_ERROR_REF(s->cancel_self_error)); + goto done; + } else if (s->cancel_other_error != GRPC_ERROR_NONE) { + fail_helper_locked(s, GRPC_ERROR_REF(s->cancel_other_error)); + goto done; + } else if (error != GRPC_ERROR_NONE) { + fail_helper_locked(s, GRPC_ERROR_REF(error)); + goto done; + } + + if (s->send_message_op && other) { + if (other->recv_message_op) { + message_transfer_locked(s, other); + maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE); + } else if (!s->t->is_client && + (s->trailing_md_sent || other->recv_trailing_md_op)) { + // A server send will never be matched if the client is waiting + // for trailing metadata already + s->send_message_op->payload->send_message.send_message.reset(); + complete_if_batch_end_locked( + s, GRPC_ERROR_NONE, s->send_message_op, + "op_state_machine scheduling send-message-on-complete"); + s->send_message_op = nullptr; + } + } + // Pause a send trailing metadata if there is still an
outstanding + // send message unless we know that the send message will never get + // matched to a receive. This happens on the client if the server has + // already sent status. + if (s->send_trailing_md_op && + (!s->send_message_op || + (s->t->is_client && + (s->trailing_md_recvd || s->to_read_trailing_md_filled)))) { + grpc_metadata_batch* dest = (other == nullptr) + ? &s->write_buffer_trailing_md + : &other->to_read_trailing_md; + bool* destfilled = (other == nullptr) ? &s->write_buffer_trailing_md_filled + : &other->to_read_trailing_md_filled; + if (*destfilled || s->trailing_md_sent) { + // The buffer is already in use; that's an error! + INPROC_LOG(GPR_INFO, "Extra trailing metadata %p", s); + new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata"); + fail_helper_locked(s, GRPC_ERROR_REF(new_err)); + goto done; + } else { + if (!other || !other->closed) { + fill_in_metadata(s, + s->send_trailing_md_op->payload->send_trailing_metadata + .send_trailing_metadata, + 0, dest, nullptr, destfilled); + } + s->trailing_md_sent = true; + if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) { + INPROC_LOG(GPR_INFO, + "op_state_machine %p scheduling trailing-md-on-complete", s); + GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete, + GRPC_ERROR_NONE); + s->recv_trailing_md_op = nullptr; + needs_close = true; + } + } + maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE); + complete_if_batch_end_locked( + s, GRPC_ERROR_NONE, s->send_trailing_md_op, + "op_state_machine scheduling send-trailing-metadata-on-complete"); + s->send_trailing_md_op = nullptr; + } + if (s->recv_initial_md_op) { + if (s->initial_md_recvd) { + new_err = + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md"); + INPROC_LOG( + GPR_INFO, + "op_state_machine %p scheduling on_complete errors for already " + "recvd initial md %p", + s, new_err); + fail_helper_locked(s, GRPC_ERROR_REF(new_err)); + goto done; + } + + if (s->to_read_initial_md_filled) { + s->initial_md_recvd = true; + new_err = fill_in_metadata( + s, &s->to_read_initial_md, s->to_read_initial_md_flags, + s->recv_initial_md_op->payload->recv_initial_metadata + .recv_initial_metadata, + s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags, + nullptr); + s->recv_initial_md_op->payload->recv_initial_metadata + .recv_initial_metadata->deadline = s->deadline; + if (s->recv_initial_md_op->payload->recv_initial_metadata + .trailing_metadata_available != nullptr) { + *s->recv_initial_md_op->payload->recv_initial_metadata + .trailing_metadata_available = + (other != nullptr && other->send_trailing_md_op != nullptr); + } + grpc_metadata_batch_clear(&s->to_read_initial_md); + s->to_read_initial_md_filled = false; + INPROC_LOG(GPR_INFO, + "op_state_machine %p scheduling initial-metadata-ready %p", s, + new_err); + GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata + .recv_initial_metadata_ready, + GRPC_ERROR_REF(new_err)); + complete_if_batch_end_locked( + s, new_err, s->recv_initial_md_op, + "op_state_machine scheduling recv-initial-metadata-on-complete"); + s->recv_initial_md_op = nullptr; + + if (new_err != GRPC_ERROR_NONE) { + INPROC_LOG(GPR_INFO, + "op_state_machine %p scheduling on_complete errors2 %p", s, + new_err); + fail_helper_locked(s, GRPC_ERROR_REF(new_err)); + goto done; + } + } + } + if (s->recv_message_op) { + if (other && other->send_message_op) { + message_transfer_locked(other, s); + maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE); + } + } + if 
(s->recv_trailing_md_op && s->t->is_client && other && + other->send_message_op) { + maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE); + } + if (s->to_read_trailing_md_filled) { + if (s->trailing_md_recvd) { + new_err = + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md"); + INPROC_LOG( + GPR_INFO, + "op_state_machine %p scheduling on_complete errors for already " + "recvd trailing md %p", + s, new_err); + fail_helper_locked(s, GRPC_ERROR_REF(new_err)); + goto done; + } + if (s->recv_message_op != nullptr) { + // This message needs to be wrapped up because it will never be + // satisfied + INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s); + GRPC_CLOSURE_SCHED( + s->recv_message_op->payload->recv_message.recv_message_ready, + GRPC_ERROR_NONE); + complete_if_batch_end_locked( + s, new_err, s->recv_message_op, + "op_state_machine scheduling recv-message-on-complete"); + s->recv_message_op = nullptr; + } + if ((s->trailing_md_sent || s->t->is_client) && s->send_message_op) { + // Nothing further will try to receive from this stream, so finish off + // any outstanding send_message op + s->send_message_op->payload->send_message.send_message.reset(); + complete_if_batch_end_locked( + s, new_err, s->send_message_op, + "op_state_machine scheduling send-message-on-complete"); + s->send_message_op = nullptr; + } + if (s->recv_trailing_md_op != nullptr) { + // We wanted trailing metadata and we got it + s->trailing_md_recvd = true; + new_err = + fill_in_metadata(s, &s->to_read_trailing_md, 0, + s->recv_trailing_md_op->payload + ->recv_trailing_metadata.recv_trailing_metadata, + nullptr, nullptr); + grpc_metadata_batch_clear(&s->to_read_trailing_md); + s->to_read_trailing_md_filled = false; + + // We should schedule the recv_trailing_md_op completion if + // 1. this stream is the client-side + // 2. 
this stream is the server-side AND has already sent its trailing md + // (If the server hasn't already sent its trailing md, it doesn't have + // a final status, so don't mark this op complete) + if (s->t->is_client || s->trailing_md_sent) { + INPROC_LOG(GPR_INFO, + "op_state_machine %p scheduling trailing-md-on-complete %p", + s, new_err); + GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete, + GRPC_ERROR_REF(new_err)); + s->recv_trailing_md_op = nullptr; + needs_close = true; + } else { + INPROC_LOG(GPR_INFO, + "op_state_machine %p server needs to delay handling " + "trailing-md-on-complete %p", + s, new_err); + } + } else { + INPROC_LOG( + GPR_INFO, + "op_state_machine %p has trailing md but not yet waiting for it", s); + } + } + if (s->trailing_md_recvd && s->recv_message_op) { + // No further message will come on this stream, so finish off the + // recv_message_op + INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s); + GRPC_CLOSURE_SCHED( + s->recv_message_op->payload->recv_message.recv_message_ready, + GRPC_ERROR_NONE); + complete_if_batch_end_locked( + s, new_err, s->recv_message_op, + "op_state_machine scheduling recv-message-on-complete"); + s->recv_message_op = nullptr; + } + if (s->trailing_md_recvd && (s->trailing_md_sent || s->t->is_client) && + s->send_message_op) { + // Nothing further will try to receive from this stream, so finish off + // any outstanding send_message op + s->send_message_op->payload->send_message.send_message.reset(); + complete_if_batch_end_locked( + s, new_err, s->send_message_op, + "op_state_machine scheduling send-message-on-complete"); + s->send_message_op = nullptr; + } + if (s->send_message_op || s->send_trailing_md_op || s->recv_initial_md_op || + s->recv_message_op || s->recv_trailing_md_op) { + // Didn't get the item we wanted so we still need to get + // rescheduled + INPROC_LOG( + GPR_INFO, "op_state_machine %p still needs closure %p %p %p %p %p", s, + s->send_message_op, s->send_trailing_md_op, s->recv_initial_md_op, + s->recv_message_op, s->recv_trailing_md_op); + s->ops_needed = true; + } +done: + if (needs_close) { + close_other_side_locked(s, "op_state_machine"); + close_stream_locked(s); + } + gpr_mu_unlock(mu); + GRPC_ERROR_UNREF(new_err); +} + +static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) { + bool ret = false; // was the cancel accepted + INPROC_LOG(GPR_INFO, "cancel_stream %p with %s", s, grpc_error_string(error)); + if (s->cancel_self_error == GRPC_ERROR_NONE) { + ret = true; + s->cancel_self_error = GRPC_ERROR_REF(error); + maybe_schedule_op_closure_locked(s, s->cancel_self_error); + // Send trailing md to the other side indicating cancellation, even if we + // already have + s->trailing_md_sent = true; + + grpc_metadata_batch cancel_md; + grpc_metadata_batch_init(&cancel_md); + + inproc_stream* other = s->other_side; + grpc_metadata_batch* dest = (other == nullptr) + ? &s->write_buffer_trailing_md + : &other->to_read_trailing_md; + bool* destfilled = (other == nullptr) ? 
&s->write_buffer_trailing_md_filled + : &other->to_read_trailing_md_filled; + fill_in_metadata(s, &cancel_md, 0, dest, nullptr, destfilled); + grpc_metadata_batch_destroy(&cancel_md); + + if (other != nullptr) { + if (other->cancel_other_error == GRPC_ERROR_NONE) { + other->cancel_other_error = GRPC_ERROR_REF(s->cancel_self_error); + } + maybe_schedule_op_closure_locked(other, other->cancel_other_error); + } else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) { + s->write_buffer_cancel_error = GRPC_ERROR_REF(s->cancel_self_error); + } + + // if we are a server and already received trailing md but + // couldn't complete that because we hadn't yet sent out trailing + // md, now's the chance + if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) { + complete_if_batch_end_locked( + s, s->cancel_self_error, s->recv_trailing_md_op, + "cancel_stream scheduling trailing-md-on-complete"); + s->recv_trailing_md_op = nullptr; + } + } + + close_other_side_locked(s, "cancel_stream:other_side"); + close_stream_locked(s); + + GRPC_ERROR_UNREF(error); + return ret; +} + +static void perform_stream_op(grpc_transport* gt, grpc_stream* gs, + grpc_transport_stream_op_batch* op) { + INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op); + inproc_stream* s = reinterpret_cast(gs); + gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed + gpr_mu_lock(mu); + + if (grpc_inproc_trace.enabled()) { + if (op->send_initial_metadata) { + log_metadata(op->payload->send_initial_metadata.send_initial_metadata, + s->t->is_client, true); + } + if (op->send_trailing_metadata) { + log_metadata(op->payload->send_trailing_metadata.send_trailing_metadata, + s->t->is_client, false); + } + } + grpc_error* error = GRPC_ERROR_NONE; + grpc_closure* on_complete = op->on_complete; + if (on_complete == nullptr) { + on_complete = &do_nothing_closure; + } + + if (op->cancel_stream) { + // Call cancel_stream_locked without ref'ing the cancel_error because + // this function is responsible to make sure that that field gets unref'ed + cancel_stream_locked(s, op->payload->cancel_stream.cancel_error); + // this op can complete without an error + } else if (s->cancel_self_error != GRPC_ERROR_NONE) { + // already self-canceled so still give it an error + error = GRPC_ERROR_REF(s->cancel_self_error); + } else { + INPROC_LOG(GPR_INFO, "perform_stream_op %p %s%s%s%s%s%s%s", s, + s->t->is_client ? "client" : "server", + op->send_initial_metadata ? " send_initial_metadata" : "", + op->send_message ? " send_message" : "", + op->send_trailing_metadata ? " send_trailing_metadata" : "", + op->recv_initial_metadata ? " recv_initial_metadata" : "", + op->recv_message ? " recv_message" : "", + op->recv_trailing_metadata ? " recv_trailing_metadata" : ""); + } + + bool needs_close = false; + + inproc_stream* other = s->other_side; + if (error == GRPC_ERROR_NONE && + (op->send_initial_metadata || op->send_trailing_metadata)) { + if (s->t->is_closed) { + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Endpoint already shutdown"); + } + if (error == GRPC_ERROR_NONE && op->send_initial_metadata) { + grpc_metadata_batch* dest = (other == nullptr) + ? &s->write_buffer_initial_md + : &other->to_read_initial_md; + uint32_t* destflags = (other == nullptr) + ? &s->write_buffer_initial_md_flags + : &other->to_read_initial_md_flags; + bool* destfilled = (other == nullptr) ? 
&s->write_buffer_initial_md_filled + : &other->to_read_initial_md_filled; + if (*destfilled || s->initial_md_sent) { + // The buffer is already in use; that's an error! + INPROC_LOG(GPR_INFO, "Extra initial metadata %p", s); + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra initial metadata"); + } else { + if (!other || !other->closed) { + fill_in_metadata( + s, op->payload->send_initial_metadata.send_initial_metadata, + op->payload->send_initial_metadata.send_initial_metadata_flags, + dest, destflags, destfilled); + } + if (s->t->is_client) { + grpc_millis* dl = + (other == nullptr) ? &s->write_buffer_deadline : &other->deadline; + *dl = GPR_MIN(*dl, op->payload->send_initial_metadata + .send_initial_metadata->deadline); + s->initial_md_sent = true; + } + } + maybe_schedule_op_closure_locked(other, error); + } + } + + if (error == GRPC_ERROR_NONE && + (op->send_message || op->send_trailing_metadata || + op->recv_initial_metadata || op->recv_message || + op->recv_trailing_metadata)) { + // Mark ops that need to be processed by the closure + if (op->send_message) { + s->send_message_op = op; + } + if (op->send_trailing_metadata) { + s->send_trailing_md_op = op; + } + if (op->recv_initial_metadata) { + s->recv_initial_md_op = op; + } + if (op->recv_message) { + s->recv_message_op = op; + } + if (op->recv_trailing_metadata) { + s->recv_trailing_md_op = op; + } + + // We want to initiate the closure if: + // 1. We want to send a message and the other side wants to receive or end + // 2. We want to send trailing metadata and there isn't an unmatched send + // 3. We want initial metadata and the other side has sent it + // 4. We want to receive a message and there is a message ready + // 5. There is trailing metadata, even if nothing specifically wants + // that because that can shut down the receive message as well + if ((op->send_message && other && + ((other->recv_message_op != nullptr) || + (other->recv_trailing_md_op != nullptr))) || + (op->send_trailing_metadata && !op->send_message) || + (op->recv_initial_metadata && s->to_read_initial_md_filled) || + (op->recv_message && other && (other->send_message_op != nullptr)) || + (s->to_read_trailing_md_filled || s->trailing_md_recvd)) { + if (!s->op_closure_scheduled) { + GRPC_CLOSURE_SCHED(&s->op_closure, GRPC_ERROR_NONE); + s->op_closure_scheduled = true; + } + } else { + s->ops_needed = true; + } + } else { + if (error != GRPC_ERROR_NONE) { + // Schedule op's closures that we didn't push to op state machine + if (op->recv_initial_metadata) { + if (op->payload->recv_initial_metadata.trailing_metadata_available != + nullptr) { + // Set to true unconditionally, because we're failing the call, so + // even if we haven't actually seen the send_trailing_metadata op + // from the other side, we're going to return trailing metadata + // anyway. 
+ *op->payload->recv_initial_metadata.trailing_metadata_available = + true; + } + INPROC_LOG( + GPR_INFO, + "perform_stream_op error %p scheduling initial-metadata-ready %p", + s, error); + GRPC_CLOSURE_SCHED( + op->payload->recv_initial_metadata.recv_initial_metadata_ready, + GRPC_ERROR_REF(error)); + } + if (op->recv_message) { + INPROC_LOG( + GPR_INFO, + "perform_stream_op error %p scheduling recv message-ready %p", s, + error); + GRPC_CLOSURE_SCHED(op->payload->recv_message.recv_message_ready, + GRPC_ERROR_REF(error)); + } + } + INPROC_LOG(GPR_INFO, "perform_stream_op %p scheduling on_complete %p", s, + error); + GRPC_CLOSURE_SCHED(on_complete, GRPC_ERROR_REF(error)); + } + if (needs_close) { + close_other_side_locked(s, "perform_stream_op:other_side"); + close_stream_locked(s); + } + gpr_mu_unlock(mu); + GRPC_ERROR_UNREF(error); +} + +static void close_transport_locked(inproc_transport* t) { + INPROC_LOG(GPR_INFO, "close_transport %p %d", t, t->is_closed); + grpc_connectivity_state_set( + &t->connectivity, GRPC_CHANNEL_SHUTDOWN, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Closing transport."), + "close transport"); + if (!t->is_closed) { + t->is_closed = true; + /* Also end all streams on this transport */ + while (t->stream_list != nullptr) { + // cancel_stream_locked also adjusts stream list + cancel_stream_locked( + t->stream_list, + grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); + } + } +} + +static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) { + inproc_transport* t = reinterpret_cast<inproc_transport*>(gt); + INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", t, op); + gpr_mu_lock(&t->mu->mu); + if (op->on_connectivity_state_change) { + grpc_connectivity_state_notify_on_state_change( + &t->connectivity, op->connectivity_state, + op->on_connectivity_state_change); + } + if (op->set_accept_stream) { + t->accept_stream_cb = op->set_accept_stream_fn; + t->accept_stream_data = op->set_accept_stream_user_data; + } + if (op->on_consumed) { + GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE); + } + + bool do_close = false; + if (op->goaway_error != GRPC_ERROR_NONE) { + do_close = true; + GRPC_ERROR_UNREF(op->goaway_error); + } + if (op->disconnect_with_error != GRPC_ERROR_NONE) { + do_close = true; + GRPC_ERROR_UNREF(op->disconnect_with_error); + } + + if (do_close) { + close_transport_locked(t); + } + gpr_mu_unlock(&t->mu->mu); +} + +static void destroy_stream(grpc_transport* gt, grpc_stream* gs, + grpc_closure* then_schedule_closure) { + INPROC_LOG(GPR_INFO, "destroy_stream %p %p", gs, then_schedule_closure); + inproc_stream* s = reinterpret_cast<inproc_stream*>(gs); + s->closure_at_destroy = then_schedule_closure; + really_destroy_stream(s); +} + +static void destroy_transport(grpc_transport* gt) { + inproc_transport* t = reinterpret_cast<inproc_transport*>(gt); + INPROC_LOG(GPR_INFO, "destroy_transport %p", t); + gpr_mu_lock(&t->mu->mu); + close_transport_locked(t); + gpr_mu_unlock(&t->mu->mu); + unref_transport(t->other_side); + unref_transport(t); +} + +/******************************************************************************* + * INTEGRATION GLUE + */ + +static void set_pollset(grpc_transport* gt, grpc_stream* gs, + grpc_pollset* pollset) { + // Nothing to do here +} + +static void set_pollset_set(grpc_transport* gt, grpc_stream* gs, + grpc_pollset_set* pollset_set) { + // Nothing to do here +} + +static grpc_endpoint* get_endpoint(grpc_transport* t) { return nullptr; } +
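The hunk above is the integration glue that plugs the in-process transport into the generic grpc_transport vtable. As a rough, hedged sketch (not part of the vendored diff, and only buildable inside the gRPC core tree because the header is internal), the entry point declared in inproc_transport.h can be driven from C-core code roughly as follows; the server and completion-queue calls are the ordinary public grpc API, and the setup order shown is an assumption for illustration, not something this patch prescribes.

#include <grpc/grpc.h>

#include "src/core/ext/transport/inproc/inproc_transport.h"

int main(void) {
  grpc_init();
  // An ordinary core server plus completion queue; no port is bound for inproc.
  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
  grpc_server* server = grpc_server_create(nullptr, nullptr);
  grpc_server_register_completion_queue(server, cq, nullptr);
  grpc_server_start(server);
  // One call creates both transport halves; perform_stream_op above then
  // shuttles op batches directly between the paired inproc_stream objects.
  grpc_channel* channel = grpc_inproc_channel_create(server, nullptr, nullptr);
  // ... start calls on `channel`; they are served by `server` in-process ...
  grpc_channel_destroy(channel);
  grpc_server_shutdown_and_notify(server, cq, nullptr);
  // A real program drains cq until the shutdown tag is returned before the
  // teardown below.
  grpc_server_destroy(server);
  grpc_completion_queue_shutdown(cq);
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  return 0;
}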
+/******************************************************************************* + * GLOBAL INIT AND DESTROY + */ +static void do_nothing(void* arg, grpc_error* error) {} + +void grpc_inproc_transport_init(void) { + grpc_core::ExecCtx exec_ctx; + GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, nullptr, + grpc_schedule_on_exec_ctx); + g_empty_slice = grpc_slice_from_static_buffer(nullptr, 0); + + grpc_slice key_tmp = grpc_slice_from_static_string(":path"); + g_fake_path_key = grpc_slice_intern(key_tmp); + grpc_slice_unref_internal(key_tmp); + + g_fake_path_value = grpc_slice_from_static_string("/"); + + grpc_slice auth_tmp = grpc_slice_from_static_string(":authority"); + g_fake_auth_key = grpc_slice_intern(auth_tmp); + grpc_slice_unref_internal(auth_tmp); + + g_fake_auth_value = grpc_slice_from_static_string("inproc-fail"); +} + +static const grpc_transport_vtable inproc_vtable = { + sizeof(inproc_stream), "inproc", init_stream, + set_pollset, set_pollset_set, perform_stream_op, + perform_transport_op, destroy_stream, destroy_transport, + get_endpoint}; + +/******************************************************************************* + * Main inproc transport functions + */ +static void inproc_transports_create(grpc_transport** server_transport, + const grpc_channel_args* server_args, + grpc_transport** client_transport, + const grpc_channel_args* client_args) { + INPROC_LOG(GPR_INFO, "inproc_transports_create"); + inproc_transport* st = + static_cast(gpr_zalloc(sizeof(*st))); + inproc_transport* ct = + static_cast(gpr_zalloc(sizeof(*ct))); + // Share one lock between both sides since both sides get affected + st->mu = ct->mu = static_cast(gpr_malloc(sizeof(*st->mu))); + gpr_mu_init(&st->mu->mu); + gpr_ref_init(&st->mu->refs, 2); + st->base.vtable = &inproc_vtable; + ct->base.vtable = &inproc_vtable; + // Start each side of transport with 2 refs since they each have a ref + // to the other + gpr_ref_init(&st->refs, 2); + gpr_ref_init(&ct->refs, 2); + st->is_client = false; + ct->is_client = true; + grpc_connectivity_state_init(&st->connectivity, GRPC_CHANNEL_READY, + "inproc_server"); + grpc_connectivity_state_init(&ct->connectivity, GRPC_CHANNEL_READY, + "inproc_client"); + st->other_side = ct; + ct->other_side = st; + st->stream_list = nullptr; + ct->stream_list = nullptr; + *server_transport = reinterpret_cast(st); + *client_transport = reinterpret_cast(ct); +} + +grpc_channel* grpc_inproc_channel_create(grpc_server* server, + grpc_channel_args* args, + void* reserved) { + GRPC_API_TRACE("grpc_inproc_channel_create(server=%p, args=%p)", 2, + (server, args)); + + grpc_core::ExecCtx exec_ctx; + + const grpc_channel_args* server_args = grpc_server_get_channel_args(server); + + // Add a default authority channel argument for the client + + grpc_arg default_authority_arg; + default_authority_arg.type = GRPC_ARG_STRING; + default_authority_arg.key = (char*)GRPC_ARG_DEFAULT_AUTHORITY; + default_authority_arg.value.string = (char*)"inproc.authority"; + grpc_channel_args* client_args = + grpc_channel_args_copy_and_add(args, &default_authority_arg, 1); + + grpc_transport* server_transport; + grpc_transport* client_transport; + inproc_transports_create(&server_transport, server_args, &client_transport, + client_args); + + grpc_server_setup_transport(server, server_transport, nullptr, server_args); + grpc_channel* channel = grpc_channel_create( + "inproc", client_args, GRPC_CLIENT_DIRECT_CHANNEL, client_transport); + + // Free up created channel args + grpc_channel_args_destroy(client_args); + 
+ // Now finish scheduled operations + + return channel; +} + +void grpc_inproc_transport_shutdown(void) { + grpc_core::ExecCtx exec_ctx; + grpc_slice_unref_internal(g_empty_slice); + grpc_slice_unref_internal(g_fake_path_key); + grpc_slice_unref_internal(g_fake_path_value); + grpc_slice_unref_internal(g_fake_auth_key); + grpc_slice_unref_internal(g_fake_auth_value); +} diff --git a/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.h b/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.h index 37e6d99e9..049d1402a 100644 --- a/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.h +++ b/Sources/CgRPC/src/core/ext/transport/inproc/inproc_transport.h @@ -19,23 +19,17 @@ #ifndef GRPC_CORE_EXT_TRANSPORT_INPROC_INPROC_TRANSPORT_H #define GRPC_CORE_EXT_TRANSPORT_INPROC_INPROC_TRANSPORT_H -#include "src/core/lib/transport/transport_impl.h" +#include -#ifdef __cplusplus -extern "C" { -#endif +#include "src/core/lib/transport/transport_impl.h" -grpc_channel *grpc_inproc_channel_create(grpc_server *server, - grpc_channel_args *args, - void *reserved); +grpc_channel* grpc_inproc_channel_create(grpc_server* server, + grpc_channel_args* args, + void* reserved); -extern grpc_tracer_flag grpc_inproc_trace; +extern grpc_core::TraceFlag grpc_inproc_trace; void grpc_inproc_transport_init(void); void grpc_inproc_transport_shutdown(void); -#ifdef __cplusplus -} -#endif - #endif /* GRPC_CORE_EXT_TRANSPORT_INPROC_INPROC_TRANSPORT_H */ diff --git a/Sources/CgRPC/src/core/lib/support/avl.c b/Sources/CgRPC/src/core/lib/avl/avl.cc similarity index 59% rename from Sources/CgRPC/src/core/lib/support/avl.c rename to Sources/CgRPC/src/core/lib/avl/avl.cc index 0e28b24c9..ec106ddb1 100644 --- a/Sources/CgRPC/src/core/lib/support/avl.c +++ b/Sources/CgRPC/src/core/lib/avl/avl.cc @@ -16,32 +16,35 @@ * */ -#include +#include + +#include "src/core/lib/avl/avl.h" #include #include #include #include -#include -gpr_avl gpr_avl_create(const gpr_avl_vtable *vtable) { - gpr_avl out; +#include "src/core/lib/gpr/useful.h" + +grpc_avl grpc_avl_create(const grpc_avl_vtable* vtable) { + grpc_avl out; out.vtable = vtable; - out.root = NULL; + out.root = nullptr; return out; } -static gpr_avl_node *ref_node(gpr_avl_node *node) { +static grpc_avl_node* ref_node(grpc_avl_node* node) { if (node) { gpr_ref(&node->refs); } return node; } -static void unref_node(const gpr_avl_vtable *vtable, gpr_avl_node *node, - void *user_data) { - if (node == NULL) { +static void unref_node(const grpc_avl_vtable* vtable, grpc_avl_node* node, + void* user_data) { + if (node == nullptr) { return; } if (gpr_unref(&node->refs)) { @@ -53,18 +56,19 @@ static void unref_node(const gpr_avl_vtable *vtable, gpr_avl_node *node, } } -static long node_height(gpr_avl_node *node) { - return node == NULL ? 0 : node->height; +static long node_height(grpc_avl_node* node) { + return node == nullptr ? 0 : node->height; } #ifndef NDEBUG -static long calculate_height(gpr_avl_node *node) { - return node == NULL ? 0 : 1 + GPR_MAX(calculate_height(node->left), - calculate_height(node->right)); +static long calculate_height(grpc_avl_node* node) { + return node == nullptr ? 
0 + : 1 + GPR_MAX(calculate_height(node->left), + calculate_height(node->right)); } -static gpr_avl_node *assert_invariants(gpr_avl_node *n) { - if (n == NULL) return NULL; +static grpc_avl_node* assert_invariants(grpc_avl_node* n) { + if (n == nullptr) return nullptr; assert_invariants(n->left); assert_invariants(n->right); assert(calculate_height(n) == n->height); @@ -72,12 +76,12 @@ static gpr_avl_node *assert_invariants(gpr_avl_node *n) { return n; } #else -static gpr_avl_node *assert_invariants(gpr_avl_node *n) { return n; } +static grpc_avl_node* assert_invariants(grpc_avl_node* n) { return n; } #endif -gpr_avl_node *new_node(void *key, void *value, gpr_avl_node *left, - gpr_avl_node *right) { - gpr_avl_node *node = (gpr_avl_node *)gpr_malloc(sizeof(*node)); +grpc_avl_node* new_node(void* key, void* value, grpc_avl_node* left, + grpc_avl_node* right) { + grpc_avl_node* node = static_cast(gpr_malloc(sizeof(*node))); gpr_ref_init(&node->refs, 1); node->key = key; node->value = value; @@ -87,12 +91,12 @@ gpr_avl_node *new_node(void *key, void *value, gpr_avl_node *left, return node; } -static gpr_avl_node *get(const gpr_avl_vtable *vtable, gpr_avl_node *node, - void *key, void *user_data) { +static grpc_avl_node* get(const grpc_avl_vtable* vtable, grpc_avl_node* node, + void* key, void* user_data) { long cmp; - if (node == NULL) { - return NULL; + if (node == nullptr) { + return nullptr; } cmp = vtable->compare_keys(node->key, key, user_data); @@ -105,35 +109,35 @@ static gpr_avl_node *get(const gpr_avl_vtable *vtable, gpr_avl_node *node, } } -void *gpr_avl_get(gpr_avl avl, void *key, void *user_data) { - gpr_avl_node *node = get(avl.vtable, avl.root, key, user_data); - return node ? node->value : NULL; +void* grpc_avl_get(grpc_avl avl, void* key, void* user_data) { + grpc_avl_node* node = get(avl.vtable, avl.root, key, user_data); + return node ? 
node->value : nullptr; } -int gpr_avl_maybe_get(gpr_avl avl, void *key, void **value, void *user_data) { - gpr_avl_node *node = get(avl.vtable, avl.root, key, user_data); - if (node != NULL) { +int grpc_avl_maybe_get(grpc_avl avl, void* key, void** value, void* user_data) { + grpc_avl_node* node = get(avl.vtable, avl.root, key, user_data); + if (node != nullptr) { *value = node->value; return 1; } return 0; } -static gpr_avl_node *rotate_left(const gpr_avl_vtable *vtable, void *key, - void *value, gpr_avl_node *left, - gpr_avl_node *right, void *user_data) { - gpr_avl_node *n = new_node(vtable->copy_key(right->key, user_data), - vtable->copy_value(right->value, user_data), - new_node(key, value, left, ref_node(right->left)), - ref_node(right->right)); +static grpc_avl_node* rotate_left(const grpc_avl_vtable* vtable, void* key, + void* value, grpc_avl_node* left, + grpc_avl_node* right, void* user_data) { + grpc_avl_node* n = new_node(vtable->copy_key(right->key, user_data), + vtable->copy_value(right->value, user_data), + new_node(key, value, left, ref_node(right->left)), + ref_node(right->right)); unref_node(vtable, right, user_data); return n; } -static gpr_avl_node *rotate_right(const gpr_avl_vtable *vtable, void *key, - void *value, gpr_avl_node *left, - gpr_avl_node *right, void *user_data) { - gpr_avl_node *n = +static grpc_avl_node* rotate_right(const grpc_avl_vtable* vtable, void* key, + void* value, grpc_avl_node* left, + grpc_avl_node* right, void* user_data) { + grpc_avl_node* n = new_node(vtable->copy_key(left->key, user_data), vtable->copy_value(left->value, user_data), ref_node(left->left), new_node(key, value, ref_node(left->right), right)); @@ -141,11 +145,12 @@ static gpr_avl_node *rotate_right(const gpr_avl_vtable *vtable, void *key, return n; } -static gpr_avl_node *rotate_left_right(const gpr_avl_vtable *vtable, void *key, - void *value, gpr_avl_node *left, - gpr_avl_node *right, void *user_data) { +static grpc_avl_node* rotate_left_right(const grpc_avl_vtable* vtable, + void* key, void* value, + grpc_avl_node* left, + grpc_avl_node* right, void* user_data) { /* rotate_right(..., rotate_left(left), right) */ - gpr_avl_node *n = + grpc_avl_node* n = new_node(vtable->copy_key(left->right->key, user_data), vtable->copy_value(left->right->value, user_data), new_node(vtable->copy_key(left->key, user_data), @@ -156,11 +161,12 @@ static gpr_avl_node *rotate_left_right(const gpr_avl_vtable *vtable, void *key, return n; } -static gpr_avl_node *rotate_right_left(const gpr_avl_vtable *vtable, void *key, - void *value, gpr_avl_node *left, - gpr_avl_node *right, void *user_data) { +static grpc_avl_node* rotate_right_left(const grpc_avl_vtable* vtable, + void* key, void* value, + grpc_avl_node* left, + grpc_avl_node* right, void* user_data) { /* rotate_left(..., left, rotate_right(right)) */ - gpr_avl_node *n = + grpc_avl_node* n = new_node(vtable->copy_key(right->left->key, user_data), vtable->copy_value(right->left->value, user_data), new_node(key, value, left, ref_node(right->left->left)), @@ -171,9 +177,9 @@ static gpr_avl_node *rotate_right_left(const gpr_avl_vtable *vtable, void *key, return n; } -static gpr_avl_node *rebalance(const gpr_avl_vtable *vtable, void *key, - void *value, gpr_avl_node *left, - gpr_avl_node *right, void *user_data) { +static grpc_avl_node* rebalance(const grpc_avl_vtable* vtable, void* key, + void* value, grpc_avl_node* left, + grpc_avl_node* right, void* user_data) { switch (node_height(left) - node_height(right)) { case 2: if 
(node_height(left->left) - node_height(left->right) == -1) { @@ -196,11 +202,12 @@ static gpr_avl_node *rebalance(const gpr_avl_vtable *vtable, void *key, } } -static gpr_avl_node *add_key(const gpr_avl_vtable *vtable, gpr_avl_node *node, - void *key, void *value, void *user_data) { +static grpc_avl_node* add_key(const grpc_avl_vtable* vtable, + grpc_avl_node* node, void* key, void* value, + void* user_data) { long cmp; - if (node == NULL) { - return new_node(key, value, NULL, NULL); + if (node == nullptr) { + return new_node(key, value, nullptr, nullptr); } cmp = vtable->compare_keys(node->key, key, user_data); if (cmp == 0) { @@ -218,49 +225,49 @@ static gpr_avl_node *add_key(const gpr_avl_vtable *vtable, gpr_avl_node *node, } } -gpr_avl gpr_avl_add(gpr_avl avl, void *key, void *value, void *user_data) { - gpr_avl_node *old_root = avl.root; +grpc_avl grpc_avl_add(grpc_avl avl, void* key, void* value, void* user_data) { + grpc_avl_node* old_root = avl.root; avl.root = add_key(avl.vtable, avl.root, key, value, user_data); assert_invariants(avl.root); unref_node(avl.vtable, old_root, user_data); return avl; } -static gpr_avl_node *in_order_head(gpr_avl_node *node) { - while (node->left != NULL) { +static grpc_avl_node* in_order_head(grpc_avl_node* node) { + while (node->left != nullptr) { node = node->left; } return node; } -static gpr_avl_node *in_order_tail(gpr_avl_node *node) { - while (node->right != NULL) { +static grpc_avl_node* in_order_tail(grpc_avl_node* node) { + while (node->right != nullptr) { node = node->right; } return node; } -static gpr_avl_node *remove_key(const gpr_avl_vtable *vtable, - gpr_avl_node *node, void *key, - void *user_data) { +static grpc_avl_node* remove_key(const grpc_avl_vtable* vtable, + grpc_avl_node* node, void* key, + void* user_data) { long cmp; - if (node == NULL) { - return NULL; + if (node == nullptr) { + return nullptr; } cmp = vtable->compare_keys(node->key, key, user_data); if (cmp == 0) { - if (node->left == NULL) { + if (node->left == nullptr) { return ref_node(node->right); - } else if (node->right == NULL) { + } else if (node->right == nullptr) { return ref_node(node->left); } else if (node->left->height < node->right->height) { - gpr_avl_node *h = in_order_head(node->right); + grpc_avl_node* h = in_order_head(node->right); return rebalance( vtable, vtable->copy_key(h->key, user_data), vtable->copy_value(h->value, user_data), ref_node(node->left), remove_key(vtable, node->right, h->key, user_data), user_data); } else { - gpr_avl_node *h = in_order_tail(node->left); + grpc_avl_node* h = in_order_tail(node->left); return rebalance(vtable, vtable->copy_key(h->key, user_data), vtable->copy_value(h->value, user_data), remove_key(vtable, node->left, h->key, user_data), @@ -279,21 +286,21 @@ static gpr_avl_node *remove_key(const gpr_avl_vtable *vtable, } } -gpr_avl gpr_avl_remove(gpr_avl avl, void *key, void *user_data) { - gpr_avl_node *old_root = avl.root; +grpc_avl grpc_avl_remove(grpc_avl avl, void* key, void* user_data) { + grpc_avl_node* old_root = avl.root; avl.root = remove_key(avl.vtable, avl.root, key, user_data); assert_invariants(avl.root); unref_node(avl.vtable, old_root, user_data); return avl; } -gpr_avl gpr_avl_ref(gpr_avl avl, void *user_data) { +grpc_avl grpc_avl_ref(grpc_avl avl, void* user_data) { ref_node(avl.root); return avl; } -void gpr_avl_unref(gpr_avl avl, void *user_data) { +void grpc_avl_unref(grpc_avl avl, void* user_data) { unref_node(avl.vtable, avl.root, user_data); } -int gpr_avl_is_empty(gpr_avl avl) { return 
avl.root == NULL; } +int grpc_avl_is_empty(grpc_avl avl) { return avl.root == nullptr; } diff --git a/Sources/CgRPC/include/grpc/support/avl.h b/Sources/CgRPC/src/core/lib/avl/avl.h similarity index 59% rename from Sources/CgRPC/include/grpc/support/avl.h rename to Sources/CgRPC/src/core/lib/avl/avl.h index d53ff5d90..15a9d5694 100644 --- a/Sources/CgRPC/include/grpc/support/avl.h +++ b/Sources/CgRPC/src/core/lib/avl/avl.h @@ -16,79 +16,79 @@ * */ -#ifndef GRPC_SUPPORT_AVL_H -#define GRPC_SUPPORT_AVL_H +#ifndef GRPC_CORE_LIB_AVL_AVL_H +#define GRPC_CORE_LIB_AVL_AVL_H + +#include #include /** internal node of an AVL tree */ -typedef struct gpr_avl_node { +typedef struct grpc_avl_node { gpr_refcount refs; - void *key; - void *value; - struct gpr_avl_node *left; - struct gpr_avl_node *right; + void* key; + void* value; + struct grpc_avl_node* left; + struct grpc_avl_node* right; long height; -} gpr_avl_node; +} grpc_avl_node; /** vtable for the AVL tree - * The optional user_data is propagated from the top level gpr_avl_XXX API. + * The optional user_data is propagated from the top level grpc_avl_XXX API. * From the same API call, multiple vtable functions may be called multiple * times. */ -typedef struct gpr_avl_vtable { +typedef struct grpc_avl_vtable { /** destroy a key */ - void (*destroy_key)(void *key, void *user_data); + void (*destroy_key)(void* key, void* user_data); /** copy a key, returning new value */ - void *(*copy_key)(void *key, void *user_data); + void* (*copy_key)(void* key, void* user_data); /** compare key1, key2; return <0 if key1 < key2, >0 if key1 > key2, 0 if key1 == key2 */ - long (*compare_keys)(void *key1, void *key2, void *user_data); + long (*compare_keys)(void* key1, void* key2, void* user_data); /** destroy a value */ - void (*destroy_value)(void *value, void *user_data); + void (*destroy_value)(void* value, void* user_data); /** copy a value */ - void *(*copy_value)(void *value, void *user_data); -} gpr_avl_vtable; + void* (*copy_value)(void* value, void* user_data); +} grpc_avl_vtable; /** "pointer" to an AVL tree - this is a reference - counted object - use gpr_avl_ref to add a reference, - gpr_avl_unref when done with a reference */ -typedef struct gpr_avl { - const gpr_avl_vtable *vtable; - gpr_avl_node *root; -} gpr_avl; + counted object - use grpc_avl_ref to add a reference, + grpc_avl_unref when done with a reference */ +typedef struct grpc_avl { + const grpc_avl_vtable* vtable; + grpc_avl_node* root; +} grpc_avl; /** Create an immutable AVL tree. */ -GPRAPI gpr_avl gpr_avl_create(const gpr_avl_vtable *vtable); +grpc_avl grpc_avl_create(const grpc_avl_vtable* vtable); /** Add a reference to an existing tree - returns the tree as a convenience. The optional user_data will be passed to vtable functions. */ -GPRAPI gpr_avl gpr_avl_ref(gpr_avl avl, void *user_data); +grpc_avl grpc_avl_ref(grpc_avl avl, void* user_data); /** Remove a reference to a tree - destroying it if there are no references left. The optional user_data will be passed to vtable functions. */ -GPRAPI void gpr_avl_unref(gpr_avl avl, void *user_data); +void grpc_avl_unref(grpc_avl avl, void* user_data); /** Return a new tree with (key, value) added to avl. implicitly unrefs avl to allow easy chaining. if key exists in avl, the new tree's key entry updated (i.e. a duplicate is not created). The optional user_data will be passed to vtable functions. 
*/ -GPRAPI gpr_avl gpr_avl_add(gpr_avl avl, void *key, void *value, - void *user_data); +grpc_avl grpc_avl_add(grpc_avl avl, void* key, void* value, void* user_data); /** Return a new tree with key deleted implicitly unrefs avl to allow easy chaining. The optional user_data will be passed to vtable functions. */ -GPRAPI gpr_avl gpr_avl_remove(gpr_avl avl, void *key, void *user_data); +grpc_avl grpc_avl_remove(grpc_avl avl, void* key, void* user_data); /** Lookup key, and return the associated value. Does not mutate avl. Returns NULL if key is not found. The optional user_data will be passed to vtable functions.*/ -GPRAPI void *gpr_avl_get(gpr_avl avl, void *key, void *user_data); +void* grpc_avl_get(grpc_avl avl, void* key, void* user_data); /** Return 1 if avl contains key, 0 otherwise; if it has the key, sets *value to - its value. THe optional user_data will be passed to vtable functions. */ -GPRAPI int gpr_avl_maybe_get(gpr_avl avl, void *key, void **value, - void *user_data); + its value. The optional user_data will be passed to vtable functions. */ +int grpc_avl_maybe_get(grpc_avl avl, void* key, void** value, void* user_data); /** Return 1 if avl is empty, 0 otherwise */ -GPRAPI int gpr_avl_is_empty(gpr_avl avl); +int grpc_avl_is_empty(grpc_avl avl); -#endif /* GRPC_SUPPORT_AVL_H */ +#endif /* GRPC_CORE_LIB_AVL_AVL_H */ diff --git a/Sources/CgRPC/src/core/lib/backoff/backoff.cc b/Sources/CgRPC/src/core/lib/backoff/backoff.cc new file mode 100644 index 000000000..e536abde0 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/backoff/backoff.cc @@ -0,0 +1,78 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/backoff/backoff.h" + +#include + +#include "src/core/lib/gpr/useful.h" + +namespace grpc_core { + +namespace { + +/* Generate a random number between 0 and 1. We roll our own RNG because seeding + * rand() modifies a global variable we have no control over. 
*/ +double generate_uniform_random_number(uint32_t* rng_state) { + constexpr uint32_t two_raise_31 = uint32_t(1) << 31; + *rng_state = (1103515245 * *rng_state + 12345) % two_raise_31; + return *rng_state / static_cast(two_raise_31); +} + +double generate_uniform_random_number_between(uint32_t* rng_state, double a, + double b) { + if (a == b) return a; + if (a > b) GPR_SWAP(double, a, b); // make sure a < b + const double range = b - a; + return a + generate_uniform_random_number(rng_state) * range; +} + +} // namespace + +BackOff::BackOff(const Options& options) + : options_(options), + rng_state_(static_cast(gpr_now(GPR_CLOCK_REALTIME).tv_nsec)) { + Reset(); +} + +grpc_millis BackOff::NextAttemptTime() { + if (initial_) { + initial_ = false; + return current_backoff_ + grpc_core::ExecCtx::Get()->Now(); + } + current_backoff_ = static_cast( + std::min(current_backoff_ * options_.multiplier(), + static_cast(options_.max_backoff()))); + const double jitter = generate_uniform_random_number_between( + &rng_state_, -options_.jitter() * current_backoff_, + options_.jitter() * current_backoff_); + const grpc_millis next_timeout = + static_cast(current_backoff_ + jitter); + return next_timeout + grpc_core::ExecCtx::Get()->Now(); +} + +void BackOff::Reset() { + current_backoff_ = options_.initial_backoff(); + initial_ = true; +} + +void BackOff::SetRandomSeed(uint32_t seed) { rng_state_ = seed; } + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/backoff/backoff.h b/Sources/CgRPC/src/core/lib/backoff/backoff.h new file mode 100644 index 000000000..e769d150e --- /dev/null +++ b/Sources/CgRPC/src/core/lib/backoff/backoff.h @@ -0,0 +1,89 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_BACKOFF_BACKOFF_H +#define GRPC_CORE_LIB_BACKOFF_BACKOFF_H + +#include + +#include "src/core/lib/iomgr/exec_ctx.h" + +namespace grpc_core { + +/// Implementation of the backoff mechanism described in +/// doc/connection-backoff.md +class BackOff { + public: + class Options; + + /// Initialize backoff machinery - does not need to be destroyed + explicit BackOff(const Options& options); + + /// Returns the time at which the next attempt should start. + grpc_millis NextAttemptTime(); + + /// Reset the backoff, so the next value returned by NextAttemptTime() + /// will be the time of the second attempt (rather than the Nth). 
+ void Reset(); + + void SetRandomSeed(unsigned int seed); + + class Options { + public: + Options& set_initial_backoff(grpc_millis initial_backoff) { + initial_backoff_ = initial_backoff; + return *this; + } + Options& set_multiplier(double multiplier) { + multiplier_ = multiplier; + return *this; + } + Options& set_jitter(double jitter) { + jitter_ = jitter; + return *this; + } + Options& set_max_backoff(grpc_millis max_backoff) { + max_backoff_ = max_backoff; + return *this; + } + /// how long to wait after the first failure before retrying + grpc_millis initial_backoff() const { return initial_backoff_; } + /// factor with which to multiply backoff after a failed retry + double multiplier() const { return multiplier_; } + /// amount to randomize backoffs + double jitter() const { return jitter_; } + /// maximum time between retries + grpc_millis max_backoff() const { return max_backoff_; } + + private: + grpc_millis initial_backoff_; + double multiplier_; + double jitter_; + grpc_millis max_backoff_; + }; // class Options + + private: + const Options options_; + uint32_t rng_state_; + bool initial_; + /// current delay before retries + grpc_millis current_backoff_; +}; + +} // namespace grpc_core +#endif /* GRPC_CORE_LIB_BACKOFF_BACKOFF_H */ diff --git a/Sources/CgRPC/src/core/lib/channel/channel_args.c b/Sources/CgRPC/src/core/lib/channel/channel_args.cc similarity index 54% rename from Sources/CgRPC/src/core/lib/channel/channel_args.c rename to Sources/CgRPC/src/core/lib/channel/channel_args.cc index 30248b3c6..e49d532e1 100644 --- a/Sources/CgRPC/src/core/lib/channel/channel_args.c +++ b/Sources/CgRPC/src/core/lib/channel/channel_args.cc @@ -26,12 +26,12 @@ #include #include #include -#include #include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/useful.h" -static grpc_arg copy_arg(const grpc_arg *src) { +static grpc_arg copy_arg(const grpc_arg* src) { grpc_arg dst; dst.type = src->type; dst.key = gpr_strdup(src->key); @@ -51,21 +51,21 @@ static grpc_arg copy_arg(const grpc_arg *src) { return dst; } -grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src, - const grpc_arg *to_add, +grpc_channel_args* grpc_channel_args_copy_and_add(const grpc_channel_args* src, + const grpc_arg* to_add, size_t num_to_add) { - return grpc_channel_args_copy_and_add_and_remove(src, NULL, 0, to_add, + return grpc_channel_args_copy_and_add_and_remove(src, nullptr, 0, to_add, num_to_add); } -grpc_channel_args *grpc_channel_args_copy_and_remove( - const grpc_channel_args *src, const char **to_remove, +grpc_channel_args* grpc_channel_args_copy_and_remove( + const grpc_channel_args* src, const char** to_remove, size_t num_to_remove) { return grpc_channel_args_copy_and_add_and_remove(src, to_remove, - num_to_remove, NULL, 0); + num_to_remove, nullptr, 0); } -static bool should_remove_arg(const grpc_arg *arg, const char **to_remove, +static bool should_remove_arg(const grpc_arg* arg, const char** to_remove, size_t num_to_remove) { for (size_t i = 0; i < num_to_remove; ++i) { if (strcmp(arg->key, to_remove[i]) == 0) return true; @@ -73,12 +73,12 @@ static bool should_remove_arg(const grpc_arg *arg, const char **to_remove, return false; } -grpc_channel_args *grpc_channel_args_copy_and_add_and_remove( - const grpc_channel_args *src, const char **to_remove, size_t num_to_remove, - const grpc_arg *to_add, size_t num_to_add) { +grpc_channel_args* grpc_channel_args_copy_and_add_and_remove( + 
const grpc_channel_args* src, const char** to_remove, size_t num_to_remove, + const grpc_arg* to_add, size_t num_to_add) { // Figure out how many args we'll be copying. size_t num_args_to_copy = 0; - if (src != NULL) { + if (src != nullptr) { for (size_t i = 0; i < src->num_args; ++i) { if (!should_remove_arg(&src->args[i], to_remove, num_to_remove)) { ++num_args_to_copy; @@ -86,17 +86,18 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove( } } // Create result. - grpc_channel_args *dst = - (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args)); + grpc_channel_args* dst = + static_cast(gpr_malloc(sizeof(grpc_channel_args))); dst->num_args = num_args_to_copy + num_to_add; if (dst->num_args == 0) { - dst->args = NULL; + dst->args = nullptr; return dst; } - dst->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * dst->num_args); + dst->args = + static_cast(gpr_malloc(sizeof(grpc_arg) * dst->num_args)); // Copy args from src that are not being removed. size_t dst_idx = 0; - if (src != NULL) { + if (src != nullptr) { for (size_t i = 0; i < src->num_args; ++i) { if (!should_remove_arg(&src->args[i], to_remove, num_to_remove)) { dst->args[dst_idx++] = copy_arg(&src->args[i]); @@ -111,30 +112,31 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove( return dst; } -grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src) { - return grpc_channel_args_copy_and_add(src, NULL, 0); +grpc_channel_args* grpc_channel_args_copy(const grpc_channel_args* src) { + return grpc_channel_args_copy_and_add(src, nullptr, 0); } -grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a, - const grpc_channel_args *b) { +grpc_channel_args* grpc_channel_args_union(const grpc_channel_args* a, + const grpc_channel_args* b) { const size_t max_out = (a->num_args + b->num_args); - grpc_arg *uniques = (grpc_arg *)gpr_malloc(sizeof(*uniques) * max_out); + grpc_arg* uniques = + static_cast(gpr_malloc(sizeof(*uniques) * max_out)); for (size_t i = 0; i < a->num_args; ++i) uniques[i] = a->args[i]; size_t uniques_idx = a->num_args; for (size_t i = 0; i < b->num_args; ++i) { - const char *b_key = b->args[i].key; - if (grpc_channel_args_find(a, b_key) == NULL) { // not found + const char* b_key = b->args[i].key; + if (grpc_channel_args_find(a, b_key) == nullptr) { // not found uniques[uniques_idx++] = b->args[i]; } } - grpc_channel_args *result = - grpc_channel_args_copy_and_add(NULL, uniques, uniques_idx); + grpc_channel_args* result = + grpc_channel_args_copy_and_add(nullptr, uniques, uniques_idx); gpr_free(uniques); return result; } -static int cmp_arg(const grpc_arg *a, const grpc_arg *b) { +static int cmp_arg(const grpc_arg* a, const grpc_arg* b) { int c = GPR_ICMP(a->type, b->type); if (c != 0) return c; c = strcmp(a->key, b->key); @@ -160,26 +162,27 @@ static int cmp_arg(const grpc_arg *a, const grpc_arg *b) { /* stabilizing comparison function: since channel_args ordering matters for * keys with the same name, we need to preserve that ordering */ -static int cmp_key_stable(const void *ap, const void *bp) { - const grpc_arg *const *a = (const grpc_arg *const *)ap; - const grpc_arg *const *b = (const grpc_arg *const *)bp; +static int cmp_key_stable(const void* ap, const void* bp) { + const grpc_arg* const* a = static_cast(ap); + const grpc_arg* const* b = static_cast(bp); int c = strcmp((*a)->key, (*b)->key); if (c == 0) c = GPR_ICMP(*a, *b); return c; } -grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) { - grpc_arg **args = (grpc_arg 
**)gpr_malloc(sizeof(grpc_arg *) * a->num_args); +grpc_channel_args* grpc_channel_args_normalize(const grpc_channel_args* a) { + grpc_arg** args = + static_cast(gpr_malloc(sizeof(grpc_arg*) * a->num_args)); for (size_t i = 0; i < a->num_args; i++) { args[i] = &a->args[i]; } if (a->num_args > 1) - qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable); + qsort(args, a->num_args, sizeof(grpc_arg*), cmp_key_stable); - grpc_channel_args *b = - (grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args)); + grpc_channel_args* b = + static_cast(gpr_malloc(sizeof(grpc_channel_args))); b->num_args = a->num_args; - b->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * b->num_args); + b->args = static_cast(gpr_malloc(sizeof(grpc_arg) * b->num_args)); for (size_t i = 0; i < a->num_args; i++) { b->args[i] = copy_arg(args[i]); } @@ -188,7 +191,7 @@ grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) { return b; } -void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) { +void grpc_channel_args_destroy(grpc_channel_args* a) { size_t i; if (!a) return; for (i = 0; i < a->num_args; i++) { @@ -199,8 +202,7 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) { case GRPC_ARG_INTEGER: break; case GRPC_ARG_POINTER: - a->args[i].value.pointer.vtable->destroy(exec_ctx, - a->args[i].value.pointer.p); + a->args[i].value.pointer.vtable->destroy(a->args[i].value.pointer.p); break; } gpr_free(a->args[i].key); @@ -210,50 +212,25 @@ void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a) { } grpc_compression_algorithm grpc_channel_args_get_compression_algorithm( - const grpc_channel_args *a) { + const grpc_channel_args* a) { size_t i; - if (a == NULL) return GRPC_COMPRESS_NONE; + if (a == nullptr) return GRPC_COMPRESS_NONE; for (i = 0; i < a->num_args; ++i) { if (a->args[i].type == GRPC_ARG_INTEGER && !strcmp(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, a->args[i].key)) { - return (grpc_compression_algorithm)a->args[i].value.integer; + return static_cast(a->args[i].value.integer); break; } } return GRPC_COMPRESS_NONE; } -grpc_stream_compression_algorithm -grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a) { - size_t i; - if (a == NULL) return GRPC_STREAM_COMPRESS_NONE; - for (i = 0; i < a->num_args; ++i) { - if (a->args[i].type == GRPC_ARG_INTEGER && - !strcmp(GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, - a->args[i].key)) { - return (grpc_stream_compression_algorithm)a->args[i].value.integer; - break; - } - } - return GRPC_STREAM_COMPRESS_NONE; -} - -grpc_channel_args *grpc_channel_args_set_compression_algorithm( - grpc_channel_args *a, grpc_compression_algorithm algorithm) { +grpc_channel_args* grpc_channel_args_set_compression_algorithm( + grpc_channel_args* a, grpc_compression_algorithm algorithm) { GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT); grpc_arg tmp; tmp.type = GRPC_ARG_INTEGER; - tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM; - tmp.value.integer = algorithm; - return grpc_channel_args_copy_and_add(a, &tmp, 1); -} - -grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm( - grpc_channel_args *a, grpc_stream_compression_algorithm algorithm) { - GPR_ASSERT(algorithm < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT); - grpc_arg tmp; - tmp.type = GRPC_ARG_INTEGER; - tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM; + tmp.key = (char*)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM; tmp.value.integer = algorithm; return 
grpc_channel_args_copy_and_add(a, &tmp, 1); } @@ -261,9 +238,9 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm( /** Returns 1 if the argument for compression algorithm's enabled states bitset * was found in \a a, returning the arg's value in \a states. Otherwise, returns * 0. */ -static int find_compression_algorithm_states_bitset(const grpc_channel_args *a, - int **states_arg) { - if (a != NULL) { +static int find_compression_algorithm_states_bitset(const grpc_channel_args* a, + int** states_arg) { + if (a != nullptr) { size_t i; for (i = 0; i < a->num_args; ++i) { if (a->args[i].type == GRPC_ARG_INTEGER && @@ -278,37 +255,16 @@ static int find_compression_algorithm_states_bitset(const grpc_channel_args *a, return 0; /* GPR_FALSE */ } -/** Returns 1 if the argument for compression algorithm's enabled states bitset - * was found in \a a, returning the arg's value in \a states. Otherwise, returns - * 0. */ -static int find_stream_compression_algorithm_states_bitset( - const grpc_channel_args *a, int **states_arg) { - if (a != NULL) { - size_t i; - for (i = 0; i < a->num_args; ++i) { - if (a->args[i].type == GRPC_ARG_INTEGER && - !strcmp(GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET, - a->args[i].key)) { - *states_arg = &a->args[i].value.integer; - **states_arg |= 0x1; /* forcefully enable support for no compression */ - return 1; - } - } - } - return 0; /* GPR_FALSE */ -} - -grpc_channel_args *grpc_channel_args_compression_algorithm_set_state( - grpc_exec_ctx *exec_ctx, grpc_channel_args **a, - grpc_compression_algorithm algorithm, int state) { - int *states_arg = NULL; - grpc_channel_args *result = *a; +grpc_channel_args* grpc_channel_args_compression_algorithm_set_state( + grpc_channel_args** a, grpc_compression_algorithm algorithm, int state) { + int* states_arg = nullptr; + grpc_channel_args* result = *a; const int states_arg_found = find_compression_algorithm_states_bitset(*a, &states_arg); if (grpc_channel_args_get_compression_algorithm(*a) == algorithm && state == 0) { - const char *algo_name = NULL; + const char* algo_name = nullptr; GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algo_name) != 0); gpr_log(GPR_ERROR, "Tried to disable default compression algorithm '%s'. 
The " @@ -316,100 +272,47 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state( algo_name); } else if (states_arg_found) { if (state != 0) { - GPR_BITSET((unsigned *)states_arg, algorithm); + GPR_BITSET((unsigned*)states_arg, algorithm); } else if (algorithm != GRPC_COMPRESS_NONE) { - GPR_BITCLEAR((unsigned *)states_arg, algorithm); + GPR_BITCLEAR((unsigned*)states_arg, algorithm); } } else { /* create a new arg */ grpc_arg tmp; tmp.type = GRPC_ARG_INTEGER; - tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET; + tmp.key = (char*)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET; /* all enabled by default */ tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; if (state != 0) { - GPR_BITSET((unsigned *)&tmp.value.integer, algorithm); + GPR_BITSET((unsigned*)&tmp.value.integer, algorithm); } else if (algorithm != GRPC_COMPRESS_NONE) { - GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm); - } - result = grpc_channel_args_copy_and_add(*a, &tmp, 1); - grpc_channel_args_destroy(exec_ctx, *a); - *a = result; - } - return result; -} - -grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state( - grpc_exec_ctx *exec_ctx, grpc_channel_args **a, - grpc_stream_compression_algorithm algorithm, int state) { - int *states_arg = NULL; - grpc_channel_args *result = *a; - const int states_arg_found = - find_stream_compression_algorithm_states_bitset(*a, &states_arg); - - if (grpc_channel_args_get_stream_compression_algorithm(*a) == algorithm && - state == 0) { - const char *algo_name = NULL; - GPR_ASSERT(grpc_stream_compression_algorithm_name(algorithm, &algo_name) != - 0); - gpr_log(GPR_ERROR, - "Tried to disable default stream compression algorithm '%s'. The " - "operation has been ignored.", - algo_name); - } else if (states_arg_found) { - if (state != 0) { - GPR_BITSET((unsigned *)states_arg, algorithm); - } else if (algorithm != GRPC_STREAM_COMPRESS_NONE) { - GPR_BITCLEAR((unsigned *)states_arg, algorithm); - } - } else { - /* create a new arg */ - grpc_arg tmp; - tmp.type = GRPC_ARG_INTEGER; - tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET; - /* all enabled by default */ - tmp.value.integer = (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1; - if (state != 0) { - GPR_BITSET((unsigned *)&tmp.value.integer, algorithm); - } else if (algorithm != GRPC_STREAM_COMPRESS_NONE) { - GPR_BITCLEAR((unsigned *)&tmp.value.integer, algorithm); + GPR_BITCLEAR((unsigned*)&tmp.value.integer, algorithm); } result = grpc_channel_args_copy_and_add(*a, &tmp, 1); - grpc_channel_args_destroy(exec_ctx, *a); + grpc_channel_args_destroy(*a); *a = result; } return result; } uint32_t grpc_channel_args_compression_algorithm_get_states( - const grpc_channel_args *a) { - int *states_arg; + const grpc_channel_args* a) { + int* states_arg; if (find_compression_algorithm_states_bitset(a, &states_arg)) { - return (uint32_t)*states_arg; + return static_cast(*states_arg); } else { return (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; /* All algs. enabled */ } } -uint32_t grpc_channel_args_stream_compression_algorithm_get_states( - const grpc_channel_args *a) { - int *states_arg; - if (find_stream_compression_algorithm_states_bitset(a, &states_arg)) { - return (uint32_t)*states_arg; - } else { - return (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - - 1; /* All algs. 
enabled */ - } -} - -grpc_channel_args *grpc_channel_args_set_socket_mutator( - grpc_channel_args *a, grpc_socket_mutator *mutator) { +grpc_channel_args* grpc_channel_args_set_socket_mutator( + grpc_channel_args* a, grpc_socket_mutator* mutator) { grpc_arg tmp = grpc_socket_mutator_to_arg(mutator); return grpc_channel_args_copy_and_add(a, &tmp, 1); } -int grpc_channel_args_compare(const grpc_channel_args *a, - const grpc_channel_args *b) { +int grpc_channel_args_compare(const grpc_channel_args* a, + const grpc_channel_args* b) { int c = GPR_ICMP(a->num_args, b->num_args); if (c != 0) return c; for (size_t i = 0; i < a->num_args; i++) { @@ -419,21 +322,21 @@ int grpc_channel_args_compare(const grpc_channel_args *a, return 0; } -const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args, - const char *name) { - if (args != NULL) { +const grpc_arg* grpc_channel_args_find(const grpc_channel_args* args, + const char* name) { + if (args != nullptr) { for (size_t i = 0; i < args->num_args; ++i) { if (strcmp(args->args[i].key, name) == 0) { return &args->args[i]; } } } - return NULL; + return nullptr; } -int grpc_channel_arg_get_integer(const grpc_arg *arg, +int grpc_channel_arg_get_integer(const grpc_arg* arg, const grpc_integer_options options) { - if (arg == NULL) return options.default_value; + if (arg == nullptr) return options.default_value; if (arg->type != GRPC_ARG_INTEGER) { gpr_log(GPR_ERROR, "%s ignored: it must be an integer", arg->key); return options.default_value; @@ -451,8 +354,17 @@ int grpc_channel_arg_get_integer(const grpc_arg *arg, return arg->value.integer; } -bool grpc_channel_arg_get_bool(const grpc_arg *arg, bool default_value) { - if (arg == NULL) return default_value; +char* grpc_channel_arg_get_string(const grpc_arg* arg) { + if (arg == nullptr) return nullptr; + if (arg->type != GRPC_ARG_STRING) { + gpr_log(GPR_ERROR, "%s ignored: it must be an string", arg->key); + return nullptr; + } + return arg->value.string; +} + +bool grpc_channel_arg_get_bool(const grpc_arg* arg, bool default_value) { + if (arg == nullptr) return default_value; if (arg->type != GRPC_ARG_INTEGER) { gpr_log(GPR_ERROR, "%s ignored: it must be an integer", arg->key); return default_value; @@ -469,12 +381,12 @@ bool grpc_channel_arg_get_bool(const grpc_arg *arg, bool default_value) { } } -bool grpc_channel_args_want_minimal_stack(const grpc_channel_args *args) { +bool grpc_channel_args_want_minimal_stack(const grpc_channel_args* args) { return grpc_channel_arg_get_bool( grpc_channel_args_find(args, GRPC_ARG_MINIMAL_STACK), false); } -grpc_arg grpc_channel_arg_string_create(char *name, char *value) { +grpc_arg grpc_channel_arg_string_create(char* name, char* value) { grpc_arg arg; arg.type = GRPC_ARG_STRING; arg.key = name; @@ -482,7 +394,7 @@ grpc_arg grpc_channel_arg_string_create(char *name, char *value) { return arg; } -grpc_arg grpc_channel_arg_integer_create(char *name, int value) { +grpc_arg grpc_channel_arg_integer_create(char* name, int value) { grpc_arg arg; arg.type = GRPC_ARG_INTEGER; arg.key = name; @@ -491,7 +403,7 @@ grpc_arg grpc_channel_arg_integer_create(char *name, int value) { } grpc_arg grpc_channel_arg_pointer_create( - char *name, void *value, const grpc_arg_pointer_vtable *vtable) { + char* name, void* value, const grpc_arg_pointer_vtable* vtable) { grpc_arg arg; arg.type = GRPC_ARG_POINTER; arg.key = name; @@ -499,3 +411,31 @@ grpc_arg grpc_channel_arg_pointer_create( arg.value.pointer.vtable = vtable; return arg; } + +char* grpc_channel_args_string(const 
grpc_channel_args* args) { + if (args == nullptr) return nullptr; + gpr_strvec v; + gpr_strvec_init(&v); + for (size_t i = 0; i < args->num_args; ++i) { + const grpc_arg& arg = args->args[i]; + char* s; + switch (arg.type) { + case GRPC_ARG_INTEGER: + gpr_asprintf(&s, "%s=%d", arg.key, arg.value.integer); + break; + case GRPC_ARG_STRING: + gpr_asprintf(&s, "%s=%s", arg.key, arg.value.string); + break; + case GRPC_ARG_POINTER: + gpr_asprintf(&s, "%s=%p", arg.key, arg.value.pointer.p); + break; + default: + gpr_asprintf(&s, "arg with unknown type"); + } + gpr_strvec_add(&v, s); + } + char* result = + gpr_strjoin_sep(const_cast(v.strs), v.count, ", ", nullptr); + gpr_strvec_destroy(&v); + return result; +} diff --git a/Sources/CgRPC/src/core/lib/channel/channel_args.h b/Sources/CgRPC/src/core/lib/channel/channel_args.h index 0599e189c..5ff303a9d 100644 --- a/Sources/CgRPC/src/core/lib/channel/channel_args.h +++ b/Sources/CgRPC/src/core/lib/channel/channel_args.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H #define GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H +#include + #include #include #include "src/core/lib/iomgr/socket_mutator.h" @@ -26,56 +28,44 @@ // Channel args are intentionally immutable, to avoid the need for locking. /** Copy the arguments in \a src into a new instance */ -grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src); +grpc_channel_args* grpc_channel_args_copy(const grpc_channel_args* src); /** Copy the arguments in \a src into a new instance, stably sorting keys */ -grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *src); +grpc_channel_args* grpc_channel_args_normalize(const grpc_channel_args* src); /** Copy the arguments in \a src and append \a to_add. If \a to_add is NULL, it * is equivalent to calling \a grpc_channel_args_copy. */ -grpc_channel_args *grpc_channel_args_copy_and_add(const grpc_channel_args *src, - const grpc_arg *to_add, +grpc_channel_args* grpc_channel_args_copy_and_add(const grpc_channel_args* src, + const grpc_arg* to_add, size_t num_to_add); /** Copies the arguments in \a src except for those whose keys are in \a to_remove. */ -grpc_channel_args *grpc_channel_args_copy_and_remove( - const grpc_channel_args *src, const char **to_remove, size_t num_to_remove); +grpc_channel_args* grpc_channel_args_copy_and_remove( + const grpc_channel_args* src, const char** to_remove, size_t num_to_remove); /** Copies the arguments from \a src except for those whose keys are in \a to_remove and appends the arguments in \a to_add. */ -grpc_channel_args *grpc_channel_args_copy_and_add_and_remove( - const grpc_channel_args *src, const char **to_remove, size_t num_to_remove, - const grpc_arg *to_add, size_t num_to_add); +grpc_channel_args* grpc_channel_args_copy_and_add_and_remove( + const grpc_channel_args* src, const char** to_remove, size_t num_to_remove, + const grpc_arg* to_add, size_t num_to_add); /** Perform the union of \a a and \a b, prioritizing \a a entries */ -grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a, - const grpc_channel_args *b); +grpc_channel_args* grpc_channel_args_union(const grpc_channel_args* a, + const grpc_channel_args* b); /** Destroy arguments created by \a grpc_channel_args_copy */ -void grpc_channel_args_destroy(grpc_exec_ctx *exec_ctx, grpc_channel_args *a); +void grpc_channel_args_destroy(grpc_channel_args* a); /** Returns the compression algorithm set in \a a. 
*/ grpc_compression_algorithm grpc_channel_args_get_compression_algorithm( - const grpc_channel_args *a); - -/** Returns the stream compression algorithm set in \a a. */ -grpc_stream_compression_algorithm -grpc_channel_args_get_stream_compression_algorithm(const grpc_channel_args *a); + const grpc_channel_args* a); /** Returns a channel arg instance with compression enabled. If \a a is * non-NULL, its args are copied. N.B. GRPC_COMPRESS_NONE disables compression * for the channel. */ -grpc_channel_args *grpc_channel_args_set_compression_algorithm( - grpc_channel_args *a, grpc_compression_algorithm algorithm); - -/** Returns a channel arg instance with stream compression enabled. If \a a is - * non-NULL, its args are copied. N.B. GRPC_STREAM_COMPRESS_NONE disables - * stream compression for the channel. If a value other than - * GRPC_STREAM_COMPRESS_NONE is set, it takes precedence over message-wise - * compression algorithms. */ -grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm( - grpc_channel_args *a, grpc_stream_compression_algorithm algorithm); +grpc_channel_args* grpc_channel_args_set_compression_algorithm( + grpc_channel_args* a, grpc_compression_algorithm algorithm); /** Sets the support for the given compression algorithm. By default, all * compression algorithms are enabled. It's an error to disable an algorithm set @@ -84,20 +74,8 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm( * Returns an instance with the updated algorithm states. The \a a pointer is * modified to point to the returned instance (which may be different from the * input value of \a a). */ -grpc_channel_args *grpc_channel_args_compression_algorithm_set_state( - grpc_exec_ctx *exec_ctx, grpc_channel_args **a, - grpc_compression_algorithm algorithm, int enabled); - -/** Sets the support for the given stream compression algorithm. By default, all - * stream compression algorithms are enabled. It's an error to disable an - * algorithm set by grpc_channel_args_set_stream_compression_algorithm. - * - * Returns an instance with the updated algorithm states. The \a a pointer is - * modified to point to the returned instance (which may be different from the - * input value of \a a). */ -grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state( - grpc_exec_ctx *exec_ctx, grpc_channel_args **a, - grpc_stream_compression_algorithm algorithm, int enabled); +grpc_channel_args* grpc_channel_args_compression_algorithm_set_state( + grpc_channel_args** a, grpc_compression_algorithm algorithm, int enabled); /** Returns the bitset representing the support state (true for enabled, false * for disabled) for compression algorithms. @@ -105,31 +83,23 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state( * The i-th bit of the returned bitset corresponds to the i-th entry in the * grpc_compression_algorithm enum. */ uint32_t grpc_channel_args_compression_algorithm_get_states( - const grpc_channel_args *a); - -/** Returns the bitset representing the support state (true for enabled, false - * for disabled) for stream compression algorithms. - * - * The i-th bit of the returned bitset corresponds to the i-th entry in the - * grpc_stream_compression_algorithm enum. 
*/ -uint32_t grpc_channel_args_stream_compression_algorithm_get_states( - const grpc_channel_args *a); + const grpc_channel_args* a); -int grpc_channel_args_compare(const grpc_channel_args *a, - const grpc_channel_args *b); +int grpc_channel_args_compare(const grpc_channel_args* a, + const grpc_channel_args* b); /** Returns a channel arg instance with socket mutator added. The socket mutator * will perform its mutate_fd method on all file descriptors used by the * channel. * If \a a is non-MULL, its args are copied. */ -grpc_channel_args *grpc_channel_args_set_socket_mutator( - grpc_channel_args *a, grpc_socket_mutator *mutator); +grpc_channel_args* grpc_channel_args_set_socket_mutator( + grpc_channel_args* a, grpc_socket_mutator* mutator); /** Returns the value of argument \a name from \a args, or NULL if not found. */ -const grpc_arg *grpc_channel_args_find(const grpc_channel_args *args, - const char *name); +const grpc_arg* grpc_channel_args_find(const grpc_channel_args* args, + const char* name); -bool grpc_channel_args_want_minimal_stack(const grpc_channel_args *args); +bool grpc_channel_args_want_minimal_stack(const grpc_channel_args* args); typedef struct grpc_integer_options { int default_value; // Return this if value is outside of expected bounds. @@ -138,15 +108,24 @@ typedef struct grpc_integer_options { } grpc_integer_options; /** Returns the value of \a arg, subject to the contraints in \a options. */ -int grpc_channel_arg_get_integer(const grpc_arg *arg, +int grpc_channel_arg_get_integer(const grpc_arg* arg, const grpc_integer_options options); -bool grpc_channel_arg_get_bool(const grpc_arg *arg, bool default_value); +/** Returns the value of \a arg if \a arg is of type GRPC_ARG_STRING. + Otherwise, emits a warning log, and returns nullptr. + If arg is nullptr, returns nullptr, and does not emit a warning. */ +char* grpc_channel_arg_get_string(const grpc_arg* arg); + +bool grpc_channel_arg_get_bool(const grpc_arg* arg, bool default_value); // Helpers for creating channel args. -grpc_arg grpc_channel_arg_string_create(char *name, char *value); -grpc_arg grpc_channel_arg_integer_create(char *name, int value); -grpc_arg grpc_channel_arg_pointer_create(char *name, void *value, - const grpc_arg_pointer_vtable *vtable); +grpc_arg grpc_channel_arg_string_create(char* name, char* value); +grpc_arg grpc_channel_arg_integer_create(char* name, int value); +grpc_arg grpc_channel_arg_pointer_create(char* name, void* value, + const grpc_arg_pointer_vtable* vtable); + +// Returns a string representing channel args in human-readable form. +// Callers takes ownership of result. +char* grpc_channel_args_string(const grpc_channel_args* args); #endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H */ diff --git a/Sources/CgRPC/src/core/lib/channel/channel_stack.c b/Sources/CgRPC/src/core/lib/channel/channel_stack.cc similarity index 56% rename from Sources/CgRPC/src/core/lib/channel/channel_stack.c rename to Sources/CgRPC/src/core/lib/channel/channel_stack.cc index 775c8bc66..a9459b150 100644 --- a/Sources/CgRPC/src/core/lib/channel/channel_stack.c +++ b/Sources/CgRPC/src/core/lib/channel/channel_stack.cc @@ -16,14 +16,16 @@ * */ -#include "src/core/lib/channel/channel_stack.h" +#include + #include #include +#include "src/core/lib/channel/channel_stack.h" #include #include -grpc_tracer_flag grpc_trace_channel = GRPC_TRACER_INITIALIZER(false, "channel"); +grpc_core::TraceFlag grpc_trace_channel(false, "channel"); /* Memory layouts. 
@@ -45,7 +47,7 @@ grpc_tracer_flag grpc_trace_channel = GRPC_TRACER_INITIALIZER(false, "channel"); #define ROUND_UP_TO_ALIGNMENT_SIZE(x) \ (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u)) -size_t grpc_channel_stack_size(const grpc_channel_filter **filters, +size_t grpc_channel_stack_size(const grpc_channel_filter** filters, size_t filter_count) { /* always need the header, and size for the channel elements */ size_t size = @@ -64,40 +66,40 @@ size_t grpc_channel_stack_size(const grpc_channel_filter **filters, return size; } -#define CHANNEL_ELEMS_FROM_STACK(stk) \ - ((grpc_channel_element *)((char *)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE( \ - sizeof(grpc_channel_stack)))) +#define CHANNEL_ELEMS_FROM_STACK(stk) \ + ((grpc_channel_element*)((char*)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE( \ + sizeof(grpc_channel_stack)))) -#define CALL_ELEMS_FROM_STACK(stk) \ - ((grpc_call_element *)((char *)(stk) + \ - ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)))) +#define CALL_ELEMS_FROM_STACK(stk) \ + ((grpc_call_element*)((char*)(stk) + \ + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)))) -grpc_channel_element *grpc_channel_stack_element( - grpc_channel_stack *channel_stack, size_t index) { +grpc_channel_element* grpc_channel_stack_element( + grpc_channel_stack* channel_stack, size_t index) { return CHANNEL_ELEMS_FROM_STACK(channel_stack) + index; } -grpc_channel_element *grpc_channel_stack_last_element( - grpc_channel_stack *channel_stack) { +grpc_channel_element* grpc_channel_stack_last_element( + grpc_channel_stack* channel_stack) { return grpc_channel_stack_element(channel_stack, channel_stack->count - 1); } -grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack, +grpc_call_element* grpc_call_stack_element(grpc_call_stack* call_stack, size_t index) { return CALL_ELEMS_FROM_STACK(call_stack) + index; } -grpc_error *grpc_channel_stack_init( - grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count, - const grpc_channel_args *channel_args, grpc_transport *optional_transport, - const char *name, grpc_channel_stack *stack) { +grpc_error* grpc_channel_stack_init( + int initial_refs, grpc_iomgr_cb_func destroy, void* destroy_arg, + const grpc_channel_filter** filters, size_t filter_count, + const grpc_channel_args* channel_args, grpc_transport* optional_transport, + const char* name, grpc_channel_stack* stack) { size_t call_size = ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) + ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element)); - grpc_channel_element *elems; + grpc_channel_element* elems; grpc_channel_element_args args; - char *user_data; + char* user_data; size_t i; stack->count = filter_count; @@ -105,11 +107,11 @@ grpc_error *grpc_channel_stack_init( name); elems = CHANNEL_ELEMS_FROM_STACK(stack); user_data = - ((char *)elems) + + (reinterpret_cast(elems)) + ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element)); /* init per-filter data */ - grpc_error *first_error = GRPC_ERROR_NONE; + grpc_error* first_error = GRPC_ERROR_NONE; for (i = 0; i < filter_count; i++) { args.channel_stack = stack; args.channel_args = channel_args; @@ -118,8 +120,7 @@ grpc_error *grpc_channel_stack_init( args.is_last = i == (filter_count - 1); elems[i].filter = filters[i]; elems[i].channel_data = user_data; - grpc_error *error = - elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args); + grpc_error* error = elems[i].filter->init_channel_elem(&elems[i], 
&args); if (error != GRPC_ERROR_NONE) { if (first_error == GRPC_ERROR_NONE) { first_error = error; @@ -131,52 +132,50 @@ grpc_error *grpc_channel_stack_init( call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data); } - GPR_ASSERT(user_data > (char *)stack); - GPR_ASSERT((uintptr_t)(user_data - (char *)stack) == + GPR_ASSERT(user_data > (char*)stack); + GPR_ASSERT((uintptr_t)(user_data - (char*)stack) == grpc_channel_stack_size(filters, filter_count)); stack->call_stack_size = call_size; return first_error; } -void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, - grpc_channel_stack *stack) { - grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(stack); +void grpc_channel_stack_destroy(grpc_channel_stack* stack) { + grpc_channel_element* channel_elems = CHANNEL_ELEMS_FROM_STACK(stack); size_t count = stack->count; size_t i; /* destroy per-filter data */ for (i = 0; i < count; i++) { - channel_elems[i].filter->destroy_channel_elem(exec_ctx, &channel_elems[i]); + channel_elems[i].filter->destroy_channel_elem(&channel_elems[i]); } } -grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx, - grpc_channel_stack *channel_stack, +grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, - const grpc_call_element_args *elem_args) { - grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack); + void* destroy_arg, + const grpc_call_element_args* elem_args) { + grpc_channel_element* channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack); size_t count = channel_stack->count; - grpc_call_element *call_elems; - char *user_data; + grpc_call_element* call_elems; + char* user_data; size_t i; elem_args->call_stack->count = count; GRPC_STREAM_REF_INIT(&elem_args->call_stack->refcount, initial_refs, destroy, destroy_arg, "CALL_STACK"); call_elems = CALL_ELEMS_FROM_STACK(elem_args->call_stack); - user_data = ((char *)call_elems) + + user_data = (reinterpret_cast(call_elems)) + ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element)); /* init per-filter data */ - grpc_error *first_error = GRPC_ERROR_NONE; + grpc_error* first_error = GRPC_ERROR_NONE; for (i = 0; i < count; i++) { call_elems[i].filter = channel_elems[i].filter; call_elems[i].channel_data = channel_elems[i].channel_data; call_elems[i].call_data = user_data; - grpc_error *error = call_elems[i].filter->init_call_elem( - exec_ctx, &call_elems[i], elem_args); + grpc_error* error = + call_elems[i].filter->init_call_elem(&call_elems[i], elem_args); if (error != GRPC_ERROR_NONE) { if (first_error == GRPC_ERROR_NONE) { first_error = error; @@ -190,73 +189,70 @@ grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx, return first_error; } -void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_call_stack *call_stack, - grpc_polling_entity *pollent) { +void grpc_call_stack_set_pollset_or_pollset_set(grpc_call_stack* call_stack, + grpc_polling_entity* pollent) { size_t count = call_stack->count; - grpc_call_element *call_elems; - char *user_data; + grpc_call_element* call_elems; + char* user_data; size_t i; call_elems = CALL_ELEMS_FROM_STACK(call_stack); - user_data = ((char *)call_elems) + + user_data = (reinterpret_cast(call_elems)) + ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element)); /* init per-filter data */ for (i = 0; i < count; i++) { - call_elems[i].filter->set_pollset_or_pollset_set(exec_ctx, &call_elems[i], - pollent); + 
call_elems[i].filter->set_pollset_or_pollset_set(&call_elems[i], pollent); user_data += ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data); } } void grpc_call_stack_ignore_set_pollset_or_pollset_set( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_polling_entity *pollent) {} + grpc_call_element* elem, grpc_polling_entity* pollent) {} -void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack, - const grpc_call_final_info *final_info, - grpc_closure *then_schedule_closure) { - grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack); +void grpc_call_stack_destroy(grpc_call_stack* stack, + const grpc_call_final_info* final_info, + grpc_closure* then_schedule_closure) { + grpc_call_element* elems = CALL_ELEMS_FROM_STACK(stack); size_t count = stack->count; size_t i; /* destroy per-filter data */ for (i = 0; i < count; i++) { elems[i].filter->destroy_call_elem( - exec_ctx, &elems[i], final_info, - i == count - 1 ? then_schedule_closure : NULL); + &elems[i], final_info, + i == count - 1 ? then_schedule_closure : nullptr); } } -void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - grpc_call_element *next_elem = elem + 1; +void grpc_call_next_op(grpc_call_element* elem, + grpc_transport_stream_op_batch* op) { + grpc_call_element* next_elem = elem + 1; GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op); - next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op); + next_elem->filter->start_transport_stream_op_batch(next_elem, op); } -void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - const grpc_channel_info *channel_info) { - grpc_channel_element *next_elem = elem + 1; - next_elem->filter->get_channel_info(exec_ctx, next_elem, channel_info); +void grpc_channel_next_get_info(grpc_channel_element* elem, + const grpc_channel_info* channel_info) { + grpc_channel_element* next_elem = elem + 1; + next_elem->filter->get_channel_info(next_elem, channel_info); } -void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_transport_op *op) { - grpc_channel_element *next_elem = elem + 1; - next_elem->filter->start_transport_op(exec_ctx, next_elem, op); +void grpc_channel_next_op(grpc_channel_element* elem, grpc_transport_op* op) { + grpc_channel_element* next_elem = elem + 1; + next_elem->filter->start_transport_op(next_elem, op); } -grpc_channel_stack *grpc_channel_stack_from_top_element( - grpc_channel_element *elem) { - return (grpc_channel_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE( - sizeof(grpc_channel_stack))); +grpc_channel_stack* grpc_channel_stack_from_top_element( + grpc_channel_element* elem) { + return reinterpret_cast( + reinterpret_cast(elem) - + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack))); } -grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) { - return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE( - sizeof(grpc_call_stack))); +grpc_call_stack* grpc_call_stack_from_top_element(grpc_call_element* elem) { + return reinterpret_cast( + reinterpret_cast(elem) - + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack))); } diff --git a/Sources/CgRPC/src/core/lib/channel/channel_stack.h b/Sources/CgRPC/src/core/lib/channel/channel_stack.h index f0de80f0c..4bf821866 100644 --- a/Sources/CgRPC/src/core/lib/channel/channel_stack.h +++ b/Sources/CgRPC/src/core/lib/channel/channel_stack.h @@ -33,6 +33,8 @@ Call stacks are created by channel stacks and represent the 
per-call data for that stack. */ +#include + #include #include @@ -40,15 +42,11 @@ #include #include "src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/arena.h" #include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/polling_entity.h" -#include "src/core/lib/support/arena.h" #include "src/core/lib/transport/transport.h" -#ifdef __cplusplus -extern "C" { -#endif - typedef struct grpc_channel_element grpc_channel_element; typedef struct grpc_call_element grpc_call_element; @@ -56,23 +54,23 @@ typedef struct grpc_channel_stack grpc_channel_stack; typedef struct grpc_call_stack grpc_call_stack; typedef struct { - grpc_channel_stack *channel_stack; - const grpc_channel_args *channel_args; + grpc_channel_stack* channel_stack; + const grpc_channel_args* channel_args; /** Transport, iff it is known */ - grpc_transport *optional_transport; + grpc_transport* optional_transport; int is_first; int is_last; } grpc_channel_element_args; typedef struct { - grpc_call_stack *call_stack; - const void *server_transport_data; - grpc_call_context_element *context; + grpc_call_stack* call_stack; + const void* server_transport_data; + grpc_call_context_element* context; grpc_slice path; gpr_timespec start_time; - gpr_timespec deadline; - gpr_arena *arena; - grpc_call_combiner *call_combiner; + grpc_millis deadline; + gpr_arena* arena; + grpc_call_combiner* call_combiner; } grpc_call_element_args; typedef struct { @@ -84,6 +82,7 @@ typedef struct { typedef struct { grpc_call_stats stats; grpc_status_code final_status; + const char** error_string; } grpc_call_final_info; /* Channel filters specify: @@ -99,14 +98,12 @@ typedef struct { typedef struct { /* Called to eg. send/receive data on a call. See grpc_call_next_op on how to call the next element in the stack */ - void (*start_transport_stream_op_batch)(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op_batch *op); + void (*start_transport_stream_op_batch)(grpc_call_element* elem, + grpc_transport_stream_op_batch* op); /* Called to handle channel level operations - e.g. new calls, or transport closure. See grpc_channel_next_op on how to call the next element in the stack */ - void (*start_transport_op)(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, grpc_transport_op *op); + void (*start_transport_op)(grpc_channel_element* elem, grpc_transport_op* op); /* sizeof(per call data) */ size_t sizeof_call_data; @@ -119,21 +116,19 @@ typedef struct { transport and is on the server. Most filters want to ignore this argument. Implementations may assume that elem->call_data is all zeros. */ - grpc_error *(*init_call_elem)(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args); - void (*set_pollset_or_pollset_set)(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_polling_entity *pollent); + grpc_error* (*init_call_elem)(grpc_call_element* elem, + const grpc_call_element_args* args); + void (*set_pollset_or_pollset_set)(grpc_call_element* elem, + grpc_polling_entity* pollent); /* Destroy per call data. The filter does not need to do any chaining. The bottom filter of a stack will be passed a non-NULL pointer to \a then_schedule_closure that should be passed to GRPC_CLOSURE_SCHED when destruction is complete. \a final_info contains data about the completed call, mainly for reporting purposes. 
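For orientation while reading the reworked grpc_channel_filter vtable above: the sketch below wires a hypothetical pass-through filter against the new exec_ctx-free callback signatures. It is illustrative only and not part of the vendored patch; the "noop_*" names are invented, and a real bottom-of-stack filter would additionally have to schedule then_schedule_closure as the surrounding comment requires.

static grpc_error* noop_init_call_elem(grpc_call_element* elem,
                                       const grpc_call_element_args* args) {
  return GRPC_ERROR_NONE;  // per-call data stays as the zero-initialized block
}
static void noop_destroy_call_elem(grpc_call_element* elem,
                                   const grpc_call_final_info* final_info,
                                   grpc_closure* then_schedule_closure) {
  // A terminal filter would GRPC_CLOSURE_SCHED(then_schedule_closure, ...) here.
}
static grpc_error* noop_init_channel_elem(grpc_channel_element* elem,
                                          grpc_channel_element_args* args) {
  return GRPC_ERROR_NONE;
}
static void noop_destroy_channel_elem(grpc_channel_element* elem) {}

static const grpc_channel_filter noop_filter = {
    grpc_call_next_op,         // start_transport_stream_op_batch: pass through
    grpc_channel_next_op,      // start_transport_op: pass through
    0,                         // sizeof_call_data
    noop_init_call_elem,
    grpc_call_stack_ignore_set_pollset_or_pollset_set,
    noop_destroy_call_elem,
    0,                         // sizeof_channel_data
    noop_init_channel_elem,
    noop_destroy_channel_elem,
    grpc_channel_next_get_info,
    "noop",                    // name
};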
*/ - void (*destroy_call_elem)(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *then_schedule_closure); + void (*destroy_call_elem)(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* then_schedule_closure); /* sizeof(per channel data) */ size_t sizeof_channel_data; @@ -144,36 +139,34 @@ typedef struct { useful for asserting correct configuration by upper layer code. The filter does not need to do any chaining. Implementations may assume that elem->call_data is all zeros. */ - grpc_error *(*init_channel_elem)(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args); + grpc_error* (*init_channel_elem)(grpc_channel_element* elem, + grpc_channel_element_args* args); /* Destroy per channel data. The filter does not need to do any chaining */ - void (*destroy_channel_elem)(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem); + void (*destroy_channel_elem)(grpc_channel_element* elem); /* Implement grpc_channel_get_info() */ - void (*get_channel_info)(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - const grpc_channel_info *channel_info); + void (*get_channel_info)(grpc_channel_element* elem, + const grpc_channel_info* channel_info); /* The name of this filter */ - const char *name; + const char* name; } grpc_channel_filter; /* A channel_element tracks its filter and the filter requested memory within a channel allocation */ struct grpc_channel_element { - const grpc_channel_filter *filter; - void *channel_data; + const grpc_channel_filter* filter; + void* channel_data; }; /* A call_element tracks its filter, the filter requested memory within a channel allocation, and the filter requested memory within a call allocation */ struct grpc_call_element { - const grpc_channel_filter *filter; - void *channel_data; - void *call_data; + const grpc_channel_filter* filter; + void* channel_data; + void* call_data; }; /* A channel stack tracks a set of related filters for one channel, and @@ -198,100 +191,90 @@ struct grpc_call_stack { }; /* Get a channel element given a channel stack and its index */ -grpc_channel_element *grpc_channel_stack_element(grpc_channel_stack *stack, +grpc_channel_element* grpc_channel_stack_element(grpc_channel_stack* stack, size_t i); /* Get the last channel element in a channel stack */ -grpc_channel_element *grpc_channel_stack_last_element( - grpc_channel_stack *stack); +grpc_channel_element* grpc_channel_stack_last_element( + grpc_channel_stack* stack); /* Get a call stack element given a call stack and an index */ -grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i); +grpc_call_element* grpc_call_stack_element(grpc_call_stack* stack, size_t i); /* Determine memory required for a channel stack containing a set of filters */ -size_t grpc_channel_stack_size(const grpc_channel_filter **filters, +size_t grpc_channel_stack_size(const grpc_channel_filter** filters, size_t filter_count); /* Initialize a channel stack given some filters */ -grpc_error *grpc_channel_stack_init( - grpc_exec_ctx *exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, const grpc_channel_filter **filters, size_t filter_count, - const grpc_channel_args *args, grpc_transport *optional_transport, - const char *name, grpc_channel_stack *stack); +grpc_error* grpc_channel_stack_init( + int initial_refs, grpc_iomgr_cb_func destroy, void* destroy_arg, + const grpc_channel_filter** filters, size_t filter_count, + const grpc_channel_args* 
args, grpc_transport* optional_transport, + const char* name, grpc_channel_stack* stack); /* Destroy a channel stack */ -void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, - grpc_channel_stack *stack); +void grpc_channel_stack_destroy(grpc_channel_stack* stack); /* Initialize a call stack given a channel stack. transport_server_data is expected to be NULL on a client, or an opaque transport owned pointer on the server. */ -grpc_error *grpc_call_stack_init(grpc_exec_ctx *exec_ctx, - grpc_channel_stack *channel_stack, +grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, - const grpc_call_element_args *elem_args); + void* destroy_arg, + const grpc_call_element_args* elem_args); /* Set a pollset or a pollset_set for a call stack: must occur before the first * op is started */ -void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_call_stack *call_stack, - grpc_polling_entity *pollent); +void grpc_call_stack_set_pollset_or_pollset_set(grpc_call_stack* call_stack, + grpc_polling_entity* pollent); #ifndef NDEBUG #define GRPC_CALL_STACK_REF(call_stack, reason) \ grpc_stream_ref(&(call_stack)->refcount, reason) -#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \ - grpc_stream_unref(exec_ctx, &(call_stack)->refcount, reason) +#define GRPC_CALL_STACK_UNREF(call_stack, reason) \ + grpc_stream_unref(&(call_stack)->refcount, reason) #define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \ grpc_stream_ref(&(channel_stack)->refcount, reason) -#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \ - grpc_stream_unref(exec_ctx, &(channel_stack)->refcount, reason) +#define GRPC_CHANNEL_STACK_UNREF(channel_stack, reason) \ + grpc_stream_unref(&(channel_stack)->refcount, reason) #else #define GRPC_CALL_STACK_REF(call_stack, reason) \ grpc_stream_ref(&(call_stack)->refcount) -#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \ - grpc_stream_unref(exec_ctx, &(call_stack)->refcount) +#define GRPC_CALL_STACK_UNREF(call_stack, reason) \ + grpc_stream_unref(&(call_stack)->refcount) #define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \ grpc_stream_ref(&(channel_stack)->refcount) -#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \ - grpc_stream_unref(exec_ctx, &(channel_stack)->refcount) +#define GRPC_CHANNEL_STACK_UNREF(channel_stack, reason) \ + grpc_stream_unref(&(channel_stack)->refcount) #endif /* Destroy a call stack */ -void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack, - const grpc_call_final_info *final_info, - grpc_closure *then_schedule_closure); +void grpc_call_stack_destroy(grpc_call_stack* stack, + const grpc_call_final_info* final_info, + grpc_closure* then_schedule_closure); /* Ignore set pollset{_set} - used by filters if they don't care about pollsets * at all. Does nothing. 
*/ void grpc_call_stack_ignore_set_pollset_or_pollset_set( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_polling_entity *pollent); + grpc_call_element* elem, grpc_polling_entity* pollent); /* Call the next operation in a call stack */ -void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op); +void grpc_call_next_op(grpc_call_element* elem, + grpc_transport_stream_op_batch* op); /* Call the next operation (depending on call directionality) in a channel stack */ -void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, - grpc_transport_op *op); +void grpc_channel_next_op(grpc_channel_element* elem, grpc_transport_op* op); /* Pass through a request to get_channel_info() to the next child element */ -void grpc_channel_next_get_info(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - const grpc_channel_info *channel_info); +void grpc_channel_next_get_info(grpc_channel_element* elem, + const grpc_channel_info* channel_info); /* Given the top element of a channel stack, get the channel stack itself */ -grpc_channel_stack *grpc_channel_stack_from_top_element( - grpc_channel_element *elem); +grpc_channel_stack* grpc_channel_stack_from_top_element( + grpc_channel_element* elem); /* Given the top element of a call stack, get the call stack itself */ -grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem); +grpc_call_stack* grpc_call_stack_from_top_element(grpc_call_element* elem); -void grpc_call_log_op(const char *file, int line, gpr_log_severity severity, - grpc_call_element *elem, - grpc_transport_stream_op_batch *op); +void grpc_call_log_op(const char* file, int line, gpr_log_severity severity, + grpc_call_element* elem, + grpc_transport_stream_op_batch* op); -extern grpc_tracer_flag grpc_trace_channel; +extern grpc_core::TraceFlag grpc_trace_channel; #define GRPC_CALL_LOG_OP(sev, elem, op) \ - if (GRPC_TRACER_ON(grpc_trace_channel)) grpc_call_log_op(sev, elem, op) - -#ifdef __cplusplus -} -#endif + if (grpc_trace_channel.enabled()) grpc_call_log_op(sev, elem, op) #endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H */ diff --git a/Sources/CgRPC/src/core/lib/channel/channel_stack_builder.c b/Sources/CgRPC/src/core/lib/channel/channel_stack_builder.cc similarity index 53% rename from Sources/CgRPC/src/core/lib/channel/channel_stack_builder.c rename to Sources/CgRPC/src/core/lib/channel/channel_stack_builder.cc index b663ebfb5..8a7244903 100644 --- a/Sources/CgRPC/src/core/lib/channel/channel_stack_builder.c +++ b/Sources/CgRPC/src/core/lib/channel/channel_stack_builder.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/channel/channel_stack_builder.h" #include @@ -23,15 +25,15 @@ #include #include -grpc_tracer_flag grpc_trace_channel_stack_builder = - GRPC_TRACER_INITIALIZER(false, "channel_stack_builder"); +grpc_core::TraceFlag grpc_trace_channel_stack_builder(false, + "channel_stack_builder"); typedef struct filter_node { - struct filter_node *next; - struct filter_node *prev; - const grpc_channel_filter *filter; + struct filter_node* next; + struct filter_node* prev; + const grpc_channel_filter* filter; grpc_post_filter_create_init_func init; - void *init_arg; + void* init_arg; } filter_node; struct grpc_channel_stack_builder { @@ -39,23 +41,23 @@ struct grpc_channel_stack_builder { filter_node begin; filter_node end; // various set/get-able parameters - grpc_channel_args *args; - grpc_transport *transport; - char *target; - const char *name; + grpc_channel_args* args; + 
grpc_transport* transport; + char* target; + const char* name; }; struct grpc_channel_stack_builder_iterator { - grpc_channel_stack_builder *builder; - filter_node *node; + grpc_channel_stack_builder* builder; + filter_node* node; }; -grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) { - grpc_channel_stack_builder *b = - (grpc_channel_stack_builder *)gpr_zalloc(sizeof(*b)); +grpc_channel_stack_builder* grpc_channel_stack_builder_create(void) { + grpc_channel_stack_builder* b = + static_cast(gpr_zalloc(sizeof(*b))); - b->begin.filter = NULL; - b->end.filter = NULL; + b->begin.filter = nullptr; + b->end.filter = nullptr; b->begin.next = &b->end; b->begin.prev = &b->end; b->end.next = &b->begin; @@ -64,76 +66,77 @@ grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) { return b; } -void grpc_channel_stack_builder_set_target(grpc_channel_stack_builder *b, - const char *target) { +void grpc_channel_stack_builder_set_target(grpc_channel_stack_builder* b, + const char* target) { gpr_free(b->target); b->target = gpr_strdup(target); } -const char *grpc_channel_stack_builder_get_target( - grpc_channel_stack_builder *b) { +const char* grpc_channel_stack_builder_get_target( + grpc_channel_stack_builder* b) { return b->target; } -static grpc_channel_stack_builder_iterator *create_iterator_at_filter_node( - grpc_channel_stack_builder *builder, filter_node *node) { - grpc_channel_stack_builder_iterator *it = - (grpc_channel_stack_builder_iterator *)gpr_malloc(sizeof(*it)); +static grpc_channel_stack_builder_iterator* create_iterator_at_filter_node( + grpc_channel_stack_builder* builder, filter_node* node) { + grpc_channel_stack_builder_iterator* it = + static_cast( + gpr_malloc(sizeof(*it))); it->builder = builder; it->node = node; return it; } void grpc_channel_stack_builder_iterator_destroy( - grpc_channel_stack_builder_iterator *it) { + grpc_channel_stack_builder_iterator* it) { gpr_free(it); } -grpc_channel_stack_builder_iterator * +grpc_channel_stack_builder_iterator* grpc_channel_stack_builder_create_iterator_at_first( - grpc_channel_stack_builder *builder) { + grpc_channel_stack_builder* builder) { return create_iterator_at_filter_node(builder, &builder->begin); } -grpc_channel_stack_builder_iterator * +grpc_channel_stack_builder_iterator* grpc_channel_stack_builder_create_iterator_at_last( - grpc_channel_stack_builder *builder) { + grpc_channel_stack_builder* builder) { return create_iterator_at_filter_node(builder, &builder->end); } bool grpc_channel_stack_builder_iterator_is_end( - grpc_channel_stack_builder_iterator *iterator) { + grpc_channel_stack_builder_iterator* iterator) { return iterator->node == &iterator->builder->end; } -const char *grpc_channel_stack_builder_iterator_filter_name( - grpc_channel_stack_builder_iterator *iterator) { - if (iterator->node->filter == NULL) return NULL; +const char* grpc_channel_stack_builder_iterator_filter_name( + grpc_channel_stack_builder_iterator* iterator) { + if (iterator->node->filter == nullptr) return nullptr; return iterator->node->filter->name; } bool grpc_channel_stack_builder_move_next( - grpc_channel_stack_builder_iterator *iterator) { + grpc_channel_stack_builder_iterator* iterator) { if (iterator->node == &iterator->builder->end) return false; iterator->node = iterator->node->next; return true; } bool grpc_channel_stack_builder_move_prev( - grpc_channel_stack_builder_iterator *iterator) { + grpc_channel_stack_builder_iterator* iterator) { if (iterator->node == &iterator->builder->begin) return false; 
iterator->node = iterator->node->prev; return true; } -grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find( - grpc_channel_stack_builder *builder, const char *filter_name) { - GPR_ASSERT(filter_name != NULL); - grpc_channel_stack_builder_iterator *it = +grpc_channel_stack_builder_iterator* grpc_channel_stack_builder_iterator_find( + grpc_channel_stack_builder* builder, const char* filter_name) { + GPR_ASSERT(filter_name != nullptr); + grpc_channel_stack_builder_iterator* it = grpc_channel_stack_builder_create_iterator_at_first(builder); while (grpc_channel_stack_builder_move_next(it)) { if (grpc_channel_stack_builder_iterator_is_end(it)) break; - const char *filter_name_at_it = + const char* filter_name_at_it = grpc_channel_stack_builder_iterator_filter_name(it); if (strcmp(filter_name, filter_name_at_it) == 0) break; } @@ -141,43 +144,42 @@ grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find( } bool grpc_channel_stack_builder_move_prev( - grpc_channel_stack_builder_iterator *iterator); + grpc_channel_stack_builder_iterator* iterator); -void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder *builder, - const char *name) { - GPR_ASSERT(builder->name == NULL); +void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder* builder, + const char* name) { + GPR_ASSERT(builder->name == nullptr); builder->name = name; } void grpc_channel_stack_builder_set_channel_arguments( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, - const grpc_channel_args *args) { - if (builder->args != NULL) { - grpc_channel_args_destroy(exec_ctx, builder->args); + grpc_channel_stack_builder* builder, const grpc_channel_args* args) { + if (builder->args != nullptr) { + grpc_channel_args_destroy(builder->args); } builder->args = grpc_channel_args_copy(args); } void grpc_channel_stack_builder_set_transport( - grpc_channel_stack_builder *builder, grpc_transport *transport) { - GPR_ASSERT(builder->transport == NULL); + grpc_channel_stack_builder* builder, grpc_transport* transport) { + GPR_ASSERT(builder->transport == nullptr); builder->transport = transport; } -grpc_transport *grpc_channel_stack_builder_get_transport( - grpc_channel_stack_builder *builder) { +grpc_transport* grpc_channel_stack_builder_get_transport( + grpc_channel_stack_builder* builder) { return builder->transport; } -const grpc_channel_args *grpc_channel_stack_builder_get_channel_arguments( - grpc_channel_stack_builder *builder) { +const grpc_channel_args* grpc_channel_stack_builder_get_channel_arguments( + grpc_channel_stack_builder* builder) { return builder->args; } bool grpc_channel_stack_builder_append_filter( - grpc_channel_stack_builder *builder, const grpc_channel_filter *filter, - grpc_post_filter_create_init_func post_init_func, void *user_data) { - grpc_channel_stack_builder_iterator *it = + grpc_channel_stack_builder* builder, const grpc_channel_filter* filter, + grpc_post_filter_create_init_func post_init_func, void* user_data) { + grpc_channel_stack_builder_iterator* it = grpc_channel_stack_builder_create_iterator_at_last(builder); bool ok = grpc_channel_stack_builder_add_filter_before( it, filter, post_init_func, user_data); @@ -186,8 +188,8 @@ bool grpc_channel_stack_builder_append_filter( } bool grpc_channel_stack_builder_remove_filter( - grpc_channel_stack_builder *builder, const char *filter_name) { - grpc_channel_stack_builder_iterator *it = + grpc_channel_stack_builder* builder, const char* filter_name) { + grpc_channel_stack_builder_iterator* it = 
grpc_channel_stack_builder_iterator_find(builder, filter_name); if (grpc_channel_stack_builder_iterator_is_end(it)) { grpc_channel_stack_builder_iterator_destroy(it); @@ -201,9 +203,9 @@ bool grpc_channel_stack_builder_remove_filter( } bool grpc_channel_stack_builder_prepend_filter( - grpc_channel_stack_builder *builder, const grpc_channel_filter *filter, - grpc_post_filter_create_init_func post_init_func, void *user_data) { - grpc_channel_stack_builder_iterator *it = + grpc_channel_stack_builder* builder, const grpc_channel_filter* filter, + grpc_post_filter_create_init_func post_init_func, void* user_data) { + grpc_channel_stack_builder_iterator* it = grpc_channel_stack_builder_create_iterator_at_first(builder); bool ok = grpc_channel_stack_builder_add_filter_after( it, filter, post_init_func, user_data); @@ -211,10 +213,11 @@ bool grpc_channel_stack_builder_prepend_filter( return ok; } -static void add_after(filter_node *before, const grpc_channel_filter *filter, +static void add_after(filter_node* before, const grpc_channel_filter* filter, grpc_post_filter_create_init_func post_init_func, - void *user_data) { - filter_node *new_node = (filter_node *)gpr_malloc(sizeof(*new_node)); + void* user_data) { + filter_node* new_node = + static_cast(gpr_malloc(sizeof(*new_node))); new_node->next = before->next; new_node->prev = before; new_node->next->prev = new_node->prev->next = new_node; @@ -224,53 +227,52 @@ static void add_after(filter_node *before, const grpc_channel_filter *filter, } bool grpc_channel_stack_builder_add_filter_before( - grpc_channel_stack_builder_iterator *iterator, - const grpc_channel_filter *filter, - grpc_post_filter_create_init_func post_init_func, void *user_data) { + grpc_channel_stack_builder_iterator* iterator, + const grpc_channel_filter* filter, + grpc_post_filter_create_init_func post_init_func, void* user_data) { if (iterator->node == &iterator->builder->begin) return false; add_after(iterator->node->prev, filter, post_init_func, user_data); return true; } bool grpc_channel_stack_builder_add_filter_after( - grpc_channel_stack_builder_iterator *iterator, - const grpc_channel_filter *filter, - grpc_post_filter_create_init_func post_init_func, void *user_data) { + grpc_channel_stack_builder_iterator* iterator, + const grpc_channel_filter* filter, + grpc_post_filter_create_init_func post_init_func, void* user_data) { if (iterator->node == &iterator->builder->end) return false; add_after(iterator->node, filter, post_init_func, user_data); return true; } -void grpc_channel_stack_builder_destroy(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder) { - filter_node *p = builder->begin.next; +void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder* builder) { + filter_node* p = builder->begin.next; while (p != &builder->end) { - filter_node *next = p->next; + filter_node* next = p->next; gpr_free(p); p = next; } - if (builder->args != NULL) { - grpc_channel_args_destroy(exec_ctx, builder->args); + if (builder->args != nullptr) { + grpc_channel_args_destroy(builder->args); } gpr_free(builder->target); gpr_free(builder); } -grpc_error *grpc_channel_stack_builder_finish( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, - size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, void **result) { +grpc_error* grpc_channel_stack_builder_finish( + grpc_channel_stack_builder* builder, size_t prefix_bytes, int initial_refs, + grpc_iomgr_cb_func destroy, void* destroy_arg, void** result) { // count the number of 
filters size_t num_filters = 0; - for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) { + for (filter_node* p = builder->begin.next; p != &builder->end; p = p->next) { num_filters++; } // create an array of filters - const grpc_channel_filter **filters = - (const grpc_channel_filter **)gpr_malloc(sizeof(*filters) * num_filters); + const grpc_channel_filter** filters = + static_cast( + gpr_malloc(sizeof(*filters) * num_filters)); size_t i = 0; - for (filter_node *p = builder->begin.next; p != &builder->end; p = p->next) { + for (filter_node* p = builder->begin.next; p != &builder->end; p = p->next) { filters[i++] = p->filter; } @@ -280,24 +282,24 @@ grpc_error *grpc_channel_stack_builder_finish( // allocate memory, with prefix_bytes followed by channel_stack_size *result = gpr_zalloc(prefix_bytes + channel_stack_size); // fetch a pointer to the channel stack - grpc_channel_stack *channel_stack = - (grpc_channel_stack *)((char *)(*result) + prefix_bytes); + grpc_channel_stack* channel_stack = reinterpret_cast( + static_cast(*result) + prefix_bytes); // and initialize it - grpc_error *error = grpc_channel_stack_init( - exec_ctx, initial_refs, destroy, - destroy_arg == NULL ? *result : destroy_arg, filters, num_filters, - builder->args, builder->transport, builder->name, channel_stack); + grpc_error* error = grpc_channel_stack_init( + initial_refs, destroy, destroy_arg == nullptr ? *result : destroy_arg, + filters, num_filters, builder->args, builder->transport, builder->name, + channel_stack); if (error != GRPC_ERROR_NONE) { - grpc_channel_stack_destroy(exec_ctx, channel_stack); + grpc_channel_stack_destroy(channel_stack); gpr_free(*result); - *result = NULL; + *result = nullptr; } else { // run post-initialization functions i = 0; - for (filter_node *p = builder->begin.next; p != &builder->end; + for (filter_node* p = builder->begin.next; p != &builder->end; p = p->next) { - if (p->init != NULL) { + if (p->init != nullptr) { p->init(channel_stack, grpc_channel_stack_element(channel_stack, i), p->init_arg); } @@ -305,8 +307,8 @@ grpc_error *grpc_channel_stack_builder_finish( } } - grpc_channel_stack_builder_destroy(exec_ctx, builder); - gpr_free((grpc_channel_filter **)filters); + grpc_channel_stack_builder_destroy(builder); + gpr_free(const_cast(filters)); return error; } diff --git a/Sources/CgRPC/src/core/lib/channel/channel_stack_builder.h b/Sources/CgRPC/src/core/lib/channel/channel_stack_builder.h index fdff2a2b6..c9a170bc8 100644 --- a/Sources/CgRPC/src/core/lib/channel/channel_stack_builder.h +++ b/Sources/CgRPC/src/core/lib/channel/channel_stack_builder.h @@ -19,15 +19,13 @@ #ifndef GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_BUILDER_H #define GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_BUILDER_H +#include + #include #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/channel_stack.h" -#ifdef __cplusplus -extern "C" { -#endif - /// grpc_channel_stack_builder offers a programmatic interface to selected /// and order channel filters typedef struct grpc_channel_stack_builder grpc_channel_stack_builder; @@ -35,135 +33,128 @@ typedef struct grpc_channel_stack_builder_iterator grpc_channel_stack_builder_iterator; /// Create a new channel stack builder -grpc_channel_stack_builder *grpc_channel_stack_builder_create(void); +grpc_channel_stack_builder* grpc_channel_stack_builder_create(void); /// Assign a name to the channel stack: \a name must be statically allocated -void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder *builder, - const char 
*name); +void grpc_channel_stack_builder_set_name(grpc_channel_stack_builder* builder, + const char* name); /// Set the target uri -void grpc_channel_stack_builder_set_target(grpc_channel_stack_builder *b, - const char *target); +void grpc_channel_stack_builder_set_target(grpc_channel_stack_builder* b, + const char* target); -const char *grpc_channel_stack_builder_get_target( - grpc_channel_stack_builder *b); +const char* grpc_channel_stack_builder_get_target( + grpc_channel_stack_builder* b); /// Attach \a transport to the builder (does not take ownership) void grpc_channel_stack_builder_set_transport( - grpc_channel_stack_builder *builder, grpc_transport *transport); + grpc_channel_stack_builder* builder, grpc_transport* transport); /// Fetch attached transport -grpc_transport *grpc_channel_stack_builder_get_transport( - grpc_channel_stack_builder *builder); +grpc_transport* grpc_channel_stack_builder_get_transport( + grpc_channel_stack_builder* builder); /// Set channel arguments: copies args void grpc_channel_stack_builder_set_channel_arguments( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, - const grpc_channel_args *args); + grpc_channel_stack_builder* builder, const grpc_channel_args* args); /// Return a borrowed pointer to the channel arguments -const grpc_channel_args *grpc_channel_stack_builder_get_channel_arguments( - grpc_channel_stack_builder *builder); +const grpc_channel_args* grpc_channel_stack_builder_get_channel_arguments( + grpc_channel_stack_builder* builder); /// Begin iterating over already defined filters in the builder at the beginning -grpc_channel_stack_builder_iterator * +grpc_channel_stack_builder_iterator* grpc_channel_stack_builder_create_iterator_at_first( - grpc_channel_stack_builder *builder); + grpc_channel_stack_builder* builder); /// Begin iterating over already defined filters in the builder at the end -grpc_channel_stack_builder_iterator * +grpc_channel_stack_builder_iterator* grpc_channel_stack_builder_create_iterator_at_last( - grpc_channel_stack_builder *builder); + grpc_channel_stack_builder* builder); /// Is an iterator at the first element? bool grpc_channel_stack_builder_iterator_is_first( - grpc_channel_stack_builder_iterator *iterator); + grpc_channel_stack_builder_iterator* iterator); /// Is an iterator at the end? bool grpc_channel_stack_builder_iterator_is_end( - grpc_channel_stack_builder_iterator *iterator); + grpc_channel_stack_builder_iterator* iterator); /// What is the name of the filter at this iterator position? -const char *grpc_channel_stack_builder_iterator_filter_name( - grpc_channel_stack_builder_iterator *iterator); +const char* grpc_channel_stack_builder_iterator_filter_name( + grpc_channel_stack_builder_iterator* iterator); /// Move an iterator to the next item bool grpc_channel_stack_builder_move_next( - grpc_channel_stack_builder_iterator *iterator); + grpc_channel_stack_builder_iterator* iterator); /// Move an iterator to the previous item bool grpc_channel_stack_builder_move_prev( - grpc_channel_stack_builder_iterator *iterator); + grpc_channel_stack_builder_iterator* iterator); /// Return an iterator at \a filter_name, or at the end of the list if not /// found. 
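As a usage note for the iterator API documented above (illustrative only, not part of the patch; the helper name has_filter is invented): iterator_find hands back an end iterator when the name is absent, so is_end doubles as the not-found check, and the iterator must still be destroyed either way.

static bool has_filter(grpc_channel_stack_builder* builder,
                       const char* filter_name) {
  grpc_channel_stack_builder_iterator* it =
      grpc_channel_stack_builder_iterator_find(builder, filter_name);
  const bool found = !grpc_channel_stack_builder_iterator_is_end(it);
  grpc_channel_stack_builder_iterator_destroy(it);
  return found;
}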
-grpc_channel_stack_builder_iterator *grpc_channel_stack_builder_iterator_find( - grpc_channel_stack_builder *builder, const char *filter_name); +grpc_channel_stack_builder_iterator* grpc_channel_stack_builder_iterator_find( + grpc_channel_stack_builder* builder, const char* filter_name); typedef void (*grpc_post_filter_create_init_func)( - grpc_channel_stack *channel_stack, grpc_channel_element *elem, void *arg); + grpc_channel_stack* channel_stack, grpc_channel_element* elem, void* arg); /// Add \a filter to the stack, after \a iterator. /// Call \a post_init_func(..., \a user_data) once the channel stack is /// created. bool grpc_channel_stack_builder_add_filter_after( - grpc_channel_stack_builder_iterator *iterator, - const grpc_channel_filter *filter, + grpc_channel_stack_builder_iterator* iterator, + const grpc_channel_filter* filter, grpc_post_filter_create_init_func post_init_func, - void *user_data) GRPC_MUST_USE_RESULT; + void* user_data) GRPC_MUST_USE_RESULT; /// Add \a filter to the stack, before \a iterator. /// Call \a post_init_func(..., \a user_data) once the channel stack is /// created. bool grpc_channel_stack_builder_add_filter_before( - grpc_channel_stack_builder_iterator *iterator, - const grpc_channel_filter *filter, + grpc_channel_stack_builder_iterator* iterator, + const grpc_channel_filter* filter, grpc_post_filter_create_init_func post_init_func, - void *user_data) GRPC_MUST_USE_RESULT; + void* user_data) GRPC_MUST_USE_RESULT; /// Add \a filter to the beginning of the filter list. /// Call \a post_init_func(..., \a user_data) once the channel stack is /// created. bool grpc_channel_stack_builder_prepend_filter( - grpc_channel_stack_builder *builder, const grpc_channel_filter *filter, + grpc_channel_stack_builder* builder, const grpc_channel_filter* filter, grpc_post_filter_create_init_func post_init_func, - void *user_data) GRPC_MUST_USE_RESULT; + void* user_data) GRPC_MUST_USE_RESULT; /// Add \a filter to the end of the filter list. /// Call \a post_init_func(..., \a user_data) once the channel stack is /// created. bool grpc_channel_stack_builder_append_filter( - grpc_channel_stack_builder *builder, const grpc_channel_filter *filter, + grpc_channel_stack_builder* builder, const grpc_channel_filter* filter, grpc_post_filter_create_init_func post_init_func, - void *user_data) GRPC_MUST_USE_RESULT; + void* user_data) GRPC_MUST_USE_RESULT; /// Remove any filter whose name is \a filter_name from \a builder. Returns true /// if \a filter_name was not found. bool grpc_channel_stack_builder_remove_filter( - grpc_channel_stack_builder *builder, const char *filter_name); + grpc_channel_stack_builder* builder, const char* filter_name); /// Terminate iteration and destroy \a iterator void grpc_channel_stack_builder_iterator_destroy( - grpc_channel_stack_builder_iterator *iterator); + grpc_channel_stack_builder_iterator* iterator); /// Destroy the builder, return the freshly minted channel stack in \a result. 
/// Allocates \a prefix_bytes bytes before the channel stack /// Returns the base pointer of the allocated block /// \a initial_refs, \a destroy, \a destroy_arg are as per /// grpc_channel_stack_init -grpc_error *grpc_channel_stack_builder_finish( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, - size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy, - void *destroy_arg, void **result); +grpc_error* grpc_channel_stack_builder_finish( + grpc_channel_stack_builder* builder, size_t prefix_bytes, int initial_refs, + grpc_iomgr_cb_func destroy, void* destroy_arg, void** result); /// Destroy the builder without creating a channel stack -void grpc_channel_stack_builder_destroy(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder); - -extern grpc_tracer_flag grpc_trace_channel_stack_builder; +void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder* builder); -#ifdef __cplusplus -} -#endif +extern grpc_core::TraceFlag grpc_trace_channel_stack_builder; #endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_BUILDER_H */ diff --git a/Sources/CgRPC/src/core/lib/channel/channel_trace.cc b/Sources/CgRPC/src/core/lib/channel/channel_trace.cc new file mode 100644 index 000000000..654300cd3 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/channel/channel_trace.cc @@ -0,0 +1,239 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/channel/channel_trace.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "src/core/lib/channel/channel_trace_registry.h" +#include "src/core/lib/channel/status_util.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/surface/channel.h" +#include "src/core/lib/transport/connectivity_state.h" +#include "src/core/lib/transport/error_utils.h" + +namespace grpc_core { + +ChannelTrace::TraceEvent::TraceEvent( + Severity severity, grpc_slice data, + RefCountedPtr referenced_tracer, ReferencedType type) + : severity_(severity), + data_(data), + timestamp_(grpc_millis_to_timespec(grpc_core::ExecCtx::Get()->Now(), + GPR_CLOCK_REALTIME)), + next_(nullptr), + referenced_tracer_(std::move(referenced_tracer)), + referenced_type_(type) {} + +ChannelTrace::TraceEvent::TraceEvent(Severity severity, grpc_slice data) + : severity_(severity), + data_(data), + timestamp_(grpc_millis_to_timespec(grpc_core::ExecCtx::Get()->Now(), + GPR_CLOCK_REALTIME)), + next_(nullptr) {} + +ChannelTrace::TraceEvent::~TraceEvent() { grpc_slice_unref_internal(data_); } + +ChannelTrace::ChannelTrace(size_t max_events) + : channel_uuid_(-1), + num_events_logged_(0), + list_size_(0), + max_list_size_(max_events), + head_trace_(nullptr), + tail_trace_(nullptr) { + if (max_list_size_ == 0) return; // tracing is disabled if max_events == 0 + gpr_mu_init(&tracer_mu_); + channel_uuid_ = grpc_channel_trace_registry_register_channel_trace(this); + time_created_ = grpc_millis_to_timespec(grpc_core::ExecCtx::Get()->Now(), + GPR_CLOCK_REALTIME); +} + +ChannelTrace::~ChannelTrace() { + if (max_list_size_ == 0) return; // tracing is disabled if max_events == 0 + TraceEvent* it = head_trace_; + while (it != nullptr) { + TraceEvent* to_free = it; + it = it->next(); + Delete(to_free); + } + grpc_channel_trace_registry_unregister_channel_trace(channel_uuid_); + gpr_mu_destroy(&tracer_mu_); +} + +intptr_t ChannelTrace::GetUuid() const { return channel_uuid_; } + +void ChannelTrace::AddTraceEventHelper(TraceEvent* new_trace_event) { + ++num_events_logged_; + // first event case + if (head_trace_ == nullptr) { + head_trace_ = tail_trace_ = new_trace_event; + } + // regular event add case + else { + tail_trace_->set_next(new_trace_event); + tail_trace_ = tail_trace_->next(); + } + ++list_size_; + // maybe garbage collect the end + if (list_size_ > max_list_size_) { + TraceEvent* to_free = head_trace_; + head_trace_ = head_trace_->next(); + Delete(to_free); + --list_size_; + } +} + +void ChannelTrace::AddTraceEvent(Severity severity, grpc_slice data) { + if (max_list_size_ == 0) return; // tracing is disabled if max_events == 0 + AddTraceEventHelper(New(severity, data)); +} + +void ChannelTrace::AddTraceEventReferencingChannel( + Severity severity, grpc_slice data, + RefCountedPtr referenced_tracer) { + if (max_list_size_ == 0) return; // tracing is disabled if max_events == 0 + // create and fill up the new event + AddTraceEventHelper( + New(severity, data, std::move(referenced_tracer), Channel)); +} + +void ChannelTrace::AddTraceEventReferencingSubchannel( + Severity severity, grpc_slice data, + RefCountedPtr referenced_tracer) { + if (max_list_size_ == 0) return; // tracing is disabled if max_events == 0 + // create and fill up the new event + AddTraceEventHelper(New( + severity, data, 
std::move(referenced_tracer), Subchannel));
+}
+
+namespace {
+
+// returns an allocated string that represents tm according to RFC-3339, and,
+// more specifically, follows:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+//
+// "Uses RFC 3339, where generated output will always be Z-normalized and uses
+// 0, 3, 6 or 9 fractional digits."
+char* fmt_time(gpr_timespec tm) {
+  char time_buffer[35];
+  char ns_buffer[11];  // '.' + 9 digits of precision
+  struct tm* tm_info = localtime((const time_t*)&tm.tv_sec);
+  strftime(time_buffer, sizeof(time_buffer), "%Y-%m-%dT%H:%M:%S", tm_info);
+  snprintf(ns_buffer, 11, ".%09d", tm.tv_nsec);
+  // This loop trims off trailing zeros by inserting a null character at the
+  // right point. We iterate in chunks of three because we want 0, 3, 6, or 9
+  // fractional digits.
+  for (int i = 7; i >= 1; i -= 3) {
+    if (ns_buffer[i] == '0' && ns_buffer[i + 1] == '0' &&
+        ns_buffer[i + 2] == '0') {
+      ns_buffer[i] = '\0';
+      // Edge case in which all fractional digits were 0.
+      if (i == 1) {
+        ns_buffer[0] = '\0';
+      }
+    } else {
+      break;
+    }
+  }
+  char* full_time_str;
+  gpr_asprintf(&full_time_str, "%s%sZ", time_buffer, ns_buffer);
+  return full_time_str;
+}
+
+const char* severity_string(ChannelTrace::Severity severity) {
+  switch (severity) {
+    case ChannelTrace::Severity::Info:
+      return "CT_INFO";
+    case ChannelTrace::Severity::Warning:
+      return "CT_WARNING";
+    case ChannelTrace::Severity::Error:
+      return "CT_ERROR";
+    default:
+      GPR_UNREACHABLE_CODE(return "CT_UNKNOWN");
+  }
+}
+
+}  // anonymous namespace
+
+void ChannelTrace::TraceEvent::RenderTraceEvent(grpc_json* json) const {
+  grpc_json* json_iterator = nullptr;
+  json_iterator = grpc_json_create_child(json_iterator, json, "description",
+                                         grpc_slice_to_c_string(data_),
+                                         GRPC_JSON_STRING, true);
+  json_iterator = grpc_json_create_child(json_iterator, json, "severity",
+                                         severity_string(severity_),
+                                         GRPC_JSON_STRING, false);
+  json_iterator =
+      grpc_json_create_child(json_iterator, json, "timestamp",
+                             fmt_time(timestamp_), GRPC_JSON_STRING, true);
+  if (referenced_tracer_ != nullptr) {
+    char* uuid_str;
+    gpr_asprintf(&uuid_str, "%" PRIdPTR, referenced_tracer_->channel_uuid_);
+    grpc_json* child_ref = grpc_json_create_child(
+        json_iterator, json,
+        (referenced_type_ == Channel) ? "channelRef" : "subchannelRef", nullptr,
+        GRPC_JSON_OBJECT, false);
+    json_iterator = grpc_json_create_child(
+        nullptr, child_ref,
+        (referenced_type_ == Channel) ?
"channelId" : "subchannelId", uuid_str, + GRPC_JSON_STRING, true); + json_iterator = child_ref; + } +} + +char* ChannelTrace::RenderTrace() const { + if (!max_list_size_) + return nullptr; // tracing is disabled if max_events == 0 + grpc_json* json = grpc_json_create(GRPC_JSON_OBJECT); + char* num_events_logged_str; + gpr_asprintf(&num_events_logged_str, "%" PRId64, num_events_logged_); + grpc_json* json_iterator = nullptr; + json_iterator = + grpc_json_create_child(json_iterator, json, "numEventsLogged", + num_events_logged_str, GRPC_JSON_STRING, true); + json_iterator = + grpc_json_create_child(json_iterator, json, "creationTime", + fmt_time(time_created_), GRPC_JSON_STRING, true); + grpc_json* events = grpc_json_create_child(json_iterator, json, "events", + nullptr, GRPC_JSON_ARRAY, false); + json_iterator = nullptr; + TraceEvent* it = head_trace_; + while (it != nullptr) { + json_iterator = grpc_json_create_child(json_iterator, events, nullptr, + nullptr, GRPC_JSON_OBJECT, false); + it->RenderTraceEvent(json_iterator); + it = it->next(); + } + char* json_str = grpc_json_dump_to_string(json, 0); + grpc_json_destroy(json); + return json_str; +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/channel/channel_trace.h b/Sources/CgRPC/src/core/lib/channel/channel_trace.h new file mode 100644 index 000000000..1df1e585f --- /dev/null +++ b/Sources/CgRPC/src/core/lib/channel/channel_trace.h @@ -0,0 +1,133 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_CHANNEL_CHANNEL_TRACE_H +#define GRPC_CORE_LIB_CHANNEL_CHANNEL_TRACE_H + +#include + +#include +#include "src/core/lib/gprpp/ref_counted.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/json/json.h" + +namespace grpc_core { + +// Object used to hold live data for a channel. This data is exposed via the +// channelz service: +// https://github.com/grpc/proposal/blob/master/A14-channelz.md +class ChannelTrace : public RefCounted { + public: + ChannelTrace(size_t max_events); + ~ChannelTrace(); + + // returns the tracer's uuid + intptr_t GetUuid() const; + + enum Severity { + Unset = 0, // never to be used + Info, // we start at 1 to avoid using proto default values + Warning, + Error + }; + + // Adds a new trace event to the tracing object + // + // TODO(ncteisen): as this call is used more and more throughout the gRPC + // stack, determine if it makes more sense to accept a char* instead of a + // slice. + void AddTraceEvent(Severity severity, grpc_slice data); + + // Adds a new trace event to the tracing object. This trace event refers to a + // an event on a child of the channel. For example, if this channel has + // created a new subchannel, then it would record that with a TraceEvent + // referencing the new subchannel. + // + // TODO(ncteisen): Once channelz is implemented, the events should reference + // the overall channelz object, not just the ChannelTrace object. 
+  // TODO(ncteisen): as this call is used more and more throughout the gRPC
+  // stack, determine if it makes more sense to accept a char* instead of a
+  // slice.
+  void AddTraceEventReferencingChannel(
+      Severity severity, grpc_slice data,
+      RefCountedPtr<ChannelTrace> referenced_tracer);
+  void AddTraceEventReferencingSubchannel(
+      Severity severity, grpc_slice data,
+      RefCountedPtr<ChannelTrace> referenced_tracer);
+
+  // Returns the tracing data rendered as a grpc json string.
+  // The string is owned by the caller and must be freed.
+  char* RenderTrace() const;
+
+ private:
+  // Types of objects that can be referenced by trace events.
+  enum ReferencedType { Channel, Subchannel };
+  // Private class to encapsulate all the data and bookkeeping needed for a
+  // trace event.
+  class TraceEvent {
+   public:
+    // Constructor for a TraceEvent that references a different channel.
+    // TODO(ncteisen): once channelz is implemented, this should reference the
+    // overall channelz object, not just the ChannelTrace object
+    TraceEvent(Severity severity, grpc_slice data,
+               RefCountedPtr<ChannelTrace> referenced_tracer,
+               ReferencedType type);
+
+    // Constructor for a TraceEvent that does not reference a different
+    // channel.
+    TraceEvent(Severity severity, grpc_slice data);
+
+    ~TraceEvent();
+
+    // Renders the data inside of this TraceEvent into a json object. This is
+    // used by the ChannelTrace, when it is rendering itself.
+    void RenderTraceEvent(grpc_json* json) const;
+
+    // set and get for the next_ pointer.
+    TraceEvent* next() const { return next_; }
+    void set_next(TraceEvent* next) { next_ = next; }
+
+   private:
+    Severity severity_;
+    grpc_slice data_;
+    gpr_timespec timestamp_;
+    TraceEvent* next_;
+    // the tracer object for the (sub)channel that this trace event refers to.
+    RefCountedPtr<ChannelTrace> referenced_tracer_;
+    // the type that the referenced tracer points to. Unused if this trace
+    // does not point to any channel or subchannel
+    ReferencedType referenced_type_;
+  };  // TraceEvent
+
+  // Internal helper to add and link in a trace event
+  void AddTraceEventHelper(TraceEvent* new_trace_event);
+
+  gpr_mu tracer_mu_;
+  intptr_t channel_uuid_;
+  uint64_t num_events_logged_;
+  size_t list_size_;
+  size_t max_list_size_;
+  TraceEvent* head_trace_;
+  TraceEvent* tail_trace_;
+  gpr_timespec time_created_;
+};
+
+}  // namespace grpc_core
+
+#endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_TRACE_H */
diff --git a/Sources/CgRPC/src/core/lib/channel/channel_trace_registry.cc b/Sources/CgRPC/src/core/lib/channel/channel_trace_registry.cc
new file mode 100644
index 000000000..6c8243146
--- /dev/null
+++ b/Sources/CgRPC/src/core/lib/channel/channel_trace_registry.cc
@@ -0,0 +1,80 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include
+
+#include "src/core/lib/avl/avl.h"
+#include "src/core/lib/channel/channel_trace.h"
+#include "src/core/lib/channel/channel_trace_registry.h"
+#include "src/core/lib/gpr/useful.h"
+
+#include
+#include
+
+// file global lock and avl.
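/* A minimal lookup sketch of the registry round-trip, assuming `uuid` was
 * previously obtained from ChannelTrace::GetUuid(); `render_trace_for_uuid` is
 * a hypothetical helper name used only for illustration. The registry hands
 * out the uuid when a ChannelTrace registers itself from its constructor, and
 * the same uuid can later be resolved back to the live tracer. */
static char* render_trace_for_uuid(intptr_t uuid) {
  grpc_core::ChannelTrace* tracer =
      grpc_channel_trace_registry_get_channel_trace(uuid);
  // nullptr means the uuid was never registered or has been unregistered.
  if (tracer == nullptr) return nullptr;
  return tracer->RenderTrace();  // JSON string owned by the caller
}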
+static gpr_mu g_mu; +static grpc_avl g_avl; +static gpr_atm g_uuid = 0; + +// avl vtable for uuid (intptr_t) -> ChannelTrace +// this table is only looking, it does not own anything. +static void destroy_intptr(void* not_used, void* user_data) {} +static void* copy_intptr(void* key, void* user_data) { return key; } +static long compare_intptr(void* key1, void* key2, void* user_data) { + return GPR_ICMP(key1, key2); +} + +static void destroy_channel_trace(void* trace, void* user_data) {} +static void* copy_channel_trace(void* trace, void* user_data) { return trace; } +static const grpc_avl_vtable avl_vtable = { + destroy_intptr, copy_intptr, compare_intptr, destroy_channel_trace, + copy_channel_trace}; + +void grpc_channel_trace_registry_init() { + gpr_mu_init(&g_mu); + g_avl = grpc_avl_create(&avl_vtable); +} + +void grpc_channel_trace_registry_shutdown() { + grpc_avl_unref(g_avl, nullptr); + gpr_mu_destroy(&g_mu); +} + +intptr_t grpc_channel_trace_registry_register_channel_trace( + grpc_core::ChannelTrace* channel_trace) { + intptr_t prior = gpr_atm_no_barrier_fetch_add(&g_uuid, 1); + gpr_mu_lock(&g_mu); + g_avl = grpc_avl_add(g_avl, (void*)prior, channel_trace, nullptr); + gpr_mu_unlock(&g_mu); + return prior; +} + +void grpc_channel_trace_registry_unregister_channel_trace(intptr_t uuid) { + gpr_mu_lock(&g_mu); + g_avl = grpc_avl_remove(g_avl, (void*)uuid, nullptr); + gpr_mu_unlock(&g_mu); +} + +grpc_core::ChannelTrace* grpc_channel_trace_registry_get_channel_trace( + intptr_t uuid) { + gpr_mu_lock(&g_mu); + grpc_core::ChannelTrace* ret = static_cast( + grpc_avl_get(g_avl, (void*)uuid, nullptr)); + gpr_mu_unlock(&g_mu); + return ret; +} diff --git a/Sources/CgRPC/src/core/lib/channel/channel_trace_registry.h b/Sources/CgRPC/src/core/lib/channel/channel_trace_registry.h new file mode 100644 index 000000000..391ecba7d --- /dev/null +++ b/Sources/CgRPC/src/core/lib/channel/channel_trace_registry.h @@ -0,0 +1,43 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_CHANNEL_CHANNEL_TRACE_REGISTRY_H +#define GRPC_CORE_LIB_CHANNEL_CHANNEL_TRACE_REGISTRY_H + +#include + +#include "src/core/lib/channel/channel_trace.h" + +#include + +// TODO(ncteisen): convert this file to C++ + +void grpc_channel_trace_registry_init(); +void grpc_channel_trace_registry_shutdown(); + +// globally registers a ChannelTrace. Returns its unique uuid +intptr_t grpc_channel_trace_registry_register_channel_trace( + grpc_core::ChannelTrace* channel_trace); +// globally unregisters the ChannelTrace that is associated to uuid. +void grpc_channel_trace_registry_unregister_channel_trace(intptr_t uuid); +// if object with uuid has previously been registered, returns the ChannelTrace +// associated with that uuid. Else returns nullptr. 
+grpc_core::ChannelTrace* grpc_channel_trace_registry_get_channel_trace( + intptr_t uuid); + +#endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_TRACE_REGISTRY_H */ diff --git a/Sources/CgRPC/src/core/lib/channel/connected_channel.c b/Sources/CgRPC/src/core/lib/channel/connected_channel.cc similarity index 52% rename from Sources/CgRPC/src/core/lib/channel/connected_channel.c rename to Sources/CgRPC/src/core/lib/channel/connected_channel.cc index 4f3790895..ddd302940 100644 --- a/Sources/CgRPC/src/core/lib/channel/connected_channel.c +++ b/Sources/CgRPC/src/core/lib/channel/connected_channel.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/channel/connected_channel.h" #include @@ -26,48 +28,45 @@ #include #include #include +#include "src/core/lib/gpr/string.h" #include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/transport/transport.h" #define MAX_BUFFER_LENGTH 8192 typedef struct connected_channel_channel_data { - grpc_transport *transport; + grpc_transport* transport; } channel_data; typedef struct { grpc_closure closure; - grpc_closure *original_closure; - grpc_call_combiner *call_combiner; - const char *reason; + grpc_closure* original_closure; + grpc_call_combiner* call_combiner; + const char* reason; } callback_state; typedef struct connected_channel_call_data { - grpc_call_combiner *call_combiner; + grpc_call_combiner* call_combiner; // Closures used for returning results on the call combiner. callback_state on_complete[6]; // Max number of pending batches. callback_state recv_initial_metadata_ready; callback_state recv_message_ready; } call_data; -static void run_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - callback_state *state = (callback_state *)arg; - GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner, - state->original_closure, GRPC_ERROR_REF(error), - state->reason); +static void run_in_call_combiner(void* arg, grpc_error* error) { + callback_state* state = static_cast(arg); + GRPC_CALL_COMBINER_START(state->call_combiner, state->original_closure, + GRPC_ERROR_REF(error), state->reason); } -static void run_cancel_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - run_in_call_combiner(exec_ctx, arg, error); +static void run_cancel_in_call_combiner(void* arg, grpc_error* error) { + run_in_call_combiner(arg, error); gpr_free(arg); } -static void intercept_callback(call_data *calld, callback_state *state, - bool free_when_done, const char *reason, - grpc_closure **original_closure) { +static void intercept_callback(call_data* calld, callback_state* state, + bool free_when_done, const char* reason, + grpc_closure** original_closure) { state->original_closure = *original_closure; state->call_combiner = calld->call_combiner; state->reason = reason; @@ -77,39 +76,38 @@ static void intercept_callback(call_data *calld, callback_state *state, state, grpc_schedule_on_exec_ctx); } -static callback_state *get_state_for_batch( - call_data *calld, grpc_transport_stream_op_batch *batch) { +static callback_state* get_state_for_batch( + call_data* calld, grpc_transport_stream_op_batch* batch) { if (batch->send_initial_metadata) return &calld->on_complete[0]; if (batch->send_message) return &calld->on_complete[1]; if (batch->send_trailing_metadata) return &calld->on_complete[2]; if (batch->recv_initial_metadata) return &calld->on_complete[3]; if (batch->recv_message) return &calld->on_complete[4]; if (batch->recv_trailing_metadata) return &calld->on_complete[5]; - 
GPR_UNREACHABLE_CODE(return NULL); + GPR_UNREACHABLE_CODE(return nullptr); } /* We perform a small hack to locate transport data alongside the connected channel data in call allocations, to allow everything to be pulled in minimal cache line requests */ -#define TRANSPORT_STREAM_FROM_CALL_DATA(calld) ((grpc_stream *)((calld) + 1)) +#define TRANSPORT_STREAM_FROM_CALL_DATA(calld) ((grpc_stream*)((calld) + 1)) #define CALL_DATA_FROM_TRANSPORT_STREAM(transport_stream) \ - (((call_data *)(transport_stream)) - 1) + (((call_data*)(transport_stream)) - 1) /* Intercept a call operation and either push it directly up or translate it into transport stream operations */ static void con_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; + grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); if (batch->recv_initial_metadata) { - callback_state *state = &calld->recv_initial_metadata_ready; + callback_state* state = &calld->recv_initial_metadata_ready; intercept_callback( calld, state, false, "recv_initial_metadata_ready", &batch->payload->recv_initial_metadata.recv_initial_metadata_ready); } if (batch->recv_message) { - callback_state *state = &calld->recv_message_ready; + callback_state* state = &calld->recv_message_ready; intercept_callback(calld, state, false, "recv_message_ready", &batch->payload->recv_message.recv_message_ready); } @@ -119,85 +117,78 @@ static void con_start_transport_stream_op_batch( // calld->on_complete like we can for the other ops. However, // cancellation isn't in the fast path, so we just allocate a new // closure for each one. 
- callback_state *state = (callback_state *)gpr_malloc(sizeof(*state)); + callback_state* state = + static_cast(gpr_malloc(sizeof(*state))); intercept_callback(calld, state, true, "on_complete (cancel_stream)", &batch->on_complete); } else { - callback_state *state = get_state_for_batch(calld, batch); + callback_state* state = get_state_for_batch(calld, batch); intercept_callback(calld, state, false, "on_complete", &batch->on_complete); } - grpc_transport_perform_stream_op(exec_ctx, chand->transport, - TRANSPORT_STREAM_FROM_CALL_DATA(calld), - batch); - GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner, - "passed batch to transport"); + grpc_transport_perform_stream_op( + chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), batch); + GRPC_CALL_COMBINER_STOP(calld->call_combiner, "passed batch to transport"); } -static void con_start_transport_op(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_transport_op *op) { - channel_data *chand = (channel_data *)elem->channel_data; - grpc_transport_perform_op(exec_ctx, chand->transport, op); +static void con_start_transport_op(grpc_channel_element* elem, + grpc_transport_op* op) { + channel_data* chand = static_cast(elem->channel_data); + grpc_transport_perform_op(chand->transport, op); } /* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); calld->call_combiner = args->call_combiner; int r = grpc_transport_init_stream( - exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), + chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), &args->call_stack->refcount, args->server_transport_data, args->arena); return r == 0 ? 
GRPC_ERROR_NONE : GRPC_ERROR_CREATE_FROM_STATIC_STRING( "transport stream initialization failed"); } -static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_polling_entity *pollent) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - grpc_transport_set_pops(exec_ctx, chand->transport, +static void set_pollset_or_pollset_set(grpc_call_element* elem, + grpc_polling_entity* pollent) { + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); + grpc_transport_set_pops(chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent); } /* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *then_schedule_closure) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - grpc_transport_destroy_stream(exec_ctx, chand->transport, +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* then_schedule_closure) { + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); + grpc_transport_destroy_stream(chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), then_schedule_closure); } /* Constructor for channel_data */ -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - channel_data *cd = (channel_data *)elem->channel_data; +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + channel_data* cd = static_cast(elem->channel_data); GPR_ASSERT(args->is_last); - cd->transport = NULL; + cd->transport = nullptr; return GRPC_ERROR_NONE; } /* Destructor for channel_data */ -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { - channel_data *cd = (channel_data *)elem->channel_data; +static void destroy_channel_elem(grpc_channel_element* elem) { + channel_data* cd = static_cast(elem->channel_data); if (cd->transport) { - grpc_transport_destroy(exec_ctx, cd->transport); + grpc_transport_destroy(cd->transport); } } /* No-op. */ -static void con_get_channel_info(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - const grpc_channel_info *channel_info) {} +static void con_get_channel_info(grpc_channel_element* elem, + const grpc_channel_info* channel_info) {} const grpc_channel_filter grpc_connected_filter = { con_start_transport_stream_op_batch, @@ -213,12 +204,12 @@ const grpc_channel_filter grpc_connected_filter = { "connected", }; -static void bind_transport(grpc_channel_stack *channel_stack, - grpc_channel_element *elem, void *t) { - channel_data *cd = (channel_data *)elem->channel_data; +static void bind_transport(grpc_channel_stack* channel_stack, + grpc_channel_element* elem, void* t) { + channel_data* cd = static_cast(elem->channel_data); GPR_ASSERT(elem->filter == &grpc_connected_filter); - GPR_ASSERT(cd->transport == NULL); - cd->transport = (grpc_transport *)t; + GPR_ASSERT(cd->transport == nullptr); + cd->transport = static_cast(t); /* HACK(ctiller): increase call stack size for the channel to make space for channel data. 
We need a cleaner (but performant) way to do this, @@ -227,20 +218,19 @@ static void bind_transport(grpc_channel_stack *channel_stack, the last call element, and the last call element MUST be the connected channel. */ channel_stack->call_stack_size += - grpc_transport_stream_size((grpc_transport *)t); + grpc_transport_stream_size(static_cast(t)); } -bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - void *arg_must_be_null) { - GPR_ASSERT(arg_must_be_null == NULL); - grpc_transport *t = grpc_channel_stack_builder_get_transport(builder); - GPR_ASSERT(t != NULL); +bool grpc_add_connected_filter(grpc_channel_stack_builder* builder, + void* arg_must_be_null) { + GPR_ASSERT(arg_must_be_null == nullptr); + grpc_transport* t = grpc_channel_stack_builder_get_transport(builder); + GPR_ASSERT(t != nullptr); return grpc_channel_stack_builder_append_filter( builder, &grpc_connected_filter, bind_transport, t); } -grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) { - call_data *calld = (call_data *)elem->call_data; +grpc_stream* grpc_connected_channel_get_stream(grpc_call_element* elem) { + call_data* calld = static_cast(elem->call_data); return TRANSPORT_STREAM_FROM_CALL_DATA(calld); } diff --git a/Sources/CgRPC/src/core/lib/channel/connected_channel.h b/Sources/CgRPC/src/core/lib/channel/connected_channel.h index 10c98cce5..faa1c73a2 100644 --- a/Sources/CgRPC/src/core/lib/channel/connected_channel.h +++ b/Sources/CgRPC/src/core/lib/channel/connected_channel.h @@ -19,15 +19,16 @@ #ifndef GRPC_CORE_LIB_CHANNEL_CONNECTED_CHANNEL_H #define GRPC_CORE_LIB_CHANNEL_CONNECTED_CHANNEL_H +#include + #include "src/core/lib/channel/channel_stack_builder.h" extern const grpc_channel_filter grpc_connected_filter; -bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - void *arg_must_be_null); +bool grpc_add_connected_filter(grpc_channel_stack_builder* builder, + void* arg_must_be_null); /* Debug helper to dig the transport stream out of a call element */ -grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem); +grpc_stream* grpc_connected_channel_get_stream(grpc_call_element* elem); #endif /* GRPC_CORE_LIB_CHANNEL_CONNECTED_CHANNEL_H */ diff --git a/Sources/CgRPC/src/core/lib/channel/context.h b/Sources/CgRPC/src/core/lib/channel/context.h index 191bd6335..5daf48a9a 100644 --- a/Sources/CgRPC/src/core/lib/channel/context.h +++ b/Sources/CgRPC/src/core/lib/channel/context.h @@ -42,8 +42,8 @@ typedef enum { } grpc_context_index; typedef struct { - void *value; - void (*destroy)(void *); + void* value; + void (*destroy)(void*); } grpc_call_context_element; #endif /* GRPC_CORE_LIB_CHANNEL_CONTEXT_H */ diff --git a/Sources/CgRPC/src/core/lib/channel/handshaker.c b/Sources/CgRPC/src/core/lib/channel/handshaker.cc similarity index 59% rename from Sources/CgRPC/src/core/lib/channel/handshaker.c rename to Sources/CgRPC/src/core/lib/channel/handshaker.cc index 1753da572..2faeb64cb 100644 --- a/Sources/CgRPC/src/core/lib/channel/handshaker.c +++ b/Sources/CgRPC/src/core/lib/channel/handshaker.cc @@ -16,15 +16,21 @@ * */ +#include + #include #include #include +#include #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/handshaker.h" +#include "src/core/lib/debug/trace.h" #include "src/core/lib/iomgr/timer.h" +grpc_core::TraceFlag grpc_handshaker_trace(false, "handshaker"); + // // grpc_handshaker // @@ -34,23 +40,24 @@ void grpc_handshaker_init(const grpc_handshaker_vtable* 
vtable, handshaker->vtable = vtable; } -void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker) { - handshaker->vtable->destroy(exec_ctx, handshaker); +void grpc_handshaker_destroy(grpc_handshaker* handshaker) { + handshaker->vtable->destroy(handshaker); } -void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, grpc_error* why) { - handshaker->vtable->shutdown(exec_ctx, handshaker, why); +void grpc_handshaker_shutdown(grpc_handshaker* handshaker, grpc_error* why) { + handshaker->vtable->shutdown(handshaker, why); } -void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, +void grpc_handshaker_do_handshake(grpc_handshaker* handshaker, grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done, grpc_handshaker_args* args) { - handshaker->vtable->do_handshake(exec_ctx, handshaker, acceptor, - on_handshake_done, args); + handshaker->vtable->do_handshake(handshaker, acceptor, on_handshake_done, + args); +} + +const char* grpc_handshaker_name(grpc_handshaker* handshaker) { + return handshaker->vtable->name; } // @@ -84,8 +91,8 @@ struct grpc_handshake_manager { }; grpc_handshake_manager* grpc_handshake_manager_create() { - grpc_handshake_manager* mgr = - (grpc_handshake_manager*)gpr_zalloc(sizeof(grpc_handshake_manager)); + grpc_handshake_manager* mgr = static_cast( + gpr_zalloc(sizeof(grpc_handshake_manager))); gpr_mu_init(&mgr->mu); gpr_ref_init(&mgr->refs, 1); return mgr; @@ -93,8 +100,8 @@ grpc_handshake_manager* grpc_handshake_manager_create() { void grpc_handshake_manager_pending_list_add(grpc_handshake_manager** head, grpc_handshake_manager* mgr) { - GPR_ASSERT(mgr->prev == NULL); - GPR_ASSERT(mgr->next == NULL); + GPR_ASSERT(mgr->prev == nullptr); + GPR_ASSERT(mgr->next == nullptr); mgr->next = *head; if (*head) { (*head)->prev = mgr; @@ -104,10 +111,10 @@ void grpc_handshake_manager_pending_list_add(grpc_handshake_manager** head, void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head, grpc_handshake_manager* mgr) { - if (mgr->next != NULL) { + if (mgr->next != nullptr) { mgr->next->prev = mgr->prev; } - if (mgr->prev != NULL) { + if (mgr->prev != nullptr) { mgr->prev->next = mgr->next; } else { GPR_ASSERT(*head == mgr); @@ -116,9 +123,9 @@ void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head, } void grpc_handshake_manager_pending_list_shutdown_all( - grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why) { - while (head != NULL) { - grpc_handshake_manager_shutdown(exec_ctx, head, GRPC_ERROR_REF(why)); + grpc_handshake_manager* head, grpc_error* why) { + while (head != nullptr) { + grpc_handshake_manager_shutdown(head, GRPC_ERROR_REF(why)); head = head->next; } GRPC_ERROR_UNREF(why); @@ -128,6 +135,12 @@ static bool is_power_of_2(size_t n) { return (n & (n - 1)) == 0; } void grpc_handshake_manager_add(grpc_handshake_manager* mgr, grpc_handshaker* handshaker) { + if (grpc_handshaker_trace.enabled()) { + gpr_log( + GPR_INFO, + "handshake_manager %p: adding handshaker %s [%p] at index %" PRIuPTR, + mgr, grpc_handshaker_name(handshaker), handshaker, mgr->count); + } gpr_mu_lock(&mgr->mu); // To avoid allocating memory for each handshaker we add, we double // the number of elements every time we need more. 
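/* A minimal sketch of the exec_ctx-free handshaker vtable introduced by this
 * change, using hypothetical noop_* names for illustration: each callback now
 * takes only the handshaker (plus its own arguments), and the vtable carries a
 * name used by the handshaker trace logging above. */
static void noop_destroy(grpc_handshaker* handshaker) {
  gpr_free(handshaker);  // assumes the handshaker was allocated with gpr_malloc
}
static void noop_shutdown(grpc_handshaker* handshaker, grpc_error* why) {
  GRPC_ERROR_UNREF(why);
}
static void noop_do_handshake(grpc_handshaker* handshaker,
                              grpc_tcp_server_acceptor* acceptor,
                              grpc_closure* on_handshake_done,
                              grpc_handshaker_args* args) {
  // Pass the args through unchanged and hand control back to the manager.
  GRPC_CLOSURE_SCHED(on_handshake_done, GRPC_ERROR_NONE);
}
static const grpc_handshaker_vtable noop_vtable = {noop_destroy, noop_shutdown,
                                                   noop_do_handshake, "noop"};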
@@ -138,18 +151,17 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr, realloc_count = mgr->count * 2; } if (realloc_count > 0) { - mgr->handshakers = (grpc_handshaker**)gpr_realloc( - mgr->handshakers, realloc_count * sizeof(grpc_handshaker*)); + mgr->handshakers = static_cast(gpr_realloc( + mgr->handshakers, realloc_count * sizeof(grpc_handshaker*))); } mgr->handshakers[mgr->count++] = handshaker; gpr_mu_unlock(&mgr->mu); } -static void grpc_handshake_manager_unref(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr) { +static void grpc_handshake_manager_unref(grpc_handshake_manager* mgr) { if (gpr_unref(&mgr->refs)) { for (size_t i = 0; i < mgr->count; ++i) { - grpc_handshaker_destroy(exec_ctx, mgr->handshakers[i]); + grpc_handshaker_destroy(mgr->handshakers[i]); } gpr_free(mgr->handshakers); gpr_mu_destroy(&mgr->mu); @@ -157,46 +169,75 @@ static void grpc_handshake_manager_unref(grpc_exec_ctx* exec_ctx, } } -void grpc_handshake_manager_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr) { - grpc_handshake_manager_unref(exec_ctx, mgr); +void grpc_handshake_manager_destroy(grpc_handshake_manager* mgr) { + grpc_handshake_manager_unref(mgr); } -void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr, +void grpc_handshake_manager_shutdown(grpc_handshake_manager* mgr, grpc_error* why) { gpr_mu_lock(&mgr->mu); // Shutdown the handshaker that's currently in progress, if any. if (!mgr->shutdown && mgr->index > 0) { mgr->shutdown = true; - grpc_handshaker_shutdown(exec_ctx, mgr->handshakers[mgr->index - 1], + grpc_handshaker_shutdown(mgr->handshakers[mgr->index - 1], GRPC_ERROR_REF(why)); } gpr_mu_unlock(&mgr->mu); GRPC_ERROR_UNREF(why); } +static char* handshaker_args_string(grpc_handshaker_args* args) { + char* args_str = grpc_channel_args_string(args->args); + size_t num_args = args->args != nullptr ? args->args->num_args : 0; + size_t read_buffer_length = + args->read_buffer != nullptr ? args->read_buffer->length : 0; + char* str; + gpr_asprintf(&str, + "{endpoint=%p, args=%p {size=%" PRIuPTR + ": %s}, read_buffer=%p (length=%" PRIuPTR "), exit_early=%d}", + args->endpoint, args->args, num_args, args_str, + args->read_buffer, read_buffer_length, args->exit_early); + gpr_free(args_str); + return str; +} + // Helper function to call either the next handshaker or the // on_handshake_done callback. // Returns true if we've scheduled the on_handshake_done callback. -static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr, +static bool call_next_handshaker_locked(grpc_handshake_manager* mgr, grpc_error* error) { + if (grpc_handshaker_trace.enabled()) { + char* args_str = handshaker_args_string(&mgr->args); + gpr_log(GPR_INFO, + "handshake_manager %p: error=%s shutdown=%d index=%" PRIuPTR + ", args=%s", + mgr, grpc_error_string(error), mgr->shutdown, mgr->index, args_str); + gpr_free(args_str); + } GPR_ASSERT(mgr->index <= mgr->count); // If we got an error or we've been shut down or we're exiting early or // we've finished the last handshaker, invoke the on_handshake_done // callback. Otherwise, call the next handshaker. if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->args.exit_early || mgr->index == mgr->count) { + if (grpc_handshaker_trace.enabled()) { + gpr_log(GPR_INFO, "handshake_manager %p: handshaking complete", mgr); + } // Cancel deadline timer, since we're invoking the on_handshake_done // callback now. 
-    grpc_timer_cancel(exec_ctx, &mgr->deadline_timer);
-    GRPC_CLOSURE_SCHED(exec_ctx, &mgr->on_handshake_done, error);
+    grpc_timer_cancel(&mgr->deadline_timer);
+    GRPC_CLOSURE_SCHED(&mgr->on_handshake_done, error);
     mgr->shutdown = true;
   } else {
-    grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index],
-                                 mgr->acceptor, &mgr->call_next_handshaker,
-                                 &mgr->args);
+    if (grpc_handshaker_trace.enabled()) {
+      gpr_log(
+          GPR_INFO,
+          "handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR,
+          mgr, grpc_handshaker_name(mgr->handshakers[mgr->index]),
+          mgr->handshakers[mgr->index], mgr->index);
+    }
+    grpc_handshaker_do_handshake(mgr->handshakers[mgr->index], mgr->acceptor,
+                                 &mgr->call_next_handshaker, &mgr->args);
   }
   ++mgr->index;
   return mgr->shutdown;
@@ -204,46 +245,45 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,

 // A function used as the handshaker-done callback when chaining
 // handshakers together.
-static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
-  grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
+static void call_next_handshaker(void* arg, grpc_error* error) {
+  grpc_handshake_manager* mgr = static_cast<grpc_handshake_manager*>(arg);
   gpr_mu_lock(&mgr->mu);
-  bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error));
+  bool done = call_next_handshaker_locked(mgr, GRPC_ERROR_REF(error));
   gpr_mu_unlock(&mgr->mu);
   // If we've invoked the final callback, we won't be coming back
   // to this function, so we can release our reference to the
   // handshake manager.
   if (done) {
-    grpc_handshake_manager_unref(exec_ctx, mgr);
+    grpc_handshake_manager_unref(mgr);
   }
 }

 // Callback invoked when deadline is exceeded.
-static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
-  grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
+static void on_timeout(void* arg, grpc_error* error) {
+  grpc_handshake_manager* mgr = static_cast<grpc_handshake_manager*>(arg);
   if (error == GRPC_ERROR_NONE) {  // Timer fired, rather than being cancelled.
     grpc_handshake_manager_shutdown(
-        exec_ctx, mgr,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake timed out"));
+        mgr, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake timed out"));
   }
-  grpc_handshake_manager_unref(exec_ctx, mgr);
+  grpc_handshake_manager_unref(mgr);
 }

 void grpc_handshake_manager_do_handshake(
-    grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
+    grpc_handshake_manager* mgr, grpc_pollset_set* interested_parties,
     grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
-    gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor,
+    grpc_millis deadline, grpc_tcp_server_acceptor* acceptor,
     grpc_iomgr_cb_func on_handshake_done, void* user_data) {
   gpr_mu_lock(&mgr->mu);
   GPR_ASSERT(mgr->index == 0);
   GPR_ASSERT(!mgr->shutdown);
   // Construct handshaker args. These will be passed through all
   // handshakers and eventually be freed by the on_handshake_done callback.
+  mgr->args.interested_parties = interested_parties;
   mgr->args.endpoint = endpoint;
   mgr->args.args = grpc_channel_args_copy(channel_args);
   mgr->args.user_data = user_data;
-  mgr->args.read_buffer =
-      (grpc_slice_buffer*)gpr_malloc(sizeof(*mgr->args.read_buffer));
+  mgr->args.read_buffer = static_cast<grpc_slice_buffer*>(
+      gpr_malloc(sizeof(*mgr->args.read_buffer)));
   grpc_slice_buffer_init(mgr->args.read_buffer);
   // Initialize state needed for calling handshakers.
mgr->acceptor = acceptor; @@ -255,14 +295,12 @@ void grpc_handshake_manager_do_handshake( gpr_ref(&mgr->refs); GRPC_CLOSURE_INIT(&mgr->on_timeout, on_timeout, mgr, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &mgr->deadline_timer, - gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), - &mgr->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC)); + grpc_timer_init(&mgr->deadline_timer, deadline, &mgr->on_timeout); // Start first handshaker, which also owns a ref. gpr_ref(&mgr->refs); - bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_NONE); + bool done = call_next_handshaker_locked(mgr, GRPC_ERROR_NONE); gpr_mu_unlock(&mgr->mu); if (done) { - grpc_handshake_manager_unref(exec_ctx, mgr); + grpc_handshake_manager_unref(mgr); } } diff --git a/Sources/CgRPC/src/core/lib/channel/handshaker.h b/Sources/CgRPC/src/core/lib/channel/handshaker.h index eb9a59bd0..be7fd127e 100644 --- a/Sources/CgRPC/src/core/lib/channel/handshaker.h +++ b/Sources/CgRPC/src/core/lib/channel/handshaker.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H #define GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H +#include + #include #include "src/core/lib/iomgr/closure.h" @@ -54,6 +56,7 @@ typedef struct grpc_handshaker grpc_handshaker; /// For the on_handshake_done callback, all members are input arguments, /// which the callback takes ownership of. typedef struct { + grpc_pollset_set* interested_parties; grpc_endpoint* endpoint; grpc_channel_args* args; grpc_slice_buffer* read_buffer; @@ -67,21 +70,23 @@ typedef struct { typedef struct { /// Destroys the handshaker. - void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker); + void (*destroy)(grpc_handshaker* handshaker); /// Shuts down the handshaker (e.g., to clean up when the operation is /// aborted in the middle). - void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker, - grpc_error* why); + void (*shutdown)(grpc_handshaker* handshaker, grpc_error* why); /// Performs handshaking, modifying \a args as needed (e.g., to /// replace \a endpoint with a wrapped endpoint). /// When finished, invokes \a on_handshake_done. /// \a acceptor will be NULL for client-side handshakers. - void (*do_handshake)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker, + void (*do_handshake)(grpc_handshaker* handshaker, grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done, grpc_handshaker_args* args); + + /// The name of the handshaker, for debugging purposes. + const char* name; } grpc_handshaker_vtable; /// Base struct. To subclass, make this the first member of the @@ -94,15 +99,13 @@ struct grpc_handshaker { void grpc_handshaker_init(const grpc_handshaker_vtable* vtable, grpc_handshaker* handshaker); -void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker); -void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, grpc_error* why); -void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx, - grpc_handshaker* handshaker, +void grpc_handshaker_destroy(grpc_handshaker* handshaker); +void grpc_handshaker_shutdown(grpc_handshaker* handshaker, grpc_error* why); +void grpc_handshaker_do_handshake(grpc_handshaker* handshaker, grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done, grpc_handshaker_args* args); +const char* grpc_handshaker_name(grpc_handshaker* handshaker); /// /// grpc_handshake_manager @@ -119,23 +122,23 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr, grpc_handshaker* handshaker); /// Destroys the handshake manager. 
-void grpc_handshake_manager_destroy(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr); +void grpc_handshake_manager_destroy(grpc_handshake_manager* mgr); /// Shuts down the handshake manager (e.g., to clean up when the operation is /// aborted in the middle). /// The caller must still call grpc_handshake_manager_destroy() after /// calling this function. -void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, - grpc_handshake_manager* mgr, +void grpc_handshake_manager_shutdown(grpc_handshake_manager* mgr, grpc_error* why); /// Invokes handshakers in the order they were added. +/// \a interested_parties may be non-nullptr to provide a pollset_set that +/// may be used during handshaking. Ownership is not taken. /// Takes ownership of \a endpoint, and then passes that ownership to /// the \a on_handshake_done callback. /// Does NOT take ownership of \a channel_args. Instead, makes a copy before /// invoking the first handshaker. -/// \a acceptor will be NULL for client-side handshakers. +/// \a acceptor will be nullptr for client-side handshakers. /// /// When done, invokes \a on_handshake_done with a grpc_handshaker_args /// object as its argument. If the callback is invoked with error != @@ -143,9 +146,9 @@ void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx, /// the necessary clean-up. Otherwise, the callback takes ownership of /// the arguments. void grpc_handshake_manager_do_handshake( - grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr, + grpc_handshake_manager* mgr, grpc_pollset_set* interested_parties, grpc_endpoint* endpoint, const grpc_channel_args* channel_args, - gpr_timespec deadline, grpc_tcp_server_acceptor* acceptor, + grpc_millis deadline, grpc_tcp_server_acceptor* acceptor, grpc_iomgr_cb_func on_handshake_done, void* user_data); /// Add \a mgr to the server side list of all pending handshake managers, the @@ -162,6 +165,6 @@ void grpc_handshake_manager_pending_list_remove(grpc_handshake_manager** head, /// Shutdown all pending handshake managers on the server side. // Not thread-safe. Caller needs to synchronize. 
void grpc_handshake_manager_pending_list_shutdown_all( - grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why); + grpc_handshake_manager* head, grpc_error* why); #endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */ diff --git a/Sources/CgRPC/src/core/lib/channel/handshaker_factory.c b/Sources/CgRPC/src/core/lib/channel/handshaker_factory.cc similarity index 55% rename from Sources/CgRPC/src/core/lib/channel/handshaker_factory.c rename to Sources/CgRPC/src/core/lib/channel/handshaker_factory.cc index 4deb280c6..4fd43635b 100644 --- a/Sources/CgRPC/src/core/lib/channel/handshaker_factory.c +++ b/Sources/CgRPC/src/core/lib/channel/handshaker_factory.cc @@ -16,24 +16,26 @@ * */ +#include + #include "src/core/lib/channel/handshaker_factory.h" #include void grpc_handshaker_factory_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory, - const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr) { - if (handshaker_factory != NULL) { - GPR_ASSERT(handshaker_factory->vtable != NULL); - handshaker_factory->vtable->add_handshakers(exec_ctx, handshaker_factory, - args, handshake_mgr); + grpc_handshaker_factory* handshaker_factory, const grpc_channel_args* args, + grpc_handshake_manager* handshake_mgr) { + if (handshaker_factory != nullptr) { + GPR_ASSERT(handshaker_factory->vtable != nullptr); + handshaker_factory->vtable->add_handshakers(handshaker_factory, args, + handshake_mgr); } } void grpc_handshaker_factory_destroy( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory) { - if (handshaker_factory != NULL) { - GPR_ASSERT(handshaker_factory->vtable != NULL); - handshaker_factory->vtable->destroy(exec_ctx, handshaker_factory); + grpc_handshaker_factory* handshaker_factory) { + if (handshaker_factory != nullptr) { + GPR_ASSERT(handshaker_factory->vtable != nullptr); + handshaker_factory->vtable->destroy(handshaker_factory); } } diff --git a/Sources/CgRPC/src/core/lib/channel/handshaker_factory.h b/Sources/CgRPC/src/core/lib/channel/handshaker_factory.h index 6238e735d..3e45fcf20 100644 --- a/Sources/CgRPC/src/core/lib/channel/handshaker_factory.h +++ b/Sources/CgRPC/src/core/lib/channel/handshaker_factory.h @@ -19,33 +19,32 @@ #ifndef GRPC_CORE_LIB_CHANNEL_HANDSHAKER_FACTORY_H #define GRPC_CORE_LIB_CHANNEL_HANDSHAKER_FACTORY_H +#include + #include #include "src/core/lib/channel/handshaker.h" -#include "src/core/lib/iomgr/exec_ctx.h" // A handshaker factory is used to create handshakers. 
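// As an illustrative sketch (hypothetical my_* names), a factory under the new
// exec_ctx-free vtable looks roughly like:
//
//   static void my_add_handshakers(grpc_handshaker_factory* factory,
//                                  const grpc_channel_args* args,
//                                  grpc_handshake_manager* handshake_mgr) {
//     grpc_handshake_manager_add(handshake_mgr, my_create_handshaker(args));
//   }
//   static void my_destroy(grpc_handshaker_factory* factory) {}
//   static const grpc_handshaker_factory_vtable my_vtable = {
//       my_add_handshakers, my_destroy};
//
// where my_create_handshaker() stands in for whatever builds the concrete
// grpc_handshaker.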
typedef struct grpc_handshaker_factory grpc_handshaker_factory; typedef struct { - void (*add_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_handshaker_factory *handshaker_factory, - const grpc_channel_args *args, - grpc_handshake_manager *handshake_mgr); - void (*destroy)(grpc_exec_ctx *exec_ctx, - grpc_handshaker_factory *handshaker_factory); + void (*add_handshakers)(grpc_handshaker_factory* handshaker_factory, + const grpc_channel_args* args, + grpc_handshake_manager* handshake_mgr); + void (*destroy)(grpc_handshaker_factory* handshaker_factory); } grpc_handshaker_factory_vtable; struct grpc_handshaker_factory { - const grpc_handshaker_factory_vtable *vtable; + const grpc_handshaker_factory_vtable* vtable; }; void grpc_handshaker_factory_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory, - const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr); + grpc_handshaker_factory* handshaker_factory, const grpc_channel_args* args, + grpc_handshake_manager* handshake_mgr); void grpc_handshaker_factory_destroy( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory); + grpc_handshaker_factory* handshaker_factory); #endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_FACTORY_H */ diff --git a/Sources/CgRPC/src/core/lib/channel/handshaker_registry.c b/Sources/CgRPC/src/core/lib/channel/handshaker_registry.cc similarity index 71% rename from Sources/CgRPC/src/core/lib/channel/handshaker_registry.c rename to Sources/CgRPC/src/core/lib/channel/handshaker_registry.cc index c6bc87d70..eec3e1b35 100644 --- a/Sources/CgRPC/src/core/lib/channel/handshaker_registry.c +++ b/Sources/CgRPC/src/core/lib/channel/handshaker_registry.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/channel/handshaker_registry.h" #include @@ -34,8 +36,9 @@ typedef struct { static void grpc_handshaker_factory_list_register( grpc_handshaker_factory_list* list, bool at_start, grpc_handshaker_factory* factory) { - list->list = (grpc_handshaker_factory**)gpr_realloc( - list->list, (list->num_factories + 1) * sizeof(grpc_handshaker_factory*)); + list->list = static_cast(gpr_realloc( + list->list, + (list->num_factories + 1) * sizeof(grpc_handshaker_factory*))); if (at_start) { memmove(list->list + 1, list->list, sizeof(grpc_handshaker_factory*) * list->num_factories); @@ -47,18 +50,17 @@ static void grpc_handshaker_factory_list_register( } static void grpc_handshaker_factory_list_add_handshakers( - grpc_exec_ctx* exec_ctx, grpc_handshaker_factory_list* list, - const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) { + grpc_handshaker_factory_list* list, const grpc_channel_args* args, + grpc_handshake_manager* handshake_mgr) { for (size_t i = 0; i < list->num_factories; ++i) { - grpc_handshaker_factory_add_handshakers(exec_ctx, list->list[i], args, - handshake_mgr); + grpc_handshaker_factory_add_handshakers(list->list[i], args, handshake_mgr); } } static void grpc_handshaker_factory_list_destroy( - grpc_exec_ctx* exec_ctx, grpc_handshaker_factory_list* list) { + grpc_handshaker_factory_list* list) { for (size_t i = 0; i < list->num_factories; ++i) { - grpc_handshaker_factory_destroy(exec_ctx, list->list[i]); + grpc_handshaker_factory_destroy(list->list[i]); } gpr_free(list->list); } @@ -74,10 +76,9 @@ void grpc_handshaker_factory_registry_init() { memset(g_handshaker_factory_lists, 0, sizeof(g_handshaker_factory_lists)); } -void grpc_handshaker_factory_registry_shutdown(grpc_exec_ctx* exec_ctx) { +void grpc_handshaker_factory_registry_shutdown() { for 
(size_t i = 0; i < NUM_HANDSHAKER_TYPES; ++i) { - grpc_handshaker_factory_list_destroy(exec_ctx, - &g_handshaker_factory_lists[i]); + grpc_handshaker_factory_list_destroy(&g_handshaker_factory_lists[i]); } } @@ -88,11 +89,9 @@ void grpc_handshaker_factory_register(bool at_start, &g_handshaker_factory_lists[handshaker_type], at_start, factory); } -void grpc_handshakers_add(grpc_exec_ctx* exec_ctx, - grpc_handshaker_type handshaker_type, +void grpc_handshakers_add(grpc_handshaker_type handshaker_type, const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) { grpc_handshaker_factory_list_add_handshakers( - exec_ctx, &g_handshaker_factory_lists[handshaker_type], args, - handshake_mgr); + &g_handshaker_factory_lists[handshaker_type], args, handshake_mgr); } diff --git a/Sources/CgRPC/src/core/lib/channel/handshaker_registry.h b/Sources/CgRPC/src/core/lib/channel/handshaker_registry.h index a3b2ac1dc..82ad9c5b9 100644 --- a/Sources/CgRPC/src/core/lib/channel/handshaker_registry.h +++ b/Sources/CgRPC/src/core/lib/channel/handshaker_registry.h @@ -19,10 +19,11 @@ #ifndef GRPC_CORE_LIB_CHANNEL_HANDSHAKER_REGISTRY_H #define GRPC_CORE_LIB_CHANNEL_HANDSHAKER_REGISTRY_H +#include + #include #include "src/core/lib/channel/handshaker_factory.h" -#include "src/core/lib/iomgr/exec_ctx.h" typedef enum { HANDSHAKER_CLIENT = 0, @@ -31,7 +32,7 @@ typedef enum { } grpc_handshaker_type; void grpc_handshaker_factory_registry_init(); -void grpc_handshaker_factory_registry_shutdown(grpc_exec_ctx* exec_ctx); +void grpc_handshaker_factory_registry_shutdown(); /// Registers a new handshaker factory. Takes ownership. /// If \a at_start is true, the new handshaker will be at the beginning of @@ -40,8 +41,7 @@ void grpc_handshaker_factory_register(bool at_start, grpc_handshaker_type handshaker_type, grpc_handshaker_factory* factory); -void grpc_handshakers_add(grpc_exec_ctx* exec_ctx, - grpc_handshaker_type handshaker_type, +void grpc_handshakers_add(grpc_handshaker_type handshaker_type, const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr); diff --git a/Sources/CgRPC/src/core/lib/channel/status_util.cc b/Sources/CgRPC/src/core/lib/channel/status_util.cc new file mode 100644 index 000000000..563db4084 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/channel/status_util.cc @@ -0,0 +1,100 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/channel/status_util.h" + +#include "src/core/lib/gpr/useful.h" + +typedef struct { + const char* str; + grpc_status_code status; +} status_string_entry; + +static const status_string_entry g_status_string_entries[] = { + {"OK", GRPC_STATUS_OK}, + {"CANCELLED", GRPC_STATUS_CANCELLED}, + {"UNKNOWN", GRPC_STATUS_UNKNOWN}, + {"INVALID_ARGUMENT", GRPC_STATUS_INVALID_ARGUMENT}, + {"DEADLINE_EXCEEDED", GRPC_STATUS_DEADLINE_EXCEEDED}, + {"NOT_FOUND", GRPC_STATUS_NOT_FOUND}, + {"ALREADY_EXISTS", GRPC_STATUS_ALREADY_EXISTS}, + {"PERMISSION_DENIED", GRPC_STATUS_PERMISSION_DENIED}, + {"UNAUTHENTICATED", GRPC_STATUS_UNAUTHENTICATED}, + {"RESOURCE_EXHAUSTED", GRPC_STATUS_RESOURCE_EXHAUSTED}, + {"FAILED_PRECONDITION", GRPC_STATUS_FAILED_PRECONDITION}, + {"ABORTED", GRPC_STATUS_ABORTED}, + {"OUT_OF_RANGE", GRPC_STATUS_OUT_OF_RANGE}, + {"UNIMPLEMENTED", GRPC_STATUS_UNIMPLEMENTED}, + {"INTERNAL", GRPC_STATUS_INTERNAL}, + {"UNAVAILABLE", GRPC_STATUS_UNAVAILABLE}, + {"DATA_LOSS", GRPC_STATUS_DATA_LOSS}, +}; + +bool grpc_status_code_from_string(const char* status_str, + grpc_status_code* status) { + for (size_t i = 0; i < GPR_ARRAY_SIZE(g_status_string_entries); ++i) { + if (strcmp(status_str, g_status_string_entries[i].str) == 0) { + *status = g_status_string_entries[i].status; + return true; + } + } + return false; +} + +const char* grpc_status_code_to_string(grpc_status_code status) { + switch (status) { + case GRPC_STATUS_OK: + return "OK"; + case GRPC_STATUS_CANCELLED: + return "CANCELLED"; + case GRPC_STATUS_UNKNOWN: + return "UNKNOWN"; + case GRPC_STATUS_INVALID_ARGUMENT: + return "INVALID_ARGUMENT"; + case GRPC_STATUS_DEADLINE_EXCEEDED: + return "DEADLINE_EXCEEDED"; + case GRPC_STATUS_NOT_FOUND: + return "NOT_FOUND"; + case GRPC_STATUS_ALREADY_EXISTS: + return "ALREADY_EXISTS"; + case GRPC_STATUS_PERMISSION_DENIED: + return "PERMISSION_DENIED"; + case GRPC_STATUS_UNAUTHENTICATED: + return "UNAUTHENTICATED"; + case GRPC_STATUS_RESOURCE_EXHAUSTED: + return "RESOURCE_EXHAUSTED"; + case GRPC_STATUS_FAILED_PRECONDITION: + return "FAILED_PRECONDITION"; + case GRPC_STATUS_ABORTED: + return "ABORTED"; + case GRPC_STATUS_OUT_OF_RANGE: + return "OUT_OF_RANGE"; + case GRPC_STATUS_UNIMPLEMENTED: + return "UNIMPLEMENTED"; + case GRPC_STATUS_INTERNAL: + return "INTERNAL"; + case GRPC_STATUS_UNAVAILABLE: + return "UNAVAILABLE"; + case GRPC_STATUS_DATA_LOSS: + return "DATA_LOSS"; + default: + return "UNKNOWN"; + } +} diff --git a/Sources/CgRPC/src/core/lib/channel/status_util.h b/Sources/CgRPC/src/core/lib/channel/status_util.h new file mode 100644 index 000000000..5409de6b3 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/channel/status_util.h @@ -0,0 +1,58 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_LIB_CHANNEL_STATUS_UTIL_H +#define GRPC_CORE_LIB_CHANNEL_STATUS_UTIL_H + +#include + +#include + +#include +#include + +/// If \a status_str is a valid status string, sets \a status to the +/// corresponding status value and returns true. +bool grpc_status_code_from_string(const char* status_str, + grpc_status_code* status); + +/// Returns the string form of \a status, or "UNKNOWN" if invalid. +const char* grpc_status_code_to_string(grpc_status_code status); + +namespace grpc_core { +namespace internal { + +/// A set of grpc_status_code values. +class StatusCodeSet { + public: + bool Empty() const { return status_code_mask_ == 0; } + + void Add(grpc_status_code status) { status_code_mask_ |= (1 << status); } + + bool Contains(grpc_status_code status) const { + return status_code_mask_ & (1 << status); + } + + private: + int status_code_mask_ = 0; // A bitfield of status codes in the set. +}; + +} // namespace internal +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_CHANNEL_STATUS_UTIL_H */ diff --git a/Sources/CgRPC/src/core/lib/compression/algorithm_metadata.h b/Sources/CgRPC/src/core/lib/compression/algorithm_metadata.h index 08feafc1b..1be79e59c 100644 --- a/Sources/CgRPC/src/core/lib/compression/algorithm_metadata.h +++ b/Sources/CgRPC/src/core/lib/compression/algorithm_metadata.h @@ -19,21 +19,30 @@ #ifndef GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H #define GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H +#include + #include +#include "src/core/lib/compression/compression_internal.h" #include "src/core/lib/transport/metadata.h" /** Return compression algorithm based metadata value */ grpc_slice grpc_compression_algorithm_slice( grpc_compression_algorithm algorithm); -/** Return stream compression algorithm based metadata value */ -grpc_slice grpc_stream_compression_algorithm_slice( - grpc_stream_compression_algorithm algorithm); +/** Find compression algorithm based on passed in mdstr - returns + * GRPC_COMPRESS_ALGORITHM_COUNT on failure */ +grpc_compression_algorithm grpc_compression_algorithm_from_slice( + grpc_slice str); -/** Return compression algorithm based metadata element (grpc-encoding: xxx) */ +/** Return compression algorithm based metadata element */ grpc_mdelem grpc_compression_encoding_mdelem( grpc_compression_algorithm algorithm); +/** Return message compression algorithm based metadata element (grpc-encoding: + * xxx) */ +grpc_mdelem grpc_message_compression_encoding_mdelem( + grpc_message_compression_algorithm algorithm); + /** Return stream compression algorithm based metadata element * (content-encoding: xxx) */ grpc_mdelem grpc_stream_compression_encoding_mdelem( @@ -41,8 +50,8 @@ grpc_mdelem grpc_stream_compression_encoding_mdelem( /** Find compression algorithm based on passed in mdstr - returns * GRPC_COMPRESS_ALGORITHM_COUNT on failure */ -grpc_compression_algorithm grpc_compression_algorithm_from_slice( - grpc_slice str); +grpc_message_compression_algorithm +grpc_message_compression_algorithm_from_slice(grpc_slice str); /** Find stream compression algorithm based on passed in mdstr - returns * GRPC_STREAM_COMPRESS_ALGORITHM_COUNT on failure */ diff --git a/Sources/CgRPC/src/core/lib/compression/compression.c b/Sources/CgRPC/src/core/lib/compression/compression.c deleted file mode 100644 index 1cfac2312..000000000 --- a/Sources/CgRPC/src/core/lib/compression/compression.c +++ /dev/null @@ -1,283 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include -#include - -#include -#include - -#include "src/core/lib/compression/algorithm_metadata.h" -#include "src/core/lib/surface/api_trace.h" -#include "src/core/lib/transport/static_metadata.h" - -int grpc_compression_algorithm_parse(grpc_slice name, - grpc_compression_algorithm *algorithm) { - /* we use strncmp not only because it's safer (even though in this case it - * doesn't matter, given that we are comparing against string literals, but - * because this way we needn't have "name" nil-terminated (useful for slice - * data, for example) */ - if (grpc_slice_eq(name, GRPC_MDSTR_IDENTITY)) { - *algorithm = GRPC_COMPRESS_NONE; - return 1; - } else if (grpc_slice_eq(name, GRPC_MDSTR_GZIP)) { - *algorithm = GRPC_COMPRESS_GZIP; - return 1; - } else if (grpc_slice_eq(name, GRPC_MDSTR_DEFLATE)) { - *algorithm = GRPC_COMPRESS_DEFLATE; - return 1; - } else { - return 0; - } -} - -int grpc_stream_compression_algorithm_parse( - grpc_slice name, grpc_stream_compression_algorithm *algorithm) { - if (grpc_slice_eq(name, GRPC_MDSTR_IDENTITY)) { - *algorithm = GRPC_STREAM_COMPRESS_NONE; - return 1; - } else if (grpc_slice_eq(name, GRPC_MDSTR_GZIP)) { - *algorithm = GRPC_STREAM_COMPRESS_GZIP; - return 1; - } else { - return 0; - } -} - -int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm, - const char **name) { - GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2, - ((int)algorithm, name)); - switch (algorithm) { - case GRPC_COMPRESS_NONE: - *name = "identity"; - return 1; - case GRPC_COMPRESS_DEFLATE: - *name = "deflate"; - return 1; - case GRPC_COMPRESS_GZIP: - *name = "gzip"; - return 1; - case GRPC_COMPRESS_ALGORITHMS_COUNT: - return 0; - } - return 0; -} - -int grpc_stream_compression_algorithm_name( - grpc_stream_compression_algorithm algorithm, const char **name) { - GRPC_API_TRACE( - "grpc_stream_compression_algorithm_parse(algorithm=%d, name=%p)", 2, - ((int)algorithm, name)); - switch (algorithm) { - case GRPC_STREAM_COMPRESS_NONE: - *name = "identity"; - return 1; - case GRPC_STREAM_COMPRESS_GZIP: - *name = "gzip"; - return 1; - case GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT: - return 0; - } - return 0; -} - -grpc_compression_algorithm grpc_compression_algorithm_from_slice( - grpc_slice str) { - if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_COMPRESS_NONE; - if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE)) return GRPC_COMPRESS_DEFLATE; - if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_COMPRESS_GZIP; - return GRPC_COMPRESS_ALGORITHMS_COUNT; -} - -grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice( - grpc_slice str) { - if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_STREAM_COMPRESS_NONE; - if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_STREAM_COMPRESS_GZIP; - return GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT; -} - -grpc_slice grpc_compression_algorithm_slice( - grpc_compression_algorithm algorithm) { - switch (algorithm) { - case 
GRPC_COMPRESS_NONE: - return GRPC_MDSTR_IDENTITY; - case GRPC_COMPRESS_DEFLATE: - return GRPC_MDSTR_DEFLATE; - case GRPC_COMPRESS_GZIP: - return GRPC_MDSTR_GZIP; - case GRPC_COMPRESS_ALGORITHMS_COUNT: - return grpc_empty_slice(); - } - return grpc_empty_slice(); -} - -grpc_slice grpc_stream_compression_algorithm_slice( - grpc_stream_compression_algorithm algorithm) { - switch (algorithm) { - case GRPC_STREAM_COMPRESS_NONE: - return GRPC_MDSTR_IDENTITY; - case GRPC_STREAM_COMPRESS_GZIP: - return GRPC_MDSTR_GZIP; - case GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT: - return grpc_empty_slice(); - } - return grpc_empty_slice(); -} - -grpc_mdelem grpc_compression_encoding_mdelem( - grpc_compression_algorithm algorithm) { - switch (algorithm) { - case GRPC_COMPRESS_NONE: - return GRPC_MDELEM_GRPC_ENCODING_IDENTITY; - case GRPC_COMPRESS_DEFLATE: - return GRPC_MDELEM_GRPC_ENCODING_DEFLATE; - case GRPC_COMPRESS_GZIP: - return GRPC_MDELEM_GRPC_ENCODING_GZIP; - default: - break; - } - return GRPC_MDNULL; -} - -grpc_mdelem grpc_stream_compression_encoding_mdelem( - grpc_stream_compression_algorithm algorithm) { - switch (algorithm) { - case GRPC_STREAM_COMPRESS_NONE: - return GRPC_MDELEM_CONTENT_ENCODING_IDENTITY; - case GRPC_STREAM_COMPRESS_GZIP: - return GRPC_MDELEM_CONTENT_ENCODING_GZIP; - default: - break; - } - return GRPC_MDNULL; -} - -void grpc_compression_options_init(grpc_compression_options *opts) { - memset(opts, 0, sizeof(*opts)); - /* all enabled by default */ - opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; - opts->enabled_stream_compression_algorithms_bitset = - (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1; -} - -void grpc_compression_options_enable_algorithm( - grpc_compression_options *opts, grpc_compression_algorithm algorithm) { - GPR_BITSET(&opts->enabled_algorithms_bitset, algorithm); -} - -void grpc_compression_options_disable_algorithm( - grpc_compression_options *opts, grpc_compression_algorithm algorithm) { - GPR_BITCLEAR(&opts->enabled_algorithms_bitset, algorithm); -} - -int grpc_compression_options_is_algorithm_enabled( - const grpc_compression_options *opts, - grpc_compression_algorithm algorithm) { - return GPR_BITGET(opts->enabled_algorithms_bitset, algorithm); -} - -int grpc_compression_options_is_stream_compression_algorithm_enabled( - const grpc_compression_options *opts, - grpc_stream_compression_algorithm algorithm) { - return GPR_BITGET(opts->enabled_stream_compression_algorithms_bitset, - algorithm); -} - -/* TODO(dgq): Add the ability to specify parameters to the individual - * compression algorithms */ -grpc_compression_algorithm grpc_compression_algorithm_for_level( - grpc_compression_level level, uint32_t accepted_encodings) { - GRPC_API_TRACE("grpc_compression_algorithm_for_level(level=%d)", 1, - ((int)level)); - if (level > GRPC_COMPRESS_LEVEL_HIGH) { - gpr_log(GPR_ERROR, "Unknown compression level %d.", (int)level); - abort(); - } - - const size_t num_supported = - GPR_BITCOUNT(accepted_encodings) - 1; /* discard NONE */ - if (level == GRPC_COMPRESS_LEVEL_NONE || num_supported == 0) { - return GRPC_COMPRESS_NONE; - } - - GPR_ASSERT(level > 0); - - /* Establish a "ranking" or compression algorithms in increasing order of - * compression. - * This is simplistic and we will probably want to introduce other dimensions - * in the future (cpu/memory cost, etc). 
*/ - const grpc_compression_algorithm algos_ranking[] = {GRPC_COMPRESS_GZIP, - GRPC_COMPRESS_DEFLATE}; - - /* intersect algos_ranking with the supported ones keeping the ranked order */ - grpc_compression_algorithm - sorted_supported_algos[GRPC_COMPRESS_ALGORITHMS_COUNT]; - size_t algos_supported_idx = 0; - for (size_t i = 0; i < GPR_ARRAY_SIZE(algos_ranking); i++) { - const grpc_compression_algorithm alg = algos_ranking[i]; - for (size_t j = 0; j < num_supported; j++) { - if (GPR_BITGET(accepted_encodings, alg) == 1) { - /* if \a alg in supported */ - sorted_supported_algos[algos_supported_idx++] = alg; - break; - } - } - if (algos_supported_idx == num_supported) break; - } - - switch (level) { - case GRPC_COMPRESS_LEVEL_NONE: - abort(); /* should have been handled already */ - case GRPC_COMPRESS_LEVEL_LOW: - return sorted_supported_algos[0]; - case GRPC_COMPRESS_LEVEL_MED: - return sorted_supported_algos[num_supported / 2]; - case GRPC_COMPRESS_LEVEL_HIGH: - return sorted_supported_algos[num_supported - 1]; - default: - abort(); - }; -} - -GRPCAPI grpc_stream_compression_algorithm -grpc_stream_compression_algorithm_for_level( - grpc_stream_compression_level level, uint32_t accepted_stream_encodings) { - GRPC_API_TRACE("grpc_stream_compression_algorithm_for_level(level=%d)", 1, - ((int)level)); - if (level > GRPC_STREAM_COMPRESS_LEVEL_HIGH) { - gpr_log(GPR_ERROR, "Unknown compression level %d.", (int)level); - abort(); - } - - switch (level) { - case GRPC_STREAM_COMPRESS_LEVEL_NONE: - return GRPC_STREAM_COMPRESS_NONE; - case GRPC_STREAM_COMPRESS_LEVEL_LOW: - case GRPC_STREAM_COMPRESS_LEVEL_MED: - case GRPC_STREAM_COMPRESS_LEVEL_HIGH: - if (GPR_BITGET(accepted_stream_encodings, GRPC_STREAM_COMPRESS_GZIP) == - 1) { - return GRPC_STREAM_COMPRESS_GZIP; - } else { - return GRPC_STREAM_COMPRESS_NONE; - } - default: - abort(); - } -} diff --git a/Sources/CgRPC/src/core/lib/compression/compression.cc b/Sources/CgRPC/src/core/lib/compression/compression.cc new file mode 100644 index 000000000..48717541a --- /dev/null +++ b/Sources/CgRPC/src/core/lib/compression/compression.cc @@ -0,0 +1,174 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include +#include + +#include + +#include "src/core/lib/compression/algorithm_metadata.h" +#include "src/core/lib/compression/compression_internal.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/surface/api_trace.h" +#include "src/core/lib/transport/static_metadata.h" + +int grpc_compression_algorithm_is_message( + grpc_compression_algorithm algorithm) { + return (algorithm >= GRPC_COMPRESS_DEFLATE && algorithm <= GRPC_COMPRESS_GZIP) + ? 1 + : 0; +} + +int grpc_compression_algorithm_is_stream(grpc_compression_algorithm algorithm) { + return (algorithm == GRPC_COMPRESS_STREAM_GZIP) ? 
1 : 0; +} + +int grpc_compression_algorithm_parse(grpc_slice name, + grpc_compression_algorithm* algorithm) { + if (grpc_slice_eq(name, GRPC_MDSTR_IDENTITY)) { + *algorithm = GRPC_COMPRESS_NONE; + return 1; + } else if (grpc_slice_eq(name, GRPC_MDSTR_DEFLATE)) { + *algorithm = GRPC_COMPRESS_DEFLATE; + return 1; + } else if (grpc_slice_eq(name, GRPC_MDSTR_GZIP)) { + *algorithm = GRPC_COMPRESS_GZIP; + return 1; + } else if (grpc_slice_eq(name, GRPC_MDSTR_STREAM_SLASH_GZIP)) { + *algorithm = GRPC_COMPRESS_STREAM_GZIP; + return 1; + } else { + return 0; + } + return 0; +} + +int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm, + const char** name) { + GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2, + ((int)algorithm, name)); + switch (algorithm) { + case GRPC_COMPRESS_NONE: + *name = "identity"; + return 1; + case GRPC_COMPRESS_DEFLATE: + *name = "deflate"; + return 1; + case GRPC_COMPRESS_GZIP: + *name = "gzip"; + return 1; + case GRPC_COMPRESS_STREAM_GZIP: + *name = "stream/gzip"; + return 1; + case GRPC_COMPRESS_ALGORITHMS_COUNT: + return 0; + } + return 0; +} + +grpc_compression_algorithm grpc_compression_algorithm_for_level( + grpc_compression_level level, uint32_t accepted_encodings) { + grpc_compression_algorithm algo; + if (level == GRPC_COMPRESS_LEVEL_NONE) { + return GRPC_COMPRESS_NONE; + } else if (level <= GRPC_COMPRESS_LEVEL_HIGH) { + // TODO(mxyan): Design algorithm to select from all algorithms, including + // stream compression algorithm + if (!grpc_compression_algorithm_from_message_stream_compression_algorithm( + &algo, + grpc_message_compression_algorithm_for_level( + level, + grpc_compression_bitset_to_message_bitset(accepted_encodings)), + static_cast(0))) { + gpr_log(GPR_ERROR, "Parse compression level error"); + return GRPC_COMPRESS_NONE; + } + return algo; + } else { + gpr_log(GPR_ERROR, "Unknown compression level: %d", level); + return GRPC_COMPRESS_NONE; + } +} + +void grpc_compression_options_init(grpc_compression_options* opts) { + memset(opts, 0, sizeof(*opts)); + /* all enabled by default */ + opts->enabled_algorithms_bitset = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; +} + +void grpc_compression_options_enable_algorithm( + grpc_compression_options* opts, grpc_compression_algorithm algorithm) { + GPR_BITSET(&opts->enabled_algorithms_bitset, algorithm); +} + +void grpc_compression_options_disable_algorithm( + grpc_compression_options* opts, grpc_compression_algorithm algorithm) { + GPR_BITCLEAR(&opts->enabled_algorithms_bitset, algorithm); +} + +int grpc_compression_options_is_algorithm_enabled( + const grpc_compression_options* opts, + grpc_compression_algorithm algorithm) { + return GPR_BITGET(opts->enabled_algorithms_bitset, algorithm); +} + +grpc_slice grpc_compression_algorithm_slice( + grpc_compression_algorithm algorithm) { + switch (algorithm) { + case GRPC_COMPRESS_NONE: + return GRPC_MDSTR_IDENTITY; + case GRPC_COMPRESS_DEFLATE: + return GRPC_MDSTR_DEFLATE; + case GRPC_COMPRESS_GZIP: + return GRPC_MDSTR_GZIP; + case GRPC_COMPRESS_STREAM_GZIP: + return GRPC_MDSTR_STREAM_SLASH_GZIP; + case GRPC_COMPRESS_ALGORITHMS_COUNT: + return grpc_empty_slice(); + } + return grpc_empty_slice(); +} + +grpc_compression_algorithm grpc_compression_algorithm_from_slice( + grpc_slice str) { + if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_COMPRESS_NONE; + if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE)) return GRPC_COMPRESS_DEFLATE; + if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_COMPRESS_GZIP; + if 
(grpc_slice_eq(str, GRPC_MDSTR_STREAM_SLASH_GZIP)) + return GRPC_COMPRESS_STREAM_GZIP; + return GRPC_COMPRESS_ALGORITHMS_COUNT; +} + +grpc_mdelem grpc_compression_encoding_mdelem( + grpc_compression_algorithm algorithm) { + switch (algorithm) { + case GRPC_COMPRESS_NONE: + return GRPC_MDELEM_GRPC_ENCODING_IDENTITY; + case GRPC_COMPRESS_DEFLATE: + return GRPC_MDELEM_GRPC_ENCODING_DEFLATE; + case GRPC_COMPRESS_GZIP: + return GRPC_MDELEM_GRPC_ENCODING_GZIP; + case GRPC_COMPRESS_STREAM_GZIP: + return GRPC_MDELEM_GRPC_ENCODING_GZIP; + default: + break; + } + return GRPC_MDNULL; +} diff --git a/Sources/CgRPC/src/core/lib/compression/compression_internal.cc b/Sources/CgRPC/src/core/lib/compression/compression_internal.cc new file mode 100644 index 000000000..538514caf --- /dev/null +++ b/Sources/CgRPC/src/core/lib/compression/compression_internal.cc @@ -0,0 +1,276 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include +#include + +#include + +#include "src/core/lib/compression/algorithm_metadata.h" +#include "src/core/lib/compression/compression_internal.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/surface/api_trace.h" +#include "src/core/lib/transport/static_metadata.h" + +/* Interfaces related to MD */ + +grpc_message_compression_algorithm +grpc_message_compression_algorithm_from_slice(grpc_slice str) { + if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) + return GRPC_MESSAGE_COMPRESS_NONE; + if (grpc_slice_eq(str, GRPC_MDSTR_DEFLATE)) + return GRPC_MESSAGE_COMPRESS_DEFLATE; + if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_MESSAGE_COMPRESS_GZIP; + return GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT; +} + +grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice( + grpc_slice str) { + if (grpc_slice_eq(str, GRPC_MDSTR_IDENTITY)) return GRPC_STREAM_COMPRESS_NONE; + if (grpc_slice_eq(str, GRPC_MDSTR_GZIP)) return GRPC_STREAM_COMPRESS_GZIP; + return GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT; +} + +grpc_mdelem grpc_message_compression_encoding_mdelem( + grpc_message_compression_algorithm algorithm) { + switch (algorithm) { + case GRPC_MESSAGE_COMPRESS_NONE: + return GRPC_MDELEM_GRPC_ENCODING_IDENTITY; + case GRPC_MESSAGE_COMPRESS_DEFLATE: + return GRPC_MDELEM_GRPC_ENCODING_DEFLATE; + case GRPC_MESSAGE_COMPRESS_GZIP: + return GRPC_MDELEM_GRPC_ENCODING_GZIP; + default: + break; + } + return GRPC_MDNULL; +} + +grpc_mdelem grpc_stream_compression_encoding_mdelem( + grpc_stream_compression_algorithm algorithm) { + switch (algorithm) { + case GRPC_STREAM_COMPRESS_NONE: + return GRPC_MDELEM_CONTENT_ENCODING_IDENTITY; + case GRPC_STREAM_COMPRESS_GZIP: + return GRPC_MDELEM_CONTENT_ENCODING_GZIP; + default: + break; + } + return GRPC_MDNULL; +} + +/* Interfaces performing transformation between compression algorithms and + * levels. 
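// Illustrative sketch, not part of the vendored sources: the reworked public
// API above folds message algorithms ("deflate", "gzip") and the stream
// algorithm ("stream/gzip") into the single grpc_compression_algorithm enum,
// and parse/name round-trip through the same strings. compression_example()
// is a hypothetical caller; the public headers <grpc/compression.h> and
// <grpc/slice.h> are assumed.
#include <grpc/compression.h>
#include <grpc/slice.h>

static void compression_example() {
  grpc_compression_algorithm alg;
  grpc_slice name = grpc_slice_from_static_string("stream/gzip");
  if (grpc_compression_algorithm_parse(name, &alg)) {
    // alg == GRPC_COMPRESS_STREAM_GZIP
    const char* printable = nullptr;
    grpc_compression_algorithm_name(alg, &printable);  // "stream/gzip"
  }
}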
*/ +grpc_message_compression_algorithm +grpc_compression_algorithm_to_message_compression_algorithm( + grpc_compression_algorithm algo) { + switch (algo) { + case GRPC_COMPRESS_DEFLATE: + return GRPC_MESSAGE_COMPRESS_DEFLATE; + case GRPC_COMPRESS_GZIP: + return GRPC_MESSAGE_COMPRESS_GZIP; + default: + return GRPC_MESSAGE_COMPRESS_NONE; + } +} + +grpc_stream_compression_algorithm +grpc_compression_algorithm_to_stream_compression_algorithm( + grpc_compression_algorithm algo) { + switch (algo) { + case GRPC_COMPRESS_STREAM_GZIP: + return GRPC_STREAM_COMPRESS_GZIP; + default: + return GRPC_STREAM_COMPRESS_NONE; + } +} + +uint32_t grpc_compression_bitset_to_message_bitset(uint32_t bitset) { + return bitset & ((1u << GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT) - 1); +} + +uint32_t grpc_compression_bitset_to_stream_bitset(uint32_t bitset) { + uint32_t identity = (bitset & 1u); + uint32_t other_bits = + (bitset >> (GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT - 1)) & + ((1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 2); + return identity | other_bits; +} + +uint32_t grpc_compression_bitset_from_message_stream_compression_bitset( + uint32_t message_bitset, uint32_t stream_bitset) { + uint32_t offset_stream_bitset = + (stream_bitset & 1u) | + ((stream_bitset & (~1u)) << (GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT - 1)); + return message_bitset | offset_stream_bitset; +} + +int grpc_compression_algorithm_from_message_stream_compression_algorithm( + grpc_compression_algorithm* algorithm, + grpc_message_compression_algorithm message_algorithm, + grpc_stream_compression_algorithm stream_algorithm) { + if (message_algorithm != GRPC_MESSAGE_COMPRESS_NONE && + stream_algorithm != GRPC_STREAM_COMPRESS_NONE) { + *algorithm = GRPC_COMPRESS_NONE; + return 0; + } + if (message_algorithm == GRPC_MESSAGE_COMPRESS_NONE) { + switch (stream_algorithm) { + case GRPC_STREAM_COMPRESS_NONE: + *algorithm = GRPC_COMPRESS_NONE; + return 1; + case GRPC_STREAM_COMPRESS_GZIP: + *algorithm = GRPC_COMPRESS_STREAM_GZIP; + return 1; + default: + *algorithm = GRPC_COMPRESS_NONE; + return 0; + } + } else { + switch (message_algorithm) { + case GRPC_MESSAGE_COMPRESS_NONE: + *algorithm = GRPC_COMPRESS_NONE; + return 1; + case GRPC_MESSAGE_COMPRESS_DEFLATE: + *algorithm = GRPC_COMPRESS_DEFLATE; + return 1; + case GRPC_MESSAGE_COMPRESS_GZIP: + *algorithm = GRPC_COMPRESS_GZIP; + return 1; + default: + *algorithm = GRPC_COMPRESS_NONE; + return 0; + } + } + return 0; +} + +/* Interfaces for message compression. 
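// Worked example, not part of the vendored sources, of the bitset packing
// implemented above. It assumes the combined public enum is numbered
// NONE=0, DEFLATE=1, GZIP=2, STREAM_GZIP=3, so the combined bitset keeps
// stream gzip in bit 3 while the stream-only bitset keeps it in bit 1.
// bitset_packing_example() is a hypothetical caller.
#include <assert.h>
#include <stdint.h>

#include <grpc/compression.h>

#include "src/core/lib/compression/compression_internal.h"

static void bitset_packing_example() {
  // identity + gzip + stream/gzip enabled in the combined representation.
  const uint32_t combined = (1u << GRPC_COMPRESS_NONE) |
                            (1u << GRPC_COMPRESS_GZIP) |
                            (1u << GRPC_COMPRESS_STREAM_GZIP);  // 0b1101
  const uint32_t msg = grpc_compression_bitset_to_message_bitset(combined);
  const uint32_t strm = grpc_compression_bitset_to_stream_bitset(combined);
  assert(msg == ((1u << GRPC_MESSAGE_COMPRESS_NONE) |
                 (1u << GRPC_MESSAGE_COMPRESS_GZIP)));  // 0b101
  assert(strm == ((1u << GRPC_STREAM_COMPRESS_NONE) |
                  (1u << GRPC_STREAM_COMPRESS_GZIP)));  // 0b011
  // Splitting and recombining is lossless.
  assert(grpc_compression_bitset_from_message_stream_compression_bitset(
             msg, strm) == combined);
}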
*/ + +int grpc_message_compression_algorithm_name( + grpc_message_compression_algorithm algorithm, const char** name) { + GRPC_API_TRACE( + "grpc_message_compression_algorithm_parse(algorithm=%d, name=%p)", 2, + ((int)algorithm, name)); + switch (algorithm) { + case GRPC_MESSAGE_COMPRESS_NONE: + *name = "identity"; + return 1; + case GRPC_MESSAGE_COMPRESS_DEFLATE: + *name = "deflate"; + return 1; + case GRPC_MESSAGE_COMPRESS_GZIP: + *name = "gzip"; + return 1; + case GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT: + return 0; + } + return 0; +} + +/* TODO(dgq): Add the ability to specify parameters to the individual + * compression algorithms */ +grpc_message_compression_algorithm grpc_message_compression_algorithm_for_level( + grpc_compression_level level, uint32_t accepted_encodings) { + GRPC_API_TRACE("grpc_message_compression_algorithm_for_level(level=%d)", 1, + ((int)level)); + if (level > GRPC_COMPRESS_LEVEL_HIGH) { + gpr_log(GPR_ERROR, "Unknown message compression level %d.", + static_cast(level)); + abort(); + } + + const size_t num_supported = + GPR_BITCOUNT(accepted_encodings) - 1; /* discard NONE */ + if (level == GRPC_COMPRESS_LEVEL_NONE || num_supported == 0) { + return GRPC_MESSAGE_COMPRESS_NONE; + } + + GPR_ASSERT(level > 0); + + /* Establish a "ranking" or compression algorithms in increasing order of + * compression. + * This is simplistic and we will probably want to introduce other dimensions + * in the future (cpu/memory cost, etc). */ + const grpc_message_compression_algorithm algos_ranking[] = { + GRPC_MESSAGE_COMPRESS_GZIP, GRPC_MESSAGE_COMPRESS_DEFLATE}; + + /* intersect algos_ranking with the supported ones keeping the ranked order */ + grpc_message_compression_algorithm + sorted_supported_algos[GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT]; + size_t algos_supported_idx = 0; + for (size_t i = 0; i < GPR_ARRAY_SIZE(algos_ranking); i++) { + const grpc_message_compression_algorithm alg = algos_ranking[i]; + for (size_t j = 0; j < num_supported; j++) { + if (GPR_BITGET(accepted_encodings, alg) == 1) { + /* if \a alg in supported */ + sorted_supported_algos[algos_supported_idx++] = alg; + break; + } + } + if (algos_supported_idx == num_supported) break; + } + + switch (level) { + case GRPC_COMPRESS_LEVEL_NONE: + abort(); /* should have been handled already */ + case GRPC_COMPRESS_LEVEL_LOW: + return sorted_supported_algos[0]; + case GRPC_COMPRESS_LEVEL_MED: + return sorted_supported_algos[num_supported / 2]; + case GRPC_COMPRESS_LEVEL_HIGH: + return sorted_supported_algos[num_supported - 1]; + default: + abort(); + }; +} + +int grpc_message_compression_algorithm_parse( + grpc_slice value, grpc_message_compression_algorithm* algorithm) { + if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) { + *algorithm = GRPC_MESSAGE_COMPRESS_NONE; + return 1; + } else if (grpc_slice_eq(value, GRPC_MDSTR_DEFLATE)) { + *algorithm = GRPC_MESSAGE_COMPRESS_DEFLATE; + return 1; + } else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) { + *algorithm = GRPC_MESSAGE_COMPRESS_GZIP; + return 1; + } else { + return 0; + } + return 0; +} + +/* Interfaces for stream compression. 
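// Illustrative sketch, not part of the vendored sources: how the
// level-to-algorithm mapping above behaves. With identity, deflate and gzip
// all accepted, the ranking {GZIP, DEFLATE} (increasing compression) makes
// LOW select gzip while MED and HIGH select deflate. level_example() is a
// hypothetical caller.
#include <stdint.h>

#include <grpc/compression.h>

#include "src/core/lib/compression/compression_internal.h"

static void level_example() {
  const uint32_t accepted = (1u << GRPC_MESSAGE_COMPRESS_NONE) |
                            (1u << GRPC_MESSAGE_COMPRESS_DEFLATE) |
                            (1u << GRPC_MESSAGE_COMPRESS_GZIP);
  grpc_message_compression_algorithm low =
      grpc_message_compression_algorithm_for_level(GRPC_COMPRESS_LEVEL_LOW,
                                                   accepted);  // GZIP
  grpc_message_compression_algorithm high =
      grpc_message_compression_algorithm_for_level(GRPC_COMPRESS_LEVEL_HIGH,
                                                   accepted);  // DEFLATE
  (void)low;
  (void)high;
}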
*/ + +int grpc_stream_compression_algorithm_parse( + grpc_slice value, grpc_stream_compression_algorithm* algorithm) { + if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) { + *algorithm = GRPC_STREAM_COMPRESS_NONE; + return 1; + } else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) { + *algorithm = GRPC_STREAM_COMPRESS_GZIP; + return 1; + } else { + return 0; + } + return 0; +} diff --git a/Sources/CgRPC/src/core/lib/compression/compression_internal.h b/Sources/CgRPC/src/core/lib/compression/compression_internal.h new file mode 100644 index 000000000..da007368b --- /dev/null +++ b/Sources/CgRPC/src/core/lib/compression/compression_internal.h @@ -0,0 +1,88 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_COMPRESSION_COMPRESSION_INTERNAL_H +#define GRPC_CORE_LIB_COMPRESSION_COMPRESSION_INTERNAL_H + +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + GRPC_MESSAGE_COMPRESS_NONE = 0, + GRPC_MESSAGE_COMPRESS_DEFLATE, + GRPC_MESSAGE_COMPRESS_GZIP, + /* TODO(ctiller): snappy */ + GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT +} grpc_message_compression_algorithm; + +/** Stream compresssion algorithms supported by gRPC */ +typedef enum { + GRPC_STREAM_COMPRESS_NONE = 0, + GRPC_STREAM_COMPRESS_GZIP, + GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT +} grpc_stream_compression_algorithm; + +/* Interfaces performing transformation between compression algorithms and + * levels. */ + +grpc_message_compression_algorithm +grpc_compression_algorithm_to_message_compression_algorithm( + grpc_compression_algorithm algo); + +grpc_stream_compression_algorithm +grpc_compression_algorithm_to_stream_compression_algorithm( + grpc_compression_algorithm algo); + +uint32_t grpc_compression_bitset_to_message_bitset(uint32_t bitset); + +uint32_t grpc_compression_bitset_to_stream_bitset(uint32_t bitset); + +uint32_t grpc_compression_bitset_from_message_stream_compression_bitset( + uint32_t message_bitset, uint32_t stream_bitset); + +int grpc_compression_algorithm_from_message_stream_compression_algorithm( + grpc_compression_algorithm* algorithm, + grpc_message_compression_algorithm message_algorithm, + grpc_stream_compression_algorithm stream_algorithm); + +/* Interfaces for message compression. */ + +int grpc_message_compression_algorithm_name( + grpc_message_compression_algorithm algorithm, const char** name); + +grpc_message_compression_algorithm grpc_message_compression_algorithm_for_level( + grpc_compression_level level, uint32_t accepted_encodings); + +int grpc_message_compression_algorithm_parse( + grpc_slice value, grpc_message_compression_algorithm* algorithm); + +/* Interfaces for stream compression. 
*/ + +int grpc_stream_compression_algorithm_parse( + grpc_slice value, grpc_stream_compression_algorithm* algorithm); + +#ifdef __cplusplus +} +#endif + +#endif /* GRPC_CORE_LIB_COMPRESSION_COMPRESSION_INTERNAL_H */ diff --git a/Sources/CgRPC/src/core/lib/compression/message_compress.c b/Sources/CgRPC/src/core/lib/compression/message_compress.cc similarity index 69% rename from Sources/CgRPC/src/core/lib/compression/message_compress.c rename to Sources/CgRPC/src/core/lib/compression/message_compress.cc index c051e2886..e06454f87 100644 --- a/Sources/CgRPC/src/core/lib/compression/message_compress.c +++ b/Sources/CgRPC/src/core/lib/compression/message_compress.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/compression/message_compress.h" #include @@ -29,30 +31,30 @@ #define OUTPUT_BLOCK_SIZE 1024 -static int zlib_body(grpc_exec_ctx* exec_ctx, z_stream* zs, - grpc_slice_buffer* input, grpc_slice_buffer* output, +static int zlib_body(z_stream* zs, grpc_slice_buffer* input, + grpc_slice_buffer* output, int (*flate)(z_stream* zs, int flush)) { int r; int flush; size_t i; grpc_slice outbuf = GRPC_SLICE_MALLOC(OUTPUT_BLOCK_SIZE); - const uInt uint_max = ~(uInt)0; + const uInt uint_max = ~static_cast(0); GPR_ASSERT(GRPC_SLICE_LENGTH(outbuf) <= uint_max); - zs->avail_out = (uInt)GRPC_SLICE_LENGTH(outbuf); + zs->avail_out = static_cast GRPC_SLICE_LENGTH(outbuf); zs->next_out = GRPC_SLICE_START_PTR(outbuf); flush = Z_NO_FLUSH; for (i = 0; i < input->count; i++) { if (i == input->count - 1) flush = Z_FINISH; GPR_ASSERT(GRPC_SLICE_LENGTH(input->slices[i]) <= uint_max); - zs->avail_in = (uInt)GRPC_SLICE_LENGTH(input->slices[i]); + zs->avail_in = static_cast GRPC_SLICE_LENGTH(input->slices[i]); zs->next_in = GRPC_SLICE_START_PTR(input->slices[i]); do { if (zs->avail_out == 0) { grpc_slice_buffer_add_indexed(output, outbuf); outbuf = GRPC_SLICE_MALLOC(OUTPUT_BLOCK_SIZE); GPR_ASSERT(GRPC_SLICE_LENGTH(outbuf) <= uint_max); - zs->avail_out = (uInt)GRPC_SLICE_LENGTH(outbuf); + zs->avail_out = static_cast GRPC_SLICE_LENGTH(outbuf); zs->next_out = GRPC_SLICE_START_PTR(outbuf); } r = flate(zs, flush); @@ -74,7 +76,7 @@ static int zlib_body(grpc_exec_ctx* exec_ctx, z_stream* zs, return 1; error: - grpc_slice_unref_internal(exec_ctx, outbuf); + grpc_slice_unref_internal(outbuf); return 0; } @@ -84,8 +86,8 @@ static void* zalloc_gpr(void* opaque, unsigned int items, unsigned int size) { static void zfree_gpr(void* opaque, void* address) { gpr_free(address); } -static int zlib_compress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, - grpc_slice_buffer* output, int gzip) { +static int zlib_compress(grpc_slice_buffer* input, grpc_slice_buffer* output, + int gzip) { z_stream zs; int r; size_t i; @@ -97,11 +99,10 @@ static int zlib_compress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, r = deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 | (gzip ? 
16 : 0), 8, Z_DEFAULT_STRATEGY); GPR_ASSERT(r == Z_OK); - r = zlib_body(exec_ctx, &zs, input, output, deflate) && - output->length < input->length; + r = zlib_body(&zs, input, output, deflate) && output->length < input->length; if (!r) { for (i = count_before; i < output->count; i++) { - grpc_slice_unref_internal(exec_ctx, output->slices[i]); + grpc_slice_unref_internal(output->slices[i]); } output->count = count_before; output->length = length_before; @@ -110,8 +111,8 @@ static int zlib_compress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, return r; } -static int zlib_decompress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, - grpc_slice_buffer* output, int gzip) { +static int zlib_decompress(grpc_slice_buffer* input, grpc_slice_buffer* output, + int gzip) { z_stream zs; int r; size_t i; @@ -122,10 +123,10 @@ static int zlib_decompress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input, zs.zfree = zfree_gpr; r = inflateInit2(&zs, 15 | (gzip ? 16 : 0)); GPR_ASSERT(r == Z_OK); - r = zlib_body(exec_ctx, &zs, input, output, inflate); + r = zlib_body(&zs, input, output, inflate); if (!r) { for (i = count_before; i < output->count; i++) { - grpc_slice_unref_internal(exec_ctx, output->slices[i]); + grpc_slice_unref_internal(output->slices[i]); } output->count = count_before; output->length = length_before; @@ -142,46 +143,43 @@ static int copy(grpc_slice_buffer* input, grpc_slice_buffer* output) { return 1; } -static int compress_inner(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +static int compress_inner(grpc_message_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output) { switch (algorithm) { - case GRPC_COMPRESS_NONE: + case GRPC_MESSAGE_COMPRESS_NONE: /* the fallback path always needs to be send uncompressed: we simply rely on that here */ return 0; - case GRPC_COMPRESS_DEFLATE: - return zlib_compress(exec_ctx, input, output, 0); - case GRPC_COMPRESS_GZIP: - return zlib_compress(exec_ctx, input, output, 1); - case GRPC_COMPRESS_ALGORITHMS_COUNT: + case GRPC_MESSAGE_COMPRESS_DEFLATE: + return zlib_compress(input, output, 0); + case GRPC_MESSAGE_COMPRESS_GZIP: + return zlib_compress(input, output, 1); + case GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT: break; } gpr_log(GPR_ERROR, "invalid compression algorithm %d", algorithm); return 0; } -int grpc_msg_compress(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +int grpc_msg_compress(grpc_message_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output) { - if (!compress_inner(exec_ctx, algorithm, input, output)) { + if (!compress_inner(algorithm, input, output)) { copy(input, output); return 0; } return 1; } -int grpc_msg_decompress(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +int grpc_msg_decompress(grpc_message_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output) { switch (algorithm) { - case GRPC_COMPRESS_NONE: + case GRPC_MESSAGE_COMPRESS_NONE: return copy(input, output); - case GRPC_COMPRESS_DEFLATE: - return zlib_decompress(exec_ctx, input, output, 0); - case GRPC_COMPRESS_GZIP: - return zlib_decompress(exec_ctx, input, output, 1); - case GRPC_COMPRESS_ALGORITHMS_COUNT: + case GRPC_MESSAGE_COMPRESS_DEFLATE: + return zlib_decompress(input, output, 0); + case GRPC_MESSAGE_COMPRESS_GZIP: + return zlib_decompress(input, output, 1); + case GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT: break; } gpr_log(GPR_ERROR, "invalid compression algorithm %d", algorithm); diff --git 
a/Sources/CgRPC/src/core/lib/compression/message_compress.h b/Sources/CgRPC/src/core/lib/compression/message_compress.h index ca8ca37f8..91654e47e 100644 --- a/Sources/CgRPC/src/core/lib/compression/message_compress.h +++ b/Sources/CgRPC/src/core/lib/compression/message_compress.h @@ -19,21 +19,22 @@ #ifndef GRPC_CORE_LIB_COMPRESSION_MESSAGE_COMPRESS_H #define GRPC_CORE_LIB_COMPRESSION_MESSAGE_COMPRESS_H -#include +#include + #include +#include "src/core/lib/compression/compression_internal.h" + /* compress 'input' to 'output' using 'algorithm'. On success, appends compressed slices to output and returns 1. On failure, appends uncompressed slices to output and returns 0. */ -int grpc_msg_compress(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +int grpc_msg_compress(grpc_message_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output); /* decompress 'input' to 'output' using 'algorithm'. On success, appends slices to output and returns 1. On failure, output is unchanged, and returns 0. */ -int grpc_msg_decompress(grpc_exec_ctx* exec_ctx, - grpc_compression_algorithm algorithm, +int grpc_msg_decompress(grpc_message_compression_algorithm algorithm, grpc_slice_buffer* input, grpc_slice_buffer* output); #endif /* GRPC_CORE_LIB_COMPRESSION_MESSAGE_COMPRESS_H */ diff --git a/Sources/CgRPC/src/core/lib/compression/stream_compression.c b/Sources/CgRPC/src/core/lib/compression/stream_compression.cc similarity index 77% rename from Sources/CgRPC/src/core/lib/compression/stream_compression.c rename to Sources/CgRPC/src/core/lib/compression/stream_compression.cc index 411489f02..46cb3daf4 100644 --- a/Sources/CgRPC/src/core/lib/compression/stream_compression.c +++ b/Sources/CgRPC/src/core/lib/compression/stream_compression.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include "src/core/lib/compression/stream_compression.h" @@ -24,23 +26,23 @@ extern const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable; -bool grpc_stream_compress(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, +bool grpc_stream_compress(grpc_stream_compression_context* ctx, + grpc_slice_buffer* in, grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size, grpc_stream_compression_flush flush) { return ctx->vtable->compress(ctx, in, out, output_size, max_output_size, flush); } -bool grpc_stream_decompress(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, - bool *end_of_context) { +bool grpc_stream_decompress(grpc_stream_compression_context* ctx, + grpc_slice_buffer* in, grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size, + bool* end_of_context) { return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size, end_of_context); } -grpc_stream_compression_context *grpc_stream_compression_context_create( +grpc_stream_compression_context* grpc_stream_compression_context_create( grpc_stream_compression_method method) { switch (method) { case GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS: @@ -51,18 +53,18 @@ grpc_stream_compression_context *grpc_stream_compression_context_create( return grpc_stream_compression_gzip_vtable.context_create(method); default: gpr_log(GPR_ERROR, "Unknown stream compression method: %d", method); - return NULL; + return nullptr; } } void grpc_stream_compression_context_destroy( - grpc_stream_compression_context *ctx) { + 
grpc_stream_compression_context* ctx) { ctx->vtable->context_destroy(ctx); } int grpc_stream_compression_method_parse( grpc_slice value, bool is_compress, - grpc_stream_compression_method *method) { + grpc_stream_compression_method* method) { if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) { *method = is_compress ? GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS : GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS; diff --git a/Sources/CgRPC/src/core/lib/compression/stream_compression.h b/Sources/CgRPC/src/core/lib/compression/stream_compression.h index 6d073280f..c80f2f869 100644 --- a/Sources/CgRPC/src/core/lib/compression/stream_compression.h +++ b/Sources/CgRPC/src/core/lib/compression/stream_compression.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_H #define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_H +#include + #include #include @@ -30,7 +32,7 @@ typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable; /* Stream compression/decompression context */ typedef struct grpc_stream_compression_context { - const grpc_stream_compression_vtable *vtable; + const grpc_stream_compression_vtable* vtable; } grpc_stream_compression_context; typedef enum grpc_stream_compression_method { @@ -49,16 +51,16 @@ typedef enum grpc_stream_compression_flush { } grpc_stream_compression_flush; struct grpc_stream_compression_vtable { - bool (*compress)(grpc_stream_compression_context *ctx, grpc_slice_buffer *in, - grpc_slice_buffer *out, size_t *output_size, + bool (*compress)(grpc_stream_compression_context* ctx, grpc_slice_buffer* in, + grpc_slice_buffer* out, size_t* output_size, size_t max_output_size, grpc_stream_compression_flush flush); - bool (*decompress)(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, - bool *end_of_context); - grpc_stream_compression_context *(*context_create)( + bool (*decompress)(grpc_stream_compression_context* ctx, + grpc_slice_buffer* in, grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size, + bool* end_of_context); + grpc_stream_compression_context* (*context_create)( grpc_stream_compression_method method); - void (*context_destroy)(grpc_stream_compression_context *ctx); + void (*context_destroy)(grpc_stream_compression_context* ctx); }; /** @@ -74,9 +76,9 @@ struct grpc_stream_compression_vtable { * previous compressed bytes. It allows corresponding decompression context to * be dropped when reaching this boundary. */ -bool grpc_stream_compress(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, +bool grpc_stream_compress(grpc_stream_compression_context* ctx, + grpc_slice_buffer* in, grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size, grpc_stream_compression_flush flush); /** @@ -86,29 +88,29 @@ bool grpc_stream_compress(grpc_stream_compression_context *ctx, * it is set to false. The total number of bytes emitted is outputed in \a * output_size. */ -bool grpc_stream_decompress(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, - bool *end_of_context); +bool grpc_stream_decompress(grpc_stream_compression_context* ctx, + grpc_slice_buffer* in, grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size, + bool* end_of_context); /** * Creates a stream compression context. 
\a pending_bytes_buffer is the input * buffer for compression/decompression operations. \a method specifies whether * the context is for compression or decompression. */ -grpc_stream_compression_context *grpc_stream_compression_context_create( +grpc_stream_compression_context* grpc_stream_compression_context_create( grpc_stream_compression_method method); /** * Destroys a stream compression context. */ void grpc_stream_compression_context_destroy( - grpc_stream_compression_context *ctx); + grpc_stream_compression_context* ctx); /** * Parse stream compression method based on algorithm name */ int grpc_stream_compression_method_parse( - grpc_slice value, bool is_compress, grpc_stream_compression_method *method); + grpc_slice value, bool is_compress, grpc_stream_compression_method* method); #endif diff --git a/Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.c b/Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.cc similarity index 70% rename from Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.c rename to Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.cc index abcbdb3a9..682f71284 100644 --- a/Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.c +++ b/Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -29,18 +31,18 @@ typedef struct grpc_stream_compression_context_gzip { grpc_stream_compression_context base; z_stream zs; - int (*flate)(z_stream *zs, int flush); + int (*flate)(z_stream* zs, int flush); } grpc_stream_compression_context_gzip; -static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, int flush, - bool *end_of_context) { +static bool gzip_flate(grpc_stream_compression_context_gzip* ctx, + grpc_slice_buffer* in, grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size, int flush, + bool* end_of_context) { GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH); /* Full flush is not allowed when inflating. */ GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH))); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; int r; bool eoc = false; size_t original_max_output_size = max_output_size; @@ -48,17 +50,17 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? 
max_output_size : OUTPUT_BLOCK_SIZE; grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size); - ctx->zs.avail_out = (uInt)slice_size; + ctx->zs.avail_out = static_cast(slice_size); ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out); while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) { grpc_slice slice = grpc_slice_buffer_take_first(in); - ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice); + ctx->zs.avail_in = static_cast GRPC_SLICE_LENGTH(slice); ctx->zs.next_in = GRPC_SLICE_START_PTR(slice); r = ctx->flate(&ctx->zs, Z_NO_FLUSH); if (r < 0 && r != Z_BUF_ERROR) { gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); + grpc_slice_unref_internal(slice_out); + return false; } else if (r == Z_STREAM_END && ctx->flate == inflate) { eoc = true; @@ -69,7 +71,7 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in, GRPC_SLICE_LENGTH(slice))); } - grpc_slice_unref_internal(&exec_ctx, slice); + grpc_slice_unref_internal(slice); } if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) { GPR_ASSERT(in->length == 0); @@ -88,8 +90,8 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, break; default: gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); + grpc_slice_unref_internal(slice_out); + return false; } } else if (flush == Z_FINISH) { @@ -104,8 +106,8 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, break; default: gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); + grpc_slice_unref_internal(slice_out); + return false; } } @@ -114,14 +116,15 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, if (ctx->zs.avail_out == 0) { grpc_slice_buffer_add(out, slice_out); } else if (ctx->zs.avail_out < slice_size) { - slice_out.data.refcounted.length -= ctx->zs.avail_out; + size_t len = GRPC_SLICE_LENGTH(slice_out); + GRPC_SLICE_SET_LENGTH(slice_out, len - ctx->zs.avail_out); grpc_slice_buffer_add(out, slice_out); } else { - grpc_slice_unref_internal(&exec_ctx, slice_out); + grpc_slice_unref_internal(slice_out); } max_output_size -= (slice_size - ctx->zs.avail_out); } - grpc_exec_ctx_finish(&exec_ctx); + if (end_of_context) { *end_of_context = eoc; } @@ -131,17 +134,17 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, return true; } -static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, - grpc_slice_buffer *out, - size_t *output_size, +static bool grpc_stream_compress_gzip(grpc_stream_compression_context* ctx, + grpc_slice_buffer* in, + grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size, grpc_stream_compression_flush flush) { - if (ctx == NULL) { + if (ctx == nullptr) { return false; } - grpc_stream_compression_context_gzip *gzip_ctx = - (grpc_stream_compression_context_gzip *)ctx; + grpc_stream_compression_context_gzip* gzip_ctx = + reinterpret_cast(ctx); GPR_ASSERT(gzip_ctx->flate == deflate); int gzip_flush; switch (flush) { @@ -158,36 +161,36 @@ static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx, gzip_flush = 0; } return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, gzip_flush, - NULL); + nullptr); } -static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, - grpc_slice_buffer *out, - 
size_t *output_size, +static bool grpc_stream_decompress_gzip(grpc_stream_compression_context* ctx, + grpc_slice_buffer* in, + grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size, - bool *end_of_context) { - if (ctx == NULL) { + bool* end_of_context) { + if (ctx == nullptr) { return false; } - grpc_stream_compression_context_gzip *gzip_ctx = - (grpc_stream_compression_context_gzip *)ctx; + grpc_stream_compression_context_gzip* gzip_ctx = + reinterpret_cast(ctx); GPR_ASSERT(gzip_ctx->flate == inflate); return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, Z_SYNC_FLUSH, end_of_context); } -static grpc_stream_compression_context * +static grpc_stream_compression_context* grpc_stream_compression_context_create_gzip( grpc_stream_compression_method method) { GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS || method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); - grpc_stream_compression_context_gzip *gzip_ctx = - (grpc_stream_compression_context_gzip *)gpr_zalloc( - sizeof(grpc_stream_compression_context_gzip)); + grpc_stream_compression_context_gzip* gzip_ctx = + static_cast( + gpr_zalloc(sizeof(grpc_stream_compression_context_gzip))); int r; - if (gzip_ctx == NULL) { - return NULL; + if (gzip_ctx == nullptr) { + return nullptr; } if (method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS) { r = inflateInit2(&gzip_ctx->zs, 0x1F); @@ -199,20 +202,20 @@ grpc_stream_compression_context_create_gzip( } if (r != Z_OK) { gpr_free(gzip_ctx); - return NULL; + return nullptr; } gzip_ctx->base.vtable = &grpc_stream_compression_gzip_vtable; - return (grpc_stream_compression_context *)gzip_ctx; + return reinterpret_cast(gzip_ctx); } static void grpc_stream_compression_context_destroy_gzip( - grpc_stream_compression_context *ctx) { - if (ctx == NULL) { + grpc_stream_compression_context* ctx) { + if (ctx == nullptr) { return; } - grpc_stream_compression_context_gzip *gzip_ctx = - (grpc_stream_compression_context_gzip *)ctx; + grpc_stream_compression_context_gzip* gzip_ctx = + reinterpret_cast(ctx); if (gzip_ctx->flate == inflate) { inflateEnd(&gzip_ctx->zs); } else { @@ -222,7 +225,6 @@ static void grpc_stream_compression_context_destroy_gzip( } const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable = { - .compress = grpc_stream_compress_gzip, - .decompress = grpc_stream_decompress_gzip, - .context_create = grpc_stream_compression_context_create_gzip, - .context_destroy = grpc_stream_compression_context_destroy_gzip}; + grpc_stream_compress_gzip, grpc_stream_decompress_gzip, + grpc_stream_compression_context_create_gzip, + grpc_stream_compression_context_destroy_gzip}; diff --git a/Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.h b/Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.h index 7cf49a0de..740f09734 100644 --- a/Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.h +++ b/Sources/CgRPC/src/core/lib/compression/stream_compression_gzip.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H #define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H +#include + #include "src/core/lib/compression/stream_compression.h" extern const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable; diff --git a/Sources/CgRPC/src/core/lib/compression/stream_compression_identity.c b/Sources/CgRPC/src/core/lib/compression/stream_compression_identity.cc similarity index 71% rename from Sources/CgRPC/src/core/lib/compression/stream_compression_identity.c rename to 
Sources/CgRPC/src/core/lib/compression/stream_compression_identity.cc index 3dfcf53b8..b7981394d 100644 --- a/Sources/CgRPC/src/core/lib/compression/stream_compression_identity.c +++ b/Sources/CgRPC/src/core/lib/compression/stream_compression_identity.cc @@ -16,22 +16,23 @@ * */ +#include + #include #include #include "src/core/lib/compression/stream_compression_identity.h" -#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/slice/slice_internal.h" #define OUTPUT_BLOCK_SIZE (1024) /* Singleton context used for all identity streams. */ static grpc_stream_compression_context identity_ctx = { - .vtable = &grpc_stream_compression_identity_vtable}; + &grpc_stream_compression_identity_vtable}; -static void grpc_stream_compression_pass_through(grpc_slice_buffer *in, - grpc_slice_buffer *out, - size_t *output_size, +static void grpc_stream_compression_pass_through(grpc_slice_buffer* in, + grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size) { if (max_output_size >= in->length) { if (output_size) { @@ -46,13 +47,13 @@ static void grpc_stream_compression_pass_through(grpc_slice_buffer *in, } } -static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, - grpc_slice_buffer *out, - size_t *output_size, +static bool grpc_stream_compress_identity(grpc_stream_compression_context* ctx, + grpc_slice_buffer* in, + grpc_slice_buffer* out, + size_t* output_size, size_t max_output_size, grpc_stream_compression_flush flush) { - if (ctx == NULL) { + if (ctx == nullptr) { return false; } grpc_stream_compression_pass_through(in, out, output_size, max_output_size); @@ -60,10 +61,10 @@ static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx, } static bool grpc_stream_decompress_identity( - grpc_stream_compression_context *ctx, grpc_slice_buffer *in, - grpc_slice_buffer *out, size_t *output_size, size_t max_output_size, - bool *end_of_context) { - if (ctx == NULL) { + grpc_stream_compression_context* ctx, grpc_slice_buffer* in, + grpc_slice_buffer* out, size_t* output_size, size_t max_output_size, + bool* end_of_context) { + if (ctx == nullptr) { return false; } grpc_stream_compression_pass_through(in, out, output_size, max_output_size); @@ -73,22 +74,21 @@ static bool grpc_stream_decompress_identity( return true; } -static grpc_stream_compression_context * +static grpc_stream_compression_context* grpc_stream_compression_context_create_identity( grpc_stream_compression_method method) { GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS || method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS); /* No context needed in this case. Use fake context instead. 
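// Illustrative sketch, not part of the vendored sources: driving the
// vtable-based stream compression API. Identity contexts are the shared
// identity_ctx singleton above, so create/destroy are effectively free,
// while gzip contexts wrap a real z_stream. identity_example() is a
// hypothetical caller, and GRPC_STREAM_COMPRESSION_FLUSH_SYNC is assumed to
// be one of the grpc_stream_compression_flush values declared in
// stream_compression.h.
#include <stddef.h>

#include <grpc/slice.h>
#include <grpc/slice_buffer.h>

#include "src/core/lib/compression/stream_compression.h"

static void identity_example() {
  grpc_stream_compression_context* ctx =
      grpc_stream_compression_context_create(
          GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS);
  grpc_slice_buffer in;
  grpc_slice_buffer out;
  grpc_slice_buffer_init(&in);
  grpc_slice_buffer_init(&out);
  grpc_slice_buffer_add(&in, grpc_slice_from_copied_string("hello"));
  size_t produced = 0;
  // The identity vtable simply moves slices from `in` to `out`, bounded by
  // max_output_size.
  grpc_stream_compress(ctx, &in, &out, &produced,
                       /*max_output_size=*/~(size_t)0,
                       GRPC_STREAM_COMPRESSION_FLUSH_SYNC);
  grpc_stream_compression_context_destroy(ctx);  // no-op for identity
  grpc_slice_buffer_destroy(&in);
  grpc_slice_buffer_destroy(&out);
}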
*/ - return (grpc_stream_compression_context *)&identity_ctx; + return &identity_ctx; } static void grpc_stream_compression_context_destroy_identity( - grpc_stream_compression_context *ctx) { + grpc_stream_compression_context* ctx) { return; } const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable = { - .compress = grpc_stream_compress_identity, - .decompress = grpc_stream_decompress_identity, - .context_create = grpc_stream_compression_context_create_identity, - .context_destroy = grpc_stream_compression_context_destroy_identity}; + grpc_stream_compress_identity, grpc_stream_decompress_identity, + grpc_stream_compression_context_create_identity, + grpc_stream_compression_context_destroy_identity}; diff --git a/Sources/CgRPC/src/core/lib/compression/stream_compression_identity.h b/Sources/CgRPC/src/core/lib/compression/stream_compression_identity.h index 41926e949..cc77b63ec 100644 --- a/Sources/CgRPC/src/core/lib/compression/stream_compression_identity.h +++ b/Sources/CgRPC/src/core/lib/compression/stream_compression_identity.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H #define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H +#include + #include "src/core/lib/compression/stream_compression.h" extern const grpc_stream_compression_vtable diff --git a/Sources/CgRPC/src/core/lib/debug/stats.c b/Sources/CgRPC/src/core/lib/debug/stats.cc similarity index 74% rename from Sources/CgRPC/src/core/lib/debug/stats.c rename to Sources/CgRPC/src/core/lib/debug/stats.cc index 4096384dd..d8ddf03ac 100644 --- a/Sources/CgRPC/src/core/lib/debug/stats.c +++ b/Sources/CgRPC/src/core/lib/debug/stats.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/debug/stats.h" #include @@ -23,22 +25,22 @@ #include #include -#include -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/useful.h" -grpc_stats_data *grpc_stats_per_cpu_storage = NULL; +grpc_stats_data* grpc_stats_per_cpu_storage = nullptr; static size_t g_num_cores; void grpc_stats_init(void) { g_num_cores = GPR_MAX(1, gpr_cpu_num_cores()); - grpc_stats_per_cpu_storage = - (grpc_stats_data *)gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores); + grpc_stats_per_cpu_storage = static_cast( + gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores)); } void grpc_stats_shutdown(void) { gpr_free(grpc_stats_per_cpu_storage); } -void grpc_stats_collect(grpc_stats_data *output) { +void grpc_stats_collect(grpc_stats_data* output) { memset(output, 0, sizeof(*output)); for (size_t core = 0; core < g_num_cores; core++) { for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) { @@ -52,8 +54,8 @@ void grpc_stats_collect(grpc_stats_data *output) { } } -void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, - grpc_stats_data *c) { +void grpc_stats_diff(const grpc_stats_data* b, const grpc_stats_data* a, + grpc_stats_data* c) { for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) { c->counters[i] = b->counters[i] - a->counters[i]; } @@ -62,13 +64,13 @@ void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, } } -int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value, - const int *table, int table_size) { - GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx); - const int *const start = table; +int grpc_stats_histo_find_bucket_slow(int value, const int* table, + int table_size) { + GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(); + const int* const start = table; while (table_size > 0) { int step = table_size / 2; - 
const int *it = table + step; + const int* it = table + step; if (value >= *it) { table = it + 1; table_size -= step + 1; @@ -76,20 +78,21 @@ int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value, table_size = step; } } - return (int)(table - start) - 1; + return static_cast(table - start) - 1; } -size_t grpc_stats_histo_count(const grpc_stats_data *stats, +size_t grpc_stats_histo_count(const grpc_stats_data* stats, grpc_stats_histograms histogram) { size_t sum = 0; for (int i = 0; i < grpc_stats_histo_buckets[histogram]; i++) { - sum += (size_t)stats->histograms[grpc_stats_histo_start[histogram] + i]; + sum += static_cast( + stats->histograms[grpc_stats_histo_start[histogram] + i]); } return sum; } -static double threshold_for_count_below(const gpr_atm *bucket_counts, - const int *bucket_boundaries, +static double threshold_for_count_below(const gpr_atm* bucket_counts, + const int* bucket_boundaries, int num_buckets, double count_below) { double count_so_far; double lower_bound; @@ -100,7 +103,7 @@ static double threshold_for_count_below(const gpr_atm *bucket_counts, /* find the lowest bucket that gets us above count_below */ count_so_far = 0.0; for (lower_idx = 0; lower_idx < num_buckets; lower_idx++) { - count_so_far += (double)bucket_counts[lower_idx]; + count_so_far += static_cast(bucket_counts[lower_idx]); if (count_so_far >= count_below) { break; } @@ -119,13 +122,13 @@ static double threshold_for_count_below(const gpr_atm *bucket_counts, should lie */ lower_bound = bucket_boundaries[lower_idx]; upper_bound = bucket_boundaries[lower_idx + 1]; - return upper_bound - - (upper_bound - lower_bound) * (count_so_far - count_below) / - (double)bucket_counts[lower_idx]; + return upper_bound - (upper_bound - lower_bound) * + (count_so_far - count_below) / + static_cast(bucket_counts[lower_idx]); } } -double grpc_stats_histo_percentile(const grpc_stats_data *stats, +double grpc_stats_histo_percentile(const grpc_stats_data* stats, grpc_stats_histograms histogram, double percentile) { size_t count = grpc_stats_histo_count(stats, histogram); @@ -133,12 +136,13 @@ double grpc_stats_histo_percentile(const grpc_stats_data *stats, return threshold_for_count_below( stats->histograms + grpc_stats_histo_start[histogram], grpc_stats_histo_bucket_boundaries[histogram], - grpc_stats_histo_buckets[histogram], (double)count * percentile / 100.0); + grpc_stats_histo_buckets[histogram], + static_cast(count) * percentile / 100.0); } -char *grpc_stats_data_as_json(const grpc_stats_data *data) { +char* grpc_stats_data_as_json(const grpc_stats_data* data) { gpr_strvec v; - char *tmp; + char* tmp; bool is_first = true; gpr_strvec_init(&v); gpr_strvec_add(&v, gpr_strdup("{")); @@ -168,7 +172,7 @@ char *grpc_stats_data_as_json(const grpc_stats_data *data) { is_first = false; } gpr_strvec_add(&v, gpr_strdup("}")); - tmp = gpr_strvec_flatten(&v, NULL); + tmp = gpr_strvec_flatten(&v, nullptr); gpr_strvec_destroy(&v); return tmp; } diff --git a/Sources/CgRPC/src/core/lib/debug/stats.h b/Sources/CgRPC/src/core/lib/debug/stats.h index 09d190d48..749665262 100644 --- a/Sources/CgRPC/src/core/lib/debug/stats.h +++ b/Sources/CgRPC/src/core/lib/debug/stats.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_DEBUG_STATS_H #define GRPC_CORE_LIB_DEBUG_STATS_H +#include + #include #include "src/core/lib/debug/stats_data.h" #include "src/core/lib/iomgr/exec_ctx.h" @@ -28,34 +30,32 @@ typedef struct grpc_stats_data { gpr_atm histograms[GRPC_STATS_HISTOGRAM_BUCKETS]; } grpc_stats_data; -extern grpc_stats_data 
*grpc_stats_per_cpu_storage; +extern grpc_stats_data* grpc_stats_per_cpu_storage; -#define GRPC_THREAD_STATS_DATA(exec_ctx) \ - (&grpc_stats_per_cpu_storage[(exec_ctx)->starting_cpu]) +#define GRPC_THREAD_STATS_DATA() \ + (&grpc_stats_per_cpu_storage[grpc_core::ExecCtx::Get()->starting_cpu()]) -#define GRPC_STATS_INC_COUNTER(exec_ctx, ctr) \ - (gpr_atm_no_barrier_fetch_add( \ - &GRPC_THREAD_STATS_DATA((exec_ctx))->counters[(ctr)], 1)) +#define GRPC_STATS_INC_COUNTER(ctr) \ + (gpr_atm_no_barrier_fetch_add(&GRPC_THREAD_STATS_DATA()->counters[(ctr)], 1)) -#define GRPC_STATS_INC_HISTOGRAM(exec_ctx, histogram, index) \ - (gpr_atm_no_barrier_fetch_add( \ - &GRPC_THREAD_STATS_DATA((exec_ctx)) \ - ->histograms[histogram##_FIRST_SLOT + (index)], \ +#define GRPC_STATS_INC_HISTOGRAM(histogram, index) \ + (gpr_atm_no_barrier_fetch_add( \ + &GRPC_THREAD_STATS_DATA()->histograms[histogram##_FIRST_SLOT + (index)], \ 1)) void grpc_stats_init(void); void grpc_stats_shutdown(void); -void grpc_stats_collect(grpc_stats_data *output); +void grpc_stats_collect(grpc_stats_data* output); // c = b-a -void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a, - grpc_stats_data *c); -char *grpc_stats_data_as_json(const grpc_stats_data *data); -int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, int value, - const int *table, int table_size); -double grpc_stats_histo_percentile(const grpc_stats_data *data, +void grpc_stats_diff(const grpc_stats_data* b, const grpc_stats_data* a, + grpc_stats_data* c); +char* grpc_stats_data_as_json(const grpc_stats_data* data); +int grpc_stats_histo_find_bucket_slow(int value, const int* table, + int table_size); +double grpc_stats_histo_percentile(const grpc_stats_data* data, grpc_stats_histograms histogram, double percentile); -size_t grpc_stats_histo_count(const grpc_stats_data *data, +size_t grpc_stats_histo_count(const grpc_stats_data* data, grpc_stats_histograms histogram); #endif diff --git a/Sources/CgRPC/src/core/lib/debug/stats_data.c b/Sources/CgRPC/src/core/lib/debug/stats_data.cc similarity index 75% rename from Sources/CgRPC/src/core/lib/debug/stats_data.c rename to Sources/CgRPC/src/core/lib/debug/stats_data.cc index c0aec63c1..309ece94b 100644 --- a/Sources/CgRPC/src/core/lib/debug/stats_data.c +++ b/Sources/CgRPC/src/core/lib/debug/stats_data.cc @@ -18,11 +18,14 @@ * Automatically generated by tools/codegen/core/gen_stats_data.py */ -#include "src/core/lib/debug/stats_data.h" -#include +#include + #include "src/core/lib/debug/stats.h" +#include "src/core/lib/debug/stats_data.h" +#include "src/core/lib/gpr/useful.h" #include "src/core/lib/iomgr/exec_ctx.h" -const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { + +const char* grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "client_calls_created", "server_calls_created", "cqs_created", @@ -77,6 +80,7 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "http2_initiate_write_due_to_transport_flow_control_unstalled", "http2_initiate_write_due_to_ping_response", "http2_initiate_write_due_to_force_rst_stream", + "http2_spurious_writes_begun", "hpack_recv_indexed", "hpack_recv_lithdr_incidx", "hpack_recv_lithdr_incidx_v", @@ -103,6 +107,10 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "combiner_locks_scheduled_items", "combiner_locks_scheduled_final_items", "combiner_locks_offloaded", + "call_combiner_locks_initiated", + "call_combiner_locks_scheduled_items", + "call_combiner_set_notify_on_cancel", + "call_combiner_cancelled", 
"executor_scheduled_short_items", "executor_scheduled_long_items", "executor_scheduled_to_self", @@ -111,12 +119,17 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "executor_push_retries", "server_requested_calls", "server_slowpath_requests_queued", + "cq_ev_queue_trylock_failures", + "cq_ev_queue_trylock_successes", + "cq_ev_queue_transient_pop_failures", }; -const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = { +const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = { "Number of client side calls created by this process", "Number of server side calls created by this process", - "Number of completion queues created", "Number of client channels created", - "Number of client subchannels created", "Number of server channels created", + "Number of completion queues created", + "Number of client channels created", + "Number of client subchannels created", + "Number of server channels created", "Number of polling syscalls (epoll_wait, poll, etc) made by this process", "Number of sleeping syscalls made by this process", "How many polling wakeups were performed by the process (only valid for " @@ -146,7 +159,8 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = { "Number of batches containing receive initial metadata", "Number of batches containing receive message", "Number of batches containing receive trailing metadata", - "Number of settings frames sent", "Number of HTTP2 pings sent by process", + "Number of settings frames sent", + "Number of HTTP2 pings sent by process", "Number of HTTP2 writes initiated", "Number of HTTP2 writes offloaded to the executor from application threads", "Number of HTTP2 writes that finished seeing more data needed to be " @@ -177,6 +191,7 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = { "'transport_flow_control_unstalled'", "Number of HTTP2 writes initiated due to 'ping_response'", "Number of HTTP2 writes initiated due to 'force_rst_stream'", + "Number of HTTP2 writes initiated with nothing to write", "Number of HPACK indexed fields received", "Number of HPACK literal headers received with incremental indexing", "Number of HPACK literal headers received with incremental indexing and " @@ -208,6 +223,11 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = { "Number of items scheduled against combiner locks", "Number of final items scheduled against combiner locks", "Number of combiner locks offloaded to different threads", + "Number of call combiner lock entries by process (first items queued to a " + "call combiner)", + "Number of items scheduled against call combiner locks", + "Number of times a cancellation callback was set on a call combiner", + "Number of times a call combiner was cancelled", "Number of finite runtime closures scheduled against the executor (gRPC " "thread pool)", "Number of potentially infinite runtime closures scheduled against the " @@ -220,8 +240,14 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = { "How many calls were requested (not necessarily received) by the server", "How many times was the server slow path taken (indicates too few " "outstanding requests)", + "Number of lock (trylock) acquisition failures on completion queue event " + "queue. 
High value here indicates high contention on completion queues", + "Number of lock (trylock) acquisition successes on completion queue event " + "queue.", + "Number of times NULL was popped out of completion queue's event queue " + "even though the event queue was not empty", }; -const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { +const char* grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { "call_initial_size", "poll_events_returned", "tcp_write_size", @@ -236,7 +262,7 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { "http2_send_flowctl_per_write", "server_cqs_checked", }; -const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = { +const char* grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = { "Initial size of the grpc_call arena created at call start", "How many events are called for each syscall_poll", "Number of bytes offered to each syscall_write", @@ -319,11 +345,10 @@ const uint8_t grpc_stats_table_7[102] = { 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51}; const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64}; const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5}; -void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_call_initial_size(int value) { value = GPR_CLAMP(value, 0, 262144); if (value < 6) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, - value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, value); return; } union { @@ -336,19 +361,17 @@ void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_1[((_val.uint - 4618441417868443648ull) >> 49)] + 6; _bkt.dbl = grpc_stats_table_0[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, - bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_0, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_0, 64)); } -void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_poll_events_returned(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 29) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value); return; } union { @@ -361,20 +384,17 @@ void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_3[((_val.uint - 4628855992006737920ull) >> 47)] + 29; _bkt.dbl = grpc_stats_table_2[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_2, 128)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_2, 128)); } -void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_tcp_write_size(int value) { value = GPR_CLAMP(value, 
0, 16777216); if (value < 5) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, value); return; } union { @@ -387,19 +407,17 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_4[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_4, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64)); } -void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_tcp_write_iov_size(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value); return; } union { @@ -412,19 +430,17 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_tcp_read_size(int value) { value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, value); return; } union { @@ -437,19 +453,17 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_4[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_4, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64)); } -void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_tcp_read_offer(int value) { value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, value); return; } union { @@ -462,20 +476,18 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_5[((_val.uint - 
4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_4[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_4, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64)); } -void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, - int value) { +void grpc_stats_inc_tcp_read_offer_iov_size(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, + value); return; } union { @@ -488,21 +500,19 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, + bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, - int value) { +void grpc_stats_inc_http2_send_message_size(int value) { value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, + value); return; } union { @@ -515,22 +525,19 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5; _bkt.dbl = grpc_stats_table_4[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, + bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_4, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64)); } -void grpc_stats_inc_http2_send_initial_metadata_per_write( - grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_http2_send_initial_metadata_per_write(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, - value); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, value); return; } union { @@ -544,21 +551,18 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write( _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, - bucket); + 
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, bucket); return; } GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, - grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6, - 64)); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, - int value) { +void grpc_stats_inc_http2_send_message_per_write(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, + value); return; } union { @@ -571,22 +575,19 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, + bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_http2_send_trailing_metadata_per_write( - grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_http2_send_trailing_metadata_per_write(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, - value); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, value); return; } union { @@ -600,21 +601,18 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write( _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, - bucket); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, bucket); return; } GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, - grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6, - 64)); + GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, - int value) { +void grpc_stats_inc_http2_send_flowctl_per_write(int value) { value = GPR_CLAMP(value, 0, 1024); if (value < 13) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, + value); return; } union { @@ -627,20 +625,18 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; _bkt.dbl = grpc_stats_table_6[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, + bucket); return; } - 
GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) { +void grpc_stats_inc_server_cqs_checked(int value) { value = GPR_CLAMP(value, 0, 64); if (value < 3) { - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value); return; } union { @@ -653,25 +649,24 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_table_9[((_val.uint - 4613937818241073152ull) >> 51)] + 3; _bkt.dbl = grpc_stats_table_8[bucket]; bucket -= (_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket); + GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket); return; } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_8, 8)); + GRPC_STATS_INC_HISTOGRAM( + GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, + grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_8, 8)); } const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 8}; const int grpc_stats_histo_start[13] = {0, 64, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832}; -const int *const grpc_stats_histo_bucket_boundaries[13] = { +const int* const grpc_stats_histo_bucket_boundaries[13] = { grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_8}; -void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x) = { +void (*const grpc_stats_inc_histogram[13])(int x) = { grpc_stats_inc_call_initial_size, grpc_stats_inc_poll_events_returned, grpc_stats_inc_tcp_write_size, diff --git a/Sources/CgRPC/src/core/lib/debug/stats_data.h b/Sources/CgRPC/src/core/lib/debug/stats_data.h index 28dab0011..da1266ad7 100644 --- a/Sources/CgRPC/src/core/lib/debug/stats_data.h +++ b/Sources/CgRPC/src/core/lib/debug/stats_data.h @@ -21,6 +21,8 @@ #ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H #define GRPC_CORE_LIB_DEBUG_STATS_DATA_H +#include + #include #include "src/core/lib/iomgr/exec_ctx.h" @@ -79,6 +81,7 @@ typedef enum { GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED, GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE, GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM, + GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN, GRPC_STATS_COUNTER_HPACK_RECV_INDEXED, GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX, GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V, @@ -105,6 +108,10 @@ typedef enum { GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS, GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS, GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED, + GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED, + GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS, + GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL, + GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED, GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS, 
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS, GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF, @@ -113,10 +120,13 @@ typedef enum { GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES, GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS, GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED, + GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES, + GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES, + GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES, GRPC_STATS_COUNTER_COUNT } grpc_stats_counters; -extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT]; -extern const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT]; +extern const char* grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT]; +extern const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT]; typedef enum { GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, @@ -133,8 +143,8 @@ typedef enum { GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, GRPC_STATS_HISTOGRAM_COUNT } grpc_stats_histograms; -extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT]; -extern const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT]; +extern const char* grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT]; +extern const char* grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT]; typedef enum { GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_FIRST_SLOT = 0, GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_BUCKETS = 64, @@ -164,307 +174,262 @@ typedef enum { GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8, GRPC_STATS_HISTOGRAM_BUCKETS = 840 } grpc_stats_histogram_constants; -#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) -#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED) -#define GRPC_STATS_INC_CQS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CQS_CREATED) -#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED) -#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED) -#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED) -#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL) -#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT) -#define GRPC_STATS_INC_POLLSET_KICK(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK) -#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER) -#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN) -#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD) -#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV) -#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD) -#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), 
GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS) -#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE) -#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ) -#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED) -#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS) -#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES) -#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_CANCEL) -#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA) -#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE) -#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA) -#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA) -#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE) -#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA) -#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES) -#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT) -#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN) -#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED) -#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED) -#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_CLIENT_CALLS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) +#define GRPC_STATS_INC_SERVER_CALLS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CALLS_CREATED) +#define GRPC_STATS_INC_CQS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQS_CREATED) +#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED) +#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED) +#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED) +#define GRPC_STATS_INC_SYSCALL_POLL() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_POLL) +#define GRPC_STATS_INC_SYSCALL_WAIT() \ + 
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_WAIT) +#define GRPC_STATS_INC_POLLSET_KICK() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK) +#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER) +#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN) +#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD) +#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV) +#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD) +#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS) +#define GRPC_STATS_INC_SYSCALL_WRITE() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_WRITE) +#define GRPC_STATS_INC_SYSCALL_READ() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_READ) +#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED) +#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS) +#define GRPC_STATS_INC_HTTP2_OP_BATCHES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_BATCHES) +#define GRPC_STATS_INC_HTTP2_OP_CANCEL() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_CANCEL) +#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA) +#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE) +#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA) +#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA) +#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE) +#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA) +#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES) +#define GRPC_STATS_INC_HTTP2_PINGS_SENT() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_PINGS_SENT) +#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN) +#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED) +#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED) +#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx) \ - 
GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA( \ - exec_ctx) \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE() \ + GRPC_STATS_INC_COUNTER( \ + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA() \ GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT() \ + GRPC_STATS_INC_COUNTER( \ + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM() \ + GRPC_STATS_INC_COUNTER( \ + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM) +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define 
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED( \ - exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE() \ + GRPC_STATS_INC_COUNTER( \ GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE) -#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx) \ - GRPC_STATS_INC_COUNTER( \ - (exec_ctx), \ - GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM) -#define GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_INDEXED) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX) -#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V) -#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED) -#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN) -#define 
GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_BINARY) -#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64) -#define GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_INDEXED) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX) -#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V) -#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED) -#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN) -#define GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_BINARY) -#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64) -#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED) -#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS) -#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx) \ +#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM() \ GRPC_STATS_INC_COUNTER( \ - (exec_ctx), GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS) -#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED) -#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS) -#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS) -#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF) -#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED) -#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED) -#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES) -#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), 
GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS) -#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED) -#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \ - grpc_stats_inc_call_initial_size((exec_ctx), (int)(value)) -void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, value) \ - grpc_stats_inc_poll_events_returned((exec_ctx), (int)(value)) -void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \ - grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \ - grpc_stats_inc_tcp_write_iov_size((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \ - grpc_stats_inc_tcp_read_size((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, value) \ - grpc_stats_inc_tcp_read_offer((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, value) \ - grpc_stats_inc_tcp_read_offer_iov_size((exec_ctx), (int)(value)) -void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \ - grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value)) -void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \ - grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value)) -void grpc_stats_inc_http2_send_initial_metadata_per_write( - grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \ - grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value)) -void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, - int x); -#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \ - grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx), \ - (int)(value)) -void grpc_stats_inc_http2_send_trailing_metadata_per_write( - grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \ - grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value)) -void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, - int x); -#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \ - grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value)) -void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x); + GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM) +#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN) +#define GRPC_STATS_INC_HPACK_RECV_INDEXED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_INDEXED) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V) +#define 
GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX) +#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V) +#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED) +#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN) +#define GRPC_STATS_INC_HPACK_RECV_BINARY() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_BINARY) +#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64) +#define GRPC_STATS_INC_HPACK_SEND_INDEXED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_INDEXED) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX) +#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V) +#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED) +#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN) +#define GRPC_STATS_INC_HPACK_SEND_BINARY() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_BINARY) +#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64) +#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED) +#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS) +#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS() \ + GRPC_STATS_INC_COUNTER( \ + GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS) +#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED) +#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED) +#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS) +#define GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL) +#define GRPC_STATS_INC_CALL_COMBINER_CANCELLED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED) +#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS) +#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS() \ + 
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS) +#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF) +#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED) +#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED) +#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES) +#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS) +#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED) +#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES) +#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES) +#define GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES() \ + GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES) +#define GRPC_STATS_INC_CALL_INITIAL_SIZE(value) \ + grpc_stats_inc_call_initial_size((int)(value)) +void grpc_stats_inc_call_initial_size(int x); +#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(value) \ + grpc_stats_inc_poll_events_returned((int)(value)) +void grpc_stats_inc_poll_events_returned(int x); +#define GRPC_STATS_INC_TCP_WRITE_SIZE(value) \ + grpc_stats_inc_tcp_write_size((int)(value)) +void grpc_stats_inc_tcp_write_size(int x); +#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(value) \ + grpc_stats_inc_tcp_write_iov_size((int)(value)) +void grpc_stats_inc_tcp_write_iov_size(int x); +#define GRPC_STATS_INC_TCP_READ_SIZE(value) \ + grpc_stats_inc_tcp_read_size((int)(value)) +void grpc_stats_inc_tcp_read_size(int x); +#define GRPC_STATS_INC_TCP_READ_OFFER(value) \ + grpc_stats_inc_tcp_read_offer((int)(value)) +void grpc_stats_inc_tcp_read_offer(int x); +#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(value) \ + grpc_stats_inc_tcp_read_offer_iov_size((int)(value)) +void grpc_stats_inc_tcp_read_offer_iov_size(int x); +#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(value) \ + grpc_stats_inc_http2_send_message_size((int)(value)) +void grpc_stats_inc_http2_send_message_size(int x); +#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(value) \ + grpc_stats_inc_http2_send_initial_metadata_per_write((int)(value)) +void grpc_stats_inc_http2_send_initial_metadata_per_write(int x); +#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(value) \ + grpc_stats_inc_http2_send_message_per_write((int)(value)) +void grpc_stats_inc_http2_send_message_per_write(int x); +#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(value) \ + grpc_stats_inc_http2_send_trailing_metadata_per_write((int)(value)) +void grpc_stats_inc_http2_send_trailing_metadata_per_write(int x); +#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(value) \ + grpc_stats_inc_http2_send_flowctl_per_write((int)(value)) +void grpc_stats_inc_http2_send_flowctl_per_write(int x); +#define GRPC_STATS_INC_SERVER_CQS_CHECKED(value) \ + grpc_stats_inc_server_cqs_checked((int)(value)) +void grpc_stats_inc_server_cqs_checked(int x); extern const int grpc_stats_histo_buckets[13]; extern const int grpc_stats_histo_start[13]; -extern const int *const grpc_stats_histo_bucket_boundaries[13]; -extern void (*const 
grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, - int x); +extern const int* const grpc_stats_histo_bucket_boundaries[13]; +extern void (*const grpc_stats_inc_histogram[13])(int x); #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/Sources/CgRPC/src/core/lib/debug/trace.c b/Sources/CgRPC/src/core/lib/debug/trace.c deleted file mode 100644 index 7cb2789a1..000000000 --- a/Sources/CgRPC/src/core/lib/debug/trace.c +++ /dev/null @@ -1,146 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/debug/trace.h" - -#include - -#include -#include -#include "src/core/lib/support/env.h" - -int grpc_tracer_set_enabled(const char *name, int enabled); - -typedef struct tracer { - grpc_tracer_flag *flag; - struct tracer *next; -} tracer; -static tracer *tracers; - -#ifdef GRPC_THREADSAFE_TRACER -#define TRACER_SET(flag, on) gpr_atm_no_barrier_store(&(flag).value, (on)) -#else -#define TRACER_SET(flag, on) (flag).value = (on) -#endif - -void grpc_register_tracer(grpc_tracer_flag *flag) { - tracer *t = (tracer *)gpr_malloc(sizeof(*t)); - t->flag = flag; - t->next = tracers; - TRACER_SET(*flag, false); - tracers = t; -} - -static void add(const char *beg, const char *end, char ***ss, size_t *ns) { - size_t n = *ns; - size_t np = n + 1; - char *s; - size_t len; - GPR_ASSERT(end >= beg); - len = (size_t)(end - beg); - s = (char *)gpr_malloc(len + 1); - memcpy(s, beg, len); - s[len] = 0; - *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np); - (*ss)[n] = s; - *ns = np; -} - -static void split(const char *s, char ***ss, size_t *ns) { - const char *c = strchr(s, ','); - if (c == NULL) { - add(s, s + strlen(s), ss, ns); - } else { - add(s, c, ss, ns); - split(c + 1, ss, ns); - } -} - -static void parse(const char *s) { - char **strings = NULL; - size_t nstrings = 0; - size_t i; - split(s, &strings, &nstrings); - - for (i = 0; i < nstrings; i++) { - if (strings[i][0] == '-') { - grpc_tracer_set_enabled(strings[i] + 1, 0); - } else { - grpc_tracer_set_enabled(strings[i], 1); - } - } - - for (i = 0; i < nstrings; i++) { - gpr_free(strings[i]); - } - gpr_free(strings); -} - -static void list_tracers() { - gpr_log(GPR_DEBUG, "available tracers:"); - tracer *t; - for (t = tracers; t; t = t->next) { - gpr_log(GPR_DEBUG, "\t%s", t->flag->name); - } -} - -void grpc_tracer_init(const char *env_var) { - char *e = gpr_getenv(env_var); - if (e != NULL) { - parse(e); - gpr_free(e); - } -} - -void grpc_tracer_shutdown(void) { - while (tracers) { - tracer *t = tracers; - tracers = t->next; - gpr_free(t); - } -} - -int grpc_tracer_set_enabled(const char *name, int enabled) { - tracer *t; - if (0 == strcmp(name, "all")) { - for (t = tracers; t; t = t->next) { - TRACER_SET(*t->flag, enabled); - } - } else if (0 == strcmp(name, "list_tracers")) { - list_tracers(); - } else if (0 == strcmp(name, "refcount")) { - for (t = tracers; t; t = t->next) { - if (strstr(t->flag->name, "refcount") != NULL) { - TRACER_SET(*t->flag, enabled); - } 
- } - } else { - int found = 0; - for (t = tracers; t; t = t->next) { - if (0 == strcmp(name, t->flag->name)) { - TRACER_SET(*t->flag, enabled); - found = 1; - } - } - if (!found) { - gpr_log(GPR_ERROR, "Unknown trace var: '%s'", name); - return 0; /* early return */ - } - } - return 1; -} diff --git a/Sources/CgRPC/src/core/lib/debug/trace.cc b/Sources/CgRPC/src/core/lib/debug/trace.cc new file mode 100644 index 000000000..01c1e867d --- /dev/null +++ b/Sources/CgRPC/src/core/lib/debug/trace.cc @@ -0,0 +1,145 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/debug/trace.h" + +#include + +#include +#include +#include +#include "src/core/lib/gpr/env.h" + +int grpc_tracer_set_enabled(const char* name, int enabled); + +namespace grpc_core { + +TraceFlag* TraceFlagList::root_tracer_ = nullptr; + +bool TraceFlagList::Set(const char* name, bool enabled) { + TraceFlag* t; + if (0 == strcmp(name, "all")) { + for (t = root_tracer_; t; t = t->next_tracer_) { + t->set_enabled(enabled); + } + } else if (0 == strcmp(name, "list_tracers")) { + LogAllTracers(); + } else if (0 == strcmp(name, "refcount")) { + for (t = root_tracer_; t; t = t->next_tracer_) { + if (strstr(t->name_, "refcount") != nullptr) { + t->set_enabled(enabled); + } + } + } else { + bool found = false; + for (t = root_tracer_; t; t = t->next_tracer_) { + if (0 == strcmp(name, t->name_)) { + t->set_enabled(enabled); + found = true; + } + } + // check for unknowns, but ignore "", to allow to GRPC_TRACE= + if (!found && 0 != strcmp(name, "")) { + gpr_log(GPR_ERROR, "Unknown trace var: '%s'", name); + return false; /* early return */ + } + } + return true; +} + +void TraceFlagList::Add(TraceFlag* flag) { + flag->next_tracer_ = root_tracer_; + root_tracer_ = flag; +} + +void TraceFlagList::LogAllTracers() { + gpr_log(GPR_DEBUG, "available tracers:"); + TraceFlag* t; + for (t = root_tracer_; t != nullptr; t = t->next_tracer_) { + gpr_log(GPR_DEBUG, "\t%s", t->name_); + } +} + +// Flags register themselves on the list during construction +TraceFlag::TraceFlag(bool default_enabled, const char* name) : name_(name) { + set_enabled(default_enabled); + TraceFlagList::Add(this); +} + +} // namespace grpc_core + +static void add(const char* beg, const char* end, char*** ss, size_t* ns) { + size_t n = *ns; + size_t np = n + 1; + char* s; + size_t len; + GPR_ASSERT(end >= beg); + len = static_cast(end - beg); + s = static_cast(gpr_malloc(len + 1)); + memcpy(s, beg, len); + s[len] = 0; + *ss = static_cast(gpr_realloc(*ss, sizeof(char**) * np)); + (*ss)[n] = s; + *ns = np; +} + +static void split(const char* s, char*** ss, size_t* ns) { + const char* c = strchr(s, ','); + if (c == nullptr) { + add(s, s + strlen(s), ss, ns); + } else { + add(s, c, ss, ns); + split(c + 1, ss, ns); + } +} + +static void parse(const char* s) { + char** strings = nullptr; + size_t nstrings = 0; + size_t i; + split(s, &strings, &nstrings); + + for (i = 0; i < nstrings; i++) { + if 
(strings[i][0] == '-') { + grpc_core::TraceFlagList::Set(strings[i] + 1, false); + } else { + grpc_core::TraceFlagList::Set(strings[i], true); + } + } + + for (i = 0; i < nstrings; i++) { + gpr_free(strings[i]); + } + gpr_free(strings); +} + +void grpc_tracer_init(const char* env_var) { + char* e = gpr_getenv(env_var); + if (e != nullptr) { + parse(e); + gpr_free(e); + } +} + +void grpc_tracer_shutdown(void) {} + +int grpc_tracer_set_enabled(const char* name, int enabled) { + return grpc_core::TraceFlagList::Set(name, enabled != 0); +} diff --git a/Sources/CgRPC/src/core/lib/debug/trace.h b/Sources/CgRPC/src/core/lib/debug/trace.h index 64f2e3fc3..28157c638 100644 --- a/Sources/CgRPC/src/core/lib/debug/trace.h +++ b/Sources/CgRPC/src/core/lib/debug/trace.h @@ -19,37 +19,95 @@ #ifndef GRPC_CORE_LIB_DEBUG_TRACE_H #define GRPC_CORE_LIB_DEBUG_TRACE_H -#include #include + +#include #include +void grpc_tracer_init(const char* env_var_name); +void grpc_tracer_shutdown(void); + #if defined(__has_feature) #if __has_feature(thread_sanitizer) #define GRPC_THREADSAFE_TRACER #endif #endif -typedef struct { +namespace grpc_core { + +class TraceFlag; +class TraceFlagList { + public: + static bool Set(const char* name, bool enabled); + static void Add(TraceFlag* flag); + + private: + static void LogAllTracers(); + static TraceFlag* root_tracer_; +}; + +namespace testing { +void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag); +} + +class TraceFlag { + public: + TraceFlag(bool default_enabled, const char* name); + ~TraceFlag() {} + + const char* name() const { return name_; } + +// This following define may be commented out to ensure that the compiler +// deletes any "if (tracer.enabled()) {...}" codeblocks. This is useful to +// test the performance impact tracers have on the system. 
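The rewritten trace.cc keeps the behaviour of the deleted trace.c: flags register themselves on a global list when constructed, and a comma-separated environment variable such as GRPC_TRACE toggles them, with a leading '-' disabling a name and "all" matching every flag. Below is a minimal standalone sketch of that self-registration pattern; the Flag, Registry and EnableFromEnv names are illustrative and not part of the gRPC API.

// Sketch of the self-registering trace-flag pattern used by trace.cc above.
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

class Flag;

class Registry {
 public:
  static void Add(Flag* f) { List().push_back(f); }
  static bool Set(const std::string& name, bool enabled);

 private:
  static std::vector<Flag*>& List() {
    static std::vector<Flag*> flags;  // constructed on first use
    return flags;
  }
};

class Flag {
 public:
  Flag(bool default_enabled, const char* name)
      : name_(name), enabled_(default_enabled) {
    Registry::Add(this);  // flags register themselves at construction
  }
  const char* name() const { return name_; }
  bool enabled() const { return enabled_; }
  void set_enabled(bool on) { enabled_ = on; }

 private:
  const char* const name_;
  bool enabled_;
};

bool Registry::Set(const std::string& name, bool enabled) {
  bool found = false;
  for (Flag* f : List()) {
    if (name == "all" || name == f->name()) {
      f->set_enabled(enabled);
      found = true;
    }
  }
  if (!found && !name.empty()) {
    std::fprintf(stderr, "Unknown trace var: '%s'\n", name.c_str());
  }
  return found;
}

// Parse a comma-separated list such as "http,-timer_check": a leading '-'
// disables the named flag, everything else enables it.
void EnableFromEnv(const char* env_var) {
  const char* e = std::getenv(env_var);
  if (e == nullptr) return;
  std::string s(e);
  size_t start = 0;
  while (start <= s.size()) {
    size_t comma = s.find(',', start);
    std::string tok = s.substr(
        start, comma == std::string::npos ? std::string::npos : comma - start);
    if (!tok.empty() && tok[0] == '-') {
      Registry::Set(tok.substr(1), false);
    } else if (!tok.empty()) {
      Registry::Set(tok, true);
    }
    if (comma == std::string::npos) break;
    start = comma + 1;
  }
}

static Flag http_flag(false, "http");  // e.g. GRPC_TRACE=http enables this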
+// +// #define COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD +#ifdef COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD + bool enabled() { return false; } +#else + bool enabled() { +#ifdef GRPC_THREADSAFE_TRACER + return gpr_atm_no_barrier_load(&value_) != 0; +#else + return value_; +#endif // GRPC_THREADSAFE_TRACER + } +#endif // COMPILE_OUT_ALL_TRACERS_IN_OPT_BUILD + + private: + friend void grpc_core::testing::grpc_tracer_enable_flag(TraceFlag* flag); + friend class TraceFlagList; + + void set_enabled(bool enabled) { #ifdef GRPC_THREADSAFE_TRACER - gpr_atm value; + gpr_atm_no_barrier_store(&value_, enabled); #else - bool value; + value_ = enabled; #endif - const char *name; -} grpc_tracer_flag; + } + TraceFlag* next_tracer_; + const char* const name_; #ifdef GRPC_THREADSAFE_TRACER -#define GRPC_TRACER_ON(flag) (gpr_atm_no_barrier_load(&(flag).value) != 0) -#define GRPC_TRACER_INITIALIZER(on, name) \ - { (gpr_atm)(on), (name) } + gpr_atm value_; #else -#define GRPC_TRACER_ON(flag) ((flag).value) -#define GRPC_TRACER_INITIALIZER(on, name) \ - { (on), (name) } + bool value_; #endif +}; -void grpc_register_tracer(grpc_tracer_flag *flag); -void grpc_tracer_init(const char *env_var_name); -void grpc_tracer_shutdown(void); +#ifndef NDEBUG +typedef TraceFlag DebugOnlyTraceFlag; +#else +class DebugOnlyTraceFlag { + public: + DebugOnlyTraceFlag(bool default_enabled, const char* name) {} + bool enabled() { return false; } + + private: + void set_enabled(bool enabled) {} +}; +#endif + +} // namespace grpc_core #endif /* GRPC_CORE_LIB_DEBUG_TRACE_H */ diff --git a/Sources/CgRPC/src/core/lib/support/alloc.c b/Sources/CgRPC/src/core/lib/gpr/alloc.cc similarity index 56% rename from Sources/CgRPC/src/core/lib/support/alloc.c rename to Sources/CgRPC/src/core/lib/gpr/alloc.cc index 886d69d64..611e4ccee 100644 --- a/Sources/CgRPC/src/core/lib/support/alloc.c +++ b/Sources/CgRPC/src/core/lib/gpr/alloc.cc @@ -16,18 +16,19 @@ * */ +#include + #include #include -#include #include #include #include "src/core/lib/profiling/timers.h" -static void *zalloc_with_calloc(size_t sz) { return calloc(sz, 1); } +static void* zalloc_with_calloc(size_t sz) { return calloc(sz, 1); } -static void *zalloc_with_gpr_malloc(size_t sz) { - void *p = gpr_malloc(sz); +static void* zalloc_with_gpr_malloc(size_t sz) { + void* p = gpr_malloc(sz); memset(p, 0, sz); return p; } @@ -40,63 +41,59 @@ gpr_allocation_functions gpr_get_allocation_functions() { } void gpr_set_allocation_functions(gpr_allocation_functions functions) { - GPR_ASSERT(functions.malloc_fn != NULL); - GPR_ASSERT(functions.realloc_fn != NULL); - GPR_ASSERT(functions.free_fn != NULL); - if (functions.zalloc_fn == NULL) { + GPR_ASSERT(functions.malloc_fn != nullptr); + GPR_ASSERT(functions.realloc_fn != nullptr); + GPR_ASSERT(functions.free_fn != nullptr); + if (functions.zalloc_fn == nullptr) { functions.zalloc_fn = zalloc_with_gpr_malloc; } g_alloc_functions = functions; } -void *gpr_malloc(size_t size) { - void *p; - if (size == 0) return NULL; - GPR_TIMER_BEGIN("gpr_malloc", 0); +void* gpr_malloc(size_t size) { + GPR_TIMER_SCOPE("gpr_malloc", 0); + void* p; + if (size == 0) return nullptr; p = g_alloc_functions.malloc_fn(size); if (!p) { abort(); } - GPR_TIMER_END("gpr_malloc", 0); return p; } -void *gpr_zalloc(size_t size) { - void *p; - if (size == 0) return NULL; - GPR_TIMER_BEGIN("gpr_zalloc", 0); +void* gpr_zalloc(size_t size) { + GPR_TIMER_SCOPE("gpr_zalloc", 0); + void* p; + if (size == 0) return nullptr; p = g_alloc_functions.zalloc_fn(size); if (!p) { abort(); } - 
GPR_TIMER_END("gpr_zalloc", 0); return p; } -void gpr_free(void *p) { - GPR_TIMER_BEGIN("gpr_free", 0); +void gpr_free(void* p) { + GPR_TIMER_SCOPE("gpr_free", 0); g_alloc_functions.free_fn(p); - GPR_TIMER_END("gpr_free", 0); } -void *gpr_realloc(void *p, size_t size) { - if ((size == 0) && (p == NULL)) return NULL; - GPR_TIMER_BEGIN("gpr_realloc", 0); +void* gpr_realloc(void* p, size_t size) { + GPR_TIMER_SCOPE("gpr_realloc", 0); + if ((size == 0) && (p == nullptr)) return nullptr; p = g_alloc_functions.realloc_fn(p, size); if (!p) { abort(); } - GPR_TIMER_END("gpr_realloc", 0); return p; } -void *gpr_malloc_aligned(size_t size, size_t alignment_log) { - size_t alignment = ((size_t)1) << alignment_log; - size_t extra = alignment - 1 + sizeof(void *); - void *p = gpr_malloc(size + extra); - void **ret = (void **)(((uintptr_t)p + extra) & ~(alignment - 1)); +void* gpr_malloc_aligned(size_t size, size_t alignment) { + GPR_ASSERT(((alignment - 1) & alignment) == 0); // Must be power of 2. + size_t extra = alignment - 1 + sizeof(void*); + void* p = gpr_malloc(size + extra); + void** ret = (void**)(((uintptr_t)p + extra) & ~(alignment - 1)); ret[-1] = p; - return (void *)ret; + return (void*)ret; } -void gpr_free_aligned(void *ptr) { gpr_free(((void **)ptr)[-1]); } +void gpr_free_aligned(void* ptr) { gpr_free((static_cast(ptr))[-1]); } diff --git a/Sources/CgRPC/src/core/lib/gpr/arena.cc b/Sources/CgRPC/src/core/lib/gpr/arena.cc new file mode 100644 index 000000000..b02c5b9fb --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gpr/arena.cc @@ -0,0 +1,152 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/gpr/arena.h" + +#include + +#include +#include +#include + +// Uncomment this to use a simple arena that simply allocates the +// requested amount of memory for each call to gpr_arena_alloc(). This +// effectively eliminates the efficiency gain of using an arena, but it +// may be useful for debugging purposes. +//#define SIMPLE_ARENA_FOR_DEBUGGING + +#ifdef SIMPLE_ARENA_FOR_DEBUGGING + +#include + +struct gpr_arena { + gpr_mu mu; + void** ptrs; + size_t num_ptrs; +}; + +gpr_arena* gpr_arena_create(size_t ignored_initial_size) { + gpr_arena* arena = (gpr_arena*)gpr_zalloc(sizeof(*arena)); + gpr_mu_init(&arena->mu); + return arena; +} + +size_t gpr_arena_destroy(gpr_arena* arena) { + gpr_mu_destroy(&arena->mu); + for (size_t i = 0; i < arena->num_ptrs; ++i) { + gpr_free(arena->ptrs[i]); + } + gpr_free(arena->ptrs); + gpr_free(arena); + return 1; // Value doesn't matter, since it won't be used. 
+} + +void* gpr_arena_alloc(gpr_arena* arena, size_t size) { + gpr_mu_lock(&arena->mu); + arena->ptrs = + (void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1)); + void* retval = arena->ptrs[arena->num_ptrs++] = gpr_zalloc(size); + gpr_mu_unlock(&arena->mu); + return retval; +} + +#else // SIMPLE_ARENA_FOR_DEBUGGING + +// TODO(roth): We currently assume that all callers need alignment of 16 +// bytes, which may be wrong in some cases. As part of converting the +// arena API to C++, we should consider replacing gpr_arena_alloc() with a +// template that takes the type of the value being allocated, which +// would allow us to use the alignment actually needed by the caller. +#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \ + (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u)) + +typedef struct zone { + size_t size_begin; + size_t size_end; + gpr_atm next_atm; +} zone; + +struct gpr_arena { + gpr_atm size_so_far; + zone initial_zone; +}; + +static void* zalloc_aligned(size_t size) { + void* ptr = gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT); + memset(ptr, 0, size); + return ptr; +} + +gpr_arena* gpr_arena_create(size_t initial_size) { + initial_size = ROUND_UP_TO_ALIGNMENT_SIZE(initial_size); + gpr_arena* a = static_cast(zalloc_aligned( + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size)); + a->initial_zone.size_end = initial_size; + return a; +} + +size_t gpr_arena_destroy(gpr_arena* arena) { + gpr_atm size = gpr_atm_no_barrier_load(&arena->size_so_far); + zone* z = (zone*)gpr_atm_no_barrier_load(&arena->initial_zone.next_atm); + gpr_free_aligned(arena); + while (z) { + zone* next_z = (zone*)gpr_atm_no_barrier_load(&z->next_atm); + gpr_free_aligned(z); + z = next_z; + } + return static_cast(size); +} + +void* gpr_arena_alloc(gpr_arena* arena, size_t size) { + size = ROUND_UP_TO_ALIGNMENT_SIZE(size); + size_t start = static_cast( + gpr_atm_no_barrier_fetch_add(&arena->size_so_far, size)); + zone* z = &arena->initial_zone; + while (start > z->size_end) { + zone* next_z = (zone*)gpr_atm_acq_load(&z->next_atm); + if (next_z == nullptr) { + size_t next_z_size = + static_cast(gpr_atm_no_barrier_load(&arena->size_so_far)); + next_z = static_cast(zalloc_aligned( + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + next_z_size)); + next_z->size_begin = z->size_end; + next_z->size_end = z->size_end + next_z_size; + if (!gpr_atm_rel_cas(&z->next_atm, static_cast(NULL), + (gpr_atm)next_z)) { + gpr_free_aligned(next_z); + next_z = (zone*)gpr_atm_acq_load(&z->next_atm); + } + } + z = next_z; + } + if (start + size > z->size_end) { + return gpr_arena_alloc(arena, size); + } + GPR_ASSERT(start >= z->size_begin); + GPR_ASSERT(start + size <= z->size_end); + char* ptr = (z == &arena->initial_zone) + ? 
reinterpret_cast(arena) + + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + : reinterpret_cast(z) + + ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)); + return ptr + start - z->size_begin; +} + +#endif // SIMPLE_ARENA_FOR_DEBUGGING diff --git a/Sources/CgRPC/src/core/lib/support/arena.h b/Sources/CgRPC/src/core/lib/gpr/arena.h similarity index 78% rename from Sources/CgRPC/src/core/lib/support/arena.h rename to Sources/CgRPC/src/core/lib/gpr/arena.h index 47f0e4d16..6d2a073dd 100644 --- a/Sources/CgRPC/src/core/lib/support/arena.h +++ b/Sources/CgRPC/src/core/lib/gpr/arena.h @@ -22,18 +22,20 @@ // Tracks the total memory allocated against it, so that future arenas can // pre-allocate the right amount of memory -#ifndef GRPC_CORE_LIB_SUPPORT_ARENA_H -#define GRPC_CORE_LIB_SUPPORT_ARENA_H +#ifndef GRPC_CORE_LIB_GPR_ARENA_H +#define GRPC_CORE_LIB_GPR_ARENA_H + +#include #include typedef struct gpr_arena gpr_arena; // Create an arena, with \a initial_size bytes in the first allocated buffer -gpr_arena *gpr_arena_create(size_t initial_size); +gpr_arena* gpr_arena_create(size_t initial_size); // Allocate \a size bytes from the arena -void *gpr_arena_alloc(gpr_arena *arena, size_t size); +void* gpr_arena_alloc(gpr_arena* arena, size_t size); // Destroy an arena, returning the total number of bytes allocated -size_t gpr_arena_destroy(gpr_arena *arena); +size_t gpr_arena_destroy(gpr_arena* arena); -#endif /* GRPC_CORE_LIB_SUPPORT_ARENA_H */ +#endif /* GRPC_CORE_LIB_GPR_ARENA_H */ diff --git a/Sources/CgRPC/src/core/lib/support/atm.c b/Sources/CgRPC/src/core/lib/gpr/atm.cc similarity index 87% rename from Sources/CgRPC/src/core/lib/support/atm.c rename to Sources/CgRPC/src/core/lib/gpr/atm.cc index 2f37d62f7..649d400d3 100644 --- a/Sources/CgRPC/src/core/lib/support/atm.c +++ b/Sources/CgRPC/src/core/lib/gpr/atm.cc @@ -16,10 +16,13 @@ * */ +#include + #include -#include -gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm *value, gpr_atm delta, +#include "src/core/lib/gpr/useful.h" + +gpr_atm gpr_atm_no_barrier_clamped_add(gpr_atm* value, gpr_atm delta, gpr_atm min, gpr_atm max) { gpr_atm current_value; gpr_atm new_value; diff --git a/Sources/CgRPC/src/core/lib/support/cpu_iphone.c b/Sources/CgRPC/src/core/lib/gpr/cpu_iphone.cc similarity index 97% rename from Sources/CgRPC/src/core/lib/support/cpu_iphone.c rename to Sources/CgRPC/src/core/lib/gpr/cpu_iphone.cc index dfd69b9fd..2847e03ba 100644 --- a/Sources/CgRPC/src/core/lib/support/cpu_iphone.c +++ b/Sources/CgRPC/src/core/lib/gpr/cpu_iphone.cc @@ -18,6 +18,8 @@ #include +#include + #ifdef GPR_CPU_IPHONE /* Probably 2 instead of 1, but see comment on gpr_cpu_current_cpu. */ diff --git a/Sources/CgRPC/src/core/lib/support/cpu_linux.c b/Sources/CgRPC/src/core/lib/gpr/cpu_linux.cc similarity index 76% rename from Sources/CgRPC/src/core/lib/support/cpu_linux.c rename to Sources/CgRPC/src/core/lib/gpr/cpu_linux.cc index 228066844..9fc2f0b14 100644 --- a/Sources/CgRPC/src/core/lib/support/cpu_linux.c +++ b/Sources/CgRPC/src/core/lib/gpr/cpu_linux.cc @@ -36,9 +36,16 @@ static int ncpus = 0; static void init_num_cpus() { +#ifndef GPR_MUSL_LIBC_COMPAT + if (sched_getcpu() < 0) { + gpr_log(GPR_ERROR, "Error determining current CPU: %s\n", strerror(errno)); + ncpus = 1; + return; + } +#endif /* This must be signed. 
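The non-debug gpr_arena above is essentially a lock-free bump allocator: an atomic counter hands out offsets, sizes are rounded up to a fixed alignment, and overflow of the initial zone chains a new zone on with a compare-and-swap. The sketch below shows only the bump-pointer core under the simplifying assumptions of a fixed capacity and no zone chaining; BumpArena is an illustrative name, not gRPC code.

#include <atomic>
#include <cstddef>
#include <cstdlib>
#include <cstring>

// Simplified bump arena: one pre-allocated buffer, offsets handed out with an
// atomic fetch_add. Unlike gpr_arena above it does NOT grow; allocation
// returns nullptr once the buffer is exhausted.
class BumpArena {
 public:
  explicit BumpArena(size_t capacity)
      : buf_(static_cast<char*>(std::calloc(1, capacity))),
        capacity_(capacity),
        used_(0) {}
  ~BumpArena() { std::free(buf_); }

  void* Alloc(size_t size) {
    size = (size + kAlign - 1) & ~(kAlign - 1);  // round up to alignment
    size_t start = used_.fetch_add(size, std::memory_order_relaxed);
    if (start + size > capacity_) return nullptr;  // out of space
    return buf_ + start;
  }

 private:
  static constexpr size_t kAlign = 16;
  char* buf_;
  size_t capacity_;
  std::atomic<size_t> used_;
};

int main() {
  BumpArena arena(1 << 20);
  void* a = arena.Alloc(100);  // 16-byte aligned slice of the buffer
  void* b = arena.Alloc(32);
  std::memcpy(a, "hello", 6);
  (void)b;
}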
sysconf returns -1 when the number cannot be determined */ - ncpus = (int)sysconf(_SC_NPROCESSORS_ONLN); + ncpus = static_cast(sysconf(_SC_NPROCESSORS_CONF)); if (ncpus < 1) { gpr_log(GPR_ERROR, "Cannot determine number of CPUs: assuming 1"); ncpus = 1; @@ -48,7 +55,7 @@ static void init_num_cpus() { unsigned gpr_cpu_num_cores(void) { static gpr_once once = GPR_ONCE_INIT; gpr_once_init(&once, init_num_cpus); - return (unsigned)ncpus; + return static_cast(ncpus); } unsigned gpr_cpu_current_cpu(void) { @@ -56,12 +63,19 @@ unsigned gpr_cpu_current_cpu(void) { // sched_getcpu() is undefined on musl return 0; #else + if (gpr_cpu_num_cores() == 1) { + return 0; + } int cpu = sched_getcpu(); if (cpu < 0) { gpr_log(GPR_ERROR, "Error determining current CPU: %s\n", strerror(errno)); return 0; } - return (unsigned)cpu; + if (static_cast(cpu) >= gpr_cpu_num_cores()) { + gpr_log(GPR_ERROR, "Cannot handle hot-plugged CPUs"); + return 0; + } + return static_cast(cpu); #endif } diff --git a/Sources/CgRPC/src/core/lib/support/cpu_posix.c b/Sources/CgRPC/src/core/lib/gpr/cpu_posix.cc similarity index 63% rename from Sources/CgRPC/src/core/lib/support/cpu_posix.c rename to Sources/CgRPC/src/core/lib/gpr/cpu_posix.cc index a1ba8202a..915fd4976 100644 --- a/Sources/CgRPC/src/core/lib/support/cpu_posix.c +++ b/Sources/CgRPC/src/core/lib/gpr/cpu_posix.cc @@ -18,22 +18,26 @@ #include -#ifdef GPR_CPU_POSIX +#if defined(GPR_CPU_POSIX) #include +#include #include #include +#include +#include #include #include -#include -static __thread char magic_thread_local; +#include "src/core/lib/gpr/useful.h" static long ncpus = 0; +static pthread_key_t thread_id_key; + static void init_ncpus() { - ncpus = sysconf(_SC_NPROCESSORS_ONLN); + ncpus = sysconf(_SC_NPROCESSORS_CONF); if (ncpus < 1 || ncpus > INT32_MAX) { gpr_log(GPR_ERROR, "Cannot determine number of CPUs: assuming 1"); ncpus = 1; @@ -46,12 +50,32 @@ unsigned gpr_cpu_num_cores(void) { return (unsigned)ncpus; } +static void delete_thread_id(void* value) { + if (value) { + gpr_free(value); + } +} + +static void init_thread_id_key(void) { + pthread_key_create(&thread_id_key, delete_thread_id); +} + unsigned gpr_cpu_current_cpu(void) { /* NOTE: there's no way I know to return the actual cpu index portably... 
most code that's using this is using it to shard across work queues though, so here we use thread identity instead to achieve a similar though not identical effect */ - return (unsigned)GPR_HASH_POINTER(&magic_thread_local, gpr_cpu_num_cores()); + static gpr_once once = GPR_ONCE_INIT; + gpr_once_init(&once, init_thread_id_key); + + unsigned int* thread_id = + static_cast(pthread_getspecific(thread_id_key)); + if (thread_id == nullptr) { + thread_id = static_cast(gpr_malloc(sizeof(unsigned int))); + pthread_setspecific(thread_id_key, thread_id); + } + + return (unsigned)GPR_HASH_POINTER(thread_id, gpr_cpu_num_cores()); } #endif /* GPR_CPU_POSIX */ diff --git a/Sources/CgRPC/src/core/lib/support/cpu_windows.c b/Sources/CgRPC/src/core/lib/gpr/cpu_windows.cc similarity index 96% rename from Sources/CgRPC/src/core/lib/support/cpu_windows.c rename to Sources/CgRPC/src/core/lib/gpr/cpu_windows.cc index af26ff367..8d8945340 100644 --- a/Sources/CgRPC/src/core/lib/support/cpu_windows.c +++ b/Sources/CgRPC/src/core/lib/gpr/cpu_windows.cc @@ -19,6 +19,7 @@ #include #ifdef GPR_WINDOWS +#include #include unsigned gpr_cpu_num_cores(void) { diff --git a/Sources/CgRPC/src/core/lib/support/env.h b/Sources/CgRPC/src/core/lib/gpr/env.h similarity index 74% rename from Sources/CgRPC/src/core/lib/support/env.h rename to Sources/CgRPC/src/core/lib/gpr/env.h index e2c012a72..aec8a3166 100644 --- a/Sources/CgRPC/src/core/lib/support/env.h +++ b/Sources/CgRPC/src/core/lib/gpr/env.h @@ -16,14 +16,12 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_ENV_H -#define GRPC_CORE_LIB_SUPPORT_ENV_H +#ifndef GRPC_CORE_LIB_GPR_ENV_H +#define GRPC_CORE_LIB_GPR_ENV_H -#include +#include -#ifdef __cplusplus -extern "C" { -#endif +#include /* Env utility functions */ @@ -31,19 +29,15 @@ extern "C" { Returns a newly allocated string. It is the responsability of the caller to gpr_free the return value if not NULL (which means that the environment variable exists). */ -char *gpr_getenv(const char *name); +char* gpr_getenv(const char* name); -/* Sets the the environment with the specified name to the specified value. */ -void gpr_setenv(const char *name, const char *value); +/* Sets the environment with the specified name to the specified value. */ +void gpr_setenv(const char* name, const char* value); /* This is a version of gpr_getenv that does not produce any output if it has to use an insecure version of the function. It is ONLY to be used to solve the problem in which we need to check an env variable to configure the verbosity level of logging. So DO NOT USE THIS. 
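The POSIX gpr_cpu_current_cpu above no longer hashes the address of a __thread variable; it lazily creates a pthread key, allocates a small per-thread marker on first use, and hashes that marker into the range [0, number of cores). A standalone sketch of the same idea follows; current_shard and the pointer-mixing step are illustrative stand-ins for GPR_HASH_POINTER.

#include <pthread.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static pthread_key_t g_key;
static pthread_once_t g_once = PTHREAD_ONCE_INIT;

static void destroy_marker(void* p) { std::free(p); }
static void init_key() { pthread_key_create(&g_key, destroy_marker); }

// Returns a stable pseudo "cpu index" for the calling thread by hashing the
// address of a per-thread allocation into [0, ncores). A thread keeps the
// same index for its lifetime; different threads usually spread across shards.
static unsigned current_shard() {
  pthread_once(&g_once, init_key);
  void* marker = pthread_getspecific(g_key);
  if (marker == nullptr) {
    marker = std::malloc(1);  // freed by the key destructor
    pthread_setspecific(g_key, marker);
  }
  long ncores = sysconf(_SC_NPROCESSORS_CONF);
  if (ncores < 1) ncores = 1;
  uintptr_t h = reinterpret_cast<uintptr_t>(marker);
  h ^= h >> 16;  // cheap pointer mixing
  return static_cast<unsigned>(h % static_cast<uintptr_t>(ncores));
}

int main() { std::printf("shard=%u\n", current_shard()); }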
*/ -const char *gpr_getenv_silent(const char *name, char **dst); - -#ifdef __cplusplus -} -#endif +const char* gpr_getenv_silent(const char* name, char** dst); -#endif /* GRPC_CORE_LIB_SUPPORT_ENV_H */ +#endif /* GRPC_CORE_LIB_GPR_ENV_H */ diff --git a/Sources/CgRPC/src/core/lib/support/env_linux.c b/Sources/CgRPC/src/core/lib/gpr/env_linux.cc similarity index 74% rename from Sources/CgRPC/src/core/lib/support/env_linux.c rename to Sources/CgRPC/src/core/lib/gpr/env_linux.cc index 4c45a977c..fadc42f22 100644 --- a/Sources/CgRPC/src/core/lib/support/env_linux.c +++ b/Sources/CgRPC/src/core/lib/gpr/env_linux.cc @@ -25,7 +25,7 @@ #ifdef GPR_LINUX_ENV -#include "src/core/lib/support/env.h" +#include "src/core/lib/gpr/env.h" #include #include @@ -34,19 +34,19 @@ #include #include -#include -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/useful.h" -const char *gpr_getenv_silent(const char *name, char **dst) { - const char *insecure_func_used = NULL; - char *result = NULL; +const char* gpr_getenv_silent(const char* name, char** dst) { + const char* insecure_func_used = nullptr; + char* result = nullptr; #if defined(GPR_BACKWARDS_COMPATIBILITY_MODE) - typedef char *(*getenv_type)(const char *); + typedef char* (*getenv_type)(const char*); static getenv_type getenv_func = NULL; /* Check to see which getenv variant is supported (go from most * to least secure) */ - const char *names[] = {"secure_getenv", "__secure_getenv", "getenv"}; + const char* names[] = {"secure_getenv", "__secure_getenv", "getenv"}; for (size_t i = 0; getenv_func == NULL && i < GPR_ARRAY_SIZE(names); i++) { getenv_func = (getenv_type)dlsym(RTLD_DEFAULT, names[i]); if (getenv_func != NULL && strstr(names[i], "secure") == NULL) { @@ -60,21 +60,21 @@ const char *gpr_getenv_silent(const char *name, char **dst) { result = getenv(name); insecure_func_used = "getenv"; #endif - *dst = result == NULL ? result : gpr_strdup(result); + *dst = result == nullptr ? 
result : gpr_strdup(result); return insecure_func_used; } -char *gpr_getenv(const char *name) { - char *result = NULL; - const char *insecure_func_used = gpr_getenv_silent(name, &result); - if (insecure_func_used != NULL) { +char* gpr_getenv(const char* name) { + char* result = nullptr; + const char* insecure_func_used = gpr_getenv_silent(name, &result); + if (insecure_func_used != nullptr) { gpr_log(GPR_DEBUG, "Warning: insecure environment read function '%s' used", insecure_func_used); } return result; } -void gpr_setenv(const char *name, const char *value) { +void gpr_setenv(const char* name, const char* value) { int res = setenv(name, value, 1); GPR_ASSERT(res == 0); } diff --git a/Sources/CgRPC/src/core/lib/support/env_posix.c b/Sources/CgRPC/src/core/lib/gpr/env_posix.cc similarity index 72% rename from Sources/CgRPC/src/core/lib/support/env_posix.c rename to Sources/CgRPC/src/core/lib/gpr/env_posix.cc index b88822ca0..599f85aa7 100644 --- a/Sources/CgRPC/src/core/lib/support/env_posix.c +++ b/Sources/CgRPC/src/core/lib/gpr/env_posix.cc @@ -20,26 +20,26 @@ #ifdef GPR_POSIX_ENV -#include "src/core/lib/support/env.h" +#include "src/core/lib/gpr/env.h" #include #include #include -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" -const char *gpr_getenv_silent(const char *name, char **dst) { +const char* gpr_getenv_silent(const char* name, char** dst) { *dst = gpr_getenv(name); - return NULL; + return nullptr; } -char *gpr_getenv(const char *name) { - char *result = getenv(name); - return result == NULL ? result : gpr_strdup(result); +char* gpr_getenv(const char* name) { + char* result = getenv(name); + return result == nullptr ? result : gpr_strdup(result); } -void gpr_setenv(const char *name, const char *value) { +void gpr_setenv(const char* name, const char* value) { int res = setenv(name, value, 1); GPR_ASSERT(res == 0); } diff --git a/Sources/CgRPC/src/core/lib/support/env_windows.c b/Sources/CgRPC/src/core/lib/gpr/env_windows.cc similarity index 81% rename from Sources/CgRPC/src/core/lib/support/env_windows.c rename to Sources/CgRPC/src/core/lib/gpr/env_windows.cc index a6499543f..cf8ed60d8 100644 --- a/Sources/CgRPC/src/core/lib/support/env_windows.c +++ b/Sources/CgRPC/src/core/lib/gpr/env_windows.cc @@ -22,21 +22,21 @@ #include -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" -#include "src/core/lib/support/string_windows.h" - #include #include #include -const char *gpr_getenv_silent(const char *name, char **dst) { +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/string_windows.h" + +const char* gpr_getenv_silent(const char* name, char** dst) { *dst = gpr_getenv(name); return NULL; } -char *gpr_getenv(const char *name) { - char *result = NULL; +char* gpr_getenv(const char* name) { + char* result = NULL; DWORD size; LPTSTR tresult = NULL; LPTSTR tname = gpr_char_to_tchar(name); @@ -48,7 +48,7 @@ char *gpr_getenv(const char *name) { return NULL; } size = ret * (DWORD)sizeof(TCHAR); - tresult = gpr_malloc(size); + tresult = (LPTSTR)gpr_malloc(size); ret = GetEnvironmentVariable(tname, tresult, size); gpr_free(tname); if (ret == 0) { @@ -60,7 +60,7 @@ char *gpr_getenv(const char *name) { return result; } -void gpr_setenv(const char *name, const char *value) { +void gpr_setenv(const char* name, const char* value) { LPTSTR tname = gpr_char_to_tchar(name); LPTSTR tvalue = gpr_char_to_tchar(value); BOOL res = SetEnvironmentVariable(tname, tvalue); diff --git 
a/Sources/CgRPC/src/core/lib/support/fork.c b/Sources/CgRPC/src/core/lib/gpr/fork.cc similarity index 67% rename from Sources/CgRPC/src/core/lib/support/fork.c rename to Sources/CgRPC/src/core/lib/gpr/fork.cc index 2f29af899..812522b05 100644 --- a/Sources/CgRPC/src/core/lib/support/fork.c +++ b/Sources/CgRPC/src/core/lib/gpr/fork.cc @@ -16,14 +16,16 @@ * */ -#include "src/core/lib/support/fork.h" +#include + +#include "src/core/lib/gpr/fork.h" #include #include -#include -#include "src/core/lib/support/env.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/useful.h" /* * NOTE: FORKING IS NOT GENERALLY SUPPORTED, THIS IS ONLY INTENDED TO WORK @@ -38,18 +40,32 @@ void grpc_fork_support_init() { fork_support_enabled = 1; #else fork_support_enabled = 0; - char *env = gpr_getenv("GRPC_ENABLE_FORK_SUPPORT"); - if (env != NULL) { - static const char *truthy[] = {"yes", "Yes", "YES", "true", +#endif + bool env_var_set = false; + char* env = gpr_getenv("GRPC_ENABLE_FORK_SUPPORT"); + if (env != nullptr) { + static const char* truthy[] = {"yes", "Yes", "YES", "true", "True", "TRUE", "1"}; + static const char* falsey[] = {"no", "No", "NO", "false", + "False", "FALSE", "0"}; for (size_t i = 0; i < GPR_ARRAY_SIZE(truthy); i++) { if (0 == strcmp(env, truthy[i])) { fork_support_enabled = 1; + env_var_set = true; + break; + } + } + if (!env_var_set) { + for (size_t i = 0; i < GPR_ARRAY_SIZE(falsey); i++) { + if (0 == strcmp(env, falsey[i])) { + fork_support_enabled = 0; + env_var_set = true; + break; + } } } gpr_free(env); } -#endif if (override_fork_support_enabled != -1) { fork_support_enabled = override_fork_support_enabled; } diff --git a/Sources/CgRPC/src/core/lib/support/fork.h b/Sources/CgRPC/src/core/lib/gpr/fork.h similarity index 89% rename from Sources/CgRPC/src/core/lib/support/fork.h rename to Sources/CgRPC/src/core/lib/gpr/fork.h index 215d4214a..94c61bb83 100644 --- a/Sources/CgRPC/src/core/lib/support/fork.h +++ b/Sources/CgRPC/src/core/lib/gpr/fork.h @@ -16,8 +16,8 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_FORK_H -#define GRPC_CORE_LIB_SUPPORT_FORK_H +#ifndef GRPC_CORE_LIB_GPR_FORK_H +#define GRPC_CORE_LIB_GPR_FORK_H /* * NOTE: FORKING IS NOT GENERALLY SUPPORTED, THIS IS ONLY INTENDED TO WORK @@ -32,4 +32,4 @@ int grpc_fork_support_enabled(void); // environment variables/compile flags void grpc_enable_fork_support(int enable); -#endif /* GRPC_CORE_LIB_SUPPORT_FORK_H */ +#endif /* GRPC_CORE_LIB_GPR_FORK_H */ diff --git a/Sources/CgRPC/src/core/lib/support/host_port.c b/Sources/CgRPC/src/core/lib/gpr/host_port.cc similarity index 68% rename from Sources/CgRPC/src/core/lib/support/host_port.c rename to Sources/CgRPC/src/core/lib/gpr/host_port.cc index 3302e574a..a34e01cb5 100644 --- a/Sources/CgRPC/src/core/lib/support/host_port.c +++ b/Sources/CgRPC/src/core/lib/gpr/host_port.cc @@ -16,17 +16,20 @@ * */ -#include +#include + +#include "src/core/lib/gpr/host_port.h" #include #include #include #include -#include "src/core/lib/support/string.h" -int gpr_join_host_port(char **out, const char *host, int port) { - if (host[0] != '[' && strchr(host, ':') != NULL) { +#include "src/core/lib/gpr/string.h" + +int gpr_join_host_port(char** out, const char* host, int port) { + if (host[0] != '[' && strchr(host, ':') != nullptr) { /* IPv6 literals must be enclosed in brackets. 
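grpc_fork_support_init now accepts both truthy ("yes", "true", "1", ...) and falsey ("no", "false", "0", ...) spellings of GRPC_ENABLE_FORK_SUPPORT instead of only looking for truthy values. A small sketch of that kind of parse; env_bool and the default-value handling are illustrative:

#include <cstdlib>
#include <cstring>

// Parse a boolean environment variable, accepting the same truthy/falsey
// spellings as fork.cc above; returns `def` when unset or unrecognized.
static bool env_bool(const char* name, bool def) {
  const char* v = std::getenv(name);
  if (v == nullptr) return def;
  static const char* truthy[] = {"yes", "Yes", "YES", "true",
                                 "True", "TRUE", "1"};
  static const char* falsey[] = {"no", "No", "NO", "false",
                                 "False", "FALSE", "0"};
  for (const char* t : truthy) {
    if (std::strcmp(v, t) == 0) return true;
  }
  for (const char* f : falsey) {
    if (std::strcmp(v, f) == 0) return false;
  }
  return def;
}

// e.g. bool fork_support = env_bool("GRPC_ENABLE_FORK_SUPPORT", false);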
*/ return gpr_asprintf(out, "[%s]:%d", host, port); } else { @@ -35,24 +38,24 @@ int gpr_join_host_port(char **out, const char *host, int port) { } } -int gpr_split_host_port(const char *name, char **host, char **port) { - const char *host_start; +int gpr_split_host_port(const char* name, char** host, char** port) { + const char* host_start; size_t host_len; - const char *port_start; + const char* port_start; - *host = NULL; - *port = NULL; + *host = nullptr; + *port = nullptr; if (name[0] == '[') { /* Parse a bracketed host, typically an IPv6 literal. */ - const char *rbracket = strchr(name, ']'); - if (rbracket == NULL) { + const char* rbracket = strchr(name, ']'); + if (rbracket == nullptr) { /* Unmatched [ */ return 0; } if (rbracket[1] == '\0') { /* ] */ - port_start = NULL; + port_start = nullptr; } else if (rbracket[1] == ':') { /* ]: */ port_start = rbracket + 2; @@ -61,33 +64,33 @@ int gpr_split_host_port(const char *name, char **host, char **port) { return 0; } host_start = name + 1; - host_len = (size_t)(rbracket - host_start); - if (memchr(host_start, ':', host_len) == NULL) { + host_len = static_cast(rbracket - host_start); + if (memchr(host_start, ':', host_len) == nullptr) { /* Require all bracketed hosts to contain a colon, because a hostname or IPv4 address should never use brackets. */ return 0; } } else { - const char *colon = strchr(name, ':'); - if (colon != NULL && strchr(colon + 1, ':') == NULL) { + const char* colon = strchr(name, ':'); + if (colon != nullptr && strchr(colon + 1, ':') == nullptr) { /* Exactly 1 colon. Split into host:port. */ host_start = name; - host_len = (size_t)(colon - name); + host_len = static_cast(colon - name); port_start = colon + 1; } else { /* 0 or 2+ colons. Bare hostname or IPv6 litearal. */ host_start = name; host_len = strlen(name); - port_start = NULL; + port_start = nullptr; } } /* Allocate return values. */ - *host = (char *)gpr_malloc(host_len + 1); + *host = static_cast(gpr_malloc(host_len + 1)); memcpy(*host, host_start, host_len); (*host)[host_len] = '\0'; - if (port_start != NULL) { + if (port_start != nullptr) { *port = gpr_strdup(port_start); } diff --git a/Sources/CgRPC/include/grpc/support/host_port.h b/Sources/CgRPC/src/core/lib/gpr/host_port.h similarity index 81% rename from Sources/CgRPC/include/grpc/support/host_port.h rename to Sources/CgRPC/src/core/lib/gpr/host_port.h index 41592dfe2..0bf0960f8 100644 --- a/Sources/CgRPC/include/grpc/support/host_port.h +++ b/Sources/CgRPC/src/core/lib/gpr/host_port.h @@ -16,15 +16,11 @@ * */ -#ifndef GRPC_SUPPORT_HOST_PORT_H -#define GRPC_SUPPORT_HOST_PORT_H +#ifndef GRPC_CORE_LIB_GPR_HOST_PORT_H +#define GRPC_CORE_LIB_GPR_HOST_PORT_H #include -#ifdef __cplusplus -extern "C" { -#endif - /** Given a host and port, creates a newly-allocated string of the form "host:port" or "[ho:st]:port", depending on whether the host contains colons like an IPv6 literal. If the host is already bracketed, then additional @@ -35,17 +31,13 @@ extern "C" { destroyed using gpr_free(). In the unlikely event of an error, returns -1 and sets *out to NULL. */ -GPRAPI int gpr_join_host_port(char **out, const char *host, int port); +int gpr_join_host_port(char** out, const char* host, int port); /** Given a name in the form "host:port" or "[ho:st]:port", split into hostname and port number, into newly allocated strings, which must later be destroyed using gpr_free(). Return 1 on success, 0 on failure. Guarantees *host and *port == NULL on failure. 
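gpr_split_host_port above distinguishes three shapes: a bracketed host ("[h:o:st]" or "[h:o:st]:port", which must contain a colon), "host:port" with exactly one colon, and a bare host for zero or two-plus colons. A std::string sketch of the same rules, returning false where the original returns 0; split_host_port is an illustrative name:

#include <string>

// Split "host:port", "[ho:st]:port", "[ho:st]" or a bare host into its parts.
// Mirrors the rules in host_port.cc above; returns false on malformed input,
// leaving host/port empty. An absent port yields an empty `port`.
static bool split_host_port(const std::string& name, std::string* host,
                            std::string* port) {
  host->clear();
  port->clear();
  if (!name.empty() && name[0] == '[') {
    size_t rbracket = name.find(']');
    if (rbracket == std::string::npos) return false;     // unmatched '['
    std::string h = name.substr(1, rbracket - 1);
    if (h.find(':') == std::string::npos) return false;  // must look IPv6
    if (rbracket + 1 == name.size()) {
      // "[ho:st]" with no port
    } else if (name[rbracket + 1] == ':') {
      *port = name.substr(rbracket + 2);                  // "[ho:st]:port"
    } else {
      return false;                                       // trailing junk
    }
    *host = h;
    return true;
  }
  size_t colon = name.find(':');
  if (colon != std::string::npos &&
      name.find(':', colon + 1) == std::string::npos) {
    *host = name.substr(0, colon);                        // "host:port"
    *port = name.substr(colon + 1);
  } else {
    *host = name;                                         // bare host
  }
  return true;
}

// split_host_port("[::1]:443", &h, &p) -> h == "::1", p == "443"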
*/ -GPRAPI int gpr_split_host_port(const char *name, char **host, char **port); - -#ifdef __cplusplus -} -#endif +int gpr_split_host_port(const char* name, char** host, char** port); -#endif /* GRPC_SUPPORT_HOST_PORT_H */ +#endif /* GRPC_CORE_LIB_GPR_HOST_PORT_H */ diff --git a/Sources/CgRPC/src/core/lib/support/log.c b/Sources/CgRPC/src/core/lib/gpr/log.cc similarity index 71% rename from Sources/CgRPC/src/core/lib/support/log.c rename to Sources/CgRPC/src/core/lib/gpr/log.cc index fadb4d9a2..01ef112fb 100644 --- a/Sources/CgRPC/src/core/lib/support/log.c +++ b/Sources/CgRPC/src/core/lib/gpr/log.cc @@ -16,22 +16,23 @@ * */ +#include + #include #include #include -#include -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/string.h" #include #include -extern void gpr_default_log(gpr_log_func_args *args); +void gpr_default_log(gpr_log_func_args* args); static gpr_atm g_log_func = (gpr_atm)gpr_default_log; static gpr_atm g_min_severity_to_print = GPR_LOG_VERBOSITY_UNSET; -const char *gpr_log_severity_string(gpr_log_severity severity) { +const char* gpr_log_severity_string(gpr_log_severity severity) { switch (severity) { case GPR_LOG_SEVERITY_DEBUG: return "D"; @@ -43,9 +44,16 @@ const char *gpr_log_severity_string(gpr_log_severity severity) { GPR_UNREACHABLE_CODE(return "UNKNOWN"); } -void gpr_log_message(const char *file, int line, gpr_log_severity severity, - const char *message) { - if ((gpr_atm)severity < gpr_atm_no_barrier_load(&g_min_severity_to_print)) { +int gpr_should_log(gpr_log_severity severity) { + return static_cast(severity) >= + gpr_atm_no_barrier_load(&g_min_severity_to_print) + ? 1 + : 0; +} + +void gpr_log_message(const char* file, int line, gpr_log_severity severity, + const char* message) { + if (gpr_should_log(severity) == 0) { return; } @@ -64,17 +72,17 @@ void gpr_set_log_verbosity(gpr_log_severity min_severity_to_print) { } void gpr_log_verbosity_init() { - char *verbosity = NULL; - const char *insecure_getenv = gpr_getenv_silent("GRPC_VERBOSITY", &verbosity); + char* verbosity = nullptr; + const char* insecure_getenv = gpr_getenv_silent("GRPC_VERBOSITY", &verbosity); gpr_atm min_severity_to_print = GPR_LOG_SEVERITY_ERROR; - if (verbosity != NULL) { + if (verbosity != nullptr) { if (gpr_stricmp(verbosity, "DEBUG") == 0) { - min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_DEBUG; + min_severity_to_print = static_cast(GPR_LOG_SEVERITY_DEBUG); } else if (gpr_stricmp(verbosity, "INFO") == 0) { - min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_INFO; + min_severity_to_print = static_cast(GPR_LOG_SEVERITY_INFO); } else if (gpr_stricmp(verbosity, "ERROR") == 0) { - min_severity_to_print = (gpr_atm)GPR_LOG_SEVERITY_ERROR; + min_severity_to_print = static_cast(GPR_LOG_SEVERITY_ERROR); } gpr_free(verbosity); } @@ -83,7 +91,7 @@ void gpr_log_verbosity_init() { gpr_atm_no_barrier_store(&g_min_severity_to_print, min_severity_to_print); } - if (insecure_getenv != NULL) { + if (insecure_getenv != nullptr) { gpr_log(GPR_DEBUG, "Warning: insecure environment read function '%s' used", insecure_getenv); } diff --git a/Sources/CgRPC/src/core/lib/support/log_android.c b/Sources/CgRPC/src/core/lib/gpr/log_android.cc similarity index 82% rename from Sources/CgRPC/src/core/lib/support/log_android.c rename to Sources/CgRPC/src/core/lib/gpr/log_android.cc index 6f1cec51f..40ef4c640 100644 --- a/Sources/CgRPC/src/core/lib/support/log_android.c +++ b/Sources/CgRPC/src/core/lib/gpr/log_android.cc @@ 
-39,9 +39,13 @@ static android_LogPriority severity_to_log_priority(gpr_log_severity severity) { return ANDROID_LOG_DEFAULT; } -void gpr_log(const char *file, int line, gpr_log_severity severity, - const char *format, ...) { - char *message = NULL; +void gpr_log(const char* file, int line, gpr_log_severity severity, + const char* format, ...) { + /* Avoid message construction if gpr_log_message won't log */ + if (gpr_should_log(severity) == 0) { + return; + } + char* message = NULL; va_list args; va_start(args, format); vasprintf(&message, format, args); @@ -50,10 +54,10 @@ void gpr_log(const char *file, int line, gpr_log_severity severity, free(message); } -void gpr_default_log(gpr_log_func_args *args) { - char *final_slash; - const char *display_file; - char *output = NULL; +void gpr_default_log(gpr_log_func_args* args) { + const char* final_slash; + const char* display_file; + char* output = NULL; final_slash = strrchr(args->file, '/'); if (final_slash == NULL) diff --git a/Sources/CgRPC/src/core/lib/support/log_linux.c b/Sources/CgRPC/src/core/lib/gpr/log_linux.cc similarity index 82% rename from Sources/CgRPC/src/core/lib/support/log_linux.c rename to Sources/CgRPC/src/core/lib/gpr/log_linux.cc index 775501869..561276f0c 100644 --- a/Sources/CgRPC/src/core/lib/support/log_linux.c +++ b/Sources/CgRPC/src/core/lib/gpr/log_linux.cc @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -41,9 +42,13 @@ static long gettid(void) { return syscall(__NR_gettid); } -void gpr_log(const char *file, int line, gpr_log_severity severity, - const char *format, ...) { - char *message = NULL; +void gpr_log(const char* file, int line, gpr_log_severity severity, + const char* format, ...) { + /* Avoid message construction if gpr_log_message won't log */ + if (gpr_should_log(severity) == 0) { + return; + } + char* message = nullptr; va_list args; va_start(args, format); if (vasprintf(&message, format, args) == -1) { @@ -56,10 +61,10 @@ void gpr_log(const char *file, int line, gpr_log_severity severity, free(message); } -void gpr_default_log(gpr_log_func_args *args) { - const char *final_slash; - char *prefix; - const char *display_file; +void gpr_default_log(gpr_log_func_args* args) { + const char* final_slash; + char* prefix; + const char* display_file; char time_buffer[64]; time_t timer; gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME); @@ -67,9 +72,9 @@ void gpr_default_log(gpr_log_func_args *args) { static __thread long tid = 0; if (tid == 0) tid = gettid(); - timer = (time_t)now.tv_sec; + timer = static_cast(now.tv_sec); final_slash = strrchr(args->file, '/'); - if (final_slash == NULL) + if (final_slash == nullptr) display_file = args->file; else display_file = final_slash + 1; diff --git a/Sources/CgRPC/src/core/lib/support/log_posix.c b/Sources/CgRPC/src/core/lib/gpr/log_posix.cc similarity index 80% rename from Sources/CgRPC/src/core/lib/support/log_posix.c rename to Sources/CgRPC/src/core/lib/gpr/log_posix.cc index 8b376fce4..0acb22557 100644 --- a/Sources/CgRPC/src/core/lib/support/log_posix.c +++ b/Sources/CgRPC/src/core/lib/gpr/log_posix.cc @@ -27,28 +27,31 @@ #include #include #include -#include #include #include static intptr_t gettid(void) { return (intptr_t)pthread_self(); } -void gpr_log(const char *file, int line, gpr_log_severity severity, - const char *format, ...) { +void gpr_log(const char* file, int line, gpr_log_severity severity, + const char* format, ...) 
{ + /* Avoid message construction if gpr_log_message won't log */ + if (gpr_should_log(severity) == 0) { + return; + } char buf[64]; - char *allocated = NULL; - char *message = NULL; + char* allocated = nullptr; + char* message = nullptr; int ret; va_list args; va_start(args, format); ret = vsnprintf(buf, sizeof(buf), format, args); va_end(args); if (ret < 0) { - message = NULL; + message = nullptr; } else if ((size_t)ret <= sizeof(buf) - 1) { message = buf; } else { - message = allocated = gpr_malloc((size_t)ret + 1); + message = allocated = (char*)gpr_malloc((size_t)ret + 1); va_start(args, format); vsnprintf(message, (size_t)(ret + 1), format, args); va_end(args); @@ -57,9 +60,9 @@ void gpr_log(const char *file, int line, gpr_log_severity severity, gpr_free(allocated); } -void gpr_default_log(gpr_log_func_args *args) { - char *final_slash; - const char *display_file; +void gpr_default_log(gpr_log_func_args* args) { + const char* final_slash; + const char* display_file; char time_buffer[64]; time_t timer; gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME); @@ -67,7 +70,7 @@ void gpr_default_log(gpr_log_func_args *args) { timer = (time_t)now.tv_sec; final_slash = strrchr(args->file, '/'); - if (final_slash == NULL) + if (final_slash == nullptr) display_file = args->file; else display_file = final_slash + 1; @@ -79,7 +82,7 @@ void gpr_default_log(gpr_log_func_args *args) { strcpy(time_buffer, "error:strftime"); } - char *prefix; + char* prefix; gpr_asprintf(&prefix, "%s%s.%09d %7tu %s:%d]", gpr_log_severity_string(args->severity), time_buffer, (int)(now.tv_nsec), gettid(), display_file, args->line); diff --git a/Sources/CgRPC/src/core/lib/support/log_windows.c b/Sources/CgRPC/src/core/lib/gpr/log_windows.cc similarity index 83% rename from Sources/CgRPC/src/core/lib/support/log_windows.c rename to Sources/CgRPC/src/core/lib/gpr/log_windows.cc index 0fdab79ae..060be572b 100644 --- a/Sources/CgRPC/src/core/lib/support/log_windows.c +++ b/Sources/CgRPC/src/core/lib/gpr/log_windows.cc @@ -29,12 +29,17 @@ #include #include -#include "src/core/lib/support/string.h" -#include "src/core/lib/support/string_windows.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/string_windows.h" -void gpr_log(const char *file, int line, gpr_log_severity severity, - const char *format, ...) { - char *message = NULL; +void gpr_log(const char* file, int line, gpr_log_severity severity, + const char* format, ...) { + /* Avoid message construction if gpr_log_message won't log */ + if (gpr_should_log(severity) == 0) { + return; + } + + char* message = NULL; va_list args; int ret; @@ -47,7 +52,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity, } else { /* Allocate a new buffer, with space for the NUL terminator. */ size_t strp_buflen = (size_t)ret + 1; - message = gpr_malloc(strp_buflen); + message = (char*)gpr_malloc(strp_buflen); /* Print to the buffer. 
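The gpr_log implementations above share two patterns: they return early via gpr_should_log() before building the message, and they format with vsnprintf into a small stack buffer, falling back to a heap buffer sized from the return value only when the output did not fit. A compact sketch of that format-then-grow pattern; log_line and the stderr sink are illustrative:

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

// Format a message into a small stack buffer; if vsnprintf reports that the
// output was truncated, allocate exactly the required size and format again.
static void log_line(const char* fmt, ...) {
  char buf[64];
  char* allocated = nullptr;
  char* message = buf;

  va_list args;
  va_start(args, fmt);
  int needed = std::vsnprintf(buf, sizeof(buf), fmt, args);
  va_end(args);

  if (needed < 0) {
    return;  // formatting error
  }
  if (static_cast<size_t>(needed) >= sizeof(buf)) {
    allocated =
        static_cast<char*>(std::malloc(static_cast<size_t>(needed) + 1));
    if (allocated == nullptr) return;
    va_start(args, fmt);
    std::vsnprintf(allocated, static_cast<size_t>(needed) + 1, fmt, args);
    va_end(args);
    message = allocated;
  }

  std::fprintf(stderr, "%s\n", message);
  std::free(allocated);
}

// log_line("connected to %s:%d", "example.com", 443);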
*/ va_start(args, format); @@ -65,9 +70,9 @@ void gpr_log(const char *file, int line, gpr_log_severity severity, } /* Simple starter implementation */ -void gpr_default_log(gpr_log_func_args *args) { - char *final_slash; - const char *display_file; +void gpr_default_log(gpr_log_func_args* args) { + const char* final_slash; + const char* display_file; char time_buffer[64]; time_t timer; gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME); diff --git a/Sources/CgRPC/src/core/lib/gpr/mpscq.cc b/Sources/CgRPC/src/core/lib/gpr/mpscq.cc new file mode 100644 index 000000000..076a6bb03 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gpr/mpscq.cc @@ -0,0 +1,117 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/gpr/mpscq.h" + +#include + +void gpr_mpscq_init(gpr_mpscq* q) { + gpr_atm_no_barrier_store(&q->head, (gpr_atm)&q->stub); + q->tail = &q->stub; + gpr_atm_no_barrier_store(&q->stub.next, (gpr_atm)NULL); +} + +void gpr_mpscq_destroy(gpr_mpscq* q) { + GPR_ASSERT(gpr_atm_no_barrier_load(&q->head) == (gpr_atm)&q->stub); + GPR_ASSERT(q->tail == &q->stub); +} + +bool gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n) { + gpr_atm_no_barrier_store(&n->next, (gpr_atm)NULL); + gpr_mpscq_node* prev = + (gpr_mpscq_node*)gpr_atm_full_xchg(&q->head, (gpr_atm)n); + gpr_atm_rel_store(&prev->next, (gpr_atm)n); + return prev == &q->stub; +} + +gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q) { + bool empty; + return gpr_mpscq_pop_and_check_end(q, &empty); +} + +gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty) { + gpr_mpscq_node* tail = q->tail; + gpr_mpscq_node* next = (gpr_mpscq_node*)gpr_atm_acq_load(&tail->next); + if (tail == &q->stub) { + // indicates the list is actually (ephemerally) empty + if (next == nullptr) { + *empty = true; + return nullptr; + } + q->tail = next; + tail = next; + next = (gpr_mpscq_node*)gpr_atm_acq_load(&tail->next); + } + if (next != nullptr) { + *empty = false; + q->tail = next; + return tail; + } + gpr_mpscq_node* head = (gpr_mpscq_node*)gpr_atm_acq_load(&q->head); + if (tail != head) { + *empty = false; + // indicates a retry is in order: we're still adding + return nullptr; + } + gpr_mpscq_push(q, &q->stub); + next = (gpr_mpscq_node*)gpr_atm_acq_load(&tail->next); + if (next != nullptr) { + *empty = false; + q->tail = next; + return tail; + } + // indicates a retry is in order: we're still adding + *empty = false; + return nullptr; +} + +void gpr_locked_mpscq_init(gpr_locked_mpscq* q) { + gpr_mpscq_init(&q->queue); + gpr_mu_init(&q->mu); +} + +void gpr_locked_mpscq_destroy(gpr_locked_mpscq* q) { + gpr_mpscq_destroy(&q->queue); + gpr_mu_destroy(&q->mu); +} + +bool gpr_locked_mpscq_push(gpr_locked_mpscq* q, gpr_mpscq_node* n) { + return gpr_mpscq_push(&q->queue, n); +} + +gpr_mpscq_node* gpr_locked_mpscq_try_pop(gpr_locked_mpscq* q) { + if (gpr_mu_trylock(&q->mu)) { + gpr_mpscq_node* n = gpr_mpscq_pop(&q->queue); + gpr_mu_unlock(&q->mu); + return n; + } + return nullptr; +} + 
+gpr_mpscq_node* gpr_locked_mpscq_pop(gpr_locked_mpscq* q) { + gpr_mu_lock(&q->mu); + bool empty = false; + gpr_mpscq_node* n; + do { + n = gpr_mpscq_pop_and_check_end(&q->queue, &empty); + } while (n == nullptr && !empty); + gpr_mu_unlock(&q->mu); + return n; +} diff --git a/Sources/CgRPC/src/core/lib/gpr/mpscq.h b/Sources/CgRPC/src/core/lib/gpr/mpscq.h new file mode 100644 index 000000000..6b67880d1 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gpr/mpscq.h @@ -0,0 +1,86 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_GPR_MPSCQ_H +#define GRPC_CORE_LIB_GPR_MPSCQ_H + +#include + +#include +#include +#include +#include + +// Multiple-producer single-consumer lock free queue, based upon the +// implementation from Dmitry Vyukov here: +// http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue + +// List node (include this in a data structure at the top, and add application +// fields after it - to simulate inheritance) +typedef struct gpr_mpscq_node { + gpr_atm next; +} gpr_mpscq_node; + +// Actual queue type +typedef struct gpr_mpscq { + gpr_atm head; + // make sure head & tail don't share a cacheline + char padding[GPR_CACHELINE_SIZE]; + gpr_mpscq_node* tail; + gpr_mpscq_node stub; +} gpr_mpscq; + +void gpr_mpscq_init(gpr_mpscq* q); +void gpr_mpscq_destroy(gpr_mpscq* q); +// Push a node +// Thread safe - can be called from multiple threads concurrently +// Returns true if this was possibly the first node (may return true +// sporadically, will not return false sporadically) +bool gpr_mpscq_push(gpr_mpscq* q, gpr_mpscq_node* n); +// Pop a node (returns NULL if no node is ready - which doesn't indicate that +// the queue is empty!!) +// Thread compatible - can only be called from one thread at a time +gpr_mpscq_node* gpr_mpscq_pop(gpr_mpscq* q); +// Pop a node; sets *empty to true if the queue is empty, or false if it is not +gpr_mpscq_node* gpr_mpscq_pop_and_check_end(gpr_mpscq* q, bool* empty); + +// An mpscq with a lock: it's safe to pop from multiple threads, but doing +// only one thread will succeed concurrently +typedef struct gpr_locked_mpscq { + gpr_mpscq queue; + gpr_mu mu; +} gpr_locked_mpscq; + +void gpr_locked_mpscq_init(gpr_locked_mpscq* q); +void gpr_locked_mpscq_destroy(gpr_locked_mpscq* q); +// Push a node +// Thread safe - can be called from multiple threads concurrently +// Returns true if this was possibly the first node (may return true +// sporadically, will not return false sporadically) +bool gpr_locked_mpscq_push(gpr_locked_mpscq* q, gpr_mpscq_node* n); + +// Pop a node (returns NULL if no node is ready - which doesn't indicate that +// the queue is empty!!) +// Thread safe - can be called from multiple threads concurrently +gpr_mpscq_node* gpr_locked_mpscq_try_pop(gpr_locked_mpscq* q); + +// Pop a node. 
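gpr_mpscq above is the intrusive Vyukov multi-producer single-consumer queue: producers exchange the head pointer and then publish the next link, the single consumer walks from a stub node, and a null pop result means "nothing ready yet" rather than "the queue is empty". The sketch below mirrors that structure with std::atomic; it is an illustration of the algorithm, not the gpr_mpscq implementation itself.

#include <atomic>
#include <cassert>

struct Node {
  std::atomic<Node*> next{nullptr};
};

// Minimal intrusive MPSC queue: any thread may Push(), only one may Pop().
class MpscQueue {
 public:
  MpscQueue() : head_(&stub_), tail_(&stub_) {}

  // Multi-producer: link the node in with an atomic exchange on head_.
  void Push(Node* n) {
    n->next.store(nullptr, std::memory_order_relaxed);
    Node* prev = head_.exchange(n, std::memory_order_acq_rel);
    prev->next.store(n, std::memory_order_release);
  }

  // Single-consumer: advance tail_, skipping over the stub node.
  Node* Pop() {
    Node* tail = tail_;
    Node* next = tail->next.load(std::memory_order_acquire);
    if (tail == &stub_) {
      if (next == nullptr) return nullptr;  // empty (or push in progress)
      tail_ = next;
      tail = next;
      next = tail->next.load(std::memory_order_acquire);
    }
    if (next != nullptr) {
      tail_ = next;
      return tail;
    }
    if (tail != head_.load(std::memory_order_acquire)) {
      return nullptr;  // a producer is mid-push; caller should retry
    }
    Push(&stub_);  // re-insert the stub so tail_ is never left dangling
    next = tail->next.load(std::memory_order_acquire);
    if (next != nullptr) {
      tail_ = next;
      return tail;
    }
    return nullptr;
  }

 private:
  std::atomic<Node*> head_;
  Node* tail_;  // consumer-owned
  Node stub_;   // dummy node so the list is never empty
};

int main() {
  MpscQueue q;
  Node a, b;
  q.Push(&a);
  q.Push(&b);
  assert(q.Pop() == &a);
  assert(q.Pop() == &b);
  assert(q.Pop() == nullptr);
}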
Returns NULL only if the queue was empty at some point after +// calling this function +gpr_mpscq_node* gpr_locked_mpscq_pop(gpr_locked_mpscq* q); + +#endif /* GRPC_CORE_LIB_GPR_MPSCQ_H */ diff --git a/Sources/CgRPC/src/core/lib/support/murmur_hash.c b/Sources/CgRPC/src/core/lib/gpr/murmur_hash.cc similarity index 71% rename from Sources/CgRPC/src/core/lib/support/murmur_hash.c rename to Sources/CgRPC/src/core/lib/gpr/murmur_hash.cc index f06b970de..cf25abf40 100644 --- a/Sources/CgRPC/src/core/lib/support/murmur_hash.c +++ b/Sources/CgRPC/src/core/lib/gpr/murmur_hash.cc @@ -16,7 +16,9 @@ * */ -#include "src/core/lib/support/murmur_hash.h" +#include + +#include "src/core/lib/gpr/murmur_hash.h" #include @@ -29,23 +31,20 @@ (h) *= 0xc2b2ae35; \ (h) ^= (h) >> 16; -uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed) { - const uint8_t *data = (const uint8_t *)key; - const size_t nblocks = len / 4; - int i; - +uint32_t gpr_murmur_hash3(const void* key, size_t len, uint32_t seed) { uint32_t h1 = seed; uint32_t k1; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; - const uint32_t *blocks = ((const uint32_t *)key) + nblocks; - const uint8_t *tail = (const uint8_t *)(data + nblocks * 4); + const uint8_t* keyptr = static_cast(key); + const size_t bsize = sizeof(k1); + const size_t nblocks = len / bsize; /* body */ - for (i = -(int)nblocks; i; i++) { - memcpy(&k1, blocks + i, sizeof(uint32_t)); + for (size_t i = 0; i < nblocks; i++, keyptr += bsize) { + memcpy(&k1, keyptr, bsize); k1 *= c1; k1 = ROTL32(k1, 15); @@ -61,13 +60,13 @@ uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed) { /* tail */ switch (len & 3) { case 3: - k1 ^= ((uint32_t)tail[2]) << 16; + k1 ^= (static_cast(keyptr[2])) << 16; /* fallthrough */ case 2: - k1 ^= ((uint32_t)tail[1]) << 8; + k1 ^= (static_cast(keyptr[1])) << 8; /* fallthrough */ case 1: - k1 ^= tail[0]; + k1 ^= keyptr[0]; k1 *= c1; k1 = ROTL32(k1, 15); k1 *= c2; @@ -75,7 +74,7 @@ uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed) { }; /* finalization */ - h1 ^= (uint32_t)len; + h1 ^= static_cast(len); FMIX32(h1); return h1; } diff --git a/Sources/CgRPC/src/core/lib/support/murmur_hash.h b/Sources/CgRPC/src/core/lib/gpr/murmur_hash.h similarity index 78% rename from Sources/CgRPC/src/core/lib/support/murmur_hash.h rename to Sources/CgRPC/src/core/lib/gpr/murmur_hash.h index 7510b4d09..8004889a9 100644 --- a/Sources/CgRPC/src/core/lib/support/murmur_hash.h +++ b/Sources/CgRPC/src/core/lib/gpr/murmur_hash.h @@ -16,14 +16,14 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_MURMUR_HASH_H -#define GRPC_CORE_LIB_SUPPORT_MURMUR_HASH_H +#ifndef GRPC_CORE_LIB_GPR_MURMUR_HASH_H +#define GRPC_CORE_LIB_GPR_MURMUR_HASH_H #include #include /* compute the hash of key (length len) */ -uint32_t gpr_murmur_hash3(const void *key, size_t len, uint32_t seed); +uint32_t gpr_murmur_hash3(const void* key, size_t len, uint32_t seed); -#endif /* GRPC_CORE_LIB_SUPPORT_MURMUR_HASH_H */ +#endif /* GRPC_CORE_LIB_GPR_MURMUR_HASH_H */ diff --git a/Sources/CgRPC/src/core/lib/support/spinlock.h b/Sources/CgRPC/src/core/lib/gpr/spinlock.h similarity index 79% rename from Sources/CgRPC/src/core/lib/support/spinlock.h rename to Sources/CgRPC/src/core/lib/gpr/spinlock.h index 37adda11b..9f35530a8 100644 --- a/Sources/CgRPC/src/core/lib/support/spinlock.h +++ b/Sources/CgRPC/src/core/lib/gpr/spinlock.h @@ -16,22 +16,31 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_SPINLOCK_H -#define GRPC_CORE_LIB_SUPPORT_SPINLOCK_H +#ifndef GRPC_CORE_LIB_GPR_SPINLOCK_H 
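The murmur_hash.cc rewrite walks the key forward with memcpy instead of the old negative-index block pointer, but processes the same bytes with the same MurmurHash3 constants, so hash values should be unchanged. One natural use of such a hash is cheap sharding; the sketch below assumes it is built inside the CgRPC tree so the murmur_hash.h include resolves, and the bucket count and seed are arbitrary choices.

#include <cstdint>
#include <cstdio>
#include <cstring>

#include "src/core/lib/gpr/murmur_hash.h"

// Map a string key onto one of `nbuckets` shards using gpr_murmur_hash3.
// The seed only needs to be consistent across calls for the mapping to be
// stable; 0xdeadbeef is an arbitrary choice.
static size_t shard_for_key(const char* key, size_t nbuckets) {
  uint32_t h = gpr_murmur_hash3(key, std::strlen(key), 0xdeadbeef);
  return h % nbuckets;
}

int main() {
  std::printf("shard=%zu\n", shard_for_key("grpc.default_authority", 8));
}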
+#define GRPC_CORE_LIB_GPR_SPINLOCK_H + +#include #include /* Simple spinlock. No backoff strategy, gpr_spinlock_lock is almost always a concurrency code smell. */ -typedef struct { gpr_atm atm; } gpr_spinlock; +typedef struct { + gpr_atm atm; +} gpr_spinlock; +#ifdef __cplusplus +#define GPR_SPINLOCK_INITIALIZER (gpr_spinlock{0}) +#else #define GPR_SPINLOCK_INITIALIZER ((gpr_spinlock){0}) +#endif #define GPR_SPINLOCK_STATIC_INITIALIZER \ { 0 } + #define gpr_spinlock_trylock(lock) (gpr_atm_acq_cas(&(lock)->atm, 0, 1)) #define gpr_spinlock_unlock(lock) (gpr_atm_rel_store(&(lock)->atm, 0)) #define gpr_spinlock_lock(lock) \ do { \ } while (!gpr_spinlock_trylock((lock))) -#endif /* GRPC_CORE_LIB_SUPPORT_SPINLOCK_H */ +#endif /* GRPC_CORE_LIB_GPR_SPINLOCK_H */ diff --git a/Sources/CgRPC/src/core/lib/support/string.c b/Sources/CgRPC/src/core/lib/gpr/string.cc similarity index 61% rename from Sources/CgRPC/src/core/lib/support/string.c rename to Sources/CgRPC/src/core/lib/gpr/string.cc index 6b172df82..ef2a6900b 100644 --- a/Sources/CgRPC/src/core/lib/support/string.c +++ b/Sources/CgRPC/src/core/lib/gpr/string.cc @@ -16,7 +16,9 @@ * */ -#include "src/core/lib/support/string.h" +#include + +#include "src/core/lib/gpr/string.h" #include #include @@ -26,19 +28,20 @@ #include #include -#include -#include +#include + +#include "src/core/lib/gpr/useful.h" -char *gpr_strdup(const char *src) { - char *dst; +char* gpr_strdup(const char* src) { + char* dst; size_t len; if (!src) { - return NULL; + return nullptr; } len = strlen(src) + 1; - dst = (char *)gpr_malloc(len); + dst = static_cast(gpr_malloc(len)); memcpy(dst, src, len); @@ -48,28 +51,28 @@ char *gpr_strdup(const char *src) { typedef struct { size_t capacity; size_t length; - char *data; + char* data; } dump_out; static dump_out dump_out_create(void) { - dump_out r = {0, 0, NULL}; + dump_out r = {0, 0, nullptr}; return r; } -static void dump_out_append(dump_out *out, char c) { +static void dump_out_append(dump_out* out, char c) { if (out->length == out->capacity) { out->capacity = GPR_MAX(8, 2 * out->capacity); - out->data = (char *)gpr_realloc(out->data, out->capacity); + out->data = static_cast(gpr_realloc(out->data, out->capacity)); } out->data[out->length++] = c; } -static void hexdump(dump_out *out, const char *buf, size_t len) { - static const char *hex = "0123456789abcdef"; +static void hexdump(dump_out* out, const char* buf, size_t len) { + static const char* hex = "0123456789abcdef"; - const uint8_t *const beg = (const uint8_t *)buf; - const uint8_t *const end = beg + len; - const uint8_t *cur; + const uint8_t* const beg = reinterpret_cast(buf); + const uint8_t* const end = beg + len; + const uint8_t* cur; for (cur = beg; cur != end; ++cur) { if (cur != beg) dump_out_append(out, ' '); @@ -78,24 +81,24 @@ static void hexdump(dump_out *out, const char *buf, size_t len) { } } -static void asciidump(dump_out *out, const char *buf, size_t len) { - const uint8_t *const beg = (const uint8_t *)buf; - const uint8_t *const end = beg + len; - const uint8_t *cur; +static void asciidump(dump_out* out, const char* buf, size_t len) { + const uint8_t* const beg = reinterpret_cast(buf); + const uint8_t* const end = beg + len; + const uint8_t* cur; int out_was_empty = (out->length == 0); if (!out_was_empty) { dump_out_append(out, ' '); dump_out_append(out, '\''); } for (cur = beg; cur != end; ++cur) { - dump_out_append(out, (char)(isprint(*cur) ? *(char *)cur : '.')); + dump_out_append(out, (isprint(*cur) ? 
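gpr_spinlock above is a single atomic word: trylock is an acquire compare-and-swap from 0 to 1, unlock is a release store of 0, and gpr_spinlock_lock simply spins on trylock with no backoff (hence the code-smell warning in the header). An equivalent sketch with std::atomic; SpinLock is an illustrative name:

#include <atomic>

// Minimal test-and-set spinlock with the same semantics as gpr_spinlock:
// no backoff and no fairness, so only suitable for very short critical
// sections.
class SpinLock {
 public:
  bool TryLock() {
    int expected = 0;
    return word_.compare_exchange_strong(expected, 1,
                                         std::memory_order_acquire);
  }
  void Lock() {
    while (!TryLock()) {
      // busy-wait; a real implementation would add backoff or yield
    }
  }
  void Unlock() { word_.store(0, std::memory_order_release); }

 private:
  std::atomic<int> word_{0};
};

// SpinLock mu; mu.Lock(); /* critical section */ mu.Unlock();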
*(char*)cur : '.')); } if (!out_was_empty) { dump_out_append(out, '\''); } } -char *gpr_dump(const char *buf, size_t len, uint32_t flags) { +char* gpr_dump(const char* buf, size_t len, uint32_t flags) { dump_out out = dump_out_create(); if (flags & GPR_DUMP_HEX) { hexdump(&out, buf, len); @@ -107,7 +110,7 @@ char *gpr_dump(const char *buf, size_t len, uint32_t flags) { return out.data; } -int gpr_parse_bytes_to_uint32(const char *buf, size_t len, uint32_t *result) { +int gpr_parse_bytes_to_uint32(const char* buf, size_t len, uint32_t* result) { uint32_t out = 0; uint32_t new_val; size_t i; @@ -116,7 +119,7 @@ int gpr_parse_bytes_to_uint32(const char *buf, size_t len, uint32_t *result) { for (i = 0; i < len; i++) { if (buf[i] < '0' || buf[i] > '9') return 0; /* bad char */ - new_val = 10 * out + (uint32_t)(buf[i] - '0'); + new_val = 10 * out + static_cast(buf[i] - '0'); if (new_val < out) return 0; /* overflow */ out = new_val; } @@ -125,7 +128,7 @@ int gpr_parse_bytes_to_uint32(const char *buf, size_t len, uint32_t *result) { return 1; } -void gpr_reverse_bytes(char *str, int len) { +void gpr_reverse_bytes(char* str, int len) { char *p1, *p2; for (p1 = str, p2 = str + len - 1; p2 > p1; ++p1, --p2) { char temp = *p1; @@ -134,7 +137,7 @@ void gpr_reverse_bytes(char *str, int len) { } } -int gpr_ltoa(long value, char *string) { +int gpr_ltoa(long value, char* string) { long sign; int i = 0; @@ -146,7 +149,7 @@ int gpr_ltoa(long value, char *string) { sign = value < 0 ? -1 : 1; while (value) { - string[i++] = (char)('0' + sign * (value % 10)); + string[i++] = static_cast('0' + sign * (value % 10)); value /= 10; } if (sign < 0) string[i++] = '-'; @@ -155,7 +158,7 @@ int gpr_ltoa(long value, char *string) { return i; } -int int64_ttoa(int64_t value, char *string) { +int int64_ttoa(int64_t value, char* string) { int64_t sign; int i = 0; @@ -167,7 +170,7 @@ int int64_ttoa(int64_t value, char *string) { sign = value < 0 ? -1 : 1; while (value) { - string[i++] = (char)('0' + sign * (value % 10)); + string[i++] = static_cast('0' + sign * (value % 10)); value /= 10; } if (sign < 0) string[i++] = '-'; @@ -176,33 +179,33 @@ int int64_ttoa(int64_t value, char *string) { return i; } -int gpr_parse_nonnegative_int(const char *value) { - char *end; +int gpr_parse_nonnegative_int(const char* value) { + char* end; long result = strtol(value, &end, 0); if (*end != '\0' || result < 0 || result > INT_MAX) return -1; - return (int)result; + return static_cast(result); } -char *gpr_leftpad(const char *str, char flag, size_t length) { +char* gpr_leftpad(const char* str, char flag, size_t length) { const size_t str_length = strlen(str); const size_t out_length = str_length > length ? 
str_length : length; - char *out = (char *)gpr_malloc(out_length + 1); + char* out = static_cast(gpr_malloc(out_length + 1)); memset(out, flag, out_length - str_length); memcpy(out + out_length - str_length, str, str_length); out[out_length] = 0; return out; } -char *gpr_strjoin(const char **strs, size_t nstrs, size_t *final_length) { +char* gpr_strjoin(const char** strs, size_t nstrs, size_t* final_length) { return gpr_strjoin_sep(strs, nstrs, "", final_length); } -char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep, - size_t *final_length) { +char* gpr_strjoin_sep(const char** strs, size_t nstrs, const char* sep, + size_t* final_length) { const size_t sep_len = strlen(sep); size_t out_length = 0; size_t i; - char *out; + char* out; for (i = 0; i < nstrs; i++) { out_length += strlen(strs[i]); } @@ -210,7 +213,7 @@ char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep, if (nstrs > 0) { out_length += sep_len * (nstrs - 1); /* separators */ } - out = (char *)gpr_malloc(out_length); + out = static_cast(gpr_malloc(out_length)); out_length = 0; for (i = 0; i < nstrs; i++) { const size_t slen = strlen(strs[i]); @@ -222,15 +225,15 @@ char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep, out_length += slen; } out[out_length] = 0; - if (final_length != NULL) { + if (final_length != nullptr) { *final_length = out_length; } return out; } -void gpr_strvec_init(gpr_strvec *sv) { memset(sv, 0, sizeof(*sv)); } +void gpr_strvec_init(gpr_strvec* sv) { memset(sv, 0, sizeof(*sv)); } -void gpr_strvec_destroy(gpr_strvec *sv) { +void gpr_strvec_destroy(gpr_strvec* sv) { size_t i; for (i = 0; i < sv->count; i++) { gpr_free(sv->strs[i]); @@ -238,19 +241,20 @@ void gpr_strvec_destroy(gpr_strvec *sv) { gpr_free(sv->strs); } -void gpr_strvec_add(gpr_strvec *sv, char *str) { +void gpr_strvec_add(gpr_strvec* sv, char* str) { if (sv->count == sv->capacity) { sv->capacity = GPR_MAX(sv->capacity + 8, sv->capacity * 2); - sv->strs = (char **)gpr_realloc(sv->strs, sizeof(char *) * sv->capacity); + sv->strs = static_cast( + gpr_realloc(sv->strs, sizeof(char*) * sv->capacity)); } sv->strs[sv->count++] = str; } -char *gpr_strvec_flatten(gpr_strvec *sv, size_t *final_length) { - return gpr_strjoin((const char **)sv->strs, sv->count, final_length); +char* gpr_strvec_flatten(gpr_strvec* sv, size_t* final_length) { + return gpr_strjoin((const char**)sv->strs, sv->count, final_length); } -int gpr_stricmp(const char *a, const char *b) { +int gpr_stricmp(const char* a, const char* b) { int ca, cb; do { ca = tolower(*a); @@ -261,23 +265,24 @@ int gpr_stricmp(const char *a, const char *b) { return ca - cb; } -static void add_string_to_split(const char *beg, const char *end, char ***strs, - size_t *nstrs, size_t *capstrs) { - char *out = (char *)gpr_malloc((size_t)(end - beg) + 1); - memcpy(out, beg, (size_t)(end - beg)); +static void add_string_to_split(const char* beg, const char* end, char*** strs, + size_t* nstrs, size_t* capstrs) { + char* out = + static_cast(gpr_malloc(static_cast(end - beg) + 1)); + memcpy(out, beg, static_cast(end - beg)); out[end - beg] = 0; if (*nstrs == *capstrs) { *capstrs = GPR_MAX(8, 2 * *capstrs); - *strs = (char **)gpr_realloc(*strs, sizeof(*strs) * *capstrs); + *strs = static_cast(gpr_realloc(*strs, sizeof(*strs) * *capstrs)); } (*strs)[*nstrs] = out; ++*nstrs; } -void gpr_string_split(const char *input, const char *sep, char ***strs, - size_t *nstrs) { - const char *next; - *strs = NULL; +void gpr_string_split(const char* input, const char* sep, 
char*** strs, + size_t* nstrs) { + const char* next; + *strs = nullptr; *nstrs = 0; size_t capstrs = 0; while ((next = strstr(input, sep))) { @@ -287,24 +292,24 @@ void gpr_string_split(const char *input, const char *sep, char ***strs, add_string_to_split(input, input + strlen(input), strs, nstrs, &capstrs); } -void *gpr_memrchr(const void *s, int c, size_t n) { - if (s == NULL) return NULL; - char *b = (char *)s; +void* gpr_memrchr(const void* s, int c, size_t n) { + if (s == nullptr) return nullptr; + char* b = (char*)s; size_t i; for (i = 0; i < n; i++) { if (b[n - i - 1] == c) { return &b[n - i - 1]; } } - return NULL; + return nullptr; } -bool gpr_is_true(const char *s) { +bool gpr_is_true(const char* s) { size_t i; - if (s == NULL) { + if (s == nullptr) { return false; } - static const char *truthy[] = {"yes", "true", "1"}; + static const char* truthy[] = {"yes", "true", "1"}; for (i = 0; i < GPR_ARRAY_SIZE(truthy); i++) { if (0 == gpr_stricmp(s, truthy[i])) { return true; diff --git a/Sources/CgRPC/src/core/lib/support/string.h b/Sources/CgRPC/src/core/lib/gpr/string.h similarity index 69% rename from Sources/CgRPC/src/core/lib/support/string.h rename to Sources/CgRPC/src/core/lib/gpr/string.h index 5a56fa3a0..2e8a4898d 100644 --- a/Sources/CgRPC/src/core/lib/support/string.h +++ b/Sources/CgRPC/src/core/lib/gpr/string.h @@ -16,17 +16,13 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_STRING_H -#define GRPC_CORE_LIB_SUPPORT_STRING_H - -#include -#include +#ifndef GRPC_CORE_LIB_GPR_STRING_H +#define GRPC_CORE_LIB_GPR_STRING_H #include -#ifdef __cplusplus -extern "C" { -#endif +#include +#include /* String utility functions */ @@ -36,12 +32,12 @@ extern "C" { /* Converts array buf, of length len, into a C string according to the flags. Result should be freed with gpr_free() */ -char *gpr_dump(const char *buf, size_t len, uint32_t flags); +char* gpr_dump(const char* buf, size_t len, uint32_t flags); /* Parses an array of bytes into an integer (base 10). Returns 1 on success, 0 on failure. */ -int gpr_parse_bytes_to_uint32(const char *data, size_t length, - uint32_t *result); +int gpr_parse_bytes_to_uint32(const char* data, size_t length, + uint32_t* result); /* Minimum buffer size for calling ltoa */ #define GPR_LTOA_MIN_BUFSIZE (3 * sizeof(long)) @@ -49,7 +45,7 @@ int gpr_parse_bytes_to_uint32(const char *data, size_t length, /* Convert a long to a string in base 10; returns the length of the output string (or 0 on failure). output must be at least GPR_LTOA_MIN_BUFSIZE bytes long. */ -int gpr_ltoa(long value, char *output); +int gpr_ltoa(long value, char* output); /* Minimum buffer size for calling int64toa */ #define GPR_INT64TOA_MIN_BUFSIZE (3 * sizeof(int64_t)) @@ -59,58 +55,55 @@ output string (or 0 on failure). output must be at least GPR_INT64TOA_MIN_BUFSIZE bytes long. NOTE: This function ensures sufficient bit width even on Win x64, where long is 32bit is size.*/ -int int64_ttoa(int64_t value, char *output); +int int64_ttoa(int64_t value, char* output); // Parses a non-negative number from a value string. Returns -1 on error. -int gpr_parse_nonnegative_int(const char *value); +int gpr_parse_nonnegative_int(const char* value); /* Reverse a run of bytes */ -void gpr_reverse_bytes(char *str, int len); +void gpr_reverse_bytes(char* str, int len); /* Pad a string with flag characters. The given length specifies the minimum field width. The input string is never truncated. 
*/ -char *gpr_leftpad(const char *str, char flag, size_t length); +char* gpr_leftpad(const char* str, char flag, size_t length); /* Join a set of strings, returning the resulting string. Total combined length (excluding null terminator) is returned in total_length if it is non-null. */ -char *gpr_strjoin(const char **strs, size_t nstrs, size_t *total_length); +char* gpr_strjoin(const char** strs, size_t nstrs, size_t* total_length); /* Join a set of strings using a separator, returning the resulting string. Total combined length (excluding null terminator) is returned in total_length if it is non-null. */ -char *gpr_strjoin_sep(const char **strs, size_t nstrs, const char *sep, - size_t *total_length); +char* gpr_strjoin_sep(const char** strs, size_t nstrs, const char* sep, + size_t* total_length); -void gpr_string_split(const char *input, const char *sep, char ***strs, - size_t *nstrs); +void gpr_string_split(const char* input, const char* sep, char*** strs, + size_t* nstrs); /* A vector of strings... for building up a final string one piece at a time */ typedef struct { - char **strs; + char** strs; size_t count; size_t capacity; } gpr_strvec; /* Initialize/destroy */ -void gpr_strvec_init(gpr_strvec *strs); -void gpr_strvec_destroy(gpr_strvec *strs); +void gpr_strvec_init(gpr_strvec* strs); +void gpr_strvec_destroy(gpr_strvec* strs); /* Add a string to a strvec, takes ownership of the string */ -void gpr_strvec_add(gpr_strvec *strs, char *add); +void gpr_strvec_add(gpr_strvec* strs, char* add); /* Return a joined string with all added substrings, optionally setting total_length as per gpr_strjoin */ -char *gpr_strvec_flatten(gpr_strvec *strs, size_t *total_length); +char* gpr_strvec_flatten(gpr_strvec* strs, size_t* total_length); /** Case insensitive string comparison... return <0 if lower(a)0 if lower(a)>lower(b) */ -int gpr_stricmp(const char *a, const char *b); +int gpr_stricmp(const char* a, const char* b); -void *gpr_memrchr(const void *s, int c, size_t n); +void* gpr_memrchr(const void* s, int c, size_t n); /** Return true if lower(s) equals "true", "yes" or "1", otherwise false. */ -bool gpr_is_true(const char *s); -#ifdef __cplusplus -} -#endif +bool gpr_is_true(const char* s); -#endif /* GRPC_CORE_LIB_SUPPORT_STRING_H */ +#endif /* GRPC_CORE_LIB_GPR_STRING_H */ diff --git a/Sources/CgRPC/src/core/lib/support/string_posix.c b/Sources/CgRPC/src/core/lib/gpr/string_posix.cc similarity index 83% rename from Sources/CgRPC/src/core/lib/support/string_posix.c rename to Sources/CgRPC/src/core/lib/gpr/string_posix.cc index e768faf73..d32775fb3 100644 --- a/Sources/CgRPC/src/core/lib/support/string_posix.c +++ b/Sources/CgRPC/src/core/lib/gpr/string_posix.cc @@ -25,8 +25,9 @@ #include #include +#include -int gpr_asprintf(char **strp, const char *format, ...) { +int gpr_asprintf(char** strp, const char* format, ...) { va_list args; int ret; char buf[64]; @@ -37,13 +38,13 @@ int gpr_asprintf(char **strp, const char *format, ...) { ret = vsnprintf(buf, sizeof(buf), format, args); va_end(args); if (ret < 0) { - *strp = NULL; + *strp = nullptr; return -1; } /* Allocate a new buffer, with space for the NUL terminator. */ - strp_buflen = (size_t)ret + 1; - if ((*strp = (char *)gpr_malloc(strp_buflen)) == NULL) { + strp_buflen = static_cast(ret) + 1; + if ((*strp = static_cast(gpr_malloc(strp_buflen))) == nullptr) { /* This shouldn't happen, because gpr_malloc() calls abort(). */ return -1; } @@ -58,13 +59,13 @@ int gpr_asprintf(char **strp, const char *format, ...) 
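For reference, the string.h/string.cc hunks above keep the gpr_strvec and gpr_strjoin_sep contracts intact while converting the implementation to C++. A small usage sketch, assuming the public <grpc/support/alloc.h> and <grpc/support/string_util.h> headers for gpr_free/gpr_strdup; the strings joined here are made up:

// Hedged sketch of gpr_strvec and gpr_strjoin_sep from the hunks above.
#include <stdio.h>

#include <grpc/support/alloc.h>        // gpr_free
#include <grpc/support/string_util.h>  // gpr_strdup
#include "src/core/lib/gpr/string.h"   // gpr_strvec, gpr_strjoin_sep

int main() {
  gpr_strvec v;
  gpr_strvec_init(&v);
  // gpr_strvec_add() takes ownership, so hand it heap-allocated copies.
  gpr_strvec_add(&v, gpr_strdup("ipv4"));
  gpr_strvec_add(&v, gpr_strdup("127.0.0.1"));
  size_t len;
  char* joined = gpr_strvec_flatten(&v, &len);  // "ipv4127.0.0.1"
  printf("%s (%zu bytes)\n", joined, len);
  gpr_free(joined);
  gpr_strvec_destroy(&v);

  // The separator-aware variant works on a plain array and does not take
  // ownership of its inputs.
  const char* parts[] = {"ipv4", "127.0.0.1", "50051"};
  char* uri = gpr_strjoin_sep(parts, 3, ":", nullptr);  // "ipv4:127.0.0.1:50051"
  printf("%s\n", uri);
  gpr_free(uri);
  return 0;
}

Both helpers return gpr_malloc()-owned buffers, which is why the sketch releases them with gpr_free().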
{ va_start(args, format); ret = vsnprintf(*strp, strp_buflen, format, args); va_end(args); - if ((size_t)ret == strp_buflen - 1) { + if (static_cast(ret) == strp_buflen - 1) { return ret; } /* This should never happen. */ gpr_free(*strp); - *strp = NULL; + *strp = nullptr; return -1; } diff --git a/Sources/CgRPC/src/core/lib/support/string_util_windows.c b/Sources/CgRPC/src/core/lib/gpr/string_util_windows.cc similarity index 81% rename from Sources/CgRPC/src/core/lib/support/string_util_windows.c rename to Sources/CgRPC/src/core/lib/gpr/string_util_windows.cc index 2a0340444..8c8c99cd2 100644 --- a/Sources/CgRPC/src/core/lib/support/string_util_windows.c +++ b/Sources/CgRPC/src/core/lib/gpr/string_util_windows.cc @@ -26,15 +26,18 @@ anything else, especially strsafe.h. */ #include +#include #include #include #include #include #include +#include #include -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/string_windows.h" #if defined UNICODE || defined _UNICODE LPTSTR @@ -42,7 +45,7 @@ gpr_char_to_tchar(LPCSTR input) { LPTSTR ret; int needed = MultiByteToWideChar(CP_UTF8, 0, input, -1, NULL, 0); if (needed <= 0) return NULL; - ret = gpr_malloc((unsigned)needed * sizeof(TCHAR)); + ret = (LPTSTR)gpr_malloc((unsigned)needed * sizeof(TCHAR)); MultiByteToWideChar(CP_UTF8, 0, input, -1, ret, needed); return ret; } @@ -52,19 +55,19 @@ gpr_tchar_to_char(LPCTSTR input) { LPSTR ret; int needed = WideCharToMultiByte(CP_UTF8, 0, input, -1, NULL, 0, NULL, NULL); if (needed <= 0) return NULL; - ret = gpr_malloc((unsigned)needed); + ret = (LPSTR)gpr_malloc((unsigned)needed); WideCharToMultiByte(CP_UTF8, 0, input, -1, ret, needed, NULL, NULL); return ret; } #else -char *gpr_tchar_to_char(LPTSTR input) { return gpr_strdup(input); } +LPSTR gpr_tchar_to_char(LPCTSTR input) { return (LPSTR)gpr_strdup(input); } -char *gpr_char_to_tchar(LPTSTR input) { return gpr_strdup(input); } +LPTSTR gpr_char_to_tchar(LPCTSTR input) { return (LPTSTR)gpr_strdup(input); } #endif -char *gpr_format_message(int messageid) { +char* gpr_format_message(int messageid) { LPTSTR tmessage; - char *message; + char* message; DWORD status = FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, diff --git a/Sources/CgRPC/src/core/lib/support/string_windows.c b/Sources/CgRPC/src/core/lib/gpr/string_windows.cc similarity index 88% rename from Sources/CgRPC/src/core/lib/support/string_windows.c rename to Sources/CgRPC/src/core/lib/gpr/string_windows.cc index 50278d955..25bfd412e 100644 --- a/Sources/CgRPC/src/core/lib/support/string_windows.c +++ b/Sources/CgRPC/src/core/lib/gpr/string_windows.cc @@ -27,10 +27,11 @@ #include #include +#include -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" -int gpr_asprintf(char **strp, const char *format, ...) { +int gpr_asprintf(char** strp, const char* format, ...) { va_list args; int ret; size_t strp_buflen; @@ -46,7 +47,7 @@ int gpr_asprintf(char **strp, const char *format, ...) { /* Allocate a new buffer, with space for the NUL terminator. */ strp_buflen = (size_t)ret + 1; - if ((*strp = gpr_malloc(strp_buflen)) == NULL) { + if ((*strp = (char*)gpr_malloc(strp_buflen)) == NULL) { /* This shouldn't happen, because gpr_malloc() calls abort(). 
*/ return -1; } diff --git a/Sources/CgRPC/src/core/lib/support/string_windows.h b/Sources/CgRPC/src/core/lib/gpr/string_windows.h similarity index 85% rename from Sources/CgRPC/src/core/lib/support/string_windows.h rename to Sources/CgRPC/src/core/lib/gpr/string_windows.h index 7c7f31e7a..e370f802c 100644 --- a/Sources/CgRPC/src/core/lib/support/string_windows.h +++ b/Sources/CgRPC/src/core/lib/gpr/string_windows.h @@ -16,8 +16,8 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_STRING_WINDOWS_H -#define GRPC_CORE_LIB_SUPPORT_STRING_WINDOWS_H +#ifndef GRPC_CORE_LIB_GPR_STRING_WINDOWS_H +#define GRPC_CORE_LIB_GPR_STRING_WINDOWS_H #include @@ -29,4 +29,4 @@ LPSTR gpr_tchar_to_char(LPCTSTR input); #endif /* GPR_WINDOWS */ -#endif /* GRPC_CORE_LIB_SUPPORT_STRING_WINDOWS_H */ +#endif /* GRPC_CORE_LIB_GPR_STRING_WINDOWS_H */ diff --git a/Sources/CgRPC/src/core/lib/support/sync.c b/Sources/CgRPC/src/core/lib/gpr/sync.cc similarity index 68% rename from Sources/CgRPC/src/core/lib/support/sync.c rename to Sources/CgRPC/src/core/lib/gpr/sync.cc index 994dcb0e1..2f18fc5ec 100644 --- a/Sources/CgRPC/src/core/lib/support/sync.c +++ b/Sources/CgRPC/src/core/lib/gpr/sync.cc @@ -18,6 +18,8 @@ /* Generic implementation of synchronization primitives. */ +#include + #include #include #include @@ -45,47 +47,47 @@ static void event_initialize(void) { } /* Hash ev into an element of sync_array[]. */ -static struct sync_array_s *hash(gpr_event *ev) { +static struct sync_array_s* hash(gpr_event* ev) { return &sync_array[((uintptr_t)ev) % event_sync_partitions]; } -void gpr_event_init(gpr_event *ev) { +void gpr_event_init(gpr_event* ev) { gpr_once_init(&event_once, &event_initialize); ev->state = 0; } -void gpr_event_set(gpr_event *ev, void *value) { - struct sync_array_s *s = hash(ev); +void gpr_event_set(gpr_event* ev, void* value) { + struct sync_array_s* s = hash(ev); gpr_mu_lock(&s->mu); GPR_ASSERT(gpr_atm_acq_load(&ev->state) == 0); gpr_atm_rel_store(&ev->state, (gpr_atm)value); gpr_cv_broadcast(&s->cv); gpr_mu_unlock(&s->mu); - GPR_ASSERT(value != NULL); + GPR_ASSERT(value != nullptr); } -void *gpr_event_get(gpr_event *ev) { - return (void *)gpr_atm_acq_load(&ev->state); +void* gpr_event_get(gpr_event* ev) { + return (void*)gpr_atm_acq_load(&ev->state); } -void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline) { - void *result = (void *)gpr_atm_acq_load(&ev->state); - if (result == NULL) { - struct sync_array_s *s = hash(ev); +void* gpr_event_wait(gpr_event* ev, gpr_timespec abs_deadline) { + void* result = (void*)gpr_atm_acq_load(&ev->state); + if (result == nullptr) { + struct sync_array_s* s = hash(ev); gpr_mu_lock(&s->mu); do { - result = (void *)gpr_atm_acq_load(&ev->state); - } while (result == NULL && !gpr_cv_wait(&s->cv, &s->mu, abs_deadline)); + result = (void*)gpr_atm_acq_load(&ev->state); + } while (result == nullptr && !gpr_cv_wait(&s->cv, &s->mu, abs_deadline)); gpr_mu_unlock(&s->mu); } return result; } -void gpr_ref_init(gpr_refcount *r, int n) { gpr_atm_rel_store(&r->count, n); } +void gpr_ref_init(gpr_refcount* r, int n) { gpr_atm_rel_store(&r->count, n); } -void gpr_ref(gpr_refcount *r) { gpr_atm_no_barrier_fetch_add(&r->count, 1); } +void gpr_ref(gpr_refcount* r) { gpr_atm_no_barrier_fetch_add(&r->count, 1); } -void gpr_ref_non_zero(gpr_refcount *r) { +void gpr_ref_non_zero(gpr_refcount* r) { #ifndef NDEBUG gpr_atm prior = gpr_atm_no_barrier_fetch_add(&r->count, 1); assert(prior > 0); @@ -94,29 +96,29 @@ void gpr_ref_non_zero(gpr_refcount *r) { #endif } -void gpr_refn(gpr_refcount *r, int n) 
{ +void gpr_refn(gpr_refcount* r, int n) { gpr_atm_no_barrier_fetch_add(&r->count, n); } -int gpr_unref(gpr_refcount *r) { +int gpr_unref(gpr_refcount* r) { gpr_atm prior = gpr_atm_full_fetch_add(&r->count, -1); GPR_ASSERT(prior > 0); return prior == 1; } -int gpr_ref_is_unique(gpr_refcount *r) { +int gpr_ref_is_unique(gpr_refcount* r) { return gpr_atm_acq_load(&r->count) == 1; } -void gpr_stats_init(gpr_stats_counter *c, intptr_t n) { +void gpr_stats_init(gpr_stats_counter* c, intptr_t n) { gpr_atm_rel_store(&c->value, n); } -void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc) { +void gpr_stats_inc(gpr_stats_counter* c, intptr_t inc) { gpr_atm_no_barrier_fetch_add(&c->value, inc); } -intptr_t gpr_stats_read(const gpr_stats_counter *c) { +intptr_t gpr_stats_read(const gpr_stats_counter* c) { /* don't need acquire-load, but we have no no-barrier load yet */ return gpr_atm_acq_load(&c->value); } diff --git a/Sources/CgRPC/src/core/lib/support/sync_posix.c b/Sources/CgRPC/src/core/lib/gpr/sync_posix.cc similarity index 77% rename from Sources/CgRPC/src/core/lib/support/sync_posix.c rename to Sources/CgRPC/src/core/lib/gpr/sync_posix.cc index 62d800b18..848d23730 100644 --- a/Sources/CgRPC/src/core/lib/support/sync_posix.c +++ b/Sources/CgRPC/src/core/lib/gpr/sync_posix.cc @@ -33,7 +33,9 @@ gpr_atm gpr_counter_atm_cas = 0; gpr_atm gpr_counter_atm_add = 0; #endif -void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); } +void gpr_mu_init(gpr_mu* mu) { + GPR_ASSERT(pthread_mutex_init(mu, nullptr) == 0); +} void gpr_mu_destroy(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); } @@ -41,29 +43,32 @@ void gpr_mu_lock(gpr_mu* mu) { #ifdef GPR_LOW_LEVEL_COUNTERS GPR_ATM_INC_COUNTER(gpr_mu_locks); #endif - GPR_TIMER_BEGIN("gpr_mu_lock", 0); + GPR_TIMER_SCOPE("gpr_mu_lock", 0); GPR_ASSERT(pthread_mutex_lock(mu) == 0); - GPR_TIMER_END("gpr_mu_lock", 0); } void gpr_mu_unlock(gpr_mu* mu) { - GPR_TIMER_BEGIN("gpr_mu_unlock", 0); + GPR_TIMER_SCOPE("gpr_mu_unlock", 0); GPR_ASSERT(pthread_mutex_unlock(mu) == 0); - GPR_TIMER_END("gpr_mu_unlock", 0); } int gpr_mu_trylock(gpr_mu* mu) { - int err; - GPR_TIMER_BEGIN("gpr_mu_trylock", 0); - err = pthread_mutex_trylock(mu); + GPR_TIMER_SCOPE("gpr_mu_trylock", 0); + int err = pthread_mutex_trylock(mu); GPR_ASSERT(err == 0 || err == EBUSY); - GPR_TIMER_END("gpr_mu_trylock", 0); return err == 0; } /*----------------------------------------*/ -void gpr_cv_init(gpr_cv* cv) { GPR_ASSERT(pthread_cond_init(cv, NULL) == 0); } +void gpr_cv_init(gpr_cv* cv) { + pthread_condattr_t attr; + GPR_ASSERT(pthread_condattr_init(&attr) == 0); +#if GPR_LINUX + GPR_ASSERT(pthread_condattr_setclock(&attr, CLOCK_MONOTONIC) == 0); +#endif // GPR_LINUX + GPR_ASSERT(pthread_cond_init(cv, &attr) == 0); +} void gpr_cv_destroy(gpr_cv* cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); } @@ -74,8 +79,12 @@ int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) { err = pthread_cond_wait(cv, mu); } else { struct timespec abs_deadline_ts; +#if GPR_LINUX + abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_MONOTONIC); +#else abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME); - abs_deadline_ts.tv_sec = (time_t)abs_deadline.tv_sec; +#endif // GPR_LINUX + abs_deadline_ts.tv_sec = static_cast(abs_deadline.tv_sec); abs_deadline_ts.tv_nsec = abs_deadline.tv_nsec; err = pthread_cond_timedwait(cv, mu, &abs_deadline_ts); } diff --git a/Sources/CgRPC/src/core/lib/support/sync_windows.c b/Sources/CgRPC/src/core/lib/gpr/sync_windows.cc 
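The sync.cc and sync_posix.cc hunks above move condition-variable deadline handling to CLOCK_MONOTONIC on Linux, which is what makes a monotonic deadline the natural choice in the sketch below. The 100 ms timeout and the published value are illustrative; only the public <grpc/support/sync.h> and <grpc/support/time.h> APIs are assumed:

// Hedged sketch of the gpr_event API from the sync.cc hunk above: wait for a
// value with a deadline; gpr_event_set() would normally run on another thread.
#include <stdio.h>

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

static gpr_event g_done;
static int g_result = 42;

int main() {
  gpr_event_init(&g_done);
  gpr_event_set(&g_done, &g_result);  // value must be non-NULL, set at most once

  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_millis(100, GPR_TIMESPAN));
  void* value = gpr_event_wait(&g_done, deadline);
  if (value != nullptr) {
    printf("event value: %d\n", *static_cast<int*>(value));
  } else {
    printf("timed out\n");
  }
  return 0;
}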
similarity index 79% rename from Sources/CgRPC/src/core/lib/support/sync_windows.c rename to Sources/CgRPC/src/core/lib/gpr/sync_windows.cc index 008c5aecb..7cd41633d 100644 --- a/Sources/CgRPC/src/core/lib/support/sync_windows.c +++ b/Sources/CgRPC/src/core/lib/gpr/sync_windows.cc @@ -26,25 +26,25 @@ #include #include -void gpr_mu_init(gpr_mu *mu) { +void gpr_mu_init(gpr_mu* mu) { InitializeCriticalSection(&mu->cs); mu->locked = 0; } -void gpr_mu_destroy(gpr_mu *mu) { DeleteCriticalSection(&mu->cs); } +void gpr_mu_destroy(gpr_mu* mu) { DeleteCriticalSection(&mu->cs); } -void gpr_mu_lock(gpr_mu *mu) { +void gpr_mu_lock(gpr_mu* mu) { EnterCriticalSection(&mu->cs); GPR_ASSERT(!mu->locked); mu->locked = 1; } -void gpr_mu_unlock(gpr_mu *mu) { +void gpr_mu_unlock(gpr_mu* mu) { mu->locked = 0; LeaveCriticalSection(&mu->cs); } -int gpr_mu_trylock(gpr_mu *mu) { +int gpr_mu_trylock(gpr_mu* mu) { int result = TryEnterCriticalSection(&mu->cs); if (result) { if (mu->locked) { /* This thread already holds the lock. */ @@ -58,13 +58,13 @@ int gpr_mu_trylock(gpr_mu *mu) { /*----------------------------------------*/ -void gpr_cv_init(gpr_cv *cv) { InitializeConditionVariable(cv); } +void gpr_cv_init(gpr_cv* cv) { InitializeConditionVariable(cv); } -void gpr_cv_destroy(gpr_cv *cv) { +void gpr_cv_destroy(gpr_cv* cv) { /* Condition variables don't need destruction in Win32. */ } -int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) { +int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) { int timeout = 0; DWORD timeout_max_ms; mu->locked = 0; @@ -93,23 +93,23 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) { return timeout; } -void gpr_cv_signal(gpr_cv *cv) { WakeConditionVariable(cv); } +void gpr_cv_signal(gpr_cv* cv) { WakeConditionVariable(cv); } -void gpr_cv_broadcast(gpr_cv *cv) { WakeAllConditionVariable(cv); } +void gpr_cv_broadcast(gpr_cv* cv) { WakeAllConditionVariable(cv); } /*----------------------------------------*/ -static void *dummy; +static void* dummy; struct run_once_func_arg { void (*init_function)(void); }; -static BOOL CALLBACK run_once_func(gpr_once *once, void *v, void **pv) { - struct run_once_func_arg *arg = v; +static BOOL CALLBACK run_once_func(gpr_once* once, void* v, void** pv) { + struct run_once_func_arg* arg = (struct run_once_func_arg*)v; (*arg->init_function)(); return 1; } -void gpr_once_init(gpr_once *once, void (*init_function)(void)) { +void gpr_once_init(gpr_once* once, void (*init_function)(void)) { struct run_once_func_arg arg; arg.init_function = init_function; InitOnceExecuteOnce(once, run_once_func, &arg, &dummy); diff --git a/Sources/CgRPC/src/core/lib/support/time.c b/Sources/CgRPC/src/core/lib/gpr/time.cc similarity index 94% rename from Sources/CgRPC/src/core/lib/support/time.c rename to Sources/CgRPC/src/core/lib/gpr/time.cc index 6903674d7..64c1c98f5 100644 --- a/Sources/CgRPC/src/core/lib/support/time.c +++ b/Sources/CgRPC/src/core/lib/gpr/time.cc @@ -18,6 +18,8 @@ /* Generic implementation of time calls. */ +#include + #include #include #include @@ -81,8 +83,9 @@ static gpr_timespec to_seconds_from_sub_second_time(int64_t time_in_units, units_per_sec) - 1; } - out.tv_nsec = (int32_t)((time_in_units - out.tv_sec * units_per_sec) * - GPR_NS_PER_SEC / units_per_sec); + out.tv_nsec = + static_cast((time_in_units - out.tv_sec * units_per_sec) * + GPR_NS_PER_SEC / units_per_sec); out.clock_type = type; } return out; @@ -216,12 +219,13 @@ int32_t gpr_time_to_millis(gpr_timespec t) { care?) 
*/ return -2147483647; } else { - return (int32_t)(t.tv_sec * GPR_MS_PER_SEC + t.tv_nsec / GPR_NS_PER_MS); + return static_cast(t.tv_sec * GPR_MS_PER_SEC + + t.tv_nsec / GPR_NS_PER_MS); } } double gpr_timespec_to_micros(gpr_timespec t) { - return (double)t.tv_sec * GPR_US_PER_SEC + t.tv_nsec * 1e-3; + return static_cast(t.tv_sec) * GPR_US_PER_SEC + t.tv_nsec * 1e-3; } gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type clock_type) { diff --git a/Sources/CgRPC/src/core/lib/support/time_posix.c b/Sources/CgRPC/src/core/lib/gpr/time_posix.cc similarity index 91% rename from Sources/CgRPC/src/core/lib/support/time_posix.c rename to Sources/CgRPC/src/core/lib/gpr/time_posix.cc index 3ead40d80..28836bfa5 100644 --- a/Sources/CgRPC/src/core/lib/support/time_posix.c +++ b/Sources/CgRPC/src/core/lib/gpr/time_posix.cc @@ -17,7 +17,8 @@ */ #include -#include "src/core/lib/support/time_precise.h" + +#include "src/core/lib/gpr/time_precise.h" #ifdef GPR_POSIX_TIME @@ -30,7 +31,6 @@ #include #include #include -#include "src/core/lib/support/block_annotate.h" static struct timespec timespec_from_gpr(gpr_timespec gts) { struct timespec rv; @@ -38,12 +38,12 @@ static struct timespec timespec_from_gpr(gpr_timespec gts) { /* fine to assert, as this is only used in gpr_sleep_until */ GPR_ASSERT(gts.tv_sec <= INT32_MAX && gts.tv_sec >= INT32_MIN); } - rv.tv_sec = (time_t)gts.tv_sec; + rv.tv_sec = static_cast(gts.tv_sec); rv.tv_nsec = gts.tv_nsec; return rv; } -#if _POSIX_TIMERS > 0 +#if _POSIX_TIMERS > 0 || defined(__OpenBSD__) static gpr_timespec gpr_from_timespec(struct timespec ts, gpr_clock_type clock_type) { /* @@ -53,7 +53,7 @@ static gpr_timespec gpr_from_timespec(struct timespec ts, */ gpr_timespec rv; rv.tv_sec = ts.tv_sec; - rv.tv_nsec = (int32_t)ts.tv_nsec; + rv.tv_nsec = static_cast(ts.tv_nsec); rv.clock_type = clock_type; return rv; } @@ -82,7 +82,7 @@ static gpr_timespec now_impl(gpr_clock_type clock_type) { } } #else -/* For some reason Apple's OSes haven't implemented clock_gettime. */ + /* For some reason Apple's OSes haven't implemented clock_gettime. 
*/ #include #include @@ -108,7 +108,7 @@ static gpr_timespec now_impl(gpr_clock_type clock) { now.clock_type = clock; switch (clock) { case GPR_CLOCK_REALTIME: - gettimeofday(&now_tv, NULL); + gettimeofday(&now_tv, nullptr); now.tv_sec = now_tv.tv_sec; now.tv_nsec = now_tv.tv_usec * 1000; break; @@ -157,9 +157,7 @@ void gpr_sleep_until(gpr_timespec until) { delta = gpr_time_sub(until, now); delta_ts = timespec_from_gpr(delta); - GRPC_SCHEDULING_START_BLOCKING_REGION; - ns_result = nanosleep(&delta_ts, NULL); - GRPC_SCHEDULING_END_BLOCKING_REGION; + ns_result = nanosleep(&delta_ts, nullptr); if (ns_result == 0) { break; } diff --git a/Sources/CgRPC/src/core/lib/support/time_precise.c b/Sources/CgRPC/src/core/lib/gpr/time_precise.cc similarity index 87% rename from Sources/CgRPC/src/core/lib/support/time_precise.c rename to Sources/CgRPC/src/core/lib/gpr/time_precise.cc index 6ce19e53c..1b34fd7eb 100644 --- a/Sources/CgRPC/src/core/lib/support/time_precise.c +++ b/Sources/CgRPC/src/core/lib/gpr/time_precise.cc @@ -16,13 +16,17 @@ * */ +#include + #include #include #include +#include "src/core/lib/gpr/time_precise.h" + #ifdef GRPC_TIMERS_RDTSC #if defined(__i386__) -static void gpr_get_cycle_counter(int64_t int *clk) { +static void gpr_get_cycle_counter(int64_t int* clk) { int64_t int ret; __asm__ volatile("rdtsc" : "=A"(ret)); *clk = ret; @@ -30,7 +34,7 @@ static void gpr_get_cycle_counter(int64_t int *clk) { // ---------------------------------------------------------------- #elif defined(__x86_64__) || defined(__amd64__) -static void gpr_get_cycle_counter(int64_t *clk) { +static void gpr_get_cycle_counter(int64_t* clk) { uint64_t low, high; __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); *clk = (int64_t)(high << 32) | (int64_t)low; @@ -54,7 +58,7 @@ void gpr_precise_clock_init(void) { gpr_log(GPR_DEBUG, "... 
cycles_per_second = %f\n", cycles_per_second); } -void gpr_precise_clock_now(gpr_timespec *clk) { +void gpr_precise_clock_now(gpr_timespec* clk) { int64_t counter; double secs; gpr_get_cycle_counter(&counter); @@ -67,7 +71,7 @@ void gpr_precise_clock_now(gpr_timespec *clk) { #else /* GRPC_TIMERS_RDTSC */ void gpr_precise_clock_init(void) {} -void gpr_precise_clock_now(gpr_timespec *clk) { +void gpr_precise_clock_now(gpr_timespec* clk) { *clk = gpr_now(GPR_CLOCK_REALTIME); clk->clock_type = GPR_CLOCK_PRECISE; } diff --git a/Sources/CgRPC/src/core/lib/support/time_precise.h b/Sources/CgRPC/src/core/lib/gpr/time_precise.h similarity index 75% rename from Sources/CgRPC/src/core/lib/support/time_precise.h rename to Sources/CgRPC/src/core/lib/gpr/time_precise.h index aa28d6d7c..a63ea9dc6 100644 --- a/Sources/CgRPC/src/core/lib/support/time_precise.h +++ b/Sources/CgRPC/src/core/lib/gpr/time_precise.h @@ -16,12 +16,14 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_TIME_PRECISE_H -#define GRPC_CORE_LIB_SUPPORT_TIME_PRECISE_H +#ifndef GRPC_CORE_LIB_GPR_TIME_PRECISE_H +#define GRPC_CORE_LIB_GPR_TIME_PRECISE_H + +#include #include void gpr_precise_clock_init(void); -void gpr_precise_clock_now(gpr_timespec *clk); +void gpr_precise_clock_now(gpr_timespec* clk); -#endif /* GRPC_CORE_LIB_SUPPORT_TIME_PRECISE_H */ +#endif /* GRPC_CORE_LIB_GPR_TIME_PRECISE_H */ diff --git a/Sources/CgRPC/src/core/lib/support/time_windows.c b/Sources/CgRPC/src/core/lib/gpr/time_windows.cc similarity index 93% rename from Sources/CgRPC/src/core/lib/support/time_windows.c rename to Sources/CgRPC/src/core/lib/gpr/time_windows.cc index 40df3761c..247cc1646 100644 --- a/Sources/CgRPC/src/core/lib/support/time_windows.c +++ b/Sources/CgRPC/src/core/lib/gpr/time_windows.cc @@ -28,8 +28,7 @@ #include #include -#include "src/core/lib/support/block_annotate.h" -#include "src/core/lib/support/time_precise.h" +#include "src/core/lib/gpr/time_precise.h" static LARGE_INTEGER g_start_time; static double g_time_scale; @@ -92,9 +91,7 @@ void gpr_sleep_until(gpr_timespec until) { sleep_millis = delta.tv_sec * GPR_MS_PER_SEC + delta.tv_nsec / GPR_NS_PER_MS; GPR_ASSERT((sleep_millis >= 0) && (sleep_millis <= INT_MAX)); - GRPC_SCHEDULING_START_BLOCKING_REGION; Sleep((DWORD)sleep_millis); - GRPC_SCHEDULING_END_BLOCKING_REGION; } } diff --git a/Sources/CgRPC/include/grpc/support/tls.h b/Sources/CgRPC/src/core/lib/gpr/tls.h similarity index 79% rename from Sources/CgRPC/include/grpc/support/tls.h rename to Sources/CgRPC/src/core/lib/gpr/tls.h index 8519a8350..aee8f4d94 100644 --- a/Sources/CgRPC/include/grpc/support/tls.h +++ b/Sources/CgRPC/src/core/lib/gpr/tls.h @@ -16,8 +16,8 @@ * */ -#ifndef GRPC_SUPPORT_TLS_H -#define GRPC_SUPPORT_TLS_H +#ifndef GRPC_CORE_LIB_GPR_TLS_H +#define GRPC_CORE_LIB_GPR_TLS_H #include @@ -32,6 +32,12 @@ GPR_TLS_DECL(foo); Thread locals always have static scope. + Declaring a thread local class variable 'foo': + GPR_TLS_CLASS_DECL(foo); + + Defining the thread local class variable: + GPR_TLS_CLASS_DEF(foo); + Initializing a thread local (must be done at library initialization time): gpr_tls_init(&foo); @@ -48,15 +54,15 @@ ALL functions here may be implemented as macros. 
*/ #ifdef GPR_GCC_TLS -#include +#include "src/core/lib/gpr/tls_gcc.h" #endif #ifdef GPR_MSVC_TLS -#include +#include "src/core/lib/gpr/tls_msvc.h" #endif #ifdef GPR_PTHREAD_TLS -#include +#include "src/core/lib/gpr/tls_pthread.h" #endif -#endif /* GRPC_SUPPORT_TLS_H */ +#endif /* GRPC_CORE_LIB_GPR_TLS_H */ diff --git a/Sources/CgRPC/include/grpc/support/tls_gcc.h b/Sources/CgRPC/src/core/lib/gpr/tls_gcc.h similarity index 51% rename from Sources/CgRPC/include/grpc/support/tls_gcc.h rename to Sources/CgRPC/src/core/lib/gpr/tls_gcc.h index e6d8c0144..72b360b02 100644 --- a/Sources/CgRPC/include/grpc/support/tls_gcc.h +++ b/Sources/CgRPC/src/core/lib/gpr/tls_gcc.h @@ -16,8 +16,10 @@ * */ -#ifndef GRPC_SUPPORT_TLS_GCC_H -#define GRPC_SUPPORT_TLS_GCC_H +#ifndef GRPC_CORE_LIB_GPR_TLS_GCC_H +#define GRPC_CORE_LIB_GPR_TLS_GCC_H + +#include #include @@ -26,44 +28,6 @@ /** Thread local storage based on gcc compiler primitives. #include tls.h to use this - and see that file for documentation */ -#ifndef NDEBUG - -struct gpr_gcc_thread_local { - intptr_t value; - bool *inited; -}; - -#define GPR_TLS_DECL(name) \ - static bool name##_inited = false; \ - static __thread struct gpr_gcc_thread_local name = {0, &(name##_inited)} - -#define gpr_tls_init(tls) \ - do { \ - GPR_ASSERT(*((tls)->inited) == false); \ - *((tls)->inited) = true; \ - } while (0) - -/** It is allowed to call gpr_tls_init after gpr_tls_destroy is called. */ -#define gpr_tls_destroy(tls) \ - do { \ - GPR_ASSERT(*((tls)->inited)); \ - *((tls)->inited) = false; \ - } while (0) - -#define gpr_tls_set(tls, new_value) \ - do { \ - GPR_ASSERT(*((tls)->inited)); \ - (tls)->value = (new_value); \ - } while (0) - -#define gpr_tls_get(tls) \ - ({ \ - GPR_ASSERT(*((tls)->inited)); \ - (tls)->value; \ - }) - -#else /* NDEBUG */ - struct gpr_gcc_thread_local { intptr_t value; }; @@ -71,6 +35,11 @@ struct gpr_gcc_thread_local { #define GPR_TLS_DECL(name) \ static __thread struct gpr_gcc_thread_local name = {0} +#define GPR_TLS_CLASS_DECL(name) \ + static __thread struct gpr_gcc_thread_local name + +#define GPR_TLS_CLASS_DEF(name) __thread struct gpr_gcc_thread_local name = {0} + #define gpr_tls_init(tls) \ do { \ } while (0) @@ -80,6 +49,4 @@ struct gpr_gcc_thread_local { #define gpr_tls_set(tls, new_value) (((tls)->value) = (new_value)) #define gpr_tls_get(tls) ((tls)->value) -#endif /* NDEBUG */ - -#endif /* GRPC_SUPPORT_TLS_GCC_H */ +#endif /* GRPC_CORE_LIB_GPR_TLS_GCC_H */ diff --git a/Sources/CgRPC/include/grpc/support/tls_msvc.h b/Sources/CgRPC/src/core/lib/gpr/tls_msvc.h similarity index 66% rename from Sources/CgRPC/include/grpc/support/tls_msvc.h rename to Sources/CgRPC/src/core/lib/gpr/tls_msvc.h index e5f2205fc..f4b3f0f50 100644 --- a/Sources/CgRPC/include/grpc/support/tls_msvc.h +++ b/Sources/CgRPC/src/core/lib/gpr/tls_msvc.h @@ -16,19 +16,30 @@ * */ -#ifndef GRPC_SUPPORT_TLS_MSVC_H -#define GRPC_SUPPORT_TLS_MSVC_H +#ifndef GRPC_CORE_LIB_GPR_TLS_MSVC_H +#define GRPC_CORE_LIB_GPR_TLS_MSVC_H /** Thread local storage based on ms visual c compiler primitives. +#include + #include tls.h to use this - and see that file for documentation */ struct gpr_msvc_thread_local { intptr_t value; }; +/** Use GPR_TLS_DECL to declare tls static variables outside a class */ #define GPR_TLS_DECL(name) \ static __declspec(thread) struct gpr_msvc_thread_local name = {0} +/** Use GPR_TLS_CLASS_DECL to declare tls static variable members of a class. + * GPR_TLS_CLASS_DEF needs to be called to define this member. 
*/ +#define GPR_TLS_CLASS_DECL(name) \ + static __declspec(thread) struct gpr_msvc_thread_local name + +#define GPR_TLS_CLASS_DEF(name) \ + __declspec(thread) struct gpr_msvc_thread_local name = {0} + #define gpr_tls_init(tls) \ do { \ } while (0) @@ -38,4 +49,4 @@ struct gpr_msvc_thread_local { #define gpr_tls_set(tls, new_value) (((tls)->value) = (new_value)) #define gpr_tls_get(tls) ((tls)->value) -#endif /* GRPC_SUPPORT_TLS_MSVC_H */ +#endif /* GRPC_CORE_LIB_GPR_TLS_MSVC_H */ diff --git a/Sources/CgRPC/src/core/lib/support/tls_pthread.c b/Sources/CgRPC/src/core/lib/gpr/tls_pthread.cc similarity index 81% rename from Sources/CgRPC/src/core/lib/support/tls_pthread.c rename to Sources/CgRPC/src/core/lib/gpr/tls_pthread.cc index 9ebee577f..2e5b30690 100644 --- a/Sources/CgRPC/src/core/lib/support/tls_pthread.c +++ b/Sources/CgRPC/src/core/lib/gpr/tls_pthread.cc @@ -20,10 +20,10 @@ #ifdef GPR_PTHREAD_TLS -#include +#include "src/core/lib/gpr/tls.h" -intptr_t gpr_tls_set(struct gpr_pthread_thread_local *tls, intptr_t value) { - GPR_ASSERT(0 == pthread_setspecific(tls->key, (void *)value)); +intptr_t gpr_tls_set(struct gpr_pthread_thread_local* tls, intptr_t value) { + GPR_ASSERT(0 == pthread_setspecific(tls->key, (void*)value)); return value; } diff --git a/Sources/CgRPC/include/grpc/support/tls_pthread.h b/Sources/CgRPC/src/core/lib/gpr/tls_pthread.h similarity index 62% rename from Sources/CgRPC/include/grpc/support/tls_pthread.h rename to Sources/CgRPC/src/core/lib/gpr/tls_pthread.h index a68b45569..a15f2f338 100644 --- a/Sources/CgRPC/include/grpc/support/tls_pthread.h +++ b/Sources/CgRPC/src/core/lib/gpr/tls_pthread.h @@ -16,8 +16,10 @@ * */ -#ifndef GRPC_SUPPORT_TLS_PTHREAD_H -#define GRPC_SUPPORT_TLS_PTHREAD_H +#ifndef GRPC_CORE_LIB_GPR_TLS_PTHREAD_H +#define GRPC_CORE_LIB_GPR_TLS_PTHREAD_H + +#include #include /* for GPR_ASSERT */ #include @@ -29,17 +31,26 @@ struct gpr_pthread_thread_local { pthread_key_t key; }; +/** Use GPR_TLS_DECL to declare tls static variables outside a class */ #define GPR_TLS_DECL(name) static struct gpr_pthread_thread_local name = {0} +/** Use GPR_TLS_CLASS_DECL to declare tls static variable members of a class. + * GPR_TLS_CLASS_DEF needs to be called to define this member. */ +#define GPR_TLS_CLASS_DECL(name) static struct gpr_pthread_thread_local name + +/** Use GPR_TLS_CLASS_DEF to declare tls static variable members of a class. + * GPR_TLS_CLASS_DEF needs to be called to define this member. 
*/ +#define GPR_TLS_CLASS_DEF(name) struct gpr_pthread_thread_local name = {0} + #define gpr_tls_init(tls) GPR_ASSERT(0 == pthread_key_create(&(tls)->key, NULL)) #define gpr_tls_destroy(tls) pthread_key_delete((tls)->key) #define gpr_tls_get(tls) ((intptr_t)pthread_getspecific((tls)->key)) #ifdef __cplusplus extern "C" { #endif -intptr_t gpr_tls_set(struct gpr_pthread_thread_local *tls, intptr_t value); +intptr_t gpr_tls_set(struct gpr_pthread_thread_local* tls, intptr_t value); #ifdef __cplusplus } #endif -#endif /* GRPC_SUPPORT_TLS_PTHREAD_H */ +#endif /* GRPC_CORE_LIB_GPR_TLS_PTHREAD_H */ diff --git a/Sources/CgRPC/src/core/lib/support/tmpfile.h b/Sources/CgRPC/src/core/lib/gpr/tmpfile.h similarity index 77% rename from Sources/CgRPC/src/core/lib/support/tmpfile.h rename to Sources/CgRPC/src/core/lib/gpr/tmpfile.h index caa1d0f4d..3ce3ff5e5 100644 --- a/Sources/CgRPC/src/core/lib/support/tmpfile.h +++ b/Sources/CgRPC/src/core/lib/gpr/tmpfile.h @@ -16,23 +16,17 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_TMPFILE_H -#define GRPC_CORE_LIB_SUPPORT_TMPFILE_H +#ifndef GRPC_CORE_LIB_GPR_TMPFILE_H +#define GRPC_CORE_LIB_GPR_TMPFILE_H -#include +#include -#ifdef __cplusplus -extern "C" { -#endif +#include /* Creates a temporary file from a prefix. If tmp_filename is not NULL, *tmp_filename is assigned the name of the created file and it is the responsibility of the caller to gpr_free it unless an error occurs in which case it will be set to NULL. */ -FILE *gpr_tmpfile(const char *prefix, char **tmp_filename); - -#ifdef __cplusplus -} -#endif +FILE* gpr_tmpfile(const char* prefix, char** tmp_filename); -#endif /* GRPC_CORE_LIB_SUPPORT_TMPFILE_H */ +#endif /* GRPC_CORE_LIB_GPR_TMPFILE_H */ diff --git a/Sources/CgRPC/src/core/lib/support/tmpfile_msys.c b/Sources/CgRPC/src/core/lib/gpr/tmpfile_msys.cc similarity index 88% rename from Sources/CgRPC/src/core/lib/support/tmpfile_msys.c rename to Sources/CgRPC/src/core/lib/gpr/tmpfile_msys.cc index 614c0a4a1..76cd886f3 100644 --- a/Sources/CgRPC/src/core/lib/support/tmpfile_msys.c +++ b/Sources/CgRPC/src/core/lib/gpr/tmpfile_msys.cc @@ -29,11 +29,11 @@ #include #include -#include "src/core/lib/support/string_windows.h" -#include "src/core/lib/support/tmpfile.h" +#include "src/core/lib/gpr/string_windows.h" +#include "src/core/lib/gpr/tmpfile.h" -FILE *gpr_tmpfile(const char *prefix, char **tmp_filename_out) { - FILE *result = NULL; +FILE* gpr_tmpfile(const char* prefix, char** tmp_filename_out) { + FILE* result = NULL; char tmp_filename[MAX_PATH]; UINT success; diff --git a/Sources/CgRPC/src/core/lib/support/tmpfile_posix.c b/Sources/CgRPC/src/core/lib/gpr/tmpfile_posix.cc similarity index 80% rename from Sources/CgRPC/src/core/lib/support/tmpfile_posix.c rename to Sources/CgRPC/src/core/lib/gpr/tmpfile_posix.cc index 7ad3af0a5..ffdad335d 100644 --- a/Sources/CgRPC/src/core/lib/support/tmpfile_posix.c +++ b/Sources/CgRPC/src/core/lib/gpr/tmpfile_posix.cc @@ -20,7 +20,7 @@ #ifdef GPR_POSIX_TMPFILE -#include "src/core/lib/support/tmpfile.h" +#include "src/core/lib/gpr/tmpfile.h" #include #include @@ -31,17 +31,17 @@ #include #include -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" -FILE *gpr_tmpfile(const char *prefix, char **tmp_filename) { - FILE *result = NULL; - char *filename_template; +FILE* gpr_tmpfile(const char* prefix, char** tmp_filename) { + FILE* result = nullptr; + char* filename_template; int fd; - if (tmp_filename != NULL) *tmp_filename = NULL; + if (tmp_filename != nullptr) *tmp_filename = nullptr; 
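Tying the tls.h / tls_gcc.h / tls_msvc.h / tls_pthread.h hunks above together, here is a hedged sketch of how the gpr_tls macros are used; the counter name is invented, and the macro expansion differs per GPR_GCC_TLS / GPR_MSVC_TLS / GPR_PTHREAD_TLS backend:

// Hedged sketch of the gpr_tls macros; each slot holds an intptr_t per thread.
#include <grpc/support/port_platform.h>  // selects the TLS backend

#include <stdio.h>

#include "src/core/lib/gpr/tls.h"

GPR_TLS_DECL(g_request_depth);  // static scope, one value per thread

int main() {
  gpr_tls_init(&g_request_depth);  // library-initialization time
  gpr_tls_set(&g_request_depth, 0);
  gpr_tls_set(&g_request_depth, gpr_tls_get(&g_request_depth) + 1);
  printf("depth = %ld\n", static_cast<long>(gpr_tls_get(&g_request_depth)));
  gpr_tls_destroy(&g_request_depth);  // library-shutdown time
  return 0;
}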
gpr_asprintf(&filename_template, "/tmp/%s_XXXXXX", prefix); - GPR_ASSERT(filename_template != NULL); + GPR_ASSERT(filename_template != nullptr); fd = mkstemp(filename_template); if (fd == -1) { @@ -50,7 +50,7 @@ FILE *gpr_tmpfile(const char *prefix, char **tmp_filename) { goto end; } result = fdopen(fd, "w+"); - if (result == NULL) { + if (result == nullptr) { gpr_log(GPR_ERROR, "Could not open file %s from fd %d (error = %s).", filename_template, fd, strerror(errno)); unlink(filename_template); @@ -59,7 +59,7 @@ FILE *gpr_tmpfile(const char *prefix, char **tmp_filename) { } end: - if (result != NULL && tmp_filename != NULL) { + if (result != nullptr && tmp_filename != nullptr) { *tmp_filename = filename_template; } else { gpr_free(filename_template); diff --git a/Sources/CgRPC/src/core/lib/support/tmpfile_windows.c b/Sources/CgRPC/src/core/lib/gpr/tmpfile_windows.cc similarity index 90% rename from Sources/CgRPC/src/core/lib/support/tmpfile_windows.c rename to Sources/CgRPC/src/core/lib/gpr/tmpfile_windows.cc index 47b4510a7..d48680841 100644 --- a/Sources/CgRPC/src/core/lib/support/tmpfile_windows.c +++ b/Sources/CgRPC/src/core/lib/gpr/tmpfile_windows.cc @@ -29,11 +29,11 @@ #include #include -#include "src/core/lib/support/string_windows.h" -#include "src/core/lib/support/tmpfile.h" +#include "src/core/lib/gpr/string_windows.h" +#include "src/core/lib/gpr/tmpfile.h" -FILE *gpr_tmpfile(const char *prefix, char **tmp_filename_out) { - FILE *result = NULL; +FILE* gpr_tmpfile(const char* prefix, char** tmp_filename_out) { + FILE* result = NULL; LPTSTR template_string = NULL; TCHAR tmp_path[MAX_PATH]; TCHAR tmp_filename[MAX_PATH]; diff --git a/Sources/CgRPC/include/grpc/support/useful.h b/Sources/CgRPC/src/core/lib/gpr/useful.h similarity index 95% rename from Sources/CgRPC/include/grpc/support/useful.h rename to Sources/CgRPC/src/core/lib/gpr/useful.h index bd66d3bb2..a4e73b9a6 100644 --- a/Sources/CgRPC/include/grpc/support/useful.h +++ b/Sources/CgRPC/src/core/lib/gpr/useful.h @@ -16,8 +16,8 @@ * */ -#ifndef GRPC_SUPPORT_USEFUL_H -#define GRPC_SUPPORT_USEFUL_H +#ifndef GRPC_CORE_LIB_GPR_USEFUL_H +#define GRPC_CORE_LIB_GPR_USEFUL_H /** useful macros that don't belong anywhere else */ @@ -62,4 +62,4 @@ #define GPR_HASH_POINTER(x, range) \ ((((size_t)x) >> 4) ^ (((size_t)x) >> 9) ^ (((size_t)x) >> 14)) % (range) -#endif /* GRPC_SUPPORT_USEFUL_H */ +#endif /* GRPC_CORE_LIB_GPR_USEFUL_H */ diff --git a/Sources/CgRPC/src/core/lib/support/wrap_memcpy.c b/Sources/CgRPC/src/core/lib/gpr/wrap_memcpy.cc similarity index 88% rename from Sources/CgRPC/src/core/lib/support/wrap_memcpy.c rename to Sources/CgRPC/src/core/lib/gpr/wrap_memcpy.cc index cff056dc3..9b8608e05 100644 --- a/Sources/CgRPC/src/core/lib/support/wrap_memcpy.c +++ b/Sources/CgRPC/src/core/lib/gpr/wrap_memcpy.cc @@ -26,15 +26,17 @@ * Enable by setting LDFLAGS=-Wl,-wrap,memcpy when linking. 
*/ +extern "C" { #ifdef __linux__ #if defined(__x86_64__) && !defined(GPR_MUSL_LIBC_COMPAT) __asm__(".symver memcpy,memcpy@GLIBC_2.2.5"); -void *__wrap_memcpy(void *destination, const void *source, size_t num) { +void* __wrap_memcpy(void* destination, const void* source, size_t num) { return memcpy(destination, source, num); } #else /* !__x86_64__ */ -void *__wrap_memcpy(void *destination, const void *source, size_t num) { +void* __wrap_memcpy(void* destination, const void* source, size_t num) { return memmove(destination, source, num); } #endif #endif +} diff --git a/Sources/CgRPC/src/core/lib/gprpp/abstract.h b/Sources/CgRPC/src/core/lib/gprpp/abstract.h new file mode 100644 index 000000000..cc96edc49 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/abstract.h @@ -0,0 +1,34 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_GPRPP_ABSTRACT_H +#define GRPC_CORE_LIB_GPRPP_ABSTRACT_H + +// This is needed to support abstract base classes in the c core. Since gRPC +// doesn't have a c++ runtime, it will hit a linker error on delete unless +// we define a virtual operator delete. See this blog for more info: +// https://eli.thegreenplace.net/2015/c-deleting-destructors-and-virtual-operator-delete/ +#define GRPC_ABSTRACT_BASE_CLASS \ + static void operator delete(void* p) { abort(); } + +// gRPC currently can't depend on libstdc++, so we can't use "= 0" for +// pure virtual methods. Instead, we use this macro. 
+#define GRPC_ABSTRACT \ + { GPR_ASSERT(false); } + +#endif /* GRPC_CORE_LIB_GPRPP_ABSTRACT_H */ diff --git a/Sources/CgRPC/src/core/lib/support/atomic.h b/Sources/CgRPC/src/core/lib/gprpp/atomic.h similarity index 75% rename from Sources/CgRPC/src/core/lib/support/atomic.h rename to Sources/CgRPC/src/core/lib/gprpp/atomic.h index 73c59ae3c..8b08fc4e9 100644 --- a/Sources/CgRPC/src/core/lib/support/atomic.h +++ b/Sources/CgRPC/src/core/lib/gprpp/atomic.h @@ -16,15 +16,15 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_ATOMIC_H -#define GRPC_CORE_LIB_SUPPORT_ATOMIC_H +#ifndef GRPC_CORE_LIB_GPRPP_ATOMIC_H +#define GRPC_CORE_LIB_GPRPP_ATOMIC_H #include #ifdef GPR_HAS_CXX11_ATOMIC -#include "src/core/lib/support/atomic_with_std.h" +#include "src/core/lib/gprpp/atomic_with_std.h" #else -#include "src/core/lib/support/atomic_with_atm.h" +#include "src/core/lib/gprpp/atomic_with_atm.h" #endif -#endif /* GRPC_CORE_LIB_SUPPORT_ATOMIC_H */ +#endif /* GRPC_CORE_LIB_GPRPP_ATOMIC_H */ diff --git a/Sources/CgRPC/src/core/lib/support/atomic_with_atm.h b/Sources/CgRPC/src/core/lib/gprpp/atomic_with_atm.h similarity index 88% rename from Sources/CgRPC/src/core/lib/support/atomic_with_atm.h rename to Sources/CgRPC/src/core/lib/gprpp/atomic_with_atm.h index fe00e9b5b..3d0021bb1 100644 --- a/Sources/CgRPC/src/core/lib/support/atomic_with_atm.h +++ b/Sources/CgRPC/src/core/lib/gprpp/atomic_with_atm.h @@ -16,8 +16,10 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_ATOMIC_WITH_ATM_H -#define GRPC_CORE_LIB_SUPPORT_ATOMIC_WITH_ATM_H +#ifndef GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_ATM_H +#define GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_ATM_H + +#include #include @@ -52,4 +54,4 @@ class atomic { } // namespace grpc_core -#endif /* GRPC_CORE_LIB_SUPPORT_ATOMIC_WITH_ATM_H */ +#endif /* GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_ATM_H */ diff --git a/Sources/CgRPC/src/core/lib/support/atomic_with_std.h b/Sources/CgRPC/src/core/lib/gprpp/atomic_with_std.h similarity index 80% rename from Sources/CgRPC/src/core/lib/support/atomic_with_std.h rename to Sources/CgRPC/src/core/lib/gprpp/atomic_with_std.h index c7a92f701..a4ad16e5c 100644 --- a/Sources/CgRPC/src/core/lib/support/atomic_with_std.h +++ b/Sources/CgRPC/src/core/lib/gprpp/atomic_with_std.h @@ -16,8 +16,10 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_ATOMIC_WITH_STD_H -#define GRPC_CORE_LIB_SUPPORT_ATOMIC_WITH_STD_H +#ifndef GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_STD_H +#define GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_STD_H + +#include #include @@ -30,4 +32,4 @@ typedef std::memory_order memory_order; } // namespace grpc_core -#endif /* GRPC_CORE_LIB_SUPPORT_ATOMIC_WITH_STD_H */ +#endif /* GRPC_CORE_LIB_GPRPP_ATOMIC_WITH_STD_H */ diff --git a/Sources/CgRPC/src/core/lib/gprpp/debug_location.h b/Sources/CgRPC/src/core/lib/gprpp/debug_location.h new file mode 100644 index 000000000..287761bea --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/debug_location.h @@ -0,0 +1,52 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
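The GRPC_ABSTRACT_BASE_CLASS / GRPC_ABSTRACT macros added in abstract.h above exist so the core can declare interface-like classes without pulling in libstdc++. A hedged sketch of the intended pattern; the Resolver/FakeResolver names are invented for illustration:

// Hedged sketch of the abstract.h macros. GRPC_ABSTRACT gives the method a
// body that asserts if ever called, and GRPC_ABSTRACT_BASE_CLASS supplies the
// operator delete that the deleting destructor needs in order to link.
#include <stdlib.h>            // abort(), used by GRPC_ABSTRACT_BASE_CLASS
#include <grpc/support/log.h>  // GPR_ASSERT, used by GRPC_ABSTRACT

#include "src/core/lib/gprpp/abstract.h"

class Resolver {
 public:
  virtual ~Resolver() {}

  // "Pure virtual" without `= 0`.
  virtual void RequestReresolution() GRPC_ABSTRACT;

  GRPC_ABSTRACT_BASE_CLASS
};

class FakeResolver : public Resolver {
 public:
  void RequestReresolution() override { /* no-op for the sketch */ }
};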
+ * + */ + +#ifndef GRPC_CORE_LIB_GPRPP_DEBUG_LOCATION_H +#define GRPC_CORE_LIB_GPRPP_DEBUG_LOCATION_H + +namespace grpc_core { + +// Used for tracking file and line where a call is made for debug builds. +// No-op for non-debug builds. +// Callers can use the DEBUG_LOCATION macro in either case. +#ifndef NDEBUG +class DebugLocation { + public: + DebugLocation(const char* file, int line) : file_(file), line_(line) {} + bool Log() const { return true; } + const char* file() const { return file_; } + int line() const { return line_; } + + private: + const char* file_; + const int line_; +}; +#define DEBUG_LOCATION ::grpc_core::DebugLocation(__FILE__, __LINE__) +#else +class DebugLocation { + public: + bool Log() const { return false; } + const char* file() const { return nullptr; } + int line() const { return -1; } +}; +#define DEBUG_LOCATION ::grpc_core::DebugLocation() +#endif + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_GPRPP_DEBUG_LOCATION_H */ diff --git a/Sources/CgRPC/src/core/lib/gprpp/inlined_vector.h b/Sources/CgRPC/src/core/lib/gprpp/inlined_vector.h new file mode 100644 index 000000000..f36f6cb70 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/inlined_vector.h @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_GPRPP_INLINED_VECTOR_H +#define GRPC_CORE_LIB_GPRPP_INLINED_VECTOR_H + +#include + +#include + +#include "src/core/lib/gprpp/memory.h" + +namespace grpc_core { + +// NOTE: We eventually want to use absl::InlinedVector here. However, +// there are currently build problems that prevent us from using absl. +// In the interim, we define a custom implementation as a place-holder, +// with the intent to eventually replace this with the absl +// implementation. +// +// This place-holder implementation does not implement the full set of +// functionality from the absl version; it has just the methods that we +// currently happen to need in gRPC. If additional functionality is +// needed before this gets replaced with the absl version, it can be +// added, with the following proviso: +// +// ANY METHOD ADDED HERE MUST COMPLY WITH THE INTERFACE IN THE absl +// IMPLEMENTATION! +// +// TODO(nnoble, roth): Replace this with absl::InlinedVector once we +// integrate absl into the gRPC build system in a usable way. +template +class InlinedVector { + public: + InlinedVector() { init_data(); } + ~InlinedVector() { destroy_elements(); } + + // For now, we do not support copying. + InlinedVector(const InlinedVector&) = delete; + InlinedVector& operator=(const InlinedVector&) = delete; + + T* data() { + return dynamic_ != nullptr ? dynamic_ : reinterpret_cast(inline_); + } + + const T* data() const { + return dynamic_ != nullptr ? 
dynamic_ : reinterpret_cast(inline_); + } + + T& operator[](size_t offset) { + assert(offset < size_); + return data()[offset]; + } + + const T& operator[](size_t offset) const { + assert(offset < size_); + return data()[offset]; + } + + void reserve(size_t capacity) { + if (capacity > capacity_) { + T* new_dynamic = static_cast(gpr_malloc(sizeof(T) * capacity)); + for (size_t i = 0; i < size_; ++i) { + new (&new_dynamic[i]) T(std::move(data()[i])); + data()[i].~T(); + } + gpr_free(dynamic_); + dynamic_ = new_dynamic; + capacity_ = capacity; + } + } + + template + void emplace_back(Args&&... args) { + if (size_ == capacity_) { + reserve(capacity_ * 2); + } + new (&(data()[size_])) T(std::forward(args)...); + ++size_; + } + + void push_back(const T& value) { emplace_back(value); } + + void push_back(T&& value) { emplace_back(std::move(value)); } + + size_t size() const { return size_; } + size_t capacity() const { return capacity_; } + + void clear() { + destroy_elements(); + init_data(); + } + + private: + void init_data() { + dynamic_ = nullptr; + size_ = 0; + capacity_ = N; + } + + void destroy_elements() { + for (size_t i = 0; i < size_; ++i) { + T& value = data()[i]; + value.~T(); + } + gpr_free(dynamic_); + } + + typename std::aligned_storage::type inline_[N]; + T* dynamic_; + size_t size_; + size_t capacity_; +}; + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_GPRPP_INLINED_VECTOR_H */ diff --git a/Sources/CgRPC/src/core/lib/gprpp/manual_constructor.h b/Sources/CgRPC/src/core/lib/gprpp/manual_constructor.h new file mode 100644 index 000000000..7f827ca8b --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/manual_constructor.h @@ -0,0 +1,213 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_GPRPP_MANUAL_CONSTRUCTOR_H +#define GRPC_CORE_LIB_GPRPP_MANUAL_CONSTRUCTOR_H + +// manually construct a region of memory with some type + +#include + +#include +#include +#include +#include +#include + +#include + +namespace grpc_core { + +// this contains templated helpers needed to implement the ManualConstructors +// in this file. +namespace manual_ctor_impl { + +// is_one_of returns true it a class, Member, is present in a variadic list of +// classes, List. +template +class is_one_of; + +template +class is_one_of { + public: + static constexpr const bool value = true; +}; + +template +class is_one_of { + public: + static constexpr const bool value = is_one_of::value; +}; + +template +class is_one_of { + public: + static constexpr const bool value = false; +}; + +// max_size_of returns sizeof(Type) for the largest type in the variadic list +// of classes, Types. +template +class max_size_of; + +template +class max_size_of { + public: + static constexpr const size_t value = sizeof(A); +}; + +template +class max_size_of { + public: + static constexpr const size_t value = sizeof(A) > max_size_of::value + ? 
sizeof(A) + : max_size_of::value; +}; + +// max_size_of returns alignof(Type) for the largest type in the variadic list +// of classes, Types. +template +class max_align_of; + +template +class max_align_of { + public: + static constexpr const size_t value = alignof(A); +}; + +template +class max_align_of { + public: + static constexpr const size_t value = alignof(A) > max_align_of::value + ? alignof(A) + : max_align_of::value; +}; + +} // namespace manual_ctor_impl + +template +class PolymorphicManualConstructor { + public: + // No constructor or destructor because one of the most useful uses of + // this class is as part of a union, and members of a union could not have + // constructors or destructors till C++11. And, anyway, the whole point of + // this class is to bypass constructor and destructor. + + BaseType* get() { return reinterpret_cast(&space_); } + const BaseType* get() const { + return reinterpret_cast(&space_); + } + + BaseType* operator->() { return get(); } + const BaseType* operator->() const { return get(); } + + BaseType& operator*() { return *get(); } + const BaseType& operator*() const { return *get(); } + + template + void Init() { + FinishInit(new (&space_) DerivedType); + } + + // Init() constructs the Type instance using the given arguments + // (which are forwarded to Type's constructor). + // + // Note that Init() with no arguments performs default-initialization, + // not zero-initialization (i.e it behaves the same as "new Type;", not + // "new Type();"), so it will leave non-class types uninitialized. + template + void Init(Ts&&... args) { + FinishInit(new (&space_) DerivedType(std::forward(args)...)); + } + + // Init() that is equivalent to copy and move construction. + // Enables usage like this: + // ManualConstructor> v; + // v.Init({1, 2, 3}); + template + void Init(const DerivedType& x) { + FinishInit(new (&space_) DerivedType(x)); + } + template + void Init(DerivedType&& x) { + FinishInit(new (&space_) DerivedType(std::move(x))); + } + + void Destroy() { get()->~BaseType(); } + + private: + template + void FinishInit(DerivedType* p) { + static_assert( + manual_ctor_impl::is_one_of::value, + "DerivedType must be one of the predeclared DerivedTypes"); + GPR_ASSERT(static_cast(p) == p); + } + + typename std::aligned_storage< + grpc_core::manual_ctor_impl::max_size_of::value, + grpc_core::manual_ctor_impl::max_align_of::value>::type + space_; +}; + +template +class ManualConstructor { + public: + // No constructor or destructor because one of the most useful uses of + // this class is as part of a union, and members of a union could not have + // constructors or destructors till C++11. And, anyway, the whole point of + // this class is to bypass constructor and destructor. + + Type* get() { return reinterpret_cast(&space_); } + const Type* get() const { return reinterpret_cast(&space_); } + + Type* operator->() { return get(); } + const Type* operator->() const { return get(); } + + Type& operator*() { return *get(); } + const Type& operator*() const { return *get(); } + + void Init() { new (&space_) Type; } + + // Init() constructs the Type instance using the given arguments + // (which are forwarded to Type's constructor). + // + // Note that Init() with no arguments performs default-initialization, + // not zero-initialization (i.e it behaves the same as "new Type;", not + // "new Type();"), so it will leave non-class types uninitialized. + template + void Init(Ts&&... 
args) { + new (&space_) Type(std::forward(args)...); + } + + // Init() that is equivalent to copy and move construction. + // Enables usage like this: + // ManualConstructor> v; + // v.Init({1, 2, 3}); + void Init(const Type& x) { new (&space_) Type(x); } + void Init(Type&& x) { new (&space_) Type(std::move(x)); } + + void Destroy() { get()->~Type(); } + + private: + typename std::aligned_storage::type space_; +}; + +} // namespace grpc_core + +#endif diff --git a/Sources/CgRPC/src/core/lib/gprpp/memory.h b/Sources/CgRPC/src/core/lib/gprpp/memory.h new file mode 100644 index 000000000..ba2f54667 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/memory.h @@ -0,0 +1,111 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_GPRPP_MEMORY_H +#define GRPC_CORE_LIB_GPRPP_MEMORY_H + +#include + +#include + +#include +#include +#include + +namespace grpc_core { + +// The alignment of memory returned by gpr_malloc(). +constexpr size_t kAlignmentForDefaultAllocationInBytes = 8; + +// Alternative to new, since we cannot use it (for fear of libstdc++) +template +inline T* New(Args&&... args) { + void* p = alignof(T) > kAlignmentForDefaultAllocationInBytes + ? gpr_malloc_aligned(sizeof(T), alignof(T)) + : gpr_malloc(sizeof(T)); + return new (p) T(std::forward(args)...); +} + +// Alternative to delete, since we cannot use it (for fear of libstdc++) +template +inline void Delete(T* p) { + p->~T(); + if (alignof(T) > kAlignmentForDefaultAllocationInBytes) { + gpr_free_aligned(p); + } else { + gpr_free(p); + } +} + +template +class DefaultDelete { + public: + void operator()(T* p) { Delete(p); } +}; + +template > +using UniquePtr = std::unique_ptr; + +template +inline UniquePtr MakeUnique(Args&&... args) { + return UniquePtr(New(std::forward(args)...)); +} + +// an allocator that uses gpr_malloc/gpr_free +template +class Allocator { + public: + typedef T value_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef std::false_type propagate_on_container_move_assignment; + template + struct rebind { + typedef Allocator other; + }; + typedef std::true_type is_always_equal; + + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } + pointer allocate(std::size_t n, + std::allocator::const_pointer hint = nullptr) { + return static_cast(gpr_malloc(n * sizeof(T))); + } + void deallocate(T* p, std::size_t n) { gpr_free(p); } + size_t max_size() const { + return std::numeric_limits::max() / sizeof(value_type); + } + void construct(pointer p, const_reference val) { new ((void*)p) T(val); } + template + void construct(U* p, Args&&... 
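// [Editorial illustration, not part of the vendored sources]
// manual_constructor.h and memory.h above exist because core code avoids the
// default global operator new/delete: ManualConstructor<T> gives raw aligned
// storage with explicit Init()/Destroy(), and New<>/Delete<>/MakeUnique<>
// build objects in gpr_malloc()-backed memory. A minimal sketch, assuming a
// hypothetical Counter type:
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/memory.h"

struct Counter {
  explicit Counter(int start) : value(start) {}
  int value;
};

void GprppMemorySketch() {
  // ManualConstructor: nothing is constructed until Init() placement-news the
  // object, and the caller must pair it with an explicit Destroy().
  grpc_core::ManualConstructor<Counter> slot;
  slot.Init(41);
  slot->value++;
  slot.Destroy();

  // New<>/Delete<> are the drop-in replacements for new/delete; MakeUnique<>
  // wraps the result in a UniquePtr<> whose deleter calls Delete<>().
  Counter* raw = grpc_core::New<Counter>(1);
  grpc_core::Delete(raw);
  grpc_core::UniquePtr<Counter> owned = grpc_core::MakeUnique<Counter>(2);
  (void)owned->value;
}  // `owned` is released here through DefaultDelete<Counter>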
args) { + ::new ((void*)p) U(std::forward(args)...); + } + void destroy(pointer p) { p->~T(); } + template + void destroy(U* p) { + p->~U(); + } +}; + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_GPRPP_MEMORY_H */ diff --git a/Sources/CgRPC/src/core/lib/gprpp/orphanable.h b/Sources/CgRPC/src/core/lib/gprpp/orphanable.h new file mode 100644 index 000000000..73a73995c --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/orphanable.h @@ -0,0 +1,199 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_GPRPP_ORPHANABLE_H +#define GRPC_CORE_LIB_GPRPP_ORPHANABLE_H + +#include + +#include +#include + +#include +#include + +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/debug_location.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" + +namespace grpc_core { + +// A base class for orphanable objects, which have one external owner +// but are not necessarily destroyed immediately when the external owner +// gives up ownership. Instead, the owner calls the object's Orphan() +// method, and the object then takes responsibility for its own cleanup +// and destruction. +class Orphanable { + public: + // Gives up ownership of the object. The implementation must arrange + // to eventually destroy the object without further interaction from the + // caller. + virtual void Orphan() GRPC_ABSTRACT; + + // Not copyable or movable. + Orphanable(const Orphanable&) = delete; + Orphanable& operator=(const Orphanable&) = delete; + + GRPC_ABSTRACT_BASE_CLASS + + protected: + Orphanable() {} + virtual ~Orphanable() {} +}; + +template +class OrphanableDelete { + public: + void operator()(T* p) { p->Orphan(); } +}; + +template > +using OrphanablePtr = std::unique_ptr; + +template +inline OrphanablePtr MakeOrphanable(Args&&... args) { + return OrphanablePtr(New(std::forward(args)...)); +} + +// A type of Orphanable with internal ref-counting. +template +class InternallyRefCounted : public Orphanable { + public: + // Not copyable nor movable. + InternallyRefCounted(const InternallyRefCounted&) = delete; + InternallyRefCounted& operator=(const InternallyRefCounted&) = delete; + + GRPC_ABSTRACT_BASE_CLASS + + protected: + // Allow Delete() to access destructor. + template + friend void Delete(T*); + + // Allow RefCountedPtr<> to access Unref() and IncrementRefCount(). + friend class RefCountedPtr; + + InternallyRefCounted() { gpr_ref_init(&refs_, 1); } + virtual ~InternallyRefCounted() {} + + RefCountedPtr Ref() GRPC_MUST_USE_RESULT { + IncrementRefCount(); + return RefCountedPtr(static_cast(this)); + } + + void Unref() { + if (gpr_unref(&refs_)) { + Delete(static_cast(this)); + } + } + + private: + void IncrementRefCount() { gpr_ref(&refs_); } + + gpr_refcount refs_; +}; + +// An alternative version of the InternallyRefCounted base class that +// supports tracing. 
This is intended to be used in cases where the +// object will be handled both by idiomatic C++ code using smart +// pointers and legacy code that is manually calling Ref() and Unref(). +// Once all of our code is converted to idiomatic C++, we may be able to +// eliminate this class. +template +class InternallyRefCountedWithTracing : public Orphanable { + public: + // Not copyable nor movable. + InternallyRefCountedWithTracing(const InternallyRefCountedWithTracing&) = + delete; + InternallyRefCountedWithTracing& operator=( + const InternallyRefCountedWithTracing&) = delete; + + GRPC_ABSTRACT_BASE_CLASS + + protected: + // Allow Delete() to access destructor. + template + friend void Delete(T*); + + // Allow RefCountedPtr<> to access Unref() and IncrementRefCount(). + friend class RefCountedPtr; + + InternallyRefCountedWithTracing() + : InternallyRefCountedWithTracing(static_cast(nullptr)) {} + + explicit InternallyRefCountedWithTracing(TraceFlag* trace_flag) + : trace_flag_(trace_flag) { + gpr_ref_init(&refs_, 1); + } + +#ifdef NDEBUG + explicit InternallyRefCountedWithTracing(DebugOnlyTraceFlag* trace_flag) + : InternallyRefCountedWithTracing() {} +#endif + + virtual ~InternallyRefCountedWithTracing() {} + + RefCountedPtr Ref() GRPC_MUST_USE_RESULT { + IncrementRefCount(); + return RefCountedPtr(static_cast(this)); + } + + RefCountedPtr Ref(const DebugLocation& location, + const char* reason) GRPC_MUST_USE_RESULT { + if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) { + gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count); + gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s", + trace_flag_->name(), this, location.file(), location.line(), + old_refs, old_refs + 1, reason); + } + return Ref(); + } + + // TODO(roth): Once all of our code is converted to C++ and can use + // RefCountedPtr<> instead of manual ref-counting, make the Unref() methods + // private, since they will only be used by RefCountedPtr<>, which is a + // friend of this class. + + void Unref() { + if (gpr_unref(&refs_)) { + Delete(static_cast(this)); + } + } + + void Unref(const DebugLocation& location, const char* reason) { + if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) { + gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count); + gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s", + trace_flag_->name(), this, location.file(), location.line(), + old_refs, old_refs - 1, reason); + } + Unref(); + } + + private: + void IncrementRefCount() { gpr_ref(&refs_); } + + TraceFlag* trace_flag_ = nullptr; + gpr_refcount refs_; +}; + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_GPRPP_ORPHANABLE_H */ diff --git a/Sources/CgRPC/src/core/lib/gprpp/ref_counted.h b/Sources/CgRPC/src/core/lib/gprpp/ref_counted.h new file mode 100644 index 000000000..c67e3f315 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/ref_counted.h @@ -0,0 +1,169 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
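// [Editorial illustration, not part of the vendored sources] A minimal sketch
// of the Orphanable contract from orphanable.h above: the single external
// owner holds an OrphanablePtr<>, and dropping that pointer calls Orphan()
// rather than deleting the object directly, which lets the object drain any
// in-flight work before destroying itself. The Fetcher type is hypothetical.
#include "src/core/lib/gprpp/orphanable.h"

class Fetcher : public grpc_core::Orphanable {
 public:
  void Orphan() override {
    // Real code would typically cancel pending callbacks and defer the
    // Delete() until they have drained; here nothing is in flight.
    grpc_core::Delete(this);
  }
};

void OrphanableSketch() {
  grpc_core::OrphanablePtr<Fetcher> fetcher =
      grpc_core::MakeOrphanable<Fetcher>();
  // ... start async work, hand out internal refs, etc. ...
}  // OrphanablePtr's deleter calls fetcher->Orphan() instead of Delete()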
+ * + */ + +#ifndef GRPC_CORE_LIB_GPRPP_REF_COUNTED_H +#define GRPC_CORE_LIB_GPRPP_REF_COUNTED_H + +#include + +#include +#include + +#include + +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/debug_location.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" + +namespace grpc_core { + +// A base class for reference-counted objects. +// New objects should be created via New() and start with a refcount of 1. +// When the refcount reaches 0, the object will be deleted via Delete(). +// +// This will commonly be used by CRTP (curiously-recurring template pattern) +// e.g., class MyClass : public RefCounted +template +class RefCounted { + public: + RefCountedPtr Ref() GRPC_MUST_USE_RESULT { + IncrementRefCount(); + return RefCountedPtr(static_cast(this)); + } + + // TODO(roth): Once all of our code is converted to C++ and can use + // RefCountedPtr<> instead of manual ref-counting, make this method + // private, since it will only be used by RefCountedPtr<>, which is a + // friend of this class. + void Unref() { + if (gpr_unref(&refs_)) { + Delete(static_cast(this)); + } + } + + // Not copyable nor movable. + RefCounted(const RefCounted&) = delete; + RefCounted& operator=(const RefCounted&) = delete; + + GRPC_ABSTRACT_BASE_CLASS + + protected: + // Allow Delete() to access destructor. + template + friend void Delete(T*); + + RefCounted() { gpr_ref_init(&refs_, 1); } + + virtual ~RefCounted() {} + + private: + // Allow RefCountedPtr<> to access IncrementRefCount(). + friend class RefCountedPtr; + + void IncrementRefCount() { gpr_ref(&refs_); } + + gpr_refcount refs_; +}; + +// An alternative version of the RefCounted base class that +// supports tracing. This is intended to be used in cases where the +// object will be handled both by idiomatic C++ code using smart +// pointers and legacy code that is manually calling Ref() and Unref(). +// Once all of our code is converted to idiomatic C++, we may be able to +// eliminate this class. +template +class RefCountedWithTracing { + public: + RefCountedPtr Ref() GRPC_MUST_USE_RESULT { + IncrementRefCount(); + return RefCountedPtr(static_cast(this)); + } + + RefCountedPtr Ref(const DebugLocation& location, + const char* reason) GRPC_MUST_USE_RESULT { + if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) { + gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count); + gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s", + trace_flag_->name(), this, location.file(), location.line(), + old_refs, old_refs + 1, reason); + } + return Ref(); + } + + // TODO(roth): Once all of our code is converted to C++ and can use + // RefCountedPtr<> instead of manual ref-counting, make the Unref() methods + // private, since they will only be used by RefCountedPtr<>, which is a + // friend of this class. + + void Unref() { + if (gpr_unref(&refs_)) { + Delete(static_cast(this)); + } + } + + void Unref(const DebugLocation& location, const char* reason) { + if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) { + gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count); + gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s", + trace_flag_->name(), this, location.file(), location.line(), + old_refs, old_refs - 1, reason); + } + Unref(); + } + + // Not copyable nor movable. 
+ RefCountedWithTracing(const RefCountedWithTracing&) = delete; + RefCountedWithTracing& operator=(const RefCountedWithTracing&) = delete; + + GRPC_ABSTRACT_BASE_CLASS + + protected: + // Allow Delete() to access destructor. + template + friend void Delete(T*); + + RefCountedWithTracing() + : RefCountedWithTracing(static_cast(nullptr)) {} + + explicit RefCountedWithTracing(TraceFlag* trace_flag) + : trace_flag_(trace_flag) { + gpr_ref_init(&refs_, 1); + } + +#ifdef NDEBUG + explicit RefCountedWithTracing(DebugOnlyTraceFlag* trace_flag) + : RefCountedWithTracing() {} +#endif + + virtual ~RefCountedWithTracing() {} + + private: + // Allow RefCountedPtr<> to access IncrementRefCount(). + friend class RefCountedPtr; + + void IncrementRefCount() { gpr_ref(&refs_); } + + TraceFlag* trace_flag_ = nullptr; + gpr_refcount refs_; +}; + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_GPRPP_REF_COUNTED_H */ diff --git a/Sources/CgRPC/src/core/lib/gprpp/ref_counted_ptr.h b/Sources/CgRPC/src/core/lib/gprpp/ref_counted_ptr.h new file mode 100644 index 000000000..388e2ec41 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/ref_counted_ptr.h @@ -0,0 +1,112 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_GPRPP_REF_COUNTED_PTR_H +#define GRPC_CORE_LIB_GPRPP_REF_COUNTED_PTR_H + +#include + +#include + +#include "src/core/lib/gprpp/memory.h" + +namespace grpc_core { + +// A smart pointer class for objects that provide IncrementRefCount() and +// Unref() methods, such as those provided by the RefCounted base class. +template +class RefCountedPtr { + public: + RefCountedPtr() {} + RefCountedPtr(std::nullptr_t) {} + + // If value is non-null, we take ownership of a ref to it. + explicit RefCountedPtr(T* value) { value_ = value; } + + // Move support. + RefCountedPtr(RefCountedPtr&& other) { + value_ = other.value_; + other.value_ = nullptr; + } + RefCountedPtr& operator=(RefCountedPtr&& other) { + if (value_ != nullptr) value_->Unref(); + value_ = other.value_; + other.value_ = nullptr; + return *this; + } + + // Copy support. + RefCountedPtr(const RefCountedPtr& other) { + if (other.value_ != nullptr) other.value_->IncrementRefCount(); + value_ = other.value_; + } + RefCountedPtr& operator=(const RefCountedPtr& other) { + // Note: Order of reffing and unreffing is important here in case value_ + // and other.value_ are the same object. + if (other.value_ != nullptr) other.value_->IncrementRefCount(); + if (value_ != nullptr) value_->Unref(); + value_ = other.value_; + return *this; + } + + ~RefCountedPtr() { + if (value_ != nullptr) value_->Unref(); + } + + // If value is non-null, we take ownership of a ref to it. + void reset(T* value = nullptr) { + if (value_ != nullptr) value_->Unref(); + value_ = value; + } + + // TODO(roth): This method exists solely as a transition mechanism to allow + // us to pass a ref to idiomatic C code that does not use RefCountedPtr<>. 
+ // Once all of our code has been converted to idiomatic C++, this + // method should go away. + T* release() { + T* value = value_; + value_ = nullptr; + return value; + } + + T* get() const { return value_; } + + T& operator*() const { return *value_; } + T* operator->() const { return value_; } + + bool operator==(const RefCountedPtr& other) const { + return value_ == other.value_; + } + bool operator==(const T* other) const { return value_ == other; } + bool operator!=(const RefCountedPtr& other) const { + return value_ != other.value_; + } + bool operator!=(const T* other) const { return value_ != other; } + + private: + T* value_ = nullptr; +}; + +template +inline RefCountedPtr MakeRefCounted(Args&&... args) { + return RefCountedPtr(New(std::forward(args)...)); +} + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_GPRPP_REF_COUNTED_PTR_H */ diff --git a/Sources/CgRPC/src/core/lib/gprpp/thd.h b/Sources/CgRPC/src/core/lib/gprpp/thd.h new file mode 100644 index 000000000..05c7ded45 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/thd.h @@ -0,0 +1,135 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_GPRPP_THD_H +#define GRPC_CORE_LIB_GPRPP_THD_H + +/** Internal thread interface. */ + +#include + +#include +#include +#include +#include + +#include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/memory.h" + +namespace grpc_core { +namespace internal { + +/// Base class for platform-specific thread-state +class ThreadInternalsInterface { + public: + virtual ~ThreadInternalsInterface() {} + virtual void Start() GRPC_ABSTRACT; + virtual void Join() GRPC_ABSTRACT; + GRPC_ABSTRACT_BASE_CLASS +}; + +} // namespace internal + +class Thread { + public: + /// Default constructor only to allow use in structs that lack constructors + /// Does not produce a validly-constructed thread; must later + /// use placement new to construct a real thread. Does not init mu_ and cv_ + Thread() : state_(FAKE), impl_(nullptr) {} + + /// Normal constructor to create a thread with name \a thd_name, + /// which will execute a thread based on function \a thd_body + /// with argument \a arg once it is started. + /// The optional \a success argument indicates whether the thread + /// is successfully created. + Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg, + bool* success = nullptr); + + /// Move constructor for thread. After this is called, the other thread + /// no longer represents a living thread object + Thread(Thread&& other) : state_(other.state_), impl_(other.impl_) { + other.state_ = MOVED; + other.impl_ = nullptr; + } + + /// Move assignment operator for thread. After this is called, the other + /// thread no longer represents a living thread object. 
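// [Editorial illustration, not part of the vendored sources] ref_counted.h and
// ref_counted_ptr.h above pair a CRTP base holding a gpr_refcount with a smart
// pointer that manages that count. A minimal sketch, assuming a hypothetical
// Session type:
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"

class Session : public grpc_core::RefCounted<Session> {
 public:
  int id = 0;
};

void RefCountedSketch() {
  // MakeRefCounted<>() allocates via New<>(), so the object starts with a
  // refcount of 1, owned by the returned RefCountedPtr<>.
  grpc_core::RefCountedPtr<Session> s = grpc_core::MakeRefCounted<Session>();
  s->id = 7;

  // Ref() bumps the count and hands back a second owning pointer; copying a
  // RefCountedPtr<> does the same through IncrementRefCount().
  grpc_core::RefCountedPtr<Session> other = s->Ref();
  (void)other;
}  // both pointers Unref() here; the second Unref() hits zero and Delete()s Session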
Not allowed if this + /// thread actually exists + Thread& operator=(Thread&& other) { + if (this != &other) { + // TODO(vjpai): if we can be sure that all Thread's are actually + // constructed, then we should assert GPR_ASSERT(impl_ == nullptr) here. + // However, as long as threads come in structures that are + // allocated via gpr_malloc, this will not be the case, so we cannot + // assert it for the time being. + state_ = other.state_; + impl_ = other.impl_; + other.state_ = MOVED; + other.impl_ = nullptr; + } + return *this; + } + + /// The destructor is strictly optional; either the thread never came to life + /// and the constructor itself killed it or it has already been joined and + /// the Join function kills it. The destructor shouldn't have to do anything. + ~Thread() { GPR_ASSERT(impl_ == nullptr); } + + void Start() { + if (impl_ != nullptr) { + GPR_ASSERT(state_ == ALIVE); + state_ = STARTED; + impl_->Start(); + } else { + GPR_ASSERT(state_ == FAILED); + } + }; + + void Join() { + if (impl_ != nullptr) { + impl_->Join(); + grpc_core::Delete(impl_); + state_ = DONE; + impl_ = nullptr; + } else { + GPR_ASSERT(state_ == FAILED); + } + }; + + static void Init(); + static bool AwaitAll(gpr_timespec deadline); + + private: + Thread(const Thread&) = delete; + Thread& operator=(const Thread&) = delete; + + /// The thread states are as follows: + /// FAKE -- just a dummy placeholder Thread created by the default constructor + /// ALIVE -- an actual thread of control exists associated with this thread + /// STARTED -- the thread of control has been started + /// DONE -- the thread of control has completed and been joined + /// FAILED -- the thread of control never came alive + /// MOVED -- contents were moved out and we're no longer tracking them + enum ThreadState { FAKE, ALIVE, STARTED, DONE, FAILED, MOVED }; + ThreadState state_; + internal::ThreadInternalsInterface* impl_; +}; + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_GPRPP_THD_H */ diff --git a/Sources/CgRPC/src/core/lib/gprpp/thd_posix.cc b/Sources/CgRPC/src/core/lib/gprpp/thd_posix.cc new file mode 100644 index 000000000..2f6c2edca --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/thd_posix.cc @@ -0,0 +1,209 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Posix implementation for gpr threads. */ + +#include + +#ifdef GPR_POSIX_SYNC + +#include "src/core/lib/gprpp/thd.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "src/core/lib/gpr/fork.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/memory.h" + +namespace grpc_core { +namespace { +gpr_mu g_mu; +gpr_cv g_cv; +int g_thread_count; +int g_awaiting_threads; + +class ThreadInternalsPosix; +struct thd_arg { + ThreadInternalsPosix* thread; + void (*body)(void* arg); /* body of a thread */ + void* arg; /* argument to a thread */ + const char* name; /* name of thread. Can be nullptr. 
*/ +}; + +class ThreadInternalsPosix + : public grpc_core::internal::ThreadInternalsInterface { + public: + ThreadInternalsPosix(const char* thd_name, void (*thd_body)(void* arg), + void* arg, bool* success) + : started_(false) { + gpr_mu_init(&mu_); + gpr_cv_init(&ready_); + pthread_attr_t attr; + /* don't use gpr_malloc as we may cause an infinite recursion with + * the profiling code */ + thd_arg* info = static_cast(malloc(sizeof(*info))); + GPR_ASSERT(info != nullptr); + info->thread = this; + info->body = thd_body; + info->arg = arg; + info->name = thd_name; + inc_thd_count(); + + GPR_ASSERT(pthread_attr_init(&attr) == 0); + GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) == + 0); + + *success = + (pthread_create(&pthread_id_, &attr, + [](void* v) -> void* { + thd_arg arg = *static_cast(v); + free(v); + if (arg.name != nullptr) { +#if GPR_APPLE_PTHREAD_NAME + /* Apple supports 64 characters, and will + * truncate if it's longer. */ + pthread_setname_np(arg.name); +#elif GPR_LINUX_PTHREAD_NAME + /* Linux supports 16 characters max, and will + * error if it's longer. */ + char buf[16]; + size_t buf_len = GPR_ARRAY_SIZE(buf) - 1; + strncpy(buf, arg.name, buf_len); + buf[buf_len] = '\0'; + pthread_setname_np(pthread_self(), buf); +#endif // GPR_APPLE_PTHREAD_NAME + } + + gpr_mu_lock(&arg.thread->mu_); + while (!arg.thread->started_) { + gpr_cv_wait(&arg.thread->ready_, &arg.thread->mu_, + gpr_inf_future(GPR_CLOCK_MONOTONIC)); + } + gpr_mu_unlock(&arg.thread->mu_); + + (*arg.body)(arg.arg); + dec_thd_count(); + return nullptr; + }, + info) == 0); + + GPR_ASSERT(pthread_attr_destroy(&attr) == 0); + + if (!success) { + /* don't use gpr_free, as this was allocated using malloc (see above) */ + free(info); + dec_thd_count(); + } + }; + + ~ThreadInternalsPosix() override { + gpr_mu_destroy(&mu_); + gpr_cv_destroy(&ready_); + } + + void Start() override { + gpr_mu_lock(&mu_); + started_ = true; + gpr_cv_signal(&ready_); + gpr_mu_unlock(&mu_); + } + + void Join() override { pthread_join(pthread_id_, nullptr); } + + private: + /***************************************** + * Only used when fork support is enabled + */ + + static void inc_thd_count() { + if (grpc_fork_support_enabled()) { + gpr_mu_lock(&g_mu); + g_thread_count++; + gpr_mu_unlock(&g_mu); + } + } + + static void dec_thd_count() { + if (grpc_fork_support_enabled()) { + gpr_mu_lock(&g_mu); + g_thread_count--; + if (g_awaiting_threads && g_thread_count == 0) { + gpr_cv_signal(&g_cv); + } + gpr_mu_unlock(&g_mu); + } + } + + gpr_mu mu_; + gpr_cv ready_; + bool started_; + pthread_t pthread_id_; +}; + +} // namespace + +Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg, + bool* success) { + bool outcome = false; + impl_ = + grpc_core::New(thd_name, thd_body, arg, &outcome); + if (outcome) { + state_ = ALIVE; + } else { + state_ = FAILED; + grpc_core::Delete(impl_); + impl_ = nullptr; + } + + if (success != nullptr) { + *success = outcome; + } +} + +void Thread::Init() { + gpr_mu_init(&g_mu); + gpr_cv_init(&g_cv); + g_thread_count = 0; + g_awaiting_threads = 0; +} + +bool Thread::AwaitAll(gpr_timespec deadline) { + gpr_mu_lock(&g_mu); + g_awaiting_threads = 1; + int res = 0; + while ((g_thread_count > 0) && + (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0)) { + res = gpr_cv_wait(&g_cv, &g_mu, deadline); + } + g_awaiting_threads = 0; + gpr_mu_unlock(&g_mu); + return res == 0; +} + +} // namespace grpc_core + +// The following is in the external namespace as it is exposed as C89 API 
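// [Editorial illustration, not part of the vendored sources] The Thread
// wrapper above separates creation from execution: the constructor spawns the
// underlying thread, which blocks on a condition variable until Start() flips
// started_, and Join() must run before the Thread object is destroyed. A
// minimal sketch with a hypothetical worker function:
#include "src/core/lib/gprpp/thd.h"

static void Worker(void* arg) {
  int* counter = static_cast<int*>(arg);
  ++*counter;
}

void ThreadSketch() {
  int counter = 0;
  bool ok = false;
  grpc_core::Thread t("grpc_sketch_worker", Worker, &counter, &ok);
  if (ok) {
    t.Start();  // releases the worker; state ALIVE -> STARTED
    t.Join();   // waits for Worker to return; state -> DONE, impl_ freed
  }
  // ~Thread() asserts impl_ == nullptr, which Join() (or a failed creation)
  // guarantees.
}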
+gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)pthread_self(); } + +#endif /* GPR_POSIX_SYNC */ diff --git a/Sources/CgRPC/src/core/lib/gprpp/thd_windows.cc b/Sources/CgRPC/src/core/lib/gprpp/thd_windows.cc new file mode 100644 index 000000000..59ea02f3d --- /dev/null +++ b/Sources/CgRPC/src/core/lib/gprpp/thd_windows.cc @@ -0,0 +1,162 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Windows implementation for gpr threads. */ + +#include + +#ifdef GPR_WINDOWS + +#include "src/core/lib/gprpp/thd.h" + +#include +#include +#include +#include + +#include "src/core/lib/gprpp/memory.h" + +#if defined(_MSC_VER) +#define thread_local __declspec(thread) +#define WIN_LAMBDA +#elif defined(__GNUC__) +#define thread_local __thread +#define WIN_LAMBDA WINAPI +#else +#error "Unknown compiler - please file a bug report" +#endif + +namespace { +class ThreadInternalsWindows; +struct thd_info { + ThreadInternalsWindows* thread; + void (*body)(void* arg); /* body of a thread */ + void* arg; /* argument to a thread */ + HANDLE join_event; /* the join event */ +}; + +thread_local struct thd_info* g_thd_info; + +class ThreadInternalsWindows + : public grpc_core::internal::ThreadInternalsInterface { + public: + ThreadInternalsWindows(void (*thd_body)(void* arg), void* arg, bool* success) + : started_(false) { + gpr_mu_init(&mu_); + gpr_cv_init(&ready_); + + HANDLE handle; + info_ = (struct thd_info*)gpr_malloc(sizeof(*info_)); + info_->thread = this; + info_->body = thd_body; + info_->arg = arg; + + info_->join_event = CreateEvent(nullptr, FALSE, FALSE, nullptr); + if (info_->join_event == nullptr) { + gpr_free(info_); + *success = false; + } else { + handle = CreateThread( + nullptr, 64 * 1024, + [](void* v) WIN_LAMBDA -> DWORD { + g_thd_info = static_cast(v); + gpr_mu_lock(&g_thd_info->thread->mu_); + while (!g_thd_info->thread->started_) { + gpr_cv_wait(&g_thd_info->thread->ready_, &g_thd_info->thread->mu_, + gpr_inf_future(GPR_CLOCK_MONOTONIC)); + } + gpr_mu_unlock(&g_thd_info->thread->mu_); + g_thd_info->body(g_thd_info->arg); + BOOL ret = SetEvent(g_thd_info->join_event); + GPR_ASSERT(ret); + return 0; + }, + info_, 0, nullptr); + if (handle == nullptr) { + destroy_thread(); + *success = false; + } else { + CloseHandle(handle); + *success = true; + } + } + } + + ~ThreadInternalsWindows() override { + gpr_mu_destroy(&mu_); + gpr_cv_destroy(&ready_); + } + + void Start() override { + gpr_mu_lock(&mu_); + started_ = true; + gpr_cv_signal(&ready_); + gpr_mu_unlock(&mu_); + } + + void Join() override { + DWORD ret = WaitForSingleObject(info_->join_event, INFINITE); + GPR_ASSERT(ret == WAIT_OBJECT_0); + destroy_thread(); + } + + private: + void destroy_thread() { + CloseHandle(info_->join_event); + gpr_free(info_); + } + + gpr_mu mu_; + gpr_cv ready_; + bool started_; + thd_info* info_; +}; + +} // namespace + +namespace grpc_core { + +void Thread::Init() {} + +bool Thread::AwaitAll(gpr_timespec deadline) { + // TODO: Consider adding 
this if needed + return false; +} + +Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg, + bool* success) { + bool outcome = false; + impl_ = grpc_core::New(thd_body, arg, &outcome); + if (outcome) { + state_ = ALIVE; + } else { + state_ = FAILED; + grpc_core::Delete(impl_); + impl_ = nullptr; + } + + if (success != nullptr) { + *success = outcome; + } +} + +} // namespace grpc_core + +gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)g_thd_info; } + +#endif /* GPR_WINDOWS */ diff --git a/Sources/CgRPC/src/core/lib/http/format_request.c b/Sources/CgRPC/src/core/lib/http/format_request.cc similarity index 83% rename from Sources/CgRPC/src/core/lib/http/format_request.c rename to Sources/CgRPC/src/core/lib/http/format_request.cc index 88fb0ab0b..171234464 100644 --- a/Sources/CgRPC/src/core/lib/http/format_request.c +++ b/Sources/CgRPC/src/core/lib/http/format_request.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/http/format_request.h" #include @@ -25,11 +27,10 @@ #include #include #include -#include -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" -static void fill_common_header(const grpc_httpcli_request *request, - gpr_strvec *buf, bool connection_close) { +static void fill_common_header(const grpc_httpcli_request* request, + gpr_strvec* buf, bool connection_close) { size_t i; gpr_strvec_add(buf, gpr_strdup(request->http.path)); gpr_strvec_add(buf, gpr_strdup(" HTTP/1.0\r\n")); @@ -51,9 +52,9 @@ static void fill_common_header(const grpc_httpcli_request *request, } grpc_slice grpc_httpcli_format_get_request( - const grpc_httpcli_request *request) { + const grpc_httpcli_request* request) { gpr_strvec out; - char *flat; + char* flat; size_t flat_len; gpr_strvec_init(&out); @@ -67,11 +68,11 @@ grpc_slice grpc_httpcli_format_get_request( return grpc_slice_new(flat, flat_len, gpr_free); } -grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request, - const char *body_bytes, +grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request* request, + const char* body_bytes, size_t body_size) { gpr_strvec out; - char *tmp; + char* tmp; size_t out_len; size_t i; @@ -90,7 +91,8 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request, if (!has_content_type) { gpr_strvec_add(&out, gpr_strdup("Content-Type: text/plain\r\n")); } - gpr_asprintf(&tmp, "Content-Length: %lu\r\n", (unsigned long)body_size); + gpr_asprintf(&tmp, "Content-Length: %lu\r\n", + static_cast(body_size)); gpr_strvec_add(&out, tmp); } gpr_strvec_add(&out, gpr_strdup("\r\n")); @@ -98,7 +100,7 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request, gpr_strvec_destroy(&out); if (body_bytes) { - tmp = (char *)gpr_realloc(tmp, out_len + body_size); + tmp = static_cast(gpr_realloc(tmp, out_len + body_size)); memcpy(tmp + out_len, body_bytes, body_size); out_len += body_size; } @@ -107,14 +109,14 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request, } grpc_slice grpc_httpcli_format_connect_request( - const grpc_httpcli_request *request) { + const grpc_httpcli_request* request) { gpr_strvec out; gpr_strvec_init(&out); gpr_strvec_add(&out, gpr_strdup("CONNECT ")); fill_common_header(request, &out, false); gpr_strvec_add(&out, gpr_strdup("\r\n")); size_t flat_len; - char *flat = gpr_strvec_flatten(&out, &flat_len); + char* flat = gpr_strvec_flatten(&out, &flat_len); gpr_strvec_destroy(&out); return grpc_slice_new(flat, flat_len, gpr_free); } diff --git 
a/Sources/CgRPC/src/core/lib/http/format_request.h b/Sources/CgRPC/src/core/lib/http/format_request.h index 12b42e42f..bcc332fe6 100644 --- a/Sources/CgRPC/src/core/lib/http/format_request.h +++ b/Sources/CgRPC/src/core/lib/http/format_request.h @@ -19,14 +19,16 @@ #ifndef GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H #define GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H +#include + #include #include "src/core/lib/http/httpcli.h" -grpc_slice grpc_httpcli_format_get_request(const grpc_httpcli_request *request); -grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request, - const char *body_bytes, +grpc_slice grpc_httpcli_format_get_request(const grpc_httpcli_request* request); +grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request* request, + const char* body_bytes, size_t body_size); grpc_slice grpc_httpcli_format_connect_request( - const grpc_httpcli_request *request); + const grpc_httpcli_request* request); #endif /* GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H */ diff --git a/Sources/CgRPC/src/core/lib/http/httpcli.c b/Sources/CgRPC/src/core/lib/http/httpcli.c deleted file mode 100644 index db995943a..000000000 --- a/Sources/CgRPC/src/core/lib/http/httpcli.c +++ /dev/null @@ -1,321 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/http/httpcli.h" - -#include - -#include -#include -#include -#include - -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/http/format_request.h" -#include "src/core/lib/http/parser.h" -#include "src/core/lib/iomgr/endpoint.h" -#include "src/core/lib/iomgr/iomgr_internal.h" -#include "src/core/lib/iomgr/resolve_address.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/iomgr/tcp_client.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/string.h" - -typedef struct { - grpc_slice request_text; - grpc_http_parser parser; - grpc_resolved_addresses *addresses; - size_t next_address; - grpc_endpoint *ep; - char *host; - char *ssl_host_override; - gpr_timespec deadline; - int have_read_byte; - const grpc_httpcli_handshaker *handshaker; - grpc_closure *on_done; - grpc_httpcli_context *context; - grpc_polling_entity *pollent; - grpc_iomgr_object iomgr_obj; - grpc_slice_buffer incoming; - grpc_slice_buffer outgoing; - grpc_closure on_read; - grpc_closure done_write; - grpc_closure connected; - grpc_error *overall_error; - grpc_resource_quota *resource_quota; -} internal_request; - -static grpc_httpcli_get_override g_get_override = NULL; -static grpc_httpcli_post_override g_post_override = NULL; - -static void plaintext_handshake(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *endpoint, const char *host, - gpr_timespec deadline, - void (*on_done)(grpc_exec_ctx *exec_ctx, - void *arg, - grpc_endpoint *endpoint)) { - on_done(exec_ctx, arg, endpoint); -} - -const grpc_httpcli_handshaker grpc_httpcli_plaintext = {"http", - plaintext_handshake}; - -void grpc_httpcli_context_init(grpc_httpcli_context *context) { - context->pollset_set = grpc_pollset_set_create(); -} - -void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx, - grpc_httpcli_context *context) { - grpc_pollset_set_destroy(exec_ctx, context->pollset_set); -} - -static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req, - grpc_error *due_to_error); - -static void finish(grpc_exec_ctx *exec_ctx, internal_request *req, - grpc_error *error) { - grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent, - req->context->pollset_set); - GRPC_CLOSURE_SCHED(exec_ctx, req->on_done, error); - grpc_http_parser_destroy(&req->parser); - if (req->addresses != NULL) { - grpc_resolved_addresses_destroy(req->addresses); - } - if (req->ep != NULL) { - grpc_endpoint_destroy(exec_ctx, req->ep); - } - grpc_slice_unref_internal(exec_ctx, req->request_text); - gpr_free(req->host); - gpr_free(req->ssl_host_override); - grpc_iomgr_unregister_object(&req->iomgr_obj); - grpc_slice_buffer_destroy_internal(exec_ctx, &req->incoming); - grpc_slice_buffer_destroy_internal(exec_ctx, &req->outgoing); - GRPC_ERROR_UNREF(req->overall_error); - grpc_resource_quota_unref_internal(exec_ctx, req->resource_quota); - gpr_free(req); -} - -static void append_error(internal_request *req, grpc_error *error) { - if (req->overall_error == GRPC_ERROR_NONE) { - req->overall_error = - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed HTTP/1 client request"); - } - grpc_resolved_address *addr = &req->addresses->addrs[req->next_address - 1]; - char *addr_text = grpc_sockaddr_to_uri(addr); - req->overall_error = grpc_error_add_child( - req->overall_error, - grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, - grpc_slice_from_copied_string(addr_text))); - gpr_free(addr_text); -} - -static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) { - 
grpc_endpoint_read(exec_ctx, req->ep, &req->incoming, &req->on_read); -} - -static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *error) { - internal_request *req = (internal_request *)user_data; - size_t i; - - for (i = 0; i < req->incoming.count; i++) { - if (GRPC_SLICE_LENGTH(req->incoming.slices[i])) { - req->have_read_byte = 1; - grpc_error *err = - grpc_http_parser_parse(&req->parser, req->incoming.slices[i], NULL); - if (err != GRPC_ERROR_NONE) { - finish(exec_ctx, req, err); - return; - } - } - } - - if (error == GRPC_ERROR_NONE) { - do_read(exec_ctx, req); - } else if (!req->have_read_byte) { - next_address(exec_ctx, req, GRPC_ERROR_REF(error)); - } else { - finish(exec_ctx, req, grpc_http_parser_eof(&req->parser)); - } -} - -static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) { - do_read(exec_ctx, req); -} - -static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - internal_request *req = (internal_request *)arg; - if (error == GRPC_ERROR_NONE) { - on_written(exec_ctx, req); - } else { - next_address(exec_ctx, req, GRPC_ERROR_REF(error)); - } -} - -static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) { - grpc_slice_ref_internal(req->request_text); - grpc_slice_buffer_add(&req->outgoing, req->request_text); - grpc_endpoint_write(exec_ctx, req->ep, &req->outgoing, &req->done_write); -} - -static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *ep) { - internal_request *req = (internal_request *)arg; - - if (!ep) { - next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Unexplained handshake failure")); - return; - } - - req->ep = ep; - start_write(exec_ctx, req); -} - -static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - internal_request *req = (internal_request *)arg; - - if (!req->ep) { - next_address(exec_ctx, req, GRPC_ERROR_REF(error)); - return; - } - req->handshaker->handshake( - exec_ctx, req, req->ep, - req->ssl_host_override ? 
req->ssl_host_override : req->host, - req->deadline, on_handshake_done); -} - -static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req, - grpc_error *error) { - grpc_resolved_address *addr; - if (error != GRPC_ERROR_NONE) { - append_error(req, error); - } - if (req->next_address == req->addresses->naddrs) { - finish(exec_ctx, req, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Failed HTTP requests to all targets", &req->overall_error, 1)); - return; - } - addr = &req->addresses->addrs[req->next_address++]; - GRPC_CLOSURE_INIT(&req->connected, on_connected, req, - grpc_schedule_on_exec_ctx); - grpc_arg arg = grpc_channel_arg_pointer_create( - (char *)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota, - grpc_resource_quota_arg_vtable()); - grpc_channel_args args = {1, &arg}; - grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep, - req->context->pollset_set, &args, addr, - req->deadline); -} - -static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - internal_request *req = (internal_request *)arg; - if (error != GRPC_ERROR_NONE) { - finish(exec_ctx, req, GRPC_ERROR_REF(error)); - return; - } - req->next_address = 0; - next_address(exec_ctx, req, GRPC_ERROR_NONE); -} - -static void internal_request_begin(grpc_exec_ctx *exec_ctx, - grpc_httpcli_context *context, - grpc_polling_entity *pollent, - grpc_resource_quota *resource_quota, - const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, - grpc_httpcli_response *response, - const char *name, grpc_slice request_text) { - internal_request *req = - (internal_request *)gpr_malloc(sizeof(internal_request)); - memset(req, 0, sizeof(*req)); - req->request_text = request_text; - grpc_http_parser_init(&req->parser, GRPC_HTTP_RESPONSE, response); - req->on_done = on_done; - req->deadline = deadline; - req->handshaker = - request->handshaker ? 
request->handshaker : &grpc_httpcli_plaintext; - req->context = context; - req->pollent = pollent; - req->overall_error = GRPC_ERROR_NONE; - req->resource_quota = grpc_resource_quota_ref_internal(resource_quota); - GRPC_CLOSURE_INIT(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_INIT(&req->done_write, done_write, req, - grpc_schedule_on_exec_ctx); - grpc_slice_buffer_init(&req->incoming); - grpc_slice_buffer_init(&req->outgoing); - grpc_iomgr_register_object(&req->iomgr_obj, name); - req->host = gpr_strdup(request->host); - req->ssl_host_override = gpr_strdup(request->ssl_host_override); - - GPR_ASSERT(pollent); - grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent, - req->context->pollset_set); - grpc_resolve_address( - exec_ctx, request->host, req->handshaker->default_port, - req->context->pollset_set, - GRPC_CLOSURE_CREATE(on_resolved, req, grpc_schedule_on_exec_ctx), - &req->addresses); -} - -void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, - grpc_polling_entity *pollent, - grpc_resource_quota *resource_quota, - const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_done, - grpc_httpcli_response *response) { - char *name; - if (g_get_override && - g_get_override(exec_ctx, request, deadline, on_done, response)) { - return; - } - gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->http.path); - internal_request_begin(exec_ctx, context, pollent, resource_quota, request, - deadline, on_done, response, name, - grpc_httpcli_format_get_request(request)); - gpr_free(name); -} - -void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, - grpc_polling_entity *pollent, - grpc_resource_quota *resource_quota, - const grpc_httpcli_request *request, - const char *body_bytes, size_t body_size, - gpr_timespec deadline, grpc_closure *on_done, - grpc_httpcli_response *response) { - char *name; - if (g_post_override && - g_post_override(exec_ctx, request, body_bytes, body_size, deadline, - on_done, response)) { - return; - } - gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->http.path); - internal_request_begin( - exec_ctx, context, pollent, resource_quota, request, deadline, on_done, - response, name, - grpc_httpcli_format_post_request(request, body_bytes, body_size)); - gpr_free(name); -} - -void grpc_httpcli_set_override(grpc_httpcli_get_override get, - grpc_httpcli_post_override post) { - g_get_override = get; - g_post_override = post; -} diff --git a/Sources/CgRPC/src/core/lib/http/httpcli.cc b/Sources/CgRPC/src/core/lib/http/httpcli.cc new file mode 100644 index 000000000..12060074c --- /dev/null +++ b/Sources/CgRPC/src/core/lib/http/httpcli.cc @@ -0,0 +1,303 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/http/httpcli.h" + +#include + +#include +#include +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/http/format_request.h" +#include "src/core/lib/http/parser.h" +#include "src/core/lib/iomgr/endpoint.h" +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/iomgr/tcp_client.h" +#include "src/core/lib/slice/slice_internal.h" + +typedef struct { + grpc_slice request_text; + grpc_http_parser parser; + grpc_resolved_addresses* addresses; + size_t next_address; + grpc_endpoint* ep; + char* host; + char* ssl_host_override; + grpc_millis deadline; + int have_read_byte; + const grpc_httpcli_handshaker* handshaker; + grpc_closure* on_done; + grpc_httpcli_context* context; + grpc_polling_entity* pollent; + grpc_iomgr_object iomgr_obj; + grpc_slice_buffer incoming; + grpc_slice_buffer outgoing; + grpc_closure on_read; + grpc_closure done_write; + grpc_closure connected; + grpc_error* overall_error; + grpc_resource_quota* resource_quota; +} internal_request; + +static grpc_httpcli_get_override g_get_override = nullptr; +static grpc_httpcli_post_override g_post_override = nullptr; + +static void plaintext_handshake(void* arg, grpc_endpoint* endpoint, + const char* host, grpc_millis deadline, + void (*on_done)(void* arg, + grpc_endpoint* endpoint)) { + on_done(arg, endpoint); +} + +const grpc_httpcli_handshaker grpc_httpcli_plaintext = {"http", + plaintext_handshake}; + +void grpc_httpcli_context_init(grpc_httpcli_context* context) { + context->pollset_set = grpc_pollset_set_create(); +} + +void grpc_httpcli_context_destroy(grpc_httpcli_context* context) { + grpc_pollset_set_destroy(context->pollset_set); +} + +static void next_address(internal_request* req, grpc_error* due_to_error); + +static void finish(internal_request* req, grpc_error* error) { + grpc_polling_entity_del_from_pollset_set(req->pollent, + req->context->pollset_set); + GRPC_CLOSURE_SCHED(req->on_done, error); + grpc_http_parser_destroy(&req->parser); + if (req->addresses != nullptr) { + grpc_resolved_addresses_destroy(req->addresses); + } + if (req->ep != nullptr) { + grpc_endpoint_destroy(req->ep); + } + grpc_slice_unref_internal(req->request_text); + gpr_free(req->host); + gpr_free(req->ssl_host_override); + grpc_iomgr_unregister_object(&req->iomgr_obj); + grpc_slice_buffer_destroy_internal(&req->incoming); + grpc_slice_buffer_destroy_internal(&req->outgoing); + GRPC_ERROR_UNREF(req->overall_error); + grpc_resource_quota_unref_internal(req->resource_quota); + gpr_free(req); +} + +static void append_error(internal_request* req, grpc_error* error) { + if (req->overall_error == GRPC_ERROR_NONE) { + req->overall_error = + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed HTTP/1 client request"); + } + grpc_resolved_address* addr = &req->addresses->addrs[req->next_address - 1]; + char* addr_text = grpc_sockaddr_to_uri(addr); + req->overall_error = grpc_error_add_child( + req->overall_error, + grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, + grpc_slice_from_copied_string(addr_text))); + gpr_free(addr_text); +} + +static void do_read(internal_request* req) { + grpc_endpoint_read(req->ep, &req->incoming, &req->on_read); +} + +static void on_read(void* user_data, grpc_error* error) { + internal_request* req = static_cast(user_data); + size_t i; + + for (i = 0; i < req->incoming.count; i++) { + if 
(GRPC_SLICE_LENGTH(req->incoming.slices[i])) { + req->have_read_byte = 1; + grpc_error* err = grpc_http_parser_parse( + &req->parser, req->incoming.slices[i], nullptr); + if (err != GRPC_ERROR_NONE) { + finish(req, err); + return; + } + } + } + + if (error == GRPC_ERROR_NONE) { + do_read(req); + } else if (!req->have_read_byte) { + next_address(req, GRPC_ERROR_REF(error)); + } else { + finish(req, grpc_http_parser_eof(&req->parser)); + } +} + +static void on_written(internal_request* req) { do_read(req); } + +static void done_write(void* arg, grpc_error* error) { + internal_request* req = static_cast(arg); + if (error == GRPC_ERROR_NONE) { + on_written(req); + } else { + next_address(req, GRPC_ERROR_REF(error)); + } +} + +static void start_write(internal_request* req) { + grpc_slice_ref_internal(req->request_text); + grpc_slice_buffer_add(&req->outgoing, req->request_text); + grpc_endpoint_write(req->ep, &req->outgoing, &req->done_write); +} + +static void on_handshake_done(void* arg, grpc_endpoint* ep) { + internal_request* req = static_cast(arg); + + if (!ep) { + next_address(req, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Unexplained handshake failure")); + return; + } + + req->ep = ep; + start_write(req); +} + +static void on_connected(void* arg, grpc_error* error) { + internal_request* req = static_cast(arg); + + if (!req->ep) { + next_address(req, GRPC_ERROR_REF(error)); + return; + } + req->handshaker->handshake( + req, req->ep, req->ssl_host_override ? req->ssl_host_override : req->host, + req->deadline, on_handshake_done); +} + +static void next_address(internal_request* req, grpc_error* error) { + grpc_resolved_address* addr; + if (error != GRPC_ERROR_NONE) { + append_error(req, error); + } + if (req->next_address == req->addresses->naddrs) { + finish(req, + GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Failed HTTP requests to all targets", &req->overall_error, 1)); + return; + } + addr = &req->addresses->addrs[req->next_address++]; + GRPC_CLOSURE_INIT(&req->connected, on_connected, req, + grpc_schedule_on_exec_ctx); + grpc_arg arg = grpc_channel_arg_pointer_create( + (char*)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota, + grpc_resource_quota_arg_vtable()); + grpc_channel_args args = {1, &arg}; + grpc_tcp_client_connect(&req->connected, &req->ep, req->context->pollset_set, + &args, addr, req->deadline); +} + +static void on_resolved(void* arg, grpc_error* error) { + internal_request* req = static_cast(arg); + if (error != GRPC_ERROR_NONE) { + finish(req, GRPC_ERROR_REF(error)); + return; + } + req->next_address = 0; + next_address(req, GRPC_ERROR_NONE); +} + +static void internal_request_begin(grpc_httpcli_context* context, + grpc_polling_entity* pollent, + grpc_resource_quota* resource_quota, + const grpc_httpcli_request* request, + grpc_millis deadline, grpc_closure* on_done, + grpc_httpcli_response* response, + const char* name, grpc_slice request_text) { + internal_request* req = + static_cast(gpr_malloc(sizeof(internal_request))); + memset(req, 0, sizeof(*req)); + req->request_text = request_text; + grpc_http_parser_init(&req->parser, GRPC_HTTP_RESPONSE, response); + req->on_done = on_done; + req->deadline = deadline; + req->handshaker = + request->handshaker ? 
request->handshaker : &grpc_httpcli_plaintext; + req->context = context; + req->pollent = pollent; + req->overall_error = GRPC_ERROR_NONE; + req->resource_quota = grpc_resource_quota_ref_internal(resource_quota); + GRPC_CLOSURE_INIT(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx); + GRPC_CLOSURE_INIT(&req->done_write, done_write, req, + grpc_schedule_on_exec_ctx); + grpc_slice_buffer_init(&req->incoming); + grpc_slice_buffer_init(&req->outgoing); + grpc_iomgr_register_object(&req->iomgr_obj, name); + req->host = gpr_strdup(request->host); + req->ssl_host_override = gpr_strdup(request->ssl_host_override); + + GPR_ASSERT(pollent); + grpc_polling_entity_add_to_pollset_set(req->pollent, + req->context->pollset_set); + grpc_resolve_address( + request->host, req->handshaker->default_port, req->context->pollset_set, + GRPC_CLOSURE_CREATE(on_resolved, req, grpc_schedule_on_exec_ctx), + &req->addresses); +} + +void grpc_httpcli_get(grpc_httpcli_context* context, + grpc_polling_entity* pollent, + grpc_resource_quota* resource_quota, + const grpc_httpcli_request* request, grpc_millis deadline, + grpc_closure* on_done, grpc_httpcli_response* response) { + char* name; + if (g_get_override && g_get_override(request, deadline, on_done, response)) { + return; + } + gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->http.path); + internal_request_begin(context, pollent, resource_quota, request, deadline, + on_done, response, name, + grpc_httpcli_format_get_request(request)); + gpr_free(name); +} + +void grpc_httpcli_post(grpc_httpcli_context* context, + grpc_polling_entity* pollent, + grpc_resource_quota* resource_quota, + const grpc_httpcli_request* request, + const char* body_bytes, size_t body_size, + grpc_millis deadline, grpc_closure* on_done, + grpc_httpcli_response* response) { + char* name; + if (g_post_override && g_post_override(request, body_bytes, body_size, + deadline, on_done, response)) { + return; + } + gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->http.path); + internal_request_begin( + context, pollent, resource_quota, request, deadline, on_done, response, + name, grpc_httpcli_format_post_request(request, body_bytes, body_size)); + gpr_free(name); +} + +void grpc_httpcli_set_override(grpc_httpcli_get_override get, + grpc_httpcli_post_override post) { + g_get_override = get; + g_post_override = post; +} diff --git a/Sources/CgRPC/src/core/lib/http/httpcli.h b/Sources/CgRPC/src/core/lib/http/httpcli.h index 809618695..b0735081f 100644 --- a/Sources/CgRPC/src/core/lib/http/httpcli.h +++ b/Sources/CgRPC/src/core/lib/http/httpcli.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_HTTP_HTTPCLI_H #define GRPC_CORE_LIB_HTTP_HTTPCLI_H +#include + #include #include @@ -36,15 +38,14 @@ TODO(ctiller): allow caching and capturing multiple requests for the same content and combining them */ typedef struct grpc_httpcli_context { - grpc_pollset_set *pollset_set; + grpc_pollset_set* pollset_set; } grpc_httpcli_context; typedef struct { - const char *default_port; - void (*handshake)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint, - const char *host, gpr_timespec deadline, - void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *endpoint)); + const char* default_port; + void (*handshake)(void* arg, grpc_endpoint* endpoint, const char* host, + grpc_millis deadline, + void (*on_done)(void* arg, grpc_endpoint* endpoint)); } grpc_httpcli_handshaker; extern const grpc_httpcli_handshaker grpc_httpcli_plaintext; @@ -53,23 +54,22 @@ extern const 
grpc_httpcli_handshaker grpc_httpcli_ssl; /* A request */ typedef struct grpc_httpcli_request { /* The host name to connect to */ - char *host; + char* host; /* The host to verify in the SSL handshake (or NULL) */ - char *ssl_host_override; + char* ssl_host_override; /* The main part of the request The following headers are supplied automatically and MUST NOT be set here: Host, Connection, User-Agent */ grpc_http_request http; /* handshaker to use ssl for the request */ - const grpc_httpcli_handshaker *handshaker; + const grpc_httpcli_handshaker* handshaker; } grpc_httpcli_request; /* Expose the parser response type as a httpcli response too */ typedef struct grpc_http_response grpc_httpcli_response; -void grpc_httpcli_context_init(grpc_httpcli_context *context); -void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx, - grpc_httpcli_context *context); +void grpc_httpcli_context_init(grpc_httpcli_context* context); +void grpc_httpcli_context_destroy(grpc_httpcli_context* context); /* Asynchronously perform a HTTP GET. 'context' specifies the http context under which to do the get @@ -80,12 +80,12 @@ void grpc_httpcli_context_destroy(grpc_exec_ctx *exec_ctx, destroyed once the call returns 'deadline' contains a deadline for the request (or gpr_inf_future) 'on_response' is a callback to report results to */ -void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, - grpc_polling_entity *pollent, - grpc_resource_quota *resource_quota, - const grpc_httpcli_request *request, - gpr_timespec deadline, grpc_closure *on_complete, - grpc_httpcli_response *response); +void grpc_httpcli_get(grpc_httpcli_context* context, + grpc_polling_entity* pollent, + grpc_resource_quota* resource_quota, + const grpc_httpcli_request* request, grpc_millis deadline, + grpc_closure* on_complete, + grpc_httpcli_response* response); /* Asynchronously perform a HTTP POST. 'context' specifies the http context under which to do the post @@ -101,24 +101,25 @@ void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, lifetime of the request 'on_response' is a callback to report results to Does not support ?var1=val1&var2=val2 in the path. 
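   As a minimal sketch of the new exec_ctx-free calling convention (illustrative
   only; pollent, resource_quota, body, body_len, deadline, on_response and
   response are assumed to be prepared by the caller and are not names from
   this tree):

     grpc_httpcli_context context;
     grpc_httpcli_context_init(&context);
     grpc_httpcli_request req;
     memset(&req, 0, sizeof(req));
     req.host = (char*)"example.org";
     req.http.path = (char*)"/upload";
     grpc_httpcli_post(&context, &pollent, resource_quota, &req, body, body_len,
                       deadline, on_response, &response);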
*/ -void grpc_httpcli_post(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context, - grpc_polling_entity *pollent, - grpc_resource_quota *resource_quota, - const grpc_httpcli_request *request, - const char *body_bytes, size_t body_size, - gpr_timespec deadline, grpc_closure *on_complete, - grpc_httpcli_response *response); +void grpc_httpcli_post(grpc_httpcli_context* context, + grpc_polling_entity* pollent, + grpc_resource_quota* resource_quota, + const grpc_httpcli_request* request, + const char* body_bytes, size_t body_size, + grpc_millis deadline, grpc_closure* on_complete, + grpc_httpcli_response* response); /* override functions return 1 if they handled the request, 0 otherwise */ -typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx *exec_ctx, - const grpc_httpcli_request *request, - gpr_timespec deadline, - grpc_closure *on_complete, - grpc_httpcli_response *response); -typedef int (*grpc_httpcli_post_override)( - grpc_exec_ctx *exec_ctx, const grpc_httpcli_request *request, - const char *body_bytes, size_t body_size, gpr_timespec deadline, - grpc_closure *on_complete, grpc_httpcli_response *response); +typedef int (*grpc_httpcli_get_override)(const grpc_httpcli_request* request, + grpc_millis deadline, + grpc_closure* on_complete, + grpc_httpcli_response* response); +typedef int (*grpc_httpcli_post_override)(const grpc_httpcli_request* request, + const char* body_bytes, + size_t body_size, + grpc_millis deadline, + grpc_closure* on_complete, + grpc_httpcli_response* response); void grpc_httpcli_set_override(grpc_httpcli_get_override get, grpc_httpcli_post_override post); diff --git a/Sources/CgRPC/src/core/lib/http/httpcli_security_connector.c b/Sources/CgRPC/src/core/lib/http/httpcli_security_connector.c deleted file mode 100644 index c553fa398..000000000 --- a/Sources/CgRPC/src/core/lib/http/httpcli_security_connector.c +++ /dev/null @@ -1,185 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/http/httpcli.h" - -#include - -#include -#include -#include - -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/channel/handshaker_registry.h" -#include "src/core/lib/security/transport/security_handshaker.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/string.h" -#include "src/core/tsi/ssl_transport_security.h" -#include "src/core/tsi/transport_security_adapter.h" - -typedef struct { - grpc_channel_security_connector base; - tsi_ssl_client_handshaker_factory *handshaker_factory; - char *secure_peer_name; -} grpc_httpcli_ssl_channel_security_connector; - -static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { - grpc_httpcli_ssl_channel_security_connector *c = - (grpc_httpcli_ssl_channel_security_connector *)sc; - if (c->handshaker_factory != NULL) { - tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory); - c->handshaker_factory = NULL; - } - if (c->secure_peer_name != NULL) gpr_free(c->secure_peer_name); - gpr_free(sc); -} - -static void httpcli_ssl_add_handshakers(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { - grpc_httpcli_ssl_channel_security_connector *c = - (grpc_httpcli_ssl_channel_security_connector *)sc; - tsi_handshaker *handshaker = NULL; - if (c->handshaker_factory != NULL) { - tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker( - c->handshaker_factory, c->secure_peer_name, &handshaker); - if (result != TSI_OK) { - gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.", - tsi_result_to_string(result)); - } - } - grpc_handshake_manager_add( - handshake_mgr, - grpc_security_handshaker_create( - exec_ctx, tsi_create_adapter_handshaker(handshaker), &sc->base)); -} - -static void httpcli_ssl_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, - grpc_auth_context **auth_context, - grpc_closure *on_peer_checked) { - grpc_httpcli_ssl_channel_security_connector *c = - (grpc_httpcli_ssl_channel_security_connector *)sc; - grpc_error *error = GRPC_ERROR_NONE; - - /* Check the peer name. 
*/ - if (c->secure_peer_name != NULL && - !tsi_ssl_peer_matches_name(&peer, c->secure_peer_name)) { - char *msg; - gpr_asprintf(&msg, "Peer name %s is not in peer certificate", - c->secure_peer_name); - error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); - gpr_free(msg); - } - GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error); - tsi_peer_destruct(&peer); -} - -static grpc_security_connector_vtable httpcli_ssl_vtable = { - httpcli_ssl_destroy, httpcli_ssl_check_peer}; - -static grpc_security_status httpcli_ssl_channel_security_connector_create( - grpc_exec_ctx *exec_ctx, const char *pem_root_certs, - const char *secure_peer_name, grpc_channel_security_connector **sc) { - tsi_result result = TSI_OK; - grpc_httpcli_ssl_channel_security_connector *c; - - if (secure_peer_name != NULL && pem_root_certs == NULL) { - gpr_log(GPR_ERROR, - "Cannot assert a secure peer name without a trust root."); - return GRPC_SECURITY_ERROR; - } - - c = gpr_zalloc(sizeof(grpc_httpcli_ssl_channel_security_connector)); - - gpr_ref_init(&c->base.base.refcount, 1); - c->base.base.vtable = &httpcli_ssl_vtable; - if (secure_peer_name != NULL) { - c->secure_peer_name = gpr_strdup(secure_peer_name); - } - result = tsi_create_ssl_client_handshaker_factory( - NULL, pem_root_certs, NULL, NULL, 0, &c->handshaker_factory); - if (result != TSI_OK) { - gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", - tsi_result_to_string(result)); - httpcli_ssl_destroy(exec_ctx, &c->base.base); - *sc = NULL; - return GRPC_SECURITY_ERROR; - } - c->base.add_handshakers = httpcli_ssl_add_handshakers; - *sc = &c->base; - return GRPC_SECURITY_OK; -} - -/* handshaker */ - -typedef struct { - void (*func)(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *endpoint); - void *arg; - grpc_handshake_manager *handshake_mgr; -} on_done_closure; - -static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_handshaker_args *args = arg; - on_done_closure *c = args->user_data; - if (error != GRPC_ERROR_NONE) { - const char *msg = grpc_error_string(error); - gpr_log(GPR_ERROR, "Secure transport setup failed: %s", msg); - - c->func(exec_ctx, c->arg, NULL); - } else { - grpc_channel_args_destroy(exec_ctx, args->args); - grpc_slice_buffer_destroy_internal(exec_ctx, args->read_buffer); - gpr_free(args->read_buffer); - c->func(exec_ctx, c->arg, args->endpoint); - } - grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr); - gpr_free(c); -} - -static void ssl_handshake(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *tcp, const char *host, - gpr_timespec deadline, - void (*on_done)(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *endpoint)) { - on_done_closure *c = gpr_malloc(sizeof(*c)); - const char *pem_root_certs = grpc_get_default_ssl_roots(); - if (pem_root_certs == NULL) { - gpr_log(GPR_ERROR, "Could not get default pem root certs."); - on_done(exec_ctx, arg, NULL); - gpr_free(c); - return; - } - c->func = on_done; - c->arg = arg; - grpc_channel_security_connector *sc = NULL; - GPR_ASSERT(httpcli_ssl_channel_security_connector_create( - exec_ctx, pem_root_certs, host, &sc) == GRPC_SECURITY_OK); - grpc_arg channel_arg = grpc_security_connector_to_arg(&sc->base); - grpc_channel_args args = {1, &channel_arg}; - c->handshake_mgr = grpc_handshake_manager_create(); - grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, &args, c->handshake_mgr); - grpc_handshake_manager_do_handshake( - exec_ctx, c->handshake_mgr, tcp, NULL /* channel_args */, deadline, - NULL /* acceptor */, on_handshake_done, c /* 
user_data */); - GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &sc->base, "httpcli"); -} - -const grpc_httpcli_handshaker grpc_httpcli_ssl = {"https", ssl_handshake}; diff --git a/Sources/CgRPC/src/core/lib/http/httpcli_security_connector.cc b/Sources/CgRPC/src/core/lib/http/httpcli_security_connector.cc new file mode 100644 index 000000000..0b53d63e7 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/http/httpcli_security_connector.cc @@ -0,0 +1,202 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/http/httpcli.h" + +#include + +#include +#include +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/handshaker_registry.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/security/transport/security_handshaker.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/tsi/ssl_transport_security.h" +#include "src/core/tsi/transport_security_adapter.h" + +typedef struct { + grpc_channel_security_connector base; + tsi_ssl_client_handshaker_factory* handshaker_factory; + char* secure_peer_name; +} grpc_httpcli_ssl_channel_security_connector; + +static void httpcli_ssl_destroy(grpc_security_connector* sc) { + grpc_httpcli_ssl_channel_security_connector* c = + reinterpret_cast(sc); + if (c->handshaker_factory != nullptr) { + tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory); + c->handshaker_factory = nullptr; + } + if (c->secure_peer_name != nullptr) gpr_free(c->secure_peer_name); + gpr_free(sc); +} + +static void httpcli_ssl_add_handshakers(grpc_channel_security_connector* sc, + grpc_handshake_manager* handshake_mgr) { + grpc_httpcli_ssl_channel_security_connector* c = + reinterpret_cast(sc); + tsi_handshaker* handshaker = nullptr; + if (c->handshaker_factory != nullptr) { + tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker( + c->handshaker_factory, c->secure_peer_name, &handshaker); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.", + tsi_result_to_string(result)); + } + } + grpc_handshake_manager_add( + handshake_mgr, grpc_security_handshaker_create( + tsi_create_adapter_handshaker(handshaker), &sc->base)); +} + +static void httpcli_ssl_check_peer(grpc_security_connector* sc, tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked) { + grpc_httpcli_ssl_channel_security_connector* c = + reinterpret_cast(sc); + grpc_error* error = GRPC_ERROR_NONE; + + /* Check the peer name. 
*/ + if (c->secure_peer_name != nullptr && + !tsi_ssl_peer_matches_name(&peer, c->secure_peer_name)) { + char* msg; + gpr_asprintf(&msg, "Peer name %s is not in peer certificate", + c->secure_peer_name); + error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + } + GRPC_CLOSURE_SCHED(on_peer_checked, error); + tsi_peer_destruct(&peer); +} + +static int httpcli_ssl_cmp(grpc_security_connector* sc1, + grpc_security_connector* sc2) { + grpc_httpcli_ssl_channel_security_connector* c1 = + reinterpret_cast(sc1); + grpc_httpcli_ssl_channel_security_connector* c2 = + reinterpret_cast(sc2); + return strcmp(c1->secure_peer_name, c2->secure_peer_name); +} + +static grpc_security_connector_vtable httpcli_ssl_vtable = { + httpcli_ssl_destroy, httpcli_ssl_check_peer, httpcli_ssl_cmp}; + +static grpc_security_status httpcli_ssl_channel_security_connector_create( + const char* pem_root_certs, const tsi_ssl_root_certs_store* root_store, + const char* secure_peer_name, grpc_channel_security_connector** sc) { + tsi_result result = TSI_OK; + grpc_httpcli_ssl_channel_security_connector* c; + + if (secure_peer_name != nullptr && pem_root_certs == nullptr) { + gpr_log(GPR_ERROR, + "Cannot assert a secure peer name without a trust root."); + return GRPC_SECURITY_ERROR; + } + + c = static_cast( + gpr_zalloc(sizeof(grpc_httpcli_ssl_channel_security_connector))); + + gpr_ref_init(&c->base.base.refcount, 1); + c->base.base.vtable = &httpcli_ssl_vtable; + if (secure_peer_name != nullptr) { + c->secure_peer_name = gpr_strdup(secure_peer_name); + } + tsi_ssl_client_handshaker_options options; + memset(&options, 0, sizeof(options)); + options.pem_root_certs = pem_root_certs; + options.root_store = root_store; + result = tsi_create_ssl_client_handshaker_factory_with_options( + &options, &c->handshaker_factory); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", + tsi_result_to_string(result)); + httpcli_ssl_destroy(&c->base.base); + *sc = nullptr; + return GRPC_SECURITY_ERROR; + } + // We don't actually need a channel credentials object in this case, + // but we set it to a non-nullptr address so that we don't trigger + // assertions in grpc_channel_security_connector_cmp(). 
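  // The sentinel assigned below only needs to be a stable non-null address:
  // it is never dereferenced for this internal-only connector, so casting the
  // integer constant 1 is enough to satisfy the non-null check mentioned above.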
+ c->base.channel_creds = (grpc_channel_credentials*)1; + c->base.add_handshakers = httpcli_ssl_add_handshakers; + *sc = &c->base; + return GRPC_SECURITY_OK; +} + +/* handshaker */ + +typedef struct { + void (*func)(void* arg, grpc_endpoint* endpoint); + void* arg; + grpc_handshake_manager* handshake_mgr; +} on_done_closure; + +static void on_handshake_done(void* arg, grpc_error* error) { + grpc_handshaker_args* args = static_cast(arg); + on_done_closure* c = static_cast(args->user_data); + if (error != GRPC_ERROR_NONE) { + const char* msg = grpc_error_string(error); + gpr_log(GPR_ERROR, "Secure transport setup failed: %s", msg); + + c->func(c->arg, nullptr); + } else { + grpc_channel_args_destroy(args->args); + grpc_slice_buffer_destroy_internal(args->read_buffer); + gpr_free(args->read_buffer); + c->func(c->arg, args->endpoint); + } + grpc_handshake_manager_destroy(c->handshake_mgr); + gpr_free(c); +} + +static void ssl_handshake(void* arg, grpc_endpoint* tcp, const char* host, + grpc_millis deadline, + void (*on_done)(void* arg, grpc_endpoint* endpoint)) { + on_done_closure* c = static_cast(gpr_malloc(sizeof(*c))); + const char* pem_root_certs = + grpc_core::DefaultSslRootStore::GetPemRootCerts(); + const tsi_ssl_root_certs_store* root_store = + grpc_core::DefaultSslRootStore::GetRootStore(); + if (root_store == nullptr) { + gpr_log(GPR_ERROR, "Could not get default pem root certs."); + on_done(arg, nullptr); + gpr_free(c); + return; + } + c->func = on_done; + c->arg = arg; + grpc_channel_security_connector* sc = nullptr; + GPR_ASSERT(httpcli_ssl_channel_security_connector_create( + pem_root_certs, root_store, host, &sc) == GRPC_SECURITY_OK); + grpc_arg channel_arg = grpc_security_connector_to_arg(&sc->base); + grpc_channel_args args = {1, &channel_arg}; + c->handshake_mgr = grpc_handshake_manager_create(); + grpc_handshakers_add(HANDSHAKER_CLIENT, &args, c->handshake_mgr); + grpc_handshake_manager_do_handshake( + c->handshake_mgr, nullptr /* interested_parties */, tcp, + nullptr /* channel_args */, deadline, nullptr /* acceptor */, + on_handshake_done, c /* user_data */); + GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "httpcli"); +} + +const grpc_httpcli_handshaker grpc_httpcli_ssl = {"https", ssl_handshake}; diff --git a/Sources/CgRPC/src/core/lib/http/parser.c b/Sources/CgRPC/src/core/lib/http/parser.cc similarity index 77% rename from Sources/CgRPC/src/core/lib/http/parser.c rename to Sources/CgRPC/src/core/lib/http/parser.cc index 0950bd655..a37fdda8e 100644 --- a/Sources/CgRPC/src/core/lib/http/parser.c +++ b/Sources/CgRPC/src/core/lib/http/parser.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/http/parser.h" #include @@ -23,21 +25,22 @@ #include #include -#include -grpc_tracer_flag grpc_http1_trace = GRPC_TRACER_INITIALIZER(false, "http1"); +#include "src/core/lib/gpr/useful.h" + +grpc_core::TraceFlag grpc_http1_trace(false, "http1"); -static char *buf2str(void *buffer, size_t length) { - char *out = (char *)gpr_malloc(length + 1); +static char* buf2str(void* buffer, size_t length) { + char* out = static_cast(gpr_malloc(length + 1)); memcpy(out, buffer, length); out[length] = 0; return out; } -static grpc_error *handle_response_line(grpc_http_parser *parser) { - uint8_t *beg = parser->cur_line; - uint8_t *cur = beg; - uint8_t *end = beg + parser->cur_line_length; +static grpc_error* handle_response_line(grpc_http_parser* parser) { + uint8_t* beg = parser->cur_line; + uint8_t* cur = beg; + uint8_t* end = beg + parser->cur_line_length; if (cur == end || *cur++ != 'H') 
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'H'"); @@ -75,10 +78,10 @@ static grpc_error *handle_response_line(grpc_http_parser *parser) { return GRPC_ERROR_NONE; } -static grpc_error *handle_request_line(grpc_http_parser *parser) { - uint8_t *beg = parser->cur_line; - uint8_t *cur = beg; - uint8_t *end = beg + parser->cur_line_length; +static grpc_error* handle_request_line(grpc_http_parser* parser) { + uint8_t* beg = parser->cur_line; + uint8_t* cur = beg; + uint8_t* end = beg + parser->cur_line_length; uint8_t vers_major = 0; uint8_t vers_minor = 0; @@ -87,14 +90,15 @@ static grpc_error *handle_request_line(grpc_http_parser *parser) { if (cur == end) return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "No method on HTTP request line"); - parser->http.request->method = buf2str(beg, (size_t)(cur - beg - 1)); + parser->http.request->method = + buf2str(beg, static_cast(cur - beg - 1)); beg = cur; while (cur != end && *cur++ != ' ') ; if (cur == end) return GRPC_ERROR_CREATE_FROM_STATIC_STRING("No path on HTTP request line"); - parser->http.request->path = buf2str(beg, (size_t)(cur - beg - 1)); + parser->http.request->path = buf2str(beg, static_cast(cur - beg - 1)); if (cur == end || *cur++ != 'H') return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'H'"); @@ -106,12 +110,12 @@ static grpc_error *handle_request_line(grpc_http_parser *parser) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected 'P'"); if (cur == end || *cur++ != '/') return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Expected '/'"); - vers_major = (uint8_t)(*cur++ - '1' + 1); + vers_major = static_cast(*cur++ - '1' + 1); ++cur; if (cur == end) return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "End of line in HTTP version string"); - vers_minor = (uint8_t)(*cur++ - '1' + 1); + vers_minor = static_cast(*cur++ - '1' + 1); if (vers_major == 1) { if (vers_minor == 0) { @@ -137,7 +141,7 @@ static grpc_error *handle_request_line(grpc_http_parser *parser) { return GRPC_ERROR_NONE; } -static grpc_error *handle_first_line(grpc_http_parser *parser) { +static grpc_error* handle_first_line(grpc_http_parser* parser) { switch (parser->type) { case GRPC_HTTP_REQUEST: return handle_request_line(parser); @@ -148,14 +152,14 @@ static grpc_error *handle_first_line(grpc_http_parser *parser) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here")); } -static grpc_error *add_header(grpc_http_parser *parser) { - uint8_t *beg = parser->cur_line; - uint8_t *cur = beg; - uint8_t *end = beg + parser->cur_line_length; - size_t *hdr_count = NULL; - grpc_http_header **hdrs = NULL; - grpc_http_header hdr = {NULL, NULL}; - grpc_error *error = GRPC_ERROR_NONE; +static grpc_error* add_header(grpc_http_parser* parser) { + uint8_t* beg = parser->cur_line; + uint8_t* cur = beg; + uint8_t* end = beg + parser->cur_line_length; + size_t* hdr_count = nullptr; + grpc_http_header** hdrs = nullptr; + grpc_http_header hdr = {nullptr, nullptr}; + grpc_error* error = GRPC_ERROR_NONE; GPR_ASSERT(cur != end); @@ -174,14 +178,15 @@ static grpc_error *add_header(grpc_http_parser *parser) { goto done; } GPR_ASSERT(cur >= beg); - hdr.key = buf2str(beg, (size_t)(cur - beg)); + hdr.key = buf2str(beg, static_cast(cur - beg)); cur++; /* skip : */ while (cur != end && (*cur == ' ' || *cur == '\t')) { cur++; } GPR_ASSERT((size_t)(end - cur) >= parser->cur_line_end_length); - hdr.value = buf2str(cur, (size_t)(end - cur) - parser->cur_line_end_length); + hdr.value = buf2str( + cur, static_cast(end - cur) - parser->cur_line_end_length); switch (parser->type) { case 
GRPC_HTTP_RESPONSE: @@ -197,8 +202,8 @@ static grpc_error *add_header(grpc_http_parser *parser) { if (*hdr_count == parser->hdr_capacity) { parser->hdr_capacity = GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2); - *hdrs = (grpc_http_header *)gpr_realloc( - *hdrs, parser->hdr_capacity * sizeof(**hdrs)); + *hdrs = static_cast( + gpr_realloc(*hdrs, parser->hdr_capacity * sizeof(**hdrs))); } (*hdrs)[(*hdr_count)++] = hdr; @@ -210,9 +215,9 @@ static grpc_error *add_header(grpc_http_parser *parser) { return error; } -static grpc_error *finish_line(grpc_http_parser *parser, - bool *found_body_start) { - grpc_error *err; +static grpc_error* finish_line(grpc_http_parser* parser, + bool* found_body_start) { + grpc_error* err; switch (parser->state) { case GRPC_HTTP_FIRST_LINE: err = handle_first_line(parser); @@ -239,9 +244,9 @@ static grpc_error *finish_line(grpc_http_parser *parser, return GRPC_ERROR_NONE; } -static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) { - size_t *body_length = NULL; - char **body = NULL; +static grpc_error* addbyte_body(grpc_http_parser* parser, uint8_t byte) { + size_t* body_length = nullptr; + char** body = nullptr; if (parser->type == GRPC_HTTP_RESPONSE) { body_length = &parser->http.response->body_length; @@ -256,15 +261,16 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) { if (*body_length == parser->body_capacity) { parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2); - *body = (char *)gpr_realloc((void *)*body, parser->body_capacity); + *body = + static_cast(gpr_realloc((void*)*body, parser->body_capacity)); } - (*body)[*body_length] = (char)byte; + (*body)[*body_length] = static_cast(byte); (*body_length)++; return GRPC_ERROR_NONE; } -static bool check_line(grpc_http_parser *parser) { +static bool check_line(grpc_http_parser* parser) { if (parser->cur_line_length >= 2 && parser->cur_line[parser->cur_line_length - 2] == '\r' && parser->cur_line[parser->cur_line_length - 1] == '\n') { @@ -288,13 +294,13 @@ static bool check_line(grpc_http_parser *parser) { return false; } -static grpc_error *addbyte(grpc_http_parser *parser, uint8_t byte, - bool *found_body_start) { +static grpc_error* addbyte(grpc_http_parser* parser, uint8_t byte, + bool* found_body_start) { switch (parser->state) { case GRPC_HTTP_FIRST_LINE: case GRPC_HTTP_HEADERS: if (parser->cur_line_length >= GRPC_HTTP_PARSER_MAX_HEADER_LENGTH) { - if (GRPC_TRACER_ON(grpc_http1_trace)) + if (grpc_http1_trace.enabled()) gpr_log(GPR_ERROR, "HTTP header max line length (%d) exceeded", GRPC_HTTP_PARSER_MAX_HEADER_LENGTH); return GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -312,8 +318,8 @@ static grpc_error *addbyte(grpc_http_parser *parser, uint8_t byte, GPR_UNREACHABLE_CODE(return GRPC_ERROR_NONE); } -void grpc_http_parser_init(grpc_http_parser *parser, grpc_http_type type, - void *request_or_response) { +void grpc_http_parser_init(grpc_http_parser* parser, grpc_http_type type, + void* request_or_response) { memset(parser, 0, sizeof(*parser)); parser->state = GRPC_HTTP_FIRST_LINE; parser->type = type; @@ -321,9 +327,9 @@ void grpc_http_parser_init(grpc_http_parser *parser, grpc_http_type type, parser->cur_line_end_length = 2; } -void grpc_http_parser_destroy(grpc_http_parser *parser) {} +void grpc_http_parser_destroy(grpc_http_parser* parser) {} -void grpc_http_request_destroy(grpc_http_request *request) { +void grpc_http_request_destroy(grpc_http_request* request) { size_t i; gpr_free(request->body); for (i = 0; i < request->hdr_count; i++) 
{ @@ -335,7 +341,7 @@ void grpc_http_request_destroy(grpc_http_request *request) { gpr_free(request->path); } -void grpc_http_response_destroy(grpc_http_response *response) { +void grpc_http_response_destroy(grpc_http_response* response) { size_t i; gpr_free(response->body); for (i = 0; i < response->hdr_count; i++) { @@ -345,19 +351,19 @@ void grpc_http_response_destroy(grpc_http_response *response) { gpr_free(response->hdrs); } -grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, grpc_slice slice, - size_t *start_of_body) { +grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice, + size_t* start_of_body) { for (size_t i = 0; i < GRPC_SLICE_LENGTH(slice); i++) { bool found_body_start = false; - grpc_error *err = + grpc_error* err = addbyte(parser, GRPC_SLICE_START_PTR(slice)[i], &found_body_start); if (err != GRPC_ERROR_NONE) return err; - if (found_body_start && start_of_body != NULL) *start_of_body = i + 1; + if (found_body_start && start_of_body != nullptr) *start_of_body = i + 1; } return GRPC_ERROR_NONE; } -grpc_error *grpc_http_parser_eof(grpc_http_parser *parser) { +grpc_error* grpc_http_parser_eof(grpc_http_parser* parser) { if (parser->state != GRPC_HTTP_BODY) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Did not finish headers"); } diff --git a/Sources/CgRPC/src/core/lib/http/parser.h b/Sources/CgRPC/src/core/lib/http/parser.h index c8dced390..1d2e13e83 100644 --- a/Sources/CgRPC/src/core/lib/http/parser.h +++ b/Sources/CgRPC/src/core/lib/http/parser.h @@ -19,8 +19,9 @@ #ifndef GRPC_CORE_LIB_HTTP_PARSER_H #define GRPC_CORE_LIB_HTTP_PARSER_H -#include #include + +#include #include "src/core/lib/debug/trace.h" #include "src/core/lib/iomgr/error.h" @@ -29,8 +30,8 @@ /* A single header to be passed in a request */ typedef struct grpc_http_header { - char *key; - char *value; + char* key; + char* value; } grpc_http_header; typedef enum { @@ -53,17 +54,17 @@ typedef enum { /* A request */ typedef struct grpc_http_request { /* Method of the request (e.g. 
GET, POST) */ - char *method; + char* method; /* The path of the resource to fetch */ - char *path; + char* path; /* HTTP version to use */ grpc_http_version version; /* Headers attached to the request */ size_t hdr_count; - grpc_http_header *hdrs; + grpc_http_header* hdrs; /* Body: length and contents; contents are NOT null-terminated */ size_t body_length; - char *body; + char* body; } grpc_http_request; /* A response */ @@ -72,10 +73,10 @@ typedef struct grpc_http_response { int status; /* Headers: count and key/values */ size_t hdr_count; - grpc_http_header *hdrs; + grpc_http_header* hdrs; /* Body: length and contents; contents are NOT null-terminated */ size_t body_length; - char *body; + char* body; } grpc_http_response; typedef struct { @@ -83,9 +84,9 @@ typedef struct { grpc_http_type type; union { - grpc_http_response *response; - grpc_http_request *request; - void *request_or_response; + grpc_http_response* response; + grpc_http_request* request; + void* request_or_response; } http; size_t body_capacity; size_t hdr_capacity; @@ -95,18 +96,18 @@ typedef struct { size_t cur_line_end_length; } grpc_http_parser; -void grpc_http_parser_init(grpc_http_parser *parser, grpc_http_type type, - void *request_or_response); -void grpc_http_parser_destroy(grpc_http_parser *parser); +void grpc_http_parser_init(grpc_http_parser* parser, grpc_http_type type, + void* request_or_response); +void grpc_http_parser_destroy(grpc_http_parser* parser); /* Sets \a start_of_body to the offset in \a slice of the start of the body. */ -grpc_error *grpc_http_parser_parse(grpc_http_parser *parser, grpc_slice slice, - size_t *start_of_body); -grpc_error *grpc_http_parser_eof(grpc_http_parser *parser); +grpc_error* grpc_http_parser_parse(grpc_http_parser* parser, grpc_slice slice, + size_t* start_of_body); +grpc_error* grpc_http_parser_eof(grpc_http_parser* parser); -void grpc_http_request_destroy(grpc_http_request *request); -void grpc_http_response_destroy(grpc_http_response *response); +void grpc_http_request_destroy(grpc_http_request* request); +void grpc_http_response_destroy(grpc_http_response* response); -extern grpc_tracer_flag grpc_http1_trace; +extern grpc_core::TraceFlag grpc_http1_trace; #endif /* GRPC_CORE_LIB_HTTP_PARSER_H */ diff --git a/Sources/CgRPC/src/core/lib/support/block_annotate.h b/Sources/CgRPC/src/core/lib/iomgr/block_annotate.h similarity index 60% rename from Sources/CgRPC/src/core/lib/support/block_annotate.h rename to Sources/CgRPC/src/core/lib/iomgr/block_annotate.h index 8e3ef7df6..a57873aab 100644 --- a/Sources/CgRPC/src/core/lib/support/block_annotate.h +++ b/Sources/CgRPC/src/core/lib/iomgr/block_annotate.h @@ -16,20 +16,12 @@ * */ -#ifndef GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H -#define GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H - -#ifdef __cplusplus -extern "C" { -#endif +#ifndef GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H +#define GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H void gpr_thd_start_blocking_region(); void gpr_thd_end_blocking_region(); -#ifdef __cplusplus -} -#endif - /* These annotations identify the beginning and end of regions where the code may block for reasons other than synchronization functions. These include poll, epoll, and getaddrinfo. 
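   A rough usage sketch (illustrative only; node, service, hints and result are
   assumed to be set up by the caller):

     GRPC_SCHEDULING_START_BLOCKING_REGION;
     int rc = getaddrinfo(node, service, &hints, &result);
     GRPC_SCHEDULING_END_BLOCKING_REGION;

   The END macro below additionally invalidates the cached timestamp via
   grpc_core::ExecCtx::Get()->InvalidateNow(), so deadline math done after an
   arbitrarily long block sees a fresh "now"; the _NO_EXEC_CTX variant omits
   that step for threads that have no ExecCtx.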
*/ @@ -39,17 +31,27 @@ void gpr_thd_end_blocking_region(); do { \ gpr_thd_start_blocking_region(); \ } while (0) -#define GRPC_SCHEDULING_END_BLOCKING_REGION \ - do { \ - gpr_thd_end_blocking_region(); \ +#define GRPC_SCHEDULING_END_BLOCKING_REGION \ + do { \ + gpr_thd_end_blocking_region(); \ + grpc_core::ExecCtx::Get()->InvalidateNow(); \ } while (0) +#define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \ + do { \ + gpr_thd_end_blocking_region(); \ + } while (0) + #else #define GRPC_SCHEDULING_START_BLOCKING_REGION \ do { \ } while (0) -#define GRPC_SCHEDULING_END_BLOCKING_REGION \ - do { \ +#define GRPC_SCHEDULING_END_BLOCKING_REGION \ + do { \ + grpc_core::ExecCtx::Get()->InvalidateNow(); \ + } while (0) +#define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \ + do { \ } while (0) #endif -#endif /* GRPC_CORE_LIB_SUPPORT_BLOCK_ANNOTATE_H */ +#endif /* GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/call_combiner.c b/Sources/CgRPC/src/core/lib/iomgr/call_combiner.cc similarity index 61% rename from Sources/CgRPC/src/core/lib/iomgr/call_combiner.c rename to Sources/CgRPC/src/core/lib/iomgr/call_combiner.cc index 48d8eaec1..00a839b64 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/call_combiner.c +++ b/Sources/CgRPC/src/core/lib/iomgr/call_combiner.cc @@ -16,22 +16,27 @@ * */ +#include + #include "src/core/lib/iomgr/call_combiner.h" +#include + #include +#include "src/core/lib/debug/stats.h" +#include "src/core/lib/profiling/timers.h" -grpc_tracer_flag grpc_call_combiner_trace = - GRPC_TRACER_INITIALIZER(false, "call_combiner"); +grpc_core::TraceFlag grpc_call_combiner_trace(false, "call_combiner"); static grpc_error* decode_cancel_state_error(gpr_atm cancel_state) { if (cancel_state & 1) { - return (grpc_error*)(cancel_state & ~(gpr_atm)1); + return (grpc_error*)(cancel_state & ~static_cast(1)); } return GRPC_ERROR_NONE; } static gpr_atm encode_cancel_state_error(grpc_error* error) { - return (gpr_atm)1 | (gpr_atm)error; + return static_cast(1) | (gpr_atm)error; } void grpc_call_combiner_init(grpc_call_combiner* call_combiner) { @@ -53,86 +58,91 @@ void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner) { #define DEBUG_FMT_ARGS #endif -void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_start(grpc_call_combiner* call_combiner, grpc_closure* closure, grpc_error* error DEBUG_ARGS, const char* reason) { - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, + GPR_TIMER_SCOPE("call_combiner_start", 0); + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, "==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR "%s] error=%s", call_combiner, closure DEBUG_FMT_ARGS, reason, grpc_error_string(error)); } - size_t prev_size = - (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1); - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size, + size_t prev_size = static_cast( + gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1)); + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size, prev_size + 1); } + GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(); if (prev_size == 0) { - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY"); + GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(); + + GPR_TIMER_MARK("call_combiner_initiate", 0); + if (grpc_call_combiner_trace.enabled()) { + 
gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY"); } // Queue was empty, so execute this closure immediately. - GRPC_CLOSURE_SCHED(exec_ctx, closure, error); + GRPC_CLOSURE_SCHED(closure, error); } else { - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { + if (grpc_call_combiner_trace.enabled()) { gpr_log(GPR_INFO, " QUEUING"); } // Queue was not empty, so add closure to queue. closure->error_data.error = error; - gpr_mpscq_push(&call_combiner->queue, (gpr_mpscq_node*)closure); + gpr_mpscq_push(&call_combiner->queue, + reinterpret_cast(closure)); } } -void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner DEBUG_ARGS, +void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS, const char* reason) { - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, + GPR_TIMER_SCOPE("call_combiner_stop", 0); + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, "==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]", call_combiner DEBUG_FMT_ARGS, reason); } - size_t prev_size = - (size_t)gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1); - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size, + size_t prev_size = static_cast( + gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1)); + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size, prev_size - 1); } GPR_ASSERT(prev_size >= 1); if (prev_size > 1) { while (true) { - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, " checking queue"); + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, " checking queue"); } bool empty; - grpc_closure* closure = (grpc_closure*)gpr_mpscq_pop_and_check_end( - &call_combiner->queue, &empty); - if (closure == NULL) { + grpc_closure* closure = reinterpret_cast( + gpr_mpscq_pop_and_check_end(&call_combiner->queue, &empty)); + if (closure == nullptr) { // This can happen either due to a race condition within the mpscq // code or because of a race with grpc_call_combiner_start(). - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, " queue returned no result; checking again"); + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, " queue returned no result; checking again"); } continue; } - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s", + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s", closure, grpc_error_string(closure->error_data.error)); } - GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error); + GRPC_CLOSURE_SCHED(closure, closure->error_data.error); break; } - } else if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, " queue empty"); + } else if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, " queue empty"); } } -void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner, grpc_closure* closure) { + GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL(); while (true) { // Decode original state. gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state); @@ -140,19 +150,19 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, // If error is set, invoke the cancellation closure immediately. // Otherwise, store the new closure. 
if (original_error != GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, "call_combiner=%p: scheduling notify_on_cancel callback=%p " "for pre-existing cancellation", call_combiner, closure); } - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error)); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_REF(original_error)); break; } else { if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state, (gpr_atm)closure)) { - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p", + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p", call_combiner, closure); } // If we replaced an earlier closure, invoke the original @@ -160,12 +170,12 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, // up any resources they may be holding for the callback. if (original_state != 0) { closure = (grpc_closure*)original_state; - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, "call_combiner=%p: scheduling old cancel callback=%p", call_combiner, closure); } - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); } break; } @@ -174,9 +184,9 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, } } -void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner, grpc_error* error) { + GRPC_STATS_INC_CALL_COMBINER_CANCELLED(); while (true) { gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state); grpc_error* original_error = decode_cancel_state_error(original_state); @@ -188,12 +198,12 @@ void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx, encode_cancel_state_error(error))) { if (original_state != 0) { grpc_closure* notify_on_cancel = (grpc_closure*)original_state; - if (GRPC_TRACER_ON(grpc_call_combiner_trace)) { - gpr_log(GPR_DEBUG, + if (grpc_call_combiner_trace.enabled()) { + gpr_log(GPR_INFO, "call_combiner=%p: scheduling notify_on_cancel callback=%p", call_combiner, notify_on_cancel); } - GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(notify_on_cancel, GRPC_ERROR_REF(error)); } break; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/call_combiner.h b/Sources/CgRPC/src/core/lib/iomgr/call_combiner.h index 5cfb3f0c0..0ccd08ea5 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/call_combiner.h +++ b/Sources/CgRPC/src/core/lib/iomgr/call_combiner.h @@ -19,13 +19,14 @@ #ifndef GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H #define GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H +#include + #include #include +#include "src/core/lib/gpr/mpscq.h" #include "src/core/lib/iomgr/closure.h" -#include "src/core/lib/iomgr/exec_ctx.h" -#include "src/core/lib/support/mpscq.h" // A simple, lock-free mechanism for serializing activity related to a // single call. This is similar to a combiner but is more lightweight. @@ -36,7 +37,7 @@ // when it is done with the action that was kicked off by the original // callback. 
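//
// A minimal usage sketch (illustrative only; batch_closure and the reason
// strings are placeholders, not names from this tree):
//
//   GRPC_CALL_COMBINER_START(call_combiner, &batch_closure, GRPC_ERROR_NONE,
//                            "start batch");
//   ...and later, from the code that batch_closure kicked off, once that
//   action has completed:
//   GRPC_CALL_COMBINER_STOP(call_combiner, "batch complete");
//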
-extern grpc_tracer_flag grpc_call_combiner_trace; +extern grpc_core::TraceFlag grpc_call_combiner_trace; typedef struct { gpr_atm size; // size_t, num closures in queue or currently executing @@ -53,37 +54,29 @@ void grpc_call_combiner_init(grpc_call_combiner* call_combiner); void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner); #ifndef NDEBUG -#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \ - reason) \ - grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \ - __FILE__, __LINE__, (reason)) -#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \ - grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \ - (reason)) +#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \ + grpc_call_combiner_start((call_combiner), (closure), (error), __FILE__, \ + __LINE__, (reason)) +#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \ + grpc_call_combiner_stop((call_combiner), __FILE__, __LINE__, (reason)) /// Starts processing \a closure on \a call_combiner. -void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_start(grpc_call_combiner* call_combiner, grpc_closure* closure, grpc_error* error, const char* file, int line, const char* reason); /// Yields the call combiner to the next closure in the queue, if any. -void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_stop(grpc_call_combiner* call_combiner, const char* file, int line, const char* reason); #else -#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error, \ - reason) \ - grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \ - (reason)) -#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \ - grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason)) +#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \ + grpc_call_combiner_start((call_combiner), (closure), (error), (reason)) +#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \ + grpc_call_combiner_stop((call_combiner), (reason)) /// Starts processing \a closure on \a call_combiner. -void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_start(grpc_call_combiner* call_combiner, grpc_closure* closure, grpc_error* error, const char* reason); /// Yields the call combiner to the next closure in the queue, if any. -void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_stop(grpc_call_combiner* call_combiner, const char* reason); #endif @@ -109,13 +102,11 @@ void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx, /// cancellation; this effectively unregisters the previously set closure. /// However, most filters will not need to explicitly unregister their /// callbacks, as this is done automatically when the call is destroyed. -void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner, grpc_closure* closure); /// Indicates that the call has been cancelled. 
-void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx, - grpc_call_combiner* call_combiner, +void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner, grpc_error* error); #endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/closure.c b/Sources/CgRPC/src/core/lib/iomgr/closure.c deleted file mode 100644 index 00edefc6a..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/closure.c +++ /dev/null @@ -1,219 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/closure.h" - -#include -#include -#include - -#include "src/core/lib/profiling/timers.h" - -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_closure = GRPC_TRACER_INITIALIZER(false, "closure"); -#endif - -#ifndef NDEBUG -grpc_closure *grpc_closure_init(const char *file, int line, - grpc_closure *closure, grpc_iomgr_cb_func cb, - void *cb_arg, - grpc_closure_scheduler *scheduler) { -#else -grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb, - void *cb_arg, - grpc_closure_scheduler *scheduler) { -#endif - closure->cb = cb; - closure->cb_arg = cb_arg; - closure->scheduler = scheduler; -#ifndef NDEBUG - closure->scheduled = false; - closure->file_initiated = NULL; - closure->line_initiated = 0; - closure->run = false; - closure->file_created = file; - closure->line_created = line; -#endif - return closure; -} - -void grpc_closure_list_init(grpc_closure_list *closure_list) { - closure_list->head = closure_list->tail = NULL; -} - -bool grpc_closure_list_append(grpc_closure_list *closure_list, - grpc_closure *closure, grpc_error *error) { - if (closure == NULL) { - GRPC_ERROR_UNREF(error); - return false; - } - closure->error_data.error = error; - closure->next_data.next = NULL; - bool was_empty = (closure_list->head == NULL); - if (was_empty) { - closure_list->head = closure; - } else { - closure_list->tail->next_data.next = closure; - } - closure_list->tail = closure; - return was_empty; -} - -void grpc_closure_list_fail_all(grpc_closure_list *list, - grpc_error *forced_failure) { - for (grpc_closure *c = list->head; c != NULL; c = c->next_data.next) { - if (c->error_data.error == GRPC_ERROR_NONE) { - c->error_data.error = GRPC_ERROR_REF(forced_failure); - } - } - GRPC_ERROR_UNREF(forced_failure); -} - -bool grpc_closure_list_empty(grpc_closure_list closure_list) { - return closure_list.head == NULL; -} - -void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst) { - if (src->head == NULL) { - return; - } - if (dst->head == NULL) { - *dst = *src; - } else { - dst->tail->next_data.next = src->head; - dst->tail = src->tail; - } - src->head = src->tail = NULL; -} - -typedef struct { - grpc_iomgr_cb_func cb; - void *cb_arg; - grpc_closure wrapper; -} wrapped_closure; - -static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - wrapped_closure *wc = (wrapped_closure *)arg; - grpc_iomgr_cb_func cb = wc->cb; - void *cb_arg = wc->cb_arg; - 
gpr_free(wc); - cb(exec_ctx, cb_arg, error); -} - -#ifndef NDEBUG -grpc_closure *grpc_closure_create(const char *file, int line, - grpc_iomgr_cb_func cb, void *cb_arg, - grpc_closure_scheduler *scheduler) { -#else -grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg, - grpc_closure_scheduler *scheduler) { -#endif - wrapped_closure *wc = (wrapped_closure *)gpr_malloc(sizeof(*wc)); - wc->cb = cb; - wc->cb_arg = cb_arg; -#ifndef NDEBUG - grpc_closure_init(file, line, &wc->wrapper, closure_wrapper, wc, scheduler); -#else - grpc_closure_init(&wc->wrapper, closure_wrapper, wc, scheduler); -#endif - return &wc->wrapper; -} - -#ifndef NDEBUG -void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx, - grpc_closure *c, grpc_error *error) { -#else -void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *c, - grpc_error *error) { -#endif - GPR_TIMER_BEGIN("grpc_closure_run", 0); - if (c != NULL) { -#ifndef NDEBUG - c->file_initiated = file; - c->line_initiated = line; - c->run = true; -#endif - assert(c->cb); - c->scheduler->vtable->run(exec_ctx, c, error); - } else { - GRPC_ERROR_UNREF(error); - } - GPR_TIMER_END("grpc_closure_run", 0); -} - -#ifndef NDEBUG -void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx, - grpc_closure *c, grpc_error *error) { -#else -void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c, - grpc_error *error) { -#endif - GPR_TIMER_BEGIN("grpc_closure_sched", 0); - if (c != NULL) { -#ifndef NDEBUG - if (c->scheduled) { - gpr_log(GPR_ERROR, - "Closure already scheduled. (closure: %p, created: [%s:%d], " - "previously scheduled at: [%s: %d] run?: %s", - c, c->file_created, c->line_created, c->file_initiated, - c->line_initiated, c->run ? "true" : "false"); - abort(); - } - c->scheduled = true; - c->file_initiated = file; - c->line_initiated = line; - c->run = false; -#endif - assert(c->cb); - c->scheduler->vtable->sched(exec_ctx, c, error); - } else { - GRPC_ERROR_UNREF(error); - } - GPR_TIMER_END("grpc_closure_sched", 0); -} - -#ifndef NDEBUG -void grpc_closure_list_sched(const char *file, int line, - grpc_exec_ctx *exec_ctx, grpc_closure_list *list) { -#else -void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) { -#endif - grpc_closure *c = list->head; - while (c != NULL) { - grpc_closure *next = c->next_data.next; -#ifndef NDEBUG - if (c->scheduled) { - gpr_log(GPR_ERROR, - "Closure already scheduled. (closure: %p, created: [%s:%d], " - "previously scheduled at: [%s: %d] run?: %s", - c, c->file_created, c->line_created, c->file_initiated, - c->line_initiated, c->run ? 
"true" : "false"); - abort(); - } - c->scheduled = true; - c->file_initiated = file; - c->line_initiated = line; - c->run = false; -#endif - assert(c->cb); - c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error); - c = next; - } - list->head = list->tail = NULL; -} diff --git a/Sources/CgRPC/src/core/lib/iomgr/closure.h b/Sources/CgRPC/src/core/lib/iomgr/closure.h index cd32a4ba3..34a494485 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/closure.h +++ b/Sources/CgRPC/src/core/lib/iomgr/closure.h @@ -21,25 +21,22 @@ #include -#include +#include +#include +#include #include +#include "src/core/lib/gpr/mpscq.h" #include "src/core/lib/iomgr/error.h" -#include "src/core/lib/support/mpscq.h" - -#ifdef __cplusplus -extern "C" { -#endif +#include "src/core/lib/profiling/timers.h" struct grpc_closure; typedef struct grpc_closure grpc_closure; -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_closure; -#endif +extern grpc_core::DebugOnlyTraceFlag grpc_trace_closure; typedef struct grpc_closure_list { - grpc_closure *head; - grpc_closure *tail; + grpc_closure* head; + grpc_closure* tail; } grpc_closure_list; /** gRPC Callback definition. @@ -49,24 +46,21 @@ typedef struct grpc_closure_list { * describing what went wrong. * Error contract: it is not the cb's job to unref this error; * the closure scheduler will do that after the cb returns */ -typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); +typedef void (*grpc_iomgr_cb_func)(void* arg, grpc_error* error); typedef struct grpc_closure_scheduler grpc_closure_scheduler; typedef struct grpc_closure_scheduler_vtable { /* NOTE: for all these functions, closure->scheduler == the scheduler that was used to find this vtable */ - void (*run)(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); - void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); - const char *name; + void (*run)(grpc_closure* closure, grpc_error* error); + void (*sched)(grpc_closure* closure, grpc_error* error); + const char* name; } grpc_closure_scheduler_vtable; /** Abstract type that can schedule closures for execution */ struct grpc_closure_scheduler { - const grpc_closure_scheduler_vtable *vtable; + const grpc_closure_scheduler_vtable* vtable; }; /** A closure over a grpc_iomgr_cb_func. */ @@ -74,7 +68,7 @@ struct grpc_closure { /** Once queued, next indicates the next queued closure; before then, scratch * space */ union { - grpc_closure *next; + grpc_closure* next; gpr_mpscq_node atm_next; uintptr_t scratch; } next_data; @@ -83,15 +77,15 @@ struct grpc_closure { grpc_iomgr_cb_func cb; /** Arguments to be passed to "cb". */ - void *cb_arg; + void* cb_arg; - /** Scheduler to schedule against: NULL to schedule against current execution - context */ - grpc_closure_scheduler *scheduler; + /** Scheduler to schedule against: nullptr to schedule against current + execution context */ + grpc_closure_scheduler* scheduler; /** Once queued, the result of the closure. 
Before then: scratch space */ union { - grpc_error *error; + grpc_error* error; uintptr_t scratch; } error_data; @@ -100,109 +94,258 @@ struct grpc_closure { #ifndef NDEBUG bool scheduled; bool run; // true = run, false = scheduled - const char *file_created; + const char* file_created; int line_created; - const char *file_initiated; + const char* file_initiated; int line_initiated; #endif }; +#ifndef NDEBUG +inline grpc_closure* grpc_closure_init(const char* file, int line, + grpc_closure* closure, + grpc_iomgr_cb_func cb, void* cb_arg, + grpc_closure_scheduler* scheduler) { +#else +inline grpc_closure* grpc_closure_init(grpc_closure* closure, + grpc_iomgr_cb_func cb, void* cb_arg, + grpc_closure_scheduler* scheduler) { +#endif + closure->cb = cb; + closure->cb_arg = cb_arg; + closure->scheduler = scheduler; +#ifndef NDEBUG + closure->scheduled = false; + closure->file_initiated = nullptr; + closure->line_initiated = 0; + closure->run = false; + closure->file_created = file; + closure->line_created = line; +#endif + return closure; +} + /** Initializes \a closure with \a cb and \a cb_arg. Returns \a closure. */ #ifndef NDEBUG -grpc_closure *grpc_closure_init(const char *file, int line, - grpc_closure *closure, grpc_iomgr_cb_func cb, - void *cb_arg, - grpc_closure_scheduler *scheduler); #define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \ grpc_closure_init(__FILE__, __LINE__, closure, cb, cb_arg, scheduler) #else -grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb, - void *cb_arg, - grpc_closure_scheduler *scheduler); #define GRPC_CLOSURE_INIT(closure, cb, cb_arg, scheduler) \ grpc_closure_init(closure, cb, cb_arg, scheduler) #endif +namespace closure_impl { + +typedef struct { + grpc_iomgr_cb_func cb; + void* cb_arg; + grpc_closure wrapper; +} wrapped_closure; + +inline void closure_wrapper(void* arg, grpc_error* error) { + wrapped_closure* wc = static_cast(arg); + grpc_iomgr_cb_func cb = wc->cb; + void* cb_arg = wc->cb_arg; + gpr_free(wc); + cb(cb_arg, error); +} + +} // namespace closure_impl + +#ifndef NDEBUG +inline grpc_closure* grpc_closure_create(const char* file, int line, + grpc_iomgr_cb_func cb, void* cb_arg, + grpc_closure_scheduler* scheduler) { +#else +inline grpc_closure* grpc_closure_create(grpc_iomgr_cb_func cb, void* cb_arg, + grpc_closure_scheduler* scheduler) { +#endif + closure_impl::wrapped_closure* wc = + static_cast(gpr_malloc(sizeof(*wc))); + wc->cb = cb; + wc->cb_arg = cb_arg; +#ifndef NDEBUG + grpc_closure_init(file, line, &wc->wrapper, closure_impl::closure_wrapper, wc, + scheduler); +#else + grpc_closure_init(&wc->wrapper, closure_impl::closure_wrapper, wc, scheduler); +#endif + return &wc->wrapper; +} + /* Create a heap allocated closure: try to avoid except for very rare events */ #ifndef NDEBUG -grpc_closure *grpc_closure_create(const char *file, int line, - grpc_iomgr_cb_func cb, void *cb_arg, - grpc_closure_scheduler *scheduler); #define GRPC_CLOSURE_CREATE(cb, cb_arg, scheduler) \ grpc_closure_create(__FILE__, __LINE__, cb, cb_arg, scheduler) #else -grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg, - grpc_closure_scheduler *scheduler); #define GRPC_CLOSURE_CREATE(cb, cb_arg, scheduler) \ grpc_closure_create(cb, cb_arg, scheduler) #endif #define GRPC_CLOSURE_LIST_INIT \ - { NULL, NULL } + { nullptr, nullptr } -void grpc_closure_list_init(grpc_closure_list *list); +inline void grpc_closure_list_init(grpc_closure_list* closure_list) { + closure_list->head = closure_list->tail = nullptr; +} /** add \a 
closure to the end of \a list and set \a closure's result to \a error Returns true if \a list becomes non-empty */ -bool grpc_closure_list_append(grpc_closure_list *list, grpc_closure *closure, - grpc_error *error); +inline bool grpc_closure_list_append(grpc_closure_list* closure_list, + grpc_closure* closure, grpc_error* error) { + if (closure == nullptr) { + GRPC_ERROR_UNREF(error); + return false; + } + closure->error_data.error = error; + closure->next_data.next = nullptr; + bool was_empty = (closure_list->head == nullptr); + if (was_empty) { + closure_list->head = closure; + } else { + closure_list->tail->next_data.next = closure; + } + closure_list->tail = closure; + return was_empty; +} /** force all success bits in \a list to false */ -void grpc_closure_list_fail_all(grpc_closure_list *list, - grpc_error *forced_failure); +inline void grpc_closure_list_fail_all(grpc_closure_list* list, + grpc_error* forced_failure) { + for (grpc_closure* c = list->head; c != nullptr; c = c->next_data.next) { + if (c->error_data.error == GRPC_ERROR_NONE) { + c->error_data.error = GRPC_ERROR_REF(forced_failure); + } + } + GRPC_ERROR_UNREF(forced_failure); +} /** append all closures from \a src to \a dst and empty \a src. */ -void grpc_closure_list_move(grpc_closure_list *src, grpc_closure_list *dst); +inline void grpc_closure_list_move(grpc_closure_list* src, + grpc_closure_list* dst) { + if (src->head == nullptr) { + return; + } + if (dst->head == nullptr) { + *dst = *src; + } else { + dst->tail->next_data.next = src->head; + dst->tail = src->tail; + } + src->head = src->tail = nullptr; +} /** return whether \a list is empty. */ -bool grpc_closure_list_empty(grpc_closure_list list); +inline bool grpc_closure_list_empty(grpc_closure_list closure_list) { + return closure_list.head == nullptr; +} + +#ifndef NDEBUG +inline void grpc_closure_run(const char* file, int line, grpc_closure* c, + grpc_error* error) { +#else +inline void grpc_closure_run(grpc_closure* c, grpc_error* error) { +#endif + GPR_TIMER_SCOPE("grpc_closure_run", 0); + if (c != nullptr) { +#ifndef NDEBUG + c->file_initiated = file; + c->line_initiated = line; + c->run = true; + GPR_ASSERT(c->cb != nullptr); +#endif + c->scheduler->vtable->run(c, error); + } else { + GRPC_ERROR_UNREF(error); + } +} /** Run a closure directly. Caller ensures that no locks are being held above. * Note that calling this at the end of a closure callback function itself is * by definition safe. */ #ifndef NDEBUG -void grpc_closure_run(const char *file, int line, grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_error *error); -#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \ - grpc_closure_run(__FILE__, __LINE__, exec_ctx, closure, error) +#define GRPC_CLOSURE_RUN(closure, error) \ + grpc_closure_run(__FILE__, __LINE__, closure, error) #else -void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); -#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \ - grpc_closure_run(exec_ctx, closure, error) +#define GRPC_CLOSURE_RUN(closure, error) grpc_closure_run(closure, error) #endif -/** Schedule a closure to be run. Does not need to be run from a safe point. 
*/ #ifndef NDEBUG -void grpc_closure_sched(const char *file, int line, grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_error *error); -#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \ - grpc_closure_sched(__FILE__, __LINE__, exec_ctx, closure, error) +inline void grpc_closure_sched(const char* file, int line, grpc_closure* c, + grpc_error* error) { #else -void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); -#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \ - grpc_closure_sched(exec_ctx, closure, error) +inline void grpc_closure_sched(grpc_closure* c, grpc_error* error) { #endif + GPR_TIMER_SCOPE("grpc_closure_sched", 0); + if (c != nullptr) { +#ifndef NDEBUG + if (c->scheduled) { + gpr_log(GPR_ERROR, + "Closure already scheduled. (closure: %p, created: [%s:%d], " + "previously scheduled at: [%s: %d] run?: %s", + c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated, c->run ? "true" : "false"); + abort(); + } + c->scheduled = true; + c->file_initiated = file; + c->line_initiated = line; + c->run = false; + GPR_ASSERT(c->cb != nullptr); +#endif + c->scheduler->vtable->sched(c, error); + } else { + GRPC_ERROR_UNREF(error); + } +} -/** Schedule all closures in a list to be run. Does not need to be run from a - * safe point. */ +/** Schedule a closure to be run. Does not need to be run from a safe point. */ #ifndef NDEBUG -void grpc_closure_list_sched(const char *file, int line, - grpc_exec_ctx *exec_ctx, - grpc_closure_list *closure_list); -#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \ - grpc_closure_list_sched(__FILE__, __LINE__, exec_ctx, closure_list) +#define GRPC_CLOSURE_SCHED(closure, error) \ + grpc_closure_sched(__FILE__, __LINE__, closure, error) #else -void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, - grpc_closure_list *closure_list); -#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \ - grpc_closure_list_sched(exec_ctx, closure_list) +#define GRPC_CLOSURE_SCHED(closure, error) grpc_closure_sched(closure, error) #endif -#ifdef __cplusplus +#ifndef NDEBUG +inline void grpc_closure_list_sched(const char* file, int line, + grpc_closure_list* list) { +#else +inline void grpc_closure_list_sched(grpc_closure_list* list) { +#endif + grpc_closure* c = list->head; + while (c != nullptr) { + grpc_closure* next = c->next_data.next; +#ifndef NDEBUG + if (c->scheduled) { + gpr_log(GPR_ERROR, + "Closure already scheduled. (closure: %p, created: [%s:%d], " + "previously scheduled at: [%s: %d] run?: %s", + c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated, c->run ? "true" : "false"); + abort(); + } + c->scheduled = true; + c->file_initiated = file; + c->line_initiated = line; + c->run = false; + GPR_ASSERT(c->cb != nullptr); +#endif + c->scheduler->vtable->sched(c, c->error_data.error); + c = next; + } + list->head = list->tail = nullptr; } + +/** Schedule all closures in a list to be run. Does not need to be run from a + * safe point. 
*/ +#ifndef NDEBUG +#define GRPC_CLOSURE_LIST_SCHED(closure_list) \ + grpc_closure_list_sched(__FILE__, __LINE__, closure_list) +#else +#define GRPC_CLOSURE_LIST_SCHED(closure_list) \ + grpc_closure_list_sched(closure_list) #endif #endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/combiner.c b/Sources/CgRPC/src/core/lib/iomgr/combiner.cc similarity index 53% rename from Sources/CgRPC/src/core/lib/iomgr/combiner.c rename to Sources/CgRPC/src/core/lib/iomgr/combiner.cc index f899b25f1..9429842eb 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/combiner.c +++ b/Sources/CgRPC/src/core/lib/iomgr/combiner.cc @@ -16,9 +16,12 @@ * */ +#include + #include "src/core/lib/iomgr/combiner.h" #include +#include #include #include @@ -28,21 +31,20 @@ #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/profiling/timers.h" -grpc_tracer_flag grpc_combiner_trace = - GRPC_TRACER_INITIALIZER(false, "combiner"); +grpc_core::TraceFlag grpc_combiner_trace(false, "combiner"); -#define GRPC_COMBINER_TRACE(fn) \ - do { \ - if (GRPC_TRACER_ON(grpc_combiner_trace)) { \ - fn; \ - } \ +#define GRPC_COMBINER_TRACE(fn) \ + do { \ + if (grpc_combiner_trace.enabled()) { \ + fn; \ + } \ } while (0) #define STATE_UNORPHANED 1 #define STATE_ELEM_COUNT_LOW_BIT 2 struct grpc_combiner { - grpc_combiner *next_combiner_on_this_exec_ctx; + grpc_combiner* next_combiner_on_this_exec_ctx; grpc_closure_scheduler scheduler; grpc_closure_scheduler finally_scheduler; gpr_mpscq queue; @@ -61,20 +63,18 @@ struct grpc_combiner { gpr_refcount refs; }; -static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error); -static void combiner_finally_exec(grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_error *error); +static void combiner_exec(grpc_closure* closure, grpc_error* error); +static void combiner_finally_exec(grpc_closure* closure, grpc_error* error); static const grpc_closure_scheduler_vtable scheduler = { combiner_exec, combiner_exec, "combiner:immediately"}; static const grpc_closure_scheduler_vtable finally_scheduler = { combiner_finally_exec, combiner_finally_exec, "combiner:finally"}; -static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error); +static void offload(void* arg, grpc_error* error); -grpc_combiner *grpc_combiner_create(void) { - grpc_combiner *lock = (grpc_combiner *)gpr_zalloc(sizeof(*lock)); +grpc_combiner* grpc_combiner_create(void) { + grpc_combiner* lock = static_cast(gpr_zalloc(sizeof(*lock))); gpr_ref_init(&lock->refs, 1); lock->scheduler.vtable = &scheduler; lock->finally_scheduler.vtable = &finally_scheduler; @@ -83,29 +83,29 @@ grpc_combiner *grpc_combiner_create(void) { grpc_closure_list_init(&lock->final_list); GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)); - GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock)); + GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p create", lock)); return lock; } -static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { - GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock)); +static void really_destroy(grpc_combiner* lock) { + GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p really_destroy", lock)); GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0); gpr_mpscq_destroy(&lock->queue); gpr_free(lock); } -static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { +static void start_destroy(grpc_combiner* lock) { gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, 
-STATE_UNORPHANED); GRPC_COMBINER_TRACE(gpr_log( - GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state)); + GPR_INFO, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state)); if (old_state == 1) { - really_destroy(exec_ctx, lock); + really_destroy(lock); } } #ifndef NDEBUG #define GRPC_COMBINER_DEBUG_SPAM(op, delta) \ - if (GRPC_TRACER_ON(grpc_combiner_trace)) { \ + if (grpc_combiner_trace.enabled()) { \ gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, \ "C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op), \ gpr_atm_no_barrier_load(&lock->refs.count), \ @@ -115,66 +115,67 @@ static void start_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { #define GRPC_COMBINER_DEBUG_SPAM(op, delta) #endif -void grpc_combiner_unref(grpc_exec_ctx *exec_ctx, - grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS) { +void grpc_combiner_unref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) { GRPC_COMBINER_DEBUG_SPAM("UNREF", -1); if (gpr_unref(&lock->refs)) { - start_destroy(exec_ctx, lock); + start_destroy(lock); } } -grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS) { +grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) { GRPC_COMBINER_DEBUG_SPAM(" REF", 1); gpr_ref(&lock->refs); return lock; } -static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx, - grpc_combiner *lock) { - lock->next_combiner_on_this_exec_ctx = NULL; - if (exec_ctx->active_combiner == NULL) { - exec_ctx->active_combiner = exec_ctx->last_combiner = lock; +static void push_last_on_exec_ctx(grpc_combiner* lock) { + lock->next_combiner_on_this_exec_ctx = nullptr; + if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) { + grpc_core::ExecCtx::Get()->combiner_data()->active_combiner = + grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock; } else { - exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock; - exec_ctx->last_combiner = lock; + grpc_core::ExecCtx::Get() + ->combiner_data() + ->last_combiner->next_combiner_on_this_exec_ctx = lock; + grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock; } } -static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx, - grpc_combiner *lock) { - lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner; - exec_ctx->active_combiner = lock; - if (lock->next_combiner_on_this_exec_ctx == NULL) { - exec_ctx->last_combiner = lock; +static void push_first_on_exec_ctx(grpc_combiner* lock) { + lock->next_combiner_on_this_exec_ctx = + grpc_core::ExecCtx::Get()->combiner_data()->active_combiner; + grpc_core::ExecCtx::Get()->combiner_data()->active_combiner = lock; + if (lock->next_combiner_on_this_exec_ctx == nullptr) { + grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock; } } #define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \ - ((grpc_combiner *)(((char *)((closure)->scheduler)) - \ - offsetof(grpc_combiner, scheduler_name))) - -static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl, - grpc_error *error) { - GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx); - GPR_TIMER_BEGIN("combiner.execute", 0); - grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler); + ((grpc_combiner*)(((char*)((closure)->scheduler)) - \ + offsetof(grpc_combiner, scheduler_name))) + +static void combiner_exec(grpc_closure* cl, grpc_error* error) { + GPR_TIMER_SCOPE("combiner.execute", 0); + GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(); + grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler); gpr_atm last = 
gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT); - GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, + GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR, lock, cl, last)); if (last == 1) { - GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx); + GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(); + GPR_TIMER_MARK("combiner.initiated", 0); gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, - (gpr_atm)exec_ctx); + (gpr_atm)grpc_core::ExecCtx::Get()); // first element on this list: add it to the list of combiner locks // executing within this exec_ctx - push_last_on_exec_ctx(exec_ctx, lock); + push_last_on_exec_ctx(lock); } else { // there may be a race with setting here: if that happens, we may delay // offload for one or two actions, and that's fine gpr_atm initiator = gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null); - if (initiator != 0 && initiator != (gpr_atm)exec_ctx) { + if (initiator != 0 && initiator != (gpr_atm)grpc_core::ExecCtx::Get()) { gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0); } } @@ -182,56 +183,56 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl, assert(cl->cb); cl->error_data.error = error; gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next); - GPR_TIMER_END("combiner.execute", 0); } -static void move_next(grpc_exec_ctx *exec_ctx) { - exec_ctx->active_combiner = - exec_ctx->active_combiner->next_combiner_on_this_exec_ctx; - if (exec_ctx->active_combiner == NULL) { - exec_ctx->last_combiner = NULL; +static void move_next() { + grpc_core::ExecCtx::Get()->combiner_data()->active_combiner = + grpc_core::ExecCtx::Get() + ->combiner_data() + ->active_combiner->next_combiner_on_this_exec_ctx; + if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) { + grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = nullptr; } } -static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_combiner *lock = (grpc_combiner *)arg; - push_last_on_exec_ctx(exec_ctx, lock); +static void offload(void* arg, grpc_error* error) { + grpc_combiner* lock = static_cast(arg); + push_last_on_exec_ctx(lock); } -static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { - GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx); - move_next(exec_ctx); - GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock)); - GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE); +static void queue_offload(grpc_combiner* lock) { + GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(); + move_next(); + GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p queue_offload", lock)); + GRPC_CLOSURE_SCHED(&lock->offload, GRPC_ERROR_NONE); } -bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { - GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0); - grpc_combiner *lock = exec_ctx->active_combiner; - if (lock == NULL) { - GPR_TIMER_END("combiner.continue_exec_ctx", 0); +bool grpc_combiner_continue_exec_ctx() { + GPR_TIMER_SCOPE("combiner.continue_exec_ctx", 0); + grpc_combiner* lock = + grpc_core::ExecCtx::Get()->combiner_data()->active_combiner; + if (lock == nullptr) { return false; } bool contended = gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0; - GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, + GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p grpc_combiner_continue_exec_ctx " "contended=%d " "exec_ctx_ready_to_finish=%d " "time_to_execute_final_list=%d", lock, contended, - grpc_exec_ctx_ready_to_finish(exec_ctx), + grpc_core::ExecCtx::Get()->IsReadyToFinish(), 
lock->time_to_execute_final_list)); - if (contended && grpc_exec_ctx_ready_to_finish(exec_ctx) && + if (contended && grpc_core::ExecCtx::Get()->IsReadyToFinish() && grpc_executor_is_threaded()) { GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0); // this execution context wants to move on: schedule remaining work to be // picked up on the executor - queue_offload(exec_ctx, lock); - GPR_TIMER_END("combiner.continue_exec_ctx", 0); + queue_offload(lock); return true; } @@ -239,54 +240,51 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { // peek to see if something new has shown up, and execute that with // priority (gpr_atm_acq_load(&lock->state) >> 1) > 1) { - gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue); + gpr_mpscq_node* n = gpr_mpscq_pop(&lock->queue); GRPC_COMBINER_TRACE( - gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n)); - if (n == NULL) { + gpr_log(GPR_INFO, "C:%p maybe_finish_one n=%p", lock, n)); + if (n == nullptr) { // queue is in an inconsistent state: use this as a cue that we should // go off and do something else for a while (and come back later) GPR_TIMER_MARK("delay_busy", 0); - queue_offload(exec_ctx, lock); - GPR_TIMER_END("combiner.continue_exec_ctx", 0); + queue_offload(lock); return true; } - GPR_TIMER_BEGIN("combiner.exec1", 0); - grpc_closure *cl = (grpc_closure *)n; - grpc_error *cl_err = cl->error_data.error; + GPR_TIMER_SCOPE("combiner.exec1", 0); + grpc_closure* cl = reinterpret_cast(n); + grpc_error* cl_err = cl->error_data.error; #ifndef NDEBUG cl->scheduled = false; #endif - cl->cb(exec_ctx, cl->cb_arg, cl_err); + cl->cb(cl->cb_arg, cl_err); GRPC_ERROR_UNREF(cl_err); - GPR_TIMER_END("combiner.exec1", 0); } else { - grpc_closure *c = lock->final_list.head; - GPR_ASSERT(c != NULL); + grpc_closure* c = lock->final_list.head; + GPR_ASSERT(c != nullptr); grpc_closure_list_init(&lock->final_list); int loops = 0; - while (c != NULL) { - GPR_TIMER_BEGIN("combiner.exec_1final", 0); + while (c != nullptr) { + GPR_TIMER_SCOPE("combiner.exec_1final", 0); GRPC_COMBINER_TRACE( - gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c)); - grpc_closure *next = c->next_data.next; - grpc_error *error = c->error_data.error; + gpr_log(GPR_INFO, "C:%p execute_final[%d] c=%p", lock, loops, c)); + grpc_closure* next = c->next_data.next; + grpc_error* error = c->error_data.error; #ifndef NDEBUG c->scheduled = false; #endif - c->cb(exec_ctx, c->cb_arg, error); + c->cb(c->cb_arg, error); GRPC_ERROR_UNREF(error); c = next; - GPR_TIMER_END("combiner.exec_1final", 0); } } GPR_TIMER_MARK("unref", 0); - move_next(exec_ctx); + move_next(); lock->time_to_execute_final_list = false; gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT); GRPC_COMBINER_TRACE( - gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state)); + gpr_log(GPR_INFO, "C:%p finish old_state=%" PRIdPTR, lock, old_state)); // Define a macro to ease readability of the following switch statement. #define OLD_STATE_WAS(orphaned, elem_count) \ (((orphaned) ? 
0 : STATE_UNORPHANED) | \ @@ -306,44 +304,36 @@ bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) { break; case OLD_STATE_WAS(false, 1): // had one count, one unorphaned --> unlocked unorphaned - GPR_TIMER_END("combiner.continue_exec_ctx", 0); return true; case OLD_STATE_WAS(true, 1): // and one count, one orphaned --> unlocked and orphaned - really_destroy(exec_ctx, lock); - GPR_TIMER_END("combiner.continue_exec_ctx", 0); + really_destroy(lock); return true; case OLD_STATE_WAS(false, 0): case OLD_STATE_WAS(true, 0): // these values are illegal - representing an already unlocked or // deleted lock - GPR_TIMER_END("combiner.continue_exec_ctx", 0); GPR_UNREACHABLE_CODE(return true); } - push_first_on_exec_ctx(exec_ctx, lock); - GPR_TIMER_END("combiner.continue_exec_ctx", 0); + push_first_on_exec_ctx(lock); return true; } -static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure, - grpc_error *error); +static void enqueue_finally(void* closure, grpc_error* error); -static void combiner_finally_exec(grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_error *error) { - GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx); - grpc_combiner *lock = +static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) { + GPR_TIMER_SCOPE("combiner.execute_finally", 0); + GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(); + grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler); - GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, - "C:%p grpc_combiner_execute_finally c=%p; ac=%p", - lock, closure, exec_ctx->active_combiner)); - GPR_TIMER_BEGIN("combiner.execute_finally", 0); - if (exec_ctx->active_combiner != lock) { + GRPC_COMBINER_TRACE(gpr_log( + GPR_INFO, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock, closure, + grpc_core::ExecCtx::Get()->combiner_data()->active_combiner)); + if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) { GPR_TIMER_MARK("slowpath", 0); - GRPC_CLOSURE_SCHED(exec_ctx, - GRPC_CLOSURE_CREATE(enqueue_finally, closure, + GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure, grpc_combiner_scheduler(lock)), error); - GPR_TIMER_END("combiner.execute_finally", 0); return; } @@ -351,20 +341,18 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx, gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT); } grpc_closure_list_append(&lock->final_list, closure, error); - GPR_TIMER_END("combiner.execute_finally", 0); } -static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure, - grpc_error *error) { - combiner_finally_exec(exec_ctx, (grpc_closure *)closure, +static void enqueue_finally(void* closure, grpc_error* error) { + combiner_finally_exec(static_cast(closure), GRPC_ERROR_REF(error)); } -grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) { +grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* combiner) { return &combiner->scheduler; } -grpc_closure_scheduler *grpc_combiner_finally_scheduler( - grpc_combiner *combiner) { +grpc_closure_scheduler* grpc_combiner_finally_scheduler( + grpc_combiner* combiner) { return &combiner->finally_scheduler; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/combiner.h b/Sources/CgRPC/src/core/lib/iomgr/combiner.h index 8e0434369..0d63e468d 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/combiner.h +++ b/Sources/CgRPC/src/core/lib/iomgr/combiner.h @@ -19,12 +19,14 @@ #ifndef GRPC_CORE_LIB_IOMGR_COMBINER_H #define GRPC_CORE_LIB_IOMGR_COMBINER_H +#include + #include #include #include 
"src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/mpscq.h" #include "src/core/lib/iomgr/exec_ctx.h" -#include "src/core/lib/support/mpscq.h" // Provides serialized access to some resource. // Each action queued on a combiner is executed serially in a borrowed thread. @@ -33,34 +35,32 @@ // Initialize the lock, with an optional workqueue to shift load to when // necessary -grpc_combiner *grpc_combiner_create(void); +grpc_combiner* grpc_combiner_create(void); #ifndef NDEBUG #define GRPC_COMBINER_DEBUG_ARGS \ , const char *file, int line, const char *reason #define GRPC_COMBINER_REF(combiner, reason) \ grpc_combiner_ref((combiner), __FILE__, __LINE__, (reason)) -#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \ - grpc_combiner_unref((exec_ctx), (combiner), __FILE__, __LINE__, (reason)) +#define GRPC_COMBINER_UNREF(combiner, reason) \ + grpc_combiner_unref((combiner), __FILE__, __LINE__, (reason)) #else #define GRPC_COMBINER_DEBUG_ARGS #define GRPC_COMBINER_REF(combiner, reason) grpc_combiner_ref((combiner)) -#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \ - grpc_combiner_unref((exec_ctx), (combiner)) +#define GRPC_COMBINER_UNREF(combiner, reason) grpc_combiner_unref((combiner)) #endif // Ref/unref the lock, for when we're sharing the lock ownership // Prefer to use the macros above -grpc_combiner *grpc_combiner_ref(grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS); -void grpc_combiner_unref(grpc_exec_ctx *exec_ctx, - grpc_combiner *lock GRPC_COMBINER_DEBUG_ARGS); +grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS); +void grpc_combiner_unref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS); // Fetch a scheduler to schedule closures against -grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock); +grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* lock); // Scheduler to execute \a action within the lock just prior to unlocking. 
-grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock); +grpc_closure_scheduler* grpc_combiner_finally_scheduler(grpc_combiner* lock); -bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx); +bool grpc_combiner_continue_exec_ctx(); -extern grpc_tracer_flag grpc_combiner_trace; +extern grpc_core::TraceFlag grpc_combiner_trace; #endif /* GRPC_CORE_LIB_IOMGR_COMBINER_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/endpoint.c b/Sources/CgRPC/src/core/lib/iomgr/endpoint.cc similarity index 50% rename from Sources/CgRPC/src/core/lib/iomgr/endpoint.c rename to Sources/CgRPC/src/core/lib/iomgr/endpoint.cc index 37cce335c..92e793011 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/endpoint.c +++ b/Sources/CgRPC/src/core/lib/iomgr/endpoint.cc @@ -16,38 +16,42 @@ * */ +#include + #include "src/core/lib/iomgr/endpoint.h" -void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, - grpc_slice_buffer* slices, grpc_closure* cb) { - ep->vtable->read(exec_ctx, ep, slices, cb); +grpc_core::TraceFlag grpc_tcp_trace(false, "tcp"); + +void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices, + grpc_closure* cb) { + ep->vtable->read(ep, slices, cb); } -void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, - grpc_slice_buffer* slices, grpc_closure* cb) { - ep->vtable->write(exec_ctx, ep, slices, cb); +void grpc_endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* slices, + grpc_closure* cb) { + ep->vtable->write(ep, slices, cb); } -void grpc_endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, - grpc_pollset* pollset) { - ep->vtable->add_to_pollset(exec_ctx, ep, pollset); +void grpc_endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) { + ep->vtable->add_to_pollset(ep, pollset); } -void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_endpoint* ep, +void grpc_endpoint_add_to_pollset_set(grpc_endpoint* ep, grpc_pollset_set* pollset_set) { - ep->vtable->add_to_pollset_set(exec_ctx, ep, pollset_set); + ep->vtable->add_to_pollset_set(ep, pollset_set); } -void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, - grpc_error* why) { - ep->vtable->shutdown(exec_ctx, ep, why); +void grpc_endpoint_delete_from_pollset_set(grpc_endpoint* ep, + grpc_pollset_set* pollset_set) { + ep->vtable->delete_from_pollset_set(ep, pollset_set); } -void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) { - ep->vtable->destroy(exec_ctx, ep); +void grpc_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) { + ep->vtable->shutdown(ep, why); } +void grpc_endpoint_destroy(grpc_endpoint* ep) { ep->vtable->destroy(ep); } + char* grpc_endpoint_get_peer(grpc_endpoint* ep) { return ep->vtable->get_peer(ep); } diff --git a/Sources/CgRPC/src/core/lib/iomgr/endpoint.h b/Sources/CgRPC/src/core/lib/iomgr/endpoint.h index 8f0523a98..15db1649f 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/endpoint.h +++ b/Sources/CgRPC/src/core/lib/iomgr/endpoint.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_ENDPOINT_H #define GRPC_CORE_LIB_IOMGR_ENDPOINT_H +#include + #include #include #include @@ -33,19 +35,16 @@ typedef struct grpc_endpoint grpc_endpoint; typedef struct grpc_endpoint_vtable grpc_endpoint_vtable; struct grpc_endpoint_vtable { - void (*read)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb); - void (*write)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb); - void (*add_to_pollset)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - 
grpc_pollset *pollset); - void (*add_to_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset_set *pollset); - void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_error *why); - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep); - grpc_resource_user *(*get_resource_user)(grpc_endpoint *ep); - char *(*get_peer)(grpc_endpoint *ep); - int (*get_fd)(grpc_endpoint *ep); + void (*read)(grpc_endpoint* ep, grpc_slice_buffer* slices, grpc_closure* cb); + void (*write)(grpc_endpoint* ep, grpc_slice_buffer* slices, grpc_closure* cb); + void (*add_to_pollset)(grpc_endpoint* ep, grpc_pollset* pollset); + void (*add_to_pollset_set)(grpc_endpoint* ep, grpc_pollset_set* pollset); + void (*delete_from_pollset_set)(grpc_endpoint* ep, grpc_pollset_set* pollset); + void (*shutdown)(grpc_endpoint* ep, grpc_error* why); + void (*destroy)(grpc_endpoint* ep); + grpc_resource_user* (*get_resource_user)(grpc_endpoint* ep); + char* (*get_peer)(grpc_endpoint* ep); + int (*get_fd)(grpc_endpoint* ep); }; /* When data is available on the connection, calls the callback with slices. @@ -53,14 +52,14 @@ struct grpc_endpoint_vtable { indicates the endpoint is closed. Valid slices may be placed into \a slices even when the callback is invoked with error != GRPC_ERROR_NONE. */ -void grpc_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb); +void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices, + grpc_closure* cb); -char *grpc_endpoint_get_peer(grpc_endpoint *ep); +char* grpc_endpoint_get_peer(grpc_endpoint* ep); /* Get the file descriptor used by \a ep. Return -1 if \a ep is not using an fd. - */ -int grpc_endpoint_get_fd(grpc_endpoint *ep); + */ +int grpc_endpoint_get_fd(grpc_endpoint* ep); /* Write slices out to the socket. @@ -72,27 +71,28 @@ int grpc_endpoint_get_fd(grpc_endpoint *ep); No guarantee is made to the content of slices after a write EXCEPT that it is a valid slice buffer. 
*/ -void grpc_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb); +void grpc_endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* slices, + grpc_closure* cb); /* Causes any pending and future read/write callbacks to run immediately with success==0 */ -void grpc_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_error *why); -void grpc_endpoint_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep); +void grpc_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why); +void grpc_endpoint_destroy(grpc_endpoint* ep); + +/* Add an endpoint to a pollset or pollset_set, so that when the pollset is + polled, events from this endpoint are considered */ +void grpc_endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset); +void grpc_endpoint_add_to_pollset_set(grpc_endpoint* ep, + grpc_pollset_set* pollset_set); -/* Add an endpoint to a pollset, so that when the pollset is polled, events from - this endpoint are considered */ -void grpc_endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *pollset); -void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_endpoint *ep, - grpc_pollset_set *pollset_set); +/* Delete an endpoint from a pollset_set */ +void grpc_endpoint_delete_from_pollset_set(grpc_endpoint* ep, + grpc_pollset_set* pollset_set); -grpc_resource_user *grpc_endpoint_get_resource_user(grpc_endpoint *endpoint); +grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* endpoint); struct grpc_endpoint { - const grpc_endpoint_vtable *vtable; + const grpc_endpoint_vtable* vtable; }; #endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair.h b/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair.h index b60e62fc3..08f9e3cab 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair.h +++ b/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair.h @@ -19,14 +19,16 @@ #ifndef GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H #define GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H +#include + #include "src/core/lib/iomgr/endpoint.h" typedef struct { - grpc_endpoint *client; - grpc_endpoint *server; + grpc_endpoint* client; + grpc_endpoint* server; } grpc_endpoint_pair; -grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, - grpc_channel_args *args); +grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name, + grpc_channel_args* args); #endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_posix.c b/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_posix.cc similarity index 83% rename from Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_posix.cc index 3ade2148b..49850ab3a 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_posix.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SOCKET @@ -33,8 +35,8 @@ #include #include #include +#include "src/core/lib/gpr/string.h" #include "src/core/lib/iomgr/tcp_posix.h" -#include "src/core/lib/support/string.h" static void create_sockets(int sv[2]) { int flags; @@ -47,25 +49,24 @@ static void create_sockets(int sv[2]) { GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]) == GRPC_ERROR_NONE); } -grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, - grpc_channel_args *args) { +grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name, + 
grpc_channel_args* args) { int sv[2]; grpc_endpoint_pair p; - char *final_name; + char* final_name; create_sockets(sv); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; gpr_asprintf(&final_name, "%s:client", name); - p.client = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[1], final_name), args, + p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), args, "socketpair-server"); gpr_free(final_name); gpr_asprintf(&final_name, "%s:server", name); - p.server = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[0], final_name), args, + p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), args, "socketpair-client"); gpr_free(final_name); - grpc_exec_ctx_finish(&exec_ctx); return p; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_uv.c b/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_uv.cc similarity index 89% rename from Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_uv.c rename to Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_uv.cc index ff72fe049..b99d178cb 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_uv.c +++ b/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_uv.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_UV @@ -26,8 +28,8 @@ #include "src/core/lib/iomgr/endpoint_pair.h" -grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, - grpc_channel_args *args) { +grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name, + grpc_channel_args* args) { grpc_endpoint_pair endpoint_pair; // TODO(mlumish): implement this properly under libuv GPR_ASSERT(false && diff --git a/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_windows.c b/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_windows.cc similarity index 75% rename from Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_windows.cc index 782fa2fd6..177331d68 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/endpoint_pair_windows.cc @@ -16,10 +16,13 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINSOCK_SOCKET #include "src/core/lib/iomgr/endpoint_pair.h" +#include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/sockaddr_utils.h" #include @@ -44,19 +47,19 @@ static void create_sockets(SOCKET sv[2]) { memset(&addr, 0, sizeof(addr)); addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); addr.sin_family = AF_INET; - GPR_ASSERT(bind(lst_sock, (struct sockaddr *)&addr, sizeof(addr)) != + GPR_ASSERT(bind(lst_sock, (grpc_sockaddr*)&addr, sizeof(addr)) != SOCKET_ERROR); GPR_ASSERT(listen(lst_sock, SOMAXCONN) != SOCKET_ERROR); - GPR_ASSERT(getsockname(lst_sock, (struct sockaddr *)&addr, &addr_len) != + GPR_ASSERT(getsockname(lst_sock, (grpc_sockaddr*)&addr, &addr_len) != SOCKET_ERROR); cli_sock = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED); GPR_ASSERT(cli_sock != INVALID_SOCKET); - GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr *)&addr, addr_len, NULL, - NULL, NULL, NULL) == 0); - svr_sock = accept(lst_sock, (struct sockaddr *)&addr, &addr_len); + GPR_ASSERT(WSAConnect(cli_sock, (grpc_sockaddr*)&addr, addr_len, NULL, NULL, + NULL, NULL) == 0); + svr_sock = accept(lst_sock, (grpc_sockaddr*)&addr, &addr_len); GPR_ASSERT(svr_sock != INVALID_SOCKET); closesocket(lst_sock); @@ -68,18 +71,16 @@ static void create_sockets(SOCKET sv[2]) { } grpc_endpoint_pair grpc_iomgr_create_endpoint_pair( - const char *name, grpc_channel_args *channel_args) { + const char* 
name, grpc_channel_args* channel_args) { SOCKET sv[2]; grpc_endpoint_pair p; create_sockets(sv); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - p.client = grpc_tcp_create(&exec_ctx, - grpc_winsocket_create(sv[1], "endpoint:client"), + grpc_core::ExecCtx exec_ctx; + p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"), channel_args, "endpoint:server"); - p.server = grpc_tcp_create(&exec_ctx, - grpc_winsocket_create(sv[0], "endpoint:server"), + p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"), channel_args, "endpoint:client"); - grpc_exec_ctx_finish(&exec_ctx); + return p; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/error.c b/Sources/CgRPC/src/core/lib/iomgr/error.cc similarity index 68% rename from Sources/CgRPC/src/core/lib/iomgr/error.c rename to Sources/CgRPC/src/core/lib/iomgr/error.cc index aa0550153..4088cf612 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/error.c +++ b/Sources/CgRPC/src/core/lib/iomgr/error.cc @@ -15,32 +15,33 @@ * limitations under the License. * */ +#include #include "src/core/lib/iomgr/error.h" +#include #include #include #include #include #include -#include #ifdef GPR_WINDOWS #include #endif #include "src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/useful.h" #include "src/core/lib/iomgr/error_internal.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_error_refcount = - GRPC_TRACER_INITIALIZER(false, "error_refcount"); -#endif +grpc_core::DebugOnlyTraceFlag grpc_trace_error_refcount(false, + "error_refcount"); +grpc_core::DebugOnlyTraceFlag grpc_trace_closure(false, "closure"); -static const char *error_int_name(grpc_error_ints key) { +static const char* error_int_name(grpc_error_ints key) { switch (key) { case GRPC_ERROR_INT_ERRNO: return "errno"; @@ -78,7 +79,7 @@ static const char *error_int_name(grpc_error_ints key) { GPR_UNREACHABLE_CODE(return "unknown"); } -static const char *error_str_name(grpc_error_strs key) { +static const char* error_str_name(grpc_error_strs key) { switch (key) { case GRPC_ERROR_STR_KEY: return "key"; @@ -110,7 +111,7 @@ static const char *error_str_name(grpc_error_strs key) { GPR_UNREACHABLE_CODE(return "unknown"); } -static const char *error_time_name(grpc_error_times key) { +static const char* error_time_name(grpc_error_times key) { switch (key) { case GRPC_ERROR_TIME_CREATED: return "created"; @@ -120,15 +121,15 @@ static const char *error_time_name(grpc_error_times key) { GPR_UNREACHABLE_CODE(return "unknown"); } -bool grpc_error_is_special(grpc_error *err) { +bool grpc_error_is_special(grpc_error* err) { return err == GRPC_ERROR_NONE || err == GRPC_ERROR_OOM || err == GRPC_ERROR_CANCELLED; } #ifndef NDEBUG -grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line) { +grpc_error* grpc_error_ref(grpc_error* err, const char* file, int line) { if (grpc_error_is_special(err)) return err; - if (GRPC_TRACER_ON(grpc_trace_error_refcount)) { + if (grpc_trace_error_refcount.enabled()) { gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err, gpr_atm_no_barrier_load(&err->atomics.refs.count), gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line); @@ -137,17 +138,18 @@ grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line) { return err; } #else -grpc_error *grpc_error_ref(grpc_error *err) { +grpc_error* grpc_error_ref(grpc_error* err) { if (grpc_error_is_special(err)) return err; gpr_ref(&err->atomics.refs); return err; } #endif -static 
void unref_errs(grpc_error *err) { +static void unref_errs(grpc_error* err) { uint8_t slot = err->first_err; while (slot != UINT8_MAX) { - grpc_linked_error *lerr = (grpc_linked_error *)(err->arena + slot); + grpc_linked_error* lerr = + reinterpret_cast(err->arena + slot); GRPC_ERROR_UNREF(lerr->err); GPR_ASSERT(err->last_err == slot ? lerr->next == UINT8_MAX : lerr->next != UINT8_MAX); @@ -155,33 +157,29 @@ static void unref_errs(grpc_error *err) { } } -static void unref_slice(grpc_slice slice) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_unref_internal(&exec_ctx, slice); - grpc_exec_ctx_finish(&exec_ctx); -} +static void unref_slice(grpc_slice slice) { grpc_slice_unref_internal(slice); } -static void unref_strs(grpc_error *err) { +static void unref_strs(grpc_error* err) { for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) { uint8_t slot = err->strs[which]; if (slot != UINT8_MAX) { - unref_slice(*(grpc_slice *)(err->arena + slot)); + unref_slice(*reinterpret_cast(err->arena + slot)); } } } -static void error_destroy(grpc_error *err) { +static void error_destroy(grpc_error* err) { GPR_ASSERT(!grpc_error_is_special(err)); unref_errs(err); unref_strs(err); - gpr_free((void *)gpr_atm_acq_load(&err->atomics.error_string)); + gpr_free((void*)gpr_atm_acq_load(&err->atomics.error_string)); gpr_free(err); } #ifndef NDEBUG -void grpc_error_unref(grpc_error *err, const char *file, int line) { +void grpc_error_unref(grpc_error* err, const char* file, int line) { if (grpc_error_is_special(err)) return; - if (GRPC_TRACER_ON(grpc_trace_error_refcount)) { + if (grpc_trace_error_refcount.enabled()) { gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err, gpr_atm_no_barrier_load(&err->atomics.refs.count), gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line); @@ -191,7 +189,7 @@ void grpc_error_unref(grpc_error *err, const char *file, int line) { } } #else -void grpc_error_unref(grpc_error *err) { +void grpc_error_unref(grpc_error* err) { if (grpc_error_is_special(err)) return; if (gpr_unref(&err->atomics.refs)) { error_destroy(err); @@ -199,22 +197,22 @@ void grpc_error_unref(grpc_error *err) { } #endif -static uint8_t get_placement(grpc_error **err, size_t size) { +static uint8_t get_placement(grpc_error** err, size_t size) { GPR_ASSERT(*err); - uint8_t slots = (uint8_t)(size / sizeof(intptr_t)); + uint8_t slots = static_cast(size / sizeof(intptr_t)); if ((*err)->arena_size + slots > (*err)->arena_capacity) { - (*err)->arena_capacity = - (uint8_t)GPR_MIN(UINT8_MAX - 1, (3 * (*err)->arena_capacity / 2)); + (*err)->arena_capacity = static_cast GPR_MIN( + UINT8_MAX - 1, (3 * (*err)->arena_capacity / 2)); if ((*err)->arena_size + slots > (*err)->arena_capacity) { return UINT8_MAX; } #ifndef NDEBUG - grpc_error *orig = *err; + grpc_error* orig = *err; #endif - *err = (grpc_error *)gpr_realloc( - *err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t)); + *err = static_cast(gpr_realloc( + *err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t))); #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_error_refcount)) { + if (grpc_trace_error_refcount.enabled()) { if (*err != orig) { gpr_log(GPR_DEBUG, "realloc %p -> %p", orig, *err); } @@ -222,11 +220,11 @@ static uint8_t get_placement(grpc_error **err, size_t size) { #endif } uint8_t placement = (*err)->arena_size; - (*err)->arena_size = (uint8_t)((*err)->arena_size + slots); + (*err)->arena_size = static_cast((*err)->arena_size + slots); return placement; } -static void 
internal_set_int(grpc_error **err, grpc_error_ints which, +static void internal_set_int(grpc_error** err, grpc_error_ints which, intptr_t value) { uint8_t slot = (*err)->ints[which]; if (slot == UINT8_MAX) { @@ -241,36 +239,36 @@ static void internal_set_int(grpc_error **err, grpc_error_ints which, (*err)->arena[slot] = value; } -static void internal_set_str(grpc_error **err, grpc_error_strs which, +static void internal_set_str(grpc_error** err, grpc_error_strs which, grpc_slice value) { uint8_t slot = (*err)->strs[which]; if (slot == UINT8_MAX) { slot = get_placement(err, sizeof(value)); if (slot == UINT8_MAX) { - const char *str = grpc_slice_to_c_string(value); + const char* str = grpc_slice_to_c_string(value); gpr_log(GPR_ERROR, "Error %p is full, dropping string {\"%s\":\"%s\"}", *err, error_str_name(which), str); - gpr_free((void *)str); + gpr_free((void*)str); return; } } else { - unref_slice(*(grpc_slice *)((*err)->arena + slot)); + unref_slice(*reinterpret_cast((*err)->arena + slot)); } (*err)->strs[which] = slot; memcpy((*err)->arena + slot, &value, sizeof(value)); } -static char *fmt_time(gpr_timespec tm); -static void internal_set_time(grpc_error **err, grpc_error_times which, +static char* fmt_time(gpr_timespec tm); +static void internal_set_time(grpc_error** err, grpc_error_times which, gpr_timespec value) { uint8_t slot = (*err)->times[which]; if (slot == UINT8_MAX) { slot = get_placement(err, sizeof(value)); if (slot == UINT8_MAX) { - const char *time_str = fmt_time(value); + const char* time_str = fmt_time(value); gpr_log(GPR_ERROR, "Error %p is full, dropping \"%s\":\"%s\"}", *err, error_time_name(which), time_str); - gpr_free((void *)time_str); + gpr_free((void*)time_str); return; } } @@ -278,7 +276,7 @@ static void internal_set_time(grpc_error **err, grpc_error_times which, memcpy((*err)->arena + slot, &value, sizeof(value)); } -static void internal_add_error(grpc_error **err, grpc_error *new_err) { +static void internal_add_error(grpc_error** err, grpc_error* new_err) { grpc_linked_error new_last = {new_err, UINT8_MAX}; uint8_t slot = get_placement(err, sizeof(grpc_linked_error)); if (slot == UINT8_MAX) { @@ -293,8 +291,8 @@ static void internal_add_error(grpc_error **err, grpc_error *new_err) { (*err)->first_err = slot; } else { GPR_ASSERT((*err)->last_err != UINT8_MAX); - grpc_linked_error *old_last = - (grpc_linked_error *)((*err)->arena + (*err)->last_err); + grpc_linked_error* old_last = + reinterpret_cast((*err)->arena + (*err)->last_err); old_last->next = slot; (*err)->last_err = slot; } @@ -314,20 +312,21 @@ static void internal_add_error(grpc_error **err, grpc_error *new_err) { // It is very common to include and extra int and string in an error #define SURPLUS_CAPACITY (2 * SLOTS_PER_INT + SLOTS_PER_TIME) -grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc, - grpc_error **referencing, +grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc, + grpc_error** referencing, size_t num_referencing) { - GPR_TIMER_BEGIN("grpc_error_create", 0); - uint8_t initial_arena_capacity = (uint8_t)( + GPR_TIMER_SCOPE("grpc_error_create", 0); + uint8_t initial_arena_capacity = static_cast( DEFAULT_ERROR_CAPACITY + - (uint8_t)(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY); - grpc_error *err = (grpc_error *)gpr_malloc( - sizeof(*err) + initial_arena_capacity * sizeof(intptr_t)); - if (err == NULL) { // TODO(ctiller): make gpr_malloc return NULL + static_cast(num_referencing * SLOTS_PER_LINKED_ERROR) + + SURPLUS_CAPACITY); + 
grpc_error* err = static_cast( + gpr_malloc(sizeof(*err) + initial_arena_capacity * sizeof(intptr_t))); + if (err == nullptr) { // TODO(ctiller): make gpr_malloc return NULL return GRPC_ERROR_OOM; } #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_error_refcount)) { + if (grpc_trace_error_refcount.enabled()) { gpr_log(GPR_DEBUG, "%p create [%s:%d]", err, file, line); } #endif @@ -358,31 +357,32 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc, gpr_atm_no_barrier_store(&err->atomics.error_string, 0); gpr_ref_init(&err->atomics.refs, 1); - GPR_TIMER_END("grpc_error_create", 0); return err; } -static void ref_strs(grpc_error *err) { +static void ref_strs(grpc_error* err) { for (size_t i = 0; i < GRPC_ERROR_STR_MAX; ++i) { uint8_t slot = err->strs[i]; if (slot != UINT8_MAX) { - grpc_slice_ref_internal(*(grpc_slice *)(err->arena + slot)); + grpc_slice_ref_internal( + *reinterpret_cast(err->arena + slot)); } } } -static void ref_errs(grpc_error *err) { +static void ref_errs(grpc_error* err) { uint8_t slot = err->first_err; while (slot != UINT8_MAX) { - grpc_linked_error *lerr = (grpc_linked_error *)(err->arena + slot); + grpc_linked_error* lerr = + reinterpret_cast(err->arena + slot); GRPC_ERROR_REF(lerr->err); slot = lerr->next; } } -static grpc_error *copy_error_and_unref(grpc_error *in) { - GPR_TIMER_BEGIN("copy_error_and_unref", 0); - grpc_error *out; +static grpc_error* copy_error_and_unref(grpc_error* in) { + GPR_TIMER_SCOPE("copy_error_and_unref", 0); + grpc_error* out; if (grpc_error_is_special(in)) { out = GRPC_ERROR_CREATE_FROM_STATIC_STRING("unknown"); if (in == GRPC_ERROR_NONE) { @@ -403,19 +403,20 @@ static grpc_error *copy_error_and_unref(grpc_error *in) { uint8_t new_arena_capacity = in->arena_capacity; // the returned err will be added to, so we ensure this is room to avoid // unneeded allocations. - if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) { - new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2); + if (in->arena_capacity - in->arena_size < + static_cast SLOTS_PER_STR) { + new_arena_capacity = static_cast(3 * new_arena_capacity / 2); } - out = (grpc_error *)gpr_malloc(sizeof(*in) + - new_arena_capacity * sizeof(intptr_t)); + out = static_cast( + gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t))); #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_error_refcount)) { + if (grpc_trace_error_refcount.enabled()) { gpr_log(GPR_DEBUG, "%p create copying %p", out, in); } #endif // bulk memcpy of the rest of the struct. 
size_t skip = sizeof(&out->atomics); - memcpy((void *)((uintptr_t)out + skip), (void *)((uintptr_t)in + skip), + memcpy((void*)((uintptr_t)out + skip), (void*)((uintptr_t)in + skip), sizeof(*in) + (in->arena_size * sizeof(intptr_t)) - skip); // manually set the atomics and the new capacity gpr_atm_no_barrier_store(&out->atomics.error_string, 0); @@ -425,23 +426,21 @@ static grpc_error *copy_error_and_unref(grpc_error *in) { ref_errs(out); GRPC_ERROR_UNREF(in); } - GPR_TIMER_END("copy_error_and_unref", 0); return out; } -grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which, +grpc_error* grpc_error_set_int(grpc_error* src, grpc_error_ints which, intptr_t value) { - GPR_TIMER_BEGIN("grpc_error_set_int", 0); - grpc_error *new_err = copy_error_and_unref(src); + GPR_TIMER_SCOPE("grpc_error_set_int", 0); + grpc_error* new_err = copy_error_and_unref(src); internal_set_int(&new_err, which, value); - GPR_TIMER_END("grpc_error_set_int", 0); return new_err; } typedef struct { - grpc_error *error; + grpc_error* error; grpc_status_code code; - const char *msg; + const char* msg; } special_error_status_map; static special_error_status_map error_status_map[] = { {GRPC_ERROR_NONE, GRPC_STATUS_OK, ""}, @@ -449,42 +448,37 @@ static special_error_status_map error_status_map[] = { {GRPC_ERROR_OOM, GRPC_STATUS_RESOURCE_EXHAUSTED, "Out of memory"}, }; -bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) { - GPR_TIMER_BEGIN("grpc_error_get_int", 0); +bool grpc_error_get_int(grpc_error* err, grpc_error_ints which, intptr_t* p) { + GPR_TIMER_SCOPE("grpc_error_get_int", 0); if (grpc_error_is_special(err)) { if (which == GRPC_ERROR_INT_GRPC_STATUS) { for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); i++) { if (error_status_map[i].error == err) { - if (p != NULL) *p = error_status_map[i].code; - GPR_TIMER_END("grpc_error_get_int", 0); + if (p != nullptr) *p = error_status_map[i].code; return true; } } } - GPR_TIMER_END("grpc_error_get_int", 0); return false; } uint8_t slot = err->ints[which]; if (slot != UINT8_MAX) { - if (p != NULL) *p = err->arena[slot]; - GPR_TIMER_END("grpc_error_get_int", 0); + if (p != nullptr) *p = err->arena[slot]; return true; } - GPR_TIMER_END("grpc_error_get_int", 0); return false; } -grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which, +grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which, grpc_slice str) { - GPR_TIMER_BEGIN("grpc_error_set_str", 0); - grpc_error *new_err = copy_error_and_unref(src); + GPR_TIMER_SCOPE("grpc_error_set_str", 0); + grpc_error* new_err = copy_error_and_unref(src); internal_set_str(&new_err, which, str); - GPR_TIMER_END("grpc_error_set_str", 0); return new_err; } -bool grpc_error_get_str(grpc_error *err, grpc_error_strs which, - grpc_slice *str) { +bool grpc_error_get_str(grpc_error* err, grpc_error_strs which, + grpc_slice* str) { if (grpc_error_is_special(err)) { if (which == GRPC_ERROR_STR_GRPC_MESSAGE) { for (size_t i = 0; i < GPR_ARRAY_SIZE(error_status_map); i++) { @@ -498,53 +492,52 @@ bool grpc_error_get_str(grpc_error *err, grpc_error_strs which, } uint8_t slot = err->strs[which]; if (slot != UINT8_MAX) { - *str = *(grpc_slice *)(err->arena + slot); + *str = *reinterpret_cast(err->arena + slot); return true; } else { return false; } } -grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) { - GPR_TIMER_BEGIN("grpc_error_add_child", 0); - grpc_error *new_err = copy_error_and_unref(src); +grpc_error* grpc_error_add_child(grpc_error* src, grpc_error* child) 
{ + GPR_TIMER_SCOPE("grpc_error_add_child", 0); + grpc_error* new_err = copy_error_and_unref(src); internal_add_error(&new_err, child); - GPR_TIMER_END("grpc_error_add_child", 0); return new_err; } -static const char *no_error_string = "\"No Error\""; -static const char *oom_error_string = "\"Out of memory\""; -static const char *cancelled_error_string = "\"Cancelled\""; +static const char* no_error_string = "\"No Error\""; +static const char* oom_error_string = "\"Out of memory\""; +static const char* cancelled_error_string = "\"Cancelled\""; typedef struct { - char *key; - char *value; + char* key; + char* value; } kv_pair; typedef struct { - kv_pair *kvs; + kv_pair* kvs; size_t num_kvs; size_t cap_kvs; } kv_pairs; -static void append_chr(char c, char **s, size_t *sz, size_t *cap) { +static void append_chr(char c, char** s, size_t* sz, size_t* cap) { if (*sz == *cap) { *cap = GPR_MAX(8, 3 * *cap / 2); - *s = (char *)gpr_realloc(*s, *cap); + *s = static_cast(gpr_realloc(*s, *cap)); } (*s)[(*sz)++] = c; } -static void append_str(const char *str, char **s, size_t *sz, size_t *cap) { - for (const char *c = str; *c; c++) { +static void append_str(const char* str, char** s, size_t* sz, size_t* cap) { + for (const char* c = str; *c; c++) { append_chr(*c, s, sz, cap); } } -static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz, - size_t *cap) { - static const char *hex = "0123456789abcdef"; +static void append_esc_str(const uint8_t* str, size_t len, char** s, size_t* sz, + size_t* cap) { + static const char* hex = "0123456789abcdef"; append_chr('"', s, sz, cap); for (size_t i = 0; i < len; i++, str++) { if (*str < 32 || *str >= 127) { @@ -574,74 +567,74 @@ static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz, break; } } else { - append_chr((char)*str, s, sz, cap); + append_chr(static_cast(*str), s, sz, cap); } } append_chr('"', s, sz, cap); } -static void append_kv(kv_pairs *kvs, char *key, char *value) { +static void append_kv(kv_pairs* kvs, char* key, char* value) { if (kvs->num_kvs == kvs->cap_kvs) { kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4); - kvs->kvs = - (kv_pair *)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs); + kvs->kvs = static_cast( + gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs)); } kvs->kvs[kvs->num_kvs].key = key; kvs->kvs[kvs->num_kvs].value = value; kvs->num_kvs++; } -static char *key_int(grpc_error_ints which) { +static char* key_int(grpc_error_ints which) { return gpr_strdup(error_int_name(which)); } -static char *fmt_int(intptr_t p) { - char *s; +static char* fmt_int(intptr_t p) { + char* s; gpr_asprintf(&s, "%" PRIdPTR, p); return s; } -static void collect_ints_kvs(grpc_error *err, kv_pairs *kvs) { +static void collect_ints_kvs(grpc_error* err, kv_pairs* kvs) { for (size_t which = 0; which < GRPC_ERROR_INT_MAX; ++which) { uint8_t slot = err->ints[which]; if (slot != UINT8_MAX) { - append_kv(kvs, key_int((grpc_error_ints)which), + append_kv(kvs, key_int(static_cast(which)), fmt_int(err->arena[slot])); } } } -static char *key_str(grpc_error_strs which) { +static char* key_str(grpc_error_strs which) { return gpr_strdup(error_str_name(which)); } -static char *fmt_str(grpc_slice slice) { - char *s = NULL; +static char* fmt_str(grpc_slice slice) { + char* s = nullptr; size_t sz = 0; size_t cap = 0; - append_esc_str((const uint8_t *)GRPC_SLICE_START_PTR(slice), + append_esc_str((const uint8_t*)GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice), &s, &sz, &cap); append_chr(0, &s, &sz, &cap); return s; } 
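/* Illustrative sketch (not part of the patch): the append_chr()/append_str()
 * helpers in the hunk above grow a heap buffer to max(8, 3*cap/2) whenever it
 * is full, which keeps the JSON rendering of a grpc_error amortised O(n).
 * The standalone version below mirrors that policy but uses plain realloc()
 * instead of gpr_realloc(), so it is an approximation of the gRPC helpers,
 * not a copy. */

#include <cstdio>
#include <cstdlib>

static void append_chr(char c, char** s, size_t* sz, size_t* cap) {
  if (*sz == *cap) {
    size_t grown = 3 * *cap / 2;
    *cap = grown < 8 ? 8 : grown;  // same max(8, 3*cap/2) growth as the patch
    *s = static_cast<char*>(realloc(*s, *cap));
  }
  (*s)[(*sz)++] = c;
}

static void append_str(const char* str, char** s, size_t* sz, size_t* cap) {
  for (const char* c = str; *c; c++) append_chr(*c, s, sz, cap);
}

int main() {
  char* s = nullptr;
  size_t sz = 0, cap = 0;
  append_chr('{', &s, &sz, &cap);
  append_str("\"errno\":2", &s, &sz, &cap);
  append_chr('}', &s, &sz, &cap);
  append_chr('\0', &s, &sz, &cap);  // NUL-terminate, as fmt_str() does above
  printf("%s\n", s);                // prints {"errno":2}
  free(s);
  return 0;
}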
-static void collect_strs_kvs(grpc_error *err, kv_pairs *kvs) { +static void collect_strs_kvs(grpc_error* err, kv_pairs* kvs) { for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) { uint8_t slot = err->strs[which]; if (slot != UINT8_MAX) { - append_kv(kvs, key_str((grpc_error_strs)which), - fmt_str(*(grpc_slice *)(err->arena + slot))); + append_kv(kvs, key_str(static_cast(which)), + fmt_str(*reinterpret_cast(err->arena + slot))); } } } -static char *key_time(grpc_error_times which) { +static char* key_time(grpc_error_times which) { return gpr_strdup(error_time_name(which)); } -static char *fmt_time(gpr_timespec tm) { - char *out; - const char *pfx = "!!"; +static char* fmt_time(gpr_timespec tm) { + char* out; + const char* pfx = "!!"; switch (tm.clock_type) { case GPR_CLOCK_MONOTONIC: pfx = "@monotonic:"; @@ -660,24 +653,25 @@ static char *fmt_time(gpr_timespec tm) { return out; } -static void collect_times_kvs(grpc_error *err, kv_pairs *kvs) { +static void collect_times_kvs(grpc_error* err, kv_pairs* kvs) { for (size_t which = 0; which < GRPC_ERROR_TIME_MAX; ++which) { uint8_t slot = err->times[which]; if (slot != UINT8_MAX) { - append_kv(kvs, key_time((grpc_error_times)which), - fmt_time(*(gpr_timespec *)(err->arena + slot))); + append_kv(kvs, key_time(static_cast(which)), + fmt_time(*reinterpret_cast(err->arena + slot))); } } } -static void add_errs(grpc_error *err, char **s, size_t *sz, size_t *cap) { +static void add_errs(grpc_error* err, char** s, size_t* sz, size_t* cap) { uint8_t slot = err->first_err; bool first = true; while (slot != UINT8_MAX) { - grpc_linked_error *lerr = (grpc_linked_error *)(err->arena + slot); + grpc_linked_error* lerr = + reinterpret_cast(err->arena + slot); if (!first) append_chr(',', s, sz, cap); first = false; - const char *e = grpc_error_string(lerr->err); + const char* e = grpc_error_string(lerr->err); append_str(e, s, sz, cap); GPR_ASSERT(err->last_err == slot ? 
lerr->next == UINT8_MAX : lerr->next != UINT8_MAX); @@ -685,8 +679,8 @@ static void add_errs(grpc_error *err, char **s, size_t *sz, size_t *cap) { } } -static char *errs_string(grpc_error *err) { - char *s = NULL; +static char* errs_string(grpc_error* err) { + char* s = nullptr; size_t sz = 0; size_t cap = 0; append_chr('[', &s, &sz, &cap); @@ -696,22 +690,22 @@ static char *errs_string(grpc_error *err) { return s; } -static int cmp_kvs(const void *a, const void *b) { - const kv_pair *ka = (const kv_pair *)a; - const kv_pair *kb = (const kv_pair *)b; +static int cmp_kvs(const void* a, const void* b) { + const kv_pair* ka = static_cast(a); + const kv_pair* kb = static_cast(b); return strcmp(ka->key, kb->key); } -static char *finish_kvs(kv_pairs *kvs) { - char *s = NULL; +static char* finish_kvs(kv_pairs* kvs) { + char* s = nullptr; size_t sz = 0; size_t cap = 0; append_chr('{', &s, &sz, &cap); for (size_t i = 0; i < kvs->num_kvs; i++) { if (i != 0) append_chr(',', &s, &sz, &cap); - append_esc_str((const uint8_t *)kvs->kvs[i].key, strlen(kvs->kvs[i].key), - &s, &sz, &cap); + append_esc_str(reinterpret_cast(kvs->kvs[i].key), + strlen(kvs->kvs[i].key), &s, &sz, &cap); gpr_free(kvs->kvs[i].key); append_chr(':', &s, &sz, &cap); append_str(kvs->kvs[i].value, &s, &sz, &cap); @@ -724,16 +718,15 @@ static char *finish_kvs(kv_pairs *kvs) { return s; } -const char *grpc_error_string(grpc_error *err) { - GPR_TIMER_BEGIN("grpc_error_string", 0); +const char* grpc_error_string(grpc_error* err) { + GPR_TIMER_SCOPE("grpc_error_string", 0); if (err == GRPC_ERROR_NONE) return no_error_string; if (err == GRPC_ERROR_OOM) return oom_error_string; if (err == GRPC_ERROR_CANCELLED) return cancelled_error_string; - void *p = (void *)gpr_atm_acq_load(&err->atomics.error_string); - if (p != NULL) { - GPR_TIMER_END("grpc_error_string", 0); - return (const char *)p; + void* p = (void*)gpr_atm_acq_load(&err->atomics.error_string); + if (p != nullptr) { + return static_cast(p); } kv_pairs kvs; @@ -748,25 +741,24 @@ const char *grpc_error_string(grpc_error *err) { qsort(kvs.kvs, kvs.num_kvs, sizeof(kv_pair), cmp_kvs); - char *out = finish_kvs(&kvs); + char* out = finish_kvs(&kvs); if (!gpr_atm_rel_cas(&err->atomics.error_string, 0, (gpr_atm)out)) { gpr_free(out); - out = (char *)gpr_atm_no_barrier_load(&err->atomics.error_string); + out = (char*)gpr_atm_acq_load(&err->atomics.error_string); } - GPR_TIMER_END("grpc_error_string", 0); return out; } -grpc_error *grpc_os_error(const char *file, int line, int err, - const char *call_name) { +grpc_error* grpc_os_error(const char* file, int line, int err, + const char* call_name) { return grpc_error_set_str( grpc_error_set_str( grpc_error_set_int( grpc_error_create(file, line, - grpc_slice_from_static_string("OS Error"), NULL, - 0), + grpc_slice_from_static_string("OS Error"), + nullptr, 0), GRPC_ERROR_INT_ERRNO, err), GRPC_ERROR_STR_OS_ERROR, grpc_slice_from_static_string(strerror(err))), @@ -774,10 +766,10 @@ grpc_error *grpc_os_error(const char *file, int line, int err, } #ifdef GPR_WINDOWS -grpc_error *grpc_wsa_error(const char *file, int line, int err, - const char *call_name) { - char *utf8_message = gpr_format_message(err); - grpc_error *error = grpc_error_set_str( +grpc_error* grpc_wsa_error(const char* file, int line, int err, + const char* call_name) { + char* utf8_message = gpr_format_message(err); + grpc_error* error = grpc_error_set_str( grpc_error_set_str( grpc_error_set_int( grpc_error_create(file, line, @@ -791,10 +783,10 @@ grpc_error *grpc_wsa_error(const char 
*file, int line, int err, } #endif -bool grpc_log_if_error(const char *what, grpc_error *error, const char *file, +bool grpc_log_if_error(const char* what, grpc_error* error, const char* file, int line) { if (error == GRPC_ERROR_NONE) return true; - const char *msg = grpc_error_string(error); + const char* msg = grpc_error_string(error); gpr_log(file, line, GPR_LOG_SEVERITY_ERROR, "%s: %s", what, msg); GRPC_ERROR_UNREF(error); return false; diff --git a/Sources/CgRPC/src/core/lib/iomgr/error.h b/Sources/CgRPC/src/core/lib/iomgr/error.h index b36294869..f8cae4da8 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/error.h +++ b/Sources/CgRPC/src/core/lib/iomgr/error.h @@ -19,28 +19,25 @@ #ifndef GRPC_CORE_LIB_IOMGR_ERROR_H #define GRPC_CORE_LIB_IOMGR_ERROR_H +#include + +#include #include -#include #include #include +#include #include #include "src/core/lib/debug/trace.h" -#ifdef __cplusplus -extern "C" { -#endif - /// Opaque representation of an error. /// See https://github.com/grpc/grpc/blob/master/doc/core/grpc-error.md for a /// full write up of this object. typedef struct grpc_error grpc_error; -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_error_refcount; -#endif +extern grpc_core::DebugOnlyTraceFlag grpc_trace_error_refcount; typedef enum { /// 'errno' from the operating system @@ -122,15 +119,15 @@ typedef enum { /// They are always even so that other code (particularly combiner locks, /// polling engines) can safely use the lower bit for themselves. -#define GRPC_ERROR_NONE ((grpc_error *)NULL) -#define GRPC_ERROR_OOM ((grpc_error *)2) -#define GRPC_ERROR_CANCELLED ((grpc_error *)4) +#define GRPC_ERROR_NONE ((grpc_error*)NULL) +#define GRPC_ERROR_OOM ((grpc_error*)2) +#define GRPC_ERROR_CANCELLED ((grpc_error*)4) -const char *grpc_error_string(grpc_error *error); +const char* grpc_error_string(grpc_error* error); /// Create an error - but use GRPC_ERROR_CREATE instead -grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc, - grpc_error **referencing, size_t num_referencing); +grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc, + grpc_error** referencing, size_t num_referencing); /// Create an error (this is the preferred way of generating an error that is /// not due to a system call - for system calls, use GRPC_OS_ERROR or /// GRPC_WSA_ERROR as appropriate) @@ -156,50 +153,55 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc, errs, count) #ifndef NDEBUG -grpc_error *grpc_error_ref(grpc_error *err, const char *file, int line); -void grpc_error_unref(grpc_error *err, const char *file, int line); +grpc_error* grpc_error_ref(grpc_error* err, const char* file, int line); +void grpc_error_unref(grpc_error* err, const char* file, int line); #define GRPC_ERROR_REF(err) grpc_error_ref(err, __FILE__, __LINE__) #define GRPC_ERROR_UNREF(err) grpc_error_unref(err, __FILE__, __LINE__) #else -grpc_error *grpc_error_ref(grpc_error *err); -void grpc_error_unref(grpc_error *err); +grpc_error* grpc_error_ref(grpc_error* err); +void grpc_error_unref(grpc_error* err); #define GRPC_ERROR_REF(err) grpc_error_ref(err) #define GRPC_ERROR_UNREF(err) grpc_error_unref(err) #endif -grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which, +grpc_error* grpc_error_set_int(grpc_error* src, grpc_error_ints which, intptr_t value) GRPC_MUST_USE_RESULT; -bool grpc_error_get_int(grpc_error *error, grpc_error_ints which, intptr_t *p); -grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which, +bool 
grpc_error_get_int(grpc_error* error, grpc_error_ints which, intptr_t* p); +/// This call takes ownership of the slice; the error is responsible for +/// eventually unref-ing it. +grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which, grpc_slice str) GRPC_MUST_USE_RESULT; /// Returns false if the specified string is not set. /// Caller does NOT own the slice. -bool grpc_error_get_str(grpc_error *error, grpc_error_strs which, - grpc_slice *s); +bool grpc_error_get_str(grpc_error* error, grpc_error_strs which, + grpc_slice* s); /// Add a child error: an error that is believed to have contributed to this /// error occurring. Allows root causing high level errors from lower level -/// errors that contributed to them. -grpc_error *grpc_error_add_child(grpc_error *src, - grpc_error *child) GRPC_MUST_USE_RESULT; -grpc_error *grpc_os_error(const char *file, int line, int err, - const char *call_name) GRPC_MUST_USE_RESULT; +/// errors that contributed to them. The src error takes ownership of the +/// child error. +grpc_error* grpc_error_add_child(grpc_error* src, + grpc_error* child) GRPC_MUST_USE_RESULT; +grpc_error* grpc_os_error(const char* file, int line, int err, + const char* call_name) GRPC_MUST_USE_RESULT; + +inline grpc_error* grpc_assert_never_ok(grpc_error* error) { + GPR_ASSERT(error != GRPC_ERROR_NONE); + return error; +} + /// create an error associated with errno!=0 (an 'operating system' error) #define GRPC_OS_ERROR(err, call_name) \ - grpc_os_error(__FILE__, __LINE__, err, call_name) -grpc_error *grpc_wsa_error(const char *file, int line, int err, - const char *call_name) GRPC_MUST_USE_RESULT; + grpc_assert_never_ok(grpc_os_error(__FILE__, __LINE__, err, call_name)) +grpc_error* grpc_wsa_error(const char* file, int line, int err, + const char* call_name) GRPC_MUST_USE_RESULT; /// windows only: create an error associated with WSAGetLastError()!=0 #define GRPC_WSA_ERROR(err, call_name) \ grpc_wsa_error(__FILE__, __LINE__, err, call_name) -bool grpc_log_if_error(const char *what, grpc_error *error, const char *file, +bool grpc_log_if_error(const char* what, grpc_error* error, const char* file, int line); #define GRPC_LOG_IF_ERROR(what, error) \ grpc_log_if_error((what), (error), __FILE__, __LINE__) -#ifdef __cplusplus -} -#endif - #endif /* GRPC_CORE_LIB_IOMGR_ERROR_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/error_internal.h b/Sources/CgRPC/src/core/lib/iomgr/error_internal.h index 750748455..7fde347ab 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/error_internal.h +++ b/Sources/CgRPC/src/core/lib/iomgr/error_internal.h @@ -19,15 +19,18 @@ #ifndef GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H #define GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H +#include + #include #include // TODO, do we need this? 
#include +#include "src/core/lib/iomgr/error.h" typedef struct grpc_linked_error grpc_linked_error; struct grpc_linked_error { - grpc_error *err; + grpc_error* err; uint8_t next; }; @@ -55,6 +58,6 @@ struct grpc_error { intptr_t arena[0]; }; -bool grpc_error_is_special(grpc_error *err); +bool grpc_error_is_special(struct grpc_error* err); #endif /* GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.c b/Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.cc similarity index 63% rename from Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.c rename to Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.cc index 3ac12ab56..e5db1be0e 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.cc @@ -16,15 +16,21 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" -/* This polling engine is only relevant on linux kernels supporting epoll() */ -#ifdef GRPC_LINUX_EPOLL +#include +/* This polling engine is only relevant on linux kernels supporting epoll + epoll_create() or epoll_create1() */ +#ifdef GRPC_LINUX_EPOLL #include "src/core/lib/iomgr/ev_epoll1_linux.h" #include #include +#include +#include #include #include #include @@ -34,19 +40,19 @@ #include #include -#include #include -#include -#include #include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/tls.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/lockfree_event.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" #include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" -#include "src/core/lib/support/string.h" static grpc_wakeup_fd global_wakeup_fd; @@ -82,11 +88,28 @@ typedef struct epoll_set { /* The global singleton epoll set */ static epoll_set g_epoll_set; +static int epoll_create_and_cloexec() { +#ifdef GRPC_LINUX_EPOLL_CREATE1 + int fd = epoll_create1(EPOLL_CLOEXEC); + if (fd < 0) { + gpr_log(GPR_ERROR, "epoll_create1 unavailable"); + } +#else + int fd = epoll_create(MAX_EPOLL_EVENTS); + if (fd < 0) { + gpr_log(GPR_ERROR, "epoll_create unavailable"); + } else if (fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) { + gpr_log(GPR_ERROR, "fcntl following epoll_create failed"); + return -1; + } +#endif + return fd; +} + /* Must be called *only* once */ static bool epoll_set_init() { - g_epoll_set.epfd = epoll_create1(EPOLL_CLOEXEC); + g_epoll_set.epfd = epoll_create_and_cloexec(); if (g_epoll_set.epfd < 0) { - gpr_log(GPR_ERROR, "epoll unavailable"); return false; } @@ -111,10 +134,10 @@ static void epoll_set_shutdown() { struct grpc_fd { int fd; - gpr_atm read_closure; - gpr_atm write_closure; + grpc_core::ManualConstructor read_closure; + grpc_core::ManualConstructor write_closure; - struct grpc_fd *freelist_next; + struct grpc_fd* freelist_next; /* The pollset that last noticed that the fd is readable. 
The actual type * stored in this is (grpc_pollset *) */ @@ -132,7 +155,7 @@ static void fd_global_shutdown(void); typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state; -static const char *kick_state_string(kick_state st) { +static const char* kick_state_string(kick_state st) { switch (st) { case UNKICKED: return "UNKICKED"; @@ -148,8 +171,8 @@ struct grpc_pollset_worker { kick_state state; int kick_state_mutator; // which line of code last changed kick state bool initialized_cv; - grpc_pollset_worker *next; - grpc_pollset_worker *prev; + grpc_pollset_worker* next; + grpc_pollset_worker* prev; gpr_cv cv; grpc_closure_list schedule_on_end_work; }; @@ -164,29 +187,29 @@ struct grpc_pollset_worker { typedef struct pollset_neighborhood { gpr_mu mu; - grpc_pollset *active_root; + grpc_pollset* active_root; char pad[GPR_CACHELINE_SIZE]; } pollset_neighborhood; struct grpc_pollset { gpr_mu mu; - pollset_neighborhood *neighborhood; + pollset_neighborhood* neighborhood; bool reassigning_neighborhood; - grpc_pollset_worker *root_worker; + grpc_pollset_worker* root_worker; bool kicked_without_poller; /* Set to true if the pollset is observed to have no workers available to poll */ bool seen_inactive; bool shutting_down; /* Is the pollset shutting down ? */ - grpc_closure *shutdown_closure; /* Called after after shutdown is complete */ + grpc_closure* shutdown_closure; /* Called after after shutdown is complete */ /* Number of workers who are *about-to* attach themselves to the pollset * worker list */ int begin_refs; - grpc_pollset *next; - grpc_pollset *prev; + grpc_pollset* next; + grpc_pollset* prev; }; /******************************************************************************* @@ -201,8 +224,8 @@ struct grpc_pollset_set { * Common helpers */ -static bool append_error(grpc_error **composite, grpc_error *error, - const char *desc) { +static bool append_error(grpc_error** composite, grpc_error* error, + const char* desc) { if (error == GRPC_ERROR_NONE) return true; if (*composite == GRPC_ERROR_NONE) { *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc); @@ -233,7 +256,7 @@ static bool append_error(grpc_error **composite, grpc_error *error, * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a * case occurs. 
*/ -static grpc_fd *fd_freelist = NULL; +static grpc_fd* fd_freelist = nullptr; static gpr_mu fd_freelist_mu; static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } @@ -241,47 +264,49 @@ static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } static void fd_global_shutdown(void) { gpr_mu_lock(&fd_freelist_mu); gpr_mu_unlock(&fd_freelist_mu); - while (fd_freelist != NULL) { - grpc_fd *fd = fd_freelist; + while (fd_freelist != nullptr) { + grpc_fd* fd = fd_freelist; fd_freelist = fd_freelist->freelist_next; gpr_free(fd); } gpr_mu_destroy(&fd_freelist_mu); } -static grpc_fd *fd_create(int fd, const char *name) { - grpc_fd *new_fd = NULL; +static grpc_fd* fd_create(int fd, const char* name) { + grpc_fd* new_fd = nullptr; gpr_mu_lock(&fd_freelist_mu); - if (fd_freelist != NULL) { + if (fd_freelist != nullptr) { new_fd = fd_freelist; fd_freelist = fd_freelist->freelist_next; } gpr_mu_unlock(&fd_freelist_mu); - if (new_fd == NULL) { - new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd)); + if (new_fd == nullptr) { + new_fd = static_cast(gpr_malloc(sizeof(grpc_fd))); + new_fd->read_closure.Init(); + new_fd->write_closure.Init(); } new_fd->fd = fd; - grpc_lfev_init(&new_fd->read_closure); - grpc_lfev_init(&new_fd->write_closure); + new_fd->read_closure->InitEvent(); + new_fd->write_closure->InitEvent(); gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); - new_fd->freelist_next = NULL; + new_fd->freelist_next = nullptr; - char *fd_name; + char* fd_name; gpr_asprintf(&fd_name, "%s fd=%d", name, fd); grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name); #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { + if (grpc_trace_fd_refcount.enabled()) { gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name); } #endif gpr_free(fd_name); struct epoll_event ev; - ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET); + ev.events = static_cast(EPOLLIN | EPOLLOUT | EPOLLET); ev.data.ptr = new_fd; if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) { gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno)); @@ -290,37 +315,34 @@ static grpc_fd *fd_create(int fd, const char *name) { return new_fd; } -static int fd_wrapped_fd(grpc_fd *fd) { return fd->fd; } +static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; } /* if 'releasing_fd' is true, it means that we are going to detach the internal * fd from grpc_fd structure (i.e which means we should not be calling * shutdown() syscall on that fd) */ -static void fd_shutdown_internal(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_error *why, bool releasing_fd) { - if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure, - GRPC_ERROR_REF(why))) { +static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why, + bool releasing_fd) { + if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) { if (!releasing_fd) { shutdown(fd->fd, SHUT_RDWR); } - grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why)); + fd->write_closure->SetShutdown(GRPC_ERROR_REF(why)); } GRPC_ERROR_UNREF(why); } /* Might be called multiple times */ -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - fd_shutdown_internal(exec_ctx, fd, why, false); +static void fd_shutdown(grpc_fd* fd, grpc_error* why) { + fd_shutdown_internal(fd, why, false); } -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, - bool already_closed, const char *reason) { - grpc_error *error = GRPC_ERROR_NONE; - bool is_release_fd = (release_fd != NULL); +static void 
fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, + bool already_closed, const char* reason) { + grpc_error* error = GRPC_ERROR_NONE; + bool is_release_fd = (release_fd != nullptr); - if (!grpc_lfev_is_shutdown(&fd->read_closure)) { - fd_shutdown_internal(exec_ctx, fd, - GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason), + if (!fd->read_closure->IsShutdown()) { + fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason), is_release_fd); } @@ -332,11 +354,11 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, close(fd->fd); } - GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error)); grpc_iomgr_unregister_object(&fd->iomgr_object); - grpc_lfev_destroy(&fd->read_closure); - grpc_lfev_destroy(&fd->write_closure); + fd->read_closure->DestroyEvent(); + fd->write_closure->DestroyEvent(); gpr_mu_lock(&fd_freelist_mu); fd->freelist_next = fd_freelist; @@ -344,36 +366,30 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_mu_unlock(&fd_freelist_mu); } -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { +static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) { gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); - return (grpc_pollset *)notifier; + return (grpc_pollset*)notifier; } -static bool fd_is_shutdown(grpc_fd *fd) { - return grpc_lfev_is_shutdown(&fd->read_closure); +static bool fd_is_shutdown(grpc_fd* fd) { + return fd->read_closure->IsShutdown(); } -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); +static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) { + fd->read_closure->NotifyOn(closure); } -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); +static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) { + fd->write_closure->NotifyOn(closure); } -static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); +static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) { + fd->read_closure->SetReady(); /* Use release store to match with acquire load in fd_get_read_notifier */ gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); } -static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); -} +static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); } /******************************************************************************* * Pollset Definitions @@ -385,12 +401,12 @@ GPR_TLS_DECL(g_current_thread_worker); /* The designated poller */ static gpr_atm g_active_poller; -static pollset_neighborhood *g_neighborhoods; +static pollset_neighborhood* g_neighborhoods; static size_t g_num_neighborhoods; /* Return true if first in list */ -static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) { - if (pollset->root_worker == NULL) { +static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) { + if (pollset->root_worker == nullptr) { pollset->root_worker = worker; worker->next = worker->prev = worker; return true; @@ -406,11 +422,11 @@ static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) { /* Return true if last in 
list */ typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result; -static worker_remove_result worker_remove(grpc_pollset *pollset, - grpc_pollset_worker *worker) { +static worker_remove_result worker_remove(grpc_pollset* pollset, + grpc_pollset_worker* worker) { if (worker == pollset->root_worker) { if (worker == worker->next) { - pollset->root_worker = NULL; + pollset->root_worker = nullptr; return EMPTIED; } else { pollset->root_worker = worker->next; @@ -426,26 +442,26 @@ static worker_remove_result worker_remove(grpc_pollset *pollset, } static size_t choose_neighborhood(void) { - return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods; + return static_cast(gpr_cpu_current_cpu()) % g_num_neighborhoods; } -static grpc_error *pollset_global_init(void) { +static grpc_error* pollset_global_init(void) { gpr_tls_init(&g_current_thread_pollset); gpr_tls_init(&g_current_thread_worker); gpr_atm_no_barrier_store(&g_active_poller, 0); global_wakeup_fd.read_fd = -1; - grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd); + grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd); if (err != GRPC_ERROR_NONE) return err; struct epoll_event ev; - ev.events = (uint32_t)(EPOLLIN | EPOLLET); + ev.events = static_cast(EPOLLIN | EPOLLET); ev.data.ptr = &global_wakeup_fd; if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd, &ev) != 0) { return GRPC_OS_ERROR(errno, "epoll_ctl"); } g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS); - g_neighborhoods = (pollset_neighborhood *)gpr_zalloc( - sizeof(*g_neighborhoods) * g_num_neighborhoods); + g_neighborhoods = static_cast( + gpr_zalloc(sizeof(*g_neighborhoods) * g_num_neighborhoods)); for (size_t i = 0; i < g_num_neighborhoods; i++) { gpr_mu_init(&g_neighborhoods[i].mu); } @@ -462,24 +478,24 @@ static void pollset_global_shutdown(void) { gpr_free(g_neighborhoods); } -static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { +static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) { gpr_mu_init(&pollset->mu); *mu = &pollset->mu; pollset->neighborhood = &g_neighborhoods[choose_neighborhood()]; pollset->reassigning_neighborhood = false; - pollset->root_worker = NULL; + pollset->root_worker = nullptr; pollset->kicked_without_poller = false; pollset->seen_inactive = true; pollset->shutting_down = false; - pollset->shutdown_closure = NULL; + pollset->shutdown_closure = nullptr; pollset->begin_refs = 0; - pollset->next = pollset->prev = NULL; + pollset->next = pollset->prev = nullptr; } -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +static void pollset_destroy(grpc_pollset* pollset) { gpr_mu_lock(&pollset->mu); if (!pollset->seen_inactive) { - pollset_neighborhood *neighborhood = pollset->neighborhood; + pollset_neighborhood* neighborhood = pollset->neighborhood; gpr_mu_unlock(&pollset->mu); retry_lock_neighborhood: gpr_mu_lock(&neighborhood->mu); @@ -495,7 +511,7 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { pollset->next->prev = pollset->prev; if (pollset == pollset->neighborhood->active_root) { pollset->neighborhood->active_root = - pollset->next == pollset ? NULL : pollset->next; + pollset->next == pollset ? 
nullptr : pollset->next; } } gpr_mu_unlock(&pollset->neighborhood->mu); @@ -504,27 +520,26 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { gpr_mu_destroy(&pollset->mu); } -static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { - GPR_TIMER_BEGIN("pollset_kick_all", 0); - grpc_error *error = GRPC_ERROR_NONE; - if (pollset->root_worker != NULL) { - grpc_pollset_worker *worker = pollset->root_worker; +static grpc_error* pollset_kick_all(grpc_pollset* pollset) { + GPR_TIMER_SCOPE("pollset_kick_all", 0); + grpc_error* error = GRPC_ERROR_NONE; + if (pollset->root_worker != nullptr) { + grpc_pollset_worker* worker = pollset->root_worker; do { - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK(); switch (worker->state) { case KICKED: - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); break; case UNKICKED: SET_KICK_STATE(worker, KICKED); if (worker->initialized_cv) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); gpr_cv_signal(&worker->cv); } break; case DESIGNATED_POLLER: - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); SET_KICK_STATE(worker, KICKED); append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd), "pollset_kick_all"); @@ -536,51 +551,38 @@ static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx, } // TODO: sreek. Check if we need to set 'kicked_without_poller' to true here // in the else case - GPR_TIMER_END("pollset_kick_all", 0); return error; } -static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { - if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL && +static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) { + if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr && pollset->begin_refs == 0) { GPR_TIMER_MARK("pollset_finish_shutdown", 0); - GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE); - pollset->shutdown_closure = NULL; + GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE); + pollset->shutdown_closure = nullptr; } } -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { - GPR_TIMER_BEGIN("pollset_shutdown", 0); - GPR_ASSERT(pollset->shutdown_closure == NULL); +static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { + GPR_TIMER_SCOPE("pollset_shutdown", 0); + GPR_ASSERT(pollset->shutdown_closure == nullptr); GPR_ASSERT(!pollset->shutting_down); pollset->shutdown_closure = closure; pollset->shutting_down = true; - GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset)); - pollset_maybe_finish_shutdown(exec_ctx, pollset); - GPR_TIMER_END("pollset_shutdown", 0); + GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset)); + pollset_maybe_finish_shutdown(pollset); } -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { - return -1; - } - - if (gpr_time_cmp(deadline, now) <= 0) { +static int poll_deadline_to_millis_timeout(grpc_millis millis) { + if (millis == GRPC_MILLIS_INF_FUTURE) return -1; + grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now(); + if (delta > INT_MAX) { + return INT_MAX; + } else if (delta < 0) { return 0; + } else { + return static_cast(delta); } - - static const gpr_timespec 
round_up = { - 0, /* tv_sec */ - GPR_NS_PER_MS - 1, /* tv_nsec */ - GPR_TIMESPAN /* clock_type */ - }; - timeout = gpr_time_sub(deadline, now); - int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up)); - return millis >= 1 ? millis : 1; } /* Process the epoll events found by do_epoll_wait() function. @@ -591,41 +593,39 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline, NOTE ON SYNCRHONIZATION: Similar to do_epoll_wait(), this function is only called by g_active_poller thread. So there is no need for synchronization when accessing fields in g_epoll_set */ -static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { - static const char *err_desc = "process_events"; - grpc_error *error = GRPC_ERROR_NONE; +static grpc_error* process_epoll_events(grpc_pollset* pollset) { + GPR_TIMER_SCOPE("process_epoll_events", 0); - GPR_TIMER_BEGIN("process_epoll_events", 0); + static const char* err_desc = "process_events"; + grpc_error* error = GRPC_ERROR_NONE; long num_events = gpr_atm_acq_load(&g_epoll_set.num_events); long cursor = gpr_atm_acq_load(&g_epoll_set.cursor); for (int idx = 0; (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events; idx++) { long c = cursor++; - struct epoll_event *ev = &g_epoll_set.events[c]; - void *data_ptr = ev->data.ptr; + struct epoll_event* ev = &g_epoll_set.events[c]; + void* data_ptr = ev->data.ptr; if (data_ptr == &global_wakeup_fd) { append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd), err_desc); } else { - grpc_fd *fd = (grpc_fd *)(data_ptr); + grpc_fd* fd = static_cast(data_ptr); bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0; bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0; bool write_ev = (ev->events & EPOLLOUT) != 0; if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd, pollset); + fd_become_readable(fd, pollset); } if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); + fd_become_writable(fd); } } } gpr_atm_rel_store(&g_epoll_set.cursor, cursor); - GPR_TIMER_END("process_epoll_events", 0); return error; } @@ -636,17 +636,16 @@ static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx, NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller (i.e the designated poller thread) will be calling this function. 
So there is no need for any synchronization when accesing fields in g_epoll_set */ -static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, - gpr_timespec now, gpr_timespec deadline) { - GPR_TIMER_BEGIN("do_epoll_wait", 0); +static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) { + GPR_TIMER_SCOPE("do_epoll_wait", 0); int r; - int timeout = poll_deadline_to_millis_timeout(deadline, now); + int timeout = poll_deadline_to_millis_timeout(deadline); if (timeout != 0) { GRPC_SCHEDULING_START_BLOCKING_REGION; } do { - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); + GRPC_STATS_INC_SYSCALL_POLL(); r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS, timeout); } while (r < 0 && errno == EINTR); @@ -656,31 +655,30 @@ static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait"); - GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r); + GRPC_STATS_INC_POLL_EVENTS_RETURNED(r); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "ps: %p poll got %d events", ps, r); } gpr_atm_rel_store(&g_epoll_set.num_events, r); gpr_atm_rel_store(&g_epoll_set.cursor, 0); - GPR_TIMER_END("do_epoll_wait", 0); return GRPC_ERROR_NONE; } -static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, - grpc_pollset_worker **worker_hdl, gpr_timespec *now, - gpr_timespec deadline) { - GPR_TIMER_BEGIN("begin_worker", 0); - if (worker_hdl != NULL) *worker_hdl = worker; +static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker, + grpc_pollset_worker** worker_hdl, + grpc_millis deadline) { + GPR_TIMER_SCOPE("begin_worker", 0); + if (worker_hdl != nullptr) *worker_hdl = worker; worker->initialized_cv = false; SET_KICK_STATE(worker, UNKICKED); worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; pollset->begin_refs++; - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "PS:%p BEGIN_STARTS:%p", pollset, worker); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p BEGIN_STARTS:%p", pollset, worker); } if (pollset->seen_inactive) { @@ -692,14 +690,14 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, pollset->reassigning_neighborhood = true; pollset->neighborhood = &g_neighborhoods[choose_neighborhood()]; } - pollset_neighborhood *neighborhood = pollset->neighborhood; + pollset_neighborhood* neighborhood = pollset->neighborhood; gpr_mu_unlock(&pollset->mu); // pollset unlocked: state may change (even worker->kick_state) retry_lock_neighborhood: gpr_mu_lock(&neighborhood->mu); gpr_mu_lock(&pollset->mu); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d", + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d", pollset, worker, kick_state_string(worker->state), is_reassigning); } @@ -722,7 +720,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, not visible in the "kick any" path yet */ if (worker->state == UNKICKED) { pollset->seen_inactive = false; - if (neighborhood->active_root == NULL) { + if (neighborhood->active_root == nullptr) { neighborhood->active_root = pollset->next = pollset->prev = pollset; /* Make this the designated poller if there isn't one already */ if (worker->state == UNKICKED && @@ -750,24 +748,25 @@ static bool 
begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, worker->initialized_cv = true; gpr_cv_init(&worker->cv); while (worker->state == UNKICKED && !pollset->shutting_down) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d", + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d", pollset, worker, kick_state_string(worker->state), pollset->shutting_down); } - if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) && + if (gpr_cv_wait(&worker->cv, &pollset->mu, + grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) && worker->state == UNKICKED) { /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker received a kick */ SET_KICK_STATE(worker, KICKED); } } - *now = gpr_now(now->clock_type); + grpc_core::ExecCtx::Get()->InvalidateNow(); } - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d " "kicked_without_poller: %d", pollset, worker, kick_state_string(worker->state), @@ -785,45 +784,43 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, if (pollset->kicked_without_poller) { pollset->kicked_without_poller = false; - GPR_TIMER_END("begin_worker", 0); return false; } - GPR_TIMER_END("begin_worker", 0); return worker->state == DESIGNATED_POLLER && !pollset->shutting_down; } static bool check_neighborhood_for_available_poller( - grpc_exec_ctx *exec_ctx, pollset_neighborhood *neighborhood) { - GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0); + pollset_neighborhood* neighborhood) { + GPR_TIMER_SCOPE("check_neighborhood_for_available_poller", 0); bool found_worker = false; do { - grpc_pollset *inspect = neighborhood->active_root; - if (inspect == NULL) { + grpc_pollset* inspect = neighborhood->active_root; + if (inspect == nullptr) { break; } gpr_mu_lock(&inspect->mu); GPR_ASSERT(!inspect->seen_inactive); - grpc_pollset_worker *inspect_worker = inspect->root_worker; - if (inspect_worker != NULL) { + grpc_pollset_worker* inspect_worker = inspect->root_worker; + if (inspect_worker != nullptr) { do { switch (inspect_worker->state) { case UNKICKED: if (gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)inspect_worker)) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. choose next poller to be %p", + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. choose next poller to be %p", inspect_worker); } SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER); if (inspect_worker->initialized_cv) { GPR_TIMER_MARK("signal worker", 0); - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); gpr_cv_signal(&inspect_worker->cv); } } else { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. beaten to choose next poller"); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. beaten to choose next poller"); } } // even if we didn't win the cas, there's a worker, we can stop @@ -840,65 +837,62 @@ static bool check_neighborhood_for_available_poller( } while (!found_worker && inspect_worker != inspect->root_worker); } if (!found_worker) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. 
mark pollset %p inactive", inspect); } inspect->seen_inactive = true; if (inspect == neighborhood->active_root) { neighborhood->active_root = - inspect->next == inspect ? NULL : inspect->next; + inspect->next == inspect ? nullptr : inspect->next; } inspect->next->prev = inspect->prev; inspect->prev->next = inspect->next; - inspect->next = inspect->prev = NULL; + inspect->next = inspect->prev = nullptr; } gpr_mu_unlock(&inspect->mu); } while (!found_worker); - GPR_TIMER_END("check_neighborhood_for_available_poller", 0); return found_worker; } -static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *worker, - grpc_pollset_worker **worker_hdl) { - GPR_TIMER_BEGIN("end_worker", 0); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker); +static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker, + grpc_pollset_worker** worker_hdl) { + GPR_TIMER_SCOPE("end_worker", 0); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p END_WORKER:%p", pollset, worker); } - if (worker_hdl != NULL) *worker_hdl = NULL; + if (worker_hdl != nullptr) *worker_hdl = nullptr; /* Make sure we appear kicked */ SET_KICK_STATE(worker, KICKED); grpc_closure_list_move(&worker->schedule_on_end_work, - &exec_ctx->closure_list); + grpc_core::ExecCtx::Get()->closure_list()); if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) { if (worker->next != worker && worker->next->state == UNKICKED) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. choose next poller to be peer %p", worker); } GPR_ASSERT(worker->next->initialized_cv); gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next); SET_KICK_STATE(worker->next, DESIGNATED_POLLER); - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); gpr_cv_signal(&worker->next->cv); - if (grpc_exec_ctx_has_work(exec_ctx)) { + if (grpc_core::ExecCtx::Get()->HasWork()) { gpr_mu_unlock(&pollset->mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(&pollset->mu); } } else { gpr_atm_no_barrier_store(&g_active_poller, 0); size_t poller_neighborhood_idx = - (size_t)(pollset->neighborhood - g_neighborhoods); + static_cast(pollset->neighborhood - g_neighborhoods); gpr_mu_unlock(&pollset->mu); bool found_worker = false; bool scan_state[MAX_NEIGHBORHOODS]; for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) { - pollset_neighborhood *neighborhood = + pollset_neighborhood* neighborhood = &g_neighborhoods[(poller_neighborhood_idx + i) % g_num_neighborhoods]; if (gpr_mu_trylock(&neighborhood->mu)) { - found_worker = - check_neighborhood_for_available_poller(exec_ctx, neighborhood); + found_worker = check_neighborhood_for_available_poller(neighborhood); gpr_mu_unlock(&neighborhood->mu); scan_state[i] = true; } else { @@ -907,53 +901,50 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) { if (scan_state[i]) continue; - pollset_neighborhood *neighborhood = + pollset_neighborhood* neighborhood = &g_neighborhoods[(poller_neighborhood_idx + i) % g_num_neighborhoods]; gpr_mu_lock(&neighborhood->mu); - found_worker = - check_neighborhood_for_available_poller(exec_ctx, neighborhood); + found_worker = check_neighborhood_for_available_poller(neighborhood); 
gpr_mu_unlock(&neighborhood->mu); } - grpc_exec_ctx_flush(exec_ctx); + grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(&pollset->mu); } - } else if (grpc_exec_ctx_has_work(exec_ctx)) { + } else if (grpc_core::ExecCtx::Get()->HasWork()) { gpr_mu_unlock(&pollset->mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(&pollset->mu); } if (worker->initialized_cv) { gpr_cv_destroy(&worker->cv); } - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. remove worker"); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. remove worker"); } if (EMPTIED == worker_remove(pollset, worker)) { - pollset_maybe_finish_shutdown(exec_ctx, pollset); + pollset_maybe_finish_shutdown(pollset); } GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker); - GPR_TIMER_END("end_worker", 0); } /* pollset->po.mu lock must be held by the caller before calling this. The function pollset_work() may temporarily release the lock (pollset->po.mu) during the course of its execution but it will always re-acquire the lock and ensure that it is held by the time the function returns */ -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, - grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { +static grpc_error* pollset_work(grpc_pollset* ps, + grpc_pollset_worker** worker_hdl, + grpc_millis deadline) { + GPR_TIMER_SCOPE("pollset_work", 0); grpc_pollset_worker worker; - grpc_error *error = GRPC_ERROR_NONE; - static const char *err_desc = "pollset_work"; - GPR_TIMER_BEGIN("pollset_work", 0); + grpc_error* error = GRPC_ERROR_NONE; + static const char* err_desc = "pollset_work"; if (ps->kicked_without_poller) { ps->kicked_without_poller = false; - GPR_TIMER_END("pollset_work", 0); return GRPC_ERROR_NONE; } - if (begin_worker(ps, &worker, worker_hdl, &now, deadline)) { + if (begin_worker(ps, &worker, worker_hdl, deadline)) { gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps); gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); GPR_ASSERT(!ps->shutting_down); @@ -971,15 +962,14 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, process_epoll_events() returns very quickly: It just queues the work on exec_ctx but does not execute it (the actual exectution or more - accurately grpc_exec_ctx_flush() happens in end_worker() AFTER selecting - a designated poller). So we are not waiting long periods without a - designated poller */ + accurately grpc_core::ExecCtx::Get()->Flush() happens in end_worker() + AFTER selecting a designated poller). 
So we are not waiting long periods + without a designated poller */ if (gpr_atm_acq_load(&g_epoll_set.cursor) == gpr_atm_acq_load(&g_epoll_set.num_events)) { - append_error(&error, do_epoll_wait(exec_ctx, ps, now, deadline), - err_desc); + append_error(&error, do_epoll_wait(ps, deadline), err_desc); } - append_error(&error, process_epoll_events(exec_ctx, ps), err_desc); + append_error(&error, process_epoll_events(ps), err_desc); gpr_mu_lock(&ps->mu); /* lock */ @@ -987,87 +977,86 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, } else { gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps); } - end_worker(exec_ctx, ps, &worker, worker_hdl); + end_worker(ps, &worker, worker_hdl); gpr_tls_set(&g_current_thread_pollset, 0); - GPR_TIMER_END("pollset_work", 0); return error; } -static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *specific_worker) { - GPR_TIMER_BEGIN("pollset_kick", 0); - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); - grpc_error *ret_err = GRPC_ERROR_NONE; - if (GRPC_TRACER_ON(grpc_polling_trace)) { +static grpc_error* pollset_kick(grpc_pollset* pollset, + grpc_pollset_worker* specific_worker) { + GPR_TIMER_SCOPE("pollset_kick", 0); + GRPC_STATS_INC_POLLSET_KICK(); + grpc_error* ret_err = GRPC_ERROR_NONE; + if (grpc_polling_trace.enabled()) { gpr_strvec log; gpr_strvec_init(&log); - char *tmp; - gpr_asprintf( - &tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset, - specific_worker, (void *)gpr_tls_get(&g_current_thread_pollset), - (void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker); + char* tmp; + gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset, + specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset), + (void*)gpr_tls_get(&g_current_thread_worker), + pollset->root_worker); gpr_strvec_add(&log, tmp); - if (pollset->root_worker != NULL) { + if (pollset->root_worker != nullptr) { gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}", kick_state_string(pollset->root_worker->state), pollset->root_worker->next, kick_state_string(pollset->root_worker->next->state)); gpr_strvec_add(&log, tmp); } - if (specific_worker != NULL) { + if (specific_worker != nullptr) { gpr_asprintf(&tmp, " worker_kick_state=%s", kick_state_string(specific_worker->state)); gpr_strvec_add(&log, tmp); } - tmp = gpr_strvec_flatten(&log, NULL); + tmp = gpr_strvec_flatten(&log, nullptr); gpr_strvec_destroy(&log); - gpr_log(GPR_ERROR, "%s", tmp); + gpr_log(GPR_DEBUG, "%s", tmp); gpr_free(tmp); } - if (specific_worker == NULL) { + if (specific_worker == nullptr) { if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) { - grpc_pollset_worker *root_worker = pollset->root_worker; - if (root_worker == NULL) { - GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx); + grpc_pollset_worker* root_worker = pollset->root_worker; + if (root_worker == nullptr) { + GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(); pollset->kicked_without_poller = true; - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. kicked_without_poller"); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. kicked_without_poller"); } goto done; } - grpc_pollset_worker *next_worker = root_worker->next; + grpc_pollset_worker* next_worker = root_worker->next; if (root_worker->state == KICKED) { - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. 
already kicked %p", root_worker); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. already kicked %p", root_worker); } SET_KICK_STATE(root_worker, KICKED); goto done; } else if (next_worker->state == KICKED) { - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. already kicked %p", next_worker); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. already kicked %p", next_worker); } SET_KICK_STATE(next_worker, KICKED); goto done; } else if (root_worker == next_worker && // only try and wake up a poller if // there is no next worker - root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load( + root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load( &g_active_poller)) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. kicked %p", root_worker); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. kicked %p", root_worker); } SET_KICK_STATE(root_worker, KICKED); ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd); goto done; } else if (next_worker->state == UNKICKED) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. kicked %p", next_worker); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. kicked %p", next_worker); } GPR_ASSERT(next_worker->initialized_cv); SET_KICK_STATE(next_worker, KICKED); @@ -1075,22 +1064,22 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, goto done; } else if (next_worker->state == DESIGNATED_POLLER) { if (root_worker->state != DESIGNATED_POLLER) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { + if (grpc_polling_trace.enabled()) { gpr_log( - GPR_ERROR, + GPR_INFO, " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)", root_worker, root_worker->initialized_cv, next_worker); } SET_KICK_STATE(root_worker, KICKED); if (root_worker->initialized_cv) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); gpr_cv_signal(&root_worker->cv); } goto done; } else { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker, + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. non-root poller %p (root=%p)", next_worker, root_worker); } SET_KICK_STATE(next_worker, KICKED); @@ -1098,15 +1087,15 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, goto done; } } else { - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); GPR_ASSERT(next_worker->state == KICKED); SET_KICK_STATE(next_worker, KICKED); goto done; } } else { - GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. kicked while waking up"); + GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. kicked while waking up"); } goto done; } @@ -1115,81 +1104,72 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } if (specific_worker->state == KICKED) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. 
specific worker already kicked"); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. specific worker already kicked"); } goto done; } else if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { - GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker); + GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker); } SET_KICK_STATE(specific_worker, KICKED); goto done; } else if (specific_worker == - (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. kick active poller"); + (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) { + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. kick active poller"); } SET_KICK_STATE(specific_worker, KICKED); ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd); goto done; } else if (specific_worker->initialized_cv) { - GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. kick waiting worker"); + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. kick waiting worker"); } SET_KICK_STATE(specific_worker, KICKED); gpr_cv_signal(&specific_worker->cv); goto done; } else { - GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, " .. kick non-waiting worker"); + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, " .. 
kick non-waiting worker"); } SET_KICK_STATE(specific_worker, KICKED); goto done; } done: - GPR_TIMER_END("pollset_kick", 0); return ret_err; } -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) {} +static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {} /******************************************************************************* * Pollset-set Definitions */ -static grpc_pollset_set *pollset_set_create(void) { - return (grpc_pollset_set *)((intptr_t)0xdeafbeef); +static grpc_pollset_set* pollset_set_create(void) { + return (grpc_pollset_set*)(static_cast(0xdeafbeef)); } -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss) {} +static void pollset_set_destroy(grpc_pollset_set* pss) {} -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) {} +static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {} -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) {} +static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {} -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) {} +static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {} -static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) {} +static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {} -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) {} +static void pollset_set_add_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) {} -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) {} +static void pollset_set_del_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) {} /******************************************************************************* * Event engine binding @@ -1235,13 +1215,14 @@ static const grpc_event_engine_vtable vtable = { /* It is possible that GLIBC has epoll but the underlying kernel doesn't. * Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll * support is available */ -const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) { +const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) { if (!grpc_has_wakeup_fd()) { - return NULL; + gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd."); + return nullptr; } if (!epoll_set_init()) { - return NULL; + return nullptr; } fd_global_init(); @@ -1249,7 +1230,7 @@ const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) { if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { fd_global_shutdown(); epoll_set_shutdown(); - return NULL; + return nullptr; } return &vtable; @@ -1257,11 +1238,11 @@ const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) { #else /* defined(GRPC_LINUX_EPOLL) */ #if defined(GRPC_POSIX_SOCKET) -#include "src/core/lib/iomgr/ev_posix.h" +#include "src/core/lib/iomgr/ev_epoll1_linux.h" /* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. 
Return * NULL */ -const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) { - return NULL; +const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) { + return nullptr; } #endif /* defined(GRPC_POSIX_SOCKET) */ #endif /* !defined(GRPC_LINUX_EPOLL) */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.h b/Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.h index 0696e0df4..ca0db7250 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.h +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_epoll1_linux.h @@ -19,11 +19,13 @@ #ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL1_LINUX_H #define GRPC_CORE_LIB_IOMGR_EV_EPOLL1_LINUX_H +#include + #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/port.h" // a polling engine that utilizes a singleton epoll set and turnstile polling -const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request); +const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request); #endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL1_LINUX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.c b/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.c deleted file mode 100644 index 8eb4de44d..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.c +++ /dev/null @@ -1,1461 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/iomgr/port.h" - -/* This polling engine is only relevant on linux kernels supporting epoll() */ -#ifdef GRPC_LINUX_EPOLL - -#include "src/core/lib/iomgr/ev_epollex_linux.h" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "src/core/lib/debug/stats.h" -#include "src/core/lib/iomgr/ev_posix.h" -#include "src/core/lib/iomgr/iomgr_internal.h" -#include "src/core/lib/iomgr/is_epollexclusive_available.h" -#include "src/core/lib/iomgr/lockfree_event.h" -#include "src/core/lib/iomgr/sys_epoll_wrapper.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/iomgr/wakeup_fd_posix.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" -#include "src/core/lib/support/spinlock.h" - -/******************************************************************************* - * Polling object - */ - -typedef enum { - PO_POLLING_GROUP, - PO_POLLSET_SET, - PO_POLLSET, - PO_FD, /* ordering is important: we always want to lock pollsets before fds: - this guarantees that using an fd as a pollable is safe */ - PO_EMPTY_POLLABLE, - PO_COUNT -} polling_obj_type; - -typedef struct polling_obj polling_obj; -typedef struct polling_group polling_group; - -struct polling_obj { - gpr_mu mu; - polling_obj_type type; - polling_group *group; - struct polling_obj *next; - struct polling_obj *prev; -}; - -struct polling_group { - polling_obj po; - gpr_refcount refs; -}; - -static void po_init(polling_obj *po, polling_obj_type type); -static void po_destroy(polling_obj *po); -static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b); -static int po_cmp(polling_obj *a, polling_obj *b); - -static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po, - size_t initial_po_count); -static polling_group *pg_ref(polling_group *pg); -static void pg_unref(polling_group *pg); -static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a, - polling_group *b); -static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg, - polling_obj *po); - -/******************************************************************************* - * pollable Declarations - */ - -typedef struct pollable { - polling_obj po; - int epfd; - grpc_wakeup_fd wakeup; - grpc_pollset_worker *root_worker; -} pollable; - -static const char *polling_obj_type_string(polling_obj_type t) { - switch (t) { - case PO_POLLING_GROUP: - return "polling_group"; - case PO_POLLSET_SET: - return "pollset_set"; - case PO_POLLSET: - return "pollset"; - case PO_FD: - return "fd"; - case PO_EMPTY_POLLABLE: - return "empty_pollable"; - case PO_COUNT: - return ""; - } - return ""; -} - -static char *pollable_desc(pollable *p) { - char *out; - gpr_asprintf(&out, "type=%s group=%p epfd=%d wakeup=%d", - polling_obj_type_string(p->po.type), p->po.group, p->epfd, - p->wakeup.read_fd); - return out; -} - -static pollable g_empty_pollable; - -static void pollable_init(pollable *p, polling_obj_type type); -static void pollable_destroy(pollable *p); -/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */ -static grpc_error *pollable_materialize(pollable *p); - -/******************************************************************************* - * Fd Declarations - */ - -struct grpc_fd { - pollable pollable_obj; - int fd; - /* refst format: - bit 0 : 1=Active / 0=Orphaned - bits 1-n : refcount - Ref/Unref by two to avoid altering the orphaned bit */ - gpr_atm refst; - - /* The fd is 
either closed or we relinquished control of it. In either - cases, this indicates that the 'fd' on this structure is no longer - valid */ - gpr_mu orphaned_mu; - bool orphaned; - - gpr_atm read_closure; - gpr_atm write_closure; - - struct grpc_fd *freelist_next; - grpc_closure *on_done_closure; - - /* The pollset that last noticed that the fd is readable. The actual type - * stored in this is (grpc_pollset *) */ - gpr_atm read_notifier_pollset; - - grpc_iomgr_object iomgr_object; -}; - -static void fd_global_init(void); -static void fd_global_shutdown(void); - -/******************************************************************************* - * Pollset Declarations - */ - -typedef struct pollset_worker_link { - grpc_pollset_worker *next; - grpc_pollset_worker *prev; -} pollset_worker_link; - -typedef enum { - PWL_POLLSET, - PWL_POLLABLE, - POLLSET_WORKER_LINK_COUNT -} pollset_worker_links; - -struct grpc_pollset_worker { - bool kicked; - bool initialized_cv; - pollset_worker_link links[POLLSET_WORKER_LINK_COUNT]; - gpr_cv cv; - grpc_pollset *pollset; - pollable *pollable_obj; -}; - -#define MAX_EPOLL_EVENTS 100 -#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5 - -struct grpc_pollset { - pollable pollable_obj; - pollable *current_pollable_obj; - int kick_alls_pending; - bool kicked_without_poller; - grpc_closure *shutdown_closure; - grpc_pollset_worker *root_worker; - - int event_cursor; - int event_count; - struct epoll_event events[MAX_EPOLL_EVENTS]; -}; - -/******************************************************************************* - * Pollset-set Declarations - */ -struct grpc_pollset_set { - polling_obj po; -}; - -/******************************************************************************* - * Common helpers - */ - -static bool append_error(grpc_error **composite, grpc_error *error, - const char *desc) { - if (error == GRPC_ERROR_NONE) return true; - if (*composite == GRPC_ERROR_NONE) { - *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc); - } - *composite = grpc_error_add_child(*composite, error); - return false; -} - -/******************************************************************************* - * Fd Definitions - */ - -/* We need to keep a freelist not because of any concerns of malloc performance - * but instead so that implementations with multiple threads in (for example) - * epoll_wait deal with the race between pollset removal and incoming poll - * notifications. - * - * The problem is that the poller ultimately holds a reference to this - * object, so it is very difficult to know when is safe to free it, at least - * without some expensive synchronization. - * - * If we keep the object freelisted, in the worst case losing this race just - * becomes a spurious read notification on a reused fd. - */ - -/* The alarm system needs to be able to wakeup 'some poller' sometimes - * (specifically when a new alarm needs to be triggered earlier than the next - * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a - * case occurs. 
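// A minimal sketch of the freelist idea described in the comment above,
// assuming simplified hypothetical node / node_pool types rather than the
// grpc_fd machinery in this patch: retired objects are pushed onto a
// mutex-guarded list and reused instead of freed, so a racing poller that
// still holds a pointer at worst produces a spurious notification.
#include <mutex>

struct node {
  int fd = -1;
  node* freelist_next = nullptr;
};

struct node_pool {
  std::mutex mu;
  node* freelist = nullptr;

  node* acquire() {
    std::lock_guard<std::mutex> lock(mu);
    if (freelist != nullptr) {   // reuse a retired node if one is available
      node* n = freelist;
      freelist = n->freelist_next;
      return n;
    }
    return new node;             // otherwise allocate a fresh one
  }

  void release(node* n) {
    std::lock_guard<std::mutex> lock(mu);
    n->freelist_next = freelist; // never freed: pushed back for reuse
    freelist = n;
  }
};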
*/ - -static grpc_fd *fd_freelist = NULL; -static gpr_mu fd_freelist_mu; - -#ifndef NDEBUG -#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) -#define UNREF_BY(ec, fd, n, reason) \ - unref_by(ec, fd, n, reason, __FILE__, __LINE__) -static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, - int line) { - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { - gpr_log(GPR_DEBUG, - "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", - fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), - gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line); - } -#else -#define REF_BY(fd, n, reason) ref_by(fd, n) -#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n) -static void ref_by(grpc_fd *fd, int n) { -#endif - GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); -} - -static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_fd *fd = (grpc_fd *)arg; - /* Add the fd to the freelist */ - grpc_iomgr_unregister_object(&fd->iomgr_object); - pollable_destroy(&fd->pollable_obj); - gpr_mu_destroy(&fd->orphaned_mu); - gpr_mu_lock(&fd_freelist_mu); - fd->freelist_next = fd_freelist; - fd_freelist = fd; - - grpc_lfev_destroy(&fd->read_closure); - grpc_lfev_destroy(&fd->write_closure); - - gpr_mu_unlock(&fd_freelist_mu); -} - -#ifndef NDEBUG -static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n, - const char *reason, const char *file, int line) { - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { - gpr_log(GPR_DEBUG, - "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", - fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), - gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); - } -#else -static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) { -#endif - gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n); - if (old == n) { - GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); - } else { - GPR_ASSERT(old > n); - } -} - -static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } - -static void fd_global_shutdown(void) { - gpr_mu_lock(&fd_freelist_mu); - gpr_mu_unlock(&fd_freelist_mu); - while (fd_freelist != NULL) { - grpc_fd *fd = fd_freelist; - fd_freelist = fd_freelist->freelist_next; - gpr_free(fd); - } - gpr_mu_destroy(&fd_freelist_mu); -} - -static grpc_fd *fd_create(int fd, const char *name) { - grpc_fd *new_fd = NULL; - - gpr_mu_lock(&fd_freelist_mu); - if (fd_freelist != NULL) { - new_fd = fd_freelist; - fd_freelist = fd_freelist->freelist_next; - } - gpr_mu_unlock(&fd_freelist_mu); - - if (new_fd == NULL) { - new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd)); - } - - pollable_init(&new_fd->pollable_obj, PO_FD); - - gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1); - new_fd->fd = fd; - gpr_mu_init(&new_fd->orphaned_mu); - new_fd->orphaned = false; - grpc_lfev_init(&new_fd->read_closure); - grpc_lfev_init(&new_fd->write_closure); - gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); - - new_fd->freelist_next = NULL; - new_fd->on_done_closure = NULL; - - char *fd_name; - gpr_asprintf(&fd_name, "%s fd=%d", name, fd); - grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name); -#ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { - gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name); - } -#endif - gpr_free(fd_name); - return new_fd; -} - -static int fd_wrapped_fd(grpc_fd *fd) { - int ret_fd = -1; - gpr_mu_lock(&fd->orphaned_mu); - if (!fd->orphaned) { - ret_fd = fd->fd; - } - 
gpr_mu_unlock(&fd->orphaned_mu); - - return ret_fd; -} - -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, - bool already_closed, const char *reason) { - bool is_fd_closed = already_closed; - grpc_error *error = GRPC_ERROR_NONE; - - gpr_mu_lock(&fd->pollable_obj.po.mu); - gpr_mu_lock(&fd->orphaned_mu); - fd->on_done_closure = on_done; - - /* If release_fd is not NULL, we should be relinquishing control of the file - descriptor fd->fd (but we still own the grpc_fd structure). */ - if (release_fd != NULL) { - *release_fd = fd->fd; - } else if (!is_fd_closed) { - close(fd->fd); - is_fd_closed = true; - } - - fd->orphaned = true; - - if (!is_fd_closed) { - gpr_log(GPR_DEBUG, "TODO: handle fd removal?"); - } - - /* Remove the active status but keep referenced. We want this grpc_fd struct - to be alive (and not added to freelist) until the end of this function */ - REF_BY(fd, 1, reason); - - GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error)); - - gpr_mu_unlock(&fd->orphaned_mu); - gpr_mu_unlock(&fd->pollable_obj.po.mu); - UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */ - GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error)); - GRPC_ERROR_UNREF(error); -} - -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { - gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); - return (grpc_pollset *)notifier; -} - -static bool fd_is_shutdown(grpc_fd *fd) { - return grpc_lfev_is_shutdown(&fd->read_closure); -} - -/* Might be called multiple times */ -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure, - GRPC_ERROR_REF(why))) { - shutdown(fd->fd, SHUT_RDWR); - grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why)); - } - GRPC_ERROR_UNREF(why); -} - -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); -} - -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); -} - -/******************************************************************************* - * Pollable Definitions - */ - -static void pollable_init(pollable *p, polling_obj_type type) { - po_init(&p->po, type); - p->root_worker = NULL; - p->epfd = -1; -} - -static void pollable_destroy(pollable *p) { - po_destroy(&p->po); - if (p->epfd != -1) { - close(p->epfd); - grpc_wakeup_fd_destroy(&p->wakeup); - } -} - -/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */ -static grpc_error *pollable_materialize(pollable *p) { - if (p->epfd == -1) { - int new_epfd = epoll_create1(EPOLL_CLOEXEC); - if (new_epfd < 0) { - return GRPC_OS_ERROR(errno, "epoll_create1"); - } - grpc_error *err = grpc_wakeup_fd_init(&p->wakeup); - if (err != GRPC_ERROR_NONE) { - close(new_epfd); - return err; - } - struct epoll_event ev; - ev.events = (uint32_t)(EPOLLIN | EPOLLET); - ev.data.ptr = (void *)(1 | (intptr_t)&p->wakeup); - if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) { - err = GRPC_OS_ERROR(errno, "epoll_ctl"); - close(new_epfd); - grpc_wakeup_fd_destroy(&p->wakeup); - return err; - } - - p->epfd = new_epfd; - } - return GRPC_ERROR_NONE; -} - -/* pollable must be materialized */ -static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) { - grpc_error *error = 
GRPC_ERROR_NONE; - static const char *err_desc = "pollable_add_fd"; - const int epfd = p->epfd; - GPR_ASSERT(epfd != -1); - - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "add fd %p (%d) to pollable %p", fd, fd->fd, p); - } - - gpr_mu_lock(&fd->orphaned_mu); - if (fd->orphaned) { - gpr_mu_unlock(&fd->orphaned_mu); - return GRPC_ERROR_NONE; - } - struct epoll_event ev_fd; - ev_fd.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE); - ev_fd.data.ptr = fd; - if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) { - switch (errno) { - case EEXIST: - break; - default: - append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc); - } - } - gpr_mu_unlock(&fd->orphaned_mu); - - return error; -} - -/******************************************************************************* - * Pollset Definitions - */ - -GPR_TLS_DECL(g_current_thread_pollset); -GPR_TLS_DECL(g_current_thread_worker); - -/* Global state management */ -static grpc_error *pollset_global_init(void) { - gpr_tls_init(&g_current_thread_pollset); - gpr_tls_init(&g_current_thread_worker); - pollable_init(&g_empty_pollable, PO_EMPTY_POLLABLE); - return GRPC_ERROR_NONE; -} - -static void pollset_global_shutdown(void) { - pollable_destroy(&g_empty_pollable); - gpr_tls_destroy(&g_current_thread_pollset); - gpr_tls_destroy(&g_current_thread_worker); -} - -static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { - if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL && - pollset->kick_alls_pending == 0) { - GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE); - pollset->shutdown_closure = NULL; - } -} - -static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error_unused) { - grpc_error *error = GRPC_ERROR_NONE; - grpc_pollset *pollset = (grpc_pollset *)arg; - gpr_mu_lock(&pollset->pollable_obj.po.mu); - if (pollset->root_worker != NULL) { - grpc_pollset_worker *worker = pollset->root_worker; - do { - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); - if (worker->pollable_obj != &pollset->pollable_obj) { - gpr_mu_lock(&worker->pollable_obj->po.mu); - } - if (worker->initialized_cv && worker != pollset->root_worker) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p kickall_via_cv %p (pollable %p vs %p)", - pollset, worker, &pollset->pollable_obj, - worker->pollable_obj); - } - worker->kicked = true; - gpr_cv_signal(&worker->cv); - } else { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p kickall_via_wakeup %p (pollable %p vs %p)", - pollset, worker, &pollset->pollable_obj, - worker->pollable_obj); - } - append_error(&error, - grpc_wakeup_fd_wakeup(&worker->pollable_obj->wakeup), - "pollset_shutdown"); - } - if (worker->pollable_obj != &pollset->pollable_obj) { - gpr_mu_unlock(&worker->pollable_obj->po.mu); - } - - worker = worker->links[PWL_POLLSET].next; - } while (worker != pollset->root_worker); - } - pollset->kick_alls_pending--; - pollset_maybe_finish_shutdown(exec_ctx, pollset); - gpr_mu_unlock(&pollset->pollable_obj.po.mu); - GRPC_LOG_IF_ERROR("kick_all", error); -} - -static void pollset_kick_all(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { - pollset->kick_alls_pending++; - GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(do_kick_all, pollset, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); -} - -static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p, - grpc_pollset_worker *specific_worker) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - 
gpr_log(GPR_DEBUG, - "PS:%p kick %p tls_pollset=%p tls_worker=%p " - "root_worker=(pollset:%p pollable:%p)", - p, specific_worker, (void *)gpr_tls_get(&g_current_thread_pollset), - (void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker, - p->root_worker); - } - if (specific_worker == NULL) { - if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) { - if (pollset->root_worker == NULL) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", p); - } - pollset->kicked_without_poller = true; - return GRPC_ERROR_NONE; - } else { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p kicked_any_via_wakeup_fd", p); - } - grpc_error *err = pollable_materialize(p); - if (err != GRPC_ERROR_NONE) return err; - return grpc_wakeup_fd_wakeup(&p->wakeup); - } - } else { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", p); - } - return GRPC_ERROR_NONE; - } - } else if (specific_worker->kicked) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p); - } - return GRPC_ERROR_NONE; - } else if (gpr_tls_get(&g_current_thread_worker) == - (intptr_t)specific_worker) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p); - } - specific_worker->kicked = true; - return GRPC_ERROR_NONE; - } else if (specific_worker == p->root_worker) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p); - } - grpc_error *err = pollable_materialize(p); - if (err != GRPC_ERROR_NONE) return err; - specific_worker->kicked = true; - return grpc_wakeup_fd_wakeup(&p->wakeup); - } else { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p); - } - specific_worker->kicked = true; - gpr_cv_signal(&specific_worker->cv); - return GRPC_ERROR_NONE; - } -} - -/* p->po.mu must be held before calling this function */ -static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *specific_worker) { - pollable *p = pollset->current_pollable_obj; - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); - if (p != &pollset->pollable_obj) { - gpr_mu_lock(&p->po.mu); - } - grpc_error *error = pollset_kick_inner(pollset, p, specific_worker); - if (p != &pollset->pollable_obj) { - gpr_mu_unlock(&p->po.mu); - } - return error; -} - -static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { - pollable_init(&pollset->pollable_obj, PO_POLLSET); - pollset->current_pollable_obj = &g_empty_pollable; - pollset->kicked_without_poller = false; - pollset->shutdown_closure = NULL; - pollset->root_worker = NULL; - *mu = &pollset->pollable_obj.po.mu; -} - -/* Convert a timespec to milliseconds: - - Very small or negative poll times are clamped to zero to do a non-blocking - poll (which becomes spin polling) - - Other small values are rounded up to one millisecond - - Longer than a millisecond polls are rounded up to the next nearest - millisecond to avoid spinning - - Infinite timeouts are converted to -1 */ -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { - return -1; - } - - if (gpr_time_cmp(deadline, now) <= 0) { - return 0; - } - - static const gpr_timespec round_up = { - 0, /* tv_sec */ - GPR_NS_PER_MS - 1, /* tv_nsec */ - GPR_TIMESPAN /* clock_type */ - }; - 
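// A minimal sketch of the deadline-to-milliseconds rules described in the
// comment above, assuming plain nanosecond tick counts instead of
// gpr_timespec; deadline_to_millis below is a hypothetical helper for
// illustration, not part of this patch.
#include <cstdint>
#include <limits>

static int deadline_to_millis(std::int64_t deadline_ns, std::int64_t now_ns) {
  if (deadline_ns == std::numeric_limits<std::int64_t>::max()) {
    return -1;                                  // infinite timeout
  }
  if (deadline_ns <= now_ns) {
    return 0;                                   // expired: non-blocking poll
  }
  std::int64_t millis = (deadline_ns - now_ns + 999999) / 1000000;  // round up
  return millis < 1 ? 1 : static_cast<int>(millis);  // never below 1ms
}
// e.g. a 0.2ms deadline maps to 1, a 10.1ms deadline maps to 11.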
timeout = gpr_time_sub(deadline, now); - int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up)); - return millis >= 1 ? millis : 1; -} - -static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); - - /* Note, it is possible that fd_become_readable might be called twice with - different 'notifier's when an fd becomes readable and it is in two epoll - sets (This can happen briefly during polling island merges). In such cases - it does not really matter which notifer is set as the read_notifier_pollset - (They would both point to the same polling island anyway) */ - /* Use release store to match with acquire load in fd_get_read_notifier */ - gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); -} - -static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); -} - -static grpc_error *fd_become_pollable_locked(grpc_fd *fd) { - grpc_error *error = GRPC_ERROR_NONE; - static const char *err_desc = "fd_become_pollable"; - if (append_error(&error, pollable_materialize(&fd->pollable_obj), err_desc)) { - append_error(&error, pollable_add_fd(&fd->pollable_obj, fd), err_desc); - } - return error; -} - -/* pollset->po.mu lock must be held by the caller before calling this */ -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { - GPR_ASSERT(pollset->shutdown_closure == NULL); - pollset->shutdown_closure = closure; - pollset_kick_all(exec_ctx, pollset); - pollset_maybe_finish_shutdown(exec_ctx, pollset); -} - -static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) { - return p != &g_empty_pollable && p != &pollset->pollable_obj; -} - -static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, bool drain) { - static const char *err_desc = "pollset_process_events"; - grpc_error *error = GRPC_ERROR_NONE; - for (int i = 0; (drain || i < MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) && - pollset->event_cursor != pollset->event_count; - i++) { - int n = pollset->event_cursor++; - struct epoll_event *ev = &pollset->events[n]; - void *data_ptr = ev->data.ptr; - if (1 & (intptr_t)data_ptr) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr); - } - append_error(&error, - grpc_wakeup_fd_consume_wakeup( - (grpc_wakeup_fd *)((~(intptr_t)1) & (intptr_t)data_ptr)), - err_desc); - } else { - grpc_fd *fd = (grpc_fd *)data_ptr; - bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0; - bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0; - bool write_ev = (ev->events & EPOLLOUT) != 0; - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, - "PS:%p got fd %p: cancel=%d read=%d " - "write=%d", - pollset, fd, cancel, read_ev, write_ev); - } - if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd, pollset); - } - if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); - } - } - } - - return error; -} - -/* pollset_shutdown is guaranteed to be called before pollset_destroy. 
*/ -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { - pollable_destroy(&pollset->pollable_obj); - if (pollset_is_pollable_fd(pollset, pollset->current_pollable_obj)) { - UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable_obj, 2, - "pollset_pollable"); - } - GRPC_LOG_IF_ERROR("pollset_process_events", - pollset_process_events(exec_ctx, pollset, true)); -} - -static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - pollable *p, gpr_timespec now, - gpr_timespec deadline) { - int timeout = poll_deadline_to_millis_timeout(deadline, now); - - if (GRPC_TRACER_ON(grpc_polling_trace)) { - char *desc = pollable_desc(p); - gpr_log(GPR_DEBUG, "PS:%p poll %p[%s] for %dms", pollset, p, desc, timeout); - gpr_free(desc); - } - - if (timeout != 0) { - GRPC_SCHEDULING_START_BLOCKING_REGION; - } - int r; - do { - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); - r = epoll_wait(p->epfd, pollset->events, MAX_EPOLL_EVENTS, timeout); - } while (r < 0 && errno == EINTR); - if (timeout != 0) { - GRPC_SCHEDULING_END_BLOCKING_REGION; - } - - if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait"); - - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p poll %p got %d events", pollset, p, r); - } - - pollset->event_cursor = 0; - pollset->event_count = r; - - return GRPC_ERROR_NONE; -} - -/* Return true if first in list */ -static bool worker_insert(grpc_pollset_worker **root, pollset_worker_links link, - grpc_pollset_worker *worker) { - if (*root == NULL) { - *root = worker; - worker->links[link].next = worker->links[link].prev = worker; - return true; - } else { - worker->links[link].next = *root; - worker->links[link].prev = worker->links[link].next->links[link].prev; - worker->links[link].next->links[link].prev = worker; - worker->links[link].prev->links[link].next = worker; - return false; - } -} - -/* Return true if last in list */ -typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result; - -static worker_remove_result worker_remove(grpc_pollset_worker **root, - pollset_worker_links link, - grpc_pollset_worker *worker) { - if (worker == *root) { - if (worker == worker->links[link].next) { - *root = NULL; - return EMPTIED; - } else { - *root = worker->links[link].next; - worker->links[link].prev->links[link].next = worker->links[link].next; - worker->links[link].next->links[link].prev = worker->links[link].prev; - return NEW_ROOT; - } - } else { - worker->links[link].prev->links[link].next = worker->links[link].next; - worker->links[link].next->links[link].prev = worker->links[link].prev; - return REMOVED; - } -} - -/* Return true if this thread should poll */ -static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, - grpc_pollset_worker **worker_hdl, gpr_timespec *now, - gpr_timespec deadline) { - bool do_poll = true; - if (worker_hdl != NULL) *worker_hdl = worker; - worker->initialized_cv = false; - worker->kicked = false; - worker->pollset = pollset; - worker->pollable_obj = pollset->current_pollable_obj; - - if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) { - REF_BY((grpc_fd *)worker->pollable_obj, 2, "one_poll"); - } - - worker_insert(&pollset->root_worker, PWL_POLLSET, worker); - if (!worker_insert(&worker->pollable_obj->root_worker, PWL_POLLABLE, - worker)) { - worker->initialized_cv = true; - gpr_cv_init(&worker->cv); - if (worker->pollable_obj != &pollset->pollable_obj) { - gpr_mu_unlock(&pollset->pollable_obj.po.mu); - } - if (GRPC_TRACER_ON(grpc_polling_trace) && - 
worker->pollable_obj->root_worker != worker) { - gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset, - worker->pollable_obj, worker, - poll_deadline_to_millis_timeout(deadline, *now)); - } - while (do_poll && worker->pollable_obj->root_worker != worker) { - if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->po.mu, deadline)) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset, - worker->pollable_obj, worker); - } - do_poll = false; - } else if (worker->kicked) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, - worker->pollable_obj, worker); - } - do_poll = false; - } else if (GRPC_TRACER_ON(grpc_polling_trace) && - worker->pollable_obj->root_worker != worker) { - gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset, - worker->pollable_obj, worker); - } - } - if (worker->pollable_obj != &pollset->pollable_obj) { - gpr_mu_unlock(&worker->pollable_obj->po.mu); - gpr_mu_lock(&pollset->pollable_obj.po.mu); - gpr_mu_lock(&worker->pollable_obj->po.mu); - } - *now = gpr_now(now->clock_type); - } - - return do_poll && pollset->shutdown_closure == NULL && - pollset->current_pollable_obj == worker->pollable_obj; -} - -static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *worker, - grpc_pollset_worker **worker_hdl) { - if (NEW_ROOT == - worker_remove(&worker->pollable_obj->root_worker, PWL_POLLABLE, worker)) { - gpr_cv_signal(&worker->pollable_obj->root_worker->cv); - } - if (worker->initialized_cv) { - gpr_cv_destroy(&worker->cv); - } - if (pollset_is_pollable_fd(pollset, worker->pollable_obj)) { - UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable_obj, 2, "one_poll"); - } - if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) { - pollset_maybe_finish_shutdown(exec_ctx, pollset); - } -} - -/* pollset->po.mu lock must be held by the caller before calling this. 
- The function pollset_work() may temporarily release the lock (pollset->po.mu) - during the course of its execution but it will always re-acquire the lock and - ensure that it is held by the time the function returns */ -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { - grpc_pollset_worker worker; - if (0 && GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRId64 - ".%09d deadline=%" PRId64 ".%09d kwp=%d root_worker=%p", - pollset, worker_hdl, &worker, now.tv_sec, now.tv_nsec, - deadline.tv_sec, deadline.tv_nsec, pollset->kicked_without_poller, - pollset->root_worker); - } - grpc_error *error = GRPC_ERROR_NONE; - static const char *err_desc = "pollset_work"; - if (pollset->kicked_without_poller) { - pollset->kicked_without_poller = false; - return GRPC_ERROR_NONE; - } - if (pollset->current_pollable_obj != &pollset->pollable_obj) { - gpr_mu_lock(&pollset->current_pollable_obj->po.mu); - } - if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) { - gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); - gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); - GPR_ASSERT(!pollset->shutdown_closure); - append_error(&error, pollable_materialize(worker.pollable_obj), err_desc); - if (worker.pollable_obj != &pollset->pollable_obj) { - gpr_mu_unlock(&worker.pollable_obj->po.mu); - } - gpr_mu_unlock(&pollset->pollable_obj.po.mu); - if (pollset->event_cursor == pollset->event_count) { - append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable_obj, - now, deadline), - err_desc); - } - append_error(&error, pollset_process_events(exec_ctx, pollset, false), - err_desc); - gpr_mu_lock(&pollset->pollable_obj.po.mu); - if (worker.pollable_obj != &pollset->pollable_obj) { - gpr_mu_lock(&worker.pollable_obj->po.mu); - } - gpr_tls_set(&g_current_thread_pollset, 0); - gpr_tls_set(&g_current_thread_worker, 0); - pollset_maybe_finish_shutdown(exec_ctx, pollset); - } - end_worker(exec_ctx, pollset, &worker, worker_hdl); - if (worker.pollable_obj != &pollset->pollable_obj) { - gpr_mu_unlock(&worker.pollable_obj->po.mu); - } - if (grpc_exec_ctx_has_work(exec_ctx)) { - gpr_mu_unlock(&pollset->pollable_obj.po.mu); - grpc_exec_ctx_flush(exec_ctx); - gpr_mu_lock(&pollset->pollable_obj.po.mu); - } - return error; -} - -static void unref_fd_no_longer_poller(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_fd *fd = (grpc_fd *)arg; - UNREF_BY(exec_ctx, fd, 2, "pollset_pollable"); -} - -/* expects pollsets locked, flag whether fd is locked or not */ -static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, grpc_fd *fd, - bool fd_locked) { - static const char *err_desc = "pollset_add_fd"; - grpc_error *error = GRPC_ERROR_NONE; - if (pollset->current_pollable_obj == &g_empty_pollable) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, - "PS:%p add fd %p; transition pollable from empty to fd", pollset, - fd); - } - /* empty pollable --> single fd pollable */ - pollset_kick_all(exec_ctx, pollset); - pollset->current_pollable_obj = &fd->pollable_obj; - if (!fd_locked) gpr_mu_lock(&fd->pollable_obj.po.mu); - append_error(&error, fd_become_pollable_locked(fd), err_desc); - if (!fd_locked) gpr_mu_unlock(&fd->pollable_obj.po.mu); - REF_BY(fd, 2, "pollset_pollable"); - } else if (pollset->current_pollable_obj == &pollset->pollable_obj) { - if (GRPC_TRACER_ON(grpc_polling_trace)) 
{ - gpr_log(GPR_DEBUG, "PS:%p add fd %p; already multipolling", pollset, fd); - } - append_error(&error, pollable_add_fd(pollset->current_pollable_obj, fd), - err_desc); - } else if (pollset->current_pollable_obj != &fd->pollable_obj) { - grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable_obj; - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, - "PS:%p add fd %p; transition pollable from fd %p to multipoller", - pollset, fd, had_fd); - } - /* Introduce a spurious completion. - If we do not, then it may be that the fd-specific epoll set consumed - a completion without being polled, leading to a missed edge going up. */ - grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read"); - grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write"); - pollset_kick_all(exec_ctx, pollset); - pollset->current_pollable_obj = &pollset->pollable_obj; - if (append_error(&error, pollable_materialize(&pollset->pollable_obj), - err_desc)) { - pollable_add_fd(&pollset->pollable_obj, had_fd); - pollable_add_fd(&pollset->pollable_obj, fd); - } - GRPC_CLOSURE_SCHED(exec_ctx, - GRPC_CLOSURE_CREATE(unref_fd_no_longer_poller, had_fd, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); - } - return error; -} - -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { - gpr_mu_lock(&pollset->pollable_obj.po.mu); - grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd, false); - gpr_mu_unlock(&pollset->pollable_obj.po.mu); - GRPC_LOG_IF_ERROR("pollset_add_fd", error); -} - -/******************************************************************************* - * Pollset-set Definitions - */ - -static grpc_pollset_set *pollset_set_create(void) { - grpc_pollset_set *pss = (grpc_pollset_set *)gpr_zalloc(sizeof(*pss)); - po_init(&pss->po, PO_POLLSET_SET); - return pss; -} - -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss) { - po_destroy(&pss->po); - gpr_free(pss); -} - -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { - po_join(exec_ctx, &pss->po, &fd->pollable_obj.po); -} - -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) {} - -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) { - po_join(exec_ctx, &pss->po, &ps->pollable_obj.po); -} - -static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) {} - -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { - po_join(exec_ctx, &bag->po, &item->po); -} - -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) {} - -static void po_init(polling_obj *po, polling_obj_type type) { - gpr_mu_init(&po->mu); - po->type = type; - po->group = NULL; - po->next = po; - po->prev = po; -} - -static polling_group *pg_lock_latest(polling_group *pg) { - /* assumes pg unlocked; consumes ref, returns ref */ - gpr_mu_lock(&pg->po.mu); - while (pg->po.group != NULL) { - polling_group *new_pg = pg_ref(pg->po.group); - gpr_mu_unlock(&pg->po.mu); - pg_unref(pg); - pg = new_pg; - gpr_mu_lock(&pg->po.mu); - } - return pg; -} - -static void po_destroy(polling_obj *po) { - if (po->group != NULL) { - polling_group *pg = pg_lock_latest(po->group); - po->prev->next = po->next; - po->next->prev = po->prev; - gpr_mu_unlock(&pg->po.mu); - pg_unref(pg); - } - gpr_mu_destroy(&po->mu); -} - -static 
polling_group *pg_ref(polling_group *pg) { - gpr_ref(&pg->refs); - return pg; -} - -static void pg_unref(polling_group *pg) { - if (gpr_unref(&pg->refs)) { - po_destroy(&pg->po); - gpr_free(pg); - } -} - -static int po_cmp(polling_obj *a, polling_obj *b) { - if (a == b) return 0; - if (a->type < b->type) return -1; - if (a->type > b->type) return 1; - if (a < b) return -1; - assert(a > b); - return 1; -} - -static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) { - switch (po_cmp(a, b)) { - case 0: - return; - case 1: - GPR_SWAP(polling_obj *, a, b); - /* fall through */ - case -1: - gpr_mu_lock(&a->mu); - gpr_mu_lock(&b->mu); - - if (a->group == NULL) { - if (b->group == NULL) { - polling_obj *initial_po[] = {a, b}; - pg_create(exec_ctx, initial_po, GPR_ARRAY_SIZE(initial_po)); - gpr_mu_unlock(&a->mu); - gpr_mu_unlock(&b->mu); - } else { - polling_group *b_group = pg_ref(b->group); - gpr_mu_unlock(&b->mu); - gpr_mu_unlock(&a->mu); - pg_join(exec_ctx, b_group, a); - } - } else if (b->group == NULL) { - polling_group *a_group = pg_ref(a->group); - gpr_mu_unlock(&a->mu); - gpr_mu_unlock(&b->mu); - pg_join(exec_ctx, a_group, b); - } else if (a->group == b->group) { - /* nothing to do */ - gpr_mu_unlock(&a->mu); - gpr_mu_unlock(&b->mu); - } else { - polling_group *a_group = pg_ref(a->group); - polling_group *b_group = pg_ref(b->group); - gpr_mu_unlock(&a->mu); - gpr_mu_unlock(&b->mu); - pg_merge(exec_ctx, a_group, b_group); - } - } -} - -static void pg_notify(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) { - if (a->type == PO_FD && b->type == PO_POLLSET) { - pollset_add_fd_locked(exec_ctx, (grpc_pollset *)b, (grpc_fd *)a, true); - } else if (a->type == PO_POLLSET && b->type == PO_FD) { - pollset_add_fd_locked(exec_ctx, (grpc_pollset *)a, (grpc_fd *)b, true); - } -} - -static void pg_broadcast(grpc_exec_ctx *exec_ctx, polling_group *from, - polling_group *to) { - for (polling_obj *a = from->po.next; a != &from->po; a = a->next) { - for (polling_obj *b = to->po.next; b != &to->po; b = b->next) { - if (po_cmp(a, b) < 0) { - gpr_mu_lock(&a->mu); - gpr_mu_lock(&b->mu); - } else { - GPR_ASSERT(po_cmp(a, b) != 0); - gpr_mu_lock(&b->mu); - gpr_mu_lock(&a->mu); - } - pg_notify(exec_ctx, a, b); - gpr_mu_unlock(&a->mu); - gpr_mu_unlock(&b->mu); - } - } -} - -static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po, - size_t initial_po_count) { - /* assumes all polling objects in initial_po are locked */ - polling_group *pg = (polling_group *)gpr_malloc(sizeof(*pg)); - po_init(&pg->po, PO_POLLING_GROUP); - gpr_ref_init(&pg->refs, (int)initial_po_count); - for (size_t i = 0; i < initial_po_count; i++) { - GPR_ASSERT(initial_po[i]->group == NULL); - initial_po[i]->group = pg; - } - for (size_t i = 1; i < initial_po_count; i++) { - initial_po[i]->prev = initial_po[i - 1]; - } - for (size_t i = 0; i < initial_po_count - 1; i++) { - initial_po[i]->next = initial_po[i + 1]; - } - initial_po[0]->prev = &pg->po; - initial_po[initial_po_count - 1]->next = &pg->po; - pg->po.next = initial_po[0]; - pg->po.prev = initial_po[initial_po_count - 1]; - for (size_t i = 1; i < initial_po_count; i++) { - for (size_t j = 0; j < i; j++) { - pg_notify(exec_ctx, initial_po[i], initial_po[j]); - } - } -} - -static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg, - polling_obj *po) { - /* assumes neither pg nor po are locked; consumes one ref to pg */ - pg = pg_lock_latest(pg); - /* pg locked */ - for (polling_obj *existing = pg->po.next /* skip pg - it's just a stub */; - 
existing != &pg->po; existing = existing->next) { - if (po_cmp(po, existing) < 0) { - gpr_mu_lock(&po->mu); - gpr_mu_lock(&existing->mu); - } else { - GPR_ASSERT(po_cmp(po, existing) != 0); - gpr_mu_lock(&existing->mu); - gpr_mu_lock(&po->mu); - } - /* pg, po, existing locked */ - if (po->group != NULL) { - gpr_mu_unlock(&pg->po.mu); - polling_group *po_group = pg_ref(po->group); - gpr_mu_unlock(&po->mu); - gpr_mu_unlock(&existing->mu); - pg_merge(exec_ctx, pg, po_group); - /* early exit: polling obj picked up a group during joining: we needed - to do a full merge */ - return; - } - pg_notify(exec_ctx, po, existing); - gpr_mu_unlock(&po->mu); - gpr_mu_unlock(&existing->mu); - } - gpr_mu_lock(&po->mu); - if (po->group != NULL) { - gpr_mu_unlock(&pg->po.mu); - polling_group *po_group = pg_ref(po->group); - gpr_mu_unlock(&po->mu); - pg_merge(exec_ctx, pg, po_group); - /* early exit: polling obj picked up a group during joining: we needed - to do a full merge */ - return; - } - po->group = pg; - po->next = &pg->po; - po->prev = pg->po.prev; - po->prev->next = po->next->prev = po; - gpr_mu_unlock(&pg->po.mu); - gpr_mu_unlock(&po->mu); -} - -static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a, - polling_group *b) { - for (;;) { - if (a == b) { - pg_unref(a); - pg_unref(b); - return; - } - if (a > b) GPR_SWAP(polling_group *, a, b); - gpr_mu_lock(&a->po.mu); - gpr_mu_lock(&b->po.mu); - if (a->po.group != NULL) { - polling_group *m2 = pg_ref(a->po.group); - gpr_mu_unlock(&a->po.mu); - gpr_mu_unlock(&b->po.mu); - pg_unref(a); - a = m2; - } else if (b->po.group != NULL) { - polling_group *m2 = pg_ref(b->po.group); - gpr_mu_unlock(&a->po.mu); - gpr_mu_unlock(&b->po.mu); - pg_unref(b); - b = m2; - } else { - break; - } - } - polling_group **unref = NULL; - size_t unref_count = 0; - size_t unref_cap = 0; - b->po.group = a; - pg_broadcast(exec_ctx, a, b); - pg_broadcast(exec_ctx, b, a); - while (b->po.next != &b->po) { - polling_obj *po = b->po.next; - gpr_mu_lock(&po->mu); - if (unref_count == unref_cap) { - unref_cap = GPR_MAX(8, 3 * unref_cap / 2); - unref = (polling_group **)gpr_realloc(unref, unref_cap * sizeof(*unref)); - } - unref[unref_count++] = po->group; - po->group = pg_ref(a); - // unlink from b - po->prev->next = po->next; - po->next->prev = po->prev; - // link to a - po->next = &a->po; - po->prev = a->po.prev; - po->next->prev = po->prev->next = po; - gpr_mu_unlock(&po->mu); - } - gpr_mu_unlock(&a->po.mu); - gpr_mu_unlock(&b->po.mu); - for (size_t i = 0; i < unref_count; i++) { - pg_unref(unref[i]); - } - gpr_free(unref); - pg_unref(b); -} - -/******************************************************************************* - * Event engine binding - */ - -static void shutdown_engine(void) { - fd_global_shutdown(); - pollset_global_shutdown(); -} - -static const grpc_event_engine_vtable vtable = { - sizeof(grpc_pollset), - - fd_create, - fd_wrapped_fd, - fd_orphan, - fd_shutdown, - fd_notify_on_read, - fd_notify_on_write, - fd_is_shutdown, - fd_get_read_notifier_pollset, - - pollset_init, - pollset_shutdown, - pollset_destroy, - pollset_work, - pollset_kick, - pollset_add_fd, - - pollset_set_create, - pollset_set_destroy, - pollset_set_add_pollset, - pollset_set_del_pollset, - pollset_set_add_pollset_set, - pollset_set_del_pollset_set, - pollset_set_add_fd, - pollset_set_del_fd, - - shutdown_engine, -}; - -const grpc_event_engine_vtable *grpc_init_epollex_linux( - bool explicitly_requested) { - if (!grpc_has_wakeup_fd()) { - return NULL; - } - - if 
(!grpc_is_epollexclusive_available()) { - return NULL; - } - - fd_global_init(); - - if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { - pollset_global_shutdown(); - fd_global_shutdown(); - return NULL; - } - - return &vtable; -} - -#else /* defined(GRPC_LINUX_EPOLL) */ -#if defined(GRPC_POSIX_SOCKET) -#include "src/core/lib/iomgr/ev_posix.h" -/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return - * NULL */ -const grpc_event_engine_vtable *grpc_init_epollex_linux( - bool explicitly_requested) { - return NULL; -} -#endif /* defined(GRPC_POSIX_SOCKET) */ - -#endif /* !defined(GRPC_LINUX_EPOLL) */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.cc b/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.cc new file mode 100644 index 000000000..65f1c912a --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.cc @@ -0,0 +1,1513 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#include + +/* This polling engine is only relevant on linux kernels supporting epoll() */ +#ifdef GRPC_LINUX_EPOLL_CREATE1 + +#include "src/core/lib/iomgr/ev_epollex_linux.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/spinlock.h" +#include "src/core/lib/gpr/tls.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/iomgr/block_annotate.h" +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/is_epollexclusive_available.h" +#include "src/core/lib/iomgr/lockfree_event.h" +#include "src/core/lib/iomgr/sys_epoll_wrapper.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/wakeup_fd_posix.h" +#include "src/core/lib/profiling/timers.h" + +// debug aid: create workers on the heap (allows asan to spot +// use-after-destruction) +//#define GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP 1 + +#define MAX_EPOLL_EVENTS 100 +// TODO(juanlishen): We use a greater-than-one value here as a workaround fix to +// a keepalive ping timeout issue. We may want to revert https://github +// .com/grpc/grpc/pull/14943 once we figure out the root cause. 
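// A small sketch of the batching idea behind the constant defined just below:
// the engine stores the events returned by epoll_wait() and dispatches at most
// a fixed number of them per poll call, resuming from a cursor on the next
// call. event_buffer and process_event_batch are hypothetical names used only
// for illustration.
#include <sys/epoll.h>

struct event_buffer {
  int cursor = 0;
  int count = 0;  // filled in after epoll_wait()
  struct epoll_event events[100];
};

static void process_event_batch(event_buffer* buf, int max_per_call) {
  for (int i = 0; i < max_per_call && buf->cursor != buf->count; i++) {
    struct epoll_event* ev = &buf->events[buf->cursor++];
    (void)ev;  // here the real engine marks fds readable/writable
  }
}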
+#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 16 + +grpc_core::DebugOnlyTraceFlag grpc_trace_pollable_refcount(false, + "pollable_refcount"); + +/******************************************************************************* + * pollable Declarations + */ + +typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type; + +typedef struct pollable pollable; + +/// A pollable is something that can be polled: it has an epoll set to poll on, +/// and a wakeup fd for kicks +/// There are three broad types: +/// - PO_EMPTY - the empty pollable, used before file descriptors are added to +/// a pollset +/// - PO_FD - a pollable containing only one FD - used to optimize single-fd +/// pollsets (which are common with synchronous api usage) +/// - PO_MULTI - a pollable containing many fds +struct pollable { + pollable_type type; // immutable + gpr_refcount refs; + + int epfd; + grpc_wakeup_fd wakeup; + + // only for type fd... one ref to the owner fd + grpc_fd* owner_fd; + + grpc_pollset_set* pollset_set; + pollable* next; + pollable* prev; + + gpr_mu mu; + grpc_pollset_worker* root_worker; + + int event_cursor; + int event_count; + struct epoll_event events[MAX_EPOLL_EVENTS]; +}; + +static const char* pollable_type_string(pollable_type t) { + switch (t) { + case PO_MULTI: + return "pollset"; + case PO_FD: + return "fd"; + case PO_EMPTY: + return "empty"; + } + return ""; +} + +static char* pollable_desc(pollable* p) { + char* out; + gpr_asprintf(&out, "type=%s epfd=%d wakeup=%d", pollable_type_string(p->type), + p->epfd, p->wakeup.read_fd); + return out; +} + +/// Shared empty pollable - used by pollset to poll on until the first fd is +/// added +static pollable* g_empty_pollable; + +static grpc_error* pollable_create(pollable_type type, pollable** p); +#ifdef NDEBUG +static pollable* pollable_ref(pollable* p); +static void pollable_unref(pollable* p); +#define POLLABLE_REF(p, r) pollable_ref(p) +#define POLLABLE_UNREF(p, r) pollable_unref(p) +#else +static pollable* pollable_ref(pollable* p, int line, const char* reason); +static void pollable_unref(pollable* p, int line, const char* reason); +#define POLLABLE_REF(p, r) pollable_ref((p), __LINE__, (r)) +#define POLLABLE_UNREF(p, r) pollable_unref((p), __LINE__, (r)) +#endif + +/******************************************************************************* + * Fd Declarations + */ + +struct grpc_fd { + int fd; + /* refst format: + bit 0 : 1=Active / 0=Orphaned + bits 1-n : refcount + Ref/Unref by two to avoid altering the orphaned bit */ + gpr_atm refst; + + gpr_mu orphan_mu; + + gpr_mu pollable_mu; + pollable* pollable_obj; + + grpc_core::ManualConstructor read_closure; + grpc_core::ManualConstructor write_closure; + + struct grpc_fd* freelist_next; + grpc_closure* on_done_closure; + + /* The pollset that last noticed that the fd is readable. 
The actual type + * stored in this is (grpc_pollset *) */ + gpr_atm read_notifier_pollset; + + grpc_iomgr_object iomgr_object; +}; + +static void fd_global_init(void); +static void fd_global_shutdown(void); + +/******************************************************************************* + * Pollset Declarations + */ + +typedef struct { + grpc_pollset_worker* next; + grpc_pollset_worker* prev; +} pwlink; + +typedef enum { PWLINK_POLLABLE = 0, PWLINK_POLLSET, PWLINK_COUNT } pwlinks; + +struct grpc_pollset_worker { + bool kicked; + bool initialized_cv; +#ifndef NDEBUG + // debug aid: which thread started this worker + pid_t originator; +#endif + gpr_cv cv; + grpc_pollset* pollset; + pollable* pollable_obj; + + pwlink links[PWLINK_COUNT]; +}; + +struct grpc_pollset { + gpr_mu mu; + gpr_atm worker_count; + pollable* active_pollable; + bool kicked_without_poller; + grpc_closure* shutdown_closure; + bool already_shutdown; + grpc_pollset_worker* root_worker; + int containing_pollset_set_count; +}; + +/******************************************************************************* + * Pollset-set Declarations + */ + +struct grpc_pollset_set { + gpr_refcount refs; + gpr_mu mu; + grpc_pollset_set* parent; + + size_t pollset_count; + size_t pollset_capacity; + grpc_pollset** pollsets; + + size_t fd_count; + size_t fd_capacity; + grpc_fd** fds; +}; + +/******************************************************************************* + * Common helpers + */ + +static bool append_error(grpc_error** composite, grpc_error* error, + const char* desc) { + if (error == GRPC_ERROR_NONE) return true; + if (*composite == GRPC_ERROR_NONE) { + *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc); + } + *composite = grpc_error_add_child(*composite, error); + return false; +} + +/******************************************************************************* + * Fd Definitions + */ + +/* We need to keep a freelist not because of any concerns of malloc performance + * but instead so that implementations with multiple threads in (for example) + * epoll_wait deal with the race between pollset removal and incoming poll + * notifications. + * + * The problem is that the poller ultimately holds a reference to this + * object, so it is very difficult to know when is safe to free it, at least + * without some expensive synchronization. + * + * If we keep the object freelisted, in the worst case losing this race just + * becomes a spurious read notification on a reused fd. 
+ */ + +static grpc_fd* fd_freelist = nullptr; +static gpr_mu fd_freelist_mu; + +#ifndef NDEBUG +#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) +#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__) +static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file, + int line) { + if (grpc_trace_fd_refcount.enabled()) { + gpr_log(GPR_DEBUG, + "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", + fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), + gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line); + } +#else +#define REF_BY(fd, n, reason) ref_by(fd, n) +#define UNREF_BY(fd, n, reason) unref_by(fd, n) +static void ref_by(grpc_fd* fd, int n) { +#endif + GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); +} + +static void fd_destroy(void* arg, grpc_error* error) { + grpc_fd* fd = static_cast(arg); + /* Add the fd to the freelist */ + grpc_iomgr_unregister_object(&fd->iomgr_object); + POLLABLE_UNREF(fd->pollable_obj, "fd_pollable"); + gpr_mu_destroy(&fd->pollable_mu); + gpr_mu_destroy(&fd->orphan_mu); + gpr_mu_lock(&fd_freelist_mu); + fd->freelist_next = fd_freelist; + fd_freelist = fd; + + fd->read_closure->DestroyEvent(); + fd->write_closure->DestroyEvent(); + + gpr_mu_unlock(&fd_freelist_mu); +} + +#ifndef NDEBUG +static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file, + int line) { + if (grpc_trace_fd_refcount.enabled()) { + gpr_log(GPR_DEBUG, + "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", + fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), + gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); + } +#else +static void unref_by(grpc_fd* fd, int n) { +#endif + gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n); + if (old == n) { + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); + } else { + GPR_ASSERT(old > n); + } +} + +static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } + +static void fd_global_shutdown(void) { + gpr_mu_lock(&fd_freelist_mu); + gpr_mu_unlock(&fd_freelist_mu); + while (fd_freelist != nullptr) { + grpc_fd* fd = fd_freelist; + fd_freelist = fd_freelist->freelist_next; + gpr_free(fd); + } + gpr_mu_destroy(&fd_freelist_mu); +} + +static grpc_fd* fd_create(int fd, const char* name) { + grpc_fd* new_fd = nullptr; + + gpr_mu_lock(&fd_freelist_mu); + if (fd_freelist != nullptr) { + new_fd = fd_freelist; + fd_freelist = fd_freelist->freelist_next; + } + gpr_mu_unlock(&fd_freelist_mu); + + if (new_fd == nullptr) { + new_fd = static_cast(gpr_malloc(sizeof(grpc_fd))); + new_fd->read_closure.Init(); + new_fd->write_closure.Init(); + } + + gpr_mu_init(&new_fd->pollable_mu); + gpr_mu_init(&new_fd->orphan_mu); + new_fd->pollable_obj = nullptr; + gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1); + new_fd->fd = fd; + new_fd->read_closure->InitEvent(); + new_fd->write_closure->InitEvent(); + gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); + + new_fd->freelist_next = nullptr; + new_fd->on_done_closure = nullptr; + + char* fd_name; + gpr_asprintf(&fd_name, "%s fd=%d", name, fd); + grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name); +#ifndef NDEBUG + if (grpc_trace_fd_refcount.enabled()) { + gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name); + } +#endif + gpr_free(fd_name); + return new_fd; +} + +static int fd_wrapped_fd(grpc_fd* fd) { + int ret_fd = fd->fd; + return (gpr_atm_acq_load(&fd->refst) & 1) ? 
ret_fd : -1; +} + +static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, + bool already_closed, const char* reason) { + bool is_fd_closed = already_closed; + + gpr_mu_lock(&fd->orphan_mu); + + fd->on_done_closure = on_done; + + /* If release_fd is not NULL, we should be relinquishing control of the file + descriptor fd->fd (but we still own the grpc_fd structure). */ + if (release_fd != nullptr) { + *release_fd = fd->fd; + } else if (!is_fd_closed) { + close(fd->fd); + is_fd_closed = true; + } + + if (!is_fd_closed) { + gpr_log(GPR_DEBUG, "TODO: handle fd removal?"); + } + + /* Remove the active status but keep referenced. We want this grpc_fd struct + to be alive (and not added to freelist) until the end of this function */ + REF_BY(fd, 1, reason); + + GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_NONE); + + gpr_mu_unlock(&fd->orphan_mu); + + UNREF_BY(fd, 2, reason); /* Drop the reference */ +} + +static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) { + gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); + return (grpc_pollset*)notifier; +} + +static bool fd_is_shutdown(grpc_fd* fd) { + return fd->read_closure->IsShutdown(); +} + +/* Might be called multiple times */ +static void fd_shutdown(grpc_fd* fd, grpc_error* why) { + if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) { + shutdown(fd->fd, SHUT_RDWR); + fd->write_closure->SetShutdown(GRPC_ERROR_REF(why)); + } + GRPC_ERROR_UNREF(why); +} + +static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) { + fd->read_closure->NotifyOn(closure); +} + +static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) { + fd->write_closure->NotifyOn(closure); +} + +/******************************************************************************* + * Pollable Definitions + */ + +static grpc_error* pollable_create(pollable_type type, pollable** p) { + *p = nullptr; + + int epfd = epoll_create1(EPOLL_CLOEXEC); + if (epfd == -1) { + return GRPC_OS_ERROR(errno, "epoll_create1"); + } + *p = static_cast(gpr_malloc(sizeof(**p))); + grpc_error* err = grpc_wakeup_fd_init(&(*p)->wakeup); + if (err != GRPC_ERROR_NONE) { + close(epfd); + gpr_free(*p); + *p = nullptr; + return err; + } + struct epoll_event ev; + ev.events = static_cast(EPOLLIN | EPOLLET); + ev.data.ptr = (void*)(1 | (intptr_t) & (*p)->wakeup); + if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) { + err = GRPC_OS_ERROR(errno, "epoll_ctl"); + close(epfd); + grpc_wakeup_fd_destroy(&(*p)->wakeup); + gpr_free(*p); + *p = nullptr; + return err; + } + + (*p)->type = type; + gpr_ref_init(&(*p)->refs, 1); + gpr_mu_init(&(*p)->mu); + (*p)->epfd = epfd; + (*p)->owner_fd = nullptr; + (*p)->pollset_set = nullptr; + (*p)->next = (*p)->prev = *p; + (*p)->root_worker = nullptr; + (*p)->event_cursor = 0; + (*p)->event_count = 0; + return GRPC_ERROR_NONE; +} + +#ifdef NDEBUG +static pollable* pollable_ref(pollable* p) { +#else +static pollable* pollable_ref(pollable* p, int line, const char* reason) { + if (grpc_trace_pollable_refcount.enabled()) { + int r = static_cast gpr_atm_no_barrier_load(&p->refs.count); + gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG, + "POLLABLE:%p ref %d->%d %s", p, r, r + 1, reason); + } +#endif + gpr_ref(&p->refs); + return p; +} + +#ifdef NDEBUG +static void pollable_unref(pollable* p) { +#else +static void pollable_unref(pollable* p, int line, const char* reason) { + if (p == nullptr) return; + if (grpc_trace_pollable_refcount.enabled()) { + int r = static_cast 
gpr_atm_no_barrier_load(&p->refs.count); + gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG, + "POLLABLE:%p unref %d->%d %s", p, r, r - 1, reason); + } +#endif + if (p != nullptr && gpr_unref(&p->refs)) { + close(p->epfd); + grpc_wakeup_fd_destroy(&p->wakeup); + gpr_free(p); + } +} + +static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) { + grpc_error* error = GRPC_ERROR_NONE; + static const char* err_desc = "pollable_add_fd"; + const int epfd = p->epfd; + + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p); + } + + struct epoll_event ev_fd; + ev_fd.events = + static_cast(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE); + ev_fd.data.ptr = fd; + if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) { + switch (errno) { + case EEXIST: + break; + default: + append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc); + } + } + + return error; +} + +/******************************************************************************* + * Pollset Definitions + */ + +GPR_TLS_DECL(g_current_thread_pollset); +GPR_TLS_DECL(g_current_thread_worker); + +/* Global state management */ +static grpc_error* pollset_global_init(void) { + gpr_tls_init(&g_current_thread_pollset); + gpr_tls_init(&g_current_thread_worker); + return pollable_create(PO_EMPTY, &g_empty_pollable); +} + +static void pollset_global_shutdown(void) { + POLLABLE_UNREF(g_empty_pollable, "g_empty_pollable"); + gpr_tls_destroy(&g_current_thread_pollset); + gpr_tls_destroy(&g_current_thread_worker); +} + +/* pollset->mu must be held while calling this function */ +static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) { + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, + "PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) " + "rw=%p (target:NULL) cpsc=%d (target:0)", + pollset, pollset->active_pollable, pollset->shutdown_closure, + pollset->root_worker, pollset->containing_pollset_set_count); + } + if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr && + pollset->containing_pollset_set_count == 0) { + GPR_TIMER_MARK("pollset_finish_shutdown", 0); + GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE); + pollset->shutdown_closure = nullptr; + pollset->already_shutdown = true; + } +} + +/* pollset->mu must be held before calling this function, + * pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not be + * held */ +static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) { + GPR_TIMER_SCOPE("kick_one_worker", 0); + pollable* p = specific_worker->pollable_obj; + grpc_core::mu_guard lock(&p->mu); + GPR_ASSERT(specific_worker != nullptr); + if (specific_worker->kicked) { + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked", p); + } + GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); + return GRPC_ERROR_NONE; + } + if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p); + } + GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); + specific_worker->kicked = true; + return GRPC_ERROR_NONE; + } + if (specific_worker == p->root_worker) { + GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p); + } + specific_worker->kicked = true; + grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup); + return error; + } + if (specific_worker->initialized_cv) { + 
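+    // This worker was queued behind another poller, so it is parked on (or
+    // headed for) its condition variable rather than epoll_wait; signalling
+    // the cv below is enough to wake it.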
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p kicked_specific_via_cv", p); + } + specific_worker->kicked = true; + gpr_cv_signal(&specific_worker->cv); + return GRPC_ERROR_NONE; + } + // we can get here during end_worker after removing specific_worker from the + // pollable list but before removing it from the pollset list + return GRPC_ERROR_NONE; +} + +static grpc_error* pollset_kick(grpc_pollset* pollset, + grpc_pollset_worker* specific_worker) { + GPR_TIMER_SCOPE("pollset_kick", 0); + GRPC_STATS_INC_POLLSET_KICK(); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, + "PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p", + pollset, specific_worker, + (void*)gpr_tls_get(&g_current_thread_pollset), + (void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker); + } + if (specific_worker == nullptr) { + if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) { + if (pollset->root_worker == nullptr) { + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset); + } + GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(); + pollset->kicked_without_poller = true; + return GRPC_ERROR_NONE; + } else { + // We've been asked to kick a poller, but we haven't been told which one + // ... any will do + // We look at the pollset worker list because: + // 1. the pollable list may include workers from other pollers, so we'd + // need to do an O(N) search + // 2. we'd additionally need to take the pollable lock, which we've so + // far avoided + // Now, we would prefer to wake a poller in cv_wait, and not in + // epoll_wait (since the latter would imply the need to do an additional + // wakeup) + // We know that if a worker is at the root of a pollable, it's (likely) + // also the root of a pollset, and we know that if a worker is NOT at + // the root of a pollset, it's (likely) not at the root of a pollable, + // so we take our chances and choose the SECOND worker enqueued against + // the pollset as a worker that's likely to be in cv_wait + return kick_one_worker( + pollset->root_worker->links[PWLINK_POLLSET].next); + } + } else { + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset); + } + GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); + return GRPC_ERROR_NONE; + } + } else { + return kick_one_worker(specific_worker); + } +} + +static grpc_error* pollset_kick_all(grpc_pollset* pollset) { + GPR_TIMER_SCOPE("pollset_kick_all", 0); + grpc_error* error = GRPC_ERROR_NONE; + const char* err_desc = "pollset_kick_all"; + grpc_pollset_worker* w = pollset->root_worker; + if (w != nullptr) { + do { + GRPC_STATS_INC_POLLSET_KICK(); + append_error(&error, kick_one_worker(w), err_desc); + w = w->links[PWLINK_POLLSET].next; + } while (w != pollset->root_worker); + } + return error; +} + +static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) { + gpr_mu_init(&pollset->mu); + gpr_atm_no_barrier_store(&pollset->worker_count, 0); + pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset"); + pollset->kicked_without_poller = false; + pollset->shutdown_closure = nullptr; + pollset->already_shutdown = false; + pollset->root_worker = nullptr; + pollset->containing_pollset_set_count = 0; + *mu = &pollset->mu; +} + +static int poll_deadline_to_millis_timeout(grpc_millis millis) { + if (millis == GRPC_MILLIS_INF_FUTURE) return -1; + grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now(); + if (delta > INT_MAX) + return INT_MAX; + else 
if (delta < 0) + return 0; + else + return static_cast(delta); +} + +static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) { + fd->read_closure->SetReady(); + + /* Note, it is possible that fd_become_readable might be called twice with + different 'notifier's when an fd becomes readable and it is in two epoll + sets (This can happen briefly during polling island merges). In such cases + it does not really matter which notifer is set as the read_notifier_pollset + (They would both point to the same polling island anyway) */ + /* Use release store to match with acquire load in fd_get_read_notifier */ + gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); +} + +static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); } + +static grpc_error* fd_get_or_become_pollable(grpc_fd* fd, pollable** p) { + gpr_mu_lock(&fd->pollable_mu); + grpc_error* error = GRPC_ERROR_NONE; + static const char* err_desc = "fd_get_or_become_pollable"; + if (fd->pollable_obj == nullptr) { + if (append_error(&error, pollable_create(PO_FD, &fd->pollable_obj), + err_desc)) { + fd->pollable_obj->owner_fd = fd; + if (!append_error(&error, pollable_add_fd(fd->pollable_obj, fd), + err_desc)) { + POLLABLE_UNREF(fd->pollable_obj, "fd_pollable"); + fd->pollable_obj = nullptr; + } + } + } + if (error == GRPC_ERROR_NONE) { + GPR_ASSERT(fd->pollable_obj != nullptr); + *p = POLLABLE_REF(fd->pollable_obj, "pollset"); + } else { + GPR_ASSERT(fd->pollable_obj == nullptr); + *p = nullptr; + } + gpr_mu_unlock(&fd->pollable_mu); + return error; +} + +/* pollset->po.mu lock must be held by the caller before calling this */ +static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { + GPR_TIMER_SCOPE("pollset_shutdown", 0); + GPR_ASSERT(pollset->shutdown_closure == nullptr); + pollset->shutdown_closure = closure; + GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset)); + pollset_maybe_finish_shutdown(pollset); +} + +static grpc_error* pollable_process_events(grpc_pollset* pollset, + pollable* pollable_obj, bool drain) { + GPR_TIMER_SCOPE("pollable_process_events", 0); + static const char* err_desc = "pollset_process_events"; + // Use a simple heuristic to determine how many fd events to process + // per loop iteration. 
(events/workers) + int handle_count = 1; + int worker_count = gpr_atm_no_barrier_load(&pollset->worker_count); + GPR_ASSERT(worker_count > 0); + handle_count = + (pollable_obj->event_count - pollable_obj->event_cursor) / worker_count; + if (handle_count == 0) { + handle_count = 1; + } else if (handle_count > MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) { + handle_count = MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL; + } + grpc_error* error = GRPC_ERROR_NONE; + for (int i = 0; (drain || i < handle_count) && + pollable_obj->event_cursor != pollable_obj->event_count; + i++) { + int n = pollable_obj->event_cursor++; + struct epoll_event* ev = &pollable_obj->events[n]; + void* data_ptr = ev->data.ptr; + if (1 & (intptr_t)data_ptr) { + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr); + } + append_error(&error, + grpc_wakeup_fd_consume_wakeup( + (grpc_wakeup_fd*)((~static_cast(1)) & + (intptr_t)data_ptr)), + err_desc); + } else { + grpc_fd* fd = static_cast(data_ptr); + bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0; + bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0; + bool write_ev = (ev->events & EPOLLOUT) != 0; + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, + "PS:%p got fd %p: cancel=%d read=%d " + "write=%d", + pollset, fd, cancel, read_ev, write_ev); + } + if (read_ev || cancel) { + fd_become_readable(fd, pollset); + } + if (write_ev || cancel) { + fd_become_writable(fd); + } + } + } + + return error; +} + +/* pollset_shutdown is guaranteed to be called before pollset_destroy. */ +static void pollset_destroy(grpc_pollset* pollset) { + POLLABLE_UNREF(pollset->active_pollable, "pollset"); + pollset->active_pollable = nullptr; + gpr_mu_destroy(&pollset->mu); +} + +static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) { + GPR_TIMER_SCOPE("pollable_epoll", 0); + int timeout = poll_deadline_to_millis_timeout(deadline); + + if (grpc_polling_trace.enabled()) { + char* desc = pollable_desc(p); + gpr_log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout); + gpr_free(desc); + } + + if (timeout != 0) { + GRPC_SCHEDULING_START_BLOCKING_REGION; + } + int r; + do { + GRPC_STATS_INC_SYSCALL_POLL(); + r = epoll_wait(p->epfd, p->events, MAX_EPOLL_EVENTS, timeout); + } while (r < 0 && errno == EINTR); + if (timeout != 0) { + GRPC_SCHEDULING_END_BLOCKING_REGION; + } + + if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait"); + + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "POLLABLE:%p got %d events", p, r); + } + + p->event_cursor = 0; + p->event_count = r; + + return GRPC_ERROR_NONE; +} + +/* Return true if first in list */ +static bool worker_insert(grpc_pollset_worker** root_worker, + grpc_pollset_worker* worker, pwlinks link) { + if (*root_worker == nullptr) { + *root_worker = worker; + worker->links[link].next = worker->links[link].prev = worker; + return true; + } else { + worker->links[link].next = *root_worker; + worker->links[link].prev = worker->links[link].next->links[link].prev; + worker->links[link].next->links[link].prev = worker; + worker->links[link].prev->links[link].next = worker; + return false; + } +} + +/* returns the new root IFF the root changed */ +typedef enum { WRR_NEW_ROOT, WRR_EMPTIED, WRR_REMOVED } worker_remove_result; + +static worker_remove_result worker_remove(grpc_pollset_worker** root_worker, + grpc_pollset_worker* worker, + pwlinks link) { + if (worker == *root_worker) { + if (worker == worker->links[link].next) { + *root_worker = nullptr; + return WRR_EMPTIED; + } 
else { + *root_worker = worker->links[link].next; + worker->links[link].prev->links[link].next = worker->links[link].next; + worker->links[link].next->links[link].prev = worker->links[link].prev; + return WRR_NEW_ROOT; + } + } else { + worker->links[link].prev->links[link].next = worker->links[link].next; + worker->links[link].next->links[link].prev = worker->links[link].prev; + return WRR_REMOVED; + } +} + +/* Return true if this thread should poll */ +static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker, + grpc_pollset_worker** worker_hdl, + grpc_millis deadline) { + GPR_TIMER_SCOPE("begin_worker", 0); + bool do_poll = + (pollset->shutdown_closure == nullptr && !pollset->already_shutdown); + gpr_atm_no_barrier_fetch_add(&pollset->worker_count, 1); + if (worker_hdl != nullptr) *worker_hdl = worker; + worker->initialized_cv = false; + worker->kicked = false; + worker->pollset = pollset; + worker->pollable_obj = + POLLABLE_REF(pollset->active_pollable, "pollset_worker"); + worker_insert(&pollset->root_worker, worker, PWLINK_POLLSET); + gpr_mu_lock(&worker->pollable_obj->mu); + if (!worker_insert(&worker->pollable_obj->root_worker, worker, + PWLINK_POLLABLE)) { + worker->initialized_cv = true; + gpr_cv_init(&worker->cv); + gpr_mu_unlock(&pollset->mu); + if (grpc_polling_trace.enabled() && + worker->pollable_obj->root_worker != worker) { + gpr_log(GPR_INFO, "PS:%p wait %p w=%p for %dms", pollset, + worker->pollable_obj, worker, + poll_deadline_to_millis_timeout(deadline)); + } + while (do_poll && worker->pollable_obj->root_worker != worker) { + if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu, + grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) { + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset, + worker->pollable_obj, worker); + } + do_poll = false; + } else if (worker->kicked) { + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset, + worker->pollable_obj, worker); + } + do_poll = false; + } else if (grpc_polling_trace.enabled() && + worker->pollable_obj->root_worker != worker) { + gpr_log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p", pollset, + worker->pollable_obj, worker); + } + } + grpc_core::ExecCtx::Get()->InvalidateNow(); + } else { + gpr_mu_unlock(&pollset->mu); + } + gpr_mu_unlock(&worker->pollable_obj->mu); + + return do_poll; +} + +static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker, + grpc_pollset_worker** worker_hdl) { + GPR_TIMER_SCOPE("end_worker", 0); + gpr_mu_lock(&pollset->mu); + gpr_mu_lock(&worker->pollable_obj->mu); + switch (worker_remove(&worker->pollable_obj->root_worker, worker, + PWLINK_POLLABLE)) { + case WRR_NEW_ROOT: { + // wakeup new poller + grpc_pollset_worker* new_root = worker->pollable_obj->root_worker; + GPR_ASSERT(new_root->initialized_cv); + gpr_cv_signal(&new_root->cv); + break; + } + case WRR_EMPTIED: + if (pollset->active_pollable != worker->pollable_obj) { + // pollable no longer being polled: flush events + pollable_process_events(pollset, worker->pollable_obj, true); + } + break; + case WRR_REMOVED: + break; + } + gpr_mu_unlock(&worker->pollable_obj->mu); + POLLABLE_UNREF(worker->pollable_obj, "pollset_worker"); + if (worker_remove(&pollset->root_worker, worker, PWLINK_POLLSET) == + WRR_EMPTIED) { + pollset_maybe_finish_shutdown(pollset); + } + if (worker->initialized_cv) { + gpr_cv_destroy(&worker->cv); + } + gpr_atm_no_barrier_fetch_add(&pollset->worker_count, -1); +} + +#ifndef NDEBUG +static long gettid(void) 
{ return syscall(__NR_gettid); } +#endif + +/* pollset->mu lock must be held by the caller before calling this. + The function pollset_work() may temporarily release the lock (pollset->po.mu) + during the course of its execution but it will always re-acquire the lock and + ensure that it is held by the time the function returns */ +static grpc_error* pollset_work(grpc_pollset* pollset, + grpc_pollset_worker** worker_hdl, + grpc_millis deadline) { + GPR_TIMER_SCOPE("pollset_work", 0); +#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP + grpc_pollset_worker* worker = + (grpc_pollset_worker*)gpr_malloc(sizeof(*worker)); +#define WORKER_PTR (worker) +#else + grpc_pollset_worker worker; +#define WORKER_PTR (&worker) +#endif +#ifndef NDEBUG + WORKER_PTR->originator = gettid(); +#endif + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, + "PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR + " kwp=%d pollable=%p", + pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(), + deadline, pollset->kicked_without_poller, pollset->active_pollable); + } + static const char* err_desc = "pollset_work"; + grpc_error* error = GRPC_ERROR_NONE; + if (pollset->kicked_without_poller) { + pollset->kicked_without_poller = false; + } else { + if (begin_worker(pollset, WORKER_PTR, worker_hdl, deadline)) { + gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); + gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR); + if (WORKER_PTR->pollable_obj->event_cursor == + WORKER_PTR->pollable_obj->event_count) { + append_error(&error, pollable_epoll(WORKER_PTR->pollable_obj, deadline), + err_desc); + } + append_error( + &error, + pollable_process_events(pollset, WORKER_PTR->pollable_obj, false), + err_desc); + grpc_core::ExecCtx::Get()->Flush(); + gpr_tls_set(&g_current_thread_pollset, 0); + gpr_tls_set(&g_current_thread_worker, 0); + } + end_worker(pollset, WORKER_PTR, worker_hdl); + } +#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP + gpr_free(worker); +#endif +#undef WORKER_PTR + return error; +} + +static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked( + grpc_pollset* pollset, grpc_fd* fd) { + static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd"; + grpc_error* error = GRPC_ERROR_NONE; + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, + "PS:%p add fd %p (%d); transition pollable from empty to fd", + pollset, fd, fd->fd); + } + append_error(&error, pollset_kick_all(pollset), err_desc); + POLLABLE_UNREF(pollset->active_pollable, "pollset"); + append_error(&error, fd_get_or_become_pollable(fd, &pollset->active_pollable), + err_desc); + return error; +} + +static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked( + grpc_pollset* pollset, grpc_fd* and_add_fd) { + static const char* err_desc = "pollset_transition_pollable_from_fd_to_multi"; + grpc_error* error = GRPC_ERROR_NONE; + if (grpc_polling_trace.enabled()) { + gpr_log( + GPR_INFO, + "PS:%p add fd %p (%d); transition pollable from fd %p to multipoller", + pollset, and_add_fd, and_add_fd ? 
and_add_fd->fd : -1, + pollset->active_pollable->owner_fd); + } + append_error(&error, pollset_kick_all(pollset), err_desc); + grpc_fd* initial_fd = pollset->active_pollable->owner_fd; + POLLABLE_UNREF(pollset->active_pollable, "pollset"); + pollset->active_pollable = nullptr; + if (append_error(&error, pollable_create(PO_MULTI, &pollset->active_pollable), + err_desc)) { + append_error(&error, pollable_add_fd(pollset->active_pollable, initial_fd), + err_desc); + if (and_add_fd != nullptr) { + append_error(&error, + pollable_add_fd(pollset->active_pollable, and_add_fd), + err_desc); + } + } + return error; +} + +/* expects pollsets locked, flag whether fd is locked or not */ +static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) { + grpc_error* error = GRPC_ERROR_NONE; + pollable* po_at_start = + POLLABLE_REF(pollset->active_pollable, "pollset_add_fd"); + switch (pollset->active_pollable->type) { + case PO_EMPTY: + /* empty pollable --> single fd pollable */ + error = pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd); + break; + case PO_FD: + gpr_mu_lock(&po_at_start->owner_fd->orphan_mu); + if ((gpr_atm_no_barrier_load(&pollset->active_pollable->owner_fd->refst) & + 1) == 0) { + error = + pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd); + } else { + /* fd --> multipoller */ + error = + pollset_transition_pollable_from_fd_to_multi_locked(pollset, fd); + } + gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu); + break; + case PO_MULTI: + error = pollable_add_fd(pollset->active_pollable, fd); + break; + } + if (error != GRPC_ERROR_NONE) { + POLLABLE_UNREF(pollset->active_pollable, "pollset"); + pollset->active_pollable = po_at_start; + } else { + POLLABLE_UNREF(po_at_start, "pollset_add_fd"); + } + return error; +} + +static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset, + pollable** pollable_obj) { + grpc_error* error = GRPC_ERROR_NONE; + pollable* po_at_start = + POLLABLE_REF(pollset->active_pollable, "pollset_as_multipollable"); + switch (pollset->active_pollable->type) { + case PO_EMPTY: + POLLABLE_UNREF(pollset->active_pollable, "pollset"); + error = pollable_create(PO_MULTI, &pollset->active_pollable); + /* Any workers currently polling on this pollset must now be woked up so + * that they can pick up the new active_pollable */ + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, + "PS:%p active pollable transition from empty to multi", + pollset); + } + static const char* err_desc = + "pollset_as_multipollable_locked: empty -> multi"; + append_error(&error, pollset_kick_all(pollset), err_desc); + break; + case PO_FD: + gpr_mu_lock(&po_at_start->owner_fd->orphan_mu); + if ((gpr_atm_no_barrier_load(&pollset->active_pollable->owner_fd->refst) & + 1) == 0) { + POLLABLE_UNREF(pollset->active_pollable, "pollset"); + error = pollable_create(PO_MULTI, &pollset->active_pollable); + } else { + error = pollset_transition_pollable_from_fd_to_multi_locked(pollset, + nullptr); + } + gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu); + break; + case PO_MULTI: + break; + } + if (error != GRPC_ERROR_NONE) { + POLLABLE_UNREF(pollset->active_pollable, "pollset"); + pollset->active_pollable = po_at_start; + *pollable_obj = nullptr; + } else { + *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set"); + POLLABLE_UNREF(po_at_start, "pollset_as_multipollable"); + } + return error; +} + +static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) { + GPR_TIMER_SCOPE("pollset_add_fd", 0); + gpr_mu_lock(&pollset->mu); + 
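+  // Summary of the active_pollable transitions performed under pollset->mu
+  // by pollset_add_fd_locked():
+  //   PO_EMPTY --add fd--> PO_FD     (reuse the fd's own pollable)
+  //   PO_FD    --add fd--> PO_MULTI  (fresh epoll set; both fds re-added)
+  //   PO_MULTI --add fd--> PO_MULTI  (pollable_add_fd only)
+  // If the single owner fd of a PO_FD pollable is already orphaned, the
+  // pollset instead takes the empty->fd path again with the new fd.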
grpc_error* error = pollset_add_fd_locked(pollset, fd); + gpr_mu_unlock(&pollset->mu); + GRPC_LOG_IF_ERROR("pollset_add_fd", error); +} + +/******************************************************************************* + * Pollset-set Definitions + */ + +static grpc_pollset_set* pss_lock_adam(grpc_pollset_set* pss) { + gpr_mu_lock(&pss->mu); + while (pss->parent != nullptr) { + gpr_mu_unlock(&pss->mu); + pss = pss->parent; + gpr_mu_lock(&pss->mu); + } + return pss; +} + +static grpc_pollset_set* pollset_set_create(void) { + grpc_pollset_set* pss = + static_cast(gpr_zalloc(sizeof(*pss))); + gpr_mu_init(&pss->mu); + gpr_ref_init(&pss->refs, 1); + return pss; +} + +static void pollset_set_unref(grpc_pollset_set* pss) { + if (pss == nullptr) return; + if (!gpr_unref(&pss->refs)) return; + pollset_set_unref(pss->parent); + gpr_mu_destroy(&pss->mu); + for (size_t i = 0; i < pss->pollset_count; i++) { + gpr_mu_lock(&pss->pollsets[i]->mu); + if (0 == --pss->pollsets[i]->containing_pollset_set_count) { + pollset_maybe_finish_shutdown(pss->pollsets[i]); + } + gpr_mu_unlock(&pss->pollsets[i]->mu); + } + for (size_t i = 0; i < pss->fd_count; i++) { + UNREF_BY(pss->fds[i], 2, "pollset_set"); + } + gpr_free(pss->pollsets); + gpr_free(pss->fds); + gpr_free(pss); +} + +static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) { + GPR_TIMER_SCOPE("pollset_set_add_fd", 0); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd); + } + grpc_error* error = GRPC_ERROR_NONE; + static const char* err_desc = "pollset_set_add_fd"; + pss = pss_lock_adam(pss); + for (size_t i = 0; i < pss->pollset_count; i++) { + append_error(&error, pollable_add_fd(pss->pollsets[i]->active_pollable, fd), + err_desc); + } + if (pss->fd_count == pss->fd_capacity) { + pss->fd_capacity = GPR_MAX(pss->fd_capacity * 2, 8); + pss->fds = static_cast( + gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds))); + } + REF_BY(fd, 2, "pollset_set"); + pss->fds[pss->fd_count++] = fd; + gpr_mu_unlock(&pss->mu); + + GRPC_LOG_IF_ERROR(err_desc, error); +} + +static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) { + GPR_TIMER_SCOPE("pollset_set_del_fd", 0); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PSS:%p: del fd %p", pss, fd); + } + pss = pss_lock_adam(pss); + size_t i; + for (i = 0; i < pss->fd_count; i++) { + if (pss->fds[i] == fd) { + UNREF_BY(fd, 2, "pollset_set"); + break; + } + } + GPR_ASSERT(i != pss->fd_count); + for (; i < pss->fd_count - 1; i++) { + pss->fds[i] = pss->fds[i + 1]; + } + pss->fd_count--; + gpr_mu_unlock(&pss->mu); +} + +static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) { + GPR_TIMER_SCOPE("pollset_set_del_pollset", 0); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps); + } + pss = pss_lock_adam(pss); + size_t i; + for (i = 0; i < pss->pollset_count; i++) { + if (pss->pollsets[i] == ps) { + break; + } + } + GPR_ASSERT(i != pss->pollset_count); + for (; i < pss->pollset_count - 1; i++) { + pss->pollsets[i] = pss->pollsets[i + 1]; + } + pss->pollset_count--; + gpr_mu_unlock(&pss->mu); + gpr_mu_lock(&ps->mu); + if (0 == --ps->containing_pollset_set_count) { + pollset_maybe_finish_shutdown(ps); + } + gpr_mu_unlock(&ps->mu); +} + +// add all fds to pollables, and output a new array of unorphaned out_fds +// assumes pollsets are multipollable +static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count, + grpc_pollset** pollsets, + size_t pollset_count, + const 
char* err_desc, grpc_fd** out_fds, + size_t* out_fd_count) { + GPR_TIMER_SCOPE("add_fds_to_pollsets", 0); + grpc_error* error = GRPC_ERROR_NONE; + for (size_t i = 0; i < fd_count; i++) { + gpr_mu_lock(&fds[i]->orphan_mu); + if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) { + gpr_mu_unlock(&fds[i]->orphan_mu); + UNREF_BY(fds[i], 2, "pollset_set"); + } else { + for (size_t j = 0; j < pollset_count; j++) { + append_error(&error, + pollable_add_fd(pollsets[j]->active_pollable, fds[i]), + err_desc); + } + gpr_mu_unlock(&fds[i]->orphan_mu); + out_fds[(*out_fd_count)++] = fds[i]; + } + } + return error; +} + +static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) { + GPR_TIMER_SCOPE("pollset_set_add_pollset", 0); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps); + } + grpc_error* error = GRPC_ERROR_NONE; + static const char* err_desc = "pollset_set_add_pollset"; + pollable* pollable_obj = nullptr; + gpr_mu_lock(&ps->mu); + if (!GRPC_LOG_IF_ERROR(err_desc, + pollset_as_multipollable_locked(ps, &pollable_obj))) { + GPR_ASSERT(pollable_obj == nullptr); + gpr_mu_unlock(&ps->mu); + return; + } + ps->containing_pollset_set_count++; + gpr_mu_unlock(&ps->mu); + pss = pss_lock_adam(pss); + size_t initial_fd_count = pss->fd_count; + pss->fd_count = 0; + append_error(&error, + add_fds_to_pollsets(pss->fds, initial_fd_count, &ps, 1, err_desc, + pss->fds, &pss->fd_count), + err_desc); + if (pss->pollset_count == pss->pollset_capacity) { + pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8); + pss->pollsets = static_cast(gpr_realloc( + pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets))); + } + pss->pollsets[pss->pollset_count++] = ps; + gpr_mu_unlock(&pss->mu); + POLLABLE_UNREF(pollable_obj, "pollset_set"); + + GRPC_LOG_IF_ERROR(err_desc, error); +} + +static void pollset_set_add_pollset_set(grpc_pollset_set* a, + grpc_pollset_set* b) { + GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PSS: merge (%p, %p)", a, b); + } + grpc_error* error = GRPC_ERROR_NONE; + static const char* err_desc = "pollset_set_add_fd"; + for (;;) { + if (a == b) { + // pollset ancestors are the same: nothing to do + return; + } + if (a > b) { + GPR_SWAP(grpc_pollset_set*, a, b); + } + gpr_mu* a_mu = &a->mu; + gpr_mu* b_mu = &b->mu; + gpr_mu_lock(a_mu); + gpr_mu_lock(b_mu); + if (a->parent != nullptr) { + a = a->parent; + } else if (b->parent != nullptr) { + b = b->parent; + } else { + break; // exit loop, both pollsets locked + } + gpr_mu_unlock(a_mu); + gpr_mu_unlock(b_mu); + } + // try to do the least copying possible + // TODO(ctiller): there's probably a better heuristic here + const size_t a_size = a->fd_count + a->pollset_count; + const size_t b_size = b->fd_count + b->pollset_count; + if (b_size > a_size) { + GPR_SWAP(grpc_pollset_set*, a, b); + } + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "PSS: parent %p to %p", b, a); + } + gpr_ref(&a->refs); + b->parent = a; + if (a->fd_capacity < a->fd_count + b->fd_count) { + a->fd_capacity = GPR_MAX(2 * a->fd_capacity, a->fd_count + b->fd_count); + a->fds = static_cast( + gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds))); + } + size_t initial_a_fd_count = a->fd_count; + a->fd_count = 0; + append_error( + &error, + add_fds_to_pollsets(a->fds, initial_a_fd_count, b->pollsets, + b->pollset_count, "merge_a2b", a->fds, &a->fd_count), + err_desc); + append_error( + &error, + add_fds_to_pollsets(b->fds, b->fd_count, 
a->pollsets, a->pollset_count, + "merge_b2a", a->fds, &a->fd_count), + err_desc); + if (a->pollset_capacity < a->pollset_count + b->pollset_count) { + a->pollset_capacity = + GPR_MAX(2 * a->pollset_capacity, a->pollset_count + b->pollset_count); + a->pollsets = static_cast( + gpr_realloc(a->pollsets, a->pollset_capacity * sizeof(*a->pollsets))); + } + if (b->pollset_count > 0) { + memcpy(a->pollsets + a->pollset_count, b->pollsets, + b->pollset_count * sizeof(*b->pollsets)); + } + a->pollset_count += b->pollset_count; + gpr_free(b->fds); + gpr_free(b->pollsets); + b->fds = nullptr; + b->pollsets = nullptr; + b->fd_count = b->fd_capacity = b->pollset_count = b->pollset_capacity = 0; + gpr_mu_unlock(&a->mu); + gpr_mu_unlock(&b->mu); +} + +static void pollset_set_del_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) {} + +/******************************************************************************* + * Event engine binding + */ + +static void shutdown_engine(void) { + fd_global_shutdown(); + pollset_global_shutdown(); +} + +static const grpc_event_engine_vtable vtable = { + sizeof(grpc_pollset), + + fd_create, + fd_wrapped_fd, + fd_orphan, + fd_shutdown, + fd_notify_on_read, + fd_notify_on_write, + fd_is_shutdown, + fd_get_read_notifier_pollset, + + pollset_init, + pollset_shutdown, + pollset_destroy, + pollset_work, + pollset_kick, + pollset_add_fd, + + pollset_set_create, + pollset_set_unref, // destroy ==> unref 1 public ref + pollset_set_add_pollset, + pollset_set_del_pollset, + pollset_set_add_pollset_set, + pollset_set_del_pollset_set, + pollset_set_add_fd, + pollset_set_del_fd, + + shutdown_engine, +}; + +const grpc_event_engine_vtable* grpc_init_epollex_linux( + bool explicitly_requested) { + if (!grpc_has_wakeup_fd()) { + gpr_log(GPR_ERROR, "Skipping epollex because of no wakeup fd."); + return nullptr; + } + + if (!grpc_is_epollexclusive_available()) { + gpr_log(GPR_INFO, "Skipping epollex because it is not supported."); + return nullptr; + } + + fd_global_init(); + + if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { + pollset_global_shutdown(); + fd_global_shutdown(); + return nullptr; + } + + return &vtable; +} + +#else /* defined(GRPC_LINUX_EPOLL_CREATE1) */ +#if defined(GRPC_POSIX_SOCKET) +#include "src/core/lib/iomgr/ev_epollex_linux.h" +/* If GRPC_LINUX_EPOLL_CREATE1 is not defined, it means + epoll_create1 is not available. 
Return NULL */ +const grpc_event_engine_vtable* grpc_init_epollex_linux( + bool explicitly_requested) { + return nullptr; +} +#endif /* defined(GRPC_POSIX_SOCKET) */ + +#endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.h b/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.h index cff9b43c0..e70ba72a7 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.h +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_epollex_linux.h @@ -19,10 +19,12 @@ #ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLLEX_LINUX_H #define GRPC_CORE_LIB_IOMGR_EV_EPOLLEX_LINUX_H +#include + #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/port.h" -const grpc_event_engine_vtable *grpc_init_epollex_linux( +const grpc_event_engine_vtable* grpc_init_epollex_linux( bool explicitly_requested); #endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLEX_LINUX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.c b/Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.cc similarity index 70% rename from Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.c rename to Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.cc index 4d8bdf140..494bc71c1 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.c +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.cc @@ -16,15 +16,21 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" +#include +#include + /* This polling engine is only relevant on linux kernels supporting epoll() */ -#ifdef GRPC_LINUX_EPOLL +#ifdef GRPC_LINUX_EPOLL_CREATE1 #include "src/core/lib/iomgr/ev_epollsig_linux.h" #include #include +#include #include #include #include @@ -34,25 +40,25 @@ #include #include -#include #include -#include -#include #include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/tls.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/lockfree_event.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" #include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" -#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1) +#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1) -#define GRPC_POLLING_TRACE(...) \ - if (GRPC_TRACER_ON(grpc_polling_trace)) { \ - gpr_log(GPR_INFO, __VA_ARGS__); \ +#define GRPC_POLLING_TRACE(...) 
\ + if (grpc_polling_trace.enabled()) { \ + gpr_log(GPR_INFO, __VA_ARGS__); \ } static int grpc_wakeup_signal = -1; @@ -87,10 +93,10 @@ typedef struct poll_obj { poll_obj_type obj_type; #endif gpr_mu mu; - struct polling_island *pi; + struct polling_island* pi; } poll_obj; -const char *poll_obj_string(poll_obj_type po_type) { +const char* poll_obj_string(poll_obj_type po_type) { switch (po_type) { case POLL_OBJ_FD: return "fd"; @@ -103,11 +109,11 @@ const char *poll_obj_string(poll_obj_type po_type) { GPR_UNREACHABLE_CODE(return "UNKNOWN"); } -/******************************************************************************* - * Fd Declarations - */ + /******************************************************************************* + * Fd Declarations + */ -#define FD_FROM_PO(po) ((grpc_fd *)(po)) +#define FD_FROM_PO(po) ((grpc_fd*)(po)) struct grpc_fd { poll_obj po; @@ -124,11 +130,11 @@ struct grpc_fd { valid */ bool orphaned; - gpr_atm read_closure; - gpr_atm write_closure; + grpc_core::ManualConstructor read_closure; + grpc_core::ManualConstructor write_closure; - struct grpc_fd *freelist_next; - grpc_closure *on_done_closure; + struct grpc_fd* freelist_next; + grpc_closure* on_done_closure; /* The pollset that last noticed that the fd is readable. The actual type * stored in this is (grpc_pollset *) */ @@ -139,14 +145,14 @@ struct grpc_fd { /* Reference counting for fds */ #ifndef NDEBUG -static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line); -static void fd_unref(grpc_fd *fd, const char *reason, const char *file, +static void fd_ref(grpc_fd* fd, const char* reason, const char* file, int line); +static void fd_unref(grpc_fd* fd, const char* reason, const char* file, int line); #define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__) #define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__) #else -static void fd_ref(grpc_fd *fd); -static void fd_unref(grpc_fd *fd); +static void fd_ref(grpc_fd* fd); +static void fd_unref(grpc_fd* fd); #define GRPC_FD_REF(fd, reason) fd_ref(fd) #define GRPC_FD_UNREF(fd, reason) fd_unref(fd) #endif @@ -161,13 +167,12 @@ static void fd_global_shutdown(void); #ifndef NDEBUG #define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__) -#define PI_UNREF(exec_ctx, p, r) \ - pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__) +#define PI_UNREF(p, r) pi_unref_dbg((p), (r), __FILE__, __LINE__) #else #define PI_ADD_REF(p, r) pi_add_ref((p)) -#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p)) +#define PI_UNREF(p, r) pi_unref((p)) #endif @@ -200,7 +205,7 @@ typedef struct polling_island { /* The file descriptors in the epoll set */ size_t fd_cnt; size_t fd_capacity; - grpc_fd **fds; + grpc_fd** fds; } polling_island; /******************************************************************************* @@ -212,8 +217,8 @@ struct grpc_pollset_worker { /* Used to prevent a worker from getting kicked multiple times */ gpr_atm is_kicked; - struct grpc_pollset_worker *next; - struct grpc_pollset_worker *prev; + struct grpc_pollset_worker* next; + struct grpc_pollset_worker* prev; }; struct grpc_pollset { @@ -224,7 +229,7 @@ struct grpc_pollset { bool shutting_down; /* Is the pollset shutting down ? */ bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? 
*/ - grpc_closure *shutdown_done; /* Called after after shutdown is complete */ + grpc_closure* shutdown_done; /* Called after after shutdown is complete */ }; /******************************************************************************* @@ -238,8 +243,8 @@ struct grpc_pollset_set { * Common helpers */ -static bool append_error(grpc_error **composite, grpc_error *error, - const char *desc) { +static bool append_error(grpc_error** composite, grpc_error* error, + const char* desc) { if (error == GRPC_ERROR_NONE) return true; if (*composite == GRPC_ERROR_NONE) { *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc); @@ -263,10 +268,10 @@ static grpc_wakeup_fd polling_island_wakeup_fd; /* The polling island being polled right now. See comments in workqueue_maybe_wakeup for why this is tracked. */ -static __thread polling_island *g_current_thread_polling_island; +static __thread polling_island* g_current_thread_polling_island; /* Forward declaration */ -static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi); +static void polling_island_delete(polling_island* pi); #ifdef GRPC_TSAN /* Currently TSAN may incorrectly flag data races between epoll_ctl and @@ -279,38 +284,40 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi); gpr_atm g_epoll_sync; #endif /* defined(GRPC_TSAN) */ -static void pi_add_ref(polling_island *pi); -static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi); +static void pi_add_ref(polling_island* pi); +static void pi_unref(polling_island* pi); #ifndef NDEBUG -static void pi_add_ref_dbg(polling_island *pi, const char *reason, - const char *file, int line) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { +static void pi_add_ref_dbg(polling_island* pi, const char* reason, + const char* file, int line) { + if (grpc_polling_trace.enabled()) { gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count); - gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR - " (%s) - (%s, %d)", + gpr_log(GPR_INFO, + "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR + " (%s) - (%s, %d)", pi, old_cnt, old_cnt + 1, reason, file, line); } pi_add_ref(pi); } -static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi, - const char *reason, const char *file, int line) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { +static void pi_unref_dbg(polling_island* pi, const char* reason, + const char* file, int line) { + if (grpc_polling_trace.enabled()) { gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count); - gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR - " (%s) - (%s, %d)", + gpr_log(GPR_INFO, + "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR + " (%s) - (%s, %d)", pi, old_cnt, (old_cnt - 1), reason, file, line); } - pi_unref(exec_ctx, pi); + pi_unref(pi); } #endif -static void pi_add_ref(polling_island *pi) { +static void pi_add_ref(polling_island* pi) { gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1); } -static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) { +static void pi_unref(polling_island* pi) { /* If ref count went to zero, delete the polling island. Note that this deletion not be done under a lock. 
Once the ref count goes to zero, we are guaranteed that no one else holds a reference to the @@ -320,23 +327,23 @@ static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) { non-empty, we should remove a ref to the merged_to polling island */ if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) { - polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - polling_island_delete(exec_ctx, pi); - if (next != NULL) { - PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */ + polling_island* next = (polling_island*)gpr_atm_acq_load(&pi->merged_to); + polling_island_delete(pi); + if (next != nullptr) { + PI_UNREF(next, "pi_delete"); /* Recursive call */ } } } /* The caller is expected to hold pi->mu lock before calling this function */ -static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds, +static void polling_island_add_fds_locked(polling_island* pi, grpc_fd** fds, size_t fd_count, bool add_fd_refs, - grpc_error **error) { + grpc_error** error) { int err; size_t i; struct epoll_event ev; - char *err_msg; - const char *err_desc = "polling_island_add_fds"; + char* err_msg; + const char* err_desc = "polling_island_add_fds"; #ifdef GRPC_TSAN /* See the definition of g_epoll_sync for more context */ @@ -344,7 +351,7 @@ static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds, #endif /* defined(GRPC_TSAN) */ for (i = 0; i < fd_count; i++) { - ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET); + ev.events = static_cast(EPOLLIN | EPOLLOUT | EPOLLET); ev.data.ptr = fds[i]; err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev); @@ -363,8 +370,8 @@ static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds, if (pi->fd_cnt == pi->fd_capacity) { pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2); - pi->fds = - (grpc_fd **)gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity); + pi->fds = static_cast( + gpr_realloc(pi->fds, sizeof(grpc_fd*) * pi->fd_capacity)); } pi->fds[pi->fd_cnt++] = fds[i]; @@ -375,15 +382,15 @@ static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds, } /* The caller is expected to hold pi->mu before calling this */ -static void polling_island_add_wakeup_fd_locked(polling_island *pi, - grpc_wakeup_fd *wakeup_fd, - grpc_error **error) { +static void polling_island_add_wakeup_fd_locked(polling_island* pi, + grpc_wakeup_fd* wakeup_fd, + grpc_error** error) { struct epoll_event ev; int err; - char *err_msg; - const char *err_desc = "polling_island_add_wakeup_fd"; + char* err_msg; + const char* err_desc = "polling_island_add_wakeup_fd"; - ev.events = (uint32_t)(EPOLLIN | EPOLLET); + ev.events = static_cast(EPOLLIN | EPOLLET); ev.data.ptr = wakeup_fd; err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev); @@ -399,16 +406,16 @@ static void polling_island_add_wakeup_fd_locked(polling_island *pi, } /* The caller is expected to hold pi->mu lock before calling this function */ -static void polling_island_remove_all_fds_locked(polling_island *pi, +static void polling_island_remove_all_fds_locked(polling_island* pi, bool remove_fd_refs, - grpc_error **error) { + grpc_error** error) { int err; size_t i; - char *err_msg; - const char *err_desc = "polling_island_remove_fds"; + char* err_msg; + const char* err_desc = "polling_island_remove_fds"; for (i = 0; i < pi->fd_cnt; i++) { - err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL); + err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, nullptr); if (err < 
0 && errno != ENOENT) { gpr_asprintf(&err_msg, "epoll_ctl (epoll_fd: %d) delete fds[%zu]: %d failed with " @@ -427,18 +434,18 @@ static void polling_island_remove_all_fds_locked(polling_island *pi, } /* The caller is expected to hold pi->mu lock before calling this function */ -static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd, +static void polling_island_remove_fd_locked(polling_island* pi, grpc_fd* fd, bool is_fd_closed, - grpc_error **error) { + grpc_error** error) { int err; size_t i; - char *err_msg; - const char *err_desc = "polling_island_remove_fd"; + char* err_msg; + const char* err_desc = "polling_island_remove_fd"; /* If fd is already closed, then it would have been automatically been removed from the epoll set */ if (!is_fd_closed) { - err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL); + err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, nullptr); if (err < 0 && errno != ENOENT) { gpr_asprintf( &err_msg, @@ -459,24 +466,23 @@ static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd, } /* Might return NULL in case of an error */ -static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, - grpc_fd *initial_fd, - grpc_error **error) { - polling_island *pi = NULL; - const char *err_desc = "polling_island_create"; +static polling_island* polling_island_create(grpc_fd* initial_fd, + grpc_error** error) { + polling_island* pi = nullptr; + const char* err_desc = "polling_island_create"; *error = GRPC_ERROR_NONE; - pi = (polling_island *)gpr_malloc(sizeof(*pi)); + pi = static_cast(gpr_malloc(sizeof(*pi))); gpr_mu_init(&pi->mu); pi->fd_cnt = 0; pi->fd_capacity = 0; - pi->fds = NULL; + pi->fds = nullptr; pi->epoll_fd = -1; gpr_atm_rel_store(&pi->ref_count, 0); gpr_atm_rel_store(&pi->poller_count, 0); - gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL); + gpr_atm_rel_store(&pi->merged_to, (gpr_atm) nullptr); pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC); @@ -485,19 +491,19 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx, goto done; } - if (initial_fd != NULL) { + if (initial_fd != nullptr) { polling_island_add_fds_locked(pi, &initial_fd, 1, true, error); } done: if (*error != GRPC_ERROR_NONE) { - polling_island_delete(exec_ctx, pi); - pi = NULL; + polling_island_delete(pi); + pi = nullptr; } return pi; } -static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) { +static void polling_island_delete(polling_island* pi) { GPR_ASSERT(pi->fd_cnt == 0); if (pi->epoll_fd >= 0) { @@ -511,11 +517,11 @@ static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) { /* Attempts to gets the last polling island in the linked list (liked by the * 'merged_to' field). Since this does not lock the polling island, there are no * guarantees that the island returned is the last island */ -static polling_island *polling_island_maybe_get_latest(polling_island *pi) { - polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - while (next != NULL) { +static polling_island* polling_island_maybe_get_latest(polling_island* pi) { + polling_island* next = (polling_island*)gpr_atm_acq_load(&pi->merged_to); + while (next != nullptr) { pi = next; - next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); + next = (polling_island*)gpr_atm_acq_load(&pi->merged_to); } return pi; @@ -530,19 +536,19 @@ static polling_island *polling_island_maybe_get_latest(polling_island *pi) { ... critical section .. ... gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. 
NOT pi->mu */ -static polling_island *polling_island_lock(polling_island *pi) { - polling_island *next = NULL; +static polling_island* polling_island_lock(polling_island* pi) { + polling_island* next = nullptr; while (true) { - next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - if (next == NULL) { + next = (polling_island*)gpr_atm_acq_load(&pi->merged_to); + if (next == nullptr) { /* Looks like 'pi' is the last node in the linked list but unless we check this by holding the pi->mu lock, we cannot be sure (i.e without the pi->mu lock, we don't prevent island merges). To be absolutely sure, check once more by holding the pi->mu lock */ gpr_mu_lock(&pi->mu); - next = (polling_island *)gpr_atm_acq_load(&pi->merged_to); - if (next == NULL) { + next = (polling_island*)gpr_atm_acq_load(&pi->merged_to); + if (next == nullptr) { /* pi is infact the last node and we have the pi->mu lock. we're done */ break; } @@ -579,11 +585,11 @@ static polling_island *polling_island_lock(polling_island *pi) { // Release locks: Always call polling_island_unlock_pair() to release locks polling_island_unlock_pair(p1, p2); */ -static void polling_island_lock_pair(polling_island **p, polling_island **q) { - polling_island *pi_1 = *p; - polling_island *pi_2 = *q; - polling_island *next_1 = NULL; - polling_island *next_2 = NULL; +static void polling_island_lock_pair(polling_island** p, polling_island** q) { + polling_island* pi_1 = *p; + polling_island* pi_2 = *q; + polling_island* next_1 = nullptr; + polling_island* next_2 = nullptr; /* The algorithm is simple: - Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and @@ -600,16 +606,16 @@ static void polling_island_lock_pair(polling_island **p, polling_island **q) { - If the polling islands are the last islands, we are done. 
If not, release the locks and continue the process from the first step */ while (true) { - next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to); - while (next_1 != NULL) { + next_1 = (polling_island*)gpr_atm_acq_load(&pi_1->merged_to); + while (next_1 != nullptr) { pi_1 = next_1; - next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to); + next_1 = (polling_island*)gpr_atm_acq_load(&pi_1->merged_to); } - next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to); - while (next_2 != NULL) { + next_2 = (polling_island*)gpr_atm_acq_load(&pi_2->merged_to); + while (next_2 != nullptr) { pi_2 = next_2; - next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to); + next_2 = (polling_island*)gpr_atm_acq_load(&pi_2->merged_to); } if (pi_1 == pi_2) { @@ -625,9 +631,9 @@ static void polling_island_lock_pair(polling_island **p, polling_island **q) { gpr_mu_lock(&pi_1->mu); } - next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to); - next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to); - if (next_1 == NULL && next_2 == NULL) { + next_1 = (polling_island*)gpr_atm_acq_load(&pi_1->merged_to); + next_2 = (polling_island*)gpr_atm_acq_load(&pi_2->merged_to); + if (next_1 == nullptr && next_2 == nullptr) { break; } @@ -639,7 +645,7 @@ static void polling_island_lock_pair(polling_island **p, polling_island **q) { *q = pi_2; } -static void polling_island_unlock_pair(polling_island *p, polling_island *q) { +static void polling_island_unlock_pair(polling_island* p, polling_island* q) { if (p == q) { gpr_mu_unlock(&p->mu); } else { @@ -648,16 +654,16 @@ static void polling_island_unlock_pair(polling_island *p, polling_island *q) { } } -static polling_island *polling_island_merge(polling_island *p, - polling_island *q, - grpc_error **error) { +static polling_island* polling_island_merge(polling_island* p, + polling_island* q, + grpc_error** error) { /* Get locks on both the polling islands */ polling_island_lock_pair(&p, &q); if (p != q) { /* Make sure that p points to the polling island with fewer fds than q */ if (p->fd_cnt > q->fd_cnt) { - GPR_SWAP(polling_island *, p, q); + GPR_SWAP(polling_island*, p, q); } /* Merge p with q i.e move all the fds from p (The one with fewer fds) to q @@ -682,8 +688,8 @@ static polling_island *polling_island_merge(polling_island *p, return q; } -static grpc_error *polling_island_global_init() { - grpc_error *error = GRPC_ERROR_NONE; +static grpc_error* polling_island_global_init() { + grpc_error* error = GRPC_ERROR_NONE; error = grpc_wakeup_fd_init(&polling_island_wakeup_fd); if (error == GRPC_ERROR_NONE) { @@ -719,15 +725,15 @@ static void polling_island_global_shutdown() { * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a * case occurs. 
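The merge path shown earlier in this hunk boils down to: move the smaller island's fds into the larger one, then leave a release-published forwarding pointer behind, which readers chase with acquire loads. A simplified sketch under those assumptions (island, merge_islands, and latest are illustrative names; fd migration and locking are elided):

#include <atomic>
#include <cstddef>
#include <utility>

struct island {
  std::atomic<island*> merged_to{nullptr};
  size_t fd_cnt = 0;
};

static island* merge_islands(island* p, island* q) {
  if (p == q) return q;
  if (p->fd_cnt > q->fd_cnt) std::swap(p, q);  // p is now the smaller island
  q->fd_cnt += p->fd_cnt;  // moving p's fds into q's epoll set is elided here
  p->fd_cnt = 0;
  p->merged_to.store(q, std::memory_order_release);  // leave a forwarding link
  return q;
}

// Readers follow the forwarding links with acquire loads to find the
// latest island, as polling_island_maybe_get_latest() does above.
static island* latest(island* pi) {
  island* next = pi->merged_to.load(std::memory_order_acquire);
  while (next != nullptr) {
    pi = next;
    next = pi->merged_to.load(std::memory_order_acquire);
  }
  return pi;
}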
*/ -static grpc_fd *fd_freelist = NULL; +static grpc_fd* fd_freelist = nullptr; static gpr_mu fd_freelist_mu; #ifndef NDEBUG #define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) #define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__) -static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, +static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file, int line) { - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { + if (grpc_trace_fd_refcount.enabled()) { gpr_log(GPR_DEBUG, "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), @@ -736,22 +742,22 @@ static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, #else #define REF_BY(fd, n, reason) ref_by(fd, n) #define UNREF_BY(fd, n, reason) unref_by(fd, n) -static void ref_by(grpc_fd *fd, int n) { +static void ref_by(grpc_fd* fd, int n) { #endif GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); } #ifndef NDEBUG -static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, +static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file, int line) { - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { + if (grpc_trace_fd_refcount.enabled()) { gpr_log(GPR_DEBUG, "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); } #else -static void unref_by(grpc_fd *fd, int n) { +static void unref_by(grpc_fd* fd, int n) { #endif gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n); if (old == n) { @@ -761,8 +767,8 @@ static void unref_by(grpc_fd *fd, int n) { fd_freelist = fd; grpc_iomgr_unregister_object(&fd->iomgr_object); - grpc_lfev_destroy(&fd->read_closure); - grpc_lfev_destroy(&fd->write_closure); + fd->read_closure->DestroyEvent(); + fd->write_closure->DestroyEvent(); gpr_mu_unlock(&fd_freelist_mu); } else { @@ -772,18 +778,18 @@ static void unref_by(grpc_fd *fd, int n) { /* Increment refcount by two to avoid changing the orphan bit */ #ifndef NDEBUG -static void fd_ref(grpc_fd *fd, const char *reason, const char *file, +static void fd_ref(grpc_fd* fd, const char* reason, const char* file, int line) { ref_by(fd, 2, reason, file, line); } -static void fd_unref(grpc_fd *fd, const char *reason, const char *file, +static void fd_unref(grpc_fd* fd, const char* reason, const char* file, int line) { unref_by(fd, 2, reason, file, line); } #else -static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); } -static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); } +static void fd_ref(grpc_fd* fd) { ref_by(fd, 2); } +static void fd_unref(grpc_fd* fd) { unref_by(fd, 2); } #endif static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } @@ -791,8 +797,8 @@ static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } static void fd_global_shutdown(void) { gpr_mu_lock(&fd_freelist_mu); gpr_mu_unlock(&fd_freelist_mu); - while (fd_freelist != NULL) { - grpc_fd *fd = fd_freelist; + while (fd_freelist != nullptr) { + grpc_fd* fd = fd_freelist; fd_freelist = fd_freelist->freelist_next; gpr_mu_destroy(&fd->po.mu); gpr_free(fd); @@ -800,26 +806,28 @@ static void fd_global_shutdown(void) { gpr_mu_destroy(&fd_freelist_mu); } -static grpc_fd *fd_create(int fd, const char *name) { - grpc_fd *new_fd = NULL; +static grpc_fd* fd_create(int fd, const char* name) { + grpc_fd* new_fd = nullptr; gpr_mu_lock(&fd_freelist_mu); - if (fd_freelist != NULL) { + if (fd_freelist != 
nullptr) { new_fd = fd_freelist; fd_freelist = fd_freelist->freelist_next; } gpr_mu_unlock(&fd_freelist_mu); - if (new_fd == NULL) { - new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd)); + if (new_fd == nullptr) { + new_fd = static_cast(gpr_malloc(sizeof(grpc_fd))); gpr_mu_init(&new_fd->po.mu); + new_fd->read_closure.Init(); + new_fd->write_closure.Init(); } /* Note: It is not really needed to get the new_fd->po.mu lock here. If this * is a newly created fd (or an fd we got from the freelist), no one else * would be holding a lock to it anyway. */ gpr_mu_lock(&new_fd->po.mu); - new_fd->po.pi = NULL; + new_fd->po.pi = nullptr; #ifndef NDEBUG new_fd->po.obj_type = POLL_OBJ_FD; #endif @@ -827,23 +835,23 @@ static grpc_fd *fd_create(int fd, const char *name) { gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1); new_fd->fd = fd; new_fd->orphaned = false; - grpc_lfev_init(&new_fd->read_closure); - grpc_lfev_init(&new_fd->write_closure); + new_fd->read_closure->InitEvent(); + new_fd->write_closure->InitEvent(); gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL); - new_fd->freelist_next = NULL; - new_fd->on_done_closure = NULL; + new_fd->freelist_next = nullptr; + new_fd->on_done_closure = nullptr; gpr_mu_unlock(&new_fd->po.mu); - char *fd_name; + char* fd_name; gpr_asprintf(&fd_name, "%s fd=%d", name, fd); grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name); gpr_free(fd_name); return new_fd; } -static int fd_wrapped_fd(grpc_fd *fd) { +static int fd_wrapped_fd(grpc_fd* fd) { int ret_fd = -1; gpr_mu_lock(&fd->po.mu); if (!fd->orphaned) { @@ -854,11 +862,10 @@ static int fd_wrapped_fd(grpc_fd *fd) { return ret_fd; } -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, - bool already_closed, const char *reason) { - grpc_error *error = GRPC_ERROR_NONE; - polling_island *unref_pi = NULL; +static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, + bool already_closed, const char* reason) { + grpc_error* error = GRPC_ERROR_NONE; + polling_island* unref_pi = nullptr; gpr_mu_lock(&fd->po.mu); fd->on_done_closure = on_done; @@ -875,18 +882,18 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - Unlock the latest polling island - Set fd->po.pi to NULL (but remove the ref on the polling island before doing this.) */ - if (fd->po.pi != NULL) { - polling_island *pi_latest = polling_island_lock(fd->po.pi); + if (fd->po.pi != nullptr) { + polling_island* pi_latest = polling_island_lock(fd->po.pi); polling_island_remove_fd_locked(pi_latest, fd, already_closed, &error); gpr_mu_unlock(&pi_latest->mu); unref_pi = fd->po.pi; - fd->po.pi = NULL; + fd->po.pi = nullptr; } /* If release_fd is not NULL, we should be relinquishing control of the file descriptor fd->fd (but we still own the grpc_fd structure). */ - if (release_fd != NULL) { + if (release_fd != nullptr) { *release_fd = fd->fd; } else { close(fd->fd); @@ -894,52 +901,48 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, fd->orphaned = true; - GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_REF(error)); gpr_mu_unlock(&fd->po.mu); UNREF_BY(fd, 2, reason); /* Drop the reference */ - if (unref_pi != NULL) { + if (unref_pi != nullptr) { /* Unref stale polling island here, outside the fd lock above. The polling island owns a workqueue which owns an fd, and unreffing inside the lock can cause an eventual lock loop that makes TSAN very unhappy. 
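The TSAN note above reflects a general pattern: detach the stale reference while holding the lock, but drop that reference only after the lock has been released. A small sketch of that shape, with resource, holder, and resource_unref as hypothetical stand-ins for the polling island and PI_UNREF:

#include <mutex>

struct resource;                 // hypothetical refcounted resource
void resource_unref(resource*);  // hypothetical; may take other locks

struct holder {
  std::mutex mu;
  resource* res = nullptr;
};

// Detach under the lock, unref only after releasing it, mirroring how
// fd_orphan() defers PI_UNREF() until fd->po.mu has been dropped.
static void holder_detach(holder* h) {
  resource* stale = nullptr;
  {
    std::lock_guard<std::mutex> lk(h->mu);
    stale = h->res;
    h->res = nullptr;
  }
  if (stale != nullptr) resource_unref(stale);  // runs without h->mu held
}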
*/ - PI_UNREF(exec_ctx, unref_pi, "fd_orphan"); + PI_UNREF(unref_pi, "fd_orphan"); } if (error != GRPC_ERROR_NONE) { - const char *msg = grpc_error_string(error); + const char* msg = grpc_error_string(error); gpr_log(GPR_DEBUG, "fd_orphan: %s", msg); } GRPC_ERROR_UNREF(error); } -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { +static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) { gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset); - return (grpc_pollset *)notifier; + return (grpc_pollset*)notifier; } -static bool fd_is_shutdown(grpc_fd *fd) { - return grpc_lfev_is_shutdown(&fd->read_closure); +static bool fd_is_shutdown(grpc_fd* fd) { + return fd->read_closure->IsShutdown(); } /* Might be called multiple times */ -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure, - GRPC_ERROR_REF(why))) { +static void fd_shutdown(grpc_fd* fd, grpc_error* why) { + if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) { shutdown(fd->fd, SHUT_RDWR); - grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why)); + fd->write_closure->SetShutdown(GRPC_ERROR_REF(why)); } GRPC_ERROR_UNREF(why); } -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); +static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) { + fd->read_closure->NotifyOn(closure); } -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); +static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) { + fd->write_closure->NotifyOn(closure); } /******************************************************************************* @@ -959,7 +962,7 @@ static void sig_handler(int sig_num) { static void poller_kick_init() { signal(grpc_wakeup_signal, sig_handler); } /* Global state management */ -static grpc_error *pollset_global_init(void) { +static grpc_error* pollset_global_init(void) { gpr_tls_init(&g_current_thread_pollset); gpr_tls_init(&g_current_thread_worker); poller_kick_init(); @@ -971,14 +974,15 @@ static void pollset_global_shutdown(void) { gpr_tls_destroy(&g_current_thread_worker); } -static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) { - grpc_error *err = GRPC_ERROR_NONE; +static grpc_error* pollset_worker_kick(grpc_pollset_worker* worker) { + grpc_error* err = GRPC_ERROR_NONE; /* Kick the worker only if it was not already kicked */ - if (gpr_atm_no_barrier_cas(&worker->is_kicked, (gpr_atm)0, (gpr_atm)1)) { + if (gpr_atm_no_barrier_cas(&worker->is_kicked, static_cast(0), + static_cast(1))) { GRPC_POLLING_TRACE( "pollset_worker_kick: Kicking worker: %p (thread id: %ld)", - (void *)worker, (long int)worker->pt_id); + (void*)worker, (long int)worker->pt_id); int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal); if (err_num != 0) { err = GRPC_OS_ERROR(err_num, "pthread_kill"); @@ -989,56 +993,55 @@ static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) { /* Return 1 if the pollset has active threads in pollset_work (pollset must * be locked) */ -static int pollset_has_workers(grpc_pollset *p) { +static int pollset_has_workers(grpc_pollset* p) { return p->root_worker.next != &p->root_worker; } -static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) { +static void remove_worker(grpc_pollset* 
p, grpc_pollset_worker* worker) { worker->prev->next = worker->next; worker->next->prev = worker->prev; } -static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) { +static grpc_pollset_worker* pop_front_worker(grpc_pollset* p) { if (pollset_has_workers(p)) { - grpc_pollset_worker *w = p->root_worker.next; + grpc_pollset_worker* w = p->root_worker.next; remove_worker(p, w); return w; } else { - return NULL; + return nullptr; } } -static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) { +static void push_back_worker(grpc_pollset* p, grpc_pollset_worker* worker) { worker->next = &p->root_worker; worker->prev = worker->next->prev; worker->prev->next = worker->next->prev = worker; } -static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) { +static void push_front_worker(grpc_pollset* p, grpc_pollset_worker* worker) { worker->prev = &p->root_worker; worker->next = worker->prev->next; worker->prev->next = worker->next->prev = worker; } /* p->mu must be held before calling this function */ -static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, - grpc_pollset_worker *specific_worker) { - GPR_TIMER_BEGIN("pollset_kick", 0); - grpc_error *error = GRPC_ERROR_NONE; - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); - const char *err_desc = "Kick Failure"; - grpc_pollset_worker *worker = specific_worker; - if (worker != NULL) { +static grpc_error* pollset_kick(grpc_pollset* p, + grpc_pollset_worker* specific_worker) { + GPR_TIMER_SCOPE("pollset_kick", 0); + grpc_error* error = GRPC_ERROR_NONE; + GRPC_STATS_INC_POLLSET_KICK(); + const char* err_desc = "Kick Failure"; + grpc_pollset_worker* worker = specific_worker; + if (worker != nullptr) { if (worker == GRPC_POLLSET_KICK_BROADCAST) { if (pollset_has_workers(p)) { - GPR_TIMER_BEGIN("pollset_kick.broadcast", 0); + GPR_TIMER_SCOPE("pollset_kick.broadcast", 0); for (worker = p->root_worker.next; worker != &p->root_worker; worker = worker->next) { if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) { append_error(&error, pollset_worker_kick(worker), err_desc); } } - GPR_TIMER_END("pollset_kick.broadcast", 0); } else { p->kicked_without_pollers = true; } @@ -1058,7 +1061,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, GPR_TIMER_MARK("kick_anonymous", 0); worker = pop_front_worker(p); - if (worker != NULL) { + if (worker != nullptr) { GPR_TIMER_MARK("finally_kick", 0); push_back_worker(p, worker); append_error(&error, pollset_worker_kick(worker), err_desc); @@ -1068,15 +1071,14 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, } } - GPR_TIMER_END("pollset_kick", 0); GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error)); return error; } -static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { +static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) { gpr_mu_init(&pollset->po.mu); *mu = &pollset->po.mu; - pollset->po.pi = NULL; + pollset->po.pi = nullptr; #ifndef NDEBUG pollset->po.obj_type = POLL_OBJ_POLLSET; #endif @@ -1086,38 +1088,22 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { pollset->shutting_down = false; pollset->finish_shutdown_called = false; - pollset->shutdown_done = NULL; -} - -/* Convert a timespec to milliseconds: - - Very small or negative poll times are clamped to zero to do a non-blocking - poll (which becomes spin polling) - - Other small values are rounded up to one millisecond - - Longer than a millisecond polls are rounded up to the next nearest - millisecond to avoid spinning - 
- Infinite timeouts are converted to -1 */ -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - static const int64_t max_spin_polling_us = 10; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { - return -1; - } + pollset->shutdown_done = nullptr; +} - if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( - max_spin_polling_us, - GPR_TIMESPAN))) <= 0) { +static int poll_deadline_to_millis_timeout(grpc_millis millis) { + if (millis == GRPC_MILLIS_INF_FUTURE) return -1; + grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now(); + if (delta > INT_MAX) + return INT_MAX; + else if (delta < 0) return 0; - } - timeout = gpr_time_sub(deadline, now); - int millis = gpr_time_to_millis(gpr_time_add( - timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN))); - return millis >= 1 ? millis : 1; + else + return static_cast(delta); } -static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); +static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) { + fd->read_closure->SetReady(); /* Note, it is possible that fd_become_readable might be called twice with different 'notifier's when an fd becomes readable and it is in two epoll @@ -1128,39 +1114,34 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); } -static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); -} +static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); } -static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, - grpc_pollset *ps, - const char *reason) { - if (ps->po.pi != NULL) { - PI_UNREF(exec_ctx, ps->po.pi, reason); +static void pollset_release_polling_island(grpc_pollset* ps, + const char* reason) { + if (ps->po.pi != nullptr) { + PI_UNREF(ps->po.pi, reason); } - ps->po.pi = NULL; + ps->po.pi = nullptr; } -static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { +static void finish_shutdown_locked(grpc_pollset* pollset) { /* The pollset cannot have any workers if we are at this stage */ GPR_ASSERT(!pollset_has_workers(pollset)); pollset->finish_shutdown_called = true; /* Release the ref and set pollset->po.pi to NULL */ - pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown"); - GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE); + pollset_release_polling_island(pollset, "ps_shutdown"); + GRPC_CLOSURE_SCHED(pollset->shutdown_done, GRPC_ERROR_NONE); } /* pollset->po.mu lock must be held by the caller before calling this */ -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { - GPR_TIMER_BEGIN("pollset_shutdown", 0); +static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { + GPR_TIMER_SCOPE("pollset_shutdown", 0); GPR_ASSERT(!pollset->shutting_down); pollset->shutting_down = true; pollset->shutdown_done = closure; - pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST); + pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); /* If the pollset has any workers, we cannot call finish_shutdown_locked() because it would release the underlying polling island. 
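The poll_deadline_to_millis_timeout() rewrite above reduces to a simple clamp from a 64-bit millisecond deadline to an int timeout for epoll. A standalone restatement, where the parameter names are illustrative stand-ins for grpc_millis and ExecCtx::Now():

#include <climits>
#include <cstdint>

// -1 blocks indefinitely, expired deadlines poll immediately, and large
// deltas are capped so they fit in an int.
static int deadline_to_timeout_ms(int64_t deadline_ms, int64_t now_ms,
                                  int64_t inf_future_ms) {
  if (deadline_ms == inf_future_ms) return -1;
  int64_t delta = deadline_ms - now_ms;
  if (delta < 0) return 0;
  if (delta > INT_MAX) return INT_MAX;
  return static_cast<int>(delta);
}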
In such a case, we @@ -1168,32 +1149,30 @@ static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (!pollset_has_workers(pollset)) { GPR_ASSERT(!pollset->finish_shutdown_called); GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0); - finish_shutdown_locked(exec_ctx, pollset); + finish_shutdown_locked(pollset); } - GPR_TIMER_END("pollset_shutdown", 0); } /* pollset_shutdown is guaranteed to be called before pollset_destroy. So other * than destroying the mutexes, there is nothing special that needs to be done * here */ -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +static void pollset_destroy(grpc_pollset* pollset) { GPR_ASSERT(!pollset_has_workers(pollset)); gpr_mu_destroy(&pollset->po.mu); } #define GRPC_EPOLL_MAX_EVENTS 100 /* Note: sig_mask contains the signal mask to use *during* epoll_wait() */ -static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, - grpc_pollset_worker *worker, int timeout_ms, - sigset_t *sig_mask, grpc_error **error) { +static void pollset_work_and_unlock(grpc_pollset* pollset, + grpc_pollset_worker* worker, int timeout_ms, + sigset_t* sig_mask, grpc_error** error) { + GPR_TIMER_SCOPE("pollset_work_and_unlock", 0); struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS]; int epoll_fd = -1; int ep_rv; - polling_island *pi = NULL; - char *err_msg; - const char *err_desc = "pollset_work_and_unlock"; - GPR_TIMER_BEGIN("pollset_work_and_unlock", 0); + polling_island* pi = nullptr; + char* err_msg; + const char* err_desc = "pollset_work_and_unlock"; /* We need to get the epoll_fd to wait on. The epoll_fd is in inside the latest polling island pointed by pollset->po.pi @@ -1205,16 +1184,15 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, right-away from epoll_wait() and pick up the latest polling_island the next this function (i.e pollset_work_and_unlock()) is called */ - if (pollset->po.pi == NULL) { - pollset->po.pi = polling_island_create(exec_ctx, NULL, error); - if (pollset->po.pi == NULL) { - GPR_TIMER_END("pollset_work_and_unlock", 0); + if (pollset->po.pi == nullptr) { + pollset->po.pi = polling_island_create(nullptr, error); + if (pollset->po.pi == nullptr) { return; /* Fatal error. We cannot continue */ } PI_ADD_REF(pollset->po.pi, "ps"); GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p", - (void *)pollset, (void *)pollset->po.pi); + (void*)pollset, (void*)pollset->po.pi); } pi = polling_island_maybe_get_latest(pollset->po.pi); @@ -1226,7 +1204,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the polling island to be deleted */ PI_ADD_REF(pi, "ps"); - PI_UNREF(exec_ctx, pollset->po.pi, "ps"); + PI_UNREF(pollset->po.pi, "ps"); pollset->po.pi = pi; } @@ -1240,7 +1218,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, g_current_thread_polling_island = pi; GRPC_SCHEDULING_START_BLOCKING_REGION; - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); + GRPC_STATS_INC_SYSCALL_POLL(); ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask); GRPC_SCHEDULING_END_BLOCKING_REGION; @@ -1254,7 +1232,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, /* We were interrupted. 
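The interruption handling here can be sketched on its own: epoll_pwait() with the saved signal mask, treating EINTR as a kick and doing one zero-timeout epoll_wait() to pick up anything already pending. wait_for_events is an illustrative wrapper, not a gRPC function:

#include <errno.h>
#include <signal.h>
#include <sys/epoll.h>

// Wait with the caller's signal mask; EINTR means a wakeup signal landed,
// so sweep once with a zero timeout instead of treating it as an error.
static int wait_for_events(int epoll_fd, struct epoll_event* evs, int max_evs,
                           int timeout_ms, const sigset_t* sig_mask) {
  int rv = epoll_pwait(epoll_fd, evs, max_evs, timeout_ms, sig_mask);
  if (rv < 0 && errno == EINTR) {
    rv = epoll_wait(epoll_fd, evs, max_evs, 0);
  }
  return rv;
}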
Save an interation by doing a zero timeout epoll_wait to see if there are any other events of interest */ GRPC_POLLING_TRACE("pollset_work: pollset: %p, worker: %p received kick", - (void *)pollset, (void *)worker); + (void*)pollset, (void*)worker); ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0); } } @@ -1265,60 +1243,58 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx, #endif /* defined(GRPC_TSAN) */ for (int i = 0; i < ep_rv; ++i) { - void *data_ptr = ep_ev[i].data.ptr; + void* data_ptr = ep_ev[i].data.ptr; if (data_ptr == &polling_island_wakeup_fd) { GRPC_POLLING_TRACE( "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: " "%d) got merged", - (void *)pollset, (void *)worker, epoll_fd); + (void*)pollset, (void*)worker, epoll_fd); /* This means that our polling island is merged with a different island. We do not have to do anything here since the subsequent call to the function pollset_work_and_unlock() will pick up the correct epoll_fd */ } else { - grpc_fd *fd = (grpc_fd *)data_ptr; + grpc_fd* fd = static_cast(data_ptr); int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP); int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI); int write_ev = ep_ev[i].events & EPOLLOUT; if (read_ev || cancel) { - fd_become_readable(exec_ctx, fd, pollset); + fd_become_readable(fd, pollset); } if (write_ev || cancel) { - fd_become_writable(exec_ctx, fd); + fd_become_writable(fd); } } } - g_current_thread_polling_island = NULL; + g_current_thread_polling_island = nullptr; gpr_atm_no_barrier_fetch_add(&pi->poller_count, -1); - GPR_ASSERT(pi != NULL); + GPR_ASSERT(pi != nullptr); /* Before leaving, release the extra ref we added to the polling island. It is important to use "pi" here (i.e our old copy of pollset->po.pi that we got before releasing the polling island lock). This is because pollset->po.pi pointer might get udpated in other parts of the code when there is an island merge while we are doing epoll_wait() above */ - PI_UNREF(exec_ctx, pi, "ps_work"); - - GPR_TIMER_END("pollset_work_and_unlock", 0); + PI_UNREF(pi, "ps_work"); } /* pollset->po.mu lock must be held by the caller before calling this. 
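The event-classification loop above follows a common shape; a compact sketch with my_fd, on_readable, and on_writable as hypothetical stand-ins for grpc_fd, fd_become_readable, and fd_become_writable:

#include <sys/epoll.h>

struct my_fd;                 // hypothetical per-fd state
void on_readable(my_fd* fd);  // hypothetical readiness callbacks
void on_writable(my_fd* fd);

// Error/hangup conditions wake both directions so pending closures can
// observe the failure; otherwise readiness is dispatched per event bit.
static void dispatch_events(const struct epoll_event* evs, int n) {
  for (int i = 0; i < n; ++i) {
    my_fd* fd = static_cast<my_fd*>(evs[i].data.ptr);
    const bool cancel = (evs[i].events & (EPOLLERR | EPOLLHUP)) != 0;
    if ((evs[i].events & (EPOLLIN | EPOLLPRI)) != 0 || cancel) on_readable(fd);
    if ((evs[i].events & EPOLLOUT) != 0 || cancel) on_writable(fd);
  }
}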
The function pollset_work() may temporarily release the lock (pollset->po.mu) during the course of its execution but it will always re-acquire the lock and ensure that it is held by the time the function returns */ -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { - GPR_TIMER_BEGIN("pollset_work", 0); - grpc_error *error = GRPC_ERROR_NONE; - int timeout_ms = poll_deadline_to_millis_timeout(deadline, now); +static grpc_error* pollset_work(grpc_pollset* pollset, + grpc_pollset_worker** worker_hdl, + grpc_millis deadline) { + GPR_TIMER_SCOPE("pollset_work", 0); + grpc_error* error = GRPC_ERROR_NONE; + int timeout_ms = poll_deadline_to_millis_timeout(deadline); sigset_t new_mask; grpc_pollset_worker worker; - worker.next = worker.prev = NULL; + worker.next = worker.prev = nullptr; worker.pt_id = pthread_self(); gpr_atm_no_barrier_store(&worker.is_kicked, (gpr_atm)0); @@ -1371,9 +1347,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, push_front_worker(pollset, &worker); /* Add worker to pollset */ - pollset_work_and_unlock(exec_ctx, pollset, &worker, timeout_ms, - &g_orig_sigmask, &error); - grpc_exec_ctx_flush(exec_ctx); + pollset_work_and_unlock(pollset, &worker, timeout_ms, &g_orig_sigmask, + &error); + grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(&pollset->po.mu); @@ -1393,36 +1369,33 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (pollset->shutting_down && !pollset_has_workers(pollset) && !pollset->finish_shutdown_called) { GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0); - finish_shutdown_locked(exec_ctx, pollset); + finish_shutdown_locked(pollset); gpr_mu_unlock(&pollset->po.mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(&pollset->po.mu); } - if (worker_hdl) *worker_hdl = NULL; + if (worker_hdl) *worker_hdl = nullptr; gpr_tls_set(&g_current_thread_pollset, (intptr_t)0); gpr_tls_set(&g_current_thread_worker, (intptr_t)0); - GPR_TIMER_END("pollset_work", 0); - GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error)); return error; } -static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, - poll_obj_type bag_type, poll_obj *item, - poll_obj_type item_type) { - GPR_TIMER_BEGIN("add_poll_object", 0); +static void add_poll_object(poll_obj* bag, poll_obj_type bag_type, + poll_obj* item, poll_obj_type item_type) { + GPR_TIMER_SCOPE("add_poll_object", 0); #ifndef NDEBUG GPR_ASSERT(item->obj_type == item_type); GPR_ASSERT(bag->obj_type == bag_type); #endif - grpc_error *error = GRPC_ERROR_NONE; - polling_island *pi_new = NULL; + grpc_error* error = GRPC_ERROR_NONE; + polling_island* pi_new = nullptr; gpr_mu_lock(&bag->mu); gpr_mu_lock(&item->mu); @@ -1449,7 +1422,7 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, if (item->pi == bag->pi) { pi_new = item->pi; - if (pi_new == NULL) { + if (pi_new == nullptr) { /* GPR_ASSERT(item->pi == bag->pi == NULL) */ /* If we are adding an fd to a bag (i.e pollset or pollset_set), then @@ -1463,18 +1436,18 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, keeping TSAN happy outweigh any performance advantage we might have by keeping the lock held. 
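That trade-off is the usual drop-lock-and-recheck pattern: build the expensive object with the lock released, then re-verify under the lock and discard the work if another thread won the race. A sketch with hypothetical poll_group, create_group, and destroy_group names:

#include <mutex>

struct poll_group;            // hypothetical expensive-to-build object
poll_group* create_group();   // hypothetical; allocates, opens an epoll fd
void destroy_group(poll_group*);

struct slot {
  std::mutex mu;
  poll_group* pg = nullptr;
};

// Create the group unlocked, then re-check the slot before publishing;
// if we lost the race, throw our copy away and keep the winner's.
static void attach_group(slot* s) {
  std::unique_lock<std::mutex> lk(s->mu);
  if (s->pg != nullptr) return;
  lk.unlock();
  poll_group* fresh = create_group();  // done without holding s->mu
  lk.lock();
  if (s->pg != nullptr) {
    destroy_group(fresh);              // raced: discard our work
    return;
  }
  s->pg = fresh;
}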
*/ gpr_mu_unlock(&item->mu); - pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error); + pi_new = polling_island_create(FD_FROM_PO(item), &error); gpr_mu_lock(&item->mu); /* Need to reverify any assumptions made between the initial lock and getting to this branch: if they've changed, we need to throw away our work and figure things out again. */ - if (item->pi != NULL) { + if (item->pi != nullptr) { GRPC_POLLING_TRACE( "add_poll_object: Raced creating new polling island. pi_new: %p " "(fd: %d, %s: %p)", - (void *)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type), - (void *)bag); + (void*)pi_new, FD_FROM_PO(item)->fd, poll_obj_string(bag_type), + (void*)bag); /* No need to lock 'pi_new' here since this is a new polling island and no one has a reference to it yet */ polling_island_remove_all_fds_locked(pi_new, true, &error); @@ -1482,31 +1455,30 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, /* Ref and unref so that the polling island gets deleted during unref */ PI_ADD_REF(pi_new, "dance_of_destruction"); - PI_UNREF(exec_ctx, pi_new, "dance_of_destruction"); + PI_UNREF(pi_new, "dance_of_destruction"); goto retry; } } else { - pi_new = polling_island_create(exec_ctx, NULL, &error); + pi_new = polling_island_create(nullptr, &error); } GRPC_POLLING_TRACE( "add_poll_object: Created new polling island. pi_new: %p (%s: %p, " "%s: %p)", - (void *)pi_new, poll_obj_string(item_type), (void *)item, - poll_obj_string(bag_type), (void *)bag); + (void*)pi_new, poll_obj_string(item_type), (void*)item, + poll_obj_string(bag_type), (void*)bag); } else { GRPC_POLLING_TRACE( "add_poll_object: Same polling island. pi: %p (%s, %s)", - (void *)pi_new, poll_obj_string(item_type), - poll_obj_string(bag_type)); + (void*)pi_new, poll_obj_string(item_type), poll_obj_string(bag_type)); } - } else if (item->pi == NULL) { + } else if (item->pi == nullptr) { /* GPR_ASSERT(bag->pi != NULL) */ /* Make pi_new point to latest pi*/ pi_new = polling_island_lock(bag->pi); if (item_type == POLL_OBJ_FD) { - grpc_fd *fd = FD_FROM_PO(item); + grpc_fd* fd = FD_FROM_PO(item); polling_island_add_fds_locked(pi_new, &fd, 1, true, &error); } @@ -1514,9 +1486,9 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, GRPC_POLLING_TRACE( "add_poll_obj: item->pi was NULL. pi_new: %p (item(%s): %p, " "bag(%s): %p)", - (void *)pi_new, poll_obj_string(item_type), (void *)item, - poll_obj_string(bag_type), (void *)bag); - } else if (bag->pi == NULL) { + (void*)pi_new, poll_obj_string(item_type), (void*)item, + poll_obj_string(bag_type), (void*)bag); + } else if (bag->pi == nullptr) { /* GPR_ASSERT(item->pi != NULL) */ /* Make pi_new to point to latest pi */ pi_new = polling_island_lock(item->pi); @@ -1524,15 +1496,15 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, GRPC_POLLING_TRACE( "add_poll_obj: bag->pi was NULL. pi_new: %p (item(%s): %p, " "bag(%s): %p)", - (void *)pi_new, poll_obj_string(item_type), (void *)item, - poll_obj_string(bag_type), (void *)bag); + (void*)pi_new, poll_obj_string(item_type), (void*)item, + poll_obj_string(bag_type), (void*)bag); } else { pi_new = polling_island_merge(item->pi, bag->pi, &error); GRPC_POLLING_TRACE( "add_poll_obj: polling islands merged. 
pi_new: %p (item(%s): %p, " "bag(%s): %p)", - (void *)pi_new, poll_obj_string(item_type), (void *)item, - poll_obj_string(bag_type), (void *)bag); + (void*)pi_new, poll_obj_string(item_type), (void*)item, + poll_obj_string(bag_type), (void*)bag); } /* At this point, pi_new is the polling island that both item->pi and bag->pi @@ -1540,16 +1512,16 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, if (item->pi != pi_new) { PI_ADD_REF(pi_new, poll_obj_string(item_type)); - if (item->pi != NULL) { - PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type)); + if (item->pi != nullptr) { + PI_UNREF(item->pi, poll_obj_string(item_type)); } item->pi = pi_new; } if (bag->pi != pi_new) { PI_ADD_REF(pi_new, poll_obj_string(bag_type)); - if (bag->pi != NULL) { - PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type)); + if (bag->pi != nullptr) { + PI_UNREF(bag->pi, poll_obj_string(bag_type)); } bag->pi = pi_new; } @@ -1558,79 +1530,68 @@ static void add_poll_object(grpc_exec_ctx *exec_ctx, poll_obj *bag, gpr_mu_unlock(&bag->mu); GRPC_LOG_IF_ERROR("add_poll_object", error); - GPR_TIMER_END("add_poll_object", 0); } -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { - add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po, - POLL_OBJ_FD); +static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) { + add_poll_object(&pollset->po, POLL_OBJ_POLLSET, &fd->po, POLL_OBJ_FD); } /******************************************************************************* * Pollset-set Definitions */ -static grpc_pollset_set *pollset_set_create(void) { - grpc_pollset_set *pss = (grpc_pollset_set *)gpr_malloc(sizeof(*pss)); +static grpc_pollset_set* pollset_set_create(void) { + grpc_pollset_set* pss = + static_cast(gpr_malloc(sizeof(*pss))); gpr_mu_init(&pss->po.mu); - pss->po.pi = NULL; + pss->po.pi = nullptr; #ifndef NDEBUG pss->po.obj_type = POLL_OBJ_POLLSET_SET; #endif return pss; } -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss) { +static void pollset_set_destroy(grpc_pollset_set* pss) { gpr_mu_destroy(&pss->po.mu); - if (pss->po.pi != NULL) { - PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy"); + if (pss->po.pi != nullptr) { + PI_UNREF(pss->po.pi, "pss_destroy"); } gpr_free(pss); } -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { - add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po, - POLL_OBJ_FD); +static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) { + add_poll_object(&pss->po, POLL_OBJ_POLLSET_SET, &fd->po, POLL_OBJ_FD); } -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss, - grpc_fd *fd) { +static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) { /* Nothing to do */ } -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) { - add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po, - POLL_OBJ_POLLSET); +static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) { + add_poll_object(&pss->po, POLL_OBJ_POLLSET_SET, &ps->po, POLL_OBJ_POLLSET); } -static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pss, grpc_pollset *ps) { +static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) { /* Nothing to do */ } -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { - add_poll_object(exec_ctx, &bag->po, 
POLL_OBJ_POLLSET_SET, &item->po, +static void pollset_set_add_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) { + add_poll_object(&bag->po, POLL_OBJ_POLLSET_SET, &item->po, POLL_OBJ_POLLSET_SET); } -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { +static void pollset_set_del_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) { /* Nothing to do */ } /* Test helper functions * */ -void *grpc_fd_get_polling_island(grpc_fd *fd) { - polling_island *pi; +void* grpc_fd_get_polling_island(grpc_fd* fd) { + polling_island* pi; gpr_mu_lock(&fd->po.mu); pi = fd->po.pi; @@ -1639,8 +1600,8 @@ void *grpc_fd_get_polling_island(grpc_fd *fd) { return pi; } -void *grpc_pollset_get_polling_island(grpc_pollset *ps) { - polling_island *pi; +void* grpc_pollset_get_polling_island(grpc_pollset* ps) { + polling_island* pi; gpr_mu_lock(&ps->po.mu); pi = ps->po.pi; @@ -1649,9 +1610,9 @@ void *grpc_pollset_get_polling_island(grpc_pollset *ps) { return pi; } -bool grpc_are_polling_islands_equal(void *p, void *q) { - polling_island *p1 = (polling_island *)p; - polling_island *p2 = (polling_island *)q; +bool grpc_are_polling_islands_equal(void* p, void* q) { + polling_island* p1 = static_cast(p); + polling_island* p2 = static_cast(q); /* Note: polling_island_lock_pair() may change p1 and p2 to point to the latest polling islands in their respective linked lists */ @@ -1717,53 +1678,58 @@ static bool is_epoll_available() { return true; } -const grpc_event_engine_vtable *grpc_init_epollsig_linux( +const grpc_event_engine_vtable* grpc_init_epollsig_linux( bool explicit_request) { /* If use of signals is disabled, we cannot use epoll engine*/ if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) { - return NULL; + gpr_log(GPR_ERROR, "Skipping epollsig because use of signals is disabled."); + return nullptr; } if (!grpc_has_wakeup_fd()) { - return NULL; + gpr_log(GPR_ERROR, "Skipping epollsig because of no wakeup fd."); + return nullptr; } if (!is_epoll_available()) { - return NULL; + gpr_log(GPR_ERROR, "Skipping epollsig because epoll is unavailable."); + return nullptr; } if (!is_grpc_wakeup_signal_initialized) { if (explicit_request) { grpc_use_signal(SIGRTMIN + 6); } else { - return NULL; + gpr_log(GPR_ERROR, + "Skipping epollsig because uninitialized wakeup signal."); + return nullptr; } } fd_global_init(); if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { - return NULL; + return nullptr; } if (!GRPC_LOG_IF_ERROR("polling_island_global_init", polling_island_global_init())) { - return NULL; + return nullptr; } return &vtable; } -#else /* defined(GRPC_LINUX_EPOLL) */ +#else /* defined(GRPC_LINUX_EPOLL_CREATE1) */ #if defined(GRPC_POSIX_SOCKET) -#include "src/core/lib/iomgr/ev_posix.h" -/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return - * NULL */ -const grpc_event_engine_vtable *grpc_init_epollsig_linux( +#include "src/core/lib/iomgr/ev_epollsig_linux.h" +/* If GRPC_LINUX_EPOLL_CREATE1 is not defined, it means + epoll_create1 is not available. 
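The availability gate above (is_epoll_available() plus the new gpr_log messages) amounts to probing epoll_create1() once and failing soft. A minimal standalone probe, using perror() where the vendored code uses gpr_log():

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

// Create a throwaway epoll set and report failure instead of aborting,
// so the caller can fall back to another event engine.
static bool epoll_create1_works(void) {
  int fd = epoll_create1(EPOLL_CLOEXEC);
  if (fd < 0) {
    perror("epoll_create1");
    return false;
  }
  close(fd);
  return true;
}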
Return NULL */ +const grpc_event_engine_vtable* grpc_init_epollsig_linux( bool explicit_request) { - return NULL; + return nullptr; } #endif /* defined(GRPC_POSIX_SOCKET) */ void grpc_use_signal(int signum) {} -#endif /* !defined(GRPC_LINUX_EPOLL) */ +#endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.h b/Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.h index 88328682b..2ba2f0a63 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.h +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_epollsig_linux.h @@ -19,15 +19,17 @@ #ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H #define GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H +#include + #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/port.h" -const grpc_event_engine_vtable *grpc_init_epollsig_linux(bool explicit_request); +const grpc_event_engine_vtable* grpc_init_epollsig_linux(bool explicit_request); -#ifdef GRPC_LINUX_EPOLL -void *grpc_fd_get_polling_island(grpc_fd *fd); -void *grpc_pollset_get_polling_island(grpc_pollset *ps); -bool grpc_are_polling_islands_equal(void *p, void *q); -#endif /* defined(GRPC_LINUX_EPOLL) */ +#ifdef GRPC_LINUX_EPOLL_CREATE1 +void* grpc_fd_get_polling_island(grpc_fd* fd); +void* grpc_pollset_get_polling_island(grpc_pollset* ps); +bool grpc_are_polling_islands_equal(void* p, void* q); +#endif /* defined(GRPC_LINUX_EPOLL_CREATE1) */ #endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.c b/Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.cc similarity index 67% rename from Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.cc index e170702dc..504787e65 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SOCKET @@ -24,6 +26,7 @@ #include #include +#include #include #include #include @@ -32,31 +35,29 @@ #include #include #include -#include -#include -#include #include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/murmur_hash.h" +#include "src/core/lib/gpr/tls.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/thd.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/iomgr_internal.h" -#include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/wakeup_fd_cv.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" #include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/block_annotate.h" -#include "src/core/lib/support/murmur_hash.h" -#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1) +#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1) /******************************************************************************* * FD declarations */ - typedef struct grpc_fd_watcher { - struct grpc_fd_watcher *next; - struct grpc_fd_watcher *prev; - grpc_pollset *pollset; - grpc_pollset_worker *worker; - grpc_fd *fd; + struct grpc_fd_watcher* next; + struct grpc_fd_watcher* prev; + grpc_pollset* pollset; + grpc_pollset_worker* worker; + grpc_fd* fd; } grpc_fd_watcher; struct grpc_fd { @@ -72,7 +73,8 @@ struct grpc_fd { int shutdown; int closed; int released; - grpc_error *shutdown_error; + gpr_atm pollhup; + grpc_error* shutdown_error; /* The watcher list. @@ -97,18 +99,18 @@ struct grpc_fd { the inactive pollers may be kicked out of their poll loops to take that responsibility. 
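The watcher bookkeeping described above relies on intrusive, sentinel-rooted doubly linked lists (inactive_watcher_root here, root_worker earlier). A generic sketch of that list shape with illustrative names:

// The root node links to itself when the list is empty, so insertion and
// removal need no null checks.
struct watcher_node {
  watcher_node* next;
  watcher_node* prev;
};

static void list_init(watcher_node* root) { root->next = root->prev = root; }

static bool list_empty(const watcher_node* root) { return root->next == root; }

static void list_push_front(watcher_node* root, watcher_node* w) {
  w->prev = root;
  w->next = root->next;
  w->next->prev = w;
  root->next = w;
}

static void list_remove(watcher_node* w) {
  w->prev->next = w->next;
  w->next->prev = w->prev;
}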
*/ grpc_fd_watcher inactive_watcher_root; - grpc_fd_watcher *read_watcher; - grpc_fd_watcher *write_watcher; + grpc_fd_watcher* read_watcher; + grpc_fd_watcher* write_watcher; - grpc_closure *read_closure; - grpc_closure *write_closure; + grpc_closure* read_closure; + grpc_closure* write_closure; - grpc_closure *on_done_closure; + grpc_closure* on_done_closure; grpc_iomgr_object iomgr_object; /* The pollset that last noticed and notified that the fd is readable */ - grpc_pollset *read_notifier_pollset; + grpc_pollset* read_notifier_pollset; }; /* Begin polling on an fd. @@ -122,35 +124,34 @@ struct grpc_fd { Polling strategies that do not need to alter their behavior depending on the fd's current interest (such as epoll) do not need to call this function. MUST NOT be called with a pollset lock taken */ -static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, - grpc_pollset_worker *worker, uint32_t read_mask, - uint32_t write_mask, grpc_fd_watcher *rec); +static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset, + grpc_pollset_worker* worker, uint32_t read_mask, + uint32_t write_mask, grpc_fd_watcher* rec); /* Complete polling previously started with fd_begin_poll MUST NOT be called with a pollset lock taken if got_read or got_write are 1, also does the become_{readable,writable} as appropriate. */ -static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *rec, - int got_read, int got_write, - grpc_pollset *read_notifier_pollset); +static void fd_end_poll(grpc_fd_watcher* rec, int got_read, int got_write, + grpc_pollset* read_notifier_pollset); /* Return 1 if this fd is orphaned, 0 otherwise */ -static bool fd_is_orphaned(grpc_fd *fd); +static bool fd_is_orphaned(grpc_fd* fd); #ifndef NDEBUG -static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line); -static void fd_unref(grpc_fd *fd, const char *reason, const char *file, +static void fd_ref(grpc_fd* fd, const char* reason, const char* file, int line); +static void fd_unref(grpc_fd* fd, const char* reason, const char* file, int line); #define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__) #define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__) #else -static void fd_ref(grpc_fd *fd); -static void fd_unref(grpc_fd *fd); +static void fd_ref(grpc_fd* fd); +static void fd_unref(grpc_fd* fd); #define GRPC_FD_REF(fd, reason) fd_ref(fd) #define GRPC_FD_UNREF(fd, reason) fd_unref(fd) #endif -#define CLOSURE_NOT_READY ((grpc_closure *)0) -#define CLOSURE_READY ((grpc_closure *)1) +#define CLOSURE_NOT_READY ((grpc_closure*)0) +#define CLOSURE_READY ((grpc_closure*)1) /******************************************************************************* * pollset declarations @@ -158,15 +159,15 @@ static void fd_unref(grpc_fd *fd); typedef struct grpc_cached_wakeup_fd { grpc_wakeup_fd fd; - struct grpc_cached_wakeup_fd *next; + struct grpc_cached_wakeup_fd* next; } grpc_cached_wakeup_fd; struct grpc_pollset_worker { - grpc_cached_wakeup_fd *wakeup_fd; + grpc_cached_wakeup_fd* wakeup_fd; int reevaluate_polling_on_wakeup; int kicked_specifically; - struct grpc_pollset_worker *next; - struct grpc_pollset_worker *prev; + struct grpc_pollset_worker* next; + struct grpc_pollset_worker* prev; }; struct grpc_pollset { @@ -175,23 +176,20 @@ struct grpc_pollset { int shutting_down; int called_shutdown; int kicked_without_pollers; - grpc_closure *shutdown_done; - grpc_closure_list idle_jobs; + grpc_closure* shutdown_done; int pollset_set_count; /* all polled fds */ size_t fd_count; 
size_t fd_capacity; - grpc_fd **fds; + grpc_fd** fds; /* Local cache of eventfds for workers */ - grpc_cached_wakeup_fd *local_wakeup_cache; + grpc_cached_wakeup_fd* local_wakeup_cache; }; /* Add an fd to a pollset */ -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - struct grpc_fd *fd); +static void pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd); -static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); +static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd); /* Convert a timespec to milliseconds: - very small or negative poll times are clamped to zero to do a @@ -200,8 +198,7 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - longer than a millisecond polls are rounded up to the next nearest millisecond to avoid spinning - infinite timeouts are converted to -1 */ -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now); +static int poll_deadline_to_millis_timeout(grpc_millis deadline); /* Allow kick to wakeup the currently polling worker */ #define GRPC_POLLSET_CAN_KICK_SELF 1 @@ -209,13 +206,13 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline, #define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2 /* As per pollset_kick, with an extended set of flags (defined above) -- mostly for fd_posix's use. */ -static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p, - grpc_pollset_worker *specific_worker, +static grpc_error* pollset_kick_ext(grpc_pollset* p, + grpc_pollset_worker* specific_worker, uint32_t flags) GRPC_MUST_USE_RESULT; /* Return 1 if the pollset has active threads in pollset_work (pollset must * be locked) */ -static bool pollset_has_workers(grpc_pollset *pollset); +static bool pollset_has_workers(grpc_pollset* pollset); /******************************************************************************* * pollset_set definitions @@ -226,15 +223,15 @@ struct grpc_pollset_set { size_t pollset_count; size_t pollset_capacity; - grpc_pollset **pollsets; + grpc_pollset** pollsets; size_t pollset_set_count; size_t pollset_set_capacity; - struct grpc_pollset_set **pollset_sets; + struct grpc_pollset_set** pollset_sets; size_t fd_count; size_t fd_capacity; - grpc_fd **fds; + grpc_fd** fds; }; /******************************************************************************* @@ -247,9 +244,9 @@ struct grpc_pollset_set { typedef struct poll_result { gpr_refcount refcount; - cv_node *watchers; + grpc_cv_node* watchers; int watchcount; - struct pollfd *fds; + struct pollfd* fds; nfds_t nfds; int retval; int err; @@ -257,28 +254,35 @@ typedef struct poll_result { } poll_result; typedef struct poll_args { + grpc_core::Thread poller_thd; gpr_cv trigger; int trigger_set; - struct pollfd *fds; + bool harvestable; + gpr_cv harvest; + bool joinable; + gpr_cv join; + struct pollfd* fds; nfds_t nfds; - poll_result *result; - struct poll_args *next; - struct poll_args *prev; + poll_result* result; + struct poll_args* next; + struct poll_args* prev; } poll_args; // This is a 2-tiered cache, we mantain a hash table // of active poll calls, so we can wait on the result -// of that call. We also maintain a freelist of inactive -// poll threads. +// of that call. We also maintain freelists of inactive +// poll args and of dead poller threads. 
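A rough standalone sketch of the two-tier cache shape this comment describes: a small hash table of active entries plus a freelist that is consulted before allocating. The names and the fixed 64-bucket table are illustrative, and entry_cache is assumed to start zero-initialized:

#include <cstddef>
#include <cstdlib>

struct cache_entry {
  size_t key;
  cache_entry* next;  // bucket chain while active, freelist link otherwise
};

struct entry_cache {
  cache_entry* buckets[64];
  cache_entry* free_list;
};

static cache_entry* cache_lookup_or_add(entry_cache* c, size_t key) {
  cache_entry** bucket = &c->buckets[key % 64];
  for (cache_entry* e = *bucket; e != nullptr; e = e->next) {
    if (e->key == key) return e;  // active hit: share the existing entry
  }
  cache_entry* e;
  if (c->free_list != nullptr) {  // second tier: reuse a retired entry
    e = c->free_list;
    c->free_list = e->next;
  } else {
    e = static_cast<cache_entry*>(malloc(sizeof(*e)));
  }
  e->key = key;
  e->next = *bucket;
  *bucket = e;
  return e;
}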
typedef struct poll_hash_table { - poll_args *free_pollers; - poll_args **active_pollers; + poll_args* free_pollers; + poll_args** active_pollers; + poll_args* dead_pollers; unsigned int size; unsigned int count; } poll_hash_table; +// TODO(kpayson64): Eliminate use of global non-POD variables poll_hash_table poll_cache; -cv_fd_table g_cvfds; +grpc_cv_fd_table g_cvfds; /******************************************************************************* * fd_posix.c @@ -287,9 +291,9 @@ cv_fd_table g_cvfds; #ifndef NDEBUG #define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) #define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__) -static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, +static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file, int line) { - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { + if (grpc_trace_fd_refcount.enabled()) { gpr_log(GPR_DEBUG, "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), @@ -298,22 +302,22 @@ static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, #else #define REF_BY(fd, n, reason) ref_by(fd, n) #define UNREF_BY(fd, n, reason) unref_by(fd, n) -static void ref_by(grpc_fd *fd, int n) { +static void ref_by(grpc_fd* fd, int n) { #endif GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); } #ifndef NDEBUG -static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, +static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file, int line) { - if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) { + if (grpc_trace_fd_refcount.enabled()) { gpr_log(GPR_DEBUG, "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]", fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst), gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line); } #else -static void unref_by(grpc_fd *fd, int n) { +static void unref_by(grpc_fd* fd, int n) { #endif gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n); if (old == n) { @@ -326,8 +330,8 @@ static void unref_by(grpc_fd *fd, int n) { } } -static grpc_fd *fd_create(int fd, const char *name) { - grpc_fd *r = (grpc_fd *)gpr_malloc(sizeof(*r)); +static grpc_fd* fd_create(int fd, const char* name) { + grpc_fd* r = static_cast(gpr_malloc(sizeof(*r))); gpr_mu_init(&r->mu); gpr_atm_rel_store(&r->refst, 1); r->shutdown = 0; @@ -336,27 +340,27 @@ static grpc_fd *fd_create(int fd, const char *name) { r->fd = fd; r->inactive_watcher_root.next = r->inactive_watcher_root.prev = &r->inactive_watcher_root; - r->read_watcher = r->write_watcher = NULL; - r->on_done_closure = NULL; + r->read_watcher = r->write_watcher = nullptr; + r->on_done_closure = nullptr; r->closed = 0; r->released = 0; - r->read_notifier_pollset = NULL; + gpr_atm_no_barrier_store(&r->pollhup, 0); + r->read_notifier_pollset = nullptr; - char *name2; + char* name2; gpr_asprintf(&name2, "%s fd=%d", name, fd); grpc_iomgr_register_object(&r->iomgr_object, name2); gpr_free(name2); return r; } -static bool fd_is_orphaned(grpc_fd *fd) { +static bool fd_is_orphaned(grpc_fd* fd) { return (gpr_atm_acq_load(&fd->refst) & 1) == 0; } /* Return the read-notifier pollset */ -static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { - grpc_pollset *notifier = NULL; +static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) { + grpc_pollset* notifier = nullptr; gpr_mu_lock(&fd->mu); notifier = fd->read_notifier_pollset; @@ -365,56 +369,53 @@ static grpc_pollset 
*fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, return notifier; } -static grpc_error *pollset_kick_locked(grpc_exec_ctx *exec_ctx, - grpc_fd_watcher *watcher) { +static grpc_error* pollset_kick_locked(grpc_fd_watcher* watcher) { gpr_mu_lock(&watcher->pollset->mu); GPR_ASSERT(watcher->worker); - grpc_error *err = - pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker, - GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP); + grpc_error* err = pollset_kick_ext(watcher->pollset, watcher->worker, + GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP); gpr_mu_unlock(&watcher->pollset->mu); return err; } -static void maybe_wake_one_watcher_locked(grpc_exec_ctx *exec_ctx, - grpc_fd *fd) { +static void maybe_wake_one_watcher_locked(grpc_fd* fd) { if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) { - pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next); + pollset_kick_locked(fd->inactive_watcher_root.next); } else if (fd->read_watcher) { - pollset_kick_locked(exec_ctx, fd->read_watcher); + pollset_kick_locked(fd->read_watcher); } else if (fd->write_watcher) { - pollset_kick_locked(exec_ctx, fd->write_watcher); + pollset_kick_locked(fd->write_watcher); } } -static void wake_all_watchers_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_fd_watcher *watcher; +static void wake_all_watchers_locked(grpc_fd* fd) { + grpc_fd_watcher* watcher; for (watcher = fd->inactive_watcher_root.next; watcher != &fd->inactive_watcher_root; watcher = watcher->next) { - pollset_kick_locked(exec_ctx, watcher); + pollset_kick_locked(watcher); } if (fd->read_watcher) { - pollset_kick_locked(exec_ctx, fd->read_watcher); + pollset_kick_locked(fd->read_watcher); } if (fd->write_watcher && fd->write_watcher != fd->read_watcher) { - pollset_kick_locked(exec_ctx, fd->write_watcher); + pollset_kick_locked(fd->write_watcher); } } -static int has_watchers(grpc_fd *fd) { - return fd->read_watcher != NULL || fd->write_watcher != NULL || +static int has_watchers(grpc_fd* fd) { + return fd->read_watcher != nullptr || fd->write_watcher != nullptr || fd->inactive_watcher_root.next != &fd->inactive_watcher_root; } -static void close_fd_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { +static void close_fd_locked(grpc_fd* fd) { fd->closed = 1; if (!fd->released) { close(fd->fd); } - GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_NONE); } -static int fd_wrapped_fd(grpc_fd *fd) { +static int fd_wrapped_fd(grpc_fd* fd) { if (fd->released || fd->closed) { return -1; } else { @@ -422,12 +423,11 @@ static int fd_wrapped_fd(grpc_fd *fd) { } } -static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *on_done, int *release_fd, - bool already_closed, const char *reason) { +static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, + bool already_closed, const char* reason) { fd->on_done_closure = on_done; - fd->released = release_fd != NULL; - if (release_fd != NULL) { + fd->released = release_fd != nullptr; + if (release_fd != nullptr) { *release_fd = fd->fd; fd->released = true; } else if (already_closed) { @@ -436,9 +436,9 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, gpr_mu_lock(&fd->mu); REF_BY(fd, 1, reason); /* remove active status, but keep referenced */ if (!has_watchers(fd)) { - close_fd_locked(exec_ctx, fd); + close_fd_locked(fd); } else { - wake_all_watchers_locked(exec_ctx, fd); + wake_all_watchers_locked(fd); } gpr_mu_unlock(&fd->mu); UNREF_BY(fd, 2, reason); /* drop the reference */ @@ 
-446,43 +446,47 @@ static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, /* increment refcount by two to avoid changing the orphan bit */ #ifndef NDEBUG -static void fd_ref(grpc_fd *fd, const char *reason, const char *file, +static void fd_ref(grpc_fd* fd, const char* reason, const char* file, int line) { ref_by(fd, 2, reason, file, line); } -static void fd_unref(grpc_fd *fd, const char *reason, const char *file, +static void fd_unref(grpc_fd* fd, const char* reason, const char* file, int line) { unref_by(fd, 2, reason, file, line); } #else -static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); } +static void fd_ref(grpc_fd* fd) { ref_by(fd, 2); } -static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); } +static void fd_unref(grpc_fd* fd) { unref_by(fd, 2); } #endif -static grpc_error *fd_shutdown_error(grpc_fd *fd) { +static grpc_error* fd_shutdown_error(grpc_fd* fd) { if (!fd->shutdown) { return GRPC_ERROR_NONE; } else { - return GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "FD shutdown", &fd->shutdown_error, 1); + return grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "FD shutdown", &fd->shutdown_error, 1), + GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_UNAVAILABLE); } } -static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure **st, grpc_closure *closure) { - if (fd->shutdown) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown")); +static void notify_on_locked(grpc_fd* fd, grpc_closure** st, + grpc_closure* closure) { + if (fd->shutdown || gpr_atm_no_barrier_load(&fd->pollhup)) { + GRPC_CLOSURE_SCHED( + closure, grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); } else if (*st == CLOSURE_NOT_READY) { /* not ready ==> switch to a waiting state by setting the closure */ *st = closure; } else if (*st == CLOSURE_READY) { /* already ready ==> queue the closure to run immediately */ *st = CLOSURE_NOT_READY; - GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd)); - maybe_wake_one_watcher_locked(exec_ctx, fd); + GRPC_CLOSURE_SCHED(closure, fd_shutdown_error(fd)); + maybe_wake_one_watcher_locked(fd); } else { /* upcallptr was set to a different closure. This is an error! 
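The three-state slot that notify_on_locked() and set_ready_locked() manage can be sketched in isolation (locking and the shutdown-error path are elided; closure, slot_notify_on, and slot_set_ready are illustrative names):

// The slot holds one of three states: NOT_READY, READY, or a parked
// closure waiting to run.
struct closure {
  void (*cb)(void* arg);
  void* arg;
};

#define SLOT_NOT_READY ((closure*)0)
#define SLOT_READY ((closure*)1)

static void run_closure(closure* c) { c->cb(c->arg); }

// A caller asks to be notified: run immediately if the event already
// fired, otherwise park the closure in the slot.
static void slot_notify_on(closure** slot, closure* c) {
  if (*slot == SLOT_READY) {
    *slot = SLOT_NOT_READY;
    run_closure(c);
  } else if (*slot == SLOT_NOT_READY) {
    *slot = c;
  }  // else: two closures registered for the same event -- caller bug
}

// The event fires: run the parked closure if any, else remember that the
// event happened so the next notify_on can run right away.
static void slot_set_ready(closure** slot) {
  if (*slot == SLOT_NOT_READY) {
    *slot = SLOT_READY;
  } else if (*slot != SLOT_READY) {
    closure* c = *slot;
    *slot = SLOT_NOT_READY;
    run_closure(c);
  }  // else: duplicate ready -- ignore
}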
*/ gpr_log(GPR_ERROR, @@ -493,8 +497,7 @@ static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, } /* returns 1 if state becomes not ready */ -static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure **st) { +static int set_ready_locked(grpc_fd* fd, grpc_closure** st) { if (*st == CLOSURE_READY) { /* duplicate ready ==> ignore */ return 0; @@ -504,18 +507,18 @@ static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd, return 0; } else { /* waiting ==> queue closure */ - GRPC_CLOSURE_SCHED(exec_ctx, *st, fd_shutdown_error(fd)); + GRPC_CLOSURE_SCHED(*st, fd_shutdown_error(fd)); *st = CLOSURE_NOT_READY; return 1; } } static void set_read_notifier_pollset_locked( - grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *read_notifier_pollset) { + grpc_fd* fd, grpc_pollset* read_notifier_pollset) { fd->read_notifier_pollset = read_notifier_pollset; } -static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { +static void fd_shutdown(grpc_fd* fd, grpc_error* why) { gpr_mu_lock(&fd->mu); /* only shutdown once */ if (!fd->shutdown) { @@ -523,40 +526,38 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { fd->shutdown_error = why; /* signal read/write closed to OS so that future operations fail */ shutdown(fd->fd, SHUT_RDWR); - set_ready_locked(exec_ctx, fd, &fd->read_closure); - set_ready_locked(exec_ctx, fd, &fd->write_closure); + set_ready_locked(fd, &fd->read_closure); + set_ready_locked(fd, &fd->write_closure); } else { GRPC_ERROR_UNREF(why); } gpr_mu_unlock(&fd->mu); } -static bool fd_is_shutdown(grpc_fd *fd) { +static bool fd_is_shutdown(grpc_fd* fd) { gpr_mu_lock(&fd->mu); bool r = fd->shutdown; gpr_mu_unlock(&fd->mu); return r; } -static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { +static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) { gpr_mu_lock(&fd->mu); - notify_on_locked(exec_ctx, fd, &fd->read_closure, closure); + notify_on_locked(fd, &fd->read_closure, closure); gpr_mu_unlock(&fd->mu); } -static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { +static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) { gpr_mu_lock(&fd->mu); - notify_on_locked(exec_ctx, fd, &fd->write_closure, closure); + notify_on_locked(fd, &fd->write_closure, closure); gpr_mu_unlock(&fd->mu); } -static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, - grpc_pollset_worker *worker, uint32_t read_mask, - uint32_t write_mask, grpc_fd_watcher *watcher) { +static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset, + grpc_pollset_worker* worker, uint32_t read_mask, + uint32_t write_mask, grpc_fd_watcher* watcher) { uint32_t mask = 0; - grpc_closure *cur; + grpc_closure* cur; int requested; /* keep track of pollers that have requested our events, in case they change */ @@ -566,9 +567,9 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, /* if we are shutdown, then don't add to the watcher set */ if (fd->shutdown) { - watcher->fd = NULL; - watcher->pollset = NULL; - watcher->worker = NULL; + watcher->fd = nullptr; + watcher->pollset = nullptr; + watcher->worker = nullptr; gpr_mu_unlock(&fd->mu); GRPC_FD_UNREF(fd, "poll"); return 0; @@ -577,7 +578,7 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, /* if there is nobody polling for read, but we need to, then start doing so */ cur = fd->read_closure; requested = cur != CLOSURE_READY; - if (read_mask && fd->read_watcher == 
NULL && requested) { + if (read_mask && fd->read_watcher == nullptr && requested) { fd->read_watcher = watcher; mask |= read_mask; } @@ -585,12 +586,12 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, */ cur = fd->write_closure; requested = cur != CLOSURE_READY; - if (write_mask && fd->write_watcher == NULL && requested) { + if (write_mask && fd->write_watcher == nullptr && requested) { fd->write_watcher = watcher; mask |= write_mask; } /* if not polling, remember this watcher in case we need someone to later */ - if (mask == 0 && worker != NULL) { + if (mask == 0 && worker != nullptr) { watcher->next = &fd->inactive_watcher_root; watcher->prev = watcher->next->prev; watcher->next->prev = watcher->prev->next = watcher; @@ -603,14 +604,13 @@ static uint32_t fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset, return mask; } -static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher, - int got_read, int got_write, - grpc_pollset *read_notifier_pollset) { +static void fd_end_poll(grpc_fd_watcher* watcher, int got_read, int got_write, + grpc_pollset* read_notifier_pollset) { int was_polling = 0; int kick = 0; - grpc_fd *fd = watcher->fd; + grpc_fd* fd = watcher->fd; - if (fd == NULL) { + if (fd == nullptr) { return; } @@ -622,7 +622,7 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher, if (!got_read) { kick = 1; } - fd->read_watcher = NULL; + fd->read_watcher = nullptr; } if (watcher == fd->write_watcher) { /* remove write watcher, kick if we still need a write */ @@ -630,31 +630,31 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher, if (!got_write) { kick = 1; } - fd->write_watcher = NULL; + fd->write_watcher = nullptr; } - if (!was_polling && watcher->worker != NULL) { + if (!was_polling && watcher->worker != nullptr) { /* remove from inactive list */ watcher->next->prev = watcher->prev; watcher->prev->next = watcher->next; } if (got_read) { - if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) { + if (set_ready_locked(fd, &fd->read_closure)) { kick = 1; } - if (read_notifier_pollset != NULL) { - set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset); + if (read_notifier_pollset != nullptr) { + set_read_notifier_pollset_locked(fd, read_notifier_pollset); } } if (got_write) { - if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) { + if (set_ready_locked(fd, &fd->write_closure)) { kick = 1; } } if (kick) { - maybe_wake_one_watcher_locked(exec_ctx, fd); + maybe_wake_one_watcher_locked(fd); } if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) { - close_fd_locked(exec_ctx, fd); + close_fd_locked(fd); } gpr_mu_unlock(&fd->mu); @@ -668,46 +668,46 @@ static void fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher, GPR_TLS_DECL(g_current_thread_poller); GPR_TLS_DECL(g_current_thread_worker); -static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) { +static void remove_worker(grpc_pollset* p, grpc_pollset_worker* worker) { worker->prev->next = worker->next; worker->next->prev = worker->prev; } -static bool pollset_has_workers(grpc_pollset *p) { +static bool pollset_has_workers(grpc_pollset* p) { return p->root_worker.next != &p->root_worker; } -static bool pollset_in_pollset_sets(grpc_pollset *p) { +static bool pollset_in_pollset_sets(grpc_pollset* p) { return p->pollset_set_count; } -static bool pollset_has_observers(grpc_pollset *p) { +static bool pollset_has_observers(grpc_pollset* p) { return pollset_has_workers(p) || pollset_in_pollset_sets(p); } -static 
grpc_pollset_worker *pop_front_worker(grpc_pollset *p) { +static grpc_pollset_worker* pop_front_worker(grpc_pollset* p) { if (pollset_has_workers(p)) { - grpc_pollset_worker *w = p->root_worker.next; + grpc_pollset_worker* w = p->root_worker.next; remove_worker(p, w); return w; } else { - return NULL; + return nullptr; } } -static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) { +static void push_back_worker(grpc_pollset* p, grpc_pollset_worker* worker) { worker->next = &p->root_worker; worker->prev = worker->next->prev; worker->prev->next = worker->next->prev = worker; } -static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) { +static void push_front_worker(grpc_pollset* p, grpc_pollset_worker* worker) { worker->prev = &p->root_worker; worker->next = worker->prev->next; worker->prev->next = worker->next->prev = worker; } -static void kick_append_error(grpc_error **composite, grpc_error *error) { +static void kick_append_error(grpc_error** composite, grpc_error* error) { if (error == GRPC_ERROR_NONE) return; if (*composite == GRPC_ERROR_NONE) { *composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Kick Failure"); @@ -715,17 +715,17 @@ static void kick_append_error(grpc_error **composite, grpc_error *error) { *composite = grpc_error_add_child(*composite, error); } -static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p, - grpc_pollset_worker *specific_worker, +static grpc_error* pollset_kick_ext(grpc_pollset* p, + grpc_pollset_worker* specific_worker, uint32_t flags) { - GPR_TIMER_BEGIN("pollset_kick_ext", 0); - grpc_error *error = GRPC_ERROR_NONE; - GRPC_STATS_INC_POLLSET_KICK(exec_ctx); + GPR_TIMER_SCOPE("pollset_kick_ext", 0); + grpc_error* error = GRPC_ERROR_NONE; + GRPC_STATS_INC_POLLSET_KICK(); /* pollset->mu already held */ - if (specific_worker != NULL) { + if (specific_worker != nullptr) { if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) { - GPR_TIMER_BEGIN("pollset_kick_ext.broadcast", 0); + GPR_TIMER_SCOPE("pollset_kick_ext.broadcast", 0); GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0); for (specific_worker = p->root_worker.next; specific_worker != &p->root_worker; @@ -734,7 +734,6 @@ static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p, &error, grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd)); } p->kicked_without_pollers = true; - GPR_TIMER_END("pollset_kick_ext.broadcast", 0); } else if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)specific_worker) { GPR_TIMER_MARK("different_thread_worker", 0); @@ -757,7 +756,7 @@ static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p, GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0); GPR_TIMER_MARK("kick_anonymous", 0); specific_worker = pop_front_worker(p); - if (specific_worker != NULL) { + if (specific_worker != nullptr) { if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { GPR_TIMER_MARK("kick_anonymous_not_self", 0); push_back_worker(p, specific_worker); @@ -766,10 +765,10 @@ static grpc_error *pollset_kick_ext(grpc_exec_ctx *exec_ctx, grpc_pollset *p, gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { push_back_worker(p, specific_worker); - specific_worker = NULL; + specific_worker = nullptr; } } - if (specific_worker != NULL) { + if (specific_worker != nullptr) { GPR_TIMER_MARK("finally_kick", 0); push_back_worker(p, specific_worker); kick_append_error( @@ -781,19 +780,18 @@ static grpc_error *pollset_kick_ext(grpc_exec_ctx 
*exec_ctx, grpc_pollset *p, } } - GPR_TIMER_END("pollset_kick_ext", 0); GRPC_LOG_IF_ERROR("pollset_kick_ext", GRPC_ERROR_REF(error)); return error; } -static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, - grpc_pollset_worker *specific_worker) { - return pollset_kick_ext(exec_ctx, p, specific_worker, 0); +static grpc_error* pollset_kick(grpc_pollset* p, + grpc_pollset_worker* specific_worker) { + return pollset_kick_ext(p, specific_worker, 0); } /* global state management */ -static grpc_error *pollset_global_init(void) { +static grpc_error* pollset_global_init(void) { gpr_tls_init(&g_current_thread_poller); gpr_tls_init(&g_current_thread_worker); return GRPC_ERROR_NONE; @@ -806,27 +804,25 @@ static void pollset_global_shutdown(void) { /* main interface */ -static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { +static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) { gpr_mu_init(&pollset->mu); *mu = &pollset->mu; pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker; pollset->shutting_down = 0; pollset->called_shutdown = 0; pollset->kicked_without_pollers = 0; - pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL; - pollset->local_wakeup_cache = NULL; + pollset->local_wakeup_cache = nullptr; pollset->kicked_without_pollers = 0; pollset->fd_count = 0; pollset->fd_capacity = 0; - pollset->fds = NULL; + pollset->fds = nullptr; pollset->pollset_set_count = 0; } -static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { +static void pollset_destroy(grpc_pollset* pollset) { GPR_ASSERT(!pollset_has_workers(pollset)); - GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail); while (pollset->local_wakeup_cache) { - grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next; + grpc_cached_wakeup_fd* next = pollset->local_wakeup_cache->next; grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd); gpr_free(pollset->local_wakeup_cache); pollset->local_wakeup_cache = next; @@ -835,8 +831,7 @@ static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { gpr_mu_destroy(&pollset->mu); } -static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_fd *fd) { +static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) { gpr_mu_lock(&pollset->mu); size_t i; /* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */ @@ -846,27 +841,26 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (pollset->fd_count == pollset->fd_capacity) { pollset->fd_capacity = GPR_MAX(pollset->fd_capacity + 8, pollset->fd_count * 3 / 2); - pollset->fds = (grpc_fd **)gpr_realloc( - pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity); + pollset->fds = static_cast( + gpr_realloc(pollset->fds, sizeof(grpc_fd*) * pollset->fd_capacity)); } pollset->fds[pollset->fd_count++] = fd; GRPC_FD_REF(fd, "multipoller"); - pollset_kick(exec_ctx, pollset, NULL); + pollset_kick(pollset, nullptr); exit: gpr_mu_unlock(&pollset->mu); } -static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { - GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs)); +static void finish_shutdown(grpc_pollset* pollset) { size_t i; for (i = 0; i < pollset->fd_count; i++) { GRPC_FD_UNREF(pollset->fds[i], "multipoller"); } pollset->fd_count = 0; - GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(pollset->shutdown_done, GRPC_ERROR_NONE); } -static void work_combine_error(grpc_error **composite, grpc_error *error) { +static 
void work_combine_error(grpc_error** composite, grpc_error* error) { if (error == GRPC_ERROR_NONE) return; if (*composite == GRPC_ERROR_NONE) { *composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("pollset_work"); @@ -874,12 +868,13 @@ static void work_combine_error(grpc_error **composite, grpc_error *error) { *composite = grpc_error_add_child(*composite, error); } -static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { +static grpc_error* pollset_work(grpc_pollset* pollset, + grpc_pollset_worker** worker_hdl, + grpc_millis deadline) { + GPR_TIMER_SCOPE("pollset_work", 0); grpc_pollset_worker worker; if (worker_hdl) *worker_hdl = &worker; - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; /* Avoid malloc for small number of elements. */ enum { inline_elements = 96 }; @@ -891,16 +886,15 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, int locked = 1; int queued_work = 0; int keep_polling = 0; - GPR_TIMER_BEGIN("pollset_work", 0); /* this must happen before we (potentially) drop pollset->mu */ - worker.next = worker.prev = NULL; + worker.next = worker.prev = nullptr; worker.reevaluate_polling_on_wakeup = 0; - if (pollset->local_wakeup_cache != NULL) { + if (pollset->local_wakeup_cache != nullptr) { worker.wakeup_fd = pollset->local_wakeup_cache; pollset->local_wakeup_cache = worker.wakeup_fd->next; } else { - worker.wakeup_fd = - (grpc_cached_wakeup_fd *)gpr_malloc(sizeof(*worker.wakeup_fd)); + worker.wakeup_fd = static_cast( + gpr_malloc(sizeof(*worker.wakeup_fd))); error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd); if (error != GRPC_ERROR_NONE) { GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error)); @@ -908,14 +902,6 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } } worker.kicked_specifically = 0; - /* If there's work waiting for the pollset to be idle, and the - pollset is idle, then do that work */ - if (!pollset_has_workers(pollset) && - !grpc_closure_list_empty(pollset->idle_jobs)) { - GPR_TIMER_MARK("pollset_work.idle_jobs", 0); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs); - goto done; - } /* If we're shutting down then we don't execute any extended work */ if (pollset->shutting_down) { GPR_TIMER_MARK("pollset_work.shutting_down", 0); @@ -928,13 +914,14 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset); while (keep_polling) { keep_polling = 0; - if (!pollset->kicked_without_pollers) { + if (!pollset->kicked_without_pollers || + deadline <= grpc_core::ExecCtx::Get()->Now()) { if (!added_worker) { push_front_worker(pollset, &worker); added_worker = 1; gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); } - GPR_TIMER_BEGIN("maybe_work_and_unlock", 0); + GPR_TIMER_SCOPE("maybe_work_and_unlock", 0); #define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR) #define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR) @@ -942,10 +929,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, int r; size_t i, fd_count; nfds_t pfd_count; - grpc_fd_watcher *watchers; - struct pollfd *pfds; + grpc_fd_watcher* watchers; + struct pollfd* pfds; - timeout = poll_deadline_to_millis_timeout(deadline, now); + timeout = poll_deadline_to_millis_timeout(deadline); if (pollset->fd_count + 2 <= inline_elements) { pfds = pollfd_space; @@ -954,9 +941,10 @@ static grpc_error 
*pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, /* Allocate one buffer to hold both pfds and watchers arrays */ const size_t pfd_size = sizeof(*pfds) * (pollset->fd_count + 2); const size_t watch_size = sizeof(*watchers) * (pollset->fd_count + 2); - void *buf = gpr_malloc(pfd_size + watch_size); - pfds = (struct pollfd *)buf; - watchers = (grpc_fd_watcher *)(void *)((char *)buf + pfd_size); + void* buf = gpr_malloc(pfd_size + watch_size); + pfds = static_cast(buf); + watchers = static_cast( + (void*)(static_cast(buf) + pfd_size)); } fd_count = 0; @@ -965,7 +953,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, pfds[0].events = POLLIN; pfds[0].revents = 0; for (i = 0; i < pollset->fd_count; i++) { - if (fd_is_orphaned(pollset->fds[i])) { + if (fd_is_orphaned(pollset->fds[i]) || + gpr_atm_no_barrier_load(&pollset->fds[i]->pollhup) == 1) { GRPC_FD_UNREF(pollset->fds[i], "multipoller"); } else { pollset->fds[fd_count++] = pollset->fds[i]; @@ -980,21 +969,21 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_mu_unlock(&pollset->mu); for (i = 1; i < pfd_count; i++) { - grpc_fd *fd = watchers[i].fd; - pfds[i].events = (short)fd_begin_poll(fd, pollset, &worker, POLLIN, - POLLOUT, &watchers[i]); + grpc_fd* fd = watchers[i].fd; + pfds[i].events = static_cast( + fd_begin_poll(fd, pollset, &worker, POLLIN, POLLOUT, &watchers[i])); GRPC_FD_UNREF(fd, "multipoller_start"); } /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid even going into the blocking annotation if possible */ GRPC_SCHEDULING_START_BLOCKING_REGION; - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); + GRPC_STATS_INC_SYSCALL_POLL(); r = grpc_poll_function(pfds, pfd_count, timeout); GRPC_SCHEDULING_END_BLOCKING_REGION; - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "%p poll=%d", pollset, r); } if (r < 0) { @@ -1003,36 +992,42 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } for (i = 1; i < pfd_count; i++) { - if (watchers[i].fd == NULL) { - fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL); + if (watchers[i].fd == nullptr) { + fd_end_poll(&watchers[i], 0, 0, nullptr); } else { // Wake up all the file descriptors, if we have an invalid one // we can identify it on the next pollset_work() - fd_end_poll(exec_ctx, &watchers[i], 1, 1, pollset); + fd_end_poll(&watchers[i], 1, 1, pollset); } } } else if (r == 0) { for (i = 1; i < pfd_count; i++) { - fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL); + fd_end_poll(&watchers[i], 0, 0, nullptr); } } else { if (pfds[0].revents & POLLIN_CHECK) { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "%p: got_wakeup", pollset); } work_combine_error( &error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd)); } for (i = 1; i < pfd_count; i++) { - if (watchers[i].fd == NULL) { - fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL); + if (watchers[i].fd == nullptr) { + fd_end_poll(&watchers[i], 0, 0, nullptr); } else { - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset, + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_INFO, "%p got_event: %d r:%d w:%d [%d]", pollset, pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0, (pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents); } - fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & 
POLLIN_CHECK, + /* This is a mitigation to prevent poll() from spinning on a + ** POLLHUP https://github.com/grpc/grpc/pull/13665 + */ + if (pfds[i].revents & POLLHUP) { + gpr_atm_no_barrier_store(&watchers[i].fd->pollhup, 1); + } + fd_end_poll(&watchers[i], pfds[i].revents & POLLIN_CHECK, pfds[i].revents & POLLOUT_CHECK, pollset); } } @@ -1043,7 +1038,6 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_free(pfds); } - GPR_TIMER_END("maybe_work_and_unlock", 0); locked = 0; } else { GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0); @@ -1055,7 +1049,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, worker list, which means nobody could ask us to re-evaluate polling). */ done: if (!locked) { - queued_work |= grpc_exec_ctx_flush(exec_ctx); + queued_work |= grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(&pollset->mu); locked = 1; } @@ -1068,13 +1062,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (queued_work || worker.kicked_specifically) { /* If there's queued work on the list, then set the deadline to be immediate so we get back out of the polling loop quickly */ - deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC); + deadline = 0; } keep_polling = 1; } - if (keep_polling) { - now = gpr_now(now.clock_type); - } } gpr_tls_set(&g_current_thread_poller, 0); if (added_worker) { @@ -1087,82 +1078,63 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, /* check shutdown conditions */ if (pollset->shutting_down) { if (pollset_has_workers(pollset)) { - pollset_kick(exec_ctx, pollset, NULL); + pollset_kick(pollset, nullptr); } else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) { pollset->called_shutdown = 1; gpr_mu_unlock(&pollset->mu); - finish_shutdown(exec_ctx, pollset); - grpc_exec_ctx_flush(exec_ctx); + finish_shutdown(pollset); + grpc_core::ExecCtx::Get()->Flush(); /* Continuing to access pollset here is safe -- it is the caller's * responsibility to not destroy when it has outstanding calls to * pollset_work. * TODO(dklempner): Can we refactor the shutdown logic to avoid this? 
*/ gpr_mu_lock(&pollset->mu); - } else if (!grpc_closure_list_empty(pollset->idle_jobs)) { - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs); - gpr_mu_unlock(&pollset->mu); - grpc_exec_ctx_flush(exec_ctx); - gpr_mu_lock(&pollset->mu); } } - if (worker_hdl) *worker_hdl = NULL; - GPR_TIMER_END("pollset_work", 0); + if (worker_hdl) *worker_hdl = nullptr; GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error)); return error; } -static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { +static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { GPR_ASSERT(!pollset->shutting_down); pollset->shutting_down = 1; pollset->shutdown_done = closure; - pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST); - if (!pollset_has_workers(pollset)) { - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs); - } + pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); if (!pollset->called_shutdown && !pollset_has_observers(pollset)) { pollset->called_shutdown = 1; - finish_shutdown(exec_ctx, pollset); + finish_shutdown(pollset); } } -static int poll_deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - static const int64_t max_spin_polling_us = 10; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { - return -1; - } - if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( - max_spin_polling_us, - GPR_TIMESPAN))) <= 0) { - return 0; - } - timeout = gpr_time_sub(deadline, now); - return gpr_time_to_millis(gpr_time_add( - timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN))); +static int poll_deadline_to_millis_timeout(grpc_millis deadline) { + if (deadline == GRPC_MILLIS_INF_FUTURE) return -1; + if (deadline == 0) return 0; + grpc_millis n = deadline - grpc_core::ExecCtx::Get()->Now(); + if (n < 0) return 0; + if (n > INT_MAX) return -1; + return static_cast(n); } /******************************************************************************* * pollset_set_posix.c */ -static grpc_pollset_set *pollset_set_create(void) { - grpc_pollset_set *pollset_set = - (grpc_pollset_set *)gpr_zalloc(sizeof(*pollset_set)); +static grpc_pollset_set* pollset_set_create(void) { + grpc_pollset_set* pollset_set = + static_cast(gpr_zalloc(sizeof(*pollset_set))); gpr_mu_init(&pollset_set->mu); return pollset_set; } -static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set) { +static void pollset_set_destroy(grpc_pollset_set* pollset_set) { size_t i; gpr_mu_destroy(&pollset_set->mu); for (i = 0; i < pollset_set->fd_count; i++) { GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); } for (i = 0; i < pollset_set->pollset_count; i++) { - grpc_pollset *pollset = pollset_set->pollsets[i]; + grpc_pollset* pollset = pollset_set->pollsets[i]; gpr_mu_lock(&pollset->mu); pollset->pollset_set_count--; /* check shutdown */ @@ -1170,7 +1142,7 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, !pollset_has_observers(pollset)) { pollset->called_shutdown = 1; gpr_mu_unlock(&pollset->mu); - finish_shutdown(exec_ctx, pollset); + finish_shutdown(pollset); } else { gpr_mu_unlock(&pollset->mu); } @@ -1181,9 +1153,8 @@ static void pollset_set_destroy(grpc_exec_ctx *exec_ctx, gpr_free(pollset_set); } -static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset) { +static void pollset_set_add_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) { size_t i, j; gpr_mu_lock(&pollset->mu); 
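
Two behavioural changes sit inside the pollset_work and shutdown hunks above: deadlines become a single absolute grpc_millis value converted to a relative poll() timeout on demand, and an fd that reports POLLHUP is flagged atomically so later iterations stop handing it to poll() (the spin mitigation referenced above). The sketch below reproduces both ideas with plain POSIX poll(); the TrackedFd type, the constants and the pipe setup are illustrative assumptions, not gRPC code.

#include <poll.h>
#include <unistd.h>

#include <atomic>
#include <chrono>
#include <climits>
#include <cstdint>
#include <cstdio>
#include <vector>

using Millis = int64_t;
static Millis NowMs() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
}

// Mirrors the replacement deadline helper: an absolute millisecond deadline
// becomes poll()'s relative timeout (-1 = infinite, 0 = already due),
// clamped into the int range poll() accepts.
static int DeadlineToPollTimeout(Millis deadline, Millis infinite_future) {
  if (deadline == infinite_future) return -1;
  if (deadline == 0) return 0;
  Millis n = deadline - NowMs();
  if (n < 0) return 0;
  if (n > INT_MAX) return -1;
  return static_cast<int>(n);
}

// Hypothetical tracked fd: the atomic hangup flag plays the role of the
// fd->pollhup field introduced in the diff.
struct TrackedFd {
  int fd;
  std::atomic<bool> hangup{false};
};

int main() {
  constexpr Millis kInfFuture = INT64_MAX;
  int pipefd[2];
  if (pipe(pipefd) != 0) return 1;
  close(pipefd[1]);  // closing the write end makes the read end report POLLHUP

  TrackedFd tracked{pipefd[0]};
  Millis deadline = NowMs() + 200;

  for (int round = 0; round < 3; round++) {
    std::vector<pollfd> pfds;
    if (!tracked.hangup.load(std::memory_order_relaxed)) {
      pfds.push_back({tracked.fd, POLLIN, 0});
    }
    if (pfds.empty()) {
      std::puts("fd flagged with POLLHUP; skipping it instead of spinning");
      break;
    }
    int r = poll(pfds.data(), pfds.size(),
                 DeadlineToPollTimeout(deadline, kInfFuture));
    if (r > 0 && (pfds[0].revents & POLLHUP)) {
      // The mitigation from the diff: remember the hangup so the next
      // iteration stops asking poll() about this fd.
      tracked.hangup.store(true, std::memory_order_relaxed);
    }
  }
  close(pipefd[0]);
}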
pollset->pollset_set_count++; @@ -1192,16 +1163,16 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, if (pollset_set->pollset_count == pollset_set->pollset_capacity) { pollset_set->pollset_capacity = GPR_MAX(8, 2 * pollset_set->pollset_capacity); - pollset_set->pollsets = (grpc_pollset **)gpr_realloc( + pollset_set->pollsets = static_cast(gpr_realloc( pollset_set->pollsets, - pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets)); + pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets))); } pollset_set->pollsets[pollset_set->pollset_count++] = pollset; for (i = 0, j = 0; i < pollset_set->fd_count; i++) { if (fd_is_orphaned(pollset_set->fds[i])) { GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set"); } else { - pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]); + pollset_add_fd(pollset, pollset_set->fds[i]); pollset_set->fds[j++] = pollset_set->fds[i]; } } @@ -1209,15 +1180,14 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&pollset_set->mu); } -static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset) { +static void pollset_set_del_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) { size_t i; gpr_mu_lock(&pollset_set->mu); for (i = 0; i < pollset_set->pollset_count; i++) { if (pollset_set->pollsets[i] == pollset) { pollset_set->pollset_count--; - GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i], + GPR_SWAP(grpc_pollset*, pollset_set->pollsets[i], pollset_set->pollsets[pollset_set->pollset_count]); break; } @@ -1230,29 +1200,28 @@ static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, !pollset_has_observers(pollset)) { pollset->called_shutdown = 1; gpr_mu_unlock(&pollset->mu); - finish_shutdown(exec_ctx, pollset); + finish_shutdown(pollset); } else { gpr_mu_unlock(&pollset->mu); } } -static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { +static void pollset_set_add_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) { size_t i, j; gpr_mu_lock(&bag->mu); if (bag->pollset_set_count == bag->pollset_set_capacity) { bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity); - bag->pollset_sets = (grpc_pollset_set **)gpr_realloc( - bag->pollset_sets, - bag->pollset_set_capacity * sizeof(*bag->pollset_sets)); + bag->pollset_sets = static_cast( + gpr_realloc(bag->pollset_sets, + bag->pollset_set_capacity * sizeof(*bag->pollset_sets))); } bag->pollset_sets[bag->pollset_set_count++] = item; for (i = 0, j = 0; i < bag->fd_count; i++) { if (fd_is_orphaned(bag->fds[i])) { GRPC_FD_UNREF(bag->fds[i], "pollset_set"); } else { - pollset_set_add_fd(exec_ctx, item, bag->fds[i]); + pollset_set_add_fd(item, bag->fds[i]); bag->fds[j++] = bag->fds[i]; } } @@ -1260,15 +1229,14 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&bag->mu); } -static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { +static void pollset_set_del_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) { size_t i; gpr_mu_lock(&bag->mu); for (i = 0; i < bag->pollset_set_count; i++) { if (bag->pollset_sets[i] == item) { bag->pollset_set_count--; - GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i], + GPR_SWAP(grpc_pollset_set*, bag->pollset_sets[i], bag->pollset_sets[bag->pollset_set_count]); break; } @@ -1276,41 +1244,40 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&bag->mu); } 
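
The pollset_set hunks above keep the existing container idiom and only modernize the casts: membership arrays grow by doubling (GPR_MAX(8, 2 * capacity)) and removal swaps the matching slot with the last element via GPR_SWAP. A minimal standalone version of that pattern, with invented names and no claim about the real grpc_pollset_set layout:

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <initializer_list>
#include <utility>

// Manually grown array (capacity = max(8, 2 * old)) with order-free removal
// by swapping the victim with the last element.
struct IntSet {
  int* items = nullptr;
  size_t count = 0;
  size_t capacity = 0;
};

static void set_add(IntSet* s, int value) {
  if (s->count == s->capacity) {
    s->capacity = std::max<size_t>(8, 2 * s->capacity);
    s->items = static_cast<int*>(
        std::realloc(s->items, s->capacity * sizeof(*s->items)));
  }
  s->items[s->count++] = value;
}

static void set_del(IntSet* s, int value) {
  for (size_t i = 0; i < s->count; i++) {
    if (s->items[i] == value) {
      s->count--;
      std::swap(s->items[i], s->items[s->count]);  // the GPR_SWAP step
      break;
    }
  }
}

int main() {
  IntSet s;
  for (int v : {10, 20, 30, 40}) set_add(&s, v);
  set_del(&s, 20);  // 40 moves into slot 1; order is not preserved
  for (size_t i = 0; i < s.count; i++) std::printf("%d ", s.items[i]);
  std::printf("\n");
  std::free(s.items);
}

Swap-with-last keeps deletion O(1) at the cost of ordering, which is irrelevant here because the sets are only ever iterated in full.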
-static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { +static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) { size_t i; gpr_mu_lock(&pollset_set->mu); if (pollset_set->fd_count == pollset_set->fd_capacity) { pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity); - pollset_set->fds = (grpc_fd **)gpr_realloc( - pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds)); + pollset_set->fds = static_cast( + gpr_realloc(pollset_set->fds, + pollset_set->fd_capacity * sizeof(*pollset_set->fds))); } GRPC_FD_REF(fd, "pollset_set"); pollset_set->fds[pollset_set->fd_count++] = fd; for (i = 0; i < pollset_set->pollset_count; i++) { - pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd); + pollset_add_fd(pollset_set->pollsets[i], fd); } for (i = 0; i < pollset_set->pollset_set_count; i++) { - pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd); + pollset_set_add_fd(pollset_set->pollset_sets[i], fd); } gpr_mu_unlock(&pollset_set->mu); } -static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { +static void pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) { size_t i; gpr_mu_lock(&pollset_set->mu); for (i = 0; i < pollset_set->fd_count; i++) { if (pollset_set->fds[i] == fd) { pollset_set->fd_count--; - GPR_SWAP(grpc_fd *, pollset_set->fds[i], + GPR_SWAP(grpc_fd*, pollset_set->fds[i], pollset_set->fds[pollset_set->fd_count]); GRPC_FD_UNREF(fd, "pollset_set"); break; } } for (i = 0; i < pollset_set->pollset_set_count; i++) { - pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd); + pollset_set_del_fd(pollset_set->pollset_sets[i], fd); } gpr_mu_unlock(&pollset_set->mu); } @@ -1319,10 +1286,11 @@ static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, * Condition Variable polling extensions */ -static void run_poll(void *args); -static void cache_poller_locked(poll_args *args); +static void run_poll(void* args); +static void cache_poller_locked(poll_args* args); +static void cache_harvest_locked(); -static void cache_insert_locked(poll_args *args) { +static void cache_insert_locked(poll_args* args) { uint32_t key = gpr_murmur_hash3(args->fds, args->nfds * sizeof(struct pollfd), 0xDEADBEEF); key = key % poll_cache.size; @@ -1330,18 +1298,18 @@ static void cache_insert_locked(poll_args *args) { poll_cache.active_pollers[key]->prev = args; } args->next = poll_cache.active_pollers[key]; - args->prev = NULL; + args->prev = nullptr; poll_cache.active_pollers[key] = args; poll_cache.count++; } -static void init_result(poll_args *pargs) { - pargs->result = (poll_result *)gpr_malloc(sizeof(poll_result)); +static void init_result(poll_args* pargs) { + pargs->result = static_cast(gpr_malloc(sizeof(poll_result))); gpr_ref_init(&pargs->result->refcount, 1); - pargs->result->watchers = NULL; + pargs->result->watchers = nullptr; pargs->result->watchcount = 0; - pargs->result->fds = - (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * pargs->nfds); + pargs->result->fds = static_cast( + gpr_malloc(sizeof(struct pollfd) * pargs->nfds)); memcpy(pargs->result->fds, pargs->fds, sizeof(struct pollfd) * pargs->nfds); pargs->result->nfds = pargs->nfds; pargs->result->retval = 0; @@ -1351,11 +1319,11 @@ static void init_result(poll_args *pargs) { // Creates a poll_args object for a given arguments to poll(). // This object may return a poll_args in the cache. 
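
cache_insert_locked above, and get_poller_locked just below, reuse a background poller whenever the same (fds, nfds) combination is requested again: the raw bytes of the pollfd array are hashed (gpr_murmur_hash3 in the real code) and a memcmp confirms the match. The sketch below captures that lookup with a std::unordered_map keyed on the serialized bytes instead of hand-rolled bucket lists; PollerState, KeyFor and the sample descriptors are invented for illustration.

#include <poll.h>

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

// Placeholder for poll_args: whatever state a cached poller would carry.
struct PollerState {
  std::vector<pollfd> fds;
  int uses = 0;
};

static std::string KeyFor(const pollfd* fds, nfds_t nfds) {
  // Identical fd/event sets serialize to identical keys, so a repeated
  // poll() over the same descriptors finds the cached poller.
  return std::string(reinterpret_cast<const char*>(fds), nfds * sizeof(pollfd));
}

static PollerState* GetPoller(
    std::unordered_map<std::string, PollerState>& cache, const pollfd* fds,
    nfds_t nfds) {
  std::string key = KeyFor(fds, nfds);
  auto it = cache.find(key);
  if (it == cache.end()) {
    PollerState state;
    state.fds.assign(fds, fds + nfds);
    it = cache.emplace(std::move(key), std::move(state)).first;
  }
  it->second.uses++;
  return &it->second;
}

int main() {
  std::unordered_map<std::string, PollerState> cache;
  pollfd set[2] = {{3, POLLIN, 0}, {4, POLLIN | POLLOUT, 0}};
  GetPoller(cache, set, 2);
  PollerState* again = GetPoller(cache, set, 2);  // hits the cached entry
  std::printf("cached pollers: %zu, uses of this set: %d\n", cache.size(),
              again->uses);
}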
-static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) { +static poll_args* get_poller_locked(struct pollfd* fds, nfds_t count) { uint32_t key = gpr_murmur_hash3(fds, count * sizeof(struct pollfd), 0xDEADBEEF); key = key % poll_cache.size; - poll_args *curr = poll_cache.active_pollers[key]; + poll_args* curr = poll_cache.active_pollers[key]; while (curr) { if (curr->nfds == count && memcmp(curr->fds, fds, count * sizeof(struct pollfd)) == 0) { @@ -1366,38 +1334,41 @@ static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) { } if (poll_cache.free_pollers) { - poll_args *pargs = poll_cache.free_pollers; + poll_args* pargs = poll_cache.free_pollers; poll_cache.free_pollers = pargs->next; if (poll_cache.free_pollers) { - poll_cache.free_pollers->prev = NULL; + poll_cache.free_pollers->prev = nullptr; } pargs->fds = fds; pargs->nfds = count; - pargs->next = NULL; - pargs->prev = NULL; + pargs->next = nullptr; + pargs->prev = nullptr; init_result(pargs); cache_poller_locked(pargs); return pargs; } - poll_args *pargs = (poll_args *)gpr_malloc(sizeof(struct poll_args)); + poll_args* pargs = + static_cast(gpr_malloc(sizeof(struct poll_args))); gpr_cv_init(&pargs->trigger); + gpr_cv_init(&pargs->harvest); + gpr_cv_init(&pargs->join); + pargs->harvestable = false; + pargs->joinable = false; pargs->fds = fds; pargs->nfds = count; - pargs->next = NULL; - pargs->prev = NULL; + pargs->next = nullptr; + pargs->prev = nullptr; pargs->trigger_set = 0; init_result(pargs); cache_poller_locked(pargs); - gpr_thd_id t_id; - gpr_thd_options opt = gpr_thd_options_default(); gpr_ref(&g_cvfds.pollcount); - gpr_thd_options_set_detached(&opt); - GPR_ASSERT(gpr_thd_new(&t_id, &run_poll, pargs, &opt)); + pargs->poller_thd = grpc_core::Thread("grpc_poller", &run_poll, pargs); + pargs->poller_thd.Start(); return pargs; } -static void cache_delete_locked(poll_args *args) { +static void cache_delete_locked(poll_args* args) { if (!args->prev) { uint32_t key = gpr_murmur_hash3( args->fds, args->nfds * sizeof(struct pollfd), 0xDEADBEEF); @@ -1416,25 +1387,25 @@ static void cache_delete_locked(poll_args *args) { if (poll_cache.free_pollers) { poll_cache.free_pollers->prev = args; } - args->prev = NULL; + args->prev = nullptr; args->next = poll_cache.free_pollers; gpr_free(args->fds); poll_cache.free_pollers = args; } -static void cache_poller_locked(poll_args *args) { +static void cache_poller_locked(poll_args* args) { if (poll_cache.count + 1 > poll_cache.size / 2) { - poll_args **old_active_pollers = poll_cache.active_pollers; + poll_args** old_active_pollers = poll_cache.active_pollers; poll_cache.size = poll_cache.size * 2; poll_cache.count = 0; poll_cache.active_pollers = - (poll_args **)gpr_malloc(sizeof(void *) * poll_cache.size); + static_cast(gpr_malloc(sizeof(void*) * poll_cache.size)); for (unsigned int i = 0; i < poll_cache.size; i++) { - poll_cache.active_pollers[i] = NULL; + poll_cache.active_pollers[i] = nullptr; } for (unsigned int i = 0; i < poll_cache.size / 2; i++) { - poll_args *curr = old_active_pollers[i]; - poll_args *next = NULL; + poll_args* curr = old_active_pollers[i]; + poll_args* next = nullptr; while (curr) { next = curr->next; cache_insert_locked(curr); @@ -1447,7 +1418,7 @@ static void cache_poller_locked(poll_args *args) { cache_insert_locked(args); } -static void cache_destroy_locked(poll_args *args) { +static void cache_destroy_locked(poll_args* args) { if (args->next) { args->next->prev = args->prev; } @@ -1458,10 +1429,36 @@ static void 
cache_destroy_locked(poll_args *args) { poll_cache.free_pollers = args->next; } - gpr_free(args); + // Now move this args to the dead poller list for later join + if (poll_cache.dead_pollers != nullptr) { + poll_cache.dead_pollers->prev = args; + } + args->prev = nullptr; + args->next = poll_cache.dead_pollers; + poll_cache.dead_pollers = args; } -static void decref_poll_result(poll_result *res) { +static void cache_harvest_locked() { + while (poll_cache.dead_pollers) { + poll_args* args = poll_cache.dead_pollers; + poll_cache.dead_pollers = poll_cache.dead_pollers->next; + // Keep the list consistent in case new dead pollers get added when we + // release the lock below to wait on joining + if (poll_cache.dead_pollers) { + poll_cache.dead_pollers->prev = nullptr; + } + args->harvestable = true; + gpr_cv_signal(&args->harvest); + while (!args->joinable) { + gpr_cv_wait(&args->join, &g_cvfds.mu, + gpr_inf_future(GPR_CLOCK_MONOTONIC)); + } + args->poller_thd.Join(); + gpr_free(args); + } +} + +static void decref_poll_result(poll_result* res) { if (gpr_unref(&res->refcount)) { GPR_ASSERT(!res->watchers); gpr_free(res->fds); @@ -1469,7 +1466,7 @@ static void decref_poll_result(poll_result *res) { } } -void remove_cvn(cv_node **head, cv_node *target) { +void remove_cvn(grpc_cv_node** head, grpc_cv_node* target) { if (target->next) { target->next->prev = target->prev; } @@ -1484,17 +1481,18 @@ void remove_cvn(cv_node **head, cv_node *target) { gpr_timespec thread_grace; // Poll in a background thread -static void run_poll(void *args) { - poll_args *pargs = (poll_args *)args; +static void run_poll(void* args) { + poll_args* pargs = static_cast(args); while (1) { - poll_result *result = pargs->result; + poll_result* result = pargs->result; int retval = g_cvfds.poll(result->fds, result->nfds, CV_POLL_PERIOD_MS); gpr_mu_lock(&g_cvfds.mu); + cache_harvest_locked(); if (retval != 0) { result->completed = 1; result->retval = retval; result->err = errno; - cv_node *watcher = result->watchers; + grpc_cv_node* watcher = result->watchers; while (watcher) { gpr_cv_signal(watcher->cv); watcher = watcher->next; @@ -1505,10 +1503,11 @@ static void run_poll(void *args) { decref_poll_result(result); // Leave this polling thread alive for a grace period to do another poll() // op - gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME); + gpr_timespec deadline = gpr_now(GPR_CLOCK_MONOTONIC); deadline = gpr_time_add(deadline, thread_grace); pargs->trigger_set = 0; gpr_cv_wait(&pargs->trigger, &g_cvfds.mu, deadline); + cache_harvest_locked(); if (!pargs->trigger_set) { cache_destroy_locked(pargs); break; @@ -1517,35 +1516,48 @@ static void run_poll(void *args) { gpr_mu_unlock(&g_cvfds.mu); } - // We still have the lock here if (gpr_unref(&g_cvfds.pollcount)) { gpr_cv_signal(&g_cvfds.shutdown_cv); } + while (!pargs->harvestable) { + gpr_cv_wait(&pargs->harvest, &g_cvfds.mu, + gpr_inf_future(GPR_CLOCK_MONOTONIC)); + } + pargs->joinable = true; + gpr_cv_signal(&pargs->join); gpr_mu_unlock(&g_cvfds.mu); } // This function overrides poll() to handle condition variable wakeup fds -static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) { +static int cvfd_poll(struct pollfd* fds, nfds_t nfds, int timeout) { + if (timeout == 0) { + // Don't bother using background threads for polling if timeout is 0, + // poll-cv might not wait for a poll to return otherwise. 
+ // https://github.com/grpc/grpc/issues/13298 + return poll(fds, nfds, 0); + } unsigned int i; int res, idx; - cv_node *pollcv; + grpc_cv_node* pollcv; int skip_poll = 0; nfds_t nsockfds = 0; - poll_result *result = NULL; + poll_result* result = nullptr; gpr_mu_lock(&g_cvfds.mu); - pollcv = (cv_node *)gpr_malloc(sizeof(cv_node)); - pollcv->next = NULL; + cache_harvest_locked(); + pollcv = static_cast(gpr_malloc(sizeof(grpc_cv_node))); + pollcv->next = nullptr; gpr_cv pollcv_cv; gpr_cv_init(&pollcv_cv); pollcv->cv = &pollcv_cv; - cv_node *fd_cvs = (cv_node *)gpr_malloc(nfds * sizeof(cv_node)); + grpc_cv_node* fd_cvs = + static_cast(gpr_malloc(nfds * sizeof(grpc_cv_node))); for (i = 0; i < nfds; i++) { fds[i].revents = 0; if (fds[i].fd < 0 && (fds[i].events & POLLIN)) { idx = GRPC_FD_TO_IDX(fds[i].fd); fd_cvs[i].cv = &pollcv_cv; - fd_cvs[i].prev = NULL; + fd_cvs[i].prev = nullptr; fd_cvs[i].next = g_cvfds.cvfds[idx].cvs; if (g_cvfds.cvfds[idx].cvs) { g_cvfds.cvfds[idx].cvs->prev = &(fd_cvs[i]); @@ -1560,9 +1572,9 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) { } } - gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME); + gpr_timespec deadline = gpr_now(GPR_CLOCK_MONOTONIC); if (timeout < 0) { - deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); } else { deadline = gpr_time_add(deadline, gpr_time_from_millis(timeout, GPR_TIMESPAN)); @@ -1570,8 +1582,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) { res = 0; if (!skip_poll && nsockfds > 0) { - struct pollfd *pollfds = - (struct pollfd *)gpr_malloc(sizeof(struct pollfd) * nsockfds); + struct pollfd* pollfds = static_cast( + gpr_malloc(sizeof(struct pollfd) * nsockfds)); idx = 0; for (i = 0; i < nfds; i++) { if (fds[i].fd >= 0) { @@ -1581,10 +1593,10 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) { idx++; } } - poll_args *pargs = get_poller_locked(pollfds, nsockfds); + poll_args* pargs = get_poller_locked(pollfds, nsockfds); result = pargs->result; pollcv->next = result->watchers; - pollcv->prev = NULL; + pollcv->prev = nullptr; if (result->watchers) { result->watchers->prev = pollcv; } @@ -1595,12 +1607,14 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) { pargs->trigger_set = 1; gpr_cv_signal(&pargs->trigger); gpr_cv_wait(&pollcv_cv, &g_cvfds.mu, deadline); + cache_harvest_locked(); res = result->retval; errno = result->err; result->watchcount--; remove_cvn(&result->watchers, pollcv); } else if (!skip_poll) { gpr_cv_wait(&pollcv_cv, &g_cvfds.mu, deadline); + cache_harvest_locked(); } idx = 0; @@ -1634,13 +1648,13 @@ static void global_cv_fd_table_init() { gpr_cv_init(&g_cvfds.shutdown_cv); gpr_ref_init(&g_cvfds.pollcount, 1); g_cvfds.size = CV_DEFAULT_TABLE_SIZE; - g_cvfds.cvfds = - (fd_node *)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE); - g_cvfds.free_fds = NULL; + g_cvfds.cvfds = static_cast( + gpr_malloc(sizeof(grpc_fd_node) * CV_DEFAULT_TABLE_SIZE)); + g_cvfds.free_fds = nullptr; thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN); for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) { g_cvfds.cvfds[i].is_set = 0; - g_cvfds.cvfds[i].cvs = NULL; + g_cvfds.cvfds[i].cvs = nullptr; g_cvfds.cvfds[i].next_free = g_cvfds.free_fds; g_cvfds.free_fds = &g_cvfds.cvfds[i]; } @@ -1651,11 +1665,13 @@ static void global_cv_fd_table_init() { // Initialize the cache poll_cache.size = 32; poll_cache.count = 0; - poll_cache.free_pollers = NULL; - poll_cache.active_pollers = (poll_args 
**)gpr_malloc(sizeof(void *) * 32); + poll_cache.free_pollers = nullptr; + poll_cache.active_pollers = + static_cast(gpr_malloc(sizeof(void*) * 32)); for (unsigned int i = 0; i < poll_cache.size; i++) { - poll_cache.active_pollers[i] = NULL; + poll_cache.active_pollers[i] = nullptr; } + poll_cache.dead_pollers = nullptr; gpr_mu_unlock(&g_cvfds.mu); } @@ -1666,7 +1682,7 @@ static void global_cv_fd_table_shutdown() { // Not doing so will result in reported memory leaks if (!gpr_unref(&g_cvfds.pollcount)) { int res = gpr_cv_wait(&g_cvfds.shutdown_cv, &g_cvfds.mu, - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(3, GPR_TIMESPAN))); GPR_ASSERT(res == 0); } @@ -1674,6 +1690,7 @@ static void global_cv_fd_table_shutdown() { grpc_poll_function = g_cvfds.poll; gpr_free(g_cvfds.cvfds); + cache_harvest_locked(); gpr_free(poll_cache.active_pollers); gpr_mu_unlock(&g_cvfds.mu); @@ -1722,23 +1739,24 @@ static const grpc_event_engine_vtable vtable = { shutdown_engine, }; -const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request) { +const grpc_event_engine_vtable* grpc_init_poll_posix(bool explicit_request) { if (!grpc_has_wakeup_fd()) { - return NULL; + gpr_log(GPR_ERROR, "Skipping poll because of no wakeup fd."); + return nullptr; } if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { - return NULL; + return nullptr; } return &vtable; } -const grpc_event_engine_vtable *grpc_init_poll_cv_posix(bool explicit_request) { +const grpc_event_engine_vtable* grpc_init_poll_cv_posix(bool explicit_request) { global_cv_fd_table_init(); grpc_enable_cv_wakeup_fds(1); if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) { global_cv_fd_table_shutdown(); grpc_enable_cv_wakeup_fds(0); - return NULL; + return nullptr; } return &vtable; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.h b/Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.h index d444e6094..ab3cd9029 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.h +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_poll_posix.h @@ -19,9 +19,11 @@ #ifndef GRPC_CORE_LIB_IOMGR_EV_POLL_POSIX_H #define GRPC_CORE_LIB_IOMGR_EV_POLL_POSIX_H +#include + #include "src/core/lib/iomgr/ev_posix.h" -const grpc_event_engine_vtable *grpc_init_poll_posix(bool explicit_request); -const grpc_event_engine_vtable *grpc_init_poll_cv_posix(bool explicit_request); +const grpc_event_engine_vtable* grpc_init_poll_posix(bool explicit_request); +const grpc_event_engine_vtable* grpc_init_poll_cv_posix(bool explicit_request); #endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_POSIX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_posix.c b/Sources/CgRPC/src/core/lib/iomgr/ev_posix.c deleted file mode 100644 index 4d3ae2228..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_posix.c +++ /dev/null @@ -1,266 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_POSIX_SOCKET - -#include "src/core/lib/iomgr/ev_posix.h" - -#include - -#include -#include -#include -#include - -#include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/ev_epoll1_linux.h" -#include "src/core/lib/iomgr/ev_epollex_linux.h" -#include "src/core/lib/iomgr/ev_epollsig_linux.h" -#include "src/core/lib/iomgr/ev_poll_posix.h" -#include "src/core/lib/support/env.h" - -grpc_tracer_flag grpc_polling_trace = - GRPC_TRACER_INITIALIZER(false, "polling"); /* Disabled by default */ - -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_fd_refcount = - GRPC_TRACER_INITIALIZER(false, "fd_refcount"); -#endif - -/** Default poll() function - a pointer so that it can be overridden by some - * tests */ -grpc_poll_function_type grpc_poll_function = poll; - -grpc_wakeup_fd grpc_global_wakeup_fd; - -static const grpc_event_engine_vtable *g_event_engine; -static const char *g_poll_strategy_name = NULL; - -typedef const grpc_event_engine_vtable *(*event_engine_factory_fn)( - bool explicit_request); - -typedef struct { - const char *name; - event_engine_factory_fn factory; -} event_engine_factory; - -static const event_engine_factory g_factories[] = { - {"epoll1", grpc_init_epoll1_linux}, - {"epollsig", grpc_init_epollsig_linux}, - {"poll", grpc_init_poll_posix}, - {"poll-cv", grpc_init_poll_cv_posix}, - {"epollex", grpc_init_epollex_linux}, -}; - -static void add(const char *beg, const char *end, char ***ss, size_t *ns) { - size_t n = *ns; - size_t np = n + 1; - char *s; - size_t len; - GPR_ASSERT(end >= beg); - len = (size_t)(end - beg); - s = (char *)gpr_malloc(len + 1); - memcpy(s, beg, len); - s[len] = 0; - *ss = (char **)gpr_realloc(*ss, sizeof(char **) * np); - (*ss)[n] = s; - *ns = np; -} - -static void split(const char *s, char ***ss, size_t *ns) { - const char *c = strchr(s, ','); - if (c == NULL) { - add(s, s + strlen(s), ss, ns); - } else { - add(s, c, ss, ns); - split(c + 1, ss, ns); - } -} - -static bool is(const char *want, const char *have) { - return 0 == strcmp(want, "all") || 0 == strcmp(want, have); -} - -static void try_engine(const char *engine) { - for (size_t i = 0; i < GPR_ARRAY_SIZE(g_factories); i++) { - if (is(engine, g_factories[i].name)) { - if ((g_event_engine = g_factories[i].factory( - 0 == strcmp(engine, g_factories[i].name)))) { - g_poll_strategy_name = g_factories[i].name; - gpr_log(GPR_DEBUG, "Using polling engine: %s", g_factories[i].name); - return; - } - } - } -} - -/* This should be used for testing purposes ONLY */ -void grpc_set_event_engine_test_only( - const grpc_event_engine_vtable *ev_engine) { - g_event_engine = ev_engine; -} - -const grpc_event_engine_vtable *grpc_get_event_engine_test_only() { - return g_event_engine; -} - -/* Call this only after calling grpc_event_engine_init() */ -const char *grpc_get_poll_strategy_name() { return g_poll_strategy_name; } - -void grpc_event_engine_init(void) { - grpc_register_tracer(&grpc_polling_trace); - - char *s = gpr_getenv("GRPC_POLL_STRATEGY"); - if (s == NULL) { - s = gpr_strdup("all"); - } - - char **strings = NULL; - size_t nstrings = 0; - split(s, &strings, &nstrings); - - for (size_t i = 0; g_event_engine == NULL && i < nstrings; i++) { - try_engine(strings[i]); - } - - for (size_t i = 0; i < nstrings; i++) { - gpr_free(strings[i]); - } - gpr_free(strings); - gpr_free(s); - - if (g_event_engine == NULL) { - gpr_log(GPR_ERROR, "No event engine could be initialized"); - abort(); - } -} - -void grpc_event_engine_shutdown(void) { - 
g_event_engine->shutdown_engine(); - g_event_engine = NULL; -} - -grpc_fd *grpc_fd_create(int fd, const char *name) { - return g_event_engine->fd_create(fd, name); -} - -int grpc_fd_wrapped_fd(grpc_fd *fd) { - return g_event_engine->fd_wrapped_fd(fd); -} - -void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done, - int *release_fd, bool already_closed, const char *reason) { - g_event_engine->fd_orphan(exec_ctx, fd, on_done, release_fd, already_closed, - reason); -} - -void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { - g_event_engine->fd_shutdown(exec_ctx, fd, why); -} - -bool grpc_fd_is_shutdown(grpc_fd *fd) { - return g_event_engine->fd_is_shutdown(fd); -} - -void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - g_event_engine->fd_notify_on_read(exec_ctx, fd, closure); -} - -void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure) { - g_event_engine->fd_notify_on_write(exec_ctx, fd, closure); -} - -size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; } - -void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { - g_event_engine->pollset_init(pollset, mu); -} - -void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { - g_event_engine->pollset_shutdown(exec_ctx, pollset, closure); -} - -void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { - g_event_engine->pollset_destroy(exec_ctx, pollset); -} - -grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker, gpr_timespec now, - gpr_timespec deadline) { - return g_event_engine->pollset_work(exec_ctx, pollset, worker, now, deadline); -} - -grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *specific_worker) { - return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker); -} - -void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - struct grpc_fd *fd) { - g_event_engine->pollset_add_fd(exec_ctx, pollset, fd); -} - -grpc_pollset_set *grpc_pollset_set_create(void) { - return g_event_engine->pollset_set_create(); -} - -void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set) { - g_event_engine->pollset_set_destroy(exec_ctx, pollset_set); -} - -void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset) { - g_event_engine->pollset_set_add_pollset(exec_ctx, pollset_set, pollset); -} - -void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset) { - g_event_engine->pollset_set_del_pollset(exec_ctx, pollset_set, pollset); -} - -void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { - g_event_engine->pollset_set_add_pollset_set(exec_ctx, bag, item); -} - -void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item) { - g_event_engine->pollset_set_del_pollset_set(exec_ctx, bag, item); -} - -void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { - g_event_engine->pollset_set_add_fd(exec_ctx, pollset_set, fd); -} - -void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd) { - g_event_engine->pollset_set_del_fd(exec_ctx, pollset_set, fd); -} - -#endif // 
GRPC_POSIX_SOCKET diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_posix.cc b/Sources/CgRPC/src/core/lib/iomgr/ev_posix.cc new file mode 100644 index 000000000..4ea63fc6e --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_posix.cc @@ -0,0 +1,330 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_POSIX_SOCKET + +#include "src/core/lib/iomgr/ev_posix.h" + +#include + +#include +#include +#include + +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/iomgr/ev_epoll1_linux.h" +#include "src/core/lib/iomgr/ev_epollex_linux.h" +#include "src/core/lib/iomgr/ev_epollsig_linux.h" +#include "src/core/lib/iomgr/ev_poll_posix.h" + +grpc_core::TraceFlag grpc_polling_trace(false, + "polling"); /* Disabled by default */ +grpc_core::DebugOnlyTraceFlag grpc_trace_fd_refcount(false, "fd_refcount"); +grpc_core::DebugOnlyTraceFlag grpc_polling_api_trace(false, "polling_api"); + +#ifndef NDEBUG + +// Polling API trace only enabled in debug builds +#define GRPC_POLLING_API_TRACE(format, ...) \ + if (grpc_polling_api_trace.enabled()) { \ + gpr_log(GPR_INFO, "(polling-api) " format, __VA_ARGS__); \ + } +#else +#define GRPC_POLLING_API_TRACE(...) 
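
The new ev_posix.cc above defines GRPC_POLLING_API_TRACE, a variadic macro that logs only in debug builds and only when the runtime polling_api trace flag is enabled, and compiles to nothing otherwise. A self-contained sketch of that debug-only, flag-gated macro follows; the flag variable and trace_log helper are stand-ins, not the gRPC TraceFlag machinery.

#include <cstdarg>
#include <cstdio>

// Stand-in for the runtime trace flag.
static bool polling_api_trace_enabled = true;

// Stand-in for gpr_log: prefixes the message and writes it to stderr.
static void trace_log(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  std::fputs("(polling-api) ", stderr);
  std::vfprintf(stderr, fmt, args);
  std::fputc('\n', stderr);
  va_end(args);
}

#ifndef NDEBUG
// Debug build: emit the trace line only when the flag is on.
#define POLLING_API_TRACE(format, ...)   \
  do {                                   \
    if (polling_api_trace_enabled) {     \
      trace_log(format, __VA_ARGS__);    \
    }                                    \
  } while (0)
#else
// Release build: the macro expands to nothing, so call sites cost nothing.
#define POLLING_API_TRACE(...) \
  do {                         \
  } while (0)
#endif

int main() {
  int fd = 42;
  POLLING_API_TRACE("fd_create(%d, %s)", fd, "listener");
}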
+#endif + +/** Default poll() function - a pointer so that it can be overridden by some + * tests */ +grpc_poll_function_type grpc_poll_function = poll; + +grpc_wakeup_fd grpc_global_wakeup_fd; + +static const grpc_event_engine_vtable* g_event_engine = nullptr; +static const char* g_poll_strategy_name = nullptr; + +typedef const grpc_event_engine_vtable* (*event_engine_factory_fn)( + bool explicit_request); + +typedef struct { + const char* name; + event_engine_factory_fn factory; +} event_engine_factory; + +namespace { + +grpc_poll_function_type real_poll_function; + +int dummy_poll(struct pollfd fds[], nfds_t nfds, int timeout) { + if (timeout == 0) { + return real_poll_function(fds, nfds, 0); + } else { + gpr_log(GPR_ERROR, "Attempted a blocking poll when declared non-polling."); + GPR_ASSERT(false); + return -1; + } +} + +const grpc_event_engine_vtable* init_non_polling(bool explicit_request) { + if (!explicit_request) { + return nullptr; + } + // return the simplest engine as a dummy but also override the poller + auto ret = grpc_init_poll_posix(explicit_request); + real_poll_function = grpc_poll_function; + grpc_poll_function = dummy_poll; + + return ret; +} +} // namespace + +static const event_engine_factory g_factories[] = { + {"epollex", grpc_init_epollex_linux}, {"epoll1", grpc_init_epoll1_linux}, + {"epollsig", grpc_init_epollsig_linux}, {"poll", grpc_init_poll_posix}, + {"poll-cv", grpc_init_poll_cv_posix}, {"none", init_non_polling}, +}; + +static void add(const char* beg, const char* end, char*** ss, size_t* ns) { + size_t n = *ns; + size_t np = n + 1; + char* s; + size_t len; + GPR_ASSERT(end >= beg); + len = static_cast(end - beg); + s = static_cast(gpr_malloc(len + 1)); + memcpy(s, beg, len); + s[len] = 0; + *ss = static_cast(gpr_realloc(*ss, sizeof(char**) * np)); + (*ss)[n] = s; + *ns = np; +} + +static void split(const char* s, char*** ss, size_t* ns) { + const char* c = strchr(s, ','); + if (c == nullptr) { + add(s, s + strlen(s), ss, ns); + } else { + add(s, c, ss, ns); + split(c + 1, ss, ns); + } +} + +static bool is(const char* want, const char* have) { + return 0 == strcmp(want, "all") || 0 == strcmp(want, have); +} + +static void try_engine(const char* engine) { + for (size_t i = 0; i < GPR_ARRAY_SIZE(g_factories); i++) { + if (is(engine, g_factories[i].name)) { + if ((g_event_engine = g_factories[i].factory( + 0 == strcmp(engine, g_factories[i].name)))) { + g_poll_strategy_name = g_factories[i].name; + gpr_log(GPR_DEBUG, "Using polling engine: %s", g_factories[i].name); + return; + } + } + } +} + +/* This should be used for testing purposes ONLY */ +void grpc_set_event_engine_test_only( + const grpc_event_engine_vtable* ev_engine) { + g_event_engine = ev_engine; +} + +const grpc_event_engine_vtable* grpc_get_event_engine_test_only() { + return g_event_engine; +} + +/* Call this only after calling grpc_event_engine_init() */ +const char* grpc_get_poll_strategy_name() { return g_poll_strategy_name; } + +void grpc_event_engine_init(void) { + char* s = gpr_getenv("GRPC_POLL_STRATEGY"); + if (s == nullptr) { + s = gpr_strdup("all"); + } + + char** strings = nullptr; + size_t nstrings = 0; + split(s, &strings, &nstrings); + + for (size_t i = 0; g_event_engine == nullptr && i < nstrings; i++) { + try_engine(strings[i]); + } + + for (size_t i = 0; i < nstrings; i++) { + gpr_free(strings[i]); + } + gpr_free(strings); + + if (g_event_engine == nullptr) { + gpr_log(GPR_ERROR, "No event engine could be initialized from %s", s); + abort(); + } + gpr_free(s); +} + +void 
grpc_event_engine_shutdown(void) { + g_event_engine->shutdown_engine(); + g_event_engine = nullptr; +} + +grpc_fd* grpc_fd_create(int fd, const char* name) { + GRPC_POLLING_API_TRACE("fd_create(%d, %s)", fd, name); + return g_event_engine->fd_create(fd, name); +} + +int grpc_fd_wrapped_fd(grpc_fd* fd) { + return g_event_engine->fd_wrapped_fd(fd); +} + +void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, + bool already_closed, const char* reason) { + GRPC_POLLING_API_TRACE("fd_orphan(%d, %p, %p, %d, %s)", + grpc_fd_wrapped_fd(fd), on_done, release_fd, + already_closed, reason); + g_event_engine->fd_orphan(fd, on_done, release_fd, already_closed, reason); +} + +void grpc_fd_shutdown(grpc_fd* fd, grpc_error* why) { + GRPC_POLLING_API_TRACE("fd_shutdown(%d)", grpc_fd_wrapped_fd(fd)); + g_event_engine->fd_shutdown(fd, why); +} + +bool grpc_fd_is_shutdown(grpc_fd* fd) { + return g_event_engine->fd_is_shutdown(fd); +} + +void grpc_fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) { + g_event_engine->fd_notify_on_read(fd, closure); +} + +void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) { + g_event_engine->fd_notify_on_write(fd, closure); +} + +static size_t pollset_size(void) { return g_event_engine->pollset_size; } + +static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) { + GRPC_POLLING_API_TRACE("pollset_init(%p)", pollset); + g_event_engine->pollset_init(pollset, mu); +} + +static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { + GRPC_POLLING_API_TRACE("pollset_shutdown(%p)", pollset); + g_event_engine->pollset_shutdown(pollset, closure); +} + +static void pollset_destroy(grpc_pollset* pollset) { + GRPC_POLLING_API_TRACE("pollset_destroy(%p)", pollset); + g_event_engine->pollset_destroy(pollset); +} + +static grpc_error* pollset_work(grpc_pollset* pollset, + grpc_pollset_worker** worker, + grpc_millis deadline) { + GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRIdPTR ") begin", pollset, + deadline); + grpc_error* err = g_event_engine->pollset_work(pollset, worker, deadline); + GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRIdPTR ") end", pollset, + deadline); + return err; +} + +static grpc_error* pollset_kick(grpc_pollset* pollset, + grpc_pollset_worker* specific_worker) { + GRPC_POLLING_API_TRACE("pollset_kick(%p, %p)", pollset, specific_worker); + return g_event_engine->pollset_kick(pollset, specific_worker); +} + +void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd) { + GRPC_POLLING_API_TRACE("pollset_add_fd(%p, %d)", pollset, + grpc_fd_wrapped_fd(fd)); + g_event_engine->pollset_add_fd(pollset, fd); +} + +void pollset_global_init() {} +void pollset_global_shutdown() {} + +grpc_pollset_vtable grpc_posix_pollset_vtable = { + pollset_global_init, pollset_global_shutdown, + pollset_init, pollset_shutdown, + pollset_destroy, pollset_work, + pollset_kick, pollset_size}; + +static grpc_pollset_set* pollset_set_create(void) { + grpc_pollset_set* pss = g_event_engine->pollset_set_create(); + GRPC_POLLING_API_TRACE("pollset_set_create(%p)", pss); + return pss; +} + +static void pollset_set_destroy(grpc_pollset_set* pollset_set) { + GRPC_POLLING_API_TRACE("pollset_set_destroy(%p)", pollset_set); + g_event_engine->pollset_set_destroy(pollset_set); +} + +static void pollset_set_add_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) { + GRPC_POLLING_API_TRACE("pollset_set_add_pollset(%p, %p)", pollset_set, + pollset); + g_event_engine->pollset_set_add_pollset(pollset_set, pollset); +} + +static void 
pollset_set_del_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) { + GRPC_POLLING_API_TRACE("pollset_set_del_pollset(%p, %p)", pollset_set, + pollset); + g_event_engine->pollset_set_del_pollset(pollset_set, pollset); +} + +static void pollset_set_add_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) { + GRPC_POLLING_API_TRACE("pollset_set_add_pollset_set(%p, %p)", bag, item); + g_event_engine->pollset_set_add_pollset_set(bag, item); +} + +static void pollset_set_del_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) { + GRPC_POLLING_API_TRACE("pollset_set_del_pollset_set(%p, %p)", bag, item); + g_event_engine->pollset_set_del_pollset_set(bag, item); +} + +grpc_pollset_set_vtable grpc_posix_pollset_set_vtable = { + pollset_set_create, pollset_set_destroy, + pollset_set_add_pollset, pollset_set_del_pollset, + pollset_set_add_pollset_set, pollset_set_del_pollset_set}; + +void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) { + GRPC_POLLING_API_TRACE("pollset_set_add_fd(%p, %d)", pollset_set, + grpc_fd_wrapped_fd(fd)); + g_event_engine->pollset_set_add_fd(pollset_set, fd); +} + +void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) { + GRPC_POLLING_API_TRACE("pollset_set_del_fd(%p, %d)", pollset_set, + grpc_fd_wrapped_fd(fd)); + g_event_engine->pollset_set_del_fd(pollset_set, fd); +} + +#endif // GRPC_POSIX_SOCKET diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_posix.h b/Sources/CgRPC/src/core/lib/iomgr/ev_posix.h index 1ff2ff141..6a5129a74 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_posix.h +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_posix.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_EV_POSIX_H #define GRPC_CORE_LIB_IOMGR_EV_POSIX_H +#include + #include #include "src/core/lib/debug/trace.h" @@ -27,57 +29,45 @@ #include "src/core/lib/iomgr/pollset_set.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" -extern grpc_tracer_flag grpc_polling_trace; /* Disabled by default */ +extern grpc_core::TraceFlag grpc_polling_trace; /* Disabled by default */ typedef struct grpc_fd grpc_fd; typedef struct grpc_event_engine_vtable { size_t pollset_size; - grpc_fd *(*fd_create)(int fd, const char *name); - int (*fd_wrapped_fd)(grpc_fd *fd); - void (*fd_orphan)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done, - int *release_fd, bool already_closed, const char *reason); - void (*fd_shutdown)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why); - void (*fd_notify_on_read)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure); - void (*fd_notify_on_write)(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure); - bool (*fd_is_shutdown)(grpc_fd *fd); - grpc_pollset *(*fd_get_read_notifier_pollset)(grpc_exec_ctx *exec_ctx, - grpc_fd *fd); - - void (*pollset_init)(grpc_pollset *pollset, gpr_mu **mu); - void (*pollset_shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure); - void (*pollset_destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); - grpc_error *(*pollset_work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker, gpr_timespec now, - gpr_timespec deadline); - grpc_error *(*pollset_kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *specific_worker); - void (*pollset_add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - struct grpc_fd *fd); - - grpc_pollset_set *(*pollset_set_create)(void); - void (*pollset_set_destroy)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set); - void 
(*pollset_set_add_pollset)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset); - void (*pollset_set_del_pollset)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset); - void (*pollset_set_add_pollset_set)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item); - void (*pollset_set_del_pollset_set)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item); - void (*pollset_set_add_fd)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); - void (*pollset_set_del_fd)(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); + grpc_fd* (*fd_create)(int fd, const char* name); + int (*fd_wrapped_fd)(grpc_fd* fd); + void (*fd_orphan)(grpc_fd* fd, grpc_closure* on_done, int* release_fd, + bool already_closed, const char* reason); + void (*fd_shutdown)(grpc_fd* fd, grpc_error* why); + void (*fd_notify_on_read)(grpc_fd* fd, grpc_closure* closure); + void (*fd_notify_on_write)(grpc_fd* fd, grpc_closure* closure); + bool (*fd_is_shutdown)(grpc_fd* fd); + grpc_pollset* (*fd_get_read_notifier_pollset)(grpc_fd* fd); + + void (*pollset_init)(grpc_pollset* pollset, gpr_mu** mu); + void (*pollset_shutdown)(grpc_pollset* pollset, grpc_closure* closure); + void (*pollset_destroy)(grpc_pollset* pollset); + grpc_error* (*pollset_work)(grpc_pollset* pollset, + grpc_pollset_worker** worker, + grpc_millis deadline); + grpc_error* (*pollset_kick)(grpc_pollset* pollset, + grpc_pollset_worker* specific_worker); + void (*pollset_add_fd)(grpc_pollset* pollset, struct grpc_fd* fd); + + grpc_pollset_set* (*pollset_set_create)(void); + void (*pollset_set_destroy)(grpc_pollset_set* pollset_set); + void (*pollset_set_add_pollset)(grpc_pollset_set* pollset_set, + grpc_pollset* pollset); + void (*pollset_set_del_pollset)(grpc_pollset_set* pollset_set, + grpc_pollset* pollset); + void (*pollset_set_add_pollset_set)(grpc_pollset_set* bag, + grpc_pollset_set* item); + void (*pollset_set_del_pollset_set)(grpc_pollset_set* bag, + grpc_pollset_set* item); + void (*pollset_set_add_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd); + void (*pollset_set_del_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd); void (*shutdown_engine)(void); } grpc_event_engine_vtable; @@ -86,15 +76,15 @@ void grpc_event_engine_init(void); void grpc_event_engine_shutdown(void); /* Return the name of the poll strategy */ -const char *grpc_get_poll_strategy_name(); +const char* grpc_get_poll_strategy_name(); /* Create a wrapped file descriptor. Requires fd is a non-blocking file descriptor. This takes ownership of closing fd. */ -grpc_fd *grpc_fd_create(int fd, const char *name); +grpc_fd* grpc_fd_create(int fd, const char* name); /* Return the wrapped fd, or -1 if it has been released or closed. */ -int grpc_fd_wrapped_fd(grpc_fd *fd); +int grpc_fd_wrapped_fd(grpc_fd* fd); /* Releases fd to be asynchronously destroyed. on_done is called when the underlying file descriptor is definitely close()d. @@ -103,14 +93,14 @@ int grpc_fd_wrapped_fd(grpc_fd *fd); Requires: *fd initialized; no outstanding notify_on_read or notify_on_write. MUST NOT be called with a pollset lock taken */ -void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done, - int *release_fd, bool already_closed, const char *reason); +void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd, + bool already_closed, const char* reason); /* Has grpc_fd_shutdown been called on an fd? 
*/ -bool grpc_fd_is_shutdown(grpc_fd *fd); +bool grpc_fd_is_shutdown(grpc_fd* fd); /* Cause any current and future callbacks to fail. */ -void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why); +void grpc_fd_shutdown(grpc_fd* fd, grpc_error* why); /* Register read interest, causing read_cb to be called once when fd becomes readable, on deadline specified by deadline, or on shutdown triggered by @@ -125,37 +115,31 @@ void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why); underlying platform. This means that users must drain fd in read_cb before calling notify_on_read again. Users are also expected to handle spurious events, i.e read_cb is called while nothing can be readable from fd */ -void grpc_fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure); +void grpc_fd_notify_on_read(grpc_fd* fd, grpc_closure* closure); /* Exactly the same semantics as above, except based on writable events. */ -void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - grpc_closure *closure); +void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure); /* Return the read notifier pollset from the fd */ -grpc_pollset *grpc_fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx, - grpc_fd *fd); +grpc_pollset* grpc_fd_get_read_notifier_pollset(grpc_fd* fd); /* pollset_posix functions */ /* Add an fd to a pollset */ -void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - struct grpc_fd *fd); +void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd); /* pollset_set_posix functions */ -void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); -void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, grpc_fd *fd); +void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd); +void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd); /* override to allow tests to hook poll() usage */ -typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int); +typedef int (*grpc_poll_function_type)(struct pollfd*, nfds_t, int); extern grpc_poll_function_type grpc_poll_function; /* WARNING: The following two functions should be used for testing purposes * ONLY */ -void grpc_set_event_engine_test_only(const grpc_event_engine_vtable *); -const grpc_event_engine_vtable *grpc_get_event_engine_test_only(); +void grpc_set_event_engine_test_only(const grpc_event_engine_vtable*); +const grpc_event_engine_vtable* grpc_get_event_engine_test_only(); #endif /* GRPC_CORE_LIB_IOMGR_EV_POSIX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/ev_windows.c b/Sources/CgRPC/src/core/lib/iomgr/ev_windows.cc similarity index 81% rename from Sources/CgRPC/src/core/lib/iomgr/ev_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/ev_windows.cc index c24dfaeaf..32c62b7a7 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/ev_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/ev_windows.cc @@ -16,13 +16,15 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINSOCK_SOCKET #include "src/core/lib/debug/trace.h" -grpc_tracer_flag grpc_polling_trace = - GRPC_TRACER_INITIALIZER(false, "polling"); /* Disabled by default */ +grpc_core::TraceFlag grpc_polling_trace(false, + "polling"); /* Disabled by default */ #endif // GRPC_WINSOCK_SOCKET diff --git a/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.c b/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.c deleted file mode 100644 index 41c69add1..000000000 --- 
a/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.c +++ /dev/null @@ -1,113 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/exec_ctx.h" - -#include -#include -#include - -#include "src/core/lib/iomgr/combiner.h" -#include "src/core/lib/profiling/timers.h" - -bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) { - if ((exec_ctx->flags & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) { - if (exec_ctx->check_ready_to_finish(exec_ctx, - exec_ctx->check_ready_to_finish_arg)) { - exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED; - return true; - } - return false; - } else { - return true; - } -} - -bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) { - return false; -} - -bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) { - return true; -} - -bool grpc_exec_ctx_has_work(grpc_exec_ctx *exec_ctx) { - return exec_ctx->active_combiner != NULL || - !grpc_closure_list_empty(exec_ctx->closure_list); -} - -void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) { - exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED; - grpc_exec_ctx_flush(exec_ctx); -} - -static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { -#ifndef NDEBUG - closure->scheduled = false; - if (GRPC_TRACER_ON(grpc_trace_closure)) { - gpr_log(GPR_DEBUG, "running closure %p: created [%s:%d]: %s [%s:%d]", - closure, closure->file_created, closure->line_created, - closure->run ? 
"run" : "scheduled", closure->file_initiated, - closure->line_initiated); - } -#endif - closure->cb(exec_ctx, closure->cb_arg, error); -#ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_closure)) { - gpr_log(GPR_DEBUG, "closure %p finished", closure); - } -#endif - GRPC_ERROR_UNREF(error); -} - -bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { - bool did_something = 0; - GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0); - for (;;) { - if (!grpc_closure_list_empty(exec_ctx->closure_list)) { - grpc_closure *c = exec_ctx->closure_list.head; - exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL; - while (c != NULL) { - grpc_closure *next = c->next_data.next; - grpc_error *error = c->error_data.error; - did_something = true; - exec_ctx_run(exec_ctx, c, error); - c = next; - } - } else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) { - break; - } - } - GPR_ASSERT(exec_ctx->active_combiner == NULL); - GPR_TIMER_END("grpc_exec_ctx_flush", 0); - return did_something; -} - -static void exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - grpc_closure_list_append(&exec_ctx->closure_list, closure, error); -} - -void grpc_exec_ctx_global_init(void) {} -void grpc_exec_ctx_global_shutdown(void) {} - -static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = { - exec_ctx_run, exec_ctx_sched, "exec_ctx"}; -static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable}; -grpc_closure_scheduler *grpc_schedule_on_exec_ctx = &exec_ctx_scheduler; diff --git a/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.cc b/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.cc new file mode 100644 index 000000000..2f544b20a --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.cc @@ -0,0 +1,147 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/exec_ctx.h" + +#include +#include + +#include "src/core/lib/gprpp/thd.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/profiling/timers.h" + +static void exec_ctx_run(grpc_closure* closure, grpc_error* error) { +#ifndef NDEBUG + closure->scheduled = false; + if (grpc_trace_closure.enabled()) { + gpr_log(GPR_DEBUG, "running closure %p: created [%s:%d]: %s [%s:%d]", + closure, closure->file_created, closure->line_created, + closure->run ? 
"run" : "scheduled", closure->file_initiated, + closure->line_initiated); + } +#endif + closure->cb(closure->cb_arg, error); +#ifndef NDEBUG + if (grpc_trace_closure.enabled()) { + gpr_log(GPR_DEBUG, "closure %p finished", closure); + } +#endif + GRPC_ERROR_UNREF(error); +} + +static void exec_ctx_sched(grpc_closure* closure, grpc_error* error) { + grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(), closure, + error); +} + +static gpr_timespec g_start_time; + +static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) { + ts = gpr_time_sub(ts, g_start_time); + double x = GPR_MS_PER_SEC * static_cast(ts.tv_sec) + + static_cast(ts.tv_nsec) / GPR_NS_PER_MS; + if (x < 0) return 0; + if (x > GPR_ATM_MAX) return GPR_ATM_MAX; + return static_cast(x); +} + +static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) { + ts = gpr_time_sub(ts, g_start_time); + double x = GPR_MS_PER_SEC * static_cast(ts.tv_sec) + + static_cast(ts.tv_nsec) / GPR_NS_PER_MS + + static_cast(GPR_NS_PER_SEC - 1) / + static_cast(GPR_NS_PER_SEC); + if (x < 0) return 0; + if (x > GPR_ATM_MAX) return GPR_ATM_MAX; + return static_cast(x); +} + +gpr_timespec grpc_millis_to_timespec(grpc_millis millis, + gpr_clock_type clock_type) { + // special-case infinities as grpc_millis can be 32bit on some platforms + // while gpr_time_from_millis always takes an int64_t. + if (millis == GRPC_MILLIS_INF_FUTURE) { + return gpr_inf_future(clock_type); + } + if (millis == GRPC_MILLIS_INF_PAST) { + return gpr_inf_past(clock_type); + } + + if (clock_type == GPR_TIMESPAN) { + return gpr_time_from_millis(millis, GPR_TIMESPAN); + } + return gpr_time_add(gpr_convert_clock_type(g_start_time, clock_type), + gpr_time_from_millis(millis, GPR_TIMESPAN)); +} + +grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec ts) { + return timespec_to_atm_round_down( + gpr_convert_clock_type(ts, g_start_time.clock_type)); +} + +grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec ts) { + return timespec_to_atm_round_up( + gpr_convert_clock_type(ts, g_start_time.clock_type)); +} + +static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = { + exec_ctx_run, exec_ctx_sched, "exec_ctx"}; +static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable}; +grpc_closure_scheduler* grpc_schedule_on_exec_ctx = &exec_ctx_scheduler; + +namespace grpc_core { +GPR_TLS_CLASS_DEF(ExecCtx::exec_ctx_); + +void ExecCtx::GlobalInit(void) { + g_start_time = gpr_now(GPR_CLOCK_MONOTONIC); + gpr_tls_init(&exec_ctx_); +} + +bool ExecCtx::Flush() { + bool did_something = 0; + GPR_TIMER_SCOPE("grpc_exec_ctx_flush", 0); + for (;;) { + if (!grpc_closure_list_empty(closure_list_)) { + grpc_closure* c = closure_list_.head; + closure_list_.head = closure_list_.tail = nullptr; + while (c != nullptr) { + grpc_closure* next = c->next_data.next; + grpc_error* error = c->error_data.error; + did_something = true; + exec_ctx_run(c, error); + c = next; + } + } else if (!grpc_combiner_continue_exec_ctx()) { + break; + } + } + GPR_ASSERT(combiner_data_.active_combiner == nullptr); + return did_something; +} + +grpc_millis ExecCtx::Now() { + if (!now_is_valid_) { + now_ = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC)); + now_is_valid_ = true; + } + return now_; +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.h b/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.h index c89792c8c..72d0ae58c 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.h +++ b/Sources/CgRPC/src/core/lib/iomgr/exec_ctx.h @@ -19,10 +19,19 @@ 
#ifndef GRPC_CORE_LIB_IOMGR_EXEC_CTX_H #define GRPC_CORE_LIB_IOMGR_EXEC_CTX_H +#include + +#include #include +#include + +#include "src/core/lib/gpr/tls.h" #include "src/core/lib/iomgr/closure.h" -/* #define GRPC_EXECUTION_CONTEXT_SANITIZER 1 */ +typedef gpr_atm grpc_millis; + +#define GRPC_MILLIS_INF_FUTURE GPR_ATM_MAX +#define GRPC_MILLIS_INF_PAST GPR_ATM_MIN /** A workqueue represents a list of work to be executed asynchronously. Forward declared here to avoid a circular dependency with workqueue.h. */ @@ -36,74 +45,166 @@ typedef struct grpc_combiner grpc_combiner; should be given to not delete said call/channel from this exec_ctx */ #define GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP 2 +extern grpc_closure_scheduler* grpc_schedule_on_exec_ctx; + +gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock); +grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec); +grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec); + +namespace grpc_core { /** Execution context. * A bag of data that collects information along a callstack. - * Generally created at public API entry points, and passed down as - * pointer to child functions that manipulate it. + * It is created on the stack at public API entry points, and stored internally + * as a thread-local variable. + * + * Generally, to create an exec_ctx instance, add the following line at the top + * of the public API entry point or at the start of a thread's work function : + * + * grpc_core::ExecCtx exec_ctx; + * + * Access the created ExecCtx instance using : + * grpc_core::ExecCtx::Get() * * Specific responsibilities (this may grow in the future): * - track a list of work that needs to be delayed until the top of the * call stack (this provides a convenient mechanism to run callbacks * without worrying about locking issues) - * - provide a decision maker (via grpc_exec_ctx_ready_to_finish) that provides + * - provide a decision maker (via IsReadyToFinish) that provides a * signal as to whether a borrowed thread should continue to do work or * should actively try to finish up and get this thread back to its owner * * CONVENTIONS: * - Instance of this must ALWAYS be constructed on the stack, never * heap allocated. - * - Instances and pointers to them must always be called exec_ctx. - * - Instances are always passed as the first argument to a function that - * takes it, and always as a pointer (grpc_exec_ctx is never copied). + * - Exactly one instance of ExecCtx must be created per thread. Instances must + * always be called exec_ctx. + * - Do not pass exec_ctx as a parameter to a function. 
Always access it using + * grpc_core::ExecCtx::Get() */ -struct grpc_exec_ctx { - grpc_closure_list closure_list; - /** currently active combiner: updated only via combiner.c */ - grpc_combiner *active_combiner; - /** last active combiner in the active combiner list */ - grpc_combiner *last_combiner; - uintptr_t flags; - unsigned starting_cpu; - void *check_ready_to_finish_arg; - bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg); -}; +class ExecCtx { + public: + /** Default Constructor */ + + ExecCtx() : flags_(GRPC_EXEC_CTX_FLAG_IS_FINISHED) { Set(this); } + + /** Parameterised Constructor */ + ExecCtx(uintptr_t fl) : flags_(fl) { Set(this); } + + /** Destructor */ + virtual ~ExecCtx() { + flags_ |= GRPC_EXEC_CTX_FLAG_IS_FINISHED; + Flush(); + Set(last_exec_ctx_); + } + + /** Disallow copy and assignment operators */ + ExecCtx(const ExecCtx&) = delete; + ExecCtx& operator=(const ExecCtx&) = delete; + + /** Return starting_cpu */ + unsigned starting_cpu() const { return starting_cpu_; } + + struct CombinerData { + /* currently active combiner: updated only via combiner.c */ + grpc_combiner* active_combiner; + /* last active combiner in the active combiner list */ + grpc_combiner* last_combiner; + }; + + /** Only to be used by grpc-combiner code */ + CombinerData* combiner_data() { return &combiner_data_; } + + /** Return pointer to grpc_closure_list */ + grpc_closure_list* closure_list() { return &closure_list_; } + + /** Return flags */ + uintptr_t flags() { return flags_; } + + /** Checks if there is work to be done */ + bool HasWork() { + return combiner_data_.active_combiner != nullptr || + !grpc_closure_list_empty(closure_list_); + } + + /** Flush any work that has been enqueued onto this grpc_exec_ctx. + * Caller must guarantee that no interfering locks are held. + * Returns true if work was performed, false otherwise. */ + bool Flush(); + + /** Returns true if we'd like to leave this execution context as soon as +possible: useful for deciding whether to do something more or not depending +on outside context */ + bool IsReadyToFinish() { + if ((flags_ & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) { + if (CheckReadyToFinish()) { + flags_ |= GRPC_EXEC_CTX_FLAG_IS_FINISHED; + return true; + } + return false; + } else { + return true; + } + } + + /** Returns the stored current time relative to start if valid, + * otherwise refreshes the stored time, sets it valid and returns the new + * value */ + grpc_millis Now(); + + /** Invalidates the stored time value. A new time value will be set on calling + * Now() */ + void InvalidateNow() { now_is_valid_ = false; } + + /** To be used only by shutdown code in iomgr */ + void SetNowIomgrShutdown() { + now_ = GRPC_MILLIS_INF_FUTURE; + now_is_valid_ = true; + } + + /** To be used only for testing. + * Sets the now value + */ + void TestOnlySetNow(grpc_millis new_val) { + now_ = new_val; + now_is_valid_ = true; + } -/* initializer for grpc_exec_ctx: - prefer to use GRPC_EXEC_CTX_INIT whenever possible */ -#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \ - { \ - GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, gpr_cpu_current_cpu(), \ - finish_check_arg, finish_check \ + /** Global initialization for ExecCtx. Called by iomgr */ + static void GlobalInit(void); + + /** Global shutdown for ExecCtx. 
Called by iomgr */ + static void GlobalShutdown(void) { gpr_tls_destroy(&exec_ctx_); } + + /** Gets pointer to current exec_ctx */ + static ExecCtx* Get() { + return reinterpret_cast(gpr_tls_get(&exec_ctx_)); } -/* initialize an execution context at the top level of an API call into grpc - (this is safe to use elsewhere, though possibly not as efficient) */ -#define GRPC_EXEC_CTX_INIT \ - GRPC_EXEC_CTX_INITIALIZER(GRPC_EXEC_CTX_FLAG_IS_FINISHED, NULL, NULL) - -extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx; - -bool grpc_exec_ctx_has_work(grpc_exec_ctx *exec_ctx); - -/** Flush any work that has been enqueued onto this grpc_exec_ctx. - * Caller must guarantee that no interfering locks are held. - * Returns true if work was performed, false otherwise. */ -bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx); -/** Finish any pending work for a grpc_exec_ctx. Must be called before - * the instance is destroyed, or work may be lost. */ -void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx); -/** Returns true if we'd like to leave this execution context as soon as - possible: useful for deciding whether to do something more or not depending - on outside context */ -bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx); -/** A finish check that is never ready to finish */ -bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored); -/** A finish check that is always ready to finish */ -bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored); - -void grpc_exec_ctx_global_init(void); - -void grpc_exec_ctx_global_init(void); -void grpc_exec_ctx_global_shutdown(void); + static void Set(ExecCtx* exec_ctx) { + gpr_tls_set(&exec_ctx_, reinterpret_cast(exec_ctx)); + } + + protected: + /** Check if ready to finish */ + virtual bool CheckReadyToFinish() { return false; } + + /** Disallow delete on ExecCtx */ + static void operator delete(void* p) { abort(); } + + private: + /** Set exec_ctx_ to exec_ctx */ + + grpc_closure_list closure_list_ = GRPC_CLOSURE_LIST_INIT; + CombinerData combiner_data_ = {nullptr, nullptr}; + uintptr_t flags_; + unsigned starting_cpu_ = gpr_cpu_current_cpu(); + + bool now_is_valid_ = false; + grpc_millis now_ = 0; + + GPR_TLS_CLASS_DECL(exec_ctx_); + ExecCtx* last_exec_ctx_ = Get(); +}; +} // namespace grpc_core #endif /* GRPC_CORE_LIB_IOMGR_EXEC_CTX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/executor.c b/Sources/CgRPC/src/core/lib/iomgr/executor.cc similarity index 57% rename from Sources/CgRPC/src/core/lib/iomgr/executor.c rename to Sources/CgRPC/src/core/lib/iomgr/executor.cc index 892385d7d..f19f8cf20 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/executor.c +++ b/Sources/CgRPC/src/core/lib/iomgr/executor.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/executor.h" #include @@ -24,13 +26,13 @@ #include #include #include -#include -#include -#include #include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/spinlock.h" +#include "src/core/lib/gpr/tls.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/iomgr/exec_ctx.h" -#include "src/core/lib/support/spinlock.h" #define MAX_DEPTH 2 @@ -41,44 +43,43 @@ typedef struct { size_t depth; bool shutdown; bool queued_long_job; - gpr_thd_id id; + grpc_core::Thread thd; } thread_state; -static thread_state *g_thread_state; +static thread_state* g_thread_state; static size_t g_max_threads; static gpr_atm g_cur_threads; static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER; 
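/* Sketch (not from the upstream sources): the ExecCtx conventions documented
 * in the exec_ctx.h hunk above -- construct exactly one instance on the stack
 * at a public API entry point, reach it anywhere below through the
 * thread-local ExecCtx::Get(), never pass it as a parameter -- look roughly
 * like this at a call site. Only the grpc_core::ExecCtx members come from the
 * header; the entry-point name is hypothetical. */
#include "src/core/lib/iomgr/exec_ctx.h"

void my_api_entry_point() {
  grpc_core::ExecCtx exec_ctx;  // one per thread, always on the stack

  // Deeper layers no longer receive a grpc_exec_ctx* argument; they use the
  // thread-local accessor instead.
  grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 1000;
  (void)deadline;

  if (grpc_core::ExecCtx::Get()->HasWork()) {
    grpc_core::ExecCtx::Get()->Flush();  // run queued closures immediately
  }
}  // ~ExecCtx() marks the context finished and flushes any remaining work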
GPR_TLS_DECL(g_this_thread_state); -static grpc_tracer_flag executor_trace = - GRPC_TRACER_INITIALIZER(false, "executor"); +grpc_core::TraceFlag executor_trace(false, "executor"); -static void executor_thread(void *arg); +static void executor_thread(void* arg); -static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) { +static size_t run_closures(grpc_closure_list list) { size_t n = 0; - grpc_closure *c = list.head; - while (c != NULL) { - grpc_closure *next = c->next_data.next; - grpc_error *error = c->error_data.error; - if (GRPC_TRACER_ON(executor_trace)) { + grpc_closure* c = list.head; + while (c != nullptr) { + grpc_closure* next = c->next_data.next; + grpc_error* error = c->error_data.error; + if (executor_trace.enabled()) { #ifndef NDEBUG gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c, c->file_created, c->line_created); #else - gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c); + gpr_log(GPR_INFO, "EXECUTOR: run %p", c); #endif } #ifndef NDEBUG c->scheduled = false; #endif - c->cb(exec_ctx, c->cb_arg, error); + c->cb(c->cb_arg, error); GRPC_ERROR_UNREF(error); c = next; n++; - grpc_exec_ctx_flush(exec_ctx); + grpc_core::ExecCtx::Get()->Flush(); } return n; @@ -88,25 +89,25 @@ bool grpc_executor_is_threaded() { return gpr_atm_no_barrier_load(&g_cur_threads) > 0; } -void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) { +void grpc_executor_set_threading(bool threading) { gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads); if (threading) { if (cur_threads > 0) return; g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores()); gpr_atm_no_barrier_store(&g_cur_threads, 1); gpr_tls_init(&g_this_thread_state); - g_thread_state = - (thread_state *)gpr_zalloc(sizeof(thread_state) * g_max_threads); + g_thread_state = static_cast( + gpr_zalloc(sizeof(thread_state) * g_max_threads)); for (size_t i = 0; i < g_max_threads; i++) { gpr_mu_init(&g_thread_state[i].mu); gpr_cv_init(&g_thread_state[i].cv); - g_thread_state[i].elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; + g_thread_state[i].thd = grpc_core::Thread(); + g_thread_state[i].elems = GRPC_CLOSURE_LIST_INIT; } - gpr_thd_options opt = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&opt); - gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0], - &opt); + g_thread_state[0].thd = + grpc_core::Thread("grpc_executor", executor_thread, &g_thread_state[0]); + g_thread_state[0].thd.Start(); } else { if (cur_threads == 0) return; for (size_t i = 0; i < g_max_threads; i++) { @@ -120,111 +121,111 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) { gpr_spinlock_lock(&g_adding_thread_lock); gpr_spinlock_unlock(&g_adding_thread_lock); for (gpr_atm i = 0; i < g_cur_threads; i++) { - gpr_thd_join(g_thread_state[i].id); + g_thread_state[i].thd.Join(); } gpr_atm_no_barrier_store(&g_cur_threads, 0); for (size_t i = 0; i < g_max_threads; i++) { gpr_mu_destroy(&g_thread_state[i].mu); gpr_cv_destroy(&g_thread_state[i].cv); - run_closures(exec_ctx, g_thread_state[i].elems); + run_closures(g_thread_state[i].elems); } gpr_free(g_thread_state); gpr_tls_destroy(&g_this_thread_state); } } -void grpc_executor_init(grpc_exec_ctx *exec_ctx) { - grpc_register_tracer(&executor_trace); +void grpc_executor_init() { gpr_atm_no_barrier_store(&g_cur_threads, 0); - grpc_executor_set_threading(exec_ctx, true); + grpc_executor_set_threading(true); } -void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) { - grpc_executor_set_threading(exec_ctx, false); -} +void 
grpc_executor_shutdown() { grpc_executor_set_threading(false); } -static void executor_thread(void *arg) { - thread_state *ts = (thread_state *)arg; +static void executor_thread(void* arg) { + thread_state* ts = static_cast(arg); gpr_tls_set(&g_this_thread_state, (intptr_t)ts); - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL); + grpc_core::ExecCtx exec_ctx(0); size_t subtract_depth = 0; for (;;) { - if (GRPC_TRACER_ON(executor_trace)) { - gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")", - (int)(ts - g_thread_state), subtract_depth); + if (executor_trace.enabled()) { + gpr_log(GPR_INFO, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")", + static_cast(ts - g_thread_state), subtract_depth); } gpr_mu_lock(&ts->mu); ts->depth -= subtract_depth; while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) { ts->queued_long_job = false; - gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME)); + gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC)); } if (ts->shutdown) { - if (GRPC_TRACER_ON(executor_trace)) { - gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown", - (int)(ts - g_thread_state)); + if (executor_trace.enabled()) { + gpr_log(GPR_INFO, "EXECUTOR[%d]: shutdown", + static_cast(ts - g_thread_state)); } gpr_mu_unlock(&ts->mu); break; } - GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx); + GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(); grpc_closure_list exec = ts->elems; - ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; + ts->elems = GRPC_CLOSURE_LIST_INIT; gpr_mu_unlock(&ts->mu); - if (GRPC_TRACER_ON(executor_trace)) { - gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state)); + if (executor_trace.enabled()) { + gpr_log(GPR_INFO, "EXECUTOR[%d]: execute", + static_cast(ts - g_thread_state)); } - subtract_depth = run_closures(&exec_ctx, exec); + grpc_core::ExecCtx::Get()->InvalidateNow(); + subtract_depth = run_closures(exec); } - grpc_exec_ctx_finish(&exec_ctx); } -static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error, bool is_short) { +static void executor_push(grpc_closure* closure, grpc_error* error, + bool is_short) { bool retry_push; if (is_short) { - GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx); + GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(); } else { - GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx); + GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(); } do { retry_push = false; - size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads); + size_t cur_thread_count = + static_cast(gpr_atm_no_barrier_load(&g_cur_threads)); if (cur_thread_count == 0) { - if (GRPC_TRACER_ON(executor_trace)) { + if (executor_trace.enabled()) { #ifndef NDEBUG gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline", closure, closure->file_created, closure->line_created); #else - gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure); + gpr_log(GPR_INFO, "EXECUTOR: schedule %p inline", closure); #endif } - grpc_closure_list_append(&exec_ctx->closure_list, closure, error); + grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(), + closure, error); return; } - thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state); - if (ts == NULL) { - ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)]; + thread_state* ts = (thread_state*)gpr_tls_get(&g_this_thread_state); + if (ts == nullptr) { + ts = &g_thread_state[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(), + cur_thread_count)]; } else { - 
GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx); + GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(); } - thread_state *orig_ts = ts; + thread_state* orig_ts = ts; bool try_new_thread; for (;;) { - if (GRPC_TRACER_ON(executor_trace)) { + if (executor_trace.enabled()) { #ifndef NDEBUG gpr_log( GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d", closure, is_short ? "short" : "long", closure->file_created, - closure->line_created, (int)(ts - g_thread_state)); + closure->line_created, static_cast(ts - g_thread_state)); #else - gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d", + gpr_log(GPR_INFO, "EXECUTOR: try to schedule %p (%s) to thread %d", closure, is_short ? "short" : "long", (int)(ts - g_thread_state)); #endif @@ -236,7 +237,7 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, // guarantee no starvation) // ... spin through queues and try again gpr_mu_unlock(&ts->mu); - size_t idx = (size_t)(ts - g_thread_state); + size_t idx = static_cast(ts - g_thread_state); ts = &g_thread_state[(idx + 1) % cur_thread_count]; if (ts == orig_ts) { retry_push = true; @@ -245,8 +246,8 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, } continue; } - if (grpc_closure_list_empty(ts->elems)) { - GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx); + if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) { + GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(); gpr_cv_signal(&ts->cv); } grpc_closure_list_append(&ts->elems, closure, error); @@ -258,31 +259,30 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, break; } if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) { - cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads); + cur_thread_count = + static_cast(gpr_atm_no_barrier_load(&g_cur_threads)); if (cur_thread_count < g_max_threads) { gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1); - gpr_thd_options opt = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&opt); - gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread, - &g_thread_state[cur_thread_count], &opt); + g_thread_state[cur_thread_count].thd = + grpc_core::Thread("grpc_executor", executor_thread, + &g_thread_state[cur_thread_count]); + g_thread_state[cur_thread_count].thd.Start(); } gpr_spinlock_unlock(&g_adding_thread_lock); } if (retry_push) { - GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx); + GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(); } } while (retry_push); } -static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - executor_push(exec_ctx, closure, error, true); +static void executor_push_short(grpc_closure* closure, grpc_error* error) { + executor_push(closure, error, true); } -static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_error *error) { - executor_push(exec_ctx, closure, error, false); +static void executor_push_long(grpc_closure* closure, grpc_error* error) { + executor_push(closure, error, false); } static const grpc_closure_scheduler_vtable executor_vtable_short = { @@ -294,7 +294,7 @@ static const grpc_closure_scheduler_vtable executor_vtable_long = { executor_push_long, executor_push_long, "executor"}; static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long}; -grpc_closure_scheduler *grpc_executor_scheduler( +grpc_closure_scheduler* grpc_executor_scheduler( grpc_executor_job_length length) { return length == GRPC_EXECUTOR_SHORT ? 
&executor_scheduler_short : &executor_scheduler_long; diff --git a/Sources/CgRPC/src/core/lib/iomgr/executor.h b/Sources/CgRPC/src/core/lib/iomgr/executor.h index 0412c0279..68d540af5 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/executor.h +++ b/Sources/CgRPC/src/core/lib/iomgr/executor.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_EXECUTOR_H #define GRPC_CORE_LIB_IOMGR_EXECUTOR_H +#include + #include "src/core/lib/iomgr/closure.h" typedef enum { @@ -31,18 +33,18 @@ typedef enum { * This mechanism is meant to outsource work (grpc_closure instances) to a * thread, for those cases where blocking isn't an option but there isn't a * non-blocking solution available. */ -void grpc_executor_init(grpc_exec_ctx *exec_ctx); +void grpc_executor_init(); -grpc_closure_scheduler *grpc_executor_scheduler(grpc_executor_job_length); +grpc_closure_scheduler* grpc_executor_scheduler(grpc_executor_job_length); /** Shutdown the executor, running all pending work as part of the call */ -void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx); +void grpc_executor_shutdown(); /** Is the executor multi-threaded? */ bool grpc_executor_is_threaded(); /* enable/disable threading - must be called after grpc_executor_init and before grpc_executor_shutdown */ -void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool enable); +void grpc_executor_set_threading(bool enable); #endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/fork_posix.c b/Sources/CgRPC/src/core/lib/iomgr/fork_posix.cc similarity index 76% rename from Sources/CgRPC/src/core/lib/iomgr/fork_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/fork_posix.cc index a55b3a349..f8645ab15 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/fork_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/fork_posix.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_FORK @@ -24,16 +26,14 @@ #include #include -#include -#include +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/fork.h" +#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/timer_manager.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/fork.h" -#include "src/core/lib/support/thd_internal.h" #include "src/core/lib/surface/init.h" /* @@ -49,11 +49,11 @@ void grpc_prefork() { return; } if (grpc_is_initialized()) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; grpc_timer_manager_set_threading(false); - grpc_executor_set_threading(&exec_ctx, false); - grpc_exec_ctx_finish(&exec_ctx); - if (!gpr_await_threads( + grpc_executor_set_threading(false); + grpc_core::ExecCtx::Get()->Flush(); + if (!grpc_core::Thread::AwaitAll( gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(3, GPR_TIMESPAN)))) { gpr_log(GPR_ERROR, "gRPC thread still active! 
Cannot fork!"); @@ -64,24 +64,25 @@ void grpc_prefork() { void grpc_postfork_parent() { if (grpc_is_initialized()) { grpc_timer_manager_set_threading(true); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_executor_set_threading(&exec_ctx, true); - grpc_exec_ctx_finish(&exec_ctx); + grpc_core::ExecCtx exec_ctx; + grpc_executor_set_threading(true); } } void grpc_postfork_child() { if (grpc_is_initialized()) { grpc_timer_manager_set_threading(true); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_executor_set_threading(&exec_ctx, true); - grpc_exec_ctx_finish(&exec_ctx); + grpc_core::ExecCtx exec_ctx; + grpc_executor_set_threading(true); + grpc_core::ExecCtx::Get()->Flush(); } } void grpc_fork_handlers_auto_register() { if (grpc_fork_support_enabled()) { +#ifdef GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK pthread_atfork(grpc_prefork, grpc_postfork_parent, grpc_postfork_child); +#endif // GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK } } diff --git a/Sources/CgRPC/src/core/lib/iomgr/fork_windows.c b/Sources/CgRPC/src/core/lib/iomgr/fork_windows.cc similarity index 96% rename from Sources/CgRPC/src/core/lib/iomgr/fork_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/fork_windows.cc index f9986f33c..798f671bd 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/fork_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/fork_windows.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifndef GRPC_POSIX_FORK diff --git a/Sources/CgRPC/src/core/lib/iomgr/gethostname.h b/Sources/CgRPC/src/core/lib/iomgr/gethostname.h index 9c6b9d8d4..9f10b4afa 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/gethostname.h +++ b/Sources/CgRPC/src/core/lib/iomgr/gethostname.h @@ -21,6 +21,6 @@ // Returns the hostname of the local machine. // Caller takes ownership of result. 
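/* Sketch (not from the upstream sources): per the comment above,
 * grpc_gethostname() hands ownership of the returned buffer to the caller
 * and may return a null pointer when the platform provides no hostname, so
 * callers must check and gpr_free() the result. A minimal caller under that
 * contract; the wrapper function name is made up. */
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/iomgr/gethostname.h"

static void log_local_hostname() {
  char* hostname = grpc_gethostname();
  if (hostname == nullptr) {
    gpr_log(GPR_INFO, "hostname unavailable on this platform");
    return;
  }
  gpr_log(GPR_INFO, "local hostname: %s", hostname);
  gpr_free(hostname);  // caller owns (and must free) the returned buffer
}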
-char *grpc_gethostname(); +char* grpc_gethostname(); #endif /* GRPC_CORE_LIB_IOMGR_GETHOSTNAME_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/gethostname_fallback.c b/Sources/CgRPC/src/core/lib/iomgr/gethostname_fallback.cc similarity index 85% rename from Sources/CgRPC/src/core/lib/iomgr/gethostname_fallback.c rename to Sources/CgRPC/src/core/lib/iomgr/gethostname_fallback.cc index 622946156..65ae81872 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/gethostname_fallback.c +++ b/Sources/CgRPC/src/core/lib/iomgr/gethostname_fallback.cc @@ -16,12 +16,15 @@ * */ +#include + +#include "src/core/lib/iomgr/gethostname.h" #include "src/core/lib/iomgr/port.h" #ifdef GRPC_GETHOSTNAME_FALLBACK #include -char *grpc_gethostname() { return NULL; } +char* grpc_gethostname() { return NULL; } #endif // GRPC_GETHOSTNAME_FALLBACK diff --git a/Sources/CgRPC/src/core/lib/iomgr/gethostname_host_name_max.c b/Sources/CgRPC/src/core/lib/iomgr/gethostname_host_name_max.cc similarity index 81% rename from Sources/CgRPC/src/core/lib/iomgr/gethostname_host_name_max.c rename to Sources/CgRPC/src/core/lib/iomgr/gethostname_host_name_max.cc index 4d0511412..79f5daa8f 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/gethostname_host_name_max.c +++ b/Sources/CgRPC/src/core/lib/iomgr/gethostname_host_name_max.cc @@ -16,6 +16,9 @@ * */ +#include + +#include "src/core/lib/iomgr/gethostname.h" #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_HOST_NAME_MAX @@ -25,11 +28,11 @@ #include -char *grpc_gethostname() { - char *hostname = (char *)gpr_malloc(HOST_NAME_MAX); +char* grpc_gethostname() { + char* hostname = static_cast(gpr_malloc(HOST_NAME_MAX)); if (gethostname(hostname, HOST_NAME_MAX) != 0) { gpr_free(hostname); - return NULL; + return nullptr; } return hostname; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/gethostname_sysconf.c b/Sources/CgRPC/src/core/lib/iomgr/gethostname_sysconf.cc similarity index 83% rename from Sources/CgRPC/src/core/lib/iomgr/gethostname_sysconf.c rename to Sources/CgRPC/src/core/lib/iomgr/gethostname_sysconf.cc index 51bac5d69..92c5de333 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/gethostname_sysconf.c +++ b/Sources/CgRPC/src/core/lib/iomgr/gethostname_sysconf.cc @@ -16,6 +16,9 @@ * */ +#include + +#include "src/core/lib/iomgr/gethostname.h" #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SYSCONF @@ -24,12 +27,12 @@ #include -char *grpc_gethostname() { +char* grpc_gethostname() { size_t host_name_max = (size_t)sysconf(_SC_HOST_NAME_MAX); - char *hostname = (char *)gpr_malloc(host_name_max); + char* hostname = (char*)gpr_malloc(host_name_max); if (gethostname(hostname, host_name_max) != 0) { gpr_free(hostname); - return NULL; + return nullptr; } return hostname; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/iocp_windows.c b/Sources/CgRPC/src/core/lib/iomgr/iocp_windows.cc similarity index 66% rename from Sources/CgRPC/src/core/lib/iomgr/iocp_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/iocp_windows.cc index c082179c0..ce7723103 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/iocp_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/iocp_windows.cc @@ -16,18 +16,21 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINSOCK_SOCKET #include +#include #include #include #include -#include #include "src/core/lib/debug/stats.h" +#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/iomgr/iocp_windows.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/socket_windows.h" @@ -40,36 +43,30 @@ static gpr_atm g_custom_events = 0; static 
HANDLE g_iocp; -static DWORD deadline_to_millis_timeout(gpr_timespec deadline, - gpr_timespec now) { - gpr_timespec timeout; - static const int64_t max_spin_polling_us = 10; - if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) { +static DWORD deadline_to_millis_timeout(grpc_millis deadline) { + if (deadline == GRPC_MILLIS_INF_FUTURE) { return INFINITE; } - if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros( - max_spin_polling_us, - GPR_TIMESPAN))) <= 0) { - return 0; - } - timeout = gpr_time_sub(deadline, now); - return (DWORD)gpr_time_to_millis(gpr_time_add( - timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN))); + grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + if (deadline < now) return 0; + grpc_millis timeout = deadline - now; + if (timeout > std::numeric_limits::max()) return INFINITE; + return static_cast(deadline - now); } -grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, - gpr_timespec deadline) { +grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) { BOOL success; DWORD bytes = 0; DWORD flags = 0; ULONG_PTR completion_key; LPOVERLAPPED overlapped; - grpc_winsocket *socket; - grpc_winsocket_callback_info *info; - GRPC_STATS_INC_SYSCALL_POLL(exec_ctx); - success = GetQueuedCompletionStatus( - g_iocp, &bytes, &completion_key, &overlapped, - deadline_to_millis_timeout(deadline, gpr_now(deadline.clock_type))); + grpc_winsocket* socket; + grpc_winsocket_callback_info* info; + GRPC_STATS_INC_SYSCALL_POLL(); + success = + GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key, &overlapped, + deadline_to_millis_timeout(deadline)); + grpc_core::ExecCtx::Get()->InvalidateNow(); if (success == 0 && overlapped == NULL) { return GRPC_IOCP_WORK_TIMEOUT; } @@ -84,7 +81,7 @@ grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, abort(); } - socket = (grpc_winsocket *)completion_key; + socket = (grpc_winsocket*)completion_key; if (overlapped == &socket->write_info.overlapped) { info = &socket->write_info; } else if (overlapped == &socket->read_info.overlapped) { @@ -97,7 +94,7 @@ grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, info->bytes_transfered = bytes; info->wsa_error = success ? 
0 : WSAGetLastError(); GPR_ASSERT(overlapped == &info->overlapped); - grpc_socket_become_ready(exec_ctx, socket, info); + grpc_socket_become_ready(socket, info); return GRPC_IOCP_WORK_WORK; } @@ -117,32 +114,32 @@ void grpc_iocp_kick(void) { } void grpc_iocp_flush(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; grpc_iocp_work_status work_status; do { - work_status = grpc_iocp_work(&exec_ctx, gpr_inf_past(GPR_CLOCK_MONOTONIC)); + work_status = grpc_iocp_work(GRPC_MILLIS_INF_PAST); } while (work_status == GRPC_IOCP_WORK_KICK || - grpc_exec_ctx_flush(&exec_ctx)); + grpc_core::ExecCtx::Get()->Flush()); } void grpc_iocp_shutdown(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; while (gpr_atm_acq_load(&g_custom_events)) { - grpc_iocp_work(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC)); - grpc_exec_ctx_flush(&exec_ctx); + grpc_iocp_work(GRPC_MILLIS_INF_FUTURE); + grpc_core::ExecCtx::Get()->Flush(); } - grpc_exec_ctx_finish(&exec_ctx); + GPR_ASSERT(CloseHandle(g_iocp)); } -void grpc_iocp_add_socket(grpc_winsocket *socket) { +void grpc_iocp_add_socket(grpc_winsocket* socket) { HANDLE ret; if (socket->added_to_iocp) return; ret = CreateIoCompletionPort((HANDLE)socket->socket, g_iocp, (uintptr_t)socket, 0); if (!ret) { - char *utf8_message = gpr_format_message(WSAGetLastError()); + char* utf8_message = gpr_format_message(WSAGetLastError()); gpr_log(GPR_ERROR, "Unable to add socket to iocp: %s", utf8_message); gpr_free(utf8_message); __debugbreak(); diff --git a/Sources/CgRPC/src/core/lib/iomgr/iocp_windows.h b/Sources/CgRPC/src/core/lib/iomgr/iocp_windows.h index 9c89e868c..68d9de615 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/iocp_windows.h +++ b/Sources/CgRPC/src/core/lib/iomgr/iocp_windows.h @@ -19,8 +19,15 @@ #ifndef GRPC_CORE_LIB_IOMGR_IOCP_WINDOWS_H #define GRPC_CORE_LIB_IOMGR_IOCP_WINDOWS_H +#include + #include +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_WINSOCK_SOCKET + +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/socket_windows.h" typedef enum { @@ -29,12 +36,13 @@ typedef enum { GRPC_IOCP_WORK_KICK } grpc_iocp_work_status; -grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx *exec_ctx, - gpr_timespec deadline); +grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline); void grpc_iocp_init(void); void grpc_iocp_kick(void); void grpc_iocp_flush(void); void grpc_iocp_shutdown(void); -void grpc_iocp_add_socket(grpc_winsocket *); +void grpc_iocp_add_socket(grpc_winsocket*); + +#endif #endif /* GRPC_CORE_LIB_IOMGR_IOCP_WINDOWS_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr.c b/Sources/CgRPC/src/core/lib/iomgr/iomgr.c deleted file mode 100644 index f63f19015..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/iomgr.c +++ /dev/null @@ -1,170 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/iomgr/iomgr.h" - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "src/core/lib/iomgr/exec_ctx.h" -#include "src/core/lib/iomgr/executor.h" -#include "src/core/lib/iomgr/iomgr_internal.h" -#include "src/core/lib/iomgr/network_status_tracker.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/iomgr/timer_manager.h" -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" - -static gpr_mu g_mu; -static gpr_cv g_rcv; -static int g_shutdown; -static grpc_iomgr_object g_root_object; - -void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) { - g_shutdown = 0; - gpr_mu_init(&g_mu); - gpr_cv_init(&g_rcv); - grpc_exec_ctx_global_init(); - grpc_executor_init(exec_ctx); - grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC)); - g_root_object.next = g_root_object.prev = &g_root_object; - g_root_object.name = (char *)"root"; - grpc_network_status_init(); - grpc_iomgr_platform_init(); -} - -void grpc_iomgr_start(grpc_exec_ctx *exec_ctx) { grpc_timer_manager_init(); } - -static size_t count_objects(void) { - grpc_iomgr_object *obj; - size_t n = 0; - for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) { - n++; - } - return n; -} - -static void dump_objects(const char *kind) { - grpc_iomgr_object *obj; - for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) { - gpr_log(GPR_DEBUG, "%s OBJECT: %s %p", kind, obj->name, obj); - } -} - -void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) { - gpr_timespec shutdown_deadline = gpr_time_add( - gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN)); - gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME); - - grpc_timer_manager_shutdown(); - grpc_iomgr_platform_flush(); - grpc_executor_shutdown(exec_ctx); - - gpr_mu_lock(&g_mu); - g_shutdown = 1; - while (g_root_object.next != &g_root_object) { - if (gpr_time_cmp( - gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time), - gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) { - if (g_root_object.next != &g_root_object) { - gpr_log(GPR_DEBUG, - "Waiting for %" PRIuPTR " iomgr objects to be destroyed", - count_objects()); - } - last_warning_time = gpr_now(GPR_CLOCK_REALTIME); - } - if (grpc_timer_check(exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL) == - GRPC_TIMERS_FIRED) { - gpr_mu_unlock(&g_mu); - grpc_exec_ctx_flush(exec_ctx); - grpc_iomgr_platform_flush(); - gpr_mu_lock(&g_mu); - continue; - } - if (g_root_object.next != &g_root_object) { - if (grpc_iomgr_abort_on_leaks()) { - gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR - " iomgr objects before shutdown deadline: " - "memory leaks are likely", - count_objects()); - dump_objects("LEAKED"); - abort(); - } - gpr_timespec short_deadline = gpr_time_add( - gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN)); - if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) { - if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) { - if (g_root_object.next != &g_root_object) { - gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR - " iomgr objects before shutdown deadline: " - "memory leaks are likely", - count_objects()); - dump_objects("LEAKED"); - } - break; - } - } - } - } - gpr_mu_unlock(&g_mu); - - grpc_timer_list_shutdown(exec_ctx); - grpc_exec_ctx_flush(exec_ctx); - - /* ensure all threads have left g_mu */ - gpr_mu_lock(&g_mu); - gpr_mu_unlock(&g_mu); - - grpc_iomgr_platform_shutdown(); - grpc_exec_ctx_global_shutdown(); - grpc_network_status_shutdown(); - gpr_mu_destroy(&g_mu); - 
gpr_cv_destroy(&g_rcv); -} - -void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name) { - obj->name = gpr_strdup(name); - gpr_mu_lock(&g_mu); - obj->next = &g_root_object; - obj->prev = g_root_object.prev; - obj->next->prev = obj->prev->next = obj; - gpr_mu_unlock(&g_mu); -} - -void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) { - gpr_mu_lock(&g_mu); - obj->next->prev = obj->prev; - obj->prev->next = obj->next; - gpr_cv_signal(&g_rcv); - gpr_mu_unlock(&g_mu); - gpr_free(obj->name); -} - -bool grpc_iomgr_abort_on_leaks(void) { - char *env = gpr_getenv("GRPC_ABORT_ON_LEAKS"); - bool should_we = gpr_is_true(env); - gpr_free(env); - return should_we; -} diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr.cc b/Sources/CgRPC/src/core/lib/iomgr/iomgr.cc new file mode 100644 index 000000000..468814eae --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr.cc @@ -0,0 +1,178 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/iomgr.h" + +#include +#include +#include + +#include +#include +#include +#include + +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/thd.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/executor.h" +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/network_status_tracker.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/timer_manager.h" + +static gpr_mu g_mu; +static gpr_cv g_rcv; +static int g_shutdown; +static grpc_iomgr_object g_root_object; + +void grpc_iomgr_init() { + grpc_core::ExecCtx exec_ctx; + grpc_determine_iomgr_platform(); + g_shutdown = 0; + gpr_mu_init(&g_mu); + gpr_cv_init(&g_rcv); + grpc_executor_init(); + grpc_timer_list_init(); + g_root_object.next = g_root_object.prev = &g_root_object; + g_root_object.name = (char*)"root"; + grpc_network_status_init(); + grpc_iomgr_platform_init(); +} + +void grpc_iomgr_start() { grpc_timer_manager_init(); } + +static size_t count_objects(void) { + grpc_iomgr_object* obj; + size_t n = 0; + for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) { + n++; + } + return n; +} + +static void dump_objects(const char* kind) { + grpc_iomgr_object* obj; + for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) { + gpr_log(GPR_DEBUG, "%s OBJECT: %s %p", kind, obj->name, obj); + } +} + +void grpc_iomgr_shutdown() { + gpr_timespec shutdown_deadline = gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN)); + gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME); + + { + grpc_timer_manager_shutdown(); + grpc_iomgr_platform_flush(); + grpc_executor_shutdown(); + + gpr_mu_lock(&g_mu); + g_shutdown = 1; + while (g_root_object.next != &g_root_object) { + if (gpr_time_cmp( + gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time), + gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) 
{ + if (g_root_object.next != &g_root_object) { + gpr_log(GPR_DEBUG, + "Waiting for %" PRIuPTR " iomgr objects to be destroyed", + count_objects()); + } + last_warning_time = gpr_now(GPR_CLOCK_REALTIME); + } + grpc_core::ExecCtx::Get()->SetNowIomgrShutdown(); + if (grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED) { + gpr_mu_unlock(&g_mu); + grpc_core::ExecCtx::Get()->Flush(); + grpc_iomgr_platform_flush(); + gpr_mu_lock(&g_mu); + continue; + } + if (g_root_object.next != &g_root_object) { + if (grpc_iomgr_abort_on_leaks()) { + gpr_log(GPR_DEBUG, + "Failed to free %" PRIuPTR + " iomgr objects before shutdown deadline: " + "memory leaks are likely", + count_objects()); + dump_objects("LEAKED"); + abort(); + } + gpr_timespec short_deadline = + gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), + gpr_time_from_millis(100, GPR_TIMESPAN)); + if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) { + if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > + 0) { + if (g_root_object.next != &g_root_object) { + gpr_log(GPR_DEBUG, + "Failed to free %" PRIuPTR + " iomgr objects before shutdown deadline: " + "memory leaks are likely", + count_objects()); + dump_objects("LEAKED"); + } + break; + } + } + } + } + gpr_mu_unlock(&g_mu); + grpc_timer_list_shutdown(); + grpc_core::ExecCtx::Get()->Flush(); + } + + /* ensure all threads have left g_mu */ + gpr_mu_lock(&g_mu); + gpr_mu_unlock(&g_mu); + + grpc_iomgr_platform_shutdown(); + grpc_network_status_shutdown(); + gpr_mu_destroy(&g_mu); + gpr_cv_destroy(&g_rcv); +} + +void grpc_iomgr_register_object(grpc_iomgr_object* obj, const char* name) { + obj->name = gpr_strdup(name); + gpr_mu_lock(&g_mu); + obj->next = &g_root_object; + obj->prev = g_root_object.prev; + obj->next->prev = obj->prev->next = obj; + gpr_mu_unlock(&g_mu); +} + +void grpc_iomgr_unregister_object(grpc_iomgr_object* obj) { + gpr_mu_lock(&g_mu); + obj->next->prev = obj->prev; + obj->prev->next = obj->next; + gpr_cv_signal(&g_rcv); + gpr_mu_unlock(&g_mu); + gpr_free(obj->name); +} + +bool grpc_iomgr_abort_on_leaks(void) { + char* env = gpr_getenv("GRPC_ABORT_ON_LEAKS"); + bool should_we = gpr_is_true(env); + gpr_free(env); + return should_we; +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr.h b/Sources/CgRPC/src/core/lib/iomgr/iomgr.h index e3cd6ebe7..e6d66e545 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/iomgr.h +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr.h @@ -19,17 +19,18 @@ #ifndef GRPC_CORE_LIB_IOMGR_IOMGR_H #define GRPC_CORE_LIB_IOMGR_IOMGR_H -#include +#include + #include "src/core/lib/iomgr/port.h" /** Initializes the iomgr. */ -void grpc_iomgr_init(grpc_exec_ctx *exec_ctx); +void grpc_iomgr_init(); /** Starts any background threads for iomgr. */ -void grpc_iomgr_start(grpc_exec_ctx *exec_ctx); +void grpc_iomgr_start(); /** Signals the intention to shutdown the iomgr. Expects to be able to flush * exec_ctx. */ -void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx); +void grpc_iomgr_shutdown(); #endif /* GRPC_CORE_LIB_IOMGR_IOMGR_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_custom.cc b/Sources/CgRPC/src/core/lib/iomgr/iomgr_custom.cc new file mode 100644 index 000000000..d34c8e7cd --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr_custom.cc @@ -0,0 +1,63 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
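/* A minimal sketch (hypothetical resource type, not part of this patch): each
   iomgr-owned resource embeds a grpc_iomgr_object and registers it, so the
   grpc_iomgr_shutdown() loop above can wait for, count, and dump whatever is
   still alive. The registry is the circular doubly-linked list rooted at
   g_root_object. */
typedef struct my_endpoint {
  grpc_iomgr_object iomgr_obj;  /* must stay registered while the endpoint lives */
  /* ... endpoint state ... */
} my_endpoint;

static void my_endpoint_create(my_endpoint* ep) {
  grpc_iomgr_register_object(&ep->iomgr_obj, "my_endpoint");
}

static void my_endpoint_destroy(my_endpoint* ep) {
  /* signals g_rcv so a blocked grpc_iomgr_shutdown() re-checks the list */
  grpc_iomgr_unregister_object(&ep->iomgr_obj);
}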
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#include + +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/executor.h" +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/pollset_custom.h" +#include "src/core/lib/iomgr/pollset_set_custom.h" +#include "src/core/lib/iomgr/resolve_address_custom.h" + +gpr_thd_id g_init_thread; + +static void iomgr_platform_init(void) { + grpc_core::ExecCtx exec_ctx; + grpc_executor_set_threading(false); + g_init_thread = gpr_thd_currentid(); + grpc_pollset_global_init(); +} +static void iomgr_platform_flush(void) {} +static void iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); } + +static grpc_iomgr_platform_vtable vtable = { + iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown}; + +void grpc_custom_iomgr_init(grpc_socket_vtable* socket, + grpc_custom_resolver_vtable* resolver, + grpc_custom_timer_vtable* timer, + grpc_custom_poller_vtable* poller) { + grpc_custom_endpoint_init(socket); + grpc_custom_timer_init(timer); + grpc_custom_pollset_init(poller); + grpc_custom_pollset_set_init(); + grpc_custom_resolver_init(resolver); + grpc_set_iomgr_platform_vtable(&vtable); +} + +#ifdef GRPC_CUSTOM_SOCKET +grpc_iomgr_platform_vtable* grpc_default_iomgr_platform_vtable() { + return &vtable; +} +#endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_custom.h b/Sources/CgRPC/src/core/lib/iomgr/iomgr_custom.h new file mode 100644 index 000000000..57cc2f9b9 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr_custom.h @@ -0,0 +1,47 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_IOMGR_CUSTOM_H +#define GRPC_CORE_LIB_IOMGR_IOMGR_CUSTOM_H + +#include + +#include "src/core/lib/iomgr/pollset_custom.h" +#include "src/core/lib/iomgr/resolve_address_custom.h" +#include "src/core/lib/iomgr/tcp_custom.h" +#include "src/core/lib/iomgr/timer_custom.h" + +#include + +/* The thread ID of the thread on which grpc was initialized. 
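/* A minimal sketch (hypothetical engine; the uv_* vtables later in this patch
   are the real in-tree example): an embedder installs a custom event engine by
   handing its four vtables to grpc_custom_iomgr_init() before grpc_init().
   Because this registers the custom platform vtable, the later
   grpc_determine_iomgr_platform() call leaves it in place instead of wiring
   up the default POSIX/Windows implementation. */
extern grpc_socket_vtable my_socket_vtable;             /* assumed to be */
extern grpc_custom_resolver_vtable my_resolver_vtable;  /* defined by the */
extern grpc_custom_timer_vtable my_timer_vtable;        /* embedder */
extern grpc_custom_poller_vtable my_poller_vtable;

void my_engine_install(void) {
  grpc_custom_iomgr_init(&my_socket_vtable, &my_resolver_vtable,
                         &my_timer_vtable, &my_poller_vtable);
}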
Used to verify + * that all calls into the custom iomgr are made on that same thread */ +extern gpr_thd_id g_init_thread; + +#ifdef GRPC_CUSTOM_IOMGR_THREAD_CHECK +#define GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD() \ + GPR_ASSERT(gpr_thd_currentid() == g_init_thread) +#else +#define GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD() +#endif /* GRPC_CUSTOM_IOMGR_THREAD_CHECK */ + +void grpc_custom_iomgr_init(grpc_socket_vtable* socket, + grpc_custom_resolver_vtable* resolver, + grpc_custom_timer_vtable* timer, + grpc_custom_poller_vtable* poller); + +#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_CUSTOM_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_internal.cc b/Sources/CgRPC/src/core/lib/iomgr/iomgr_internal.cc new file mode 100644 index 000000000..32dbabb79 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr_internal.cc @@ -0,0 +1,43 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include + +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/timer_manager.h" + +static grpc_iomgr_platform_vtable* iomgr_platform_vtable = nullptr; + +void grpc_set_iomgr_platform_vtable(grpc_iomgr_platform_vtable* vtable) { + iomgr_platform_vtable = vtable; +} + +void grpc_determine_iomgr_platform() { + if (iomgr_platform_vtable == nullptr) { + grpc_set_default_iomgr_platform(); + } +} + +void grpc_iomgr_platform_init() { iomgr_platform_vtable->init(); } + +void grpc_iomgr_platform_flush() { iomgr_platform_vtable->flush(); } + +void grpc_iomgr_platform_shutdown() { iomgr_platform_vtable->shutdown(); } diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_internal.h b/Sources/CgRPC/src/core/lib/iomgr/iomgr_internal.h index 836d82515..b011d9c7b 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/iomgr_internal.h +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr_internal.h @@ -19,18 +19,32 @@ #ifndef GRPC_CORE_LIB_IOMGR_IOMGR_INTERNAL_H #define GRPC_CORE_LIB_IOMGR_IOMGR_INTERNAL_H +#include + #include #include "src/core/lib/iomgr/iomgr.h" typedef struct grpc_iomgr_object { - char *name; - struct grpc_iomgr_object *next; - struct grpc_iomgr_object *prev; + char* name; + struct grpc_iomgr_object* next; + struct grpc_iomgr_object* prev; } grpc_iomgr_object; -void grpc_iomgr_register_object(grpc_iomgr_object *obj, const char *name); -void grpc_iomgr_unregister_object(grpc_iomgr_object *obj); +typedef struct grpc_iomgr_platform_vtable { + void (*init)(void); + void (*flush)(void); + void (*shutdown)(void); +} grpc_iomgr_platform_vtable; + +void grpc_iomgr_register_object(grpc_iomgr_object* obj, const char* name); +void grpc_iomgr_unregister_object(grpc_iomgr_object* obj); + +void grpc_determine_iomgr_platform(); + +void grpc_set_iomgr_platform_vtable(grpc_iomgr_platform_vtable* vtable); + +void grpc_set_default_iomgr_platform(); void grpc_iomgr_platform_init(void); /** flush any globally queued work from iomgr */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.c 
b/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.c deleted file mode 100644 index f5875a247..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_POSIX_SOCKET - -#include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/ev_posix.h" -#include "src/core/lib/iomgr/iomgr_posix.h" -#include "src/core/lib/iomgr/tcp_posix.h" - -void grpc_iomgr_platform_init(void) { - grpc_wakeup_fd_global_init(); - grpc_event_engine_init(); - grpc_register_tracer(&grpc_tcp_trace); -} - -void grpc_iomgr_platform_flush(void) {} - -void grpc_iomgr_platform_shutdown(void) { - grpc_event_engine_shutdown(); - grpc_wakeup_fd_global_destroy(); -} - -#endif /* GRPC_POSIX_SOCKET */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.cc b/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.cc new file mode 100644 index 000000000..66c9cb7ff --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.cc @@ -0,0 +1,67 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
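/* A minimal sketch (hypothetical port, not part of this patch): the
   grpc_iomgr_platform_vtable indirection introduced in iomgr_internal.h/.cc
   above. A port supplies init/flush/shutdown hooks and registers them once;
   grpc_determine_iomgr_platform() only falls back to
   grpc_set_default_iomgr_platform() if nothing was registered. */
static void my_platform_init(void) { /* bring up platform-wide pollers, etc. */ }
static void my_platform_flush(void) {}
static void my_platform_shutdown(void) {}

static grpc_iomgr_platform_vtable my_platform_vtable = {
    my_platform_init, my_platform_flush, my_platform_shutdown};

void my_port_install(void) {
  grpc_set_iomgr_platform_vtable(&my_platform_vtable);
}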
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_POSIX_SOCKET + +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/iomgr/ev_posix.h" +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/iomgr_posix.h" +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/tcp_client.h" +#include "src/core/lib/iomgr/tcp_posix.h" +#include "src/core/lib/iomgr/tcp_server.h" +#include "src/core/lib/iomgr/timer.h" + +extern grpc_tcp_server_vtable grpc_posix_tcp_server_vtable; +extern grpc_tcp_client_vtable grpc_posix_tcp_client_vtable; +extern grpc_timer_vtable grpc_generic_timer_vtable; +extern grpc_pollset_vtable grpc_posix_pollset_vtable; +extern grpc_pollset_set_vtable grpc_posix_pollset_set_vtable; +extern grpc_address_resolver_vtable grpc_posix_resolver_vtable; + +static void iomgr_platform_init(void) { + grpc_wakeup_fd_global_init(); + grpc_event_engine_init(); +} + +static void iomgr_platform_flush(void) {} + +static void iomgr_platform_shutdown(void) { + grpc_event_engine_shutdown(); + grpc_wakeup_fd_global_destroy(); +} + +static grpc_iomgr_platform_vtable vtable = { + iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown}; + +void grpc_set_default_iomgr_platform() { + grpc_set_tcp_client_impl(&grpc_posix_tcp_client_vtable); + grpc_set_tcp_server_impl(&grpc_posix_tcp_server_vtable); + grpc_set_timer_impl(&grpc_generic_timer_vtable); + grpc_set_pollset_vtable(&grpc_posix_pollset_vtable); + grpc_set_pollset_set_vtable(&grpc_posix_pollset_set_vtable); + grpc_set_resolver_impl(&grpc_posix_resolver_vtable); + grpc_set_iomgr_platform_vtable(&vtable); +} + +#endif /* GRPC_POSIX_SOCKET */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.h b/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.h index f7a4af6a8..54ec46e1b 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.h +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr_posix.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_IOMGR_POSIX_H #define GRPC_CORE_LIB_IOMGR_IOMGR_POSIX_H +#include + #include "src/core/lib/iomgr/iomgr_internal.h" #endif /* GRPC_CORE_LIB_IOMGR_IOMGR_POSIX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.c b/Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.c deleted file mode 100644 index df5d23af3..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.c +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_UV - -#include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/executor.h" -#include "src/core/lib/iomgr/iomgr_uv.h" -#include "src/core/lib/iomgr/pollset_uv.h" -#include "src/core/lib/iomgr/tcp_uv.h" - -gpr_thd_id g_init_thread; - -void grpc_iomgr_platform_init(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_pollset_global_init(); - grpc_register_tracer(&grpc_tcp_trace); - grpc_executor_set_threading(&exec_ctx, false); - g_init_thread = gpr_thd_currentid(); - grpc_exec_ctx_finish(&exec_ctx); -} -void grpc_iomgr_platform_flush(void) {} -void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); } - -#endif /* GRPC_UV */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.cc b/Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.cc new file mode 100644 index 000000000..4a984446d --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.cc @@ -0,0 +1,40 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#if defined(GRPC_CUSTOM_SOCKET) && defined(GRPC_UV) + +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/iomgr_internal.h" +#include "src/core/lib/iomgr/pollset_custom.h" +#include "src/core/lib/iomgr/tcp_custom.h" +#include "src/core/lib/iomgr/timer_custom.h" + +extern grpc_socket_vtable grpc_uv_socket_vtable; +extern grpc_custom_resolver_vtable uv_resolver_vtable; +extern grpc_custom_timer_vtable uv_timer_vtable; +extern grpc_custom_poller_vtable uv_pollset_vtable; + +void grpc_set_default_iomgr_platform() { + grpc_custom_iomgr_init(&grpc_uv_socket_vtable, &uv_resolver_vtable, + &uv_timer_vtable, &uv_pollset_vtable); +} +#endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.h b/Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.h deleted file mode 100644 index 3b4daaa73..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/iomgr_uv.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_IOMGR_IOMGR_UV_H -#define GRPC_CORE_LIB_IOMGR_IOMGR_UV_H - -#include "src/core/lib/iomgr/iomgr_internal.h" - -#include - -/* The thread ID of the thread on which grpc was initialized. 
Used to verify - * that all calls into libuv are made on that same thread */ -extern gpr_thd_id g_init_thread; - -#ifdef GRPC_UV_THREAD_CHECK -#define GRPC_UV_ASSERT_SAME_THREAD() \ - GPR_ASSERT(gpr_thd_currentid() == g_init_thread) -#else -#define GRPC_UV_ASSERT_SAME_THREAD() -#endif /* GRPC_UV_THREAD_CHECK */ - -#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_UV_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/iomgr_windows.c b/Sources/CgRPC/src/core/lib/iomgr/iomgr_windows.cc similarity index 54% rename from Sources/CgRPC/src/core/lib/iomgr/iomgr_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/iomgr_windows.cc index 630370166..cdef89cbf 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/iomgr_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/iomgr_windows.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINSOCK_SOCKET @@ -27,7 +29,18 @@ #include "src/core/lib/iomgr/iocp_windows.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/iomgr/pollset_windows.h" +#include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/socket_windows.h" +#include "src/core/lib/iomgr/tcp_client.h" +#include "src/core/lib/iomgr/tcp_server.h" +#include "src/core/lib/iomgr/timer.h" + +extern grpc_tcp_server_vtable grpc_windows_tcp_server_vtable; +extern grpc_tcp_client_vtable grpc_windows_tcp_client_vtable; +extern grpc_timer_vtable grpc_generic_timer_vtable; +extern grpc_pollset_vtable grpc_windows_pollset_vtable; +extern grpc_pollset_set_vtable grpc_windows_pollset_set_vtable; +extern grpc_address_resolver_vtable grpc_windows_resolver_vtable; /* Windows' io manager is going to be fully designed using IO completion ports. All of what we're doing here is basically make sure that @@ -44,18 +57,31 @@ static void winsock_shutdown(void) { GPR_ASSERT(status == 0); } -void grpc_iomgr_platform_init(void) { +static void iomgr_platform_init(void) { winsock_init(); grpc_iocp_init(); grpc_pollset_global_init(); } -void grpc_iomgr_platform_flush(void) { grpc_iocp_flush(); } +static void iomgr_platform_flush(void) { grpc_iocp_flush(); } -void grpc_iomgr_platform_shutdown(void) { +static void iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); grpc_iocp_shutdown(); winsock_shutdown(); } +static grpc_iomgr_platform_vtable vtable = { + iomgr_platform_init, iomgr_platform_flush, iomgr_platform_shutdown}; + +void grpc_set_default_iomgr_platform() { + grpc_set_tcp_client_impl(&grpc_windows_tcp_client_vtable); + grpc_set_tcp_server_impl(&grpc_windows_tcp_server_vtable); + grpc_set_timer_impl(&grpc_generic_timer_vtable); + grpc_set_pollset_vtable(&grpc_windows_pollset_vtable); + grpc_set_pollset_set_vtable(&grpc_windows_pollset_set_vtable); + grpc_set_resolver_impl(&grpc_windows_resolver_vtable); + grpc_set_iomgr_platform_vtable(&vtable); +} + #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.c b/Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.cc similarity index 90% rename from Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.c rename to Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.cc index d08844c0d..036b77866 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.c +++ b/Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.cc @@ -16,11 +16,13 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/is_epollexclusive_available.h" -#ifdef GRPC_LINUX_EPOLL +#ifdef GRPC_LINUX_EPOLL_CREATE1 #include @@ -37,7 +39,7 @@ 
bool grpc_is_epollexclusive_available(void) { int fd = epoll_create1(EPOLL_CLOEXEC); if (fd < 0) { if (!logged_why_not) { - gpr_log(GPR_ERROR, + gpr_log(GPR_DEBUG, "epoll_create1 failed with error: %d. Not using epollex polling " "engine.", fd); @@ -48,7 +50,7 @@ bool grpc_is_epollexclusive_available(void) { int evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); if (evfd < 0) { if (!logged_why_not) { - gpr_log(GPR_ERROR, + gpr_log(GPR_DEBUG, "eventfd failed with error: %d. Not using epollex polling " "engine.", fd); @@ -61,8 +63,9 @@ bool grpc_is_epollexclusive_available(void) { /* choose events that should cause an error on EPOLLEXCLUSIVE enabled kernels - specifically the combination of EPOLLONESHOT and EPOLLEXCLUSIVE */ - ev.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT); - ev.data.ptr = NULL; + ev.events = + static_cast(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE | EPOLLONESHOT); + ev.data.ptr = nullptr; if (epoll_ctl(fd, EPOLL_CTL_ADD, evfd, &ev) != 0) { if (errno != EINVAL) { if (!logged_why_not) { @@ -79,7 +82,7 @@ bool grpc_is_epollexclusive_available(void) { } } else { if (!logged_why_not) { - gpr_log(GPR_ERROR, + gpr_log(GPR_DEBUG, "epoll_ctl with EPOLLEXCLUSIVE | EPOLLONESHOT succeeded. This is " "evidence of no EPOLLEXCLUSIVE support. Not using " "epollex polling engine."); diff --git a/Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.h b/Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.h index 1d2e133a3..8a44113c3 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.h +++ b/Sources/CgRPC/src/core/lib/iomgr/is_epollexclusive_available.h @@ -19,8 +19,18 @@ #ifndef GRPC_CORE_LIB_IOMGR_IS_EPOLLEXCLUSIVE_AVAILABLE_H #define GRPC_CORE_LIB_IOMGR_IS_EPOLLEXCLUSIVE_AVAILABLE_H +#include + #include +#ifdef __cplusplus +extern "C" { +#endif + bool grpc_is_epollexclusive_available(void); +#ifdef __cplusplus +} +#endif + #endif /* GRPC_CORE_LIB_IOMGR_IS_EPOLLEXCLUSIVE_AVAILABLE_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/load_file.c b/Sources/CgRPC/src/core/lib/iomgr/load_file.cc similarity index 75% rename from Sources/CgRPC/src/core/lib/iomgr/load_file.c rename to Sources/CgRPC/src/core/lib/iomgr/load_file.cc index 0b4d41ea4..f6431d0f1 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/load_file.c +++ b/Sources/CgRPC/src/core/lib/iomgr/load_file.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/load_file.h" #include @@ -25,30 +27,30 @@ #include #include -#include "src/core/lib/support/block_annotate.h" -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/block_annotate.h" -grpc_error *grpc_load_file(const char *filename, int add_null_terminator, - grpc_slice *output) { - unsigned char *contents = NULL; +grpc_error* grpc_load_file(const char* filename, int add_null_terminator, + grpc_slice* output) { + unsigned char* contents = nullptr; size_t contents_size = 0; grpc_slice result = grpc_empty_slice(); - FILE *file; + FILE* file; size_t bytes_read = 0; - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; GRPC_SCHEDULING_START_BLOCKING_REGION; file = fopen(filename, "rb"); - if (file == NULL) { + if (file == nullptr) { error = GRPC_OS_ERROR(errno, "fopen"); goto end; } fseek(file, 0, SEEK_END); /* Converting to size_t on the assumption that it will not fail */ - contents_size = (size_t)ftell(file); + contents_size = static_cast(ftell(file)); fseek(file, 0, SEEK_SET); - contents = (unsigned char *)gpr_malloc(contents_size + - 
(add_null_terminator ? 1 : 0)); + contents = static_cast( + gpr_malloc(contents_size + (add_null_terminator ? 1 : 0))); bytes_read = fread(contents, 1, contents_size, file); if (bytes_read < contents_size) { error = GRPC_OS_ERROR(errno, "fread"); @@ -62,9 +64,9 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator, end: *output = result; - if (file != NULL) fclose(file); + if (file != nullptr) fclose(file); if (error != GRPC_ERROR_NONE) { - grpc_error *error_out = + grpc_error* error_out = grpc_error_set_str(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Failed to load file", &error, 1), GRPC_ERROR_STR_FILENAME, @@ -73,6 +75,6 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator, GRPC_ERROR_UNREF(error); error = error_out; } - GRPC_SCHEDULING_END_BLOCKING_REGION; + GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX; return error; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/load_file.h b/Sources/CgRPC/src/core/lib/iomgr/load_file.h index db1ffb3d6..1cb2b5de7 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/load_file.h +++ b/Sources/CgRPC/src/core/lib/iomgr/load_file.h @@ -19,23 +19,17 @@ #ifndef GRPC_CORE_LIB_IOMGR_LOAD_FILE_H #define GRPC_CORE_LIB_IOMGR_LOAD_FILE_H +#include + #include #include #include "src/core/lib/iomgr/error.h" -#ifdef __cplusplus -extern "C" { -#endif - /* Loads the content of a file into a slice. add_null_terminator will add a NULL terminator if non-zero. */ -grpc_error *grpc_load_file(const char *filename, int add_null_terminator, - grpc_slice *slice); - -#ifdef __cplusplus -} -#endif +grpc_error* grpc_load_file(const char* filename, int add_null_terminator, + grpc_slice* slice); #endif /* GRPC_CORE_LIB_IOMGR_LOAD_FILE_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/lockfree_event.c b/Sources/CgRPC/src/core/lib/iomgr/lockfree_event.cc similarity index 56% rename from Sources/CgRPC/src/core/lib/iomgr/lockfree_event.c rename to Sources/CgRPC/src/core/lib/iomgr/lockfree_event.cc index f967b22ba..5b6b79fa9 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/lockfree_event.c +++ b/Sources/CgRPC/src/core/lib/iomgr/lockfree_event.cc @@ -16,103 +16,111 @@ * */ +#include + #include "src/core/lib/iomgr/lockfree_event.h" #include #include "src/core/lib/debug/trace.h" -extern grpc_tracer_flag grpc_polling_trace; +extern grpc_core::TraceFlag grpc_polling_trace; /* 'state' holds the to call when the fd is readable or writable respectively. It can contain one of the following values: - CLOSURE_READY : The fd has an I/O event of interest but there is no + kClosureReady : The fd has an I/O event of interest but there is no closure yet to execute - CLOSURE_NOT_READY : The fd has no I/O event of interest + kClosureNotReady : The fd has no I/O event of interest closure ptr : The closure to be executed when the fd has an I/O event of interest - shutdown_error | FD_SHUTDOWN_BIT : - 'shutdown_error' field ORed with FD_SHUTDOWN_BIT. + shutdown_error | kShutdownBit : + 'shutdown_error' field ORed with kShutdownBit. This indicates that the fd is shutdown. Since all memory allocations are word-aligned, the lower two bits of the shutdown_error pointer are always 0. 
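/* A minimal sketch (helper names assumed, mirroring the constants below): the
   word-alignment guarantee described above is what lets a single gpr_atm word
   carry the whole state -- the low bit tags a shutdown error, everything else
   is either a small sentinel or a closure pointer. */
static gpr_atm tag_shutdown(grpc_error* err) {
  return (gpr_atm)err | (gpr_atm)1;  /* 1 == kShutdownBit */
}
static grpc_error* untag_shutdown(gpr_atm state) {
  return (grpc_error*)(state & ~(gpr_atm)1);
}
static bool state_is_shutdown(gpr_atm state) { return (state & 1) != 0; }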
So - it is safe to OR these with FD_SHUTDOWN_BIT + it is safe to OR these with kShutdownBit Valid state transitions: - <-----3------ CLOSURE_NOT_READY ----1----> CLOSURE_READY + <-----3------ kClosureNotReady -----1-------> kClosureReady | | ^ | ^ | | | | | | | | | | +--------------4----------+ 6 +---------2---------------+ | | | | | v | - +-----5-------> [shutdown_error | FD_SHUTDOWN_BIT] <----7---------+ + +-----5-------> [shutdown_error | kShutdownBit] <-------7---------+ - For 1, 4 : See grpc_lfev_set_ready() function - For 2, 3 : See grpc_lfev_notify_on() function - For 5,6,7: See grpc_lfev_set_shutdown() function */ + For 1, 4 : See SetReady() function + For 2, 3 : See NotifyOn() function + For 5,6,7: See SetShutdown() function */ -#define CLOSURE_NOT_READY ((gpr_atm)0) -#define CLOSURE_READY ((gpr_atm)2) +namespace grpc_core { -#define FD_SHUTDOWN_BIT ((gpr_atm)1) +LockfreeEvent::LockfreeEvent() { InitEvent(); } -void grpc_lfev_init(gpr_atm *state) { - gpr_atm_no_barrier_store(state, CLOSURE_NOT_READY); -} +void LockfreeEvent::InitEvent() { + /* Perform an atomic store to start the state machine. -void grpc_lfev_destroy(gpr_atm *state) { - gpr_atm curr = gpr_atm_no_barrier_load(state); - if (curr & FD_SHUTDOWN_BIT) { - GRPC_ERROR_UNREF((grpc_error *)(curr & ~FD_SHUTDOWN_BIT)); - } else { - GPR_ASSERT(curr == CLOSURE_NOT_READY || curr == CLOSURE_READY); - } + Note carefully that LockfreeEvent *MAY* be used whilst in a destroyed + state, while a file descriptor is on a freelist. In such a state it may + be SetReady'd, and so we need to perform an atomic operation here to + ensure no races */ + gpr_atm_no_barrier_store(&state_, kClosureNotReady); } -bool grpc_lfev_is_shutdown(gpr_atm *state) { - gpr_atm curr = gpr_atm_no_barrier_load(state); - return (curr & FD_SHUTDOWN_BIT) != 0; +void LockfreeEvent::DestroyEvent() { + gpr_atm curr; + do { + curr = gpr_atm_no_barrier_load(&state_); + if (curr & kShutdownBit) { + GRPC_ERROR_UNREF((grpc_error*)(curr & ~kShutdownBit)); + } else { + GPR_ASSERT(curr == kClosureNotReady || curr == kClosureReady); + } + /* we CAS in a shutdown, no error value here. If this event is interacted + with post-deletion (see the note in the constructor) we want the bit + pattern to prevent error retention in a deleted object */ + } while (!gpr_atm_no_barrier_cas(&state_, curr, + kShutdownBit /* shutdown, no error */)); } -void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_closure *closure, const char *variable) { +void LockfreeEvent::NotifyOn(grpc_closure* closure) { while (true) { - gpr_atm curr = gpr_atm_no_barrier_load(state); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "lfev_notify_on[%s]: %p curr=%p closure=%p", variable, - state, (void *)curr, closure); + gpr_atm curr = gpr_atm_no_barrier_load(&state_); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_ERROR, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this, + (void*)curr, closure); } switch (curr) { - case CLOSURE_NOT_READY: { - /* CLOSURE_NOT_READY -> . + case kClosureNotReady: { + /* kClosureNotReady -> . We're guaranteed by API that there's an acquire barrier before here, so there's no need to double-dip and this can be a release-only. The release itself pairs with the acquire half of a set_ready full barrier. */ - if (gpr_atm_rel_cas(state, CLOSURE_NOT_READY, (gpr_atm)closure)) { + if (gpr_atm_rel_cas(&state_, kClosureNotReady, (gpr_atm)closure)) { return; /* Successful. 
Return */ } break; /* retry */ } - case CLOSURE_READY: { - /* Change the state to CLOSURE_NOT_READY. Schedule the closure if + case kClosureReady: { + /* Change the state to kClosureNotReady. Schedule the closure if successful. If not, the state most likely transitioned to shutdown. We should retry. This can be a no-barrier cas since the state is being transitioned to - CLOSURE_NOT_READY; set_ready and set_shutdown do not schedule any + kClosureNotReady; set_ready and set_shutdown do not schedule any closure when transitioning out of CLOSURE_NO_READY state (i.e there is no other code that needs to 'happen-after' this) */ - if (gpr_atm_no_barrier_cas(state, CLOSURE_READY, CLOSURE_NOT_READY)) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + if (gpr_atm_no_barrier_cas(&state_, kClosureReady, kClosureNotReady)) { + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); return; /* Successful. Return */ } @@ -123,9 +131,9 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, /* 'curr' is either a closure or the fd is shutdown(in which case 'curr' contains a pointer to the shutdown-error). If the fd is shutdown, schedule the closure with the shutdown error */ - if ((curr & FD_SHUTDOWN_BIT) > 0) { - grpc_error *shutdown_err = (grpc_error *)(curr & ~FD_SHUTDOWN_BIT); - GRPC_CLOSURE_SCHED(exec_ctx, closure, + if ((curr & kShutdownBit) > 0) { + grpc_error* shutdown_err = (grpc_error*)(curr & ~kShutdownBit); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "FD Shutdown", &shutdown_err, 1)); return; @@ -133,7 +141,8 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, /* There is already a closure!. This indicates a bug in the code */ gpr_log(GPR_ERROR, - "notify_on called with a previous callback still pending"); + "LockfreeEvent::NotifyOn: notify_on called with a previous " + "callback still pending"); abort(); } } @@ -142,22 +151,21 @@ void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, GPR_UNREACHABLE_CODE(return ); } -bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_error *shutdown_err) { - gpr_atm new_state = (gpr_atm)shutdown_err | FD_SHUTDOWN_BIT; +bool LockfreeEvent::SetShutdown(grpc_error* shutdown_err) { + gpr_atm new_state = (gpr_atm)shutdown_err | kShutdownBit; while (true) { - gpr_atm curr = gpr_atm_no_barrier_load(state); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "lfev_set_shutdown: %p curr=%p err=%s", state, - (void *)curr, grpc_error_string(shutdown_err)); + gpr_atm curr = gpr_atm_no_barrier_load(&state_); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_ERROR, "LockfreeEvent::SetShutdown: %p curr=%p err=%s", + &state_, (void*)curr, grpc_error_string(shutdown_err)); } switch (curr) { - case CLOSURE_READY: - case CLOSURE_NOT_READY: + case kClosureReady: + case kClosureNotReady: /* Need a full barrier here so that the initial load in notify_on doesn't need a barrier */ - if (gpr_atm_full_cas(state, curr, new_state)) { + if (gpr_atm_full_cas(&state_, curr, new_state)) { return true; /* early out */ } break; /* retry */ @@ -166,7 +174,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, /* 'curr' is either a closure or the fd is already shutdown */ /* If fd is already shutdown, we are done */ - if ((curr & FD_SHUTDOWN_BIT) > 0) { + if ((curr & kShutdownBit) > 0) { GRPC_ERROR_UNREF(shutdown_err); return false; } @@ -176,8 +184,8 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, Needs an acquire to pair with setting 
the closure (and get a happens-after on that edge), and a release to pair with anything loading the shutdown state. */ - if (gpr_atm_full_cas(state, curr, new_state)) { - GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr, + if (gpr_atm_full_cas(&state_, curr, new_state)) { + GRPC_CLOSURE_SCHED((grpc_closure*)curr, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "FD Shutdown", &shutdown_err, 1)); return true; @@ -193,26 +201,25 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, GPR_UNREACHABLE_CODE(return false); } -void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state, - const char *variable) { +void LockfreeEvent::SetReady() { while (true) { - gpr_atm curr = gpr_atm_no_barrier_load(state); + gpr_atm curr = gpr_atm_no_barrier_load(&state_); - if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "lfev_set_ready[%s]: %p curr=%p", variable, state, - (void *)curr); + if (grpc_polling_trace.enabled()) { + gpr_log(GPR_ERROR, "LockfreeEvent::SetReady: %p curr=%p", &state_, + (void*)curr); } switch (curr) { - case CLOSURE_READY: { + case kClosureReady: { /* Already ready. We are done here */ return; } - case CLOSURE_NOT_READY: { + case kClosureNotReady: { /* No barrier required as we're transitioning to a state that does not involve a closure */ - if (gpr_atm_no_barrier_cas(state, CLOSURE_NOT_READY, CLOSURE_READY)) { + if (gpr_atm_no_barrier_cas(&state_, kClosureNotReady, kClosureReady)) { return; /* early out */ } break; /* retry */ @@ -220,15 +227,15 @@ void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state, default: { /* 'curr' is either a closure or the fd is shutdown */ - if ((curr & FD_SHUTDOWN_BIT) > 0) { + if ((curr & kShutdownBit) > 0) { /* The fd is shutdown. Do nothing */ return; } /* Full cas: acquire pairs with this cas' release in the event of a spurious set_ready; release pairs with this or the acquire in notify_on (or set_shutdown) */ - else if (gpr_atm_full_cas(state, curr, CLOSURE_NOT_READY)) { - GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)curr, GRPC_ERROR_NONE); + else if (gpr_atm_full_cas(&state_, curr, kClosureNotReady)) { + GRPC_CLOSURE_SCHED((grpc_closure*)curr, GRPC_ERROR_NONE); return; } /* else the state changed again (only possible by either a racing @@ -239,3 +246,5 @@ void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state, } } } + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/iomgr/lockfree_event.h b/Sources/CgRPC/src/core/lib/iomgr/lockfree_event.h index 6a14a0f3b..d6a6c226b 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/lockfree_event.h +++ b/Sources/CgRPC/src/core/lib/iomgr/lockfree_event.h @@ -21,20 +21,52 @@ /* Lock free event notification for file descriptors */ +#include + #include -#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/closure.h" + +namespace grpc_core { + +class LockfreeEvent { + public: + LockfreeEvent(); + + LockfreeEvent(const LockfreeEvent&) = delete; + LockfreeEvent& operator=(const LockfreeEvent&) = delete; + + // These methods are used to initialize and destroy the internal state. These + // cannot be done in constructor and destructor because SetReady may be called + // when the event is destroyed and put in a freelist. + void InitEvent(); + void DestroyEvent(); + + // Returns true if fd has been shutdown, false otherwise. + bool IsShutdown() const { + return (gpr_atm_no_barrier_load(&state_) & kShutdownBit) != 0; + } + + // Schedules \a closure when the event is received (see SetReady()) or the + // shutdown state has been set. 
Note that the event may have already been + // received, in which case the closure would be scheduled immediately. + // If the shutdown state has already been set, then \a closure is scheduled + // with the shutdown error. + void NotifyOn(grpc_closure* closure); + + // Sets the shutdown state. If a closure had been provided by NotifyOn and has + // not yet been scheduled, it will be scheduled with \a error. + bool SetShutdown(grpc_error* error); + + // Signals that the event has been received. + void SetReady(); + + private: + enum State { kClosureNotReady = 0, kClosureReady = 2, kShutdownBit = 1 }; -void grpc_lfev_init(gpr_atm *state); -void grpc_lfev_destroy(gpr_atm *state); -bool grpc_lfev_is_shutdown(gpr_atm *state); + gpr_atm state_; +}; -void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_closure *closure, const char *variable); -/* Returns true on first successful shutdown */ -bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_error *shutdown_err); -void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state, - const char *variable); +} // namespace grpc_core #endif /* GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/nameser.h b/Sources/CgRPC/src/core/lib/iomgr/nameser.h index daed6de51..22a00cdab 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/nameser.h +++ b/Sources/CgRPC/src/core/lib/iomgr/nameser.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_NAMESER_H #define GRPC_CORE_LIB_IOMGR_NAMESER_H +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_HAVE_ARPA_NAMESER diff --git a/Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.c b/Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.cc similarity index 80% rename from Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.c rename to Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.cc index 4e5c1d540..d4b7f4a57 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.c +++ b/Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.cc @@ -16,7 +16,10 @@ * */ +#include + #include "src/core/lib/iomgr/endpoint.h" +#include "src/core/lib/iomgr/network_status_tracker.h" void grpc_network_status_shutdown(void) {} @@ -26,8 +29,8 @@ void grpc_network_status_init(void) { void grpc_destroy_network_status_monitor() {} -void grpc_network_status_register_endpoint(grpc_endpoint *ep) { (void)ep; } +void grpc_network_status_register_endpoint(grpc_endpoint* ep) { (void)ep; } -void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) { (void)ep; } +void grpc_network_status_unregister_endpoint(grpc_endpoint* ep) { (void)ep; } void grpc_network_status_shutdown_all_endpoints() {} diff --git a/Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.h b/Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.h index c0295c1f7..198877f60 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.h +++ b/Sources/CgRPC/src/core/lib/iomgr/network_status_tracker.h @@ -18,13 +18,15 @@ #ifndef GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H #define GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H +#include + #include "src/core/lib/iomgr/endpoint.h" void grpc_network_status_init(void); void grpc_network_status_shutdown(void); -void grpc_network_status_register_endpoint(grpc_endpoint *ep); -void grpc_network_status_unregister_endpoint(grpc_endpoint *ep); +void grpc_network_status_register_endpoint(grpc_endpoint* ep); +void grpc_network_status_unregister_endpoint(grpc_endpoint* ep); void grpc_network_status_shutdown_all_endpoints(); 
#endif /* GRPC_CORE_LIB_IOMGR_NETWORK_STATUS_TRACKER_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/polling_entity.c b/Sources/CgRPC/src/core/lib/iomgr/polling_entity.cc similarity index 57% rename from Sources/CgRPC/src/core/lib/iomgr/polling_entity.c rename to Sources/CgRPC/src/core/lib/iomgr/polling_entity.cc index 8591a5518..9f164f65b 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/polling_entity.c +++ b/Sources/CgRPC/src/core/lib/iomgr/polling_entity.cc @@ -16,13 +16,15 @@ * */ +#include + #include #include #include "src/core/lib/iomgr/polling_entity.h" grpc_polling_entity grpc_polling_entity_create_from_pollset_set( - grpc_pollset_set *pollset_set) { + grpc_pollset_set* pollset_set) { grpc_polling_entity pollent; pollent.pollent.pollset_set = pollset_set; pollent.tag = GRPC_POLLS_POLLSET_SET; @@ -30,58 +32,54 @@ grpc_polling_entity grpc_polling_entity_create_from_pollset_set( } grpc_polling_entity grpc_polling_entity_create_from_pollset( - grpc_pollset *pollset) { + grpc_pollset* pollset) { grpc_polling_entity pollent; pollent.pollent.pollset = pollset; pollent.tag = GRPC_POLLS_POLLSET; return pollent; } -grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent) { +grpc_pollset* grpc_polling_entity_pollset(grpc_polling_entity* pollent) { if (pollent->tag == GRPC_POLLS_POLLSET) { return pollent->pollent.pollset; } - return NULL; + return nullptr; } -grpc_pollset_set *grpc_polling_entity_pollset_set( - grpc_polling_entity *pollent) { +grpc_pollset_set* grpc_polling_entity_pollset_set( + grpc_polling_entity* pollent) { if (pollent->tag == GRPC_POLLS_POLLSET_SET) { return pollent->pollent.pollset_set; } - return NULL; + return nullptr; } -bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent) { +bool grpc_polling_entity_is_empty(const grpc_polling_entity* pollent) { return pollent->tag == GRPC_POLLS_NONE; } -void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_polling_entity *pollent, - grpc_pollset_set *pss_dst) { +void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent, + grpc_pollset_set* pss_dst) { if (pollent->tag == GRPC_POLLS_POLLSET) { - GPR_ASSERT(pollent->pollent.pollset != NULL); - grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset); + GPR_ASSERT(pollent->pollent.pollset != nullptr); + grpc_pollset_set_add_pollset(pss_dst, pollent->pollent.pollset); } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) { - GPR_ASSERT(pollent->pollent.pollset_set != NULL); - grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst, - pollent->pollent.pollset_set); + GPR_ASSERT(pollent->pollent.pollset_set != nullptr); + grpc_pollset_set_add_pollset_set(pss_dst, pollent->pollent.pollset_set); } else { gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag); abort(); } } -void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_polling_entity *pollent, - grpc_pollset_set *pss_dst) { +void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent, + grpc_pollset_set* pss_dst) { if (pollent->tag == GRPC_POLLS_POLLSET) { - GPR_ASSERT(pollent->pollent.pollset != NULL); - grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset); + GPR_ASSERT(pollent->pollent.pollset != nullptr); + grpc_pollset_set_del_pollset(pss_dst, pollent->pollent.pollset); } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) { - GPR_ASSERT(pollent->pollent.pollset_set != NULL); - grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst, - pollent->pollent.pollset_set); + 
GPR_ASSERT(pollent->pollent.pollset_set != nullptr); + grpc_pollset_set_del_pollset_set(pss_dst, pollent->pollent.pollset_set); } else { gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag); abort(); diff --git a/Sources/CgRPC/src/core/lib/iomgr/polling_entity.h b/Sources/CgRPC/src/core/lib/iomgr/polling_entity.h index a161e1fea..a95e08524 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/polling_entity.h +++ b/Sources/CgRPC/src/core/lib/iomgr/polling_entity.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H #define GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H +#include + #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/pollset_set.h" @@ -34,34 +36,33 @@ typedef enum grpc_pollset_tag { typedef struct grpc_polling_entity { union { - grpc_pollset *pollset; - grpc_pollset_set *pollset_set; + grpc_pollset* pollset; + grpc_pollset_set* pollset_set; } pollent; grpc_pollset_tag tag; } grpc_polling_entity; grpc_polling_entity grpc_polling_entity_create_from_pollset_set( - grpc_pollset_set *pollset_set); + grpc_pollset_set* pollset_set); grpc_polling_entity grpc_polling_entity_create_from_pollset( - grpc_pollset *pollset); + grpc_pollset* pollset); /** If \a pollent contains a pollset, return it. Otherwise, return NULL */ -grpc_pollset *grpc_polling_entity_pollset(grpc_polling_entity *pollent); +grpc_pollset* grpc_polling_entity_pollset(grpc_polling_entity* pollent); /** If \a pollent contains a pollset_set, return it. Otherwise, return NULL */ -grpc_pollset_set *grpc_polling_entity_pollset_set(grpc_polling_entity *pollent); +grpc_pollset_set* grpc_polling_entity_pollset_set(grpc_polling_entity* pollent); -bool grpc_polling_entity_is_empty(const grpc_polling_entity *pollent); +bool grpc_polling_entity_is_empty(const grpc_polling_entity* pollent); /** Add the pollset or pollset_set in \a pollent to the destination pollset_set * \a * pss_dst */ -void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_polling_entity *pollent, - grpc_pollset_set *pss_dst); +void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent, + grpc_pollset_set* pss_dst); /** Delete the pollset or pollset_set in \a pollent from the destination * pollset_set \a * pss_dst */ -void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_polling_entity *pollent, - grpc_pollset_set *pss_dst); +void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent, + grpc_pollset_set* pss_dst); + #endif /* GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset.cc b/Sources/CgRPC/src/core/lib/iomgr/pollset.cc new file mode 100644 index 000000000..ebfef1dbc --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset.cc @@ -0,0 +1,56 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
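// A minimal sketch (hypothetical call-to-channel wiring, not from this patch):
// grpc_polling_entity lets code that merely holds "something pollable" park it
// on a destination pollset_set without caring whether it is a pollset or a
// pollset_set; the tag picks the right grpc_pollset_set_add_* call.
void park_call_on_channel(grpc_pollset* call_pollset,
                          grpc_pollset_set* channel_pss) {
  grpc_polling_entity pollent =
      grpc_polling_entity_create_from_pollset(call_pollset);
  grpc_polling_entity_add_to_pollset_set(&pollent, channel_pss);
  // ... issue I/O that needs the channel's pollers ...
  grpc_polling_entity_del_from_pollset_set(&pollent, channel_pss);
}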
+ * + */ + +#include + +#include "src/core/lib/iomgr/pollset.h" + +grpc_pollset_vtable* grpc_pollset_impl; + +void grpc_set_pollset_vtable(grpc_pollset_vtable* vtable) { + grpc_pollset_impl = vtable; +} + +void grpc_pollset_global_init() { grpc_pollset_impl->global_init(); } + +void grpc_pollset_global_shutdown() { grpc_pollset_impl->global_shutdown(); } + +void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu) { + grpc_pollset_impl->init(pollset, mu); +} + +void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { + grpc_pollset_impl->shutdown(pollset, closure); +} + +void grpc_pollset_destroy(grpc_pollset* pollset) { + grpc_pollset_impl->destroy(pollset); +} + +grpc_error* grpc_pollset_work(grpc_pollset* pollset, + grpc_pollset_worker** worker, + grpc_millis deadline) { + return grpc_pollset_impl->work(pollset, worker, deadline); +} + +grpc_error* grpc_pollset_kick(grpc_pollset* pollset, + grpc_pollset_worker* specific_worker) { + return grpc_pollset_impl->kick(pollset, specific_worker); +} + +size_t grpc_pollset_size(void) { return grpc_pollset_impl->pollset_size(); } diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset.h b/Sources/CgRPC/src/core/lib/iomgr/pollset.h index a0f6b3a9d..28472b360 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/pollset.h +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset.h @@ -20,14 +20,13 @@ #define GRPC_CORE_LIB_IOMGR_POLLSET_H #include + #include #include #include "src/core/lib/iomgr/exec_ctx.h" -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_fd_refcount; -#endif +extern grpc_core::DebugOnlyTraceFlag grpc_trace_fd_refcount; /* A grpc_pollset is a set of file descriptors that a higher level item is interested in. For example: @@ -39,14 +38,31 @@ extern grpc_tracer_flag grpc_trace_fd_refcount; typedef struct grpc_pollset grpc_pollset; typedef struct grpc_pollset_worker grpc_pollset_worker; +typedef struct grpc_pollset_vtable { + void (*global_init)(void); + void (*global_shutdown)(void); + void (*init)(grpc_pollset* pollset, gpr_mu** mu); + void (*shutdown)(grpc_pollset* pollset, grpc_closure* closure); + void (*destroy)(grpc_pollset* pollset); + grpc_error* (*work)(grpc_pollset* pollset, grpc_pollset_worker** worker, + grpc_millis deadline); + grpc_error* (*kick)(grpc_pollset* pollset, + grpc_pollset_worker* specific_worker); + size_t (*pollset_size)(void); +} grpc_pollset_vtable; + +void grpc_set_pollset_vtable(grpc_pollset_vtable* vtable); + +void grpc_pollset_global_init(void); +void grpc_pollset_global_shutdown(void); + size_t grpc_pollset_size(void); /* Initialize a pollset: assumes *pollset contains all zeros */ -void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu); +void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu); /* Begin shutting down the pollset, and call closure when done. * pollset's mutex must be held */ -void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure); -void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); +void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure); +void grpc_pollset_destroy(grpc_pollset* pollset); /* Do some work on a pollset. 
May involve invoking asynchronous callbacks, or actually polling file @@ -70,14 +86,14 @@ void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); May call grpc_closure_list_run on grpc_closure_list, without holding the pollset lock */ -grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker, gpr_timespec now, - gpr_timespec deadline) GRPC_MUST_USE_RESULT; +grpc_error* grpc_pollset_work(grpc_pollset* pollset, + grpc_pollset_worker** worker, + grpc_millis deadline) GRPC_MUST_USE_RESULT; /* Break one polling thread out of polling work for this pollset. If specific_worker is non-NULL, then kick that worker. */ -grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *specific_worker) +grpc_error* grpc_pollset_kick(grpc_pollset* pollset, + grpc_pollset_worker* specific_worker) GRPC_MUST_USE_RESULT; #endif /* GRPC_CORE_LIB_IOMGR_POLLSET_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_custom.cc b/Sources/CgRPC/src/core/lib/iomgr/pollset_custom.cc new file mode 100644 index 000000000..04bd10405 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_custom.cc @@ -0,0 +1,106 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#include +#include + +#include +#include +#include + +#include "src/core/lib/iomgr/closure.h" +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/pollset.h" +#include "src/core/lib/iomgr/pollset_custom.h" +#include "src/core/lib/iomgr/timer.h" + +#include "src/core/lib/debug/trace.h" + +static grpc_custom_poller_vtable* poller_vtable; + +struct grpc_pollset { + gpr_mu mu; +}; + +static size_t pollset_size() { return sizeof(grpc_pollset); } + +static void pollset_global_init() { poller_vtable->init(); } + +static void pollset_global_shutdown() { poller_vtable->shutdown(); } + +static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + gpr_mu_init(&pollset->mu); + *mu = &pollset->mu; +} + +static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); +} + +static void pollset_destroy(grpc_pollset* pollset) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + gpr_mu_destroy(&pollset->mu); +} + +static grpc_error* pollset_work(grpc_pollset* pollset, + grpc_pollset_worker** worker_hdl, + grpc_millis deadline) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + gpr_mu_unlock(&pollset->mu); + grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + size_t timeout = 0; + if (deadline > now) { + timeout = deadline - now; + } + // We yield here because the poll() call might yield + // control back to the application + grpc_core::ExecCtx* curr = grpc_core::ExecCtx::Get(); + grpc_core::ExecCtx::Set(nullptr); + poller_vtable->poll(timeout); + grpc_core::ExecCtx::Set(curr); + grpc_core::ExecCtx::Get()->InvalidateNow(); + if (grpc_core::ExecCtx::Get()->HasWork()) { + grpc_core::ExecCtx::Get()->Flush(); + } + gpr_mu_lock(&pollset->mu); + return GRPC_ERROR_NONE; +} + +static grpc_error* pollset_kick(grpc_pollset* pollset, + grpc_pollset_worker* specific_worker) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + poller_vtable->kick(); + return GRPC_ERROR_NONE; +} + +grpc_pollset_vtable custom_pollset_vtable = { + pollset_global_init, pollset_global_shutdown, + pollset_init, pollset_shutdown, + pollset_destroy, pollset_work, + pollset_kick, pollset_size}; + +void grpc_custom_pollset_init(grpc_custom_poller_vtable* vtable) { + poller_vtable = vtable; + grpc_set_pollset_vtable(&custom_pollset_vtable); +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_custom.h b/Sources/CgRPC/src/core/lib/iomgr/pollset_custom.h new file mode 100644 index 000000000..9e2027f7f --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_custom.h @@ -0,0 +1,35 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_POLLSET_CUSTOM_H +#define GRPC_CORE_LIB_IOMGR_POLLSET_CUSTOM_H + +#include + +#include + +typedef struct grpc_custom_poller_vtable { + void (*init)(); + void (*poll)(size_t timeout_ms); + void (*kick)(); + void (*shutdown)(); +} grpc_custom_poller_vtable; + +void grpc_custom_pollset_init(grpc_custom_poller_vtable* vtable); + +#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_CUSTOM_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_set.cc b/Sources/CgRPC/src/core/lib/iomgr/pollset_set.cc new file mode 100644 index 000000000..42a647a73 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_set.cc @@ -0,0 +1,55 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/pollset_set.h" + +grpc_pollset_set_vtable* grpc_pollset_set_impl; + +void grpc_set_pollset_set_vtable(grpc_pollset_set_vtable* vtable) { + grpc_pollset_set_impl = vtable; +} + +grpc_pollset_set* grpc_pollset_set_create() { + return grpc_pollset_set_impl->create(); +} + +void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) { + grpc_pollset_set_impl->destroy(pollset_set); +} + +void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) { + grpc_pollset_set_impl->add_pollset(pollset_set, pollset); +} + +void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) { + grpc_pollset_set_impl->del_pollset(pollset_set, pollset); +} + +void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) { + grpc_pollset_set_impl->add_pollset_set(bag, item); +} + +void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) { + grpc_pollset_set_impl->del_pollset_set(bag, item); +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_set.h b/Sources/CgRPC/src/core/lib/iomgr/pollset_set.h index 29c0f0356..d3355b8ff 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/pollset_set.h +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_set.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_POLLSET_SET_H #define GRPC_CORE_LIB_IOMGR_POLLSET_SET_H +#include + #include "src/core/lib/iomgr/pollset.h" /* A grpc_pollset_set is a set of pollsets that are interested in an @@ -28,20 +30,26 @@ typedef struct grpc_pollset_set grpc_pollset_set; -grpc_pollset_set *grpc_pollset_set_create(void); -void grpc_pollset_set_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set); -void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset); -void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *pollset_set, - grpc_pollset *pollset); -void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item); -void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_pollset_set *bag, - grpc_pollset_set *item); +typedef struct grpc_pollset_set_vtable { + grpc_pollset_set* (*create)(void); + 
void (*destroy)(grpc_pollset_set* pollset_set); + void (*add_pollset)(grpc_pollset_set* pollset_set, grpc_pollset* pollset); + void (*del_pollset)(grpc_pollset_set* pollset_set, grpc_pollset* pollset); + void (*add_pollset_set)(grpc_pollset_set* bag, grpc_pollset_set* item); + void (*del_pollset_set)(grpc_pollset_set* bag, grpc_pollset_set* item); +} grpc_pollset_set_vtable; + +void grpc_set_pollset_set_vtable(grpc_pollset_set_vtable* vtable); + +grpc_pollset_set* grpc_pollset_set_create(void); +void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set); +void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset); +void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset); +void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item); +void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item); #endif /* GRPC_CORE_LIB_IOMGR_POLLSET_SET_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_set_custom.cc b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_custom.cc new file mode 100644 index 000000000..b1ee66020 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_custom.cc @@ -0,0 +1,48 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#include "src/core/lib/iomgr/pollset_set.h" + +grpc_pollset_set* pollset_set_create(void) { + return (grpc_pollset_set*)((intptr_t)0xdeafbeef); +} + +void pollset_set_destroy(grpc_pollset_set* pollset_set) {} + +void pollset_set_add_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) {} + +void pollset_set_del_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) {} + +void pollset_set_add_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) {} + +void pollset_set_del_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) {} + +static grpc_pollset_set_vtable vtable = { + pollset_set_create, pollset_set_destroy, + pollset_set_add_pollset, pollset_set_del_pollset, + pollset_set_add_pollset_set, pollset_set_del_pollset_set}; + +void grpc_custom_pollset_set_init() { grpc_set_pollset_set_vtable(&vtable); } diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_set_custom.h b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_custom.h new file mode 100644 index 000000000..80e19a1fe --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_custom.h @@ -0,0 +1,26 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_POLLSET_SET_CUSTOM_H +#define GRPC_CORE_LIB_IOMGR_POLLSET_SET_CUSTOM_H + +#include + +void grpc_custom_pollset_set_init(); + +#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_SET_CUSTOM_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_set_uv.c b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_uv.c deleted file mode 100644 index 90186edbb..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/pollset_set_uv.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_UV - -#include "src/core/lib/iomgr/pollset_set.h" - -grpc_pollset_set* grpc_pollset_set_create(void) { - return (grpc_pollset_set*)((intptr_t)0xdeafbeef); -} - -void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set) {} - -void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set, - grpc_pollset* pollset) {} - -void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set, - grpc_pollset* pollset) {} - -void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* bag, - grpc_pollset_set* item) {} - -void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* bag, - grpc_pollset_set* item) {} - -#endif /* GRPC_UV */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.c b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.c deleted file mode 100644 index 2105a47ad..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.c +++ /dev/null @@ -1,49 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_WINSOCK_SOCKET - -#include "src/core/lib/iomgr/pollset_set_windows.h" - -grpc_pollset_set* grpc_pollset_set_create(void) { - return (grpc_pollset_set*)((intptr_t)0xdeafbeef); -} - -void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set) {} - -void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set, - grpc_pollset* pollset) {} - -void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* pollset_set, - grpc_pollset* pollset) {} - -void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* bag, - grpc_pollset_set* item) {} - -void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx, - grpc_pollset_set* bag, - grpc_pollset_set* item) {} - -#endif /* GRPC_WINSOCK_SOCKET */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.cc b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.cc new file mode 100644 index 000000000..bb9e7f5d2 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.cc @@ -0,0 +1,51 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_WINSOCK_SOCKET + +#include "src/core/lib/iomgr/pollset_set_windows.h" + +static grpc_pollset_set* pollset_set_create(void) { + return (grpc_pollset_set*)((intptr_t)0xdeafbeef); +} + +static void pollset_set_destroy(grpc_pollset_set* pollset_set) {} + +static void pollset_set_add_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) {} + +static void pollset_set_del_pollset(grpc_pollset_set* pollset_set, + grpc_pollset* pollset) {} + +static void pollset_set_add_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) {} + +static void pollset_set_del_pollset_set(grpc_pollset_set* bag, + grpc_pollset_set* item) {} + +grpc_pollset_set_vtable grpc_windows_pollset_set_vtable = { + pollset_set_create, pollset_set_destroy, + pollset_set_add_pollset, pollset_set_del_pollset, + pollset_set_add_pollset_set, pollset_set_del_pollset_set}; + +#endif /* GRPC_WINSOCK_SOCKET */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.h b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.h index 1173f760a..5ac9d1823 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.h +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_set_windows.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_POLLSET_SET_WINDOWS_H #define GRPC_CORE_LIB_IOMGR_POLLSET_SET_WINDOWS_H +#include + #include "src/core/lib/iomgr/pollset_set.h" #endif /* GRPC_CORE_LIB_IOMGR_POLLSET_SET_WINDOWS_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_uv.c b/Sources/CgRPC/src/core/lib/iomgr/pollset_uv.c deleted file mode 100644 index 2651325e2..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/pollset_uv.c +++ /dev/null @@ -1,155 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_UV - -#include - -#include - -#include -#include -#include - -#include "src/core/lib/iomgr/iomgr_uv.h" -#include "src/core/lib/iomgr/pollset.h" -#include "src/core/lib/iomgr/pollset_uv.h" - -#include "src/core/lib/debug/trace.h" - -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_fd_refcount = - GRPC_TRACER_INITIALIZER(false, "fd_refcount"); -#endif - -struct grpc_pollset { - uv_timer_t timer; - int shutting_down; -}; - -/* Indicates that grpc_pollset_work should run an iteration of the UV loop - before running callbacks. This defaults to 1, and should be disabled if - grpc_pollset_work will be called within the callstack of uv_run */ -int grpc_pollset_work_run_loop; - -gpr_mu grpc_polling_mu; - -/* This is used solely to kick the uv loop, by setting a callback to be run - immediately in the next loop iteration. - Note: In the future, if there is a bug that involves missing wakeups in the - future, try adding a uv_async_t to kick the loop differently */ -uv_timer_t *dummy_uv_handle; - -size_t grpc_pollset_size() { return sizeof(grpc_pollset); } - -void dummy_timer_cb(uv_timer_t *handle) {} - -void dummy_handle_close_cb(uv_handle_t *handle) { gpr_free(handle); } - -void grpc_pollset_global_init(void) { - gpr_mu_init(&grpc_polling_mu); - dummy_uv_handle = gpr_malloc(sizeof(uv_timer_t)); - uv_timer_init(uv_default_loop(), dummy_uv_handle); - grpc_pollset_work_run_loop = 1; -} - -void grpc_pollset_global_shutdown(void) { - GRPC_UV_ASSERT_SAME_THREAD(); - gpr_mu_destroy(&grpc_polling_mu); - uv_close((uv_handle_t *)dummy_uv_handle, dummy_handle_close_cb); -} - -static void timer_run_cb(uv_timer_t *timer) {} - -static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; } - -void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { - GRPC_UV_ASSERT_SAME_THREAD(); - *mu = &grpc_polling_mu; - uv_timer_init(uv_default_loop(), &pollset->timer); - pollset->shutting_down = 0; -} - -void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { - GPR_ASSERT(!pollset->shutting_down); - GRPC_UV_ASSERT_SAME_THREAD(); - pollset->shutting_down = 1; - if (grpc_pollset_work_run_loop) { - // Drain any pending UV callbacks without blocking - uv_run(uv_default_loop(), UV_RUN_NOWAIT); - } else { - // kick the loop once - uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0); - } - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); -} - -void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { - GRPC_UV_ASSERT_SAME_THREAD(); - uv_close((uv_handle_t *)&pollset->timer, timer_close_cb); - // timer.data is a boolean indicating that the timer has finished closing - pollset->timer.data = (void *)0; - if (grpc_pollset_work_run_loop) { - while (!pollset->timer.data) { - uv_run(uv_default_loop(), UV_RUN_NOWAIT); - } - } -} - -grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker 
**worker_hdl, - gpr_timespec now, gpr_timespec deadline) { - uint64_t timeout; - GRPC_UV_ASSERT_SAME_THREAD(); - gpr_mu_unlock(&grpc_polling_mu); - if (grpc_pollset_work_run_loop) { - if (gpr_time_cmp(deadline, now) >= 0) { - timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now)); - } else { - timeout = 0; - } - /* We special-case timeout=0 so that we don't bother with the timer when - the loop won't block anyway */ - if (timeout > 0) { - uv_timer_start(&pollset->timer, timer_run_cb, timeout, 0); - /* Run until there is some I/O activity or the timer triggers. It doesn't - matter which happens */ - uv_run(uv_default_loop(), UV_RUN_ONCE); - uv_timer_stop(&pollset->timer); - } else { - uv_run(uv_default_loop(), UV_RUN_NOWAIT); - } - } - if (!grpc_closure_list_empty(exec_ctx->closure_list)) { - grpc_exec_ctx_flush(exec_ctx); - } - gpr_mu_lock(&grpc_polling_mu); - return GRPC_ERROR_NONE; -} - -grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *specific_worker) { - GRPC_UV_ASSERT_SAME_THREAD(); - uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0); - return GRPC_ERROR_NONE; -} - -#endif /* GRPC_UV */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_uv.cc b/Sources/CgRPC/src/core/lib/iomgr/pollset_uv.cc new file mode 100644 index 000000000..bade6eae6 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_uv.cc @@ -0,0 +1,93 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_UV + +#include +#include +#include "src/core/lib/iomgr/pollset_custom.h" + +#include + +/* Indicates that grpc_pollset_work should run an iteration of the UV loop + before running callbacks. 
This defaults to 1, and should be disabled if + grpc_pollset_work will be called within the callstack of uv_run */ +int grpc_pollset_work_run_loop = 1; + +static bool g_kicked = false; + +typedef struct uv_poller_handle { + uv_timer_t poll_timer; + uv_timer_t kick_timer; + int refs; +} uv_poller_handle; + +static uv_poller_handle* g_handle; + +static void init() { + g_handle = (uv_poller_handle*)gpr_malloc(sizeof(uv_poller_handle)); + g_handle->refs = 2; + uv_timer_init(uv_default_loop(), &g_handle->poll_timer); + uv_timer_init(uv_default_loop(), &g_handle->kick_timer); +} + +static void empty_timer_cb(uv_timer_t* handle) {} + +static void kick_timer_cb(uv_timer_t* handle) { g_kicked = false; } + +static void run_loop(size_t timeout) { + if (grpc_pollset_work_run_loop) { + if (timeout == 0) { + uv_run(uv_default_loop(), UV_RUN_NOWAIT); + } else { + uv_timer_start(&g_handle->poll_timer, empty_timer_cb, timeout, 0); + uv_run(uv_default_loop(), UV_RUN_ONCE); + uv_timer_stop(&g_handle->poll_timer); + } + } +} + +static void kick() { + if (!g_kicked) { + g_kicked = true; + uv_timer_start(&g_handle->kick_timer, kick_timer_cb, 0, 0); + } +} + +static void close_timer_cb(uv_handle_t* handle) { + g_handle->refs--; + if (g_handle->refs == 0) { + gpr_free(g_handle); + } +} + +static void shutdown() { + uv_close((uv_handle_t*)&g_handle->poll_timer, close_timer_cb); + uv_close((uv_handle_t*)&g_handle->kick_timer, close_timer_cb); + if (grpc_pollset_work_run_loop) { + GPR_ASSERT(uv_run(uv_default_loop(), UV_RUN_DEFAULT) == 0); + } +} + +grpc_custom_poller_vtable uv_pollset_vtable = {init, run_loop, kick, shutdown}; + +#endif /* GRPC_UV */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_uv.h b/Sources/CgRPC/src/core/lib/iomgr/pollset_uv.h deleted file mode 100644 index 566c110ca..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/pollset_uv.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#ifndef GRPC_CORE_LIB_IOMGR_POLLSET_UV_H -#define GRPC_CORE_LIB_IOMGR_POLLSET_UV_H - -extern int grpc_pollset_work_run_loop; - -void grpc_pollset_global_init(void); -void grpc_pollset_global_shutdown(void); - -#endif /* GRPC_CORE_LIB_IOMGR_POLLSET_UV_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_windows.c b/Sources/CgRPC/src/core/lib/iomgr/pollset_windows.cc similarity index 72% rename from Sources/CgRPC/src/core/lib/iomgr/pollset_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/pollset_windows.cc index eb295d3ee..e9a808d8a 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/pollset_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_windows.cc @@ -16,30 +16,29 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINSOCK_SOCKET #include -#include +#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/iomgr/iocp_windows.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/pollset_windows.h" -#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1) +#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1) -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_fd_refcount = - GRPC_TRACER_INITIALIZER(false, "fd_refcount"); -#endif +grpc_core::DebugOnlyTraceFlag grpc_trace_fd_refcount(false, "fd_refcount"); gpr_mu grpc_polling_mu; -static grpc_pollset_worker *g_active_poller; +static grpc_pollset_worker* g_active_poller; static grpc_pollset_worker g_global_root_worker; -void grpc_pollset_global_init(void) { +static void pollset_global_init(void) { gpr_mu_init(&grpc_polling_mu); g_active_poller = NULL; g_global_root_worker.links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next = @@ -47,24 +46,24 @@ void grpc_pollset_global_init(void) { &g_global_root_worker; } -void grpc_pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); } +static void pollset_global_shutdown(void) { gpr_mu_destroy(&grpc_polling_mu); } -static void remove_worker(grpc_pollset_worker *worker, +static void remove_worker(grpc_pollset_worker* worker, grpc_pollset_worker_link_type type) { worker->links[type].prev->links[type].next = worker->links[type].next; worker->links[type].next->links[type].prev = worker->links[type].prev; worker->links[type].next = worker->links[type].prev = worker; } -static int has_workers(grpc_pollset_worker *root, +static int has_workers(grpc_pollset_worker* root, grpc_pollset_worker_link_type type) { return root->links[type].next != root; } -static grpc_pollset_worker *pop_front_worker( - grpc_pollset_worker *root, grpc_pollset_worker_link_type type) { +static grpc_pollset_worker* pop_front_worker( + grpc_pollset_worker* root, grpc_pollset_worker_link_type type) { if (has_workers(root, type)) { - grpc_pollset_worker *w = root->links[type].next; + grpc_pollset_worker* w = root->links[type].next; remove_worker(w, type); return w; } else { @@ -72,45 +71,44 @@ static grpc_pollset_worker *pop_front_worker( } } -static void push_front_worker(grpc_pollset_worker *root, +static void push_front_worker(grpc_pollset_worker* root, grpc_pollset_worker_link_type type, - grpc_pollset_worker *worker) { + grpc_pollset_worker* worker) { worker->links[type].prev = root; worker->links[type].next = worker->links[type].prev->links[type].next; worker->links[type].prev->links[type].next = worker->links[type].next->links[type].prev = worker; } -size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); } +static size_t pollset_size(void) { return sizeof(grpc_pollset); } /* There isn't really any such thing as a 
pollset under Windows, due to the nature of the IO completion ports. We're still going to provide a minimal set of features for the sake of the rest of grpc. But grpc_pollset_work won't actually do any polling, and return as quickly as possible. */ -void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { +static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) { *mu = &grpc_polling_mu; pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next = pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev = &pollset->root_worker; } -void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure) { +static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) { pollset->shutting_down = 1; - grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST); + grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST); if (!pollset->is_iocp_worker) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); } else { pollset->on_shutdown = closure; } } -void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {} +static void pollset_destroy(grpc_pollset* pollset) {} -grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker_hdl, - gpr_timespec now, gpr_timespec deadline) { +static grpc_error* pollset_work(grpc_pollset* pollset, + grpc_pollset_worker** worker_hdl, + grpc_millis deadline) { grpc_pollset_worker worker; if (worker_hdl) *worker_hdl = &worker; @@ -124,13 +122,13 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_cv_init(&worker.cv); if (!pollset->kicked_without_pollers && !pollset->shutting_down) { if (g_active_poller == NULL) { - grpc_pollset_worker *next_worker; + grpc_pollset_worker* next_worker; /* become poller */ pollset->is_iocp_worker = 1; g_active_poller = &worker; gpr_mu_unlock(&grpc_polling_mu); - grpc_iocp_work(exec_ctx, deadline); - grpc_exec_ctx_flush(exec_ctx); + grpc_iocp_work(deadline); + grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(&grpc_polling_mu); pollset->is_iocp_worker = 0; g_active_poller = NULL; @@ -148,7 +146,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } if (pollset->shutting_down && pollset->on_shutdown != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(pollset->on_shutdown, GRPC_ERROR_NONE); pollset->on_shutdown = NULL; } goto done; @@ -159,17 +157,20 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, &worker); added_worker = 1; while (!worker.kicked) { - if (gpr_cv_wait(&worker.cv, &grpc_polling_mu, deadline)) { + if (gpr_cv_wait(&worker.cv, &grpc_polling_mu, + grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) { + grpc_core::ExecCtx::Get()->InvalidateNow(); break; } + grpc_core::ExecCtx::Get()->InvalidateNow(); } } else { pollset->kicked_without_pollers = 0; } done: - if (!grpc_closure_list_empty(exec_ctx->closure_list)) { + if (!grpc_closure_list_empty(*grpc_core::ExecCtx::Get()->closure_list())) { gpr_mu_unlock(&grpc_polling_mu); - grpc_exec_ctx_flush(exec_ctx); + grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(&grpc_polling_mu); } if (added_worker) { @@ -181,8 +182,8 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, return GRPC_ERROR_NONE; } -grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, - grpc_pollset_worker *specific_worker) { +static grpc_error* 
pollset_kick(grpc_pollset* p, + grpc_pollset_worker* specific_worker) { if (specific_worker != NULL) { if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) { for (specific_worker = @@ -209,7 +210,7 @@ grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, specific_worker = pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET); if (specific_worker != NULL) { - grpc_pollset_kick(exec_ctx, p, specific_worker); + grpc_pollset_kick(p, specific_worker); } else if (p->is_iocp_worker) { grpc_iocp_kick(); } else { @@ -219,4 +220,10 @@ grpc_error *grpc_pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *p, return GRPC_ERROR_NONE; } +grpc_pollset_vtable grpc_windows_pollset_vtable = { + pollset_global_init, pollset_global_shutdown, + pollset_init, pollset_shutdown, + pollset_destroy, pollset_work, + pollset_kick, pollset_size}; + #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/pollset_windows.h b/Sources/CgRPC/src/core/lib/iomgr/pollset_windows.h index 71878c3d3..e89758c69 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/pollset_windows.h +++ b/Sources/CgRPC/src/core/lib/iomgr/pollset_windows.h @@ -19,8 +19,13 @@ #ifndef GRPC_CORE_LIB_IOMGR_POLLSET_WINDOWS_H #define GRPC_CORE_LIB_IOMGR_POLLSET_WINDOWS_H +#include + #include +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_WINSOCK_SOCKET #include "src/core/lib/iomgr/socket_windows.h" /* There isn't really any such thing as a pollset under Windows, due to the @@ -35,8 +40,8 @@ typedef enum { } grpc_pollset_worker_link_type; typedef struct grpc_pollset_worker_link { - struct grpc_pollset_worker *next; - struct grpc_pollset_worker *prev; + struct grpc_pollset_worker* next; + struct grpc_pollset_worker* prev; } grpc_pollset_worker_link; struct grpc_pollset; @@ -45,7 +50,7 @@ typedef struct grpc_pollset grpc_pollset; typedef struct grpc_pollset_worker { gpr_cv cv; int kicked; - struct grpc_pollset *pollset; + struct grpc_pollset* pollset; grpc_pollset_worker_link links[GRPC_POLLSET_WORKER_LINK_TYPES]; } grpc_pollset_worker; @@ -54,10 +59,12 @@ struct grpc_pollset { int kicked_without_pollers; int is_iocp_worker; grpc_pollset_worker root_worker; - grpc_closure *on_shutdown; + grpc_closure* on_shutdown; }; void grpc_pollset_global_init(void); void grpc_pollset_global_shutdown(void); +#endif + #endif /* GRPC_CORE_LIB_IOMGR_POLLSET_WINDOWS_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/port.h b/Sources/CgRPC/src/core/lib/iomgr/port.h index 1970d106d..a39701200 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/port.h +++ b/Sources/CgRPC/src/core/lib/iomgr/port.h @@ -21,8 +21,13 @@ #ifndef GRPC_CORE_LIB_IOMGR_PORT_H #define GRPC_CORE_LIB_IOMGR_PORT_H -#if defined(GRPC_UV) -// Do nothing +#ifdef GRPC_UV +#ifndef GRPC_CUSTOM_SOCKET +#define GRPC_CUSTOM_SOCKET +#endif +#endif +#if defined(GRPC_CUSTOM_SOCKET) +// Do Nothing #elif defined(GPR_MANYLINUX1) #define GRPC_HAVE_ARPA_NAMESER 1 #define GRPC_HAVE_IFADDRS 1 @@ -33,12 +38,10 @@ #define GRPC_POSIX_FORK 1 #define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1 #define GRPC_POSIX_SOCKET 1 -#define GRPC_POSIX_SOCKETADDR 1 #define GRPC_POSIX_SOCKETUTILS 1 #define GRPC_POSIX_WAKEUP_FD 1 -#define GRPC_TIMER_USE_GENERIC 1 +#define GRPC_LINUX_EPOLL 1 #elif defined(GPR_WINDOWS) -#define GRPC_TIMER_USE_GENERIC 1 #define GRPC_WINSOCK_SOCKET 1 #define GRPC_WINDOWS_SOCKETUTILS 1 #elif defined(GPR_ANDROID) @@ -48,10 +51,8 @@ #define GRPC_HAVE_UNIX_SOCKET 1 #define GRPC_LINUX_EVENTFD 1 #define GRPC_POSIX_SOCKET 1 -#define GRPC_POSIX_SOCKETADDR 1 #define GRPC_POSIX_SOCKETUTILS 1 
#define GRPC_POSIX_WAKEUP_FD 1 -#define GRPC_TIMER_USE_GENERIC 1 #elif defined(GPR_LINUX) #define GRPC_HAVE_ARPA_NAMESER 1 #define GRPC_HAVE_IFADDRS 1 @@ -63,12 +64,13 @@ #define GRPC_POSIX_FORK 1 #define GRPC_POSIX_HOST_NAME_MAX 1 #define GRPC_POSIX_SOCKET 1 -#define GRPC_POSIX_SOCKETADDR 1 #define GRPC_POSIX_WAKEUP_FD 1 -#define GRPC_TIMER_USE_GENERIC 1 #ifdef __GLIBC_PREREQ -#if __GLIBC_PREREQ(2, 9) +#if __GLIBC_PREREQ(2, 4) #define GRPC_LINUX_EPOLL 1 +#endif +#if __GLIBC_PREREQ(2, 9) +#define GRPC_LINUX_EPOLL_CREATE1 1 #define GRPC_LINUX_EVENTFD 1 #endif #if __GLIBC_PREREQ(2, 10) @@ -77,6 +79,7 @@ #endif #ifndef __GLIBC__ #define GRPC_LINUX_EPOLL 1 +#define GRPC_LINUX_EPOLL_CREATE1 1 #define GRPC_LINUX_EVENTFD 1 #define GRPC_MSG_IOVLEN_TYPE int #endif @@ -95,11 +98,9 @@ #define GRPC_POSIX_FORK 1 #define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1 #define GRPC_POSIX_SOCKET 1 -#define GRPC_POSIX_SOCKETADDR 1 #define GRPC_POSIX_SOCKETUTILS 1 #define GRPC_POSIX_SYSCONF 1 #define GRPC_POSIX_WAKEUP_FD 1 -#define GRPC_TIMER_USE_GENERIC 1 #elif defined(GPR_FREEBSD) #define GRPC_HAVE_ARPA_NAMESER 1 #define GRPC_HAVE_IFADDRS 1 @@ -109,26 +110,31 @@ #define GRPC_POSIX_FORK 1 #define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1 #define GRPC_POSIX_SOCKET 1 -#define GRPC_POSIX_SOCKETADDR 1 #define GRPC_POSIX_SOCKETUTILS 1 #define GRPC_POSIX_WAKEUP_FD 1 -#define GRPC_TIMER_USE_GENERIC 1 +#elif defined(GPR_OPENBSD) +#define GRPC_HAVE_IFADDRS 1 +#define GRPC_HAVE_IPV6_RECVPKTINFO 1 +#define GRPC_HAVE_UNIX_SOCKET 1 +#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1 +#define GRPC_POSIX_SOCKET 1 +#define GRPC_POSIX_SOCKETUTILS 1 +#define GRPC_POSIX_WAKEUP_FD 1 #elif defined(GPR_NACL) #define GRPC_HAVE_ARPA_NAMESER 1 #define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1 #define GRPC_POSIX_SOCKET 1 -#define GRPC_POSIX_SOCKETADDR 1 #define GRPC_POSIX_SOCKETUTILS 1 #define GRPC_POSIX_WAKEUP_FD 1 -#define GRPC_TIMER_USE_GENERIC 1 #elif !defined(GPR_NO_AUTODETECT_PLATFORM) #error "Platform not recognized" #endif #if defined(GRPC_POSIX_SOCKET) + defined(GRPC_WINSOCK_SOCKET) + \ - defined(GRPC_CUSTOM_SOCKET) + defined(GRPC_UV) != \ + defined(GRPC_CUSTOM_SOCKET) != \ 1 -#error Must define exactly one of GRPC_POSIX_SOCKET, GRPC_WINSOCK_SOCKET, GPR_CUSTOM_SOCKET +#error \ + "Must define exactly one of GRPC_POSIX_SOCKET, GRPC_WINSOCK_SOCKET, GRPC_CUSTOM_SOCKET" #endif #if defined(GRPC_POSIX_HOST_NAME_MAX) && defined(GRPC_POSIX_SYSCONF) diff --git a/Sources/CgRPC/src/core/lib/iomgr/resolve_address.cc b/Sources/CgRPC/src/core/lib/iomgr/resolve_address.cc new file mode 100644 index 000000000..f2a467636 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/resolve_address.cc @@ -0,0 +1,50 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include +#include "src/core/lib/iomgr/resolve_address.h" + +grpc_address_resolver_vtable* grpc_resolve_address_impl; + +void grpc_set_resolver_impl(grpc_address_resolver_vtable* vtable) { + grpc_resolve_address_impl = vtable; +} + +void grpc_resolve_address(const char* addr, const char* default_port, + grpc_pollset_set* interested_parties, + grpc_closure* on_done, + grpc_resolved_addresses** addresses) { + grpc_resolve_address_impl->resolve_address( + addr, default_port, interested_parties, on_done, addresses); +} + +void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addrs) { + if (addrs != nullptr) { + gpr_free(addrs->addrs); + } + gpr_free(addrs); +} + +grpc_error* grpc_blocking_resolve_address(const char* name, + const char* default_port, + grpc_resolved_addresses** addresses) { + return grpc_resolve_address_impl->blocking_resolve_address(name, default_port, + addresses); +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/resolve_address.h b/Sources/CgRPC/src/core/lib/iomgr/resolve_address.h index fe1dd7857..fe0d83458 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/resolve_address.h +++ b/Sources/CgRPC/src/core/lib/iomgr/resolve_address.h @@ -19,37 +19,65 @@ #ifndef GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_H #define GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_H +#include + #include -#include "src/core/lib/iomgr/exec_ctx.h" + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_UV +#include +#endif + +#ifdef GRPC_WINSOCK_SOCKET +#include +#endif + +#ifdef GRPC_POSIX_SOCKET +#include +#endif + #include "src/core/lib/iomgr/pollset_set.h" #define GRPC_MAX_SOCKADDR_SIZE 128 typedef struct { char addr[GRPC_MAX_SOCKADDR_SIZE]; - size_t len; + socklen_t len; } grpc_resolved_address; typedef struct { size_t naddrs; - grpc_resolved_address *addrs; + grpc_resolved_address* addrs; } grpc_resolved_addresses; +typedef struct grpc_address_resolver_vtable { + void (*resolve_address)(const char* addr, const char* default_port, + grpc_pollset_set* interested_parties, + grpc_closure* on_done, + grpc_resolved_addresses** addresses); + grpc_error* (*blocking_resolve_address)(const char* name, + const char* default_port, + grpc_resolved_addresses** addresses); +} grpc_address_resolver_vtable; + +void grpc_set_resolver_impl(grpc_address_resolver_vtable* vtable); + /* Asynchronously resolve addr. Use default_port if a port isn't designated in addr, otherwise use the port in addr. */ /* TODO(ctiller): add a timeout here */ -extern void (*grpc_resolve_address)(grpc_exec_ctx *exec_ctx, const char *addr, - const char *default_port, - grpc_pollset_set *interested_parties, - grpc_closure *on_done, - grpc_resolved_addresses **addresses); +void grpc_resolve_address(const char* addr, const char* default_port, + grpc_pollset_set* interested_parties, + grpc_closure* on_done, + grpc_resolved_addresses** addresses); + /* Destroy resolved addresses */ -void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addresses); +void grpc_resolved_addresses_destroy(grpc_resolved_addresses* addresses); -/* Resolve addr in a blocking fashion. Returns NULL on failure. On success, +/* Resolve addr in a blocking fashion. On success, result must be freed with grpc_resolved_addresses_destroy. 
*/ -extern grpc_error *(*grpc_blocking_resolve_address)( - const char *name, const char *default_port, - grpc_resolved_addresses **addresses); +grpc_error* grpc_blocking_resolve_address(const char* name, + const char* default_port, + grpc_resolved_addresses** addresses); #endif /* GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/resolve_address_custom.cc b/Sources/CgRPC/src/core/lib/iomgr/resolve_address_custom.cc new file mode 100644 index 000000000..9cf7817f6 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/resolve_address_custom.cc @@ -0,0 +1,187 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#include +#include + +#include +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/useful.h" + +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/resolve_address_custom.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" + +#include + +typedef struct grpc_custom_resolver { + grpc_closure* on_done; + grpc_resolved_addresses** addresses; + char* host; + char* port; +} grpc_custom_resolver; + +static grpc_custom_resolver_vtable* resolve_address_vtable = nullptr; + +static int retry_named_port_failure(grpc_custom_resolver* r, + grpc_resolved_addresses** res) { + // This loop is copied from resolve_address_posix.c + const char* svc[][2] = {{"http", "80"}, {"https", "443"}}; + for (size_t i = 0; i < GPR_ARRAY_SIZE(svc); i++) { + if (strcmp(r->port, svc[i][0]) == 0) { + gpr_free(r->port); + r->port = gpr_strdup(svc[i][1]); + if (res) { + grpc_error* error = + resolve_address_vtable->resolve(r->host, r->port, res); + if (error != GRPC_ERROR_NONE) { + GRPC_ERROR_UNREF(error); + return 0; + } + } else { + resolve_address_vtable->resolve_async(r, r->host, r->port); + } + return 1; + } + } + return 0; +} + +void grpc_custom_resolve_callback(grpc_custom_resolver* r, + grpc_resolved_addresses* result, + grpc_error* error) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + grpc_core::ExecCtx exec_ctx; + if (error == GRPC_ERROR_NONE) { + *r->addresses = result; + } else if (retry_named_port_failure(r, nullptr)) { + return; + } + if (r->on_done) { + GRPC_CLOSURE_SCHED(r->on_done, error); + } + gpr_free(r->host); + gpr_free(r->port); + gpr_free(r); +} + +static grpc_error* try_split_host_port(const char* name, + const char* default_port, char** host, + char** port) { + /* parse name, splitting it into host and port parts */ + grpc_error* error; + gpr_split_host_port(name, host, port); + if (*host == nullptr) { + char* msg; + gpr_asprintf(&msg, "unparseable host:port: '%s'", name); + error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + return error; + } + if (*port == nullptr) { + // TODO(murgatroid99): add tests for this case + if (default_port == nullptr) { + char* msg; + gpr_asprintf(&msg, "no port in name '%s'", name); + error = 
GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + return error; + } + *port = gpr_strdup(default_port); + } + return GRPC_ERROR_NONE; +} + +static grpc_error* blocking_resolve_address_impl( + const char* name, const char* default_port, + grpc_resolved_addresses** addresses) { + char* host; + char* port; + grpc_error* err; + + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + + err = try_split_host_port(name, default_port, &host, &port); + if (err != GRPC_ERROR_NONE) { + gpr_free(host); + gpr_free(port); + return err; + } + + /* Call getaddrinfo */ + grpc_custom_resolver resolver; + resolver.host = host; + resolver.port = port; + + grpc_resolved_addresses* addrs; + grpc_core::ExecCtx* curr = grpc_core::ExecCtx::Get(); + grpc_core::ExecCtx::Set(nullptr); + err = resolve_address_vtable->resolve(host, port, &addrs); + if (err != GRPC_ERROR_NONE) { + if (retry_named_port_failure(&resolver, &addrs)) { + GRPC_ERROR_UNREF(err); + err = GRPC_ERROR_NONE; + } + } + grpc_core::ExecCtx::Set(curr); + if (err == GRPC_ERROR_NONE) { + *addresses = addrs; + } + gpr_free(resolver.host); + gpr_free(resolver.port); + return err; +} + +static void resolve_address_impl(const char* name, const char* default_port, + grpc_pollset_set* interested_parties, + grpc_closure* on_done, + grpc_resolved_addresses** addrs) { + grpc_custom_resolver* r = nullptr; + char* host = nullptr; + char* port = nullptr; + grpc_error* err; + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + err = try_split_host_port(name, default_port, &host, &port); + if (err != GRPC_ERROR_NONE) { + GRPC_CLOSURE_SCHED(on_done, err); + gpr_free(host); + gpr_free(port); + return; + } + r = (grpc_custom_resolver*)gpr_malloc(sizeof(grpc_custom_resolver)); + r->on_done = on_done; + r->addresses = addrs; + r->host = host; + r->port = port; + + /* Call getaddrinfo */ + resolve_address_vtable->resolve_async(r, r->host, r->port); +} + +static grpc_address_resolver_vtable custom_resolver_vtable = { + resolve_address_impl, blocking_resolve_address_impl}; + +void grpc_custom_resolver_init(grpc_custom_resolver_vtable* impl) { + resolve_address_vtable = impl; + grpc_set_resolver_impl(&custom_resolver_vtable); +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/resolve_address_custom.h b/Sources/CgRPC/src/core/lib/iomgr/resolve_address_custom.h new file mode 100644 index 000000000..e0c671408 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/resolve_address_custom.h @@ -0,0 +1,43 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_CUSTOM_H +#define GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_CUSTOM_H + +#include + +#include "src/core/lib/iomgr/port.h" + +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/sockaddr.h" + +typedef struct grpc_custom_resolver grpc_custom_resolver; + +typedef struct grpc_custom_resolver_vtable { + grpc_error* (*resolve)(char* host, char* port, grpc_resolved_addresses** res); + void (*resolve_async)(grpc_custom_resolver* resolver, char* host, char* port); +} grpc_custom_resolver_vtable; + +void grpc_custom_resolve_callback(grpc_custom_resolver* resolver, + grpc_resolved_addresses* result, + grpc_error* error); + +/* Internal APIs */ +void grpc_custom_resolver_init(grpc_custom_resolver_vtable* impl); + +#endif /* GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_CUSTOM_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/resolve_address_posix.c b/Sources/CgRPC/src/core/lib/iomgr/resolve_address_posix.cc similarity index 63% rename from Sources/CgRPC/src/core/lib/iomgr/resolve_address_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/resolve_address_posix.cc index 60cfeebd4..a82075542 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/resolve_address_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/resolve_address_posix.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SOCKET @@ -27,28 +29,30 @@ #include #include -#include #include #include -#include #include -#include + +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/thd.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/unix_sockets_posix.h" -#include "src/core/lib/support/block_annotate.h" -#include "src/core/lib/support/string.h" -static grpc_error *blocking_resolve_address_impl( - const char *name, const char *default_port, - grpc_resolved_addresses **addresses) { +static grpc_error* posix_blocking_resolve_address( + const char* name, const char* default_port, + grpc_resolved_addresses** addresses) { + grpc_core::ExecCtx exec_ctx; struct addrinfo hints; - struct addrinfo *result = NULL, *resp; - char *host; - char *port; + struct addrinfo *result = nullptr, *resp; + char* host; + char* port; int s; size_t i; - grpc_error *err; + grpc_error* err; if (name[0] == 'u' && name[1] == 'n' && name[2] == 'i' && name[3] == 'x' && name[4] == ':' && name[5] != 0) { @@ -57,14 +61,14 @@ static grpc_error *blocking_resolve_address_impl( /* parse name, splitting it into host and port parts */ gpr_split_host_port(name, &host, &port); - if (host == NULL) { + if (host == nullptr) { err = grpc_error_set_str( GRPC_ERROR_CREATE_FROM_STATIC_STRING("unparseable host:port"), GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name)); goto done; } - if (port == NULL) { - if (default_port == NULL) { + if (port == nullptr) { + if (default_port == nullptr) { err = grpc_error_set_str( GRPC_ERROR_CREATE_FROM_STATIC_STRING("no port in name"), GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(name)); @@ -85,7 +89,7 @@ static grpc_error *blocking_resolve_address_impl( if (s != 0) { /* Retry if well-known service name is recognized */ - const char *svc[][2] = {{"http", "80"}, {"https", "443"}}; + const char* svc[][2] = {{"http", "80"}, {"https", "443"}}; for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) { if (strcmp(port, svc[i][0]) == 0) { 
GRPC_SCHEDULING_START_BLOCKING_REGION; @@ -112,16 +116,16 @@ static grpc_error *blocking_resolve_address_impl( } /* Success path: set addrs non-NULL, fill it in */ - *addresses = - (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses)); + *addresses = static_cast( + gpr_malloc(sizeof(grpc_resolved_addresses))); (*addresses)->naddrs = 0; - for (resp = result; resp != NULL; resp = resp->ai_next) { + for (resp = result; resp != nullptr; resp = resp->ai_next) { (*addresses)->naddrs++; } - (*addresses)->addrs = (grpc_resolved_address *)gpr_malloc( - sizeof(grpc_resolved_address) * (*addresses)->naddrs); + (*addresses)->addrs = static_cast( + gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs)); i = 0; - for (resp = result; resp != NULL; resp = resp->ai_next) { + for (resp = result; resp != nullptr; resp = resp->ai_next) { memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen); (*addresses)->addrs[i].len = resp->ai_addrlen; i++; @@ -137,57 +141,40 @@ static grpc_error *blocking_resolve_address_impl( return err; } -grpc_error *(*grpc_blocking_resolve_address)( - const char *name, const char *default_port, - grpc_resolved_addresses **addresses) = blocking_resolve_address_impl; - typedef struct { - char *name; - char *default_port; - grpc_closure *on_done; - grpc_resolved_addresses **addrs_out; + char* name; + char* default_port; + grpc_closure* on_done; + grpc_resolved_addresses** addrs_out; grpc_closure request_closure; - void *arg; + void* arg; } request; /* Callback to be passed to grpc_executor to asynch-ify * grpc_blocking_resolve_address */ -static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, - grpc_error *error) { - request *r = (request *)rp; - GRPC_CLOSURE_SCHED( - exec_ctx, r->on_done, - grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out)); +static void do_request_thread(void* rp, grpc_error* error) { + request* r = static_cast(rp); + GRPC_CLOSURE_SCHED(r->on_done, grpc_blocking_resolve_address( + r->name, r->default_port, r->addrs_out)); gpr_free(r->name); gpr_free(r->default_port); gpr_free(r); } -void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { - if (addrs != NULL) { - gpr_free(addrs->addrs); - } - gpr_free(addrs); -} - -static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, - grpc_pollset_set *interested_parties, - grpc_closure *on_done, - grpc_resolved_addresses **addrs) { - request *r = (request *)gpr_malloc(sizeof(request)); +static void posix_resolve_address(const char* name, const char* default_port, + grpc_pollset_set* interested_parties, + grpc_closure* on_done, + grpc_resolved_addresses** addrs) { + request* r = static_cast(gpr_malloc(sizeof(request))); GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r, grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)); r->name = gpr_strdup(name); r->default_port = gpr_strdup(default_port); r->on_done = on_done; r->addrs_out = addrs; - GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE); } -void (*grpc_resolve_address)( - grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, - grpc_pollset_set *interested_parties, grpc_closure *on_done, - grpc_resolved_addresses **addrs) = resolve_address_impl; - +grpc_address_resolver_vtable grpc_posix_resolver_vtable = { + posix_resolve_address, posix_blocking_resolve_address}; #endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/resolve_address_uv.c 
b/Sources/CgRPC/src/core/lib/iomgr/resolve_address_uv.c deleted file mode 100644 index 2d438e8b4..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/resolve_address_uv.c +++ /dev/null @@ -1,280 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/port.h" -#ifdef GRPC_UV - -#include - -#include -#include -#include -#include -#include - -#include "src/core/lib/iomgr/closure.h" -#include "src/core/lib/iomgr/error.h" -#include "src/core/lib/iomgr/exec_ctx.h" -#include "src/core/lib/iomgr/iomgr_uv.h" -#include "src/core/lib/iomgr/resolve_address.h" -#include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" - -#include - -typedef struct request { - grpc_closure *on_done; - grpc_resolved_addresses **addresses; - struct addrinfo *hints; - char *host; - char *port; -} request; - -static int retry_named_port_failure(int status, request *r, - uv_getaddrinfo_cb getaddrinfo_cb) { - if (status != 0) { - // This loop is copied from resolve_address_posix.c - char *svc[][2] = {{"http", "80"}, {"https", "443"}}; - for (size_t i = 0; i < GPR_ARRAY_SIZE(svc); i++) { - if (strcmp(r->port, svc[i][0]) == 0) { - int retry_status; - uv_getaddrinfo_t *req = gpr_malloc(sizeof(uv_getaddrinfo_t)); - req->data = r; - r->port = gpr_strdup(svc[i][1]); - retry_status = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_cb, - r->host, r->port, r->hints); - if (retry_status < 0 || getaddrinfo_cb == NULL) { - // The callback will not be called - gpr_free(req); - } - return retry_status; - } - } - } - /* If this function calls uv_getaddrinfo, it will return that function's - return value. 
That function only returns numbers <=0, so we can safely - return 1 to indicate that we never retried */ - return 1; -} - -static grpc_error *handle_addrinfo_result(int status, struct addrinfo *result, - grpc_resolved_addresses **addresses) { - struct addrinfo *resp; - size_t i; - if (status != 0) { - grpc_error *error; - *addresses = NULL; - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed"); - error = - grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string(uv_strerror(status))); - return error; - } - (*addresses) = gpr_malloc(sizeof(grpc_resolved_addresses)); - (*addresses)->naddrs = 0; - for (resp = result; resp != NULL; resp = resp->ai_next) { - (*addresses)->naddrs++; - } - (*addresses)->addrs = - gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs); - i = 0; - for (resp = result; resp != NULL; resp = resp->ai_next) { - memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen); - (*addresses)->addrs[i].len = resp->ai_addrlen; - i++; - } - - { - for (i = 0; i < (*addresses)->naddrs; i++) { - char *buf; - grpc_sockaddr_to_string(&buf, &(*addresses)->addrs[i], 0); - gpr_free(buf); - } - } - return GRPC_ERROR_NONE; -} - -static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status, - struct addrinfo *res) { - request *r = (request *)req->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_error *error; - int retry_status; - char *port = r->port; - - gpr_free(req); - retry_status = retry_named_port_failure(status, r, getaddrinfo_callback); - if (retry_status == 0) { - /* The request is being retried. It is using its own port string, so we free - * the original one */ - gpr_free(port); - return; - } - /* Either no retry was attempted, or the retry failed. Either way, the - original error probably has more interesting information */ - error = handle_addrinfo_result(status, res, r->addresses); - GRPC_CLOSURE_SCHED(&exec_ctx, r->on_done, error); - grpc_exec_ctx_finish(&exec_ctx); - gpr_free(r->hints); - gpr_free(r->host); - gpr_free(r->port); - gpr_free(r); - uv_freeaddrinfo(res); -} - -static grpc_error *try_split_host_port(const char *name, - const char *default_port, char **host, - char **port) { - /* parse name, splitting it into host and port parts */ - grpc_error *error; - gpr_split_host_port(name, host, port); - if (*host == NULL) { - char *msg; - gpr_asprintf(&msg, "unparseable host:port: '%s'", name); - error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); - gpr_free(msg); - return error; - } - if (*port == NULL) { - // TODO(murgatroid99): add tests for this case - if (default_port == NULL) { - char *msg; - gpr_asprintf(&msg, "no port in name '%s'", name); - error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); - gpr_free(msg); - return error; - } - *port = gpr_strdup(default_port); - } - return GRPC_ERROR_NONE; -} - -static grpc_error *blocking_resolve_address_impl( - const char *name, const char *default_port, - grpc_resolved_addresses **addresses) { - char *host; - char *port; - struct addrinfo hints; - uv_getaddrinfo_t req; - int s; - grpc_error *err; - int retry_status; - - GRPC_UV_ASSERT_SAME_THREAD(); - - req.addrinfo = NULL; - - err = try_split_host_port(name, default_port, &host, &port); - if (err != GRPC_ERROR_NONE) { - goto done; - } - - /* Call getaddrinfo */ - memset(&hints, 0, sizeof(hints)); - hints.ai_family = AF_UNSPEC; /* ipv4 or ipv6 */ - hints.ai_socktype = SOCK_STREAM; /* stream socket */ - hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */ - - s = 
uv_getaddrinfo(uv_default_loop(), &req, NULL, host, port, &hints); - request r = { - .addresses = addresses, .hints = &hints, .host = host, .port = port}; - retry_status = retry_named_port_failure(s, &r, NULL); - if (retry_status <= 0) { - s = retry_status; - } - err = handle_addrinfo_result(s, req.addrinfo, addresses); - -done: - gpr_free(host); - gpr_free(port); - if (req.addrinfo) { - uv_freeaddrinfo(req.addrinfo); - } - return err; -} - -grpc_error *(*grpc_blocking_resolve_address)( - const char *name, const char *default_port, - grpc_resolved_addresses **addresses) = blocking_resolve_address_impl; - -void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { - if (addrs != NULL) { - gpr_free(addrs->addrs); - } - gpr_free(addrs); -} - -static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, - grpc_pollset_set *interested_parties, - grpc_closure *on_done, - grpc_resolved_addresses **addrs) { - uv_getaddrinfo_t *req = NULL; - request *r = NULL; - struct addrinfo *hints = NULL; - char *host = NULL; - char *port = NULL; - grpc_error *err; - int s; - GRPC_UV_ASSERT_SAME_THREAD(); - err = try_split_host_port(name, default_port, &host, &port); - if (err != GRPC_ERROR_NONE) { - GRPC_CLOSURE_SCHED(exec_ctx, on_done, err); - gpr_free(host); - gpr_free(port); - return; - } - r = gpr_malloc(sizeof(request)); - r->on_done = on_done; - r->addresses = addrs; - r->host = host; - r->port = port; - req = gpr_malloc(sizeof(uv_getaddrinfo_t)); - req->data = r; - - /* Call getaddrinfo */ - hints = gpr_malloc(sizeof(struct addrinfo)); - memset(hints, 0, sizeof(struct addrinfo)); - hints->ai_family = AF_UNSPEC; /* ipv4 or ipv6 */ - hints->ai_socktype = SOCK_STREAM; /* stream socket */ - hints->ai_flags = AI_PASSIVE; /* for wildcard IP address */ - r->hints = hints; - - s = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_callback, host, port, - hints); - - if (s != 0) { - *addrs = NULL; - err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed"); - err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string(uv_strerror(s))); - GRPC_CLOSURE_SCHED(exec_ctx, on_done, err); - gpr_free(r); - gpr_free(req); - gpr_free(hints); - gpr_free(host); - gpr_free(port); - } -} - -void (*grpc_resolve_address)( - grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, - grpc_pollset_set *interested_parties, grpc_closure *on_done, - grpc_resolved_addresses **addrs) = resolve_address_impl; - -#endif /* GRPC_UV */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/resolve_address_windows.c b/Sources/CgRPC/src/core/lib/iomgr/resolve_address_windows.cc similarity index 65% rename from Sources/CgRPC/src/core/lib/iomgr/resolve_address_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/resolve_address_windows.cc index 0cb0029f4..71c92615a 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/resolve_address_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/resolve_address_windows.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINSOCK_SOCKET @@ -23,45 +25,48 @@ #include "src/core/lib/iomgr/resolve_address.h" +#include #include #include #include -#include #include #include #include -#include #include + +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/thd.h" +#include "src/core/lib/iomgr/block_annotate.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/iomgr/sockaddr_utils.h" 
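With this update the per-platform resolvers no longer assign the grpc_blocking_resolve_address / grpc_resolve_address function pointers directly; each platform exports a grpc_address_resolver_vtable (POSIX above, Windows below, plus the custom vtable earlier) that is installed through grpc_set_resolver_impl. The sketch below shows the shape this implies; the actual struct lives in resolve_address.h, which is not part of these hunks, so the member order is inferred from the {async, blocking} initializers used in this patch and the selection function is hypothetical. The Windows hunks continue below.

// Inferred shape, for orientation only (see resolve_address.h for the
// authoritative definition):
//
//   typedef struct grpc_address_resolver_vtable {
//     void (*resolve_address)(const char* name, const char* default_port,
//                             grpc_pollset_set* interested_parties,
//                             grpc_closure* on_done,
//                             grpc_resolved_addresses** addresses);
//     grpc_error* (*blocking_resolve_address)(
//         const char* name, const char* default_port,
//         grpc_resolved_addresses** addresses);
//   } grpc_address_resolver_vtable;

// Hypothetical selection at iomgr startup; the real code installs the vtable
// from the platform-specific iomgr initialization.
static void register_platform_resolver(void) {
#if defined(GRPC_WINSOCK_SOCKET)
  grpc_set_resolver_impl(&grpc_windows_resolver_vtable);
#elif defined(GRPC_POSIX_SOCKET)
  grpc_set_resolver_impl(&grpc_posix_resolver_vtable);
#endif
}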
-#include "src/core/lib/support/block_annotate.h" -#include "src/core/lib/support/string.h" typedef struct { - char *name; - char *default_port; + char* name; + char* default_port; grpc_closure request_closure; - grpc_closure *on_done; - grpc_resolved_addresses **addresses; + grpc_closure* on_done; + grpc_resolved_addresses** addresses; } request; -static grpc_error *blocking_resolve_address_impl( - const char *name, const char *default_port, - grpc_resolved_addresses **addresses) { +static grpc_error* windows_blocking_resolve_address( + const char* name, const char* default_port, + grpc_resolved_addresses** addresses) { + grpc_core::ExecCtx exec_ctx; struct addrinfo hints; struct addrinfo *result = NULL, *resp; - char *host; - char *port; + char* host; + char* port; int s; size_t i; - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; /* parse name, splitting it into host and port parts */ gpr_split_host_port(name, &host, &port); if (host == NULL) { - char *msg; + char* msg; gpr_asprintf(&msg, "unparseable host:port: '%s'", name); error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); @@ -69,7 +74,7 @@ static grpc_error *blocking_resolve_address_impl( } if (port == NULL) { if (default_port == NULL) { - char *msg; + char* msg; gpr_asprintf(&msg, "no port in name '%s'", name); error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); gpr_free(msg); @@ -93,13 +98,14 @@ static grpc_error *blocking_resolve_address_impl( } /* Success path: set addrs non-NULL, fill it in */ - (*addresses) = gpr_malloc(sizeof(grpc_resolved_addresses)); + (*addresses) = + (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses)); (*addresses)->naddrs = 0; for (resp = result; resp != NULL; resp = resp->ai_next) { (*addresses)->naddrs++; } - (*addresses)->addrs = - gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs); + (*addresses)->addrs = (grpc_resolved_address*)gpr_malloc( + sizeof(grpc_resolved_address) * (*addresses)->naddrs); i = 0; for (resp = result; resp != NULL; resp = resp->ai_next) { memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen); @@ -109,7 +115,7 @@ static grpc_error *blocking_resolve_address_impl( { for (i = 0; i < (*addresses)->naddrs; i++) { - char *buf; + char* buf; grpc_sockaddr_to_string(&buf, &(*addresses)->addrs[i], 0); gpr_free(buf); } @@ -124,52 +130,36 @@ static grpc_error *blocking_resolve_address_impl( return error; } -grpc_error *(*grpc_blocking_resolve_address)( - const char *name, const char *default_port, - grpc_resolved_addresses **addresses) = blocking_resolve_address_impl; - /* Callback to be passed to grpc_executor to asynch-ify * grpc_blocking_resolve_address */ -static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp, - grpc_error *error) { - request *r = rp; +static void do_request_thread(void* rp, grpc_error* error) { + request* r = (request*)rp; if (error == GRPC_ERROR_NONE) { error = grpc_blocking_resolve_address(r->name, r->default_port, r->addresses); } else { GRPC_ERROR_REF(error); } - GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, error); + GRPC_CLOSURE_SCHED(r->on_done, error); gpr_free(r->name); gpr_free(r->default_port); gpr_free(r); } -void grpc_resolved_addresses_destroy(grpc_resolved_addresses *addrs) { - if (addrs != NULL) { - gpr_free(addrs->addrs); - } - gpr_free(addrs); -} - -static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, - const char *default_port, - grpc_pollset_set *interested_parties, - grpc_closure *on_done, - grpc_resolved_addresses **addresses) { - 
request *r = gpr_malloc(sizeof(request)); +static void windows_resolve_address(const char* name, const char* default_port, + grpc_pollset_set* interested_parties, + grpc_closure* on_done, + grpc_resolved_addresses** addresses) { + request* r = (request*)gpr_malloc(sizeof(request)); GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r, grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)); r->name = gpr_strdup(name); r->default_port = gpr_strdup(default_port); r->on_done = on_done; r->addresses = addresses; - GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE); } -void (*grpc_resolve_address)( - grpc_exec_ctx *exec_ctx, const char *name, const char *default_port, - grpc_pollset_set *interested_parties, grpc_closure *on_done, - grpc_resolved_addresses **addresses) = resolve_address_impl; - +grpc_address_resolver_vtable grpc_windows_resolver_vtable = { + windows_resolve_address, windows_blocking_resolve_address}; #endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/resource_quota.c b/Sources/CgRPC/src/core/lib/iomgr/resource_quota.cc similarity index 60% rename from Sources/CgRPC/src/core/lib/iomgr/resource_quota.c rename to Sources/CgRPC/src/core/lib/iomgr/resource_quota.cc index 4d69986fb..8cf4fe992 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/resource_quota.c +++ b/Sources/CgRPC/src/core/lib/iomgr/resource_quota.cc @@ -16,8 +16,11 @@ * */ +#include + #include "src/core/lib/iomgr/resource_quota.h" +#include #include #include #include @@ -26,19 +29,18 @@ #include #include #include -#include +#include "src/core/lib/gpr/useful.h" #include "src/core/lib/iomgr/combiner.h" -grpc_tracer_flag grpc_resource_quota_trace = - GRPC_TRACER_INITIALIZER(false, "resource_quota"); +grpc_core::TraceFlag grpc_resource_quota_trace(false, "resource_quota"); #define MEMORY_USAGE_ESTIMATION_MAX 65536 /* Internal linked list pointers for a resource user */ typedef struct { - grpc_resource_user *next; - grpc_resource_user *prev; + grpc_resource_user* next; + grpc_resource_user* prev; } grpc_resource_user_link; /* Resource users are kept in (potentially) several intrusive linked lists @@ -59,7 +61,7 @@ typedef enum { struct grpc_resource_user { /* The quota this resource user consumes from */ - grpc_resource_quota *resource_quota; + grpc_resource_quota* resource_quota; /* Closure to schedule an allocation under the resource quota combiner lock */ grpc_closure allocate_closure; @@ -88,16 +90,18 @@ struct grpc_resource_user { grpc_closure_list on_allocated; /* True if we are currently trying to allocate from the quota, false if not */ bool allocating; + /* How many bytes of allocations are outstanding */ + int64_t outstanding_allocations; /* True if we are currently trying to add ourselves to the non-free quota list, false otherwise */ bool added_to_free_pool; /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer */ - grpc_closure *reclaimers[2]; + grpc_closure* reclaimers[2]; /* Reclaimers just posted: once we're in the combiner lock, we'll move them to the array above */ - grpc_closure *new_reclaimers[2]; + grpc_closure* new_reclaimers[2]; /* Trampoline closures to finish reclamation and re-enter the quota combiner lock */ grpc_closure post_reclaimer_closure[2]; @@ -110,7 +114,7 @@ struct grpc_resource_user { grpc_resource_user_link links[GRPC_RULIST_COUNT]; /* The name of this resource user, for debugging/tracing */ - char *name; + char* name; }; struct grpc_resource_quota { @@ -123,7 +127,7 @@ struct 
grpc_resource_quota { /* Master combiner lock: all activity on a quota executes under this combiner * (so no mutex is needed for this data structure) */ - grpc_combiner *combiner; + grpc_combiner* combiner; /* Size of the resource quota */ int64_t size; /* Amount of free memory in the resource quota */ @@ -143,24 +147,26 @@ struct grpc_resource_quota { /* This is only really usable for debugging: it's always a stale pointer, but a stale pointer that might just be fresh enough to guide us to where the reclamation system is stuck */ - grpc_closure *debug_only_last_initiated_reclaimer; - grpc_resource_user *debug_only_last_reclaimer_resource_user; + grpc_closure* debug_only_last_initiated_reclaimer; + grpc_resource_user* debug_only_last_reclaimer_resource_user; /* Roots of all resource user lists */ - grpc_resource_user *roots[GRPC_RULIST_COUNT]; + grpc_resource_user* roots[GRPC_RULIST_COUNT]; - char *name; + char* name; }; +static void ru_unref_by(grpc_resource_user* resource_user, gpr_atm amount); + /******************************************************************************* * list management */ -static void rulist_add_head(grpc_resource_user *resource_user, +static void rulist_add_head(grpc_resource_user* resource_user, grpc_rulist list) { - grpc_resource_quota *resource_quota = resource_user->resource_quota; - grpc_resource_user **root = &resource_quota->roots[list]; - if (*root == NULL) { + grpc_resource_quota* resource_quota = resource_user->resource_quota; + grpc_resource_user** root = &resource_quota->roots[list]; + if (*root == nullptr) { *root = resource_user; resource_user->links[list].next = resource_user->links[list].prev = resource_user; @@ -173,11 +179,11 @@ static void rulist_add_head(grpc_resource_user *resource_user, } } -static void rulist_add_tail(grpc_resource_user *resource_user, +static void rulist_add_tail(grpc_resource_user* resource_user, grpc_rulist list) { - grpc_resource_quota *resource_quota = resource_user->resource_quota; - grpc_resource_user **root = &resource_quota->roots[list]; - if (*root == NULL) { + grpc_resource_quota* resource_quota = resource_user->resource_quota; + grpc_resource_user** root = &resource_quota->roots[list]; + if (*root == nullptr) { *root = resource_user; resource_user->links[list].next = resource_user->links[list].prev = resource_user; @@ -189,20 +195,20 @@ static void rulist_add_tail(grpc_resource_user *resource_user, } } -static bool rulist_empty(grpc_resource_quota *resource_quota, +static bool rulist_empty(grpc_resource_quota* resource_quota, grpc_rulist list) { - return resource_quota->roots[list] == NULL; + return resource_quota->roots[list] == nullptr; } -static grpc_resource_user *rulist_pop_head(grpc_resource_quota *resource_quota, +static grpc_resource_user* rulist_pop_head(grpc_resource_quota* resource_quota, grpc_rulist list) { - grpc_resource_user **root = &resource_quota->roots[list]; - grpc_resource_user *resource_user = *root; - if (resource_user == NULL) { - return NULL; + grpc_resource_user** root = &resource_quota->roots[list]; + grpc_resource_user* resource_user = *root; + if (resource_user == nullptr) { + return nullptr; } if (resource_user->links[list].next == resource_user) { - *root = NULL; + *root = nullptr; } else { resource_user->links[list].next->links[list].prev = resource_user->links[list].prev; @@ -210,70 +216,65 @@ static grpc_resource_user *rulist_pop_head(grpc_resource_quota *resource_quota, resource_user->links[list].next; *root = resource_user->links[list].next; } - 
resource_user->links[list].next = resource_user->links[list].prev = NULL; + resource_user->links[list].next = resource_user->links[list].prev = nullptr; return resource_user; } -static void rulist_remove(grpc_resource_user *resource_user, grpc_rulist list) { - if (resource_user->links[list].next == NULL) return; - grpc_resource_quota *resource_quota = resource_user->resource_quota; +static void rulist_remove(grpc_resource_user* resource_user, grpc_rulist list) { + if (resource_user->links[list].next == nullptr) return; + grpc_resource_quota* resource_quota = resource_user->resource_quota; if (resource_quota->roots[list] == resource_user) { resource_quota->roots[list] = resource_user->links[list].next; if (resource_quota->roots[list] == resource_user) { - resource_quota->roots[list] = NULL; + resource_quota->roots[list] = nullptr; } } resource_user->links[list].next->links[list].prev = resource_user->links[list].prev; resource_user->links[list].prev->links[list].next = resource_user->links[list].next; - resource_user->links[list].next = resource_user->links[list].prev = NULL; + resource_user->links[list].next = resource_user->links[list].prev = nullptr; } /******************************************************************************* * resource quota state machine */ -static bool rq_alloc(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota); +static bool rq_alloc(grpc_resource_quota* resource_quota); static bool rq_reclaim_from_per_user_free_pool( - grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota); -static bool rq_reclaim(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota, bool destructive); + grpc_resource_quota* resource_quota); +static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive); -static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) { - grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq; +static void rq_step(void* rq, grpc_error* error) { + grpc_resource_quota* resource_quota = static_cast(rq); resource_quota->step_scheduled = false; do { - if (rq_alloc(exec_ctx, resource_quota)) goto done; - } while (rq_reclaim_from_per_user_free_pool(exec_ctx, resource_quota)); + if (rq_alloc(resource_quota)) goto done; + } while (rq_reclaim_from_per_user_free_pool(resource_quota)); - if (!rq_reclaim(exec_ctx, resource_quota, false)) { - rq_reclaim(exec_ctx, resource_quota, true); + if (!rq_reclaim(resource_quota, false)) { + rq_reclaim(resource_quota, true); } done: - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); } -static void rq_step_sched(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota) { +static void rq_step_sched(grpc_resource_quota* resource_quota) { if (resource_quota->step_scheduled) return; resource_quota->step_scheduled = true; grpc_resource_quota_ref_internal(resource_quota); - GRPC_CLOSURE_SCHED(exec_ctx, &resource_quota->rq_step_closure, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&resource_quota->rq_step_closure, GRPC_ERROR_NONE); } /* update the atomically available resource estimate - use no barriers since timeliness of delivery really doesn't matter much */ -static void rq_update_estimate(grpc_resource_quota *resource_quota) { +static void rq_update_estimate(grpc_resource_quota* resource_quota) { gpr_atm memory_usage_estimation = MEMORY_USAGE_ESTIMATION_MAX; if (resource_quota->size != 0) { memory_usage_estimation = - GPR_CLAMP((gpr_atm)((1.0 - - ((double)resource_quota->free_pool) / - 
((double)resource_quota->size)) * + GPR_CLAMP((gpr_atm)((1.0 - ((double)resource_quota->free_pool) / + ((double)resource_quota->size)) * MEMORY_USAGE_ESTIMATION_MAX), 0, MEMORY_USAGE_ESTIMATION_MAX); } @@ -282,32 +283,53 @@ static void rq_update_estimate(grpc_resource_quota *resource_quota) { } /* returns true if all allocations are completed */ -static bool rq_alloc(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota) { - grpc_resource_user *resource_user; +static bool rq_alloc(grpc_resource_quota* resource_quota) { + grpc_resource_user* resource_user; while ((resource_user = rulist_pop_head(resource_quota, GRPC_RULIST_AWAITING_ALLOCATION))) { gpr_mu_lock(&resource_user->mu); + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, + "RQ: check allocation for user %p shutdown=%" PRIdPTR + " free_pool=%" PRId64, + resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown), + resource_user->free_pool); + } + if (gpr_atm_no_barrier_load(&resource_user->shutdown)) { + resource_user->allocating = false; + grpc_closure_list_fail_all( + &resource_user->on_allocated, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resource user shutdown")); + int64_t aborted_allocations = resource_user->outstanding_allocations; + resource_user->outstanding_allocations = 0; + resource_user->free_pool += aborted_allocations; + GRPC_CLOSURE_LIST_SCHED(&resource_user->on_allocated); + gpr_mu_unlock(&resource_user->mu); + ru_unref_by(resource_user, static_cast(aborted_allocations)); + continue; + } if (resource_user->free_pool < 0 && -resource_user->free_pool <= resource_quota->free_pool) { int64_t amt = -resource_user->free_pool; resource_user->free_pool = 0; resource_quota->free_pool -= amt; rq_update_estimate(resource_quota); - if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { - gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64 - " bytes; rq_free_pool -> %" PRId64, + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, + "RQ %s %s: grant alloc %" PRId64 + " bytes; rq_free_pool -> %" PRId64, resource_quota->name, resource_user->name, amt, resource_quota->free_pool); } - } else if (GRPC_TRACER_ON(grpc_resource_quota_trace) && + } else if (grpc_resource_quota_trace.enabled() && resource_user->free_pool >= 0) { - gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request", + gpr_log(GPR_INFO, "RQ %s %s: discard already satisfied alloc request", resource_quota->name, resource_user->name); } if (resource_user->free_pool >= 0) { resource_user->allocating = false; - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated); + resource_user->outstanding_allocations = 0; + GRPC_CLOSURE_LIST_SCHED(&resource_user->on_allocated); gpr_mu_unlock(&resource_user->mu); } else { rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION); @@ -320,8 +342,8 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx, /* returns true if any memory could be reclaimed from buffers */ static bool rq_reclaim_from_per_user_free_pool( - grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) { - grpc_resource_user *resource_user; + grpc_resource_quota* resource_quota) { + grpc_resource_user* resource_user; while ((resource_user = rulist_pop_head(resource_quota, GRPC_RULIST_NON_EMPTY_FREE_POOL))) { gpr_mu_lock(&resource_user->mu); @@ -330,9 +352,10 @@ static bool rq_reclaim_from_per_user_free_pool( resource_user->free_pool = 0; resource_quota->free_pool += amt; rq_update_estimate(resource_quota); - if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { - gpr_log(GPR_DEBUG, "RQ %s %s: 
reclaim_from_per_user_free_pool %" PRId64 - " bytes; rq_free_pool -> %" PRId64, + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, + "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64 + " bytes; rq_free_pool -> %" PRId64, resource_quota->name, resource_user->name, amt, resource_quota->free_pool); } @@ -346,26 +369,24 @@ static bool rq_reclaim_from_per_user_free_pool( } /* returns true if reclamation is proceeding */ -static bool rq_reclaim(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota, bool destructive) { +static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive) { if (resource_quota->reclaiming) return true; grpc_rulist list = destructive ? GRPC_RULIST_RECLAIMER_DESTRUCTIVE : GRPC_RULIST_RECLAIMER_BENIGN; - grpc_resource_user *resource_user = rulist_pop_head(resource_quota, list); - if (resource_user == NULL) return false; - if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { - gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation", - resource_quota->name, resource_user->name, - destructive ? "destructive" : "benign"); + grpc_resource_user* resource_user = rulist_pop_head(resource_quota, list); + if (resource_user == nullptr) return false; + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, "RQ %s %s: initiate %s reclamation", resource_quota->name, + resource_user->name, destructive ? "destructive" : "benign"); } resource_quota->reclaiming = true; grpc_resource_quota_ref_internal(resource_quota); - grpc_closure *c = resource_user->reclaimers[destructive]; + grpc_closure* c = resource_user->reclaimers[destructive]; GPR_ASSERT(c); resource_quota->debug_only_last_reclaimer_resource_user = resource_user; resource_quota->debug_only_last_initiated_reclaimer = c; - resource_user->reclaimers[destructive] = NULL; - GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_NONE); + resource_user->reclaimers[destructive] = nullptr; + GRPC_CLOSURE_RUN(c, GRPC_ERROR_NONE); return true; } @@ -376,19 +397,19 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx, typedef struct { grpc_slice_refcount base; gpr_refcount refs; - grpc_resource_user *resource_user; + grpc_resource_user* resource_user; size_t size; } ru_slice_refcount; -static void ru_slice_ref(void *p) { - ru_slice_refcount *rc = (ru_slice_refcount *)p; +static void ru_slice_ref(void* p) { + ru_slice_refcount* rc = static_cast(p); gpr_ref(&rc->refs); } -static void ru_slice_unref(grpc_exec_ctx *exec_ctx, void *p) { - ru_slice_refcount *rc = (ru_slice_refcount *)p; +static void ru_slice_unref(void* p) { + ru_slice_refcount* rc = static_cast(p); if (gpr_unref(&rc->refs)) { - grpc_resource_user_free(exec_ctx, rc->resource_user, rc->size); + grpc_resource_user_free(rc->resource_user, rc->size); gpr_free(rc); } } @@ -397,10 +418,10 @@ static const grpc_slice_refcount_vtable ru_slice_vtable = { ru_slice_ref, ru_slice_unref, grpc_slice_default_eq_impl, grpc_slice_default_hash_impl}; -static grpc_slice ru_slice_create(grpc_resource_user *resource_user, +static grpc_slice ru_slice_create(grpc_resource_user* resource_user, size_t size) { - ru_slice_refcount *rc = - (ru_slice_refcount *)gpr_malloc(sizeof(ru_slice_refcount) + size); + ru_slice_refcount* rc = static_cast( + gpr_malloc(sizeof(ru_slice_refcount) + size)); rc->base.vtable = &ru_slice_vtable; rc->base.sub_refcount = &rc->base; gpr_ref_init(&rc->refs, 1); @@ -408,7 +429,7 @@ static grpc_slice ru_slice_create(grpc_resource_user *resource_user, rc->size = size; grpc_slice slice; slice.refcount = &rc->base; - slice.data.refcounted.bytes = (uint8_t *)(rc + 
1); + slice.data.refcounted.bytes = reinterpret_cast(rc + 1); slice.data.refcounted.length = size; return slice; } @@ -418,61 +439,57 @@ static grpc_slice ru_slice_create(grpc_resource_user *resource_user, * the combiner */ -static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { - grpc_resource_user *resource_user = (grpc_resource_user *)ru; +static void ru_allocate(void* ru, grpc_error* error) { + grpc_resource_user* resource_user = static_cast(ru); if (rulist_empty(resource_user->resource_quota, GRPC_RULIST_AWAITING_ALLOCATION)) { - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION); } -static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru, - grpc_error *error) { - grpc_resource_user *resource_user = (grpc_resource_user *)ru; +static void ru_add_to_free_pool(void* ru, grpc_error* error) { + grpc_resource_user* resource_user = static_cast(ru); if (!rulist_empty(resource_user->resource_quota, GRPC_RULIST_AWAITING_ALLOCATION) && rulist_empty(resource_user->resource_quota, GRPC_RULIST_NON_EMPTY_FREE_POOL)) { - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL); } -static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, +static bool ru_post_reclaimer(grpc_resource_user* resource_user, bool destructive) { - grpc_closure *closure = resource_user->new_reclaimers[destructive]; - GPR_ASSERT(closure != NULL); - resource_user->new_reclaimers[destructive] = NULL; - GPR_ASSERT(resource_user->reclaimers[destructive] == NULL); + grpc_closure* closure = resource_user->new_reclaimers[destructive]; + GPR_ASSERT(closure != nullptr); + resource_user->new_reclaimers[destructive] = nullptr; + GPR_ASSERT(resource_user->reclaimers[destructive] == nullptr); if (gpr_atm_acq_load(&resource_user->shutdown) > 0) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CANCELLED); return false; } resource_user->reclaimers[destructive] = closure; return true; } -static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, - grpc_error *error) { - grpc_resource_user *resource_user = (grpc_resource_user *)ru; - if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return; +static void ru_post_benign_reclaimer(void* ru, grpc_error* error) { + grpc_resource_user* resource_user = static_cast(ru); + if (!ru_post_reclaimer(resource_user, false)) return; if (!rulist_empty(resource_user->resource_quota, GRPC_RULIST_AWAITING_ALLOCATION) && rulist_empty(resource_user->resource_quota, GRPC_RULIST_NON_EMPTY_FREE_POOL) && rulist_empty(resource_user->resource_quota, GRPC_RULIST_RECLAIMER_BENIGN)) { - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_BENIGN); } -static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, - grpc_error *error) { - grpc_resource_user *resource_user = (grpc_resource_user *)ru; - if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return; +static void ru_post_destructive_reclaimer(void* ru, grpc_error* error) { + grpc_resource_user* resource_user = static_cast(ru); + if (!ru_post_reclaimer(resource_user, true)) return; if (!rulist_empty(resource_user->resource_quota, GRPC_RULIST_AWAITING_ALLOCATION) && 
rulist_empty(resource_user->resource_quota, @@ -481,47 +498,50 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, GRPC_RULIST_RECLAIMER_BENIGN) && rulist_empty(resource_user->resource_quota, GRPC_RULIST_RECLAIMER_DESTRUCTIVE)) { - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE); } -static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { - grpc_resource_user *resource_user = (grpc_resource_user *)ru; - GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0], - GRPC_ERROR_CANCELLED); - GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1], - GRPC_ERROR_CANCELLED); - resource_user->reclaimers[0] = NULL; - resource_user->reclaimers[1] = NULL; +static void ru_shutdown(void* ru, grpc_error* error) { + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, "RU shutdown %p", ru); + } + grpc_resource_user* resource_user = static_cast(ru); + gpr_mu_lock(&resource_user->mu); + GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED); + resource_user->reclaimers[0] = nullptr; + resource_user->reclaimers[1] = nullptr; rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN); rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE); + if (resource_user->allocating) { + rq_step_sched(resource_user->resource_quota); + } + gpr_mu_unlock(&resource_user->mu); } -static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { - grpc_resource_user *resource_user = (grpc_resource_user *)ru; +static void ru_destroy(void* ru, grpc_error* error) { + grpc_resource_user* resource_user = static_cast(ru); GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0); for (int i = 0; i < GRPC_RULIST_COUNT; i++) { - rulist_remove(resource_user, (grpc_rulist)i); + rulist_remove(resource_user, static_cast(i)); } - GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0], - GRPC_ERROR_CANCELLED); - GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1], - GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED); if (resource_user->free_pool != 0) { resource_user->resource_quota->free_pool += resource_user->free_pool; - rq_step_sched(exec_ctx, resource_user->resource_quota); + rq_step_sched(resource_user->resource_quota); } - grpc_resource_quota_unref_internal(exec_ctx, resource_user->resource_quota); + grpc_resource_quota_unref_internal(resource_user->resource_quota); gpr_mu_destroy(&resource_user->mu); gpr_free(resource_user->name); gpr_free(resource_user); } -static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_resource_user_slice_allocator *slice_allocator = - (grpc_resource_user_slice_allocator *)arg; +static void ru_allocated_slices(void* arg, grpc_error* error) { + grpc_resource_user_slice_allocator* slice_allocator = + static_cast(arg); if (error == GRPC_ERROR_NONE) { for (size_t i = 0; i < slice_allocator->count; i++) { grpc_slice_buffer_add_indexed( @@ -529,7 +549,7 @@ static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg, slice_allocator->length)); } } - GRPC_CLOSURE_RUN(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_RUN(&slice_allocator->on_done, GRPC_ERROR_REF(error)); } 
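The reclaimer plumbing above (ru_post_reclaimer, ru_shutdown, ru_destroy) keeps the same contract as before, only without the explicit exec_ctx parameter: a resource user may have at most one benign and one destructive reclaimer posted, a posted reclaimer is cancelled with GRPC_ERROR_CANCELLED on shutdown, and a running reclaimer must call grpc_resource_user_finish_reclamation when done. A minimal consumer-side sketch under the new signatures; the my_cache names are illustrative. The resource-quota hunks continue below.

// Sketch: posting a benign reclaimer with the exec_ctx-free signatures.
typedef struct {
  grpc_resource_user* resource_user;
  grpc_closure reclaimer_closure;
} my_cache;

static void my_cache_benign_reclaim(void* arg, grpc_error* error) {
  my_cache* cache = static_cast<my_cache*>(arg);
  if (error == GRPC_ERROR_CANCELLED) return;  // resource user is shutting down
  // ... drop whatever cached buffers can be released (hypothetical) ...
  grpc_resource_user_finish_reclamation(cache->resource_user);
  // Re-arm so the quota may ask again later; the previously posted slot was
  // cleared by the quota before this closure ran.
  GRPC_CLOSURE_INIT(&cache->reclaimer_closure, my_cache_benign_reclaim, cache,
                    grpc_schedule_on_exec_ctx);
  grpc_resource_user_post_reclaimer(cache->resource_user,
                                    /*destructive=*/false,
                                    &cache->reclaimer_closure);
}

static void my_cache_arm_reclaimer(my_cache* cache) {
  GRPC_CLOSURE_INIT(&cache->reclaimer_closure, my_cache_benign_reclaim, cache,
                    grpc_schedule_on_exec_ctx);
  grpc_resource_user_post_reclaimer(cache->resource_user,
                                    /*destructive=*/false,
                                    &cache->reclaimer_closure);
}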
/******************************************************************************* @@ -539,27 +559,26 @@ static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg, typedef struct { int64_t size; - grpc_resource_quota *resource_quota; + grpc_resource_quota* resource_quota; grpc_closure closure; } rq_resize_args; -static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) { - rq_resize_args *a = (rq_resize_args *)args; +static void rq_resize(void* args, grpc_error* error) { + rq_resize_args* a = static_cast(args); int64_t delta = a->size - a->resource_quota->size; a->resource_quota->size += delta; a->resource_quota->free_pool += delta; rq_update_estimate(a->resource_quota); - rq_step_sched(exec_ctx, a->resource_quota); - grpc_resource_quota_unref_internal(exec_ctx, a->resource_quota); + rq_step_sched(a->resource_quota); + grpc_resource_quota_unref_internal(a->resource_quota); gpr_free(a); } -static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq, - grpc_error *error) { - grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq; +static void rq_reclamation_done(void* rq, grpc_error* error) { + grpc_resource_quota* resource_quota = static_cast(rq); resource_quota->reclaiming = false; - rq_step_sched(exec_ctx, resource_quota); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + rq_step_sched(resource_quota); + grpc_resource_quota_unref_internal(resource_quota); } /******************************************************************************* @@ -567,9 +586,9 @@ static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq, */ /* Public API */ -grpc_resource_quota *grpc_resource_quota_create(const char *name) { - grpc_resource_quota *resource_quota = - (grpc_resource_quota *)gpr_malloc(sizeof(*resource_quota)); +grpc_resource_quota* grpc_resource_quota_create(const char* name) { + grpc_resource_quota* resource_quota = + static_cast(gpr_malloc(sizeof(*resource_quota))); gpr_ref_init(&resource_quota->refs, 1); resource_quota->combiner = grpc_combiner_create(); resource_quota->free_pool = INT64_MAX; @@ -578,7 +597,7 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) { resource_quota->step_scheduled = false; resource_quota->reclaiming = false; gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0); - if (name != NULL) { + if (name != nullptr) { resource_quota->name = gpr_strdup(name); } else { gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR, @@ -590,94 +609,93 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) { rq_reclamation_done, resource_quota, grpc_combiner_scheduler(resource_quota->combiner)); for (int i = 0; i < GRPC_RULIST_COUNT; i++) { - resource_quota->roots[i] = NULL; + resource_quota->roots[i] = nullptr; } return resource_quota; } -void grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota) { +void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) { if (gpr_unref(&resource_quota->refs)) { - GRPC_COMBINER_UNREF(exec_ctx, resource_quota->combiner, "resource_quota"); + GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota"); gpr_free(resource_quota->name); gpr_free(resource_quota); } } /* Public API */ -void grpc_resource_quota_unref(grpc_resource_quota *resource_quota) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_resource_quota_unref_internal(&exec_ctx, resource_quota); - grpc_exec_ctx_finish(&exec_ctx); +void grpc_resource_quota_unref(grpc_resource_quota* resource_quota) { + 
grpc_core::ExecCtx exec_ctx; + grpc_resource_quota_unref_internal(resource_quota); } -grpc_resource_quota *grpc_resource_quota_ref_internal( - grpc_resource_quota *resource_quota) { +grpc_resource_quota* grpc_resource_quota_ref_internal( + grpc_resource_quota* resource_quota) { gpr_ref(&resource_quota->refs); return resource_quota; } /* Public API */ -void grpc_resource_quota_ref(grpc_resource_quota *resource_quota) { +void grpc_resource_quota_ref(grpc_resource_quota* resource_quota) { grpc_resource_quota_ref_internal(resource_quota); } double grpc_resource_quota_get_memory_pressure( - grpc_resource_quota *resource_quota) { - return ((double)(gpr_atm_no_barrier_load( + grpc_resource_quota* resource_quota) { + return (static_cast(gpr_atm_no_barrier_load( &resource_quota->memory_usage_estimation))) / - ((double)MEMORY_USAGE_ESTIMATION_MAX); + (static_cast(MEMORY_USAGE_ESTIMATION_MAX)); } /* Public API */ -void grpc_resource_quota_resize(grpc_resource_quota *resource_quota, +void grpc_resource_quota_resize(grpc_resource_quota* resource_quota, size_t size) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - rq_resize_args *a = (rq_resize_args *)gpr_malloc(sizeof(*a)); + grpc_core::ExecCtx exec_ctx; + rq_resize_args* a = static_cast(gpr_malloc(sizeof(*a))); a->resource_quota = grpc_resource_quota_ref_internal(resource_quota); - a->size = (int64_t)size; + a->size = static_cast(size); gpr_atm_no_barrier_store(&resource_quota->last_size, (gpr_atm)GPR_MIN((size_t)GPR_ATM_MAX, size)); GRPC_CLOSURE_INIT(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(&exec_ctx, &a->closure, GRPC_ERROR_NONE); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CLOSURE_SCHED(&a->closure, GRPC_ERROR_NONE); } -size_t grpc_resource_quota_peek_size(grpc_resource_quota *resource_quota) { - return (size_t)gpr_atm_no_barrier_load(&resource_quota->last_size); +size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota) { + return static_cast( + gpr_atm_no_barrier_load(&resource_quota->last_size)); } /******************************************************************************* * grpc_resource_user channel args api */ -grpc_resource_quota *grpc_resource_quota_from_channel_args( - const grpc_channel_args *channel_args) { +grpc_resource_quota* grpc_resource_quota_from_channel_args( + const grpc_channel_args* channel_args) { for (size_t i = 0; i < channel_args->num_args; i++) { if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { if (channel_args->args[i].type == GRPC_ARG_POINTER) { return grpc_resource_quota_ref_internal( - (grpc_resource_quota *)channel_args->args[i].value.pointer.p); + static_cast( + channel_args->args[i].value.pointer.p)); } else { gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer"); } } } - return grpc_resource_quota_create(NULL); + return grpc_resource_quota_create(nullptr); } -static void *rq_copy(void *rq) { - grpc_resource_quota_ref((grpc_resource_quota *)rq); +static void* rq_copy(void* rq) { + grpc_resource_quota_ref(static_cast(rq)); return rq; } -static void rq_destroy(grpc_exec_ctx *exec_ctx, void *rq) { - grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota *)rq); +static void rq_destroy(void* rq) { + grpc_resource_quota_unref_internal(static_cast(rq)); } -static int rq_cmp(void *a, void *b) { return GPR_ICMP(a, b); } +static int rq_cmp(void* a, void* b) { return GPR_ICMP(a, b); } -const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) { +const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void) { 
static const grpc_arg_pointer_vtable vtable = {rq_copy, rq_destroy, rq_cmp}; return &vtable; } @@ -686,10 +704,10 @@ const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) { * grpc_resource_user api */ -grpc_resource_user *grpc_resource_user_create( - grpc_resource_quota *resource_quota, const char *name) { - grpc_resource_user *resource_user = - (grpc_resource_user *)gpr_malloc(sizeof(*resource_user)); +grpc_resource_user* grpc_resource_user_create( + grpc_resource_quota* resource_quota, const char* name) { + grpc_resource_user* resource_user = + static_cast(gpr_malloc(sizeof(*resource_user))); resource_user->resource_quota = grpc_resource_quota_ref_internal(resource_quota); GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate, @@ -713,14 +731,15 @@ grpc_resource_user *grpc_resource_user_create( grpc_closure_list_init(&resource_user->on_allocated); resource_user->allocating = false; resource_user->added_to_free_pool = false; - resource_user->reclaimers[0] = NULL; - resource_user->reclaimers[1] = NULL; - resource_user->new_reclaimers[0] = NULL; - resource_user->new_reclaimers[1] = NULL; + resource_user->reclaimers[0] = nullptr; + resource_user->reclaimers[1] = nullptr; + resource_user->new_reclaimers[0] = nullptr; + resource_user->new_reclaimers[1] = nullptr; + resource_user->outstanding_allocations = 0; for (int i = 0; i < GRPC_RULIST_COUNT; i++) { - resource_user->links[i].next = resource_user->links[i].prev = NULL; + resource_user->links[i].next = resource_user->links[i].prev = nullptr; } - if (name != NULL) { + if (name != nullptr) { resource_user->name = gpr_strdup(name); } else { gpr_asprintf(&resource_user->name, "anonymous_resource_user_%" PRIxPTR, @@ -729,41 +748,36 @@ grpc_resource_user *grpc_resource_user_create( return resource_user; } -grpc_resource_quota *grpc_resource_user_quota( - grpc_resource_user *resource_user) { +grpc_resource_quota* grpc_resource_user_quota( + grpc_resource_user* resource_user) { return resource_user->resource_quota; } -static void ru_ref_by(grpc_resource_user *resource_user, gpr_atm amount) { +static void ru_ref_by(grpc_resource_user* resource_user, gpr_atm amount) { GPR_ASSERT(amount > 0); GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount) != 0); } -static void ru_unref_by(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, gpr_atm amount) { +static void ru_unref_by(grpc_resource_user* resource_user, gpr_atm amount) { GPR_ASSERT(amount > 0); gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount); GPR_ASSERT(old >= amount); if (old == amount) { - GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->destroy_closure, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&resource_user->destroy_closure, GRPC_ERROR_NONE); } } -void grpc_resource_user_ref(grpc_resource_user *resource_user) { +void grpc_resource_user_ref(grpc_resource_user* resource_user) { ru_ref_by(resource_user, 1); } -void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user) { - ru_unref_by(exec_ctx, resource_user, 1); +void grpc_resource_user_unref(grpc_resource_user* resource_user) { + ru_unref_by(resource_user, 1); } -void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user) { +void grpc_resource_user_shutdown(grpc_resource_user* resource_user) { if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) { GRPC_CLOSURE_SCHED( - exec_ctx, GRPC_CLOSURE_CREATE( ru_shutdown, resource_user, grpc_combiner_scheduler(resource_user->resource_quota->combiner)), @@ -771,14 
+785,14 @@ void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, } } -void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, size_t size, - grpc_closure *optional_on_done) { +void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size, + grpc_closure* optional_on_done) { gpr_mu_lock(&resource_user->mu); - ru_ref_by(resource_user, (gpr_atm)size); - resource_user->free_pool -= (int64_t)size; - if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { - gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64, + ru_ref_by(resource_user, static_cast(size)); + resource_user->free_pool -= static_cast(size); + resource_user->outstanding_allocations += static_cast(size); + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64, resource_user->resource_quota->name, resource_user->name, size, resource_user->free_pool); } @@ -787,22 +801,21 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx, GRPC_ERROR_NONE); if (!resource_user->allocating) { resource_user->allocating = true; - GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->allocate_closure, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&resource_user->allocate_closure, GRPC_ERROR_NONE); } } else { - GRPC_CLOSURE_SCHED(exec_ctx, optional_on_done, GRPC_ERROR_NONE); + resource_user->outstanding_allocations -= static_cast(size); + GRPC_CLOSURE_SCHED(optional_on_done, GRPC_ERROR_NONE); } gpr_mu_unlock(&resource_user->mu); } -void grpc_resource_user_free(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, size_t size) { +void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) { gpr_mu_lock(&resource_user->mu); bool was_zero_or_negative = resource_user->free_pool <= 0; - resource_user->free_pool += (int64_t)size; - if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { - gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64, + resource_user->free_pool += static_cast(size); + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64, resource_user->resource_quota->name, resource_user->name, size, resource_user->free_pool); } @@ -810,38 +823,35 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx, if (is_bigger_than_zero && was_zero_or_negative && !resource_user->added_to_free_pool) { resource_user->added_to_free_pool = true; - GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->add_to_free_pool_closure, + GRPC_CLOSURE_SCHED(&resource_user->add_to_free_pool_closure, GRPC_ERROR_NONE); } gpr_mu_unlock(&resource_user->mu); - ru_unref_by(exec_ctx, resource_user, (gpr_atm)size); + ru_unref_by(resource_user, static_cast(size)); } -void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, +void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user, bool destructive, - grpc_closure *closure) { - GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL); + grpc_closure* closure) { + GPR_ASSERT(resource_user->new_reclaimers[destructive] == nullptr); resource_user->new_reclaimers[destructive] = closure; - GRPC_CLOSURE_SCHED(exec_ctx, - &resource_user->post_reclaimer_closure[destructive], + GRPC_CLOSURE_SCHED(&resource_user->post_reclaimer_closure[destructive], GRPC_ERROR_NONE); } -void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user) { - if (GRPC_TRACER_ON(grpc_resource_quota_trace)) { - gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete", 
+void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) { + if (grpc_resource_quota_trace.enabled()) { + gpr_log(GPR_INFO, "RQ %s %s: reclamation complete", resource_user->resource_quota->name, resource_user->name); } GRPC_CLOSURE_SCHED( - exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure, + &resource_user->resource_quota->rq_reclamation_done_closure, GRPC_ERROR_NONE); } void grpc_resource_user_slice_allocator_init( - grpc_resource_user_slice_allocator *slice_allocator, - grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) { + grpc_resource_user_slice_allocator* slice_allocator, + grpc_resource_user* resource_user, grpc_iomgr_cb_func cb, void* p) { GRPC_CLOSURE_INIT(&slice_allocator->on_allocated, ru_allocated_slices, slice_allocator, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&slice_allocator->on_done, cb, p, @@ -850,19 +860,11 @@ void grpc_resource_user_slice_allocator_init( } void grpc_resource_user_alloc_slices( - grpc_exec_ctx *exec_ctx, - grpc_resource_user_slice_allocator *slice_allocator, size_t length, - size_t count, grpc_slice_buffer *dest) { + grpc_resource_user_slice_allocator* slice_allocator, size_t length, + size_t count, grpc_slice_buffer* dest) { slice_allocator->length = length; slice_allocator->count = count; slice_allocator->dest = dest; - grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user, - count * length, &slice_allocator->on_allocated); -} - -grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, - size_t size) { - grpc_resource_user_alloc(exec_ctx, resource_user, size, NULL); - return ru_slice_create(resource_user, size); + grpc_resource_user_alloc(slice_allocator->resource_user, count * length, + &slice_allocator->on_allocated); } diff --git a/Sources/CgRPC/src/core/lib/iomgr/resource_quota.h b/Sources/CgRPC/src/core/lib/iomgr/resource_quota.h index d66f9ae77..937daf872 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/resource_quota.h +++ b/Sources/CgRPC/src/core/lib/iomgr/resource_quota.h @@ -19,10 +19,12 @@ #ifndef GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H #define GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H +#include + #include #include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/closure.h" /** \file Tracks resource usage against a pool. 
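The resource_quota.h hunks continue below and mirror these signature changes. The public entry points themselves (grpc_resource_quota_create/resize/unref and the GRPC_ARG_RESOURCE_QUOTA channel arg handled by grpc_resource_quota_from_channel_args above) are unchanged, so existing callers keep working; here is a short usage sketch for orientation, with the target, quota size, and function name chosen for illustration only.

// Sketch: create a quota, cap it, and attach it to a channel through the
// GRPC_ARG_RESOURCE_QUOTA pointer arg. The arg is copied/refed via
// grpc_resource_quota_arg_vtable(), so the local ref can be dropped.
#include <grpc/grpc.h>

grpc_channel* create_bounded_channel(const char* target) {
  grpc_resource_quota* quota = grpc_resource_quota_create("swiftgrpc_quota");
  grpc_resource_quota_resize(quota, 32 * 1024 * 1024);  // 32 MiB budget

  grpc_arg arg;
  arg.type = GRPC_ARG_POINTER;
  arg.key = const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA);
  arg.value.pointer.p = quota;
  arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();
  grpc_channel_args args = {1, &arg};

  grpc_channel* channel = grpc_insecure_channel_create(target, &args, nullptr);
  grpc_resource_quota_unref(quota);  // the channel args hold their own ref
  return channel;
}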
@@ -61,60 +63,53 @@ maintain lists of users (which users arrange to leave before they are destroyed) */ -extern grpc_tracer_flag grpc_resource_quota_trace; +extern grpc_core::TraceFlag grpc_resource_quota_trace; -grpc_resource_quota *grpc_resource_quota_ref_internal( - grpc_resource_quota *resource_quota); -void grpc_resource_quota_unref_internal(grpc_exec_ctx *exec_ctx, - grpc_resource_quota *resource_quota); -grpc_resource_quota *grpc_resource_quota_from_channel_args( - const grpc_channel_args *channel_args); +grpc_resource_quota* grpc_resource_quota_ref_internal( + grpc_resource_quota* resource_quota); +void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota); +grpc_resource_quota* grpc_resource_quota_from_channel_args( + const grpc_channel_args* channel_args); /* Return a number indicating current memory pressure: 0.0 ==> no memory usage 1.0 ==> maximum memory usage */ double grpc_resource_quota_get_memory_pressure( - grpc_resource_quota *resource_quota); + grpc_resource_quota* resource_quota); -size_t grpc_resource_quota_peek_size(grpc_resource_quota *resource_quota); +size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota); typedef struct grpc_resource_user grpc_resource_user; -grpc_resource_user *grpc_resource_user_create( - grpc_resource_quota *resource_quota, const char *name); +grpc_resource_user* grpc_resource_user_create( + grpc_resource_quota* resource_quota, const char* name); /* Returns a borrowed reference to the underlying resource quota for this resource user. */ -grpc_resource_quota *grpc_resource_user_quota( - grpc_resource_user *resource_user); +grpc_resource_quota* grpc_resource_user_quota( + grpc_resource_user* resource_user); -void grpc_resource_user_ref(grpc_resource_user *resource_user); -void grpc_resource_user_unref(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user); -void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user); +void grpc_resource_user_ref(grpc_resource_user* resource_user); +void grpc_resource_user_unref(grpc_resource_user* resource_user); +void grpc_resource_user_shutdown(grpc_resource_user* resource_user); /* Allocate from the resource user (and its quota). If optional_on_done is NULL, then allocate immediately. This may push the quota over-limit, at which point reclamation will kick in. If optional_on_done is non-NULL, it will be scheduled when the allocation has been granted by the quota. */ -void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, size_t size, - grpc_closure *optional_on_done); +void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size, + grpc_closure* optional_on_done); /* Release memory back to the quota */ -void grpc_resource_user_free(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, size_t size); +void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size); /* Post a memory reclaimer to the resource user. Only one benign and one destructive reclaimer can be posted at once. When executed, the reclaimer MUST call grpc_resource_user_finish_reclamation before it completes, to return control to the resource quota. 
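The comment above pins down the reclaimer contract: at most one benign and one destructive reclaimer may be posted at a time, the benign one is tried first, and a reclaimer must report completion before the quota starts another reclamation round. A hedged standalone illustration of that contract follows; ToyReclaimerSlots, RunReclamation, and finish are invented for the sketch, whereas the real API goes through grpc_closure and grpc_resource_user_finish_reclamation.

    #include <functional>
    #include <iostream>

    struct ToyReclaimerSlots {
      std::function<void(std::function<void()>)> benign;       // at most one posted
      std::function<void(std::function<void()>)> destructive;  // at most one posted
      bool reclaiming = false;

      void RunReclamation() {
        if (reclaiming) return;  // only one reclamation in flight at a time
        std::function<void()> finish = [this] { reclaiming = false; };
        auto run = [&](std::function<void(std::function<void()>)>& slot) {
          auto reclaimer = std::move(slot);
          slot = nullptr;
          reclaiming = true;
          reclaimer(finish);  // the reclaimer must call finish() when it is done
        };
        if (benign) {
          run(benign);
        } else if (destructive) {
          run(destructive);
        }
      }
    };

    int main() {
      ToyReclaimerSlots slots;
      slots.benign = [](std::function<void()> finish) {
        std::cout << "benign reclaimer: dropping caches\n";
        finish();  // analogous to calling grpc_resource_user_finish_reclamation
      };
      slots.RunReclamation();
    }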
*/ -void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, - bool destructive, grpc_closure *closure); +void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user, + bool destructive, grpc_closure* closure); /* Finish a reclamation step */ -void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user); +void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user); /* Helper to allocate slices from a resource user */ typedef struct grpc_resource_user_slice_allocator { @@ -127,27 +122,21 @@ typedef struct grpc_resource_user_slice_allocator { /* Number of slices to allocate on the current request */ size_t count; /* Destination for slices to allocate on the current request */ - grpc_slice_buffer *dest; + grpc_slice_buffer* dest; /* Parent resource user */ - grpc_resource_user *resource_user; + grpc_resource_user* resource_user; } grpc_resource_user_slice_allocator; /* Initialize a slice allocator. When an allocation is completed, calls \a cb with arg \p. */ void grpc_resource_user_slice_allocator_init( - grpc_resource_user_slice_allocator *slice_allocator, - grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p); + grpc_resource_user_slice_allocator* slice_allocator, + grpc_resource_user* resource_user, grpc_iomgr_cb_func cb, void* p); /* Allocate \a count slices of length \a length into \a dest. Only one request can be outstanding at a time. */ void grpc_resource_user_alloc_slices( - grpc_exec_ctx *exec_ctx, - grpc_resource_user_slice_allocator *slice_allocator, size_t length, - size_t count, grpc_slice_buffer *dest); - -/* Allocate one slice of length \a size synchronously. */ -grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user, - size_t size); + grpc_resource_user_slice_allocator* slice_allocator, size_t length, + size_t count, grpc_slice_buffer* dest); #endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/sockaddr.h b/Sources/CgRPC/src/core/lib/iomgr/sockaddr.h index 206d596cc..5edf735cd 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/sockaddr.h +++ b/Sources/CgRPC/src/core/lib/iomgr/sockaddr.h @@ -23,18 +23,10 @@ #ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_H #define GRPC_CORE_LIB_IOMGR_SOCKADDR_H -#include "src/core/lib/iomgr/port.h" +#include -#ifdef GRPC_UV -#include -#endif - -#ifdef GPR_WINDOWS -#include "src/core/lib/iomgr/sockaddr_windows.h" -#endif - -#ifdef GRPC_POSIX_SOCKETADDR +#include "src/core/lib/iomgr/sockaddr_custom.h" #include "src/core/lib/iomgr/sockaddr_posix.h" -#endif +#include "src/core/lib/iomgr/sockaddr_windows.h" #endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_custom.h b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_custom.h new file mode 100644 index 000000000..d85cc504d --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_custom.h @@ -0,0 +1,54 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_CUSTOM_H +#define GRPC_CORE_LIB_IOMGR_SOCKADDR_CUSTOM_H + +#include + +#include +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_UV + +#include + +// TODO(kpayson) It would be nice to abstract this so we don't +// depend on anything uv specific +typedef struct sockaddr grpc_sockaddr; +typedef struct sockaddr_in grpc_sockaddr_in; +typedef struct in_addr grpc_in_addr; +typedef struct sockaddr_in6 grpc_sockaddr_in6; +typedef struct in6_addr grpc_in6_addr; + +#define GRPC_INET_ADDRSTRLEN INET_ADDRSTRLEN +#define GRPC_INET6_ADDRSTRLEN INET6_ADDRSTRLEN + +#define GRPC_SOCK_STREAM SOCK_STREAM +#define GRPC_SOCK_DGRAM SOCK_DGRAM + +#define GRPC_AF_UNSPEC AF_UNSPEC +#define GRPC_AF_UNIX AF_UNIX +#define GRPC_AF_INET AF_INET +#define GRPC_AF_INET6 AF_INET6 + +#define GRPC_AI_PASSIVE AI_PASSIVE + +#endif // GRPC_UV + +#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_CUSTOM_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_posix.h b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_posix.h index 22d57ca6b..5b18bbc46 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_posix.h +++ b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_posix.h @@ -19,6 +19,11 @@ #ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_POSIX_H #define GRPC_CORE_LIB_IOMGR_SOCKADDR_POSIX_H +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_POSIX_SOCKET #include #include #include @@ -26,4 +31,25 @@ #include #include +typedef struct sockaddr grpc_sockaddr; +typedef struct sockaddr_in grpc_sockaddr_in; +typedef struct in_addr grpc_in_addr; +typedef struct sockaddr_in6 grpc_sockaddr_in6; +typedef struct in6_addr grpc_in6_addr; + +#define GRPC_INET_ADDRSTRLEN INET_ADDRSTRLEN +#define GRPC_INET6_ADDRSTRLEN INET6_ADDRSTRLEN + +#define GRPC_SOCK_STREAM SOCK_STREAM +#define GRPC_SOCK_DGRAM SOCK_DGRAM + +#define GRPC_AF_UNSPEC AF_UNSPEC +#define GRPC_AF_UNIX AF_UNIX +#define GRPC_AF_INET AF_INET +#define GRPC_AF_INET6 AF_INET6 + +#define GRPC_AI_PASSIVE AI_PASSIVE + +#endif + #endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_POSIX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.c b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.c deleted file mode 100644 index 3f4145d10..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.c +++ /dev/null @@ -1,262 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/iomgr/sockaddr_utils.h" - -#include -#include - -#include -#include -#include -#include -#include - -#include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/socket_utils.h" -#include "src/core/lib/iomgr/unix_sockets_posix.h" -#include "src/core/lib/support/string.h" - -static const uint8_t kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0xff, 0xff}; - -int grpc_sockaddr_is_v4mapped(const grpc_resolved_address *resolved_addr, - grpc_resolved_address *resolved_addr4_out) { - GPR_ASSERT(resolved_addr != resolved_addr4_out); - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; - struct sockaddr_in *addr4_out = - resolved_addr4_out == NULL - ? NULL - : (struct sockaddr_in *)resolved_addr4_out->addr; - if (addr->sa_family == AF_INET6) { - const struct sockaddr_in6 *addr6 = (const struct sockaddr_in6 *)addr; - if (memcmp(addr6->sin6_addr.s6_addr, kV4MappedPrefix, - sizeof(kV4MappedPrefix)) == 0) { - if (resolved_addr4_out != NULL) { - /* Normalize ::ffff:0.0.0.0/96 to IPv4. */ - memset(resolved_addr4_out, 0, sizeof(*resolved_addr4_out)); - addr4_out->sin_family = AF_INET; - /* s6_addr32 would be nice, but it's non-standard. */ - memcpy(&addr4_out->sin_addr, &addr6->sin6_addr.s6_addr[12], 4); - addr4_out->sin_port = addr6->sin6_port; - resolved_addr4_out->len = sizeof(struct sockaddr_in); - } - return 1; - } - } - return 0; -} - -int grpc_sockaddr_to_v4mapped(const grpc_resolved_address *resolved_addr, - grpc_resolved_address *resolved_addr6_out) { - GPR_ASSERT(resolved_addr != resolved_addr6_out); - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; - struct sockaddr_in6 *addr6_out = - (struct sockaddr_in6 *)resolved_addr6_out->addr; - if (addr->sa_family == AF_INET) { - const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr; - memset(resolved_addr6_out, 0, sizeof(*resolved_addr6_out)); - addr6_out->sin6_family = AF_INET6; - memcpy(&addr6_out->sin6_addr.s6_addr[0], kV4MappedPrefix, 12); - memcpy(&addr6_out->sin6_addr.s6_addr[12], &addr4->sin_addr, 4); - addr6_out->sin6_port = addr4->sin_port; - resolved_addr6_out->len = sizeof(struct sockaddr_in6); - return 1; - } - return 0; -} - -int grpc_sockaddr_is_wildcard(const grpc_resolved_address *resolved_addr, - int *port_out) { - const struct sockaddr *addr; - grpc_resolved_address addr4_normalized; - if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr4_normalized)) { - resolved_addr = &addr4_normalized; - } - addr = (const struct sockaddr *)resolved_addr->addr; - if (addr->sa_family == AF_INET) { - /* Check for 0.0.0.0 */ - const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr; - if (addr4->sin_addr.s_addr != 0) { - return 0; - } - *port_out = ntohs(addr4->sin_port); - return 1; - } else if (addr->sa_family == AF_INET6) { - /* Check for :: */ - const struct sockaddr_in6 *addr6 = (const struct sockaddr_in6 *)addr; - int i; - for (i = 0; i < 16; i++) { - if (addr6->sin6_addr.s6_addr[i] != 0) { - return 0; - } - } - *port_out = ntohs(addr6->sin6_port); - return 1; - } else { - return 0; - } -} - -void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address *wild4_out, - grpc_resolved_address *wild6_out) { - grpc_sockaddr_make_wildcard4(port, wild4_out); - grpc_sockaddr_make_wildcard6(port, wild6_out); -} - -void grpc_sockaddr_make_wildcard4(int port, - grpc_resolved_address *resolved_wild_out) { - struct sockaddr_in *wild_out = (struct sockaddr_in *)resolved_wild_out->addr; - GPR_ASSERT(port >= 0 && port < 65536); - 
memset(resolved_wild_out, 0, sizeof(*resolved_wild_out)); - wild_out->sin_family = AF_INET; - wild_out->sin_port = htons((uint16_t)port); - resolved_wild_out->len = sizeof(struct sockaddr_in); -} - -void grpc_sockaddr_make_wildcard6(int port, - grpc_resolved_address *resolved_wild_out) { - struct sockaddr_in6 *wild_out = - (struct sockaddr_in6 *)resolved_wild_out->addr; - GPR_ASSERT(port >= 0 && port < 65536); - memset(resolved_wild_out, 0, sizeof(*resolved_wild_out)); - wild_out->sin6_family = AF_INET6; - wild_out->sin6_port = htons((uint16_t)port); - resolved_wild_out->len = sizeof(struct sockaddr_in6); -} - -int grpc_sockaddr_to_string(char **out, - const grpc_resolved_address *resolved_addr, - int normalize) { - const struct sockaddr *addr; - const int save_errno = errno; - grpc_resolved_address addr_normalized; - char ntop_buf[INET6_ADDRSTRLEN]; - const void *ip = NULL; - int port; - uint32_t sin6_scope_id = 0; - int ret; - - *out = NULL; - if (normalize && grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) { - resolved_addr = &addr_normalized; - } - addr = (const struct sockaddr *)resolved_addr->addr; - if (addr->sa_family == AF_INET) { - const struct sockaddr_in *addr4 = (const struct sockaddr_in *)addr; - ip = &addr4->sin_addr; - port = ntohs(addr4->sin_port); - } else if (addr->sa_family == AF_INET6) { - const struct sockaddr_in6 *addr6 = (const struct sockaddr_in6 *)addr; - ip = &addr6->sin6_addr; - port = ntohs(addr6->sin6_port); - sin6_scope_id = addr6->sin6_scope_id; - } - if (ip != NULL && - grpc_inet_ntop(addr->sa_family, ip, ntop_buf, sizeof(ntop_buf)) != NULL) { - if (sin6_scope_id != 0) { - char *host_with_scope; - /* Enclose sin6_scope_id with the format defined in RFC 6784 section 2. */ - gpr_asprintf(&host_with_scope, "%s%%25%" PRIu32, ntop_buf, sin6_scope_id); - ret = gpr_join_host_port(out, host_with_scope, port); - gpr_free(host_with_scope); - } else { - ret = gpr_join_host_port(out, ntop_buf, port); - } - } else { - ret = gpr_asprintf(out, "(sockaddr family=%d)", addr->sa_family); - } - /* This is probably redundant, but we wouldn't want to log the wrong error. */ - errno = save_errno; - return ret; -} - -char *grpc_sockaddr_to_uri(const grpc_resolved_address *resolved_addr) { - grpc_resolved_address addr_normalized; - if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) { - resolved_addr = &addr_normalized; - } - const char *scheme = grpc_sockaddr_get_uri_scheme(resolved_addr); - if (scheme == NULL || strcmp("unix", scheme) == 0) { - return grpc_sockaddr_to_uri_unix_if_possible(resolved_addr); - } - char *path = NULL; - char *uri_str = NULL; - if (grpc_sockaddr_to_string(&path, resolved_addr, - false /* suppress errors */) && - scheme != NULL) { - gpr_asprintf(&uri_str, "%s:%s", scheme, path); - } - gpr_free(path); - return uri_str != NULL ? 
uri_str : NULL; -} - -const char *grpc_sockaddr_get_uri_scheme( - const grpc_resolved_address *resolved_addr) { - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; - switch (addr->sa_family) { - case AF_INET: - return "ipv4"; - case AF_INET6: - return "ipv6"; - case AF_UNIX: - return "unix"; - } - return NULL; -} - -int grpc_sockaddr_get_family(const grpc_resolved_address *resolved_addr) { - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; - return addr->sa_family; -} - -int grpc_sockaddr_get_port(const grpc_resolved_address *resolved_addr) { - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; - switch (addr->sa_family) { - case AF_INET: - return ntohs(((struct sockaddr_in *)addr)->sin_port); - case AF_INET6: - return ntohs(((struct sockaddr_in6 *)addr)->sin6_port); - default: - if (grpc_is_unix_socket(resolved_addr)) { - return 1; - } - gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port", - addr->sa_family); - return 0; - } -} - -int grpc_sockaddr_set_port(const grpc_resolved_address *resolved_addr, - int port) { - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; - switch (addr->sa_family) { - case AF_INET: - GPR_ASSERT(port >= 0 && port < 65536); - ((struct sockaddr_in *)addr)->sin_port = htons((uint16_t)port); - return 1; - case AF_INET6: - GPR_ASSERT(port >= 0 && port < 65536); - ((struct sockaddr_in6 *)addr)->sin6_port = htons((uint16_t)port); - return 1; - default: - gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port", - addr->sa_family); - return 0; - } -} diff --git a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.cc b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.cc new file mode 100644 index 000000000..1b66dceb1 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.cc @@ -0,0 +1,298 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/sockaddr_utils.h" + +#include +#include +#include + +#include +#include +#include + +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/socket_utils.h" +#include "src/core/lib/iomgr/unix_sockets_posix.h" + +static const uint8_t kV4MappedPrefix[] = {0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0xff, 0xff}; + +int grpc_sockaddr_is_v4mapped(const grpc_resolved_address* resolved_addr, + grpc_resolved_address* resolved_addr4_out) { + GPR_ASSERT(resolved_addr != resolved_addr4_out); + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); + grpc_sockaddr_in* addr4_out = + resolved_addr4_out == nullptr + ? 
nullptr + : reinterpret_cast(resolved_addr4_out->addr); + if (addr->sa_family == GRPC_AF_INET6) { + const grpc_sockaddr_in6* addr6 = + reinterpret_cast(addr); + if (memcmp(addr6->sin6_addr.s6_addr, kV4MappedPrefix, + sizeof(kV4MappedPrefix)) == 0) { + if (resolved_addr4_out != nullptr) { + /* Normalize ::ffff:0.0.0.0/96 to IPv4. */ + memset(resolved_addr4_out, 0, sizeof(*resolved_addr4_out)); + addr4_out->sin_family = GRPC_AF_INET; + /* s6_addr32 would be nice, but it's non-standard. */ + memcpy(&addr4_out->sin_addr, &addr6->sin6_addr.s6_addr[12], 4); + addr4_out->sin_port = addr6->sin6_port; + resolved_addr4_out->len = + static_cast(sizeof(grpc_sockaddr_in)); + } + return 1; + } + } + return 0; +} + +int grpc_sockaddr_to_v4mapped(const grpc_resolved_address* resolved_addr, + grpc_resolved_address* resolved_addr6_out) { + GPR_ASSERT(resolved_addr != resolved_addr6_out); + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); + grpc_sockaddr_in6* addr6_out = + reinterpret_cast(resolved_addr6_out->addr); + if (addr->sa_family == GRPC_AF_INET) { + const grpc_sockaddr_in* addr4 = + reinterpret_cast(addr); + memset(resolved_addr6_out, 0, sizeof(*resolved_addr6_out)); + addr6_out->sin6_family = GRPC_AF_INET6; + memcpy(&addr6_out->sin6_addr.s6_addr[0], kV4MappedPrefix, 12); + memcpy(&addr6_out->sin6_addr.s6_addr[12], &addr4->sin_addr, 4); + addr6_out->sin6_port = addr4->sin_port; + resolved_addr6_out->len = static_cast(sizeof(grpc_sockaddr_in6)); + return 1; + } + return 0; +} + +int grpc_sockaddr_is_wildcard(const grpc_resolved_address* resolved_addr, + int* port_out) { + const grpc_sockaddr* addr; + grpc_resolved_address addr4_normalized; + if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr4_normalized)) { + resolved_addr = &addr4_normalized; + } + addr = reinterpret_cast(resolved_addr->addr); + if (addr->sa_family == GRPC_AF_INET) { + /* Check for 0.0.0.0 */ + const grpc_sockaddr_in* addr4 = + reinterpret_cast(addr); + if (addr4->sin_addr.s_addr != 0) { + return 0; + } + *port_out = grpc_ntohs(addr4->sin_port); + return 1; + } else if (addr->sa_family == GRPC_AF_INET6) { + /* Check for :: */ + const grpc_sockaddr_in6* addr6 = + reinterpret_cast(addr); + int i; + for (i = 0; i < 16; i++) { + if (addr6->sin6_addr.s6_addr[i] != 0) { + return 0; + } + } + *port_out = grpc_ntohs(addr6->sin6_port); + return 1; + } else { + return 0; + } +} + +void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address* wild4_out, + grpc_resolved_address* wild6_out) { + grpc_sockaddr_make_wildcard4(port, wild4_out); + grpc_sockaddr_make_wildcard6(port, wild6_out); +} + +void grpc_sockaddr_make_wildcard4(int port, + grpc_resolved_address* resolved_wild_out) { + grpc_sockaddr_in* wild_out = + reinterpret_cast(resolved_wild_out->addr); + GPR_ASSERT(port >= 0 && port < 65536); + memset(resolved_wild_out, 0, sizeof(*resolved_wild_out)); + wild_out->sin_family = GRPC_AF_INET; + wild_out->sin_port = grpc_htons(static_cast(port)); + resolved_wild_out->len = static_cast(sizeof(grpc_sockaddr_in)); +} + +void grpc_sockaddr_make_wildcard6(int port, + grpc_resolved_address* resolved_wild_out) { + grpc_sockaddr_in6* wild_out = + reinterpret_cast(resolved_wild_out->addr); + GPR_ASSERT(port >= 0 && port < 65536); + memset(resolved_wild_out, 0, sizeof(*resolved_wild_out)); + wild_out->sin6_family = GRPC_AF_INET6; + wild_out->sin6_port = grpc_htons(static_cast(port)); + resolved_wild_out->len = static_cast(sizeof(grpc_sockaddr_in6)); +} + +int grpc_sockaddr_to_string(char** out, + const grpc_resolved_address* 
resolved_addr, + int normalize) { + const grpc_sockaddr* addr; + const int save_errno = errno; + grpc_resolved_address addr_normalized; + char ntop_buf[GRPC_INET6_ADDRSTRLEN]; + const void* ip = nullptr; + int port = 0; + uint32_t sin6_scope_id = 0; + int ret; + + *out = nullptr; + if (normalize && grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) { + resolved_addr = &addr_normalized; + } + addr = reinterpret_cast(resolved_addr->addr); + if (addr->sa_family == GRPC_AF_INET) { + const grpc_sockaddr_in* addr4 = + reinterpret_cast(addr); + ip = &addr4->sin_addr; + port = grpc_ntohs(addr4->sin_port); + } else if (addr->sa_family == GRPC_AF_INET6) { + const grpc_sockaddr_in6* addr6 = + reinterpret_cast(addr); + ip = &addr6->sin6_addr; + port = grpc_ntohs(addr6->sin6_port); + sin6_scope_id = addr6->sin6_scope_id; + } + if (ip != nullptr && grpc_inet_ntop(addr->sa_family, ip, ntop_buf, + sizeof(ntop_buf)) != nullptr) { + if (sin6_scope_id != 0) { + char* host_with_scope; + /* Enclose sin6_scope_id with the format defined in RFC 6784 section 2. */ + gpr_asprintf(&host_with_scope, "%s%%25%" PRIu32, ntop_buf, sin6_scope_id); + ret = gpr_join_host_port(out, host_with_scope, port); + gpr_free(host_with_scope); + } else { + ret = gpr_join_host_port(out, ntop_buf, port); + } + } else { + ret = gpr_asprintf(out, "(sockaddr family=%d)", addr->sa_family); + } + /* This is probably redundant, but we wouldn't want to log the wrong error. */ + errno = save_errno; + return ret; +} + +void grpc_string_to_sockaddr(grpc_resolved_address* out, char* addr, int port) { + grpc_sockaddr_in6* addr6 = (grpc_sockaddr_in6*)out->addr; + grpc_sockaddr_in* addr4 = (grpc_sockaddr_in*)out->addr; + + if (grpc_inet_pton(GRPC_AF_INET6, addr, &addr6->sin6_addr) == 1) { + addr6->sin6_family = GRPC_AF_INET6; + addr6->sin6_flowinfo = 0; + addr6->sin6_scope_id = 0; + out->len = sizeof(grpc_sockaddr_in6); + } else if (grpc_inet_pton(GRPC_AF_INET, addr, &addr4->sin_addr) == 1) { + addr4->sin_family = GRPC_AF_INET; + out->len = sizeof(grpc_sockaddr_in); + } else { + GPR_ASSERT(0); + } + grpc_sockaddr_set_port(out, port); +} + +char* grpc_sockaddr_to_uri(const grpc_resolved_address* resolved_addr) { + grpc_resolved_address addr_normalized; + if (grpc_sockaddr_is_v4mapped(resolved_addr, &addr_normalized)) { + resolved_addr = &addr_normalized; + } + const char* scheme = grpc_sockaddr_get_uri_scheme(resolved_addr); + if (scheme == nullptr || strcmp("unix", scheme) == 0) { + return grpc_sockaddr_to_uri_unix_if_possible(resolved_addr); + } + char* path = nullptr; + char* uri_str = nullptr; + if (grpc_sockaddr_to_string(&path, resolved_addr, + false /* suppress errors */) && + scheme != nullptr) { + gpr_asprintf(&uri_str, "%s:%s", scheme, path); + } + gpr_free(path); + return uri_str != nullptr ? 
uri_str : nullptr; +} + +const char* grpc_sockaddr_get_uri_scheme( + const grpc_resolved_address* resolved_addr) { + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); + switch (addr->sa_family) { + case GRPC_AF_INET: + return "ipv4"; + case GRPC_AF_INET6: + return "ipv6"; + case GRPC_AF_UNIX: + return "unix"; + } + return nullptr; +} + +int grpc_sockaddr_get_family(const grpc_resolved_address* resolved_addr) { + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); + return addr->sa_family; +} + +int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) { + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); + switch (addr->sa_family) { + case GRPC_AF_INET: + return grpc_ntohs(((grpc_sockaddr_in*)addr)->sin_port); + case GRPC_AF_INET6: + return grpc_ntohs(((grpc_sockaddr_in6*)addr)->sin6_port); + default: + if (grpc_is_unix_socket(resolved_addr)) { + return 1; + } + gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_get_port", + addr->sa_family); + return 0; + } +} + +int grpc_sockaddr_set_port(const grpc_resolved_address* resolved_addr, + int port) { + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); + switch (addr->sa_family) { + case GRPC_AF_INET: + GPR_ASSERT(port >= 0 && port < 65536); + ((grpc_sockaddr_in*)addr)->sin_port = + grpc_htons(static_cast(port)); + return 1; + case GRPC_AF_INET6: + GPR_ASSERT(port >= 0 && port < 65536); + ((grpc_sockaddr_in6*)addr)->sin6_port = + grpc_htons(static_cast(port)); + return 1; + default: + gpr_log(GPR_ERROR, "Unknown socket family %d in grpc_sockaddr_set_port", + addr->sa_family); + return 0; + } +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.h b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.h index a589a1970..a4e90a73a 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.h +++ b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_utils.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H #define GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H +#include + #include "src/core/lib/iomgr/resolve_address.h" /* Returns true if addr is an IPv4-mapped IPv6 address within the @@ -26,33 +28,33 @@ If addr4_out is non-NULL, the inner IPv4 address will be copied here when returning true. */ -int grpc_sockaddr_is_v4mapped(const grpc_resolved_address *addr, - grpc_resolved_address *addr4_out); +int grpc_sockaddr_is_v4mapped(const grpc_resolved_address* addr, + grpc_resolved_address* addr4_out); /* If addr is an AF_INET address, writes the corresponding ::ffff:0.0.0.0/96 address to addr6_out and returns true. Otherwise returns false. */ -int grpc_sockaddr_to_v4mapped(const grpc_resolved_address *addr, - grpc_resolved_address *addr6_out); +int grpc_sockaddr_to_v4mapped(const grpc_resolved_address* addr, + grpc_resolved_address* addr6_out); /* If addr is ::, 0.0.0.0, or ::ffff:0.0.0.0, writes the port number to *port_out (if not NULL) and returns true, otherwise returns false. */ -int grpc_sockaddr_is_wildcard(const grpc_resolved_address *addr, int *port_out); +int grpc_sockaddr_is_wildcard(const grpc_resolved_address* addr, int* port_out); /* Writes 0.0.0.0:port and [::]:port to separate sockaddrs. */ -void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address *wild4_out, - grpc_resolved_address *wild6_out); +void grpc_sockaddr_make_wildcards(int port, grpc_resolved_address* wild4_out, + grpc_resolved_address* wild6_out); /* Writes 0.0.0.0:port. 
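grpc_sockaddr_is_v4mapped and grpc_sockaddr_to_v4mapped, declared above, convert between IPv4 and IPv4-mapped IPv6 (::ffff:a.b.c.d) by comparing the first 12 bytes of the IPv6 address against a fixed prefix and treating the last 4 bytes as the IPv4 address. Below is a self-contained POSIX sketch of the same check, using the raw sockaddr types rather than the grpc_sockaddr wrappers; ExtractV4Mapped is an invented name.

    #include <arpa/inet.h>
    #include <cstdio>
    #include <cstring>
    #include <netinet/in.h>

    // 12-byte prefix shared by every IPv4-mapped IPv6 address (::ffff:0.0.0.0/96).
    static const unsigned char kV4MappedPrefix[12] = {0, 0, 0, 0, 0, 0,
                                                      0, 0, 0, 0, 0xff, 0xff};

    // Returns true and writes the embedded IPv4 address when addr6 is
    // IPv4-mapped, mirroring the normalization done by grpc_sockaddr_is_v4mapped.
    static bool ExtractV4Mapped(const sockaddr_in6& addr6, sockaddr_in* out4) {
      if (memcmp(addr6.sin6_addr.s6_addr, kV4MappedPrefix,
                 sizeof(kV4MappedPrefix)) != 0) {
        return false;
      }
      memset(out4, 0, sizeof(*out4));
      out4->sin_family = AF_INET;
      memcpy(&out4->sin_addr, &addr6.sin6_addr.s6_addr[12], 4);
      out4->sin_port = addr6.sin6_port;
      return true;
    }

    int main() {
      sockaddr_in6 in6;
      memset(&in6, 0, sizeof(in6));
      in6.sin6_family = AF_INET6;
      inet_pton(AF_INET6, "::ffff:10.0.0.1", &in6.sin6_addr);
      in6.sin6_port = htons(443);
      sockaddr_in out4;
      if (ExtractV4Mapped(in6, &out4)) {
        char buf[INET_ADDRSTRLEN];
        inet_ntop(AF_INET, &out4.sin_addr, buf, sizeof(buf));
        printf("%s:%d\n", buf, ntohs(out4.sin_port));  // prints 10.0.0.1:443
      }
    }

The port is carried over unchanged because sin_port and sin6_port are both stored in network byte order.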
*/ -void grpc_sockaddr_make_wildcard4(int port, grpc_resolved_address *wild_out); +void grpc_sockaddr_make_wildcard4(int port, grpc_resolved_address* wild_out); /* Writes [::]:port. */ -void grpc_sockaddr_make_wildcard6(int port, grpc_resolved_address *wild_out); +void grpc_sockaddr_make_wildcard6(int port, grpc_resolved_address* wild_out); /* Return the IP port number of a sockaddr */ -int grpc_sockaddr_get_port(const grpc_resolved_address *addr); +int grpc_sockaddr_get_port(const grpc_resolved_address* addr); /* Set IP port number of a sockaddr */ -int grpc_sockaddr_set_port(const grpc_resolved_address *addr, int port); +int grpc_sockaddr_set_port(const grpc_resolved_address* addr, int port); /* Converts a sockaddr into a newly-allocated human-readable string. @@ -66,15 +68,17 @@ int grpc_sockaddr_set_port(const grpc_resolved_address *addr, int port); In the unlikely event of an error, returns -1 and sets *out to NULL. The existing value of errno is always preserved. */ -int grpc_sockaddr_to_string(char **out, const grpc_resolved_address *addr, +int grpc_sockaddr_to_string(char** out, const grpc_resolved_address* addr, int normalize); +void grpc_string_to_sockaddr(grpc_resolved_address* out, char* addr, int port); + /* Returns the URI string corresponding to \a addr */ -char *grpc_sockaddr_to_uri(const grpc_resolved_address *addr); +char* grpc_sockaddr_to_uri(const grpc_resolved_address* addr); /* Returns the URI scheme corresponding to \a addr */ -const char *grpc_sockaddr_get_uri_scheme(const grpc_resolved_address *addr); +const char* grpc_sockaddr_get_uri_scheme(const grpc_resolved_address* addr); -int grpc_sockaddr_get_family(const grpc_resolved_address *resolved_addr); +int grpc_sockaddr_get_family(const grpc_resolved_address* resolved_addr); #endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_windows.h b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_windows.h index cf0f6b914..4d637251a 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/sockaddr_windows.h +++ b/Sources/CgRPC/src/core/lib/iomgr/sockaddr_windows.h @@ -19,10 +19,37 @@ #ifndef GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H #define GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_WINSOCK_SOCKET + #include #include // must be included after the above #include +typedef struct sockaddr grpc_sockaddr; +typedef struct sockaddr_in grpc_sockaddr_in; +typedef struct in_addr grpc_in_addr; +typedef struct sockaddr_in6 grpc_sockaddr_in6; +typedef struct in6_addr grpc_in6_addr; + +#define GRPC_INET_ADDRSTRLEN INET_ADDRSTRLEN +#define GRPC_INET6_ADDRSTRLEN INET6_ADDRSTRLEN + +#define GRPC_SOCK_STREAM SOCK_STREAM +#define GRPC_SOCK_DGRAM SOCK_DGRAM + +#define GRPC_AF_UNSPEC AF_UNSPEC +#define GRPC_AF_UNIX AF_UNIX +#define GRPC_AF_INET AF_INET +#define GRPC_AF_INET6 AF_INET6 + +#define GRPC_AI_PASSIVE AI_PASSIVE + +#endif + #endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_WINDOWS_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.c b/Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.cc similarity index 56% rename from Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.cc index 8e907703a..1d1e36c0e 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.cc @@ -16,39 +16,41 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SOCKET #include 
"src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/useful.h" #include "src/core/lib/iomgr/socket_factory_posix.h" #include #include -#include -void grpc_socket_factory_init(grpc_socket_factory *factory, - const grpc_socket_factory_vtable *vtable) { +void grpc_socket_factory_init(grpc_socket_factory* factory, + const grpc_socket_factory_vtable* vtable) { factory->vtable = vtable; gpr_ref_init(&factory->refcount, 1); } -int grpc_socket_factory_socket(grpc_socket_factory *factory, int domain, +int grpc_socket_factory_socket(grpc_socket_factory* factory, int domain, int type, int protocol) { return factory->vtable->socket(factory, domain, type, protocol); } -int grpc_socket_factory_bind(grpc_socket_factory *factory, int sockfd, - const grpc_resolved_address *addr) { +int grpc_socket_factory_bind(grpc_socket_factory* factory, int sockfd, + const grpc_resolved_address* addr) { return factory->vtable->bind(factory, sockfd, addr); } -int grpc_socket_factory_compare(grpc_socket_factory *a, - grpc_socket_factory *b) { +int grpc_socket_factory_compare(grpc_socket_factory* a, + grpc_socket_factory* b) { int c = GPR_ICMP(a, b); if (c != 0) { - grpc_socket_factory *sma = a; - grpc_socket_factory *smb = b; + grpc_socket_factory* sma = a; + grpc_socket_factory* smb = b; c = GPR_ICMP(sma->vtable, smb->vtable); if (c == 0) { c = sma->vtable->compare(sma, smb); @@ -57,35 +59,35 @@ int grpc_socket_factory_compare(grpc_socket_factory *a, return c; } -grpc_socket_factory *grpc_socket_factory_ref(grpc_socket_factory *factory) { +grpc_socket_factory* grpc_socket_factory_ref(grpc_socket_factory* factory) { gpr_ref(&factory->refcount); return factory; } -void grpc_socket_factory_unref(grpc_socket_factory *factory) { +void grpc_socket_factory_unref(grpc_socket_factory* factory) { if (gpr_unref(&factory->refcount)) { factory->vtable->destroy(factory); } } -static void *socket_factory_arg_copy(void *p) { - return grpc_socket_factory_ref((grpc_socket_factory *)p); +static void* socket_factory_arg_copy(void* p) { + return grpc_socket_factory_ref(static_cast(p)); } -static void socket_factory_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { - grpc_socket_factory_unref((grpc_socket_factory *)p); +static void socket_factory_arg_destroy(void* p) { + grpc_socket_factory_unref(static_cast(p)); } -static int socket_factory_cmp(void *a, void *b) { - return grpc_socket_factory_compare((grpc_socket_factory *)a, - (grpc_socket_factory *)b); +static int socket_factory_cmp(void* a, void* b) { + return grpc_socket_factory_compare(static_cast(a), + static_cast(b)); } static const grpc_arg_pointer_vtable socket_factory_arg_vtable = { socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp}; -grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory) { - return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_FACTORY, +grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory* factory) { + return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SOCKET_FACTORY, factory, &socket_factory_arg_vtable); } diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.h b/Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.h index a46938b06..9a52f4ea4 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.h +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_factory_posix.h @@ -19,57 +19,51 @@ #ifndef GRPC_CORE_LIB_IOMGR_SOCKET_FACTORY_POSIX_H #define GRPC_CORE_LIB_IOMGR_SOCKET_FACTORY_POSIX_H +#include + #include #include #include "src/core/lib/iomgr/resolve_address.h" -#ifdef __cplusplus 
-extern "C" { -#endif - /** The virtual table of grpc_socket_factory */ typedef struct { /** Replacement for socket(2) */ - int (*socket)(grpc_socket_factory *factory, int domain, int type, + int (*socket)(grpc_socket_factory* factory, int domain, int type, int protocol); /** Replacement for bind(2) */ - int (*bind)(grpc_socket_factory *factory, int sockfd, - const grpc_resolved_address *addr); + int (*bind)(grpc_socket_factory* factory, int sockfd, + const grpc_resolved_address* addr); /** Compare socket factory \a a and \a b */ - int (*compare)(grpc_socket_factory *a, grpc_socket_factory *b); + int (*compare)(grpc_socket_factory* a, grpc_socket_factory* b); /** Destroys the socket factory instance */ - void (*destroy)(grpc_socket_factory *factory); + void (*destroy)(grpc_socket_factory* factory); } grpc_socket_factory_vtable; /** The Socket Factory interface allows changes on socket options */ struct grpc_socket_factory { - const grpc_socket_factory_vtable *vtable; + const grpc_socket_factory_vtable* vtable; gpr_refcount refcount; }; /** called by concrete implementations to initialize the base struct */ -void grpc_socket_factory_init(grpc_socket_factory *factory, - const grpc_socket_factory_vtable *vtable); +void grpc_socket_factory_init(grpc_socket_factory* factory, + const grpc_socket_factory_vtable* vtable); /** Wrap \a factory as a grpc_arg */ -grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory *factory); +grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory* factory); /** Perform the equivalent of a socket(2) operation using \a factory */ -int grpc_socket_factory_socket(grpc_socket_factory *factory, int domain, +int grpc_socket_factory_socket(grpc_socket_factory* factory, int domain, int type, int protocol); /** Perform the equivalent of a bind(2) operation using \a factory */ -int grpc_socket_factory_bind(grpc_socket_factory *factory, int sockfd, - const grpc_resolved_address *addr); +int grpc_socket_factory_bind(grpc_socket_factory* factory, int sockfd, + const grpc_resolved_address* addr); /** Compare if \a a and \a b are the same factory or have same settings */ -int grpc_socket_factory_compare(grpc_socket_factory *a, grpc_socket_factory *b); - -grpc_socket_factory *grpc_socket_factory_ref(grpc_socket_factory *factory); -void grpc_socket_factory_unref(grpc_socket_factory *factory); +int grpc_socket_factory_compare(grpc_socket_factory* a, grpc_socket_factory* b); -#ifdef __cplusplus -} -#endif +grpc_socket_factory* grpc_socket_factory_ref(grpc_socket_factory* factory); +void grpc_socket_factory_unref(grpc_socket_factory* factory); #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_FACTORY_POSIX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_mutator.c b/Sources/CgRPC/src/core/lib/iomgr/socket_mutator.cc similarity index 55% rename from Sources/CgRPC/src/core/lib/iomgr/socket_mutator.c rename to Sources/CgRPC/src/core/lib/iomgr/socket_mutator.cc index b0435d5a0..b9b8eaf4a 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_mutator.c +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_mutator.cc @@ -16,35 +16,37 @@ * */ -#include "src/core/lib/iomgr/socket_mutator.h" +#include -#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/iomgr/socket_mutator.h" #include #include -#include -void grpc_socket_mutator_init(grpc_socket_mutator *mutator, - const grpc_socket_mutator_vtable *vtable) { +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/useful.h" + +void grpc_socket_mutator_init(grpc_socket_mutator* mutator, + const 
grpc_socket_mutator_vtable* vtable) { mutator->vtable = vtable; gpr_ref_init(&mutator->refcount, 1); } -grpc_socket_mutator *grpc_socket_mutator_ref(grpc_socket_mutator *mutator) { +grpc_socket_mutator* grpc_socket_mutator_ref(grpc_socket_mutator* mutator) { gpr_ref(&mutator->refcount); return mutator; } -bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator *mutator, int fd) { +bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator* mutator, int fd) { return mutator->vtable->mutate_fd(fd, mutator); } -int grpc_socket_mutator_compare(grpc_socket_mutator *a, - grpc_socket_mutator *b) { +int grpc_socket_mutator_compare(grpc_socket_mutator* a, + grpc_socket_mutator* b) { int c = GPR_ICMP(a, b); if (c != 0) { - grpc_socket_mutator *sma = a; - grpc_socket_mutator *smb = b; + grpc_socket_mutator* sma = a; + grpc_socket_mutator* smb = b; c = GPR_ICMP(sma->vtable, smb->vtable); if (c == 0) { c = sma->vtable->compare(sma, smb); @@ -53,29 +55,29 @@ int grpc_socket_mutator_compare(grpc_socket_mutator *a, return c; } -void grpc_socket_mutator_unref(grpc_socket_mutator *mutator) { +void grpc_socket_mutator_unref(grpc_socket_mutator* mutator) { if (gpr_unref(&mutator->refcount)) { mutator->vtable->destory(mutator); } } -static void *socket_mutator_arg_copy(void *p) { - return grpc_socket_mutator_ref((grpc_socket_mutator *)p); +static void* socket_mutator_arg_copy(void* p) { + return grpc_socket_mutator_ref(static_cast(p)); } -static void socket_mutator_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { - grpc_socket_mutator_unref((grpc_socket_mutator *)p); +static void socket_mutator_arg_destroy(void* p) { + grpc_socket_mutator_unref(static_cast(p)); } -static int socket_mutator_cmp(void *a, void *b) { - return grpc_socket_mutator_compare((grpc_socket_mutator *)a, - (grpc_socket_mutator *)b); +static int socket_mutator_cmp(void* a, void* b) { + return grpc_socket_mutator_compare(static_cast(a), + static_cast(b)); } static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = { socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp}; -grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator) { - return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SOCKET_MUTATOR, +grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator* mutator) { + return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SOCKET_MUTATOR, mutator, &socket_mutator_arg_vtable); } diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_mutator.h b/Sources/CgRPC/src/core/lib/iomgr/socket_mutator.h index ba956e16f..6c7781c51 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_mutator.h +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_mutator.h @@ -19,49 +19,43 @@ #ifndef GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H #define GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H +#include + #include #include #include -#ifdef __cplusplus -extern "C" { -#endif - /** The virtual table of grpc_socket_mutator */ typedef struct { - /** Mutates the socket opitons of \a fd */ - bool (*mutate_fd)(int fd, grpc_socket_mutator *mutator); + /** Mutates the socket options of \a fd */ + bool (*mutate_fd)(int fd, grpc_socket_mutator* mutator); /** Compare socket mutator \a a and \a b */ - int (*compare)(grpc_socket_mutator *a, grpc_socket_mutator *b); + int (*compare)(grpc_socket_mutator* a, grpc_socket_mutator* b); /** Destroys the socket mutator instance */ - void (*destory)(grpc_socket_mutator *mutator); + void (*destory)(grpc_socket_mutator* mutator); } grpc_socket_mutator_vtable; /** The Socket Mutator interface allows changes on socket options */ struct 
grpc_socket_mutator { - const grpc_socket_mutator_vtable *vtable; + const grpc_socket_mutator_vtable* vtable; gpr_refcount refcount; }; /** called by concrete implementations to initialize the base struct */ -void grpc_socket_mutator_init(grpc_socket_mutator *mutator, - const grpc_socket_mutator_vtable *vtable); +void grpc_socket_mutator_init(grpc_socket_mutator* mutator, + const grpc_socket_mutator_vtable* vtable); /** Wrap \a mutator as a grpc_arg */ -grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator *mutator); +grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator* mutator); /** Perform the file descriptor mutation operation of \a mutator on \a fd */ -bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator *mutator, int fd); +bool grpc_socket_mutator_mutate_fd(grpc_socket_mutator* mutator, int fd); /** Compare if \a a and \a b are the same mutator or have same settings */ -int grpc_socket_mutator_compare(grpc_socket_mutator *a, grpc_socket_mutator *b); - -grpc_socket_mutator *grpc_socket_mutator_ref(grpc_socket_mutator *mutator); -void grpc_socket_mutator_unref(grpc_socket_mutator *mutator); +int grpc_socket_mutator_compare(grpc_socket_mutator* a, grpc_socket_mutator* b); -#ifdef __cplusplus -} -#endif +grpc_socket_mutator* grpc_socket_mutator_ref(grpc_socket_mutator* mutator); +void grpc_socket_mutator_unref(grpc_socket_mutator* mutator); #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_utils.h b/Sources/CgRPC/src/core/lib/iomgr/socket_utils.h index 03fe46e5e..cf1a7be64 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_utils.h +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_utils.h @@ -19,9 +19,20 @@ #ifndef GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H #define GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H +#include + #include +/* A wrapper for htons on POSIX and Windows */ +uint16_t grpc_htons(uint16_t hostshort); + +/* A wrapper for ntohs on POSIX and WINDOWS */ +uint16_t grpc_ntohs(uint16_t netshort); + +/* A wrapper for inet_pton on POSIX and WINDOWS */ +int grpc_inet_pton(int af, const char* src, void* dst); + /* A wrapper for inet_ntop on POSIX systems and InetNtop on Windows systems */ -const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size); +const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size); #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_common_posix.c b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_common_posix.cc similarity index 73% rename from Sources/CgRPC/src/core/lib/iomgr/socket_utils_common_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/socket_utils_common_posix.cc index b8e2a0cdf..04a176773 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_common_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_common_posix.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SOCKET @@ -36,15 +38,16 @@ #include #include -#include #include -#include #include + +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/support/string.h" /* set a socket to non blocking mode */ -grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking) { +grpc_error* grpc_set_socket_nonblocking(int fd, int non_blocking) { int oldflags = fcntl(fd, F_GETFL, 0); if (oldflags < 0) { return GRPC_OS_ERROR(errno, "fcntl"); @@ -63,7 +66,7 @@ grpc_error 
*grpc_set_socket_nonblocking(int fd, int non_blocking) { return GRPC_ERROR_NONE; } -grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd) { +grpc_error* grpc_set_socket_no_sigpipe_if_possible(int fd) { #ifdef GRPC_HAVE_SO_NOSIGPIPE int val = 1; int newval; @@ -81,7 +84,7 @@ grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd) { return GRPC_ERROR_NONE; } -grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd) { +grpc_error* grpc_set_socket_ip_pktinfo_if_possible(int fd) { #ifdef GRPC_HAVE_IP_PKTINFO int get_local_ip = 1; if (0 != setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &get_local_ip, @@ -92,7 +95,7 @@ grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd) { return GRPC_ERROR_NONE; } -grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) { +grpc_error* grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) { #ifdef GRPC_HAVE_IPV6_RECVPKTINFO int get_local_ip = 1; if (0 != setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &get_local_ip, @@ -103,14 +106,14 @@ grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd) { return GRPC_ERROR_NONE; } -grpc_error *grpc_set_socket_sndbuf(int fd, int buffer_size_bytes) { +grpc_error* grpc_set_socket_sndbuf(int fd, int buffer_size_bytes) { return 0 == setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffer_size_bytes, sizeof(buffer_size_bytes)) ? GRPC_ERROR_NONE : GRPC_OS_ERROR(errno, "setsockopt(SO_SNDBUF)"); } -grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes) { +grpc_error* grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes) { return 0 == setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &buffer_size_bytes, sizeof(buffer_size_bytes)) ? GRPC_ERROR_NONE @@ -118,7 +121,7 @@ grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes) { } /* set a socket to close on exec */ -grpc_error *grpc_set_socket_cloexec(int fd, int close_on_exec) { +grpc_error* grpc_set_socket_cloexec(int fd, int close_on_exec) { int oldflags = fcntl(fd, F_GETFD, 0); if (oldflags < 0) { return GRPC_OS_ERROR(errno, "fcntl"); @@ -138,7 +141,7 @@ grpc_error *grpc_set_socket_cloexec(int fd, int close_on_exec) { } /* set a socket to reuse old addresses */ -grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse) { +grpc_error* grpc_set_socket_reuse_addr(int fd, int reuse) { int val = (reuse != 0); int newval; socklen_t intlen = sizeof(newval); @@ -156,7 +159,7 @@ grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse) { } /* set a socket to reuse old addresses */ -grpc_error *grpc_set_socket_reuse_port(int fd, int reuse) { +grpc_error* grpc_set_socket_reuse_port(int fd, int reuse) { #ifndef SO_REUSEPORT return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "SO_REUSEPORT unavailable on compiling system"); @@ -178,8 +181,32 @@ grpc_error *grpc_set_socket_reuse_port(int fd, int reuse) { #endif } +static gpr_once g_probe_so_reuesport_once = GPR_ONCE_INIT; +static int g_support_so_reuseport = false; + +void probe_so_reuseport_once(void) { +#ifndef GPR_MANYLINUX1 + int s = socket(AF_INET, SOCK_STREAM, 0); + if (s < 0) { + /* This might be an ipv6-only environment in which case 'socket(AF_INET,..)' + call would fail. 
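The probe_so_reuseport_once helper added above determines whether SO_REUSEPORT can be set by trying it once on a throwaway socket (falling back to an IPv6 socket on IPv6-only hosts) and caching the answer behind gpr_once. A standalone sketch of the same probe-once pattern, assuming POSIX sockets and using std::call_once in place of gpr_once; IsReusePortSupported is an invented name.

    #include <cstdio>
    #include <mutex>
    #include <sys/socket.h>
    #include <unistd.h>

    static std::once_flag g_probe_once;
    static bool g_reuse_port_supported = false;

    // Probe SO_REUSEPORT exactly once and cache the result.
    bool IsReusePortSupported() {
      std::call_once(g_probe_once, [] {
    #ifdef SO_REUSEPORT
        int s = socket(AF_INET, SOCK_STREAM, 0);
        if (s < 0) s = socket(AF_INET6, SOCK_STREAM, 0);  // IPv6-only host?
        if (s >= 0) {
          int val = 1;
          g_reuse_port_supported =
              setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val)) == 0;
          close(s);
        }
    #endif
      });
      return g_reuse_port_supported;
    }

    int main() { std::printf("SO_REUSEPORT supported: %d\n", IsReusePortSupported()); }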
Try creating IPv6 socket in that case */ + s = socket(AF_INET6, SOCK_STREAM, 0); + } + if (s >= 0) { + g_support_so_reuseport = GRPC_LOG_IF_ERROR( + "check for SO_REUSEPORT", grpc_set_socket_reuse_port(s, 1)); + close(s); + } +#endif +} + +bool grpc_is_socket_reuse_port_supported() { + gpr_once_init(&g_probe_so_reuesport_once, probe_so_reuseport_once); + return g_support_so_reuseport; +} + /* disable nagle */ -grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) { +grpc_error* grpc_set_socket_low_latency(int fd, int low_latency) { int val = (low_latency != 0); int newval; socklen_t intlen = sizeof(newval); @@ -196,7 +223,7 @@ grpc_error *grpc_set_socket_low_latency(int fd, int low_latency) { } /* set a socket using a grpc_socket_mutator */ -grpc_error *grpc_set_socket_with_mutator(int fd, grpc_socket_mutator *mutator) { +grpc_error* grpc_set_socket_with_mutator(int fd, grpc_socket_mutator* mutator) { GPR_ASSERT(mutator); if (!grpc_socket_mutator_mutate_fd(mutator, fd)) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_socket_mutator failed."); @@ -213,11 +240,11 @@ static void probe_ipv6_once(void) { if (fd < 0) { gpr_log(GPR_INFO, "Disabling AF_INET6 sockets because socket() failed."); } else { - struct sockaddr_in6 addr; + grpc_sockaddr_in6 addr; memset(&addr, 0, sizeof(addr)); addr.sin6_family = AF_INET6; addr.sin6_addr.s6_addr[15] = 1; /* [::1]:0 */ - if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0) { + if (bind(fd, reinterpret_cast(&addr), sizeof(addr)) == 0) { g_ipv6_loopback_available = 1; } else { gpr_log(GPR_INFO, @@ -249,35 +276,36 @@ static int set_socket_dualstack(int fd) { } } -static grpc_error *error_for_fd(int fd, const grpc_resolved_address *addr) { +static grpc_error* error_for_fd(int fd, const grpc_resolved_address* addr) { if (fd >= 0) return GRPC_ERROR_NONE; - char *addr_str; + char* addr_str; grpc_sockaddr_to_string(&addr_str, addr, 0); - grpc_error *err = grpc_error_set_str(GRPC_OS_ERROR(errno, "socket"), + grpc_error* err = grpc_error_set_str(GRPC_OS_ERROR(errno, "socket"), GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(addr_str)); gpr_free(addr_str); return err; } -grpc_error *grpc_create_dualstack_socket( - const grpc_resolved_address *resolved_addr, int type, int protocol, - grpc_dualstack_mode *dsmode, int *newfd) { - return grpc_create_dualstack_socket_using_factory(NULL, resolved_addr, type, - protocol, dsmode, newfd); +grpc_error* grpc_create_dualstack_socket( + const grpc_resolved_address* resolved_addr, int type, int protocol, + grpc_dualstack_mode* dsmode, int* newfd) { + return grpc_create_dualstack_socket_using_factory( + nullptr, resolved_addr, type, protocol, dsmode, newfd); } -static int create_socket(grpc_socket_factory *factory, int domain, int type, +static int create_socket(grpc_socket_factory* factory, int domain, int type, int protocol) { - return (factory != NULL) + return (factory != nullptr) ? 
grpc_socket_factory_socket(factory, domain, type, protocol) : socket(domain, type, protocol); } -grpc_error *grpc_create_dualstack_socket_using_factory( - grpc_socket_factory *factory, const grpc_resolved_address *resolved_addr, - int type, int protocol, grpc_dualstack_mode *dsmode, int *newfd) { - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; +grpc_error* grpc_create_dualstack_socket_using_factory( + grpc_socket_factory* factory, const grpc_resolved_address* resolved_addr, + int type, int protocol, grpc_dualstack_mode* dsmode, int* newfd) { + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); int family = addr->sa_family; if (family == AF_INET6) { if (grpc_ipv6_loopback_available()) { @@ -292,7 +320,7 @@ grpc_error *grpc_create_dualstack_socket_using_factory( return GRPC_ERROR_NONE; } /* If this isn't an IPv4 address, then return whatever we've got. */ - if (!grpc_sockaddr_is_v4mapped(resolved_addr, NULL)) { + if (!grpc_sockaddr_is_v4mapped(resolved_addr, nullptr)) { *dsmode = GRPC_DSMODE_IPV6; return error_for_fd(*newfd, resolved_addr); } @@ -307,9 +335,17 @@ grpc_error *grpc_create_dualstack_socket_using_factory( return error_for_fd(*newfd, resolved_addr); } -const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) { +uint16_t grpc_htons(uint16_t hostshort) { return htons(hostshort); } + +uint16_t grpc_ntohs(uint16_t netshort) { return ntohs(netshort); } + +int grpc_inet_pton(int af, const char* src, void* dst) { + return inet_pton(af, src, dst); +} + +const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) { GPR_ASSERT(size <= (socklen_t)-1); - return inet_ntop(af, src, dst, (socklen_t)size); + return inet_ntop(af, src, dst, static_cast(size)); } #endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_linux.c b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_linux.cc similarity index 77% rename from Sources/CgRPC/src/core/lib/iomgr/socket_utils_linux.c rename to Sources/CgRPC/src/core/lib/iomgr/socket_utils_linux.cc index e7b094d21..34f93cc4b 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_linux.c +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_linux.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_LINUX_SOCKETUTILS @@ -28,15 +30,13 @@ #include #include -int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock, +int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock, int cloexec) { int flags = 0; - GPR_ASSERT(sizeof(socklen_t) <= sizeof(size_t)); - GPR_ASSERT(resolved_addr->len <= (socklen_t)-1); flags |= nonblock ? SOCK_NONBLOCK : 0; flags |= cloexec ? 
SOCK_CLOEXEC : 0; - return accept4(sockfd, (struct sockaddr *)resolved_addr->addr, - (socklen_t *)&resolved_addr->len, flags); + return accept4(sockfd, reinterpret_cast(resolved_addr->addr), + &resolved_addr->len, flags); } #endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.c b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.cc similarity index 83% rename from Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.cc index dfd1ffd1e..c48da52ff 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SOCKETUTILS @@ -29,13 +31,11 @@ #include #include "src/core/lib/iomgr/sockaddr.h" -int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock, +int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock, int cloexec) { int fd, flags; - GPR_ASSERT(sizeof(socklen_t) <= sizeof(size_t)); - GPR_ASSERT(resolved_addr->len <= (socklen_t)-1); - fd = accept(sockfd, (struct sockaddr *)resolved_addr->addr, - (socklen_t *)&resolved_addr->len); + fd = accept(sockfd, reinterpret_cast(resolved_addr->addr), + &resolved_addr->len); if (fd >= 0) { if (nonblock) { flags = fcntl(fd, F_GETFL, 0); diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.h b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.h index eef80b439..b3fd58a53 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.h +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_posix.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H #define GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H +#include + #include "src/core/lib/iomgr/resolve_address.h" #include @@ -30,23 +32,26 @@ #include "src/core/lib/iomgr/socket_mutator.h" /* a wrapper for accept or accept4 */ -int grpc_accept4(int sockfd, grpc_resolved_address *resolved_addr, int nonblock, +int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock, int cloexec); /* set a socket to non blocking mode */ -grpc_error *grpc_set_socket_nonblocking(int fd, int non_blocking); +grpc_error* grpc_set_socket_nonblocking(int fd, int non_blocking); /* set a socket to close on exec */ -grpc_error *grpc_set_socket_cloexec(int fd, int close_on_exec); +grpc_error* grpc_set_socket_cloexec(int fd, int close_on_exec); /* set a socket to reuse old addresses */ -grpc_error *grpc_set_socket_reuse_addr(int fd, int reuse); +grpc_error* grpc_set_socket_reuse_addr(int fd, int reuse); + +/* return true if SO_REUSEPORT is supported */ +bool grpc_is_socket_reuse_port_supported(); /* disable nagle */ -grpc_error *grpc_set_socket_low_latency(int fd, int low_latency); +grpc_error* grpc_set_socket_low_latency(int fd, int low_latency); /* set SO_REUSEPORT */ -grpc_error *grpc_set_socket_reuse_port(int fd, int reuse); +grpc_error* grpc_set_socket_reuse_port(int fd, int reuse); /* Returns true if this system can create AF_INET6 sockets bound to ::1. The value is probed once, and cached for the life of the process. @@ -60,24 +65,24 @@ int grpc_ipv6_loopback_available(void); /* Tries to set SO_NOSIGPIPE if available on this platform. If SO_NO_SIGPIPE is not available, returns 1. */ -grpc_error *grpc_set_socket_no_sigpipe_if_possible(int fd); +grpc_error* grpc_set_socket_no_sigpipe_if_possible(int fd); /* Tries to set IP_PKTINFO if available on this platform. If IP_PKTINFO is not available, returns 1. 
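Most of the setters declared above follow the same shape as grpc_set_socket_nonblocking in the earlier .cc hunk: read the current fcntl flags, toggle one bit, write them back, and report failures. A minimal sketch of the non-blocking case, assuming POSIX and returning a plain int instead of a grpc_error*; SetNonBlocking is an invented name and the O_NONBLOCK toggle is the conventional completion of the partially shown function body.

    #include <fcntl.h>

    // Fetch the current flags, set or clear O_NONBLOCK, write them back.
    // Returns 0 on success, -1 (with errno set by fcntl) on failure.
    int SetNonBlocking(int fd, bool non_blocking) {
      int oldflags = fcntl(fd, F_GETFL, 0);
      if (oldflags < 0) return -1;
      int newflags = non_blocking ? (oldflags | O_NONBLOCK)
                                  : (oldflags & ~O_NONBLOCK);
      return fcntl(fd, F_SETFL, newflags) < 0 ? -1 : 0;
    }

    int main() { return SetNonBlocking(0, true); }  // fd 0 (stdin), purely as a demo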
*/ -grpc_error *grpc_set_socket_ip_pktinfo_if_possible(int fd); +grpc_error* grpc_set_socket_ip_pktinfo_if_possible(int fd); /* Tries to set IPV6_RECVPKTINFO if available on this platform. If IPV6_RECVPKTINFO is not available, returns 1. */ -grpc_error *grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd); +grpc_error* grpc_set_socket_ipv6_recvpktinfo_if_possible(int fd); /* Tries to set the socket's send buffer to given size. */ -grpc_error *grpc_set_socket_sndbuf(int fd, int buffer_size_bytes); +grpc_error* grpc_set_socket_sndbuf(int fd, int buffer_size_bytes); /* Tries to set the socket's receive buffer to given size. */ -grpc_error *grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes); +grpc_error* grpc_set_socket_rcvbuf(int fd, int buffer_size_bytes); /* Tries to set the socket using a grpc_socket_mutator */ -grpc_error *grpc_set_socket_with_mutator(int fd, grpc_socket_mutator *mutator); +grpc_error* grpc_set_socket_with_mutator(int fd, grpc_socket_mutator* mutator); /* An enum to keep track of IPv4/IPv6 socket modes. @@ -118,15 +123,15 @@ extern int grpc_forbid_dualstack_sockets_for_testing; IPv4, so that bind() or connect() see the correct family. Also, it's important to distinguish between DUALSTACK and IPV6 when listening on the [::] wildcard address. */ -grpc_error *grpc_create_dualstack_socket(const grpc_resolved_address *addr, +grpc_error* grpc_create_dualstack_socket(const grpc_resolved_address* addr, int type, int protocol, - grpc_dualstack_mode *dsmode, - int *newfd); + grpc_dualstack_mode* dsmode, + int* newfd); /* Same as grpc_create_dualstack_socket(), but use the given socket factory (if non-null) to create the socket, rather than calling socket() directly. */ -grpc_error *grpc_create_dualstack_socket_using_factory( - grpc_socket_factory *factory, const grpc_resolved_address *addr, int type, - int protocol, grpc_dualstack_mode *dsmode, int *newfd); +grpc_error* grpc_create_dualstack_socket_using_factory( + grpc_socket_factory* factory, const grpc_resolved_address* addr, int type, + int protocol, grpc_dualstack_mode* dsmode, int* newfd); #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_uv.c b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_uv.cc similarity index 68% rename from Sources/CgRPC/src/core/lib/iomgr/socket_utils_uv.c rename to Sources/CgRPC/src/core/lib/iomgr/socket_utils_uv.cc index 0f7de4dfa..7eba40c46 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_uv.c +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_uv.cc @@ -16,17 +16,28 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_UV -#include - +#include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/socket_utils.h" #include -const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) { +#include + +uint16_t grpc_htons(uint16_t hostshort) { return htons(hostshort); } + +uint16_t grpc_ntohs(uint16_t netshort) { return ntohs(netshort); } + +int grpc_inet_pton(int af, const char* src, void* dst) { + return inet_pton(af, src, dst); +} + +const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) { uv_inet_ntop(af, src, dst, size); return dst; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_windows.c b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_windows.cc similarity index 70% rename from Sources/CgRPC/src/core/lib/iomgr/socket_utils_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/socket_utils_windows.cc index 6e85e4b61..3e7b5b812 100644 --- 
a/Sources/CgRPC/src/core/lib/iomgr/socket_utils_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_utils_windows.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINDOWS_SOCKETUTILS @@ -25,9 +27,17 @@ #include -const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) { +uint16_t grpc_htons(uint16_t hostshort) { return htons(hostshort); } + +uint16_t grpc_ntohs(uint16_t netshort) { return ntohs(netshort); } + +int grpc_inet_pton(int af, const char* src, void* dst) { + return inet_pton(af, src, dst); +} + +const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size) { /* Windows InetNtopA wants a mutable ip pointer */ - return InetNtopA(af, (void *)src, dst, size); + return InetNtopA(af, (void*)src, dst, size); } #endif /* GRPC_WINDOWS_SOCKETUTILS */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_windows.c b/Sources/CgRPC/src/core/lib/iomgr/socket_windows.cc similarity index 73% rename from Sources/CgRPC/src/core/lib/iomgr/socket_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/socket_windows.cc index a0d731b94..2e2340958 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_windows.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINSOCK_SOCKET @@ -36,9 +38,9 @@ #include "src/core/lib/iomgr/pollset_windows.h" #include "src/core/lib/iomgr/socket_windows.h" -grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name) { - char *final_name; - grpc_winsocket *r = gpr_malloc(sizeof(grpc_winsocket)); +grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name) { + char* final_name; + grpc_winsocket* r = (grpc_winsocket*)gpr_malloc(sizeof(grpc_winsocket)); memset(r, 0, sizeof(grpc_winsocket)); r->socket = socket; gpr_mu_init(&r->state_mu); @@ -53,7 +55,7 @@ grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name) { operations to abort them. We need to do that this way because of the various callsites of that function, which happens to be in various mutex hold states, and that'd be unsafe to call them directly. */ -void grpc_winsocket_shutdown(grpc_winsocket *winsocket) { +void grpc_winsocket_shutdown(grpc_winsocket* winsocket) { /* Grab the function pointer for DisconnectEx for that specific socket. It may change depending on the interface. 
*/ int status; @@ -76,7 +78,7 @@ void grpc_winsocket_shutdown(grpc_winsocket *winsocket) { if (status == 0) { DisconnectEx(winsocket->socket, NULL, 0, 0); } else { - char *utf8_message = gpr_format_message(WSAGetLastError()); + char* utf8_message = gpr_format_message(WSAGetLastError()); gpr_log(GPR_INFO, "Unable to retrieve DisconnectEx pointer : %s", utf8_message); gpr_free(utf8_message); @@ -84,19 +86,19 @@ void grpc_winsocket_shutdown(grpc_winsocket *winsocket) { closesocket(winsocket->socket); } -static void destroy(grpc_winsocket *winsocket) { +static void destroy(grpc_winsocket* winsocket) { grpc_iomgr_unregister_object(&winsocket->iomgr_object); gpr_mu_destroy(&winsocket->state_mu); gpr_free(winsocket); } -static bool check_destroyable(grpc_winsocket *winsocket) { +static bool check_destroyable(grpc_winsocket* winsocket) { return winsocket->destroy_called == true && winsocket->write_info.closure == NULL && winsocket->read_info.closure == NULL; } -void grpc_winsocket_destroy(grpc_winsocket *winsocket) { +void grpc_winsocket_destroy(grpc_winsocket* winsocket) { gpr_mu_lock(&winsocket->state_mu); GPR_ASSERT(!winsocket->destroy_called); winsocket->destroy_called = true; @@ -109,37 +111,34 @@ void grpc_winsocket_destroy(grpc_winsocket *winsocket) { -) The IOCP already completed in the background, and we need to call the callback now. -) The IOCP hasn't completed yet, and we're queuing it for later. */ -static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx, - grpc_winsocket *socket, grpc_closure *closure, - grpc_winsocket_callback_info *info) { +static void socket_notify_on_iocp(grpc_winsocket* socket, grpc_closure* closure, + grpc_winsocket_callback_info* info) { GPR_ASSERT(info->closure == NULL); gpr_mu_lock(&socket->state_mu); if (info->has_pending_iocp) { info->has_pending_iocp = 0; - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); } else { info->closure = closure; } gpr_mu_unlock(&socket->state_mu); } -void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx, - grpc_winsocket *socket, - grpc_closure *closure) { - socket_notify_on_iocp(exec_ctx, socket, closure, &socket->write_info); +void grpc_socket_notify_on_write(grpc_winsocket* socket, + grpc_closure* closure) { + socket_notify_on_iocp(socket, closure, &socket->write_info); } -void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, - grpc_closure *closure) { - socket_notify_on_iocp(exec_ctx, socket, closure, &socket->read_info); +void grpc_socket_notify_on_read(grpc_winsocket* socket, grpc_closure* closure) { + socket_notify_on_iocp(socket, closure, &socket->read_info); } -void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, - grpc_winsocket_callback_info *info) { +void grpc_socket_become_ready(grpc_winsocket* socket, + grpc_winsocket_callback_info* info) { GPR_ASSERT(!info->has_pending_iocp); gpr_mu_lock(&socket->state_mu); if (info->closure) { - GRPC_CLOSURE_SCHED(exec_ctx, info->closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(info->closure, GRPC_ERROR_NONE); info->closure = NULL; } else { info->has_pending_iocp = 1; diff --git a/Sources/CgRPC/src/core/lib/iomgr/socket_windows.h b/Sources/CgRPC/src/core/lib/iomgr/socket_windows.h index 67dc4ca53..7bd01eded 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/socket_windows.h +++ b/Sources/CgRPC/src/core/lib/iomgr/socket_windows.h @@ -20,12 +20,16 @@ #define GRPC_CORE_LIB_IOMGR_SOCKET_WINDOWS_H #include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_WINSOCK_SOCKET 
#include #include #include -#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/closure.h" #include "src/core/lib/iomgr/iomgr_internal.h" /* This holds the data for an outstanding read or write on a socket. @@ -40,7 +44,7 @@ typedef struct grpc_winsocket_callback_info { OVERLAPPED overlapped; /* The callback information for the pending operation. May be empty if the caller hasn't registered a callback yet. */ - grpc_closure *closure; + grpc_closure* closure; /* A boolean to describe if the IO Completion Port got a notification for that operation. This will happen if the operation completed before the called had time to register a callback. We could avoid that behavior @@ -86,25 +90,24 @@ typedef struct grpc_winsocket { /* Create a wrapped windows handle. This takes ownership of it, meaning that it will be responsible for closing it. */ -grpc_winsocket *grpc_winsocket_create(SOCKET socket, const char *name); +grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name); /* Initiate an asynchronous shutdown of the socket. Will call off any pending operation to cancel them. */ -void grpc_winsocket_shutdown(grpc_winsocket *socket); +void grpc_winsocket_shutdown(grpc_winsocket* socket); /* Destroy a socket. Should only be called if there's no pending operation. */ -void grpc_winsocket_destroy(grpc_winsocket *socket); +void grpc_winsocket_destroy(grpc_winsocket* socket); + +void grpc_socket_notify_on_write(grpc_winsocket* winsocket, + grpc_closure* closure); -void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx, - grpc_winsocket *winsocket, - grpc_closure *closure); +void grpc_socket_notify_on_read(grpc_winsocket* winsocket, + grpc_closure* closure); -void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx, - grpc_winsocket *winsocket, - grpc_closure *closure); +void grpc_socket_become_ready(grpc_winsocket* winsocket, + grpc_winsocket_callback_info* ci); -void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, - grpc_winsocket *winsocket, - grpc_winsocket_callback_info *ci); +#endif #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_WINDOWS_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/sys_epoll_wrapper.h b/Sources/CgRPC/src/core/lib/iomgr/sys_epoll_wrapper.h index 3fa535715..d21d85366 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/sys_epoll_wrapper.h +++ b/Sources/CgRPC/src/core/lib/iomgr/sys_epoll_wrapper.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_SYS_EPOLL_WRAPPER_H #define GRPC_CORE_LIB_IOMGR_SYS_EPOLL_WRAPPER_H +#include + #include #ifndef EPOLLEXCLUSIVE diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_client.cc b/Sources/CgRPC/src/core/lib/iomgr/tcp_client.cc new file mode 100644 index 000000000..6c0ba4078 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_client.cc @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/tcp_client.h" + +grpc_tcp_client_vtable* grpc_tcp_client_impl; + +void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep, + grpc_pollset_set* interested_parties, + const grpc_channel_args* channel_args, + const grpc_resolved_address* addr, + grpc_millis deadline) { + grpc_tcp_client_impl->connect(closure, ep, interested_parties, channel_args, + addr, deadline); +} + +void grpc_set_tcp_client_impl(grpc_tcp_client_vtable* impl) { + grpc_tcp_client_impl = impl; +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_client.h b/Sources/CgRPC/src/core/lib/iomgr/tcp_client.h index 6c9e51ae8..d209eeb8c 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_client.h +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_client.h @@ -19,22 +19,34 @@ #ifndef GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H #define GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H +#include + #include #include #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/pollset_set.h" #include "src/core/lib/iomgr/resolve_address.h" +typedef struct grpc_tcp_client_vtable { + void (*connect)(grpc_closure* on_connect, grpc_endpoint** endpoint, + grpc_pollset_set* interested_parties, + const grpc_channel_args* channel_args, + const grpc_resolved_address* addr, grpc_millis deadline); +} grpc_tcp_client_vtable; + /* Asynchronously connect to an address (specified as (addr, len)), and call cb with arg and the completed connection when done (or call cb with arg and NULL on failure). interested_parties points to a set of pollsets that would be interested in this connection being established (in order to continue their work) */ -void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *on_connect, - grpc_endpoint **endpoint, - grpc_pollset_set *interested_parties, - const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, - gpr_timespec deadline); +void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint, + grpc_pollset_set* interested_parties, + const grpc_channel_args* channel_args, + const grpc_resolved_address* addr, + grpc_millis deadline); + +void grpc_tcp_client_global_init(); + +void grpc_set_tcp_client_impl(grpc_tcp_client_vtable* impl); #endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_client_custom.cc b/Sources/CgRPC/src/core/lib/iomgr/tcp_client_custom.cc new file mode 100644 index 000000000..932c79ea0 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_client_custom.cc @@ -0,0 +1,151 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#include + +#include +#include + +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/iomgr/tcp_client.h" +#include "src/core/lib/iomgr/tcp_custom.h" +#include "src/core/lib/iomgr/timer.h" + +extern grpc_core::TraceFlag grpc_tcp_trace; +extern grpc_socket_vtable* grpc_custom_socket_vtable; + +struct grpc_custom_tcp_connect { + grpc_custom_socket* socket; + grpc_timer alarm; + grpc_closure on_alarm; + grpc_closure* closure; + grpc_endpoint** endpoint; + int refs; + char* addr_name; + grpc_resource_quota* resource_quota; +}; + +static void custom_tcp_connect_cleanup(grpc_custom_tcp_connect* connect) { + grpc_custom_socket* socket = connect->socket; + grpc_resource_quota_unref_internal(connect->resource_quota); + gpr_free(connect->addr_name); + gpr_free(connect); + socket->refs--; + if (socket->refs == 0) { + grpc_custom_socket_vtable->destroy(socket); + gpr_free(socket); + } +} + +static void custom_close_callback(grpc_custom_socket* socket) {} + +static void on_alarm(void* acp, grpc_error* error) { + int done; + grpc_custom_socket* socket = (grpc_custom_socket*)acp; + grpc_custom_tcp_connect* connect = socket->connector; + if (grpc_tcp_trace.enabled()) { + const char* str = grpc_error_string(error); + gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s", + connect->addr_name, str); + } + if (error == GRPC_ERROR_NONE) { + /* error == NONE implies that the timer ran out, and wasn't cancelled. If + it was cancelled, then the handler that cancelled it also should close + the handle, if applicable */ + grpc_custom_socket_vtable->close(socket, custom_close_callback); + } + done = (--connect->refs == 0); + if (done) { + custom_tcp_connect_cleanup(connect); + } +} + +static void custom_connect_callback(grpc_custom_socket* socket, + grpc_error* error) { + grpc_core::ExecCtx exec_ctx; + grpc_custom_tcp_connect* connect = socket->connector; + int done; + grpc_closure* closure = connect->closure; + grpc_timer_cancel(&connect->alarm); + if (error == GRPC_ERROR_NONE) { + *connect->endpoint = custom_tcp_endpoint_create( + socket, connect->resource_quota, connect->addr_name); + } + done = (--connect->refs == 0); + if (done) { + grpc_core::ExecCtx::Get()->Flush(); + custom_tcp_connect_cleanup(connect); + } + GRPC_CLOSURE_SCHED(closure, error); +} + +static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep, + grpc_pollset_set* interested_parties, + const grpc_channel_args* channel_args, + const grpc_resolved_address* resolved_addr, + grpc_millis deadline) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + (void)channel_args; + (void)interested_parties; + grpc_custom_tcp_connect* connect; + grpc_resource_quota* resource_quota = grpc_resource_quota_create(nullptr); + if (channel_args != nullptr) { + for (size_t i = 0; i < channel_args->num_args; i++) { + if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { + grpc_resource_quota_unref_internal(resource_quota); + resource_quota = grpc_resource_quota_ref_internal( + (grpc_resource_quota*)channel_args->args[i].value.pointer.p); + } + } + } + grpc_custom_socket* socket = + (grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket)); + socket->refs = 2; + grpc_custom_socket_vtable->init(socket, GRPC_AF_UNSPEC); + connect = + (grpc_custom_tcp_connect*)gpr_malloc(sizeof(grpc_custom_tcp_connect)); + connect->closure = closure; + connect->endpoint = ep; + connect->addr_name = 
grpc_sockaddr_to_uri(resolved_addr); + connect->resource_quota = resource_quota; + connect->socket = socket; + socket->connector = connect; + socket->endpoint = nullptr; + socket->listener = nullptr; + connect->refs = 2; + + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "CLIENT_CONNECT: %p %s: asynchronously connecting", + socket, connect->addr_name); + } + + grpc_custom_socket_vtable->connect( + socket, (const grpc_sockaddr*)resolved_addr->addr, resolved_addr->len, + custom_connect_callback); + GRPC_CLOSURE_INIT(&connect->on_alarm, on_alarm, socket, + grpc_schedule_on_exec_ctx); + grpc_timer_init(&connect->alarm, deadline, &connect->on_alarm); +} + +grpc_tcp_client_vtable custom_tcp_client_vtable = {tcp_connect}; diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.cc similarity index 51% rename from Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.cc index 39dbb506e..6144d389f 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SOCKET @@ -33,36 +35,36 @@ #include #include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/iomgr_posix.h" +#include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/socket_mutator.h" #include "src/core/lib/iomgr/socket_utils_posix.h" #include "src/core/lib/iomgr/tcp_posix.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/unix_sockets_posix.h" -#include "src/core/lib/support/string.h" -extern grpc_tracer_flag grpc_tcp_trace; +extern grpc_core::TraceFlag grpc_tcp_trace; typedef struct { gpr_mu mu; - grpc_fd *fd; - gpr_timespec deadline; + grpc_fd* fd; grpc_timer alarm; grpc_closure on_alarm; int refs; grpc_closure write_closure; - grpc_pollset_set *interested_parties; - char *addr_str; - grpc_endpoint **ep; - grpc_closure *closure; - grpc_channel_args *channel_args; + grpc_pollset_set* interested_parties; + char* addr_str; + grpc_endpoint** ep; + grpc_closure* closure; + grpc_channel_args* channel_args; } async_connect; -static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd, - const grpc_channel_args *channel_args) { - grpc_error *err = GRPC_ERROR_NONE; +static grpc_error* prepare_socket(const grpc_resolved_address* addr, int fd, + const grpc_channel_args* channel_args) { + grpc_error* err = GRPC_ERROR_NONE; GPR_ASSERT(fd >= 0); @@ -80,8 +82,8 @@ static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd, for (size_t i = 0; i < channel_args->num_args; i++) { if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_SOCKET_MUTATOR)) { GPR_ASSERT(channel_args->args[i].type == GRPC_ARG_POINTER); - grpc_socket_mutator *mutator = - (grpc_socket_mutator *)channel_args->args[i].value.pointer.p; + grpc_socket_mutator* mutator = static_cast( + channel_args->args[i].value.pointer.p); err = grpc_set_socket_with_mutator(fd, mutator); if (err != GRPC_ERROR_NONE) goto error; } @@ -97,60 +99,59 @@ static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd, return err; } -static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { +static void tc_on_alarm(void* acp, grpc_error* error) { int done; - async_connect *ac = (async_connect *)acp; - if 
(GRPC_TRACER_ON(grpc_tcp_trace)) { - const char *str = grpc_error_string(error); - gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str, + async_connect* ac = static_cast(acp); + if (grpc_tcp_trace.enabled()) { + const char* str = grpc_error_string(error); + gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str, str); } gpr_mu_lock(&ac->mu); - if (ac->fd != NULL) { - grpc_fd_shutdown(exec_ctx, ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "connect() timed out")); + if (ac->fd != nullptr) { + grpc_fd_shutdown( + ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect() timed out")); } done = (--ac->refs == 0); gpr_mu_unlock(&ac->mu); if (done) { gpr_mu_destroy(&ac->mu); gpr_free(ac->addr_str); - grpc_channel_args_destroy(exec_ctx, ac->channel_args); + grpc_channel_args_destroy(ac->channel_args); gpr_free(ac); } } -grpc_endpoint *grpc_tcp_client_create_from_fd( - grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args, - const char *addr_str) { - return grpc_tcp_create(exec_ctx, fd, channel_args, addr_str); +grpc_endpoint* grpc_tcp_client_create_from_fd( + grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str) { + return grpc_tcp_create(fd, channel_args, addr_str); } -static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { - async_connect *ac = (async_connect *)acp; +static void on_writable(void* acp, grpc_error* error) { + async_connect* ac = static_cast(acp); int so_error = 0; socklen_t so_error_size; int err; int done; - grpc_endpoint **ep = ac->ep; - grpc_closure *closure = ac->closure; - grpc_fd *fd; + grpc_endpoint** ep = ac->ep; + grpc_closure* closure = ac->closure; + grpc_fd* fd; GRPC_ERROR_REF(error); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - const char *str = grpc_error_string(error); - gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s", - ac->addr_str, str); + if (grpc_tcp_trace.enabled()) { + const char* str = grpc_error_string(error); + gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_writable: error=%s", ac->addr_str, + str); } gpr_mu_lock(&ac->mu); GPR_ASSERT(ac->fd); fd = ac->fd; - ac->fd = NULL; + ac->fd = nullptr; gpr_mu_unlock(&ac->mu); - grpc_timer_cancel(exec_ctx, &ac->alarm); + grpc_timer_cancel(&ac->alarm); gpr_mu_lock(&ac->mu); if (error != GRPC_ERROR_NONE) { @@ -172,10 +173,9 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { switch (so_error) { case 0: - grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd); - *ep = grpc_tcp_client_create_from_fd(exec_ctx, fd, ac->channel_args, - ac->addr_str); - fd = NULL; + grpc_pollset_set_del_fd(ac->interested_parties, fd); + *ep = grpc_tcp_client_create_from_fd(fd, ac->channel_args, ac->addr_str); + fd = nullptr; break; case ENOBUFS: /* We will get one of these errors if we have run out of @@ -194,7 +194,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { don't do that! */ gpr_log(GPR_ERROR, "kernel out of buffers"); gpr_mu_unlock(&ac->mu); - grpc_fd_notify_on_write(exec_ctx, fd, &ac->write_closure); + grpc_fd_notify_on_write(fd, &ac->write_closure); return; case ECONNREFUSED: /* This error shouldn't happen for anything other than connect(). 
*/ @@ -208,149 +208,152 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { } finish: - if (fd != NULL) { - grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd); - grpc_fd_orphan(exec_ctx, fd, NULL, NULL, false /* already_closed */, + if (fd != nullptr) { + grpc_pollset_set_del_fd(ac->interested_parties, fd); + grpc_fd_orphan(fd, nullptr, nullptr, false /* already_closed */, "tcp_client_orphan"); - fd = NULL; + fd = nullptr; } done = (--ac->refs == 0); + // Create a copy of the data from "ac" to be accessed after the unlock, as + // "ac" and its contents may be deallocated by the time they are read. + const grpc_slice addr_str_slice = grpc_slice_from_copied_string(ac->addr_str); gpr_mu_unlock(&ac->mu); if (error != GRPC_ERROR_NONE) { - char *error_descr; + char* error_descr; grpc_slice str; bool ret = grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION, &str); GPR_ASSERT(ret); - char *desc = grpc_slice_to_c_string(str); + char* desc = grpc_slice_to_c_string(str); gpr_asprintf(&error_descr, "Failed to connect to remote host: %s", desc); error = grpc_error_set_str(error, GRPC_ERROR_STR_DESCRIPTION, grpc_slice_from_copied_string(error_descr)); gpr_free(error_descr); gpr_free(desc); error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS, - grpc_slice_from_copied_string(ac->addr_str)); + addr_str_slice /* takes ownership */); + } else { + grpc_slice_unref(addr_str_slice); } if (done) { + // This is safe even outside the lock, because "done", the sentinel, is + // populated *inside* the lock. gpr_mu_destroy(&ac->mu); gpr_free(ac->addr_str); - grpc_channel_args_destroy(exec_ctx, ac->channel_args); + grpc_channel_args_destroy(ac->channel_args); gpr_free(ac); } - GRPC_CLOSURE_SCHED(exec_ctx, closure, error); + GRPC_CLOSURE_SCHED(closure, error); } -static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_endpoint **ep, - grpc_pollset_set *interested_parties, - const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, - gpr_timespec deadline) { - int fd; +grpc_error* grpc_tcp_client_prepare_fd(const grpc_channel_args* channel_args, + const grpc_resolved_address* addr, + grpc_resolved_address* mapped_addr, + grpc_fd** fdobj) { grpc_dualstack_mode dsmode; - int err; - async_connect *ac; - grpc_resolved_address addr6_v4mapped; - grpc_resolved_address addr4_copy; - grpc_fd *fdobj; - char *name; - char *addr_str; - grpc_error *error; - - *ep = NULL; - - /* Use dualstack sockets where available. */ - if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) { - addr = &addr6_v4mapped; + int fd; + grpc_error* error; + char* name; + char* addr_str; + *fdobj = nullptr; + /* Use dualstack sockets where available. Set mapped to v6 or v4 mapped to + v6. */ + if (!grpc_sockaddr_to_v4mapped(addr, mapped_addr)) { + /* addr is v4 mapped to v6 or v6. */ + memcpy(mapped_addr, addr, sizeof(*mapped_addr)); } - - error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd); + error = + grpc_create_dualstack_socket(mapped_addr, SOCK_STREAM, 0, &dsmode, &fd); if (error != GRPC_ERROR_NONE) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, error); - return; + return error; } if (dsmode == GRPC_DSMODE_IPV4) { - /* If we got an AF_INET socket, map the address back to IPv4. */ - GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy)); - addr = &addr4_copy; + /* Original addr is either v4 or v4 mapped to v6. Set mapped_addr to v4. 
*/ + if (!grpc_sockaddr_is_v4mapped(addr, mapped_addr)) { + memcpy(mapped_addr, addr, sizeof(*mapped_addr)); + } } - if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, error); - return; + if ((error = prepare_socket(mapped_addr, fd, channel_args)) != + GRPC_ERROR_NONE) { + return error; } + addr_str = grpc_sockaddr_to_uri(mapped_addr); + gpr_asprintf(&name, "tcp-client:%s", addr_str); + *fdobj = grpc_fd_create(fd, name); + gpr_free(name); + gpr_free(addr_str); + return GRPC_ERROR_NONE; +} +void grpc_tcp_client_create_from_prepared_fd( + grpc_pollset_set* interested_parties, grpc_closure* closure, grpc_fd* fdobj, + const grpc_channel_args* channel_args, const grpc_resolved_address* addr, + grpc_millis deadline, grpc_endpoint** ep) { + const int fd = grpc_fd_wrapped_fd(fdobj); + int err; + async_connect* ac; do { - GPR_ASSERT(addr->len < ~(socklen_t)0); - err = - connect(fd, (const struct sockaddr *)addr->addr, (socklen_t)addr->len); + err = connect(fd, reinterpret_cast(addr->addr), + addr->len); } while (err < 0 && errno == EINTR); - - addr_str = grpc_sockaddr_to_uri(addr); - gpr_asprintf(&name, "tcp-client:%s", addr_str); - - fdobj = grpc_fd_create(fd, name); - if (err >= 0) { - *ep = - grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str); - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); - goto done; + char* addr_str = grpc_sockaddr_to_uri(addr); + *ep = grpc_tcp_client_create_from_fd(fdobj, channel_args, addr_str); + gpr_free(addr_str); + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); + return; } - if (errno != EWOULDBLOCK && errno != EINPROGRESS) { - grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, false /* already_closed */, + grpc_fd_orphan(fdobj, nullptr, nullptr, false /* already_closed */, "tcp_client_connect_error"); - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect")); - goto done; + GRPC_CLOSURE_SCHED(closure, GRPC_OS_ERROR(errno, "connect")); + return; } - grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj); + grpc_pollset_set_add_fd(interested_parties, fdobj); - ac = (async_connect *)gpr_malloc(sizeof(async_connect)); + ac = static_cast(gpr_malloc(sizeof(async_connect))); ac->closure = closure; ac->ep = ep; ac->fd = fdobj; ac->interested_parties = interested_parties; - ac->addr_str = addr_str; - addr_str = NULL; + ac->addr_str = grpc_sockaddr_to_uri(addr); gpr_mu_init(&ac->mu); ac->refs = 2; GRPC_CLOSURE_INIT(&ac->write_closure, on_writable, ac, grpc_schedule_on_exec_ctx); ac->channel_args = grpc_channel_args_copy(channel_args); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting fd %p", + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: asynchronously connecting fd %p", ac->addr_str, fdobj); } gpr_mu_lock(&ac->mu); GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &ac->alarm, - gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), - &ac->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC)); - grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure); + grpc_timer_init(&ac->alarm, deadline, &ac->on_alarm); + grpc_fd_notify_on_write(ac->fd, &ac->write_closure); gpr_mu_unlock(&ac->mu); - -done: - gpr_free(name); - gpr_free(addr_str); } -// overridden by api_fuzzer.c -void (*grpc_tcp_client_connect_impl)( - grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, - grpc_pollset_set *interested_parties, const grpc_channel_args 
*channel_args, - const grpc_resolved_address *addr, - gpr_timespec deadline) = tcp_client_connect_impl; - -void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_endpoint **ep, - grpc_pollset_set *interested_parties, - const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, - gpr_timespec deadline) { - grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, - channel_args, addr, deadline); +static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep, + grpc_pollset_set* interested_parties, + const grpc_channel_args* channel_args, + const grpc_resolved_address* addr, + grpc_millis deadline) { + grpc_resolved_address mapped_addr; + grpc_fd* fdobj = nullptr; + grpc_error* error; + *ep = nullptr; + if ((error = grpc_tcp_client_prepare_fd(channel_args, addr, &mapped_addr, + &fdobj)) != GRPC_ERROR_NONE) { + GRPC_CLOSURE_SCHED(closure, error); + return; + } + grpc_tcp_client_create_from_prepared_fd(interested_parties, closure, fdobj, + channel_args, &mapped_addr, deadline, + ep); } +grpc_tcp_client_vtable grpc_posix_tcp_client_vtable = {tcp_connect}; #endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.h b/Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.h index b5a381479..d0168ef13 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.h +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_client_posix.h @@ -19,12 +19,50 @@ #ifndef GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H #define GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H +#include + #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/tcp_client.h" -grpc_endpoint *grpc_tcp_client_create_from_fd( - grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args, - const char *addr_str); +/* Create an endpoint from a connected grpc_fd. + + fd: a connected FD. Ownership is taken. + channel_args: may contain custom settings for the endpoint + addr_str: destination address in printable format + Returns: a new endpoint +*/ +grpc_endpoint* grpc_tcp_client_create_from_fd( + grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str); + +/* Return a configured, unbound, unconnected TCP client grpc_fd. + + channel_args: may contain custom settings for the fd + addr: the destination address + mapped_addr: out parameter. addr mapped to an address appropriate to the + type of socket FD created. For example, if addr is IPv4 and dual stack + sockets are available, mapped_addr will be an IPv4-mapped IPv6 address + fdobj: out parameter. The new FD + Returns: error, if any. Out parameters are not set on error +*/ +grpc_error* grpc_tcp_client_prepare_fd(const grpc_channel_args* channel_args, + const grpc_resolved_address* addr, + grpc_resolved_address* mapped_addr, + grpc_fd** fdobj); + +/* Connect a configured TCP client grpc_fd. + + interested_parties: a set of pollsets that would be interested in this + connection being established (in order to continue their work + closure: called when complete. On success, *ep will be set. + fdobj: an FD returned from grpc_tcp_client_prepare_fd(). Ownership is taken + channel_args: may contain custom settings for the endpoint + deadline: connection deadline + ep: out parameter. 
Set before closure is called if successful +*/ +void grpc_tcp_client_create_from_prepared_fd( + grpc_pollset_set* interested_parties, grpc_closure* closure, grpc_fd* fdobj, + const grpc_channel_args* channel_args, const grpc_resolved_address* addr, + grpc_millis deadline, grpc_endpoint** ep); #endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_client_uv.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_client_uv.c deleted file mode 100644 index 0d9e7ed5f..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_client_uv.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_UV - -#include - -#include -#include - -#include "src/core/lib/iomgr/error.h" -#include "src/core/lib/iomgr/iomgr_uv.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/iomgr/tcp_client.h" -#include "src/core/lib/iomgr/tcp_uv.h" -#include "src/core/lib/iomgr/timer.h" - -extern grpc_tracer_flag grpc_tcp_trace; - -typedef struct grpc_uv_tcp_connect { - uv_connect_t connect_req; - grpc_timer alarm; - grpc_closure on_alarm; - uv_tcp_t *tcp_handle; - grpc_closure *closure; - grpc_endpoint **endpoint; - int refs; - char *addr_name; - grpc_resource_quota *resource_quota; -} grpc_uv_tcp_connect; - -static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx, - grpc_uv_tcp_connect *connect) { - grpc_resource_quota_unref_internal(exec_ctx, connect->resource_quota); - gpr_free(connect->addr_name); - gpr_free(connect); -} - -static void tcp_close_callback(uv_handle_t *handle) { gpr_free(handle); } - -static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, - grpc_error *error) { - int done; - grpc_uv_tcp_connect *connect = acp; - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - const char *str = grpc_error_string(error); - gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", - connect->addr_name, str); - } - if (error == GRPC_ERROR_NONE) { - /* error == NONE implies that the timer ran out, and wasn't cancelled. 
If - it was cancelled, then the handler that cancelled it also should close - the handle, if applicable */ - uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback); - } - done = (--connect->refs == 0); - if (done) { - uv_tcp_connect_cleanup(exec_ctx, connect); - } -} - -static void uv_tc_on_connect(uv_connect_t *req, int status) { - grpc_uv_tcp_connect *connect = req->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_error *error = GRPC_ERROR_NONE; - int done; - grpc_closure *closure = connect->closure; - grpc_timer_cancel(&exec_ctx, &connect->alarm); - if (status == 0) { - *connect->endpoint = grpc_tcp_create( - connect->tcp_handle, connect->resource_quota, connect->addr_name); - } else { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Failed to connect to remote host"); - error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status); - error = - grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string(uv_strerror(status))); - if (status == UV_ECANCELED) { - error = - grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string("Timeout occurred")); - // This should only happen if the handle is already closed - } else { - error = grpc_error_set_str( - error, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string(uv_strerror(status))); - uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback); - } - } - done = (--connect->refs == 0); - if (done) { - grpc_exec_ctx_flush(&exec_ctx); - uv_tcp_connect_cleanup(&exec_ctx, connect); - } - GRPC_CLOSURE_SCHED(&exec_ctx, closure, error); - grpc_exec_ctx_finish(&exec_ctx); -} - -static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, - grpc_closure *closure, grpc_endpoint **ep, - grpc_pollset_set *interested_parties, - const grpc_channel_args *channel_args, - const grpc_resolved_address *resolved_addr, - gpr_timespec deadline) { - grpc_uv_tcp_connect *connect; - grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL); - (void)channel_args; - (void)interested_parties; - - GRPC_UV_ASSERT_SAME_THREAD(); - - if (channel_args != NULL) { - for (size_t i = 0; i < channel_args->num_args; i++) { - if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); - resource_quota = grpc_resource_quota_ref_internal( - channel_args->args[i].value.pointer.p); - } - } - } - - connect = gpr_zalloc(sizeof(grpc_uv_tcp_connect)); - connect->closure = closure; - connect->endpoint = ep; - connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t)); - connect->addr_name = grpc_sockaddr_to_uri(resolved_addr); - connect->resource_quota = resource_quota; - uv_tcp_init(uv_default_loop(), connect->tcp_handle); - connect->connect_req.data = connect; - connect->refs = 2; // One for the connect operation, one for the timer. 
- - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting", - connect->addr_name); - } - - // TODO(murgatroid99): figure out what the return value here means - uv_tcp_connect(&connect->connect_req, connect->tcp_handle, - (const struct sockaddr *)resolved_addr->addr, - uv_tc_on_connect); - GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect, - grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &connect->alarm, - gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), - &connect->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC)); -} - -// overridden by api_fuzzer.c -void (*grpc_tcp_client_connect_impl)( - grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, - grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, - gpr_timespec deadline) = tcp_client_connect_impl; - -void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_endpoint **ep, - grpc_pollset_set *interested_parties, - const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, - gpr_timespec deadline) { - grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, - channel_args, addr, deadline); -} - -#endif /* GRPC_UV */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_client_windows.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_client_windows.cc similarity index 64% rename from Sources/CgRPC/src/core/lib/iomgr/tcp_client_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/tcp_client_windows.cc index fc62105cc..e5b550259 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_client_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_client_windows.cc @@ -16,8 +16,12 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" +#include + #ifdef GRPC_WINSOCK_SOCKET #include "src/core/lib/iomgr/sockaddr_windows.h" @@ -26,7 +30,6 @@ #include #include #include -#include #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/iomgr/iocp_windows.h" @@ -38,26 +41,24 @@ #include "src/core/lib/iomgr/timer.h" typedef struct { - grpc_closure *on_done; + grpc_closure* on_done; gpr_mu mu; - grpc_winsocket *socket; - gpr_timespec deadline; + grpc_winsocket* socket; grpc_timer alarm; grpc_closure on_alarm; - char *addr_name; + char* addr_name; int refs; grpc_closure on_connect; - grpc_endpoint **endpoint; - grpc_channel_args *channel_args; + grpc_endpoint** endpoint; + grpc_channel_args* channel_args; } async_connect; -static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx, - async_connect *ac, - grpc_winsocket *socket) { +static void async_connect_unlock_and_cleanup(async_connect* ac, + grpc_winsocket* socket) { int done = (--ac->refs == 0); gpr_mu_unlock(&ac->mu); if (done) { - grpc_channel_args_destroy(exec_ctx, ac->channel_args); + grpc_channel_args_destroy(ac->channel_args); gpr_mu_destroy(&ac->mu); gpr_free(ac->addr_name); gpr_free(ac); @@ -65,31 +66,31 @@ static void async_connect_unlock_and_cleanup(grpc_exec_ctx *exec_ctx, if (socket != NULL) grpc_winsocket_destroy(socket); } -static void on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { - async_connect *ac = acp; +static void on_alarm(void* acp, grpc_error* error) { + async_connect* ac = (async_connect*)acp; gpr_mu_lock(&ac->mu); - grpc_winsocket *socket = ac->socket; + grpc_winsocket* socket = ac->socket; ac->socket = NULL; if (socket != NULL) { grpc_winsocket_shutdown(socket); } - async_connect_unlock_and_cleanup(exec_ctx, ac, socket); + 
async_connect_unlock_and_cleanup(ac, socket); } -static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { - async_connect *ac = acp; - grpc_endpoint **ep = ac->endpoint; +static void on_connect(void* acp, grpc_error* error) { + async_connect* ac = (async_connect*)acp; + grpc_endpoint** ep = ac->endpoint; GPR_ASSERT(*ep == NULL); - grpc_closure *on_done = ac->on_done; + grpc_closure* on_done = ac->on_done; GRPC_ERROR_REF(error); gpr_mu_lock(&ac->mu); - grpc_winsocket *socket = ac->socket; + grpc_winsocket* socket = ac->socket; ac->socket = NULL; gpr_mu_unlock(&ac->mu); - grpc_timer_cancel(exec_ctx, &ac->alarm); + grpc_timer_cancel(&ac->alarm); gpr_mu_lock(&ac->mu); @@ -103,9 +104,9 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { GPR_ASSERT(transfered_bytes == 0); if (!wsa_success) { error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx"); + closesocket(socket->socket); } else { - *ep = - grpc_tcp_create(exec_ctx, socket, ac->channel_args, ac->addr_name); + *ep = grpc_tcp_create(socket, ac->channel_args, ac->addr_name); socket = NULL; } } else { @@ -113,30 +114,31 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) { } } - async_connect_unlock_and_cleanup(exec_ctx, ac, socket); + async_connect_unlock_and_cleanup(ac, socket); /* If the connection was aborted, the callback was already called when the deadline was met. */ - GRPC_CLOSURE_SCHED(exec_ctx, on_done, error); + GRPC_CLOSURE_SCHED(on_done, error); } /* Tries to issue one async connection, then schedules both an IOCP notification request for the connection, and one timeout alert. */ -static void tcp_client_connect_impl( - grpc_exec_ctx *exec_ctx, grpc_closure *on_done, grpc_endpoint **endpoint, - grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, gpr_timespec deadline) { +static void tcp_connect(grpc_closure* on_done, grpc_endpoint** endpoint, + grpc_pollset_set* interested_parties, + const grpc_channel_args* channel_args, + const grpc_resolved_address* addr, + grpc_millis deadline) { SOCKET sock = INVALID_SOCKET; BOOL success; int status; grpc_resolved_address addr6_v4mapped; grpc_resolved_address local_address; - async_connect *ac; - grpc_winsocket *socket = NULL; + async_connect* ac; + grpc_winsocket* socket = NULL; LPFN_CONNECTEX ConnectEx; GUID guid = WSAID_CONNECTEX; DWORD ioctl_num_bytes; - grpc_winsocket_callback_info *info; - grpc_error *error = GRPC_ERROR_NONE; + grpc_winsocket_callback_info* info; + grpc_error* error = GRPC_ERROR_NONE; *endpoint = NULL; @@ -171,8 +173,8 @@ static void tcp_client_connect_impl( grpc_sockaddr_make_wildcard6(0, &local_address); - status = bind(sock, (struct sockaddr *)&local_address.addr, - (int)local_address.len); + status = + bind(sock, (grpc_sockaddr*)&local_address.addr, (int)local_address.len); if (status != 0) { error = GRPC_WSA_ERROR(WSAGetLastError(), "bind"); goto failure; @@ -180,8 +182,8 @@ static void tcp_client_connect_impl( socket = grpc_winsocket_create(sock, "client"); info = &socket->write_info; - success = ConnectEx(sock, (struct sockaddr *)&addr->addr, (int)addr->len, - NULL, 0, NULL, &info->overlapped); + success = ConnectEx(sock, (grpc_sockaddr*)&addr->addr, (int)addr->len, NULL, + 0, NULL, &info->overlapped); /* It wouldn't be unusual to get a success immediately. But we'll still get an IOCP notification, so let's ignore it. 
*/ @@ -193,7 +195,7 @@ static void tcp_client_connect_impl( } } - ac = gpr_malloc(sizeof(async_connect)); + ac = (async_connect*)gpr_malloc(sizeof(async_connect)); ac->on_done = on_done; ac->socket = socket; gpr_mu_init(&ac->mu); @@ -204,15 +206,14 @@ static void tcp_client_connect_impl( GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx); GRPC_CLOSURE_INIT(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx); - grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm, - gpr_now(GPR_CLOCK_MONOTONIC)); - grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect); + grpc_timer_init(&ac->alarm, deadline, &ac->on_alarm); + grpc_socket_notify_on_write(socket, &ac->on_connect); return; failure: GPR_ASSERT(error != GRPC_ERROR_NONE); - char *target_uri = grpc_sockaddr_to_uri(addr); - grpc_error *final_error = grpc_error_set_str( + char* target_uri = grpc_sockaddr_to_uri(addr); + grpc_error* final_error = grpc_error_set_str( GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Failed to connect", &error, 1), GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(target_uri)); @@ -222,24 +223,9 @@ static void tcp_client_connect_impl( } else if (sock != INVALID_SOCKET) { closesocket(sock); } - GRPC_CLOSURE_SCHED(exec_ctx, on_done, final_error); + GRPC_CLOSURE_SCHED(on_done, final_error); } -// overridden by api_fuzzer.c -void (*grpc_tcp_client_connect_impl)( - grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep, - grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, - gpr_timespec deadline) = tcp_client_connect_impl; - -void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure, - grpc_endpoint **ep, - grpc_pollset_set *interested_parties, - const grpc_channel_args *channel_args, - const grpc_resolved_address *addr, - gpr_timespec deadline) { - grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, - channel_args, addr, deadline); -} +grpc_tcp_client_vtable grpc_windows_tcp_client_vtable = {tcp_connect}; #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_custom.cc b/Sources/CgRPC/src/core/lib/iomgr/tcp_custom.cc new file mode 100644 index 000000000..b3b293401 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_custom.cc @@ -0,0 +1,365 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#include +#include + +#include + +#include +#include +#include + +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/network_status_tracker.h" +#include "src/core/lib/iomgr/resource_quota.h" +#include "src/core/lib/iomgr/tcp_client.h" +#include "src/core/lib/iomgr/tcp_custom.h" +#include "src/core/lib/iomgr/tcp_server.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" + +#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192 + +extern grpc_core::TraceFlag grpc_tcp_trace; + +grpc_socket_vtable* grpc_custom_socket_vtable = nullptr; +extern grpc_tcp_server_vtable custom_tcp_server_vtable; +extern grpc_tcp_client_vtable custom_tcp_client_vtable; + +void grpc_custom_endpoint_init(grpc_socket_vtable* impl) { + grpc_custom_socket_vtable = impl; + grpc_set_tcp_client_impl(&custom_tcp_client_vtable); + grpc_set_tcp_server_impl(&custom_tcp_server_vtable); +} + +typedef struct { + grpc_endpoint base; + gpr_refcount refcount; + grpc_custom_socket* socket; + + grpc_closure* read_cb; + grpc_closure* write_cb; + + grpc_slice_buffer* read_slices; + grpc_slice_buffer* write_slices; + + grpc_resource_user* resource_user; + grpc_resource_user_slice_allocator slice_allocator; + + bool shutting_down; + + char* peer_string; +} custom_tcp_endpoint; + +static void tcp_free(grpc_custom_socket* s) { + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)s->endpoint; + grpc_resource_user_unref(tcp->resource_user); + gpr_free(tcp->peer_string); + gpr_free(tcp); + s->refs--; + if (s->refs == 0) { + grpc_custom_socket_vtable->destroy(s); + gpr_free(s); + } +} + +#ifndef NDEBUG +#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) +#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) +static void tcp_unref(custom_tcp_endpoint* tcp, const char* reason, + const char* file, int line) { + if (grpc_tcp_trace.enabled()) { + gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); + gpr_log(file, line, GPR_LOG_SEVERITY_ERROR, + "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp->socket, reason, + val, val - 1); + } + if (gpr_unref(&tcp->refcount)) { + tcp_free(tcp->socket); + } +} + +static void tcp_ref(custom_tcp_endpoint* tcp, const char* reason, + const char* file, int line) { + if (grpc_tcp_trace.enabled()) { + gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); + gpr_log(file, line, GPR_LOG_SEVERITY_ERROR, + "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp->socket, reason, + val, val + 1); + } + gpr_ref(&tcp->refcount); +} +#else +#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) +#define TCP_REF(tcp, reason) tcp_ref((tcp)) +static void tcp_unref(custom_tcp_endpoint* tcp) { + if (gpr_unref(&tcp->refcount)) { + tcp_free(tcp->socket); + } +} + +static void tcp_ref(custom_tcp_endpoint* tcp) { gpr_ref(&tcp->refcount); } +#endif + +static void call_read_cb(custom_tcp_endpoint* tcp, grpc_error* error) { + grpc_closure* cb = tcp->read_cb; + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb, + cb->cb_arg); + size_t i; + const char* str = grpc_error_string(error); + gpr_log(GPR_INFO, "read: error=%s", str); + + for (i = 0; i < tcp->read_slices->count; i++) { + char* dump = grpc_dump_slice(tcp->read_slices->slices[i], + GPR_DUMP_HEX | GPR_DUMP_ASCII); + gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump); + gpr_free(dump); + } + } + 
TCP_UNREF(tcp, "read"); + tcp->read_slices = nullptr; + tcp->read_cb = nullptr; + GRPC_CLOSURE_RUN(cb, error); +} + +static void custom_read_callback(grpc_custom_socket* socket, size_t nread, + grpc_error* error) { + grpc_core::ExecCtx exec_ctx; + grpc_slice_buffer garbage; + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint; + if (error == GRPC_ERROR_NONE && nread == 0) { + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"); + } + if (error == GRPC_ERROR_NONE) { + // Successful read + if ((size_t)nread < tcp->read_slices->length) { + /* TODO(murgatroid99): Instead of discarding the unused part of the read + * buffer, reuse it as the next read buffer. */ + grpc_slice_buffer_init(&garbage); + grpc_slice_buffer_trim_end( + tcp->read_slices, tcp->read_slices->length - (size_t)nread, &garbage); + grpc_slice_buffer_reset_and_unref_internal(&garbage); + } + } else { + grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices); + } + call_read_cb(tcp, error); +} + +static void tcp_read_allocation_done(void* tcpp, grpc_error* error) { + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)tcpp; + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp->socket, + grpc_error_string(error)); + } + if (error == GRPC_ERROR_NONE) { + /* Before calling read, we allocate a buffer with exactly one slice + * to tcp->read_slices and wait for the callback indicating that the + * allocation was successful. So slices[0] should always exist here */ + char* buffer = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[0]); + size_t len = GRPC_SLICE_LENGTH(tcp->read_slices->slices[0]); + grpc_custom_socket_vtable->read(tcp->socket, buffer, len, + custom_read_callback); + } else { + grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices); + call_read_cb(tcp, GRPC_ERROR_REF(error)); + } + if (grpc_tcp_trace.enabled()) { + const char* str = grpc_error_string(error); + gpr_log(GPR_INFO, "Initiating read on %p: error=%s", tcp->socket, str); + } +} + +static void endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices, + grpc_closure* cb) { + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep; + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + GPR_ASSERT(tcp->read_cb == nullptr); + tcp->read_cb = cb; + tcp->read_slices = read_slices; + grpc_slice_buffer_reset_and_unref_internal(read_slices); + TCP_REF(tcp, "read"); + grpc_resource_user_alloc_slices(&tcp->slice_allocator, + GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1, + tcp->read_slices); +} + +static void custom_write_callback(grpc_custom_socket* socket, + grpc_error* error) { + grpc_core::ExecCtx exec_ctx; + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint; + grpc_closure* cb = tcp->write_cb; + tcp->write_cb = nullptr; + if (grpc_tcp_trace.enabled()) { + const char* str = grpc_error_string(error); + gpr_log(GPR_INFO, "write complete on %p: error=%s", tcp->socket, str); + } + TCP_UNREF(tcp, "write"); + GRPC_CLOSURE_SCHED(cb, error); +} + +static void endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* write_slices, + grpc_closure* cb) { + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep; + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + + if (grpc_tcp_trace.enabled()) { + size_t j; + + for (j = 0; j < write_slices->count; j++) { + char* data = grpc_dump_slice(write_slices->slices[j], + GPR_DUMP_HEX | GPR_DUMP_ASCII); + gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp->socket, tcp->peer_string, + data); + gpr_free(data); + } + } + + if (tcp->shutting_down) { + GRPC_CLOSURE_SCHED(cb, 
GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "TCP socket is shutting down")); + return; + } + + GPR_ASSERT(tcp->write_cb == nullptr); + tcp->write_slices = write_slices; + GPR_ASSERT(tcp->write_slices->count <= UINT_MAX); + if (tcp->write_slices->count == 0) { + // No slices means we don't have to do anything, + // and libuv doesn't like empty writes + GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE); + return; + } + tcp->write_cb = cb; + TCP_REF(tcp, "write"); + grpc_custom_socket_vtable->write(tcp->socket, tcp->write_slices, + custom_write_callback); +} + +static void endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) { + // No-op. We're ignoring pollsets currently + (void)ep; + (void)pollset; +} + +static void endpoint_add_to_pollset_set(grpc_endpoint* ep, + grpc_pollset_set* pollset) { + // No-op. We're ignoring pollsets currently + (void)ep; + (void)pollset; +} + +static void endpoint_delete_from_pollset_set(grpc_endpoint* ep, + grpc_pollset_set* pollset) { + // No-op. We're ignoring pollsets currently + (void)ep; + (void)pollset; +} + +static void endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) { + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep; + if (!tcp->shutting_down) { + if (grpc_tcp_trace.enabled()) { + const char* str = grpc_error_string(why); + gpr_log(GPR_INFO, "TCP %p shutdown why=%s", tcp->socket, str); + } + tcp->shutting_down = true; + // GRPC_CLOSURE_SCHED(tcp->read_cb, GRPC_ERROR_REF(why)); + // GRPC_CLOSURE_SCHED(tcp->write_cb, GRPC_ERROR_REF(why)); + // tcp->read_cb = nullptr; + // tcp->write_cb = nullptr; + grpc_resource_user_shutdown(tcp->resource_user); + grpc_custom_socket_vtable->shutdown(tcp->socket); + } + GRPC_ERROR_UNREF(why); +} + +static void custom_close_callback(grpc_custom_socket* socket) { + socket->refs--; + if (socket->refs == 0) { + grpc_custom_socket_vtable->destroy(socket); + gpr_free(socket); + } else if (socket->endpoint) { + grpc_core::ExecCtx exec_ctx; + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint; + TCP_UNREF(tcp, "destroy"); + } +} + +static void endpoint_destroy(grpc_endpoint* ep) { + grpc_network_status_unregister_endpoint(ep); + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep; + grpc_custom_socket_vtable->close(tcp->socket, custom_close_callback); +} + +static char* endpoint_get_peer(grpc_endpoint* ep) { + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep; + return gpr_strdup(tcp->peer_string); +} + +static grpc_resource_user* endpoint_get_resource_user(grpc_endpoint* ep) { + custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep; + return tcp->resource_user; +} + +static int endpoint_get_fd(grpc_endpoint* ep) { return -1; } + +static grpc_endpoint_vtable vtable = {endpoint_read, + endpoint_write, + endpoint_add_to_pollset, + endpoint_add_to_pollset_set, + endpoint_delete_from_pollset_set, + endpoint_shutdown, + endpoint_destroy, + endpoint_get_resource_user, + endpoint_get_peer, + endpoint_get_fd}; + +grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket, + grpc_resource_quota* resource_quota, + char* peer_string) { + custom_tcp_endpoint* tcp = + (custom_tcp_endpoint*)gpr_malloc(sizeof(custom_tcp_endpoint)); + grpc_core::ExecCtx exec_ctx; + + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "Creating TCP endpoint %p", socket); + } + memset(tcp, 0, sizeof(custom_tcp_endpoint)); + socket->refs++; + socket->endpoint = (grpc_endpoint*)tcp; + tcp->socket = socket; + tcp->base.vtable = &vtable; + gpr_ref_init(&tcp->refcount, 1); + tcp->peer_string = gpr_strdup(peer_string); + 
tcp->shutting_down = false; + tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); + grpc_resource_user_slice_allocator_init( + &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp); + /* Tell network status tracking code about the new endpoint */ + grpc_network_status_register_endpoint(&tcp->base); + + return &tcp->base; +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_custom.h b/Sources/CgRPC/src/core/lib/iomgr/tcp_custom.h new file mode 100644 index 000000000..784ef8422 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_custom.h @@ -0,0 +1,81 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_TCP_CUSTOM_H +#define GRPC_CORE_LIB_IOMGR_TCP_CUSTOM_H + +#include + +#include "src/core/lib/iomgr/endpoint.h" +#include "src/core/lib/iomgr/sockaddr.h" + +typedef struct grpc_tcp_listener grpc_tcp_listener; +typedef struct grpc_custom_tcp_connect grpc_custom_tcp_connect; + +typedef struct grpc_custom_socket { + // Implementation defined + void* impl; + grpc_endpoint* endpoint; + grpc_tcp_listener* listener; + grpc_custom_tcp_connect* connector; + int refs; +} grpc_custom_socket; + +typedef void (*grpc_custom_connect_callback)(grpc_custom_socket* socket, + grpc_error* error); +typedef void (*grpc_custom_write_callback)(grpc_custom_socket* socket, + grpc_error* error); +typedef void (*grpc_custom_read_callback)(grpc_custom_socket* socket, + size_t nread, grpc_error* error); +typedef void (*grpc_custom_accept_callback)(grpc_custom_socket* socket, + grpc_custom_socket* client, + grpc_error* error); +typedef void (*grpc_custom_close_callback)(grpc_custom_socket* socket); + +typedef struct grpc_socket_vtable { + grpc_error* (*init)(grpc_custom_socket* socket, int domain); + void (*connect)(grpc_custom_socket* socket, const grpc_sockaddr* addr, + size_t len, grpc_custom_connect_callback cb); + void (*destroy)(grpc_custom_socket* socket); + void (*shutdown)(grpc_custom_socket* socket); + void (*close)(grpc_custom_socket* socket, grpc_custom_close_callback cb); + void (*write)(grpc_custom_socket* socket, grpc_slice_buffer* slices, + grpc_custom_write_callback cb); + void (*read)(grpc_custom_socket* socket, char* buffer, size_t length, + grpc_custom_read_callback cb); + grpc_error* (*getpeername)(grpc_custom_socket* socket, + const grpc_sockaddr* addr, int* len); + grpc_error* (*getsockname)(grpc_custom_socket* socket, + const grpc_sockaddr* addr, int* len); + grpc_error* (*bind)(grpc_custom_socket* socket, const grpc_sockaddr* addr, + size_t len, int flags); + grpc_error* (*listen)(grpc_custom_socket* socket); + void (*accept)(grpc_custom_socket* socket, grpc_custom_socket* client, + grpc_custom_accept_callback cb); +} grpc_socket_vtable; + +/* Internal APIs */ +void grpc_custom_endpoint_init(grpc_socket_vtable* impl); + +void grpc_custom_close_server_callback(grpc_tcp_listener* listener); + +grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket, + 
grpc_resource_quota* resource_quota, + char* peer_string); + +#endif /* GRPC_CORE_LIB_IOMGR_TCP_CUSTOM_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.c deleted file mode 100644 index 7e271294f..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.c +++ /dev/null @@ -1,819 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_POSIX_SOCKET - -#include "src/core/lib/iomgr/network_status_tracker.h" -#include "src/core/lib/iomgr/tcp_posix.h" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/debug/stats.h" -#include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/ev_posix.h" -#include "src/core/lib/iomgr/executor.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" - -#ifdef GRPC_HAVE_MSG_NOSIGNAL -#define SENDMSG_FLAGS MSG_NOSIGNAL -#else -#define SENDMSG_FLAGS 0 -#endif - -#ifdef GRPC_MSG_IOVLEN_TYPE -typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type; -#else -typedef size_t msg_iovlen_type; -#endif - -grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false, "tcp"); - -typedef struct { - grpc_endpoint base; - grpc_fd *em_fd; - int fd; - bool finished_edge; - double target_length; - double bytes_read_this_round; - gpr_refcount refcount; - gpr_atm shutdown_count; - - int min_read_chunk_size; - int max_read_chunk_size; - - /* garbage after the last read */ - grpc_slice_buffer last_read_buffer; - - grpc_slice_buffer *incoming_buffer; - grpc_slice_buffer *outgoing_buffer; - /** slice within outgoing_buffer to write next */ - size_t outgoing_slice_idx; - /** byte within outgoing_buffer->slices[outgoing_slice_idx] to write next */ - size_t outgoing_byte_idx; - - grpc_closure *read_cb; - grpc_closure *write_cb; - grpc_closure *release_fd_cb; - int *release_fd; - - grpc_closure read_done_closure; - grpc_closure write_done_closure; - - char *peer_string; - - grpc_resource_user *resource_user; - grpc_resource_user_slice_allocator slice_allocator; -} grpc_tcp; - -typedef struct backup_poller { - gpr_mu *pollset_mu; - grpc_closure run_poller; -} backup_poller; - -#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset *)((b) + 1)) - -static gpr_atm g_uncovered_notifications_pending; -static gpr_atm g_backup_poller; /* backup_poller* */ - -static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); -static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); -static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx, - void *arg /* grpc_tcp */, - grpc_error *error); - -static void done_poller(grpc_exec_ctx *exec_ctx, void *bp, 
- grpc_error *error_ignored) { - backup_poller *p = (backup_poller *)bp; - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p); - } - grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p)); - gpr_free(p); -} - -static void run_poller(grpc_exec_ctx *exec_ctx, void *bp, - grpc_error *error_ignored) { - backup_poller *p = (backup_poller *)bp; - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p); - } - gpr_mu_lock(p->pollset_mu); - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec deadline = - gpr_time_add(now, gpr_time_from_seconds(10, GPR_TIMESPAN)); - GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx); - GRPC_LOG_IF_ERROR("backup_poller:pollset_work", - grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL, - now, deadline)); - gpr_mu_unlock(p->pollset_mu); - /* last "uncovered" notification is the ref that keeps us polling, if we get - * there try a cas to release it */ - if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 && - gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) { - gpr_mu_lock(p->pollset_mu); - bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok); - } - gpr_mu_unlock(p->pollset_mu); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p); - } - grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p), - GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p, - grpc_schedule_on_exec_ctx)); - } else { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p); - } - GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE); - } -} - -static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller); - gpr_atm old_count = - gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, (int)old_count, - (int)old_count - 1); - } - GPR_ASSERT(old_count != 1); -} - -static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - backup_poller *p; - gpr_atm old_count = - gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", (int)old_count, - 2 + (int)old_count); - } - if (old_count == 0) { - GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx); - p = (backup_poller *)gpr_malloc(sizeof(*p) + grpc_pollset_size()); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p); - } - grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu); - gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p); - GRPC_CLOSURE_SCHED( - exec_ctx, - GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, - grpc_executor_scheduler(GRPC_EXECUTOR_LONG)), - GRPC_ERROR_NONE); - } else { - while ((p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller)) == NULL) { - // spin waiting for backup poller - } - } - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp); - } - grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd); - if (old_count != 0) { - drop_uncovered(exec_ctx, tcp); - } -} - -static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", 
tcp); - } - GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp, - grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure); -} - -static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp); - } - cover_self(exec_ctx, tcp); - GRPC_CLOSURE_INIT(&tcp->write_done_closure, - tcp_drop_uncovered_then_handle_write, tcp, - grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure); -} - -static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error)); - } - drop_uncovered(exec_ctx, (grpc_tcp *)arg); - tcp_handle_write(exec_ctx, arg, error); -} - -static void add_to_estimate(grpc_tcp *tcp, size_t bytes) { - tcp->bytes_read_this_round += (double)bytes; -} - -static void finish_estimate(grpc_tcp *tcp) { - /* If we read >80% of the target buffer in one read loop, increase the size - of the target buffer to either the amount read, or twice its previous - value */ - if (tcp->bytes_read_this_round > tcp->target_length * 0.8) { - tcp->target_length = - GPR_MAX(2 * tcp->target_length, tcp->bytes_read_this_round); - } else { - tcp->target_length = - 0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round; - } - tcp->bytes_read_this_round = 0; -} - -static size_t get_target_read_size(grpc_tcp *tcp) { - grpc_resource_quota *rq = grpc_resource_user_quota(tcp->resource_user); - double pressure = grpc_resource_quota_get_memory_pressure(rq); - double target = - tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0); - size_t sz = (((size_t)GPR_CLAMP(target, tcp->min_read_chunk_size, - tcp->max_read_chunk_size)) + - 255) & - ~(size_t)255; - /* don't use more than 1/16th of the overall resource quota for a single read - * alloc */ - size_t rqmax = grpc_resource_quota_peek_size(rq); - if (sz > rqmax / 16 && rqmax > 1024) { - sz = rqmax / 16; - } - return sz; -} - -static grpc_error *tcp_annotate_error(grpc_error *src_error, grpc_tcp *tcp) { - return grpc_error_set_str( - grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd), - GRPC_ERROR_STR_TARGET_ADDRESS, - grpc_slice_from_copied_string(tcp->peer_string)); -} - -static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); -static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error); - -static void tcp_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_error *why) { - grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_fd_shutdown(exec_ctx, tcp->em_fd, why); - grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); -} - -static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd, - false /* already_closed */, "tcp_unref_orphan"); - grpc_slice_buffer_destroy_internal(exec_ctx, &tcp->last_read_buffer); - grpc_resource_user_unref(exec_ctx, tcp->resource_user); - gpr_free(tcp->peer_string); - gpr_free(tcp); -} - -#ifndef NDEBUG -#define TCP_UNREF(cl, tcp, reason) \ - tcp_unref((cl), (tcp), (reason), __FILE__, __LINE__) -#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - const char *reason, const char *file, int line) { - if 
(GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val, - val - 1); - } - if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); - } -} - -static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, - int line) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val, - val + 1); - } - gpr_ref(&tcp->refcount); -} -#else -#define TCP_UNREF(cl, tcp, reason) tcp_unref((cl), (tcp)) -#define TCP_REF(tcp, reason) tcp_ref((tcp)) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); - } -} - -static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } -#endif - -static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { - grpc_network_status_unregister_endpoint(ep); - grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &tcp->last_read_buffer); - TCP_UNREF(exec_ctx, tcp, "destroy"); -} - -static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - grpc_error *error) { - grpc_closure *cb = tcp->read_cb; - - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg); - size_t i; - const char *str = grpc_error_string(error); - gpr_log(GPR_DEBUG, "read: error=%s", str); - - for (i = 0; i < tcp->incoming_buffer->count; i++) { - char *dump = grpc_dump_slice(tcp->incoming_buffer->slices[i], - GPR_DUMP_HEX | GPR_DUMP_ASCII); - gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump); - gpr_free(dump); - } - } - - tcp->read_cb = NULL; - tcp->incoming_buffer = NULL; - GRPC_CLOSURE_RUN(exec_ctx, cb, error); -} - -#define MAX_READ_IOVEC 4 -static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - struct msghdr msg; - struct iovec iov[MAX_READ_IOVEC]; - ssize_t read_bytes; - size_t i; - - GPR_ASSERT(!tcp->finished_edge); - GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC); - GPR_TIMER_BEGIN("tcp_continue_read", 0); - - for (i = 0; i < tcp->incoming_buffer->count; i++) { - iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]); - iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]); - } - - msg.msg_name = NULL; - msg.msg_namelen = 0; - msg.msg_iov = iov; - msg.msg_iovlen = (msg_iovlen_type)tcp->incoming_buffer->count; - msg.msg_control = NULL; - msg.msg_controllen = 0; - msg.msg_flags = 0; - - GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, tcp->incoming_buffer->length); - GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, tcp->incoming_buffer->count); - - GPR_TIMER_BEGIN("recvmsg", 0); - do { - GRPC_STATS_INC_SYSCALL_READ(exec_ctx); - read_bytes = recvmsg(tcp->fd, &msg, 0); - } while (read_bytes < 0 && errno == EINTR); - GPR_TIMER_END("recvmsg", read_bytes >= 0); - - if (read_bytes < 0) { - /* NB: After calling call_read_cb a parallel call of the read handler may - * be running. 
*/ - if (errno == EAGAIN) { - finish_estimate(tcp); - /* We've consumed the edge, request a new one */ - notify_on_read(exec_ctx, tcp); - } else { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, - tcp->incoming_buffer); - call_read_cb(exec_ctx, tcp, - tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp)); - TCP_UNREF(exec_ctx, tcp, "read"); - } - } else if (read_bytes == 0) { - /* 0 read size ==> end of stream */ - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer); - call_read_cb( - exec_ctx, tcp, - tcp_annotate_error( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp)); - TCP_UNREF(exec_ctx, tcp, "read"); - } else { - GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, read_bytes); - add_to_estimate(tcp, (size_t)read_bytes); - GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length); - if ((size_t)read_bytes < tcp->incoming_buffer->length) { - grpc_slice_buffer_trim_end( - tcp->incoming_buffer, - tcp->incoming_buffer->length - (size_t)read_bytes, - &tcp->last_read_buffer); - } - GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length); - call_read_cb(exec_ctx, tcp, GRPC_ERROR_NONE); - TCP_UNREF(exec_ctx, tcp, "read"); - } - - GPR_TIMER_END("tcp_continue_read", 0); -} - -static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp, - grpc_error *error) { - grpc_tcp *tcp = (grpc_tcp *)tcpp; - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp, - grpc_error_string(error)); - } - if (error != GRPC_ERROR_NONE) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer); - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, - &tcp->last_read_buffer); - call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error)); - TCP_UNREF(exec_ctx, tcp, "read"); - } else { - tcp_do_read(exec_ctx, tcp); - } -} - -static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - size_t target_read_size = get_target_read_size(tcp); - if (tcp->incoming_buffer->length < target_read_size && - tcp->incoming_buffer->count < MAX_READ_IOVEC) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp); - } - grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator, - target_read_size, 1, tcp->incoming_buffer); - } else { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp); - } - tcp_do_read(exec_ctx, tcp); - } -} - -static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error) { - grpc_tcp *tcp = (grpc_tcp *)arg; - GPR_ASSERT(!tcp->finished_edge); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error)); - } - - if (error != GRPC_ERROR_NONE) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer); - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, - &tcp->last_read_buffer); - call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error)); - TCP_UNREF(exec_ctx, tcp, "read"); - } else { - tcp_continue_read(exec_ctx, tcp); - } -} - -static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *incoming_buffer, grpc_closure *cb) { - grpc_tcp *tcp = (grpc_tcp *)ep; - GPR_ASSERT(tcp->read_cb == NULL); - tcp->read_cb = cb; - tcp->incoming_buffer = incoming_buffer; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, incoming_buffer); - grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer); - TCP_REF(tcp, "read"); - if (tcp->finished_edge) { - tcp->finished_edge = false; - 
notify_on_read(exec_ctx, tcp); - } else { - GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE); - } -} - -/* returns true if done, false if pending; if returning true, *error is set */ -#define MAX_WRITE_IOVEC 1000 -static bool tcp_flush(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - grpc_error **error) { - struct msghdr msg; - struct iovec iov[MAX_WRITE_IOVEC]; - msg_iovlen_type iov_size; - ssize_t sent_length; - size_t sending_length; - size_t trailing; - size_t unwind_slice_idx; - size_t unwind_byte_idx; - - for (;;) { - sending_length = 0; - unwind_slice_idx = tcp->outgoing_slice_idx; - unwind_byte_idx = tcp->outgoing_byte_idx; - for (iov_size = 0; tcp->outgoing_slice_idx != tcp->outgoing_buffer->count && - iov_size != MAX_WRITE_IOVEC; - iov_size++) { - iov[iov_size].iov_base = - GRPC_SLICE_START_PTR( - tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) + - tcp->outgoing_byte_idx; - iov[iov_size].iov_len = - GRPC_SLICE_LENGTH( - tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) - - tcp->outgoing_byte_idx; - sending_length += iov[iov_size].iov_len; - tcp->outgoing_slice_idx++; - tcp->outgoing_byte_idx = 0; - } - GPR_ASSERT(iov_size > 0); - - msg.msg_name = NULL; - msg.msg_namelen = 0; - msg.msg_iov = iov; - msg.msg_iovlen = iov_size; - msg.msg_control = NULL; - msg.msg_controllen = 0; - msg.msg_flags = 0; - - GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, sending_length); - GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_size); - - GPR_TIMER_BEGIN("sendmsg", 1); - do { - /* TODO(klempner): Cork if this is a partial write */ - GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx); - sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS); - } while (sent_length < 0 && errno == EINTR); - GPR_TIMER_END("sendmsg", 0); - - if (sent_length < 0) { - if (errno == EAGAIN) { - tcp->outgoing_slice_idx = unwind_slice_idx; - tcp->outgoing_byte_idx = unwind_byte_idx; - return false; - } else if (errno == EPIPE) { - *error = grpc_error_set_int(GRPC_OS_ERROR(errno, "sendmsg"), - GRPC_ERROR_INT_GRPC_STATUS, - GRPC_STATUS_UNAVAILABLE); - return true; - } else { - *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp); - return true; - } - } - - GPR_ASSERT(tcp->outgoing_byte_idx == 0); - trailing = sending_length - (size_t)sent_length; - while (trailing > 0) { - size_t slice_length; - - tcp->outgoing_slice_idx--; - slice_length = GRPC_SLICE_LENGTH( - tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]); - if (slice_length > trailing) { - tcp->outgoing_byte_idx = slice_length - trailing; - break; - } else { - trailing -= slice_length; - } - } - - if (tcp->outgoing_slice_idx == tcp->outgoing_buffer->count) { - *error = GRPC_ERROR_NONE; - return true; - } - }; -} - -static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */, - grpc_error *error) { - grpc_tcp *tcp = (grpc_tcp *)arg; - grpc_closure *cb; - - if (error != GRPC_ERROR_NONE) { - cb = tcp->write_cb; - tcp->write_cb = NULL; - cb->cb(exec_ctx, cb->cb_arg, error); - TCP_UNREF(exec_ctx, tcp, "write"); - return; - } - - if (!tcp_flush(exec_ctx, tcp, &error)) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "write: delayed"); - } - notify_on_write(exec_ctx, tcp); - } else { - cb = tcp->write_cb; - tcp->write_cb = NULL; - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - const char *str = grpc_error_string(error); - gpr_log(GPR_DEBUG, "write: %s", str); - } - - GRPC_CLOSURE_RUN(exec_ctx, cb, error); - TCP_UNREF(exec_ctx, tcp, "write"); - } -} - -static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - 
grpc_slice_buffer *buf, grpc_closure *cb) { - grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_error *error = GRPC_ERROR_NONE; - - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - size_t i; - - for (i = 0; i < buf->count; i++) { - char *data = - grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); - gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data); - gpr_free(data); - } - } - - GPR_TIMER_BEGIN("tcp_write", 0); - GPR_ASSERT(tcp->write_cb == NULL); - - if (buf->length == 0) { - GPR_TIMER_END("tcp_write", 0); - GRPC_CLOSURE_SCHED( - exec_ctx, cb, - grpc_fd_is_shutdown(tcp->em_fd) - ? tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), - tcp) - : GRPC_ERROR_NONE); - return; - } - tcp->outgoing_buffer = buf; - tcp->outgoing_slice_idx = 0; - tcp->outgoing_byte_idx = 0; - - if (!tcp_flush(exec_ctx, tcp, &error)) { - TCP_REF(tcp, "write"); - tcp->write_cb = cb; - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "write: delayed"); - } - notify_on_write(exec_ctx, tcp); - } else { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - const char *str = grpc_error_string(error); - gpr_log(GPR_DEBUG, "write: %s", str); - } - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); - } - - GPR_TIMER_END("tcp_write", 0); -} - -static void tcp_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *pollset) { - grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_pollset_add_fd(exec_ctx, pollset, tcp->em_fd); -} - -static void tcp_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset_set *pollset_set) { - grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_pollset_set_add_fd(exec_ctx, pollset_set, tcp->em_fd); -} - -static char *tcp_get_peer(grpc_endpoint *ep) { - grpc_tcp *tcp = (grpc_tcp *)ep; - return gpr_strdup(tcp->peer_string); -} - -static int tcp_get_fd(grpc_endpoint *ep) { - grpc_tcp *tcp = (grpc_tcp *)ep; - return tcp->fd; -} - -static grpc_resource_user *tcp_get_resource_user(grpc_endpoint *ep) { - grpc_tcp *tcp = (grpc_tcp *)ep; - return tcp->resource_user; -} - -static const grpc_endpoint_vtable vtable = { - tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set, - tcp_shutdown, tcp_destroy, tcp_get_resource_user, tcp_get_peer, - tcp_get_fd}; - -#define MAX_CHUNK_SIZE 32 * 1024 * 1024 - -grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd, - const grpc_channel_args *channel_args, - const char *peer_string) { - int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE; - int tcp_max_read_chunk_size = 4 * 1024 * 1024; - int tcp_min_read_chunk_size = 256; - grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL); - if (channel_args != NULL) { - for (size_t i = 0; i < channel_args->num_args; i++) { - if (0 == - strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) { - grpc_integer_options options = {(int)tcp_read_chunk_size, 1, - MAX_CHUNK_SIZE}; - tcp_read_chunk_size = - grpc_channel_arg_get_integer(&channel_args->args[i], options); - } else if (0 == strcmp(channel_args->args[i].key, - GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE)) { - grpc_integer_options options = {(int)tcp_read_chunk_size, 1, - MAX_CHUNK_SIZE}; - tcp_min_read_chunk_size = - grpc_channel_arg_get_integer(&channel_args->args[i], options); - } else if (0 == strcmp(channel_args->args[i].key, - GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE)) { - grpc_integer_options options = {(int)tcp_read_chunk_size, 1, - MAX_CHUNK_SIZE}; - tcp_max_read_chunk_size = - grpc_channel_arg_get_integer(&channel_args->args[i], options); - } else if (0 == - strcmp(channel_args->args[i].key, 
GRPC_ARG_RESOURCE_QUOTA)) { - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); - resource_quota = grpc_resource_quota_ref_internal( - (grpc_resource_quota *)channel_args->args[i].value.pointer.p); - } - } - } - - if (tcp_min_read_chunk_size > tcp_max_read_chunk_size) { - tcp_min_read_chunk_size = tcp_max_read_chunk_size; - } - tcp_read_chunk_size = GPR_CLAMP(tcp_read_chunk_size, tcp_min_read_chunk_size, - tcp_max_read_chunk_size); - - grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp)); - tcp->base.vtable = &vtable; - tcp->peer_string = gpr_strdup(peer_string); - tcp->fd = grpc_fd_wrapped_fd(em_fd); - tcp->read_cb = NULL; - tcp->write_cb = NULL; - tcp->release_fd_cb = NULL; - tcp->release_fd = NULL; - tcp->incoming_buffer = NULL; - tcp->target_length = (double)tcp_read_chunk_size; - tcp->min_read_chunk_size = tcp_min_read_chunk_size; - tcp->max_read_chunk_size = tcp_max_read_chunk_size; - tcp->bytes_read_this_round = 0; - tcp->finished_edge = true; - /* paired with unref in grpc_tcp_destroy */ - gpr_ref_init(&tcp->refcount, 1); - gpr_atm_no_barrier_store(&tcp->shutdown_count, 0); - tcp->em_fd = em_fd; - grpc_slice_buffer_init(&tcp->last_read_buffer); - tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); - grpc_resource_user_slice_allocator_init( - &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp); - /* Tell network status tracker about new endpoint */ - grpc_network_status_register_endpoint(&tcp->base); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); - - return &tcp->base; -} - -int grpc_tcp_fd(grpc_endpoint *ep) { - grpc_tcp *tcp = (grpc_tcp *)ep; - GPR_ASSERT(ep->vtable == &vtable); - return grpc_fd_wrapped_fd(tcp->em_fd); -} - -void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - int *fd, grpc_closure *done) { - grpc_network_status_unregister_endpoint(ep); - grpc_tcp *tcp = (grpc_tcp *)ep; - GPR_ASSERT(ep->vtable == &vtable); - tcp->release_fd = fd; - tcp->release_fd_cb = done; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &tcp->last_read_buffer); - TCP_UNREF(exec_ctx, tcp, "destroy"); -} - -#endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.cc b/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.cc new file mode 100644 index 000000000..153be05e8 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.cc @@ -0,0 +1,814 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_POSIX_SOCKET + +#include "src/core/lib/iomgr/network_status_tracker.h" +#include "src/core/lib/iomgr/tcp_posix.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/debug/stats.h" +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/iomgr/ev_posix.h" +#include "src/core/lib/iomgr/executor.h" +#include "src/core/lib/profiling/timers.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" + +#ifdef GRPC_HAVE_MSG_NOSIGNAL +#define SENDMSG_FLAGS MSG_NOSIGNAL +#else +#define SENDMSG_FLAGS 0 +#endif + +#ifdef GRPC_MSG_IOVLEN_TYPE +typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type; +#else +typedef size_t msg_iovlen_type; +#endif + +extern grpc_core::TraceFlag grpc_tcp_trace; + +namespace { +struct grpc_tcp { + grpc_endpoint base; + grpc_fd* em_fd; + int fd; + bool finished_edge; + double target_length; + double bytes_read_this_round; + gpr_refcount refcount; + gpr_atm shutdown_count; + + int min_read_chunk_size; + int max_read_chunk_size; + + /* garbage after the last read */ + grpc_slice_buffer last_read_buffer; + + grpc_slice_buffer* incoming_buffer; + grpc_slice_buffer* outgoing_buffer; + /** byte within outgoing_buffer->slices[0] to write next */ + size_t outgoing_byte_idx; + + grpc_closure* read_cb; + grpc_closure* write_cb; + grpc_closure* release_fd_cb; + int* release_fd; + + grpc_closure read_done_closure; + grpc_closure write_done_closure; + + char* peer_string; + + grpc_resource_user* resource_user; + grpc_resource_user_slice_allocator slice_allocator; +}; + +struct backup_poller { + gpr_mu* pollset_mu; + grpc_closure run_poller; +}; +} // namespace + +#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1)) + +static gpr_atm g_uncovered_notifications_pending; +static gpr_atm g_backup_poller; /* backup_poller* */ + +static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error); +static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error); +static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */, + grpc_error* error); + +static void done_poller(void* bp, grpc_error* error_ignored) { + backup_poller* p = static_cast(bp); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p); + } + grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p)); + gpr_free(p); +} + +static void run_poller(void* bp, grpc_error* error_ignored) { + backup_poller* p = static_cast(bp); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p); + } + gpr_mu_lock(p->pollset_mu); + grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC; + GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(); + GRPC_LOG_IF_ERROR( + "backup_poller:pollset_work", + grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline)); + gpr_mu_unlock(p->pollset_mu); + /* last "uncovered" notification is the ref that keeps us polling, if we get + * there try a cas to release it */ + if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 && + gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) { + gpr_mu_lock(p->pollset_mu); + bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "BACKUP_POLLER:%p done 
cas_ok=%d", p, cas_ok); + } + gpr_mu_unlock(p->pollset_mu); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p); + } + grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p), + GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p, + grpc_schedule_on_exec_ctx)); + } else { + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p); + } + GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE); + } +} + +static void drop_uncovered(grpc_tcp* tcp) { + backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller); + gpr_atm old_count = + gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p, + static_cast(old_count), static_cast(old_count) - 1); + } + GPR_ASSERT(old_count != 1); +} + +static void cover_self(grpc_tcp* tcp) { + backup_poller* p; + gpr_atm old_count = + gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "BACKUP_POLLER: cover cnt %d->%d", + static_cast(old_count), 2 + static_cast(old_count)); + } + if (old_count == 0) { + GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(); + p = static_cast( + gpr_zalloc(sizeof(*p) + grpc_pollset_size())); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p); + } + grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu); + gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, + grpc_executor_scheduler(GRPC_EXECUTOR_LONG)), + GRPC_ERROR_NONE); + } else { + while ((p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller)) == + nullptr) { + // spin waiting for backup poller + } + } + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p", p, tcp); + } + grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd); + if (old_count != 0) { + drop_uncovered(tcp); + } +} + +static void notify_on_read(grpc_tcp* tcp) { + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp); + } + GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp, + grpc_schedule_on_exec_ctx); + grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure); +} + +static void notify_on_write(grpc_tcp* tcp) { + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp); + } + cover_self(tcp); + GRPC_CLOSURE_INIT(&tcp->write_done_closure, + tcp_drop_uncovered_then_handle_write, tcp, + grpc_schedule_on_exec_ctx); + grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure); +} + +static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) { + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg, grpc_error_string(error)); + } + drop_uncovered(static_cast(arg)); + tcp_handle_write(arg, error); +} + +static void add_to_estimate(grpc_tcp* tcp, size_t bytes) { + tcp->bytes_read_this_round += static_cast(bytes); +} + +static void finish_estimate(grpc_tcp* tcp) { + /* If we read >80% of the target buffer in one read loop, increase the size + of the target buffer to either the amount read, or twice its previous + value */ + if (tcp->bytes_read_this_round > tcp->target_length * 0.8) { + tcp->target_length = + GPR_MAX(2 * tcp->target_length, tcp->bytes_read_this_round); + } else { + tcp->target_length = + 0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round; + } + tcp->bytes_read_this_round = 0; +} + +static size_t 
get_target_read_size(grpc_tcp* tcp) { + grpc_resource_quota* rq = grpc_resource_user_quota(tcp->resource_user); + double pressure = grpc_resource_quota_get_memory_pressure(rq); + double target = + tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0); + size_t sz = ((static_cast GPR_CLAMP(target, tcp->min_read_chunk_size, + tcp->max_read_chunk_size)) + + 255) & + ~static_cast(255); + /* don't use more than 1/16th of the overall resource quota for a single read + * alloc */ + size_t rqmax = grpc_resource_quota_peek_size(rq); + if (sz > rqmax / 16 && rqmax > 1024) { + sz = rqmax / 16; + } + return sz; +} + +static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) { + return grpc_error_set_str( + grpc_error_set_int( + grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd), + /* All tcp errors are marked with UNAVAILABLE so that application may + * choose to retry. */ + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE), + GRPC_ERROR_STR_TARGET_ADDRESS, + grpc_slice_from_copied_string(tcp->peer_string)); +} + +static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error); +static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error); + +static void tcp_shutdown(grpc_endpoint* ep, grpc_error* why) { + grpc_tcp* tcp = reinterpret_cast(ep); + grpc_fd_shutdown(tcp->em_fd, why); + grpc_resource_user_shutdown(tcp->resource_user); +} + +static void tcp_free(grpc_tcp* tcp) { + grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd, + false /* already_closed */, "tcp_unref_orphan"); + grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer); + grpc_resource_user_unref(tcp->resource_user); + gpr_free(tcp->peer_string); + gpr_free(tcp); +} + +#ifndef NDEBUG +#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) +#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) +static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file, + int line) { + if (grpc_tcp_trace.enabled()) { + gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, + "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val, + val - 1); + } + if (gpr_unref(&tcp->refcount)) { + tcp_free(tcp); + } +} + +static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file, + int line) { + if (grpc_tcp_trace.enabled()) { + gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, + "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val, + val + 1); + } + gpr_ref(&tcp->refcount); +} +#else +#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) +#define TCP_REF(tcp, reason) tcp_ref((tcp)) +static void tcp_unref(grpc_tcp* tcp) { + if (gpr_unref(&tcp->refcount)) { + tcp_free(tcp); + } +} + +static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); } +#endif + +static void tcp_destroy(grpc_endpoint* ep) { + grpc_network_status_unregister_endpoint(ep); + grpc_tcp* tcp = reinterpret_cast(ep); + grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer); + TCP_UNREF(tcp, "destroy"); +} + +static void call_read_cb(grpc_tcp* tcp, grpc_error* error) { + grpc_closure* cb = tcp->read_cb; + + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg); + size_t i; + const char* str = grpc_error_string(error); + gpr_log(GPR_INFO, "read: error=%s", str); + + for (i = 0; i < tcp->incoming_buffer->count; i++) { + char* dump = 
grpc_dump_slice(tcp->incoming_buffer->slices[i], + GPR_DUMP_HEX | GPR_DUMP_ASCII); + gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump); + gpr_free(dump); + } + } + + tcp->read_cb = nullptr; + tcp->incoming_buffer = nullptr; + GRPC_CLOSURE_RUN(cb, error); +} + +#define MAX_READ_IOVEC 4 +static void tcp_do_read(grpc_tcp* tcp) { + GPR_TIMER_SCOPE("tcp_do_read", 0); + struct msghdr msg; + struct iovec iov[MAX_READ_IOVEC]; + ssize_t read_bytes; + size_t i; + + GPR_ASSERT(!tcp->finished_edge); + GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC); + + for (i = 0; i < tcp->incoming_buffer->count; i++) { + iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]); + iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]); + } + + msg.msg_name = nullptr; + msg.msg_namelen = 0; + msg.msg_iov = iov; + msg.msg_iovlen = static_cast(tcp->incoming_buffer->count); + msg.msg_control = nullptr; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length); + GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count); + + do { + GPR_TIMER_SCOPE("recvmsg", 0); + GRPC_STATS_INC_SYSCALL_READ(); + read_bytes = recvmsg(tcp->fd, &msg, 0); + } while (read_bytes < 0 && errno == EINTR); + + if (read_bytes < 0) { + /* NB: After calling call_read_cb a parallel call of the read handler may + * be running. */ + if (errno == EAGAIN) { + finish_estimate(tcp); + /* We've consumed the edge, request a new one */ + notify_on_read(tcp); + } else { + grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer); + call_read_cb(tcp, + tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp)); + TCP_UNREF(tcp, "read"); + } + } else if (read_bytes == 0) { + /* 0 read size ==> end of stream */ + grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer); + call_read_cb( + tcp, tcp_annotate_error( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp)); + TCP_UNREF(tcp, "read"); + } else { + GRPC_STATS_INC_TCP_READ_SIZE(read_bytes); + add_to_estimate(tcp, static_cast(read_bytes)); + GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length); + if (static_cast(read_bytes) < tcp->incoming_buffer->length) { + grpc_slice_buffer_trim_end( + tcp->incoming_buffer, + tcp->incoming_buffer->length - static_cast(read_bytes), + &tcp->last_read_buffer); + } + GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length); + call_read_cb(tcp, GRPC_ERROR_NONE); + TCP_UNREF(tcp, "read"); + } +} + +static void tcp_read_allocation_done(void* tcpp, grpc_error* error) { + grpc_tcp* tcp = static_cast(tcpp); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp, + grpc_error_string(error)); + } + if (error != GRPC_ERROR_NONE) { + grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer); + grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer); + call_read_cb(tcp, GRPC_ERROR_REF(error)); + TCP_UNREF(tcp, "read"); + } else { + tcp_do_read(tcp); + } +} + +static void tcp_continue_read(grpc_tcp* tcp) { + size_t target_read_size = get_target_read_size(tcp); + if (tcp->incoming_buffer->length < target_read_size && + tcp->incoming_buffer->count < MAX_READ_IOVEC) { + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp); + } + grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1, + tcp->incoming_buffer); + } else { + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p do_read", tcp); + } + tcp_do_read(tcp); + } +} + 
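For reference, the sizing rule in get_target_read_size() above first scales the adaptive target down under memory pressure, then clamps it to [tcp->min_read_chunk_size, tcp->max_read_chunk_size], rounds up to a multiple of 256 bytes, and finally caps the result at 1/16th of the resource quota. A standalone sketch of that computation follows; the function and parameter names are illustrative stand-ins for the tcp-> fields, and `target` is assumed to already include the memory-pressure scaling:

#include <stddef.h>
#include "src/core/lib/gpr/useful.h"  // GPR_CLAMP (also included by tcp_posix.cc above)

// Sketch of the read-size rule used by get_target_read_size().
static size_t sketch_read_size(double target, int min_chunk, int max_chunk,
                               size_t quota_size) {
  size_t sz = (static_cast<size_t>(GPR_CLAMP(target, min_chunk, max_chunk)) +
               255) &
              ~static_cast<size_t>(255);  // round up to a multiple of 256
  if (sz > quota_size / 16 && quota_size > 1024) {
    sz = quota_size / 16;  // never take more than 1/16th of the quota per read
  }
  return sz;
}

The (x + 255) & ~255 rounding is the same arithmetic used by both the deleted C version of this file earlier in the patch and the new C++ version above.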
+static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) { + grpc_tcp* tcp = static_cast(arg); + GPR_ASSERT(!tcp->finished_edge); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp, grpc_error_string(error)); + } + + if (error != GRPC_ERROR_NONE) { + grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer); + grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer); + call_read_cb(tcp, GRPC_ERROR_REF(error)); + TCP_UNREF(tcp, "read"); + } else { + tcp_continue_read(tcp); + } +} + +static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer, + grpc_closure* cb) { + grpc_tcp* tcp = reinterpret_cast(ep); + GPR_ASSERT(tcp->read_cb == nullptr); + tcp->read_cb = cb; + tcp->incoming_buffer = incoming_buffer; + grpc_slice_buffer_reset_and_unref_internal(incoming_buffer); + grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer); + TCP_REF(tcp, "read"); + if (tcp->finished_edge) { + tcp->finished_edge = false; + notify_on_read(tcp); + } else { + GRPC_CLOSURE_SCHED(&tcp->read_done_closure, GRPC_ERROR_NONE); + } +} + +/* returns true if done, false if pending; if returning true, *error is set */ +#define MAX_WRITE_IOVEC 1000 +static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) { + struct msghdr msg; + struct iovec iov[MAX_WRITE_IOVEC]; + msg_iovlen_type iov_size; + ssize_t sent_length; + size_t sending_length; + size_t trailing; + size_t unwind_slice_idx; + size_t unwind_byte_idx; + + // We always start at zero, because we eagerly unref and trim the slice + // buffer as we write + size_t outgoing_slice_idx = 0; + + for (;;) { + sending_length = 0; + unwind_slice_idx = outgoing_slice_idx; + unwind_byte_idx = tcp->outgoing_byte_idx; + for (iov_size = 0; outgoing_slice_idx != tcp->outgoing_buffer->count && + iov_size != MAX_WRITE_IOVEC; + iov_size++) { + iov[iov_size].iov_base = + GRPC_SLICE_START_PTR( + tcp->outgoing_buffer->slices[outgoing_slice_idx]) + + tcp->outgoing_byte_idx; + iov[iov_size].iov_len = + GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]) - + tcp->outgoing_byte_idx; + sending_length += iov[iov_size].iov_len; + outgoing_slice_idx++; + tcp->outgoing_byte_idx = 0; + } + GPR_ASSERT(iov_size > 0); + + msg.msg_name = nullptr; + msg.msg_namelen = 0; + msg.msg_iov = iov; + msg.msg_iovlen = iov_size; + msg.msg_control = nullptr; + msg.msg_controllen = 0; + msg.msg_flags = 0; + + GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length); + GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size); + + GPR_TIMER_SCOPE("sendmsg", 1); + do { + /* TODO(klempner): Cork if this is a partial write */ + GRPC_STATS_INC_SYSCALL_WRITE(); + sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS); + } while (sent_length < 0 && errno == EINTR); + + if (sent_length < 0) { + if (errno == EAGAIN) { + tcp->outgoing_byte_idx = unwind_byte_idx; + // unref all and forget about all slices that have been written to this + // point + for (size_t idx = 0; idx < unwind_slice_idx; ++idx) { + grpc_slice_unref_internal( + grpc_slice_buffer_take_first(tcp->outgoing_buffer)); + } + return false; + } else if (errno == EPIPE) { + *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp); + grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer); + return true; + } else { + *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp); + grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer); + return true; + } + } + + GPR_ASSERT(tcp->outgoing_byte_idx == 0); + trailing = sending_length - static_cast(sent_length); + 
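+    // Partial-write bookkeeping: sendmsg() accepted `sent_length` of the
+    // `sending_length` bytes offered above, so the loop below walks backwards
+    // over the slices that were just offered to find the first unsent byte
+    // and records its offset in outgoing_byte_idx. The enclosing for (;;)
+    // loop then resumes writing from exactly that point.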
while (trailing > 0) { + size_t slice_length; + + outgoing_slice_idx--; + slice_length = + GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]); + if (slice_length > trailing) { + tcp->outgoing_byte_idx = slice_length - trailing; + break; + } else { + trailing -= slice_length; + } + } + + if (outgoing_slice_idx == tcp->outgoing_buffer->count) { + *error = GRPC_ERROR_NONE; + grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer); + return true; + } + } +} + +static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) { + grpc_tcp* tcp = static_cast(arg); + grpc_closure* cb; + + if (error != GRPC_ERROR_NONE) { + cb = tcp->write_cb; + tcp->write_cb = nullptr; + cb->cb(cb->cb_arg, error); + TCP_UNREF(tcp, "write"); + return; + } + + if (!tcp_flush(tcp, &error)) { + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "write: delayed"); + } + notify_on_write(tcp); + } else { + cb = tcp->write_cb; + tcp->write_cb = nullptr; + if (grpc_tcp_trace.enabled()) { + const char* str = grpc_error_string(error); + gpr_log(GPR_INFO, "write: %s", str); + } + + GRPC_CLOSURE_RUN(cb, error); + TCP_UNREF(tcp, "write"); + } +} + +static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf, + grpc_closure* cb) { + GPR_TIMER_SCOPE("tcp_write", 0); + grpc_tcp* tcp = reinterpret_cast(ep); + grpc_error* error = GRPC_ERROR_NONE; + + if (grpc_tcp_trace.enabled()) { + size_t i; + + for (i = 0; i < buf->count; i++) { + char* data = + grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); + gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data); + gpr_free(data); + } + } + + GPR_ASSERT(tcp->write_cb == nullptr); + + if (buf->length == 0) { + GRPC_CLOSURE_SCHED( + cb, grpc_fd_is_shutdown(tcp->em_fd) + ? tcp_annotate_error( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp) + : GRPC_ERROR_NONE); + return; + } + tcp->outgoing_buffer = buf; + tcp->outgoing_byte_idx = 0; + + if (!tcp_flush(tcp, &error)) { + TCP_REF(tcp, "write"); + tcp->write_cb = cb; + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "write: delayed"); + } + notify_on_write(tcp); + } else { + if (grpc_tcp_trace.enabled()) { + const char* str = grpc_error_string(error); + gpr_log(GPR_INFO, "write: %s", str); + } + GRPC_CLOSURE_SCHED(cb, error); + } +} + +static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) { + grpc_tcp* tcp = reinterpret_cast(ep); + grpc_pollset_add_fd(pollset, tcp->em_fd); +} + +static void tcp_add_to_pollset_set(grpc_endpoint* ep, + grpc_pollset_set* pollset_set) { + grpc_tcp* tcp = reinterpret_cast(ep); + grpc_pollset_set_add_fd(pollset_set, tcp->em_fd); +} + +static void tcp_delete_from_pollset_set(grpc_endpoint* ep, + grpc_pollset_set* pollset_set) { + grpc_tcp* tcp = reinterpret_cast(ep); + grpc_pollset_set_del_fd(pollset_set, tcp->em_fd); +} + +static char* tcp_get_peer(grpc_endpoint* ep) { + grpc_tcp* tcp = reinterpret_cast(ep); + return gpr_strdup(tcp->peer_string); +} + +static int tcp_get_fd(grpc_endpoint* ep) { + grpc_tcp* tcp = reinterpret_cast(ep); + return tcp->fd; +} + +static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) { + grpc_tcp* tcp = reinterpret_cast(ep); + return tcp->resource_user; +} + +static const grpc_endpoint_vtable vtable = {tcp_read, + tcp_write, + tcp_add_to_pollset, + tcp_add_to_pollset_set, + tcp_delete_from_pollset_set, + tcp_shutdown, + tcp_destroy, + tcp_get_resource_user, + tcp_get_peer, + tcp_get_fd}; + +#define MAX_CHUNK_SIZE 32 * 1024 * 1024 + +grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd, + 
const grpc_channel_args* channel_args, + const char* peer_string) { + int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE; + int tcp_max_read_chunk_size = 4 * 1024 * 1024; + int tcp_min_read_chunk_size = 256; + grpc_resource_quota* resource_quota = grpc_resource_quota_create(nullptr); + if (channel_args != nullptr) { + for (size_t i = 0; i < channel_args->num_args; i++) { + if (0 == + strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) { + grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE}; + tcp_read_chunk_size = + grpc_channel_arg_get_integer(&channel_args->args[i], options); + } else if (0 == strcmp(channel_args->args[i].key, + GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE)) { + grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE}; + tcp_min_read_chunk_size = + grpc_channel_arg_get_integer(&channel_args->args[i], options); + } else if (0 == strcmp(channel_args->args[i].key, + GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE)) { + grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE}; + tcp_max_read_chunk_size = + grpc_channel_arg_get_integer(&channel_args->args[i], options); + } else if (0 == + strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { + grpc_resource_quota_unref_internal(resource_quota); + resource_quota = + grpc_resource_quota_ref_internal(static_cast( + channel_args->args[i].value.pointer.p)); + } + } + } + + if (tcp_min_read_chunk_size > tcp_max_read_chunk_size) { + tcp_min_read_chunk_size = tcp_max_read_chunk_size; + } + tcp_read_chunk_size = GPR_CLAMP(tcp_read_chunk_size, tcp_min_read_chunk_size, + tcp_max_read_chunk_size); + + grpc_tcp* tcp = static_cast(gpr_malloc(sizeof(grpc_tcp))); + tcp->base.vtable = &vtable; + tcp->peer_string = gpr_strdup(peer_string); + tcp->fd = grpc_fd_wrapped_fd(em_fd); + tcp->read_cb = nullptr; + tcp->write_cb = nullptr; + tcp->release_fd_cb = nullptr; + tcp->release_fd = nullptr; + tcp->incoming_buffer = nullptr; + tcp->target_length = static_cast(tcp_read_chunk_size); + tcp->min_read_chunk_size = tcp_min_read_chunk_size; + tcp->max_read_chunk_size = tcp_max_read_chunk_size; + tcp->bytes_read_this_round = 0; + tcp->finished_edge = true; + /* paired with unref in grpc_tcp_destroy */ + gpr_ref_init(&tcp->refcount, 1); + gpr_atm_no_barrier_store(&tcp->shutdown_count, 0); + tcp->em_fd = em_fd; + grpc_slice_buffer_init(&tcp->last_read_buffer); + tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); + grpc_resource_user_slice_allocator_init( + &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp); + /* Tell network status tracker about new endpoint */ + grpc_network_status_register_endpoint(&tcp->base); + grpc_resource_quota_unref_internal(resource_quota); + + return &tcp->base; +} + +int grpc_tcp_fd(grpc_endpoint* ep) { + grpc_tcp* tcp = reinterpret_cast(ep); + GPR_ASSERT(ep->vtable == &vtable); + return grpc_fd_wrapped_fd(tcp->em_fd); +} + +void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd, + grpc_closure* done) { + grpc_network_status_unregister_endpoint(ep); + grpc_tcp* tcp = reinterpret_cast(ep); + GPR_ASSERT(ep->vtable == &vtable); + tcp->release_fd = fd; + tcp->release_fd_cb = done; + grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer); + TCP_UNREF(tcp, "destroy"); +} + +#endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.h b/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.h index 6831a4a57..af89bd24d 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.h +++ 
b/Sources/CgRPC/src/core/lib/iomgr/tcp_posix.h @@ -29,28 +29,29 @@ otherwise specified. */ +#include + #include "src/core/lib/debug/trace.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/ev_posix.h" -extern grpc_tracer_flag grpc_tcp_trace; +extern grpc_core::TraceFlag grpc_tcp_trace; /* Create a tcp endpoint given a file desciptor and a read slice size. Takes ownership of fd. */ -grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *fd, - const grpc_channel_args *args, - const char *peer_string); +grpc_endpoint* grpc_tcp_create(grpc_fd* fd, const grpc_channel_args* args, + const char* peer_string); /* Return the tcp endpoint's fd, or -1 if this is not available. Does not release the fd. Requires: ep must be a tcp endpoint. */ -int grpc_tcp_fd(grpc_endpoint *ep); +int grpc_tcp_fd(grpc_endpoint* ep); /* Destroy the tcp endpoint without closing its fd. *fd will be set and done * will be called when the endpoint is destroyed. * Requires: ep must be a tcp endpoint and fd must not be NULL. */ -void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - int *fd, grpc_closure *done); +void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd, + grpc_closure* done); #endif /* GRPC_CORE_LIB_IOMGR_TCP_POSIX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server.cc b/Sources/CgRPC/src/core/lib/iomgr/tcp_server.cc new file mode 100644 index 000000000..ea745f266 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_server.cc @@ -0,0 +1,73 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/tcp_server.h" + +grpc_tcp_server_vtable* grpc_tcp_server_impl; + +grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete, + const grpc_channel_args* args, + grpc_tcp_server** server) { + return grpc_tcp_server_impl->create(shutdown_complete, args, server); +} + +void grpc_tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets, + size_t pollset_count, + grpc_tcp_server_cb on_accept_cb, void* cb_arg) { + grpc_tcp_server_impl->start(server, pollsets, pollset_count, on_accept_cb, + cb_arg); +} + +grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s, + const grpc_resolved_address* addr, + int* out_port) { + return grpc_tcp_server_impl->add_port(s, addr, out_port); +} + +unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server* s, + unsigned port_index) { + return grpc_tcp_server_impl->port_fd_count(s, port_index); +} + +int grpc_tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index, + unsigned fd_index) { + return grpc_tcp_server_impl->port_fd(s, port_index, fd_index); +} + +grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s) { + return grpc_tcp_server_impl->ref(s); +} + +void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s, + grpc_closure* shutdown_starting) { + grpc_tcp_server_impl->shutdown_starting_add(s, shutdown_starting); +} + +void grpc_tcp_server_unref(grpc_tcp_server* s) { + grpc_tcp_server_impl->unref(s); +} + +void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) { + grpc_tcp_server_impl->shutdown_listeners(s); +} + +void grpc_set_tcp_server_impl(grpc_tcp_server_vtable* impl) { + grpc_tcp_server_impl = impl; +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server.h b/Sources/CgRPC/src/core/lib/iomgr/tcp_server.h index 8a126b6de..8fcbb2f68 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_server.h +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_server.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_TCP_SERVER_H #define GRPC_CORE_LIB_IOMGR_TCP_SERVER_H +#include + #include #include "src/core/lib/iomgr/closure.h" @@ -31,7 +33,7 @@ typedef struct grpc_tcp_server grpc_tcp_server; typedef struct grpc_tcp_server_acceptor { /* grpc_tcp_server_cb functions share a ref on from_server that is valid until the function returns. */ - grpc_tcp_server *from_server; + grpc_tcp_server* from_server; /* Indices that may be passed to grpc_tcp_server_port_fd(). */ unsigned port_index; unsigned fd_index; @@ -39,23 +41,39 @@ typedef struct grpc_tcp_server_acceptor { /* Called for newly connected TCP connections. Takes ownership of acceptor. 
*/ -typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx *exec_ctx, void *arg, - grpc_endpoint *ep, - grpc_pollset *accepting_pollset, - grpc_tcp_server_acceptor *acceptor); +typedef void (*grpc_tcp_server_cb)(void* arg, grpc_endpoint* ep, + grpc_pollset* accepting_pollset, + grpc_tcp_server_acceptor* acceptor); + +typedef struct grpc_tcp_server_vtable { + grpc_error* (*create)(grpc_closure* shutdown_complete, + const grpc_channel_args* args, + grpc_tcp_server** server); + void (*start)(grpc_tcp_server* server, grpc_pollset** pollsets, + size_t pollset_count, grpc_tcp_server_cb on_accept_cb, + void* cb_arg); + grpc_error* (*add_port)(grpc_tcp_server* s, const grpc_resolved_address* addr, + int* out_port); + unsigned (*port_fd_count)(grpc_tcp_server* s, unsigned port_index); + int (*port_fd)(grpc_tcp_server* s, unsigned port_index, unsigned fd_index); + grpc_tcp_server* (*ref)(grpc_tcp_server* s); + void (*shutdown_starting_add)(grpc_tcp_server* s, + grpc_closure* shutdown_starting); + void (*unref)(grpc_tcp_server* s); + void (*shutdown_listeners)(grpc_tcp_server* s); +} grpc_tcp_server_vtable; /* Create a server, initially not bound to any ports. The caller owns one ref. If shutdown_complete is not NULL, it will be used by grpc_tcp_server_unref() when the ref count reaches zero. */ -grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, - grpc_closure *shutdown_complete, - const grpc_channel_args *args, - grpc_tcp_server **server); +grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete, + const grpc_channel_args* args, + grpc_tcp_server** server); /* Start listening to bound ports */ -void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server, - grpc_pollset **pollsets, size_t pollset_count, - grpc_tcp_server_cb on_accept_cb, void *cb_arg); +void grpc_tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets, + size_t pollset_count, + grpc_tcp_server_cb on_accept_cb, void* cb_arg); /* Add a port to the server, returning the newly allocated port on success, or -1 on failure. @@ -66,36 +84,39 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server, but not dualstack sockets. */ /* TODO(ctiller): deprecate this, and make grpc_tcp_server_add_ports to handle all of the multiple socket port matching logic in one place */ -grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, - const grpc_resolved_address *addr, - int *out_port); +grpc_error* grpc_tcp_server_add_port(grpc_tcp_server* s, + const grpc_resolved_address* addr, + int* out_port); /* Number of fds at the given port_index, or 0 if port_index is out of bounds. */ -unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s, unsigned port_index); +unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server* s, unsigned port_index); /* Returns the file descriptor of the Mth (fd_index) listening socket of the Nth (port_index) call to add_port() on this server, or -1 if the indices are out of bounds. The file descriptor remains owned by the server, and will be cleaned up when the ref count reaches zero. */ -int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index, +int grpc_tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index, unsigned fd_index); /* Ref s and return s. */ -grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s); +grpc_tcp_server* grpc_tcp_server_ref(grpc_tcp_server* s); /* shutdown_starting is called when ref count has reached zero and the server is about to be destroyed. The server will be deleted after it returns. 
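To make the new indirection concrete: every grpc_tcp_server_* entry point now forwards through whichever grpc_tcp_server_vtable was registered with grpc_set_tcp_server_impl(), as the dispatcher in tcp_server.cc above shows. The sketch below is illustrative only and is not part of the patch; it assumes nothing beyond the two vtables this patch defines (grpc_posix_tcp_server_vtable in tcp_server_posix.cc and custom_tcp_server_vtable in tcp_server_custom.cc), and the real wiring lives in the iomgr platform files outside this hunk.

/* Illustrative sketch: selecting a TCP-server backend through the new
 * vtable seam. Not part of the patch itself. */
#include "src/core/lib/iomgr/tcp_server.h"

extern grpc_tcp_server_vtable grpc_posix_tcp_server_vtable; /* tcp_server_posix.cc */
extern grpc_tcp_server_vtable custom_tcp_server_vtable;     /* tcp_server_custom.cc */

static void example_init_tcp_server_backend(bool use_custom_iomgr) {
  /* After this call, grpc_tcp_server_create/start/add_port/... all dispatch
   * through the registered implementation via grpc_tcp_server_impl. */
  grpc_set_tcp_server_impl(use_custom_iomgr ? &custom_tcp_server_vtable
                                            : &grpc_posix_tcp_server_vtable);
}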
Calling grpc_tcp_server_ref() from it has no effect. */ -void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, - grpc_closure *shutdown_starting); +void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server* s, + grpc_closure* shutdown_starting); /* If the refcount drops to zero, enqueue calls on exec_ctx to shutdown_listeners and delete s. */ -void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s); +void grpc_tcp_server_unref(grpc_tcp_server* s); /* Shutdown the fds of listeners. */ -void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s); +void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s); + +void grpc_tcp_server_global_init(); + +void grpc_set_tcp_server_impl(grpc_tcp_server_vtable* impl); #endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_custom.cc b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_custom.cc new file mode 100644 index 000000000..019b35447 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_custom.cc @@ -0,0 +1,472 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#include +#include + +#include +#include + +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/iomgr/tcp_custom.h" +#include "src/core/lib/iomgr/tcp_server.h" + +extern grpc_core::TraceFlag grpc_tcp_trace; + +extern grpc_socket_vtable* grpc_custom_socket_vtable; + +/* one listening port */ +struct grpc_tcp_listener { + grpc_tcp_server* server; + unsigned port_index; + int port; + + grpc_custom_socket* socket; + + /* linked list */ + struct grpc_tcp_listener* next; + + bool closed; +}; + +struct grpc_tcp_server { + gpr_refcount refs; + + /* Called whenever accept() succeeds on a server port. */ + grpc_tcp_server_cb on_accept_cb; + void* on_accept_cb_arg; + + int open_ports; + + /* linked list of server ports */ + grpc_tcp_listener* head; + grpc_tcp_listener* tail; + + /* List of closures passed to shutdown_starting_add(). */ + grpc_closure_list shutdown_starting; + + /* shutdown callback */ + grpc_closure* shutdown_complete; + + bool shutdown; + + grpc_resource_quota* resource_quota; +}; + +static grpc_error* tcp_server_create(grpc_closure* shutdown_complete, + const grpc_channel_args* args, + grpc_tcp_server** server) { + grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server)); + s->resource_quota = grpc_resource_quota_create(nullptr); + for (size_t i = 0; i < (args == nullptr ? 
0 : args->num_args); i++) { + if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) { + if (args->args[i].type == GRPC_ARG_POINTER) { + grpc_resource_quota_unref_internal(s->resource_quota); + s->resource_quota = grpc_resource_quota_ref_internal( + (grpc_resource_quota*)args->args[i].value.pointer.p); + } else { + grpc_resource_quota_unref_internal(s->resource_quota); + gpr_free(s); + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool"); + } + } + } + gpr_ref_init(&s->refs, 1); + s->on_accept_cb = nullptr; + s->on_accept_cb_arg = nullptr; + s->open_ports = 0; + s->head = nullptr; + s->tail = nullptr; + s->shutdown_starting.head = nullptr; + s->shutdown_starting.tail = nullptr; + s->shutdown_complete = shutdown_complete; + s->shutdown = false; + *server = s; + return GRPC_ERROR_NONE; +} + +static grpc_tcp_server* tcp_server_ref(grpc_tcp_server* s) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + gpr_ref(&s->refs); + return s; +} + +static void tcp_server_shutdown_starting_add(grpc_tcp_server* s, + grpc_closure* shutdown_starting) { + grpc_closure_list_append(&s->shutdown_starting, shutdown_starting, + GRPC_ERROR_NONE); +} + +static void finish_shutdown(grpc_tcp_server* s) { + GPR_ASSERT(s->shutdown); + if (s->shutdown_complete != nullptr) { + GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE); + } + + while (s->head) { + grpc_tcp_listener* sp = s->head; + s->head = sp->next; + sp->next = nullptr; + gpr_free(sp); + } + grpc_resource_quota_unref_internal(s->resource_quota); + gpr_free(s); +} + +static void custom_close_callback(grpc_custom_socket* socket) { + grpc_tcp_listener* sp = socket->listener; + if (sp) { + grpc_core::ExecCtx exec_ctx; + sp->server->open_ports--; + if (sp->server->open_ports == 0 && sp->server->shutdown) { + finish_shutdown(sp->server); + } + } + socket->refs--; + if (socket->refs == 0) { + grpc_custom_socket_vtable->destroy(socket); + gpr_free(socket); + } +} + +void grpc_custom_close_server_callback(grpc_tcp_listener* sp) { + if (sp) { + grpc_core::ExecCtx exec_ctx; + sp->server->open_ports--; + if (sp->server->open_ports == 0 && sp->server->shutdown) { + finish_shutdown(sp->server); + } + } +} + +static void close_listener(grpc_tcp_listener* sp) { + grpc_custom_socket* socket = sp->socket; + if (!sp->closed) { + sp->closed = true; + grpc_custom_socket_vtable->close(socket, custom_close_callback); + } +} + +static void tcp_server_destroy(grpc_tcp_server* s) { + int immediately_done = 0; + grpc_tcp_listener* sp; + + GPR_ASSERT(!s->shutdown); + s->shutdown = true; + + if (s->open_ports == 0) { + immediately_done = 1; + } + for (sp = s->head; sp; sp = sp->next) { + close_listener(sp); + } + + if (immediately_done) { + finish_shutdown(s); + } +} + +static void tcp_server_unref(grpc_tcp_server* s) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + if (gpr_unref(&s->refs)) { + /* Complete shutdown_starting work before destroying. 
*/ + grpc_core::ExecCtx exec_ctx; + GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting); + grpc_core::ExecCtx::Get()->Flush(); + tcp_server_destroy(s); + } +} + +static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) { + grpc_tcp_server_acceptor* acceptor = + (grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor)); + grpc_endpoint* ep = nullptr; + grpc_resolved_address peer_name; + char* peer_name_string; + grpc_error* err; + + peer_name_string = nullptr; + memset(&peer_name, 0, sizeof(grpc_resolved_address)); + peer_name.len = GRPC_MAX_SOCKADDR_SIZE; + err = grpc_custom_socket_vtable->getpeername( + socket, (grpc_sockaddr*)&peer_name.addr, (int*)&peer_name.len); + if (err == GRPC_ERROR_NONE) { + peer_name_string = grpc_sockaddr_to_uri(&peer_name); + } else { + GRPC_LOG_IF_ERROR("getpeername error", err); + GRPC_ERROR_UNREF(err); + } + if (grpc_tcp_trace.enabled()) { + if (peer_name_string) { + gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection: %s", + sp->server, peer_name_string); + } else { + gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection", sp->server); + } + } + ep = custom_tcp_endpoint_create(socket, sp->server->resource_quota, + peer_name_string); + acceptor->from_server = sp->server; + acceptor->port_index = sp->port_index; + acceptor->fd_index = 0; + sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, nullptr, acceptor); + gpr_free(peer_name_string); +} + +static void custom_accept_callback(grpc_custom_socket* socket, + grpc_custom_socket* client, + grpc_error* error); + +static void custom_accept_callback(grpc_custom_socket* socket, + grpc_custom_socket* client, + grpc_error* error) { + grpc_core::ExecCtx exec_ctx; + grpc_tcp_listener* sp = socket->listener; + if (error != GRPC_ERROR_NONE) { + if (!sp->closed) { + gpr_log(GPR_ERROR, "Accept failed: %s", grpc_error_string(error)); + } + gpr_free(client); + GRPC_ERROR_UNREF(error); + return; + } + finish_accept(sp, client); + if (!sp->closed) { + grpc_custom_socket* new_socket = + (grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket)); + new_socket->endpoint = nullptr; + new_socket->listener = nullptr; + new_socket->connector = nullptr; + new_socket->refs = 1; + grpc_custom_socket_vtable->accept(sp->socket, new_socket, + custom_accept_callback); + } +} + +static grpc_error* add_socket_to_server(grpc_tcp_server* s, + grpc_custom_socket* socket, + const grpc_resolved_address* addr, + unsigned port_index, + grpc_tcp_listener** listener) { + grpc_tcp_listener* sp = nullptr; + int port = -1; + grpc_error* error; + grpc_resolved_address sockname_temp; + + // The last argument to uv_tcp_bind is flags + error = grpc_custom_socket_vtable->bind(socket, (grpc_sockaddr*)addr->addr, + addr->len, 0); + if (error != GRPC_ERROR_NONE) { + return error; + } + + error = grpc_custom_socket_vtable->listen(socket); + if (error != GRPC_ERROR_NONE) { + return error; + } + + sockname_temp.len = GRPC_MAX_SOCKADDR_SIZE; + error = grpc_custom_socket_vtable->getsockname( + socket, (grpc_sockaddr*)&sockname_temp.addr, (int*)&sockname_temp.len); + if (error != GRPC_ERROR_NONE) { + return error; + } + + port = grpc_sockaddr_get_port(&sockname_temp); + + GPR_ASSERT(port >= 0); + GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server"); + sp = (grpc_tcp_listener*)gpr_zalloc(sizeof(grpc_tcp_listener)); + sp->next = nullptr; + if (s->head == nullptr) { + s->head = sp; + } else { + s->tail->next = sp; + } + s->tail = sp; + sp->server = s; + sp->socket = socket; + sp->port = port; + sp->port_index = 
port_index; + sp->closed = false; + s->open_ports++; + *listener = sp; + + return GRPC_ERROR_NONE; +} + +static grpc_error* tcp_server_add_port(grpc_tcp_server* s, + const grpc_resolved_address* addr, + int* port) { + // This function is mostly copied from tcp_server_windows.c + grpc_tcp_listener* sp = nullptr; + grpc_custom_socket* socket; + grpc_resolved_address addr6_v4mapped; + grpc_resolved_address wildcard; + grpc_resolved_address* allocated_addr = nullptr; + grpc_resolved_address sockname_temp; + unsigned port_index = 0; + grpc_error* error = GRPC_ERROR_NONE; + int family; + + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + + if (s->tail != nullptr) { + port_index = s->tail->port_index + 1; + } + + /* Check if this is a wildcard port, and if so, try to keep the port the same + as some previously created listener. */ + if (grpc_sockaddr_get_port(addr) == 0) { + for (sp = s->head; sp; sp = sp->next) { + socket = sp->socket; + sockname_temp.len = GRPC_MAX_SOCKADDR_SIZE; + if (nullptr == grpc_custom_socket_vtable->getsockname( + socket, (grpc_sockaddr*)&sockname_temp.addr, + (int*)&sockname_temp.len)) { + *port = grpc_sockaddr_get_port(&sockname_temp); + if (*port > 0) { + allocated_addr = + (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address)); + memcpy(allocated_addr, addr, sizeof(grpc_resolved_address)); + grpc_sockaddr_set_port(allocated_addr, *port); + addr = allocated_addr; + break; + } + } + } + } + + if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) { + addr = &addr6_v4mapped; + } + + /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */ + if (grpc_sockaddr_is_wildcard(addr, port)) { + grpc_sockaddr_make_wildcard6(*port, &wildcard); + + addr = &wildcard; + } + + if (grpc_tcp_trace.enabled()) { + char* port_string; + grpc_sockaddr_to_string(&port_string, addr, 0); + const char* str = grpc_error_string(error); + if (port_string) { + gpr_log(GPR_INFO, "SERVER %p add_port %s error=%s", s, port_string, str); + gpr_free(port_string); + } else { + gpr_log(GPR_INFO, "SERVER %p add_port error=%s", s, str); + } + } + + family = grpc_sockaddr_get_family(addr); + socket = (grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket)); + socket->refs = 1; + socket->endpoint = nullptr; + socket->listener = nullptr; + socket->connector = nullptr; + grpc_custom_socket_vtable->init(socket, family); + + if (error == GRPC_ERROR_NONE) { + error = add_socket_to_server(s, socket, addr, port_index, &sp); + } + gpr_free(allocated_addr); + + if (error != GRPC_ERROR_NONE) { + grpc_error* error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Failed to add port to server", &error, 1); + GRPC_ERROR_UNREF(error); + error = error_out; + *port = -1; + } else { + GPR_ASSERT(sp != nullptr); + *port = sp->port; + } + socket->listener = sp; + return error; +} + +static void tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets, + size_t pollset_count, + grpc_tcp_server_cb on_accept_cb, void* cb_arg) { + grpc_tcp_listener* sp; + (void)pollsets; + (void)pollset_count; + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "SERVER_START %p", server); + } + GPR_ASSERT(on_accept_cb); + GPR_ASSERT(!server->on_accept_cb); + server->on_accept_cb = on_accept_cb; + server->on_accept_cb_arg = cb_arg; + for (sp = server->head; sp; sp = sp->next) { + grpc_custom_socket* new_socket = + (grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket)); + new_socket->endpoint = nullptr; + new_socket->listener = nullptr; + new_socket->connector = nullptr; + 
new_socket->refs = 1; + grpc_custom_socket_vtable->accept(sp->socket, new_socket, + custom_accept_callback); + } +} + +static unsigned tcp_server_port_fd_count(grpc_tcp_server* s, + unsigned port_index) { + return 0; +} + +static int tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index, + unsigned fd_index) { + return -1; +} + +static void tcp_server_shutdown_listeners(grpc_tcp_server* s) { + for (grpc_tcp_listener* sp = s->head; sp; sp = sp->next) { + if (!sp->closed) { + sp->closed = true; + grpc_custom_socket_vtable->close(sp->socket, custom_close_callback); + } + } +} + +grpc_tcp_server_vtable custom_tcp_server_vtable = { + tcp_server_create, + tcp_server_start, + tcp_server_add_port, + tcp_server_port_fd_count, + tcp_server_port_fd, + tcp_server_ref, + tcp_server_shutdown_starting_add, + tcp_server_unref, + tcp_server_shutdown_listeners}; + +#ifdef GRPC_UV_TEST +grpc_tcp_server_vtable* default_tcp_server_vtable = &custom_tcp_server_vtable; +#endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_posix.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_posix.cc similarity index 68% rename from Sources/CgRPC/src/core/lib/iomgr/tcp_server_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/tcp_server_posix.cc index 06612d639..524beba9a 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_posix.cc @@ -21,6 +21,8 @@ #define _GNU_SOURCE #endif +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SOCKET @@ -42,9 +44,9 @@ #include #include #include -#include #include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/sockaddr_utils.h" @@ -52,36 +54,19 @@ #include "src/core/lib/iomgr/tcp_posix.h" #include "src/core/lib/iomgr/tcp_server_utils_posix.h" #include "src/core/lib/iomgr/unix_sockets_posix.h" -#include "src/core/lib/support/string.h" - -static gpr_once check_init = GPR_ONCE_INIT; -static bool has_so_reuseport = false; - -static void init(void) { -#ifndef GPR_MANYLINUX1 - int s = socket(AF_INET, SOCK_STREAM, 0); - if (s >= 0) { - has_so_reuseport = GRPC_LOG_IF_ERROR("check for SO_REUSEPORT", - grpc_set_socket_reuse_port(s, 1)); - close(s); - } -#endif -} - -grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, - grpc_closure *shutdown_complete, - const grpc_channel_args *args, - grpc_tcp_server **server) { - gpr_once_init(&check_init, init); - grpc_tcp_server *s = (grpc_tcp_server *)gpr_zalloc(sizeof(grpc_tcp_server)); - s->so_reuseport = has_so_reuseport; +static grpc_error* tcp_server_create(grpc_closure* shutdown_complete, + const grpc_channel_args* args, + grpc_tcp_server** server) { + grpc_tcp_server* s = + static_cast(gpr_zalloc(sizeof(grpc_tcp_server))); + s->so_reuseport = grpc_is_socket_reuse_port_supported(); s->expand_wildcard_addrs = false; - for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) { + for (size_t i = 0; i < (args == nullptr ? 
0 : args->num_args); i++) { if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) { if (args->args[i].type == GRPC_ARG_INTEGER) { - s->so_reuseport = - has_so_reuseport && (args->args[i].value.integer != 0); + s->so_reuseport = grpc_is_socket_reuse_port_supported() && + (args->args[i].value.integer != 0); } else { gpr_free(s); return GRPC_ERROR_CREATE_FROM_STATIC_STRING(GRPC_ARG_ALLOW_REUSEPORT @@ -102,13 +87,13 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, s->active_ports = 0; s->destroyed_ports = 0; s->shutdown = false; - s->shutdown_starting.head = NULL; - s->shutdown_starting.tail = NULL; + s->shutdown_starting.head = nullptr; + s->shutdown_starting.tail = nullptr; s->shutdown_complete = shutdown_complete; - s->on_accept_cb = NULL; - s->on_accept_cb_arg = NULL; - s->head = NULL; - s->tail = NULL; + s->on_accept_cb = nullptr; + s->on_accept_cb_arg = nullptr; + s->head = nullptr; + s->tail = nullptr; s->nports = 0; s->channel_args = grpc_channel_args_copy(args); gpr_atm_no_barrier_store(&s->next_pollset_to_assign, 0); @@ -116,34 +101,33 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void finish_shutdown(grpc_tcp_server* s) { gpr_mu_lock(&s->mu); GPR_ASSERT(s->shutdown); gpr_mu_unlock(&s->mu); - if (s->shutdown_complete != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); + if (s->shutdown_complete != nullptr) { + GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE); } gpr_mu_destroy(&s->mu); while (s->head) { - grpc_tcp_listener *sp = s->head; + grpc_tcp_listener* sp = s->head; s->head = sp->next; gpr_free(sp); } - grpc_channel_args_destroy(exec_ctx, s->channel_args); + grpc_channel_args_destroy(s->channel_args); gpr_free(s); } -static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, - grpc_error *error) { - grpc_tcp_server *s = (grpc_tcp_server *)server; +static void destroyed_port(void* server, grpc_error* error) { + grpc_tcp_server* s = static_cast(server); gpr_mu_lock(&s->mu); s->destroyed_ports++; if (s->destroyed_ports == s->nports) { gpr_mu_unlock(&s->mu); - finish_shutdown(exec_ctx, s); + finish_shutdown(s); } else { GPR_ASSERT(s->destroyed_ports < s->nports); gpr_mu_unlock(&s->mu); @@ -153,29 +137,29 @@ static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, /* called when all listening endpoints have been shutdown, so no further events will be received on them - at this point it's safe to destroy things */ -static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void deactivated_all_ports(grpc_tcp_server* s) { /* delete ALL the things */ gpr_mu_lock(&s->mu); GPR_ASSERT(s->shutdown); if (s->head) { - grpc_tcp_listener *sp; + grpc_tcp_listener* sp; for (sp = s->head; sp; sp = sp->next) { grpc_unlink_if_unix_domain_socket(&sp->addr); GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s, grpc_schedule_on_exec_ctx); - grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL, + grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, nullptr, false /* already_closed */, "tcp_listener_shutdown"); } gpr_mu_unlock(&s->mu); } else { gpr_mu_unlock(&s->mu); - finish_shutdown(exec_ctx, s); + finish_shutdown(s); } } -static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void tcp_server_destroy(grpc_tcp_server* s) { gpr_mu_lock(&s->mu); GPR_ASSERT(!s->shutdown); @@ -183,37 +167,38 @@ static void 
tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { /* shutdown all fd's */ if (s->active_ports) { - grpc_tcp_listener *sp; + grpc_tcp_listener* sp; for (sp = s->head; sp; sp = sp->next) { - grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Server destroyed")); + grpc_fd_shutdown( + sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server destroyed")); } gpr_mu_unlock(&s->mu); } else { gpr_mu_unlock(&s->mu); - deactivated_all_ports(exec_ctx, s); + deactivated_all_ports(s); } } /* event manager callback when reads are ready */ -static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { - grpc_tcp_listener *sp = (grpc_tcp_listener *)arg; - grpc_pollset *read_notifier_pollset; +static void on_read(void* arg, grpc_error* err) { + grpc_tcp_listener* sp = static_cast(arg); + grpc_pollset* read_notifier_pollset; if (err != GRPC_ERROR_NONE) { goto error; } read_notifier_pollset = - sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add( - &sp->server->next_pollset_to_assign, 1) % + sp->server->pollsets[static_cast(gpr_atm_no_barrier_fetch_add( + &sp->server->next_pollset_to_assign, 1)) % sp->server->pollset_count]; /* loop until accept4 returns EAGAIN, and then re-arm notification */ for (;;) { grpc_resolved_address addr; - char *addr_str; - char *name; - addr.len = sizeof(struct sockaddr_storage); + char* addr_str; + char* name; + memset(&addr, 0, sizeof(addr)); + addr.len = static_cast(sizeof(struct sockaddr_storage)); /* Note: If we ever decide to return this address to the user, remember to strip off the ::ffff:0.0.0.0/96 prefix first. */ int fd = grpc_accept4(sp->fd, &addr, 1, 1); @@ -222,7 +207,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { case EINTR: continue; case EAGAIN: - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); + grpc_fd_notify_on_read(sp->emfd, &sp->read_closure); return; default: gpr_mu_lock(&sp->server->mu); @@ -242,24 +227,24 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { addr_str = grpc_sockaddr_to_uri(&addr); gpr_asprintf(&name, "tcp-server-connection:%s", addr_str); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str); + if (grpc_tcp_trace.enabled()) { + gpr_log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s", addr_str); } - grpc_fd *fdobj = grpc_fd_create(fd, name); + grpc_fd* fdobj = grpc_fd_create(fd, name); - grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj); + grpc_pollset_add_fd(read_notifier_pollset, fdobj); // Create acceptor. 
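For context on the contract exercised at this point: the acceptor allocated below is handed to on_accept_cb together with the freshly created endpoint, and per the comment in tcp_server.h the callback takes ownership of it. A hedged sketch of a matching callback follows (illustrative only, not part of the patch; only the exec_ctx-free grpc_tcp_server_cb signature and the ownership rule come from the header, the body is a placeholder).

#include <grpc/support/alloc.h>
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/tcp_server.h"

/* Sketch of an accept callback matching the new signature. */
static void example_on_accept(void* arg, grpc_endpoint* ep,
                              grpc_pollset* accepting_pollset,
                              grpc_tcp_server_acceptor* acceptor) {
  (void)arg;
  (void)accepting_pollset;
  /* A real callback would hand `ep` to a transport; this sketch just
   * releases what it now owns. */
  grpc_endpoint_destroy(ep);
  gpr_free(acceptor); /* the callback owns the acceptor (see tcp_server.h) */
}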
- grpc_tcp_server_acceptor *acceptor = - (grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor)); + grpc_tcp_server_acceptor* acceptor = + static_cast(gpr_malloc(sizeof(*acceptor))); acceptor->from_server = sp->server; acceptor->port_index = sp->port_index; acceptor->fd_index = sp->fd_index; sp->server->on_accept_cb( - exec_ctx, sp->server->on_accept_cb_arg, - grpc_tcp_create(exec_ctx, fdobj, sp->server->channel_args, addr_str), + sp->server->on_accept_cb_arg, + grpc_tcp_create(fdobj, sp->server->channel_args, addr_str), read_notifier_pollset, acceptor); gpr_free(name); @@ -272,25 +257,25 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) { gpr_mu_lock(&sp->server->mu); if (0 == --sp->server->active_ports && sp->server->shutdown) { gpr_mu_unlock(&sp->server->mu); - deactivated_all_ports(exec_ctx, sp->server); + deactivated_all_ports(sp->server); } else { gpr_mu_unlock(&sp->server->mu); } } /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */ -static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s, +static grpc_error* add_wildcard_addrs_to_server(grpc_tcp_server* s, unsigned port_index, int requested_port, - int *out_port) { + int* out_port) { grpc_resolved_address wild4; grpc_resolved_address wild6; unsigned fd_index = 0; grpc_dualstack_mode dsmode; - grpc_tcp_listener *sp = NULL; - grpc_tcp_listener *sp2 = NULL; - grpc_error *v6_err = GRPC_ERROR_NONE; - grpc_error *v4_err = GRPC_ERROR_NONE; + grpc_tcp_listener* sp = nullptr; + grpc_tcp_listener* sp2 = nullptr; + grpc_error* v6_err = GRPC_ERROR_NONE; + grpc_error* v4_err = GRPC_ERROR_NONE; *out_port = -1; if (grpc_tcp_server_have_ifaddrs() && s->expand_wildcard_addrs) { @@ -313,7 +298,7 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s, if ((v4_err = grpc_tcp_server_add_addr(s, &wild4, port_index, fd_index, &dsmode, &sp2)) == GRPC_ERROR_NONE) { *out_port = sp2->port; - if (sp != NULL) { + if (sp != nullptr) { sp2->is_sibling = 1; sp->sibling = sp2; } @@ -335,7 +320,7 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s, } return GRPC_ERROR_NONE; } else { - grpc_error *root_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + grpc_error* root_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Failed to add any wildcard listeners"); GPR_ASSERT(v6_err != GRPC_ERROR_NONE && v4_err != GRPC_ERROR_NONE); root_err = grpc_error_add_child(root_err, v6_err); @@ -344,13 +329,13 @@ static grpc_error *add_wildcard_addrs_to_server(grpc_tcp_server *s, } } -static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) { - grpc_tcp_listener *sp = NULL; - char *addr_str; - char *name; - grpc_error *err; +static grpc_error* clone_port(grpc_tcp_listener* listener, unsigned count) { + grpc_tcp_listener* sp = nullptr; + char* addr_str; + char* name; + grpc_error* err; - for (grpc_tcp_listener *l = listener->next; l && l->is_sibling; l = l->next) { + for (grpc_tcp_listener* l = listener->next; l && l->is_sibling; l = l->next) { l->fd_index += count; } @@ -366,7 +351,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) { listener->server->nports++; grpc_sockaddr_to_string(&addr_str, &listener->addr, 1); gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i); - sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener)); + sp = static_cast(gpr_malloc(sizeof(grpc_tcp_listener))); sp->next = listener->next; listener->next = sp; /* sp (the new listener) is a sibling of 'listener' (the original @@ -382,7 +367,7 @@ static grpc_error 
*clone_port(grpc_tcp_listener *listener, unsigned count) { sp->port_index = listener->port_index; sp->fd_index = listener->fd_index + count - i; GPR_ASSERT(sp->emfd); - while (listener->server->tail->next != NULL) { + while (listener->server->tail->next != nullptr) { listener->server->tail = listener->server->tail->next; } gpr_free(addr_str); @@ -392,18 +377,18 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) { return GRPC_ERROR_NONE; } -grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, - const grpc_resolved_address *addr, - int *out_port) { - grpc_tcp_listener *sp; +static grpc_error* tcp_server_add_port(grpc_tcp_server* s, + const grpc_resolved_address* addr, + int* out_port) { + grpc_tcp_listener* sp; grpc_resolved_address sockname_temp; grpc_resolved_address addr6_v4mapped; int requested_port = grpc_sockaddr_get_port(addr); unsigned port_index = 0; grpc_dualstack_mode dsmode; - grpc_error *err; + grpc_error* err; *out_port = -1; - if (s->tail != NULL) { + if (s->tail != nullptr) { port_index = s->tail->port_index + 1; } grpc_unlink_if_unix_domain_socket(addr); @@ -412,9 +397,12 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, as some previously created listener. */ if (requested_port == 0) { for (sp = s->head; sp; sp = sp->next) { - sockname_temp.len = sizeof(struct sockaddr_storage); - if (0 == getsockname(sp->fd, (struct sockaddr *)&sockname_temp.addr, - (socklen_t *)&sockname_temp.len)) { + sockname_temp.len = + static_cast(sizeof(struct sockaddr_storage)); + if (0 == + getsockname(sp->fd, + reinterpret_cast(&sockname_temp.addr), + &sockname_temp.len)) { int used_port = grpc_sockaddr_get_port(&sockname_temp); if (used_port > 0) { memcpy(&sockname_temp, addr, sizeof(grpc_resolved_address)); @@ -442,10 +430,10 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, /* Return listener at port_index or NULL. Should only be called with s->mu locked. 
*/ -static grpc_tcp_listener *get_port_index(grpc_tcp_server *s, +static grpc_tcp_listener* get_port_index(grpc_tcp_server* s, unsigned port_index) { unsigned num_ports = 0; - grpc_tcp_listener *sp; + grpc_tcp_listener* sp; for (sp = s->head; sp; sp = sp->next) { if (!sp->is_sibling) { if (++num_ports > port_index) { @@ -453,14 +441,13 @@ static grpc_tcp_listener *get_port_index(grpc_tcp_server *s, } } } - return NULL; + return nullptr; } -unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s, - unsigned port_index) { +unsigned tcp_server_port_fd_count(grpc_tcp_server* s, unsigned port_index) { unsigned num_fds = 0; gpr_mu_lock(&s->mu); - grpc_tcp_listener *sp = get_port_index(s, port_index); + grpc_tcp_listener* sp = get_port_index(s, port_index); for (; sp; sp = sp->sibling) { ++num_fds; } @@ -468,10 +455,10 @@ unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s, return num_fds; } -int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index, - unsigned fd_index) { +static int tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index, + unsigned fd_index) { gpr_mu_lock(&s->mu); - grpc_tcp_listener *sp = get_port_index(s, port_index); + grpc_tcp_listener* sp = get_port_index(s, port_index); for (; sp; sp = sp->sibling, --fd_index) { if (fd_index == 0) { gpr_mu_unlock(&s->mu); @@ -482,12 +469,12 @@ int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index, return -1; } -void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, - grpc_pollset **pollsets, size_t pollset_count, - grpc_tcp_server_cb on_accept_cb, - void *on_accept_cb_arg) { +static void tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollsets, + size_t pollset_count, + grpc_tcp_server_cb on_accept_cb, + void* on_accept_cb_arg) { size_t i; - grpc_tcp_listener *sp; + grpc_tcp_listener* sp; GPR_ASSERT(on_accept_cb); gpr_mu_lock(&s->mu); GPR_ASSERT(!s->on_accept_cb); @@ -497,26 +484,26 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, s->pollsets = pollsets; s->pollset_count = pollset_count; sp = s->head; - while (sp != NULL) { + while (sp != nullptr) { if (s->so_reuseport && !grpc_is_unix_socket(&sp->addr) && pollset_count > 1) { GPR_ASSERT(GRPC_LOG_IF_ERROR( "clone_port", clone_port(sp, (unsigned)(pollset_count - 1)))); for (i = 0; i < pollset_count; i++) { - grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); + grpc_pollset_add_fd(pollsets[i], sp->emfd); GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp, grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); + grpc_fd_notify_on_read(sp->emfd, &sp->read_closure); s->active_ports++; sp = sp->next; } } else { for (i = 0; i < pollset_count; i++) { - grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); + grpc_pollset_add_fd(pollsets[i], sp->emfd); } GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp, grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); + grpc_fd_notify_on_read(sp->emfd, &sp->read_closure); s->active_ports++; sp = sp->next; } @@ -524,42 +511,51 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, gpr_mu_unlock(&s->mu); } -grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) { +grpc_tcp_server* tcp_server_ref(grpc_tcp_server* s) { gpr_ref_non_zero(&s->refs); return s; } -void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, - grpc_closure *shutdown_starting) { +static void tcp_server_shutdown_starting_add(grpc_tcp_server* s, + grpc_closure* shutdown_starting) { gpr_mu_lock(&s->mu); 
grpc_closure_list_append(&s->shutdown_starting, shutdown_starting, GRPC_ERROR_NONE); gpr_mu_unlock(&s->mu); } -void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void tcp_server_unref(grpc_tcp_server* s) { if (gpr_unref(&s->refs)) { - grpc_tcp_server_shutdown_listeners(exec_ctx, s); + grpc_tcp_server_shutdown_listeners(s); gpr_mu_lock(&s->mu); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting); + GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting); gpr_mu_unlock(&s->mu); - tcp_server_destroy(exec_ctx, s); + tcp_server_destroy(s); } } -void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s) { +static void tcp_server_shutdown_listeners(grpc_tcp_server* s) { gpr_mu_lock(&s->mu); s->shutdown_listeners = true; /* shutdown all fd's */ if (s->active_ports) { - grpc_tcp_listener *sp; + grpc_tcp_listener* sp; for (sp = s->head; sp; sp = sp->next) { - grpc_fd_shutdown(exec_ctx, sp->emfd, + grpc_fd_shutdown(sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown")); } } gpr_mu_unlock(&s->mu); } +grpc_tcp_server_vtable grpc_posix_tcp_server_vtable = { + tcp_server_create, + tcp_server_start, + tcp_server_add_port, + tcp_server_port_fd_count, + tcp_server_port_fd, + tcp_server_ref, + tcp_server_shutdown_starting_add, + tcp_server_unref, + tcp_server_shutdown_listeners}; #endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix.h b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix.h index 85dea515d..34d68130c 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix.h +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H #define GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H +#include + #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/socket_utils_posix.h" @@ -27,22 +29,22 @@ /* one listening port */ typedef struct grpc_tcp_listener { int fd; - grpc_fd *emfd; - grpc_tcp_server *server; + grpc_fd* emfd; + grpc_tcp_server* server; grpc_resolved_address addr; int port; unsigned port_index; unsigned fd_index; grpc_closure read_closure; grpc_closure destroyed_closure; - struct grpc_tcp_listener *next; + struct grpc_tcp_listener* next; /* sibling is a linked list of all listeners for a given port. add_port and clone_port place all new listeners in the same sibling list. A member of the 'sibling' list is also a member of the 'next' list. The head of each sibling list has is_sibling==0, and subsequent members of sibling lists have is_sibling==1. is_sibling allows separate sibling lists to be identified while iterating through 'next'. */ - struct grpc_tcp_listener *sibling; + struct grpc_tcp_listener* sibling; int is_sibling; } grpc_tcp_listener; @@ -51,7 +53,7 @@ struct grpc_tcp_server { gpr_refcount refs; /* Called whenever accept() succeeds on a server port. */ grpc_tcp_server_cb on_accept_cb; - void *on_accept_cb_arg; + void* on_accept_cb_arg; gpr_mu mu; @@ -70,18 +72,18 @@ struct grpc_tcp_server { bool expand_wildcard_addrs; /* linked list of server ports */ - grpc_tcp_listener *head; - grpc_tcp_listener *tail; + grpc_tcp_listener* head; + grpc_tcp_listener* tail; unsigned nports; /* List of closures passed to shutdown_starting_add(). 
*/ grpc_closure_list shutdown_starting; /* shutdown callback */ - grpc_closure *shutdown_complete; + grpc_closure* shutdown_complete; /* all pollsets interested in new connections */ - grpc_pollset **pollsets; + grpc_pollset** pollsets; /* number of pollsets in the pollsets array */ size_t pollset_count; @@ -89,31 +91,31 @@ struct grpc_tcp_server { gpr_atm next_pollset_to_assign; /* channel args for this server */ - grpc_channel_args *channel_args; + grpc_channel_args* channel_args; }; /* If successful, add a listener to \a s for \a addr, set \a dsmode for the socket, and return the \a listener. */ -grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s, - const grpc_resolved_address *addr, +grpc_error* grpc_tcp_server_add_addr(grpc_tcp_server* s, + const grpc_resolved_address* addr, unsigned port_index, unsigned fd_index, - grpc_dualstack_mode *dsmode, - grpc_tcp_listener **listener); + grpc_dualstack_mode* dsmode, + grpc_tcp_listener** listener); /* Get all addresses assigned to network interfaces on the machine and create a listener for each. requested_port is the port to use for every listener, or 0 to select one random port that will be used for every listener. Set *out_port to the port selected. Return GRPC_ERROR_NONE only if all listeners were added. */ -grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, +grpc_error* grpc_tcp_server_add_all_local_addrs(grpc_tcp_server* s, unsigned port_index, int requested_port, - int *out_port); + int* out_port); /* Prepare a recently-created socket for listening. */ -grpc_error *grpc_tcp_server_prepare_socket(int fd, - const grpc_resolved_address *addr, - bool so_reuseport, int *port); +grpc_error* grpc_tcp_server_prepare_socket(int fd, + const grpc_resolved_address* addr, + bool so_reuseport, int* port); /* Ruturn true if the platform supports ifaddrs */ bool grpc_tcp_server_have_ifaddrs(void); diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_common.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_common.cc similarity index 80% rename from Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_common.c rename to Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_common.cc index a828bee07..9f4e58ca1 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_common.c +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_common.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_SOCKET @@ -46,17 +48,17 @@ static int s_max_accept_queue_size; static void init_max_accept_queue_size(void) { int n = SOMAXCONN; char buf[64]; - FILE *fp = fopen("/proc/sys/net/core/somaxconn", "r"); - if (fp == NULL) { + FILE* fp = fopen("/proc/sys/net/core/somaxconn", "r"); + if (fp == nullptr) { /* 2.4 kernel. 
*/ s_max_accept_queue_size = SOMAXCONN; return; } if (fgets(buf, sizeof buf, fp)) { - char *end; + char* end; long i = strtol(buf, &end, 10); - if (i > 0 && i <= INT_MAX && end && *end == 0) { - n = (int)i; + if (i > 0 && i <= INT_MAX && end && *end == '\n') { + n = static_cast(i); } } fclose(fp); @@ -75,16 +77,16 @@ static int get_max_accept_queue_size(void) { return s_max_accept_queue_size; } -static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd, - const grpc_resolved_address *addr, +static grpc_error* add_socket_to_server(grpc_tcp_server* s, int fd, + const grpc_resolved_address* addr, unsigned port_index, unsigned fd_index, - grpc_tcp_listener **listener) { - grpc_tcp_listener *sp = NULL; + grpc_tcp_listener** listener) { + grpc_tcp_listener* sp = nullptr; int port = -1; - char *addr_str; - char *name; + char* addr_str; + char* name; - grpc_error *err = + grpc_error* err = grpc_tcp_server_prepare_socket(fd, addr, s->so_reuseport, &port); if (err == GRPC_ERROR_NONE) { GPR_ASSERT(port > 0); @@ -93,9 +95,9 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd, gpr_mu_lock(&s->mu); s->nports++; GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server"); - sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener)); - sp->next = NULL; - if (s->head == NULL) { + sp = static_cast(gpr_malloc(sizeof(grpc_tcp_listener))); + sp->next = nullptr; + if (s->head == nullptr) { s->head = sp; } else { s->tail->next = sp; @@ -109,7 +111,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd, sp->port_index = port_index; sp->fd_index = fd_index; sp->is_sibling = 0; - sp->sibling = NULL; + sp->sibling = nullptr; GPR_ASSERT(sp->emfd); gpr_mu_unlock(&s->mu); gpr_free(addr_str); @@ -122,14 +124,14 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd, /* If successful, add a listener to s for addr, set *dsmode for the socket, and return the *listener. */ -grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s, - const grpc_resolved_address *addr, +grpc_error* grpc_tcp_server_add_addr(grpc_tcp_server* s, + const grpc_resolved_address* addr, unsigned port_index, unsigned fd_index, - grpc_dualstack_mode *dsmode, - grpc_tcp_listener **listener) { + grpc_dualstack_mode* dsmode, + grpc_tcp_listener** listener) { grpc_resolved_address addr4_copy; int fd; - grpc_error *err = + grpc_error* err = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, dsmode, &fd); if (err != GRPC_ERROR_NONE) { return err; @@ -142,11 +144,11 @@ grpc_error *grpc_tcp_server_add_addr(grpc_tcp_server *s, } /* Prepare a recently-created socket for listening. 
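One behavioral fix worth noting in init_max_accept_queue_size() above: fgets() retains the trailing newline read from /proc/sys/net/core/somaxconn, so the successful-parse test now checks `*end == '\n'`, whereas the old `*end == 0` test would not match typical /proc output and the code silently fell back to SOMAXCONN. A small self-contained illustration follows (the buffer contents are assumed; the checks mirror the hunk).

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  char buf[64] = "4096\n"; /* what fgets() typically yields for this /proc file */
  char* end;
  long i = strtol(buf, &end, 10);
  /* New test: accepted, because strtol() stops at the retained newline. */
  printf("new check passes: %d\n", i > 0 && i <= INT_MAX && end && *end == '\n');
  /* Old test: rejected on the same input, forcing the SOMAXCONN fallback. */
  printf("old check passes: %d\n", i > 0 && i <= INT_MAX && end && *end == 0);
  return 0;
}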
*/ -grpc_error *grpc_tcp_server_prepare_socket(int fd, - const grpc_resolved_address *addr, - bool so_reuseport, int *port) { +grpc_error* grpc_tcp_server_prepare_socket(int fd, + const grpc_resolved_address* addr, + bool so_reuseport, int* port) { grpc_resolved_address sockname_temp; - grpc_error *err = GRPC_ERROR_NONE; + grpc_error* err = GRPC_ERROR_NONE; GPR_ASSERT(fd >= 0); @@ -168,8 +170,8 @@ grpc_error *grpc_tcp_server_prepare_socket(int fd, err = grpc_set_socket_no_sigpipe_if_possible(fd); if (err != GRPC_ERROR_NONE) goto error; - GPR_ASSERT(addr->len < ~(socklen_t)0); - if (bind(fd, (struct sockaddr *)addr->addr, (socklen_t)addr->len) < 0) { + if (bind(fd, reinterpret_cast(const_cast(addr->addr)), + addr->len) < 0) { err = GRPC_OS_ERROR(errno, "bind"); goto error; } @@ -179,10 +181,10 @@ grpc_error *grpc_tcp_server_prepare_socket(int fd, goto error; } - sockname_temp.len = sizeof(struct sockaddr_storage); + sockname_temp.len = static_cast(sizeof(struct sockaddr_storage)); - if (getsockname(fd, (struct sockaddr *)sockname_temp.addr, - (socklen_t *)&sockname_temp.len) < 0) { + if (getsockname(fd, reinterpret_cast(sockname_temp.addr), + &sockname_temp.len) < 0) { err = GRPC_OS_ERROR(errno, "getsockname"); goto error; } @@ -195,7 +197,7 @@ grpc_error *grpc_tcp_server_prepare_socket(int fd, if (fd >= 0) { close(fd); } - grpc_error *ret = + grpc_error* ret = grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Unable to configure socket", &err, 1), GRPC_ERROR_INT_FD, fd); diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc similarity index 79% rename from Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c rename to Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc index a243b69f3..7fd86c57e 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_HAVE_IFADDRS @@ -36,11 +38,11 @@ #include "src/core/lib/iomgr/sockaddr_utils.h" /* Return the listener in s with address addr or NULL. */ -static grpc_tcp_listener *find_listener_with_addr(grpc_tcp_server *s, - grpc_resolved_address *addr) { - grpc_tcp_listener *l; +static grpc_tcp_listener* find_listener_with_addr(grpc_tcp_server* s, + grpc_resolved_address* addr) { + grpc_tcp_listener* l; gpr_mu_lock(&s->mu); - for (l = s->head; l != NULL; l = l->next) { + for (l = s->head; l != nullptr; l = l->next) { if (l->addr.len != addr->len) { continue; } @@ -53,12 +55,12 @@ static grpc_tcp_listener *find_listener_with_addr(grpc_tcp_server *s, } /* Bind to "::" to get a port number not used by any address. 
*/ -static grpc_error *get_unused_port(int *port) { +static grpc_error* get_unused_port(int* port) { grpc_resolved_address wild; grpc_sockaddr_make_wildcard6(0, &wild); grpc_dualstack_mode dsmode; int fd; - grpc_error *err = + grpc_error* err = grpc_create_dualstack_socket(&wild, SOCK_STREAM, 0, &dsmode, &fd); if (err != GRPC_ERROR_NONE) { return err; @@ -66,12 +68,13 @@ static grpc_error *get_unused_port(int *port) { if (dsmode == GRPC_DSMODE_IPV4) { grpc_sockaddr_make_wildcard4(0, &wild); } - if (bind(fd, (const struct sockaddr *)wild.addr, (socklen_t)wild.len) != 0) { + if (bind(fd, reinterpret_cast(wild.addr), wild.len) != + 0) { err = GRPC_OS_ERROR(errno, "bind"); close(fd); return err; } - if (getsockname(fd, (struct sockaddr *)wild.addr, (socklen_t *)&wild.len) != + if (getsockname(fd, reinterpret_cast(wild.addr), &wild.len) != 0) { err = GRPC_OS_ERROR(errno, "getsockname"); close(fd); @@ -83,15 +86,15 @@ static grpc_error *get_unused_port(int *port) { : GRPC_ERROR_NONE; } -grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, +grpc_error* grpc_tcp_server_add_all_local_addrs(grpc_tcp_server* s, unsigned port_index, int requested_port, - int *out_port) { - struct ifaddrs *ifa = NULL; - struct ifaddrs *ifa_it; + int* out_port) { + struct ifaddrs* ifa = nullptr; + struct ifaddrs* ifa_it; unsigned fd_index = 0; - grpc_tcp_listener *sp = NULL; - grpc_error *err = GRPC_ERROR_NONE; + grpc_tcp_listener* sp = nullptr; + grpc_error* err = GRPC_ERROR_NONE; if (requested_port == 0) { /* Note: There could be a race where some local addrs can listen on the selected port and some can't. The sane way to handle this would be to @@ -104,21 +107,21 @@ grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, } gpr_log(GPR_DEBUG, "Picked unused port %d", requested_port); } - if (getifaddrs(&ifa) != 0 || ifa == NULL) { + if (getifaddrs(&ifa) != 0 || ifa == nullptr) { return GRPC_OS_ERROR(errno, "getifaddrs"); } - for (ifa_it = ifa; ifa_it != NULL; ifa_it = ifa_it->ifa_next) { + for (ifa_it = ifa; ifa_it != nullptr; ifa_it = ifa_it->ifa_next) { grpc_resolved_address addr; - char *addr_str = NULL; + char* addr_str = nullptr; grpc_dualstack_mode dsmode; - grpc_tcp_listener *new_sp = NULL; - const char *ifa_name = (ifa_it->ifa_name ? ifa_it->ifa_name : ""); - if (ifa_it->ifa_addr == NULL) { + grpc_tcp_listener* new_sp = nullptr; + const char* ifa_name = (ifa_it->ifa_name ? ifa_it->ifa_name : ""); + if (ifa_it->ifa_addr == nullptr) { continue; } else if (ifa_it->ifa_addr->sa_family == AF_INET) { - addr.len = sizeof(struct sockaddr_in); + addr.len = static_cast(sizeof(grpc_sockaddr_in)); } else if (ifa_it->ifa_addr->sa_family == AF_INET6) { - addr.len = sizeof(struct sockaddr_in6); + addr.len = static_cast(sizeof(grpc_sockaddr_in6)); } else { continue; } @@ -136,7 +139,7 @@ grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, ifa_name, ifa_it->ifa_flags, addr_str); /* We could have multiple interfaces with the same address (e.g., bonding), so look for duplicates. 
*/ - if (find_listener_with_addr(s, &addr) != NULL) { + if (find_listener_with_addr(s, &addr) != nullptr) { gpr_log(GPR_DEBUG, "Skipping duplicate addr %s on interface %s", addr_str, ifa_name); gpr_free(addr_str); @@ -144,8 +147,8 @@ grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, } if ((err = grpc_tcp_server_add_addr(s, &addr, port_index, fd_index, &dsmode, &new_sp)) != GRPC_ERROR_NONE) { - char *err_str = NULL; - grpc_error *root_err; + char* err_str = nullptr; + grpc_error* root_err; if (gpr_asprintf(&err_str, "Failed to add listener: %s", addr_str) < 0) { err_str = gpr_strdup("Failed to add listener"); } @@ -157,7 +160,7 @@ grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, } else { GPR_ASSERT(requested_port == new_sp->port); ++fd_index; - if (sp != NULL) { + if (sp != nullptr) { new_sp->is_sibling = 1; sp->sibling = new_sp; } @@ -168,7 +171,7 @@ grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, freeifaddrs(ifa); if (err != GRPC_ERROR_NONE) { return err; - } else if (sp == NULL) { + } else if (sp == nullptr) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING("No local addresses"); } else { *out_port = sp->port; diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc similarity index 86% rename from Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c rename to Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc index 34eab20d6..86ee14f28 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc @@ -16,16 +16,18 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #if defined(GRPC_POSIX_SOCKET) && !defined(GRPC_HAVE_IFADDRS) #include "src/core/lib/iomgr/tcp_server_utils_posix.h" -grpc_error *grpc_tcp_server_add_all_local_addrs(grpc_tcp_server *s, +grpc_error* grpc_tcp_server_add_all_local_addrs(grpc_tcp_server* s, unsigned port_index, int requested_port, - int *out_port) { + int* out_port) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING("no ifaddrs available"); } diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_uv.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_uv.c deleted file mode 100644 index 3b9332321..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_uv.c +++ /dev/null @@ -1,454 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_UV - -#include -#include - -#include -#include - -#include "src/core/lib/iomgr/error.h" -#include "src/core/lib/iomgr/exec_ctx.h" -#include "src/core/lib/iomgr/iomgr_uv.h" -#include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/iomgr/tcp_server.h" -#include "src/core/lib/iomgr/tcp_uv.h" - -/* one listening port */ -typedef struct grpc_tcp_listener grpc_tcp_listener; -struct grpc_tcp_listener { - uv_tcp_t *handle; - grpc_tcp_server *server; - unsigned port_index; - int port; - /* linked list */ - struct grpc_tcp_listener *next; - - bool closed; - - bool has_pending_connection; -}; - -struct grpc_tcp_server { - gpr_refcount refs; - - /* Called whenever accept() succeeds on a server port. */ - grpc_tcp_server_cb on_accept_cb; - void *on_accept_cb_arg; - - int open_ports; - - /* linked list of server ports */ - grpc_tcp_listener *head; - grpc_tcp_listener *tail; - - /* List of closures passed to shutdown_starting_add(). */ - grpc_closure_list shutdown_starting; - - /* shutdown callback */ - grpc_closure *shutdown_complete; - - bool shutdown; - - grpc_resource_quota *resource_quota; -}; - -grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, - grpc_closure *shutdown_complete, - const grpc_channel_args *args, - grpc_tcp_server **server) { - grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server)); - s->resource_quota = grpc_resource_quota_create(NULL); - for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) { - if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) { - if (args->args[i].type == GRPC_ARG_POINTER) { - grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota); - s->resource_quota = - grpc_resource_quota_ref_internal(args->args[i].value.pointer.p); - } else { - grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota); - gpr_free(s); - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool"); - } - } - } - gpr_ref_init(&s->refs, 1); - s->on_accept_cb = NULL; - s->on_accept_cb_arg = NULL; - s->open_ports = 0; - s->head = NULL; - s->tail = NULL; - s->shutdown_starting.head = NULL; - s->shutdown_starting.tail = NULL; - s->shutdown_complete = shutdown_complete; - s->shutdown = false; - *server = s; - return GRPC_ERROR_NONE; -} - -grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) { - GRPC_UV_ASSERT_SAME_THREAD(); - gpr_ref(&s->refs); - return s; -} - -void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, - grpc_closure *shutdown_starting) { - grpc_closure_list_append(&s->shutdown_starting, shutdown_starting, - GRPC_ERROR_NONE); -} - -static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { - GPR_ASSERT(s->shutdown); - if (s->shutdown_complete != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); - } - - while (s->head) { - grpc_tcp_listener *sp = s->head; - s->head = sp->next; - sp->next = NULL; - gpr_free(sp->handle); - gpr_free(sp); - } - grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota); - gpr_free(s); -} - -static void handle_close_callback(uv_handle_t *handle) { - grpc_tcp_listener *sp = (grpc_tcp_listener *)handle->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - sp->server->open_ports--; - if (sp->server->open_ports == 0 && sp->server->shutdown) { - finish_shutdown(&exec_ctx, sp->server); - } - grpc_exec_ctx_finish(&exec_ctx); -} - -static void close_listener(grpc_tcp_listener *sp) { - if 
(!sp->closed) { - sp->closed = true; - uv_close((uv_handle_t *)sp->handle, handle_close_callback); - } -} - -static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { - int immediately_done = 0; - grpc_tcp_listener *sp; - - GPR_ASSERT(!s->shutdown); - s->shutdown = true; - - if (s->open_ports == 0) { - immediately_done = 1; - } - for (sp = s->head; sp; sp = sp->next) { - close_listener(sp); - } - - if (immediately_done) { - finish_shutdown(exec_ctx, s); - } -} - -void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { - GRPC_UV_ASSERT_SAME_THREAD(); - if (gpr_unref(&s->refs)) { - /* Complete shutdown_starting work before destroying. */ - grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_CLOSURE_LIST_SCHED(&local_exec_ctx, &s->shutdown_starting); - if (exec_ctx == NULL) { - grpc_exec_ctx_flush(&local_exec_ctx); - tcp_server_destroy(&local_exec_ctx, s); - grpc_exec_ctx_finish(&local_exec_ctx); - } else { - grpc_exec_ctx_finish(&local_exec_ctx); - tcp_server_destroy(exec_ctx, s); - } - } -} - -static void finish_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *sp) { - grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); - uv_tcp_t *client; - grpc_endpoint *ep = NULL; - grpc_resolved_address peer_name; - char *peer_name_string; - int err; - uv_tcp_t *server = sp->handle; - - client = gpr_malloc(sizeof(uv_tcp_t)); - uv_tcp_init(uv_default_loop(), client); - // UV documentation says this is guaranteed to succeed - uv_accept((uv_stream_t *)server, (uv_stream_t *)client); - peer_name_string = NULL; - memset(&peer_name, 0, sizeof(grpc_resolved_address)); - peer_name.len = sizeof(struct sockaddr_storage); - err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr, - (int *)&peer_name.len); - if (err == 0) { - peer_name_string = grpc_sockaddr_to_uri(&peer_name); - } else { - gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(err)); - } - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - if (peer_name_string) { - gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s", - sp->server, peer_name_string); - } else { - gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection", sp->server); - } - } - ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string); - acceptor->from_server = sp->server; - acceptor->port_index = sp->port_index; - acceptor->fd_index = 0; - sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, - acceptor); - gpr_free(peer_name_string); -} - -static void on_connect(uv_stream_t *server, int status) { - grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - - if (status < 0) { - switch (status) { - case UV_EINTR: - case UV_EAGAIN: - return; - default: - close_listener(sp); - return; - } - } - - GPR_ASSERT(!sp->has_pending_connection); - - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p incoming connection", sp->server); - } - - // Create acceptor. 
- if (sp->server->on_accept_cb) { - finish_accept(&exec_ctx, sp); - } else { - sp->has_pending_connection = true; - } - grpc_exec_ctx_finish(&exec_ctx); -} - -static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle, - const grpc_resolved_address *addr, - unsigned port_index, - grpc_tcp_listener **listener) { - grpc_tcp_listener *sp = NULL; - int port = -1; - int status; - grpc_error *error; - grpc_resolved_address sockname_temp; - - // The last argument to uv_tcp_bind is flags - status = uv_tcp_bind(handle, (struct sockaddr *)addr->addr, 0); - if (status != 0) { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to bind to port"); - error = - grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string(uv_strerror(status))); - return error; - } - - status = uv_listen((uv_stream_t *)handle, SOMAXCONN, on_connect); - if (status != 0) { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to listen to port"); - error = - grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string(uv_strerror(status))); - return error; - } - - sockname_temp.len = (int)sizeof(struct sockaddr_storage); - status = uv_tcp_getsockname(handle, (struct sockaddr *)&sockname_temp.addr, - (int *)&sockname_temp.len); - if (status != 0) { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getsockname failed"); - error = - grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string(uv_strerror(status))); - return error; - } - - port = grpc_sockaddr_get_port(&sockname_temp); - - GPR_ASSERT(port >= 0); - GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server"); - sp = gpr_zalloc(sizeof(grpc_tcp_listener)); - sp->next = NULL; - if (s->head == NULL) { - s->head = sp; - } else { - s->tail->next = sp; - } - s->tail = sp; - sp->server = s; - sp->handle = handle; - sp->port = port; - sp->port_index = port_index; - sp->closed = false; - handle->data = sp; - s->open_ports++; - GPR_ASSERT(sp->handle); - *listener = sp; - - return GRPC_ERROR_NONE; -} - -grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, - const grpc_resolved_address *addr, - int *port) { - // This function is mostly copied from tcp_server_windows.c - grpc_tcp_listener *sp = NULL; - uv_tcp_t *handle; - grpc_resolved_address addr6_v4mapped; - grpc_resolved_address wildcard; - grpc_resolved_address *allocated_addr = NULL; - grpc_resolved_address sockname_temp; - unsigned port_index = 0; - int status; - grpc_error *error = GRPC_ERROR_NONE; - int family; - - GRPC_UV_ASSERT_SAME_THREAD(); - - if (s->tail != NULL) { - port_index = s->tail->port_index + 1; - } - - /* Check if this is a wildcard port, and if so, try to keep the port the same - as some previously created listener. */ - if (grpc_sockaddr_get_port(addr) == 0) { - for (sp = s->head; sp; sp = sp->next) { - sockname_temp.len = sizeof(struct sockaddr_storage); - if (0 == uv_tcp_getsockname(sp->handle, - (struct sockaddr *)&sockname_temp.addr, - (int *)&sockname_temp.len)) { - *port = grpc_sockaddr_get_port(&sockname_temp); - if (*port > 0) { - allocated_addr = gpr_malloc(sizeof(grpc_resolved_address)); - memcpy(allocated_addr, addr, sizeof(grpc_resolved_address)); - grpc_sockaddr_set_port(allocated_addr, *port); - addr = allocated_addr; - break; - } - } - } - } - - if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) { - addr = &addr6_v4mapped; - } - - /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. 
*/ - if (grpc_sockaddr_is_wildcard(addr, port)) { - grpc_sockaddr_make_wildcard6(*port, &wildcard); - - addr = &wildcard; - } - - handle = gpr_malloc(sizeof(uv_tcp_t)); - - family = grpc_sockaddr_get_family(addr); - status = uv_tcp_init_ex(uv_default_loop(), handle, (unsigned int)family); -#if defined(GPR_LINUX) && defined(SO_REUSEPORT) - if (family == AF_INET || family == AF_INET6) { - int fd; - uv_fileno((uv_handle_t *)handle, &fd); - int enable = 1; - setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable)); - } -#endif /* GPR_LINUX && SO_REUSEPORT */ - - if (status == 0) { - error = add_socket_to_server(s, handle, addr, port_index, &sp); - } else { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Failed to initialize UV tcp handle"); - error = - grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string(uv_strerror(status))); - } - - gpr_free(allocated_addr); - - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - char *port_string; - grpc_sockaddr_to_string(&port_string, addr, 0); - const char *str = grpc_error_string(error); - if (port_string) { - gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str); - gpr_free(port_string); - } else { - gpr_log(GPR_DEBUG, "SERVER %p add_port error=%s", s, str); - } - } - - if (error != GRPC_ERROR_NONE) { - grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Failed to add port to server", &error, 1); - GRPC_ERROR_UNREF(error); - error = error_out; - *port = -1; - } else { - GPR_ASSERT(sp != NULL); - *port = sp->port; - } - return error; -} - -void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server, - grpc_pollset **pollsets, size_t pollset_count, - grpc_tcp_server_cb on_accept_cb, void *cb_arg) { - grpc_tcp_listener *sp; - (void)pollsets; - (void)pollset_count; - GRPC_UV_ASSERT_SAME_THREAD(); - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "SERVER_START %p", server); - } - GPR_ASSERT(on_accept_cb); - GPR_ASSERT(!server->on_accept_cb); - server->on_accept_cb = on_accept_cb; - server->on_accept_cb_arg = cb_arg; - for (sp = server->head; sp; sp = sp->next) { - if (sp->has_pending_connection) { - finish_accept(exec_ctx, sp); - sp->has_pending_connection = false; - } - } -} - -void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s) {} - -#endif /* GRPC_UV */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_windows.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_windows.cc similarity index 71% rename from Sources/CgRPC/src/core/lib/iomgr/tcp_server_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/tcp_server_windows.cc index 0162afc1a..b01afdcc9 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_server_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_server_windows.cc @@ -16,12 +16,15 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINSOCK_SOCKET #include "src/core/lib/iomgr/sockaddr.h" +#include #include #include @@ -47,15 +50,15 @@ typedef struct grpc_tcp_listener grpc_tcp_listener; struct grpc_tcp_listener { /* This seemingly magic number comes from AcceptEx's documentation. each address buffer needs to have at least 16 more bytes at their end. */ - uint8_t addresses[(sizeof(struct sockaddr_in6) + 16) * 2]; + uint8_t addresses[(sizeof(grpc_sockaddr_in6) + 16) * 2]; /* This will hold the socket for the next accept. */ SOCKET new_socket; /* The listener winsocket. */ - grpc_winsocket *socket; + grpc_winsocket* socket; /* The actual TCP port number. 
*/ int port; unsigned port_index; - grpc_tcp_server *server; + grpc_tcp_server* server; /* The cached AcceptEx for that port. */ LPFN_ACCEPTEX AcceptEx; int shutting_down; @@ -63,7 +66,7 @@ struct grpc_tcp_listener { /* closure for socket notification of accept being ready */ grpc_closure on_accept; /* linked list */ - struct grpc_tcp_listener *next; + struct grpc_tcp_listener* next; }; /* the overall server */ @@ -71,7 +74,7 @@ struct grpc_tcp_server { gpr_refcount refs; /* Called whenever accept() succeeds on a server port. */ grpc_tcp_server_cb on_accept_cb; - void *on_accept_cb_arg; + void* on_accept_cb_arg; gpr_mu mu; @@ -79,25 +82,24 @@ struct grpc_tcp_server { int active_ports; /* linked list of server ports */ - grpc_tcp_listener *head; - grpc_tcp_listener *tail; + grpc_tcp_listener* head; + grpc_tcp_listener* tail; /* List of closures passed to shutdown_starting_add(). */ grpc_closure_list shutdown_starting; /* shutdown callback */ - grpc_closure *shutdown_complete; + grpc_closure* shutdown_complete; - grpc_channel_args *channel_args; + grpc_channel_args* channel_args; }; /* Public function. Allocates the proper data structures to hold a grpc_tcp_server. */ -grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, - grpc_closure *shutdown_complete, - const grpc_channel_args *args, - grpc_tcp_server **server) { - grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server)); +static grpc_error* tcp_server_create(grpc_closure* shutdown_complete, + const grpc_channel_args* args, + grpc_tcp_server** server) { + grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server)); s->channel_args = grpc_channel_args_copy(args); gpr_ref_init(&s->refs, 1); gpr_mu_init(&s->mu); @@ -113,56 +115,55 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void destroy_server(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_tcp_server *s = arg; +static void destroy_server(void* arg, grpc_error* error) { + grpc_tcp_server* s = (grpc_tcp_server*)arg; /* Now that the accepts have been aborted, we can destroy the sockets. The IOCP won't get notified on these, so we can flag them as already closed by the system. 
*/ while (s->head) { - grpc_tcp_listener *sp = s->head; + grpc_tcp_listener* sp = s->head; s->head = sp->next; sp->next = NULL; grpc_winsocket_destroy(sp->socket); gpr_free(sp); } - grpc_channel_args_destroy(exec_ctx, s->channel_args); + grpc_channel_args_destroy(s->channel_args); + gpr_mu_destroy(&s->mu); gpr_free(s); } -static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s) { +static void finish_shutdown_locked(grpc_tcp_server* s) { if (s->shutdown_complete != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE); } - GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(destroy_server, s, - grpc_schedule_on_exec_ctx), - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED( + GRPC_CLOSURE_CREATE(destroy_server, s, grpc_schedule_on_exec_ctx), + GRPC_ERROR_NONE); } -grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) { +static grpc_tcp_server* tcp_server_ref(grpc_tcp_server* s) { gpr_ref_non_zero(&s->refs); return s; } -void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s, - grpc_closure *shutdown_starting) { +static void tcp_server_shutdown_starting_add(grpc_tcp_server* s, + grpc_closure* shutdown_starting) { gpr_mu_lock(&s->mu); grpc_closure_list_append(&s->shutdown_starting, shutdown_starting, GRPC_ERROR_NONE); gpr_mu_unlock(&s->mu); } -static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { - grpc_tcp_listener *sp; +static void tcp_server_destroy(grpc_tcp_server* s) { + grpc_tcp_listener* sp; gpr_mu_lock(&s->mu); /* First, shutdown all fd's. This will queue abortion calls for all of the pending accepts due to the normal operation mechanism. */ if (s->active_ports == 0) { - finish_shutdown_locked(exec_ctx, s); + finish_shutdown_locked(s); } else { for (sp = s->head; sp; sp = sp->next) { sp->shutting_down = 1; @@ -172,29 +173,30 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { gpr_mu_unlock(&s->mu); } -void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { +static void tcp_server_unref(grpc_tcp_server* s) { if (gpr_unref(&s->refs)) { - grpc_tcp_server_shutdown_listeners(exec_ctx, s); + grpc_tcp_server_shutdown_listeners(s); gpr_mu_lock(&s->mu); - GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting); + GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting); gpr_mu_unlock(&s->mu); - tcp_server_destroy(exec_ctx, s); + tcp_server_destroy(s); } } /* Prepare (bind) a recently-created socket for listening. 
*/ -static grpc_error *prepare_socket(SOCKET sock, - const grpc_resolved_address *addr, - int *port) { +static grpc_error* prepare_socket(SOCKET sock, + const grpc_resolved_address* addr, + int* port) { grpc_resolved_address sockname_temp; - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; + int sockname_temp_len; error = grpc_tcp_prepare_socket(sock); if (error != GRPC_ERROR_NONE) { goto failure; } - if (bind(sock, (const struct sockaddr *)addr->addr, (int)addr->len) == + if (bind(sock, (const grpc_sockaddr*)addr->addr, (int)addr->len) == SOCKET_ERROR) { error = GRPC_WSA_ERROR(WSAGetLastError(), "bind"); goto failure; @@ -205,8 +207,8 @@ static grpc_error *prepare_socket(SOCKET sock, goto failure; } - int sockname_temp_len = sizeof(struct sockaddr_storage); - if (getsockname(sock, (struct sockaddr *)sockname_temp.addr, + sockname_temp_len = sizeof(struct sockaddr_storage); + if (getsockname(sock, (grpc_sockaddr*)sockname_temp.addr, &sockname_temp_len) == SOCKET_ERROR) { error = GRPC_WSA_ERROR(WSAGetLastError(), "getsockname"); goto failure; @@ -218,7 +220,7 @@ static grpc_error *prepare_socket(SOCKET sock, failure: GPR_ASSERT(error != GRPC_ERROR_NONE); - char *tgtaddr = grpc_sockaddr_to_uri(addr); + char* tgtaddr = grpc_sockaddr_to_uri(addr); grpc_error_set_int( grpc_error_set_str(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Failed to prepare server socket", &error, 1), @@ -231,24 +233,22 @@ static grpc_error *prepare_socket(SOCKET sock, return error; } -static void decrement_active_ports_and_notify_locked(grpc_exec_ctx *exec_ctx, - grpc_tcp_listener *sp) { +static void decrement_active_ports_and_notify_locked(grpc_tcp_listener* sp) { sp->shutting_down = 0; GPR_ASSERT(sp->server->active_ports > 0); if (0 == --sp->server->active_ports) { - finish_shutdown_locked(exec_ctx, sp->server); + finish_shutdown_locked(sp->server); } } /* In order to do an async accept, we need to create a socket first which will be the one assigned to the new incoming connection. */ -static grpc_error *start_accept_locked(grpc_exec_ctx *exec_ctx, - grpc_tcp_listener *port) { +static grpc_error* start_accept_locked(grpc_tcp_listener* port) { SOCKET sock = INVALID_SOCKET; BOOL success; - DWORD addrlen = sizeof(struct sockaddr_in6) + 16; + DWORD addrlen = sizeof(grpc_sockaddr_in6) + 16; DWORD bytes_received = 0; - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; if (port->shutting_down) { return GRPC_ERROR_NONE; @@ -282,7 +282,7 @@ static grpc_error *start_accept_locked(grpc_exec_ctx *exec_ctx, /* We're ready to do the accept. Calling grpc_socket_notify_on_read may immediately process an accept that happened in the meantime. */ port->new_socket = sock; - grpc_socket_notify_on_read(exec_ctx, port->socket, &port->on_accept); + grpc_socket_notify_on_read(port->socket, &port->on_accept); port->outstanding_calls++; return error; @@ -293,14 +293,14 @@ static grpc_error *start_accept_locked(grpc_exec_ctx *exec_ctx, } /* Event manager callback when reads are ready. 
*/ -static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_tcp_listener *sp = arg; +static void on_accept(void* arg, grpc_error* error) { + grpc_tcp_listener* sp = (grpc_tcp_listener*)arg; SOCKET sock = sp->new_socket; - grpc_winsocket_callback_info *info = &sp->socket->read_info; - grpc_endpoint *ep = NULL; + grpc_winsocket_callback_info* info = &sp->socket->read_info; + grpc_endpoint* ep = NULL; grpc_resolved_address peer_name; - char *peer_name_string; - char *fd_name; + char* peer_name_string; + char* fd_name; DWORD transfered_bytes; DWORD flags; BOOL wsa_success; @@ -314,7 +314,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { this is necessary in the read/write case, it's useless for the accept case. We only need to adjust the pending callback count */ if (error != GRPC_ERROR_NONE) { - const char *msg = grpc_error_string(error); + const char* msg = grpc_error_string(error); gpr_log(GPR_INFO, "Skipping on_accept due to error: %s", msg); gpr_mu_unlock(&sp->server->mu); @@ -328,7 +328,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { &transfered_bytes, FALSE, &flags); if (!wsa_success) { if (!sp->shutting_down) { - char *utf8_message = gpr_format_message(WSAGetLastError()); + char* utf8_message = gpr_format_message(WSAGetLastError()); gpr_log(GPR_ERROR, "on_accept error: %s", utf8_message); gpr_free(utf8_message); } @@ -337,25 +337,24 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { if (!sp->shutting_down) { peer_name_string = NULL; err = setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, - (char *)&sp->socket->socket, sizeof(sp->socket->socket)); + (char*)&sp->socket->socket, sizeof(sp->socket->socket)); if (err) { - char *utf8_message = gpr_format_message(WSAGetLastError()); + char* utf8_message = gpr_format_message(WSAGetLastError()); gpr_log(GPR_ERROR, "setsockopt error: %s", utf8_message); gpr_free(utf8_message); } int peer_name_len = (int)peer_name.len; - err = - getpeername(sock, (struct sockaddr *)peer_name.addr, &peer_name_len); + err = getpeername(sock, (grpc_sockaddr*)peer_name.addr, &peer_name_len); peer_name.len = (size_t)peer_name_len; if (!err) { peer_name_string = grpc_sockaddr_to_uri(&peer_name); } else { - char *utf8_message = gpr_format_message(WSAGetLastError()); + char* utf8_message = gpr_format_message(WSAGetLastError()); gpr_log(GPR_ERROR, "getpeername error: %s", utf8_message); gpr_free(utf8_message); } gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string); - ep = grpc_tcp_create(exec_ctx, grpc_winsocket_create(sock, fd_name), + ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name), sp->server->channel_args, peer_name_string); gpr_free(fd_name); gpr_free(peer_name_string); @@ -368,36 +367,35 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { managed to accept a connection, and created an endpoint. */ if (ep) { // Create acceptor. 
- grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); + grpc_tcp_server_acceptor* acceptor = + (grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor)); acceptor->from_server = sp->server; acceptor->port_index = sp->port_index; acceptor->fd_index = 0; - sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, - acceptor); + sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, NULL, acceptor); } /* As we were notified from the IOCP of one and exactly one accept, the former socked we created has now either been destroy or assigned to the new connection. We need to create a new one for the next connection. */ - GPR_ASSERT( - GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp))); + GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(sp))); if (0 == --sp->outstanding_calls) { - decrement_active_ports_and_notify_locked(exec_ctx, sp); + decrement_active_ports_and_notify_locked(sp); } gpr_mu_unlock(&sp->server->mu); } -static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock, - const grpc_resolved_address *addr, +static grpc_error* add_socket_to_server(grpc_tcp_server* s, SOCKET sock, + const grpc_resolved_address* addr, unsigned port_index, - grpc_tcp_listener **listener) { - grpc_tcp_listener *sp = NULL; + grpc_tcp_listener** listener) { + grpc_tcp_listener* sp = NULL; int port = -1; int status; GUID guid = WSAID_ACCEPTEX; DWORD ioctl_num_bytes; LPFN_ACCEPTEX AcceptEx; - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; /* We need to grab the AcceptEx pointer for that port, as it may be interface-dependent. We'll cache it to avoid doing that again. */ @@ -406,7 +404,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock, &AcceptEx, sizeof(AcceptEx), &ioctl_num_bytes, NULL, NULL); if (status != 0) { - char *utf8_message = gpr_format_message(WSAGetLastError()); + char* utf8_message = gpr_format_message(WSAGetLastError()); gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message); gpr_free(utf8_message); closesocket(sock); @@ -421,7 +419,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock, GPR_ASSERT(port >= 0); gpr_mu_lock(&s->mu); GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server"); - sp = gpr_malloc(sizeof(grpc_tcp_listener)); + sp = (grpc_tcp_listener*)gpr_malloc(sizeof(grpc_tcp_listener)); sp->next = NULL; if (s->head == NULL) { s->head = sp; @@ -445,17 +443,17 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, SOCKET sock, return GRPC_ERROR_NONE; } -grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, - const grpc_resolved_address *addr, - int *port) { - grpc_tcp_listener *sp = NULL; +static grpc_error* tcp_server_add_port(grpc_tcp_server* s, + const grpc_resolved_address* addr, + int* port) { + grpc_tcp_listener* sp = NULL; SOCKET sock; grpc_resolved_address addr6_v4mapped; grpc_resolved_address wildcard; - grpc_resolved_address *allocated_addr = NULL; + grpc_resolved_address* allocated_addr = NULL; grpc_resolved_address sockname_temp; unsigned port_index = 0; - grpc_error *error = GRPC_ERROR_NONE; + grpc_error* error = GRPC_ERROR_NONE; if (s->tail != NULL) { port_index = s->tail->port_index + 1; @@ -467,12 +465,13 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, for (sp = s->head; sp; sp = sp->next) { int sockname_temp_len = sizeof(struct sockaddr_storage); if (0 == getsockname(sp->socket->socket, - (struct sockaddr *)sockname_temp.addr, + (grpc_sockaddr*)sockname_temp.addr, 
&sockname_temp_len)) { sockname_temp.len = (size_t)sockname_temp_len; *port = grpc_sockaddr_get_port(&sockname_temp); if (*port > 0) { - allocated_addr = gpr_malloc(sizeof(grpc_resolved_address)); + allocated_addr = + (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address)); memcpy(allocated_addr, addr, sizeof(grpc_resolved_address)); grpc_sockaddr_set_port(allocated_addr, *port); addr = allocated_addr; @@ -506,7 +505,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, gpr_free(allocated_addr); if (error != GRPC_ERROR_NONE) { - grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + grpc_error* error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Failed to add port to server", &error, 1); GRPC_ERROR_UNREF(error); error = error_out; @@ -518,11 +517,11 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, return error; } -void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, - grpc_pollset **pollset, size_t pollset_count, - grpc_tcp_server_cb on_accept_cb, - void *on_accept_cb_arg) { - grpc_tcp_listener *sp; +static void tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollset, + size_t pollset_count, + grpc_tcp_server_cb on_accept_cb, + void* on_accept_cb_arg) { + grpc_tcp_listener* sp; GPR_ASSERT(on_accept_cb); gpr_mu_lock(&s->mu); GPR_ASSERT(!s->on_accept_cb); @@ -530,14 +529,32 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s, s->on_accept_cb = on_accept_cb; s->on_accept_cb_arg = on_accept_cb_arg; for (sp = s->head; sp; sp = sp->next) { - GPR_ASSERT( - GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp))); + GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(sp))); s->active_ports++; } gpr_mu_unlock(&s->mu); } -void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx *exec_ctx, - grpc_tcp_server *s) {} +static unsigned tcp_server_port_fd_count(grpc_tcp_server* s, + unsigned port_index) { + return 0; +} + +static int tcp_server_port_fd(grpc_tcp_server* s, unsigned port_index, + unsigned fd_index) { + return -1; +} +static void tcp_server_shutdown_listeners(grpc_tcp_server* s) {} + +grpc_tcp_server_vtable grpc_windows_tcp_server_vtable = { + tcp_server_create, + tcp_server_start, + tcp_server_add_port, + tcp_server_port_fd_count, + tcp_server_port_fd, + tcp_server_ref, + tcp_server_shutdown_starting_add, + tcp_server_unref, + tcp_server_shutdown_listeners}; #endif /* GRPC_WINSOCK_SOCKET */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_uv.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_uv.c deleted file mode 100644 index a05c19b4a..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_uv.c +++ /dev/null @@ -1,381 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_UV - -#include -#include - -#include - -#include -#include -#include - -#include "src/core/lib/iomgr/error.h" -#include "src/core/lib/iomgr/iomgr_uv.h" -#include "src/core/lib/iomgr/network_status_tracker.h" -#include "src/core/lib/iomgr/resource_quota.h" -#include "src/core/lib/iomgr/tcp_uv.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" - -grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false, "tcp"); - -typedef struct { - grpc_endpoint base; - gpr_refcount refcount; - - uv_write_t write_req; - uv_shutdown_t shutdown_req; - - uv_tcp_t *handle; - - grpc_closure *read_cb; - grpc_closure *write_cb; - - grpc_slice read_slice; - grpc_slice_buffer *read_slices; - grpc_slice_buffer *write_slices; - uv_buf_t *write_buffers; - - grpc_resource_user *resource_user; - - bool shutting_down; - - char *peer_string; - grpc_pollset *pollset; -} grpc_tcp; - -static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - grpc_slice_unref_internal(exec_ctx, tcp->read_slice); - grpc_resource_user_unref(exec_ctx, tcp->resource_user); - gpr_free(tcp->handle); - gpr_free(tcp->peer_string); - gpr_free(tcp); -} - -#ifndef NDEBUG -#define TCP_UNREF(exec_ctx, tcp, reason) \ - tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__) -#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - const char *reason, const char *file, int line) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val, - val - 1); - } - if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); - } -} - -static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, - int line) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val, - val + 1); - } - gpr_ref(&tcp->refcount); -} -#else -#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp)) -#define TCP_REF(tcp, reason) tcp_ref((tcp)) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { - if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); - } -} - -static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } -#endif - -static void uv_close_callback(uv_handle_t *handle) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_tcp *tcp = handle->data; - TCP_UNREF(&exec_ctx, tcp, "destroy"); - grpc_exec_ctx_finish(&exec_ctx); -} - -static grpc_slice alloc_read_slice(grpc_exec_ctx *exec_ctx, - grpc_resource_user *resource_user) { - return grpc_resource_user_slice_malloc(exec_ctx, resource_user, - GRPC_TCP_DEFAULT_READ_SLICE_SIZE); -} - -static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size, - uv_buf_t *buf) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_tcp *tcp = handle->data; - (void)suggested_size; - buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice); - buf->len = GRPC_SLICE_LENGTH(tcp->read_slice); - grpc_exec_ctx_finish(&exec_ctx); -} - -static void read_callback(uv_stream_t *stream, ssize_t nread, - const uv_buf_t *buf) { - grpc_slice sub; - grpc_error *error; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_tcp *tcp = stream->data; - 
grpc_closure *cb = tcp->read_cb; - if (nread == 0) { - // Nothing happened. Wait for the next callback - return; - } - TCP_UNREF(&exec_ctx, tcp, "read"); - tcp->read_cb = NULL; - // TODO(murgatroid99): figure out what the return value here means - uv_read_stop(stream); - if (nread == UV_EOF) { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"); - } else if (nread > 0) { - // Successful read - sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread); - grpc_slice_buffer_add(tcp->read_slices, sub); - tcp->read_slice = alloc_read_slice(&exec_ctx, tcp->resource_user); - error = GRPC_ERROR_NONE; - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - size_t i; - const char *str = grpc_error_string(error); - gpr_log(GPR_DEBUG, "read: error=%s", str); - - for (i = 0; i < tcp->read_slices->count; i++) { - char *dump = grpc_dump_slice(tcp->read_slices->slices[i], - GPR_DUMP_HEX | GPR_DUMP_ASCII); - gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, - dump); - gpr_free(dump); - } - } - } else { - // nread < 0: Error - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed"); - } - GRPC_CLOSURE_SCHED(&exec_ctx, cb, error); - grpc_exec_ctx_finish(&exec_ctx); -} - -static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *read_slices, grpc_closure *cb) { - grpc_tcp *tcp = (grpc_tcp *)ep; - int status; - grpc_error *error = GRPC_ERROR_NONE; - GRPC_UV_ASSERT_SAME_THREAD(); - GPR_ASSERT(tcp->read_cb == NULL); - tcp->read_cb = cb; - tcp->read_slices = read_slices; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices); - TCP_REF(tcp, "read"); - // TODO(murgatroid99): figure out what the return value here means - status = - uv_read_start((uv_stream_t *)tcp->handle, alloc_uv_buf, read_callback); - if (status != 0) { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed at start"); - error = - grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, - grpc_slice_from_static_string(uv_strerror(status))); - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); - } - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - const char *str = grpc_error_string(error); - gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp, str); - } -} - -static void write_callback(uv_write_t *req, int status) { - grpc_tcp *tcp = req->data; - grpc_error *error; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_closure *cb = tcp->write_cb; - tcp->write_cb = NULL; - TCP_UNREF(&exec_ctx, tcp, "write"); - if (status == 0) { - error = GRPC_ERROR_NONE; - } else { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed"); - } - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - const char *str = grpc_error_string(error); - gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str); - } - gpr_free(tcp->write_buffers); - grpc_resource_user_free(&exec_ctx, tcp->resource_user, - sizeof(uv_buf_t) * tcp->write_slices->count); - GRPC_CLOSURE_SCHED(&exec_ctx, cb, error); - grpc_exec_ctx_finish(&exec_ctx); -} - -static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *write_slices, - grpc_closure *cb) { - grpc_tcp *tcp = (grpc_tcp *)ep; - uv_buf_t *buffers; - unsigned int buffer_count; - unsigned int i; - grpc_slice *slice; - uv_write_t *write_req; - GRPC_UV_ASSERT_SAME_THREAD(); - - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - size_t j; - - for (j = 0; j < write_slices->count; j++) { - char *data = grpc_dump_slice(write_slices->slices[j], - GPR_DUMP_HEX | GPR_DUMP_ASCII); - gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data); - 
gpr_free(data); - } - } - - if (tcp->shutting_down) { - GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "TCP socket is shutting down")); - return; - } - - GPR_ASSERT(tcp->write_cb == NULL); - tcp->write_slices = write_slices; - GPR_ASSERT(tcp->write_slices->count <= UINT_MAX); - if (tcp->write_slices->count == 0) { - // No slices means we don't have to do anything, - // and libuv doesn't like empty writes - GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE); - return; - } - - tcp->write_cb = cb; - buffer_count = (unsigned int)tcp->write_slices->count; - buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count); - grpc_resource_user_alloc(exec_ctx, tcp->resource_user, - sizeof(uv_buf_t) * buffer_count, NULL); - for (i = 0; i < buffer_count; i++) { - slice = &tcp->write_slices->slices[i]; - buffers[i].base = (char *)GRPC_SLICE_START_PTR(*slice); - buffers[i].len = GRPC_SLICE_LENGTH(*slice); - } - tcp->write_buffers = buffers; - write_req = &tcp->write_req; - write_req->data = tcp; - TCP_REF(tcp, "write"); - // TODO(murgatroid99): figure out what the return value here means - uv_write(write_req, (uv_stream_t *)tcp->handle, buffers, buffer_count, - write_callback); -} - -static void uv_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *pollset) { - // No-op. We're ignoring pollsets currently - (void)exec_ctx; - (void)ep; - (void)pollset; - grpc_tcp *tcp = (grpc_tcp *)ep; - tcp->pollset = pollset; -} - -static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset_set *pollset) { - // No-op. We're ignoring pollsets currently - (void)exec_ctx; - (void)ep; - (void)pollset; -} - -static void shutdown_callback(uv_shutdown_t *req, int status) {} - -static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_error *why) { - grpc_tcp *tcp = (grpc_tcp *)ep; - if (!tcp->shutting_down) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - const char *str = grpc_error_string(why); - gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->handle, str); - } - tcp->shutting_down = true; - uv_shutdown_t *req = &tcp->shutdown_req; - uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback); - grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); - } - GRPC_ERROR_UNREF(why); -} - -static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { - grpc_network_status_unregister_endpoint(ep); - grpc_tcp *tcp = (grpc_tcp *)ep; - uv_close((uv_handle_t *)tcp->handle, uv_close_callback); -} - -static char *uv_get_peer(grpc_endpoint *ep) { - grpc_tcp *tcp = (grpc_tcp *)ep; - return gpr_strdup(tcp->peer_string); -} - -static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) { - grpc_tcp *tcp = (grpc_tcp *)ep; - return tcp->resource_user; -} - -static int uv_get_fd(grpc_endpoint *ep) { return -1; } - -static grpc_endpoint_vtable vtable = { - uv_endpoint_read, uv_endpoint_write, uv_add_to_pollset, - uv_add_to_pollset_set, uv_endpoint_shutdown, uv_destroy, - uv_get_resource_user, uv_get_peer, uv_get_fd}; - -grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, - grpc_resource_quota *resource_quota, - char *peer_string) { - grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - - if (GRPC_TRACER_ON(grpc_tcp_trace)) { - gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp); - } - - /* Disable Nagle's Algorithm */ - uv_tcp_nodelay(handle, 1); - - memset(tcp, 0, sizeof(grpc_tcp)); - tcp->base.vtable = &vtable; - tcp->handle = handle; - handle->data = tcp; - 
gpr_ref_init(&tcp->refcount, 1); - tcp->peer_string = gpr_strdup(peer_string); - tcp->shutting_down = false; - tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); - tcp->read_slice = alloc_read_slice(&exec_ctx, tcp->resource_user); - /* Tell network status tracking code about the new endpoint */ - grpc_network_status_register_endpoint(&tcp->base); - -#ifndef GRPC_UV_TCP_HOLD_LOOP - uv_unref((uv_handle_t *)handle); -#endif - - grpc_exec_ctx_finish(&exec_ctx); - return &tcp->base; -} - -#endif /* GRPC_UV */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_uv.cc b/Sources/CgRPC/src/core/lib/iomgr/tcp_uv.cc new file mode 100644 index 000000000..8d0e4a5e7 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_uv.cc @@ -0,0 +1,420 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_UV +#include +#include + +#include + +#include +#include +#include + +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/network_status_tracker.h" +#include "src/core/lib/iomgr/resolve_address_custom.h" +#include "src/core/lib/iomgr/resource_quota.h" +#include "src/core/lib/iomgr/tcp_custom.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" + +#include + +#define IGNORE_CONST(addr) ((grpc_sockaddr*)(uintptr_t)(addr)) + +typedef struct uv_socket_t { + uv_connect_t connect_req; + uv_write_t write_req; + uv_shutdown_t shutdown_req; + uv_tcp_t* handle; + uv_buf_t* write_buffers; + + char* read_buf; + size_t read_len; + + bool pending_connection; + grpc_custom_socket* accept_socket; + grpc_error* accept_error; + + grpc_custom_connect_callback connect_cb; + grpc_custom_write_callback write_cb; + grpc_custom_read_callback read_cb; + grpc_custom_accept_callback accept_cb; + grpc_custom_close_callback close_cb; + +} uv_socket_t; + +static grpc_error* tcp_error_create(const char* desc, int status) { + if (status == 0) { + return GRPC_ERROR_NONE; + } + grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(desc); + /* All tcp errors are marked with UNAVAILABLE so that application may + * choose to retry. 
*/ + error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_UNAVAILABLE); + return grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, + grpc_slice_from_static_string(uv_strerror(status))); +} + +static void uv_socket_destroy(grpc_custom_socket* socket) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + gpr_free(uv_socket->handle); + gpr_free(uv_socket); +} + +static void alloc_uv_buf(uv_handle_t* handle, size_t suggested_size, + uv_buf_t* buf) { + uv_socket_t* uv_socket = + (uv_socket_t*)((grpc_custom_socket*)handle->data)->impl; + (void)suggested_size; + buf->base = uv_socket->read_buf; + buf->len = uv_socket->read_len; +} + +static void uv_read_callback(uv_stream_t* stream, ssize_t nread, + const uv_buf_t* buf) { + grpc_error* error = GRPC_ERROR_NONE; + if (nread == 0) { + // Nothing happened. Wait for the next callback + return; + } + // TODO(murgatroid99): figure out what the return value here means + uv_read_stop(stream); + if (nread == UV_EOF) { + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"); + } else if (nread < 0) { + error = tcp_error_create("TCP Read failed", nread); + } + grpc_custom_socket* socket = (grpc_custom_socket*)stream->data; + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + uv_socket->read_cb(socket, (size_t)nread, error); +} + +static void uv_close_callback(uv_handle_t* handle) { + grpc_custom_socket* socket = (grpc_custom_socket*)handle->data; + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + if (uv_socket->accept_socket) { + uv_socket->accept_cb(socket, uv_socket->accept_socket, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("socket closed")); + } + uv_socket->close_cb(socket); +} + +static void uv_socket_read(grpc_custom_socket* socket, char* buffer, + size_t length, grpc_custom_read_callback read_cb) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + int status; + grpc_error* error; + uv_socket->read_cb = read_cb; + uv_socket->read_buf = buffer; + uv_socket->read_len = length; + // TODO(murgatroid99): figure out what the return value here means + status = + uv_read_start((uv_stream_t*)uv_socket->handle, (uv_alloc_cb)alloc_uv_buf, + (uv_read_cb)uv_read_callback); + if (status != 0) { + error = tcp_error_create("TCP Read failed at start", status); + uv_socket->read_cb(socket, 0, error); + } +} + +static void uv_write_callback(uv_write_t* req, int status) { + grpc_custom_socket* socket = (grpc_custom_socket*)req->data; + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + gpr_free(uv_socket->write_buffers); + uv_socket->write_cb(socket, tcp_error_create("TCP Write failed", status)); +} + +void uv_socket_write(grpc_custom_socket* socket, + grpc_slice_buffer* write_slices, + grpc_custom_write_callback write_cb) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + uv_socket->write_cb = write_cb; + uv_buf_t* uv_buffers; + uv_write_t* write_req; + + uv_buffers = (uv_buf_t*)gpr_malloc(sizeof(uv_buf_t) * write_slices->count); + for (size_t i = 0; i < write_slices->count; i++) { + uv_buffers[i].base = (char*)GRPC_SLICE_START_PTR(write_slices->slices[i]); + uv_buffers[i].len = GRPC_SLICE_LENGTH(write_slices->slices[i]); + } + + uv_socket->write_buffers = uv_buffers; + write_req = &uv_socket->write_req; + write_req->data = socket; + // TODO(murgatroid99): figure out what the return value here means + uv_write(write_req, (uv_stream_t*)uv_socket->handle, uv_buffers, + write_slices->count, uv_write_callback); +} + +static void shutdown_callback(uv_shutdown_t* req, int status) {} + +static void 
uv_socket_shutdown(grpc_custom_socket* socket) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + uv_shutdown_t* req = &uv_socket->shutdown_req; + uv_shutdown(req, (uv_stream_t*)uv_socket->handle, shutdown_callback); +} + +static void uv_socket_close(grpc_custom_socket* socket, + grpc_custom_close_callback close_cb) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + uv_socket->close_cb = close_cb; + uv_close((uv_handle_t*)uv_socket->handle, uv_close_callback); +} + +static grpc_error* uv_socket_init_helper(uv_socket_t* uv_socket, int domain) { + uv_tcp_t* tcp = (uv_tcp_t*)gpr_malloc(sizeof(uv_tcp_t)); + uv_socket->handle = tcp; + int status = uv_tcp_init_ex(uv_default_loop(), tcp, (unsigned int)domain); + if (status != 0) { + return tcp_error_create("Failed to initialize UV tcp handle", status); + } +#if defined(GPR_LINUX) && defined(SO_REUSEPORT) + if (domain == AF_INET || domain == AF_INET6) { + int enable = 1; + int fd; + uv_fileno((uv_handle_t*)tcp, &fd); + // TODO Handle error here. + setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable)); + } +#endif + uv_socket->write_buffers = nullptr; + uv_socket->read_len = 0; + uv_tcp_nodelay(uv_socket->handle, 1); + // Node uses a garbage collector to call destructors, so we don't + // want to hold the uv loop open with active gRPC objects. + uv_unref((uv_handle_t*)uv_socket->handle); + uv_socket->pending_connection = false; + uv_socket->accept_socket = nullptr; + uv_socket->accept_error = GRPC_ERROR_NONE; + return GRPC_ERROR_NONE; +} + +static grpc_error* uv_socket_init(grpc_custom_socket* socket, int domain) { + uv_socket_t* uv_socket = (uv_socket_t*)gpr_malloc(sizeof(uv_socket_t)); + grpc_error* error = uv_socket_init_helper(uv_socket, domain); + if (error != GRPC_ERROR_NONE) { + return error; + } + uv_socket->handle->data = socket; + socket->impl = uv_socket; + return GRPC_ERROR_NONE; +} + +static grpc_error* uv_socket_getpeername(grpc_custom_socket* socket, + const grpc_sockaddr* addr, + int* addr_len) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + int err = uv_tcp_getpeername(uv_socket->handle, + (struct sockaddr*)IGNORE_CONST(addr), addr_len); + return tcp_error_create("getpeername failed", err); +} + +static grpc_error* uv_socket_getsockname(grpc_custom_socket* socket, + const grpc_sockaddr* addr, + int* addr_len) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + int err = uv_tcp_getsockname(uv_socket->handle, + (struct sockaddr*)IGNORE_CONST(addr), addr_len); + return tcp_error_create("getsockname failed", err); +} + +static void accept_new_connection(grpc_custom_socket* socket) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + if (!uv_socket->pending_connection || !uv_socket->accept_socket) { + return; + } + grpc_custom_socket* new_socket = uv_socket->accept_socket; + grpc_error* error = uv_socket->accept_error; + uv_socket->accept_socket = nullptr; + uv_socket->accept_error = GRPC_ERROR_NONE; + uv_socket->pending_connection = false; + if (uv_socket->accept_error != GRPC_ERROR_NONE) { + uv_stream_t dummy_handle; + uv_accept((uv_stream_t*)uv_socket->handle, &dummy_handle); + uv_socket->accept_cb(socket, new_socket, error); + } else { + uv_socket_t* uv_new_socket = (uv_socket_t*)gpr_malloc(sizeof(uv_socket_t)); + uv_socket_init_helper(uv_new_socket, AF_UNSPEC); + // UV documentation says this is guaranteed to succeed + GPR_ASSERT(uv_accept((uv_stream_t*)uv_socket->handle, + (uv_stream_t*)uv_new_socket->handle) == 0); + new_socket->impl = uv_new_socket; + 
uv_new_socket->handle->data = new_socket; + uv_socket->accept_cb(socket, new_socket, error); + } +} + +static void uv_on_connect(uv_stream_t* server, int status) { + grpc_custom_socket* socket = (grpc_custom_socket*)server->data; + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + GPR_ASSERT(!uv_socket->pending_connection); + uv_socket->pending_connection = true; + if (status < 0) { + switch (status) { + case UV_EINTR: + case UV_EAGAIN: + return; + default: + uv_socket->accept_error = tcp_error_create("accept failed", status); + } + } + accept_new_connection(socket); +} + +void uv_socket_accept(grpc_custom_socket* socket, + grpc_custom_socket* new_socket, + grpc_custom_accept_callback accept_cb) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + uv_socket->accept_cb = accept_cb; + GPR_ASSERT(uv_socket->accept_socket == nullptr); + uv_socket->accept_socket = new_socket; + accept_new_connection(socket); +} + +static grpc_error* uv_socket_bind(grpc_custom_socket* socket, + const grpc_sockaddr* addr, size_t len, + int flags) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + int status = + uv_tcp_bind((uv_tcp_t*)uv_socket->handle, (struct sockaddr*)addr, 0); + return tcp_error_create("Failed to bind to port", status); +} + +static grpc_error* uv_socket_listen(grpc_custom_socket* socket) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + int status = + uv_listen((uv_stream_t*)uv_socket->handle, SOMAXCONN, uv_on_connect); + return tcp_error_create("Failed to listen to port", status); +} + +static void uv_tc_on_connect(uv_connect_t* req, int status) { + grpc_custom_socket* socket = (grpc_custom_socket*)req->data; + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + grpc_error* error; + if (status == UV_ECANCELED) { + // This should only happen if the handle is already closed + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timeout occurred"); + } else { + error = tcp_error_create("Failed to connect to remote host", status); + } + uv_socket->connect_cb(socket, error); +} + +static void uv_socket_connect(grpc_custom_socket* socket, + const grpc_sockaddr* addr, size_t len, + grpc_custom_connect_callback connect_cb) { + uv_socket_t* uv_socket = (uv_socket_t*)socket->impl; + uv_socket->connect_cb = connect_cb; + uv_socket->connect_req.data = socket; + int status = uv_tcp_connect(&uv_socket->connect_req, uv_socket->handle, + (struct sockaddr*)addr, uv_tc_on_connect); + if (status != 0) { + // The callback will not be called + uv_socket->connect_cb(socket, tcp_error_create("connect failed", status)); + } +} + +static grpc_resolved_addresses* handle_addrinfo_result( + struct addrinfo* result) { + struct addrinfo* resp; + size_t i; + grpc_resolved_addresses* addresses = + (grpc_resolved_addresses*)gpr_malloc(sizeof(grpc_resolved_addresses)); + addresses->naddrs = 0; + for (resp = result; resp != nullptr; resp = resp->ai_next) { + addresses->naddrs++; + } + addresses->addrs = (grpc_resolved_address*)gpr_malloc( + sizeof(grpc_resolved_address) * addresses->naddrs); + for (resp = result, i = 0; resp != nullptr; resp = resp->ai_next, i++) { + memcpy(&addresses->addrs[i].addr, resp->ai_addr, resp->ai_addrlen); + addresses->addrs[i].len = resp->ai_addrlen; + } + // addrinfo objects are allocated by libuv (e.g. 
in uv_getaddrinfo) + // and not by gpr_malloc + uv_freeaddrinfo(result); + return addresses; +} + +static void uv_resolve_callback(uv_getaddrinfo_t* req, int status, + struct addrinfo* res) { + grpc_custom_resolver* r = (grpc_custom_resolver*)req->data; + gpr_free(req); + grpc_resolved_addresses* result = nullptr; + if (status == 0) { + result = handle_addrinfo_result(res); + } + grpc_custom_resolve_callback(r, result, + tcp_error_create("getaddrinfo failed", status)); +} + +static grpc_error* uv_resolve(char* host, char* port, + grpc_resolved_addresses** result) { + int status; + uv_getaddrinfo_t req; + struct addrinfo hints; + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; /* ipv4 or ipv6 */ + hints.ai_socktype = SOCK_STREAM; /* stream socket */ + hints.ai_flags = AI_PASSIVE; /* for wildcard IP address */ + status = uv_getaddrinfo(uv_default_loop(), &req, NULL, host, port, &hints); + if (status != 0) { + *result = nullptr; + } else { + *result = handle_addrinfo_result(req.addrinfo); + } + return tcp_error_create("getaddrinfo failed", status); +} + +static void uv_resolve_async(grpc_custom_resolver* r, char* host, char* port) { + int status; + uv_getaddrinfo_t* req = + (uv_getaddrinfo_t*)gpr_malloc(sizeof(uv_getaddrinfo_t)); + req->data = r; + struct addrinfo hints; + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = GRPC_AF_UNSPEC; /* ipv4 or ipv6 */ + hints.ai_socktype = GRPC_SOCK_STREAM; /* stream socket */ + hints.ai_flags = GRPC_AI_PASSIVE; /* for wildcard IP address */ + status = uv_getaddrinfo(uv_default_loop(), req, uv_resolve_callback, host, + port, &hints); + if (status != 0) { + gpr_free(req); + grpc_error* error = tcp_error_create("getaddrinfo failed", status); + grpc_custom_resolve_callback(r, NULL, error); + } +} + +grpc_custom_resolver_vtable uv_resolver_vtable = {uv_resolve, uv_resolve_async}; + +grpc_socket_vtable grpc_uv_socket_vtable = { + uv_socket_init, uv_socket_connect, uv_socket_destroy, + uv_socket_shutdown, uv_socket_close, uv_socket_write, + uv_socket_read, uv_socket_getpeername, uv_socket_getsockname, + uv_socket_bind, uv_socket_listen, uv_socket_accept}; + +#endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_uv.h b/Sources/CgRPC/src/core/lib/iomgr/tcp_uv.h deleted file mode 100644 index 0e67481d3..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_uv.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_IOMGR_TCP_UV_H -#define GRPC_CORE_LIB_IOMGR_TCP_UV_H -/* - Low level TCP "bottom half" implementation, for use by transports built on - top of a TCP connection. - - Note that this file does not (yet) include APIs for creating the socket in - the first place. - - All calls passing slice transfer ownership of a slice refcount unless - otherwise specified. 
-*/ - -#include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/endpoint.h" - -#include - -extern grpc_tracer_flag grpc_tcp_trace; - -#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192 - -grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, - grpc_resource_quota *resource_quota, - char *peer_string); - -#endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_windows.c b/Sources/CgRPC/src/core/lib/iomgr/tcp_windows.cc similarity index 63% rename from Sources/CgRPC/src/core/lib/iomgr/tcp_windows.c rename to Sources/CgRPC/src/core/lib/iomgr/tcp_windows.cc index 9b634a2a1..5d316d477 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_windows.c +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_windows.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_WINSOCK_SOCKET @@ -30,13 +32,14 @@ #include #include #include -#include +#include "src/core/lib/gpr/useful.h" #include "src/core/lib/iomgr/iocp_windows.h" #include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/socket_windows.h" #include "src/core/lib/iomgr/tcp_client.h" +#include "src/core/lib/iomgr/tcp_windows.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/slice/slice_internal.h" @@ -48,9 +51,9 @@ #define GRPC_FIONBIO FIONBIO #endif -grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false, "tcp"); +extern grpc_core::TraceFlag grpc_tcp_trace; -static grpc_error *set_non_block(SOCKET sock) { +static grpc_error* set_non_block(SOCKET sock) { int status; uint32_t param = 1; DWORD ret; @@ -61,22 +64,38 @@ static grpc_error *set_non_block(SOCKET sock) { : GRPC_WSA_ERROR(WSAGetLastError(), "WSAIoctl(GRPC_FIONBIO)"); } -static grpc_error *set_dualstack(SOCKET sock) { +static grpc_error* set_dualstack(SOCKET sock) { int status; unsigned long param = 0; - status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)¶m, + status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)¶m, sizeof(param)); return status == 0 ? GRPC_ERROR_NONE : GRPC_WSA_ERROR(WSAGetLastError(), "setsockopt(IPV6_V6ONLY)"); } -grpc_error *grpc_tcp_prepare_socket(SOCKET sock) { - grpc_error *err; +static grpc_error* enable_loopback_fast_path(SOCKET sock) { + int status; + uint32_t param = 1; + DWORD ret; + status = WSAIoctl(sock, /*SIO_LOOPBACK_FAST_PATH==*/_WSAIOW(IOC_VENDOR, 16), + ¶m, sizeof(param), NULL, 0, &ret, 0, 0); + if (status == SOCKET_ERROR) { + status = WSAGetLastError(); + } + return status == 0 || status == WSAEOPNOTSUPP + ? GRPC_ERROR_NONE + : GRPC_WSA_ERROR(status, "WSAIoctl(SIO_LOOPBACK_FAST_PATH)"); +} + +grpc_error* grpc_tcp_prepare_socket(SOCKET sock) { + grpc_error* err; err = set_non_block(sock); if (err != GRPC_ERROR_NONE) return err; err = set_dualstack(sock); if (err != GRPC_ERROR_NONE) return err; + err = enable_loopback_fast_path(sock); + if (err != GRPC_ERROR_NONE) return err; return GRPC_ERROR_NONE; } @@ -84,59 +103,58 @@ typedef struct grpc_tcp { /* This is our C++ class derivation emulation. */ grpc_endpoint base; /* The one socket this endpoint is using. */ - grpc_winsocket *socket; + grpc_winsocket* socket; /* Refcounting how many operations are in progress. 
*/ gpr_refcount refcount; grpc_closure on_read; grpc_closure on_write; - grpc_closure *read_cb; - grpc_closure *write_cb; + grpc_closure* read_cb; + grpc_closure* write_cb; grpc_slice read_slice; - grpc_slice_buffer *write_slices; - grpc_slice_buffer *read_slices; + grpc_slice_buffer* write_slices; + grpc_slice_buffer* read_slices; - grpc_resource_user *resource_user; + grpc_resource_user* resource_user; /* The IO Completion Port runs from another thread. We need some mechanism to protect ourselves when requesting a shutdown. */ gpr_mu mu; int shutting_down; - grpc_error *shutdown_error; + grpc_error* shutdown_error; - char *peer_string; + char* peer_string; } grpc_tcp; -static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void tcp_free(grpc_tcp* tcp) { grpc_winsocket_destroy(tcp->socket); gpr_mu_destroy(&tcp->mu); gpr_free(tcp->peer_string); - grpc_resource_user_unref(exec_ctx, tcp->resource_user); + grpc_resource_user_unref(tcp->resource_user); if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error); gpr_free(tcp); } #ifndef NDEBUG -#define TCP_UNREF(exec_ctx, tcp, reason) \ - tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__) +#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp, - const char *reason, const char *file, int line) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { +static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file, + int line) { + if (grpc_tcp_trace.enabled()) { gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val, val - 1); } if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); + tcp_free(tcp); } } -static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, +static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file, int line) { - if (GRPC_TRACER_ON(grpc_tcp_trace)) { + if (grpc_tcp_trace.enabled()) { gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val, @@ -145,39 +163,39 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, gpr_ref(&tcp->refcount); } #else -#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp)) +#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) #define TCP_REF(tcp, reason) tcp_ref((tcp)) -static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) { +static void tcp_unref(grpc_tcp* tcp) { if (gpr_unref(&tcp->refcount)) { - tcp_free(exec_ctx, tcp); + tcp_free(tcp); } } -static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } +static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); } #endif /* Asynchronous callback from the IOCP, or the background thread. 
*/ -static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { - grpc_tcp *tcp = tcpp; - grpc_closure *cb = tcp->read_cb; - grpc_winsocket *socket = tcp->socket; +static void on_read(void* tcpp, grpc_error* error) { + grpc_tcp* tcp = (grpc_tcp*)tcpp; + grpc_closure* cb = tcp->read_cb; + grpc_winsocket* socket = tcp->socket; grpc_slice sub; - grpc_winsocket_callback_info *info = &socket->read_info; + grpc_winsocket_callback_info* info = &socket->read_info; GRPC_ERROR_REF(error); if (error == GRPC_ERROR_NONE) { if (info->wsa_error != 0 && !tcp->shutting_down) { - char *utf8_message = gpr_format_message(info->wsa_error); + char* utf8_message = gpr_format_message(info->wsa_error); error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message); gpr_free(utf8_message); - grpc_slice_unref_internal(exec_ctx, tcp->read_slice); + grpc_slice_unref_internal(tcp->read_slice); } else { if (info->bytes_transfered != 0 && !tcp->shutting_down) { sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered); grpc_slice_buffer_add(tcp->read_slices, sub); } else { - grpc_slice_unref_internal(exec_ctx, tcp->read_slice); + grpc_slice_unref_internal(tcp->read_slice); error = tcp->shutting_down ? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "TCP stream shutting down", &tcp->shutdown_error, 1) @@ -187,15 +205,15 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { } tcp->read_cb = NULL; - TCP_UNREF(exec_ctx, tcp, "read"); - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); + TCP_UNREF(tcp, "read"); + GRPC_CLOSURE_SCHED(cb, error); } -static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *read_slices, grpc_closure *cb) { - grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_winsocket *handle = tcp->socket; - grpc_winsocket_callback_info *info = &handle->read_info; +static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices, + grpc_closure* cb) { + grpc_tcp* tcp = (grpc_tcp*)ep; + grpc_winsocket* handle = tcp->socket; + grpc_winsocket_callback_info* info = &handle->read_info; int status; DWORD bytes_read = 0; DWORD flags = 0; @@ -203,21 +221,20 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (tcp->shutting_down) { GRPC_CLOSURE_SCHED( - exec_ctx, cb, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "TCP socket is shutting down", &tcp->shutdown_error, 1)); + cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "TCP socket is shutting down", &tcp->shutdown_error, 1)); return; } tcp->read_cb = cb; tcp->read_slices = read_slices; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices); + grpc_slice_buffer_reset_and_unref_internal(read_slices); tcp->read_slice = GRPC_SLICE_MALLOC(8192); buffer.len = (ULONG)GRPC_SLICE_LENGTH( tcp->read_slice); // we know slice size fits in 32bit. - buffer.buf = (char *)GRPC_SLICE_START_PTR(tcp->read_slice); + buffer.buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slice); TCP_REF(tcp, "read"); @@ -229,7 +246,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, /* Did we get data immediately ? Yay. 
*/ if (info->wsa_error != WSAEWOULDBLOCK) { info->bytes_transfered = bytes_read; - GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&tcp->on_read, GRPC_ERROR_NONE); return; } @@ -242,21 +259,21 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { info->wsa_error = wsa_error; - GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, + GRPC_CLOSURE_SCHED(&tcp->on_read, GRPC_WSA_ERROR(info->wsa_error, "WSARecv")); return; } } - grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read); + grpc_socket_notify_on_read(tcp->socket, &tcp->on_read); } /* Asynchronous callback from the IOCP, or the background thread. */ -static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { - grpc_tcp *tcp = (grpc_tcp *)tcpp; - grpc_winsocket *handle = tcp->socket; - grpc_winsocket_callback_info *info = &handle->write_info; - grpc_closure *cb; +static void on_write(void* tcpp, grpc_error* error) { + grpc_tcp* tcp = (grpc_tcp*)tcpp; + grpc_winsocket* handle = tcp->socket; + grpc_winsocket_callback_info* info = &handle->write_info; + grpc_closure* cb; GRPC_ERROR_REF(error); @@ -273,29 +290,28 @@ static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) { } } - TCP_UNREF(exec_ctx, tcp, "write"); - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); + TCP_UNREF(tcp, "write"); + GRPC_CLOSURE_SCHED(cb, error); } /* Initiates a write. */ -static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_slice_buffer *slices, grpc_closure *cb) { - grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_winsocket *socket = tcp->socket; - grpc_winsocket_callback_info *info = &socket->write_info; +static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices, + grpc_closure* cb) { + grpc_tcp* tcp = (grpc_tcp*)ep; + grpc_winsocket* socket = tcp->socket; + grpc_winsocket_callback_info* info = &socket->write_info; unsigned i; DWORD bytes_sent; int status; WSABUF local_buffers[16]; - WSABUF *allocated = NULL; - WSABUF *buffers = local_buffers; + WSABUF* allocated = NULL; + WSABUF* buffers = local_buffers; size_t len; if (tcp->shutting_down) { GRPC_CLOSURE_SCHED( - exec_ctx, cb, - GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "TCP socket is shutting down", &tcp->shutdown_error, 1)); + cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "TCP socket is shutting down", &tcp->shutdown_error, 1)); return; } @@ -303,7 +319,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, tcp->write_slices = slices; GPR_ASSERT(tcp->write_slices->count <= UINT_MAX); if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) { - buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count); + buffers = (WSABUF*)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count); allocated = buffers; } @@ -311,7 +327,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, len = GRPC_SLICE_LENGTH(tcp->write_slices->slices[i]); GPR_ASSERT(len <= ULONG_MAX); buffers[i].len = (ULONG)len; - buffers[i].buf = (char *)GRPC_SLICE_START_PTR(tcp->write_slices->slices[i]); + buffers[i].buf = (char*)GRPC_SLICE_START_PTR(tcp->write_slices->slices[i]); } /* First, let's try a synchronous, non-blocking write. */ @@ -323,10 +339,10 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, connection that has its send queue filled up. But if we don't, then we can avoid doing an async write operation at all. 
*/ if (info->wsa_error != WSAEWOULDBLOCK) { - grpc_error *error = status == 0 + grpc_error* error = status == 0 ? GRPC_ERROR_NONE : GRPC_WSA_ERROR(info->wsa_error, "WSASend"); - GRPC_CLOSURE_SCHED(exec_ctx, cb, error); + GRPC_CLOSURE_SCHED(cb, error); if (allocated) gpr_free(allocated); return; } @@ -343,42 +359,42 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, if (status != 0) { int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { - TCP_UNREF(exec_ctx, tcp, "write"); - GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend")); + TCP_UNREF(tcp, "write"); + GRPC_CLOSURE_SCHED(cb, GRPC_WSA_ERROR(wsa_error, "WSASend")); return; } } /* As all is now setup, we can now ask for the IOCP notification. It may trigger the callback immediately however, but no matter. */ - grpc_socket_notify_on_write(exec_ctx, socket, &tcp->on_write); + grpc_socket_notify_on_write(socket, &tcp->on_write); } -static void win_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset *ps) { - grpc_tcp *tcp; +static void win_add_to_pollset(grpc_endpoint* ep, grpc_pollset* ps) { + grpc_tcp* tcp; (void)ps; - tcp = (grpc_tcp *)ep; + tcp = (grpc_tcp*)ep; grpc_iocp_add_socket(tcp->socket); } -static void win_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_pollset_set *pss) { - grpc_tcp *tcp; +static void win_add_to_pollset_set(grpc_endpoint* ep, grpc_pollset_set* pss) { + grpc_tcp* tcp; (void)pss; - tcp = (grpc_tcp *)ep; + tcp = (grpc_tcp*)ep; grpc_iocp_add_socket(tcp->socket); } +static void win_delete_from_pollset_set(grpc_endpoint* ep, + grpc_pollset_set* pss) {} + /* Initiates a shutdown of the TCP endpoint. This will queue abort callbacks for the potential read and write operations. It is up to the caller to guarantee this isn't called in parallel to a read or write request, so we're not going to protect against these. However the IO Completion Port callback will happen from another thread, so we need to protect against concurrent access of the data structure in that regard. */ -static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, - grpc_error *why) { - grpc_tcp *tcp = (grpc_tcp *)ep; +static void win_shutdown(grpc_endpoint* ep, grpc_error* why) { + grpc_tcp* tcp = (grpc_tcp*)ep; gpr_mu_lock(&tcp->mu); /* At that point, what may happen is that we're already inside the IOCP callback. See the comments in on_read and on_write. 
*/ @@ -390,46 +406,52 @@ static void win_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, } grpc_winsocket_shutdown(tcp->socket); gpr_mu_unlock(&tcp->mu); - grpc_resource_user_shutdown(exec_ctx, tcp->resource_user); + grpc_resource_user_shutdown(tcp->resource_user); } -static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) { +static void win_destroy(grpc_endpoint* ep) { grpc_network_status_unregister_endpoint(ep); - grpc_tcp *tcp = (grpc_tcp *)ep; - TCP_UNREF(exec_ctx, tcp, "destroy"); + grpc_tcp* tcp = (grpc_tcp*)ep; + TCP_UNREF(tcp, "destroy"); } -static char *win_get_peer(grpc_endpoint *ep) { - grpc_tcp *tcp = (grpc_tcp *)ep; +static char* win_get_peer(grpc_endpoint* ep) { + grpc_tcp* tcp = (grpc_tcp*)ep; return gpr_strdup(tcp->peer_string); } -static grpc_resource_user *win_get_resource_user(grpc_endpoint *ep) { - grpc_tcp *tcp = (grpc_tcp *)ep; +static grpc_resource_user* win_get_resource_user(grpc_endpoint* ep) { + grpc_tcp* tcp = (grpc_tcp*)ep; return tcp->resource_user; } -static int win_get_fd(grpc_endpoint *ep) { return -1; } - -static grpc_endpoint_vtable vtable = { - win_read, win_write, win_add_to_pollset, win_add_to_pollset_set, - win_shutdown, win_destroy, win_get_resource_user, win_get_peer, - win_get_fd}; - -grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, - grpc_channel_args *channel_args, - char *peer_string) { - grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL); +static int win_get_fd(grpc_endpoint* ep) { return -1; } + +static grpc_endpoint_vtable vtable = {win_read, + win_write, + win_add_to_pollset, + win_add_to_pollset_set, + win_delete_from_pollset_set, + win_shutdown, + win_destroy, + win_get_resource_user, + win_get_peer, + win_get_fd}; + +grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket, + grpc_channel_args* channel_args, + const char* peer_string) { + grpc_resource_quota* resource_quota = grpc_resource_quota_create(NULL); if (channel_args != NULL) { for (size_t i = 0; i < channel_args->num_args; i++) { if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); resource_quota = grpc_resource_quota_ref_internal( - channel_args->args[i].value.pointer.p); + (grpc_resource_quota*)channel_args->args[i].value.pointer.p); } } } - grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp)); + grpc_tcp* tcp = (grpc_tcp*)gpr_malloc(sizeof(grpc_tcp)); memset(tcp, 0, sizeof(grpc_tcp)); tcp->base.vtable = &vtable; tcp->socket = socket; @@ -441,7 +463,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string); /* Tell network status tracking code about the new endpoint */ grpc_network_status_register_endpoint(&tcp->base); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); return &tcp->base; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/tcp_windows.h b/Sources/CgRPC/src/core/lib/iomgr/tcp_windows.h index 864184ce8..161a545a2 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/tcp_windows.h +++ b/Sources/CgRPC/src/core/lib/iomgr/tcp_windows.h @@ -29,16 +29,23 @@ otherwise specified. */ +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_WINSOCK_SOCKET #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/socket_windows.h" /* Create a tcp endpoint given a winsock handle. 
* Takes ownership of the handle. */ -grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket, - grpc_channel_args *channel_args, - char *peer_string); +grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket, + grpc_channel_args* channel_args, + const char* peer_string); + +grpc_error* grpc_tcp_prepare_socket(SOCKET sock); -grpc_error *grpc_tcp_prepare_socket(SOCKET sock); +#endif #endif /* GRPC_CORE_LIB_IOMGR_TCP_WINDOWS_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/time_averaged_stats.c b/Sources/CgRPC/src/core/lib/iomgr/time_averaged_stats.cc similarity index 98% rename from Sources/CgRPC/src/core/lib/iomgr/time_averaged_stats.c rename to Sources/CgRPC/src/core/lib/iomgr/time_averaged_stats.cc index 3bddec04d..6369e48db 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/time_averaged_stats.c +++ b/Sources/CgRPC/src/core/lib/iomgr/time_averaged_stats.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/time_averaged_stats.h" void grpc_time_averaged_stats_init(grpc_time_averaged_stats* stats, diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer.cc b/Sources/CgRPC/src/core/lib/iomgr/timer.cc new file mode 100644 index 000000000..e647cdefa --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/timer.cc @@ -0,0 +1,45 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/timer_manager.h" + +grpc_timer_vtable* grpc_timer_impl; + +void grpc_set_timer_impl(grpc_timer_vtable* vtable) { + grpc_timer_impl = vtable; +} + +void grpc_timer_init(grpc_timer* timer, grpc_millis deadline, + grpc_closure* closure) { + grpc_timer_impl->init(timer, deadline, closure); +} + +void grpc_timer_cancel(grpc_timer* timer) { grpc_timer_impl->cancel(timer); } + +grpc_timer_check_result grpc_timer_check(grpc_millis* next) { + return grpc_timer_impl->check(next); +} + +void grpc_timer_list_init() { grpc_timer_impl->list_init(); } + +void grpc_timer_list_shutdown() { grpc_timer_impl->list_shutdown(); } + +void grpc_timer_consume_kick() { grpc_timer_impl->consume_kick(); } diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer.h b/Sources/CgRPC/src/core/lib/iomgr/timer.h index ac392f87f..5ff10d3ae 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/timer.h +++ b/Sources/CgRPC/src/core/lib/iomgr/timer.h @@ -19,20 +19,45 @@ #ifndef GRPC_CORE_LIB_IOMGR_TIMER_H #define GRPC_CORE_LIB_IOMGR_TIMER_H -#include "src/core/lib/iomgr/port.h" +#include -#ifdef GRPC_UV -#include "src/core/lib/iomgr/timer_uv.h" -#else -#include "src/core/lib/iomgr/timer_generic.h" -#endif /* GRPC_UV */ +#include "src/core/lib/iomgr/port.h" -#include #include #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/iomgr/iomgr.h" -typedef struct grpc_timer grpc_timer; +typedef struct grpc_timer { + gpr_atm deadline; + uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */ + bool pending; + struct grpc_timer* next; + struct grpc_timer* prev; + grpc_closure* closure; +#ifndef NDEBUG + struct grpc_timer* hash_table_next; +#endif + + // Optional field used by custom timers + void* custom_timer; +} grpc_timer; + +typedef enum { + GRPC_TIMERS_NOT_CHECKED, + GRPC_TIMERS_CHECKED_AND_EMPTY, + GRPC_TIMERS_FIRED, +} grpc_timer_check_result; + +typedef struct grpc_timer_vtable { + void (*init)(grpc_timer* timer, grpc_millis, grpc_closure* closure); + void (*cancel)(grpc_timer* timer); + + /* Internal API */ + grpc_timer_check_result (*check)(grpc_millis* next); + void (*list_init)(); + void (*list_shutdown)(void); + void (*consume_kick)(void); +} grpc_timer_vtable; /* Initialize *timer. When expired or canceled, closure will be called with error set to indicate if it expired (GRPC_ERROR_NONE) or was canceled @@ -40,13 +65,12 @@ typedef struct grpc_timer grpc_timer; application code should check the error to determine how it was invoked. The application callback is also responsible for maintaining information about when to free up any user-level state. */ -void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, - gpr_timespec deadline, grpc_closure *closure, - gpr_timespec now); +void grpc_timer_init(grpc_timer* timer, grpc_millis deadline, + grpc_closure* closure); /* Initialize *timer without setting it. This can later be passed through the regular init or cancel */ -void grpc_timer_init_unset(grpc_timer *timer); +void grpc_timer_init_unset(grpc_timer* timer); /* Note that there is no timer destroy function. This is because the timer is a one-time occurrence with a guarantee that the callback will @@ -74,16 +98,10 @@ void grpc_timer_init_unset(grpc_timer *timer); matches this aim. 
Requires: cancel() must happen after init() on a given timer */ -void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer); +void grpc_timer_cancel(grpc_timer* timer); /* iomgr internal api for dealing with timers */ -typedef enum { - GRPC_TIMERS_NOT_CHECKED, - GRPC_TIMERS_CHECKED_AND_EMPTY, - GRPC_TIMERS_FIRED, -} grpc_timer_check_result; - /* Check for timers to be run, and run them. Return true if timer callbacks were executed. If next is non-null, TRY to update *next with the next running timer @@ -91,16 +109,17 @@ typedef enum { *next is never guaranteed to be updated on any given execution; however, with high probability at least one thread in the system will see an update at any time slice. */ -grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, - gpr_timespec now, gpr_timespec *next); -void grpc_timer_list_init(gpr_timespec now); -void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx); +grpc_timer_check_result grpc_timer_check(grpc_millis* next); +void grpc_timer_list_init(); +void grpc_timer_list_shutdown(); /* Consume a kick issued by grpc_kick_poller */ void grpc_timer_consume_kick(void); /* the following must be implemented by each iomgr implementation */ - void grpc_kick_poller(void); +/* Sets the timer implementation */ +void grpc_set_timer_impl(grpc_timer_vtable* vtable); + #endif /* GRPC_CORE_LIB_IOMGR_TIMER_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_custom.cc b/Sources/CgRPC/src/core/lib/iomgr/timer_custom.cc new file mode 100644 index 000000000..71d825ff9 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/timer_custom.cc @@ -0,0 +1,93 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#include +#include + +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/timer_custom.h" + +static grpc_custom_timer_vtable* custom_timer_impl; + +void grpc_custom_timer_callback(grpc_custom_timer* t, grpc_error* error) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + grpc_core::ExecCtx exec_ctx; + grpc_timer* timer = t->original; + GPR_ASSERT(timer->pending); + timer->pending = 0; + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE); + custom_timer_impl->stop(t); + gpr_free(t); +} + +static void timer_init(grpc_timer* timer, grpc_millis deadline, + grpc_closure* closure) { + uint64_t timeout; + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + if (deadline <= grpc_core::ExecCtx::Get()->Now()) { + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); + timer->pending = false; + return; + } else { + timeout = deadline - now; + } + timer->pending = true; + timer->closure = closure; + grpc_custom_timer* timer_wrapper = + (grpc_custom_timer*)gpr_malloc(sizeof(grpc_custom_timer)); + timer_wrapper->timeout_ms = timeout; + timer->custom_timer = (void*)timer_wrapper; + timer_wrapper->original = timer; + custom_timer_impl->start(timer_wrapper); +} + +static void timer_cancel(grpc_timer* timer) { + GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); + grpc_custom_timer* tw = (grpc_custom_timer*)timer->custom_timer; + if (timer->pending) { + timer->pending = 0; + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED); + custom_timer_impl->stop(tw); + gpr_free(tw); + } +} + +static grpc_timer_check_result timer_check(grpc_millis* next) { + return GRPC_TIMERS_NOT_CHECKED; +} + +static void timer_list_init() {} +static void timer_list_shutdown() {} + +static void timer_consume_kick(void) {} + +static grpc_timer_vtable custom_timer_vtable = { + timer_init, timer_cancel, timer_check, + timer_list_init, timer_list_shutdown, timer_consume_kick}; + +void grpc_custom_timer_init(grpc_custom_timer_vtable* impl) { + custom_timer_impl = impl; + grpc_set_timer_impl(&custom_timer_vtable); +} diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_custom.h b/Sources/CgRPC/src/core/lib/iomgr/timer_custom.h new file mode 100644 index 000000000..bfea8bafa --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/timer_custom.h @@ -0,0 +1,43 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_TIMER_CUSTOM_H +#define GRPC_CORE_LIB_IOMGR_TIMER_CUSTOM_H + +#include + +#include "src/core/lib/iomgr/timer.h" + +typedef struct grpc_custom_timer { + // Implementation defined + void* timer; + uint64_t timeout_ms; + + grpc_timer* original; +} grpc_custom_timer; + +typedef struct grpc_custom_timer_vtable { + void (*start)(grpc_custom_timer* t); + void (*stop)(grpc_custom_timer* t); +} grpc_custom_timer_vtable; + +void grpc_custom_timer_init(grpc_custom_timer_vtable* impl); + +void grpc_custom_timer_callback(grpc_custom_timer* t, grpc_error* error); + +#endif /* GRPC_CORE_LIB_IOMGR_TIMER_CUSTOM_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_generic.c b/Sources/CgRPC/src/core/lib/iomgr/timer_generic.cc similarity index 62% rename from Sources/CgRPC/src/core/lib/iomgr/timer_generic.c rename to Sources/CgRPC/src/core/lib/iomgr/timer_generic.cc index 2472cf26b..de2256f7c 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/timer_generic.c +++ b/Sources/CgRPC/src/core/lib/iomgr/timer_generic.cc @@ -16,34 +16,35 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" -#ifdef GRPC_TIMER_USE_GENERIC +#include #include "src/core/lib/iomgr/timer.h" #include +#include #include #include #include -#include -#include + #include "src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/spinlock.h" +#include "src/core/lib/gpr/tls.h" +#include "src/core/lib/gpr/useful.h" #include "src/core/lib/iomgr/time_averaged_stats.h" #include "src/core/lib/iomgr/timer_heap.h" -#include "src/core/lib/support/spinlock.h" #define INVALID_HEAP_INDEX 0xffffffffu -#define LOG2_NUM_SHARDS 5 -#define NUM_SHARDS (1 << LOG2_NUM_SHARDS) #define ADD_DEADLINE_SCALE 0.33 #define MIN_QUEUE_WINDOW_DURATION 0.01 #define MAX_QUEUE_WINDOW_DURATION 1 -grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false, "timer"); -grpc_tracer_flag grpc_timer_check_trace = - GRPC_TRACER_INITIALIZER(false, "timer_check"); +grpc_core::TraceFlag grpc_timer_trace(false, "timer"); +grpc_core::TraceFlag grpc_timer_check_trace(false, "timer_check"); /* A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with * deadlines earlier than 'queue_deadline" cap are maintained in the heap and @@ -70,14 +71,16 @@ typedef struct { grpc_timer list; } timer_shard; +static size_t g_num_shards; + /* Array of timer shards. Whenever a timer (grpc_timer *) is added, its address * is hashed to select the timer shard to add the timer to */ -static timer_shard g_shards[NUM_SHARDS]; +static timer_shard* g_shards; /* Maintains a sorted list of timer shards (sorted by their min_deadline, i.e * the deadline of the next timer in each shard). 
* Access to this is protected by g_shared_mutables.mu */ -static timer_shard *g_shard_queue[NUM_SHARDS]; +static timer_shard** g_shard_queue; #ifndef NDEBUG @@ -86,7 +89,7 @@ static timer_shard *g_shard_queue[NUM_SHARDS]; #define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */ static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */ -static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] = {NULL}; +static grpc_timer* g_timer_ht[NUM_HASH_BUCKETS] = {nullptr}; static void init_timer_ht() { for (int i = 0; i < NUM_HASH_BUCKETS; i++) { @@ -94,12 +97,18 @@ static void init_timer_ht() { } } -static bool is_in_ht(grpc_timer *t) { +static void destroy_timer_ht() { + for (int i = 0; i < NUM_HASH_BUCKETS; i++) { + gpr_mu_destroy(&g_hash_mu[i]); + } +} + +static bool is_in_ht(grpc_timer* t) { size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); gpr_mu_lock(&g_hash_mu[i]); - grpc_timer *p = g_timer_ht[i]; - while (p != NULL && p != t) { + grpc_timer* p = g_timer_ht[i]; + while (p != nullptr && p != t) { p = p->hash_table_next; } gpr_mu_unlock(&g_hash_mu[i]); @@ -107,18 +116,18 @@ static bool is_in_ht(grpc_timer *t) { return (p == t); } -static void add_to_ht(grpc_timer *t) { +static void add_to_ht(grpc_timer* t) { GPR_ASSERT(!t->hash_table_next); size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); gpr_mu_lock(&g_hash_mu[i]); - grpc_timer *p = g_timer_ht[i]; - while (p != NULL && p != t) { + grpc_timer* p = g_timer_ht[i]; + while (p != nullptr && p != t) { p = p->hash_table_next; } if (p == t) { - grpc_closure *c = t->closure; + grpc_closure* c = t->closure; gpr_log(GPR_ERROR, "** Duplicate timer (%p) being added. Closure: (%p), created at: " "(%s:%d), scheduled at: (%s:%d) **", @@ -133,7 +142,7 @@ static void add_to_ht(grpc_timer *t) { gpr_mu_unlock(&g_hash_mu[i]); } -static void remove_from_ht(grpc_timer *t) { +static void remove_from_ht(grpc_timer* t) { size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); bool removed = false; @@ -141,9 +150,9 @@ static void remove_from_ht(grpc_timer *t) { if (g_timer_ht[i] == t) { g_timer_ht[i] = g_timer_ht[i]->hash_table_next; removed = true; - } else if (g_timer_ht[i] != NULL) { - grpc_timer *p = g_timer_ht[i]; - while (p->hash_table_next != NULL && p->hash_table_next != t) { + } else if (g_timer_ht[i] != nullptr) { + grpc_timer* p = g_timer_ht[i]; + while (p->hash_table_next != nullptr && p->hash_table_next != t) { p = p->hash_table_next; } @@ -155,7 +164,7 @@ static void remove_from_ht(grpc_timer *t) { gpr_mu_unlock(&g_hash_mu[i]); if (!removed) { - grpc_closure *c = t->closure; + grpc_closure* c = t->closure; gpr_log(GPR_ERROR, "** Removing timer (%p) that is not added to hash table. Closure " "(%p), created at: (%s:%d), scheduled at: (%s:%d) **", @@ -164,16 +173,16 @@ static void remove_from_ht(grpc_timer *t) { abort(); } - t->hash_table_next = NULL; + t->hash_table_next = nullptr; } /* If a timer is added to a timer shard (either heap or a list), it cannot * be pending. A timer is added to hash table only-if it is added to the * timer shard. * Therefore, if timer->pending is false, it cannot be in hash table */ -static void validate_non_pending_timer(grpc_timer *t) { +static void validate_non_pending_timer(grpc_timer* t) { if (!t->pending && is_in_ht(t)) { - grpc_closure *c = t->closure; + grpc_closure* c = t->closure; gpr_log(GPR_ERROR, "** gpr_timer_cancel() called on a non-pending timer (%p) which " "is in the hash table. 
Closure: (%p), created at: (%s:%d), " @@ -185,6 +194,7 @@ static void validate_non_pending_timer(grpc_timer *t) { } #define INIT_TIMER_HASH_TABLE() init_timer_ht() +#define DESTROY_TIMER_HASH_TABLE() destroy_timer_ht() #define ADD_TO_HASH_TABLE(t) add_to_ht((t)) #define REMOVE_FROM_HASH_TABLE(t) remove_from_ht((t)) #define VALIDATE_NON_PENDING_TIMER(t) validate_non_pending_timer((t)) @@ -192,6 +202,7 @@ static void validate_non_pending_timer(grpc_timer *t) { #else #define INIT_TIMER_HASH_TABLE() +#define DESTROY_TIMER_HASH_TABLE() #define ADD_TO_HASH_TABLE(t) #define REMOVE_FROM_HASH_TABLE(t) #define VALIDATE_NON_PENDING_TIMER(t) @@ -216,9 +227,6 @@ struct shared_mutables { static struct shared_mutables g_shared_mutables; -static gpr_clock_type g_clock_type; -static gpr_timespec g_start_time; - static gpr_atm saturating_add(gpr_atm a, gpr_atm b) { if (a > GPR_ATM_MAX - b) { return GPR_ATM_MAX; @@ -226,64 +234,34 @@ static gpr_atm saturating_add(gpr_atm a, gpr_atm b) { return a + b; } -static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, - gpr_atm now, - gpr_atm *next, - grpc_error *error); - -static gpr_timespec dbl_to_ts(double d) { - gpr_timespec ts; - ts.tv_sec = (int64_t)d; - ts.tv_nsec = (int32_t)(1e9 * (d - (double)ts.tv_sec)); - ts.clock_type = GPR_TIMESPAN; - return ts; -} - -static gpr_atm timespec_to_atm_round_up(gpr_timespec ts) { - ts = gpr_time_sub(ts, g_start_time); - double x = GPR_MS_PER_SEC * (double)ts.tv_sec + - (double)ts.tv_nsec / GPR_NS_PER_MS + - (double)(GPR_NS_PER_SEC - 1) / (double)GPR_NS_PER_SEC; - if (x < 0) return 0; - if (x > GPR_ATM_MAX) return GPR_ATM_MAX; - return (gpr_atm)x; -} +static grpc_timer_check_result run_some_expired_timers(gpr_atm now, + gpr_atm* next, + grpc_error* error); -static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) { - ts = gpr_time_sub(ts, g_start_time); - double x = - GPR_MS_PER_SEC * (double)ts.tv_sec + (double)ts.tv_nsec / GPR_NS_PER_MS; - if (x < 0) return 0; - if (x > GPR_ATM_MAX) return GPR_ATM_MAX; - return (gpr_atm)x; -} - -static gpr_timespec atm_to_timespec(gpr_atm x) { - return gpr_time_add(g_start_time, dbl_to_ts((double)x / 1000.0)); -} - -static gpr_atm compute_min_deadline(timer_shard *shard) { +static gpr_atm compute_min_deadline(timer_shard* shard) { return grpc_timer_heap_is_empty(&shard->heap) ? 
saturating_add(shard->queue_deadline_cap, 1) : grpc_timer_heap_top(&shard->heap)->deadline; } -void grpc_timer_list_init(gpr_timespec now) { +static void timer_list_init() { uint32_t i; + g_num_shards = GPR_MIN(1, 2 * gpr_cpu_num_cores()); + g_shards = + static_cast(gpr_zalloc(g_num_shards * sizeof(*g_shards))); + g_shard_queue = static_cast( + gpr_zalloc(g_num_shards * sizeof(*g_shard_queue))); + g_shared_mutables.initialized = true; g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER; gpr_mu_init(&g_shared_mutables.mu); - g_clock_type = now.clock_type; - g_start_time = now; - g_shared_mutables.min_timer = timespec_to_atm_round_down(now); + g_shared_mutables.min_timer = grpc_core::ExecCtx::Get()->Now(); gpr_tls_init(&g_last_seen_min_timer); gpr_tls_set(&g_last_seen_min_timer, 0); - grpc_register_tracer(&grpc_timer_trace); - grpc_register_tracer(&grpc_timer_check_trace); - for (i = 0; i < NUM_SHARDS; i++) { - timer_shard *shard = &g_shards[i]; + for (i = 0; i < g_num_shards; i++) { + timer_shard* shard = &g_shards[i]; gpr_mu_init(&shard->mu); grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1, 0.5); @@ -298,39 +276,39 @@ void grpc_timer_list_init(gpr_timespec now) { INIT_TIMER_HASH_TABLE(); } -void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { - int i; +static void timer_list_shutdown() { + size_t i; run_some_expired_timers( - exec_ctx, GPR_ATM_MAX, NULL, + GPR_ATM_MAX, nullptr, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown")); - for (i = 0; i < NUM_SHARDS; i++) { - timer_shard *shard = &g_shards[i]; + for (i = 0; i < g_num_shards; i++) { + timer_shard* shard = &g_shards[i]; gpr_mu_destroy(&shard->mu); grpc_timer_heap_destroy(&shard->heap); } gpr_mu_destroy(&g_shared_mutables.mu); gpr_tls_destroy(&g_last_seen_min_timer); + gpr_free(g_shards); + gpr_free(g_shard_queue); g_shared_mutables.initialized = false; -} -static double ts_to_dbl(gpr_timespec ts) { - return (double)ts.tv_sec + 1e-9 * ts.tv_nsec; + DESTROY_TIMER_HASH_TABLE(); } /* returns true if the first element in the list */ -static void list_join(grpc_timer *head, grpc_timer *timer) { +static void list_join(grpc_timer* head, grpc_timer* timer) { timer->next = head; timer->prev = head->prev; timer->next->prev = timer->prev->next = timer; } -static void list_remove(grpc_timer *timer) { +static void list_remove(grpc_timer* timer) { timer->next->prev = timer->prev; timer->prev->next = timer->next; } static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) { - timer_shard *temp; + timer_shard* temp; temp = g_shard_queue[first_shard_queue_index]; g_shard_queue[first_shard_queue_index] = g_shard_queue[first_shard_queue_index + 1]; @@ -341,45 +319,41 @@ static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) { first_shard_queue_index + 1; } -static void note_deadline_change(timer_shard *shard) { +static void note_deadline_change(timer_shard* shard) { while (shard->shard_queue_index > 0 && shard->min_deadline < g_shard_queue[shard->shard_queue_index - 1]->min_deadline) { swap_adjacent_shards_in_queue(shard->shard_queue_index - 1); } - while (shard->shard_queue_index < NUM_SHARDS - 1 && + while (shard->shard_queue_index < g_num_shards - 1 && shard->min_deadline > g_shard_queue[shard->shard_queue_index + 1]->min_deadline) { swap_adjacent_shards_in_queue(shard->shard_queue_index); } } -void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = false; } +void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = false; } -void 
grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, - gpr_timespec deadline, grpc_closure *closure, - gpr_timespec now) { +static void timer_init(grpc_timer* timer, grpc_millis deadline, + grpc_closure* closure) { int is_first_timer = 0; - timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]; - GPR_ASSERT(deadline.clock_type == g_clock_type); - GPR_ASSERT(now.clock_type == g_clock_type); + timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)]; timer->closure = closure; - gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline); + timer->deadline = deadline; #ifndef NDEBUG - timer->hash_table_next = NULL; + timer->hash_table_next = nullptr; #endif - if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR - "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]", - timer, deadline.tv_sec, deadline.tv_nsec, deadline_atm, now.tv_sec, - now.tv_nsec, timespec_to_atm_round_down(now), closure, closure->cb); + if (grpc_timer_trace.enabled()) { + gpr_log(GPR_INFO, "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", + timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure, + closure->cb); } if (!g_shared_mutables.initialized) { timer->pending = false; - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Attempt to create timer before initialization")); return; @@ -387,29 +361,31 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_mu_lock(&shard->mu); timer->pending = true; - if (gpr_time_cmp(deadline, now) <= 0) { + grpc_millis now = grpc_core::ExecCtx::Get()->Now(); + if (deadline <= now) { timer->pending = false; - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE); gpr_mu_unlock(&shard->mu); /* early out */ return; } - grpc_time_averaged_stats_add_sample(&shard->stats, - ts_to_dbl(gpr_time_sub(deadline, now))); + grpc_time_averaged_stats_add_sample( + &shard->stats, static_cast(deadline - now) / 1000.0); ADD_TO_HASH_TABLE(timer); - if (deadline_atm < shard->queue_deadline_cap) { + if (deadline < shard->queue_deadline_cap) { is_first_timer = grpc_timer_heap_add(&shard->heap, timer); } else { timer->heap_index = INVALID_HEAP_INDEX; list_join(&shard->list, timer); } - if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR - " => is_first_timer=%s", - (int)(shard - g_shards), shard->queue_deadline_cap, + if (grpc_timer_trace.enabled()) { + gpr_log(GPR_INFO, + " .. add to shard %d with queue_deadline_cap=%" PRIdPTR + " => is_first_timer=%s", + static_cast(shard - g_shards), shard->queue_deadline_cap, is_first_timer ? "true" : "false"); } gpr_mu_unlock(&shard->mu); @@ -427,16 +403,16 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, grpc_timer_check. */ if (is_first_timer) { gpr_mu_lock(&g_shared_mutables.mu); - if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR, + if (grpc_timer_trace.enabled()) { + gpr_log(GPR_INFO, " .. 
old shard min_deadline=%" PRIdPTR, shard->min_deadline); } - if (deadline_atm < shard->min_deadline) { + if (deadline < shard->min_deadline) { gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline; - shard->min_deadline = deadline_atm; + shard->min_deadline = deadline; note_deadline_change(shard); - if (shard->shard_queue_index == 0 && deadline_atm < old_min_deadline) { - gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline_atm); + if (shard->shard_queue_index == 0 && deadline < old_min_deadline) { + gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline); grpc_kick_poller(); } } @@ -444,28 +420,28 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, } } -void grpc_timer_consume_kick(void) { +static void timer_consume_kick(void) { /* force re-evaluation of last seeen min */ gpr_tls_set(&g_last_seen_min_timer, 0); } -void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { +static void timer_cancel(grpc_timer* timer) { if (!g_shared_mutables.initialized) { /* must have already been cancelled, also the shard mutex is invalid */ return; } - timer_shard *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)]; + timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)]; gpr_mu_lock(&shard->mu); - if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer, + if (grpc_timer_trace.enabled()) { + gpr_log(GPR_INFO, "TIMER %p: CANCEL pending=%s", timer, timer->pending ? "true" : "false"); } if (timer->pending) { REMOVE_FROM_HASH_TABLE(timer); - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED); timer->pending = false; if (timer->heap_index == INVALID_HEAP_INDEX) { list_remove(timer); @@ -483,7 +459,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { 'queue_deadline_cap') into into shard->heap. Returns 'true' if shard->heap has atleast ONE element REQUIRES: shard->mu locked */ -static int refill_heap(timer_shard *shard, gpr_atm now) { +static int refill_heap(timer_shard* shard, gpr_atm now) { /* Compute the new queue window width and bound by the limits: */ double computed_deadline_delta = grpc_time_averaged_stats_update_average(&shard->stats) * @@ -496,18 +472,18 @@ static int refill_heap(timer_shard *shard, gpr_atm now) { /* Compute the new cap and put all timers under it into the queue: */ shard->queue_deadline_cap = saturating_add(GPR_MAX(now, shard->queue_deadline_cap), - (gpr_atm)(deadline_delta * 1000.0)); + static_cast(deadline_delta * 1000.0)); - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR, - (int)(shard - g_shards), shard->queue_deadline_cap); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR, + static_cast(shard - g_shards), shard->queue_deadline_cap); } for (timer = shard->list.next; timer != &shard->list; timer = next) { next = timer->next; if (timer->deadline < shard->queue_deadline_cap) { - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap", + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, " .. add timer with deadline %" PRIdPTR " to heap", timer->deadline); } list_remove(timer); @@ -520,28 +496,29 @@ static int refill_heap(timer_shard *shard, gpr_atm now) { /* This pops the next non-cancelled timer with deadline <= now from the queue, or returns NULL if there isn't one. 
REQUIRES: shard->mu locked */ -static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) { - grpc_timer *timer; +static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) { + grpc_timer* timer; for (;;) { - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s", - (int)(shard - g_shards), + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, " .. shard[%d]: heap_empty=%s", + static_cast(shard - g_shards), grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false"); } if (grpc_timer_heap_is_empty(&shard->heap)) { - if (now < shard->queue_deadline_cap) return NULL; - if (!refill_heap(shard, now)) return NULL; + if (now < shard->queue_deadline_cap) return nullptr; + if (!refill_heap(shard, now)) return nullptr; } timer = grpc_timer_heap_top(&shard->heap); - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, " .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR, timer->deadline, now); } - if (timer->deadline > now) return NULL; - if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late", timer, - now - timer->deadline); + if (timer->deadline > now) return nullptr; + if (grpc_timer_trace.enabled()) { + gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler", + timer, now - timer->deadline, + timer->closure->scheduler->vtable->name); } timer->pending = false; grpc_timer_heap_pop(&shard->heap); @@ -550,32 +527,34 @@ static grpc_timer *pop_one(timer_shard *shard, gpr_atm now) { } /* REQUIRES: shard->mu unlocked */ -static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard, - gpr_atm now, gpr_atm *new_min_deadline, - grpc_error *error) { +static size_t pop_timers(timer_shard* shard, gpr_atm now, + gpr_atm* new_min_deadline, grpc_error* error) { size_t n = 0; - grpc_timer *timer; + grpc_timer* timer; gpr_mu_lock(&shard->mu); while ((timer = pop_one(shard, now))) { REMOVE_FROM_HASH_TABLE(timer); - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_REF(error)); n++; } *new_min_deadline = compute_min_deadline(shard); gpr_mu_unlock(&shard->mu); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, " .. shard[%d] popped %" PRIdPTR, + static_cast(shard - g_shards), n); + } return n; } -static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, - gpr_atm now, - gpr_atm *next, - grpc_error *error) { +static grpc_timer_check_result run_some_expired_timers(gpr_atm now, + gpr_atm* next, + grpc_error* error) { grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED; gpr_atm min_timer = gpr_atm_no_barrier_load(&g_shared_mutables.min_timer); gpr_tls_set(&g_last_seen_min_timer, min_timer); if (now < min_timer) { - if (next != NULL) *next = GPR_MIN(*next, min_timer); + if (next != nullptr) *next = GPR_MIN(*next, min_timer); return GRPC_TIMERS_CHECKED_AND_EMPTY; } @@ -583,9 +562,9 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_mu_lock(&g_shared_mutables.mu); result = GRPC_TIMERS_CHECKED_AND_EMPTY; - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR, - (int)(g_shard_queue[0] - g_shards), + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, " .. 
shard[%d]->min_deadline = %" PRIdPTR, + static_cast(g_shard_queue[0] - g_shards), g_shard_queue[0]->min_deadline); } @@ -596,17 +575,16 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, /* For efficiency, we pop as many available timers as we can from the shard. This may violate perfect timer deadline ordering, but that shouldn't be a big deal because we don't make ordering guarantees. */ - if (pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline, - error) > 0) { + if (pop_timers(g_shard_queue[0], now, &new_min_deadline, error) > 0) { result = GRPC_TIMERS_FIRED; } - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, " .. result --> %d" ", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR ", now=%" PRIdPTR, - result, (int)(g_shard_queue[0] - g_shards), + result, static_cast(g_shard_queue[0] - g_shards), g_shard_queue[0]->min_deadline, new_min_deadline, now); } @@ -634,72 +612,62 @@ static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx *exec_ctx, return result; } -grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, - gpr_timespec now, gpr_timespec *next) { +static grpc_timer_check_result timer_check(grpc_millis* next) { // prelude - GPR_ASSERT(now.clock_type == g_clock_type); - gpr_atm now_atm = timespec_to_atm_round_down(now); + grpc_millis now = grpc_core::ExecCtx::Get()->Now(); /* fetch from a thread-local first: this avoids contention on a globally mutable cacheline in the common case */ - gpr_atm min_timer = gpr_tls_get(&g_last_seen_min_timer); - if (now_atm < min_timer) { - if (next != NULL) { - *next = - atm_to_timespec(GPR_MIN(timespec_to_atm_round_up(*next), min_timer)); + grpc_millis min_timer = gpr_tls_get(&g_last_seen_min_timer); + if (now < min_timer) { + if (next != nullptr) { + *next = GPR_MIN(*next, min_timer); } - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, - "TIMER CHECK SKIP: now_atm=%" PRIdPTR " min_timer=%" PRIdPTR, - now_atm, min_timer); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, + "TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now, + min_timer); } return GRPC_TIMERS_CHECKED_AND_EMPTY; } - grpc_error *shutdown_error = - gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0 + grpc_error* shutdown_error = + now != GRPC_MILLIS_INF_FUTURE ? 
GRPC_ERROR_NONE : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system"); // tracing - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - char *next_str; - if (next == NULL) { + if (grpc_timer_check_trace.enabled()) { + char* next_str; + if (next == nullptr) { next_str = gpr_strdup("NULL"); } else { - gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec, - next->tv_nsec, timespec_to_atm_round_down(*next)); + gpr_asprintf(&next_str, "%" PRIdPTR, *next); } - gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR - "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR, - now.tv_sec, now.tv_nsec, now_atm, next_str, - gpr_tls_get(&g_last_seen_min_timer), + gpr_log(GPR_INFO, + "TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR + " glob_min=%" PRIdPTR, + now, next_str, gpr_tls_get(&g_last_seen_min_timer), gpr_atm_no_barrier_load(&g_shared_mutables.min_timer)); gpr_free(next_str); } // actual code - grpc_timer_check_result r; - gpr_atm next_atm; - if (next == NULL) { - r = run_some_expired_timers(exec_ctx, now_atm, NULL, shutdown_error); - } else { - next_atm = timespec_to_atm_round_down(*next); - r = run_some_expired_timers(exec_ctx, now_atm, &next_atm, shutdown_error); - *next = atm_to_timespec(next_atm); - } + grpc_timer_check_result r = + run_some_expired_timers(now, next, shutdown_error); // tracing - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - char *next_str; - if (next == NULL) { + if (grpc_timer_check_trace.enabled()) { + char* next_str; + if (next == nullptr) { next_str = gpr_strdup("NULL"); } else { - gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec, - next->tv_nsec, next_atm); + gpr_asprintf(&next_str, "%" PRIdPTR, *next); } - gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str); + gpr_log(GPR_INFO, "TIMER CHECK END: r=%d; next=%s", r, next_str); gpr_free(next_str); } return r; } -#endif /* GRPC_TIMER_USE_GENERIC */ +grpc_timer_vtable grpc_generic_timer_vtable = { + timer_init, timer_cancel, timer_check, + timer_list_init, timer_list_shutdown, timer_consume_kick}; diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_generic.h b/Sources/CgRPC/src/core/lib/iomgr/timer_generic.h deleted file mode 100644 index f0597f6ea..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/timer_generic.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#ifndef GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H -#define GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H - -#include -#include "src/core/lib/iomgr/exec_ctx.h" - -struct grpc_timer { - gpr_atm deadline; - uint32_t heap_index; /* INVALID_HEAP_INDEX if not in heap */ - bool pending; - struct grpc_timer *next; - struct grpc_timer *prev; - grpc_closure *closure; -#ifndef NDEBUG - struct grpc_timer *hash_table_next; -#endif -}; - -#endif /* GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_heap.c b/Sources/CgRPC/src/core/lib/iomgr/timer_heap.cc similarity index 68% rename from Sources/CgRPC/src/core/lib/iomgr/timer_heap.c rename to Sources/CgRPC/src/core/lib/iomgr/timer_heap.cc index 2648d5da5..0c17d607e 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/timer_heap.c +++ b/Sources/CgRPC/src/core/lib/iomgr/timer_heap.cc @@ -16,25 +16,26 @@ * */ -#include "src/core/lib/iomgr/port.h" +#include -#ifdef GRPC_TIMER_USE_GENERIC +#include "src/core/lib/iomgr/port.h" #include "src/core/lib/iomgr/timer_heap.h" #include #include -#include + +#include "src/core/lib/gpr/useful.h" /* Adjusts a heap so as to move a hole at position i closer to the root, until a suitable position is found for element t. Then, copies t into that position. This functor is called each time immediately after modifying a value in the underlying container, with the offset of the modified element as its argument. */ -static void adjust_upwards(grpc_timer **first, uint32_t i, grpc_timer *t) { +static void adjust_upwards(grpc_timer** first, uint32_t i, grpc_timer* t) { while (i > 0) { - uint32_t parent = (uint32_t)(((int)i - 1) / 2); + uint32_t parent = static_cast((static_cast(i) - 1) / 2); if (first[parent]->deadline <= t->deadline) break; first[i] = first[parent]; first[i]->heap_index = i; @@ -47,17 +48,16 @@ static void adjust_upwards(grpc_timer **first, uint32_t i, grpc_timer *t) { /* Adjusts a heap so as to move a hole at position i farther away from the root, until a suitable position is found for element t. Then, copies t into that position. */ -static void adjust_downwards(grpc_timer **first, uint32_t i, uint32_t length, - grpc_timer *t) { +static void adjust_downwards(grpc_timer** first, uint32_t i, uint32_t length, + grpc_timer* t) { for (;;) { uint32_t left_child = 1u + 2u * i; if (left_child >= length) break; uint32_t right_child = left_child + 1; - uint32_t next_i = - right_child < length && - first[left_child]->deadline > first[right_child]->deadline - ? right_child - : left_child; + uint32_t next_i = right_child < length && first[left_child]->deadline > + first[right_child]->deadline + ? 
right_child + : left_child; if (t->deadline <= first[next_i]->deadline) break; first[i] = first[next_i]; first[i]->heap_index = i; @@ -70,18 +70,18 @@ static void adjust_downwards(grpc_timer **first, uint32_t i, uint32_t length, #define SHRINK_MIN_ELEMS 8 #define SHRINK_FULLNESS_FACTOR 2 -static void maybe_shrink(grpc_timer_heap *heap) { +static void maybe_shrink(grpc_timer_heap* heap) { if (heap->timer_count >= 8 && heap->timer_count <= heap->timer_capacity / SHRINK_FULLNESS_FACTOR / 2) { heap->timer_capacity = heap->timer_count * SHRINK_FULLNESS_FACTOR; - heap->timers = (grpc_timer **)gpr_realloc( - heap->timers, heap->timer_capacity * sizeof(grpc_timer *)); + heap->timers = static_cast( + gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer*))); } } -static void note_changed_priority(grpc_timer_heap *heap, grpc_timer *timer) { +static void note_changed_priority(grpc_timer_heap* heap, grpc_timer* timer) { uint32_t i = timer->heap_index; - uint32_t parent = (uint32_t)(((int)i - 1) / 2); + uint32_t parent = static_cast((static_cast(i) - 1) / 2); if (heap->timers[parent]->deadline > timer->deadline) { adjust_upwards(heap->timers, i, timer); } else { @@ -89,18 +89,18 @@ static void note_changed_priority(grpc_timer_heap *heap, grpc_timer *timer) { } } -void grpc_timer_heap_init(grpc_timer_heap *heap) { +void grpc_timer_heap_init(grpc_timer_heap* heap) { memset(heap, 0, sizeof(*heap)); } -void grpc_timer_heap_destroy(grpc_timer_heap *heap) { gpr_free(heap->timers); } +void grpc_timer_heap_destroy(grpc_timer_heap* heap) { gpr_free(heap->timers); } -int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) { +int grpc_timer_heap_add(grpc_timer_heap* heap, grpc_timer* timer) { if (heap->timer_count == heap->timer_capacity) { heap->timer_capacity = GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2); - heap->timers = (grpc_timer **)gpr_realloc( - heap->timers, heap->timer_capacity * sizeof(grpc_timer *)); + heap->timers = static_cast( + gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer*))); } timer->heap_index = heap->timer_count; adjust_upwards(heap->timers, heap->timer_count, timer); @@ -108,7 +108,7 @@ int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) { return timer->heap_index == 0; } -void grpc_timer_heap_remove(grpc_timer_heap *heap, grpc_timer *timer) { +void grpc_timer_heap_remove(grpc_timer_heap* heap, grpc_timer* timer) { uint32_t i = timer->heap_index; if (i == heap->timer_count - 1) { heap->timer_count--; @@ -122,16 +122,14 @@ void grpc_timer_heap_remove(grpc_timer_heap *heap, grpc_timer *timer) { note_changed_priority(heap, heap->timers[i]); } -int grpc_timer_heap_is_empty(grpc_timer_heap *heap) { +int grpc_timer_heap_is_empty(grpc_timer_heap* heap) { return heap->timer_count == 0; } -grpc_timer *grpc_timer_heap_top(grpc_timer_heap *heap) { +grpc_timer* grpc_timer_heap_top(grpc_timer_heap* heap) { return heap->timers[0]; } -void grpc_timer_heap_pop(grpc_timer_heap *heap) { +void grpc_timer_heap_pop(grpc_timer_heap* heap) { grpc_timer_heap_remove(heap, grpc_timer_heap_top(heap)); } - -#endif /* GRPC_TIMER_USE_GENERIC */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_heap.h b/Sources/CgRPC/src/core/lib/iomgr/timer_heap.h index 0d64199ab..503365d4c 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/timer_heap.h +++ b/Sources/CgRPC/src/core/lib/iomgr/timer_heap.h @@ -19,24 +19,26 @@ #ifndef GRPC_CORE_LIB_IOMGR_TIMER_HEAP_H #define GRPC_CORE_LIB_IOMGR_TIMER_HEAP_H +#include + #include "src/core/lib/iomgr/timer.h" typedef 
struct { - grpc_timer **timers; + grpc_timer** timers; uint32_t timer_count; uint32_t timer_capacity; } grpc_timer_heap; /* return 1 if the new timer is the first timer in the heap */ -int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer); +int grpc_timer_heap_add(grpc_timer_heap* heap, grpc_timer* timer); -void grpc_timer_heap_init(grpc_timer_heap *heap); -void grpc_timer_heap_destroy(grpc_timer_heap *heap); +void grpc_timer_heap_init(grpc_timer_heap* heap); +void grpc_timer_heap_destroy(grpc_timer_heap* heap); -void grpc_timer_heap_remove(grpc_timer_heap *heap, grpc_timer *timer); -grpc_timer *grpc_timer_heap_top(grpc_timer_heap *heap); -void grpc_timer_heap_pop(grpc_timer_heap *heap); +void grpc_timer_heap_remove(grpc_timer_heap* heap, grpc_timer* timer); +grpc_timer* grpc_timer_heap_top(grpc_timer_heap* heap); +void grpc_timer_heap_pop(grpc_timer_heap* heap); -int grpc_timer_heap_is_empty(grpc_timer_heap *heap); +int grpc_timer_heap_is_empty(grpc_timer_heap* heap); #endif /* GRPC_CORE_LIB_IOMGR_TIMER_HEAP_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_manager.c b/Sources/CgRPC/src/core/lib/iomgr/timer_manager.cc similarity index 66% rename from Sources/CgRPC/src/core/lib/iomgr/timer_manager.c rename to Sources/CgRPC/src/core/lib/iomgr/timer_manager.cc index 04ca44563..35e791456 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/timer_manager.c +++ b/Sources/CgRPC/src/core/lib/iomgr/timer_manager.cc @@ -16,21 +16,24 @@ * */ -#include "src/core/lib/iomgr/timer_manager.h" +#include + +#include #include #include -#include #include "src/core/lib/debug/trace.h" +#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/timer_manager.h" -typedef struct completed_thread { - gpr_thd_id t; - struct completed_thread *next; -} completed_thread; +struct completed_thread { + grpc_core::Thread thd; + completed_thread* next; +}; -extern grpc_tracer_flag grpc_timer_check_trace; +extern grpc_core::TraceFlag grpc_timer_check_trace; // global mutex static gpr_mu g_mu; @@ -45,27 +48,27 @@ static int g_thread_count; // number of threads sitting around waiting static int g_waiter_count; // linked list of threads that have completed (and need joining) -static completed_thread *g_completed_threads; +static completed_thread* g_completed_threads; // was the manager kicked by the timer system static bool g_kicked; // is there a thread waiting until the next timer should fire? 
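As background for the g_has_timed_waiter and g_timed_waiter_deadline globals declared next: the manager lets at most one thread sleep with a real deadline (the earliest one it knows about), while the remaining threads sleep unbounded until kicked. A minimal standalone sketch of that idea, using only the C++ standard library — WaiterPool and the std::optional deadline are inventions of this example, not gRPC API, and the real code additionally keeps a generation counter to detect waiter hand-offs:

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <optional>

using Clock = std::chrono::steady_clock;

class WaiterPool {
 public:
  // Sleep until 'deadline' (if any) or until kicked. Only the thread holding
  // the earliest known deadline performs a timed wait; everyone else waits
  // without a timeout so they are not woken on every timer insertion.
  void WaitUntil(std::optional<Clock::time_point> deadline) {
    std::unique_lock<std::mutex> lock(mu_);
    if (deadline.has_value() &&
        (!timed_waiter_deadline_.has_value() ||
         *deadline < *timed_waiter_deadline_)) {
      timed_waiter_deadline_ = deadline;
      cv_.wait_until(lock, *deadline);
      timed_waiter_deadline_.reset();  // the real code uses a generation counter here
    } else {
      cv_.wait(lock);  // untimed waiter: woken only by Kick()
    }
  }

  // Called when an earlier timer is inserted: forget the current timed waiter
  // so the next loop iteration re-computes its deadline.
  void Kick() {
    std::lock_guard<std::mutex> lock(mu_);
    timed_waiter_deadline_.reset();
    cv_.notify_one();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::optional<Clock::time_point> timed_waiter_deadline_;
};

In the patched file, grpc_kick_poller() plays the role of Kick(): it clears g_has_timed_waiter, resets g_timed_waiter_deadline to GRPC_MILLIS_INF_FUTURE, bumps the generation counter and signals g_cv_wait.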
static bool g_has_timed_waiter; // the deadline of the current timed waiter thread (only relevant if // g_has_timed_waiter is true) -static gpr_timespec g_timed_waiter_deadline; +static grpc_millis g_timed_waiter_deadline; // generation counter to track which thread is waiting for the next timer static uint64_t g_timed_waiter_generation; -static void timer_thread(void *completed_thread_ptr); +static void timer_thread(void* completed_thread_ptr); static void gc_completed_threads(void) { - if (g_completed_threads != NULL) { - completed_thread *to_gc = g_completed_threads; - g_completed_threads = NULL; + if (g_completed_threads != nullptr) { + completed_thread* to_gc = g_completed_threads; + g_completed_threads = nullptr; gpr_mu_unlock(&g_mu); - while (to_gc != NULL) { - gpr_thd_join(to_gc->t); - completed_thread *next = to_gc->next; + while (to_gc != nullptr) { + to_gc->thd.Join(); + completed_thread* next = to_gc->next; gpr_free(to_gc); to_gc = next; } @@ -78,31 +81,22 @@ static void start_timer_thread_and_unlock(void) { ++g_waiter_count; ++g_thread_count; gpr_mu_unlock(&g_mu); - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, "Spawn timer thread"); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, "Spawn timer thread"); } - gpr_thd_options opt = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&opt); - completed_thread *ct = (completed_thread *)gpr_malloc(sizeof(*ct)); - // The call to gpr_thd_new() has to be under the same lock used by - // gc_completed_threads(), particularly due to ct->t, which is written here - // (internally by gpr_thd_new) and read there. Otherwise it's possible for ct - // to leak through g_completed_threads and be freed in gc_completed_threads() - // before "&ct->t" is written to, causing a use-after-free. - gpr_mu_lock(&g_mu); - gpr_thd_new(&ct->t, timer_thread, ct, &opt); - gpr_mu_unlock(&g_mu); + completed_thread* ct = + static_cast(gpr_malloc(sizeof(*ct))); + ct->thd = grpc_core::Thread("grpc_global_timer", timer_thread, ct); + ct->thd.Start(); } void grpc_timer_manager_tick() { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - gpr_timespec next = gpr_inf_future(GPR_CLOCK_MONOTONIC); - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - grpc_timer_check(&exec_ctx, now, &next); - grpc_exec_ctx_finish(&exec_ctx); + grpc_core::ExecCtx exec_ctx; + grpc_millis next = GRPC_MILLIS_INF_FUTURE; + grpc_timer_check(&next); } -static void run_some_timers(grpc_exec_ctx *exec_ctx) { +static void run_some_timers() { // if there's something to execute... 
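The gc_completed_threads()/start_timer_thread_and_unlock() pair above keeps the shape it had with gpr_thd: a worker that exits parks its own record on a completed list, and some later caller joins and frees those records. grpc_core::Thread separates construction from Start(), which is presumably why the patched code no longer needs to create the handle under the lock; std::thread starts immediately, so the standalone sketch below serializes creation under the mutex instead. TimerThreads, Worker and RunTimerLoop are invented names for this illustration, not gRPC API:

#include <mutex>
#include <thread>

struct Worker {
  std::thread handle;
  Worker* next = nullptr;
};

class TimerThreads {
 public:
  void Spawn() {
    Worker* w = new Worker;
    std::lock_guard<std::mutex> lock(mu_);
    // Creating the handle under mu_ means the worker cannot enqueue itself on
    // completed_ (it needs mu_ too) before w->handle has been written.
    w->handle = std::thread([this, w] {
      RunTimerLoop();  // stands in for timer_main_loop()
      std::lock_guard<std::mutex> guard(mu_);
      w->next = completed_;
      completed_ = w;
    });
  }

  // Join and delete every worker that has finished; call without mu_ held.
  // (A real implementation would also join everything at shutdown.)
  void GcCompleted() {
    Worker* list = nullptr;
    {
      std::lock_guard<std::mutex> lock(mu_);
      list = completed_;
      completed_ = nullptr;
    }
    while (list != nullptr) {
      Worker* next = list->next;
      list->handle.join();
      delete list;
      list = next;
    }
  }

 private:
  void RunTimerLoop() {}  // placeholder body

  std::mutex mu_;
  Worker* completed_ = nullptr;
};

timer_thread_cleanup() further down is where the real code links its completed_thread record onto g_completed_threads before the thread returns.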
gpr_mu_lock(&g_mu); // remove a waiter from the pool, and start another thread if necessary @@ -113,15 +107,18 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) { // if there's no thread waiting with a timeout, kick an existing // waiter so that the next deadline is not missed if (!g_has_timed_waiter) { - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, "kick untimed waiter"); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, "kick untimed waiter"); } gpr_cv_signal(&g_cv_wait); } gpr_mu_unlock(&g_mu); } // without our lock, flush the exec_ctx - grpc_exec_ctx_flush(exec_ctx); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, "flush exec_ctx"); + } + grpc_core::ExecCtx::Get()->Flush(); gpr_mu_lock(&g_mu); // garbage collect any threads hanging out that are dead gc_completed_threads(); @@ -133,8 +130,7 @@ static void run_some_timers(grpc_exec_ctx *exec_ctx) { // wait until 'next' (or forever if there is already a timed waiter in the pool) // returns true if the thread should continue executing (false if it should // shutdown) -static bool wait_until(gpr_timespec next) { - const gpr_timespec inf_future = gpr_inf_future(GPR_CLOCK_MONOTONIC); +static bool wait_until(grpc_millis next) { gpr_mu_lock(&g_mu); // if we're not threaded anymore, leave if (!g_threaded) { @@ -168,33 +164,30 @@ static bool wait_until(gpr_timespec next) { unless their 'next' is earlier than the current timed-waiter's deadline (in which case the thread with earlier 'next' takes over as the new timed waiter) */ - if (gpr_time_cmp(next, inf_future) != 0) { - if (!g_has_timed_waiter || - (gpr_time_cmp(next, g_timed_waiter_deadline) < 0)) { + if (next != GRPC_MILLIS_INF_FUTURE) { + if (!g_has_timed_waiter || (next < g_timed_waiter_deadline)) { my_timed_waiter_generation = ++g_timed_waiter_generation; g_has_timed_waiter = true; g_timed_waiter_deadline = next; - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_timespec wait_time = - gpr_time_sub(next, gpr_now(GPR_CLOCK_MONOTONIC)); - gpr_log(GPR_DEBUG, "sleep for a %" PRId64 ".%09d seconds", - wait_time.tv_sec, wait_time.tv_nsec); + if (grpc_timer_check_trace.enabled()) { + grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now(); + gpr_log(GPR_INFO, "sleep for a %" PRIdPTR " milliseconds", wait_time); } } else { // g_timed_waiter == true && next >= g_timed_waiter_deadline - next = inf_future; + next = GRPC_MILLIS_INF_FUTURE; } } - if (GRPC_TRACER_ON(grpc_timer_check_trace) && - gpr_time_cmp(next, inf_future) == 0) { - gpr_log(GPR_DEBUG, "sleep until kicked"); + if (grpc_timer_check_trace.enabled() && next == GRPC_MILLIS_INF_FUTURE) { + gpr_log(GPR_INFO, "sleep until kicked"); } - gpr_cv_wait(&g_cv_wait, &g_mu, next); + gpr_cv_wait(&g_cv_wait, &g_mu, + grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC)); - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d", + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, "wait ended: was_timed:%d kicked:%d", my_timed_waiter_generation == g_timed_waiter_generation, g_kicked); } @@ -203,7 +196,7 @@ static bool wait_until(gpr_timespec next) { // there's work to do after checking timers (code above) if (my_timed_waiter_generation == g_timed_waiter_generation) { g_has_timed_waiter = false; - g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE; } } @@ -218,15 +211,15 @@ static bool wait_until(gpr_timespec next) { return true; } -static void 
timer_main_loop(grpc_exec_ctx *exec_ctx) { - const gpr_timespec inf_future = gpr_inf_future(GPR_CLOCK_MONOTONIC); +static void timer_main_loop() { for (;;) { - gpr_timespec next = inf_future; - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); + grpc_millis next = GRPC_MILLIS_INF_FUTURE; + grpc_core::ExecCtx::Get()->InvalidateNow(); + // check timer state, updates next to the next time to run a check - switch (grpc_timer_check(exec_ctx, now, &next)) { + switch (grpc_timer_check(&next)) { case GRPC_TIMERS_FIRED: - run_some_timers(exec_ctx); + run_some_timers(); break; case GRPC_TIMERS_NOT_CHECKED: /* This case only happens under contention, meaning more than one timer @@ -238,10 +231,10 @@ static void timer_main_loop(grpc_exec_ctx *exec_ctx) { Consequently, we can just sleep forever here and be happy at some saved wakeup cycles. */ - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, "timers not checked: expect another thread to"); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, "timers not checked: expect another thread to"); } - next = inf_future; + next = GRPC_MILLIS_INF_FUTURE; /* fall through */ case GRPC_TIMERS_CHECKED_AND_EMPTY: if (!wait_until(next)) { @@ -252,7 +245,7 @@ static void timer_main_loop(grpc_exec_ctx *exec_ctx) { } } -static void timer_thread_cleanup(completed_thread *ct) { +static void timer_thread_cleanup(completed_thread* ct) { gpr_mu_lock(&g_mu); // terminate the thread: drop the waiter count, thread count, and let whomever // stopped the threading stuff know that we're done @@ -264,19 +257,18 @@ static void timer_thread_cleanup(completed_thread *ct) { ct->next = g_completed_threads; g_completed_threads = ct; gpr_mu_unlock(&g_mu); - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, "End timer thread"); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, "End timer thread"); } } -static void timer_thread(void *completed_thread_ptr) { +static void timer_thread(void* completed_thread_ptr) { // this threads exec_ctx: we try to run things through to completion here // since it's easy to spin up new threads - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL); - timer_main_loop(&exec_ctx); - grpc_exec_ctx_finish(&exec_ctx); - timer_thread_cleanup((completed_thread *)completed_thread_ptr); + grpc_core::ExecCtx exec_ctx(0); + timer_main_loop(); + + timer_thread_cleanup(static_cast(completed_thread_ptr)); } static void start_threads(void) { @@ -297,29 +289,29 @@ void grpc_timer_manager_init(void) { g_threaded = false; g_thread_count = 0; g_waiter_count = 0; - g_completed_threads = NULL; + g_completed_threads = nullptr; g_has_timed_waiter = false; - g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE; start_threads(); } static void stop_threads(void) { gpr_mu_lock(&g_mu); - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, "stop timer threads: threaded=%d", g_threaded); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, "stop timer threads: threaded=%d", g_threaded); } if (g_threaded) { g_threaded = false; gpr_cv_broadcast(&g_cv_wait); - if (GRPC_TRACER_ON(grpc_timer_check_trace)) { - gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count); } while (g_thread_count > 0) { - gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME)); - if (GRPC_TRACER_ON(grpc_timer_check_trace)) 
{ - gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count); + gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC)); + if (grpc_timer_check_trace.enabled()) { + gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count); } gc_completed_threads(); } @@ -347,7 +339,7 @@ void grpc_kick_poller(void) { gpr_mu_lock(&g_mu); g_kicked = true; g_has_timed_waiter = false; - g_timed_waiter_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + g_timed_waiter_deadline = GRPC_MILLIS_INF_FUTURE; ++g_timed_waiter_generation; gpr_cv_signal(&g_cv_wait); gpr_mu_unlock(&g_mu); diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_manager.h b/Sources/CgRPC/src/core/lib/iomgr/timer_manager.h index 0ba502928..3c4cdda2c 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/timer_manager.h +++ b/Sources/CgRPC/src/core/lib/iomgr/timer_manager.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_TIMER_MANAGER_H #define GRPC_CORE_LIB_IOMGR_TIMER_MANAGER_H +#include + #include /* Timer Manager tries to keep one thread waiting for the next timeout at all diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_uv.c b/Sources/CgRPC/src/core/lib/iomgr/timer_uv.c deleted file mode 100644 index adced41f5..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/timer_uv.c +++ /dev/null @@ -1,101 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/iomgr/port.h" - -#if GRPC_UV - -#include -#include - -#include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/iomgr_uv.h" -#include "src/core/lib/iomgr/timer.h" - -#include - -grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false, "timer"); -grpc_tracer_flag grpc_timer_check_trace = - GRPC_TRACER_INITIALIZER(false, "timer_check"); - -static void timer_close_callback(uv_handle_t *handle) { gpr_free(handle); } - -static void stop_uv_timer(uv_timer_t *handle) { - uv_timer_stop(handle); - uv_unref((uv_handle_t *)handle); - uv_close((uv_handle_t *)handle, timer_close_callback); -} - -void run_expired_timer(uv_timer_t *handle) { - grpc_timer *timer = (grpc_timer *)handle->data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_UV_ASSERT_SAME_THREAD(); - GPR_ASSERT(timer->pending); - timer->pending = 0; - GRPC_CLOSURE_SCHED(&exec_ctx, timer->closure, GRPC_ERROR_NONE); - stop_uv_timer(handle); - grpc_exec_ctx_finish(&exec_ctx); -} - -void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, - gpr_timespec deadline, grpc_closure *closure, - gpr_timespec now) { - uint64_t timeout; - uv_timer_t *uv_timer; - GRPC_UV_ASSERT_SAME_THREAD(); - timer->closure = closure; - if (gpr_time_cmp(deadline, now) <= 0) { - timer->pending = 0; - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE); - return; - } - timer->pending = 1; - timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now)); - uv_timer = gpr_malloc(sizeof(uv_timer_t)); - uv_timer_init(uv_default_loop(), uv_timer); - uv_timer->data = timer; - timer->uv_timer = uv_timer; - uv_timer_start(uv_timer, run_expired_timer, timeout, 0); - /* We assume that gRPC timers are only used alongside other active gRPC - objects, and that there will therefore always be something else keeping - the uv loop alive whenever there is a timer */ - uv_unref((uv_handle_t *)uv_timer); -} - -void grpc_timer_init_unset(grpc_timer *timer) { timer->pending = 0; } - -void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { - GRPC_UV_ASSERT_SAME_THREAD(); - if (timer->pending) { - timer->pending = 0; - GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); - stop_uv_timer((uv_timer_t *)timer->uv_timer); - } -} - -grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, - gpr_timespec now, gpr_timespec *next) { - return GRPC_TIMERS_NOT_CHECKED; -} - -void grpc_timer_list_init(gpr_timespec now) {} -void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) {} - -void grpc_timer_consume_kick(void) {} - -#endif /* GRPC_UV */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_uv.cc b/Sources/CgRPC/src/core/lib/iomgr/timer_uv.cc new file mode 100644 index 000000000..8b7c82eb7 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/timer_uv.cc @@ -0,0 +1,66 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_UV + +#include +#include + +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/iomgr/iomgr_custom.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/iomgr/timer_custom.h" + +#include + +static void timer_close_callback(uv_handle_t* handle) { gpr_free(handle); } + +static void stop_uv_timer(uv_timer_t* handle) { + uv_timer_stop(handle); + uv_unref((uv_handle_t*)handle); + uv_close((uv_handle_t*)handle, timer_close_callback); +} + +void run_expired_timer(uv_timer_t* handle) { + grpc_custom_timer* timer_wrapper = (grpc_custom_timer*)handle->data; + grpc_custom_timer_callback(timer_wrapper, GRPC_ERROR_NONE); +} + +static void timer_start(grpc_custom_timer* t) { + uv_timer_t* uv_timer; + uv_timer = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t)); + uv_timer_init(uv_default_loop(), uv_timer); + uv_timer->data = t; + t->timer = (void*)uv_timer; + uv_timer_start(uv_timer, run_expired_timer, t->timeout_ms, 0); + // Node uses a garbage collector to call destructors, so we don't + // want to hold the uv loop open with active gRPC objects. + uv_unref((uv_handle_t*)uv_timer); +} + +static void timer_stop(grpc_custom_timer* t) { + stop_uv_timer((uv_timer_t*)t->timer); +} + +grpc_custom_timer_vtable uv_timer_vtable = {timer_start, timer_stop}; + +#endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/timer_uv.h b/Sources/CgRPC/src/core/lib/iomgr/timer_uv.h deleted file mode 100644 index 8a4c17c84..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/timer_uv.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_IOMGR_TIMER_UV_H -#define GRPC_CORE_LIB_IOMGR_TIMER_UV_H - -#include "src/core/lib/iomgr/exec_ctx.h" - -struct grpc_timer { - grpc_closure *closure; - /* This is actually a uv_timer_t*, but we want to keep platform-specific - types out of headers */ - void *uv_timer; - int pending; -}; - -#endif /* GRPC_CORE_LIB_IOMGR_TIMER_UV_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/udp_server.c b/Sources/CgRPC/src/core/lib/iomgr/udp_server.c deleted file mode 100644 index 00b2e68bb..000000000 --- a/Sources/CgRPC/src/core/lib/iomgr/udp_server.c +++ /dev/null @@ -1,549 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -/* FIXME: "posix" files shouldn't be depending on _GNU_SOURCE */ -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - -#include "src/core/lib/iomgr/port.h" - -#ifdef GRPC_POSIX_SOCKET - -#include "src/core/lib/iomgr/udp_server.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/iomgr/error.h" -#include "src/core/lib/iomgr/ev_posix.h" -#include "src/core/lib/iomgr/resolve_address.h" -#include "src/core/lib/iomgr/sockaddr.h" -#include "src/core/lib/iomgr/sockaddr_utils.h" -#include "src/core/lib/iomgr/socket_factory_posix.h" -#include "src/core/lib/iomgr/socket_utils_posix.h" -#include "src/core/lib/iomgr/unix_sockets_posix.h" -#include "src/core/lib/support/string.h" - -/* one listening port */ -typedef struct grpc_udp_listener grpc_udp_listener; -struct grpc_udp_listener { - int fd; - grpc_fd *emfd; - grpc_udp_server *server; - grpc_resolved_address addr; - grpc_closure read_closure; - grpc_closure write_closure; - // To be called when corresponding QuicGrpcServer closes all active - // connections. - grpc_closure orphan_fd_closure; - grpc_closure destroyed_closure; - grpc_udp_server_read_cb read_cb; - grpc_udp_server_write_cb write_cb; - grpc_udp_server_orphan_cb orphan_cb; - // True if orphan_cb is trigered. - bool orphan_notified; - - struct grpc_udp_listener *next; -}; - -struct shutdown_fd_args { - grpc_fd *fd; - gpr_mu *server_mu; -}; - -/* the overall server */ -struct grpc_udp_server { - gpr_mu mu; - - /* factory to use for creating and binding sockets, or NULL */ - grpc_socket_factory *socket_factory; - - /* active port count: how many ports are actually still listening */ - size_t active_ports; - /* destroyed port count: how many ports are completely destroyed */ - size_t destroyed_ports; - - /* is this server shutting down? 
(boolean) */ - int shutdown; - - /* linked list of server ports */ - grpc_udp_listener *head; - grpc_udp_listener *tail; - unsigned nports; - - /* shutdown callback */ - grpc_closure *shutdown_complete; - - /* all pollsets interested in new connections */ - grpc_pollset **pollsets; - /* number of pollsets in the pollsets array */ - size_t pollset_count; - /* opaque object to pass to callbacks */ - void *user_data; -}; - -static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) { - if (args) { - const grpc_arg *arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY); - if (arg) { - GPR_ASSERT(arg->type == GRPC_ARG_POINTER); - return (grpc_socket_factory *)arg->value.pointer.p; - } - } - return NULL; -} - -grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) { - grpc_udp_server *s = (grpc_udp_server *)gpr_malloc(sizeof(grpc_udp_server)); - gpr_mu_init(&s->mu); - s->socket_factory = get_socket_factory(args); - if (s->socket_factory) { - grpc_socket_factory_ref(s->socket_factory); - } - s->active_ports = 0; - s->destroyed_ports = 0; - s->shutdown = 0; - s->head = NULL; - s->tail = NULL; - s->nports = 0; - - return s; -} - -static void shutdown_fd(grpc_exec_ctx *exec_ctx, void *args, - grpc_error *error) { - struct shutdown_fd_args *shutdown_args = (struct shutdown_fd_args *)args; - gpr_mu_lock(shutdown_args->server_mu); - grpc_fd_shutdown(exec_ctx, shutdown_args->fd, GRPC_ERROR_REF(error)); - gpr_mu_unlock(shutdown_args->server_mu); - gpr_free(shutdown_args); -} - -static void dummy_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - // No-op. -} - -static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { - if (s->shutdown_complete != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE); - } - - gpr_mu_destroy(&s->mu); - - while (s->head) { - grpc_udp_listener *sp = s->head; - s->head = sp->next; - gpr_free(sp); - } - - if (s->socket_factory) { - grpc_socket_factory_unref(s->socket_factory); - } - - gpr_free(s); -} - -static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, - grpc_error *error) { - grpc_udp_server *s = (grpc_udp_server *)server; - gpr_mu_lock(&s->mu); - s->destroyed_ports++; - if (s->destroyed_ports == s->nports) { - gpr_mu_unlock(&s->mu); - finish_shutdown(exec_ctx, s); - } else { - gpr_mu_unlock(&s->mu); - } -} - -/* called when all listening endpoints have been shutdown, so no further - events will be received on them - at this point it's safe to destroy - things */ -static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) { - /* delete ALL the things */ - gpr_mu_lock(&s->mu); - - GPR_ASSERT(s->shutdown); - - if (s->head) { - grpc_udp_listener *sp; - for (sp = s->head; sp; sp = sp->next) { - grpc_unlink_if_unix_domain_socket(&sp->addr); - - GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s, - grpc_schedule_on_exec_ctx); - if (!sp->orphan_notified) { - /* Call the orphan_cb to signal that the FD is about to be closed and - * should no longer be used. 
Because at this point, all listening ports - * have been shutdown already, no need to shutdown again.*/ - GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, dummy_cb, sp->emfd, - grpc_schedule_on_exec_ctx); - GPR_ASSERT(sp->orphan_cb); - sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure, - sp->server->user_data); - } - grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL, - false /* already_closed */, "udp_listener_shutdown"); - } - gpr_mu_unlock(&s->mu); - } else { - gpr_mu_unlock(&s->mu); - finish_shutdown(exec_ctx, s); - } -} - -void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, - grpc_closure *on_done) { - grpc_udp_listener *sp; - gpr_mu_lock(&s->mu); - - GPR_ASSERT(!s->shutdown); - s->shutdown = 1; - - s->shutdown_complete = on_done; - - /* shutdown all fd's */ - if (s->active_ports) { - for (sp = s->head; sp; sp = sp->next) { - GPR_ASSERT(sp->orphan_cb); - struct shutdown_fd_args *args = - (struct shutdown_fd_args *)gpr_malloc(sizeof(*args)); - args->fd = sp->emfd; - args->server_mu = &s->mu; - GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args, - grpc_schedule_on_exec_ctx); - sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure, - sp->server->user_data); - sp->orphan_notified = true; - } - gpr_mu_unlock(&s->mu); - } else { - gpr_mu_unlock(&s->mu); - deactivated_all_ports(exec_ctx, s); - } -} - -static int bind_socket(grpc_socket_factory *socket_factory, int sockfd, - const grpc_resolved_address *addr) { - return (socket_factory != NULL) - ? grpc_socket_factory_bind(socket_factory, sockfd, addr) - : bind(sockfd, (struct sockaddr *)addr->addr, - (socklen_t)addr->len); -} - -/* Prepare a recently-created socket for listening. */ -static int prepare_socket(grpc_socket_factory *socket_factory, int fd, - const grpc_resolved_address *addr) { - grpc_resolved_address sockname_temp; - struct sockaddr *addr_ptr = (struct sockaddr *)addr->addr; - /* Set send/receive socket buffers to 1 MB */ - int buffer_size_bytes = 1024 * 1024; - - if (fd < 0) { - goto error; - } - - if (grpc_set_socket_nonblocking(fd, 1) != GRPC_ERROR_NONE) { - gpr_log(GPR_ERROR, "Unable to set nonblocking %d: %s", fd, strerror(errno)); - goto error; - } - if (grpc_set_socket_cloexec(fd, 1) != GRPC_ERROR_NONE) { - gpr_log(GPR_ERROR, "Unable to set cloexec %d: %s", fd, strerror(errno)); - goto error; - } - - if (grpc_set_socket_ip_pktinfo_if_possible(fd) != GRPC_ERROR_NONE) { - gpr_log(GPR_ERROR, "Unable to set ip_pktinfo."); - goto error; - } else if (addr_ptr->sa_family == AF_INET6) { - if (grpc_set_socket_ipv6_recvpktinfo_if_possible(fd) != GRPC_ERROR_NONE) { - gpr_log(GPR_ERROR, "Unable to set ipv6_recvpktinfo."); - goto error; - } - } - - GPR_ASSERT(addr->len < ~(socklen_t)0); - if (bind_socket(socket_factory, fd, addr) < 0) { - char *addr_str; - grpc_sockaddr_to_string(&addr_str, addr, 0); - gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno)); - gpr_free(addr_str); - goto error; - } - - sockname_temp.len = sizeof(struct sockaddr_storage); - - if (getsockname(fd, (struct sockaddr *)sockname_temp.addr, - (socklen_t *)&sockname_temp.len) < 0) { - goto error; - } - - if (grpc_set_socket_sndbuf(fd, buffer_size_bytes) != GRPC_ERROR_NONE) { - gpr_log(GPR_ERROR, "Failed to set send buffer size to %d bytes", - buffer_size_bytes); - goto error; - } - - if (grpc_set_socket_rcvbuf(fd, buffer_size_bytes) != GRPC_ERROR_NONE) { - gpr_log(GPR_ERROR, "Failed to set receive buffer size to %d bytes", - buffer_size_bytes); - goto error; - } - - return 
grpc_sockaddr_get_port(&sockname_temp); - -error: - if (fd >= 0) { - close(fd); - } - return -1; -} - -/* event manager callback when reads are ready */ -static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_udp_listener *sp = (grpc_udp_listener *)arg; - - gpr_mu_lock(&sp->server->mu); - if (error != GRPC_ERROR_NONE) { - if (0 == --sp->server->active_ports && sp->server->shutdown) { - gpr_mu_unlock(&sp->server->mu); - deactivated_all_ports(exec_ctx, sp->server); - } else { - gpr_mu_unlock(&sp->server->mu); - } - return; - } - - /* Tell the registered callback that data is available to read. */ - GPR_ASSERT(sp->read_cb); - sp->read_cb(exec_ctx, sp->emfd, sp->server->user_data); - - /* Re-arm the notification event so we get another chance to read. */ - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); - gpr_mu_unlock(&sp->server->mu); -} - -static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_udp_listener *sp = (grpc_udp_listener *)arg; - - gpr_mu_lock(&(sp->server->mu)); - if (error != GRPC_ERROR_NONE) { - if (0 == --sp->server->active_ports && sp->server->shutdown) { - gpr_mu_unlock(&sp->server->mu); - deactivated_all_ports(exec_ctx, sp->server); - } else { - gpr_mu_unlock(&sp->server->mu); - } - return; - } - - /* Tell the registered callback that the socket is writeable. */ - GPR_ASSERT(sp->write_cb); - sp->write_cb(exec_ctx, sp->emfd, sp->server->user_data); - - /* Re-arm the notification event so we get another chance to write. */ - grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure); - gpr_mu_unlock(&sp->server->mu); -} - -static int add_socket_to_server(grpc_udp_server *s, int fd, - const grpc_resolved_address *addr, - grpc_udp_server_read_cb read_cb, - grpc_udp_server_write_cb write_cb, - grpc_udp_server_orphan_cb orphan_cb) { - grpc_udp_listener *sp; - int port; - char *addr_str; - char *name; - - port = prepare_socket(s->socket_factory, fd, addr); - if (port >= 0) { - grpc_sockaddr_to_string(&addr_str, addr, 1); - gpr_asprintf(&name, "udp-server-listener:%s", addr_str); - gpr_free(addr_str); - gpr_mu_lock(&s->mu); - s->nports++; - sp = (grpc_udp_listener *)gpr_malloc(sizeof(grpc_udp_listener)); - sp->next = NULL; - if (s->head == NULL) { - s->head = sp; - } else { - s->tail->next = sp; - } - s->tail = sp; - sp->server = s; - sp->fd = fd; - sp->emfd = grpc_fd_create(fd, name); - memcpy(&sp->addr, addr, sizeof(grpc_resolved_address)); - sp->read_cb = read_cb; - sp->write_cb = write_cb; - sp->orphan_cb = orphan_cb; - sp->orphan_notified = false; - GPR_ASSERT(sp->emfd); - gpr_mu_unlock(&s->mu); - gpr_free(name); - } - - return port; -} - -int grpc_udp_server_add_port(grpc_udp_server *s, - const grpc_resolved_address *addr, - grpc_udp_server_read_cb read_cb, - grpc_udp_server_write_cb write_cb, - grpc_udp_server_orphan_cb orphan_cb) { - grpc_udp_listener *sp; - int allocated_port1 = -1; - int allocated_port2 = -1; - int fd; - grpc_dualstack_mode dsmode; - grpc_resolved_address addr6_v4mapped; - grpc_resolved_address wild4; - grpc_resolved_address wild6; - grpc_resolved_address addr4_copy; - grpc_resolved_address *allocated_addr = NULL; - grpc_resolved_address sockname_temp; - int port; - - /* Check if this is a wildcard port, and if so, try to keep the port the same - as some previously created listener. 
*/ - if (grpc_sockaddr_get_port(addr) == 0) { - for (sp = s->head; sp; sp = sp->next) { - sockname_temp.len = sizeof(struct sockaddr_storage); - if (0 == getsockname(sp->fd, (struct sockaddr *)sockname_temp.addr, - (socklen_t *)&sockname_temp.len)) { - port = grpc_sockaddr_get_port(&sockname_temp); - if (port > 0) { - allocated_addr = (grpc_resolved_address *)gpr_malloc( - sizeof(grpc_resolved_address)); - memcpy(allocated_addr, addr, sizeof(grpc_resolved_address)); - grpc_sockaddr_set_port(allocated_addr, port); - addr = allocated_addr; - break; - } - } - } - } - - if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) { - addr = &addr6_v4mapped; - } - - /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */ - if (grpc_sockaddr_is_wildcard(addr, &port)) { - grpc_sockaddr_make_wildcards(port, &wild4, &wild6); - - /* Try listening on IPv6 first. */ - addr = &wild6; - // TODO(rjshade): Test and propagate the returned grpc_error*: - GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory( - s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd)); - allocated_port1 = - add_socket_to_server(s, fd, addr, read_cb, write_cb, orphan_cb); - if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) { - goto done; - } - - /* If we didn't get a dualstack socket, also listen on 0.0.0.0. */ - if (port == 0 && allocated_port1 > 0) { - grpc_sockaddr_set_port(&wild4, allocated_port1); - } - addr = &wild4; - } - - // TODO(rjshade): Test and propagate the returned grpc_error*: - GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory( - s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd)); - if (fd < 0) { - gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno)); - } - if (dsmode == GRPC_DSMODE_IPV4 && - grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) { - addr = &addr4_copy; - } - allocated_port2 = - add_socket_to_server(s, fd, addr, read_cb, write_cb, orphan_cb); - -done: - gpr_free(allocated_addr); - return allocated_port1 >= 0 ? allocated_port1 : allocated_port2; -} - -int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index) { - grpc_udp_listener *sp; - if (port_index >= s->nports) { - return -1; - } - - for (sp = s->head; sp && port_index != 0; sp = sp->next) { - --port_index; - } - return sp->fd; -} - -void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s, - grpc_pollset **pollsets, size_t pollset_count, - void *user_data) { - size_t i; - gpr_mu_lock(&s->mu); - grpc_udp_listener *sp; - GPR_ASSERT(s->active_ports == 0); - s->pollsets = pollsets; - s->user_data = user_data; - - sp = s->head; - while (sp != NULL) { - for (i = 0; i < pollset_count; i++) { - grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd); - } - GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp, - grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure); - - GRPC_CLOSURE_INIT(&sp->write_closure, on_write, sp, - grpc_schedule_on_exec_ctx); - grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure); - - /* Registered for both read and write callbacks: increment active_ports - * twice to account for this, and delay free-ing of memory until both - * on_read and on_write have fired. 
*/ - s->active_ports += 2; - - sp = sp->next; - } - - gpr_mu_unlock(&s->mu); -} - -#endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/udp_server.cc b/Sources/CgRPC/src/core/lib/iomgr/udp_server.cc new file mode 100644 index 000000000..51d17eb17 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/iomgr/udp_server.cc @@ -0,0 +1,747 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* FIXME: "posix" files shouldn't be depending on _GNU_SOURCE */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifndef SO_RXQ_OVFL +#define SO_RXQ_OVFL 40 +#endif + +#include + +#include "src/core/lib/iomgr/port.h" + +#ifdef GRPC_POSIX_SOCKET + +#include "src/core/lib/iomgr/udp_server.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/inlined_vector.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/ev_posix.h" +#include "src/core/lib/iomgr/executor.h" +#include "src/core/lib/iomgr/resolve_address.h" +#include "src/core/lib/iomgr/sockaddr.h" +#include "src/core/lib/iomgr/sockaddr_utils.h" +#include "src/core/lib/iomgr/socket_factory_posix.h" +#include "src/core/lib/iomgr/socket_utils_posix.h" +#include "src/core/lib/iomgr/unix_sockets_posix.h" + +/* A listener which implements basic features of Listening on a port for + * I/O events*/ +class GrpcUdpListener { + public: + GrpcUdpListener(grpc_udp_server* server, int fd, + const grpc_resolved_address* addr); + ~GrpcUdpListener(); + + /* Called when grpc server starts to listening on the grpc_fd. */ + void StartListening(grpc_pollset** pollsets, size_t pollset_count, + GrpcUdpHandlerFactory* handler_factory); + + /* Called when data is available to read from the socket. + * Return true if there is more data to read from fd. */ + void OnRead(grpc_error* error, void* do_read_arg); + + /* Called when the socket is writeable. The given closure should be scheduled + * when the socket becomes blocked next time. */ + void OnCanWrite(grpc_error* error, void* do_write_arg); + + /* Called when the grpc_fd is about to be orphaned (and the FD closed). */ + void OnFdAboutToOrphan(); + + /* Called to orphan fd of this listener.*/ + void OrphanFd(); + + /* Called when this listener is going to be destroyed. */ + void OnDestroy(); + + int fd() const { return fd_; } + + protected: + grpc_fd* emfd() const { return emfd_; } + + gpr_mu* mutex() { return &mutex_; } + + private: + /* event manager callback when reads are ready */ + static void on_read(void* arg, grpc_error* error); + static void on_write(void* arg, grpc_error* error); + + static void do_read(void* arg, grpc_error* error); + static void do_write(void* arg, grpc_error* error); + // Wrapper of grpc_fd_notify_on_write() with a grpc_closure callback + // interface. 
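The write-side callbacks declared here hand the handler a closure to schedule once the socket would block again, rather than the listener unconditionally re-arming itself. A rough standalone sketch of that contract — FakeSocket and WriteUntilBlocked are invented stand-ins for the real GrpcUdpHandler::OnCanWrite() and grpc_fd_notify_on_write():

#include <functional>
#include <queue>
#include <string>

struct FakeSocket {
  // Returns false once the kernel buffer is "full" (i.e. would block).
  bool TryWrite(const std::string& datagram) {
    (void)datagram;
    return --budget_ >= 0;
  }
  int budget_ = 4;
};

// Stands in for GrpcUdpHandler::OnCanWrite(user_data, notify_on_write_closure).
void WriteUntilBlocked(FakeSocket* socket, std::queue<std::string>* pending,
                       std::function<void()> notify_on_write_closure) {
  while (!pending->empty()) {
    if (!socket->TryWrite(pending->front())) {
      // Socket is blocked again: hand control back so the listener can
      // re-register interest and wake us on the next writable event.
      notify_on_write_closure();
      return;
    }
    pending->pop();
  }
}

In the real listener that closure is notify_on_write_closure_, which fd_notify_on_write_wrapper() (declared just below) turns back into a grpc_fd_notify_on_write() call on the listener's grpc_fd.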
+ static void fd_notify_on_write_wrapper(void* arg, grpc_error* error); + + static void shutdown_fd(void* args, grpc_error* error); + + int fd_; + grpc_fd* emfd_; + grpc_udp_server* server_; + grpc_resolved_address addr_; + grpc_closure read_closure_; + grpc_closure write_closure_; + // To be called when corresponding QuicGrpcServer closes all active + // connections. + grpc_closure orphan_fd_closure_; + grpc_closure destroyed_closure_; + // To be scheduled on another thread to actually read/write. + grpc_closure do_read_closure_; + grpc_closure do_write_closure_; + grpc_closure notify_on_write_closure_; + // True if orphan_cb is trigered. + bool orphan_notified_; + // True if grpc_fd_notify_on_write() is called after on_write() call. + bool notify_on_write_armed_; + // True if fd has been shutdown. + bool already_shutdown_; + // Object actually handles I/O events. Assigned in StartListening(). + GrpcUdpHandler* udp_handler_ = nullptr; + // To be notified on destruction. + GrpcUdpHandlerFactory* handler_factory_ = nullptr; + // Required to access above fields. + gpr_mu mutex_; +}; + +GrpcUdpListener::GrpcUdpListener(grpc_udp_server* server, int fd, + const grpc_resolved_address* addr) + : fd_(fd), + server_(server), + orphan_notified_(false), + already_shutdown_(false) { + char* addr_str; + char* name; + grpc_sockaddr_to_string(&addr_str, addr, 1); + gpr_asprintf(&name, "udp-server-listener:%s", addr_str); + gpr_free(addr_str); + emfd_ = grpc_fd_create(fd, name); + memcpy(&addr_, addr, sizeof(grpc_resolved_address)); + GPR_ASSERT(emfd_); + gpr_free(name); + gpr_mu_init(&mutex_); +} + +GrpcUdpListener::~GrpcUdpListener() { gpr_mu_destroy(&mutex_); } + +/* the overall server */ +struct grpc_udp_server { + gpr_mu mu; + + /* factory to use for creating and binding sockets, or NULL */ + grpc_socket_factory* socket_factory; + + /* active port count: how many ports are actually still listening */ + size_t active_ports; + /* destroyed port count: how many ports are completely destroyed */ + size_t destroyed_ports; + + /* is this server shutting down? (boolean) */ + int shutdown; + + /* An array of listeners */ + grpc_core::InlinedVector listeners; + + /* factory for use to create udp listeners */ + GrpcUdpHandlerFactory* handler_factory; + + /* shutdown callback */ + grpc_closure* shutdown_complete; + + /* all pollsets interested in new connections */ + grpc_pollset** pollsets; + /* number of pollsets in the pollsets array */ + size_t pollset_count; + /* opaque object to pass to callbacks */ + void* user_data; + + /* latch has_so_reuseport during server creation */ + bool so_reuseport; +}; + +static grpc_socket_factory* get_socket_factory(const grpc_channel_args* args) { + if (args) { + const grpc_arg* arg = grpc_channel_args_find(args, GRPC_ARG_SOCKET_FACTORY); + if (arg) { + GPR_ASSERT(arg->type == GRPC_ARG_POINTER); + return static_cast(arg->value.pointer.p); + } + } + return nullptr; +} + +grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args) { + grpc_udp_server* s = grpc_core::New(); + gpr_mu_init(&s->mu); + s->socket_factory = get_socket_factory(args); + if (s->socket_factory) { + grpc_socket_factory_ref(s->socket_factory); + } + s->active_ports = 0; + s->destroyed_ports = 0; + s->shutdown = 0; + s->so_reuseport = grpc_is_socket_reuse_port_supported(); + return s; +} + +// static +void GrpcUdpListener::shutdown_fd(void* args, grpc_error* error) { + if (args == nullptr) { + // No-op if shutdown args are null. 
+ return; + } + auto sp = static_cast(args); + gpr_mu_lock(sp->mutex()); + gpr_log(GPR_DEBUG, "shutdown fd %d", sp->fd_); + grpc_fd_shutdown(sp->emfd_, GRPC_ERROR_REF(error)); + sp->already_shutdown_ = true; + if (!sp->notify_on_write_armed_) { + // Re-arm write notification to notify listener with error. This is + // necessary to decrement active_ports. + sp->notify_on_write_armed_ = true; + grpc_fd_notify_on_write(sp->emfd_, &sp->write_closure_); + } + gpr_mu_unlock(sp->mutex()); +} + +static void finish_shutdown(grpc_udp_server* s) { + if (s->shutdown_complete != nullptr) { + GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE); + } + + gpr_mu_destroy(&s->mu); + + gpr_log(GPR_DEBUG, "Destroy all listeners."); + for (size_t i = 0; i < s->listeners.size(); ++i) { + s->listeners[i].OnDestroy(); + } + + if (s->socket_factory) { + grpc_socket_factory_unref(s->socket_factory); + } + + grpc_core::Delete(s); +} + +static void destroyed_port(void* server, grpc_error* error) { + grpc_udp_server* s = static_cast(server); + gpr_mu_lock(&s->mu); + s->destroyed_ports++; + if (s->destroyed_ports == s->listeners.size()) { + gpr_mu_unlock(&s->mu); + finish_shutdown(s); + } else { + gpr_mu_unlock(&s->mu); + } +} + +/* called when all listening endpoints have been shutdown, so no further + events will be received on them - at this point it's safe to destroy + things */ +static void deactivated_all_ports(grpc_udp_server* s) { + /* delete ALL the things */ + gpr_mu_lock(&s->mu); + + GPR_ASSERT(s->shutdown); + + if (s->listeners.size() == 0) { + gpr_mu_unlock(&s->mu); + finish_shutdown(s); + return; + } + for (size_t i = 0; i < s->listeners.size(); ++i) { + s->listeners[i].OrphanFd(); + } + gpr_mu_unlock(&s->mu); +} + +void GrpcUdpListener::OrphanFd() { + gpr_log(GPR_DEBUG, "Orphan fd %d, emfd %p", fd_, emfd_); + grpc_unlink_if_unix_domain_socket(&addr_); + + GRPC_CLOSURE_INIT(&destroyed_closure_, destroyed_port, server_, + grpc_schedule_on_exec_ctx); + /* Because at this point, all listening sockets have been shutdown already, no + * need to call OnFdAboutToOrphan() to notify the handler again. */ + grpc_fd_orphan(emfd_, &destroyed_closure_, nullptr, + false /* already_closed */, "udp_listener_shutdown"); +} + +void grpc_udp_server_destroy(grpc_udp_server* s, grpc_closure* on_done) { + gpr_mu_lock(&s->mu); + + GPR_ASSERT(!s->shutdown); + s->shutdown = 1; + + s->shutdown_complete = on_done; + + gpr_log(GPR_DEBUG, "start to destroy udp_server"); + /* shutdown all fd's */ + if (s->active_ports) { + for (size_t i = 0; i < s->listeners.size(); ++i) { + GrpcUdpListener* sp = &s->listeners[i]; + sp->OnFdAboutToOrphan(); + } + gpr_mu_unlock(&s->mu); + } else { + gpr_mu_unlock(&s->mu); + deactivated_all_ports(s); + } +} + +void GrpcUdpListener::OnFdAboutToOrphan() { + gpr_mu_lock(&mutex_); + grpc_unlink_if_unix_domain_socket(&addr_); + + GRPC_CLOSURE_INIT(&destroyed_closure_, destroyed_port, server_, + grpc_schedule_on_exec_ctx); + if (!orphan_notified_ && udp_handler_ != nullptr) { + /* Singals udp_handler that the FD is about to be closed and + * should no longer be used. */ + GRPC_CLOSURE_INIT(&orphan_fd_closure_, shutdown_fd, this, + grpc_schedule_on_exec_ctx); + gpr_log(GPR_DEBUG, "fd %d about to be orphaned", fd_); + udp_handler_->OnFdAboutToOrphan(&orphan_fd_closure_, server_->user_data); + orphan_notified_ = true; + } + gpr_mu_unlock(&mutex_); +} + +static int bind_socket(grpc_socket_factory* socket_factory, int sockfd, + const grpc_resolved_address* addr) { + return (socket_factory != nullptr) + ? 
grpc_socket_factory_bind(socket_factory, sockfd, addr) + : bind(sockfd, + reinterpret_cast( + const_cast(addr->addr)), + addr->len); +} + +/* Prepare a recently-created socket for listening. */ +static int prepare_socket(grpc_socket_factory* socket_factory, int fd, + const grpc_resolved_address* addr, int rcv_buf_size, + int snd_buf_size, bool so_reuseport) { + grpc_resolved_address sockname_temp; + grpc_sockaddr* addr_ptr = + reinterpret_cast(const_cast(addr->addr)); + + if (fd < 0) { + goto error; + } + + if (grpc_set_socket_nonblocking(fd, 1) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Unable to set nonblocking %d: %s", fd, strerror(errno)); + goto error; + } + if (grpc_set_socket_cloexec(fd, 1) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Unable to set cloexec %d: %s", fd, strerror(errno)); + goto error; + } + + if (grpc_set_socket_ip_pktinfo_if_possible(fd) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Unable to set ip_pktinfo."); + goto error; + } else if (addr_ptr->sa_family == AF_INET6) { + if (grpc_set_socket_ipv6_recvpktinfo_if_possible(fd) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Unable to set ipv6_recvpktinfo."); + goto error; + } + } + + if (grpc_set_socket_sndbuf(fd, snd_buf_size) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Failed to set send buffer size to %d bytes", + snd_buf_size); + goto error; + } + + if (grpc_set_socket_rcvbuf(fd, rcv_buf_size) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Failed to set receive buffer size to %d bytes", + rcv_buf_size); + goto error; + } + + { + int get_overflow = 1; + if (0 != setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &get_overflow, + sizeof(get_overflow))) { + gpr_log(GPR_INFO, "Failed to set socket overflow support"); + } + } + + if (so_reuseport && !grpc_is_unix_socket(addr) && + grpc_set_socket_reuse_port(fd, 1) != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Failed to set SO_REUSEPORT for fd %d", fd); + goto error; + } + + if (bind_socket(socket_factory, fd, addr) < 0) { + char* addr_str; + grpc_sockaddr_to_string(&addr_str, addr, 0); + gpr_log(GPR_ERROR, "bind addr=%s: %s", addr_str, strerror(errno)); + gpr_free(addr_str); + goto error; + } + + sockname_temp.len = static_cast(sizeof(struct sockaddr_storage)); + + if (getsockname(fd, reinterpret_cast(sockname_temp.addr), + &sockname_temp.len) < 0) { + gpr_log(GPR_ERROR, "Unable to get the address socket %d is bound to: %s", + fd, strerror(errno)); + goto error; + } + + return grpc_sockaddr_get_port(&sockname_temp); + +error: + if (fd >= 0) { + close(fd); + } + return -1; +} + +// static +void GrpcUdpListener::do_read(void* arg, grpc_error* error) { + GrpcUdpListener* sp = static_cast(arg); + GPR_ASSERT(error == GRPC_ERROR_NONE); + /* TODO: the reason we hold server->mu here is merely to prevent fd + * shutdown while we are reading. However, it blocks do_write(). Switch to + * read lock if available. */ + gpr_mu_lock(sp->mutex()); + /* Tell the registered callback that data is available to read. */ + if (!sp->already_shutdown_ && sp->udp_handler_->Read()) { + /* There maybe more packets to read. Schedule read_more_cb_ closure to run + * after finishing this event loop. */ + GRPC_CLOSURE_SCHED(&sp->do_read_closure_, GRPC_ERROR_NONE); + } else { + /* Finish reading all the packets, re-arm the notification event so we can + * get another chance to read. Or fd already shutdown, re-arm to get a + * notification with shutdown error. 
*/ + grpc_fd_notify_on_read(sp->emfd_, &sp->read_closure_); + } + gpr_mu_unlock(sp->mutex()); +} + +// static +void GrpcUdpListener::on_read(void* arg, grpc_error* error) { + GrpcUdpListener* sp = static_cast(arg); + sp->OnRead(error, arg); +} + +void GrpcUdpListener::OnRead(grpc_error* error, void* do_read_arg) { + if (error != GRPC_ERROR_NONE) { + gpr_mu_lock(&server_->mu); + if (0 == --server_->active_ports && server_->shutdown) { + gpr_mu_unlock(&server_->mu); + deactivated_all_ports(server_); + } else { + gpr_mu_unlock(&server_->mu); + } + return; + } + + /* Read once. If there is more data to read, off load the work to another + * thread to finish. */ + if (udp_handler_->Read()) { + /* There maybe more packets to read. Schedule read_more_cb_ closure to run + * after finishing this event loop. */ + GRPC_CLOSURE_INIT(&do_read_closure_, do_read, do_read_arg, + grpc_executor_scheduler(GRPC_EXECUTOR_LONG)); + GRPC_CLOSURE_SCHED(&do_read_closure_, GRPC_ERROR_NONE); + } else { + /* Finish reading all the packets, re-arm the notification event so we can + * get another chance to read. Or fd already shutdown, re-arm to get a + * notification with shutdown error. */ + grpc_fd_notify_on_read(emfd_, &read_closure_); + } +} + +// static +// Wrapper of grpc_fd_notify_on_write() with a grpc_closure callback interface. +void GrpcUdpListener::fd_notify_on_write_wrapper(void* arg, grpc_error* error) { + GrpcUdpListener* sp = static_cast(arg); + gpr_mu_lock(sp->mutex()); + if (!sp->notify_on_write_armed_) { + grpc_fd_notify_on_write(sp->emfd_, &sp->write_closure_); + sp->notify_on_write_armed_ = true; + } + gpr_mu_unlock(sp->mutex()); +} + +// static +void GrpcUdpListener::do_write(void* arg, grpc_error* error) { + GrpcUdpListener* sp = static_cast(arg); + gpr_mu_lock(sp->mutex()); + if (sp->already_shutdown_) { + // If fd has been shutdown, don't write any more and re-arm notification. + grpc_fd_notify_on_write(sp->emfd_, &sp->write_closure_); + } else { + sp->notify_on_write_armed_ = false; + /* Tell the registered callback that the socket is writeable. */ + GPR_ASSERT(error == GRPC_ERROR_NONE); + GRPC_CLOSURE_INIT(&sp->notify_on_write_closure_, fd_notify_on_write_wrapper, + arg, grpc_schedule_on_exec_ctx); + sp->udp_handler_->OnCanWrite(sp->server_->user_data, + &sp->notify_on_write_closure_); + } + gpr_mu_unlock(sp->mutex()); +} + +// static +void GrpcUdpListener::on_write(void* arg, grpc_error* error) { + GrpcUdpListener* sp = static_cast(arg); + sp->OnCanWrite(error, arg); +} + +void GrpcUdpListener::OnCanWrite(grpc_error* error, void* do_write_arg) { + if (error != GRPC_ERROR_NONE) { + gpr_mu_lock(&server_->mu); + if (0 == --server_->active_ports && server_->shutdown) { + gpr_mu_unlock(&server_->mu); + deactivated_all_ports(server_); + } else { + gpr_mu_unlock(&server_->mu); + } + return; + } + + /* Schedule actual write in another thread. 
*/ + GRPC_CLOSURE_INIT(&do_write_closure_, do_write, do_write_arg, + grpc_executor_scheduler(GRPC_EXECUTOR_LONG)); + + GRPC_CLOSURE_SCHED(&do_write_closure_, GRPC_ERROR_NONE); +} + +static int add_socket_to_server(grpc_udp_server* s, int fd, + const grpc_resolved_address* addr, + int rcv_buf_size, int snd_buf_size) { + gpr_log(GPR_DEBUG, "add socket %d to server", fd); + + int port = prepare_socket(s->socket_factory, fd, addr, rcv_buf_size, + snd_buf_size, s->so_reuseport); + if (port >= 0) { + gpr_mu_lock(&s->mu); + s->listeners.emplace_back(s, fd, addr); + gpr_log(GPR_DEBUG, + "add socket %d to server for port %d, %zu listener(s) in total", fd, + port, s->listeners.size()); + gpr_mu_unlock(&s->mu); + } + return port; +} + +int grpc_udp_server_add_port(grpc_udp_server* s, + const grpc_resolved_address* addr, + int rcv_buf_size, int snd_buf_size, + GrpcUdpHandlerFactory* handler_factory, + size_t num_listeners) { + if (num_listeners > 1 && !s->so_reuseport) { + gpr_log(GPR_ERROR, + "Try to have multiple listeners on same port, but SO_REUSEPORT is " + "not supported. Only create 1 listener."); + } + char* addr_str; + grpc_sockaddr_to_string(&addr_str, addr, 1); + gpr_log(GPR_DEBUG, "add address: %s to server", addr_str); + gpr_free(addr_str); + + int allocated_port1 = -1; + int allocated_port2 = -1; + int fd; + grpc_dualstack_mode dsmode; + grpc_resolved_address addr6_v4mapped; + grpc_resolved_address wild4; + grpc_resolved_address wild6; + grpc_resolved_address addr4_copy; + grpc_resolved_address* allocated_addr = nullptr; + grpc_resolved_address sockname_temp; + int port = 0; + + /* Check if this is a wildcard port, and if so, try to keep the port the same + as some previously created listener. */ + if (grpc_sockaddr_get_port(addr) == 0) { + /* Loop through existing listeners to find the port in use. */ + for (size_t i = 0; i < s->listeners.size(); ++i) { + sockname_temp.len = + static_cast(sizeof(struct sockaddr_storage)); + if (0 == getsockname(s->listeners[i].fd(), + reinterpret_cast(sockname_temp.addr), + &sockname_temp.len)) { + port = grpc_sockaddr_get_port(&sockname_temp); + if (port > 0) { + /* Found such a port, update |addr| to reflects this port. */ + allocated_addr = static_cast( + gpr_malloc(sizeof(grpc_resolved_address))); + memcpy(allocated_addr, addr, sizeof(grpc_resolved_address)); + grpc_sockaddr_set_port(allocated_addr, port); + addr = allocated_addr; + break; + } + } + } + } + + if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) { + addr = &addr6_v4mapped; + } + + s->handler_factory = handler_factory; + for (size_t i = 0; i < num_listeners; ++i) { + /* Treat :: or 0.0.0.0 as a family-agnostic wildcard. */ + if (grpc_sockaddr_is_wildcard(addr, &port)) { + grpc_sockaddr_make_wildcards(port, &wild4, &wild6); + + /* Try listening on IPv6 first. */ + addr = &wild6; + // TODO(rjshade): Test and propagate the returned grpc_error*: + GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory( + s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd)); + allocated_port1 = + add_socket_to_server(s, fd, addr, rcv_buf_size, snd_buf_size); + if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) { + if (port == 0) { + /* This is the first time to bind to |addr|. If its port is still + * wildcard port, update |addr| with the ephermeral port returned by + * kernel. Thus |addr| can have a specific port in following + * iterations. 
*/ + grpc_sockaddr_set_port(addr, allocated_port1); + port = allocated_port1; + } else if (allocated_port1 >= 0) { + /* The following sucessfully created socket should have same port as + * the first one. */ + GPR_ASSERT(port == allocated_port1); + } + /* A dualstack socket is created, no need to create corresponding IPV4 + * socket. */ + continue; + } + + /* If we didn't get a dualstack socket, also listen on 0.0.0.0. */ + if (port == 0 && allocated_port1 > 0) { + /* |port| hasn't been assigned to an emphemeral port yet, |wild4| must + * have a wildcard port. Update it with the emphemeral port created + * during binding.*/ + grpc_sockaddr_set_port(&wild4, allocated_port1); + port = allocated_port1; + } + /* |wild4| should have been updated with an emphemeral port by now. Use + * this IPV4 address to create a IPV4 socket. */ + addr = &wild4; + } + + // TODO(rjshade): Test and propagate the returned grpc_error*: + GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory( + s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd)); + if (fd < 0) { + gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno)); + } + if (dsmode == GRPC_DSMODE_IPV4 && + grpc_sockaddr_is_v4mapped(addr, &addr4_copy)) { + addr = &addr4_copy; + } + allocated_port2 = + add_socket_to_server(s, fd, addr, rcv_buf_size, snd_buf_size); + if (port == 0) { + /* Update |addr| with the ephermeral port returned by kernel. So |addr| + * can have a specific port in following iterations. */ + grpc_sockaddr_set_port(addr, allocated_port2); + port = allocated_port2; + } else if (allocated_port2 >= 0) { + GPR_ASSERT(port == allocated_port2); + } + } + + gpr_free(allocated_addr); + return port; +} + +int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index) { + if (port_index >= s->listeners.size()) { + return -1; + } + + return s->listeners[port_index].fd(); +} + +void grpc_udp_server_start(grpc_udp_server* s, grpc_pollset** pollsets, + size_t pollset_count, void* user_data) { + gpr_log(GPR_DEBUG, "grpc_udp_server_start"); + gpr_mu_lock(&s->mu); + GPR_ASSERT(s->active_ports == 0); + s->pollsets = pollsets; + s->user_data = user_data; + + for (size_t i = 0; i < s->listeners.size(); ++i) { + s->listeners[i].StartListening(pollsets, pollset_count, s->handler_factory); + } + + gpr_mu_unlock(&s->mu); +} + +void GrpcUdpListener::StartListening(grpc_pollset** pollsets, + size_t pollset_count, + GrpcUdpHandlerFactory* handler_factory) { + gpr_mu_lock(&mutex_); + handler_factory_ = handler_factory; + udp_handler_ = handler_factory->CreateUdpHandler(emfd_, server_->user_data); + for (size_t i = 0; i < pollset_count; i++) { + grpc_pollset_add_fd(pollsets[i], emfd_); + } + GRPC_CLOSURE_INIT(&read_closure_, on_read, this, grpc_schedule_on_exec_ctx); + grpc_fd_notify_on_read(emfd_, &read_closure_); + + GRPC_CLOSURE_INIT(&write_closure_, on_write, this, grpc_schedule_on_exec_ctx); + notify_on_write_armed_ = true; + grpc_fd_notify_on_write(emfd_, &write_closure_); + + /* Registered for both read and write callbacks: increment active_ports + * twice to account for this, and delay free-ing of memory until both + * on_read and on_write have fired. 
*/ + server_->active_ports += 2; + gpr_mu_unlock(&mutex_); +} + +void GrpcUdpListener::OnDestroy() { + if (udp_handler_ != nullptr) { + handler_factory_->DestroyUdpHandler(udp_handler_); + } +} + +#endif diff --git a/Sources/CgRPC/src/core/lib/iomgr/udp_server.h b/Sources/CgRPC/src/core/lib/iomgr/udp_server.h index 881468ea2..3656791c1 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/udp_server.h +++ b/Sources/CgRPC/src/core/lib/iomgr/udp_server.h @@ -19,6 +19,9 @@ #ifndef GRPC_CORE_LIB_IOMGR_UDP_SERVER_H #define GRPC_CORE_LIB_IOMGR_UDP_SERVER_H +#include + +#include "src/core/lib/gprpp/abstract.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/ev_posix.h" #include "src/core/lib/iomgr/resolve_address.h" @@ -30,47 +33,75 @@ struct grpc_server; /* Forward decl of grpc_udp_server */ typedef struct grpc_udp_server grpc_udp_server; -/* Called when data is available to read from the socket. */ -typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd, - void *user_data); +/* An interface associated with a socket. udp server delivers I/O event on that + * socket to the subclass of this interface which is created through + * GrpcUdpHandlerFactory. + * Its implementation should do the real IO work, e.g. read packet and write. */ +class GrpcUdpHandler { + public: + GrpcUdpHandler(grpc_fd* emfd, void* user_data) {} + virtual ~GrpcUdpHandler() {} + + // Interfaces to be implemented by subclasses to do the actual setup/tear down + // or I/O. + + // Called when data is available to read from the socket. Returns true if + // there is more data to read after this call. + virtual bool Read() GRPC_ABSTRACT; + // Called when socket becomes write unblocked. The given closure should be + // scheduled when the socket becomes blocked next time. + virtual void OnCanWrite(void* user_data, + grpc_closure* notify_on_write_closure) GRPC_ABSTRACT; + // Called before the gRPC FD is orphaned. Notify udp server to continue + // orphaning fd by scheduling the given closure, afterwards the associated fd + // will be closed. + virtual void OnFdAboutToOrphan(grpc_closure* orphan_fd_closure, + void* user_data) GRPC_ABSTRACT; -/* Called when the socket is writeable. */ -typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx *exec_ctx, grpc_fd *emfd, - void *user_data); + GRPC_ABSTRACT_BASE_CLASS +}; -/* Called when the grpc_fd is about to be orphaned (and the FD closed). */ -typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx *exec_ctx, - grpc_fd *emfd, - grpc_closure *shutdown_fd_callback, - void *user_data); +class GrpcUdpHandlerFactory { + public: + virtual ~GrpcUdpHandlerFactory() {} + /* Called when start to listen on a socket. + * Return an instance of the implementation of GrpcUdpHandler interface which + * will process I/O events for this socket from now on. */ + virtual GrpcUdpHandler* CreateUdpHandler(grpc_fd* emfd, + void* user_data) GRPC_ABSTRACT; + virtual void DestroyUdpHandler(GrpcUdpHandler* handler) GRPC_ABSTRACT; + + GRPC_ABSTRACT_BASE_CLASS +}; /* Create a server, initially not bound to any ports */ -grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args); +grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args); /* Start listening to bound ports. user_data is passed to callbacks. 
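To make the new C++ surface concrete, here is a minimal sketch of a GrpcUdpHandler/GrpcUdpHandlerFactory pair that the udp server above would drive. Everything prefixed Example is hypothetical and not part of this patch; only the interface names, GRPC_CLOSURE_SCHED and GRPC_ERROR_NONE come from the surrounding code.

#include "src/core/lib/iomgr/udp_server.h"

class ExampleUdpHandler : public GrpcUdpHandler {
 public:
  ExampleUdpHandler(grpc_fd* emfd, void* user_data)
      : GrpcUdpHandler(emfd, user_data) {}

  // Drain what is currently readable; returning true tells the server that
  // more packets may be pending, so it schedules another pass on the
  // long-running executor instead of re-arming the read notification.
  bool Read() override { return false; }

  // The socket became writable: perform the write, then schedule the given
  // closure so the server re-arms notify-on-write for the next wakeup.
  void OnCanWrite(void* /*user_data*/,
                  grpc_closure* notify_on_write_closure) override {
    GRPC_CLOSURE_SCHED(notify_on_write_closure, GRPC_ERROR_NONE);
  }

  // Called before the fd is orphaned; scheduling the closure lets the server
  // finish orphaning (and eventually closing) the fd.
  void OnFdAboutToOrphan(grpc_closure* orphan_fd_closure,
                         void* /*user_data*/) override {
    GRPC_CLOSURE_SCHED(orphan_fd_closure, GRPC_ERROR_NONE);
  }
};

class ExampleUdpHandlerFactory : public GrpcUdpHandlerFactory {
 public:
  GrpcUdpHandler* CreateUdpHandler(grpc_fd* emfd, void* user_data) override {
    return new ExampleUdpHandler(emfd, user_data);  // allocation strategy is illustrative
  }
  void DestroyUdpHandler(GrpcUdpHandler* handler) override { delete handler; }
};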
*/ -void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *udp_server, - grpc_pollset **pollsets, size_t pollset_count, - void *user_data); +void grpc_udp_server_start(grpc_udp_server* udp_server, grpc_pollset** pollsets, + size_t pollset_count, void* user_data); -int grpc_udp_server_get_fd(grpc_udp_server *s, unsigned port_index); +int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index); /* Add a port to the server, returning port number on success, or negative on failure. + Create |num_listeners| sockets for given address to listen on using + SO_REUSEPORT if supported. + The :: and 0.0.0.0 wildcard addresses are treated identically, accepting - both IPv4 and IPv6 connections, but :: is the preferred style. This usually - creates one socket, but possibly two on systems which support IPv6, - but not dualstack sockets. */ + both IPv4 and IPv6 connections, but :: is the preferred style. This usually + creates |num_listeners| sockets, but possibly 2 * |num_listeners| on systems + which support IPv6, but not dualstack sockets. */ /* TODO(ctiller): deprecate this, and make grpc_udp_server_add_ports to handle all of the multiple socket port matching logic in one place */ -int grpc_udp_server_add_port(grpc_udp_server *s, - const grpc_resolved_address *addr, - grpc_udp_server_read_cb read_cb, - grpc_udp_server_write_cb write_cb, - grpc_udp_server_orphan_cb orphan_cb); - -void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *server, - grpc_closure *on_done); +int grpc_udp_server_add_port(grpc_udp_server* s, + const grpc_resolved_address* addr, + int rcv_buf_size, int snd_buf_size, + GrpcUdpHandlerFactory* handler_factory, + size_t num_listeners); + +void grpc_udp_server_destroy(grpc_udp_server* server, grpc_closure* on_done); #endif /* GRPC_CORE_LIB_IOMGR_UDP_SERVER_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.c b/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.cc similarity index 54% rename from Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.cc index 35f898f13..22fcaf57f 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.cc @@ -15,6 +15,8 @@ * limitations under the License. 
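A hedged usage sketch of the updated C entry points declared above, assuming the ExampleUdpHandlerFactory from the previous sketch and an already-resolved listening address; the buffer sizes and listener count are illustrative values, not defaults taken from the patch.

static int example_udp_server_setup(grpc_pollset** pollsets,
                                    size_t pollset_count,
                                    const grpc_resolved_address* addr,
                                    ExampleUdpHandlerFactory* factory) {
  grpc_udp_server* server = grpc_udp_server_create(/*args=*/nullptr);
  // With SO_REUSEPORT available this asks for 4 listening sockets on the
  // same port; otherwise the server logs a warning and creates only one.
  int port = grpc_udp_server_add_port(server, addr, /*rcv_buf_size=*/65536,
                                      /*snd_buf_size=*/65536, factory,
                                      /*num_listeners=*/4);
  if (port <= 0) return port;
  grpc_udp_server_start(server, pollsets, pollset_count, /*user_data=*/nullptr);
  // Later: grpc_udp_server_destroy(server, on_done_closure);
  return port;
}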
* */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_HAVE_UNIX_SOCKET @@ -30,18 +32,20 @@ #include #include -#include + +#include "src/core/lib/gpr/useful.h" void grpc_create_socketpair_if_unix(int sv[2]) { GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0); } -grpc_error *grpc_resolve_unix_domain_address(const char *name, - grpc_resolved_addresses **addrs) { - struct sockaddr_un *un; - if (strlen(name) > GPR_ARRAY_SIZE(((struct sockaddr_un *)0)->sun_path) - 1) { - char *err_msg; - grpc_error *err; +grpc_error* grpc_resolve_unix_domain_address(const char* name, + grpc_resolved_addresses** addrs) { + struct sockaddr_un* un; + if (strlen(name) > + GPR_ARRAY_SIZE(((struct sockaddr_un*)nullptr)->sun_path) - 1) { + char* err_msg; + grpc_error* err; gpr_asprintf(&err_msg, "Path name should not have more than %" PRIuPTR " characters.", GPR_ARRAY_SIZE(un->sun_path) - 1); @@ -49,30 +53,34 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name, gpr_free(err_msg); return err; } - *addrs = - (grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses)); + *addrs = static_cast( + gpr_malloc(sizeof(grpc_resolved_addresses))); (*addrs)->naddrs = 1; - (*addrs)->addrs = - (grpc_resolved_address *)gpr_malloc(sizeof(grpc_resolved_address)); - un = (struct sockaddr_un *)(*addrs)->addrs->addr; + (*addrs)->addrs = static_cast( + gpr_malloc(sizeof(grpc_resolved_address))); + un = reinterpret_cast((*addrs)->addrs->addr); un->sun_family = AF_UNIX; - strcpy(un->sun_path, name); - (*addrs)->addrs->len = strlen(un->sun_path) + sizeof(un->sun_family) + 1; + strncpy(un->sun_path, name, sizeof(un->sun_path)); + (*addrs)->addrs->len = + static_cast(strlen(un->sun_path) + sizeof(un->sun_family) + 1); return GRPC_ERROR_NONE; } -int grpc_is_unix_socket(const grpc_resolved_address *resolved_addr) { - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; +int grpc_is_unix_socket(const grpc_resolved_address* resolved_addr) { + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); return addr->sa_family == AF_UNIX; } void grpc_unlink_if_unix_domain_socket( - const grpc_resolved_address *resolved_addr) { - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; + const grpc_resolved_address* resolved_addr) { + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); if (addr->sa_family != AF_UNIX) { return; } - struct sockaddr_un *un = (struct sockaddr_un *)resolved_addr->addr; + struct sockaddr_un* un = reinterpret_cast( + const_cast(resolved_addr->addr)); struct stat st; if (stat(un->sun_path, &st) == 0 && (st.st_mode & S_IFMT) == S_IFSOCK) { @@ -80,15 +88,16 @@ void grpc_unlink_if_unix_domain_socket( } } -char *grpc_sockaddr_to_uri_unix_if_possible( - const grpc_resolved_address *resolved_addr) { - const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; +char* grpc_sockaddr_to_uri_unix_if_possible( + const grpc_resolved_address* resolved_addr) { + const grpc_sockaddr* addr = + reinterpret_cast(resolved_addr->addr); if (addr->sa_family != AF_UNIX) { - return NULL; + return nullptr; } - char *result; - gpr_asprintf(&result, "unix:%s", ((struct sockaddr_un *)addr)->sun_path); + char* result; + gpr_asprintf(&result, "unix:%s", ((struct sockaddr_un*)addr)->sun_path); return result; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.h b/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.h index 25b64b3ee..917d0327a 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.h +++ 
b/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H #define GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H +#include + #include "src/core/lib/iomgr/port.h" #include @@ -27,15 +29,15 @@ void grpc_create_socketpair_if_unix(int sv[2]); -grpc_error *grpc_resolve_unix_domain_address( - const char *name, grpc_resolved_addresses **addresses); +grpc_error* grpc_resolve_unix_domain_address( + const char* name, grpc_resolved_addresses** addresses); -int grpc_is_unix_socket(const grpc_resolved_address *resolved_addr); +int grpc_is_unix_socket(const grpc_resolved_address* resolved_addr); void grpc_unlink_if_unix_domain_socket( - const grpc_resolved_address *resolved_addr); + const grpc_resolved_address* resolved_addr); -char *grpc_sockaddr_to_uri_unix_if_possible( - const grpc_resolved_address *resolved_addr); +char* grpc_sockaddr_to_uri_unix_if_possible( + const grpc_resolved_address* resolved_addr); #endif /* GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix_noop.c b/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix_noop.cc similarity index 79% rename from Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix_noop.c rename to Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix_noop.cc index e46b1c003..dfab3e0ac 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix_noop.c +++ b/Sources/CgRPC/src/core/lib/iomgr/unix_sockets_posix_noop.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/unix_sockets_posix.h" #ifndef GRPC_HAVE_UNIX_SOCKET @@ -29,18 +31,18 @@ void grpc_create_socketpair_if_unix(int sv[2]) { GPR_ASSERT(0); } -grpc_error *grpc_resolve_unix_domain_address( - const char *name, grpc_resolved_addresses **addresses) { +grpc_error* grpc_resolve_unix_domain_address( + const char* name, grpc_resolved_addresses** addresses) { *addresses = NULL; return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Unix domain sockets are not supported on Windows"); } -int grpc_is_unix_socket(const grpc_resolved_address *addr) { return false; } +int grpc_is_unix_socket(const grpc_resolved_address* addr) { return false; } -void grpc_unlink_if_unix_domain_socket(const grpc_resolved_address *addr) {} +void grpc_unlink_if_unix_domain_socket(const grpc_resolved_address* addr) {} -char *grpc_sockaddr_to_uri_unix_if_possible(const grpc_resolved_address *addr) { +char* grpc_sockaddr_to_uri_unix_if_possible(const grpc_resolved_address* addr) { return NULL; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.c b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.cc similarity index 86% rename from Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.c rename to Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.cc index 268e0175d..74faa6379 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.c +++ b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_WAKEUP_FD @@ -28,13 +30,14 @@ #include #include #include -#include #include -#include + +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/thd.h" #define MAX_TABLE_RESIZE 256 -extern cv_fd_table g_cvfds; +extern grpc_cv_fd_table g_cvfds; static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) { unsigned int i, newsize; @@ -42,20 +45,20 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) { gpr_mu_lock(&g_cvfds.mu); if (!g_cvfds.free_fds) { newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE); - g_cvfds.cvfds = - 
(fd_node*)gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize); + g_cvfds.cvfds = static_cast( + gpr_realloc(g_cvfds.cvfds, sizeof(grpc_fd_node) * newsize)); for (i = g_cvfds.size; i < newsize; i++) { g_cvfds.cvfds[i].is_set = 0; - g_cvfds.cvfds[i].cvs = NULL; + g_cvfds.cvfds[i].cvs = nullptr; g_cvfds.cvfds[i].next_free = g_cvfds.free_fds; g_cvfds.free_fds = &g_cvfds.cvfds[i]; } g_cvfds.size = newsize; } - idx = (int)(g_cvfds.free_fds - g_cvfds.cvfds); + idx = static_cast(g_cvfds.free_fds - g_cvfds.cvfds); g_cvfds.free_fds = g_cvfds.free_fds->next_free; - g_cvfds.cvfds[idx].cvs = NULL; + g_cvfds.cvfds[idx].cvs = nullptr; g_cvfds.cvfds[idx].is_set = 0; fd_info->read_fd = GRPC_IDX_TO_FD(idx); fd_info->write_fd = -1; @@ -64,7 +67,7 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) { } static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) { - cv_node* cvn; + grpc_cv_node* cvn; gpr_mu_lock(&g_cvfds.mu); g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 1; cvn = g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs; diff --git a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.h b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.h index dc170ad5b..86365f07e 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.h +++ b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_cv.h @@ -33,6 +33,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H #define GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H +#include + #include #include "src/core/lib/iomgr/ev_posix.h" @@ -40,26 +42,28 @@ #define GRPC_FD_TO_IDX(fd) (-(fd)-1) #define GRPC_IDX_TO_FD(idx) (-(idx)-1) -typedef struct cv_node { +typedef struct grpc_cv_node { gpr_cv* cv; - struct cv_node* next; - struct cv_node* prev; -} cv_node; + struct grpc_cv_node* next; + struct grpc_cv_node* prev; +} grpc_cv_node; -typedef struct fd_node { +typedef struct grpc_fd_node { int is_set; - cv_node* cvs; - struct fd_node* next_free; -} fd_node; + grpc_cv_node* cvs; + struct grpc_fd_node* next_free; +} grpc_fd_node; -typedef struct cv_fd_table { +typedef struct grpc_cv_fd_table { gpr_mu mu; gpr_refcount pollcount; gpr_cv shutdown_cv; - fd_node* cvfds; - fd_node* free_fds; + grpc_fd_node* cvfds; + grpc_fd_node* free_fds; unsigned int size; grpc_poll_function_type poll; -} cv_fd_table; +} grpc_cv_fd_table; + +extern const grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable; #endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_eventfd.c b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_eventfd.cc similarity index 96% rename from Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_eventfd.c rename to Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_eventfd.cc index 81cb7ee28..dcf7dab71 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_eventfd.c +++ b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_eventfd.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_LINUX_EVENTFD @@ -52,15 +54,14 @@ static grpc_error* eventfd_consume(grpc_wakeup_fd* fd_info) { } static grpc_error* eventfd_wakeup(grpc_wakeup_fd* fd_info) { + GPR_TIMER_SCOPE("eventfd_wakeup", 0); int err; - GPR_TIMER_BEGIN("eventfd_wakeup", 0); do { err = eventfd_write(fd_info->read_fd, 1); } while (err < 0 && errno == EINTR); if (err < 0) { return GRPC_OS_ERROR(errno, "eventfd_write"); } - GPR_TIMER_END("eventfd_wakeup", 0); return GRPC_ERROR_NONE; } diff --git a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_nospecial.c b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_nospecial.cc similarity index 90% rename from Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_nospecial.c rename 
to Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_nospecial.cc index 4c20b8c1b..64778929f 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_nospecial.c +++ b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_nospecial.cc @@ -21,6 +21,8 @@ * systems without anything better than pipe. */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_NO_SPECIAL_WAKEUP_FD @@ -31,6 +33,6 @@ static int check_availability_invalid(void) { return 0; } const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable = { - NULL, NULL, NULL, NULL, check_availability_invalid}; + nullptr, nullptr, nullptr, nullptr, check_availability_invalid}; #endif /* GRPC_POSIX_NO_SPECIAL_WAKEUP_FD */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.c b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.cc similarity index 96% rename from Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.c rename to Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.cc index 4189488f8..cb173903a 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.c +++ b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.cc @@ -16,10 +16,13 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_WAKEUP_FD +#include "src/core/lib/iomgr/wakeup_fd_pipe.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" #include diff --git a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.h b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.h index f860406bd..175697630 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.h +++ b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_pipe.h @@ -19,8 +19,10 @@ #ifndef GRPC_CORE_LIB_IOMGR_WAKEUP_FD_PIPE_H #define GRPC_CORE_LIB_IOMGR_WAKEUP_FD_PIPE_H +#include + #include "src/core/lib/iomgr/wakeup_fd_posix.h" -extern grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable; +extern const grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable; #endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_PIPE_H */ diff --git a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.c b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.cc similarity index 83% rename from Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.c rename to Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.cc index 25daa7d3f..b5b8b37a9 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.c +++ b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/iomgr/port.h" #ifdef GRPC_POSIX_WAKEUP_FD @@ -25,8 +27,7 @@ #include "src/core/lib/iomgr/wakeup_fd_pipe.h" #include "src/core/lib/iomgr/wakeup_fd_posix.h" -extern grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable; -static const grpc_wakeup_fd_vtable *wakeup_fd_vtable = NULL; +static const grpc_wakeup_fd_vtable* wakeup_fd_vtable = nullptr; int grpc_allow_specialized_wakeup_fd = 1; int grpc_allow_pipe_wakeup_fd = 1; @@ -46,7 +47,7 @@ void grpc_wakeup_fd_global_init(void) { } } -void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = NULL; } +void grpc_wakeup_fd_global_destroy(void) { wakeup_fd_vtable = nullptr; } int grpc_has_wakeup_fd(void) { return has_real_wakeup_fd; } @@ -54,28 +55,28 @@ int grpc_cv_wakeup_fds_enabled(void) { return cv_wakeup_fds_enabled; } void grpc_enable_cv_wakeup_fds(int enable) { cv_wakeup_fds_enabled = enable; } -grpc_error *grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) { +grpc_error* grpc_wakeup_fd_init(grpc_wakeup_fd* fd_info) { if (cv_wakeup_fds_enabled) { return grpc_cv_wakeup_fd_vtable.init(fd_info); } return wakeup_fd_vtable->init(fd_info); } -grpc_error *grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd *fd_info) { +grpc_error* 
grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd* fd_info) { if (cv_wakeup_fds_enabled) { return grpc_cv_wakeup_fd_vtable.consume(fd_info); } return wakeup_fd_vtable->consume(fd_info); } -grpc_error *grpc_wakeup_fd_wakeup(grpc_wakeup_fd *fd_info) { +grpc_error* grpc_wakeup_fd_wakeup(grpc_wakeup_fd* fd_info) { if (cv_wakeup_fds_enabled) { return grpc_cv_wakeup_fd_vtable.wakeup(fd_info); } return wakeup_fd_vtable->wakeup(fd_info); } -void grpc_wakeup_fd_destroy(grpc_wakeup_fd *fd_info) { +void grpc_wakeup_fd_destroy(grpc_wakeup_fd* fd_info) { if (cv_wakeup_fds_enabled) { grpc_cv_wakeup_fd_vtable.destroy(fd_info); } else { diff --git a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.h b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.h index a9584d0d4..670c31959 100644 --- a/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.h +++ b/Sources/CgRPC/src/core/lib/iomgr/wakeup_fd_posix.h @@ -47,6 +47,8 @@ #ifndef GRPC_CORE_LIB_IOMGR_WAKEUP_FD_POSIX_H #define GRPC_CORE_LIB_IOMGR_WAKEUP_FD_POSIX_H +#include + #include "src/core/lib/iomgr/error.h" void grpc_wakeup_fd_global_init(void); diff --git a/Sources/CgRPC/src/core/lib/json/json.c b/Sources/CgRPC/src/core/lib/json/json.c deleted file mode 100644 index 4ad51f662..000000000 --- a/Sources/CgRPC/src/core/lib/json/json.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -#include - -#include "src/core/lib/json/json.h" - -grpc_json* grpc_json_create(grpc_json_type type) { - grpc_json* json = (grpc_json*)gpr_zalloc(sizeof(*json)); - json->type = type; - - return json; -} - -void grpc_json_destroy(grpc_json* json) { - while (json->child) { - grpc_json_destroy(json->child); - } - - if (json->next) { - json->next->prev = json->prev; - } - - if (json->prev) { - json->prev->next = json->next; - } else if (json->parent) { - json->parent->child = json->next; - } - - gpr_free(json); -} diff --git a/Sources/CgRPC/src/core/lib/json/json.cc b/Sources/CgRPC/src/core/lib/json/json.cc new file mode 100644 index 000000000..816241bbf --- /dev/null +++ b/Sources/CgRPC/src/core/lib/json/json.cc @@ -0,0 +1,86 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
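For orientation, a hedged round-trip through the wakeup-fd API whose vtables the hunks above make const; the example_ name is hypothetical. grpc_wakeup_fd_init() selects the specialized (eventfd), pipe, or condition-variable implementation at runtime.

static void example_wakeup_fd_roundtrip(void) {
  grpc_wakeup_fd_global_init();
  grpc_wakeup_fd wfd;
  GPR_ASSERT(grpc_wakeup_fd_init(&wfd) == GRPC_ERROR_NONE);
  // A poller blocks on wfd.read_fd; another thread calls wakeup() to make it
  // readable, and the poller acknowledges the signal with consume_wakeup().
  GPR_ASSERT(grpc_wakeup_fd_wakeup(&wfd) == GRPC_ERROR_NONE);
  GPR_ASSERT(grpc_wakeup_fd_consume_wakeup(&wfd) == GRPC_ERROR_NONE);
  grpc_wakeup_fd_destroy(&wfd);
  grpc_wakeup_fd_global_destroy();
}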
+ * + */ + +#include + +#include + +#include +#include + +#include "src/core/lib/json/json.h" + +grpc_json* grpc_json_create(grpc_json_type type) { + grpc_json* json = static_cast(gpr_zalloc(sizeof(*json))); + json->type = type; + + return json; +} + +void grpc_json_destroy(grpc_json* json) { + while (json->child) { + grpc_json_destroy(json->child); + } + + if (json->next) { + json->next->prev = json->prev; + } + + if (json->prev) { + json->prev->next = json->next; + } else if (json->parent) { + json->parent->child = json->next; + } + + if (json->owns_value) { + gpr_free((void*)json->value); + } + + gpr_free(json); +} + +grpc_json* grpc_json_link_child(grpc_json* parent, grpc_json* child, + grpc_json* sibling) { + // first child case. + if (parent->child == nullptr) { + GPR_ASSERT(sibling == nullptr); + parent->child = child; + return child; + } + if (sibling == nullptr) { + sibling = parent->child; + } + // always find the right most sibling. + while (sibling->next != nullptr) { + sibling = sibling->next; + } + sibling->next = child; + return child; +} + +grpc_json* grpc_json_create_child(grpc_json* sibling, grpc_json* parent, + const char* key, const char* value, + grpc_json_type type, bool owns_value) { + grpc_json* child = grpc_json_create(type); + grpc_json_link_child(parent, child, sibling); + child->owns_value = owns_value; + child->parent = parent; + child->value = value; + child->key = key; + return child; +} diff --git a/Sources/CgRPC/src/core/lib/json/json.h b/Sources/CgRPC/src/core/lib/json/json.h index bbd43025e..f93b43048 100644 --- a/Sources/CgRPC/src/core/lib/json/json.h +++ b/Sources/CgRPC/src/core/lib/json/json.h @@ -19,6 +19,9 @@ #ifndef GRPC_CORE_LIB_JSON_JSON_H #define GRPC_CORE_LIB_JSON_JSON_H +#include + +#include #include #include "src/core/lib/json/json_common.h" @@ -35,6 +38,9 @@ typedef struct grpc_json { grpc_json_type type; const char* key; const char* value; + + /* if set, destructor will free value */ + bool owns_value; } grpc_json; /* The next two functions are going to parse the input string, and @@ -65,9 +71,24 @@ char* grpc_json_dump_to_string(grpc_json* json, int indent); /* Use these to create or delete a grpc_json object. * Deletion is recursive. We will not attempt to free any of the strings - * in any of the objects of that tree. + * in any of the objects of that tree, unless the boolean, owns_value, + * is true. */ grpc_json* grpc_json_create(grpc_json_type type); void grpc_json_destroy(grpc_json* json); +/* Links the child json object into the parent's json tree. If the parent + * already has children, then passing in the most recently added child as the + * sibling parameter is an optimization. For if sibling is NULL, this function + * will manually traverse the tree in order to find the right most sibling. + */ +grpc_json* grpc_json_link_child(grpc_json* parent, grpc_json* child, + grpc_json* sibling); + +/* Creates a child json object into the parent's json tree then links it in + * as described above. 
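A short sketch of the tree-building helpers added above: it builds {"name":"demo","retries":"3"}, hands heap-allocated values to the tree via owns_value so that grpc_json_destroy() frees them, and serializes the result. The example_ name and the literal content are illustrative only.

#include <grpc/support/string_util.h>

static char* example_build_json(void) {
  grpc_json* root = grpc_json_create(GRPC_JSON_OBJECT);
  grpc_json* first = grpc_json_create_child(/*sibling=*/nullptr, root, "name",
                                            gpr_strdup("demo"),
                                            GRPC_JSON_STRING,
                                            /*owns_value=*/true);
  // Passing the previously added child as |sibling| avoids the right-most
  // sibling walk inside grpc_json_link_child().
  grpc_json_create_child(first, root, "retries", gpr_strdup("3"),
                         GRPC_JSON_NUMBER, /*owns_value=*/true);
  char* serialized = grpc_json_dump_to_string(root, /*indent=*/0);
  grpc_json_destroy(root);  // frees the gpr_strdup'd values, not the keys
  return serialized;        // caller releases with gpr_free()
}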
*/ +grpc_json* grpc_json_create_child(grpc_json* sibling, grpc_json* parent, + const char* key, const char* value, + grpc_json_type type, bool owns_value); + #endif /* GRPC_CORE_LIB_JSON_JSON_H */ diff --git a/Sources/CgRPC/src/core/lib/json/json_reader.c b/Sources/CgRPC/src/core/lib/json/json_reader.cc similarity index 93% rename from Sources/CgRPC/src/core/lib/json/json_reader.c rename to Sources/CgRPC/src/core/lib/json/json_reader.cc index 094a35176..819572e4d 100644 --- a/Sources/CgRPC/src/core/lib/json/json_reader.c +++ b/Sources/CgRPC/src/core/lib/json/json_reader.cc @@ -16,68 +16,68 @@ * */ -#include - #include +#include + #include #include "src/core/lib/json/json_reader.h" -static void json_reader_string_clear(grpc_json_reader *reader) { +static void json_reader_string_clear(grpc_json_reader* reader) { reader->vtable->string_clear(reader->userdata); } -static void json_reader_string_add_char(grpc_json_reader *reader, uint32_t c) { +static void json_reader_string_add_char(grpc_json_reader* reader, uint32_t c) { reader->vtable->string_add_char(reader->userdata, c); } -static void json_reader_string_add_utf32(grpc_json_reader *reader, +static void json_reader_string_add_utf32(grpc_json_reader* reader, uint32_t utf32) { reader->vtable->string_add_utf32(reader->userdata, utf32); } -static uint32_t grpc_json_reader_read_char(grpc_json_reader *reader) { +static uint32_t grpc_json_reader_read_char(grpc_json_reader* reader) { return reader->vtable->read_char(reader->userdata); } -static void json_reader_container_begins(grpc_json_reader *reader, +static void json_reader_container_begins(grpc_json_reader* reader, grpc_json_type type) { reader->vtable->container_begins(reader->userdata, type); } static grpc_json_type grpc_json_reader_container_ends( - grpc_json_reader *reader) { + grpc_json_reader* reader) { return reader->vtable->container_ends(reader->userdata); } -static void json_reader_set_key(grpc_json_reader *reader) { +static void json_reader_set_key(grpc_json_reader* reader) { reader->vtable->set_key(reader->userdata); } -static void json_reader_set_string(grpc_json_reader *reader) { +static void json_reader_set_string(grpc_json_reader* reader) { reader->vtable->set_string(reader->userdata); } -static int json_reader_set_number(grpc_json_reader *reader) { +static int json_reader_set_number(grpc_json_reader* reader) { return reader->vtable->set_number(reader->userdata); } -static void json_reader_set_true(grpc_json_reader *reader) { +static void json_reader_set_true(grpc_json_reader* reader) { reader->vtable->set_true(reader->userdata); } -static void json_reader_set_false(grpc_json_reader *reader) { +static void json_reader_set_false(grpc_json_reader* reader) { reader->vtable->set_false(reader->userdata); } -static void json_reader_set_null(grpc_json_reader *reader) { +static void json_reader_set_null(grpc_json_reader* reader) { reader->vtable->set_null(reader->userdata); } /* Call this function to initialize the reader structure. 
*/ -void grpc_json_reader_init(grpc_json_reader *reader, - grpc_json_reader_vtable *vtable, void *userdata) { +void grpc_json_reader_init(grpc_json_reader* reader, + grpc_json_reader_vtable* vtable, void* userdata) { memset(reader, 0, sizeof(*reader)); reader->vtable = vtable; reader->userdata = userdata; @@ -85,13 +85,13 @@ void grpc_json_reader_init(grpc_json_reader *reader, reader->state = GRPC_JSON_STATE_VALUE_BEGIN; } -int grpc_json_reader_is_complete(grpc_json_reader *reader) { +int grpc_json_reader_is_complete(grpc_json_reader* reader) { return ((reader->depth == 0) && ((reader->state == GRPC_JSON_STATE_END) || (reader->state == GRPC_JSON_STATE_VALUE_END))); } -grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) { +grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader) { uint32_t c, success; /* This state-machine is a strict implementation of ECMA-404 */ @@ -138,7 +138,7 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) { case GRPC_JSON_STATE_VALUE_NUMBER_WITH_DECIMAL: case GRPC_JSON_STATE_VALUE_NUMBER_ZERO: case GRPC_JSON_STATE_VALUE_NUMBER_EPM: - success = (uint32_t)json_reader_set_number(reader); + success = static_cast(json_reader_set_number(reader)); if (!success) return GRPC_JSON_PARSE_ERROR; json_reader_string_clear(reader); reader->state = GRPC_JSON_STATE_VALUE_END; @@ -173,12 +173,12 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) { } else if ((c == ']') && !reader->in_array) { return GRPC_JSON_PARSE_ERROR; } - success = (uint32_t)json_reader_set_number(reader); + success = static_cast(json_reader_set_number(reader)); if (!success) return GRPC_JSON_PARSE_ERROR; json_reader_string_clear(reader); reader->state = GRPC_JSON_STATE_VALUE_END; - /* The missing break here is intentional. */ - /* fallthrough */ + /* The missing break here is intentional. */ + /* fallthrough */ case GRPC_JSON_STATE_VALUE_END: case GRPC_JSON_STATE_OBJECT_KEY_BEGIN: @@ -417,8 +417,10 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) { } else { return GRPC_JSON_PARSE_ERROR; } - reader->unicode_char = (uint16_t)(reader->unicode_char << 4); - reader->unicode_char = (uint16_t)(reader->unicode_char | c); + reader->unicode_char = + static_cast(reader->unicode_char << 4); + reader->unicode_char = + static_cast(reader->unicode_char | c); switch (reader->state) { case GRPC_JSON_STATE_STRING_ESCAPE_U1: @@ -445,9 +447,9 @@ grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader) { if (reader->unicode_high_surrogate == 0) return GRPC_JSON_PARSE_ERROR; utf32 = 0x10000; - utf32 += (uint32_t)( + utf32 += static_cast( (reader->unicode_high_surrogate - 0xd800) * 0x400); - utf32 += (uint32_t)(reader->unicode_char - 0xdc00); + utf32 += static_cast(reader->unicode_char - 0xdc00); json_reader_string_add_utf32(reader, utf32); reader->unicode_high_surrogate = 0; } else { diff --git a/Sources/CgRPC/src/core/lib/json/json_reader.h b/Sources/CgRPC/src/core/lib/json/json_reader.h index 577fbbbaf..78f7ad9f3 100644 --- a/Sources/CgRPC/src/core/lib/json/json_reader.h +++ b/Sources/CgRPC/src/core/lib/json/json_reader.h @@ -20,6 +20,7 @@ #define GRPC_CORE_LIB_JSON_JSON_READER_H #include + #include "src/core/lib/json/json_common.h" typedef enum { @@ -67,27 +68,27 @@ struct grpc_json_reader; typedef struct grpc_json_reader_vtable { /* Clears your internal string scratchpad. */ - void (*string_clear)(void *userdata); + void (*string_clear)(void* userdata); /* Adds a char to the string scratchpad. 
*/ - void (*string_add_char)(void *userdata, uint32_t c); + void (*string_add_char)(void* userdata, uint32_t c); /* Adds a utf32 char to the string scratchpad. */ - void (*string_add_utf32)(void *userdata, uint32_t c); + void (*string_add_utf32)(void* userdata, uint32_t c); /* Reads a character from your input. May be utf-8, 16 or 32. */ - uint32_t (*read_char)(void *userdata); + uint32_t (*read_char)(void* userdata); /* Starts a container of type GRPC_JSON_ARRAY or GRPC_JSON_OBJECT. */ - void (*container_begins)(void *userdata, grpc_json_type type); + void (*container_begins)(void* userdata, grpc_json_type type); /* Ends the current container. Must return the type of its parent. */ - grpc_json_type (*container_ends)(void *userdata); + grpc_json_type (*container_ends)(void* userdata); /* Your internal string scratchpad is an object's key. */ - void (*set_key)(void *userdata); + void (*set_key)(void* userdata); /* Your internal string scratchpad is a string value. */ - void (*set_string)(void *userdata); + void (*set_string)(void* userdata); /* Your internal string scratchpad is a numerical value. Return 1 if valid. */ - int (*set_number)(void *userdata); + int (*set_number)(void* userdata); /* Sets the values true, false or null. */ - void (*set_true)(void *userdata); - void (*set_false)(void *userdata); - void (*set_null)(void *userdata); + void (*set_true)(void* userdata); + void (*set_false)(void* userdata); + void (*set_null)(void* userdata); } grpc_json_reader_vtable; typedef struct grpc_json_reader { @@ -95,8 +96,8 @@ typedef struct grpc_json_reader { * The definition is public so you can put it on your stack. */ - void *userdata; - grpc_json_reader_vtable *vtable; + void* userdata; + grpc_json_reader_vtable* vtable; int depth; int in_object; int in_array; @@ -129,17 +130,17 @@ typedef enum { * . GRPC_JSON_INTERNAL_ERROR if the parser somehow ended into an invalid * internal state. */ -grpc_json_reader_status grpc_json_reader_run(grpc_json_reader *reader); +grpc_json_reader_status grpc_json_reader_run(grpc_json_reader* reader); /* Call this function to initialize the reader structure. */ -void grpc_json_reader_init(grpc_json_reader *reader, - grpc_json_reader_vtable *vtable, void *userdata); +void grpc_json_reader_init(grpc_json_reader* reader, + grpc_json_reader_vtable* vtable, void* userdata); /* You may call this from the read_char callback if you don't know where is the * end of your input stream, and you'd like the json reader to hint you that it * has completed reading its input, so you can return an EOF to it. Note that * there might still be trailing whitespaces after that point. */ -int grpc_json_reader_is_complete(grpc_json_reader *reader); +int grpc_json_reader_is_complete(grpc_json_reader* reader); #endif /* GRPC_CORE_LIB_JSON_JSON_READER_H */ diff --git a/Sources/CgRPC/src/core/lib/json/json_string.c b/Sources/CgRPC/src/core/lib/json/json_string.cc similarity index 73% rename from Sources/CgRPC/src/core/lib/json/json_string.c rename to Sources/CgRPC/src/core/lib/json/json_string.cc index 3178d2d2b..4f9175b9e 100644 --- a/Sources/CgRPC/src/core/lib/json/json_string.c +++ b/Sources/CgRPC/src/core/lib/json/json_string.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -38,13 +40,13 @@ * input size, and never expands it. 
*/ typedef struct { - grpc_json *top; - grpc_json *current_container; - grpc_json *current_value; - uint8_t *input; - uint8_t *key; - uint8_t *string; - uint8_t *string_ptr; + grpc_json* top; + grpc_json* current_container; + grpc_json* current_value; + uint8_t* input; + uint8_t* key; + uint8_t* string; + uint8_t* string_ptr; size_t remaining_input; } json_reader_userdata; @@ -52,7 +54,7 @@ typedef struct { * The point is that we allocate that string in chunks of 256 bytes. */ typedef struct { - char *output; + char* output; size_t free_space; size_t string_len; size_t allocated; @@ -62,35 +64,36 @@ typedef struct { * and will enlarge it if necessary. We're only allocating chunks of 256 * bytes at a time (or multiples thereof). */ -static void json_writer_output_check(void *userdata, size_t needed) { - json_writer_userdata *state = (json_writer_userdata *)userdata; +static void json_writer_output_check(void* userdata, size_t needed) { + json_writer_userdata* state = static_cast(userdata); if (state->free_space >= needed) return; needed -= state->free_space; /* Round up by 256 bytes. */ needed = (needed + 0xff) & ~0xffU; - state->output = (char *)gpr_realloc(state->output, state->allocated + needed); + state->output = + static_cast(gpr_realloc(state->output, state->allocated + needed)); state->free_space += needed; state->allocated += needed; } /* These are needed by the writer's implementation. */ -static void json_writer_output_char(void *userdata, char c) { - json_writer_userdata *state = (json_writer_userdata *)userdata; +static void json_writer_output_char(void* userdata, char c) { + json_writer_userdata* state = static_cast(userdata); json_writer_output_check(userdata, 1); state->output[state->string_len++] = c; state->free_space--; } -static void json_writer_output_string_with_len(void *userdata, const char *str, +static void json_writer_output_string_with_len(void* userdata, const char* str, size_t len) { - json_writer_userdata *state = (json_writer_userdata *)userdata; + json_writer_userdata* state = static_cast(userdata); json_writer_output_check(userdata, len); memcpy(state->output + state->string_len, str, len); state->string_len += len; state->free_space -= len; } -static void json_writer_output_string(void *userdata, const char *str) { +static void json_writer_output_string(void* userdata, const char* str) { size_t len = strlen(str); json_writer_output_string_with_len(userdata, str, len); } @@ -98,8 +101,8 @@ static void json_writer_output_string(void *userdata, const char *str) { /* The reader asks us to clear our scratchpad. In our case, we'll simply mark * the end of the current string, and advance our output pointer. 
*/ -static void json_reader_string_clear(void *userdata) { - json_reader_userdata *state = (json_reader_userdata *)userdata; +static void json_reader_string_clear(void* userdata) { + json_reader_userdata* state = static_cast(userdata); if (state->string) { GPR_ASSERT(state->string_ptr < state->input); *state->string_ptr++ = 0; @@ -107,17 +110,17 @@ static void json_reader_string_clear(void *userdata) { state->string = state->string_ptr; } -static void json_reader_string_add_char(void *userdata, uint32_t c) { - json_reader_userdata *state = (json_reader_userdata *)userdata; +static void json_reader_string_add_char(void* userdata, uint32_t c) { + json_reader_userdata* state = static_cast(userdata); GPR_ASSERT(state->string_ptr < state->input); GPR_ASSERT(c <= 0xff); - *state->string_ptr++ = (uint8_t)c; + *state->string_ptr++ = static_cast(c); } /* We are converting a UTF-32 character into UTF-8 here, * as described by RFC3629. */ -static void json_reader_string_add_utf32(void *userdata, uint32_t c) { +static void json_reader_string_add_utf32(void* userdata, uint32_t c) { if (c <= 0x7f) { json_reader_string_add_char(userdata, c); } else if (c <= 0x7ff) { @@ -147,9 +150,9 @@ static void json_reader_string_add_utf32(void *userdata, uint32_t c) { /* We consider that the input may be a zero-terminated string. So we * can end up hitting eof before the end of the alleged string length. */ -static uint32_t json_reader_read_char(void *userdata) { +static uint32_t json_reader_read_char(void* userdata) { uint32_t r; - json_reader_userdata *state = (json_reader_userdata *)userdata; + json_reader_userdata* state = static_cast(userdata); if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF; @@ -167,9 +170,9 @@ static uint32_t json_reader_read_char(void *userdata) { /* Helper function to create a new grpc_json object and link it into * our tree-in-progress inside our opaque structure. 
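As a worked example of the RFC 3629 conversion performed above (editorial note, not part of the patch):

/* U+20AC (the Euro sign) falls in the three-byte range 0x800..0xFFFF:
 *   0xE0 | (0x20AC >> 12)          = 0xE2
 *   0x80 | ((0x20AC >> 6) & 0x3F)  = 0x82
 *   0x80 | (0x20AC & 0x3F)         = 0xAC
 * so the string scratchpad receives the byte sequence E2 82 AC. */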
*/ -static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) { - json_reader_userdata *state = (json_reader_userdata *)userdata; - grpc_json *json = grpc_json_create(type); +static grpc_json* json_create_and_link(void* userdata, grpc_json_type type) { + json_reader_userdata* state = static_cast(userdata); + grpc_json* json = grpc_json_create(type); json->parent = state->current_container; json->prev = state->current_value; @@ -183,7 +186,7 @@ static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) { json->parent->child = json; } if (json->parent->type == GRPC_JSON_OBJECT) { - json->key = (char *)state->key; + json->key = reinterpret_cast(state->key); } } if (!state->top) { @@ -193,15 +196,15 @@ static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) { return json; } -static void json_reader_container_begins(void *userdata, grpc_json_type type) { - json_reader_userdata *state = (json_reader_userdata *)userdata; - grpc_json *container; +static void json_reader_container_begins(void* userdata, grpc_json_type type) { + json_reader_userdata* state = static_cast(userdata); + grpc_json* container; GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT); container = json_create_and_link(userdata, type); state->current_container = container; - state->current_value = NULL; + state->current_value = nullptr; } /* It's important to remember that the reader is mostly stateless, so it @@ -213,9 +216,9 @@ static void json_reader_container_begins(void *userdata, grpc_json_type type) { * Also note that if we're at the top of the tree, and the last container * ends, we have to return GRPC_JSON_TOP_LEVEL. */ -static grpc_json_type json_reader_container_ends(void *userdata) { +static grpc_json_type json_reader_container_ends(void* userdata) { grpc_json_type container_type = GRPC_JSON_TOP_LEVEL; - json_reader_userdata *state = (json_reader_userdata *)userdata; + json_reader_userdata* state = static_cast(userdata); GPR_ASSERT(state->current_container); @@ -235,36 +238,36 @@ static grpc_json_type json_reader_container_ends(void *userdata) { * Note that in the set_number case, we're not going to try interpreting it. * We'll keep it as a string, and leave it to the caller to evaluate it. 
*/ -static void json_reader_set_key(void *userdata) { - json_reader_userdata *state = (json_reader_userdata *)userdata; +static void json_reader_set_key(void* userdata) { + json_reader_userdata* state = static_cast(userdata); state->key = state->string; } -static void json_reader_set_string(void *userdata) { - json_reader_userdata *state = (json_reader_userdata *)userdata; - grpc_json *json = json_create_and_link(userdata, GRPC_JSON_STRING); - json->value = (char *)state->string; +static void json_reader_set_string(void* userdata) { + json_reader_userdata* state = static_cast(userdata); + grpc_json* json = json_create_and_link(userdata, GRPC_JSON_STRING); + json->value = reinterpret_cast(state->string); } -static int json_reader_set_number(void *userdata) { - json_reader_userdata *state = (json_reader_userdata *)userdata; - grpc_json *json = json_create_and_link(userdata, GRPC_JSON_NUMBER); - json->value = (char *)state->string; +static int json_reader_set_number(void* userdata) { + json_reader_userdata* state = static_cast(userdata); + grpc_json* json = json_create_and_link(userdata, GRPC_JSON_NUMBER); + json->value = reinterpret_cast(state->string); return 1; } /* The object types true, false and null are self-sufficient, and don't need * any more information beside their type. */ -static void json_reader_set_true(void *userdata) { +static void json_reader_set_true(void* userdata) { json_create_and_link(userdata, GRPC_JSON_TRUE); } -static void json_reader_set_false(void *userdata) { +static void json_reader_set_false(void* userdata) { json_create_and_link(userdata, GRPC_JSON_FALSE); } -static void json_reader_set_null(void *userdata) { +static void json_reader_set_null(void* userdata) { json_create_and_link(userdata, GRPC_JSON_NULL); } @@ -277,17 +280,17 @@ static grpc_json_reader_vtable reader_vtable = { json_reader_set_false, json_reader_set_null}; /* And finally, let's define our public API. 
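A hedged sketch of the public parse entry point implemented just below; note that the parser uses the input buffer as its scratchpad, so the buffer must be mutable and must outlive the returned tree. The example_ name is hypothetical.

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>

static void example_parse(void) {
  char* scratch = gpr_strdup("{\"name\":\"demo\"}");  // parsed in place
  grpc_json* json = grpc_json_parse_string(scratch);
  if (json != nullptr) {
    // json->child->key is "name" and json->child->value is "demo"; both
    // point into |scratch|.
    grpc_json_destroy(json);
  }
  gpr_free(scratch);
}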
*/ -grpc_json *grpc_json_parse_string_with_len(char *input, size_t size) { +grpc_json* grpc_json_parse_string_with_len(char* input, size_t size) { grpc_json_reader reader; json_reader_userdata state; - grpc_json *json = NULL; + grpc_json* json = nullptr; grpc_json_reader_status status; - if (!input) return NULL; + if (!input) return nullptr; - state.top = state.current_container = state.current_value = NULL; - state.string = state.key = NULL; - state.string_ptr = state.input = (uint8_t *)input; + state.top = state.current_container = state.current_value = nullptr; + state.string = state.key = nullptr; + state.string_ptr = state.input = reinterpret_cast(input); state.remaining_input = size; grpc_json_reader_init(&reader, &reader_vtable, &state); @@ -296,7 +299,7 @@ grpc_json *grpc_json_parse_string_with_len(char *input, size_t size) { if ((status != GRPC_JSON_DONE) && json) { grpc_json_destroy(json); - json = NULL; + json = nullptr; } return json; @@ -304,11 +307,11 @@ grpc_json *grpc_json_parse_string_with_len(char *input, size_t size) { #define UNBOUND_JSON_STRING_LENGTH 0x7fffffff -grpc_json *grpc_json_parse_string(char *input) { +grpc_json* grpc_json_parse_string(char* input) { return grpc_json_parse_string_with_len(input, UNBOUND_JSON_STRING_LENGTH); } -static void json_dump_recursive(grpc_json_writer *writer, grpc_json *json, +static void json_dump_recursive(grpc_json_writer* writer, grpc_json* json, int in_object) { while (json) { if (in_object) grpc_json_writer_object_key(writer, json->key); @@ -348,11 +351,11 @@ static grpc_json_writer_vtable writer_vtable = { json_writer_output_char, json_writer_output_string, json_writer_output_string_with_len}; -char *grpc_json_dump_to_string(grpc_json *json, int indent) { +char* grpc_json_dump_to_string(grpc_json* json, int indent) { grpc_json_writer writer; json_writer_userdata state; - state.output = NULL; + state.output = nullptr; state.free_space = state.string_len = state.allocated = 0; grpc_json_writer_init(&writer, indent, &writer_vtable, &state); diff --git a/Sources/CgRPC/src/core/lib/json/json_writer.c b/Sources/CgRPC/src/core/lib/json/json_writer.cc similarity index 77% rename from Sources/CgRPC/src/core/lib/json/json_writer.c rename to Sources/CgRPC/src/core/lib/json/json_writer.cc index eab1bff7a..7bbdccc7a 100644 --- a/Sources/CgRPC/src/core/lib/json/json_writer.c +++ b/Sources/CgRPC/src/core/lib/json/json_writer.cc @@ -16,28 +16,28 @@ * */ -#include - #include +#include + #include "src/core/lib/json/json_writer.h" -static void json_writer_output_char(grpc_json_writer *writer, char c) { +static void json_writer_output_char(grpc_json_writer* writer, char c) { writer->vtable->output_char(writer->userdata, c); } -static void json_writer_output_string(grpc_json_writer *writer, - const char *str) { +static void json_writer_output_string(grpc_json_writer* writer, + const char* str) { writer->vtable->output_string(writer->userdata, str); } -static void json_writer_output_string_with_len(grpc_json_writer *writer, - const char *str, size_t len) { +static void json_writer_output_string_with_len(grpc_json_writer* writer, + const char* str, size_t len) { writer->vtable->output_string_with_len(writer->userdata, str, len); } -void grpc_json_writer_init(grpc_json_writer *writer, int indent, - grpc_json_writer_vtable *vtable, void *userdata) { +void grpc_json_writer_init(grpc_json_writer* writer, int indent, + grpc_json_writer_vtable* vtable, void* userdata) { memset(writer, 0, sizeof(*writer)); writer->container_empty = 1; writer->indent = 
indent; @@ -45,14 +45,14 @@ void grpc_json_writer_init(grpc_json_writer *writer, int indent, writer->userdata = userdata; } -static void json_writer_output_indent(grpc_json_writer *writer) { +static void json_writer_output_indent(grpc_json_writer* writer) { static const char spacesstr[] = " " " " " " " "; - unsigned spaces = (unsigned)(writer->depth * writer->indent); + unsigned spaces = static_cast(writer->depth * writer->indent); if (writer->indent == 0) return; @@ -64,7 +64,7 @@ static void json_writer_output_indent(grpc_json_writer *writer) { while (spaces >= (sizeof(spacesstr) - 1)) { json_writer_output_string_with_len(writer, spacesstr, sizeof(spacesstr) - 1); - spaces -= (unsigned)(sizeof(spacesstr) - 1); + spaces -= static_cast(sizeof(spacesstr) - 1); } if (spaces == 0) return; @@ -73,7 +73,7 @@ static void json_writer_output_indent(grpc_json_writer *writer) { writer, spacesstr + sizeof(spacesstr) - 1 - spaces, spaces); } -static void json_writer_value_end(grpc_json_writer *writer) { +static void json_writer_value_end(grpc_json_writer* writer) { if (writer->container_empty) { writer->container_empty = 0; if ((writer->indent == 0) || (writer->depth == 0)) return; @@ -85,7 +85,7 @@ static void json_writer_value_end(grpc_json_writer *writer) { } } -static void json_writer_escape_utf16(grpc_json_writer *writer, uint16_t utf16) { +static void json_writer_escape_utf16(grpc_json_writer* writer, uint16_t utf16) { static const char hex[] = "0123456789abcdef"; json_writer_output_string_with_len(writer, "\\u", 2); @@ -95,17 +95,17 @@ static void json_writer_escape_utf16(grpc_json_writer *writer, uint16_t utf16) { json_writer_output_char(writer, hex[(utf16)&0x0f]); } -static void json_writer_escape_string(grpc_json_writer *writer, - const char *string) { +static void json_writer_escape_string(grpc_json_writer* writer, + const char* string) { json_writer_output_char(writer, '"'); for (;;) { - uint8_t c = (uint8_t)*string++; + uint8_t c = static_cast(*string++); if (c == 0) { break; } else if ((c >= 32) && (c <= 126)) { if ((c == '\\') || (c == '"')) json_writer_output_char(writer, '\\'); - json_writer_output_char(writer, (char)c); + json_writer_output_char(writer, static_cast(c)); } else if ((c < 32) || (c == 127)) { switch (c) { case '\b': @@ -146,7 +146,7 @@ static void json_writer_escape_string(grpc_json_writer *writer, } for (i = 0; i < extra; i++) { utf32 <<= 6; - c = (uint8_t)(*string++); + c = static_cast(*string++); /* Breaks out and bail on any invalid UTF-8 sequence, including \0. */ if ((c & 0xc0) != 0x80) { valid = 0; @@ -179,10 +179,12 @@ static void json_writer_escape_string(grpc_json_writer *writer, * That range is exactly 20 bits. 
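A worked example of the surrogate-pair split used above (editorial note): U+1F600, which lies outside the BMP, is escaped as \ud83d\ude00.

/*   utf32 = 0x1F600 - 0x10000          = 0x0F600   (20 bits)
 *   high  = 0xD800 | (0x0F600 >> 10)   = 0xD83D
 *   low   = 0xDC00 | (0x0F600 & 0x3FF) = 0xDE00    */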
*/ utf32 -= 0x10000; - json_writer_escape_utf16(writer, (uint16_t)(0xd800 | (utf32 >> 10))); - json_writer_escape_utf16(writer, (uint16_t)(0xdc00 | (utf32 & 0x3ff))); + json_writer_escape_utf16(writer, + static_cast(0xd800 | (utf32 >> 10))); + json_writer_escape_utf16( + writer, static_cast(0xdc00 | (utf32 & 0x3ff))); } else { - json_writer_escape_utf16(writer, (uint16_t)utf32); + json_writer_escape_utf16(writer, static_cast(utf32)); } } } @@ -190,7 +192,7 @@ static void json_writer_escape_string(grpc_json_writer *writer, json_writer_output_char(writer, '"'); } -void grpc_json_writer_container_begins(grpc_json_writer *writer, +void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type type) { if (!writer->got_key) json_writer_value_end(writer); json_writer_output_indent(writer); @@ -200,7 +202,7 @@ void grpc_json_writer_container_begins(grpc_json_writer *writer, writer->depth++; } -void grpc_json_writer_container_ends(grpc_json_writer *writer, +void grpc_json_writer_container_ends(grpc_json_writer* writer, grpc_json_type type) { if (writer->indent && !writer->container_empty) json_writer_output_char(writer, '\n'); @@ -211,7 +213,7 @@ void grpc_json_writer_container_ends(grpc_json_writer *writer, writer->got_key = 0; } -void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string) { +void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string) { json_writer_value_end(writer); json_writer_output_indent(writer); json_writer_escape_string(writer, string); @@ -219,23 +221,23 @@ void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string) { writer->got_key = 1; } -void grpc_json_writer_value_raw(grpc_json_writer *writer, const char *string) { +void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string) { if (!writer->got_key) json_writer_value_end(writer); json_writer_output_indent(writer); json_writer_output_string(writer, string); writer->got_key = 0; } -void grpc_json_writer_value_raw_with_len(grpc_json_writer *writer, - const char *string, size_t len) { +void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer, + const char* string, size_t len) { if (!writer->got_key) json_writer_value_end(writer); json_writer_output_indent(writer); json_writer_output_string_with_len(writer, string, len); writer->got_key = 0; } -void grpc_json_writer_value_string(grpc_json_writer *writer, - const char *string) { +void grpc_json_writer_value_string(grpc_json_writer* writer, + const char* string) { if (!writer->got_key) json_writer_value_end(writer); json_writer_output_indent(writer); json_writer_escape_string(writer, string); diff --git a/Sources/CgRPC/src/core/lib/json/json_writer.h b/Sources/CgRPC/src/core/lib/json/json_writer.h index 8779039d4..ba0bedde7 100644 --- a/Sources/CgRPC/src/core/lib/json/json_writer.h +++ b/Sources/CgRPC/src/core/lib/json/json_writer.h @@ -31,23 +31,25 @@ #ifndef GRPC_CORE_LIB_JSON_JSON_WRITER_H #define GRPC_CORE_LIB_JSON_JSON_WRITER_H +#include + #include #include "src/core/lib/json/json_common.h" typedef struct grpc_json_writer_vtable { /* Adds a character to the output stream. */ - void (*output_char)(void *userdata, char); + void (*output_char)(void* userdata, char); /* Adds a zero-terminated string to the output stream. */ - void (*output_string)(void *userdata, const char *str); + void (*output_string)(void* userdata, const char* str); /* Adds a fixed-length string to the output stream. 
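To illustrate the writer vtable contract, a hedged sketch that drives grpc_json_writer directly with a std::string sink (all example_* names are hypothetical; core code would normally go through grpc_json_dump_to_string). With indent=0 it produces the condensed form {"name":"demo"}.

#include <string>

static void example_output_char(void* userdata, char c) {
  static_cast<std::string*>(userdata)->push_back(c);
}
static void example_output_string(void* userdata, const char* str) {
  static_cast<std::string*>(userdata)->append(str);
}
static void example_output_string_with_len(void* userdata, const char* str,
                                           size_t len) {
  static_cast<std::string*>(userdata)->append(str, len);
}
static grpc_json_writer_vtable example_writer_vtable = {
    example_output_char, example_output_string, example_output_string_with_len};

static std::string example_write_json(void) {
  std::string out;
  grpc_json_writer writer;
  grpc_json_writer_init(&writer, /*indent=*/0, &example_writer_vtable, &out);
  grpc_json_writer_container_begins(&writer, GRPC_JSON_OBJECT);
  grpc_json_writer_object_key(&writer, "name");
  grpc_json_writer_value_string(&writer, "demo");
  grpc_json_writer_container_ends(&writer, GRPC_JSON_OBJECT);
  return out;
}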
*/ - void (*output_string_with_len)(void *userdata, const char *str, size_t len); + void (*output_string_with_len)(void* userdata, const char* str, size_t len); } grpc_json_writer_vtable; typedef struct grpc_json_writer { - void *userdata; - grpc_json_writer_vtable *vtable; + void* userdata; + grpc_json_writer_vtable* vtable; int indent; int depth; int container_empty; @@ -59,24 +61,24 @@ typedef struct grpc_json_writer { * use indent=0, then the output will not have any newlines either, thus * emitting a condensed json output. */ -void grpc_json_writer_init(grpc_json_writer *writer, int indent, - grpc_json_writer_vtable *vtable, void *userdata); +void grpc_json_writer_init(grpc_json_writer* writer, int indent, + grpc_json_writer_vtable* vtable, void* userdata); /* Signals the beginning of a container. */ -void grpc_json_writer_container_begins(grpc_json_writer *writer, +void grpc_json_writer_container_begins(grpc_json_writer* writer, grpc_json_type type); /* Signals the end of a container. */ -void grpc_json_writer_container_ends(grpc_json_writer *writer, +void grpc_json_writer_container_ends(grpc_json_writer* writer, grpc_json_type type); /* Writes down an object key for the next value. */ -void grpc_json_writer_object_key(grpc_json_writer *writer, const char *string); +void grpc_json_writer_object_key(grpc_json_writer* writer, const char* string); /* Sets a raw value. Useful for numbers. */ -void grpc_json_writer_value_raw(grpc_json_writer *writer, const char *string); +void grpc_json_writer_value_raw(grpc_json_writer* writer, const char* string); /* Sets a raw value with its length. Useful for values like true or false. */ -void grpc_json_writer_value_raw_with_len(grpc_json_writer *writer, - const char *string, size_t len); +void grpc_json_writer_value_raw_with_len(grpc_json_writer* writer, + const char* string, size_t len); /* Sets a string value. It'll be escaped, and utf-8 validated. */ -void grpc_json_writer_value_string(grpc_json_writer *writer, - const char *string); +void grpc_json_writer_value_string(grpc_json_writer* writer, + const char* string); #endif /* GRPC_CORE_LIB_JSON_JSON_WRITER_H */ diff --git a/Sources/CgRPC/src/core/lib/profiling/basic_timers.c b/Sources/CgRPC/src/core/lib/profiling/basic_timers.cc similarity index 75% rename from Sources/CgRPC/src/core/lib/profiling/basic_timers.c rename to Sources/CgRPC/src/core/lib/profiling/basic_timers.cc index c7645b74f..b19ad9fc2 100644 --- a/Sources/CgRPC/src/core/lib/profiling/basic_timers.c +++ b/Sources/CgRPC/src/core/lib/profiling/basic_timers.cc @@ -18,26 +18,27 @@ #include -#ifdef GRPC_BASIC_PROFILER - #include "src/core/lib/profiling/timers.h" +#ifdef GRPC_BASIC_PROFILER + #include #include #include -#include #include +#include +#include #include #include -#include "src/core/lib/support/env.h" +#include "src/core/lib/gpr/env.h" typedef enum { BEGIN = '{', END = '}', MARK = '.' 
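The json_writer vtable shown above only knows how to append characters and strings; the caller owns the output buffer. Below is a hedged sketch of wiring that vtable to a std::string, assuming json_writer.h as patched here is on the include path and that GRPC_JSON_OBJECT is provided by json_common.h; the sink helpers are illustrative.

    #include <cstddef>
    #include <string>
    #include "src/core/lib/json/json_writer.h"

    // Illustrative sink: collect writer output into a std::string via userdata.
    static void sink_char(void* userdata, char c) {
      static_cast<std::string*>(userdata)->push_back(c);
    }
    static void sink_string(void* userdata, const char* str) {
      static_cast<std::string*>(userdata)->append(str);
    }
    static void sink_string_with_len(void* userdata, const char* str, size_t len) {
      static_cast<std::string*>(userdata)->append(str, len);
    }

    static grpc_json_writer_vtable string_sink_vtable = {
        sink_char, sink_string, sink_string_with_len};

    std::string write_example() {
      std::string out;
      grpc_json_writer writer;
      grpc_json_writer_init(&writer, /*indent=*/2, &string_sink_vtable, &out);
      grpc_json_writer_container_begins(&writer, GRPC_JSON_OBJECT);
      grpc_json_writer_object_key(&writer, "name");
      grpc_json_writer_value_string(&writer, "grpc");
      grpc_json_writer_container_ends(&writer, GRPC_JSON_OBJECT);
      return out;  // a small JSON object, pretty-printed with 2-space indent
    }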
} marker_type; typedef struct gpr_timer_entry { gpr_timespec tm; - const char *tagstr; - const char *file; + const char* tagstr; + const char* file; short line; char type; uint8_t important; @@ -48,32 +49,32 @@ typedef struct gpr_timer_entry { typedef struct gpr_timer_log { size_t num_entries; - struct gpr_timer_log *next; - struct gpr_timer_log *prev; + struct gpr_timer_log* next; + struct gpr_timer_log* prev; gpr_timer_entry log[MAX_COUNT]; } gpr_timer_log; typedef struct gpr_timer_log_list { - gpr_timer_log *head; + gpr_timer_log* head; /* valid iff head!=NULL */ - gpr_timer_log *tail; + gpr_timer_log* tail; } gpr_timer_log_list; -static __thread gpr_timer_log *g_thread_log; +static __thread gpr_timer_log* g_thread_log; static gpr_once g_once_init = GPR_ONCE_INIT; -static FILE *output_file; -static const char *output_filename_or_null = NULL; +static FILE* output_file; +static const char* output_filename_or_null = NULL; static pthread_mutex_t g_mu; static pthread_cond_t g_cv; static gpr_timer_log_list g_in_progress_logs; static gpr_timer_log_list g_done_logs; static int g_shutdown; -static gpr_thd_id g_writing_thread; +static pthread_t g_writing_thread; static __thread int g_thread_id; static int g_next_thread_id; static int g_writing_enabled = 1; -static const char *output_filename() { +static const char* output_filename() { if (output_filename_or_null == NULL) { output_filename_or_null = gpr_getenv("LATENCY_TRACE"); if (output_filename_or_null == NULL || @@ -84,7 +85,7 @@ static const char *output_filename() { return output_filename_or_null; } -static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) { +static int timer_log_push_back(gpr_timer_log_list* list, gpr_timer_log* log) { if (list->head == NULL) { list->head = list->tail = log; log->next = log->prev = NULL; @@ -98,8 +99,8 @@ static int timer_log_push_back(gpr_timer_log_list *list, gpr_timer_log *log) { } } -static gpr_timer_log *timer_log_pop_front(gpr_timer_log_list *list) { - gpr_timer_log *out = list->head; +static gpr_timer_log* timer_log_pop_front(gpr_timer_log_list* list) { + gpr_timer_log* out = list->head; if (out != NULL) { list->head = out->next; if (list->head != NULL) { @@ -111,7 +112,7 @@ static gpr_timer_log *timer_log_pop_front(gpr_timer_log_list *list) { return out; } -static void timer_log_remove(gpr_timer_log_list *list, gpr_timer_log *log) { +static void timer_log_remove(gpr_timer_log_list* list, gpr_timer_log* log) { if (log->prev == NULL) { list->head = log->next; if (list->head != NULL) { @@ -130,13 +131,13 @@ static void timer_log_remove(gpr_timer_log_list *list, gpr_timer_log *log) { } } -static void write_log(gpr_timer_log *log) { +static void write_log(gpr_timer_log* log) { size_t i; if (output_file == NULL) { output_file = fopen(output_filename(), "w"); } for (i = 0; i < log->num_entries; i++) { - gpr_timer_entry *entry = &(log->log[i]); + gpr_timer_entry* entry = &(log->log[i]); if (gpr_time_cmp(entry->tm, gpr_time_0(entry->tm.clock_type)) < 0) { entry->tm = gpr_time_0(entry->tm.clock_type); } @@ -149,8 +150,8 @@ static void write_log(gpr_timer_log *log) { } } -static void writing_thread(void *unused) { - gpr_timer_log *log; +static void* writing_thread(void* unused) { + gpr_timer_log* log; pthread_mutex_lock(&g_mu); for (;;) { while ((log = timer_log_pop_front(&g_done_logs)) == NULL && !g_shutdown) { @@ -164,13 +165,13 @@ static void writing_thread(void *unused) { } if (g_shutdown) { pthread_mutex_unlock(&g_mu); - return; + return NULL; } } } -static void 
flush_logs(gpr_timer_log_list *list) { - gpr_timer_log *log; +static void flush_logs(gpr_timer_log_list* list) { + gpr_timer_log* log; while ((log = timer_log_pop_front(list)) != NULL) { write_log(log); free(log); @@ -182,7 +183,7 @@ static void finish_writing(void) { g_shutdown = 1; pthread_cond_signal(&g_cv); pthread_mutex_unlock(&g_mu); - gpr_thd_join(g_writing_thread); + pthread_join(g_writing_thread, NULL); gpr_log(GPR_INFO, "flushing logs"); @@ -196,22 +197,25 @@ static void finish_writing(void) { } } -void gpr_timers_set_log_filename(const char *filename) { +void gpr_timers_set_log_filename(const char* filename) { output_filename_or_null = filename; } static void init_output() { - gpr_thd_options options = gpr_thd_options_default(); - gpr_thd_options_set_joinable(&options); - GPR_ASSERT(gpr_thd_new(&g_writing_thread, writing_thread, NULL, &options)); + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); + pthread_create(&g_writing_thread, &attr, &writing_thread, NULL); + pthread_attr_destroy(&attr); + atexit(finish_writing); } static void rotate_log() { /* Using malloc here, as this code could end up being called by gpr_malloc */ - gpr_timer_log *new = malloc(sizeof(*new)); + gpr_timer_log* log = static_cast(malloc(sizeof(*log))); gpr_once_init(&g_once_init, init_output); - new->num_entries = 0; + log->num_entries = 0; pthread_mutex_lock(&g_mu); if (g_thread_log != NULL) { timer_log_remove(&g_in_progress_logs, g_thread_log); @@ -221,14 +225,14 @@ static void rotate_log() { } else { g_thread_id = g_next_thread_id++; } - timer_log_push_back(&g_in_progress_logs, new); + timer_log_push_back(&g_in_progress_logs, log); pthread_mutex_unlock(&g_mu); - g_thread_log = new; + g_thread_log = log; } -static void gpr_timers_log_add(const char *tagstr, marker_type type, - int important, const char *file, int line) { - gpr_timer_entry *entry; +static void gpr_timers_log_add(const char* tagstr, marker_type type, + int important, const char* file, int line) { + gpr_timer_entry* entry; if (!g_writing_enabled) { return; @@ -250,17 +254,17 @@ static void gpr_timers_log_add(const char *tagstr, marker_type type, } /* Latency profiler API implementation. 
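The hunk above replaces the gpr_thd helpers with raw pthreads: a joinable writer thread is created once in init_output() and joined at process exit via atexit()/finish_writing(). A self-contained sketch of that lifecycle, with an illustrative worker body standing in for the log-draining loop:

    #include <cstdio>
    #include <cstdlib>
    #include <pthread.h>

    static pthread_t g_writer;

    // Illustrative worker; the real code drains completed timer logs here.
    static void* writer_main(void* /*arg*/) {
      std::puts("writer thread running");
      return nullptr;
    }

    static void join_writer() {
      pthread_join(g_writer, nullptr);  // mirrors finish_writing()
    }

    static void start_writer() {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
      pthread_create(&g_writer, &attr, writer_main, nullptr);
      pthread_attr_destroy(&attr);
      std::atexit(join_writer);  // mirrors init_output()
    }

    int main() {
      start_writer();
      return 0;  // join_writer runs during exit
    }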
*/ -void gpr_timer_add_mark(const char *tagstr, int important, const char *file, +void gpr_timer_add_mark(const char* tagstr, int important, const char* file, int line) { gpr_timers_log_add(tagstr, MARK, important, file, line); } -void gpr_timer_begin(const char *tagstr, int important, const char *file, +void gpr_timer_begin(const char* tagstr, int important, const char* file, int line) { gpr_timers_log_add(tagstr, BEGIN, important, file, line); } -void gpr_timer_end(const char *tagstr, int important, const char *file, +void gpr_timer_end(const char* tagstr, int important, const char* file, int line) { gpr_timers_log_add(tagstr, END, important, file, line); } @@ -277,7 +281,7 @@ void gpr_timers_global_init(void) {} void gpr_timers_global_destroy(void) {} -void gpr_timers_set_log_filename(const char *filename) {} +void gpr_timers_set_log_filename(const char* filename) {} void gpr_timer_set_enabled(int enabled) {} #endif /* GRPC_BASIC_PROFILER */ diff --git a/Sources/CgRPC/src/core/lib/profiling/stap_timers.c b/Sources/CgRPC/src/core/lib/profiling/stap_timers.cc similarity index 75% rename from Sources/CgRPC/src/core/lib/profiling/stap_timers.c rename to Sources/CgRPC/src/core/lib/profiling/stap_timers.cc index c86d74f05..5ee1c4317 100644 --- a/Sources/CgRPC/src/core/lib/profiling/stap_timers.c +++ b/Sources/CgRPC/src/core/lib/profiling/stap_timers.cc @@ -27,22 +27,22 @@ #include "src/core/lib/profiling/stap_probes.h" /* Latency profiler API implementation. */ -void gpr_timer_add_mark(int tag, const char *tagstr, void *id, const char *file, +void gpr_timer_add_mark(int tag, const char* tagstr, void* id, const char* file, int line) { _STAP_ADD_MARK(tag); } -void gpr_timer_add_important_mark(int tag, const char *tagstr, void *id, - const char *file, int line) { +void gpr_timer_add_important_mark(int tag, const char* tagstr, void* id, + const char* file, int line) { _STAP_ADD_IMPORTANT_MARK(tag); } -void gpr_timer_begin(int tag, const char *tagstr, void *id, const char *file, +void gpr_timer_begin(int tag, const char* tagstr, void* id, const char* file, int line) { _STAP_TIMING_NS_BEGIN(tag); } -void gpr_timer_end(int tag, const char *tagstr, void *id, const char *file, +void gpr_timer_end(int tag, const char* tagstr, void* id, const char* file, int line) { _STAP_TIMING_NS_END(tag); } diff --git a/Sources/CgRPC/src/core/lib/profiling/timers.h b/Sources/CgRPC/src/core/lib/profiling/timers.h index 7f02b4bf8..7ff72783e 100644 --- a/Sources/CgRPC/src/core/lib/profiling/timers.h +++ b/Sources/CgRPC/src/core/lib/profiling/timers.h @@ -19,21 +19,17 @@ #ifndef GRPC_CORE_LIB_PROFILING_TIMERS_H #define GRPC_CORE_LIB_PROFILING_TIMERS_H -#ifdef __cplusplus -extern "C" { -#endif - void gpr_timers_global_init(void); void gpr_timers_global_destroy(void); -void gpr_timer_add_mark(const char *tagstr, int important, const char *file, +void gpr_timer_add_mark(const char* tagstr, int important, const char* file, int line); -void gpr_timer_begin(const char *tagstr, int important, const char *file, +void gpr_timer_begin(const char* tagstr, int important, const char* file, int line); -void gpr_timer_end(const char *tagstr, int important, const char *file, +void gpr_timer_end(const char* tagstr, int important, const char* file, int line); -void gpr_timers_set_log_filename(const char *filename); +void gpr_timers_set_log_filename(const char* filename); void gpr_timer_set_enabled(int enabled); @@ -44,14 +40,10 @@ void gpr_timer_set_enabled(int enabled); do { \ } while (0) -#define GPR_TIMER_BEGIN(tag, important) \ 
+#define GPR_TIMER_SCOPE(tag, important) \ do { \ } while (0) -#define GPR_TIMER_END(tag, important) \ - do { \ - } while (0) - #else /* at least one profiler requested... */ /* ... hopefully only one. */ #if defined(GRPC_STAP_PROFILER) && defined(GRPC_BASIC_PROFILER) @@ -68,12 +60,6 @@ void gpr_timer_set_enabled(int enabled); #define GPR_TIMER_MARK(tag, important) \ gpr_timer_add_mark(tag, important, __FILE__, __LINE__); -#define GPR_TIMER_BEGIN(tag, important) \ - gpr_timer_begin(tag, important, __FILE__, __LINE__); - -#define GPR_TIMER_END(tag, important) \ - gpr_timer_end(tag, important, __FILE__, __LINE__); - #ifdef GRPC_STAP_PROFILER /* Empty placeholder for now. */ #endif /* GRPC_STAP_PROFILER */ @@ -82,35 +68,27 @@ void gpr_timer_set_enabled(int enabled); /* Empty placeholder for now. */ #endif /* GRPC_BASIC_PROFILER */ -#endif /* at least one profiler requested. */ - -#ifdef __cplusplus -} - -#if (defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER) + \ - defined(GRPC_CUSTOM_PROFILER)) namespace grpc { class ProfileScope { public: - ProfileScope(const char *desc, bool important, const char *file, int line) + ProfileScope(const char* desc, bool important, const char* file, int line) : desc_(desc) { gpr_timer_begin(desc_, important ? 1 : 0, file, line); } ~ProfileScope() { gpr_timer_end(desc_, 0, "n/a", 0); } private: - const char *const desc_; + const char* const desc_; }; } // namespace grpc -#define GPR_TIMER_SCOPE(tag, important) \ - ::grpc::ProfileScope _profile_scope_##__LINE__((tag), (important), __FILE__, \ - __LINE__) -#else -#define GPR_TIMER_SCOPE(tag, important) \ - do { \ - } while (false) -#endif -#endif +#define GPR_TIMER_SCOPE_NAME_INTERNAL(prefix, line) prefix##line +#define GPR_TIMER_SCOPE_NAME(prefix, line) \ + GPR_TIMER_SCOPE_NAME_INTERNAL(prefix, line) +#define GPR_TIMER_SCOPE(tag, important) \ + ::grpc::ProfileScope GPR_TIMER_SCOPE_NAME(_profile_scope_, __LINE__)( \ + (tag), (important), __FILE__, __LINE__) + +#endif /* at least one profiler requested. 
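GPR_TIMER_SCOPE now declares a uniquely named local ::grpc::ProfileScope by pasting __LINE__ into the identifier; the extra GPR_TIMER_SCOPE_NAME_INTERNAL level is what forces __LINE__ to expand before the ## paste. A minimal standalone illustration of the same trick (macro names here are illustrative):

    // Pasting directly (a##b) would produce the literal token "prefix__LINE__";
    // the two-level expansion substitutes the line number first.
    #define CONCAT_INTERNAL(a, b) a##b
    #define CONCAT(a, b) CONCAT_INTERNAL(a, b)
    #define UNIQUE_LOCAL(prefix) CONCAT(prefix, __LINE__)

    int main() {
      // Each use pastes its own line number, so two scopes in one function
      // get distinct identifiers and do not collide -- the same property
      // GPR_TIMER_SCOPE relies on for its ProfileScope locals.
      int UNIQUE_LOCAL(scope_) = 1;  // e.g. scope_11
      int UNIQUE_LOCAL(scope_) = 2;  // e.g. scope_12, a different identifier
      return 0;
    }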
*/ #endif /* GRPC_CORE_LIB_PROFILING_TIMERS_H */ diff --git a/Sources/CgRPC/src/core/lib/security/context/security_context.c b/Sources/CgRPC/src/core/lib/security/context/security_context.cc similarity index 56% rename from Sources/CgRPC/src/core/lib/security/context/security_context.c rename to Sources/CgRPC/src/core/lib/security/context/security_context.cc index 8fff2c92c..14051a3f0 100644 --- a/Sources/CgRPC/src/core/lib/security/context/security_context.c +++ b/Sources/CgRPC/src/core/lib/security/context/security_context.cc @@ -16,11 +16,13 @@ * */ +#include + #include #include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/security/context/security_context.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/call.h" @@ -29,84 +31,85 @@ #include #include -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_auth_context_refcount = - GRPC_TRACER_INITIALIZER(false, "auth_context_refcount"); -#endif +grpc_core::DebugOnlyTraceFlag grpc_trace_auth_context_refcount( + false, "auth_context_refcount"); /* --- grpc_call --- */ -grpc_call_error grpc_call_set_credentials(grpc_call *call, - grpc_call_credentials *creds) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_client_security_context *ctx = NULL; +grpc_call_error grpc_call_set_credentials(grpc_call* call, + grpc_call_credentials* creds) { + grpc_core::ExecCtx exec_ctx; + grpc_client_security_context* ctx = nullptr; GRPC_API_TRACE("grpc_call_set_credentials(call=%p, creds=%p)", 2, (call, creds)); if (!grpc_call_is_client(call)) { gpr_log(GPR_ERROR, "Method is client-side only."); return GRPC_CALL_ERROR_NOT_ON_SERVER; } - ctx = (grpc_client_security_context *)grpc_call_context_get( - call, GRPC_CONTEXT_SECURITY); - if (ctx == NULL) { + ctx = static_cast( + grpc_call_context_get(call, GRPC_CONTEXT_SECURITY)); + if (ctx == nullptr) { ctx = grpc_client_security_context_create(); ctx->creds = grpc_call_credentials_ref(creds); grpc_call_context_set(call, GRPC_CONTEXT_SECURITY, ctx, grpc_client_security_context_destroy); } else { - grpc_call_credentials_unref(&exec_ctx, ctx->creds); + grpc_call_credentials_unref(ctx->creds); ctx->creds = grpc_call_credentials_ref(creds); } - grpc_exec_ctx_finish(&exec_ctx); + return GRPC_CALL_OK; } -grpc_auth_context *grpc_call_auth_context(grpc_call *call) { - void *sec_ctx = grpc_call_context_get(call, GRPC_CONTEXT_SECURITY); +grpc_auth_context* grpc_call_auth_context(grpc_call* call) { + void* sec_ctx = grpc_call_context_get(call, GRPC_CONTEXT_SECURITY); GRPC_API_TRACE("grpc_call_auth_context(call=%p)", 1, (call)); - if (sec_ctx == NULL) return NULL; + if (sec_ctx == nullptr) return nullptr; return grpc_call_is_client(call) ? 
GRPC_AUTH_CONTEXT_REF( - ((grpc_client_security_context *)sec_ctx)->auth_context, + ((grpc_client_security_context*)sec_ctx)->auth_context, "grpc_call_auth_context client") : GRPC_AUTH_CONTEXT_REF( - ((grpc_server_security_context *)sec_ctx)->auth_context, + ((grpc_server_security_context*)sec_ctx)->auth_context, "grpc_call_auth_context server"); } -void grpc_auth_context_release(grpc_auth_context *context) { +void grpc_auth_context_release(grpc_auth_context* context) { GRPC_API_TRACE("grpc_auth_context_release(context=%p)", 1, (context)); GRPC_AUTH_CONTEXT_UNREF(context, "grpc_auth_context_unref"); } /* --- grpc_client_security_context --- */ -grpc_client_security_context *grpc_client_security_context_create(void) { - return gpr_zalloc(sizeof(grpc_client_security_context)); +grpc_client_security_context* grpc_client_security_context_create(void) { + return static_cast( + gpr_zalloc(sizeof(grpc_client_security_context))); } -void grpc_client_security_context_destroy(void *ctx) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_client_security_context *c = (grpc_client_security_context *)ctx; - grpc_call_credentials_unref(&exec_ctx, c->creds); +void grpc_client_security_context_destroy(void* ctx) { + grpc_core::ExecCtx exec_ctx; + grpc_client_security_context* c = + static_cast(ctx); + grpc_call_credentials_unref(c->creds); GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context"); - if (c->extension.instance != NULL && c->extension.destroy != NULL) { + if (c->extension.instance != nullptr && c->extension.destroy != nullptr) { c->extension.destroy(c->extension.instance); } gpr_free(ctx); - grpc_exec_ctx_finish(&exec_ctx); } /* --- grpc_server_security_context --- */ -grpc_server_security_context *grpc_server_security_context_create(void) { - return gpr_zalloc(sizeof(grpc_server_security_context)); +grpc_server_security_context* grpc_server_security_context_create(void) { + return static_cast( + gpr_zalloc(sizeof(grpc_server_security_context))); } -void grpc_server_security_context_destroy(void *ctx) { - grpc_server_security_context *c = (grpc_server_security_context *)ctx; +void grpc_server_security_context_destroy(void* ctx) { + grpc_server_security_context* c = + static_cast(ctx); GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "server_security_context"); - if (c->extension.instance != NULL && c->extension.destroy != NULL) { + if (c->extension.instance != nullptr && c->extension.destroy != nullptr) { c->extension.destroy(c->extension.instance); } gpr_free(ctx); @@ -114,12 +117,13 @@ void grpc_server_security_context_destroy(void *ctx) { /* --- grpc_auth_context --- */ -static grpc_auth_property_iterator empty_iterator = {NULL, 0, NULL}; +static grpc_auth_property_iterator empty_iterator = {nullptr, 0, nullptr}; -grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained) { - grpc_auth_context *ctx = gpr_zalloc(sizeof(grpc_auth_context)); +grpc_auth_context* grpc_auth_context_create(grpc_auth_context* chained) { + grpc_auth_context* ctx = + static_cast(gpr_zalloc(sizeof(grpc_auth_context))); gpr_ref_init(&ctx->refcount, 1); - if (chained != NULL) { + if (chained != nullptr) { ctx->chained = GRPC_AUTH_CONTEXT_REF(chained, "chained"); ctx->peer_identity_property_name = ctx->chained->peer_identity_property_name; @@ -128,42 +132,42 @@ grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained) { } #ifndef NDEBUG -grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx, - const char *file, int line, - const char *reason) { - if (ctx == NULL) return NULL; 
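grpc_call_set_credentials, shown above, is the client-side entry point for attaching per-call credentials; it rejects server-side calls and swaps the credentials held by the call's security context. A hedged usage sketch, assuming the call and credentials objects come from the surrounding application:

    #include <grpc/grpc.h>
    #include <grpc/grpc_security.h>
    #include <grpc/support/log.h>

    // Attach per-call credentials to an already-created client call.
    void attach_call_creds(grpc_call* call, grpc_call_credentials* creds) {
      grpc_call_error err = grpc_call_set_credentials(call, creds);
      if (err != GRPC_CALL_OK) {
        // GRPC_CALL_ERROR_NOT_ON_SERVER is returned for server-side calls.
        gpr_log(GPR_ERROR, "grpc_call_set_credentials failed: %d", err);
      }
      // The call takes its own reference (grpc_call_credentials_ref above),
      // so the caller may drop its reference afterwards.
      grpc_call_credentials_release(creds);
    }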
- if (GRPC_TRACER_ON(grpc_trace_auth_context_refcount)) { +grpc_auth_context* grpc_auth_context_ref(grpc_auth_context* ctx, + const char* file, int line, + const char* reason) { + if (ctx == nullptr) return nullptr; + if (grpc_trace_auth_context_refcount.enabled()) { gpr_atm val = gpr_atm_no_barrier_load(&ctx->refcount.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "AUTH_CONTEXT:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", ctx, val, val + 1, reason); } #else -grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx) { - if (ctx == NULL) return NULL; +grpc_auth_context* grpc_auth_context_ref(grpc_auth_context* ctx) { + if (ctx == nullptr) return nullptr; #endif gpr_ref(&ctx->refcount); return ctx; } #ifndef NDEBUG -void grpc_auth_context_unref(grpc_auth_context *ctx, const char *file, int line, - const char *reason) { - if (ctx == NULL) return; - if (GRPC_TRACER_ON(grpc_trace_auth_context_refcount)) { +void grpc_auth_context_unref(grpc_auth_context* ctx, const char* file, int line, + const char* reason) { + if (ctx == nullptr) return; + if (grpc_trace_auth_context_refcount.enabled()) { gpr_atm val = gpr_atm_no_barrier_load(&ctx->refcount.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "AUTH_CONTEXT:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", ctx, val, val - 1, reason); } #else -void grpc_auth_context_unref(grpc_auth_context *ctx) { - if (ctx == NULL) return; +void grpc_auth_context_unref(grpc_auth_context* ctx) { + if (ctx == nullptr) return; #endif if (gpr_unref(&ctx->refcount)) { size_t i; GRPC_AUTH_CONTEXT_UNREF(ctx->chained, "chained"); - if (ctx->properties.array != NULL) { + if (ctx->properties.array != nullptr) { for (i = 0; i < ctx->properties.count; i++) { grpc_auth_property_reset(&ctx->properties.array[i]); } @@ -173,59 +177,59 @@ void grpc_auth_context_unref(grpc_auth_context *ctx) { } } -const char *grpc_auth_context_peer_identity_property_name( - const grpc_auth_context *ctx) { +const char* grpc_auth_context_peer_identity_property_name( + const grpc_auth_context* ctx) { GRPC_API_TRACE("grpc_auth_context_peer_identity_property_name(ctx=%p)", 1, (ctx)); return ctx->peer_identity_property_name; } -int grpc_auth_context_set_peer_identity_property_name(grpc_auth_context *ctx, - const char *name) { +int grpc_auth_context_set_peer_identity_property_name(grpc_auth_context* ctx, + const char* name) { grpc_auth_property_iterator it = grpc_auth_context_find_properties_by_name(ctx, name); - const grpc_auth_property *prop = grpc_auth_property_iterator_next(&it); + const grpc_auth_property* prop = grpc_auth_property_iterator_next(&it); GRPC_API_TRACE( "grpc_auth_context_set_peer_identity_property_name(ctx=%p, name=%s)", 2, (ctx, name)); - if (prop == NULL) { + if (prop == nullptr) { gpr_log(GPR_ERROR, "Property name %s not found in auth context.", - name != NULL ? name : "NULL"); + name != nullptr ? name : "NULL"); return 0; } ctx->peer_identity_property_name = prop->name; return 1; } -int grpc_auth_context_peer_is_authenticated(const grpc_auth_context *ctx) { +int grpc_auth_context_peer_is_authenticated(const grpc_auth_context* ctx) { GRPC_API_TRACE("grpc_auth_context_peer_is_authenticated(ctx=%p)", 1, (ctx)); - return ctx->peer_identity_property_name == NULL ? 0 : 1; + return ctx->peer_identity_property_name == nullptr ? 
0 : 1; } grpc_auth_property_iterator grpc_auth_context_property_iterator( - const grpc_auth_context *ctx) { + const grpc_auth_context* ctx) { grpc_auth_property_iterator it = empty_iterator; GRPC_API_TRACE("grpc_auth_context_property_iterator(ctx=%p)", 1, (ctx)); - if (ctx == NULL) return it; + if (ctx == nullptr) return it; it.ctx = ctx; return it; } -const grpc_auth_property *grpc_auth_property_iterator_next( - grpc_auth_property_iterator *it) { +const grpc_auth_property* grpc_auth_property_iterator_next( + grpc_auth_property_iterator* it) { GRPC_API_TRACE("grpc_auth_property_iterator_next(it=%p)", 1, (it)); - if (it == NULL || it->ctx == NULL) return NULL; + if (it == nullptr || it->ctx == nullptr) return nullptr; while (it->index == it->ctx->properties.count) { - if (it->ctx->chained == NULL) return NULL; + if (it->ctx->chained == nullptr) return nullptr; it->ctx = it->ctx->chained; it->index = 0; } - if (it->name == NULL) { + if (it->name == nullptr) { return &it->ctx->properties.array[it->index++]; } else { while (it->index < it->ctx->properties.count) { - const grpc_auth_property *prop = &it->ctx->properties.array[it->index++]; - GPR_ASSERT(prop->name != NULL); + const grpc_auth_property* prop = &it->ctx->properties.array[it->index++]; + GPR_ASSERT(prop->name != nullptr); if (strcmp(it->name, prop->name) == 0) { return prop; } @@ -236,55 +240,56 @@ const grpc_auth_property *grpc_auth_property_iterator_next( } grpc_auth_property_iterator grpc_auth_context_find_properties_by_name( - const grpc_auth_context *ctx, const char *name) { + const grpc_auth_context* ctx, const char* name) { grpc_auth_property_iterator it = empty_iterator; GRPC_API_TRACE("grpc_auth_context_find_properties_by_name(ctx=%p, name=%s)", 2, (ctx, name)); - if (ctx == NULL || name == NULL) return empty_iterator; + if (ctx == nullptr || name == nullptr) return empty_iterator; it.ctx = ctx; it.name = name; return it; } grpc_auth_property_iterator grpc_auth_context_peer_identity( - const grpc_auth_context *ctx) { + const grpc_auth_context* ctx) { GRPC_API_TRACE("grpc_auth_context_peer_identity(ctx=%p)", 1, (ctx)); - if (ctx == NULL) return empty_iterator; + if (ctx == nullptr) return empty_iterator; return grpc_auth_context_find_properties_by_name( ctx, ctx->peer_identity_property_name); } -static void ensure_auth_context_capacity(grpc_auth_context *ctx) { +static void ensure_auth_context_capacity(grpc_auth_context* ctx) { if (ctx->properties.count == ctx->properties.capacity) { ctx->properties.capacity = GPR_MAX(ctx->properties.capacity + 8, ctx->properties.capacity * 2); - ctx->properties.array = + ctx->properties.array = static_cast( gpr_realloc(ctx->properties.array, - ctx->properties.capacity * sizeof(grpc_auth_property)); + ctx->properties.capacity * sizeof(grpc_auth_property))); } } -void grpc_auth_context_add_property(grpc_auth_context *ctx, const char *name, - const char *value, size_t value_length) { - grpc_auth_property *prop; +void grpc_auth_context_add_property(grpc_auth_context* ctx, const char* name, + const char* value, size_t value_length) { + grpc_auth_property* prop; GRPC_API_TRACE( "grpc_auth_context_add_property(ctx=%p, name=%s, value=%*.*s, " "value_length=%lu)", - 6, (ctx, name, (int)value_length, (int)value_length, value, - (unsigned long)value_length)); + 6, + (ctx, name, (int)value_length, (int)value_length, value, + (unsigned long)value_length)); ensure_auth_context_capacity(ctx); prop = &ctx->properties.array[ctx->properties.count++]; prop->name = gpr_strdup(name); - prop->value = 
gpr_malloc(value_length + 1); + prop->value = static_cast(gpr_malloc(value_length + 1)); memcpy(prop->value, value, value_length); prop->value[value_length] = '\0'; prop->value_length = value_length; } -void grpc_auth_context_add_cstring_property(grpc_auth_context *ctx, - const char *name, - const char *value) { - grpc_auth_property *prop; +void grpc_auth_context_add_cstring_property(grpc_auth_context* ctx, + const char* name, + const char* value) { + grpc_auth_property* prop; GRPC_API_TRACE( "grpc_auth_context_add_cstring_property(ctx=%p, name=%s, value=%s)", 3, (ctx, name, value)); @@ -295,48 +300,49 @@ void grpc_auth_context_add_cstring_property(grpc_auth_context *ctx, prop->value_length = strlen(value); } -void grpc_auth_property_reset(grpc_auth_property *property) { +void grpc_auth_property_reset(grpc_auth_property* property) { gpr_free(property->name); gpr_free(property->value); memset(property, 0, sizeof(grpc_auth_property)); } -static void auth_context_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { - GRPC_AUTH_CONTEXT_UNREF(p, "auth_context_pointer_arg"); +static void auth_context_pointer_arg_destroy(void* p) { + GRPC_AUTH_CONTEXT_UNREF((grpc_auth_context*)p, "auth_context_pointer_arg"); } -static void *auth_context_pointer_arg_copy(void *p) { - return GRPC_AUTH_CONTEXT_REF(p, "auth_context_pointer_arg"); +static void* auth_context_pointer_arg_copy(void* p) { + return GRPC_AUTH_CONTEXT_REF((grpc_auth_context*)p, + "auth_context_pointer_arg"); } -static int auth_context_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); } +static int auth_context_pointer_cmp(void* a, void* b) { return GPR_ICMP(a, b); } static const grpc_arg_pointer_vtable auth_context_pointer_vtable = { auth_context_pointer_arg_copy, auth_context_pointer_arg_destroy, auth_context_pointer_cmp}; -grpc_arg grpc_auth_context_to_arg(grpc_auth_context *p) { - return grpc_channel_arg_pointer_create(GRPC_AUTH_CONTEXT_ARG, p, +grpc_arg grpc_auth_context_to_arg(grpc_auth_context* p) { + return grpc_channel_arg_pointer_create((char*)GRPC_AUTH_CONTEXT_ARG, p, &auth_context_pointer_vtable); } -grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg) { - if (strcmp(arg->key, GRPC_AUTH_CONTEXT_ARG) != 0) return NULL; +grpc_auth_context* grpc_auth_context_from_arg(const grpc_arg* arg) { + if (strcmp(arg->key, GRPC_AUTH_CONTEXT_ARG) != 0) return nullptr; if (arg->type != GRPC_ARG_POINTER) { gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type, GRPC_AUTH_CONTEXT_ARG); - return NULL; + return nullptr; } - return arg->value.pointer.p; + return static_cast(arg->value.pointer.p); } -grpc_auth_context *grpc_find_auth_context_in_args( - const grpc_channel_args *args) { +grpc_auth_context* grpc_find_auth_context_in_args( + const grpc_channel_args* args) { size_t i; - if (args == NULL) return NULL; + if (args == nullptr) return nullptr; for (i = 0; i < args->num_args; i++) { - grpc_auth_context *p = grpc_auth_context_from_arg(&args->args[i]); - if (p != NULL) return p; + grpc_auth_context* p = grpc_auth_context_from_arg(&args->args[i]); + if (p != nullptr) return p; } - return NULL; + return nullptr; } diff --git a/Sources/CgRPC/src/core/lib/security/context/security_context.h b/Sources/CgRPC/src/core/lib/security/context/security_context.h index 0df39257a..e782e4f28 100644 --- a/Sources/CgRPC/src/core/lib/security/context/security_context.h +++ b/Sources/CgRPC/src/core/lib/security/context/security_context.h @@ -19,16 +19,12 @@ #ifndef GRPC_CORE_LIB_SECURITY_CONTEXT_SECURITY_CONTEXT_H #define 
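The property iterator implemented above walks a context's property array and then its chained parents. A hedged sketch of enumerating properties from application code, assuming the context was obtained from grpc_call_auth_context():

    #include <grpc/grpc_security.h>
    #include <grpc/support/log.h>

    // Log every property of an auth context; the iterator transparently
    // follows chained contexts, as grpc_auth_property_iterator_next does.
    void log_auth_properties(const grpc_auth_context* ctx) {
      grpc_auth_property_iterator it = grpc_auth_context_property_iterator(ctx);
      const grpc_auth_property* prop;
      while ((prop = grpc_auth_property_iterator_next(&it)) != nullptr) {
        gpr_log(GPR_INFO, "%s = %.*s", prop->name,
                static_cast<int>(prop->value_length), prop->value);
      }
    }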
GRPC_CORE_LIB_SECURITY_CONTEXT_SECURITY_CONTEXT_H +#include + #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/security/credentials/credentials.h" -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_auth_context_refcount; -#endif - -#ifdef __cplusplus -extern "C" { -#endif +extern grpc_core::DebugOnlyTraceFlag grpc_trace_auth_context_refcount; /* --- grpc_auth_context --- @@ -37,21 +33,21 @@ extern "C" { /* Property names are always NULL terminated. */ typedef struct { - grpc_auth_property *array; + grpc_auth_property* array; size_t count; size_t capacity; } grpc_auth_property_array; struct grpc_auth_context { - struct grpc_auth_context *chained; + struct grpc_auth_context* chained; grpc_auth_property_array properties; gpr_refcount refcount; - const char *peer_identity_property_name; - grpc_pollset *pollset; + const char* peer_identity_property_name; + grpc_pollset* pollset; }; /* Creation. */ -grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained); +grpc_auth_context* grpc_auth_context_create(grpc_auth_context* chained); /* Refcounting. */ #ifndef NDEBUG @@ -59,19 +55,19 @@ grpc_auth_context *grpc_auth_context_create(grpc_auth_context *chained); grpc_auth_context_ref((p), __FILE__, __LINE__, (r)) #define GRPC_AUTH_CONTEXT_UNREF(p, r) \ grpc_auth_context_unref((p), __FILE__, __LINE__, (r)) -grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *policy, - const char *file, int line, - const char *reason); -void grpc_auth_context_unref(grpc_auth_context *policy, const char *file, - int line, const char *reason); +grpc_auth_context* grpc_auth_context_ref(grpc_auth_context* policy, + const char* file, int line, + const char* reason); +void grpc_auth_context_unref(grpc_auth_context* policy, const char* file, + int line, const char* reason); #else #define GRPC_AUTH_CONTEXT_REF(p, r) grpc_auth_context_ref((p)) #define GRPC_AUTH_CONTEXT_UNREF(p, r) grpc_auth_context_unref((p)) -grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *policy); -void grpc_auth_context_unref(grpc_auth_context *policy); +grpc_auth_context* grpc_auth_context_ref(grpc_auth_context* policy); +void grpc_auth_context_unref(grpc_auth_context* policy); #endif -void grpc_auth_property_reset(grpc_auth_property *property); +void grpc_auth_property_reset(grpc_auth_property* property); /* --- grpc_security_context_extension --- @@ -79,8 +75,8 @@ void grpc_auth_property_reset(grpc_auth_property *property); later by a higher level method on a grpc_call object. */ typedef struct { - void *instance; - void (*destroy)(void *); + void* instance; + void (*destroy)(void*); } grpc_security_context_extension; /* --- grpc_client_security_context --- @@ -88,36 +84,32 @@ typedef struct { Internal client-side security context. */ typedef struct { - grpc_call_credentials *creds; - grpc_auth_context *auth_context; + grpc_call_credentials* creds; + grpc_auth_context* auth_context; grpc_security_context_extension extension; } grpc_client_security_context; -grpc_client_security_context *grpc_client_security_context_create(void); -void grpc_client_security_context_destroy(void *ctx); +grpc_client_security_context* grpc_client_security_context_create(void); +void grpc_client_security_context_destroy(void* ctx); /* --- grpc_server_security_context --- Internal server-side security context. 
*/ typedef struct { - grpc_auth_context *auth_context; + grpc_auth_context* auth_context; grpc_security_context_extension extension; } grpc_server_security_context; -grpc_server_security_context *grpc_server_security_context_create(void); -void grpc_server_security_context_destroy(void *ctx); +grpc_server_security_context* grpc_server_security_context_create(void); +void grpc_server_security_context_destroy(void* ctx); /* --- Channel args for auth context --- */ #define GRPC_AUTH_CONTEXT_ARG "grpc.auth_context" -grpc_arg grpc_auth_context_to_arg(grpc_auth_context *c); -grpc_auth_context *grpc_auth_context_from_arg(const grpc_arg *arg); -grpc_auth_context *grpc_find_auth_context_in_args( - const grpc_channel_args *args); - -#ifdef __cplusplus -} -#endif +grpc_arg grpc_auth_context_to_arg(grpc_auth_context* c); +grpc_auth_context* grpc_auth_context_from_arg(const grpc_arg* arg); +grpc_auth_context* grpc_find_auth_context_in_args( + const grpc_channel_args* args); #endif /* GRPC_CORE_LIB_SECURITY_CONTEXT_SECURITY_CONTEXT_H */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/alts_credentials.cc b/Sources/CgRPC/src/core/lib/security/credentials/alts/alts_credentials.cc new file mode 100644 index 000000000..fa05d901b --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/alts_credentials.cc @@ -0,0 +1,119 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
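security_context.h also exposes helpers for carrying an auth context through channel args under the GRPC_AUTH_CONTEXT_ARG key. A hedged sketch of round-tripping a context through a grpc_channel_args array, assuming the patched header is on the include path; this uses only the signatures visible in the hunks above:

    #include <grpc/grpc.h>
    #include "src/core/lib/security/context/security_context.h"

    // Stash an auth context in channel args and recover it again.
    grpc_auth_context* roundtrip_auth_context(grpc_auth_context* ctx) {
      // Wraps the pointer with the auth-context pointer vtable.
      grpc_arg arg = grpc_auth_context_to_arg(ctx);
      grpc_channel_args args = {1, &arg};
      // Scans the args for GRPC_AUTH_CONTEXT_ARG; returns nullptr if absent.
      return grpc_find_auth_context_in_args(&args);
    }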
+ * + */ + +#include + +#include "src/core/lib/security/credentials/alts/alts_credentials.h" + +#include + +#include +#include +#include +#include + +#include "src/core/lib/security/credentials/alts/check_gcp_environment.h" +#include "src/core/lib/security/security_connector/alts_security_connector.h" + +#define GRPC_CREDENTIALS_TYPE_ALTS "Alts" +#define GRPC_ALTS_HANDSHAKER_SERVICE_URL "metadata.google.internal:8080" + +static void alts_credentials_destruct(grpc_channel_credentials* creds) { + grpc_alts_credentials* alts_creds = + reinterpret_cast(creds); + grpc_alts_credentials_options_destroy(alts_creds->options); + gpr_free(alts_creds->handshaker_service_url); +} + +static void alts_server_credentials_destruct(grpc_server_credentials* creds) { + grpc_alts_server_credentials* alts_creds = + reinterpret_cast(creds); + grpc_alts_credentials_options_destroy(alts_creds->options); + gpr_free(alts_creds->handshaker_service_url); +} + +static grpc_security_status alts_create_security_connector( + grpc_channel_credentials* creds, + grpc_call_credentials* request_metadata_creds, const char* target_name, + const grpc_channel_args* args, grpc_channel_security_connector** sc, + grpc_channel_args** new_args) { + return grpc_alts_channel_security_connector_create( + creds, request_metadata_creds, target_name, sc); +} + +static grpc_security_status alts_server_create_security_connector( + grpc_server_credentials* creds, grpc_server_security_connector** sc) { + return grpc_alts_server_security_connector_create(creds, sc); +} + +static const grpc_channel_credentials_vtable alts_credentials_vtable = { + alts_credentials_destruct, alts_create_security_connector, + /*duplicate_without_call_credentials=*/nullptr}; + +static const grpc_server_credentials_vtable alts_server_credentials_vtable = { + alts_server_credentials_destruct, alts_server_create_security_connector}; + +grpc_channel_credentials* grpc_alts_credentials_create_customized( + const grpc_alts_credentials_options* options, + const char* handshaker_service_url, bool enable_untrusted_alts) { + if (!enable_untrusted_alts && !grpc_alts_is_running_on_gcp()) { + return nullptr; + } + auto creds = static_cast( + gpr_zalloc(sizeof(grpc_alts_credentials))); + creds->options = grpc_alts_credentials_options_copy(options); + creds->handshaker_service_url = + handshaker_service_url == nullptr + ? gpr_strdup(GRPC_ALTS_HANDSHAKER_SERVICE_URL) + : gpr_strdup(handshaker_service_url); + creds->base.type = GRPC_CREDENTIALS_TYPE_ALTS; + creds->base.vtable = &alts_credentials_vtable; + gpr_ref_init(&creds->base.refcount, 1); + return &creds->base; +} + +grpc_server_credentials* grpc_alts_server_credentials_create_customized( + const grpc_alts_credentials_options* options, + const char* handshaker_service_url, bool enable_untrusted_alts) { + if (!enable_untrusted_alts && !grpc_alts_is_running_on_gcp()) { + return nullptr; + } + auto creds = static_cast( + gpr_zalloc(sizeof(grpc_alts_server_credentials))); + creds->options = grpc_alts_credentials_options_copy(options); + creds->handshaker_service_url = + handshaker_service_url == nullptr + ? 
gpr_strdup(GRPC_ALTS_HANDSHAKER_SERVICE_URL) + : gpr_strdup(handshaker_service_url); + creds->base.type = GRPC_CREDENTIALS_TYPE_ALTS; + creds->base.vtable = &alts_server_credentials_vtable; + gpr_ref_init(&creds->base.refcount, 1); + return &creds->base; +} + +grpc_channel_credentials* grpc_alts_credentials_create( + const grpc_alts_credentials_options* options) { + return grpc_alts_credentials_create_customized( + options, GRPC_ALTS_HANDSHAKER_SERVICE_URL, false); +} + +grpc_server_credentials* grpc_alts_server_credentials_create( + const grpc_alts_credentials_options* options) { + return grpc_alts_server_credentials_create_customized( + options, GRPC_ALTS_HANDSHAKER_SERVICE_URL, false); +} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/alts_credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/alts/alts_credentials.h new file mode 100644 index 000000000..810117f2b --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/alts_credentials.h @@ -0,0 +1,82 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_ALTS_ALTS_CREDENTIALS_H +#define GRPC_CORE_LIB_SECURITY_CREDENTIALS_ALTS_ALTS_CREDENTIALS_H + +#include + +#include + +#include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h" +#include "src/core/lib/security/credentials/credentials.h" + +/* Main struct for grpc ALTS channel credential. */ +typedef struct grpc_alts_credentials { + grpc_channel_credentials base; + grpc_alts_credentials_options* options; + char* handshaker_service_url; +} grpc_alts_credentials; + +/* Main struct for grpc ALTS server credential. */ +typedef struct grpc_alts_server_credentials { + grpc_server_credentials base; + grpc_alts_credentials_options* options; + char* handshaker_service_url; +} grpc_alts_server_credentials; + +/** + * This method creates an ALTS channel credential object with customized + * information provided by caller. + * + * - options: grpc ALTS credentials options instance for client. + * - handshaker_service_url: address of ALTS handshaker service in the format of + * "host:port". If it's nullptr, the address of default metadata server will + * be used. + * - enable_untrusted_alts: a boolean flag used to enable ALTS in untrusted + * mode. This mode can be enabled when we are sure ALTS is running on GCP or + * for testing purpose. + * + * It returns nullptr if the flag is disabled AND ALTS is not running on GCP. + * Otherwise, it returns the created credential object. + */ + +grpc_channel_credentials* grpc_alts_credentials_create_customized( + const grpc_alts_credentials_options* options, + const char* handshaker_service_url, bool enable_untrusted_alts); + +/** + * This method creates an ALTS server credential object with customized + * information provided by caller. + * + * - options: grpc ALTS credentials options instance for server. 
+ * - handshaker_service_url: address of ALTS handshaker service in the format of + * "host:port". If it's nullptr, the address of default metadata server will + * be used. + * - enable_untrusted_alts: a boolean flag used to enable ALTS in untrusted + * mode. This mode can be enabled when we are sure ALTS is running on GCP or + * for testing purpose. + * + * It returns nullptr if the flag is disabled and ALTS is not running on GCP. + * Otherwise, it returns the created credential object. + */ +grpc_server_credentials* grpc_alts_server_credentials_create_customized( + const grpc_alts_credentials_options* options, + const char* handshaker_service_url, bool enable_untrusted_alts); + +#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_ALTS_ALTS_CREDENTIALS_H */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment.cc b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment.cc new file mode 100644 index 000000000..96807876c --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment.cc @@ -0,0 +1,72 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/security/credentials/alts/check_gcp_environment.h" + +#include +#include +#include + +#include +#include + +const size_t kBiosDataBufferSize = 256; + +static char* trim(const char* src) { + if (src == nullptr) { + return nullptr; + } + char* des = nullptr; + size_t start = 0, end = strlen(src) - 1; + /* find the last character that is not a whitespace. */ + while (end != 0 && isspace(src[end])) { + end--; + } + /* find the first character that is not a whitespace. */ + while (start < strlen(src) && isspace(src[start])) { + start++; + } + if (start <= end) { + des = static_cast( + gpr_zalloc(sizeof(char) * (end - start + 2 /* '\0' */))); + memcpy(des, src + start, end - start + 1); + } + return des; +} + +namespace grpc_core { +namespace internal { + +char* read_bios_file(const char* bios_file) { + FILE* fp = fopen(bios_file, "r"); + if (!fp) { + gpr_log(GPR_ERROR, "BIOS data file cannot be opened."); + return nullptr; + } + char buf[kBiosDataBufferSize + 1]; + size_t ret = fread(buf, sizeof(char), kBiosDataBufferSize, fp); + buf[ret] = '\0'; + char* trimmed_buf = trim(buf); + fclose(fp); + return trimmed_buf; +} + +} // namespace internal +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment.h b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment.h new file mode 100644 index 000000000..aea4cea64 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment.h @@ -0,0 +1,57 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
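The new header documents grpc_alts_credentials_create_customized and its server counterpart. A hedged client-side usage sketch follows, assuming the ALTS headers added in this patch are on the include path and that the options constructor/destructor defined later in the patch are declared somewhere visible; the handshaker address is illustrative.

    #include "src/core/lib/security/credentials/alts/alts_credentials.h"
    #include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h"

    // Build ALTS channel credentials against a custom handshaker service,
    // enabling untrusted mode so this also works off GCP (e.g. in tests).
    grpc_channel_credentials* make_alts_creds() {
      grpc_alts_credentials_options* options =
          grpc_alts_credentials_client_options_create();
      grpc_channel_credentials* creds = grpc_alts_credentials_create_customized(
          options, /*handshaker_service_url=*/"localhost:10001",
          /*enable_untrusted_alts=*/true);
      // The credentials keep their own deep copy of the options.
      grpc_alts_credentials_options_destroy(options);
      return creds;  // non-null here because untrusted ALTS is enabled
    }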
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_ALTS_CHECK_GCP_ENVIRONMENT_H +#define GRPC_CORE_LIB_SECURITY_CREDENTIALS_ALTS_CHECK_GCP_ENVIRONMENT_H + +namespace grpc_core { +namespace internal { + +/** + * This method is a helper function that reads a file containing system bios + * data. Exposed for testing only. + * + * - bios_file: a file containing BIOS data used to determine GCE tenancy + * information. + * + * It returns a buffer containing the data read from the file. + */ +char* read_bios_file(const char* bios_file); + +/** + * This method checks if system BIOS data contains Google-specific phrases. + * Exposed for testing only. + * + * - bios_data: a buffer containing system BIOS data. + * + * It returns true if the BIOS data contains Google-specific phrases, and false + * otherwise. + */ +bool check_bios_data(const char* bios_data); + +} // namespace internal +} // namespace grpc_core + +/** + * This method checks if a VM (Windows or Linux) is running within Google + * compute Engine (GCE) or not. It returns true if the VM is running in GCE and + * false otherwise. + */ +bool grpc_alts_is_running_on_gcp(); + +#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_ALTS_CHECK_GCP_ENVIRONMENT_H */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc new file mode 100644 index 000000000..7c4d7a71c --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc @@ -0,0 +1,67 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#ifdef GPR_LINUX + +#include "src/core/lib/security/credentials/alts/check_gcp_environment.h" + +#include +#include + +#include + +#define GRPC_ALTS_EXPECT_NAME_GOOGLE "Google" +#define GRPC_ALTS_EXPECT_NAME_GCE "Google Compute Engine" +#define GRPC_ALTS_PRODUCT_NAME_FILE "/sys/class/dmi/id/product_name" + +static bool g_compute_engine_detection_done = false; +static bool g_is_on_compute_engine = false; +static gpr_mu g_mu; +static gpr_once g_once = GPR_ONCE_INIT; + +namespace grpc_core { +namespace internal { + +bool check_bios_data(const char* bios_data_file) { + char* bios_data = read_bios_file(bios_data_file); + bool result = (!strcmp(bios_data, GRPC_ALTS_EXPECT_NAME_GOOGLE)) || + (!strcmp(bios_data, GRPC_ALTS_EXPECT_NAME_GCE)); + gpr_free(bios_data); + return result; +} + +} // namespace internal +} // namespace grpc_core + +static void init_mu(void) { gpr_mu_init(&g_mu); } + +bool grpc_alts_is_running_on_gcp() { + gpr_once_init(&g_once, init_mu); + gpr_mu_lock(&g_mu); + if (!g_compute_engine_detection_done) { + g_is_on_compute_engine = + grpc_core::internal::check_bios_data(GRPC_ALTS_PRODUCT_NAME_FILE); + g_compute_engine_detection_done = true; + } + gpr_mu_unlock(&g_mu); + return g_is_on_compute_engine; +} + +#endif // GPR_LINUX diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc new file mode 100644 index 000000000..d97681b86 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc @@ -0,0 +1,33 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#if !defined(GPR_LINUX) && !defined(GPR_WINDOWS) + +#include "src/core/lib/security/credentials/alts/check_gcp_environment.h" + +#include + +bool grpc_alts_is_running_on_gcp() { + gpr_log(GPR_ERROR, + "Platforms other than Linux and Windows are not supported"); + return false; +} + +#endif // !defined(LINUX) && !defined(GPR_WINDOWS) diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc new file mode 100644 index 000000000..55efe0e9d --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc @@ -0,0 +1,114 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
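check_gcp_environment_linux.cc above decides GCE tenancy by reading the DMI product name, trimming whitespace, and comparing against "Google" or "Google Compute Engine", with the result cached behind a gpr_once/mutex pair. A self-contained sketch of the same check using only the standard library:

    #include <cctype>
    #include <fstream>
    #include <string>

    // Read /sys/class/dmi/id/product_name, trim surrounding whitespace (as the
    // trim() helper in check_gcp_environment.cc does), and compare against the
    // strings the patch looks for.
    bool looks_like_gce() {
      std::ifstream f("/sys/class/dmi/id/product_name");
      if (!f) return false;
      std::string name;
      std::getline(f, name);
      while (!name.empty() &&
             std::isspace(static_cast<unsigned char>(name.back())))
        name.pop_back();
      std::string::size_type start = 0;
      while (start < name.size() &&
             std::isspace(static_cast<unsigned char>(name[start])))
        ++start;
      name = name.substr(start);
      return name == "Google" || name == "Google Compute Engine";
    }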
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#ifdef GPR_WINDOWS + +#include "src/core/lib/security/credentials/alts/check_gcp_environment.h" + +#include +#include +#include +#include + +#include +#include +#include + +#define GRPC_ALTS_EXPECT_NAME_GOOGLE "Google" +#define GRPC_ALTS_WINDOWS_CHECK_COMMAND "powershell.exe" +#define GRPC_ALTS_WINDOWS_CHECK_COMMAND_ARGS \ + "(Get-WmiObject -Class Win32_BIOS).Manufacturer" +#define GRPC_ALTS_WINDOWS_CHECK_BIOS_FILE "windows_bios.data" + +const size_t kBiosDataBufferSize = 256; + +static bool g_compute_engine_detection_done = false; +static bool g_is_on_compute_engine = false; +static gpr_mu g_mu; +static gpr_once g_once = GPR_ONCE_INIT; + +namespace grpc_core { +namespace internal { + +bool check_bios_data(const char* bios_data_file) { + char* bios_data = read_bios_file(bios_data_file); + bool result = !strcmp(bios_data, GRPC_ALTS_EXPECT_NAME_GOOGLE); + remove(GRPC_ALTS_WINDOWS_CHECK_BIOS_FILE); + gpr_free(bios_data); + return result; +} + +} // namespace internal +} // namespace grpc_core + +static void init_mu(void) { gpr_mu_init(&g_mu); } + +static bool run_powershell() { + SECURITY_ATTRIBUTES sa; + sa.nLength = sizeof(sa); + sa.lpSecurityDescriptor = NULL; + sa.bInheritHandle = TRUE; + HANDLE h = CreateFile(_T(GRPC_ALTS_WINDOWS_CHECK_BIOS_FILE), GENERIC_WRITE, + FILE_SHARE_WRITE | FILE_SHARE_READ, &sa, OPEN_ALWAYS, + FILE_ATTRIBUTE_NORMAL, NULL); + if (h == INVALID_HANDLE_VALUE) { + gpr_log(GPR_ERROR, "CreateFile failed (%d).", GetLastError()); + return false; + } + PROCESS_INFORMATION pi; + STARTUPINFO si; + DWORD flags = CREATE_NO_WINDOW; + ZeroMemory(&pi, sizeof(pi)); + ZeroMemory(&si, sizeof(si)); + si.cb = sizeof(si); + si.dwFlags |= STARTF_USESTDHANDLES; + si.hStdInput = NULL; + si.hStdError = h; + si.hStdOutput = h; + TCHAR cmd[kBiosDataBufferSize]; + _sntprintf(cmd, kBiosDataBufferSize, _T("%s %s"), + _T(GRPC_ALTS_WINDOWS_CHECK_COMMAND), + _T(GRPC_ALTS_WINDOWS_CHECK_COMMAND_ARGS)); + if (!CreateProcess(NULL, cmd, NULL, NULL, TRUE, flags, NULL, NULL, &si, + &pi)) { + gpr_log(GPR_ERROR, "CreateProcess failed (%d).\n", GetLastError()); + return false; + } + WaitForSingleObject(pi.hProcess, INFINITE); + CloseHandle(pi.hProcess); + CloseHandle(pi.hThread); + CloseHandle(h); + return true; +} + +bool grpc_alts_is_running_on_gcp() { + gpr_once_init(&g_once, init_mu); + gpr_mu_lock(&g_mu); + if (!g_compute_engine_detection_done) { + g_is_on_compute_engine = + run_powershell() && + grpc_core::internal::check_bios_data(GRPC_ALTS_WINDOWS_CHECK_BIOS_FILE); + g_compute_engine_detection_done = true; + } + gpr_mu_unlock(&g_mu); + return g_is_on_compute_engine; +} + +#endif // GPR_WINDOWS diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc b/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc new file mode 100644 index 000000000..0a39c6c48 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc @@ -0,0 +1,126 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include +#include + +#include +#include +#include + +#include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h" +#include "src/core/tsi/alts/handshaker/transport_security_common_api.h" + +static grpc_alts_credentials_options* alts_client_options_copy( + const grpc_alts_credentials_options* options); + +static void alts_client_options_destroy(grpc_alts_credentials_options* options); + +static target_service_account* target_service_account_create( + const char* service_account) { + if (service_account == nullptr) { + return nullptr; + } + auto* sa = static_cast( + gpr_zalloc(sizeof(target_service_account))); + sa->data = gpr_strdup(service_account); + return sa; +} + +void grpc_alts_credentials_client_options_add_target_service_account( + grpc_alts_credentials_options* options, const char* service_account) { + if (options == nullptr || service_account == nullptr) { + gpr_log( + GPR_ERROR, + "Invalid nullptr arguments to " + "grpc_alts_credentials_client_options_add_target_service_account()"); + return; + } + auto client_options = + reinterpret_cast(options); + target_service_account* node = target_service_account_create(service_account); + node->next = client_options->target_account_list_head; + client_options->target_account_list_head = node; +} + +static void target_service_account_destroy( + target_service_account* service_account) { + if (service_account == nullptr) { + return; + } + gpr_free(service_account->data); + gpr_free(service_account); +} + +static const grpc_alts_credentials_options_vtable vtable = { + alts_client_options_copy, alts_client_options_destroy}; + +grpc_alts_credentials_options* grpc_alts_credentials_client_options_create() { + auto client_options = static_cast( + gpr_zalloc(sizeof(grpc_alts_credentials_client_options))); + client_options->base.vtable = &vtable; + return &client_options->base; +} + +static grpc_alts_credentials_options* alts_client_options_copy( + const grpc_alts_credentials_options* options) { + if (options == nullptr) { + return nullptr; + } + grpc_alts_credentials_options* new_options = + grpc_alts_credentials_client_options_create(); + auto new_client_options = + reinterpret_cast(new_options); + /* Copy target service accounts. */ + target_service_account* prev = nullptr; + auto node = + (reinterpret_cast(options)) + ->target_account_list_head; + while (node != nullptr) { + target_service_account* new_node = + target_service_account_create(node->data); + if (prev == nullptr) { + new_client_options->target_account_list_head = new_node; + } else { + prev->next = new_node; + } + prev = new_node; + node = node->next; + } + /* Copy rpc protocol versions. 
*/ + grpc_gcp_rpc_protocol_versions_copy(&options->rpc_versions, + &new_options->rpc_versions); + return new_options; +} + +static void alts_client_options_destroy( + grpc_alts_credentials_options* options) { + if (options == nullptr) { + return; + } + auto* client_options = + reinterpret_cast(options); + target_service_account* node = client_options->target_account_list_head; + while (node != nullptr) { + target_service_account* next_node = node->next; + target_service_account_destroy(node); + node = next_node; + } +} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc b/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc new file mode 100644 index 000000000..d42817154 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc @@ -0,0 +1,46 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h" + +#include +#include + +grpc_alts_credentials_options* grpc_alts_credentials_options_copy( + const grpc_alts_credentials_options* options) { + if (options != nullptr && options->vtable != nullptr && + options->vtable->copy != nullptr) { + return options->vtable->copy(options); + } + /* An error occurred. */ + gpr_log(GPR_ERROR, + "Invalid arguments to grpc_alts_credentials_options_copy()"); + return nullptr; +} + +void grpc_alts_credentials_options_destroy( + grpc_alts_credentials_options* options) { + if (options != nullptr) { + if (options->vtable != nullptr && options->vtable->destruct != nullptr) { + options->vtable->destruct(options); + } + gpr_free(options); + } +} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h b/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h new file mode 100644 index 000000000..320af718b --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h @@ -0,0 +1,75 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_ALTS_GRPC_ALTS_CREDENTIALS_OPTIONS_H +#define GRPC_CORE_LIB_SECURITY_CREDENTIALS_ALTS_GRPC_ALTS_CREDENTIALS_OPTIONS_H + +#include + +#include + +#include "src/core/tsi/alts/handshaker/transport_security_common_api.h" + +/* V-table for grpc_alts_credentials_options */ +typedef struct grpc_alts_credentials_options_vtable { + grpc_alts_credentials_options* (*copy)( + const grpc_alts_credentials_options* options); + void (*destruct)(grpc_alts_credentials_options* options); +} grpc_alts_credentials_options_vtable; + +struct grpc_alts_credentials_options { + const struct grpc_alts_credentials_options_vtable* vtable; + grpc_gcp_rpc_protocol_versions rpc_versions; +}; + +typedef struct target_service_account { + struct target_service_account* next; + char* data; +} target_service_account; + +/** + * Main struct for ALTS client credentials options. The options contain a + * a list of target service accounts (if specified) used for secure naming + * check. + */ +typedef struct grpc_alts_credentials_client_options { + grpc_alts_credentials_options base; + target_service_account* target_account_list_head; +} grpc_alts_credentials_client_options; + +/** + * Main struct for ALTS server credentials options. The options currently + * do not contain any server-specific fields. + */ +typedef struct grpc_alts_credentials_server_options { + grpc_alts_credentials_options base; +} grpc_alts_credentials_server_options; + +/** + * This method performs a deep copy on grpc_alts_credentials_options instance. + * + * - options: a grpc_alts_credentials_options instance that needs to be copied. + * + * It returns a new grpc_alts_credentials_options instance on success and NULL + * on failure. + */ +grpc_alts_credentials_options* grpc_alts_credentials_options_copy( + const grpc_alts_credentials_options* options); + +#endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_ALTS_GRPC_ALTS_CREDENTIALS_OPTIONS_H \ + */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc b/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc new file mode 100644 index 000000000..62aa7a620 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include +#include + +#include +#include + +#include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h" +#include "src/core/tsi/alts/handshaker/transport_security_common_api.h" + +static grpc_alts_credentials_options* alts_server_options_copy( + const grpc_alts_credentials_options* options); + +static void alts_server_options_destroy( + grpc_alts_credentials_options* options) {} + +static const grpc_alts_credentials_options_vtable vtable = { + alts_server_options_copy, alts_server_options_destroy}; + +grpc_alts_credentials_options* grpc_alts_credentials_server_options_create() { + grpc_alts_credentials_server_options* server_options = + static_cast( + gpr_zalloc(sizeof(*server_options))); + server_options->base.vtable = &vtable; + return &server_options->base; +} + +static grpc_alts_credentials_options* alts_server_options_copy( + const grpc_alts_credentials_options* options) { + if (options == nullptr) { + return nullptr; + } + grpc_alts_credentials_options* new_options = + grpc_alts_credentials_server_options_create(); + /* Copy rpc protocol versions. */ + grpc_gcp_rpc_protocol_versions_copy(&options->rpc_versions, + &new_options->rpc_versions); + return new_options; +} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.c b/Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.cc similarity index 55% rename from Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.c rename to Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.cc index b67ff48d0..b8f409260 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/security/credentials/composite/composite_credentials.h" #include @@ -30,56 +32,57 @@ /* -- Composite call credentials. -- */ typedef struct { - grpc_composite_call_credentials *composite_creds; + grpc_composite_call_credentials* composite_creds; size_t creds_index; - grpc_polling_entity *pollent; + grpc_polling_entity* pollent; grpc_auth_metadata_context auth_md_context; - grpc_credentials_mdelem_array *md_array; - grpc_closure *on_request_metadata; + grpc_credentials_mdelem_array* md_array; + grpc_closure* on_request_metadata; grpc_closure internal_on_request_metadata; } grpc_composite_call_credentials_metadata_context; -static void composite_call_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { - grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds; +static void composite_call_destruct(grpc_call_credentials* creds) { + grpc_composite_call_credentials* c = + reinterpret_cast(creds); for (size_t i = 0; i < c->inner.num_creds; i++) { - grpc_call_credentials_unref(exec_ctx, c->inner.creds_array[i]); + grpc_call_credentials_unref(c->inner.creds_array[i]); } gpr_free(c->inner.creds_array); } -static void composite_call_metadata_cb(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_composite_call_credentials_metadata_context *ctx = - (grpc_composite_call_credentials_metadata_context *)arg; +static void composite_call_metadata_cb(void* arg, grpc_error* error) { + grpc_composite_call_credentials_metadata_context* ctx = + static_cast(arg); if (error == GRPC_ERROR_NONE) { /* See if we need to get some more metadata. 
*/ if (ctx->creds_index < ctx->composite_creds->inner.num_creds) { - grpc_call_credentials *inner_creds = + grpc_call_credentials* inner_creds = ctx->composite_creds->inner.creds_array[ctx->creds_index++]; if (grpc_call_credentials_get_request_metadata( - exec_ctx, inner_creds, ctx->pollent, ctx->auth_md_context, - ctx->md_array, &ctx->internal_on_request_metadata, &error)) { + inner_creds, ctx->pollent, ctx->auth_md_context, ctx->md_array, + &ctx->internal_on_request_metadata, &error)) { // Synchronous response, so call ourselves recursively. - composite_call_metadata_cb(exec_ctx, arg, error); + composite_call_metadata_cb(arg, error); GRPC_ERROR_UNREF(error); } return; } // We're done! } - GRPC_CLOSURE_SCHED(exec_ctx, ctx->on_request_metadata, GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(ctx->on_request_metadata, GRPC_ERROR_REF(error)); gpr_free(ctx); } static bool composite_call_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context auth_md_context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error) { - grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds; - grpc_composite_call_credentials_metadata_context *ctx; - ctx = gpr_zalloc(sizeof(grpc_composite_call_credentials_metadata_context)); + grpc_call_credentials* creds, grpc_polling_entity* pollent, + grpc_auth_metadata_context auth_md_context, + grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata, + grpc_error** error) { + grpc_composite_call_credentials* c = + reinterpret_cast(creds); + grpc_composite_call_credentials_metadata_context* ctx; + ctx = static_cast( + gpr_zalloc(sizeof(grpc_composite_call_credentials_metadata_context))); ctx->composite_creds = c; ctx->pollent = pollent; ctx->auth_md_context = auth_md_context; @@ -89,11 +92,11 @@ static bool composite_call_get_request_metadata( composite_call_metadata_cb, ctx, grpc_schedule_on_exec_ctx); bool synchronous = true; while (ctx->creds_index < ctx->composite_creds->inner.num_creds) { - grpc_call_credentials *inner_creds = + grpc_call_credentials* inner_creds = ctx->composite_creds->inner.creds_array[ctx->creds_index++]; if (grpc_call_credentials_get_request_metadata( - exec_ctx, inner_creds, ctx->pollent, ctx->auth_md_context, - ctx->md_array, &ctx->internal_on_request_metadata, error)) { + inner_creds, ctx->pollent, ctx->auth_md_context, ctx->md_array, + &ctx->internal_on_request_metadata, error)) { if (*error != GRPC_ERROR_NONE) break; } else { synchronous = false; // Async return. 
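For context, the hunk above shows the metadata path walking the composite's inner creds_array and collecting metadata from each inner credential, taking the synchronous path where possible and falling back to the async callback otherwise. A minimal usage sketch of the public composition API follows (a sketch only, assuming the standard grpc/grpc_security.h entry points; the token strings are placeholders, not values from this patch):

// Usage sketch (illustrative only): compose two call credentials and attach
// them to existing channel credentials. The resulting composite's inner
// creds_array is what composite_call_get_request_metadata() iterates over.
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

static grpc_channel_credentials* sketch_make_composite_channel_creds(
    grpc_channel_credentials* base_channel_creds /* owned by the caller */) {
  // Two independent call credentials; "token-a"/"token-b" are placeholders.
  grpc_call_credentials* a =
      grpc_access_token_credentials_create("token-a", nullptr);
  grpc_call_credentials* b =
      grpc_access_token_credentials_create("token-b", nullptr);
  // Flattened into one composite; each per-call metadata request will query
  // both inner credentials, as the hunk above shows.
  grpc_call_credentials* both =
      grpc_composite_call_credentials_create(a, b, nullptr);
  grpc_channel_credentials* result =
      grpc_composite_channel_credentials_create(base_channel_creds, both,
                                                nullptr);
  // The create functions in this file take their own references on the inner
  // credentials, so the locals can be released here.
  grpc_call_credentials_release(a);
  grpc_call_credentials_release(b);
  grpc_call_credentials_release(both);
  return result;
}

Releasing the locals right away is safe because the composite constructors ref each inner credential, as the create functions later in this file show.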
@@ -105,12 +108,13 @@ static bool composite_call_get_request_metadata( } static void composite_call_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { - grpc_composite_call_credentials *c = (grpc_composite_call_credentials *)creds; + grpc_call_credentials* creds, grpc_credentials_mdelem_array* md_array, + grpc_error* error) { + grpc_composite_call_credentials* c = + reinterpret_cast(creds); for (size_t i = 0; i < c->inner.num_creds; ++i) { grpc_call_credentials_cancel_get_request_metadata( - exec_ctx, c->inner.creds_array[i], md_array, GRPC_ERROR_REF(error)); + c->inner.creds_array[i], md_array, GRPC_ERROR_REF(error)); } GRPC_ERROR_UNREF(error); } @@ -120,9 +124,9 @@ static grpc_call_credentials_vtable composite_call_credentials_vtable = { composite_call_cancel_get_request_metadata}; static grpc_call_credentials_array get_creds_array( - grpc_call_credentials **creds_addr) { + grpc_call_credentials** creds_addr) { grpc_call_credentials_array result; - grpc_call_credentials *creds = *creds_addr; + grpc_call_credentials* creds = *creds_addr; result.creds_array = creds_addr; result.num_creds = 1; if (strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0) { @@ -131,113 +135,113 @@ static grpc_call_credentials_array get_creds_array( return result; } -grpc_call_credentials *grpc_composite_call_credentials_create( - grpc_call_credentials *creds1, grpc_call_credentials *creds2, - void *reserved) { +grpc_call_credentials* grpc_composite_call_credentials_create( + grpc_call_credentials* creds1, grpc_call_credentials* creds2, + void* reserved) { size_t i; size_t creds_array_byte_size; grpc_call_credentials_array creds1_array; grpc_call_credentials_array creds2_array; - grpc_composite_call_credentials *c; + grpc_composite_call_credentials* c; GRPC_API_TRACE( "grpc_composite_call_credentials_create(creds1=%p, creds2=%p, " "reserved=%p)", 3, (creds1, creds2, reserved)); - GPR_ASSERT(reserved == NULL); - GPR_ASSERT(creds1 != NULL); - GPR_ASSERT(creds2 != NULL); - c = gpr_zalloc(sizeof(grpc_composite_call_credentials)); + GPR_ASSERT(reserved == nullptr); + GPR_ASSERT(creds1 != nullptr); + GPR_ASSERT(creds2 != nullptr); + c = static_cast( + gpr_zalloc(sizeof(grpc_composite_call_credentials))); c->base.type = GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE; c->base.vtable = &composite_call_credentials_vtable; gpr_ref_init(&c->base.refcount, 1); creds1_array = get_creds_array(&creds1); creds2_array = get_creds_array(&creds2); c->inner.num_creds = creds1_array.num_creds + creds2_array.num_creds; - creds_array_byte_size = c->inner.num_creds * sizeof(grpc_call_credentials *); - c->inner.creds_array = gpr_zalloc(creds_array_byte_size); + creds_array_byte_size = c->inner.num_creds * sizeof(grpc_call_credentials*); + c->inner.creds_array = + static_cast(gpr_zalloc(creds_array_byte_size)); for (i = 0; i < creds1_array.num_creds; i++) { - grpc_call_credentials *cur_creds = creds1_array.creds_array[i]; + grpc_call_credentials* cur_creds = creds1_array.creds_array[i]; c->inner.creds_array[i] = grpc_call_credentials_ref(cur_creds); } for (i = 0; i < creds2_array.num_creds; i++) { - grpc_call_credentials *cur_creds = creds2_array.creds_array[i]; + grpc_call_credentials* cur_creds = creds2_array.creds_array[i]; c->inner.creds_array[i + creds1_array.num_creds] = grpc_call_credentials_ref(cur_creds); } return &c->base; } -const grpc_call_credentials_array * -grpc_composite_call_credentials_get_credentials(grpc_call_credentials 
*creds) { - const grpc_composite_call_credentials *c = - (const grpc_composite_call_credentials *)creds; +const grpc_call_credentials_array* +grpc_composite_call_credentials_get_credentials(grpc_call_credentials* creds) { + const grpc_composite_call_credentials* c = + reinterpret_cast(creds); GPR_ASSERT(strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0); return &c->inner; } -grpc_call_credentials *grpc_credentials_contains_type( - grpc_call_credentials *creds, const char *type, - grpc_call_credentials **composite_creds) { +grpc_call_credentials* grpc_credentials_contains_type( + grpc_call_credentials* creds, const char* type, + grpc_call_credentials** composite_creds) { size_t i; if (strcmp(creds->type, type) == 0) { - if (composite_creds != NULL) *composite_creds = NULL; + if (composite_creds != nullptr) *composite_creds = nullptr; return creds; } else if (strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0) { - const grpc_call_credentials_array *inner_creds_array = + const grpc_call_credentials_array* inner_creds_array = grpc_composite_call_credentials_get_credentials(creds); for (i = 0; i < inner_creds_array->num_creds; i++) { if (strcmp(type, inner_creds_array->creds_array[i]->type) == 0) { - if (composite_creds != NULL) *composite_creds = creds; + if (composite_creds != nullptr) *composite_creds = creds; return inner_creds_array->creds_array[i]; } } } - return NULL; + return nullptr; } /* -- Composite channel credentials. -- */ -static void composite_channel_destruct(grpc_exec_ctx *exec_ctx, - grpc_channel_credentials *creds) { - grpc_composite_channel_credentials *c = - (grpc_composite_channel_credentials *)creds; - grpc_channel_credentials_unref(exec_ctx, c->inner_creds); - grpc_call_credentials_unref(exec_ctx, c->call_creds); +static void composite_channel_destruct(grpc_channel_credentials* creds) { + grpc_composite_channel_credentials* c = + reinterpret_cast(creds); + grpc_channel_credentials_unref(c->inner_creds); + grpc_call_credentials_unref(c->call_creds); } static grpc_security_status composite_channel_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds, - grpc_call_credentials *call_creds, const char *target, - const grpc_channel_args *args, grpc_channel_security_connector **sc, - grpc_channel_args **new_args) { - grpc_composite_channel_credentials *c = - (grpc_composite_channel_credentials *)creds; + grpc_channel_credentials* creds, grpc_call_credentials* call_creds, + const char* target, const grpc_channel_args* args, + grpc_channel_security_connector** sc, grpc_channel_args** new_args) { + grpc_composite_channel_credentials* c = + reinterpret_cast(creds); grpc_security_status status = GRPC_SECURITY_ERROR; - GPR_ASSERT(c->inner_creds != NULL && c->call_creds != NULL && - c->inner_creds->vtable != NULL && - c->inner_creds->vtable->create_security_connector != NULL); + GPR_ASSERT(c->inner_creds != nullptr && c->call_creds != nullptr && + c->inner_creds->vtable != nullptr && + c->inner_creds->vtable->create_security_connector != nullptr); /* If we are passed a call_creds, create a call composite to pass it downstream. 
*/ - if (call_creds != NULL) { - grpc_call_credentials *composite_call_creds = - grpc_composite_call_credentials_create(c->call_creds, call_creds, NULL); + if (call_creds != nullptr) { + grpc_call_credentials* composite_call_creds = + grpc_composite_call_credentials_create(c->call_creds, call_creds, + nullptr); status = c->inner_creds->vtable->create_security_connector( - exec_ctx, c->inner_creds, composite_call_creds, target, args, sc, - new_args); - grpc_call_credentials_unref(exec_ctx, composite_call_creds); + c->inner_creds, composite_call_creds, target, args, sc, new_args); + grpc_call_credentials_unref(composite_call_creds); } else { status = c->inner_creds->vtable->create_security_connector( - exec_ctx, c->inner_creds, c->call_creds, target, args, sc, new_args); + c->inner_creds, c->call_creds, target, args, sc, new_args); } return status; } -static grpc_channel_credentials * +static grpc_channel_credentials* composite_channel_duplicate_without_call_credentials( - grpc_channel_credentials *creds) { - grpc_composite_channel_credentials *c = - (grpc_composite_channel_credentials *)creds; + grpc_channel_credentials* creds) { + grpc_composite_channel_credentials* c = + reinterpret_cast(creds); return grpc_channel_credentials_ref(c->inner_creds); } @@ -245,11 +249,13 @@ static grpc_channel_credentials_vtable composite_channel_credentials_vtable = { composite_channel_destruct, composite_channel_create_security_connector, composite_channel_duplicate_without_call_credentials}; -grpc_channel_credentials *grpc_composite_channel_credentials_create( - grpc_channel_credentials *channel_creds, grpc_call_credentials *call_creds, - void *reserved) { - grpc_composite_channel_credentials *c = gpr_zalloc(sizeof(*c)); - GPR_ASSERT(channel_creds != NULL && call_creds != NULL && reserved == NULL); +grpc_channel_credentials* grpc_composite_channel_credentials_create( + grpc_channel_credentials* channel_creds, grpc_call_credentials* call_creds, + void* reserved) { + grpc_composite_channel_credentials* c = + static_cast(gpr_zalloc(sizeof(*c))); + GPR_ASSERT(channel_creds != nullptr && call_creds != nullptr && + reserved == nullptr); GRPC_API_TRACE( "grpc_composite_channel_credentials_create(channel_creds=%p, " "call_creds=%p, reserved=%p)", diff --git a/Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.h index 3076afcb7..a952ad57f 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/composite/composite_credentials.h @@ -19,31 +19,33 @@ #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_COMPOSITE_COMPOSITE_CREDENTIALS_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_COMPOSITE_COMPOSITE_CREDENTIALS_H +#include + #include "src/core/lib/security/credentials/credentials.h" typedef struct { - grpc_call_credentials **creds_array; + grpc_call_credentials** creds_array; size_t num_creds; } grpc_call_credentials_array; -const grpc_call_credentials_array * +const grpc_call_credentials_array* grpc_composite_call_credentials_get_credentials( - grpc_call_credentials *composite_creds); + grpc_call_credentials* composite_creds); /* Returns creds if creds is of the specified type or the inner creds of the specified type (if found), if the creds is of type COMPOSITE. If composite_creds is not NULL, *composite_creds will point to creds if of type COMPOSITE in case of success. 
*/ -grpc_call_credentials *grpc_credentials_contains_type( - grpc_call_credentials *creds, const char *type, - grpc_call_credentials **composite_creds); +grpc_call_credentials* grpc_credentials_contains_type( + grpc_call_credentials* creds, const char* type, + grpc_call_credentials** composite_creds); /* -- Composite channel credentials. -- */ typedef struct { grpc_channel_credentials base; - grpc_channel_credentials *inner_creds; - grpc_call_credentials *call_creds; + grpc_channel_credentials* inner_creds; + grpc_call_credentials* call_creds; } grpc_composite_channel_credentials; /* -- Composite call credentials. -- */ @@ -54,4 +56,4 @@ typedef struct { } grpc_composite_call_credentials; #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_COMPOSITE_COMPOSITE_CREDENTIALS_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/credentials.c b/Sources/CgRPC/src/core/lib/security/credentials/credentials.c deleted file mode 100644 index 8a67c9865..000000000 --- a/Sources/CgRPC/src/core/lib/security/credentials/credentials.c +++ /dev/null @@ -1,287 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/security/credentials/credentials.h" - -#include -#include - -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/http/httpcli.h" -#include "src/core/lib/http/parser.h" -#include "src/core/lib/iomgr/executor.h" -#include "src/core/lib/json/json.h" -#include "src/core/lib/support/string.h" -#include "src/core/lib/surface/api_trace.h" - -#include -#include -#include -#include -#include - -/* -- Common. 
-- */ - -grpc_credentials_metadata_request *grpc_credentials_metadata_request_create( - grpc_call_credentials *creds) { - grpc_credentials_metadata_request *r = - gpr_zalloc(sizeof(grpc_credentials_metadata_request)); - r->creds = grpc_call_credentials_ref(creds); - return r; -} - -void grpc_credentials_metadata_request_destroy( - grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *r) { - grpc_call_credentials_unref(exec_ctx, r->creds); - grpc_http_response_destroy(&r->response); - gpr_free(r); -} - -grpc_channel_credentials *grpc_channel_credentials_ref( - grpc_channel_credentials *creds) { - if (creds == NULL) return NULL; - gpr_ref(&creds->refcount); - return creds; -} - -void grpc_channel_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_channel_credentials *creds) { - if (creds == NULL) return; - if (gpr_unref(&creds->refcount)) { - if (creds->vtable->destruct != NULL) { - creds->vtable->destruct(exec_ctx, creds); - } - gpr_free(creds); - } -} - -void grpc_channel_credentials_release(grpc_channel_credentials *creds) { - GRPC_API_TRACE("grpc_channel_credentials_release(creds=%p)", 1, (creds)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_channel_credentials_unref(&exec_ctx, creds); - grpc_exec_ctx_finish(&exec_ctx); -} - -grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds) { - if (creds == NULL) return NULL; - gpr_ref(&creds->refcount); - return creds; -} - -void grpc_call_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { - if (creds == NULL) return; - if (gpr_unref(&creds->refcount)) { - if (creds->vtable->destruct != NULL) { - creds->vtable->destruct(exec_ctx, creds); - } - gpr_free(creds); - } -} - -void grpc_call_credentials_release(grpc_call_credentials *creds) { - GRPC_API_TRACE("grpc_call_credentials_release(creds=%p)", 1, (creds)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_call_credentials_unref(&exec_ctx, creds); - grpc_exec_ctx_finish(&exec_ctx); -} - -bool grpc_call_credentials_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error) { - if (creds == NULL || creds->vtable->get_request_metadata == NULL) { - return true; - } - return creds->vtable->get_request_metadata( - exec_ctx, creds, pollent, context, md_array, on_request_metadata, error); -} - -void grpc_call_credentials_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { - if (creds == NULL || creds->vtable->cancel_get_request_metadata == NULL) { - return; - } - creds->vtable->cancel_get_request_metadata(exec_ctx, creds, md_array, error); -} - -grpc_security_status grpc_channel_credentials_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *channel_creds, - const char *target, const grpc_channel_args *args, - grpc_channel_security_connector **sc, grpc_channel_args **new_args) { - *new_args = NULL; - if (channel_creds == NULL) { - return GRPC_SECURITY_ERROR; - } - GPR_ASSERT(channel_creds->vtable->create_security_connector != NULL); - return channel_creds->vtable->create_security_connector( - exec_ctx, channel_creds, NULL, target, args, sc, new_args); -} - -grpc_channel_credentials * -grpc_channel_credentials_duplicate_without_call_credentials( - grpc_channel_credentials *channel_creds) { - if (channel_creds != NULL && 
channel_creds->vtable != NULL && - channel_creds->vtable->duplicate_without_call_credentials != NULL) { - return channel_creds->vtable->duplicate_without_call_credentials( - channel_creds); - } else { - return grpc_channel_credentials_ref(channel_creds); - } -} - -static void credentials_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { - grpc_channel_credentials_unref(exec_ctx, p); -} - -static void *credentials_pointer_arg_copy(void *p) { - return grpc_channel_credentials_ref(p); -} - -static int credentials_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); } - -static const grpc_arg_pointer_vtable credentials_pointer_vtable = { - credentials_pointer_arg_copy, credentials_pointer_arg_destroy, - credentials_pointer_cmp}; - -grpc_arg grpc_channel_credentials_to_arg( - grpc_channel_credentials *credentials) { - return grpc_channel_arg_pointer_create( - GRPC_ARG_CHANNEL_CREDENTIALS, credentials, &credentials_pointer_vtable); -} - -grpc_channel_credentials *grpc_channel_credentials_from_arg( - const grpc_arg *arg) { - if (strcmp(arg->key, GRPC_ARG_CHANNEL_CREDENTIALS)) return NULL; - if (arg->type != GRPC_ARG_POINTER) { - gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type, - GRPC_ARG_CHANNEL_CREDENTIALS); - return NULL; - } - return arg->value.pointer.p; -} - -grpc_channel_credentials *grpc_channel_credentials_find_in_args( - const grpc_channel_args *args) { - size_t i; - if (args == NULL) return NULL; - for (i = 0; i < args->num_args; i++) { - grpc_channel_credentials *credentials = - grpc_channel_credentials_from_arg(&args->args[i]); - if (credentials != NULL) return credentials; - } - return NULL; -} - -grpc_server_credentials *grpc_server_credentials_ref( - grpc_server_credentials *creds) { - if (creds == NULL) return NULL; - gpr_ref(&creds->refcount); - return creds; -} - -void grpc_server_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_server_credentials *creds) { - if (creds == NULL) return; - if (gpr_unref(&creds->refcount)) { - if (creds->vtable->destruct != NULL) { - creds->vtable->destruct(exec_ctx, creds); - } - if (creds->processor.destroy != NULL && creds->processor.state != NULL) { - creds->processor.destroy(creds->processor.state); - } - gpr_free(creds); - } -} - -void grpc_server_credentials_release(grpc_server_credentials *creds) { - GRPC_API_TRACE("grpc_server_credentials_release(creds=%p)", 1, (creds)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_server_credentials_unref(&exec_ctx, creds); - grpc_exec_ctx_finish(&exec_ctx); -} - -grpc_security_status grpc_server_credentials_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds, - grpc_server_security_connector **sc) { - if (creds == NULL || creds->vtable->create_security_connector == NULL) { - gpr_log(GPR_ERROR, "Server credentials cannot create security context."); - return GRPC_SECURITY_ERROR; - } - return creds->vtable->create_security_connector(exec_ctx, creds, sc); -} - -void grpc_server_credentials_set_auth_metadata_processor( - grpc_server_credentials *creds, grpc_auth_metadata_processor processor) { - GRPC_API_TRACE( - "grpc_server_credentials_set_auth_metadata_processor(" - "creds=%p, " - "processor=grpc_auth_metadata_processor { process: %p, state: %p })", - 3, (creds, (void *)(intptr_t)processor.process, processor.state)); - if (creds == NULL) return; - if (creds->processor.destroy != NULL && creds->processor.state != NULL) { - creds->processor.destroy(creds->processor.state); - } - creds->processor = processor; -} - -static void 
server_credentials_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, - void *p) { - grpc_server_credentials_unref(exec_ctx, p); -} - -static void *server_credentials_pointer_arg_copy(void *p) { - return grpc_server_credentials_ref(p); -} - -static int server_credentials_pointer_cmp(void *a, void *b) { - return GPR_ICMP(a, b); -} - -static const grpc_arg_pointer_vtable cred_ptr_vtable = { - server_credentials_pointer_arg_copy, server_credentials_pointer_arg_destroy, - server_credentials_pointer_cmp}; - -grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *p) { - return grpc_channel_arg_pointer_create(GRPC_SERVER_CREDENTIALS_ARG, p, - &cred_ptr_vtable); -} - -grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg) { - if (strcmp(arg->key, GRPC_SERVER_CREDENTIALS_ARG) != 0) return NULL; - if (arg->type != GRPC_ARG_POINTER) { - gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type, - GRPC_SERVER_CREDENTIALS_ARG); - return NULL; - } - return arg->value.pointer.p; -} - -grpc_server_credentials *grpc_find_server_credentials_in_args( - const grpc_channel_args *args) { - size_t i; - if (args == NULL) return NULL; - for (i = 0; i < args->num_args; i++) { - grpc_server_credentials *p = - grpc_server_credentials_from_arg(&args->args[i]); - if (p != NULL) return p; - } - return NULL; -} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/credentials.cc b/Sources/CgRPC/src/core/lib/security/credentials/credentials.cc new file mode 100644 index 000000000..c43cb440e --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/credentials.cc @@ -0,0 +1,286 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/security/credentials/credentials.h" + +#include +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/http/httpcli.h" +#include "src/core/lib/http/parser.h" +#include "src/core/lib/iomgr/executor.h" +#include "src/core/lib/json/json.h" +#include "src/core/lib/surface/api_trace.h" + +#include +#include +#include +#include +#include + +/* -- Common. 
-- */ + +grpc_credentials_metadata_request* grpc_credentials_metadata_request_create( + grpc_call_credentials* creds) { + grpc_credentials_metadata_request* r = + static_cast( + gpr_zalloc(sizeof(grpc_credentials_metadata_request))); + r->creds = grpc_call_credentials_ref(creds); + return r; +} + +void grpc_credentials_metadata_request_destroy( + grpc_credentials_metadata_request* r) { + grpc_call_credentials_unref(r->creds); + grpc_http_response_destroy(&r->response); + gpr_free(r); +} + +grpc_channel_credentials* grpc_channel_credentials_ref( + grpc_channel_credentials* creds) { + if (creds == nullptr) return nullptr; + gpr_ref(&creds->refcount); + return creds; +} + +void grpc_channel_credentials_unref(grpc_channel_credentials* creds) { + if (creds == nullptr) return; + if (gpr_unref(&creds->refcount)) { + if (creds->vtable->destruct != nullptr) { + creds->vtable->destruct(creds); + } + gpr_free(creds); + } +} + +void grpc_channel_credentials_release(grpc_channel_credentials* creds) { + GRPC_API_TRACE("grpc_channel_credentials_release(creds=%p)", 1, (creds)); + grpc_core::ExecCtx exec_ctx; + grpc_channel_credentials_unref(creds); +} + +grpc_call_credentials* grpc_call_credentials_ref(grpc_call_credentials* creds) { + if (creds == nullptr) return nullptr; + gpr_ref(&creds->refcount); + return creds; +} + +void grpc_call_credentials_unref(grpc_call_credentials* creds) { + if (creds == nullptr) return; + if (gpr_unref(&creds->refcount)) { + if (creds->vtable->destruct != nullptr) { + creds->vtable->destruct(creds); + } + gpr_free(creds); + } +} + +void grpc_call_credentials_release(grpc_call_credentials* creds) { + GRPC_API_TRACE("grpc_call_credentials_release(creds=%p)", 1, (creds)); + grpc_core::ExecCtx exec_ctx; + grpc_call_credentials_unref(creds); +} + +bool grpc_call_credentials_get_request_metadata( + grpc_call_credentials* creds, grpc_polling_entity* pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array, + grpc_closure* on_request_metadata, grpc_error** error) { + if (creds == nullptr || creds->vtable->get_request_metadata == nullptr) { + return true; + } + return creds->vtable->get_request_metadata(creds, pollent, context, md_array, + on_request_metadata, error); +} + +void grpc_call_credentials_cancel_get_request_metadata( + grpc_call_credentials* creds, grpc_credentials_mdelem_array* md_array, + grpc_error* error) { + if (creds == nullptr || + creds->vtable->cancel_get_request_metadata == nullptr) { + return; + } + creds->vtable->cancel_get_request_metadata(creds, md_array, error); +} + +grpc_security_status grpc_channel_credentials_create_security_connector( + grpc_channel_credentials* channel_creds, const char* target, + const grpc_channel_args* args, grpc_channel_security_connector** sc, + grpc_channel_args** new_args) { + *new_args = nullptr; + if (channel_creds == nullptr) { + return GRPC_SECURITY_ERROR; + } + GPR_ASSERT(channel_creds->vtable->create_security_connector != nullptr); + return channel_creds->vtable->create_security_connector( + channel_creds, nullptr, target, args, sc, new_args); +} + +grpc_channel_credentials* +grpc_channel_credentials_duplicate_without_call_credentials( + grpc_channel_credentials* channel_creds) { + if (channel_creds != nullptr && channel_creds->vtable != nullptr && + channel_creds->vtable->duplicate_without_call_credentials != nullptr) { + return channel_creds->vtable->duplicate_without_call_credentials( + channel_creds); + } else { + return grpc_channel_credentials_ref(channel_creds); + } +} + +static void 
credentials_pointer_arg_destroy(void* p) { + grpc_channel_credentials_unref(static_cast(p)); +} + +static void* credentials_pointer_arg_copy(void* p) { + return grpc_channel_credentials_ref( + static_cast(p)); +} + +static int credentials_pointer_cmp(void* a, void* b) { return GPR_ICMP(a, b); } + +static const grpc_arg_pointer_vtable credentials_pointer_vtable = { + credentials_pointer_arg_copy, credentials_pointer_arg_destroy, + credentials_pointer_cmp}; + +grpc_arg grpc_channel_credentials_to_arg( + grpc_channel_credentials* credentials) { + return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CHANNEL_CREDENTIALS, + credentials, + &credentials_pointer_vtable); +} + +grpc_channel_credentials* grpc_channel_credentials_from_arg( + const grpc_arg* arg) { + if (strcmp(arg->key, GRPC_ARG_CHANNEL_CREDENTIALS)) return nullptr; + if (arg->type != GRPC_ARG_POINTER) { + gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type, + GRPC_ARG_CHANNEL_CREDENTIALS); + return nullptr; + } + return static_cast(arg->value.pointer.p); +} + +grpc_channel_credentials* grpc_channel_credentials_find_in_args( + const grpc_channel_args* args) { + size_t i; + if (args == nullptr) return nullptr; + for (i = 0; i < args->num_args; i++) { + grpc_channel_credentials* credentials = + grpc_channel_credentials_from_arg(&args->args[i]); + if (credentials != nullptr) return credentials; + } + return nullptr; +} + +grpc_server_credentials* grpc_server_credentials_ref( + grpc_server_credentials* creds) { + if (creds == nullptr) return nullptr; + gpr_ref(&creds->refcount); + return creds; +} + +void grpc_server_credentials_unref(grpc_server_credentials* creds) { + if (creds == nullptr) return; + if (gpr_unref(&creds->refcount)) { + if (creds->vtable->destruct != nullptr) { + creds->vtable->destruct(creds); + } + if (creds->processor.destroy != nullptr && + creds->processor.state != nullptr) { + creds->processor.destroy(creds->processor.state); + } + gpr_free(creds); + } +} + +void grpc_server_credentials_release(grpc_server_credentials* creds) { + GRPC_API_TRACE("grpc_server_credentials_release(creds=%p)", 1, (creds)); + grpc_core::ExecCtx exec_ctx; + grpc_server_credentials_unref(creds); +} + +grpc_security_status grpc_server_credentials_create_security_connector( + grpc_server_credentials* creds, grpc_server_security_connector** sc) { + if (creds == nullptr || creds->vtable->create_security_connector == nullptr) { + gpr_log(GPR_ERROR, "Server credentials cannot create security context."); + return GRPC_SECURITY_ERROR; + } + return creds->vtable->create_security_connector(creds, sc); +} + +void grpc_server_credentials_set_auth_metadata_processor( + grpc_server_credentials* creds, grpc_auth_metadata_processor processor) { + GRPC_API_TRACE( + "grpc_server_credentials_set_auth_metadata_processor(" + "creds=%p, " + "processor=grpc_auth_metadata_processor { process: %p, state: %p })", + 3, (creds, (void*)(intptr_t)processor.process, processor.state)); + if (creds == nullptr) return; + if (creds->processor.destroy != nullptr && + creds->processor.state != nullptr) { + creds->processor.destroy(creds->processor.state); + } + creds->processor = processor; +} + +static void server_credentials_pointer_arg_destroy(void* p) { + grpc_server_credentials_unref(static_cast(p)); +} + +static void* server_credentials_pointer_arg_copy(void* p) { + return grpc_server_credentials_ref(static_cast(p)); +} + +static int server_credentials_pointer_cmp(void* a, void* b) { + return GPR_ICMP(a, b); +} + +static const grpc_arg_pointer_vtable 
cred_ptr_vtable = { + server_credentials_pointer_arg_copy, server_credentials_pointer_arg_destroy, + server_credentials_pointer_cmp}; + +grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials* p) { + return grpc_channel_arg_pointer_create((char*)GRPC_SERVER_CREDENTIALS_ARG, p, + &cred_ptr_vtable); +} + +grpc_server_credentials* grpc_server_credentials_from_arg(const grpc_arg* arg) { + if (strcmp(arg->key, GRPC_SERVER_CREDENTIALS_ARG) != 0) return nullptr; + if (arg->type != GRPC_ARG_POINTER) { + gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type, + GRPC_SERVER_CREDENTIALS_ARG); + return nullptr; + } + return static_cast(arg->value.pointer.p); +} + +grpc_server_credentials* grpc_find_server_credentials_in_args( + const grpc_channel_args* args) { + size_t i; + if (args == nullptr) return nullptr; + for (i = 0; i < args->num_args; i++) { + grpc_server_credentials* p = + grpc_server_credentials_from_arg(&args->args[i]); + if (p != nullptr) return p; + } + return nullptr; +} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/credentials.h index 04a54b0ca..b1421e83c 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/credentials.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/credentials.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_CREDENTIALS_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_CREDENTIALS_H +#include + #include #include #include @@ -27,7 +29,7 @@ #include "src/core/lib/http/httpcli.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/iomgr/polling_entity.h" -#include "src/core/lib/security/transport/security_connector.h" +#include "src/core/lib/security/security_connector/security_connector.h" struct grpc_http_response; @@ -73,13 +75,13 @@ typedef enum { /* --- Google utils --- */ /* It is the caller's responsibility to gpr_free the result if not NULL. */ -char *grpc_get_well_known_google_credentials_file_path(void); +char* grpc_get_well_known_google_credentials_file_path(void); /* Implementation function for the different platforms. */ -char *grpc_get_well_known_google_credentials_file_path_impl(void); +char* grpc_get_well_known_google_credentials_file_path_impl(void); /* Override for testing only. 
Not thread-safe */ -typedef char *(*grpc_well_known_credentials_path_getter)(void); +typedef char* (*grpc_well_known_credentials_path_getter)(void); void grpc_override_well_known_credentials_path_getter( grpc_well_known_credentials_path_getter getter); @@ -88,168 +90,157 @@ void grpc_override_well_known_credentials_path_getter( #define GRPC_ARG_CHANNEL_CREDENTIALS "grpc.channel_credentials" typedef struct { - void (*destruct)(grpc_exec_ctx *exec_ctx, grpc_channel_credentials *c); + void (*destruct)(grpc_channel_credentials* c); grpc_security_status (*create_security_connector)( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *c, - grpc_call_credentials *call_creds, const char *target, - const grpc_channel_args *args, grpc_channel_security_connector **sc, - grpc_channel_args **new_args); + grpc_channel_credentials* c, grpc_call_credentials* call_creds, + const char* target, const grpc_channel_args* args, + grpc_channel_security_connector** sc, grpc_channel_args** new_args); - grpc_channel_credentials *(*duplicate_without_call_credentials)( - grpc_channel_credentials *c); + grpc_channel_credentials* (*duplicate_without_call_credentials)( + grpc_channel_credentials* c); } grpc_channel_credentials_vtable; struct grpc_channel_credentials { - const grpc_channel_credentials_vtable *vtable; - const char *type; + const grpc_channel_credentials_vtable* vtable; + const char* type; gpr_refcount refcount; }; -grpc_channel_credentials *grpc_channel_credentials_ref( - grpc_channel_credentials *creds); -void grpc_channel_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_channel_credentials *creds); +grpc_channel_credentials* grpc_channel_credentials_ref( + grpc_channel_credentials* creds); +void grpc_channel_credentials_unref(grpc_channel_credentials* creds); /* Creates a security connector for the channel. May also create new channel args for the channel to be used in place of the passed in const args if returned non NULL. In that case the caller is responsible for destroying new_args after channel creation. */ grpc_security_status grpc_channel_credentials_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds, - const char *target, const grpc_channel_args *args, - grpc_channel_security_connector **sc, grpc_channel_args **new_args); + grpc_channel_credentials* creds, const char* target, + const grpc_channel_args* args, grpc_channel_security_connector** sc, + grpc_channel_args** new_args); /* Creates a version of the channel credentials without any attached call credentials. This can be used in order to open a channel to a non-trusted gRPC load balancer. */ -grpc_channel_credentials * +grpc_channel_credentials* grpc_channel_credentials_duplicate_without_call_credentials( - grpc_channel_credentials *creds); + grpc_channel_credentials* creds); /* Util to encapsulate the channel credentials in a channel arg. */ -grpc_arg grpc_channel_credentials_to_arg(grpc_channel_credentials *credentials); +grpc_arg grpc_channel_credentials_to_arg(grpc_channel_credentials* credentials); /* Util to get the channel credentials from a channel arg. */ -grpc_channel_credentials *grpc_channel_credentials_from_arg( - const grpc_arg *arg); +grpc_channel_credentials* grpc_channel_credentials_from_arg( + const grpc_arg* arg); /* Util to find the channel credentials from channel args. 
*/ -grpc_channel_credentials *grpc_channel_credentials_find_in_args( - const grpc_channel_args *args); +grpc_channel_credentials* grpc_channel_credentials_find_in_args( + const grpc_channel_args* args); /* --- grpc_credentials_mdelem_array. --- */ typedef struct { - grpc_mdelem *md; + grpc_mdelem* md; size_t size; } grpc_credentials_mdelem_array; /// Takes a new ref to \a md. -void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array *list, +void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array* list, grpc_mdelem md); /// Appends all elements from \a src to \a dst, taking a new ref to each one. -void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array *dst, - grpc_credentials_mdelem_array *src); +void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array* dst, + grpc_credentials_mdelem_array* src); -void grpc_credentials_mdelem_array_destroy(grpc_exec_ctx *exec_ctx, - grpc_credentials_mdelem_array *list); +void grpc_credentials_mdelem_array_destroy(grpc_credentials_mdelem_array* list); /* --- grpc_call_credentials. --- */ typedef struct { - void (*destruct)(grpc_exec_ctx *exec_ctx, grpc_call_credentials *c); - bool (*get_request_metadata)(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *c, - grpc_polling_entity *pollent, + void (*destruct)(grpc_call_credentials* c); + bool (*get_request_metadata)(grpc_call_credentials* c, + grpc_polling_entity* pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, - grpc_closure *on_request_metadata, - grpc_error **error); - void (*cancel_get_request_metadata)(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, - grpc_error *error); + grpc_credentials_mdelem_array* md_array, + grpc_closure* on_request_metadata, + grpc_error** error); + void (*cancel_get_request_metadata)(grpc_call_credentials* c, + grpc_credentials_mdelem_array* md_array, + grpc_error* error); } grpc_call_credentials_vtable; struct grpc_call_credentials { - const grpc_call_credentials_vtable *vtable; - const char *type; + const grpc_call_credentials_vtable* vtable; + const char* type; gpr_refcount refcount; }; -grpc_call_credentials *grpc_call_credentials_ref(grpc_call_credentials *creds); -void grpc_call_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds); +grpc_call_credentials* grpc_call_credentials_ref(grpc_call_credentials* creds); +void grpc_call_credentials_unref(grpc_call_credentials* creds); /// Returns true if completed synchronously, in which case \a error will /// be set to indicate the result. Otherwise, \a on_request_metadata will /// be invoked asynchronously when complete. \a md_array will be populated /// with the resulting metadata once complete. bool grpc_call_credentials_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error); + grpc_call_credentials* creds, grpc_polling_entity* pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array, + grpc_closure* on_request_metadata, grpc_error** error); /// Cancels a pending asynchronous operation started by /// grpc_call_credentials_get_request_metadata() with the corresponding /// value of \a md_array. 
void grpc_call_credentials_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error); + grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array, + grpc_error* error); /* Metadata-only credentials with the specified key and value where asynchronicity can be simulated for testing. */ -grpc_call_credentials *grpc_md_only_test_credentials_create( - grpc_exec_ctx *exec_ctx, const char *md_key, const char *md_value, - bool is_async); +grpc_call_credentials* grpc_md_only_test_credentials_create( + const char* md_key, const char* md_value, bool is_async); /* --- grpc_server_credentials. --- */ typedef struct { - void (*destruct)(grpc_exec_ctx *exec_ctx, grpc_server_credentials *c); + void (*destruct)(grpc_server_credentials* c); grpc_security_status (*create_security_connector)( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *c, - grpc_server_security_connector **sc); + grpc_server_credentials* c, grpc_server_security_connector** sc); } grpc_server_credentials_vtable; struct grpc_server_credentials { - const grpc_server_credentials_vtable *vtable; - const char *type; + const grpc_server_credentials_vtable* vtable; + const char* type; gpr_refcount refcount; grpc_auth_metadata_processor processor; }; grpc_security_status grpc_server_credentials_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds, - grpc_server_security_connector **sc); + grpc_server_credentials* creds, grpc_server_security_connector** sc); -grpc_server_credentials *grpc_server_credentials_ref( - grpc_server_credentials *creds); +grpc_server_credentials* grpc_server_credentials_ref( + grpc_server_credentials* creds); -void grpc_server_credentials_unref(grpc_exec_ctx *exec_ctx, - grpc_server_credentials *creds); +void grpc_server_credentials_unref(grpc_server_credentials* creds); #define GRPC_SERVER_CREDENTIALS_ARG "grpc.server_credentials" -grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials *c); -grpc_server_credentials *grpc_server_credentials_from_arg(const grpc_arg *arg); -grpc_server_credentials *grpc_find_server_credentials_in_args( - const grpc_channel_args *args); +grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials* c); +grpc_server_credentials* grpc_server_credentials_from_arg(const grpc_arg* arg); +grpc_server_credentials* grpc_find_server_credentials_in_args( + const grpc_channel_args* args); /* -- Credentials Metadata Request. 
-- */ typedef struct { - grpc_call_credentials *creds; + grpc_call_credentials* creds; grpc_http_response response; } grpc_credentials_metadata_request; -grpc_credentials_metadata_request *grpc_credentials_metadata_request_create( - grpc_call_credentials *creds); +grpc_credentials_metadata_request* grpc_credentials_metadata_request_create( + grpc_call_credentials* creds); void grpc_credentials_metadata_request_destroy( - grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *r); + grpc_credentials_metadata_request* r); #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_CREDENTIALS_H */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/credentials_metadata.c b/Sources/CgRPC/src/core/lib/security/credentials/credentials_metadata.cc similarity index 86% rename from Sources/CgRPC/src/core/lib/security/credentials/credentials_metadata.c rename to Sources/CgRPC/src/core/lib/security/credentials/credentials_metadata.cc index ccd39e610..703de4aaa 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/credentials_metadata.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/credentials_metadata.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/security/credentials/credentials.h" #include @@ -24,7 +26,7 @@ #include "src/core/lib/slice/slice_internal.h" -static void mdelem_list_ensure_capacity(grpc_credentials_mdelem_array *list, +static void mdelem_list_ensure_capacity(grpc_credentials_mdelem_array* list, size_t additional_space_needed) { size_t target_size = list->size + additional_space_needed; // Find the next power of two greater than the target size (i.e., @@ -33,17 +35,18 @@ static void mdelem_list_ensure_capacity(grpc_credentials_mdelem_array *list, while (new_size < target_size) { new_size *= 2; } - list->md = gpr_realloc(list->md, sizeof(grpc_mdelem) * new_size); + list->md = static_cast( + gpr_realloc(list->md, sizeof(grpc_mdelem) * new_size)); } -void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array *list, +void grpc_credentials_mdelem_array_add(grpc_credentials_mdelem_array* list, grpc_mdelem md) { mdelem_list_ensure_capacity(list, 1); list->md[list->size++] = GRPC_MDELEM_REF(md); } -void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array *dst, - grpc_credentials_mdelem_array *src) { +void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array* dst, + grpc_credentials_mdelem_array* src) { mdelem_list_ensure_capacity(dst, src->size); for (size_t i = 0; i < src->size; ++i) { dst->md[dst->size++] = GRPC_MDELEM_REF(src->md[i]); @@ -51,9 +54,9 @@ void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array *dst, } void grpc_credentials_mdelem_array_destroy( - grpc_exec_ctx *exec_ctx, grpc_credentials_mdelem_array *list) { + grpc_credentials_mdelem_array* list) { for (size_t i = 0; i < list->size; ++i) { - GRPC_MDELEM_UNREF(exec_ctx, list->md[i]); + GRPC_MDELEM_UNREF(list->md[i]); } gpr_free(list->md); } diff --git a/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.c b/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.c deleted file mode 100644 index ac9017850..000000000 --- a/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.c +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/security/credentials/fake/fake_credentials.h" - -#include - -#include -#include -#include - -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/iomgr/executor.h" -#include "src/core/lib/support/string.h" - -/* -- Fake transport security credentials. -- */ - -#define GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS \ - "grpc.fake_security.expected_targets" - -static grpc_security_status fake_transport_security_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *c, - grpc_call_credentials *call_creds, const char *target, - const grpc_channel_args *args, grpc_channel_security_connector **sc, - grpc_channel_args **new_args) { - *sc = grpc_fake_channel_security_connector_create(call_creds, target, args); - return GRPC_SECURITY_OK; -} - -static grpc_security_status -fake_transport_security_server_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *c, - grpc_server_security_connector **sc) { - *sc = grpc_fake_server_security_connector_create(); - return GRPC_SECURITY_OK; -} - -static grpc_channel_credentials_vtable - fake_transport_security_credentials_vtable = { - NULL, fake_transport_security_create_security_connector, NULL}; - -static grpc_server_credentials_vtable - fake_transport_security_server_credentials_vtable = { - NULL, fake_transport_security_server_create_security_connector}; - -grpc_channel_credentials *grpc_fake_transport_security_credentials_create( - void) { - grpc_channel_credentials *c = gpr_zalloc(sizeof(grpc_channel_credentials)); - c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY; - c->vtable = &fake_transport_security_credentials_vtable; - gpr_ref_init(&c->refcount, 1); - return c; -} - -grpc_server_credentials *grpc_fake_transport_security_server_credentials_create( - void) { - grpc_server_credentials *c = gpr_malloc(sizeof(grpc_server_credentials)); - memset(c, 0, sizeof(grpc_server_credentials)); - c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY; - gpr_ref_init(&c->refcount, 1); - c->vtable = &fake_transport_security_server_credentials_vtable; - return c; -} - -grpc_arg grpc_fake_transport_expected_targets_arg(char *expected_targets) { - return grpc_channel_arg_string_create(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, - expected_targets); -} - -const char *grpc_fake_transport_get_expected_targets( - const grpc_channel_args *args) { - const grpc_arg *expected_target_arg = - grpc_channel_args_find(args, GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS); - if (expected_target_arg != NULL && - expected_target_arg->type == GRPC_ARG_STRING) { - return expected_target_arg->value.string; - } - return NULL; -} - -/* -- Metadata-only test credentials. 
-- */ - -static void md_only_test_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { - grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds; - GRPC_MDELEM_UNREF(exec_ctx, c->md); -} - -static bool md_only_test_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error) { - grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds; - grpc_credentials_mdelem_array_add(md_array, c->md); - if (c->is_async) { - GRPC_CLOSURE_SCHED(exec_ctx, on_request_metadata, GRPC_ERROR_NONE); - return false; - } - return true; -} - -static void md_only_test_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { - GRPC_ERROR_UNREF(error); -} - -static grpc_call_credentials_vtable md_only_test_vtable = { - md_only_test_destruct, md_only_test_get_request_metadata, - md_only_test_cancel_get_request_metadata}; - -grpc_call_credentials *grpc_md_only_test_credentials_create( - grpc_exec_ctx *exec_ctx, const char *md_key, const char *md_value, - bool is_async) { - grpc_md_only_test_credentials *c = - gpr_zalloc(sizeof(grpc_md_only_test_credentials)); - c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2; - c->base.vtable = &md_only_test_vtable; - gpr_ref_init(&c->base.refcount, 1); - c->md = - grpc_mdelem_from_slices(exec_ctx, grpc_slice_from_copied_string(md_key), - grpc_slice_from_copied_string(md_value)); - c->is_async = is_async; - return &c->base; -} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.cc b/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.cc new file mode 100644 index 000000000..858ab6b41 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.cc @@ -0,0 +1,136 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/security/credentials/fake/fake_credentials.h" + +#include + +#include +#include +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/executor.h" + +/* -- Fake transport security credentials. 
-- */ + +static grpc_security_status fake_transport_security_create_security_connector( + grpc_channel_credentials* c, grpc_call_credentials* call_creds, + const char* target, const grpc_channel_args* args, + grpc_channel_security_connector** sc, grpc_channel_args** new_args) { + *sc = + grpc_fake_channel_security_connector_create(c, call_creds, target, args); + return GRPC_SECURITY_OK; +} + +static grpc_security_status +fake_transport_security_server_create_security_connector( + grpc_server_credentials* c, grpc_server_security_connector** sc) { + *sc = grpc_fake_server_security_connector_create(c); + return GRPC_SECURITY_OK; +} + +static grpc_channel_credentials_vtable + fake_transport_security_credentials_vtable = { + nullptr, fake_transport_security_create_security_connector, nullptr}; + +static grpc_server_credentials_vtable + fake_transport_security_server_credentials_vtable = { + nullptr, fake_transport_security_server_create_security_connector}; + +grpc_channel_credentials* grpc_fake_transport_security_credentials_create( + void) { + grpc_channel_credentials* c = static_cast( + gpr_zalloc(sizeof(grpc_channel_credentials))); + c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY; + c->vtable = &fake_transport_security_credentials_vtable; + gpr_ref_init(&c->refcount, 1); + return c; +} + +grpc_server_credentials* grpc_fake_transport_security_server_credentials_create( + void) { + grpc_server_credentials* c = static_cast( + gpr_malloc(sizeof(grpc_server_credentials))); + memset(c, 0, sizeof(grpc_server_credentials)); + c->type = GRPC_CHANNEL_CREDENTIALS_TYPE_FAKE_TRANSPORT_SECURITY; + gpr_ref_init(&c->refcount, 1); + c->vtable = &fake_transport_security_server_credentials_vtable; + return c; +} + +grpc_arg grpc_fake_transport_expected_targets_arg(char* expected_targets) { + return grpc_channel_arg_string_create( + (char*)GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, expected_targets); +} + +const char* grpc_fake_transport_get_expected_targets( + const grpc_channel_args* args) { + const grpc_arg* expected_target_arg = + grpc_channel_args_find(args, GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS); + return grpc_channel_arg_get_string(expected_target_arg); +} + +/* -- Metadata-only test credentials. 
-- */ + +static void md_only_test_destruct(grpc_call_credentials* creds) { + grpc_md_only_test_credentials* c = + reinterpret_cast(creds); + GRPC_MDELEM_UNREF(c->md); +} + +static bool md_only_test_get_request_metadata( + grpc_call_credentials* creds, grpc_polling_entity* pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array, + grpc_closure* on_request_metadata, grpc_error** error) { + grpc_md_only_test_credentials* c = + reinterpret_cast(creds); + grpc_credentials_mdelem_array_add(md_array, c->md); + if (c->is_async) { + GRPC_CLOSURE_SCHED(on_request_metadata, GRPC_ERROR_NONE); + return false; + } + return true; +} + +static void md_only_test_cancel_get_request_metadata( + grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array, + grpc_error* error) { + GRPC_ERROR_UNREF(error); +} + +static grpc_call_credentials_vtable md_only_test_vtable = { + md_only_test_destruct, md_only_test_get_request_metadata, + md_only_test_cancel_get_request_metadata}; + +grpc_call_credentials* grpc_md_only_test_credentials_create( + const char* md_key, const char* md_value, bool is_async) { + grpc_md_only_test_credentials* c = + static_cast( + gpr_zalloc(sizeof(grpc_md_only_test_credentials))); + c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2; + c->base.vtable = &md_only_test_vtable; + gpr_ref_init(&c->base.refcount, 1); + c->md = grpc_mdelem_from_slices(grpc_slice_from_copied_string(md_key), + grpc_slice_from_copied_string(md_value)); + c->is_async = is_async; + return &c->base; +} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.h index aa0f3b6e2..e89e6e24c 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/fake/fake_credentials.h @@ -19,15 +19,20 @@ #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_FAKE_FAKE_CREDENTIALS_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_FAKE_FAKE_CREDENTIALS_H +#include + #include "src/core/lib/security/credentials/credentials.h" +#define GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS \ + "grpc.fake_security.expected_targets" + /* -- Fake transport security credentials. -- */ /* Creates a fake transport security credentials object for testing. */ -grpc_channel_credentials *grpc_fake_transport_security_credentials_create(void); +grpc_channel_credentials* grpc_fake_transport_security_credentials_create(void); /* Creates a fake server transport security credentials object for testing. */ -grpc_server_credentials *grpc_fake_transport_security_server_credentials_create( +grpc_server_credentials* grpc_fake_transport_security_server_credentials_create( void); /* Used to verify the target names given to the fake transport security @@ -42,11 +47,11 @@ grpc_server_credentials *grpc_fake_transport_security_server_credentials_create( * That is to say, LB channels have a heading list of LB targets separated from * the list of backend targets by a semicolon. For non-LB channels, only the * latter is present. */ -grpc_arg grpc_fake_transport_expected_targets_arg(char *expected_targets); +grpc_arg grpc_fake_transport_expected_targets_arg(char* expected_targets); /* Return the value associated with the expected targets channel arg or NULL */ -const char *grpc_fake_transport_get_expected_targets( - const grpc_channel_args *args); +const char* grpc_fake_transport_get_expected_targets( + const grpc_channel_args* args); /* -- Metadata-only Test credentials. 
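/* Illustrative sketch, not part of the vendored diff: the C-style vtable
 * pattern that the fake and metadata-only credentials above rely on, reduced
 * to a standalone toy.  All "toy_" names are hypothetical; in the real code
 * the table is grpc_call_credentials_vtable with destruct,
 * get_request_metadata and cancel_get_request_metadata slots. */
#include <cstdio>

struct toy_credentials;

struct toy_credentials_vtable {
  void (*destruct)(toy_credentials* creds);
  bool (*get_request_metadata)(toy_credentials* creds, const char* service_url);
};

struct toy_credentials {
  const toy_credentials_vtable* vtable;
  int refcount;
};

static void toy_destruct(toy_credentials* /*creds*/) { std::puts("destructed"); }

static bool toy_get_request_metadata(toy_credentials* /*creds*/,
                                     const char* service_url) {
  std::printf("attaching metadata for %s\n", service_url);
  return true;  // metadata produced synchronously, like the !is_async path above
}

static const toy_credentials_vtable toy_vtable = {toy_destruct,
                                                  toy_get_request_metadata};

int main() {
  toy_credentials c{&toy_vtable, 1};
  c.vtable->get_request_metadata(&c, "https://example.invalid/service");
  c.vtable->destruct(&c);
  return 0;
}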
-- */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/google_default/credentials_generic.c b/Sources/CgRPC/src/core/lib/security/credentials/google_default/credentials_generic.cc similarity index 76% rename from Sources/CgRPC/src/core/lib/security/credentials/google_default/credentials_generic.c rename to Sources/CgRPC/src/core/lib/security/credentials/google_default/credentials_generic.cc index 4f79718f3..10ff0f620 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/google_default/credentials_generic.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/google_default/credentials_generic.cc @@ -16,22 +16,24 @@ * */ +#include + #include "src/core/lib/security/credentials/google_default/google_default_credentials.h" #include #include #include -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/string.h" -char *grpc_get_well_known_google_credentials_file_path_impl(void) { - char *result = NULL; - char *base = gpr_getenv(GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR); - if (base == NULL) { - gpr_log(GPR_ERROR, "Could not get " GRPC_GOOGLE_CREDENTIALS_ENV_VAR +char* grpc_get_well_known_google_credentials_file_path_impl(void) { + char* result = nullptr; + char* base = gpr_getenv(GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR); + if (base == nullptr) { + gpr_log(GPR_ERROR, "Could not get " GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR " environment variable."); - return NULL; + return nullptr; } gpr_asprintf(&result, "%s/%s", base, GRPC_GOOGLE_CREDENTIALS_PATH_SUFFIX); gpr_free(base); diff --git a/Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.c b/Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.cc similarity index 66% rename from Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.c rename to Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.cc index 691d66df6..70d4c3ea5 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/security/credentials/credentials.h" #include @@ -24,16 +26,17 @@ #include #include +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/http/httpcli.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/iomgr/load_file.h" #include "src/core/lib/iomgr/polling_entity.h" +#include "src/core/lib/security/credentials/google_default/google_default_credentials.h" #include "src/core/lib/security/credentials/jwt/jwt_credentials.h" #include "src/core/lib/security/credentials/oauth2/oauth2_credentials.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/surface/api_trace.h" /* -- Constants. -- */ @@ -42,10 +45,10 @@ /* -- Default credentials. 
-- */ -static grpc_channel_credentials *default_credentials = NULL; +static grpc_channel_credentials* default_credentials = nullptr; static int compute_engine_detection_done = 0; static gpr_mu g_state_mu; -static gpr_mu *g_polling_mu; +static gpr_mu* g_polling_mu; static gpr_once g_once = GPR_ONCE_INIT; static void init_default_credentials(void) { gpr_mu_init(&g_state_mu); } @@ -57,17 +60,17 @@ typedef struct { grpc_http_response response; } compute_engine_detector; -static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx, - void *user_data, - grpc_error *error) { - compute_engine_detector *detector = (compute_engine_detector *)user_data; +static void on_compute_engine_detection_http_response(void* user_data, + grpc_error* error) { + compute_engine_detector* detector = + static_cast(user_data); if (error == GRPC_ERROR_NONE && detector->response.status == 200 && detector->response.hdr_count > 0) { /* Internet providers can return a generic response to all requests, so it is necessary to check that metadata header is present also. */ size_t i; for (i = 0; i < detector->response.hdr_count; i++) { - grpc_http_header *header = &detector->response.hdrs[i]; + grpc_http_header* header = &detector->response.hdrs[i]; if (strcmp(header->key, "Metadata-Flavor") == 0 && strcmp(header->value, "Google") == 0) { detector->success = 1; @@ -79,16 +82,16 @@ static void on_compute_engine_detection_http_response(grpc_exec_ctx *exec_ctx, detector->is_done = 1; GRPC_LOG_IF_ERROR( "Pollset kick", - grpc_pollset_kick(exec_ctx, - grpc_polling_entity_pollset(&detector->pollent), NULL)); + grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent), + nullptr)); gpr_mu_unlock(g_polling_mu); } -static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, grpc_error *e) { - grpc_pollset_destroy(exec_ctx, p); +static void destroy_pollset(void* p, grpc_error* e) { + grpc_pollset_destroy(static_cast(p)); } -static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { +static int is_stack_running_on_compute_engine() { compute_engine_detector detector; grpc_httpcli_request request; grpc_httpcli_context context; @@ -96,9 +99,10 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { /* The http call is local. If it takes more than one sec, it is for sure not on compute engine. 
*/ - gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN); + grpc_millis max_detection_delay = GPR_MS_PER_SEC; - grpc_pollset *pollset = gpr_zalloc(grpc_pollset_size()); + grpc_pollset* pollset = + static_cast(gpr_zalloc(grpc_pollset_size())); grpc_pollset_init(pollset, &g_polling_mu); detector.pollent = grpc_polling_entity_create_from_pollset(pollset); detector.is_done = 0; @@ -106,49 +110,46 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { memset(&detector.response, 0, sizeof(detector.response)); memset(&request, 0, sizeof(grpc_httpcli_request)); - request.host = GRPC_COMPUTE_ENGINE_DETECTION_HOST; - request.http.path = "/"; + request.host = (char*)GRPC_COMPUTE_ENGINE_DETECTION_HOST; + request.http.path = (char*)"/"; grpc_httpcli_context_init(&context); - grpc_resource_quota *resource_quota = + grpc_resource_quota* resource_quota = grpc_resource_quota_create("google_default_credentials"); grpc_httpcli_get( - exec_ctx, &context, &detector.pollent, resource_quota, &request, - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay), + &context, &detector.pollent, resource_quota, &request, + grpc_core::ExecCtx::Get()->Now() + max_detection_delay, GRPC_CLOSURE_CREATE(on_compute_engine_detection_http_response, &detector, grpc_schedule_on_exec_ctx), &detector.response); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); - grpc_exec_ctx_flush(exec_ctx); + grpc_core::ExecCtx::Get()->Flush(); /* Block until we get the response. This is not ideal but this should only be called once for the lifetime of the process by the default credentials. */ gpr_mu_lock(g_polling_mu); while (!detector.is_done) { - grpc_pollset_worker *worker = NULL; + grpc_pollset_worker* worker = nullptr; if (!GRPC_LOG_IF_ERROR( "pollset_work", - grpc_pollset_work(exec_ctx, - grpc_polling_entity_pollset(&detector.pollent), - &worker, gpr_now(GPR_CLOCK_MONOTONIC), - gpr_inf_future(GPR_CLOCK_MONOTONIC)))) { + grpc_pollset_work(grpc_polling_entity_pollset(&detector.pollent), + &worker, GRPC_MILLIS_INF_FUTURE))) { detector.is_done = 1; detector.success = 0; } } gpr_mu_unlock(g_polling_mu); - grpc_httpcli_context_destroy(exec_ctx, &context); + grpc_httpcli_context_destroy(&context); GRPC_CLOSURE_INIT(&destroy_closure, destroy_pollset, grpc_polling_entity_pollset(&detector.pollent), grpc_schedule_on_exec_ctx); - grpc_pollset_shutdown(exec_ctx, - grpc_polling_entity_pollset(&detector.pollent), + grpc_pollset_shutdown(grpc_polling_entity_pollset(&detector.pollent), &destroy_closure); - g_polling_mu = NULL; - grpc_exec_ctx_flush(exec_ctx); + g_polling_mu = nullptr; + grpc_core::ExecCtx::Get()->Flush(); gpr_free(grpc_polling_entity_pollset(&detector.pollent)); grpc_http_response_destroy(&detector.response); @@ -157,15 +158,15 @@ static int is_stack_running_on_compute_engine(grpc_exec_ctx *exec_ctx) { } /* Takes ownership of creds_path if not NULL. 
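/* Illustrative sketch, not part of the vendored diff: the response test done
 * by on_compute_engine_detection_http_response above, standalone.  A 200
 * status alone is not trusted, because captive portals and some ISPs answer
 * every request; the "Metadata-Flavor: Google" header must also be present.
 * The toy_header type is hypothetical. */
#include <cstddef>
#include <cstring>

struct toy_header {
  const char* key;
  const char* value;
};

static bool looks_like_compute_engine(int status, const toy_header* hdrs,
                                      std::size_t hdr_count) {
  if (status != 200 || hdr_count == 0) return false;
  for (std::size_t i = 0; i < hdr_count; ++i) {
    if (std::strcmp(hdrs[i].key, "Metadata-Flavor") == 0 &&
        std::strcmp(hdrs[i].value, "Google") == 0) {
      return true;
    }
  }
  return false;
}

int main() {
  toy_header hdrs[] = {{"Server", "GFE"}, {"Metadata-Flavor", "Google"}};
  return looks_like_compute_engine(200, hdrs, 2) ? 0 : 1;
}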
*/ -static grpc_error *create_default_creds_from_path( - grpc_exec_ctx *exec_ctx, char *creds_path, grpc_call_credentials **creds) { - grpc_json *json = NULL; +static grpc_error* create_default_creds_from_path( + char* creds_path, grpc_call_credentials** creds) { + grpc_json* json = nullptr; grpc_auth_json_key key; grpc_auth_refresh_token token; - grpc_call_credentials *result = NULL; + grpc_call_credentials* result = nullptr; grpc_slice creds_data = grpc_empty_slice(); - grpc_error *error = GRPC_ERROR_NONE; - if (creds_path == NULL) { + grpc_error* error = GRPC_ERROR_NONE; + if (creds_path == nullptr) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("creds_path unset"); goto end; } @@ -174,8 +175,9 @@ static grpc_error *create_default_creds_from_path( goto end; } json = grpc_json_parse_string_with_len( - (char *)GRPC_SLICE_START_PTR(creds_data), GRPC_SLICE_LENGTH(creds_data)); - if (json == NULL) { + reinterpret_cast GRPC_SLICE_START_PTR(creds_data), + GRPC_SLICE_LENGTH(creds_data)); + if (json == nullptr) { error = grpc_error_set_str( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed to parse JSON"), GRPC_ERROR_STR_RAW_BYTES, grpc_slice_ref_internal(creds_data)); @@ -187,8 +189,8 @@ static grpc_error *create_default_creds_from_path( if (grpc_auth_json_key_is_valid(&key)) { result = grpc_service_account_jwt_access_credentials_create_from_auth_json_key( - exec_ctx, key, grpc_max_auth_token_lifetime()); - if (result == NULL) { + key, grpc_max_auth_token_lifetime()); + if (result == nullptr) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "grpc_service_account_jwt_access_credentials_create_from_auth_json_" "key failed"); @@ -201,7 +203,7 @@ static grpc_error *create_default_creds_from_path( if (grpc_auth_refresh_token_is_valid(&token)) { result = grpc_refresh_token_credentials_create_from_auth_refresh_token(token); - if (result == NULL) { + if (result == nullptr) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "grpc_refresh_token_credentials_create_from_auth_refresh_token " "failed"); @@ -210,21 +212,21 @@ static grpc_error *create_default_creds_from_path( } end: - GPR_ASSERT((result == NULL) + (error == GRPC_ERROR_NONE) == 1); - if (creds_path != NULL) gpr_free(creds_path); - grpc_slice_unref_internal(exec_ctx, creds_data); - if (json != NULL) grpc_json_destroy(json); + GPR_ASSERT((result == nullptr) + (error == GRPC_ERROR_NONE) == 1); + if (creds_path != nullptr) gpr_free(creds_path); + grpc_slice_unref_internal(creds_data); + if (json != nullptr) grpc_json_destroy(json); *creds = result; return error; } -grpc_channel_credentials *grpc_google_default_credentials_create(void) { - grpc_channel_credentials *result = NULL; - grpc_call_credentials *call_creds = NULL; - grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( +grpc_channel_credentials* grpc_google_default_credentials_create(void) { + grpc_channel_credentials* result = nullptr; + grpc_call_credentials* call_creds = nullptr; + grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Failed to create Google credentials"); - grpc_error *err; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_error* err; + grpc_core::ExecCtx exec_ctx; GRPC_API_TRACE("grpc_google_default_credentials_create(void)", 0, ()); @@ -232,33 +234,31 @@ grpc_channel_credentials *grpc_google_default_credentials_create(void) { gpr_mu_lock(&g_state_mu); - if (default_credentials != NULL) { + if (default_credentials != nullptr) { result = grpc_channel_credentials_ref(default_credentials); goto end; } /* First, try the environment variable. 
*/ err = create_default_creds_from_path( - &exec_ctx, gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR), &call_creds); + gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR), &call_creds); if (err == GRPC_ERROR_NONE) goto end; error = grpc_error_add_child(error, err); /* Then the well-known file. */ err = create_default_creds_from_path( - &exec_ctx, grpc_get_well_known_google_credentials_file_path(), - &call_creds); + grpc_get_well_known_google_credentials_file_path(), &call_creds); if (err == GRPC_ERROR_NONE) goto end; error = grpc_error_add_child(error, err); /* At last try to see if we're on compute engine (do the detection only once since it requires a network test). */ if (!compute_engine_detection_done) { - int need_compute_engine_creds = - is_stack_running_on_compute_engine(&exec_ctx); + int need_compute_engine_creds = is_stack_running_on_compute_engine(); compute_engine_detection_done = 1; if (need_compute_engine_creds) { - call_creds = grpc_google_compute_engine_credentials_create(NULL); - if (call_creds == NULL) { + call_creds = grpc_google_compute_engine_credentials_create(nullptr); + if (call_creds == nullptr) { error = grpc_error_add_child( error, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Failed to get credentials from network")); @@ -267,53 +267,52 @@ grpc_channel_credentials *grpc_google_default_credentials_create(void) { } end: - if (result == NULL) { - if (call_creds != NULL) { + if (result == nullptr) { + if (call_creds != nullptr) { /* Blend with default ssl credentials and add a global reference so that it can be cached and re-served. */ - grpc_channel_credentials *ssl_creds = - grpc_ssl_credentials_create(NULL, NULL, NULL); + grpc_channel_credentials* ssl_creds = + grpc_ssl_credentials_create(nullptr, nullptr, nullptr); default_credentials = grpc_channel_credentials_ref( grpc_composite_channel_credentials_create(ssl_creds, call_creds, - NULL)); - GPR_ASSERT(default_credentials != NULL); - grpc_channel_credentials_unref(&exec_ctx, ssl_creds); - grpc_call_credentials_unref(&exec_ctx, call_creds); + nullptr)); + GPR_ASSERT(default_credentials != nullptr); + grpc_channel_credentials_unref(ssl_creds); + grpc_call_credentials_unref(call_creds); result = default_credentials; } else { gpr_log(GPR_ERROR, "Could not create google default credentials."); } } gpr_mu_unlock(&g_state_mu); - if (result == NULL) { + if (result == nullptr) { GRPC_LOG_IF_ERROR("grpc_google_default_credentials_create", error); } else { GRPC_ERROR_UNREF(error); } - grpc_exec_ctx_finish(&exec_ctx); + return result; } void grpc_flush_cached_google_default_credentials(void) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; gpr_once_init(&g_once, init_default_credentials); gpr_mu_lock(&g_state_mu); - if (default_credentials != NULL) { - grpc_channel_credentials_unref(&exec_ctx, default_credentials); - default_credentials = NULL; + if (default_credentials != nullptr) { + grpc_channel_credentials_unref(default_credentials); + default_credentials = nullptr; } compute_engine_detection_done = 0; gpr_mu_unlock(&g_state_mu); - grpc_exec_ctx_finish(&exec_ctx); } /* -- Well known credentials path. 
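/* Illustrative sketch, not part of the vendored diff: the probe order
 * implemented by grpc_google_default_credentials_create above, reduced to a
 * standalone fallback chain.  The three creds_from_* helpers are hypothetical
 * stand-ins; "GOOGLE_APPLICATION_CREDENTIALS" is the conventional value of
 * GRPC_GOOGLE_CREDENTIALS_ENV_VAR and is assumed here. */
#include <cstdlib>
#include <string>

static bool creds_from_env(std::string* out) {
  const char* path = std::getenv("GOOGLE_APPLICATION_CREDENTIALS");
  if (path == nullptr) return false;
  *out = std::string("service account file: ") + path;
  return true;
}

// Stubs standing in for the well-known file probe and the GCE metadata probe.
static bool creds_from_well_known_file(std::string*) { return false; }
static bool creds_from_gce_metadata(std::string*) { return false; }

static bool google_default_credentials(std::string* out) {
  // The first source that succeeds wins; the caller caches the result globally.
  return creds_from_env(out) || creds_from_well_known_file(out) ||
         creds_from_gce_metadata(out);
}

int main() {
  std::string creds;
  return google_default_credentials(&creds) ? 0 : 1;
}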
-- */ -static grpc_well_known_credentials_path_getter creds_path_getter = NULL; +static grpc_well_known_credentials_path_getter creds_path_getter = nullptr; -char *grpc_get_well_known_google_credentials_file_path(void) { - if (creds_path_getter != NULL) return creds_path_getter(); +char* grpc_get_well_known_google_credentials_file_path(void) { + if (creds_path_getter != nullptr) return creds_path_getter(); return grpc_get_well_known_google_credentials_file_path_impl(); } diff --git a/Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.h index c3755e01a..b163e4863 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/google_default/google_default_credentials.h @@ -42,4 +42,4 @@ void grpc_flush_cached_google_default_credentials(void); #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H \ - */ + */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.c b/Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.cc similarity index 62% rename from Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.c rename to Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.cc index 3de8319d9..5d92fa88c 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/security/credentials/iam/iam_credentials.h" #include @@ -27,59 +29,58 @@ #include #include -static void iam_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { - grpc_google_iam_credentials *c = (grpc_google_iam_credentials *)creds; - grpc_credentials_mdelem_array_destroy(exec_ctx, &c->md_array); +static void iam_destruct(grpc_call_credentials* creds) { + grpc_google_iam_credentials* c = + reinterpret_cast(creds); + grpc_credentials_mdelem_array_destroy(&c->md_array); } -static bool iam_get_request_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds, - grpc_polling_entity *pollent, +static bool iam_get_request_metadata(grpc_call_credentials* creds, + grpc_polling_entity* pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, - grpc_closure *on_request_metadata, - grpc_error **error) { - grpc_google_iam_credentials *c = (grpc_google_iam_credentials *)creds; + grpc_credentials_mdelem_array* md_array, + grpc_closure* on_request_metadata, + grpc_error** error) { + grpc_google_iam_credentials* c = + reinterpret_cast(creds); grpc_credentials_mdelem_array_append(md_array, &c->md_array); return true; } static void iam_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array, + grpc_error* error) { GRPC_ERROR_UNREF(error); } static grpc_call_credentials_vtable iam_vtable = { iam_destruct, iam_get_request_metadata, iam_cancel_get_request_metadata}; -grpc_call_credentials *grpc_google_iam_credentials_create( - const char *token, const char *authority_selector, void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +grpc_call_credentials* grpc_google_iam_credentials_create( + const char* token, const char* authority_selector, void* reserved) { 
+ grpc_core::ExecCtx exec_ctx; GRPC_API_TRACE( "grpc_iam_credentials_create(token=%s, authority_selector=%s, " "reserved=%p)", 3, (token, authority_selector, reserved)); - GPR_ASSERT(reserved == NULL); - GPR_ASSERT(token != NULL); - GPR_ASSERT(authority_selector != NULL); - grpc_google_iam_credentials *c = gpr_zalloc(sizeof(*c)); + GPR_ASSERT(reserved == nullptr); + GPR_ASSERT(token != nullptr); + GPR_ASSERT(authority_selector != nullptr); + grpc_google_iam_credentials* c = + static_cast(gpr_zalloc(sizeof(*c))); c->base.type = GRPC_CALL_CREDENTIALS_TYPE_IAM; c->base.vtable = &iam_vtable; gpr_ref_init(&c->base.refcount, 1); grpc_mdelem md = grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_from_static_string(GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY), grpc_slice_from_copied_string(token)); grpc_credentials_mdelem_array_add(&c->md_array, md); - GRPC_MDELEM_UNREF(&exec_ctx, md); + GRPC_MDELEM_UNREF(md); md = grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_from_static_string(GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY), grpc_slice_from_copied_string(authority_selector)); grpc_credentials_mdelem_array_add(&c->md_array, md); - GRPC_MDELEM_UNREF(&exec_ctx, md); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_MDELEM_UNREF(md); + return &c->base; } diff --git a/Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.h index 5e3cf65ba..a45710fe0 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/iam/iam_credentials.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_IAM_IAM_CREDENTIALS_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_IAM_IAM_CREDENTIALS_H +#include + #include "src/core/lib/security/credentials/credentials.h" typedef struct { diff --git a/Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.c b/Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.cc similarity index 65% rename from Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.c rename to Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.cc index fff71255a..1c4827df0 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.cc @@ -16,22 +16,27 @@ * */ +#include + #include "src/core/lib/security/credentials/jwt/json_token.h" #include +#include #include #include #include #include +#include "src/core/lib/gpr/string.h" #include "src/core/lib/security/util/json_util.h" #include "src/core/lib/slice/b64.h" -#include "src/core/lib/support/string.h" +extern "C" { #include #include #include +} /* --- Constants. --- */ @@ -49,30 +54,31 @@ gpr_timespec grpc_max_auth_token_lifetime() { /* --- Override for testing. --- */ -static grpc_jwt_encode_and_sign_override g_jwt_encode_and_sign_override = NULL; +static grpc_jwt_encode_and_sign_override g_jwt_encode_and_sign_override = + nullptr; /* --- grpc_auth_json_key. 
--- */ -int grpc_auth_json_key_is_valid(const grpc_auth_json_key *json_key) { - return (json_key != NULL) && +int grpc_auth_json_key_is_valid(const grpc_auth_json_key* json_key) { + return (json_key != nullptr) && strcmp(json_key->type, GRPC_AUTH_JSON_TYPE_INVALID); } -grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json *json) { +grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json* json) { grpc_auth_json_key result; - BIO *bio = NULL; - const char *prop_value; + BIO* bio = nullptr; + const char* prop_value; int success = 0; memset(&result, 0, sizeof(grpc_auth_json_key)); result.type = GRPC_AUTH_JSON_TYPE_INVALID; - if (json == NULL) { + if (json == nullptr) { gpr_log(GPR_ERROR, "Invalid json."); goto end; } prop_value = grpc_json_get_string_property(json, "type"); - if (prop_value == NULL || + if (prop_value == nullptr || strcmp(prop_value, GRPC_AUTH_JSON_TYPE_SERVICE_ACCOUNT)) { goto end; } @@ -87,65 +93,66 @@ grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json *json) { } prop_value = grpc_json_get_string_property(json, "private_key"); - if (prop_value == NULL) { + if (prop_value == nullptr) { goto end; } bio = BIO_new(BIO_s_mem()); success = BIO_puts(bio, prop_value); - if ((success < 0) || ((size_t)success != strlen(prop_value))) { + if ((success < 0) || (static_cast(success) != strlen(prop_value))) { gpr_log(GPR_ERROR, "Could not write into openssl BIO."); goto end; } - result.private_key = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, ""); - if (result.private_key == NULL) { + result.private_key = + PEM_read_bio_RSAPrivateKey(bio, nullptr, nullptr, (void*)""); + if (result.private_key == nullptr) { gpr_log(GPR_ERROR, "Could not deserialize private key."); goto end; } success = 1; end: - if (bio != NULL) BIO_free(bio); + if (bio != nullptr) BIO_free(bio); if (!success) grpc_auth_json_key_destruct(&result); return result; } grpc_auth_json_key grpc_auth_json_key_create_from_string( - const char *json_string) { - char *scratchpad = gpr_strdup(json_string); - grpc_json *json = grpc_json_parse_string(scratchpad); + const char* json_string) { + char* scratchpad = gpr_strdup(json_string); + grpc_json* json = grpc_json_parse_string(scratchpad); grpc_auth_json_key result = grpc_auth_json_key_create_from_json(json); - if (json != NULL) grpc_json_destroy(json); + if (json != nullptr) grpc_json_destroy(json); gpr_free(scratchpad); return result; } -void grpc_auth_json_key_destruct(grpc_auth_json_key *json_key) { - if (json_key == NULL) return; +void grpc_auth_json_key_destruct(grpc_auth_json_key* json_key) { + if (json_key == nullptr) return; json_key->type = GRPC_AUTH_JSON_TYPE_INVALID; - if (json_key->client_id != NULL) { + if (json_key->client_id != nullptr) { gpr_free(json_key->client_id); - json_key->client_id = NULL; + json_key->client_id = nullptr; } - if (json_key->private_key_id != NULL) { + if (json_key->private_key_id != nullptr) { gpr_free(json_key->private_key_id); - json_key->private_key_id = NULL; + json_key->private_key_id = nullptr; } - if (json_key->client_email != NULL) { + if (json_key->client_email != nullptr) { gpr_free(json_key->client_email); - json_key->client_email = NULL; + json_key->client_email = nullptr; } - if (json_key->private_key != NULL) { + if (json_key->private_key != nullptr) { RSA_free(json_key->private_key); - json_key->private_key = NULL; + json_key->private_key = nullptr; } } /* --- jwt encoding and signature. 
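/* Illustrative sketch, not part of the vendored diff: loading an RSA private
 * key from an in-memory PEM string through a memory BIO, the same OpenSSL
 * calls grpc_auth_json_key_create_from_json uses above.  The PEM text below
 * is a placeholder; a real key is needed for PEM_read_bio_RSAPrivateKey to
 * succeed. */
#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/rsa.h>

static RSA* rsa_from_pem(const char* pem) {
  BIO* bio = BIO_new(BIO_s_mem());
  if (bio == nullptr) return nullptr;
  RSA* key = nullptr;
  if (BIO_puts(bio, pem) > 0) {
    key = PEM_read_bio_RSAPrivateKey(bio, nullptr, nullptr, nullptr);
  }
  BIO_free(bio);
  return key;  // caller owns the key and must RSA_free() it
}

int main() {
  RSA* key = rsa_from_pem(
      "-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----\n");
  if (key != nullptr) RSA_free(key);
  return 0;
}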
--- */ -static grpc_json *create_child(grpc_json *brother, grpc_json *parent, - const char *key, const char *value, +static grpc_json* create_child(grpc_json* brother, grpc_json* parent, + const char* key, const char* value, grpc_json_type type) { - grpc_json *child = grpc_json_create(type); + grpc_json* child = grpc_json_create(type); if (brother) brother->next = child; if (!parent->child) parent->child = child; child->parent = parent; @@ -154,13 +161,13 @@ static grpc_json *create_child(grpc_json *brother, grpc_json *parent, return child; } -static char *encoded_jwt_header(const char *key_id, const char *algorithm) { - grpc_json *json = grpc_json_create(GRPC_JSON_OBJECT); - grpc_json *child = NULL; - char *json_str = NULL; - char *result = NULL; +static char* encoded_jwt_header(const char* key_id, const char* algorithm) { + grpc_json* json = grpc_json_create(GRPC_JSON_OBJECT); + grpc_json* child = nullptr; + char* json_str = nullptr; + char* result = nullptr; - child = create_child(NULL, json, "alg", algorithm, GRPC_JSON_STRING); + child = create_child(nullptr, json, "alg", algorithm, GRPC_JSON_STRING); child = create_child(child, json, "typ", GRPC_JWT_TYPE, GRPC_JSON_STRING); create_child(child, json, "kid", key_id, GRPC_JSON_STRING); @@ -171,13 +178,13 @@ static char *encoded_jwt_header(const char *key_id, const char *algorithm) { return result; } -static char *encoded_jwt_claim(const grpc_auth_json_key *json_key, - const char *audience, - gpr_timespec token_lifetime, const char *scope) { - grpc_json *json = grpc_json_create(GRPC_JSON_OBJECT); - grpc_json *child = NULL; - char *json_str = NULL; - char *result = NULL; +static char* encoded_jwt_claim(const grpc_auth_json_key* json_key, + const char* audience, + gpr_timespec token_lifetime, const char* scope) { + grpc_json* json = grpc_json_create(GRPC_JSON_OBJECT); + grpc_json* child = nullptr; + char* json_str = nullptr; + char* result = nullptr; gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME); gpr_timespec expiration = gpr_time_add(now, token_lifetime); char now_str[GPR_LTOA_MIN_BUFSIZE]; @@ -189,9 +196,9 @@ static char *encoded_jwt_claim(const grpc_auth_json_key *json_key, int64_ttoa(now.tv_sec, now_str); int64_ttoa(expiration.tv_sec, expiration_str); - child = - create_child(NULL, json, "iss", json_key->client_email, GRPC_JSON_STRING); - if (scope != NULL) { + child = create_child(nullptr, json, "iss", json_key->client_email, + GRPC_JSON_STRING); + if (scope != nullptr) { child = create_child(child, json, "scope", scope, GRPC_JSON_STRING); } else { /* Unscoped JWTs need a sub field. 
*/ @@ -210,12 +217,13 @@ static char *encoded_jwt_claim(const grpc_auth_json_key *json_key, return result; } -static char *dot_concat_and_free_strings(char *str1, char *str2) { +static char* dot_concat_and_free_strings(char* str1, char* str2) { size_t str1_len = strlen(str1); size_t str2_len = strlen(str2); size_t result_len = str1_len + 1 /* dot */ + str2_len; - char *result = gpr_malloc(result_len + 1 /* NULL terminated */); - char *current = result; + char* result = + static_cast(gpr_malloc(result_len + 1 /* NULL terminated */)); + char* current = result; memcpy(current, str1, str1_len); current += str1_len; *(current++) = '.'; @@ -229,32 +237,32 @@ static char *dot_concat_and_free_strings(char *str1, char *str2) { return result; } -const EVP_MD *openssl_digest_from_algorithm(const char *algorithm) { +const EVP_MD* openssl_digest_from_algorithm(const char* algorithm) { if (strcmp(algorithm, GRPC_JWT_RSA_SHA256_ALGORITHM) == 0) { return EVP_sha256(); } else { gpr_log(GPR_ERROR, "Unknown algorithm %s.", algorithm); - return NULL; + return nullptr; } } -char *compute_and_encode_signature(const grpc_auth_json_key *json_key, - const char *signature_algorithm, - const char *to_sign) { - const EVP_MD *md = openssl_digest_from_algorithm(signature_algorithm); - EVP_MD_CTX *md_ctx = NULL; - EVP_PKEY *key = EVP_PKEY_new(); +char* compute_and_encode_signature(const grpc_auth_json_key* json_key, + const char* signature_algorithm, + const char* to_sign) { + const EVP_MD* md = openssl_digest_from_algorithm(signature_algorithm); + EVP_MD_CTX* md_ctx = nullptr; + EVP_PKEY* key = EVP_PKEY_new(); size_t sig_len = 0; - unsigned char *sig = NULL; - char *result = NULL; - if (md == NULL) return NULL; + unsigned char* sig = nullptr; + char* result = nullptr; + if (md == nullptr) return nullptr; md_ctx = EVP_MD_CTX_create(); - if (md_ctx == NULL) { + if (md_ctx == nullptr) { gpr_log(GPR_ERROR, "Could not create MD_CTX"); goto end; } EVP_PKEY_set1_RSA(key, json_key->private_key); - if (EVP_DigestSignInit(md_ctx, NULL, md, NULL, key) != 1) { + if (EVP_DigestSignInit(md_ctx, nullptr, md, nullptr, key) != 1) { gpr_log(GPR_ERROR, "DigestInit failed."); goto end; } @@ -262,11 +270,11 @@ char *compute_and_encode_signature(const grpc_auth_json_key *json_key, gpr_log(GPR_ERROR, "DigestUpdate failed."); goto end; } - if (EVP_DigestSignFinal(md_ctx, NULL, &sig_len) != 1) { + if (EVP_DigestSignFinal(md_ctx, nullptr, &sig_len) != 1) { gpr_log(GPR_ERROR, "DigestFinal (get signature length) failed."); goto end; } - sig = gpr_malloc(sig_len); + sig = static_cast(gpr_malloc(sig_len)); if (EVP_DigestSignFinal(md_ctx, sig, &sig_len) != 1) { gpr_log(GPR_ERROR, "DigestFinal (signature compute) failed."); goto end; @@ -274,27 +282,27 @@ char *compute_and_encode_signature(const grpc_auth_json_key *json_key, result = grpc_base64_encode(sig, sig_len, 1, 0); end: - if (key != NULL) EVP_PKEY_free(key); - if (md_ctx != NULL) EVP_MD_CTX_destroy(md_ctx); - if (sig != NULL) gpr_free(sig); + if (key != nullptr) EVP_PKEY_free(key); + if (md_ctx != nullptr) EVP_MD_CTX_destroy(md_ctx); + if (sig != nullptr) gpr_free(sig); return result; } -char *grpc_jwt_encode_and_sign(const grpc_auth_json_key *json_key, - const char *audience, - gpr_timespec token_lifetime, const char *scope) { - if (g_jwt_encode_and_sign_override != NULL) { +char* grpc_jwt_encode_and_sign(const grpc_auth_json_key* json_key, + const char* audience, + gpr_timespec token_lifetime, const char* scope) { + if (g_jwt_encode_and_sign_override != nullptr) { return 
g_jwt_encode_and_sign_override(json_key, audience, token_lifetime, scope); } else { - const char *sig_algo = GRPC_JWT_RSA_SHA256_ALGORITHM; - char *to_sign = dot_concat_and_free_strings( + const char* sig_algo = GRPC_JWT_RSA_SHA256_ALGORITHM; + char* to_sign = dot_concat_and_free_strings( encoded_jwt_header(json_key->private_key_id, sig_algo), encoded_jwt_claim(json_key, audience, token_lifetime, scope)); - char *sig = compute_and_encode_signature(json_key, sig_algo, to_sign); - if (sig == NULL) { + char* sig = compute_and_encode_signature(json_key, sig_algo, to_sign); + if (sig == nullptr) { gpr_free(to_sign); - return NULL; + return nullptr; } return dot_concat_and_free_strings(to_sign, sig); } diff --git a/Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.h b/Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.h index e50790ef2..d0fb4ebd0 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/jwt/json_token.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H +#include + #include #include @@ -31,40 +33,40 @@ /* --- auth_json_key parsing. --- */ typedef struct { - const char *type; - char *private_key_id; - char *client_id; - char *client_email; - RSA *private_key; + const char* type; + char* private_key_id; + char* client_id; + char* client_email; + RSA* private_key; } grpc_auth_json_key; /* Returns 1 if the object is valid, 0 otherwise. */ -int grpc_auth_json_key_is_valid(const grpc_auth_json_key *json_key); +int grpc_auth_json_key_is_valid(const grpc_auth_json_key* json_key); /* Creates a json_key object from string. Returns an invalid object if a parsing error has been encountered. */ grpc_auth_json_key grpc_auth_json_key_create_from_string( - const char *json_string); + const char* json_string); /* Creates a json_key object from parsed json. Returns an invalid object if a parsing error has been encountered. */ -grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json *json); +grpc_auth_json_key grpc_auth_json_key_create_from_json(const grpc_json* json); /* Destructs the object. */ -void grpc_auth_json_key_destruct(grpc_auth_json_key *json_key); +void grpc_auth_json_key_destruct(grpc_auth_json_key* json_key); /* --- json token encoding and signing. --- */ /* Caller is responsible for calling gpr_free on the returned value. May return NULL on invalid input. The scope parameter may be NULL. */ -char *grpc_jwt_encode_and_sign(const grpc_auth_json_key *json_key, - const char *audience, - gpr_timespec token_lifetime, const char *scope); +char* grpc_jwt_encode_and_sign(const grpc_auth_json_key* json_key, + const char* audience, + gpr_timespec token_lifetime, const char* scope); /* Override encode_and_sign function for testing. */ -typedef char *(*grpc_jwt_encode_and_sign_override)( - const grpc_auth_json_key *json_key, const char *audience, - gpr_timespec token_lifetime, const char *scope); +typedef char* (*grpc_jwt_encode_and_sign_override)( + const grpc_auth_json_key* json_key, const char* audience, + gpr_timespec token_lifetime, const char* scope); /* Set a custom encode_and_sign override for testing. 
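/* Illustrative sketch, not part of the vendored diff: the two-pass
 * EVP_DigestSignFinal protocol used by compute_and_encode_signature above.
 * The first call with a null output buffer reports the required signature
 * length, the second call produces the signature.  Error handling is reduced
 * to early bail-out; the caller supplies an EVP_PKEY already holding the RSA
 * key, and would base64-encode the returned bytes as the vendored code does. */
#include <openssl/evp.h>
#include <cstring>
#include <vector>

static std::vector<unsigned char> rs256_sign(EVP_PKEY* key,
                                             const char* to_sign) {
  std::vector<unsigned char> sig;
  EVP_MD_CTX* md_ctx = EVP_MD_CTX_create();
  if (md_ctx == nullptr) return sig;
  size_t sig_len = 0;
  if (EVP_DigestSignInit(md_ctx, nullptr, EVP_sha256(), nullptr, key) == 1 &&
      EVP_DigestSignUpdate(md_ctx, to_sign, std::strlen(to_sign)) == 1 &&
      EVP_DigestSignFinal(md_ctx, nullptr, &sig_len) == 1) {  // pass 1: length
    sig.resize(sig_len);
    if (EVP_DigestSignFinal(md_ctx, sig.data(), &sig_len) == 1) {  // pass 2: sign
      sig.resize(sig_len);  // actual length may be smaller than the estimate
    } else {
      sig.clear();
    }
  }
  EVP_MD_CTX_destroy(md_ctx);
  return sig;
}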
*/ void grpc_jwt_encode_and_sign_set_override( diff --git a/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.c b/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.cc similarity index 64% rename from Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.c rename to Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.cc index 02c82e99b..05c08a68b 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.cc @@ -16,8 +16,11 @@ * */ +#include + #include "src/core/lib/security/credentials/jwt/jwt_credentials.h" +#include #include #include "src/core/lib/surface/api_trace.h" @@ -27,35 +30,32 @@ #include #include -static void jwt_reset_cache(grpc_exec_ctx *exec_ctx, - grpc_service_account_jwt_access_credentials *c) { - GRPC_MDELEM_UNREF(exec_ctx, c->cached.jwt_md); +static void jwt_reset_cache(grpc_service_account_jwt_access_credentials* c) { + GRPC_MDELEM_UNREF(c->cached.jwt_md); c->cached.jwt_md = GRPC_MDNULL; - if (c->cached.service_url != NULL) { + if (c->cached.service_url != nullptr) { gpr_free(c->cached.service_url); - c->cached.service_url = NULL; + c->cached.service_url = nullptr; } c->cached.jwt_expiration = gpr_inf_past(GPR_CLOCK_REALTIME); } -static void jwt_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { - grpc_service_account_jwt_access_credentials *c = - (grpc_service_account_jwt_access_credentials *)creds; +static void jwt_destruct(grpc_call_credentials* creds) { + grpc_service_account_jwt_access_credentials* c = + reinterpret_cast(creds); grpc_auth_json_key_destruct(&c->key); - jwt_reset_cache(exec_ctx, c); + jwt_reset_cache(c); gpr_mu_destroy(&c->cache_mu); } -static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds, - grpc_polling_entity *pollent, +static bool jwt_get_request_metadata(grpc_call_credentials* creds, + grpc_polling_entity* pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, - grpc_closure *on_request_metadata, - grpc_error **error) { - grpc_service_account_jwt_access_credentials *c = - (grpc_service_account_jwt_access_credentials *)creds; + grpc_credentials_mdelem_array* md_array, + grpc_closure* on_request_metadata, + grpc_error** error) { + grpc_service_account_jwt_access_credentials* c = + reinterpret_cast(creds); gpr_timespec refresh_threshold = gpr_time_from_seconds( GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN); @@ -63,7 +63,7 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, grpc_mdelem jwt_md = GRPC_MDNULL; { gpr_mu_lock(&c->cache_mu); - if (c->cached.service_url != NULL && + if (c->cached.service_url != nullptr && strcmp(c->cached.service_url, context.service_url) == 0 && !GRPC_MDISNULL(c->cached.jwt_md) && (gpr_time_cmp(gpr_time_sub(c->cached.jwt_expiration, @@ -75,21 +75,20 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, } if (GRPC_MDISNULL(jwt_md)) { - char *jwt = NULL; + char* jwt = nullptr; /* Generate a new jwt. 
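/* Illustrative sketch, not part of the vendored diff: the cache test made by
 * jwt_get_request_metadata above, standalone.  A cached token is reused only
 * for the same service URL and only while more than a refresh threshold of
 * lifetime remains, so a call never goes out with a token that is about to
 * expire.  The toy_jwt_cache type and the 60-second threshold are
 * hypothetical. */
#include <ctime>
#include <string>

struct toy_jwt_cache {
  std::string service_url;
  std::string authorization;  // "Bearer <jwt>"
  std::time_t expiration;
};

static bool cache_is_usable(const toy_jwt_cache& cache, const std::string& url,
                            std::time_t now, std::time_t refresh_threshold) {
  return !cache.authorization.empty() && cache.service_url == url &&
         cache.expiration - now > refresh_threshold;
}

int main() {
  std::time_t now = std::time(nullptr);
  toy_jwt_cache cache{"https://example.invalid/svc", "Bearer x", now + 3600};
  return cache_is_usable(cache, "https://example.invalid/svc", now, 60) ? 0 : 1;
}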
*/ gpr_mu_lock(&c->cache_mu); - jwt_reset_cache(exec_ctx, c); + jwt_reset_cache(c); jwt = grpc_jwt_encode_and_sign(&c->key, context.service_url, - c->jwt_lifetime, NULL); - if (jwt != NULL) { - char *md_value; + c->jwt_lifetime, nullptr); + if (jwt != nullptr) { + char* md_value; gpr_asprintf(&md_value, "Bearer %s", jwt); gpr_free(jwt); c->cached.jwt_expiration = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), c->jwt_lifetime); c->cached.service_url = gpr_strdup(context.service_url); c->cached.jwt_md = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY), grpc_slice_from_copied_string(md_value)); gpr_free(md_value); @@ -100,7 +99,7 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, if (!GRPC_MDISNULL(jwt_md)) { grpc_credentials_mdelem_array_add(md_array, jwt_md); - GRPC_MDELEM_UNREF(exec_ctx, jwt_md); + GRPC_MDELEM_UNREF(jwt_md); } else { *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Could not generate JWT."); } @@ -108,24 +107,24 @@ static bool jwt_get_request_metadata(grpc_exec_ctx *exec_ctx, } static void jwt_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array, + grpc_error* error) { GRPC_ERROR_UNREF(error); } static grpc_call_credentials_vtable jwt_vtable = { jwt_destruct, jwt_get_request_metadata, jwt_cancel_get_request_metadata}; -grpc_call_credentials * +grpc_call_credentials* grpc_service_account_jwt_access_credentials_create_from_auth_json_key( - grpc_exec_ctx *exec_ctx, grpc_auth_json_key key, - gpr_timespec token_lifetime) { - grpc_service_account_jwt_access_credentials *c; + grpc_auth_json_key key, gpr_timespec token_lifetime) { + grpc_service_account_jwt_access_credentials* c; if (!grpc_auth_json_key_is_valid(&key)) { gpr_log(GPR_ERROR, "Invalid input for jwt credentials creation"); - return NULL; + return nullptr; } - c = gpr_zalloc(sizeof(grpc_service_account_jwt_access_credentials)); + c = static_cast( + gpr_zalloc(sizeof(grpc_service_account_jwt_access_credentials))); c->base.type = GRPC_CALL_CREDENTIALS_TYPE_JWT; gpr_ref_init(&c->base.refcount, 1); c->base.vtable = &jwt_vtable; @@ -134,42 +133,42 @@ grpc_service_account_jwt_access_credentials_create_from_auth_json_key( if (gpr_time_cmp(token_lifetime, max_token_lifetime) > 0) { gpr_log(GPR_INFO, "Cropping token lifetime to maximum allowed value (%d secs).", - (int)max_token_lifetime.tv_sec); + static_cast(max_token_lifetime.tv_sec)); token_lifetime = grpc_max_auth_token_lifetime(); } c->jwt_lifetime = token_lifetime; gpr_mu_init(&c->cache_mu); - jwt_reset_cache(exec_ctx, c); + jwt_reset_cache(c); return &c->base; } -static char *redact_private_key(const char *json_key) { - char *json_copy = gpr_strdup(json_key); - grpc_json *json = grpc_json_parse_string(json_copy); +static char* redact_private_key(const char* json_key) { + char* json_copy = gpr_strdup(json_key); + grpc_json* json = grpc_json_parse_string(json_copy); if (!json) { gpr_free(json_copy); return gpr_strdup(""); } - const char *redacted = ""; - grpc_json *current = json->child; + const char* redacted = ""; + grpc_json* current = json->child; while (current) { if (current->type == GRPC_JSON_STRING && strcmp(current->key, "private_key") == 0) { - current->value = (char *)redacted; + current->value = const_cast(redacted); break; } current = current->next; } - char *clean_json = grpc_json_dump_to_string(json, 2); + char* clean_json = 
grpc_json_dump_to_string(json, 2); gpr_free(json_copy); grpc_json_destroy(json); return clean_json; } -grpc_call_credentials *grpc_service_account_jwt_access_credentials_create( - const char *json_key, gpr_timespec token_lifetime, void *reserved) { - if (GRPC_TRACER_ON(grpc_api_trace)) { - char *clean_json = redact_private_key(json_key); +grpc_call_credentials* grpc_service_account_jwt_access_credentials_create( + const char* json_key, gpr_timespec token_lifetime, void* reserved) { + if (grpc_api_trace.enabled()) { + char* clean_json = redact_private_key(json_key); gpr_log(GPR_INFO, "grpc_service_account_jwt_access_credentials_create(" "json_key=%s, " @@ -178,15 +177,14 @@ grpc_call_credentials *grpc_service_account_jwt_access_credentials_create( ", tv_nsec: %d, clock_type: %d }, " "reserved=%p)", clean_json, token_lifetime.tv_sec, token_lifetime.tv_nsec, - (int)token_lifetime.clock_type, reserved); + static_cast(token_lifetime.clock_type), reserved); gpr_free(clean_json); } - GPR_ASSERT(reserved == NULL); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_call_credentials *creds = + GPR_ASSERT(reserved == nullptr); + grpc_core::ExecCtx exec_ctx; + grpc_call_credentials* creds = grpc_service_account_jwt_access_credentials_create_from_auth_json_key( - &exec_ctx, grpc_auth_json_key_create_from_string(json_key), - token_lifetime); - grpc_exec_ctx_finish(&exec_ctx); + grpc_auth_json_key_create_from_string(json_key), token_lifetime); + return creds; } diff --git a/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.h index 07f402266..5c3d34aa5 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_credentials.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_CREDENTIALS_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_CREDENTIALS_H +#include + #include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/security/credentials/jwt/json_token.h" @@ -30,7 +32,7 @@ typedef struct { gpr_mu cache_mu; struct { grpc_mdelem jwt_md; - char *service_url; + char* service_url; gpr_timespec jwt_expiration; } cached; @@ -40,9 +42,8 @@ typedef struct { // Private constructor for jwt credentials from an already parsed json key. // Takes ownership of the key. 
-grpc_call_credentials * +grpc_call_credentials* grpc_service_account_jwt_access_credentials_create_from_auth_json_key( - grpc_exec_ctx *exec_ctx, grpc_auth_json_key key, - gpr_timespec token_lifetime); + grpc_auth_json_key key, gpr_timespec token_lifetime); #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_CREDENTIALS_H */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.c b/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.cc similarity index 57% rename from Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.c rename to Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.cc index a27284bc5..5c47276e3 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/security/credentials/jwt/jwt_verifier.h" #include @@ -25,19 +27,21 @@ #include #include #include -#include + +extern "C" { #include +} +#include "src/core/lib/gpr/string.h" #include "src/core/lib/http/httpcli.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/slice/b64.h" #include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/string.h" #include "src/core/tsi/ssl_types.h" /* --- Utils. --- */ -const char *grpc_jwt_verifier_status_to_string( +const char* grpc_jwt_verifier_status_to_string( grpc_jwt_verifier_status status) { switch (status) { case GRPC_JWT_VERIFIER_OK: @@ -59,7 +63,7 @@ const char *grpc_jwt_verifier_status_to_string( } } -static const EVP_MD *evp_md_from_alg(const char *alg) { +static const EVP_MD* evp_md_from_alg(const char* alg) { if (strcmp(alg, "RS256") == 0) { return EVP_sha256(); } else if (strcmp(alg, "RS384") == 0) { @@ -67,91 +71,90 @@ static const EVP_MD *evp_md_from_alg(const char *alg) { } else if (strcmp(alg, "RS512") == 0) { return EVP_sha512(); } else { - return NULL; + return nullptr; } } -static grpc_json *parse_json_part_from_jwt(grpc_exec_ctx *exec_ctx, - const char *str, size_t len, - grpc_slice *buffer) { - grpc_json *json; +static grpc_json* parse_json_part_from_jwt(const char* str, size_t len, + grpc_slice* buffer) { + grpc_json* json; - *buffer = grpc_base64_decode_with_len(exec_ctx, str, len, 1); + *buffer = grpc_base64_decode_with_len(str, len, 1); if (GRPC_SLICE_IS_EMPTY(*buffer)) { gpr_log(GPR_ERROR, "Invalid base64."); - return NULL; + return nullptr; } - json = grpc_json_parse_string_with_len((char *)GRPC_SLICE_START_PTR(*buffer), - GRPC_SLICE_LENGTH(*buffer)); - if (json == NULL) { - grpc_slice_unref_internal(exec_ctx, *buffer); + json = grpc_json_parse_string_with_len( + reinterpret_cast GRPC_SLICE_START_PTR(*buffer), + GRPC_SLICE_LENGTH(*buffer)); + if (json == nullptr) { + grpc_slice_unref_internal(*buffer); gpr_log(GPR_ERROR, "JSON parsing error."); } return json; } -static const char *validate_string_field(const grpc_json *json, - const char *key) { +static const char* validate_string_field(const grpc_json* json, + const char* key) { if (json->type != GRPC_JSON_STRING) { gpr_log(GPR_ERROR, "Invalid %s field [%s]", key, json->value); - return NULL; + return nullptr; } return json->value; } -static gpr_timespec validate_time_field(const grpc_json *json, - const char *key) { +static gpr_timespec validate_time_field(const grpc_json* json, + const char* key) { gpr_timespec result = gpr_time_0(GPR_CLOCK_REALTIME); if (json->type != GRPC_JSON_NUMBER) { gpr_log(GPR_ERROR, "Invalid %s field [%s]", key, json->value); return 
result; } - result.tv_sec = strtol(json->value, NULL, 10); + result.tv_sec = strtol(json->value, nullptr, 10); return result; } /* --- JOSE header. see http://tools.ietf.org/html/rfc7515#section-4 --- */ typedef struct { - const char *alg; - const char *kid; - const char *typ; + const char* alg; + const char* kid; + const char* typ; /* TODO(jboeuf): Add others as needed (jku, jwk, x5u, x5c and so on...). */ grpc_slice buffer; } jose_header; -static void jose_header_destroy(grpc_exec_ctx *exec_ctx, jose_header *h) { - grpc_slice_unref_internal(exec_ctx, h->buffer); +static void jose_header_destroy(jose_header* h) { + grpc_slice_unref_internal(h->buffer); gpr_free(h); } /* Takes ownership of json and buffer. */ -static jose_header *jose_header_from_json(grpc_exec_ctx *exec_ctx, - grpc_json *json, grpc_slice buffer) { - grpc_json *cur; - jose_header *h = gpr_zalloc(sizeof(jose_header)); +static jose_header* jose_header_from_json(grpc_json* json, grpc_slice buffer) { + grpc_json* cur; + jose_header* h = static_cast(gpr_zalloc(sizeof(jose_header))); h->buffer = buffer; - for (cur = json->child; cur != NULL; cur = cur->next) { + for (cur = json->child; cur != nullptr; cur = cur->next) { if (strcmp(cur->key, "alg") == 0) { /* We only support RSA-1.5 signatures for now. Beware of this if we add HMAC support: https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/ */ if (cur->type != GRPC_JSON_STRING || strncmp(cur->value, "RS", 2) || - evp_md_from_alg(cur->value) == NULL) { + evp_md_from_alg(cur->value) == nullptr) { gpr_log(GPR_ERROR, "Invalid alg field [%s]", cur->value); goto error; } h->alg = cur->value; } else if (strcmp(cur->key, "typ") == 0) { h->typ = validate_string_field(cur, "typ"); - if (h->typ == NULL) goto error; + if (h->typ == nullptr) goto error; } else if (strcmp(cur->key, "kid") == 0) { h->kid = validate_string_field(cur, "kid"); - if (h->kid == NULL) goto error; + if (h->kid == nullptr) goto error; } } - if (h->alg == NULL) { + if (h->alg == nullptr) { gpr_log(GPR_ERROR, "Missing alg field."); goto error; } @@ -161,77 +164,77 @@ static jose_header *jose_header_from_json(grpc_exec_ctx *exec_ctx, error: grpc_json_destroy(json); - jose_header_destroy(exec_ctx, h); - return NULL; + jose_header_destroy(h); + return nullptr; } /* --- JWT claims. see http://tools.ietf.org/html/rfc7519#section-4.1 */ struct grpc_jwt_claims { /* Well known properties already parsed. 
*/ - const char *sub; - const char *iss; - const char *aud; - const char *jti; + const char* sub; + const char* iss; + const char* aud; + const char* jti; gpr_timespec iat; gpr_timespec exp; gpr_timespec nbf; - grpc_json *json; + grpc_json* json; grpc_slice buffer; }; -void grpc_jwt_claims_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_claims *claims) { +void grpc_jwt_claims_destroy(grpc_jwt_claims* claims) { grpc_json_destroy(claims->json); - grpc_slice_unref_internal(exec_ctx, claims->buffer); + grpc_slice_unref_internal(claims->buffer); gpr_free(claims); } -const grpc_json *grpc_jwt_claims_json(const grpc_jwt_claims *claims) { - if (claims == NULL) return NULL; +const grpc_json* grpc_jwt_claims_json(const grpc_jwt_claims* claims) { + if (claims == nullptr) return nullptr; return claims->json; } -const char *grpc_jwt_claims_subject(const grpc_jwt_claims *claims) { - if (claims == NULL) return NULL; +const char* grpc_jwt_claims_subject(const grpc_jwt_claims* claims) { + if (claims == nullptr) return nullptr; return claims->sub; } -const char *grpc_jwt_claims_issuer(const grpc_jwt_claims *claims) { - if (claims == NULL) return NULL; +const char* grpc_jwt_claims_issuer(const grpc_jwt_claims* claims) { + if (claims == nullptr) return nullptr; return claims->iss; } -const char *grpc_jwt_claims_id(const grpc_jwt_claims *claims) { - if (claims == NULL) return NULL; +const char* grpc_jwt_claims_id(const grpc_jwt_claims* claims) { + if (claims == nullptr) return nullptr; return claims->jti; } -const char *grpc_jwt_claims_audience(const grpc_jwt_claims *claims) { - if (claims == NULL) return NULL; +const char* grpc_jwt_claims_audience(const grpc_jwt_claims* claims) { + if (claims == nullptr) return nullptr; return claims->aud; } -gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims *claims) { - if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME); +gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims* claims) { + if (claims == nullptr) return gpr_inf_past(GPR_CLOCK_REALTIME); return claims->iat; } -gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims *claims) { - if (claims == NULL) return gpr_inf_future(GPR_CLOCK_REALTIME); +gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims* claims) { + if (claims == nullptr) return gpr_inf_future(GPR_CLOCK_REALTIME); return claims->exp; } -gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims) { - if (claims == NULL) return gpr_inf_past(GPR_CLOCK_REALTIME); +gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims* claims) { + if (claims == nullptr) return gpr_inf_past(GPR_CLOCK_REALTIME); return claims->nbf; } /* Takes ownership of json and buffer even in case of failure. */ -grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx, - grpc_json *json, grpc_slice buffer) { - grpc_json *cur; - grpc_jwt_claims *claims = gpr_malloc(sizeof(grpc_jwt_claims)); +grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer) { + grpc_json* cur; + grpc_jwt_claims* claims = + static_cast(gpr_malloc(sizeof(grpc_jwt_claims))); memset(claims, 0, sizeof(grpc_jwt_claims)); claims->json = json; claims->buffer = buffer; @@ -240,19 +243,19 @@ grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx, claims->exp = gpr_inf_future(GPR_CLOCK_REALTIME); /* Per the spec, all fields are optional. 
*/ - for (cur = json->child; cur != NULL; cur = cur->next) { + for (cur = json->child; cur != nullptr; cur = cur->next) { if (strcmp(cur->key, "sub") == 0) { claims->sub = validate_string_field(cur, "sub"); - if (claims->sub == NULL) goto error; + if (claims->sub == nullptr) goto error; } else if (strcmp(cur->key, "iss") == 0) { claims->iss = validate_string_field(cur, "iss"); - if (claims->iss == NULL) goto error; + if (claims->iss == nullptr) goto error; } else if (strcmp(cur->key, "aud") == 0) { claims->aud = validate_string_field(cur, "aud"); - if (claims->aud == NULL) goto error; + if (claims->aud == nullptr) goto error; } else if (strcmp(cur->key, "jti") == 0) { claims->jti = validate_string_field(cur, "jti"); - if (claims->jti == NULL) goto error; + if (claims->jti == nullptr) goto error; } else if (strcmp(cur->key, "iat") == 0) { claims->iat = validate_time_field(cur, "iat"); if (gpr_time_cmp(claims->iat, gpr_time_0(GPR_CLOCK_REALTIME)) == 0) @@ -270,16 +273,16 @@ grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx, return claims; error: - grpc_jwt_claims_destroy(exec_ctx, claims); - return NULL; + grpc_jwt_claims_destroy(claims); + return nullptr; } -grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims, - const char *audience) { +grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims* claims, + const char* audience) { gpr_timespec skewed_now; int audience_ok; - GPR_ASSERT(claims != NULL); + GPR_ASSERT(claims != nullptr); skewed_now = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_clock_skew); @@ -297,23 +300,23 @@ grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims, /* This should be probably up to the upper layer to decide but let's harcode the 99% use case here for email issuers, where the JWT must be self issued. */ - if (grpc_jwt_issuer_email_domain(claims->iss) != NULL && - claims->sub != NULL && strcmp(claims->iss, claims->sub) != 0) { + if (grpc_jwt_issuer_email_domain(claims->iss) != nullptr && + claims->sub != nullptr && strcmp(claims->iss, claims->sub) != 0) { gpr_log(GPR_ERROR, "Email issuer (%s) cannot assert another subject (%s) than itself.", claims->iss, claims->sub); return GRPC_JWT_VERIFIER_BAD_SUBJECT; } - if (audience == NULL) { - audience_ok = claims->aud == NULL; + if (audience == nullptr) { + audience_ok = claims->aud == nullptr; } else { - audience_ok = claims->aud != NULL && strcmp(audience, claims->aud) == 0; + audience_ok = claims->aud != nullptr && strcmp(audience, claims->aud) == 0; } if (!audience_ok) { gpr_log(GPR_ERROR, "Audience mismatch: expected %s and found %s.", - audience == NULL ? "NULL" : audience, - claims->aud == NULL ? "NULL" : claims->aud); + audience == nullptr ? "NULL" : audience, + claims->aud == nullptr ? "NULL" : claims->aud); return GRPC_JWT_VERIFIER_BAD_AUDIENCE; } return GRPC_JWT_VERIFIER_OK; @@ -328,26 +331,27 @@ typedef enum { } http_response_index; typedef struct { - grpc_jwt_verifier *verifier; + grpc_jwt_verifier* verifier; grpc_polling_entity pollent; - jose_header *header; - grpc_jwt_claims *claims; - char *audience; + jose_header* header; + grpc_jwt_claims* claims; + char* audience; grpc_slice signature; grpc_slice signed_data; - void *user_data; + void* user_data; grpc_jwt_verification_done_cb user_cb; grpc_http_response responses[HTTP_RESPONSE_COUNT]; } verifier_cb_ctx; /* Takes ownership of the header, claims and signature. 
*/ -static verifier_cb_ctx *verifier_cb_ctx_create( - grpc_jwt_verifier *verifier, grpc_pollset *pollset, jose_header *header, - grpc_jwt_claims *claims, const char *audience, grpc_slice signature, - const char *signed_jwt, size_t signed_jwt_len, void *user_data, +static verifier_cb_ctx* verifier_cb_ctx_create( + grpc_jwt_verifier* verifier, grpc_pollset* pollset, jose_header* header, + grpc_jwt_claims* claims, const char* audience, grpc_slice signature, + const char* signed_jwt, size_t signed_jwt_len, void* user_data, grpc_jwt_verification_done_cb cb) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - verifier_cb_ctx *ctx = gpr_zalloc(sizeof(verifier_cb_ctx)); + grpc_core::ExecCtx exec_ctx; + verifier_cb_ctx* ctx = + static_cast(gpr_zalloc(sizeof(verifier_cb_ctx))); ctx->verifier = verifier; ctx->pollent = grpc_polling_entity_create_from_pollset(pollset); ctx->header = header; @@ -357,16 +361,16 @@ static verifier_cb_ctx *verifier_cb_ctx_create( ctx->signed_data = grpc_slice_from_copied_buffer(signed_jwt, signed_jwt_len); ctx->user_data = user_data; ctx->user_cb = cb; - grpc_exec_ctx_finish(&exec_ctx); + return ctx; } -void verifier_cb_ctx_destroy(grpc_exec_ctx *exec_ctx, verifier_cb_ctx *ctx) { - if (ctx->audience != NULL) gpr_free(ctx->audience); - if (ctx->claims != NULL) grpc_jwt_claims_destroy(exec_ctx, ctx->claims); - grpc_slice_unref_internal(exec_ctx, ctx->signature); - grpc_slice_unref_internal(exec_ctx, ctx->signed_data); - jose_header_destroy(exec_ctx, ctx->header); +void verifier_cb_ctx_destroy(verifier_cb_ctx* ctx) { + if (ctx->audience != nullptr) gpr_free(ctx->audience); + if (ctx->claims != nullptr) grpc_jwt_claims_destroy(ctx->claims); + grpc_slice_unref_internal(ctx->signature); + grpc_slice_unref_internal(ctx->signed_data); + jose_header_destroy(ctx->header); for (size_t i = 0; i < HTTP_RESPONSE_COUNT; i++) { grpc_http_response_destroy(&ctx->responses[i]); } @@ -380,63 +384,63 @@ void verifier_cb_ctx_destroy(grpc_exec_ctx *exec_ctx, verifier_cb_ctx *ctx) { gpr_timespec grpc_jwt_verifier_clock_skew = {60, 0, GPR_TIMESPAN}; /* Max delay defaults to one minute. */ -gpr_timespec grpc_jwt_verifier_max_delay = {60, 0, GPR_TIMESPAN}; +grpc_millis grpc_jwt_verifier_max_delay = 60 * GPR_MS_PER_SEC; typedef struct { - char *email_domain; - char *key_url_prefix; + char* email_domain; + char* key_url_prefix; } email_key_mapping; struct grpc_jwt_verifier { - email_key_mapping *mappings; + email_key_mapping* mappings; size_t num_mappings; /* Should be very few, linear search ok. 
*/ size_t allocated_mappings; grpc_httpcli_context http_ctx; }; -static grpc_json *json_from_http(const grpc_httpcli_response *response) { - grpc_json *json = NULL; +static grpc_json* json_from_http(const grpc_httpcli_response* response) { + grpc_json* json = nullptr; - if (response == NULL) { + if (response == nullptr) { gpr_log(GPR_ERROR, "HTTP response is NULL."); - return NULL; + return nullptr; } if (response->status != 200) { gpr_log(GPR_ERROR, "Call to http server failed with error %d.", response->status); - return NULL; + return nullptr; } json = grpc_json_parse_string_with_len(response->body, response->body_length); - if (json == NULL) { + if (json == nullptr) { gpr_log(GPR_ERROR, "Invalid JSON found in response."); } return json; } -static const grpc_json *find_property_by_name(const grpc_json *json, - const char *name) { - const grpc_json *cur; - for (cur = json->child; cur != NULL; cur = cur->next) { +static const grpc_json* find_property_by_name(const grpc_json* json, + const char* name) { + const grpc_json* cur; + for (cur = json->child; cur != nullptr; cur = cur->next) { if (strcmp(cur->key, name) == 0) return cur; } - return NULL; + return nullptr; } -static EVP_PKEY *extract_pkey_from_x509(const char *x509_str) { - X509 *x509 = NULL; - EVP_PKEY *result = NULL; - BIO *bio = BIO_new(BIO_s_mem()); +static EVP_PKEY* extract_pkey_from_x509(const char* x509_str) { + X509* x509 = nullptr; + EVP_PKEY* result = nullptr; + BIO* bio = BIO_new(BIO_s_mem()); size_t len = strlen(x509_str); GPR_ASSERT(len < INT_MAX); - BIO_write(bio, x509_str, (int)len); - x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL); - if (x509 == NULL) { + BIO_write(bio, x509_str, static_cast(len)); + x509 = PEM_read_bio_X509(bio, nullptr, nullptr, nullptr); + if (x509 == nullptr) { gpr_log(GPR_ERROR, "Unable to parse x509 cert."); goto end; } result = X509_get_pubkey(x509); - if (result == NULL) { + if (result == nullptr) { gpr_log(GPR_ERROR, "Cannot find public key in X509 cert."); } @@ -446,43 +450,43 @@ static EVP_PKEY *extract_pkey_from_x509(const char *x509_str) { return result; } -static BIGNUM *bignum_from_base64(grpc_exec_ctx *exec_ctx, const char *b64) { - BIGNUM *result = NULL; +static BIGNUM* bignum_from_base64(const char* b64) { + BIGNUM* result = nullptr; grpc_slice bin; - if (b64 == NULL) return NULL; - bin = grpc_base64_decode(exec_ctx, b64, 1); + if (b64 == nullptr) return nullptr; + bin = grpc_base64_decode(b64, 1); if (GRPC_SLICE_IS_EMPTY(bin)) { gpr_log(GPR_ERROR, "Invalid base64 for big num."); - return NULL; + return nullptr; } result = BN_bin2bn(GRPC_SLICE_START_PTR(bin), - TSI_SIZE_AS_SIZE(GRPC_SLICE_LENGTH(bin)), NULL); - grpc_slice_unref_internal(exec_ctx, bin); + TSI_SIZE_AS_SIZE(GRPC_SLICE_LENGTH(bin)), nullptr); + grpc_slice_unref_internal(bin); return result; } #if OPENSSL_VERSION_NUMBER < 0x10100000L // Provide compatibility across OpenSSL 1.02 and 1.1. -static int RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d) { +static int RSA_set0_key(RSA* r, BIGNUM* n, BIGNUM* e, BIGNUM* d) { /* If the fields n and e in r are NULL, the corresponding input * parameters MUST be non-NULL for n and e. d may be * left NULL (in case only the public key is used). 
*/ - if ((r->n == NULL && n == NULL) || (r->e == NULL && e == NULL)) { + if ((r->n == nullptr && n == nullptr) || (r->e == nullptr && e == nullptr)) { return 0; } - if (n != NULL) { + if (n != nullptr) { BN_free(r->n); r->n = n; } - if (e != NULL) { + if (e != nullptr) { BN_free(r->e); r->e = e; } - if (d != NULL) { + if (d != nullptr) { BN_free(r->d); r->d = d; } @@ -491,46 +495,43 @@ static int RSA_set0_key(RSA *r, BIGNUM *n, BIGNUM *e, BIGNUM *d) { } #endif // OPENSSL_VERSION_NUMBER < 0x10100000L -static EVP_PKEY *pkey_from_jwk(grpc_exec_ctx *exec_ctx, const grpc_json *json, - const char *kty) { - const grpc_json *key_prop; - RSA *rsa = NULL; - EVP_PKEY *result = NULL; - BIGNUM *tmp_n = NULL; - BIGNUM *tmp_e = NULL; +static EVP_PKEY* pkey_from_jwk(const grpc_json* json, const char* kty) { + const grpc_json* key_prop; + RSA* rsa = nullptr; + EVP_PKEY* result = nullptr; + BIGNUM* tmp_n = nullptr; + BIGNUM* tmp_e = nullptr; - GPR_ASSERT(kty != NULL && json != NULL); + GPR_ASSERT(kty != nullptr && json != nullptr); if (strcmp(kty, "RSA") != 0) { gpr_log(GPR_ERROR, "Unsupported key type %s.", kty); goto end; } rsa = RSA_new(); - if (rsa == NULL) { + if (rsa == nullptr) { gpr_log(GPR_ERROR, "Could not create rsa key."); goto end; } - for (key_prop = json->child; key_prop != NULL; key_prop = key_prop->next) { + for (key_prop = json->child; key_prop != nullptr; key_prop = key_prop->next) { if (strcmp(key_prop->key, "n") == 0) { - tmp_n = - bignum_from_base64(exec_ctx, validate_string_field(key_prop, "n")); - if (tmp_n == NULL) goto end; + tmp_n = bignum_from_base64(validate_string_field(key_prop, "n")); + if (tmp_n == nullptr) goto end; } else if (strcmp(key_prop->key, "e") == 0) { - tmp_e = - bignum_from_base64(exec_ctx, validate_string_field(key_prop, "e")); - if (tmp_e == NULL) goto end; + tmp_e = bignum_from_base64(validate_string_field(key_prop, "e")); + if (tmp_e == nullptr) goto end; } } - if (tmp_e == NULL || tmp_n == NULL) { + if (tmp_e == nullptr || tmp_n == nullptr) { gpr_log(GPR_ERROR, "Missing RSA public key field."); goto end; } - if (!RSA_set0_key(rsa, tmp_n, tmp_e, NULL)) { + if (!RSA_set0_key(rsa, tmp_n, tmp_e, nullptr)) { gpr_log(GPR_ERROR, "Cannot set RSA key from inputs."); goto end; } /* RSA_set0_key takes ownership on success. */ - tmp_n = NULL; - tmp_e = NULL; + tmp_n = nullptr; + tmp_e = nullptr; result = EVP_PKEY_new(); EVP_PKEY_set1_RSA(result, rsa); /* uprefs rsa. */ @@ -541,38 +542,38 @@ static EVP_PKEY *pkey_from_jwk(grpc_exec_ctx *exec_ctx, const grpc_json *json, return result; } -static EVP_PKEY *find_verification_key(grpc_exec_ctx *exec_ctx, - const grpc_json *json, - const char *header_alg, - const char *header_kid) { - const grpc_json *jkey; - const grpc_json *jwk_keys; +static EVP_PKEY* find_verification_key(const grpc_json* json, + const char* header_alg, + const char* header_kid) { + const grpc_json* jkey; + const grpc_json* jwk_keys; /* Try to parse the json as a JWK set: https://tools.ietf.org/html/rfc7517#section-5. */ jwk_keys = find_property_by_name(json, "keys"); - if (jwk_keys == NULL) { + if (jwk_keys == nullptr) { /* Use the google proprietary format which is: { : , : , ... 
} */ - const grpc_json *cur = find_property_by_name(json, header_kid); - if (cur == NULL) return NULL; + const grpc_json* cur = find_property_by_name(json, header_kid); + if (cur == nullptr) return nullptr; return extract_pkey_from_x509(cur->value); } if (jwk_keys->type != GRPC_JSON_ARRAY) { gpr_log(GPR_ERROR, "Unexpected value type of keys property in jwks key set."); - return NULL; + return nullptr; } /* Key format is specified in: https://tools.ietf.org/html/rfc7518#section-6. */ - for (jkey = jwk_keys->child; jkey != NULL; jkey = jkey->next) { - grpc_json *key_prop; - const char *alg = NULL; - const char *kid = NULL; - const char *kty = NULL; + for (jkey = jwk_keys->child; jkey != nullptr; jkey = jkey->next) { + grpc_json* key_prop; + const char* alg = nullptr; + const char* kid = nullptr; + const char* kty = nullptr; if (jkey->type != GRPC_JSON_OBJECT) continue; - for (key_prop = jkey->child; key_prop != NULL; key_prop = key_prop->next) { + for (key_prop = jkey->child; key_prop != nullptr; + key_prop = key_prop->next) { if (strcmp(key_prop->key, "alg") == 0 && key_prop->type == GRPC_JSON_STRING) { alg = key_prop->value; @@ -584,29 +585,29 @@ static EVP_PKEY *find_verification_key(grpc_exec_ctx *exec_ctx, kty = key_prop->value; } } - if (alg != NULL && kid != NULL && kty != NULL && + if (alg != nullptr && kid != nullptr && kty != nullptr && strcmp(kid, header_kid) == 0 && strcmp(alg, header_alg) == 0) { - return pkey_from_jwk(exec_ctx, jkey, kty); + return pkey_from_jwk(jkey, kty); } } gpr_log(GPR_ERROR, "Could not find matching key in key set for kid=%s and alg=%s", header_kid, header_alg); - return NULL; + return nullptr; } -static int verify_jwt_signature(EVP_PKEY *key, const char *alg, +static int verify_jwt_signature(EVP_PKEY* key, const char* alg, grpc_slice signature, grpc_slice signed_data) { - EVP_MD_CTX *md_ctx = EVP_MD_CTX_create(); - const EVP_MD *md = evp_md_from_alg(alg); + EVP_MD_CTX* md_ctx = EVP_MD_CTX_create(); + const EVP_MD* md = evp_md_from_alg(alg); int result = 0; - GPR_ASSERT(md != NULL); /* Checked before. */ - if (md_ctx == NULL) { + GPR_ASSERT(md != nullptr); /* Checked before. 
*/ + if (md_ctx == nullptr) { gpr_log(GPR_ERROR, "Could not create EVP_MD_CTX."); goto end; } - if (EVP_DigestVerifyInit(md_ctx, NULL, md, NULL, key) != 1) { + if (EVP_DigestVerifyInit(md_ctx, nullptr, md, nullptr, key) != 1) { gpr_log(GPR_ERROR, "EVP_DigestVerifyInit failed."); goto end; } @@ -627,21 +628,20 @@ static int verify_jwt_signature(EVP_PKEY *key, const char *alg, return result; } -static void on_keys_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *error) { - verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data; - grpc_json *json = json_from_http(&ctx->responses[HTTP_RESPONSE_KEYS]); - EVP_PKEY *verification_key = NULL; +static void on_keys_retrieved(void* user_data, grpc_error* error) { + verifier_cb_ctx* ctx = static_cast(user_data); + grpc_json* json = json_from_http(&ctx->responses[HTTP_RESPONSE_KEYS]); + EVP_PKEY* verification_key = nullptr; grpc_jwt_verifier_status status = GRPC_JWT_VERIFIER_GENERIC_ERROR; - grpc_jwt_claims *claims = NULL; + grpc_jwt_claims* claims = nullptr; - if (json == NULL) { + if (json == nullptr) { status = GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR; goto end; } verification_key = - find_verification_key(exec_ctx, json, ctx->header->alg, ctx->header->kid); - if (verification_key == NULL) { + find_verification_key(json, ctx->header->alg, ctx->header->kid); + if (verification_key == nullptr) { gpr_log(GPR_ERROR, "Could not find verification key with kid %s.", ctx->header->kid); status = GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR; @@ -658,34 +658,34 @@ static void on_keys_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, if (status == GRPC_JWT_VERIFIER_OK) { /* Pass ownership. */ claims = ctx->claims; - ctx->claims = NULL; + ctx->claims = nullptr; } end: - if (json != NULL) grpc_json_destroy(json); + if (json != nullptr) grpc_json_destroy(json); EVP_PKEY_free(verification_key); - ctx->user_cb(exec_ctx, ctx->user_data, status, claims); - verifier_cb_ctx_destroy(exec_ctx, ctx); + ctx->user_cb(ctx->user_data, status, claims); + verifier_cb_ctx_destroy(ctx); } -static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *error) { - const grpc_json *cur; - verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data; - const grpc_http_response *response = &ctx->responses[HTTP_RESPONSE_OPENID]; - grpc_json *json = json_from_http(response); +static void on_openid_config_retrieved(void* user_data, grpc_error* error) { + const grpc_json* cur; + verifier_cb_ctx* ctx = static_cast(user_data); + const grpc_http_response* response = &ctx->responses[HTTP_RESPONSE_OPENID]; + grpc_json* json = json_from_http(response); grpc_httpcli_request req; - const char *jwks_uri; + const char* jwks_uri; + grpc_resource_quota* resource_quota = nullptr; /* TODO(jboeuf): Cache the jwks_uri in order to avoid this hop next time. 
*/ - if (json == NULL) goto error; + if (json == nullptr) goto error; cur = find_property_by_name(json, "jwks_uri"); - if (cur == NULL) { + if (cur == nullptr) { gpr_log(GPR_ERROR, "Could not find jwks_uri in openid config."); goto error; } jwks_uri = validate_string_field(cur, "jwks_uri"); - if (jwks_uri == NULL) goto error; + if (jwks_uri == nullptr) goto error; if (strstr(jwks_uri, "https://") != jwks_uri) { gpr_log(GPR_ERROR, "Invalid non https jwks_uri: %s.", jwks_uri); goto error; @@ -693,9 +693,9 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, jwks_uri += 8; req.handshaker = &grpc_httpcli_ssl; req.host = gpr_strdup(jwks_uri); - req.http.path = strchr(jwks_uri, '/'); - if (req.http.path == NULL) { - req.http.path = ""; + req.http.path = const_cast(strchr(jwks_uri, '/')); + if (req.http.path == nullptr) { + req.http.path = (char*)""; } else { *(req.host + (req.http.path - jwks_uri)) = '\0'; } @@ -703,42 +703,40 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data, /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host channel. This would allow us to cancel an authentication query when under extreme memory pressure. */ - grpc_resource_quota *resource_quota = - grpc_resource_quota_create("jwt_verifier"); + resource_quota = grpc_resource_quota_create("jwt_verifier"); grpc_httpcli_get( - exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay), + &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, + grpc_core::ExecCtx::Get()->Now() + grpc_jwt_verifier_max_delay, GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx), &ctx->responses[HTTP_RESPONSE_KEYS]); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); grpc_json_destroy(json); gpr_free(req.host); return; error: - if (json != NULL) grpc_json_destroy(json); - ctx->user_cb(exec_ctx, ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, - NULL); - verifier_cb_ctx_destroy(exec_ctx, ctx); + if (json != nullptr) grpc_json_destroy(json); + ctx->user_cb(ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, nullptr); + verifier_cb_ctx_destroy(ctx); } -static email_key_mapping *verifier_get_mapping(grpc_jwt_verifier *v, - const char *email_domain) { +static email_key_mapping* verifier_get_mapping(grpc_jwt_verifier* v, + const char* email_domain) { size_t i; - if (v->mappings == NULL) return NULL; + if (v->mappings == nullptr) return nullptr; for (i = 0; i < v->num_mappings; i++) { if (strcmp(email_domain, v->mappings[i].email_domain) == 0) { return &v->mappings[i]; } } - return NULL; + return nullptr; } -static void verifier_put_mapping(grpc_jwt_verifier *v, const char *email_domain, - const char *key_url_prefix) { - email_key_mapping *mapping = verifier_get_mapping(v, email_domain); +static void verifier_put_mapping(grpc_jwt_verifier* v, const char* email_domain, + const char* key_url_prefix) { + email_key_mapping* mapping = verifier_get_mapping(v, email_domain); GPR_ASSERT(v->num_mappings < v->allocated_mappings); - if (mapping != NULL) { + if (mapping != nullptr) { gpr_free(mapping->key_url_prefix); mapping->key_url_prefix = gpr_strdup(key_url_prefix); return; @@ -751,39 +749,41 @@ static void verifier_put_mapping(grpc_jwt_verifier *v, const char *email_domain, /* Very non-sophisticated way to detect an email address. Should be good enough for now... 
*/ -const char *grpc_jwt_issuer_email_domain(const char *issuer) { - const char *at_sign = strchr(issuer, '@'); - if (at_sign == NULL) return NULL; - const char *email_domain = at_sign + 1; - if (*email_domain == '\0') return NULL; - const char *dot = strrchr(email_domain, '.'); - if (dot == NULL || dot == email_domain) return email_domain; +const char* grpc_jwt_issuer_email_domain(const char* issuer) { + const char* at_sign = strchr(issuer, '@'); + if (at_sign == nullptr) return nullptr; + const char* email_domain = at_sign + 1; + if (*email_domain == '\0') return nullptr; + const char* dot = strrchr(email_domain, '.'); + if (dot == nullptr || dot == email_domain) return email_domain; GPR_ASSERT(dot > email_domain); /* There may be a subdomain, we just want the domain. */ - dot = gpr_memrchr(email_domain, '.', (size_t)(dot - email_domain)); - if (dot == NULL) return email_domain; + dot = static_cast(gpr_memrchr( + (void*)email_domain, '.', static_cast(dot - email_domain))); + if (dot == nullptr) return email_domain; return dot + 1; } /* Takes ownership of ctx. */ -static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, - verifier_cb_ctx *ctx) { - const char *email_domain; - grpc_closure *http_cb; - char *path_prefix = NULL; - const char *iss; +static void retrieve_key_and_verify(verifier_cb_ctx* ctx) { + const char* email_domain; + grpc_closure* http_cb; + char* path_prefix = nullptr; + const char* iss; grpc_httpcli_request req; + grpc_resource_quota* resource_quota = nullptr; memset(&req, 0, sizeof(grpc_httpcli_request)); req.handshaker = &grpc_httpcli_ssl; http_response_index rsp_idx; - GPR_ASSERT(ctx != NULL && ctx->header != NULL && ctx->claims != NULL); + GPR_ASSERT(ctx != nullptr && ctx->header != nullptr && + ctx->claims != nullptr); iss = ctx->claims->iss; - if (ctx->header->kid == NULL) { + if (ctx->header->kid == nullptr) { gpr_log(GPR_ERROR, "Missing kid in jose header."); goto error; } - if (iss == NULL) { + if (iss == nullptr) { gpr_log(GPR_ERROR, "Missing iss in claims."); goto error; } @@ -794,17 +794,17 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, so we will rely instead on email/url mappings if we detect such an issuer. Part 4, on the other hand is implemented by both google and salesforce. */ email_domain = grpc_jwt_issuer_email_domain(iss); - if (email_domain != NULL) { - email_key_mapping *mapping; - GPR_ASSERT(ctx->verifier != NULL); + if (email_domain != nullptr) { + email_key_mapping* mapping; + GPR_ASSERT(ctx->verifier != nullptr); mapping = verifier_get_mapping(ctx->verifier, email_domain); - if (mapping == NULL) { + if (mapping == nullptr) { gpr_log(GPR_ERROR, "Missing mapping for issuer email."); goto error; } req.host = gpr_strdup(mapping->key_url_prefix); path_prefix = strchr(req.host, '/'); - if (path_prefix == NULL) { + if (path_prefix == nullptr) { gpr_asprintf(&req.http.path, "/%s", iss); } else { *(path_prefix++) = '\0'; @@ -816,7 +816,7 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, } else { req.host = gpr_strdup(strstr(iss, "https://") == iss ? iss + 8 : iss); path_prefix = strchr(req.host, '/'); - if (path_prefix == NULL) { + if (path_prefix == nullptr) { req.http.path = gpr_strdup(GRPC_OPENID_CONFIG_URL_SUFFIX); } else { *(path_prefix++) = 0; @@ -831,86 +831,85 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx, /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host channel. This would allow us to cancel an authentication query when under extreme memory pressure. 
*/ - grpc_resource_quota *resource_quota = - grpc_resource_quota_create("jwt_verifier"); + resource_quota = grpc_resource_quota_create("jwt_verifier"); grpc_httpcli_get( - exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, - gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay), - http_cb, &ctx->responses[rsp_idx]); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req, + grpc_core::ExecCtx::Get()->Now() + grpc_jwt_verifier_max_delay, http_cb, + &ctx->responses[rsp_idx]); + grpc_resource_quota_unref_internal(resource_quota); gpr_free(req.host); gpr_free(req.http.path); return; error: - ctx->user_cb(exec_ctx, ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, - NULL); - verifier_cb_ctx_destroy(exec_ctx, ctx); + ctx->user_cb(ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, nullptr); + verifier_cb_ctx_destroy(ctx); } -void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx, - grpc_jwt_verifier *verifier, - grpc_pollset *pollset, const char *jwt, - const char *audience, +void grpc_jwt_verifier_verify(grpc_jwt_verifier* verifier, + grpc_pollset* pollset, const char* jwt, + const char* audience, grpc_jwt_verification_done_cb cb, - void *user_data) { - const char *dot = NULL; - grpc_json *json; - jose_header *header = NULL; - grpc_jwt_claims *claims = NULL; + void* user_data) { + const char* dot = nullptr; + grpc_json* json; + jose_header* header = nullptr; + grpc_jwt_claims* claims = nullptr; grpc_slice header_buffer; grpc_slice claims_buffer; grpc_slice signature; size_t signed_jwt_len; - const char *cur = jwt; + const char* cur = jwt; - GPR_ASSERT(verifier != NULL && jwt != NULL && audience != NULL && cb != NULL); + GPR_ASSERT(verifier != nullptr && jwt != nullptr && audience != nullptr && + cb != nullptr); dot = strchr(cur, '.'); - if (dot == NULL) goto error; - json = parse_json_part_from_jwt(exec_ctx, cur, (size_t)(dot - cur), + if (dot == nullptr) goto error; + json = parse_json_part_from_jwt(cur, static_cast(dot - cur), &header_buffer); - if (json == NULL) goto error; - header = jose_header_from_json(exec_ctx, json, header_buffer); - if (header == NULL) goto error; + if (json == nullptr) goto error; + header = jose_header_from_json(json, header_buffer); + if (header == nullptr) goto error; cur = dot + 1; dot = strchr(cur, '.'); - if (dot == NULL) goto error; - json = parse_json_part_from_jwt(exec_ctx, cur, (size_t)(dot - cur), + if (dot == nullptr) goto error; + json = parse_json_part_from_jwt(cur, static_cast(dot - cur), &claims_buffer); - if (json == NULL) goto error; - claims = grpc_jwt_claims_from_json(exec_ctx, json, claims_buffer); - if (claims == NULL) goto error; + if (json == nullptr) goto error; + claims = grpc_jwt_claims_from_json(json, claims_buffer); + if (claims == nullptr) goto error; - signed_jwt_len = (size_t)(dot - jwt); + signed_jwt_len = static_cast(dot - jwt); cur = dot + 1; - signature = grpc_base64_decode(exec_ctx, cur, 1); + signature = grpc_base64_decode(cur, 1); if (GRPC_SLICE_IS_EMPTY(signature)) goto error; retrieve_key_and_verify( - exec_ctx, verifier_cb_ctx_create(verifier, pollset, header, claims, audience, signature, jwt, signed_jwt_len, user_data, cb)); return; error: - if (header != NULL) jose_header_destroy(exec_ctx, header); - if (claims != NULL) grpc_jwt_claims_destroy(exec_ctx, claims); - cb(exec_ctx, user_data, GRPC_JWT_VERIFIER_BAD_FORMAT, NULL); + if (header != nullptr) jose_header_destroy(header); + if (claims != nullptr) 
grpc_jwt_claims_destroy(claims); + cb(user_data, GRPC_JWT_VERIFIER_BAD_FORMAT, nullptr); } -grpc_jwt_verifier *grpc_jwt_verifier_create( - const grpc_jwt_verifier_email_domain_key_url_mapping *mappings, +grpc_jwt_verifier* grpc_jwt_verifier_create( + const grpc_jwt_verifier_email_domain_key_url_mapping* mappings, size_t num_mappings) { - grpc_jwt_verifier *v = gpr_zalloc(sizeof(grpc_jwt_verifier)); + grpc_jwt_verifier* v = + static_cast(gpr_zalloc(sizeof(grpc_jwt_verifier))); grpc_httpcli_context_init(&v->http_ctx); /* We know at least of one mapping. */ v->allocated_mappings = 1 + num_mappings; - v->mappings = gpr_malloc(v->allocated_mappings * sizeof(email_key_mapping)); + v->mappings = static_cast( + gpr_malloc(v->allocated_mappings * sizeof(email_key_mapping))); verifier_put_mapping(v, GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN, GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX); /* User-Provided mappings. */ - if (mappings != NULL) { + if (mappings != nullptr) { size_t i; for (i = 0; i < num_mappings; i++) { verifier_put_mapping(v, mappings[i].email_domain, @@ -920,11 +919,11 @@ grpc_jwt_verifier *grpc_jwt_verifier_create( return v; } -void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_verifier *v) { +void grpc_jwt_verifier_destroy(grpc_jwt_verifier* v) { size_t i; - if (v == NULL) return; - grpc_httpcli_context_destroy(exec_ctx, &v->http_ctx); - if (v->mappings != NULL) { + if (v == nullptr) return; + grpc_httpcli_context_destroy(&v->http_ctx); + if (v->mappings != nullptr) { for (i = 0; i < v->num_mappings; i++) { gpr_free(v->mappings[i].email_domain); gpr_free(v->mappings[i].key_url_prefix); diff --git a/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.h b/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.h index 8fac452d4..cdb09870b 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/jwt/jwt_verifier.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_VERIFIER_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_VERIFIER_H +#include + #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/json/json.h" @@ -45,25 +47,25 @@ typedef enum { GRPC_JWT_VERIFIER_GENERIC_ERROR } grpc_jwt_verifier_status; -const char *grpc_jwt_verifier_status_to_string(grpc_jwt_verifier_status status); +const char* grpc_jwt_verifier_status_to_string(grpc_jwt_verifier_status status); /* --- grpc_jwt_claims. --- */ typedef struct grpc_jwt_claims grpc_jwt_claims; -void grpc_jwt_claims_destroy(grpc_exec_ctx *exec_ctx, grpc_jwt_claims *claims); +void grpc_jwt_claims_destroy(grpc_jwt_claims* claims); /* Returns the whole JSON tree of the claims. 
*/ -const grpc_json *grpc_jwt_claims_json(const grpc_jwt_claims *claims); +const grpc_json* grpc_jwt_claims_json(const grpc_jwt_claims* claims); /* Access to registered claims in https://tools.ietf.org/html/rfc7519#page-9 */ -const char *grpc_jwt_claims_subject(const grpc_jwt_claims *claims); -const char *grpc_jwt_claims_issuer(const grpc_jwt_claims *claims); -const char *grpc_jwt_claims_id(const grpc_jwt_claims *claims); -const char *grpc_jwt_claims_audience(const grpc_jwt_claims *claims); -gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims *claims); -gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims *claims); -gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims *claims); +const char* grpc_jwt_claims_subject(const grpc_jwt_claims* claims); +const char* grpc_jwt_claims_issuer(const grpc_jwt_claims* claims); +const char* grpc_jwt_claims_id(const grpc_jwt_claims* claims); +const char* grpc_jwt_claims_audience(const grpc_jwt_claims* claims); +gpr_timespec grpc_jwt_claims_issued_at(const grpc_jwt_claims* claims); +gpr_timespec grpc_jwt_claims_expires_at(const grpc_jwt_claims* claims); +gpr_timespec grpc_jwt_claims_not_before(const grpc_jwt_claims* claims); /* --- grpc_jwt_verifier. --- */ @@ -71,17 +73,17 @@ typedef struct grpc_jwt_verifier grpc_jwt_verifier; typedef struct { /* The email domain is the part after the @ sign. */ - const char *email_domain; + const char* email_domain; /* The key url prefix will be used to get the public key from the issuer: https:/// Therefore the key_url_prefix must NOT contain https://. */ - const char *key_url_prefix; + const char* key_url_prefix; } grpc_jwt_verifier_email_domain_key_url_mapping; /* Globals to control the verifier. Not thread-safe. */ extern gpr_timespec grpc_jwt_verifier_clock_skew; -extern gpr_timespec grpc_jwt_verifier_max_delay; +extern grpc_millis grpc_jwt_verifier_max_delay; /* The verifier can be created with some custom mappings to help with key discovery in the case where the issuer is an email address. @@ -89,37 +91,33 @@ extern gpr_timespec grpc_jwt_verifier_max_delay; A verifier object has one built-in mapping (unless overridden): GRPC_GOOGLE_SERVICE_ACCOUNTS_EMAIL_DOMAIN -> GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX.*/ -grpc_jwt_verifier *grpc_jwt_verifier_create( - const grpc_jwt_verifier_email_domain_key_url_mapping *mappings, +grpc_jwt_verifier* grpc_jwt_verifier_create( + const grpc_jwt_verifier_email_domain_key_url_mapping* mappings, size_t num_mappings); /*The verifier must not be destroyed if there are still outstanding callbacks.*/ -void grpc_jwt_verifier_destroy(grpc_exec_ctx *exec_ctx, - grpc_jwt_verifier *verifier); +void grpc_jwt_verifier_destroy(grpc_jwt_verifier* verifier); /* User provided callback that will be called when the verification of the JWT is done (maybe in another thread). It is the responsibility of the callee to call grpc_jwt_claims_destroy on the claims. */ -typedef void (*grpc_jwt_verification_done_cb)(grpc_exec_ctx *exec_ctx, - void *user_data, +typedef void (*grpc_jwt_verification_done_cb)(void* user_data, grpc_jwt_verifier_status status, - grpc_jwt_claims *claims); + grpc_jwt_claims* claims); /* Verifies for the JWT for the given expected audience. 
*/ -void grpc_jwt_verifier_verify(grpc_exec_ctx *exec_ctx, - grpc_jwt_verifier *verifier, - grpc_pollset *pollset, const char *jwt, - const char *audience, +void grpc_jwt_verifier_verify(grpc_jwt_verifier* verifier, + grpc_pollset* pollset, const char* jwt, + const char* audience, grpc_jwt_verification_done_cb cb, - void *user_data); + void* user_data); /* --- TESTING ONLY exposed functions. --- */ -grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_exec_ctx *exec_ctx, - grpc_json *json, grpc_slice buffer); -grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims *claims, - const char *audience); -const char *grpc_jwt_issuer_email_domain(const char *issuer); +grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer); +grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims* claims, + const char* audience); +const char* grpc_jwt_issuer_email_domain(const char* issuer); #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_VERIFIER_H */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc similarity index 57% rename from Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.c rename to Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc index 10b270c49..212902973 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/security/credentials/oauth2/oauth2_credentials.h" #include @@ -32,26 +34,26 @@ // int grpc_auth_refresh_token_is_valid( - const grpc_auth_refresh_token *refresh_token) { - return (refresh_token != NULL) && + const grpc_auth_refresh_token* refresh_token) { + return (refresh_token != nullptr) && strcmp(refresh_token->type, GRPC_AUTH_JSON_TYPE_INVALID); } grpc_auth_refresh_token grpc_auth_refresh_token_create_from_json( - const grpc_json *json) { + const grpc_json* json) { grpc_auth_refresh_token result; - const char *prop_value; + const char* prop_value; int success = 0; memset(&result, 0, sizeof(grpc_auth_refresh_token)); result.type = GRPC_AUTH_JSON_TYPE_INVALID; - if (json == NULL) { + if (json == nullptr) { gpr_log(GPR_ERROR, "Invalid json."); goto end; } prop_value = grpc_json_get_string_property(json, "type"); - if (prop_value == NULL || + if (prop_value == nullptr || strcmp(prop_value, GRPC_AUTH_JSON_TYPE_AUTHORIZED_USER)) { goto end; } @@ -72,30 +74,30 @@ grpc_auth_refresh_token grpc_auth_refresh_token_create_from_json( } grpc_auth_refresh_token grpc_auth_refresh_token_create_from_string( - const char *json_string) { - char *scratchpad = gpr_strdup(json_string); - grpc_json *json = grpc_json_parse_string(scratchpad); + const char* json_string) { + char* scratchpad = gpr_strdup(json_string); + grpc_json* json = grpc_json_parse_string(scratchpad); grpc_auth_refresh_token result = grpc_auth_refresh_token_create_from_json(json); - if (json != NULL) grpc_json_destroy(json); + if (json != nullptr) grpc_json_destroy(json); gpr_free(scratchpad); return result; } -void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token) { - if (refresh_token == NULL) return; +void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token* refresh_token) { + if (refresh_token == nullptr) return; refresh_token->type = GRPC_AUTH_JSON_TYPE_INVALID; - if (refresh_token->client_id != NULL) { + if 
(refresh_token->client_id != nullptr) { gpr_free(refresh_token->client_id); - refresh_token->client_id = NULL; + refresh_token->client_id = nullptr; } - if (refresh_token->client_secret != NULL) { + if (refresh_token->client_secret != nullptr) { gpr_free(refresh_token->client_secret); - refresh_token->client_secret = NULL; + refresh_token->client_secret = nullptr; } - if (refresh_token->refresh_token != NULL) { + if (refresh_token->refresh_token != nullptr) { gpr_free(refresh_token->refresh_token); - refresh_token->refresh_token = NULL; + refresh_token->refresh_token = nullptr; } } @@ -103,34 +105,33 @@ void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token) { // Oauth2 Token Fetcher credentials. // -static void oauth2_token_fetcher_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { - grpc_oauth2_token_fetcher_credentials *c = - (grpc_oauth2_token_fetcher_credentials *)creds; - GRPC_MDELEM_UNREF(exec_ctx, c->access_token_md); +static void oauth2_token_fetcher_destruct(grpc_call_credentials* creds) { + grpc_oauth2_token_fetcher_credentials* c = + reinterpret_cast(creds); + GRPC_MDELEM_UNREF(c->access_token_md); gpr_mu_destroy(&c->mu); - grpc_pollset_set_destroy(exec_ctx, - grpc_polling_entity_pollset_set(&c->pollent)); - grpc_httpcli_context_destroy(exec_ctx, &c->httpcli_context); + grpc_pollset_set_destroy(grpc_polling_entity_pollset_set(&c->pollent)); + grpc_httpcli_context_destroy(&c->httpcli_context); } grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( - grpc_exec_ctx *exec_ctx, const grpc_http_response *response, - grpc_mdelem *token_md, gpr_timespec *token_lifetime) { - char *null_terminated_body = NULL; - char *new_access_token = NULL; + const grpc_http_response* response, grpc_mdelem* token_md, + grpc_millis* token_lifetime) { + char* null_terminated_body = nullptr; + char* new_access_token = nullptr; grpc_credentials_status status = GRPC_CREDENTIALS_OK; - grpc_json *json = NULL; + grpc_json* json = nullptr; - if (response == NULL) { + if (response == nullptr) { gpr_log(GPR_ERROR, "Received NULL response."); status = GRPC_CREDENTIALS_ERROR; goto end; } if (response->body_length > 0) { - null_terminated_body = gpr_malloc(response->body_length + 1); + null_terminated_body = + static_cast(gpr_malloc(response->body_length + 1)); null_terminated_body[response->body_length] = '\0'; memcpy(null_terminated_body, response->body, response->body_length); } @@ -138,16 +139,16 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response( if (response->status != 200) { gpr_log(GPR_ERROR, "Call to http server ended with error %d [%s].", response->status, - null_terminated_body != NULL ? null_terminated_body : ""); + null_terminated_body != nullptr ? 
null_terminated_body : ""); status = GRPC_CREDENTIALS_ERROR; goto end; } else { - grpc_json *access_token = NULL; - grpc_json *token_type = NULL; - grpc_json *expires_in = NULL; - grpc_json *ptr; + grpc_json* access_token = nullptr; + grpc_json* token_type = nullptr; + grpc_json* expires_in = nullptr; + grpc_json* ptr; json = grpc_json_parse_string(null_terminated_body); - if (json == NULL) { + if (json == nullptr) { gpr_log(GPR_ERROR, "Could not parse JSON from %s", null_terminated_body); status = GRPC_CREDENTIALS_ERROR; goto end; @@ -166,29 +167,26 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response( expires_in = ptr; } } - if (access_token == NULL || access_token->type != GRPC_JSON_STRING) { + if (access_token == nullptr || access_token->type != GRPC_JSON_STRING) { gpr_log(GPR_ERROR, "Missing or invalid access_token in JSON."); status = GRPC_CREDENTIALS_ERROR; goto end; } - if (token_type == NULL || token_type->type != GRPC_JSON_STRING) { + if (token_type == nullptr || token_type->type != GRPC_JSON_STRING) { gpr_log(GPR_ERROR, "Missing or invalid token_type in JSON."); status = GRPC_CREDENTIALS_ERROR; goto end; } - if (expires_in == NULL || expires_in->type != GRPC_JSON_NUMBER) { + if (expires_in == nullptr || expires_in->type != GRPC_JSON_NUMBER) { gpr_log(GPR_ERROR, "Missing or invalid expires_in in JSON."); status = GRPC_CREDENTIALS_ERROR; goto end; } gpr_asprintf(&new_access_token, "%s %s", token_type->value, access_token->value); - token_lifetime->tv_sec = strtol(expires_in->value, NULL, 10); - token_lifetime->tv_nsec = 0; - token_lifetime->clock_type = GPR_TIMESPAN; - if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(exec_ctx, *token_md); + *token_lifetime = strtol(expires_in->value, nullptr, 10) * GPR_MS_PER_SEC; + if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(*token_md); *token_md = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY), grpc_slice_from_copied_string(new_access_token)); status = GRPC_CREDENTIALS_OK; @@ -196,42 +194,40 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response( end: if (status != GRPC_CREDENTIALS_OK && !GRPC_MDISNULL(*token_md)) { - GRPC_MDELEM_UNREF(exec_ctx, *token_md); + GRPC_MDELEM_UNREF(*token_md); *token_md = GRPC_MDNULL; } - if (null_terminated_body != NULL) gpr_free(null_terminated_body); - if (new_access_token != NULL) gpr_free(new_access_token); - if (json != NULL) grpc_json_destroy(json); + if (null_terminated_body != nullptr) gpr_free(null_terminated_body); + if (new_access_token != nullptr) gpr_free(new_access_token); + if (json != nullptr) grpc_json_destroy(json); return status; } -static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx, - void *user_data, - grpc_error *error) { +static void on_oauth2_token_fetcher_http_response(void* user_data, + grpc_error* error) { GRPC_LOG_IF_ERROR("oauth_fetch", GRPC_ERROR_REF(error)); - grpc_credentials_metadata_request *r = - (grpc_credentials_metadata_request *)user_data; - grpc_oauth2_token_fetcher_credentials *c = - (grpc_oauth2_token_fetcher_credentials *)r->creds; + grpc_credentials_metadata_request* r = + static_cast(user_data); + grpc_oauth2_token_fetcher_credentials* c = + reinterpret_cast(r->creds); grpc_mdelem access_token_md = GRPC_MDNULL; - gpr_timespec token_lifetime; + grpc_millis token_lifetime; grpc_credentials_status status = grpc_oauth2_token_fetcher_credentials_parse_server_response( - exec_ctx, &r->response, &access_token_md, &token_lifetime); + &r->response, &access_token_md, &token_lifetime); 
// Update cache and grab list of pending requests. gpr_mu_lock(&c->mu); c->token_fetch_pending = false; c->access_token_md = GRPC_MDELEM_REF(access_token_md); - c->token_expiration = - status == GRPC_CREDENTIALS_OK - ? gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), token_lifetime) - : gpr_inf_past(GPR_CLOCK_REALTIME); - grpc_oauth2_pending_get_request_metadata *pending_request = + c->token_expiration = status == GRPC_CREDENTIALS_OK + ? grpc_core::ExecCtx::Get()->Now() + token_lifetime + : 0; + grpc_oauth2_pending_get_request_metadata* pending_request = c->pending_requests; - c->pending_requests = NULL; + c->pending_requests = nullptr; gpr_mu_unlock(&c->mu); // Invoke callbacks for all pending requests. - while (pending_request != NULL) { + while (pending_request != nullptr) { if (status == GRPC_CREDENTIALS_OK) { grpc_credentials_mdelem_array_add(pending_request->md_array, access_token_md); @@ -239,53 +235,50 @@ static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx *exec_ctx, error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Error occured when fetching oauth2 token.", &error, 1); } - GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata, error); + GRPC_CLOSURE_SCHED(pending_request->on_request_metadata, error); grpc_polling_entity_del_from_pollset_set( - exec_ctx, pending_request->pollent, - grpc_polling_entity_pollset_set(&c->pollent)); - grpc_oauth2_pending_get_request_metadata *prev = pending_request; + pending_request->pollent, grpc_polling_entity_pollset_set(&c->pollent)); + grpc_oauth2_pending_get_request_metadata* prev = pending_request; pending_request = pending_request->next; gpr_free(prev); } - GRPC_MDELEM_UNREF(exec_ctx, access_token_md); - grpc_call_credentials_unref(exec_ctx, r->creds); - grpc_credentials_metadata_request_destroy(exec_ctx, r); + GRPC_MDELEM_UNREF(access_token_md); + grpc_call_credentials_unref(r->creds); + grpc_credentials_metadata_request_destroy(r); } static bool oauth2_token_fetcher_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error) { - grpc_oauth2_token_fetcher_credentials *c = - (grpc_oauth2_token_fetcher_credentials *)creds; + grpc_call_credentials* creds, grpc_polling_entity* pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array, + grpc_closure* on_request_metadata, grpc_error** error) { + grpc_oauth2_token_fetcher_credentials* c = + reinterpret_cast(creds); // Check if we can use the cached token. - gpr_timespec refresh_threshold = gpr_time_from_seconds( - GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS, GPR_TIMESPAN); + grpc_millis refresh_threshold = + GRPC_SECURE_TOKEN_REFRESH_THRESHOLD_SECS * GPR_MS_PER_SEC; grpc_mdelem cached_access_token_md = GRPC_MDNULL; gpr_mu_lock(&c->mu); if (!GRPC_MDISNULL(c->access_token_md) && - (gpr_time_cmp( - gpr_time_sub(c->token_expiration, gpr_now(GPR_CLOCK_REALTIME)), - refresh_threshold) > 0)) { + (c->token_expiration - grpc_core::ExecCtx::Get()->Now() > + refresh_threshold)) { cached_access_token_md = GRPC_MDELEM_REF(c->access_token_md); } if (!GRPC_MDISNULL(cached_access_token_md)) { gpr_mu_unlock(&c->mu); grpc_credentials_mdelem_array_add(md_array, cached_access_token_md); - GRPC_MDELEM_UNREF(exec_ctx, cached_access_token_md); + GRPC_MDELEM_UNREF(cached_access_token_md); return true; } // Couldn't get the token from the cache. 
// Add request to c->pending_requests and start a new fetch if needed. - grpc_oauth2_pending_get_request_metadata *pending_request = - (grpc_oauth2_pending_get_request_metadata *)gpr_malloc( - sizeof(*pending_request)); + grpc_oauth2_pending_get_request_metadata* pending_request = + static_cast( + gpr_malloc(sizeof(*pending_request))); pending_request->md_array = md_array; pending_request->on_request_metadata = on_request_metadata; pending_request->pollent = pollent; grpc_polling_entity_add_to_pollset_set( - exec_ctx, pollent, grpc_polling_entity_pollset_set(&c->pollent)); + pollent, grpc_polling_entity_pollset_set(&c->pollent)); pending_request->next = c->pending_requests; c->pending_requests = pending_request; bool start_fetch = false; @@ -296,33 +289,33 @@ static bool oauth2_token_fetcher_get_request_metadata( gpr_mu_unlock(&c->mu); if (start_fetch) { grpc_call_credentials_ref(creds); - c->fetch_func( - exec_ctx, grpc_credentials_metadata_request_create(creds), - &c->httpcli_context, &c->pollent, on_oauth2_token_fetcher_http_response, - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), refresh_threshold)); + c->fetch_func(grpc_credentials_metadata_request_create(creds), + &c->httpcli_context, &c->pollent, + on_oauth2_token_fetcher_http_response, + grpc_core::ExecCtx::Get()->Now() + refresh_threshold); } return false; } static void oauth2_token_fetcher_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { - grpc_oauth2_token_fetcher_credentials *c = - (grpc_oauth2_token_fetcher_credentials *)creds; + grpc_call_credentials* creds, grpc_credentials_mdelem_array* md_array, + grpc_error* error) { + grpc_oauth2_token_fetcher_credentials* c = + reinterpret_cast(creds); gpr_mu_lock(&c->mu); - grpc_oauth2_pending_get_request_metadata *prev = NULL; - grpc_oauth2_pending_get_request_metadata *pending_request = + grpc_oauth2_pending_get_request_metadata* prev = nullptr; + grpc_oauth2_pending_get_request_metadata* pending_request = c->pending_requests; - while (pending_request != NULL) { + while (pending_request != nullptr) { if (pending_request->md_array == md_array) { // Remove matching pending request from the list. - if (prev != NULL) { + if (prev != nullptr) { prev->next = pending_request->next; } else { c->pending_requests = pending_request->next; } // Invoke the callback immediately with an error. 
- GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata, + GRPC_CLOSURE_SCHED(pending_request->on_request_metadata, GRPC_ERROR_REF(error)); gpr_free(pending_request); break; @@ -334,13 +327,13 @@ static void oauth2_token_fetcher_cancel_get_request_metadata( GRPC_ERROR_UNREF(error); } -static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials *c, +static void init_oauth2_token_fetcher(grpc_oauth2_token_fetcher_credentials* c, grpc_fetch_oauth2_func fetch_func) { memset(c, 0, sizeof(grpc_oauth2_token_fetcher_credentials)); c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2; gpr_ref_init(&c->base.refcount, 1); gpr_mu_init(&c->mu); - c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME); + c->token_expiration = 0; c->fetch_func = fetch_func; c->pollent = grpc_polling_entity_create_from_pollset_set(grpc_pollset_set_create()); @@ -356,35 +349,36 @@ static grpc_call_credentials_vtable compute_engine_vtable = { oauth2_token_fetcher_cancel_get_request_metadata}; static void compute_engine_fetch_oauth2( - grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req, - grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent, - grpc_iomgr_cb_func response_cb, gpr_timespec deadline) { - grpc_http_header header = {"Metadata-Flavor", "Google"}; + grpc_credentials_metadata_request* metadata_req, + grpc_httpcli_context* httpcli_context, grpc_polling_entity* pollent, + grpc_iomgr_cb_func response_cb, grpc_millis deadline) { + grpc_http_header header = {(char*)"Metadata-Flavor", (char*)"Google"}; grpc_httpcli_request request; memset(&request, 0, sizeof(grpc_httpcli_request)); - request.host = GRPC_COMPUTE_ENGINE_METADATA_HOST; - request.http.path = GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH; + request.host = (char*)GRPC_COMPUTE_ENGINE_METADATA_HOST; + request.http.path = (char*)GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH; request.http.hdr_count = 1; request.http.hdrs = &header; /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host channel. This would allow us to cancel an authentication query when under extreme memory pressure. */ - grpc_resource_quota *resource_quota = + grpc_resource_quota* resource_quota = grpc_resource_quota_create("oauth2_credentials"); grpc_httpcli_get( - exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline, + httpcli_context, pollent, resource_quota, &request, deadline, GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx), &metadata_req->response); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); } -grpc_call_credentials *grpc_google_compute_engine_credentials_create( - void *reserved) { - grpc_oauth2_token_fetcher_credentials *c = - gpr_malloc(sizeof(grpc_oauth2_token_fetcher_credentials)); +grpc_call_credentials* grpc_google_compute_engine_credentials_create( + void* reserved) { + grpc_oauth2_token_fetcher_credentials* c = + static_cast( + gpr_malloc(sizeof(grpc_oauth2_token_fetcher_credentials))); GRPC_API_TRACE("grpc_compute_engine_credentials_create(reserved=%p)", 1, (reserved)); - GPR_ASSERT(reserved == NULL); + GPR_ASSERT(reserved == nullptr); init_oauth2_token_fetcher(c, compute_engine_fetch_oauth2); c->base.vtable = &compute_engine_vtable; return &c->base; @@ -394,12 +388,11 @@ grpc_call_credentials *grpc_google_compute_engine_credentials_create( // Google Refresh Token credentials. 
// -static void refresh_token_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { - grpc_google_refresh_token_credentials *c = - (grpc_google_refresh_token_credentials *)creds; +static void refresh_token_destruct(grpc_call_credentials* creds) { + grpc_google_refresh_token_credentials* c = + reinterpret_cast(creds); grpc_auth_refresh_token_destruct(&c->refresh_token); - oauth2_token_fetcher_destruct(exec_ctx, &c->base.base); + oauth2_token_fetcher_destruct(&c->base.base); } static grpc_call_credentials_vtable refresh_token_vtable = { @@ -407,58 +400,60 @@ static grpc_call_credentials_vtable refresh_token_vtable = { oauth2_token_fetcher_cancel_get_request_metadata}; static void refresh_token_fetch_oauth2( - grpc_exec_ctx *exec_ctx, grpc_credentials_metadata_request *metadata_req, - grpc_httpcli_context *httpcli_context, grpc_polling_entity *pollent, - grpc_iomgr_cb_func response_cb, gpr_timespec deadline) { - grpc_google_refresh_token_credentials *c = - (grpc_google_refresh_token_credentials *)metadata_req->creds; - grpc_http_header header = {"Content-Type", - "application/x-www-form-urlencoded"}; + grpc_credentials_metadata_request* metadata_req, + grpc_httpcli_context* httpcli_context, grpc_polling_entity* pollent, + grpc_iomgr_cb_func response_cb, grpc_millis deadline) { + grpc_google_refresh_token_credentials* c = + reinterpret_cast( + metadata_req->creds); + grpc_http_header header = {(char*)"Content-Type", + (char*)"application/x-www-form-urlencoded"}; grpc_httpcli_request request; - char *body = NULL; + char* body = nullptr; gpr_asprintf(&body, GRPC_REFRESH_TOKEN_POST_BODY_FORMAT_STRING, c->refresh_token.client_id, c->refresh_token.client_secret, c->refresh_token.refresh_token); memset(&request, 0, sizeof(grpc_httpcli_request)); - request.host = GRPC_GOOGLE_OAUTH2_SERVICE_HOST; - request.http.path = GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH; + request.host = (char*)GRPC_GOOGLE_OAUTH2_SERVICE_HOST; + request.http.path = (char*)GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH; request.http.hdr_count = 1; request.http.hdrs = &header; request.handshaker = &grpc_httpcli_ssl; /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host channel. This would allow us to cancel an authentication query when under extreme memory pressure. 
*/ - grpc_resource_quota *resource_quota = + grpc_resource_quota* resource_quota = grpc_resource_quota_create("oauth2_credentials_refresh"); grpc_httpcli_post( - exec_ctx, httpcli_context, pollent, resource_quota, &request, body, - strlen(body), deadline, + httpcli_context, pollent, resource_quota, &request, body, strlen(body), + deadline, GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx), &metadata_req->response); - grpc_resource_quota_unref_internal(exec_ctx, resource_quota); + grpc_resource_quota_unref_internal(resource_quota); gpr_free(body); } -grpc_call_credentials * +grpc_call_credentials* grpc_refresh_token_credentials_create_from_auth_refresh_token( grpc_auth_refresh_token refresh_token) { - grpc_google_refresh_token_credentials *c; + grpc_google_refresh_token_credentials* c; if (!grpc_auth_refresh_token_is_valid(&refresh_token)) { gpr_log(GPR_ERROR, "Invalid input for refresh token credentials creation"); - return NULL; + return nullptr; } - c = gpr_zalloc(sizeof(grpc_google_refresh_token_credentials)); + c = static_cast( + gpr_zalloc(sizeof(grpc_google_refresh_token_credentials))); init_oauth2_token_fetcher(&c->base, refresh_token_fetch_oauth2); c->base.base.vtable = &refresh_token_vtable; c->refresh_token = refresh_token; return &c->base.base; } -static char *create_loggable_refresh_token(grpc_auth_refresh_token *token) { +static char* create_loggable_refresh_token(grpc_auth_refresh_token* token) { if (strcmp(token->type, GRPC_AUTH_JSON_TYPE_INVALID) == 0) { return gpr_strdup(""); } - char *loggable_token = NULL; + char* loggable_token = nullptr; gpr_asprintf(&loggable_token, "{\n type: %s\n client_id: %s\n client_secret: " "\n refresh_token: \n}", @@ -466,19 +461,19 @@ static char *create_loggable_refresh_token(grpc_auth_refresh_token *token) { return loggable_token; } -grpc_call_credentials *grpc_google_refresh_token_credentials_create( - const char *json_refresh_token, void *reserved) { +grpc_call_credentials* grpc_google_refresh_token_credentials_create( + const char* json_refresh_token, void* reserved) { grpc_auth_refresh_token token = grpc_auth_refresh_token_create_from_string(json_refresh_token); - if (GRPC_TRACER_ON(grpc_api_trace)) { - char *loggable_token = create_loggable_refresh_token(&token); + if (grpc_api_trace.enabled()) { + char* loggable_token = create_loggable_refresh_token(&token); gpr_log(GPR_INFO, "grpc_refresh_token_credentials_create(json_refresh_token=%s, " "reserved=%p)", loggable_token, reserved); gpr_free(loggable_token); } - GPR_ASSERT(reserved == NULL); + GPR_ASSERT(reserved == nullptr); return grpc_refresh_token_credentials_create_from_auth_refresh_token(token); } @@ -486,25 +481,25 @@ grpc_call_credentials *grpc_google_refresh_token_credentials_create( // Oauth2 Access Token credentials. 
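/* Illustrative sketch, not part of the upstream diff: wrapping a pre-fetched
 * OAuth2 access token into call credentials via the public constructor
 * implemented in this section, then attaching it to channel credentials.
 * The token string is a placeholder. */
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

static grpc_channel_credentials* attach_access_token(
    grpc_channel_credentials* ssl_creds) {
  grpc_call_credentials* call_creds =
      grpc_access_token_credentials_create("my-access-token",
                                           /*reserved=*/nullptr);
  /* The composite holds its own references; release the local one. */
  grpc_channel_credentials* composite =
      grpc_composite_channel_credentials_create(ssl_creds, call_creds,
                                                /*reserved=*/nullptr);
  grpc_call_credentials_release(call_creds);
  return composite;
}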
// -static void access_token_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { - grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds; - GRPC_MDELEM_UNREF(exec_ctx, c->access_token_md); +static void access_token_destruct(grpc_call_credentials* creds) { + grpc_access_token_credentials* c = + reinterpret_cast(creds); + GRPC_MDELEM_UNREF(c->access_token_md); } static bool access_token_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_polling_entity *pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, grpc_closure *on_request_metadata, - grpc_error **error) { - grpc_access_token_credentials *c = (grpc_access_token_credentials *)creds; + grpc_call_credentials* creds, grpc_polling_entity* pollent, + grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array, + grpc_closure* on_request_metadata, grpc_error** error) { + grpc_access_token_credentials* c = + reinterpret_cast(creds); grpc_credentials_mdelem_array_add(md_array, c->access_token_md); return true; } static void access_token_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *c, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { + grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array, + grpc_error* error) { GRPC_ERROR_UNREF(error); } @@ -512,25 +507,26 @@ static grpc_call_credentials_vtable access_token_vtable = { access_token_destruct, access_token_get_request_metadata, access_token_cancel_get_request_metadata}; -grpc_call_credentials *grpc_access_token_credentials_create( - const char *access_token, void *reserved) { - grpc_access_token_credentials *c = - gpr_zalloc(sizeof(grpc_access_token_credentials)); +grpc_call_credentials* grpc_access_token_credentials_create( + const char* access_token, void* reserved) { + grpc_access_token_credentials* c = + static_cast( + gpr_zalloc(sizeof(grpc_access_token_credentials))); GRPC_API_TRACE( "grpc_access_token_credentials_create(access_token=, " "reserved=%p)", 1, (reserved)); - GPR_ASSERT(reserved == NULL); + GPR_ASSERT(reserved == nullptr); c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2; c->base.vtable = &access_token_vtable; gpr_ref_init(&c->base.refcount, 1); - char *token_md_value; + char* token_md_value; gpr_asprintf(&token_md_value, "Bearer %s", access_token); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; c->access_token_md = grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY), + grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY), grpc_slice_from_copied_string(token_md_value)); - grpc_exec_ctx_finish(&exec_ctx); + gpr_free(token_md_value); return &c->base; } diff --git a/Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.h index d9ad6691b..c0dd1546e 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/oauth2/oauth2_credentials.h @@ -19,60 +19,61 @@ #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_OAUTH2_OAUTH2_CREDENTIALS_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_OAUTH2_OAUTH2_CREDENTIALS_H +#include + #include "src/core/lib/json/json.h" #include "src/core/lib/security/credentials/credentials.h" // auth_refresh_token parsing. 
typedef struct { - const char *type; - char *client_id; - char *client_secret; - char *refresh_token; + const char* type; + char* client_id; + char* client_secret; + char* refresh_token; } grpc_auth_refresh_token; /// Returns 1 if the object is valid, 0 otherwise. int grpc_auth_refresh_token_is_valid( - const grpc_auth_refresh_token *refresh_token); + const grpc_auth_refresh_token* refresh_token); /// Creates a refresh token object from string. Returns an invalid object if a /// parsing error has been encountered. grpc_auth_refresh_token grpc_auth_refresh_token_create_from_string( - const char *json_string); + const char* json_string); /// Creates a refresh token object from parsed json. Returns an invalid object /// if a parsing error has been encountered. grpc_auth_refresh_token grpc_auth_refresh_token_create_from_json( - const grpc_json *json); + const grpc_json* json); /// Destructs the object. -void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token *refresh_token); +void grpc_auth_refresh_token_destruct(grpc_auth_refresh_token* refresh_token); // -- Oauth2 Token Fetcher credentials -- // // This object is a base for credentials that need to acquire an oauth2 token // from an http service. -typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx *exec_ctx, - grpc_credentials_metadata_request *req, - grpc_httpcli_context *http_context, - grpc_polling_entity *pollent, +typedef void (*grpc_fetch_oauth2_func)(grpc_credentials_metadata_request* req, + grpc_httpcli_context* http_context, + grpc_polling_entity* pollent, grpc_iomgr_cb_func cb, - gpr_timespec deadline); + grpc_millis deadline); typedef struct grpc_oauth2_pending_get_request_metadata { - grpc_credentials_mdelem_array *md_array; - grpc_closure *on_request_metadata; - grpc_polling_entity *pollent; - struct grpc_oauth2_pending_get_request_metadata *next; + grpc_credentials_mdelem_array* md_array; + grpc_closure* on_request_metadata; + grpc_polling_entity* pollent; + struct grpc_oauth2_pending_get_request_metadata* next; } grpc_oauth2_pending_get_request_metadata; typedef struct { grpc_call_credentials base; gpr_mu mu; grpc_mdelem access_token_md; - gpr_timespec token_expiration; + grpc_millis token_expiration; bool token_fetch_pending; - grpc_oauth2_pending_get_request_metadata *pending_requests; + grpc_oauth2_pending_get_request_metadata* pending_requests; grpc_httpcli_context httpcli_context; grpc_fetch_oauth2_func fetch_func; grpc_polling_entity pollent; @@ -92,14 +93,14 @@ typedef struct { // Private constructor for refresh token credentials from an already parsed // refresh token. Takes ownership of the refresh token. -grpc_call_credentials * +grpc_call_credentials* grpc_refresh_token_credentials_create_from_auth_refresh_token( grpc_auth_refresh_token token); // Exposed for testing only. 
grpc_credentials_status grpc_oauth2_token_fetcher_credentials_parse_server_response( - grpc_exec_ctx *exec_ctx, const struct grpc_http_response *response, - grpc_mdelem *token_md, gpr_timespec *token_lifetime); + const struct grpc_http_response* response, grpc_mdelem* token_md, + grpc_millis* token_lifetime); #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_OAUTH2_OAUTH2_CREDENTIALS_H */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.c b/Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.cc similarity index 64% rename from Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.c rename to Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.cc index ee20241e3..73946ce03 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.c +++ b/Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/security/credentials/plugin/plugin_credentials.h" #include @@ -31,27 +33,26 @@ #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/validate_metadata.h" -grpc_tracer_flag grpc_plugin_credentials_trace = - GRPC_TRACER_INITIALIZER(false, "plugin_credentials"); +grpc_core::TraceFlag grpc_plugin_credentials_trace(false, "plugin_credentials"); -static void plugin_destruct(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds) { - grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds; +static void plugin_destruct(grpc_call_credentials* creds) { + grpc_plugin_credentials* c = + reinterpret_cast(creds); gpr_mu_destroy(&c->mu); - if (c->plugin.state != NULL && c->plugin.destroy != NULL) { + if (c->plugin.state != nullptr && c->plugin.destroy != nullptr) { c->plugin.destroy(c->plugin.state); } } static void pending_request_remove_locked( - grpc_plugin_credentials *c, - grpc_plugin_credentials_pending_request *pending_request) { - if (pending_request->prev == NULL) { + grpc_plugin_credentials* c, + grpc_plugin_credentials_pending_request* pending_request) { + if (pending_request->prev == nullptr) { c->pending_requests = pending_request->next; } else { pending_request->prev->next = pending_request->next; } - if (pending_request->next != NULL) { + if (pending_request->next != nullptr) { pending_request->next->prev = pending_request->prev; } } @@ -62,21 +63,20 @@ static void pending_request_remove_locked( // When this returns, r->cancelled indicates whether the request was // cancelled before completion. static void pending_request_complete( - grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r) { + grpc_plugin_credentials_pending_request* r) { gpr_mu_lock(&r->creds->mu); if (!r->cancelled) pending_request_remove_locked(r->creds, r); gpr_mu_unlock(&r->creds->mu); // Ref to credentials not needed anymore. 
- grpc_call_credentials_unref(exec_ctx, &r->creds->base); + grpc_call_credentials_unref(&r->creds->base); } -static grpc_error *process_plugin_result( - grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r, - const grpc_metadata *md, size_t num_md, grpc_status_code status, - const char *error_details) { - grpc_error *error = GRPC_ERROR_NONE; +static grpc_error* process_plugin_result( + grpc_plugin_credentials_pending_request* r, const grpc_metadata* md, + size_t num_md, grpc_status_code status, const char* error_details) { + grpc_error* error = GRPC_ERROR_NONE; if (status != GRPC_STATUS_OK) { - char *msg; + char* msg; gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s", error_details); error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); @@ -101,78 +101,76 @@ static grpc_error *process_plugin_result( error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata"); } else { for (size_t i = 0; i < num_md; ++i) { - grpc_mdelem mdelem = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_ref_internal(md[i].key), - grpc_slice_ref_internal(md[i].value)); + grpc_mdelem mdelem = + grpc_mdelem_from_slices(grpc_slice_ref_internal(md[i].key), + grpc_slice_ref_internal(md[i].value)); grpc_credentials_mdelem_array_add(r->md_array, mdelem); - GRPC_MDELEM_UNREF(exec_ctx, mdelem); + GRPC_MDELEM_UNREF(mdelem); } } } return error; } -static void plugin_md_request_metadata_ready(void *request, - const grpc_metadata *md, +static void plugin_md_request_metadata_ready(void* request, + const grpc_metadata* md, size_t num_md, grpc_status_code status, - const char *error_details) { + const char* error_details) { /* called from application code */ - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INITIALIZER( - GRPC_EXEC_CTX_FLAG_IS_FINISHED | GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP, - NULL, NULL); - grpc_plugin_credentials_pending_request *r = - (grpc_plugin_credentials_pending_request *)request; - if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_FINISHED | + GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP); + grpc_plugin_credentials_pending_request* r = + static_cast(request); + if (grpc_plugin_credentials_trace.enabled()) { gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: plugin returned " "asynchronously", r->creds, r); } // Remove request from pending list if not previously cancelled. - pending_request_complete(&exec_ctx, r); + pending_request_complete(r); // If it has not been cancelled, process it. 
if (!r->cancelled) { - grpc_error *error = - process_plugin_result(&exec_ctx, r, md, num_md, status, error_details); - GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error); - } else if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + grpc_error* error = + process_plugin_result(r, md, num_md, status, error_details); + GRPC_CLOSURE_SCHED(r->on_request_metadata, error); + } else if (grpc_plugin_credentials_trace.enabled()) { gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: plugin was previously " "cancelled", r->creds, r); } gpr_free(r); - grpc_exec_ctx_finish(&exec_ctx); } -static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_credentials *creds, - grpc_polling_entity *pollent, +static bool plugin_get_request_metadata(grpc_call_credentials* creds, + grpc_polling_entity* pollent, grpc_auth_metadata_context context, - grpc_credentials_mdelem_array *md_array, - grpc_closure *on_request_metadata, - grpc_error **error) { - grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds; + grpc_credentials_mdelem_array* md_array, + grpc_closure* on_request_metadata, + grpc_error** error) { + grpc_plugin_credentials* c = + reinterpret_cast(creds); bool retval = true; // Synchronous return. - if (c->plugin.get_metadata != NULL) { + if (c->plugin.get_metadata != nullptr) { // Create pending_request object. - grpc_plugin_credentials_pending_request *pending_request = - (grpc_plugin_credentials_pending_request *)gpr_zalloc( - sizeof(*pending_request)); + grpc_plugin_credentials_pending_request* pending_request = + static_cast( + gpr_zalloc(sizeof(*pending_request))); pending_request->creds = c; pending_request->md_array = md_array; pending_request->on_request_metadata = on_request_metadata; // Add it to the pending list. gpr_mu_lock(&c->mu); - if (c->pending_requests != NULL) { + if (c->pending_requests != nullptr) { c->pending_requests->prev = pending_request; } pending_request->next = c->pending_requests; c->pending_requests = pending_request; gpr_mu_unlock(&c->mu); // Invoke the plugin. The callback holds a ref to us. - if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + if (grpc_plugin_credentials_trace.enabled()) { gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: invoking plugin", c, pending_request); } @@ -180,12 +178,12 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX]; size_t num_creds_md = 0; grpc_status_code status = GRPC_STATUS_OK; - const char *error_details = NULL; + const char* error_details = nullptr; if (!c->plugin.get_metadata(c->plugin.state, context, plugin_md_request_metadata_ready, pending_request, creds_md, &num_creds_md, &status, &error_details)) { - if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + if (grpc_plugin_credentials_trace.enabled()) { gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: plugin will return " "asynchronously", @@ -195,12 +193,12 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, } // Returned synchronously. // Remove request from pending list if not previously cancelled. - pending_request_complete(exec_ctx, pending_request); + pending_request_complete(pending_request); // If the request was cancelled, the error will have been returned // asynchronously by plugin_cancel_get_request_metadata(), so return // false. Otherwise, process the result. 
if (pending_request->cancelled) { - if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + if (grpc_plugin_credentials_trace.enabled()) { gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p was cancelled, error " "will be returned asynchronously", @@ -208,41 +206,42 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, } retval = false; } else { - if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + if (grpc_plugin_credentials_trace.enabled()) { gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: plugin returned " "synchronously", c, pending_request); } - *error = process_plugin_result(exec_ctx, pending_request, creds_md, - num_creds_md, status, error_details); + *error = process_plugin_result(pending_request, creds_md, num_creds_md, + status, error_details); } // Clean up. for (size_t i = 0; i < num_creds_md; ++i) { - grpc_slice_unref_internal(exec_ctx, creds_md[i].key); - grpc_slice_unref_internal(exec_ctx, creds_md[i].value); + grpc_slice_unref_internal(creds_md[i].key); + grpc_slice_unref_internal(creds_md[i].value); } - gpr_free((void *)error_details); + gpr_free((void*)error_details); gpr_free(pending_request); } return retval; } static void plugin_cancel_get_request_metadata( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds, - grpc_credentials_mdelem_array *md_array, grpc_error *error) { - grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds; + grpc_call_credentials* creds, grpc_credentials_mdelem_array* md_array, + grpc_error* error) { + grpc_plugin_credentials* c = + reinterpret_cast(creds); gpr_mu_lock(&c->mu); - for (grpc_plugin_credentials_pending_request *pending_request = + for (grpc_plugin_credentials_pending_request* pending_request = c->pending_requests; - pending_request != NULL; pending_request = pending_request->next) { + pending_request != nullptr; pending_request = pending_request->next) { if (pending_request->md_array == md_array) { - if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + if (grpc_plugin_credentials_trace.enabled()) { gpr_log(GPR_INFO, "plugin_credentials[%p]: cancelling request %p", c, pending_request); } pending_request->cancelled = true; - GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata, + GRPC_CLOSURE_SCHED(pending_request->on_request_metadata, GRPC_ERROR_REF(error)); pending_request_remove_locked(c, pending_request); break; @@ -256,12 +255,13 @@ static grpc_call_credentials_vtable plugin_vtable = { plugin_destruct, plugin_get_request_metadata, plugin_cancel_get_request_metadata}; -grpc_call_credentials *grpc_metadata_credentials_create_from_plugin( - grpc_metadata_credentials_plugin plugin, void *reserved) { - grpc_plugin_credentials *c = gpr_zalloc(sizeof(*c)); +grpc_call_credentials* grpc_metadata_credentials_create_from_plugin( + grpc_metadata_credentials_plugin plugin, void* reserved) { + grpc_plugin_credentials* c = + static_cast(gpr_zalloc(sizeof(*c))); GRPC_API_TRACE("grpc_metadata_credentials_create_from_plugin(reserved=%p)", 1, (reserved)); - GPR_ASSERT(reserved == NULL); + GPR_ASSERT(reserved == nullptr); c->base.type = plugin.type; c->base.vtable = &plugin_vtable; gpr_ref_init(&c->base.refcount, 1); diff --git a/Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.h index f56df9eac..caf990efa 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/plugin/plugin_credentials.h @@ -19,26 +19,28 @@ 
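/* Illustrative sketch, not part of the upstream diff: a minimal metadata
 * plugin driven by the plugin credentials code above. Returning non-zero
 * from get_metadata signals synchronous completion, which is the path
 * plugin_get_request_metadata() handles inline. The header name and value
 * are placeholders. */
#include <string.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

static int demo_get_metadata(
    void* state, grpc_auth_metadata_context context,
    grpc_credentials_plugin_metadata_cb cb, void* user_data,
    grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
    size_t* num_creds_md, grpc_status_code* status,
    const char** error_details) {
  (void)state; (void)context; (void)cb; (void)user_data;
  creds_md[0].key = grpc_slice_from_static_string("x-demo-auth");
  creds_md[0].value = grpc_slice_from_static_string("demo-value");
  creds_md[0].flags = 0;
  *num_creds_md = 1;
  *status = GRPC_STATUS_OK;
  *error_details = nullptr;
  return 1; /* completed synchronously */
}

static void demo_destroy(void* state) { (void)state; }

static grpc_call_credentials* make_plugin_creds(void) {
  grpc_metadata_credentials_plugin plugin;
  memset(&plugin, 0, sizeof(plugin));
  plugin.get_metadata = demo_get_metadata;
  plugin.destroy = demo_destroy;
  plugin.state = nullptr;
  plugin.type = "demo-plugin";
  return grpc_metadata_credentials_create_from_plugin(plugin,
                                                      /*reserved=*/nullptr);
}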
#ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_PLUGIN_PLUGIN_CREDENTIALS_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_PLUGIN_PLUGIN_CREDENTIALS_H +#include + #include "src/core/lib/security/credentials/credentials.h" -extern grpc_tracer_flag grpc_plugin_credentials_trace; +extern grpc_core::TraceFlag grpc_plugin_credentials_trace; struct grpc_plugin_credentials; typedef struct grpc_plugin_credentials_pending_request { bool cancelled; - struct grpc_plugin_credentials *creds; - grpc_credentials_mdelem_array *md_array; - grpc_closure *on_request_metadata; - struct grpc_plugin_credentials_pending_request *prev; - struct grpc_plugin_credentials_pending_request *next; + struct grpc_plugin_credentials* creds; + grpc_credentials_mdelem_array* md_array; + grpc_closure* on_request_metadata; + struct grpc_plugin_credentials_pending_request* prev; + struct grpc_plugin_credentials_pending_request* next; } grpc_plugin_credentials_pending_request; typedef struct grpc_plugin_credentials { grpc_call_credentials base; grpc_metadata_credentials_plugin plugin; gpr_mu mu; - grpc_plugin_credentials_pending_request *pending_requests; + grpc_plugin_credentials_pending_request* pending_requests; } grpc_plugin_credentials; #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_PLUGIN_PLUGIN_CREDENTIALS_H */ diff --git a/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.c b/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.c deleted file mode 100644 index 006db1ec7..000000000 --- a/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.c +++ /dev/null @@ -1,194 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/security/credentials/ssl/ssl_credentials.h" - -#include - -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/surface/api_trace.h" - -#include -#include -#include - -// -// SSL Channel Credentials. 
-// - -static void ssl_config_pem_key_cert_pair_destroy( - tsi_ssl_pem_key_cert_pair *kp) { - if (kp == NULL) return; - gpr_free((void *)kp->private_key); - gpr_free((void *)kp->cert_chain); -} - -static void ssl_destruct(grpc_exec_ctx *exec_ctx, - grpc_channel_credentials *creds) { - grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds; - gpr_free(c->config.pem_root_certs); - ssl_config_pem_key_cert_pair_destroy(&c->config.pem_key_cert_pair); -} - -static grpc_security_status ssl_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_channel_credentials *creds, - grpc_call_credentials *call_creds, const char *target, - const grpc_channel_args *args, grpc_channel_security_connector **sc, - grpc_channel_args **new_args) { - grpc_ssl_credentials *c = (grpc_ssl_credentials *)creds; - grpc_security_status status = GRPC_SECURITY_OK; - const char *overridden_target_name = NULL; - for (size_t i = 0; args && i < args->num_args; i++) { - grpc_arg *arg = &args->args[i]; - if (strcmp(arg->key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == 0 && - arg->type == GRPC_ARG_STRING) { - overridden_target_name = arg->value.string; - break; - } - } - status = grpc_ssl_channel_security_connector_create( - exec_ctx, call_creds, &c->config, target, overridden_target_name, sc); - if (status != GRPC_SECURITY_OK) { - return status; - } - grpc_arg new_arg = - grpc_channel_arg_string_create(GRPC_ARG_HTTP2_SCHEME, "https"); - *new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1); - return status; -} - -static grpc_channel_credentials_vtable ssl_vtable = { - ssl_destruct, ssl_create_security_connector, NULL}; - -static void ssl_build_config(const char *pem_root_certs, - grpc_ssl_pem_key_cert_pair *pem_key_cert_pair, - grpc_ssl_config *config) { - if (pem_root_certs != NULL) { - config->pem_root_certs = gpr_strdup(pem_root_certs); - } - if (pem_key_cert_pair != NULL) { - GPR_ASSERT(pem_key_cert_pair->private_key != NULL); - GPR_ASSERT(pem_key_cert_pair->cert_chain != NULL); - config->pem_key_cert_pair.cert_chain = - gpr_strdup(pem_key_cert_pair->cert_chain); - config->pem_key_cert_pair.private_key = - gpr_strdup(pem_key_cert_pair->private_key); - } -} - -grpc_channel_credentials *grpc_ssl_credentials_create( - const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair, - void *reserved) { - grpc_ssl_credentials *c = gpr_zalloc(sizeof(grpc_ssl_credentials)); - GRPC_API_TRACE( - "grpc_ssl_credentials_create(pem_root_certs=%s, " - "pem_key_cert_pair=%p, " - "reserved=%p)", - 3, (pem_root_certs, pem_key_cert_pair, reserved)); - GPR_ASSERT(reserved == NULL); - c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL; - c->base.vtable = &ssl_vtable; - gpr_ref_init(&c->base.refcount, 1); - ssl_build_config(pem_root_certs, pem_key_cert_pair, &c->config); - return &c->base; -} - -// -// SSL Server Credentials. 
-// - -static void ssl_server_destruct(grpc_exec_ctx *exec_ctx, - grpc_server_credentials *creds) { - grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds; - size_t i; - for (i = 0; i < c->config.num_key_cert_pairs; i++) { - ssl_config_pem_key_cert_pair_destroy(&c->config.pem_key_cert_pairs[i]); - } - gpr_free(c->config.pem_key_cert_pairs); - gpr_free(c->config.pem_root_certs); -} - -static grpc_security_status ssl_server_create_security_connector( - grpc_exec_ctx *exec_ctx, grpc_server_credentials *creds, - grpc_server_security_connector **sc) { - grpc_ssl_server_credentials *c = (grpc_ssl_server_credentials *)creds; - return grpc_ssl_server_security_connector_create(exec_ctx, &c->config, sc); -} - -static grpc_server_credentials_vtable ssl_server_vtable = { - ssl_server_destruct, ssl_server_create_security_connector}; - -static void ssl_build_server_config( - const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, - size_t num_key_cert_pairs, - grpc_ssl_client_certificate_request_type client_certificate_request, - grpc_ssl_server_config *config) { - size_t i; - config->client_certificate_request = client_certificate_request; - if (pem_root_certs != NULL) { - config->pem_root_certs = gpr_strdup(pem_root_certs); - } - if (num_key_cert_pairs > 0) { - GPR_ASSERT(pem_key_cert_pairs != NULL); - config->pem_key_cert_pairs = - gpr_zalloc(num_key_cert_pairs * sizeof(tsi_ssl_pem_key_cert_pair)); - } - config->num_key_cert_pairs = num_key_cert_pairs; - for (i = 0; i < num_key_cert_pairs; i++) { - GPR_ASSERT(pem_key_cert_pairs[i].private_key != NULL); - GPR_ASSERT(pem_key_cert_pairs[i].cert_chain != NULL); - config->pem_key_cert_pairs[i].cert_chain = - gpr_strdup(pem_key_cert_pairs[i].cert_chain); - config->pem_key_cert_pairs[i].private_key = - gpr_strdup(pem_key_cert_pairs[i].private_key); - } -} - -grpc_server_credentials *grpc_ssl_server_credentials_create( - const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, - size_t num_key_cert_pairs, int force_client_auth, void *reserved) { - return grpc_ssl_server_credentials_create_ex( - pem_root_certs, pem_key_cert_pairs, num_key_cert_pairs, - force_client_auth - ? 
GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY - : GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, - reserved); -} - -grpc_server_credentials *grpc_ssl_server_credentials_create_ex( - const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs, - size_t num_key_cert_pairs, - grpc_ssl_client_certificate_request_type client_certificate_request, - void *reserved) { - grpc_ssl_server_credentials *c = - gpr_zalloc(sizeof(grpc_ssl_server_credentials)); - GRPC_API_TRACE( - "grpc_ssl_server_credentials_create_ex(" - "pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, " - "client_certificate_request=%d, reserved=%p)", - 5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs, - client_certificate_request, reserved)); - GPR_ASSERT(reserved == NULL); - c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL; - gpr_ref_init(&c->base.refcount, 1); - c->base.vtable = &ssl_server_vtable; - ssl_build_server_config(pem_root_certs, pem_key_cert_pairs, - num_key_cert_pairs, client_certificate_request, - &c->config); - return &c->base; -} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.cc b/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.cc new file mode 100644 index 000000000..2b6377d3e --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.cc @@ -0,0 +1,349 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/security/credentials/ssl/ssl_credentials.h" + +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/surface/api_trace.h" +#include "src/core/tsi/ssl_transport_security.h" + +#include +#include +#include + +// +// SSL Channel Credentials. 
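/* Illustrative sketch, not part of the upstream diff: creating SSL channel
 * credentials through the public constructor implemented below and opening
 * a secure channel with them. The target hostname is a placeholder. */
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

static grpc_channel* make_ssl_channel(const char* pem_root_certs) {
  /* A null key/cert pair means no client certificate; null root certs fall
   * back to the library's default roots. */
  grpc_channel_credentials* creds = grpc_ssl_credentials_create(
      pem_root_certs, /*pem_key_cert_pair=*/nullptr, /*reserved=*/nullptr);
  grpc_channel* channel = grpc_secure_channel_create(
      creds, "greeter.example.com:443", /*args=*/nullptr,
      /*reserved=*/nullptr);
  grpc_channel_credentials_release(creds);
  return channel;
}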
+// + +void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair* kp, + size_t num_key_cert_pairs) { + if (kp == nullptr) return; + for (size_t i = 0; i < num_key_cert_pairs; i++) { + gpr_free((void*)kp[i].private_key); + gpr_free((void*)kp[i].cert_chain); + } + gpr_free(kp); +} + +static void ssl_destruct(grpc_channel_credentials* creds) { + grpc_ssl_credentials* c = reinterpret_cast(creds); + gpr_free(c->config.pem_root_certs); + grpc_tsi_ssl_pem_key_cert_pairs_destroy(c->config.pem_key_cert_pair, 1); +} + +static grpc_security_status ssl_create_security_connector( + grpc_channel_credentials* creds, grpc_call_credentials* call_creds, + const char* target, const grpc_channel_args* args, + grpc_channel_security_connector** sc, grpc_channel_args** new_args) { + grpc_ssl_credentials* c = reinterpret_cast(creds); + grpc_security_status status = GRPC_SECURITY_OK; + const char* overridden_target_name = nullptr; + tsi_ssl_session_cache* ssl_session_cache = nullptr; + for (size_t i = 0; args && i < args->num_args; i++) { + grpc_arg* arg = &args->args[i]; + if (strcmp(arg->key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG) == 0 && + arg->type == GRPC_ARG_STRING) { + overridden_target_name = arg->value.string; + } + if (strcmp(arg->key, GRPC_SSL_SESSION_CACHE_ARG) == 0 && + arg->type == GRPC_ARG_POINTER) { + ssl_session_cache = + static_cast(arg->value.pointer.p); + } + } + status = grpc_ssl_channel_security_connector_create( + creds, call_creds, &c->config, target, overridden_target_name, + ssl_session_cache, sc); + if (status != GRPC_SECURITY_OK) { + return status; + } + grpc_arg new_arg = grpc_channel_arg_string_create( + (char*)GRPC_ARG_HTTP2_SCHEME, (char*)"https"); + *new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1); + return status; +} + +static grpc_channel_credentials_vtable ssl_vtable = { + ssl_destruct, ssl_create_security_connector, nullptr}; + +static void ssl_build_config(const char* pem_root_certs, + grpc_ssl_pem_key_cert_pair* pem_key_cert_pair, + grpc_ssl_config* config) { + if (pem_root_certs != nullptr) { + config->pem_root_certs = gpr_strdup(pem_root_certs); + } + if (pem_key_cert_pair != nullptr) { + GPR_ASSERT(pem_key_cert_pair->private_key != nullptr); + GPR_ASSERT(pem_key_cert_pair->cert_chain != nullptr); + config->pem_key_cert_pair = static_cast( + gpr_zalloc(sizeof(tsi_ssl_pem_key_cert_pair))); + config->pem_key_cert_pair->cert_chain = + gpr_strdup(pem_key_cert_pair->cert_chain); + config->pem_key_cert_pair->private_key = + gpr_strdup(pem_key_cert_pair->private_key); + } +} + +grpc_channel_credentials* grpc_ssl_credentials_create( + const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pair, + void* reserved) { + grpc_ssl_credentials* c = static_cast( + gpr_zalloc(sizeof(grpc_ssl_credentials))); + GRPC_API_TRACE( + "grpc_ssl_credentials_create(pem_root_certs=%s, " + "pem_key_cert_pair=%p, " + "reserved=%p)", + 3, (pem_root_certs, pem_key_cert_pair, reserved)); + GPR_ASSERT(reserved == nullptr); + c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL; + c->base.vtable = &ssl_vtable; + gpr_ref_init(&c->base.refcount, 1); + ssl_build_config(pem_root_certs, pem_key_cert_pair, &c->config); + return &c->base; +} + +// +// SSL Server Credentials. 
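/* Illustrative sketch, not part of the upstream diff: the certificate-config
 * and options path added below, used to bind a TLS port on a server. The PEM
 * buffers and listen address are placeholders. */
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

static int add_tls_port(grpc_server* server, const char* root_pem,
                        const char* private_key_pem, const char* cert_pem) {
  grpc_ssl_pem_key_cert_pair pair = {private_key_pem, cert_pem};
  grpc_ssl_server_certificate_config* cert_config =
      grpc_ssl_server_certificate_config_create(root_pem, &pair, 1);
  /* The options object takes ownership of cert_config, and the credentials
   * constructor takes ownership of (and destroys) the options. */
  grpc_ssl_server_credentials_options* options =
      grpc_ssl_server_credentials_create_options_using_config(
          GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, cert_config);
  grpc_server_credentials* creds =
      grpc_ssl_server_credentials_create_with_options(options);
  int port = grpc_server_add_secure_http2_port(server, "0.0.0.0:50051", creds);
  grpc_server_credentials_release(creds);
  return port;
}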
+// + +struct grpc_ssl_server_credentials_options { + grpc_ssl_client_certificate_request_type client_certificate_request; + grpc_ssl_server_certificate_config* certificate_config; + grpc_ssl_server_certificate_config_fetcher* certificate_config_fetcher; +}; + +static void ssl_server_destruct(grpc_server_credentials* creds) { + grpc_ssl_server_credentials* c = + reinterpret_cast(creds); + grpc_tsi_ssl_pem_key_cert_pairs_destroy(c->config.pem_key_cert_pairs, + c->config.num_key_cert_pairs); + gpr_free(c->config.pem_root_certs); +} + +static grpc_security_status ssl_server_create_security_connector( + grpc_server_credentials* creds, grpc_server_security_connector** sc) { + return grpc_ssl_server_security_connector_create(creds, sc); +} + +static grpc_server_credentials_vtable ssl_server_vtable = { + ssl_server_destruct, ssl_server_create_security_connector}; + +tsi_ssl_pem_key_cert_pair* grpc_convert_grpc_to_tsi_cert_pairs( + const grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs) { + tsi_ssl_pem_key_cert_pair* tsi_pairs = nullptr; + if (num_key_cert_pairs > 0) { + GPR_ASSERT(pem_key_cert_pairs != nullptr); + tsi_pairs = static_cast( + gpr_zalloc(num_key_cert_pairs * sizeof(tsi_ssl_pem_key_cert_pair))); + } + for (size_t i = 0; i < num_key_cert_pairs; i++) { + GPR_ASSERT(pem_key_cert_pairs[i].private_key != nullptr); + GPR_ASSERT(pem_key_cert_pairs[i].cert_chain != nullptr); + tsi_pairs[i].cert_chain = gpr_strdup(pem_key_cert_pairs[i].cert_chain); + tsi_pairs[i].private_key = gpr_strdup(pem_key_cert_pairs[i].private_key); + } + return tsi_pairs; +} + +static void ssl_build_server_config( + const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs, + grpc_ssl_client_certificate_request_type client_certificate_request, + grpc_ssl_server_config* config) { + config->client_certificate_request = client_certificate_request; + if (pem_root_certs != nullptr) { + config->pem_root_certs = gpr_strdup(pem_root_certs); + } + config->pem_key_cert_pairs = grpc_convert_grpc_to_tsi_cert_pairs( + pem_key_cert_pairs, num_key_cert_pairs); + config->num_key_cert_pairs = num_key_cert_pairs; +} + +grpc_ssl_server_certificate_config* grpc_ssl_server_certificate_config_create( + const char* pem_root_certs, + const grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs) { + grpc_ssl_server_certificate_config* config = + static_cast( + gpr_zalloc(sizeof(grpc_ssl_server_certificate_config))); + if (pem_root_certs != nullptr) { + config->pem_root_certs = gpr_strdup(pem_root_certs); + } + if (num_key_cert_pairs > 0) { + GPR_ASSERT(pem_key_cert_pairs != nullptr); + config->pem_key_cert_pairs = static_cast( + gpr_zalloc(num_key_cert_pairs * sizeof(grpc_ssl_pem_key_cert_pair))); + } + config->num_key_cert_pairs = num_key_cert_pairs; + for (size_t i = 0; i < num_key_cert_pairs; i++) { + GPR_ASSERT(pem_key_cert_pairs[i].private_key != nullptr); + GPR_ASSERT(pem_key_cert_pairs[i].cert_chain != nullptr); + config->pem_key_cert_pairs[i].cert_chain = + gpr_strdup(pem_key_cert_pairs[i].cert_chain); + config->pem_key_cert_pairs[i].private_key = + gpr_strdup(pem_key_cert_pairs[i].private_key); + } + return config; +} + +void grpc_ssl_server_certificate_config_destroy( + grpc_ssl_server_certificate_config* config) { + if (config == nullptr) return; + for (size_t i = 0; i < config->num_key_cert_pairs; i++) { + gpr_free((void*)config->pem_key_cert_pairs[i].private_key); + gpr_free((void*)config->pem_key_cert_pairs[i].cert_chain); + } + 
gpr_free(config->pem_key_cert_pairs); + gpr_free(config->pem_root_certs); + gpr_free(config); +} + +grpc_ssl_server_credentials_options* +grpc_ssl_server_credentials_create_options_using_config( + grpc_ssl_client_certificate_request_type client_certificate_request, + grpc_ssl_server_certificate_config* config) { + grpc_ssl_server_credentials_options* options = nullptr; + if (config == nullptr) { + gpr_log(GPR_ERROR, "Certificate config must not be NULL."); + goto done; + } + options = static_cast( + gpr_zalloc(sizeof(grpc_ssl_server_credentials_options))); + options->client_certificate_request = client_certificate_request; + options->certificate_config = config; +done: + return options; +} + +grpc_ssl_server_credentials_options* +grpc_ssl_server_credentials_create_options_using_config_fetcher( + grpc_ssl_client_certificate_request_type client_certificate_request, + grpc_ssl_server_certificate_config_callback cb, void* user_data) { + if (cb == nullptr) { + gpr_log(GPR_ERROR, "Invalid certificate config callback parameter."); + return nullptr; + } + + grpc_ssl_server_certificate_config_fetcher* fetcher = + static_cast( + gpr_zalloc(sizeof(grpc_ssl_server_certificate_config_fetcher))); + fetcher->cb = cb; + fetcher->user_data = user_data; + + grpc_ssl_server_credentials_options* options = + static_cast( + gpr_zalloc(sizeof(grpc_ssl_server_credentials_options))); + options->client_certificate_request = client_certificate_request; + options->certificate_config_fetcher = fetcher; + + return options; +} + +grpc_server_credentials* grpc_ssl_server_credentials_create( + const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs, int force_client_auth, void* reserved) { + return grpc_ssl_server_credentials_create_ex( + pem_root_certs, pem_key_cert_pairs, num_key_cert_pairs, + force_client_auth + ? 
GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY + : GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE, + reserved); +} + +grpc_server_credentials* grpc_ssl_server_credentials_create_ex( + const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs, + grpc_ssl_client_certificate_request_type client_certificate_request, + void* reserved) { + GRPC_API_TRACE( + "grpc_ssl_server_credentials_create_ex(" + "pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, " + "client_certificate_request=%d, reserved=%p)", + 5, + (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs, + client_certificate_request, reserved)); + GPR_ASSERT(reserved == nullptr); + + grpc_ssl_server_certificate_config* cert_config = + grpc_ssl_server_certificate_config_create( + pem_root_certs, pem_key_cert_pairs, num_key_cert_pairs); + grpc_ssl_server_credentials_options* options = + grpc_ssl_server_credentials_create_options_using_config( + client_certificate_request, cert_config); + + return grpc_ssl_server_credentials_create_with_options(options); +} + +grpc_server_credentials* grpc_ssl_server_credentials_create_with_options( + grpc_ssl_server_credentials_options* options) { + grpc_server_credentials* retval = nullptr; + grpc_ssl_server_credentials* c = nullptr; + + if (options == nullptr) { + gpr_log(GPR_ERROR, + "Invalid options trying to create SSL server credentials."); + goto done; + } + + if (options->certificate_config == nullptr && + options->certificate_config_fetcher == nullptr) { + gpr_log(GPR_ERROR, + "SSL server credentials options must specify either " + "certificate config or fetcher."); + goto done; + } else if (options->certificate_config_fetcher != nullptr && + options->certificate_config_fetcher->cb == nullptr) { + gpr_log(GPR_ERROR, "Certificate config fetcher callback must not be NULL."); + goto done; + } + + c = static_cast( + gpr_zalloc(sizeof(grpc_ssl_server_credentials))); + c->base.type = GRPC_CHANNEL_CREDENTIALS_TYPE_SSL; + gpr_ref_init(&c->base.refcount, 1); + c->base.vtable = &ssl_server_vtable; + + if (options->certificate_config_fetcher != nullptr) { + c->config.client_certificate_request = options->client_certificate_request; + c->certificate_config_fetcher = *options->certificate_config_fetcher; + } else { + ssl_build_server_config(options->certificate_config->pem_root_certs, + options->certificate_config->pem_key_cert_pairs, + options->certificate_config->num_key_cert_pairs, + options->client_certificate_request, &c->config); + } + + retval = &c->base; + +done: + grpc_ssl_server_credentials_options_destroy(options); + return retval; +} + +void grpc_ssl_server_credentials_options_destroy( + grpc_ssl_server_credentials_options* o) { + if (o == nullptr) return; + gpr_free(o->certificate_config_fetcher); + grpc_ssl_server_certificate_config_destroy(o->certificate_config); + gpr_free(o); +} diff --git a/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.h b/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.h index b43c656cd..712d34c73 100644 --- a/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.h +++ b/Sources/CgRPC/src/core/lib/security/credentials/ssl/ssl_credentials.h @@ -18,6 +18,8 @@ #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_SSL_SSL_CREDENTIALS_H #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_SSL_SSL_CREDENTIALS_H +#include + #include "src/core/lib/security/credentials/credentials.h" typedef struct { @@ -25,9 +27,28 @@ typedef struct { grpc_ssl_config config; } 
grpc_ssl_credentials; +struct grpc_ssl_server_certificate_config { + grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs; + size_t num_key_cert_pairs; + char* pem_root_certs; +}; + +typedef struct { + grpc_ssl_server_certificate_config_callback cb; + void* user_data; +} grpc_ssl_server_certificate_config_fetcher; + typedef struct { grpc_server_credentials base; grpc_ssl_server_config config; + grpc_ssl_server_certificate_config_fetcher certificate_config_fetcher; } grpc_ssl_server_credentials; +tsi_ssl_pem_key_cert_pair* grpc_convert_grpc_to_tsi_cert_pairs( + const grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs); + +void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair* kp, + size_t num_key_cert_pairs); + #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_SSL_SSL_CREDENTIALS_H */ diff --git a/Sources/CgRPC/src/core/lib/security/security_connector/alts_security_connector.cc b/Sources/CgRPC/src/core/lib/security/security_connector/alts_security_connector.cc new file mode 100644 index 000000000..5ff7d7938 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/security_connector/alts_security_connector.cc @@ -0,0 +1,287 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/security/security_connector/alts_security_connector.h" + +#include +#include + +#include +#include +#include +#include + +#include "src/core/lib/security/credentials/alts/alts_credentials.h" +#include "src/core/lib/security/transport/security_handshaker.h" +#include "src/core/lib/transport/transport.h" +#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h" + +typedef struct { + grpc_channel_security_connector base; + char* target_name; +} grpc_alts_channel_security_connector; + +typedef struct { + grpc_server_security_connector base; +} grpc_alts_server_security_connector; + +static void alts_channel_destroy(grpc_security_connector* sc) { + if (sc == nullptr) { + return; + } + auto c = reinterpret_cast(sc); + grpc_call_credentials_unref(c->base.request_metadata_creds); + grpc_channel_credentials_unref(c->base.channel_creds); + gpr_free(c->target_name); + gpr_free(sc); +} + +static void alts_server_destroy(grpc_security_connector* sc) { + if (sc == nullptr) { + return; + } + auto c = reinterpret_cast(sc); + grpc_server_credentials_unref(c->base.server_creds); + gpr_free(sc); +} + +static void alts_channel_add_handshakers( + grpc_channel_security_connector* sc, + grpc_handshake_manager* handshake_manager) { + tsi_handshaker* handshaker = nullptr; + auto c = reinterpret_cast(sc); + grpc_alts_credentials* creds = + reinterpret_cast(c->base.channel_creds); + GPR_ASSERT(alts_tsi_handshaker_create(creds->options, c->target_name, + creds->handshaker_service_url, true, + &handshaker) == TSI_OK); + grpc_handshake_manager_add(handshake_manager, grpc_security_handshaker_create( + handshaker, &sc->base)); +} + +static void alts_server_add_handshakers( + grpc_server_security_connector* sc, + grpc_handshake_manager* handshake_manager) { + tsi_handshaker* handshaker = nullptr; + auto c = reinterpret_cast(sc); + grpc_alts_server_credentials* creds = + reinterpret_cast(c->base.server_creds); + GPR_ASSERT(alts_tsi_handshaker_create(creds->options, nullptr, + creds->handshaker_service_url, false, + &handshaker) == TSI_OK); + grpc_handshake_manager_add(handshake_manager, grpc_security_handshaker_create( + handshaker, &sc->base)); +} + +static void alts_set_rpc_protocol_versions( + grpc_gcp_rpc_protocol_versions* rpc_versions) { + grpc_gcp_rpc_protocol_versions_set_max(rpc_versions, + GRPC_PROTOCOL_VERSION_MAX_MAJOR, + GRPC_PROTOCOL_VERSION_MAX_MINOR); + grpc_gcp_rpc_protocol_versions_set_min(rpc_versions, + GRPC_PROTOCOL_VERSION_MIN_MAJOR, + GRPC_PROTOCOL_VERSION_MIN_MINOR); +} + +namespace grpc_core { +namespace internal { + +grpc_security_status grpc_alts_auth_context_from_tsi_peer( + const tsi_peer* peer, grpc_auth_context** ctx) { + if (peer == nullptr || ctx == nullptr) { + gpr_log(GPR_ERROR, + "Invalid arguments to grpc_alts_auth_context_from_tsi_peer()"); + return GRPC_SECURITY_ERROR; + } + *ctx = nullptr; + /* Validate certificate type. */ + const tsi_peer_property* cert_type_prop = + tsi_peer_get_property_by_name(peer, TSI_CERTIFICATE_TYPE_PEER_PROPERTY); + if (cert_type_prop == nullptr || + strncmp(cert_type_prop->value.data, TSI_ALTS_CERTIFICATE_TYPE, + cert_type_prop->value.length) != 0) { + gpr_log(GPR_ERROR, "Invalid or missing certificate type property."); + return GRPC_SECURITY_ERROR; + } + /* Validate RPC protocol versions. 
*/ + const tsi_peer_property* rpc_versions_prop = + tsi_peer_get_property_by_name(peer, TSI_ALTS_RPC_VERSIONS); + if (rpc_versions_prop == nullptr) { + gpr_log(GPR_ERROR, "Missing rpc protocol versions property."); + return GRPC_SECURITY_ERROR; + } + grpc_gcp_rpc_protocol_versions local_versions, peer_versions; + alts_set_rpc_protocol_versions(&local_versions); + grpc_slice slice = grpc_slice_from_copied_buffer( + rpc_versions_prop->value.data, rpc_versions_prop->value.length); + bool decode_result = + grpc_gcp_rpc_protocol_versions_decode(slice, &peer_versions); + grpc_slice_unref(slice); + if (!decode_result) { + gpr_log(GPR_ERROR, "Invalid peer rpc protocol versions."); + return GRPC_SECURITY_ERROR; + } + /* TODO: Pass highest common rpc protocol version to grpc caller. */ + bool check_result = grpc_gcp_rpc_protocol_versions_check( + &local_versions, &peer_versions, nullptr); + if (!check_result) { + gpr_log(GPR_ERROR, "Mismatch of local and peer rpc protocol versions."); + return GRPC_SECURITY_ERROR; + } + /* Create auth context. */ + *ctx = grpc_auth_context_create(nullptr); + grpc_auth_context_add_cstring_property( + *ctx, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME, + GRPC_ALTS_TRANSPORT_SECURITY_TYPE); + size_t i = 0; + for (i = 0; i < peer->property_count; i++) { + const tsi_peer_property* tsi_prop = &peer->properties[i]; + /* Add service account to auth context. */ + if (strcmp(tsi_prop->name, TSI_ALTS_SERVICE_ACCOUNT_PEER_PROPERTY) == 0) { + grpc_auth_context_add_property( + *ctx, TSI_ALTS_SERVICE_ACCOUNT_PEER_PROPERTY, tsi_prop->value.data, + tsi_prop->value.length); + GPR_ASSERT(grpc_auth_context_set_peer_identity_property_name( + *ctx, TSI_ALTS_SERVICE_ACCOUNT_PEER_PROPERTY) == 1); + } + } + if (!grpc_auth_context_peer_is_authenticated(*ctx)) { + gpr_log(GPR_ERROR, "Invalid unauthenticated peer."); + GRPC_AUTH_CONTEXT_UNREF(*ctx, "test"); + *ctx = nullptr; + return GRPC_SECURITY_ERROR; + } + return GRPC_SECURITY_OK; +} + +} // namespace internal +} // namespace grpc_core + +static void alts_check_peer(grpc_security_connector* sc, tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked) { + grpc_security_status status; + status = grpc_core::internal::grpc_alts_auth_context_from_tsi_peer( + &peer, auth_context); + tsi_peer_destruct(&peer); + grpc_error* error = + status == GRPC_SECURITY_OK + ? 
GRPC_ERROR_NONE + : GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Could not get ALTS auth context from TSI peer"); + GRPC_CLOSURE_SCHED(on_peer_checked, error); +} + +static int alts_channel_cmp(grpc_security_connector* sc1, + grpc_security_connector* sc2) { + grpc_alts_channel_security_connector* c1 = + reinterpret_cast(sc1); + grpc_alts_channel_security_connector* c2 = + reinterpret_cast(sc2); + int c = grpc_channel_security_connector_cmp(&c1->base, &c2->base); + if (c != 0) return c; + return strcmp(c1->target_name, c2->target_name); +} + +static int alts_server_cmp(grpc_security_connector* sc1, + grpc_security_connector* sc2) { + grpc_alts_server_security_connector* c1 = + reinterpret_cast(sc1); + grpc_alts_server_security_connector* c2 = + reinterpret_cast(sc2); + return grpc_server_security_connector_cmp(&c1->base, &c2->base); +} + +static grpc_security_connector_vtable alts_channel_vtable = { + alts_channel_destroy, alts_check_peer, alts_channel_cmp}; + +static grpc_security_connector_vtable alts_server_vtable = { + alts_server_destroy, alts_check_peer, alts_server_cmp}; + +static bool alts_check_call_host(grpc_channel_security_connector* sc, + const char* host, + grpc_auth_context* auth_context, + grpc_closure* on_call_host_checked, + grpc_error** error) { + grpc_alts_channel_security_connector* alts_sc = + reinterpret_cast(sc); + if (host == nullptr || alts_sc == nullptr || + strcmp(host, alts_sc->target_name) != 0) { + *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "ALTS call host does not match target name"); + } + return true; +} + +static void alts_cancel_check_call_host(grpc_channel_security_connector* sc, + grpc_closure* on_call_host_checked, + grpc_error* error) { + GRPC_ERROR_UNREF(error); +} + +grpc_security_status grpc_alts_channel_security_connector_create( + grpc_channel_credentials* channel_creds, + grpc_call_credentials* request_metadata_creds, const char* target_name, + grpc_channel_security_connector** sc) { + if (channel_creds == nullptr || sc == nullptr || target_name == nullptr) { + gpr_log( + GPR_ERROR, + "Invalid arguments to grpc_alts_channel_security_connector_create()"); + return GRPC_SECURITY_ERROR; + } + auto c = static_cast( + gpr_zalloc(sizeof(grpc_alts_channel_security_connector))); + gpr_ref_init(&c->base.base.refcount, 1); + c->base.base.vtable = &alts_channel_vtable; + c->base.add_handshakers = alts_channel_add_handshakers; + c->base.channel_creds = grpc_channel_credentials_ref(channel_creds); + c->base.request_metadata_creds = + grpc_call_credentials_ref(request_metadata_creds); + c->base.check_call_host = alts_check_call_host; + c->base.cancel_check_call_host = alts_cancel_check_call_host; + grpc_alts_credentials* creds = + reinterpret_cast(c->base.channel_creds); + alts_set_rpc_protocol_versions(&creds->options->rpc_versions); + c->target_name = gpr_strdup(target_name); + *sc = &c->base; + return GRPC_SECURITY_OK; +} + +grpc_security_status grpc_alts_server_security_connector_create( + grpc_server_credentials* server_creds, + grpc_server_security_connector** sc) { + if (server_creds == nullptr || sc == nullptr) { + gpr_log( + GPR_ERROR, + "Invalid arguments to grpc_alts_server_security_connector_create()"); + return GRPC_SECURITY_ERROR; + } + auto c = static_cast( + gpr_zalloc(sizeof(grpc_alts_server_security_connector))); + gpr_ref_init(&c->base.base.refcount, 1); + c->base.base.vtable = &alts_server_vtable; + c->base.server_creds = grpc_server_credentials_ref(server_creds); + c->base.add_handshakers = alts_server_add_handshakers; + 
grpc_alts_server_credentials* creds = + reinterpret_cast(c->base.server_creds); + alts_set_rpc_protocol_versions(&creds->options->rpc_versions); + *sc = &c->base; + return GRPC_SECURITY_OK; +} diff --git a/Sources/CgRPC/src/core/lib/security/security_connector/alts_security_connector.h b/Sources/CgRPC/src/core/lib/security/security_connector/alts_security_connector.h new file mode 100644 index 000000000..e7e4cffe2 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/security_connector/alts_security_connector.h @@ -0,0 +1,69 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_ALTS_SECURITY_CONNECTOR_H +#define GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_ALTS_SECURITY_CONNECTOR_H + +#include + +#include "src/core/lib/security/context/security_context.h" +#include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h" + +#define GRPC_ALTS_TRANSPORT_SECURITY_TYPE "alts" + +/** + * This method creates an ALTS channel security connector. + * + * - channel_creds: channel credential instance. + * - request_metadata_creds: credential object which will be sent with each + * request. This parameter can be nullptr. + * - target_name: the name of the endpoint that the channel is connecting to. + * - sc: address of ALTS channel security connector instance to be returned from + * the method. + * + * It returns GRPC_SECURITY_OK on success, and an error stauts code on failure. + */ +grpc_security_status grpc_alts_channel_security_connector_create( + grpc_channel_credentials* channel_creds, + grpc_call_credentials* request_metadata_creds, const char* target_name, + grpc_channel_security_connector** sc); + +/** + * This method creates an ALTS server security connector. + * + * - server_creds: server credential instance. + * - sc: address of ALTS server security connector instance to be returned from + * the method. + * + * It returns GRPC_SECURITY_OK on success, and an error status code on failure. + */ +grpc_security_status grpc_alts_server_security_connector_create( + grpc_server_credentials* server_creds, grpc_server_security_connector** sc); + +namespace grpc_core { +namespace internal { + +/* Exposed only for testing. */ +grpc_security_status grpc_alts_auth_context_from_tsi_peer( + const tsi_peer* peer, grpc_auth_context** ctx); + +} // namespace internal +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_ALTS_SECURITY_CONNECTOR_H \ + */ diff --git a/Sources/CgRPC/src/core/lib/security/security_connector/security_connector.cc b/Sources/CgRPC/src/core/lib/security/security_connector/security_connector.cc new file mode 100644 index 000000000..6eae30a6e --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/security_connector/security_connector.cc @@ -0,0 +1,1203 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/security/security_connector/security_connector.h" + +#include +#include + +#include +#include +#include +#include + +#include "src/core/ext/transport/chttp2/alpn/alpn.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/handshaker.h" +#include "src/core/lib/gpr/env.h" +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/load_file.h" +#include "src/core/lib/security/context/security_context.h" +#include "src/core/lib/security/credentials/credentials.h" +#include "src/core/lib/security/credentials/fake/fake_credentials.h" +#include "src/core/lib/security/credentials/ssl/ssl_credentials.h" +#include "src/core/lib/security/transport/secure_endpoint.h" +#include "src/core/lib/security/transport/security_handshaker.h" +#include "src/core/lib/security/transport/target_authority_table.h" +#include "src/core/tsi/fake_transport_security.h" +#include "src/core/tsi/ssl_transport_security.h" +#include "src/core/tsi/transport_security_adapter.h" + +grpc_core::DebugOnlyTraceFlag grpc_trace_security_connector_refcount( + false, "security_connector_refcount"); + +/* -- Constants. -- */ + +#ifndef INSTALL_PREFIX +static const char* installed_roots_path = "/usr/share/grpc/roots.pem"; +#else +static const char* installed_roots_path = + INSTALL_PREFIX "/share/grpc/roots.pem"; +#endif + +/* -- Overridden default roots. -- */ + +static grpc_ssl_roots_override_callback ssl_roots_override_cb = nullptr; + +void grpc_set_ssl_roots_override_callback(grpc_ssl_roots_override_callback cb) { + ssl_roots_override_cb = cb; +} + +/* -- Cipher suites. -- */ + +/* Defines the cipher suites that we accept by default. All these cipher suites + are compliant with HTTP2. */ +#define GRPC_SSL_CIPHER_SUITES \ + "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384" + +static gpr_once cipher_suites_once = GPR_ONCE_INIT; +static const char* cipher_suites = nullptr; + +static void init_cipher_suites(void) { + char* overridden = gpr_getenv("GRPC_SSL_CIPHER_SUITES"); + cipher_suites = overridden != nullptr ? overridden : GRPC_SSL_CIPHER_SUITES; +} + +static const char* ssl_cipher_suites(void) { + gpr_once_init(&cipher_suites_once, init_cipher_suites); + return cipher_suites; +} + +/* -- Common methods. -- */ + +/* Returns the first property with that name. 
*/ +const tsi_peer_property* tsi_peer_get_property_by_name(const tsi_peer* peer, + const char* name) { + size_t i; + if (peer == nullptr) return nullptr; + for (i = 0; i < peer->property_count; i++) { + const tsi_peer_property* property = &peer->properties[i]; + if (name == nullptr && property->name == nullptr) { + return property; + } + if (name != nullptr && property->name != nullptr && + strcmp(property->name, name) == 0) { + return property; + } + } + return nullptr; +} + +void grpc_channel_security_connector_add_handshakers( + grpc_channel_security_connector* connector, + grpc_handshake_manager* handshake_mgr) { + if (connector != nullptr) { + connector->add_handshakers(connector, handshake_mgr); + } +} + +void grpc_server_security_connector_add_handshakers( + grpc_server_security_connector* connector, + grpc_handshake_manager* handshake_mgr) { + if (connector != nullptr) { + connector->add_handshakers(connector, handshake_mgr); + } +} + +void grpc_security_connector_check_peer(grpc_security_connector* sc, + tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked) { + if (sc == nullptr) { + GRPC_CLOSURE_SCHED(on_peer_checked, + GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "cannot check peer -- no security connector")); + tsi_peer_destruct(&peer); + } else { + sc->vtable->check_peer(sc, peer, auth_context, on_peer_checked); + } +} + +int grpc_security_connector_cmp(grpc_security_connector* sc, + grpc_security_connector* other) { + if (sc == nullptr || other == nullptr) return GPR_ICMP(sc, other); + int c = GPR_ICMP(sc->vtable, other->vtable); + if (c != 0) return c; + return sc->vtable->cmp(sc, other); +} + +int grpc_channel_security_connector_cmp(grpc_channel_security_connector* sc1, + grpc_channel_security_connector* sc2) { + GPR_ASSERT(sc1->channel_creds != nullptr); + GPR_ASSERT(sc2->channel_creds != nullptr); + int c = GPR_ICMP(sc1->channel_creds, sc2->channel_creds); + if (c != 0) return c; + c = GPR_ICMP(sc1->request_metadata_creds, sc2->request_metadata_creds); + if (c != 0) return c; + c = GPR_ICMP((void*)sc1->check_call_host, (void*)sc2->check_call_host); + if (c != 0) return c; + c = GPR_ICMP((void*)sc1->cancel_check_call_host, + (void*)sc2->cancel_check_call_host); + if (c != 0) return c; + return GPR_ICMP((void*)sc1->add_handshakers, (void*)sc2->add_handshakers); +} + +int grpc_server_security_connector_cmp(grpc_server_security_connector* sc1, + grpc_server_security_connector* sc2) { + GPR_ASSERT(sc1->server_creds != nullptr); + GPR_ASSERT(sc2->server_creds != nullptr); + int c = GPR_ICMP(sc1->server_creds, sc2->server_creds); + if (c != 0) return c; + return GPR_ICMP((void*)sc1->add_handshakers, (void*)sc2->add_handshakers); +} + +bool grpc_channel_security_connector_check_call_host( + grpc_channel_security_connector* sc, const char* host, + grpc_auth_context* auth_context, grpc_closure* on_call_host_checked, + grpc_error** error) { + if (sc == nullptr || sc->check_call_host == nullptr) { + *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "cannot check call host -- no security connector"); + return true; + } + return sc->check_call_host(sc, host, auth_context, on_call_host_checked, + error); +} + +void grpc_channel_security_connector_cancel_check_call_host( + grpc_channel_security_connector* sc, grpc_closure* on_call_host_checked, + grpc_error* error) { + if (sc == nullptr || sc->cancel_check_call_host == nullptr) { + GRPC_ERROR_UNREF(error); + return; + } + sc->cancel_check_call_host(sc, on_call_host_checked, error); +} + +#ifndef NDEBUG 
+grpc_security_connector* grpc_security_connector_ref( + grpc_security_connector* sc, const char* file, int line, + const char* reason) { + if (sc == nullptr) return nullptr; + if (grpc_trace_security_connector_refcount.enabled()) { + gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count); + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, + "SECURITY_CONNECTOR:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", sc, + val, val + 1, reason); + } +#else +grpc_security_connector* grpc_security_connector_ref( + grpc_security_connector* sc) { + if (sc == nullptr) return nullptr; +#endif + gpr_ref(&sc->refcount); + return sc; +} + +#ifndef NDEBUG +void grpc_security_connector_unref(grpc_security_connector* sc, + const char* file, int line, + const char* reason) { + if (sc == nullptr) return; + if (grpc_trace_security_connector_refcount.enabled()) { + gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count); + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, + "SECURITY_CONNECTOR:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", sc, + val, val - 1, reason); + } +#else +void grpc_security_connector_unref(grpc_security_connector* sc) { + if (sc == nullptr) return; +#endif + if (gpr_unref(&sc->refcount)) sc->vtable->destroy(sc); +} + +static void connector_arg_destroy(void* p) { + GRPC_SECURITY_CONNECTOR_UNREF((grpc_security_connector*)p, + "connector_arg_destroy"); +} + +static void* connector_arg_copy(void* p) { + return GRPC_SECURITY_CONNECTOR_REF((grpc_security_connector*)p, + "connector_arg_copy"); +} + +static int connector_cmp(void* a, void* b) { + return grpc_security_connector_cmp(static_cast(a), + static_cast(b)); +} + +static const grpc_arg_pointer_vtable connector_arg_vtable = { + connector_arg_copy, connector_arg_destroy, connector_cmp}; + +grpc_arg grpc_security_connector_to_arg(grpc_security_connector* sc) { + return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SECURITY_CONNECTOR, sc, + &connector_arg_vtable); +} + +grpc_security_connector* grpc_security_connector_from_arg(const grpc_arg* arg) { + if (strcmp(arg->key, GRPC_ARG_SECURITY_CONNECTOR)) return nullptr; + if (arg->type != GRPC_ARG_POINTER) { + gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type, + GRPC_ARG_SECURITY_CONNECTOR); + return nullptr; + } + return static_cast(arg->value.pointer.p); +} + +grpc_security_connector* grpc_security_connector_find_in_args( + const grpc_channel_args* args) { + size_t i; + if (args == nullptr) return nullptr; + for (i = 0; i < args->num_args; i++) { + grpc_security_connector* sc = + grpc_security_connector_from_arg(&args->args[i]); + if (sc != nullptr) return sc; + } + return nullptr; +} + +static tsi_client_certificate_request_type +get_tsi_client_certificate_request_type( + grpc_ssl_client_certificate_request_type grpc_request_type) { + switch (grpc_request_type) { + case GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE: + return TSI_DONT_REQUEST_CLIENT_CERTIFICATE; + + case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + return TSI_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY; + + case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: + return TSI_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY; + + case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY; + + case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: + return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY; + + default: + return TSI_DONT_REQUEST_CLIENT_CERTIFICATE; + } +} + +/* -- Fake implementation. 
-- */ + +typedef struct { + grpc_channel_security_connector base; + char* target; + char* expected_targets; + bool is_lb_channel; + char* target_name_override; +} grpc_fake_channel_security_connector; + +static void fake_channel_destroy(grpc_security_connector* sc) { + grpc_fake_channel_security_connector* c = + reinterpret_cast(sc); + grpc_call_credentials_unref(c->base.request_metadata_creds); + gpr_free(c->target); + gpr_free(c->expected_targets); + gpr_free(c->target_name_override); + gpr_free(c); +} + +static void fake_server_destroy(grpc_security_connector* sc) { gpr_free(sc); } + +static bool fake_check_target(const char* target_type, const char* target, + const char* set_str) { + GPR_ASSERT(target_type != nullptr); + GPR_ASSERT(target != nullptr); + char** set = nullptr; + size_t set_size = 0; + gpr_string_split(set_str, ",", &set, &set_size); + bool found = false; + for (size_t i = 0; i < set_size; ++i) { + if (set[i] != nullptr && strcmp(target, set[i]) == 0) found = true; + } + for (size_t i = 0; i < set_size; ++i) { + gpr_free(set[i]); + } + gpr_free(set); + return found; +} + +static void fake_secure_name_check(const char* target, + const char* expected_targets, + bool is_lb_channel) { + if (expected_targets == nullptr) return; + char** lbs_and_backends = nullptr; + size_t lbs_and_backends_size = 0; + bool success = false; + gpr_string_split(expected_targets, ";", &lbs_and_backends, + &lbs_and_backends_size); + if (lbs_and_backends_size > 2 || lbs_and_backends_size == 0) { + gpr_log(GPR_ERROR, "Invalid expected targets arg value: '%s'", + expected_targets); + goto done; + } + if (is_lb_channel) { + if (lbs_and_backends_size != 2) { + gpr_log(GPR_ERROR, + "Invalid expected targets arg value: '%s'. Expectations for LB " + "channels must be of the form 'be1,be2,be3,...;lb1,lb2,...", + expected_targets); + goto done; + } + if (!fake_check_target("LB", target, lbs_and_backends[1])) { + gpr_log(GPR_ERROR, "LB target '%s' not found in expected set '%s'", + target, lbs_and_backends[1]); + goto done; + } + success = true; + } else { + if (!fake_check_target("Backend", target, lbs_and_backends[0])) { + gpr_log(GPR_ERROR, "Backend target '%s' not found in expected set '%s'", + target, lbs_and_backends[0]); + goto done; + } + success = true; + } +done: + for (size_t i = 0; i < lbs_and_backends_size; ++i) { + gpr_free(lbs_and_backends[i]); + } + gpr_free(lbs_and_backends); + if (!success) abort(); +} + +static void fake_check_peer(grpc_security_connector* sc, tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked) { + const char* prop_name; + grpc_error* error = GRPC_ERROR_NONE; + *auth_context = nullptr; + if (peer.property_count != 1) { + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Fake peers should only have 1 property."); + goto end; + } + prop_name = peer.properties[0].name; + if (prop_name == nullptr || + strcmp(prop_name, TSI_CERTIFICATE_TYPE_PEER_PROPERTY)) { + char* msg; + gpr_asprintf(&msg, "Unexpected property in fake peer: %s.", + prop_name == nullptr ? 
"" : prop_name); + error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + goto end; + } + if (strncmp(peer.properties[0].value.data, TSI_FAKE_CERTIFICATE_TYPE, + peer.properties[0].value.length)) { + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Invalid value for cert type property."); + goto end; + } + *auth_context = grpc_auth_context_create(nullptr); + grpc_auth_context_add_cstring_property( + *auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME, + GRPC_FAKE_TRANSPORT_SECURITY_TYPE); +end: + GRPC_CLOSURE_SCHED(on_peer_checked, error); + tsi_peer_destruct(&peer); +} + +static void fake_channel_check_peer(grpc_security_connector* sc, tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked) { + fake_check_peer(sc, peer, auth_context, on_peer_checked); + grpc_fake_channel_security_connector* c = + reinterpret_cast(sc); + fake_secure_name_check(c->target, c->expected_targets, c->is_lb_channel); +} + +static void fake_server_check_peer(grpc_security_connector* sc, tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked) { + fake_check_peer(sc, peer, auth_context, on_peer_checked); +} + +static int fake_channel_cmp(grpc_security_connector* sc1, + grpc_security_connector* sc2) { + grpc_fake_channel_security_connector* c1 = + reinterpret_cast(sc1); + grpc_fake_channel_security_connector* c2 = + reinterpret_cast(sc2); + int c = grpc_channel_security_connector_cmp(&c1->base, &c2->base); + if (c != 0) return c; + c = strcmp(c1->target, c2->target); + if (c != 0) return c; + if (c1->expected_targets == nullptr || c2->expected_targets == nullptr) { + c = GPR_ICMP(c1->expected_targets, c2->expected_targets); + } else { + c = strcmp(c1->expected_targets, c2->expected_targets); + } + if (c != 0) return c; + return GPR_ICMP(c1->is_lb_channel, c2->is_lb_channel); +} + +static int fake_server_cmp(grpc_security_connector* sc1, + grpc_security_connector* sc2) { + return grpc_server_security_connector_cmp( + reinterpret_cast(sc1), + reinterpret_cast(sc2)); +} + +static bool fake_channel_check_call_host(grpc_channel_security_connector* sc, + const char* host, + grpc_auth_context* auth_context, + grpc_closure* on_call_host_checked, + grpc_error** error) { + grpc_fake_channel_security_connector* c = + reinterpret_cast(sc); + char* authority_hostname = nullptr; + char* authority_ignored_port = nullptr; + char* target_hostname = nullptr; + char* target_ignored_port = nullptr; + gpr_split_host_port(host, &authority_hostname, &authority_ignored_port); + gpr_split_host_port(c->target, &target_hostname, &target_ignored_port); + if (c->target_name_override != nullptr) { + char* fake_security_target_name_override_hostname = nullptr; + char* fake_security_target_name_override_ignored_port = nullptr; + gpr_split_host_port(c->target_name_override, + &fake_security_target_name_override_hostname, + &fake_security_target_name_override_ignored_port); + if (strcmp(authority_hostname, + fake_security_target_name_override_hostname) != 0) { + gpr_log(GPR_ERROR, + "Authority (host) '%s' != Fake Security Target override '%s'", + host, fake_security_target_name_override_hostname); + abort(); + } + gpr_free(fake_security_target_name_override_hostname); + gpr_free(fake_security_target_name_override_ignored_port); + } else if (strcmp(authority_hostname, target_hostname) != 0) { + gpr_log(GPR_ERROR, "Authority (host) '%s' != Target '%s'", + authority_hostname, target_hostname); + abort(); + } + gpr_free(authority_hostname); + gpr_free(authority_ignored_port); + 
gpr_free(target_hostname); + gpr_free(target_ignored_port); + return true; +} + +static void fake_channel_cancel_check_call_host( + grpc_channel_security_connector* sc, grpc_closure* on_call_host_checked, + grpc_error* error) { + GRPC_ERROR_UNREF(error); +} + +static void fake_channel_add_handshakers( + grpc_channel_security_connector* sc, + grpc_handshake_manager* handshake_mgr) { + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create( + tsi_create_fake_handshaker(true /* is_client */), &sc->base)); +} + +static void fake_server_add_handshakers(grpc_server_security_connector* sc, + grpc_handshake_manager* handshake_mgr) { + grpc_handshake_manager_add( + handshake_mgr, + grpc_security_handshaker_create( + tsi_create_fake_handshaker(false /* is_client */), &sc->base)); +} + +static grpc_security_connector_vtable fake_channel_vtable = { + fake_channel_destroy, fake_channel_check_peer, fake_channel_cmp}; + +static grpc_security_connector_vtable fake_server_vtable = { + fake_server_destroy, fake_server_check_peer, fake_server_cmp}; + +grpc_channel_security_connector* grpc_fake_channel_security_connector_create( + grpc_channel_credentials* channel_creds, + grpc_call_credentials* request_metadata_creds, const char* target, + const grpc_channel_args* args) { + grpc_fake_channel_security_connector* c = + static_cast( + gpr_zalloc(sizeof(*c))); + gpr_ref_init(&c->base.base.refcount, 1); + c->base.base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME; + c->base.base.vtable = &fake_channel_vtable; + c->base.channel_creds = channel_creds; + c->base.request_metadata_creds = + grpc_call_credentials_ref(request_metadata_creds); + c->base.check_call_host = fake_channel_check_call_host; + c->base.cancel_check_call_host = fake_channel_cancel_check_call_host; + c->base.add_handshakers = fake_channel_add_handshakers; + c->target = gpr_strdup(target); + const char* expected_targets = grpc_fake_transport_get_expected_targets(args); + c->expected_targets = gpr_strdup(expected_targets); + c->is_lb_channel = grpc_core::FindTargetAuthorityTableInArgs(args) != nullptr; + const grpc_arg* target_name_override_arg = + grpc_channel_args_find(args, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); + if (target_name_override_arg != nullptr) { + c->target_name_override = + gpr_strdup(grpc_channel_arg_get_string(target_name_override_arg)); + } + return &c->base; +} + +grpc_server_security_connector* grpc_fake_server_security_connector_create( + grpc_server_credentials* server_creds) { + grpc_server_security_connector* c = + static_cast( + gpr_zalloc(sizeof(grpc_server_security_connector))); + gpr_ref_init(&c->base.refcount, 1); + c->base.vtable = &fake_server_vtable; + c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME; + c->server_creds = server_creds; + c->add_handshakers = fake_server_add_handshakers; + return c; +} + +/* --- Ssl implementation. --- */ + +grpc_ssl_session_cache* grpc_ssl_session_cache_create_lru(size_t capacity) { + tsi_ssl_session_cache* cache = tsi_ssl_session_cache_create_lru(capacity); + return reinterpret_cast(cache); +} + +void grpc_ssl_session_cache_destroy(grpc_ssl_session_cache* cache) { + tsi_ssl_session_cache* tsi_cache = + reinterpret_cast(cache); + tsi_ssl_session_cache_unref(tsi_cache); +} + +static void* grpc_ssl_session_cache_arg_copy(void* p) { + tsi_ssl_session_cache* tsi_cache = + reinterpret_cast(p); + // destroy call below will unref the pointer. 
+ tsi_ssl_session_cache_ref(tsi_cache); + return p; +} + +static void grpc_ssl_session_cache_arg_destroy(void* p) { + tsi_ssl_session_cache* tsi_cache = + reinterpret_cast(p); + tsi_ssl_session_cache_unref(tsi_cache); +} + +static int grpc_ssl_session_cache_arg_cmp(void* p, void* q) { + return GPR_ICMP(p, q); +} + +grpc_arg grpc_ssl_session_cache_create_channel_arg( + grpc_ssl_session_cache* cache) { + static const grpc_arg_pointer_vtable vtable = { + grpc_ssl_session_cache_arg_copy, + grpc_ssl_session_cache_arg_destroy, + grpc_ssl_session_cache_arg_cmp, + }; + return grpc_channel_arg_pointer_create( + const_cast(GRPC_SSL_SESSION_CACHE_ARG), cache, &vtable); +} + +typedef struct { + grpc_channel_security_connector base; + tsi_ssl_client_handshaker_factory* client_handshaker_factory; + char* target_name; + char* overridden_target_name; +} grpc_ssl_channel_security_connector; + +typedef struct { + grpc_server_security_connector base; + tsi_ssl_server_handshaker_factory* server_handshaker_factory; +} grpc_ssl_server_security_connector; + +static bool server_connector_has_cert_config_fetcher( + grpc_ssl_server_security_connector* c) { + GPR_ASSERT(c != nullptr); + grpc_ssl_server_credentials* server_creds = + reinterpret_cast(c->base.server_creds); + GPR_ASSERT(server_creds != nullptr); + return server_creds->certificate_config_fetcher.cb != nullptr; +} + +static void ssl_channel_destroy(grpc_security_connector* sc) { + grpc_ssl_channel_security_connector* c = + reinterpret_cast(sc); + grpc_channel_credentials_unref(c->base.channel_creds); + grpc_call_credentials_unref(c->base.request_metadata_creds); + tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory); + c->client_handshaker_factory = nullptr; + if (c->target_name != nullptr) gpr_free(c->target_name); + if (c->overridden_target_name != nullptr) gpr_free(c->overridden_target_name); + gpr_free(sc); +} + +static void ssl_server_destroy(grpc_security_connector* sc) { + grpc_ssl_server_security_connector* c = + reinterpret_cast(sc); + grpc_server_credentials_unref(c->base.server_creds); + tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory); + c->server_handshaker_factory = nullptr; + gpr_free(sc); +} + +static void ssl_channel_add_handshakers(grpc_channel_security_connector* sc, + grpc_handshake_manager* handshake_mgr) { + grpc_ssl_channel_security_connector* c = + reinterpret_cast(sc); + // Instantiate TSI handshaker. + tsi_handshaker* tsi_hs = nullptr; + tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker( + c->client_handshaker_factory, + c->overridden_target_name != nullptr ? c->overridden_target_name + : c->target_name, + &tsi_hs); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.", + tsi_result_to_string(result)); + return; + } + // Create handshakers. 
+ grpc_handshake_manager_add( + handshake_mgr, grpc_security_handshaker_create( + tsi_create_adapter_handshaker(tsi_hs), &sc->base)); +} + +static const char** fill_alpn_protocol_strings(size_t* num_alpn_protocols) { + GPR_ASSERT(num_alpn_protocols != nullptr); + *num_alpn_protocols = grpc_chttp2_num_alpn_versions(); + const char** alpn_protocol_strings = static_cast( + gpr_malloc(sizeof(const char*) * (*num_alpn_protocols))); + for (size_t i = 0; i < *num_alpn_protocols; i++) { + alpn_protocol_strings[i] = grpc_chttp2_get_alpn_version_index(i); + } + return alpn_protocol_strings; +} + +/* Attempts to replace the server_handshaker_factory with a new factory using + * the provided grpc_ssl_server_certificate_config. Should new factory creation + * fail, the existing factory will not be replaced. Returns true on success (new + * factory created). */ +static bool try_replace_server_handshaker_factory( + grpc_ssl_server_security_connector* sc, + const grpc_ssl_server_certificate_config* config) { + if (config == nullptr) { + gpr_log(GPR_ERROR, + "Server certificate config callback returned invalid (NULL) " + "config."); + return false; + } + gpr_log(GPR_DEBUG, "Using new server certificate config (%p).", config); + + size_t num_alpn_protocols = 0; + const char** alpn_protocol_strings = + fill_alpn_protocol_strings(&num_alpn_protocols); + tsi_ssl_pem_key_cert_pair* cert_pairs = grpc_convert_grpc_to_tsi_cert_pairs( + config->pem_key_cert_pairs, config->num_key_cert_pairs); + tsi_ssl_server_handshaker_factory* new_handshaker_factory = nullptr; + grpc_ssl_server_credentials* server_creds = + reinterpret_cast(sc->base.server_creds); + tsi_result result = tsi_create_ssl_server_handshaker_factory_ex( + cert_pairs, config->num_key_cert_pairs, config->pem_root_certs, + get_tsi_client_certificate_request_type( + server_creds->config.client_certificate_request), + ssl_cipher_suites(), alpn_protocol_strings, + static_cast(num_alpn_protocols), &new_handshaker_factory); + gpr_free(cert_pairs); + gpr_free((void*)alpn_protocol_strings); + + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", + tsi_result_to_string(result)); + return false; + } + tsi_ssl_server_handshaker_factory_unref(sc->server_handshaker_factory); + sc->server_handshaker_factory = new_handshaker_factory; + return true; +} + +/* Attempts to fetch the server certificate config if a callback is available. + * Current certificate config will continue to be used if the callback returns + * an error. Returns true if new credentials were sucessfully loaded. */ +static bool try_fetch_ssl_server_credentials( + grpc_ssl_server_security_connector* sc) { + grpc_ssl_server_certificate_config* certificate_config = nullptr; + bool status; + + GPR_ASSERT(sc != nullptr); + if (!server_connector_has_cert_config_fetcher(sc)) return false; + + grpc_ssl_server_credentials* server_creds = + reinterpret_cast(sc->base.server_creds); + grpc_ssl_certificate_config_reload_status cb_result = + server_creds->certificate_config_fetcher.cb( + server_creds->certificate_config_fetcher.user_data, + &certificate_config); + if (cb_result == GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED) { + gpr_log(GPR_DEBUG, "No change in SSL server credentials."); + status = false; + } else if (cb_result == GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW) { + status = try_replace_server_handshaker_factory(sc, certificate_config); + } else { + // Log error, continue using previously-loaded credentials. 
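// A hedged sketch of the kind of certificate_config_fetcher callback consumed
// by try_fetch_ssl_server_credentials() above, assuming the public
// grpc_ssl_server_certificate_config_create() helper; the PEM constants are
// placeholders and a real fetcher would reload them from storage. The core
// destroys the returned config after use, as shown a few lines below.
#include <grpc/grpc_security.h>

static const char kPemRoots[] = "-----BEGIN CERTIFICATE-----\n...";
static const char kPemPrivateKey[] = "-----BEGIN PRIVATE KEY-----\n...";
static const char kPemCertChain[] = "-----BEGIN CERTIFICATE-----\n...";

static grpc_ssl_certificate_config_reload_status fetch_server_cert_config(
    void* user_data, grpc_ssl_server_certificate_config** config) {
  (void)user_data;
  grpc_ssl_pem_key_cert_pair pair = {kPemPrivateKey, kPemCertChain};
  *config = grpc_ssl_server_certificate_config_create(kPemRoots, &pair, 1);
  return *config != nullptr ? GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW
                            : GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL;
}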
+ gpr_log(GPR_ERROR, + "Failed fetching new server credentials, continuing to " + "use previously-loaded credentials."); + status = false; + } + + if (certificate_config != nullptr) { + grpc_ssl_server_certificate_config_destroy(certificate_config); + } + return status; +} + +static void ssl_server_add_handshakers(grpc_server_security_connector* sc, + grpc_handshake_manager* handshake_mgr) { + grpc_ssl_server_security_connector* c = + reinterpret_cast(sc); + // Instantiate TSI handshaker. + try_fetch_ssl_server_credentials(c); + tsi_handshaker* tsi_hs = nullptr; + tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker( + c->server_handshaker_factory, &tsi_hs); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.", + tsi_result_to_string(result)); + return; + } + // Create handshakers. + grpc_handshake_manager_add( + handshake_mgr, grpc_security_handshaker_create( + tsi_create_adapter_handshaker(tsi_hs), &sc->base)); +} + +int grpc_ssl_host_matches_name(const tsi_peer* peer, const char* peer_name) { + char* allocated_name = nullptr; + int r; + + char* ignored_port; + gpr_split_host_port(peer_name, &allocated_name, &ignored_port); + gpr_free(ignored_port); + peer_name = allocated_name; + if (!peer_name) return 0; + + // IPv6 zone-id should not be included in comparisons. + char* const zone_id = strchr(allocated_name, '%'); + if (zone_id != nullptr) *zone_id = '\0'; + + r = tsi_ssl_peer_matches_name(peer, peer_name); + gpr_free(allocated_name); + return r; +} + +grpc_auth_context* grpc_ssl_peer_to_auth_context(const tsi_peer* peer) { + size_t i; + grpc_auth_context* ctx = nullptr; + const char* peer_identity_property_name = nullptr; + + /* The caller has checked the certificate type property. */ + GPR_ASSERT(peer->property_count >= 1); + ctx = grpc_auth_context_create(nullptr); + grpc_auth_context_add_cstring_property( + ctx, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME, + GRPC_SSL_TRANSPORT_SECURITY_TYPE); + for (i = 0; i < peer->property_count; i++) { + const tsi_peer_property* prop = &peer->properties[i]; + if (prop->name == nullptr) continue; + if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) { + /* If there is no subject alt name, have the CN as the identity. */ + if (peer_identity_property_name == nullptr) { + peer_identity_property_name = GRPC_X509_CN_PROPERTY_NAME; + } + grpc_auth_context_add_property(ctx, GRPC_X509_CN_PROPERTY_NAME, + prop->value.data, prop->value.length); + } else if (strcmp(prop->name, + TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) { + peer_identity_property_name = GRPC_X509_SAN_PROPERTY_NAME; + grpc_auth_context_add_property(ctx, GRPC_X509_SAN_PROPERTY_NAME, + prop->value.data, prop->value.length); + } else if (strcmp(prop->name, TSI_X509_PEM_CERT_PROPERTY) == 0) { + grpc_auth_context_add_property(ctx, GRPC_X509_PEM_CERT_PROPERTY_NAME, + prop->value.data, prop->value.length); + } else if (strcmp(prop->name, TSI_SSL_SESSION_REUSED_PEER_PROPERTY) == 0) { + grpc_auth_context_add_property(ctx, GRPC_SSL_SESSION_REUSED_PROPERTY, + prop->value.data, prop->value.length); + } + } + if (peer_identity_property_name != nullptr) { + GPR_ASSERT(grpc_auth_context_set_peer_identity_property_name( + ctx, peer_identity_property_name) == 1); + } + return ctx; +} + +static grpc_error* ssl_check_peer(grpc_security_connector* sc, + const char* peer_name, const tsi_peer* peer, + grpc_auth_context** auth_context) { + /* Check the ALPN. 
*/ + const tsi_peer_property* p = + tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL); + if (p == nullptr) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Cannot check peer: missing selected ALPN property."); + } + if (!grpc_chttp2_is_alpn_version_supported(p->value.data, p->value.length)) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Cannot check peer: invalid ALPN value."); + } + + /* Check the peer name if specified. */ + if (peer_name != nullptr && !grpc_ssl_host_matches_name(peer, peer_name)) { + char* msg; + gpr_asprintf(&msg, "Peer name %s is not in peer certificate", peer_name); + grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + return error; + } + *auth_context = grpc_ssl_peer_to_auth_context(peer); + return GRPC_ERROR_NONE; +} + +static void ssl_channel_check_peer(grpc_security_connector* sc, tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked) { + grpc_ssl_channel_security_connector* c = + reinterpret_cast(sc); + grpc_error* error = ssl_check_peer(sc, + c->overridden_target_name != nullptr + ? c->overridden_target_name + : c->target_name, + &peer, auth_context); + GRPC_CLOSURE_SCHED(on_peer_checked, error); + tsi_peer_destruct(&peer); +} + +static void ssl_server_check_peer(grpc_security_connector* sc, tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked) { + grpc_error* error = ssl_check_peer(sc, nullptr, &peer, auth_context); + tsi_peer_destruct(&peer); + GRPC_CLOSURE_SCHED(on_peer_checked, error); +} + +static int ssl_channel_cmp(grpc_security_connector* sc1, + grpc_security_connector* sc2) { + grpc_ssl_channel_security_connector* c1 = + reinterpret_cast(sc1); + grpc_ssl_channel_security_connector* c2 = + reinterpret_cast(sc2); + int c = grpc_channel_security_connector_cmp(&c1->base, &c2->base); + if (c != 0) return c; + c = strcmp(c1->target_name, c2->target_name); + if (c != 0) return c; + return (c1->overridden_target_name == nullptr || + c2->overridden_target_name == nullptr) + ? 
GPR_ICMP(c1->overridden_target_name, c2->overridden_target_name) + : strcmp(c1->overridden_target_name, c2->overridden_target_name); +} + +static int ssl_server_cmp(grpc_security_connector* sc1, + grpc_security_connector* sc2) { + return grpc_server_security_connector_cmp( + reinterpret_cast(sc1), + reinterpret_cast(sc2)); +} + +static void add_shallow_auth_property_to_peer(tsi_peer* peer, + const grpc_auth_property* prop, + const char* tsi_prop_name) { + tsi_peer_property* tsi_prop = &peer->properties[peer->property_count++]; + tsi_prop->name = const_cast(tsi_prop_name); + tsi_prop->value.data = prop->value; + tsi_prop->value.length = prop->value_length; +} + +tsi_peer grpc_shallow_peer_from_ssl_auth_context( + const grpc_auth_context* auth_context) { + size_t max_num_props = 0; + grpc_auth_property_iterator it; + const grpc_auth_property* prop; + tsi_peer peer; + memset(&peer, 0, sizeof(peer)); + + it = grpc_auth_context_property_iterator(auth_context); + while (grpc_auth_property_iterator_next(&it) != nullptr) max_num_props++; + + if (max_num_props > 0) { + peer.properties = static_cast( + gpr_malloc(max_num_props * sizeof(tsi_peer_property))); + it = grpc_auth_context_property_iterator(auth_context); + while ((prop = grpc_auth_property_iterator_next(&it)) != nullptr) { + if (strcmp(prop->name, GRPC_X509_SAN_PROPERTY_NAME) == 0) { + add_shallow_auth_property_to_peer( + &peer, prop, TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY); + } else if (strcmp(prop->name, GRPC_X509_CN_PROPERTY_NAME) == 0) { + add_shallow_auth_property_to_peer( + &peer, prop, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY); + } else if (strcmp(prop->name, GRPC_X509_PEM_CERT_PROPERTY_NAME) == 0) { + add_shallow_auth_property_to_peer(&peer, prop, + TSI_X509_PEM_CERT_PROPERTY); + } + } + } + return peer; +} + +void grpc_shallow_peer_destruct(tsi_peer* peer) { + if (peer->properties != nullptr) gpr_free(peer->properties); +} + +static bool ssl_channel_check_call_host(grpc_channel_security_connector* sc, + const char* host, + grpc_auth_context* auth_context, + grpc_closure* on_call_host_checked, + grpc_error** error) { + grpc_ssl_channel_security_connector* c = + reinterpret_cast(sc); + grpc_security_status status = GRPC_SECURITY_ERROR; + tsi_peer peer = grpc_shallow_peer_from_ssl_auth_context(auth_context); + if (grpc_ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK; + /* If the target name was overridden, then the original target_name was + 'checked' transitively during the previous peer check at the end of the + handshake. 
*/ + if (c->overridden_target_name != nullptr && + strcmp(host, c->target_name) == 0) { + status = GRPC_SECURITY_OK; + } + if (status != GRPC_SECURITY_OK) { + *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "call host does not match SSL server name"); + } + grpc_shallow_peer_destruct(&peer); + return true; +} + +static void ssl_channel_cancel_check_call_host( + grpc_channel_security_connector* sc, grpc_closure* on_call_host_checked, + grpc_error* error) { + GRPC_ERROR_UNREF(error); +} + +static grpc_security_connector_vtable ssl_channel_vtable = { + ssl_channel_destroy, ssl_channel_check_peer, ssl_channel_cmp}; + +static grpc_security_connector_vtable ssl_server_vtable = { + ssl_server_destroy, ssl_server_check_peer, ssl_server_cmp}; + +grpc_security_status grpc_ssl_channel_security_connector_create( + grpc_channel_credentials* channel_creds, + grpc_call_credentials* request_metadata_creds, + const grpc_ssl_config* config, const char* target_name, + const char* overridden_target_name, + tsi_ssl_session_cache* ssl_session_cache, + grpc_channel_security_connector** sc) { + tsi_result result = TSI_OK; + grpc_ssl_channel_security_connector* c; + char* port; + bool has_key_cert_pair; + tsi_ssl_client_handshaker_options options; + memset(&options, 0, sizeof(options)); + options.alpn_protocols = + fill_alpn_protocol_strings(&options.num_alpn_protocols); + + if (config == nullptr || target_name == nullptr) { + gpr_log(GPR_ERROR, "An ssl channel needs a config and a target name."); + goto error; + } + if (config->pem_root_certs == nullptr) { + // Use default root certificates. + options.pem_root_certs = grpc_core::DefaultSslRootStore::GetPemRootCerts(); + options.root_store = grpc_core::DefaultSslRootStore::GetRootStore(); + if (options.pem_root_certs == nullptr) { + gpr_log(GPR_ERROR, "Could not get default pem root certs."); + goto error; + } + } else { + options.pem_root_certs = config->pem_root_certs; + } + c = static_cast( + gpr_zalloc(sizeof(grpc_ssl_channel_security_connector))); + + gpr_ref_init(&c->base.base.refcount, 1); + c->base.base.vtable = &ssl_channel_vtable; + c->base.base.url_scheme = GRPC_SSL_URL_SCHEME; + c->base.channel_creds = grpc_channel_credentials_ref(channel_creds); + c->base.request_metadata_creds = + grpc_call_credentials_ref(request_metadata_creds); + c->base.check_call_host = ssl_channel_check_call_host; + c->base.cancel_check_call_host = ssl_channel_cancel_check_call_host; + c->base.add_handshakers = ssl_channel_add_handshakers; + gpr_split_host_port(target_name, &c->target_name, &port); + gpr_free(port); + if (overridden_target_name != nullptr) { + c->overridden_target_name = gpr_strdup(overridden_target_name); + } + + has_key_cert_pair = config->pem_key_cert_pair != nullptr && + config->pem_key_cert_pair->private_key != nullptr && + config->pem_key_cert_pair->cert_chain != nullptr; + if (has_key_cert_pair) { + options.pem_key_cert_pair = config->pem_key_cert_pair; + } + options.cipher_suites = ssl_cipher_suites(); + options.session_cache = ssl_session_cache; + result = tsi_create_ssl_client_handshaker_factory_with_options( + &options, &c->client_handshaker_factory); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", + tsi_result_to_string(result)); + ssl_channel_destroy(&c->base.base); + *sc = nullptr; + goto error; + } + *sc = &c->base; + gpr_free((void*)options.alpn_protocols); + return GRPC_SECURITY_OK; + +error: + gpr_free((void*)options.alpn_protocols); + return GRPC_SECURITY_ERROR; +} + +static 
grpc_ssl_server_security_connector* +grpc_ssl_server_security_connector_initialize( + grpc_server_credentials* server_creds) { + grpc_ssl_server_security_connector* c = + static_cast( + gpr_zalloc(sizeof(grpc_ssl_server_security_connector))); + gpr_ref_init(&c->base.base.refcount, 1); + c->base.base.url_scheme = GRPC_SSL_URL_SCHEME; + c->base.base.vtable = &ssl_server_vtable; + c->base.add_handshakers = ssl_server_add_handshakers; + c->base.server_creds = grpc_server_credentials_ref(server_creds); + return c; +} + +grpc_security_status grpc_ssl_server_security_connector_create( + grpc_server_credentials* gsc, grpc_server_security_connector** sc) { + tsi_result result = TSI_OK; + grpc_ssl_server_credentials* server_credentials = + reinterpret_cast(gsc); + grpc_security_status retval = GRPC_SECURITY_OK; + + GPR_ASSERT(server_credentials != nullptr); + GPR_ASSERT(sc != nullptr); + + grpc_ssl_server_security_connector* c = + grpc_ssl_server_security_connector_initialize(gsc); + if (server_connector_has_cert_config_fetcher(c)) { + // Load initial credentials from certificate_config_fetcher: + if (!try_fetch_ssl_server_credentials(c)) { + gpr_log(GPR_ERROR, "Failed loading SSL server credentials from fetcher."); + retval = GRPC_SECURITY_ERROR; + } + } else { + size_t num_alpn_protocols = 0; + const char** alpn_protocol_strings = + fill_alpn_protocol_strings(&num_alpn_protocols); + result = tsi_create_ssl_server_handshaker_factory_ex( + server_credentials->config.pem_key_cert_pairs, + server_credentials->config.num_key_cert_pairs, + server_credentials->config.pem_root_certs, + get_tsi_client_certificate_request_type( + server_credentials->config.client_certificate_request), + ssl_cipher_suites(), alpn_protocol_strings, + static_cast(num_alpn_protocols), + &c->server_handshaker_factory); + gpr_free((void*)alpn_protocol_strings); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", + tsi_result_to_string(result)); + retval = GRPC_SECURITY_ERROR; + } + } + + if (retval == GRPC_SECURITY_OK) { + *sc = &c->base; + } else { + if (c != nullptr) ssl_server_destroy(&c->base.base); + if (sc != nullptr) *sc = nullptr; + } + return retval; +} + +namespace grpc_core { + +tsi_ssl_root_certs_store* DefaultSslRootStore::default_root_store_; +grpc_slice DefaultSslRootStore::default_pem_root_certs_; + +const tsi_ssl_root_certs_store* DefaultSslRootStore::GetRootStore() { + InitRootStore(); + return default_root_store_; +} + +const char* DefaultSslRootStore::GetPemRootCerts() { + InitRootStore(); + return GRPC_SLICE_IS_EMPTY(default_pem_root_certs_) + ? nullptr + : reinterpret_cast + GRPC_SLICE_START_PTR(default_pem_root_certs_); +} + +grpc_slice DefaultSslRootStore::ComputePemRootCerts() { + grpc_slice result = grpc_empty_slice(); + // First try to load the roots from the environment. + char* default_root_certs_path = + gpr_getenv(GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR); + if (default_root_certs_path != nullptr) { + GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(default_root_certs_path, 1, &result)); + gpr_free(default_root_certs_path); + } + // Try overridden roots if needed. 
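// The env-var / override / installed-roots fallback implemented here hinges on
// the callback registered via grpc_set_ssl_roots_override_callback() earlier
// in this file. A minimal sketch of such a callback, with a placeholder PEM
// bundle; ownership of the returned buffer passes to the core, which releases
// it with gpr_free() as seen in ComputePemRootCerts().
#include <grpc/grpc_security.h>
#include <grpc/support/string_util.h>

static const char kOverrideRoots[] = "-----BEGIN CERTIFICATE-----\n...";

static grpc_ssl_roots_override_result override_ssl_roots(
    char** pem_root_certs) {
  *pem_root_certs = gpr_strdup(kOverrideRoots);
  return *pem_root_certs != nullptr ? GRPC_SSL_ROOTS_OVERRIDE_OK
                                    : GRPC_SSL_ROOTS_OVERRIDE_FAIL;
}

// Registered once at startup, before the first secure channel is created:
// grpc_set_ssl_roots_override_callback(override_ssl_roots);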
+ grpc_ssl_roots_override_result ovrd_res = GRPC_SSL_ROOTS_OVERRIDE_FAIL; + if (GRPC_SLICE_IS_EMPTY(result) && ssl_roots_override_cb != nullptr) { + char* pem_root_certs = nullptr; + ovrd_res = ssl_roots_override_cb(&pem_root_certs); + if (ovrd_res == GRPC_SSL_ROOTS_OVERRIDE_OK) { + GPR_ASSERT(pem_root_certs != nullptr); + result = grpc_slice_from_copied_buffer( + pem_root_certs, + strlen(pem_root_certs) + 1); // nullptr terminator. + } + gpr_free(pem_root_certs); + } + // Fall back to installed certs if needed. + if (GRPC_SLICE_IS_EMPTY(result) && + ovrd_res != GRPC_SSL_ROOTS_OVERRIDE_FAIL_PERMANENTLY) { + GRPC_LOG_IF_ERROR("load_file", + grpc_load_file(installed_roots_path, 1, &result)); + } + return result; +} + +void DefaultSslRootStore::InitRootStore() { + static gpr_once once = GPR_ONCE_INIT; + gpr_once_init(&once, DefaultSslRootStore::InitRootStoreOnce); +} + +void DefaultSslRootStore::InitRootStoreOnce() { + default_pem_root_certs_ = ComputePemRootCerts(); + if (!GRPC_SLICE_IS_EMPTY(default_pem_root_certs_)) { + default_root_store_ = + tsi_ssl_root_certs_store_create(reinterpret_cast( + GRPC_SLICE_START_PTR(default_pem_root_certs_))); + } +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/security/security_connector/security_connector.h b/Sources/CgRPC/src/core/lib/security/security_connector/security_connector.h new file mode 100644 index 000000000..f9723166d --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/security_connector/security_connector.h @@ -0,0 +1,284 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_SECURITY_CONNECTOR_H +#define GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_SECURITY_CONNECTOR_H + +#include + +#include + +#include + +#include "src/core/lib/channel/handshaker.h" +#include "src/core/lib/iomgr/endpoint.h" +#include "src/core/lib/iomgr/tcp_server.h" +#include "src/core/tsi/ssl_transport_security.h" +#include "src/core/tsi/transport_security_interface.h" + +extern grpc_core::DebugOnlyTraceFlag grpc_trace_security_connector_refcount; + +/* --- status enum. --- */ + +typedef enum { GRPC_SECURITY_OK = 0, GRPC_SECURITY_ERROR } grpc_security_status; + +/* --- URL schemes. --- */ + +#define GRPC_SSL_URL_SCHEME "https" +#define GRPC_FAKE_SECURITY_URL_SCHEME "http+fake_security" + +/* --- security_connector object. --- + + A security connector object represents away to configure the underlying + transport security mechanism and check the resulting trusted peer. 
*/ + +typedef struct grpc_security_connector grpc_security_connector; + +#define GRPC_ARG_SECURITY_CONNECTOR "grpc.security_connector" + +typedef struct { + void (*destroy)(grpc_security_connector* sc); + void (*check_peer)(grpc_security_connector* sc, tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked); + int (*cmp)(grpc_security_connector* sc, grpc_security_connector* other); +} grpc_security_connector_vtable; + +struct grpc_security_connector { + const grpc_security_connector_vtable* vtable; + gpr_refcount refcount; + const char* url_scheme; +}; + +/* Refcounting. */ +#ifndef NDEBUG +#define GRPC_SECURITY_CONNECTOR_REF(p, r) \ + grpc_security_connector_ref((p), __FILE__, __LINE__, (r)) +#define GRPC_SECURITY_CONNECTOR_UNREF(p, r) \ + grpc_security_connector_unref((p), __FILE__, __LINE__, (r)) +grpc_security_connector* grpc_security_connector_ref( + grpc_security_connector* policy, const char* file, int line, + const char* reason); +void grpc_security_connector_unref(grpc_security_connector* policy, + const char* file, int line, + const char* reason); +#else +#define GRPC_SECURITY_CONNECTOR_REF(p, r) grpc_security_connector_ref((p)) +#define GRPC_SECURITY_CONNECTOR_UNREF(p, r) grpc_security_connector_unref((p)) +grpc_security_connector* grpc_security_connector_ref( + grpc_security_connector* policy); +void grpc_security_connector_unref(grpc_security_connector* policy); +#endif + +/* Check the peer. Callee takes ownership of the peer object. + When done, sets *auth_context and invokes on_peer_checked. */ +void grpc_security_connector_check_peer(grpc_security_connector* sc, + tsi_peer peer, + grpc_auth_context** auth_context, + grpc_closure* on_peer_checked); + +/* Compares two security connectors. */ +int grpc_security_connector_cmp(grpc_security_connector* sc, + grpc_security_connector* other); + +/* Util to encapsulate the connector in a channel arg. */ +grpc_arg grpc_security_connector_to_arg(grpc_security_connector* sc); + +/* Util to get the connector from a channel arg. */ +grpc_security_connector* grpc_security_connector_from_arg(const grpc_arg* arg); + +/* Util to find the connector from channel args. */ +grpc_security_connector* grpc_security_connector_find_in_args( + const grpc_channel_args* args); + +/* --- channel_security_connector object. --- + + A channel security connector object represents a way to configure the + underlying transport security mechanism on the client side. */ + +typedef struct grpc_channel_security_connector grpc_channel_security_connector; + +struct grpc_channel_security_connector { + grpc_security_connector base; + grpc_channel_credentials* channel_creds; + grpc_call_credentials* request_metadata_creds; + bool (*check_call_host)(grpc_channel_security_connector* sc, const char* host, + grpc_auth_context* auth_context, + grpc_closure* on_call_host_checked, + grpc_error** error); + void (*cancel_check_call_host)(grpc_channel_security_connector* sc, + grpc_closure* on_call_host_checked, + grpc_error* error); + void (*add_handshakers)(grpc_channel_security_connector* sc, + grpc_handshake_manager* handshake_mgr); +}; + +/// A helper function for use in grpc_security_connector_cmp() implementations. +int grpc_channel_security_connector_cmp(grpc_channel_security_connector* sc1, + grpc_channel_security_connector* sc2); + +/// Checks that the host that will be set for a call is acceptable. +/// Returns true if completed synchronously, in which case \a error will +/// be set to indicate the result. 
Otherwise, \a on_call_host_checked +/// will be invoked when complete. +bool grpc_channel_security_connector_check_call_host( + grpc_channel_security_connector* sc, const char* host, + grpc_auth_context* auth_context, grpc_closure* on_call_host_checked, + grpc_error** error); + +/// Cancels a pending asychronous call to +/// grpc_channel_security_connector_check_call_host() with +/// \a on_call_host_checked as its callback. +void grpc_channel_security_connector_cancel_check_call_host( + grpc_channel_security_connector* sc, grpc_closure* on_call_host_checked, + grpc_error* error); + +/* Registers handshakers with \a handshake_mgr. */ +void grpc_channel_security_connector_add_handshakers( + grpc_channel_security_connector* connector, + grpc_handshake_manager* handshake_mgr); + +/* --- server_security_connector object. --- + + A server security connector object represents a way to configure the + underlying transport security mechanism on the server side. */ + +typedef struct grpc_server_security_connector grpc_server_security_connector; + +struct grpc_server_security_connector { + grpc_security_connector base; + grpc_server_credentials* server_creds; + void (*add_handshakers)(grpc_server_security_connector* sc, + grpc_handshake_manager* handshake_mgr); +}; + +/// A helper function for use in grpc_security_connector_cmp() implementations. +int grpc_server_security_connector_cmp(grpc_server_security_connector* sc1, + grpc_server_security_connector* sc2); + +void grpc_server_security_connector_add_handshakers( + grpc_server_security_connector* sc, grpc_handshake_manager* handshake_mgr); + +/* --- Creation security connectors. --- */ + +/* For TESTING ONLY! + Creates a fake connector that emulates real channel security. */ +grpc_channel_security_connector* grpc_fake_channel_security_connector_create( + grpc_channel_credentials* channel_creds, + grpc_call_credentials* request_metadata_creds, const char* target, + const grpc_channel_args* args); + +/* For TESTING ONLY! + Creates a fake connector that emulates real server security. */ +grpc_server_security_connector* grpc_fake_server_security_connector_create( + grpc_server_credentials* server_creds); + +/* Config for ssl clients. */ + +typedef struct { + tsi_ssl_pem_key_cert_pair* pem_key_cert_pair; + char* pem_root_certs; +} grpc_ssl_config; + +/* Creates an SSL channel_security_connector. + - request_metadata_creds is the credentials object which metadata + will be sent with each request. This parameter can be NULL. + - config is the SSL config to be used for the SSL channel establishment. + - is_client should be 0 for a server or a non-0 value for a client. + - secure_peer_name is the secure peer name that should be checked in + grpc_channel_security_connector_check_peer. This parameter may be NULL in + which case the peer name will not be checked. Note that if this parameter + is not NULL, then, pem_root_certs should not be NULL either. + - sc is a pointer on the connector to be created. + This function returns GRPC_SECURITY_OK in case of success or a + specific error code otherwise. +*/ +grpc_security_status grpc_ssl_channel_security_connector_create( + grpc_channel_credentials* channel_creds, + grpc_call_credentials* request_metadata_creds, + const grpc_ssl_config* config, const char* target_name, + const char* overridden_target_name, + tsi_ssl_session_cache* ssl_session_cache, + grpc_channel_security_connector** sc); + +/* Config for ssl servers. 
*/ +typedef struct { + tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs; + size_t num_key_cert_pairs; + char* pem_root_certs; + grpc_ssl_client_certificate_request_type client_certificate_request; +} grpc_ssl_server_config; + +/* Creates an SSL server_security_connector. + - config is the SSL config to be used for the SSL channel establishment. + - sc is a pointer on the connector to be created. + This function returns GRPC_SECURITY_OK in case of success or a + specific error code otherwise. +*/ +grpc_security_status grpc_ssl_server_security_connector_create( + grpc_server_credentials* server_credentials, + grpc_server_security_connector** sc); + +/* Util. */ +const tsi_peer_property* tsi_peer_get_property_by_name(const tsi_peer* peer, + const char* name); + +/* Exposed for testing only. */ +grpc_auth_context* grpc_ssl_peer_to_auth_context(const tsi_peer* peer); +tsi_peer grpc_shallow_peer_from_ssl_auth_context( + const grpc_auth_context* auth_context); +void grpc_shallow_peer_destruct(tsi_peer* peer); +int grpc_ssl_host_matches_name(const tsi_peer* peer, const char* peer_name); + +/* --- Default SSL Root Store. --- */ +namespace grpc_core { + +// The class implements default SSL root store. +class DefaultSslRootStore { + public: + // Gets the default SSL root store. Returns nullptr if not found. + static const tsi_ssl_root_certs_store* GetRootStore(); + + // Gets the default PEM root certificate. + static const char* GetPemRootCerts(); + + protected: + // Returns default PEM root certificates in nullptr terminated grpc_slice. + // This function is protected instead of private, so that it can be tested. + static grpc_slice ComputePemRootCerts(); + + private: + // Construct me not! + DefaultSslRootStore(); + + // Initialization of default SSL root store. + static void InitRootStore(); + + // One-time initialization of default SSL root store. + static void InitRootStoreOnce(); + + // SSL root store in tsi_ssl_root_certs_store object. + static tsi_ssl_root_certs_store* default_root_store_; + + // Default PEM root certificates. + static grpc_slice default_pem_root_certs_; +}; + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_SECURITY_SECURITY_CONNECTOR_SECURITY_CONNECTOR_H */ diff --git a/Sources/CgRPC/src/core/lib/security/transport/auth_filters.h b/Sources/CgRPC/src/core/lib/security/transport/auth_filters.h index bd5902a12..af2104cfb 100644 --- a/Sources/CgRPC/src/core/lib/security/transport/auth_filters.h +++ b/Sources/CgRPC/src/core/lib/security/transport/auth_filters.h @@ -19,9 +19,19 @@ #ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_AUTH_FILTERS_H #define GRPC_CORE_LIB_SECURITY_TRANSPORT_AUTH_FILTERS_H +#include + +#include #include "src/core/lib/channel/channel_stack.h" extern const grpc_channel_filter grpc_client_auth_filter; extern const grpc_channel_filter grpc_server_auth_filter; +void grpc_auth_metadata_context_build( + const char* url_scheme, grpc_slice call_host, grpc_slice call_method, + grpc_auth_context* auth_context, + grpc_auth_metadata_context* auth_md_context); + +void grpc_auth_metadata_context_reset(grpc_auth_metadata_context* context); + #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_AUTH_FILTERS_H */ diff --git a/Sources/CgRPC/src/core/lib/security/transport/client_auth_filter.c b/Sources/CgRPC/src/core/lib/security/transport/client_auth_filter.c deleted file mode 100644 index dd7dd44e7..000000000 --- a/Sources/CgRPC/src/core/lib/security/transport/client_auth_filter.c +++ /dev/null @@ -1,428 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/security/transport/auth_filters.h" - -#include - -#include -#include -#include - -#include "src/core/lib/channel/channel_stack.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/security/context/security_context.h" -#include "src/core/lib/security/credentials/credentials.h" -#include "src/core/lib/security/transport/security_connector.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" -#include "src/core/lib/surface/call.h" -#include "src/core/lib/transport/static_metadata.h" - -#define MAX_CREDENTIALS_METADATA_COUNT 4 - -/* We can have a per-call credentials. */ -typedef struct { - grpc_call_stack *owning_call; - grpc_call_combiner *call_combiner; - grpc_call_credentials *creds; - bool have_host; - bool have_method; - grpc_slice host; - grpc_slice method; - /* pollset{_set} bound to this call; if we need to make external - network requests, they should be done under a pollset added to this - pollset_set so that work can progress when this call wants work to progress - */ - grpc_polling_entity *pollent; - grpc_credentials_mdelem_array md_array; - grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT]; - grpc_auth_metadata_context auth_md_context; - grpc_closure async_result_closure; - grpc_closure check_call_host_cancel_closure; - grpc_closure get_request_metadata_cancel_closure; -} call_data; - -/* We can have a per-channel credentials. 
*/ -typedef struct { - grpc_channel_security_connector *security_connector; - grpc_auth_context *auth_context; -} channel_data; - -static void reset_auth_metadata_context( - grpc_auth_metadata_context *auth_md_context) { - if (auth_md_context->service_url != NULL) { - gpr_free((char *)auth_md_context->service_url); - auth_md_context->service_url = NULL; - } - if (auth_md_context->method_name != NULL) { - gpr_free((char *)auth_md_context->method_name); - auth_md_context->method_name = NULL; - } - GRPC_AUTH_CONTEXT_UNREF( - (grpc_auth_context *)auth_md_context->channel_auth_context, - "grpc_auth_metadata_context"); - auth_md_context->channel_auth_context = NULL; -} - -static void add_error(grpc_error **combined, grpc_error *error) { - if (error == GRPC_ERROR_NONE) return; - if (*combined == GRPC_ERROR_NONE) { - *combined = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Client auth metadata plugin error"); - } - *combined = grpc_error_add_child(*combined, error); -} - -static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *input_error) { - grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; - grpc_call_element *elem = batch->handler_private.extra_arg; - call_data *calld = elem->call_data; - reset_auth_metadata_context(&calld->auth_md_context); - grpc_error *error = GRPC_ERROR_REF(input_error); - if (error == GRPC_ERROR_NONE) { - GPR_ASSERT(calld->md_array.size <= MAX_CREDENTIALS_METADATA_COUNT); - GPR_ASSERT(batch->send_initial_metadata); - grpc_metadata_batch *mdb = - batch->payload->send_initial_metadata.send_initial_metadata; - for (size_t i = 0; i < calld->md_array.size; ++i) { - add_error(&error, grpc_metadata_batch_add_tail( - exec_ctx, mdb, &calld->md_links[i], - GRPC_MDELEM_REF(calld->md_array.md[i]))); - } - } - if (error == GRPC_ERROR_NONE) { - grpc_call_next_op(exec_ctx, elem, batch); - } else { - error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, - GRPC_STATUS_UNAUTHENTICATED); - grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error, - calld->call_combiner); - } -} - -void build_auth_metadata_context(grpc_security_connector *sc, - grpc_auth_context *auth_context, - call_data *calld) { - char *service = grpc_slice_to_c_string(calld->method); - char *last_slash = strrchr(service, '/'); - char *method_name = NULL; - char *service_url = NULL; - reset_auth_metadata_context(&calld->auth_md_context); - if (last_slash == NULL) { - gpr_log(GPR_ERROR, "No '/' found in fully qualified method name"); - service[0] = '\0'; - } else if (last_slash == service) { - /* No service part in fully qualified method name: will just be "/". */ - service[1] = '\0'; - } else { - *last_slash = '\0'; - method_name = gpr_strdup(last_slash + 1); - } - if (method_name == NULL) method_name = gpr_strdup(""); - char *host = grpc_slice_to_c_string(calld->host); - gpr_asprintf(&service_url, "%s://%s%s", - sc->url_scheme == NULL ? 
"" : sc->url_scheme, host, service); - calld->auth_md_context.service_url = service_url; - calld->auth_md_context.method_name = method_name; - calld->auth_md_context.channel_auth_context = - GRPC_AUTH_CONTEXT_REF(auth_context, "grpc_auth_metadata_context"); - gpr_free(service); - gpr_free(host); -} - -static void cancel_get_request_metadata(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; - if (error != GRPC_ERROR_NONE) { - grpc_call_credentials_cancel_get_request_metadata( - exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error)); - } - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, - "cancel_get_request_metadata"); -} - -static void send_security_metadata(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { - call_data *calld = elem->call_data; - channel_data *chand = elem->channel_data; - grpc_client_security_context *ctx = - (grpc_client_security_context *)batch->payload - ->context[GRPC_CONTEXT_SECURITY] - .value; - grpc_call_credentials *channel_call_creds = - chand->security_connector->request_metadata_creds; - int call_creds_has_md = (ctx != NULL) && (ctx->creds != NULL); - - if (channel_call_creds == NULL && !call_creds_has_md) { - /* Skip sending metadata altogether. */ - grpc_call_next_op(exec_ctx, elem, batch); - return; - } - - if (channel_call_creds != NULL && call_creds_has_md) { - calld->creds = grpc_composite_call_credentials_create(channel_call_creds, - ctx->creds, NULL); - if (calld->creds == NULL) { - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, - grpc_error_set_int( - GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Incompatible credentials set on channel and call."), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED), - calld->call_combiner); - return; - } - } else { - calld->creds = grpc_call_credentials_ref( - call_creds_has_md ? ctx->creds : channel_call_creds); - } - - build_auth_metadata_context(&chand->security_connector->base, - chand->auth_context, calld); - - GPR_ASSERT(calld->pollent != NULL); - - GRPC_CLOSURE_INIT(&calld->async_result_closure, on_credentials_metadata, - batch, grpc_schedule_on_exec_ctx); - grpc_error *error = GRPC_ERROR_NONE; - if (grpc_call_credentials_get_request_metadata( - exec_ctx, calld->creds, calld->pollent, calld->auth_md_context, - &calld->md_array, &calld->async_result_closure, &error)) { - // Synchronous return; invoke on_credentials_metadata() directly. - on_credentials_metadata(exec_ctx, batch, error); - GRPC_ERROR_UNREF(error); - } else { - // Async return; register cancellation closure with call combiner. 
- GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata"); - grpc_call_combiner_set_notify_on_cancel( - exec_ctx, calld->call_combiner, - GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure, - cancel_get_request_metadata, elem, - grpc_schedule_on_exec_ctx)); - } -} - -static void on_host_checked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; - grpc_call_element *elem = batch->handler_private.extra_arg; - call_data *calld = elem->call_data; - if (error == GRPC_ERROR_NONE) { - send_security_metadata(exec_ctx, elem, batch); - } else { - char *error_msg; - char *host = grpc_slice_to_c_string(calld->host); - gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.", - host); - gpr_free(host); - grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, batch, - grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg), - GRPC_ERROR_INT_GRPC_STATUS, - GRPC_STATUS_UNAUTHENTICATED), - calld->call_combiner); - gpr_free(error_msg); - } -} - -static void cancel_check_call_host(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; - if (error != GRPC_ERROR_NONE) { - grpc_channel_security_connector_cancel_check_call_host( - exec_ctx, chand->security_connector, &calld->async_result_closure, - GRPC_ERROR_REF(error)); - } - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_check_call_host"); -} - -static void auth_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { - GPR_TIMER_BEGIN("auth_start_transport_stream_op_batch", 0); - - /* grab pointers to our data from the call element */ - call_data *calld = elem->call_data; - channel_data *chand = elem->channel_data; - - if (!batch->cancel_stream) { - GPR_ASSERT(batch->payload->context != NULL); - if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == NULL) { - batch->payload->context[GRPC_CONTEXT_SECURITY].value = - grpc_client_security_context_create(); - batch->payload->context[GRPC_CONTEXT_SECURITY].destroy = - grpc_client_security_context_destroy; - } - grpc_client_security_context *sec_ctx = - batch->payload->context[GRPC_CONTEXT_SECURITY].value; - GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter"); - sec_ctx->auth_context = - GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter"); - } - - if (batch->send_initial_metadata) { - for (grpc_linked_mdelem *l = batch->payload->send_initial_metadata - .send_initial_metadata->list.head; - l != NULL; l = l->next) { - grpc_mdelem md = l->md; - /* Pointer comparison is OK for md_elems created from the same context. 
- */ - if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_AUTHORITY)) { - if (calld->have_host) { - grpc_slice_unref_internal(exec_ctx, calld->host); - } - calld->host = grpc_slice_ref_internal(GRPC_MDVALUE(md)); - calld->have_host = true; - } else if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_PATH)) { - if (calld->have_method) { - grpc_slice_unref_internal(exec_ctx, calld->method); - } - calld->method = grpc_slice_ref_internal(GRPC_MDVALUE(md)); - calld->have_method = true; - } - } - if (calld->have_host) { - batch->handler_private.extra_arg = elem; - GRPC_CLOSURE_INIT(&calld->async_result_closure, on_host_checked, batch, - grpc_schedule_on_exec_ctx); - char *call_host = grpc_slice_to_c_string(calld->host); - grpc_error *error = GRPC_ERROR_NONE; - if (grpc_channel_security_connector_check_call_host( - exec_ctx, chand->security_connector, call_host, - chand->auth_context, &calld->async_result_closure, &error)) { - // Synchronous return; invoke on_host_checked() directly. - on_host_checked(exec_ctx, batch, error); - GRPC_ERROR_UNREF(error); - } else { - // Async return; register cancellation closure with call combiner. - GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host"); - grpc_call_combiner_set_notify_on_cancel( - exec_ctx, calld->call_combiner, - GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure, - cancel_check_call_host, elem, - grpc_schedule_on_exec_ctx)); - } - gpr_free(call_host); - GPR_TIMER_END("auth_start_transport_stream_op_batch", 0); - return; /* early exit */ - } - } - - /* pass control down the stack */ - grpc_call_next_op(exec_ctx, elem, batch); - GPR_TIMER_END("auth_start_transport_stream_op_batch", 0); -} - -/* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = elem->call_data; - calld->owning_call = args->call_stack; - calld->call_combiner = args->call_combiner; - return GRPC_ERROR_NONE; -} - -static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - grpc_polling_entity *pollent) { - call_data *calld = elem->call_data; - calld->pollent = pollent; -} - -/* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *ignored) { - call_data *calld = elem->call_data; - grpc_credentials_mdelem_array_destroy(exec_ctx, &calld->md_array); - grpc_call_credentials_unref(exec_ctx, calld->creds); - if (calld->have_host) { - grpc_slice_unref_internal(exec_ctx, calld->host); - } - if (calld->have_method) { - grpc_slice_unref_internal(exec_ctx, calld->method); - } - reset_auth_metadata_context(&calld->auth_md_context); -} - -/* Constructor for channel_data */ -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - grpc_security_connector *sc = - grpc_security_connector_find_in_args(args->channel_args); - if (sc == NULL) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Security connector missing from client auth filter args"); - } - grpc_auth_context *auth_context = - grpc_find_auth_context_in_args(args->channel_args); - if (auth_context == NULL) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Auth context missing from client auth filter args"); - } - - /* grab pointers to our data from the channel element */ - channel_data *chand = elem->channel_data; - - /* The first and the last filters tend to be implemented differently to - 
     handle the case that there's no 'next' filter to call on the up or down
-     path */
-  GPR_ASSERT(!args->is_last);
-
-  /* initialize members */
-  chand->security_connector =
-      (grpc_channel_security_connector *)GRPC_SECURITY_CONNECTOR_REF(
-          sc, "client_auth_filter");
-  chand->auth_context =
-      GRPC_AUTH_CONTEXT_REF(auth_context, "client_auth_filter");
-  return GRPC_ERROR_NONE;
-}
-
-/* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
-                                 grpc_channel_element *elem) {
-  /* grab pointers to our data from the channel element */
-  channel_data *chand = elem->channel_data;
-  grpc_channel_security_connector *sc = chand->security_connector;
-  if (sc != NULL) {
-    GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &sc->base, "client_auth_filter");
-  }
-  GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "client_auth_filter");
-}
-
-const grpc_channel_filter grpc_client_auth_filter = {
-    auth_start_transport_stream_op_batch,
-    grpc_channel_next_op,
-    sizeof(call_data),
-    init_call_elem,
-    set_pollset_or_pollset_set,
-    destroy_call_elem,
-    sizeof(channel_data),
-    init_channel_elem,
-    destroy_channel_elem,
-    grpc_channel_next_get_info,
-    "client-auth"};
diff --git a/Sources/CgRPC/src/core/lib/security/transport/client_auth_filter.cc b/Sources/CgRPC/src/core/lib/security/transport/client_auth_filter.cc
new file mode 100644
index 000000000..048e390a7
--- /dev/null
+++ b/Sources/CgRPC/src/core/lib/security/transport/client_auth_filter.cc
@@ -0,0 +1,418 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/security/transport/auth_filters.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/security/context/security_context.h"
+#include "src/core/lib/security/credentials/credentials.h"
+#include "src/core/lib/security/security_connector/security_connector.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/transport/static_metadata.h"
+
+#define MAX_CREDENTIALS_METADATA_COUNT 4
+
+namespace {
+/* We can have a per-call credentials.
*/ +struct call_data { + grpc_call_stack* owning_call; + grpc_call_combiner* call_combiner; + grpc_call_credentials* creds; + grpc_slice host; + grpc_slice method; + /* pollset{_set} bound to this call; if we need to make external + network requests, they should be done under a pollset added to this + pollset_set so that work can progress when this call wants work to progress + */ + grpc_polling_entity* pollent; + grpc_credentials_mdelem_array md_array; + grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT]; + grpc_auth_metadata_context auth_md_context; + grpc_closure async_result_closure; + grpc_closure check_call_host_cancel_closure; + grpc_closure get_request_metadata_cancel_closure; +}; + +/* We can have a per-channel credentials. */ +struct channel_data { + grpc_channel_security_connector* security_connector; + grpc_auth_context* auth_context; +}; +} // namespace + +void grpc_auth_metadata_context_reset( + grpc_auth_metadata_context* auth_md_context) { + if (auth_md_context->service_url != nullptr) { + gpr_free(const_cast(auth_md_context->service_url)); + auth_md_context->service_url = nullptr; + } + if (auth_md_context->method_name != nullptr) { + gpr_free(const_cast(auth_md_context->method_name)); + auth_md_context->method_name = nullptr; + } + GRPC_AUTH_CONTEXT_UNREF( + (grpc_auth_context*)auth_md_context->channel_auth_context, + "grpc_auth_metadata_context"); + auth_md_context->channel_auth_context = nullptr; +} + +static void add_error(grpc_error** combined, grpc_error* error) { + if (error == GRPC_ERROR_NONE) return; + if (*combined == GRPC_ERROR_NONE) { + *combined = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Client auth metadata plugin error"); + } + *combined = grpc_error_add_child(*combined, error); +} + +static void on_credentials_metadata(void* arg, grpc_error* input_error) { + grpc_transport_stream_op_batch* batch = + static_cast(arg); + grpc_call_element* elem = + static_cast(batch->handler_private.extra_arg); + call_data* calld = static_cast(elem->call_data); + grpc_auth_metadata_context_reset(&calld->auth_md_context); + grpc_error* error = GRPC_ERROR_REF(input_error); + if (error == GRPC_ERROR_NONE) { + GPR_ASSERT(calld->md_array.size <= MAX_CREDENTIALS_METADATA_COUNT); + GPR_ASSERT(batch->send_initial_metadata); + grpc_metadata_batch* mdb = + batch->payload->send_initial_metadata.send_initial_metadata; + for (size_t i = 0; i < calld->md_array.size; ++i) { + add_error(&error, grpc_metadata_batch_add_tail( + mdb, &calld->md_links[i], + GRPC_MDELEM_REF(calld->md_array.md[i]))); + } + } + if (error == GRPC_ERROR_NONE) { + grpc_call_next_op(elem, batch); + } else { + error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_UNAVAILABLE); + grpc_transport_stream_op_batch_finish_with_failure(batch, error, + calld->call_combiner); + } + GRPC_CALL_STACK_UNREF(calld->owning_call, "get_request_metadata"); +} + +void grpc_auth_metadata_context_build( + const char* url_scheme, grpc_slice call_host, grpc_slice call_method, + grpc_auth_context* auth_context, + grpc_auth_metadata_context* auth_md_context) { + char* service = grpc_slice_to_c_string(call_method); + char* last_slash = strrchr(service, '/'); + char* method_name = nullptr; + char* service_url = nullptr; + grpc_auth_metadata_context_reset(auth_md_context); + if (last_slash == nullptr) { + gpr_log(GPR_ERROR, "No '/' found in fully qualified method name"); + service[0] = '\0'; + method_name = gpr_strdup(""); + } else if (last_slash == service) { + method_name = gpr_strdup(""); + } else { + *last_slash 
= '\0'; + method_name = gpr_strdup(last_slash + 1); + } + char* host_and_port = grpc_slice_to_c_string(call_host); + if (url_scheme != nullptr && strcmp(url_scheme, GRPC_SSL_URL_SCHEME) == 0) { + /* Remove the port if it is 443. */ + char* port_delimiter = strrchr(host_and_port, ':'); + if (port_delimiter != nullptr && strcmp(port_delimiter + 1, "443") == 0) { + *port_delimiter = '\0'; + } + } + gpr_asprintf(&service_url, "%s://%s%s", + url_scheme == nullptr ? "" : url_scheme, host_and_port, service); + auth_md_context->service_url = service_url; + auth_md_context->method_name = method_name; + auth_md_context->channel_auth_context = + GRPC_AUTH_CONTEXT_REF(auth_context, "grpc_auth_metadata_context"); + gpr_free(service); + gpr_free(host_and_port); +} + +static void cancel_get_request_metadata(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + call_data* calld = static_cast(elem->call_data); + if (error != GRPC_ERROR_NONE) { + grpc_call_credentials_cancel_get_request_metadata( + calld->creds, &calld->md_array, GRPC_ERROR_REF(error)); + } + GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_get_request_metadata"); +} + +static void send_security_metadata(grpc_call_element* elem, + grpc_transport_stream_op_batch* batch) { + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); + grpc_client_security_context* ctx = + static_cast( + batch->payload->context[GRPC_CONTEXT_SECURITY].value); + grpc_call_credentials* channel_call_creds = + chand->security_connector->request_metadata_creds; + int call_creds_has_md = (ctx != nullptr) && (ctx->creds != nullptr); + + if (channel_call_creds == nullptr && !call_creds_has_md) { + /* Skip sending metadata altogether. */ + grpc_call_next_op(elem, batch); + return; + } + + if (channel_call_creds != nullptr && call_creds_has_md) { + calld->creds = grpc_composite_call_credentials_create(channel_call_creds, + ctx->creds, nullptr); + if (calld->creds == nullptr) { + grpc_transport_stream_op_batch_finish_with_failure( + batch, + grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Incompatible credentials set on channel and call."), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED), + calld->call_combiner); + return; + } + } else { + calld->creds = grpc_call_credentials_ref( + call_creds_has_md ? ctx->creds : channel_call_creds); + } + + grpc_auth_metadata_context_build( + chand->security_connector->base.url_scheme, calld->host, calld->method, + chand->auth_context, &calld->auth_md_context); + + GPR_ASSERT(calld->pollent != nullptr); + GRPC_CALL_STACK_REF(calld->owning_call, "get_request_metadata"); + GRPC_CLOSURE_INIT(&calld->async_result_closure, on_credentials_metadata, + batch, grpc_schedule_on_exec_ctx); + grpc_error* error = GRPC_ERROR_NONE; + if (grpc_call_credentials_get_request_metadata( + calld->creds, calld->pollent, calld->auth_md_context, + &calld->md_array, &calld->async_result_closure, &error)) { + // Synchronous return; invoke on_credentials_metadata() directly. + on_credentials_metadata(batch, error); + GRPC_ERROR_UNREF(error); + } else { + // Async return; register cancellation closure with call combiner. 
+ GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata"); + grpc_call_combiner_set_notify_on_cancel( + calld->call_combiner, + GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure, + cancel_get_request_metadata, elem, + grpc_schedule_on_exec_ctx)); + } +} + +static void on_host_checked(void* arg, grpc_error* error) { + grpc_transport_stream_op_batch* batch = + static_cast(arg); + grpc_call_element* elem = + static_cast(batch->handler_private.extra_arg); + call_data* calld = static_cast(elem->call_data); + if (error == GRPC_ERROR_NONE) { + send_security_metadata(elem, batch); + } else { + char* error_msg; + char* host = grpc_slice_to_c_string(calld->host); + gpr_asprintf(&error_msg, "Invalid host %s set in :authority metadata.", + host); + gpr_free(host); + grpc_transport_stream_op_batch_finish_with_failure( + batch, + grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg), + GRPC_ERROR_INT_GRPC_STATUS, + GRPC_STATUS_UNAUTHENTICATED), + calld->call_combiner); + gpr_free(error_msg); + } + GRPC_CALL_STACK_UNREF(calld->owning_call, "check_call_host"); +} + +static void cancel_check_call_host(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); + if (error != GRPC_ERROR_NONE) { + grpc_channel_security_connector_cancel_check_call_host( + chand->security_connector, &calld->async_result_closure, + GRPC_ERROR_REF(error)); + } + GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_check_call_host"); +} + +static void auth_start_transport_stream_op_batch( + grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { + GPR_TIMER_SCOPE("auth_start_transport_stream_op_batch", 0); + + /* grab pointers to our data from the call element */ + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); + + if (!batch->cancel_stream) { + GPR_ASSERT(batch->payload->context != nullptr); + if (batch->payload->context[GRPC_CONTEXT_SECURITY].value == nullptr) { + batch->payload->context[GRPC_CONTEXT_SECURITY].value = + grpc_client_security_context_create(); + batch->payload->context[GRPC_CONTEXT_SECURITY].destroy = + grpc_client_security_context_destroy; + } + grpc_client_security_context* sec_ctx = + static_cast( + batch->payload->context[GRPC_CONTEXT_SECURITY].value); + GRPC_AUTH_CONTEXT_UNREF(sec_ctx->auth_context, "client auth filter"); + sec_ctx->auth_context = + GRPC_AUTH_CONTEXT_REF(chand->auth_context, "client_auth_filter"); + } + + if (batch->send_initial_metadata) { + grpc_metadata_batch* metadata = + batch->payload->send_initial_metadata.send_initial_metadata; + if (metadata->idx.named.path != nullptr) { + calld->method = + grpc_slice_ref_internal(GRPC_MDVALUE(metadata->idx.named.path->md)); + } + if (metadata->idx.named.authority != nullptr) { + calld->host = grpc_slice_ref_internal( + GRPC_MDVALUE(metadata->idx.named.authority->md)); + batch->handler_private.extra_arg = elem; + GRPC_CALL_STACK_REF(calld->owning_call, "check_call_host"); + GRPC_CLOSURE_INIT(&calld->async_result_closure, on_host_checked, batch, + grpc_schedule_on_exec_ctx); + char* call_host = grpc_slice_to_c_string(calld->host); + grpc_error* error = GRPC_ERROR_NONE; + if (grpc_channel_security_connector_check_call_host( + chand->security_connector, call_host, chand->auth_context, + &calld->async_result_closure, &error)) { + // Synchronous return; invoke on_host_checked() directly. 
+ on_host_checked(batch, error); + GRPC_ERROR_UNREF(error); + } else { + // Async return; register cancellation closure with call combiner. + GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host"); + grpc_call_combiner_set_notify_on_cancel( + calld->call_combiner, + GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure, + cancel_check_call_host, elem, + grpc_schedule_on_exec_ctx)); + } + gpr_free(call_host); + return; /* early exit */ + } + } + + /* pass control down the stack */ + grpc_call_next_op(elem, batch); +} + +/* Constructor for call_data */ +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); + calld->owning_call = args->call_stack; + calld->call_combiner = args->call_combiner; + calld->host = grpc_empty_slice(); + calld->method = grpc_empty_slice(); + return GRPC_ERROR_NONE; +} + +static void set_pollset_or_pollset_set(grpc_call_element* elem, + grpc_polling_entity* pollent) { + call_data* calld = static_cast(elem->call_data); + calld->pollent = pollent; +} + +/* Destructor for call_data */ +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) { + call_data* calld = static_cast(elem->call_data); + grpc_credentials_mdelem_array_destroy(&calld->md_array); + grpc_call_credentials_unref(calld->creds); + grpc_slice_unref_internal(calld->host); + grpc_slice_unref_internal(calld->method); + grpc_auth_metadata_context_reset(&calld->auth_md_context); +} + +/* Constructor for channel_data */ +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + grpc_security_connector* sc = + grpc_security_connector_find_in_args(args->channel_args); + if (sc == nullptr) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Security connector missing from client auth filter args"); + } + grpc_auth_context* auth_context = + grpc_find_auth_context_in_args(args->channel_args); + if (auth_context == nullptr) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Auth context missing from client auth filter args"); + } + + /* grab pointers to our data from the channel element */ + channel_data* chand = static_cast(elem->channel_data); + + /* The first and the last filters tend to be implemented differently to + handle the case that there's no 'next' filter to call on the up or down + path */ + GPR_ASSERT(!args->is_last); + + /* initialize members */ + chand->security_connector = + reinterpret_cast( + GRPC_SECURITY_CONNECTOR_REF(sc, "client_auth_filter")); + chand->auth_context = + GRPC_AUTH_CONTEXT_REF(auth_context, "client_auth_filter"); + return GRPC_ERROR_NONE; +} + +/* Destructor for channel data */ +static void destroy_channel_elem(grpc_channel_element* elem) { + /* grab pointers to our data from the channel element */ + channel_data* chand = static_cast(elem->channel_data); + grpc_channel_security_connector* sc = chand->security_connector; + if (sc != nullptr) { + GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "client_auth_filter"); + } + GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "client_auth_filter"); +} + +const grpc_channel_filter grpc_client_auth_filter = { + auth_start_transport_stream_op_batch, + grpc_channel_next_op, + sizeof(call_data), + init_call_elem, + set_pollset_or_pollset_set, + destroy_call_elem, + sizeof(channel_data), + init_channel_elem, + destroy_channel_elem, + grpc_channel_next_get_info, + "client-auth"}; diff --git 
a/Sources/CgRPC/src/core/lib/security/transport/lb_targets_info.c b/Sources/CgRPC/src/core/lib/security/transport/lb_targets_info.c deleted file mode 100644 index 5583a4e0f..000000000 --- a/Sources/CgRPC/src/core/lib/security/transport/lb_targets_info.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/security/transport/lb_targets_info.h" - -/* Channel arg key for the mapping of LB server addresses to their names for - * secure naming purposes. */ -#define GRPC_ARG_LB_SECURE_NAMING_MAP "grpc.lb_secure_naming_map" - -static void *targets_info_copy(void *p) { return grpc_slice_hash_table_ref(p); } -static void targets_info_destroy(grpc_exec_ctx *exec_ctx, void *p) { - grpc_slice_hash_table_unref(exec_ctx, p); -} -static int targets_info_cmp(void *a, void *b) { - return grpc_slice_hash_table_cmp(a, b); -} -static const grpc_arg_pointer_vtable server_to_balancer_names_vtable = { - targets_info_copy, targets_info_destroy, targets_info_cmp}; - -grpc_arg grpc_lb_targets_info_create_channel_arg( - grpc_slice_hash_table *targets_info) { - return grpc_channel_arg_pointer_create(GRPC_ARG_LB_SECURE_NAMING_MAP, - targets_info, - &server_to_balancer_names_vtable); -} - -grpc_slice_hash_table *grpc_lb_targets_info_find_in_args( - const grpc_channel_args *args) { - const grpc_arg *targets_info_arg = - grpc_channel_args_find(args, GRPC_ARG_LB_SECURE_NAMING_MAP); - if (targets_info_arg != NULL) { - GPR_ASSERT(targets_info_arg->type == GRPC_ARG_POINTER); - return targets_info_arg->value.pointer.p; - } - return NULL; -} diff --git a/Sources/CgRPC/src/core/lib/security/transport/lb_targets_info.h b/Sources/CgRPC/src/core/lib/security/transport/lb_targets_info.h deleted file mode 100644 index c3d685df5..000000000 --- a/Sources/CgRPC/src/core/lib/security/transport/lb_targets_info.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_LB_TARGETS_INFO_H -#define GRPC_CORE_LIB_SECURITY_TRANSPORT_LB_TARGETS_INFO_H - -#include "src/core/lib/slice/slice_hash_table.h" - -/** Return a channel argument containing \a targets_info. 
*/ -grpc_arg grpc_lb_targets_info_create_channel_arg( - grpc_slice_hash_table *targets_info); - -/** Return the instance of targets info in \a args or NULL */ -grpc_slice_hash_table *grpc_lb_targets_info_find_in_args( - const grpc_channel_args *args); - -#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_LB_TARGETS_INFO_H */ diff --git a/Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.c b/Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.cc similarity index 56% rename from Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.c rename to Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.cc index ae5633b82..840b2e73b 100644 --- a/Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.c +++ b/Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.cc @@ -20,6 +20,8 @@ using that endpoint. Because of various transitive includes in uv.h, including windows.h on Windows, uv.h must be included before other system headers. Therefore, sockaddr.h must always be included first */ +#include + #include "src/core/lib/iomgr/sockaddr.h" #include @@ -28,27 +30,27 @@ #include #include #include "src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/security/transport/secure_endpoint.h" #include "src/core/lib/security/transport/tsi_error.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" #include "src/core/tsi/transport_security_grpc.h" #define STAGING_BUFFER_SIZE 8192 typedef struct { grpc_endpoint base; - grpc_endpoint *wrapped_ep; - struct tsi_frame_protector *protector; - struct tsi_zero_copy_grpc_protector *zero_copy_protector; + grpc_endpoint* wrapped_ep; + struct tsi_frame_protector* protector; + struct tsi_zero_copy_grpc_protector* zero_copy_protector; gpr_mu protector_mu; /* saved upper level callbacks and user_data. */ - grpc_closure *read_cb; - grpc_closure *write_cb; + grpc_closure* read_cb; + grpc_closure* write_cb; grpc_closure on_read; - grpc_slice_buffer *read_buffer; + grpc_slice_buffer* read_buffer; grpc_slice_buffer source_buffer; /* saved handshaker leftover data to unprotect. 
*/ grpc_slice_buffer leftover_bytes; @@ -61,45 +63,43 @@ typedef struct { gpr_refcount ref; } secure_endpoint; -grpc_tracer_flag grpc_trace_secure_endpoint = - GRPC_TRACER_INITIALIZER(false, "secure_endpoint"); +grpc_core::TraceFlag grpc_trace_secure_endpoint(false, "secure_endpoint"); -static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) { - secure_endpoint *ep = secure_ep; - grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep); +static void destroy(secure_endpoint* secure_ep) { + secure_endpoint* ep = secure_ep; + grpc_endpoint_destroy(ep->wrapped_ep); tsi_frame_protector_destroy(ep->protector); - tsi_zero_copy_grpc_protector_destroy(exec_ctx, ep->zero_copy_protector); - grpc_slice_buffer_destroy_internal(exec_ctx, &ep->leftover_bytes); - grpc_slice_unref_internal(exec_ctx, ep->read_staging_buffer); - grpc_slice_unref_internal(exec_ctx, ep->write_staging_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &ep->output_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &ep->source_buffer); + tsi_zero_copy_grpc_protector_destroy(ep->zero_copy_protector); + grpc_slice_buffer_destroy_internal(&ep->leftover_bytes); + grpc_slice_unref_internal(ep->read_staging_buffer); + grpc_slice_unref_internal(ep->write_staging_buffer); + grpc_slice_buffer_destroy_internal(&ep->output_buffer); + grpc_slice_buffer_destroy_internal(&ep->source_buffer); gpr_mu_destroy(&ep->protector_mu); gpr_free(ep); } #ifndef NDEBUG -#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \ - secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__) +#define SECURE_ENDPOINT_UNREF(ep, reason) \ + secure_endpoint_unref((ep), (reason), __FILE__, __LINE__) #define SECURE_ENDPOINT_REF(ep, reason) \ secure_endpoint_ref((ep), (reason), __FILE__, __LINE__) -static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, secure_endpoint *ep, - const char *reason, const char *file, - int line) { - if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) { +static void secure_endpoint_unref(secure_endpoint* ep, const char* reason, + const char* file, int line) { + if (grpc_trace_secure_endpoint.enabled()) { gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val, val - 1); } if (gpr_unref(&ep->ref)) { - destroy(exec_ctx, ep); + destroy(ep); } } -static void secure_endpoint_ref(secure_endpoint *ep, const char *reason, - const char *file, int line) { - if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) { +static void secure_endpoint_ref(secure_endpoint* ep, const char* reason, + const char* file, int line) { + if (grpc_trace_secure_endpoint.enabled()) { gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val, @@ -108,73 +108,69 @@ static void secure_endpoint_ref(secure_endpoint *ep, const char *reason, gpr_ref(&ep->ref); } #else -#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \ - secure_endpoint_unref((exec_ctx), (ep)) +#define SECURE_ENDPOINT_UNREF(ep, reason) secure_endpoint_unref((ep)) #define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep)) -static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, - secure_endpoint *ep) { +static void secure_endpoint_unref(secure_endpoint* ep) { if (gpr_unref(&ep->ref)) { - destroy(exec_ctx, ep); + destroy(ep); } } -static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); } +static void secure_endpoint_ref(secure_endpoint* ep) { 
gpr_ref(&ep->ref); } #endif -static void flush_read_staging_buffer(secure_endpoint *ep, uint8_t **cur, - uint8_t **end) { +static void flush_read_staging_buffer(secure_endpoint* ep, uint8_t** cur, + uint8_t** end) { grpc_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer); ep->read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE); *cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer); *end = GRPC_SLICE_END_PTR(ep->read_staging_buffer); } -static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep, - grpc_error *error) { - if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) { +static void call_read_cb(secure_endpoint* ep, grpc_error* error) { + if (grpc_trace_secure_endpoint.enabled()) { size_t i; for (i = 0; i < ep->read_buffer->count; i++) { - char *data = grpc_dump_slice(ep->read_buffer->slices[i], + char* data = grpc_dump_slice(ep->read_buffer->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); - gpr_log(GPR_DEBUG, "READ %p: %s", ep, data); + gpr_log(GPR_INFO, "READ %p: %s", ep, data); gpr_free(data); } } - ep->read_buffer = NULL; - GRPC_CLOSURE_SCHED(exec_ctx, ep->read_cb, error); - SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read"); + ep->read_buffer = nullptr; + GRPC_CLOSURE_SCHED(ep->read_cb, error); + SECURE_ENDPOINT_UNREF(ep, "read"); } -static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_error *error) { +static void on_read(void* user_data, grpc_error* error) { unsigned i; uint8_t keep_looping = 0; tsi_result result = TSI_OK; - secure_endpoint *ep = (secure_endpoint *)user_data; - uint8_t *cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer); - uint8_t *end = GRPC_SLICE_END_PTR(ep->read_staging_buffer); + secure_endpoint* ep = static_cast(user_data); + uint8_t* cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer); + uint8_t* end = GRPC_SLICE_END_PTR(ep->read_staging_buffer); if (error != GRPC_ERROR_NONE) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer); - call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Secure read failed", &error, 1)); + grpc_slice_buffer_reset_and_unref_internal(ep->read_buffer); + call_read_cb(ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Secure read failed", &error, 1)); return; } - if (ep->zero_copy_protector != NULL) { + if (ep->zero_copy_protector != nullptr) { // Use zero-copy grpc protector to unprotect. result = tsi_zero_copy_grpc_protector_unprotect( - exec_ctx, ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer); + ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer); } else { // Use frame protector to unprotect. 
/* TODO(yangg) check error, maybe bail out early */ for (i = 0; i < ep->source_buffer.count; i++) { grpc_slice encrypted = ep->source_buffer.slices[i]; - uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted); + uint8_t* message_bytes = GRPC_SLICE_START_PTR(encrypted); size_t message_size = GRPC_SLICE_LENGTH(encrypted); while (message_size > 0 || keep_looping) { - size_t unprotected_buffer_size_written = (size_t)(end - cur); + size_t unprotected_buffer_size_written = static_cast(end - cur); size_t processed_message_size = message_size; gpr_mu_lock(&ep->protector_mu); result = tsi_frame_protector_unprotect( @@ -211,86 +207,85 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data, ep->read_buffer, grpc_slice_split_head( &ep->read_staging_buffer, - (size_t)(cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer)))); + static_cast( + cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer)))); } } /* TODO(yangg) experiment with moving this block after read_cb to see if it helps latency */ - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->source_buffer); + grpc_slice_buffer_reset_and_unref_internal(&ep->source_buffer); if (result != TSI_OK) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer); + grpc_slice_buffer_reset_and_unref_internal(ep->read_buffer); call_read_cb( - exec_ctx, ep, - grpc_set_tsi_error_result( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unwrap failed"), result)); + ep, grpc_set_tsi_error_result( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unwrap failed"), result)); return; } - call_read_cb(exec_ctx, ep, GRPC_ERROR_NONE); + call_read_cb(ep, GRPC_ERROR_NONE); } -static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, - grpc_slice_buffer *slices, grpc_closure *cb) { - secure_endpoint *ep = (secure_endpoint *)secure_ep; +static void endpoint_read(grpc_endpoint* secure_ep, grpc_slice_buffer* slices, + grpc_closure* cb) { + secure_endpoint* ep = reinterpret_cast(secure_ep); ep->read_cb = cb; ep->read_buffer = slices; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer); + grpc_slice_buffer_reset_and_unref_internal(ep->read_buffer); SECURE_ENDPOINT_REF(ep, "read"); if (ep->leftover_bytes.count) { grpc_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer); GPR_ASSERT(ep->leftover_bytes.count == 0); - on_read(exec_ctx, ep, GRPC_ERROR_NONE); + on_read(ep, GRPC_ERROR_NONE); return; } - grpc_endpoint_read(exec_ctx, ep->wrapped_ep, &ep->source_buffer, - &ep->on_read); + grpc_endpoint_read(ep->wrapped_ep, &ep->source_buffer, &ep->on_read); } -static void flush_write_staging_buffer(secure_endpoint *ep, uint8_t **cur, - uint8_t **end) { +static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur, + uint8_t** end) { grpc_slice_buffer_add(&ep->output_buffer, ep->write_staging_buffer); ep->write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE); *cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer); *end = GRPC_SLICE_END_PTR(ep->write_staging_buffer); } -static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, - grpc_slice_buffer *slices, grpc_closure *cb) { - GPR_TIMER_BEGIN("secure_endpoint.endpoint_write", 0); +static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices, + grpc_closure* cb) { + GPR_TIMER_SCOPE("secure_endpoint.endpoint_write", 0); unsigned i; tsi_result result = TSI_OK; - secure_endpoint *ep = (secure_endpoint *)secure_ep; - uint8_t *cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer); - uint8_t *end = 
GRPC_SLICE_END_PTR(ep->write_staging_buffer); + secure_endpoint* ep = reinterpret_cast(secure_ep); + uint8_t* cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer); + uint8_t* end = GRPC_SLICE_END_PTR(ep->write_staging_buffer); - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer); + grpc_slice_buffer_reset_and_unref_internal(&ep->output_buffer); - if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) { + if (grpc_trace_secure_endpoint.enabled()) { for (i = 0; i < slices->count; i++) { - char *data = + char* data = grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); - gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data); + gpr_log(GPR_INFO, "WRITE %p: %s", ep, data); gpr_free(data); } } - if (ep->zero_copy_protector != NULL) { + if (ep->zero_copy_protector != nullptr) { // Use zero-copy grpc protector to protect. - result = tsi_zero_copy_grpc_protector_protect( - exec_ctx, ep->zero_copy_protector, slices, &ep->output_buffer); + result = tsi_zero_copy_grpc_protector_protect(ep->zero_copy_protector, + slices, &ep->output_buffer); } else { // Use frame protector to protect. for (i = 0; i < slices->count; i++) { grpc_slice plain = slices->slices[i]; - uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain); + uint8_t* message_bytes = GRPC_SLICE_START_PTR(plain); size_t message_size = GRPC_SLICE_LENGTH(plain); while (message_size > 0) { - size_t protected_buffer_size_to_send = (size_t)(end - cur); + size_t protected_buffer_size_to_send = static_cast(end - cur); size_t processed_message_size = message_size; gpr_mu_lock(&ep->protector_mu); result = tsi_frame_protector_protect(ep->protector, message_bytes, @@ -315,7 +310,7 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, if (result == TSI_OK) { size_t still_pending_size; do { - size_t protected_buffer_size_to_send = (size_t)(end - cur); + size_t protected_buffer_size_to_send = static_cast(end - cur); gpr_mu_lock(&ep->protector_mu); result = tsi_frame_protector_protect_flush( ep->protector, cur, &protected_buffer_size_to_send, @@ -332,66 +327,65 @@ static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, &ep->output_buffer, grpc_slice_split_head( &ep->write_staging_buffer, - (size_t)(cur - - GRPC_SLICE_START_PTR(ep->write_staging_buffer)))); + static_cast( + cur - GRPC_SLICE_START_PTR(ep->write_staging_buffer)))); } } } if (result != TSI_OK) { /* TODO(yangg) do different things according to the error type? 
*/ - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer); + grpc_slice_buffer_reset_and_unref_internal(&ep->output_buffer); GRPC_CLOSURE_SCHED( - exec_ctx, cb, - grpc_set_tsi_error_result( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result)); - GPR_TIMER_END("secure_endpoint.endpoint_write", 0); + cb, grpc_set_tsi_error_result( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result)); return; } - grpc_endpoint_write(exec_ctx, ep->wrapped_ep, &ep->output_buffer, cb); - GPR_TIMER_END("secure_endpoint.endpoint_write", 0); + grpc_endpoint_write(ep->wrapped_ep, &ep->output_buffer, cb); +} + +static void endpoint_shutdown(grpc_endpoint* secure_ep, grpc_error* why) { + secure_endpoint* ep = reinterpret_cast(secure_ep); + grpc_endpoint_shutdown(ep->wrapped_ep, why); } -static void endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep, - grpc_error *why) { - secure_endpoint *ep = (secure_endpoint *)secure_ep; - grpc_endpoint_shutdown(exec_ctx, ep->wrapped_ep, why); +static void endpoint_destroy(grpc_endpoint* secure_ep) { + secure_endpoint* ep = reinterpret_cast(secure_ep); + SECURE_ENDPOINT_UNREF(ep, "destroy"); } -static void endpoint_destroy(grpc_exec_ctx *exec_ctx, - grpc_endpoint *secure_ep) { - secure_endpoint *ep = (secure_endpoint *)secure_ep; - SECURE_ENDPOINT_UNREF(exec_ctx, ep, "destroy"); +static void endpoint_add_to_pollset(grpc_endpoint* secure_ep, + grpc_pollset* pollset) { + secure_endpoint* ep = reinterpret_cast(secure_ep); + grpc_endpoint_add_to_pollset(ep->wrapped_ep, pollset); } -static void endpoint_add_to_pollset(grpc_exec_ctx *exec_ctx, - grpc_endpoint *secure_ep, - grpc_pollset *pollset) { - secure_endpoint *ep = (secure_endpoint *)secure_ep; - grpc_endpoint_add_to_pollset(exec_ctx, ep->wrapped_ep, pollset); +static void endpoint_add_to_pollset_set(grpc_endpoint* secure_ep, + grpc_pollset_set* pollset_set) { + secure_endpoint* ep = reinterpret_cast(secure_ep); + grpc_endpoint_add_to_pollset_set(ep->wrapped_ep, pollset_set); } -static void endpoint_add_to_pollset_set(grpc_exec_ctx *exec_ctx, - grpc_endpoint *secure_ep, - grpc_pollset_set *pollset_set) { - secure_endpoint *ep = (secure_endpoint *)secure_ep; - grpc_endpoint_add_to_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set); +static void endpoint_delete_from_pollset_set(grpc_endpoint* secure_ep, + grpc_pollset_set* pollset_set) { + secure_endpoint* ep = reinterpret_cast(secure_ep); + grpc_endpoint_delete_from_pollset_set(ep->wrapped_ep, pollset_set); } -static char *endpoint_get_peer(grpc_endpoint *secure_ep) { - secure_endpoint *ep = (secure_endpoint *)secure_ep; +static char* endpoint_get_peer(grpc_endpoint* secure_ep) { + secure_endpoint* ep = reinterpret_cast(secure_ep); return grpc_endpoint_get_peer(ep->wrapped_ep); } -static int endpoint_get_fd(grpc_endpoint *secure_ep) { - secure_endpoint *ep = (secure_endpoint *)secure_ep; +static int endpoint_get_fd(grpc_endpoint* secure_ep) { + secure_endpoint* ep = reinterpret_cast(secure_ep); return grpc_endpoint_get_fd(ep->wrapped_ep); } -static grpc_resource_user *endpoint_get_resource_user( - grpc_endpoint *secure_ep) { - secure_endpoint *ep = (secure_endpoint *)secure_ep; +static grpc_resource_user* endpoint_get_resource_user( + grpc_endpoint* secure_ep) { + secure_endpoint* ep = reinterpret_cast(secure_ep); return grpc_endpoint_get_resource_user(ep->wrapped_ep); } @@ -399,19 +393,21 @@ static const grpc_endpoint_vtable vtable = {endpoint_read, endpoint_write, endpoint_add_to_pollset, endpoint_add_to_pollset_set, + 
endpoint_delete_from_pollset_set, endpoint_shutdown, endpoint_destroy, endpoint_get_resource_user, endpoint_get_peer, endpoint_get_fd}; -grpc_endpoint *grpc_secure_endpoint_create( - struct tsi_frame_protector *protector, - struct tsi_zero_copy_grpc_protector *zero_copy_protector, - grpc_endpoint *transport, grpc_slice *leftover_slices, +grpc_endpoint* grpc_secure_endpoint_create( + struct tsi_frame_protector* protector, + struct tsi_zero_copy_grpc_protector* zero_copy_protector, + grpc_endpoint* transport, grpc_slice* leftover_slices, size_t leftover_nslices) { size_t i; - secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint)); + secure_endpoint* ep = + static_cast(gpr_malloc(sizeof(secure_endpoint))); ep->base.vtable = &vtable; ep->wrapped_ep = transport; ep->protector = protector; @@ -425,7 +421,7 @@ grpc_endpoint *grpc_secure_endpoint_create( ep->read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE); grpc_slice_buffer_init(&ep->output_buffer); grpc_slice_buffer_init(&ep->source_buffer); - ep->read_buffer = NULL; + ep->read_buffer = nullptr; GRPC_CLOSURE_INIT(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx); gpr_mu_init(&ep->protector_mu); gpr_ref_init(&ep->ref, 1); diff --git a/Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.h b/Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.h index 3323a6ff4..e7e335167 100644 --- a/Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.h +++ b/Sources/CgRPC/src/core/lib/security/transport/secure_endpoint.h @@ -19,21 +19,23 @@ #ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H #define GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H +#include + #include #include "src/core/lib/iomgr/endpoint.h" struct tsi_frame_protector; struct tsi_zero_copy_grpc_protector; -extern grpc_tracer_flag grpc_trace_secure_endpoint; +extern grpc_core::TraceFlag grpc_trace_secure_endpoint; /* Takes ownership of protector, zero_copy_protector, and to_wrap, and refs * leftover_slices. If zero_copy_protector is not NULL, protector will never be * used. */ -grpc_endpoint *grpc_secure_endpoint_create( - struct tsi_frame_protector *protector, - struct tsi_zero_copy_grpc_protector *zero_copy_protector, - grpc_endpoint *to_wrap, grpc_slice *leftover_slices, +grpc_endpoint* grpc_secure_endpoint_create( + struct tsi_frame_protector* protector, + struct tsi_zero_copy_grpc_protector* zero_copy_protector, + grpc_endpoint* to_wrap, grpc_slice* leftover_slices, size_t leftover_nslices); #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H */ diff --git a/Sources/CgRPC/src/core/lib/security/transport/security_connector.c b/Sources/CgRPC/src/core/lib/security/transport/security_connector.c deleted file mode 100644 index 2a9e939d4..000000000 --- a/Sources/CgRPC/src/core/lib/security/transport/security_connector.c +++ /dev/null @@ -1,914 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/security/transport/security_connector.h" - -#include -#include - -#include -#include -#include -#include -#include - -#include "src/core/ext/transport/chttp2/alpn/alpn.h" -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/channel/handshaker.h" -#include "src/core/lib/iomgr/load_file.h" -#include "src/core/lib/security/context/security_context.h" -#include "src/core/lib/security/credentials/credentials.h" -#include "src/core/lib/security/credentials/fake/fake_credentials.h" -#include "src/core/lib/security/transport/lb_targets_info.h" -#include "src/core/lib/security/transport/secure_endpoint.h" -#include "src/core/lib/security/transport/security_handshaker.h" -#include "src/core/lib/support/env.h" -#include "src/core/lib/support/string.h" -#include "src/core/tsi/fake_transport_security.h" -#include "src/core/tsi/ssl_transport_security.h" -#include "src/core/tsi/transport_security_adapter.h" - -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_security_connector_refcount = - GRPC_TRACER_INITIALIZER(false, "security_connector_refcount"); -#endif - -/* -- Constants. -- */ - -#ifndef INSTALL_PREFIX -static const char *installed_roots_path = "/usr/share/grpc/roots.pem"; -#else -static const char *installed_roots_path = - INSTALL_PREFIX "/share/grpc/roots.pem"; -#endif - -/* -- Overridden default roots. -- */ - -static grpc_ssl_roots_override_callback ssl_roots_override_cb = NULL; - -void grpc_set_ssl_roots_override_callback(grpc_ssl_roots_override_callback cb) { - ssl_roots_override_cb = cb; -} - -/* -- Cipher suites. -- */ - -/* Defines the cipher suites that we accept by default. All these cipher suites - are compliant with HTTP2. */ -#define GRPC_SSL_CIPHER_SUITES \ - "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384" - -static gpr_once cipher_suites_once = GPR_ONCE_INIT; -static const char *cipher_suites = NULL; - -static void init_cipher_suites(void) { - char *overridden = gpr_getenv("GRPC_SSL_CIPHER_SUITES"); - cipher_suites = overridden != NULL ? overridden : GRPC_SSL_CIPHER_SUITES; -} - -static const char *ssl_cipher_suites(void) { - gpr_once_init(&cipher_suites_once, init_cipher_suites); - return cipher_suites; -} - -/* -- Common methods. -- */ - -/* Returns the first property with that name. 
*/ -const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer, - const char *name) { - size_t i; - if (peer == NULL) return NULL; - for (i = 0; i < peer->property_count; i++) { - const tsi_peer_property *property = &peer->properties[i]; - if (name == NULL && property->name == NULL) { - return property; - } - if (name != NULL && property->name != NULL && - strcmp(property->name, name) == 0) { - return property; - } - } - return NULL; -} - -void grpc_channel_security_connector_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector, - grpc_handshake_manager *handshake_mgr) { - if (connector != NULL) { - connector->add_handshakers(exec_ctx, connector, handshake_mgr); - } -} - -void grpc_server_security_connector_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_server_security_connector *connector, - grpc_handshake_manager *handshake_mgr) { - if (connector != NULL) { - connector->add_handshakers(exec_ctx, connector, handshake_mgr); - } -} - -void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, - tsi_peer peer, - grpc_auth_context **auth_context, - grpc_closure *on_peer_checked) { - if (sc == NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, - GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "cannot check peer -- no security connector")); - tsi_peer_destruct(&peer); - } else { - sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked); - } -} - -bool grpc_channel_security_connector_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - const char *host, grpc_auth_context *auth_context, - grpc_closure *on_call_host_checked, grpc_error **error) { - if (sc == NULL || sc->check_call_host == NULL) { - *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "cannot check call host -- no security connector"); - return true; - } - return sc->check_call_host(exec_ctx, sc, host, auth_context, - on_call_host_checked, error); -} - -void grpc_channel_security_connector_cancel_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_closure *on_call_host_checked, grpc_error *error) { - if (sc == NULL || sc->cancel_check_call_host == NULL) { - GRPC_ERROR_UNREF(error); - return; - } - sc->cancel_check_call_host(exec_ctx, sc, on_call_host_checked, error); -} - -#ifndef NDEBUG -grpc_security_connector *grpc_security_connector_ref( - grpc_security_connector *sc, const char *file, int line, - const char *reason) { - if (sc == NULL) return NULL; - if (GRPC_TRACER_ON(grpc_trace_security_connector_refcount)) { - gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "SECURITY_CONNECTOR:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", sc, - val, val + 1, reason); - } -#else -grpc_security_connector *grpc_security_connector_ref( - grpc_security_connector *sc) { - if (sc == NULL) return NULL; -#endif - gpr_ref(&sc->refcount); - return sc; -} - -#ifndef NDEBUG -void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, - const char *file, int line, - const char *reason) { - if (sc == NULL) return; - if (GRPC_TRACER_ON(grpc_trace_security_connector_refcount)) { - gpr_atm val = gpr_atm_no_barrier_load(&sc->refcount.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "SECURITY_CONNECTOR:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", sc, - val, val - 1, reason); - } -#else -void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { - if (sc == NULL) return; 
-#endif - if (gpr_unref(&sc->refcount)) sc->vtable->destroy(exec_ctx, sc); -} - -static void connector_pointer_arg_destroy(grpc_exec_ctx *exec_ctx, void *p) { - GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, "connector_pointer_arg_destroy"); -} - -static void *connector_pointer_arg_copy(void *p) { - return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg_copy"); -} - -static int connector_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); } - -static const grpc_arg_pointer_vtable connector_pointer_vtable = { - connector_pointer_arg_copy, connector_pointer_arg_destroy, - connector_pointer_cmp}; - -grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc) { - return grpc_channel_arg_pointer_create(GRPC_ARG_SECURITY_CONNECTOR, sc, - &connector_pointer_vtable); -} - -grpc_security_connector *grpc_security_connector_from_arg(const grpc_arg *arg) { - if (strcmp(arg->key, GRPC_ARG_SECURITY_CONNECTOR)) return NULL; - if (arg->type != GRPC_ARG_POINTER) { - gpr_log(GPR_ERROR, "Invalid type %d for arg %s", arg->type, - GRPC_ARG_SECURITY_CONNECTOR); - return NULL; - } - return arg->value.pointer.p; -} - -grpc_security_connector *grpc_security_connector_find_in_args( - const grpc_channel_args *args) { - size_t i; - if (args == NULL) return NULL; - for (i = 0; i < args->num_args; i++) { - grpc_security_connector *sc = - grpc_security_connector_from_arg(&args->args[i]); - if (sc != NULL) return sc; - } - return NULL; -} - -/* -- Fake implementation. -- */ - -typedef struct { - grpc_channel_security_connector base; - char *target; - char *expected_targets; - bool is_lb_channel; -} grpc_fake_channel_security_connector; - -static void fake_channel_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { - grpc_fake_channel_security_connector *c = - (grpc_fake_channel_security_connector *)sc; - grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds); - gpr_free(c->target); - gpr_free(c->expected_targets); - gpr_free(c); -} - -static void fake_server_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { - gpr_free(sc); -} - -static bool fake_check_target(const char *target_type, const char *target, - const char *set_str) { - GPR_ASSERT(target_type != NULL); - GPR_ASSERT(target != NULL); - char **set = NULL; - size_t set_size = 0; - gpr_string_split(set_str, ",", &set, &set_size); - bool found = false; - for (size_t i = 0; i < set_size; ++i) { - if (set[i] != NULL && strcmp(target, set[i]) == 0) found = true; - } - for (size_t i = 0; i < set_size; ++i) { - gpr_free(set[i]); - } - gpr_free(set); - return found; -} - -static void fake_secure_name_check(const char *target, - const char *expected_targets, - bool is_lb_channel) { - if (expected_targets == NULL) return; - char **lbs_and_backends = NULL; - size_t lbs_and_backends_size = 0; - bool success = false; - gpr_string_split(expected_targets, ";", &lbs_and_backends, - &lbs_and_backends_size); - if (lbs_and_backends_size > 2 || lbs_and_backends_size == 0) { - gpr_log(GPR_ERROR, "Invalid expected targets arg value: '%s'", - expected_targets); - goto done; - } - if (is_lb_channel) { - if (lbs_and_backends_size != 2) { - gpr_log(GPR_ERROR, - "Invalid expected targets arg value: '%s'. 
Expectations for LB " - "channels must be of the form 'be1,be2,be3,...;lb1,lb2,...", - expected_targets); - goto done; - } - if (!fake_check_target("LB", target, lbs_and_backends[1])) { - gpr_log(GPR_ERROR, "LB target '%s' not found in expected set '%s'", - target, lbs_and_backends[1]); - goto done; - } - success = true; - } else { - if (!fake_check_target("Backend", target, lbs_and_backends[0])) { - gpr_log(GPR_ERROR, "Backend target '%s' not found in expected set '%s'", - target, lbs_and_backends[0]); - goto done; - } - success = true; - } -done: - for (size_t i = 0; i < lbs_and_backends_size; ++i) { - gpr_free(lbs_and_backends[i]); - } - gpr_free(lbs_and_backends); - if (!success) abort(); -} - -static void fake_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, - grpc_auth_context **auth_context, - grpc_closure *on_peer_checked) { - const char *prop_name; - grpc_error *error = GRPC_ERROR_NONE; - *auth_context = NULL; - if (peer.property_count != 1) { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Fake peers should only have 1 property."); - goto end; - } - prop_name = peer.properties[0].name; - if (prop_name == NULL || - strcmp(prop_name, TSI_CERTIFICATE_TYPE_PEER_PROPERTY)) { - char *msg; - gpr_asprintf(&msg, "Unexpected property in fake peer: %s.", - prop_name == NULL ? "" : prop_name); - error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); - gpr_free(msg); - goto end; - } - if (strncmp(peer.properties[0].value.data, TSI_FAKE_CERTIFICATE_TYPE, - peer.properties[0].value.length)) { - error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Invalid value for cert type property."); - goto end; - } - *auth_context = grpc_auth_context_create(NULL); - grpc_auth_context_add_cstring_property( - *auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME, - GRPC_FAKE_TRANSPORT_SECURITY_TYPE); -end: - GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error); - tsi_peer_destruct(&peer); -} - -static void fake_channel_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, - grpc_auth_context **auth_context, - grpc_closure *on_peer_checked) { - fake_check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked); - grpc_fake_channel_security_connector *c = - (grpc_fake_channel_security_connector *)sc; - fake_secure_name_check(c->target, c->expected_targets, c->is_lb_channel); -} - -static void fake_server_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, - grpc_auth_context **auth_context, - grpc_closure *on_peer_checked) { - fake_check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked); -} - -static bool fake_channel_check_call_host(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, - const char *host, - grpc_auth_context *auth_context, - grpc_closure *on_call_host_checked, - grpc_error **error) { - return true; -} - -static void fake_channel_cancel_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_closure *on_call_host_checked, grpc_error *error) { - GRPC_ERROR_UNREF(error); -} - -static void fake_channel_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { - grpc_handshake_manager_add( - handshake_mgr, - grpc_security_handshaker_create( - exec_ctx, tsi_create_fake_handshaker(true /* is_client */), - &sc->base)); -} - -static void fake_server_add_handshakers(grpc_exec_ctx *exec_ctx, - grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { - grpc_handshake_manager_add( - 
handshake_mgr, - grpc_security_handshaker_create( - exec_ctx, tsi_create_fake_handshaker(false /* is_client */), - &sc->base)); -} - -static grpc_security_connector_vtable fake_channel_vtable = { - fake_channel_destroy, fake_channel_check_peer}; - -static grpc_security_connector_vtable fake_server_vtable = { - fake_server_destroy, fake_server_check_peer}; - -grpc_channel_security_connector *grpc_fake_channel_security_connector_create( - grpc_call_credentials *request_metadata_creds, const char *target, - const grpc_channel_args *args) { - grpc_fake_channel_security_connector *c = gpr_zalloc(sizeof(*c)); - gpr_ref_init(&c->base.base.refcount, 1); - c->base.base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME; - c->base.base.vtable = &fake_channel_vtable; - c->base.request_metadata_creds = - grpc_call_credentials_ref(request_metadata_creds); - c->base.check_call_host = fake_channel_check_call_host; - c->base.cancel_check_call_host = fake_channel_cancel_check_call_host; - c->base.add_handshakers = fake_channel_add_handshakers; - c->target = gpr_strdup(target); - const char *expected_targets = grpc_fake_transport_get_expected_targets(args); - c->expected_targets = gpr_strdup(expected_targets); - c->is_lb_channel = (grpc_lb_targets_info_find_in_args(args) != NULL); - return &c->base; -} - -grpc_server_security_connector *grpc_fake_server_security_connector_create( - void) { - grpc_server_security_connector *c = - gpr_zalloc(sizeof(grpc_server_security_connector)); - gpr_ref_init(&c->base.refcount, 1); - c->base.vtable = &fake_server_vtable; - c->base.url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME; - c->add_handshakers = fake_server_add_handshakers; - return c; -} - -/* --- Ssl implementation. --- */ - -typedef struct { - grpc_channel_security_connector base; - tsi_ssl_client_handshaker_factory *client_handshaker_factory; - char *target_name; - char *overridden_target_name; -} grpc_ssl_channel_security_connector; - -typedef struct { - grpc_server_security_connector base; - tsi_ssl_server_handshaker_factory *server_handshaker_factory; -} grpc_ssl_server_security_connector; - -static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { - grpc_ssl_channel_security_connector *c = - (grpc_ssl_channel_security_connector *)sc; - grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds); - tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory); - c->client_handshaker_factory = NULL; - if (c->target_name != NULL) gpr_free(c->target_name); - if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name); - gpr_free(sc); -} - -static void ssl_server_destroy(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc) { - grpc_ssl_server_security_connector *c = - (grpc_ssl_server_security_connector *)sc; - tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory); - c->server_handshaker_factory = NULL; - gpr_free(sc); -} - -static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { - grpc_ssl_channel_security_connector *c = - (grpc_ssl_channel_security_connector *)sc; - // Instantiate TSI handshaker. - tsi_handshaker *tsi_hs = NULL; - tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker( - c->client_handshaker_factory, - c->overridden_target_name != NULL ? 
c->overridden_target_name - : c->target_name, - &tsi_hs); - if (result != TSI_OK) { - gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.", - tsi_result_to_string(result)); - return; - } - - // Create handshakers. - grpc_handshake_manager_add( - handshake_mgr, - grpc_security_handshaker_create( - exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base)); -} - -static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx, - grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr) { - grpc_ssl_server_security_connector *c = - (grpc_ssl_server_security_connector *)sc; - // Instantiate TSI handshaker. - tsi_handshaker *tsi_hs = NULL; - tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker( - c->server_handshaker_factory, &tsi_hs); - if (result != TSI_OK) { - gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.", - tsi_result_to_string(result)); - return; - } - - // Create handshakers. - grpc_handshake_manager_add( - handshake_mgr, - grpc_security_handshaker_create( - exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base)); -} - -static int ssl_host_matches_name(const tsi_peer *peer, const char *peer_name) { - char *allocated_name = NULL; - int r; - - if (strchr(peer_name, ':') != NULL) { - char *ignored_port; - gpr_split_host_port(peer_name, &allocated_name, &ignored_port); - gpr_free(ignored_port); - peer_name = allocated_name; - if (!peer_name) return 0; - } - r = tsi_ssl_peer_matches_name(peer, peer_name); - gpr_free(allocated_name); - return r; -} - -grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer) { - size_t i; - grpc_auth_context *ctx = NULL; - const char *peer_identity_property_name = NULL; - - /* The caller has checked the certificate type property. */ - GPR_ASSERT(peer->property_count >= 1); - ctx = grpc_auth_context_create(NULL); - grpc_auth_context_add_cstring_property( - ctx, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME, - GRPC_SSL_TRANSPORT_SECURITY_TYPE); - for (i = 0; i < peer->property_count; i++) { - const tsi_peer_property *prop = &peer->properties[i]; - if (prop->name == NULL) continue; - if (strcmp(prop->name, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY) == 0) { - /* If there is no subject alt name, have the CN as the identity. */ - if (peer_identity_property_name == NULL) { - peer_identity_property_name = GRPC_X509_CN_PROPERTY_NAME; - } - grpc_auth_context_add_property(ctx, GRPC_X509_CN_PROPERTY_NAME, - prop->value.data, prop->value.length); - } else if (strcmp(prop->name, - TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) { - peer_identity_property_name = GRPC_X509_SAN_PROPERTY_NAME; - grpc_auth_context_add_property(ctx, GRPC_X509_SAN_PROPERTY_NAME, - prop->value.data, prop->value.length); - } else if (strcmp(prop->name, TSI_X509_PEM_CERT_PROPERTY) == 0) { - grpc_auth_context_add_property(ctx, GRPC_X509_PEM_CERT_PROPERTY_NAME, - prop->value.data, prop->value.length); - } - } - if (peer_identity_property_name != NULL) { - GPR_ASSERT(grpc_auth_context_set_peer_identity_property_name( - ctx, peer_identity_property_name) == 1); - } - return ctx; -} - -static grpc_error *ssl_check_peer(grpc_security_connector *sc, - const char *peer_name, const tsi_peer *peer, - grpc_auth_context **auth_context) { - /* Check the ALPN. 
*/ - const tsi_peer_property *p = - tsi_peer_get_property_by_name(peer, TSI_SSL_ALPN_SELECTED_PROTOCOL); - if (p == NULL) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Cannot check peer: missing selected ALPN property."); - } - if (!grpc_chttp2_is_alpn_version_supported(p->value.data, p->value.length)) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Cannot check peer: invalid ALPN value."); - } - - /* Check the peer name if specified. */ - if (peer_name != NULL && !ssl_host_matches_name(peer, peer_name)) { - char *msg; - gpr_asprintf(&msg, "Peer name %s is not in peer certificate", peer_name); - grpc_error *error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); - gpr_free(msg); - return error; - } - *auth_context = tsi_ssl_peer_to_auth_context(peer); - return GRPC_ERROR_NONE; -} - -static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, - grpc_auth_context **auth_context, - grpc_closure *on_peer_checked) { - grpc_ssl_channel_security_connector *c = - (grpc_ssl_channel_security_connector *)sc; - grpc_error *error = ssl_check_peer(sc, c->overridden_target_name != NULL - ? c->overridden_target_name - : c->target_name, - &peer, auth_context); - GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error); - tsi_peer_destruct(&peer); -} - -static void ssl_server_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, tsi_peer peer, - grpc_auth_context **auth_context, - grpc_closure *on_peer_checked) { - grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context); - tsi_peer_destruct(&peer); - GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error); -} - -static void add_shallow_auth_property_to_peer(tsi_peer *peer, - const grpc_auth_property *prop, - const char *tsi_prop_name) { - tsi_peer_property *tsi_prop = &peer->properties[peer->property_count++]; - tsi_prop->name = (char *)tsi_prop_name; - tsi_prop->value.data = prop->value; - tsi_prop->value.length = prop->value_length; -} - -tsi_peer tsi_shallow_peer_from_ssl_auth_context( - const grpc_auth_context *auth_context) { - size_t max_num_props = 0; - grpc_auth_property_iterator it; - const grpc_auth_property *prop; - tsi_peer peer; - memset(&peer, 0, sizeof(peer)); - - it = grpc_auth_context_property_iterator(auth_context); - while (grpc_auth_property_iterator_next(&it) != NULL) max_num_props++; - - if (max_num_props > 0) { - peer.properties = gpr_malloc(max_num_props * sizeof(tsi_peer_property)); - it = grpc_auth_context_property_iterator(auth_context); - while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) { - if (strcmp(prop->name, GRPC_X509_SAN_PROPERTY_NAME) == 0) { - add_shallow_auth_property_to_peer( - &peer, prop, TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY); - } else if (strcmp(prop->name, GRPC_X509_CN_PROPERTY_NAME) == 0) { - add_shallow_auth_property_to_peer( - &peer, prop, TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY); - } else if (strcmp(prop->name, GRPC_X509_PEM_CERT_PROPERTY_NAME) == 0) { - add_shallow_auth_property_to_peer(&peer, prop, - TSI_X509_PEM_CERT_PROPERTY); - } - } - } - return peer; -} - -void tsi_shallow_peer_destruct(tsi_peer *peer) { - if (peer->properties != NULL) gpr_free(peer->properties); -} - -static bool ssl_channel_check_call_host(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, - const char *host, - grpc_auth_context *auth_context, - grpc_closure *on_call_host_checked, - grpc_error **error) { - grpc_ssl_channel_security_connector *c = - (grpc_ssl_channel_security_connector *)sc; - grpc_security_status status = 
GRPC_SECURITY_ERROR; - tsi_peer peer = tsi_shallow_peer_from_ssl_auth_context(auth_context); - if (ssl_host_matches_name(&peer, host)) status = GRPC_SECURITY_OK; - /* If the target name was overridden, then the original target_name was - 'checked' transitively during the previous peer check at the end of the - handshake. */ - if (c->overridden_target_name != NULL && strcmp(host, c->target_name) == 0) { - status = GRPC_SECURITY_OK; - } - if (status != GRPC_SECURITY_OK) { - *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "call host does not match SSL server name"); - } - tsi_shallow_peer_destruct(&peer); - return true; -} - -static void ssl_channel_cancel_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_closure *on_call_host_checked, grpc_error *error) { - GRPC_ERROR_UNREF(error); -} - -static grpc_security_connector_vtable ssl_channel_vtable = { - ssl_channel_destroy, ssl_channel_check_peer}; - -static grpc_security_connector_vtable ssl_server_vtable = { - ssl_server_destroy, ssl_server_check_peer}; - -/* returns a NULL terminated slice. */ -static grpc_slice compute_default_pem_root_certs_once(void) { - grpc_slice result = grpc_empty_slice(); - - /* First try to load the roots from the environment. */ - char *default_root_certs_path = - gpr_getenv(GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR); - if (default_root_certs_path != NULL) { - GRPC_LOG_IF_ERROR("load_file", - grpc_load_file(default_root_certs_path, 1, &result)); - gpr_free(default_root_certs_path); - } - - /* Try overridden roots if needed. */ - grpc_ssl_roots_override_result ovrd_res = GRPC_SSL_ROOTS_OVERRIDE_FAIL; - if (GRPC_SLICE_IS_EMPTY(result) && ssl_roots_override_cb != NULL) { - char *pem_root_certs = NULL; - ovrd_res = ssl_roots_override_cb(&pem_root_certs); - if (ovrd_res == GRPC_SSL_ROOTS_OVERRIDE_OK) { - GPR_ASSERT(pem_root_certs != NULL); - result = grpc_slice_from_copied_buffer( - pem_root_certs, - strlen(pem_root_certs) + 1); // NULL terminator. - } - gpr_free(pem_root_certs); - } - - /* Fall back to installed certs if needed. 
*/ - if (GRPC_SLICE_IS_EMPTY(result) && - ovrd_res != GRPC_SSL_ROOTS_OVERRIDE_FAIL_PERMANENTLY) { - GRPC_LOG_IF_ERROR("load_file", - grpc_load_file(installed_roots_path, 1, &result)); - } - return result; -} - -static grpc_slice default_pem_root_certs; - -static void init_default_pem_root_certs(void) { - default_pem_root_certs = compute_default_pem_root_certs_once(); -} - -grpc_slice grpc_get_default_ssl_roots_for_testing(void) { - return compute_default_pem_root_certs_once(); -} - -static tsi_client_certificate_request_type -get_tsi_client_certificate_request_type( - grpc_ssl_client_certificate_request_type grpc_request_type) { - switch (grpc_request_type) { - case GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE: - return TSI_DONT_REQUEST_CLIENT_CERTIFICATE; - - case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: - return TSI_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY; - - case GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: - return TSI_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY; - - case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: - return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY; - - case GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: - return TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY; - - default: - // Is this a sane default - return TSI_DONT_REQUEST_CLIENT_CERTIFICATE; - } -} - -const char *grpc_get_default_ssl_roots(void) { - /* TODO(jboeuf@google.com): Maybe revisit the approach which consists in - loading all the roots once for the lifetime of the process. */ - static gpr_once once = GPR_ONCE_INIT; - gpr_once_init(&once, init_default_pem_root_certs); - return GRPC_SLICE_IS_EMPTY(default_pem_root_certs) - ? NULL - : (const char *)GRPC_SLICE_START_PTR(default_pem_root_certs); -} - -grpc_security_status grpc_ssl_channel_security_connector_create( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *request_metadata_creds, - const grpc_ssl_config *config, const char *target_name, - const char *overridden_target_name, grpc_channel_security_connector **sc) { - size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions(); - const char **alpn_protocol_strings = - gpr_malloc(sizeof(const char *) * num_alpn_protocols); - tsi_result result = TSI_OK; - grpc_ssl_channel_security_connector *c; - size_t i; - const char *pem_root_certs; - char *port; - - for (i = 0; i < num_alpn_protocols; i++) { - alpn_protocol_strings[i] = grpc_chttp2_get_alpn_version_index(i); - } - - if (config == NULL || target_name == NULL) { - gpr_log(GPR_ERROR, "An ssl channel needs a config and a target name."); - goto error; - } - if (config->pem_root_certs == NULL) { - pem_root_certs = grpc_get_default_ssl_roots(); - if (pem_root_certs == NULL) { - gpr_log(GPR_ERROR, "Could not get default pem root certs."); - goto error; - } - } else { - pem_root_certs = config->pem_root_certs; - } - - c = gpr_zalloc(sizeof(grpc_ssl_channel_security_connector)); - - gpr_ref_init(&c->base.base.refcount, 1); - c->base.base.vtable = &ssl_channel_vtable; - c->base.base.url_scheme = GRPC_SSL_URL_SCHEME; - c->base.request_metadata_creds = - grpc_call_credentials_ref(request_metadata_creds); - c->base.check_call_host = ssl_channel_check_call_host; - c->base.cancel_check_call_host = ssl_channel_cancel_check_call_host; - c->base.add_handshakers = ssl_channel_add_handshakers; - gpr_split_host_port(target_name, &c->target_name, &port); - gpr_free(port); - if (overridden_target_name != NULL) { - c->overridden_target_name = gpr_strdup(overridden_target_name); - } - - bool has_key_cert_pair = 
config->pem_key_cert_pair.private_key != NULL && - config->pem_key_cert_pair.cert_chain != NULL; - result = tsi_create_ssl_client_handshaker_factory( - has_key_cert_pair ? &config->pem_key_cert_pair : NULL, pem_root_certs, - ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols, - &c->client_handshaker_factory); - if (result != TSI_OK) { - gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", - tsi_result_to_string(result)); - ssl_channel_destroy(exec_ctx, &c->base.base); - *sc = NULL; - goto error; - } - *sc = &c->base; - gpr_free((void *)alpn_protocol_strings); - return GRPC_SECURITY_OK; - -error: - gpr_free((void *)alpn_protocol_strings); - return GRPC_SECURITY_ERROR; -} - -grpc_security_status grpc_ssl_server_security_connector_create( - grpc_exec_ctx *exec_ctx, const grpc_ssl_server_config *config, - grpc_server_security_connector **sc) { - size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions(); - const char **alpn_protocol_strings = - gpr_malloc(sizeof(const char *) * num_alpn_protocols); - tsi_result result = TSI_OK; - grpc_ssl_server_security_connector *c; - size_t i; - - for (i = 0; i < num_alpn_protocols; i++) { - alpn_protocol_strings[i] = grpc_chttp2_get_alpn_version_index(i); - } - - if (config == NULL || config->num_key_cert_pairs == 0) { - gpr_log(GPR_ERROR, "An SSL server needs a key and a cert."); - goto error; - } - c = gpr_zalloc(sizeof(grpc_ssl_server_security_connector)); - - gpr_ref_init(&c->base.base.refcount, 1); - c->base.base.url_scheme = GRPC_SSL_URL_SCHEME; - c->base.base.vtable = &ssl_server_vtable; - result = tsi_create_ssl_server_handshaker_factory_ex( - config->pem_key_cert_pairs, config->num_key_cert_pairs, - config->pem_root_certs, get_tsi_client_certificate_request_type( - config->client_certificate_request), - ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols, - &c->server_handshaker_factory); - if (result != TSI_OK) { - gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", - tsi_result_to_string(result)); - ssl_server_destroy(exec_ctx, &c->base.base); - *sc = NULL; - goto error; - } - c->base.add_handshakers = ssl_server_add_handshakers; - *sc = &c->base; - gpr_free((void *)alpn_protocol_strings); - return GRPC_SECURITY_OK; - -error: - gpr_free((void *)alpn_protocol_strings); - return GRPC_SECURITY_ERROR; -} diff --git a/Sources/CgRPC/src/core/lib/security/transport/security_connector.h b/Sources/CgRPC/src/core/lib/security/transport/security_connector.h deleted file mode 100644 index 4f9b63ad2..000000000 --- a/Sources/CgRPC/src/core/lib/security/transport/security_connector.h +++ /dev/null @@ -1,248 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_CONNECTOR_H -#define GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_CONNECTOR_H - -#include - -#include - -#include "src/core/lib/channel/handshaker.h" -#include "src/core/lib/iomgr/endpoint.h" -#include "src/core/lib/iomgr/tcp_server.h" -#include "src/core/tsi/ssl_transport_security.h" -#include "src/core/tsi/transport_security_interface.h" - -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_security_connector_refcount; -#endif - -/* --- status enum. --- */ - -typedef enum { GRPC_SECURITY_OK = 0, GRPC_SECURITY_ERROR } grpc_security_status; - -/* --- URL schemes. --- */ - -#define GRPC_SSL_URL_SCHEME "https" -#define GRPC_FAKE_SECURITY_URL_SCHEME "http+fake_security" - -/* --- security_connector object. --- - - A security connector object represents away to configure the underlying - transport security mechanism and check the resulting trusted peer. */ - -typedef struct grpc_security_connector grpc_security_connector; - -#define GRPC_ARG_SECURITY_CONNECTOR "grpc.security_connector" - -typedef struct { - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc); - void (*check_peer)(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc, - tsi_peer peer, grpc_auth_context **auth_context, - grpc_closure *on_peer_checked); -} grpc_security_connector_vtable; - -typedef struct grpc_security_connector_handshake_list { - void *handshake; - struct grpc_security_connector_handshake_list *next; -} grpc_security_connector_handshake_list; - -struct grpc_security_connector { - const grpc_security_connector_vtable *vtable; - gpr_refcount refcount; - const char *url_scheme; -}; - -/* Refcounting. */ -#ifndef NDEBUG -#define GRPC_SECURITY_CONNECTOR_REF(p, r) \ - grpc_security_connector_ref((p), __FILE__, __LINE__, (r)) -#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \ - grpc_security_connector_unref((exec_ctx), (p), __FILE__, __LINE__, (r)) -grpc_security_connector *grpc_security_connector_ref( - grpc_security_connector *policy, const char *file, int line, - const char *reason); -void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx, - grpc_security_connector *policy, - const char *file, int line, - const char *reason); -#else -#define GRPC_SECURITY_CONNECTOR_REF(p, r) grpc_security_connector_ref((p)) -#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \ - grpc_security_connector_unref((exec_ctx), (p)) -grpc_security_connector *grpc_security_connector_ref( - grpc_security_connector *policy); -void grpc_security_connector_unref(grpc_exec_ctx *exec_ctx, - grpc_security_connector *policy); -#endif - -/* Check the peer. Callee takes ownership of the peer object. - When done, sets *auth_context and invokes on_peer_checked. */ -void grpc_security_connector_check_peer(grpc_exec_ctx *exec_ctx, - grpc_security_connector *sc, - tsi_peer peer, - grpc_auth_context **auth_context, - grpc_closure *on_peer_checked); - -/* Util to encapsulate the connector in a channel arg. */ -grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc); - -/* Util to get the connector from a channel arg. */ -grpc_security_connector *grpc_security_connector_from_arg(const grpc_arg *arg); - -/* Util to find the connector from channel args. */ -grpc_security_connector *grpc_security_connector_find_in_args( - const grpc_channel_args *args); - -/* --- channel_security_connector object. --- - - A channel security connector object represents away to configure the - underlying transport security mechanism on the client side. 
*/ - -typedef struct grpc_channel_security_connector grpc_channel_security_connector; - -struct grpc_channel_security_connector { - grpc_security_connector base; - grpc_call_credentials *request_metadata_creds; - bool (*check_call_host)(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, const char *host, - grpc_auth_context *auth_context, - grpc_closure *on_call_host_checked, - grpc_error **error); - void (*cancel_check_call_host)(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, - grpc_closure *on_call_host_checked, - grpc_error *error); - void (*add_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_channel_security_connector *sc, - grpc_handshake_manager *handshake_mgr); -}; - -/// Checks that the host that will be set for a call is acceptable. -/// Returns true if completed synchronously, in which case \a error will -/// be set to indicate the result. Otherwise, \a on_call_host_checked -/// will be invoked when complete. -bool grpc_channel_security_connector_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - const char *host, grpc_auth_context *auth_context, - grpc_closure *on_call_host_checked, grpc_error **error); - -/// Cancels a pending asychronous call to -/// grpc_channel_security_connector_check_call_host() with -/// \a on_call_host_checked as its callback. -void grpc_channel_security_connector_cancel_check_call_host( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *sc, - grpc_closure *on_call_host_checked, grpc_error *error); - -/* Registers handshakers with \a handshake_mgr. */ -void grpc_channel_security_connector_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_channel_security_connector *connector, - grpc_handshake_manager *handshake_mgr); - -/* --- server_security_connector object. --- - - A server security connector object represents away to configure the - underlying transport security mechanism on the server side. */ - -typedef struct grpc_server_security_connector grpc_server_security_connector; - -struct grpc_server_security_connector { - grpc_security_connector base; - void (*add_handshakers)(grpc_exec_ctx *exec_ctx, - grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr); -}; - -void grpc_server_security_connector_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_server_security_connector *sc, - grpc_handshake_manager *handshake_mgr); - -/* --- Creation security connectors. --- */ - -/* For TESTING ONLY! - Creates a fake connector that emulates real channel security. */ -grpc_channel_security_connector *grpc_fake_channel_security_connector_create( - grpc_call_credentials *request_metadata_creds, const char *target, - const grpc_channel_args *args); - -/* For TESTING ONLY! - Creates a fake connector that emulates real server security. */ -grpc_server_security_connector *grpc_fake_server_security_connector_create( - void); - -/* Config for ssl clients. */ - -typedef struct { - tsi_ssl_pem_key_cert_pair pem_key_cert_pair; - char *pem_root_certs; -} grpc_ssl_config; - -/* Creates an SSL channel_security_connector. - - request_metadata_creds is the credentials object which metadata - will be sent with each request. This parameter can be NULL. - - config is the SSL config to be used for the SSL channel establishment. - - is_client should be 0 for a server or a non-0 value for a client. - - secure_peer_name is the secure peer name that should be checked in - grpc_channel_security_connector_check_peer. This parameter may be NULL in - which case the peer name will not be checked. 
Note that if this parameter - is not NULL, then, pem_root_certs should not be NULL either. - - sc is a pointer on the connector to be created. - This function returns GRPC_SECURITY_OK in case of success or a - specific error code otherwise. -*/ -grpc_security_status grpc_ssl_channel_security_connector_create( - grpc_exec_ctx *exec_ctx, grpc_call_credentials *request_metadata_creds, - const grpc_ssl_config *config, const char *target_name, - const char *overridden_target_name, grpc_channel_security_connector **sc); - -/* Gets the default ssl roots. Returns NULL if not found. */ -const char *grpc_get_default_ssl_roots(void); - -/* Exposed for TESTING ONLY!. */ -grpc_slice grpc_get_default_ssl_roots_for_testing(void); - -/* Config for ssl servers. */ -typedef struct { - tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs; - size_t num_key_cert_pairs; - char *pem_root_certs; - grpc_ssl_client_certificate_request_type client_certificate_request; -} grpc_ssl_server_config; - -/* Creates an SSL server_security_connector. - - config is the SSL config to be used for the SSL channel establishment. - - sc is a pointer on the connector to be created. - This function returns GRPC_SECURITY_OK in case of success or a - specific error code otherwise. -*/ -grpc_security_status grpc_ssl_server_security_connector_create( - grpc_exec_ctx *exec_ctx, const grpc_ssl_server_config *config, - grpc_server_security_connector **sc); - -/* Util. */ -const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer, - const char *name); - -/* Exposed for testing only. */ -grpc_auth_context *tsi_ssl_peer_to_auth_context(const tsi_peer *peer); -tsi_peer tsi_shallow_peer_from_ssl_auth_context( - const grpc_auth_context *auth_context); -void tsi_shallow_peer_destruct(tsi_peer *peer); - -#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_CONNECTOR_H */ diff --git a/Sources/CgRPC/src/core/lib/security/transport/security_handshaker.c b/Sources/CgRPC/src/core/lib/security/transport/security_handshaker.cc similarity index 52% rename from Sources/CgRPC/src/core/lib/security/transport/security_handshaker.c rename to Sources/CgRPC/src/core/lib/security/transport/security_handshaker.cc index 3d1960561..d9ba3483e 100644 --- a/Sources/CgRPC/src/core/lib/security/transport/security_handshaker.c +++ b/Sources/CgRPC/src/core/lib/security/transport/security_handshaker.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/security/transport/security_handshaker.h" #include @@ -40,75 +42,90 @@ typedef struct { grpc_handshaker base; // State set at creation time. - tsi_handshaker *handshaker; - grpc_security_connector *connector; + tsi_handshaker* handshaker; + grpc_security_connector* connector; gpr_mu mu; gpr_refcount refs; bool shutdown; // Endpoint and read buffer to destroy after a shutdown. - grpc_endpoint *endpoint_to_destroy; - grpc_slice_buffer *read_buffer_to_destroy; + grpc_endpoint* endpoint_to_destroy; + grpc_slice_buffer* read_buffer_to_destroy; // State saved while performing the handshake. 
- grpc_handshaker_args *args; - grpc_closure *on_handshake_done; + grpc_handshaker_args* args; + grpc_closure* on_handshake_done; - unsigned char *handshake_buffer; + unsigned char* handshake_buffer; size_t handshake_buffer_size; grpc_slice_buffer outgoing; grpc_closure on_handshake_data_sent_to_peer; grpc_closure on_handshake_data_received_from_peer; grpc_closure on_peer_checked; - grpc_auth_context *auth_context; - tsi_handshaker_result *handshaker_result; + grpc_auth_context* auth_context; + tsi_handshaker_result* handshaker_result; } security_handshaker; -static void security_handshaker_unref(grpc_exec_ctx *exec_ctx, - security_handshaker *h) { +static size_t move_read_buffer_into_handshake_buffer(security_handshaker* h) { + size_t bytes_in_read_buffer = h->args->read_buffer->length; + if (h->handshake_buffer_size < bytes_in_read_buffer) { + h->handshake_buffer = static_cast( + gpr_realloc(h->handshake_buffer, bytes_in_read_buffer)); + h->handshake_buffer_size = bytes_in_read_buffer; + } + size_t offset = 0; + while (h->args->read_buffer->count > 0) { + grpc_slice next_slice = grpc_slice_buffer_take_first(h->args->read_buffer); + memcpy(h->handshake_buffer + offset, GRPC_SLICE_START_PTR(next_slice), + GRPC_SLICE_LENGTH(next_slice)); + offset += GRPC_SLICE_LENGTH(next_slice); + grpc_slice_unref_internal(next_slice); + } + return bytes_in_read_buffer; +} + +static void security_handshaker_unref(security_handshaker* h) { if (gpr_unref(&h->refs)) { gpr_mu_destroy(&h->mu); tsi_handshaker_destroy(h->handshaker); tsi_handshaker_result_destroy(h->handshaker_result); - if (h->endpoint_to_destroy != NULL) { - grpc_endpoint_destroy(exec_ctx, h->endpoint_to_destroy); + if (h->endpoint_to_destroy != nullptr) { + grpc_endpoint_destroy(h->endpoint_to_destroy); } - if (h->read_buffer_to_destroy != NULL) { - grpc_slice_buffer_destroy_internal(exec_ctx, h->read_buffer_to_destroy); + if (h->read_buffer_to_destroy != nullptr) { + grpc_slice_buffer_destroy_internal(h->read_buffer_to_destroy); gpr_free(h->read_buffer_to_destroy); } gpr_free(h->handshake_buffer); - grpc_slice_buffer_destroy_internal(exec_ctx, &h->outgoing); + grpc_slice_buffer_destroy_internal(&h->outgoing); GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake"); - GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, h->connector, "handshake"); + GRPC_SECURITY_CONNECTOR_UNREF(h->connector, "handshake"); gpr_free(h); } } // Set args fields to NULL, saving the endpoint and read buffer for // later destruction. -static void cleanup_args_for_failure_locked(grpc_exec_ctx *exec_ctx, - security_handshaker *h) { +static void cleanup_args_for_failure_locked(security_handshaker* h) { h->endpoint_to_destroy = h->args->endpoint; - h->args->endpoint = NULL; + h->args->endpoint = nullptr; h->read_buffer_to_destroy = h->args->read_buffer; - h->args->read_buffer = NULL; - grpc_channel_args_destroy(exec_ctx, h->args->args); - h->args->args = NULL; + h->args->read_buffer = nullptr; + grpc_channel_args_destroy(h->args->args); + h->args->args = nullptr; } // If the handshake failed or we're shutting down, clean up and invoke the // callback with the error. -static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx, - security_handshaker *h, - grpc_error *error) { +static void security_handshake_failed_locked(security_handshaker* h, + grpc_error* error) { if (error == GRPC_ERROR_NONE) { // If we were shut down after the handshake succeeded but before an // endpoint callback was invoked, we need to generate our own error. 
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown"); } - const char *msg = grpc_error_string(error); + const char* msg = grpc_error_string(error); gpr_log(GPR_DEBUG, "Security handshake failed: %s", msg); if (!h->shutdown) { @@ -116,93 +133,88 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx, // before destroying them, even if we know that there are no // pending read/write callbacks. This should be fixed, at which // point this can be removed. - grpc_endpoint_shutdown(exec_ctx, h->args->endpoint, GRPC_ERROR_REF(error)); + grpc_endpoint_shutdown(h->args->endpoint, GRPC_ERROR_REF(error)); // Not shutting down, so the write failed. Clean up before // invoking the callback. - cleanup_args_for_failure_locked(exec_ctx, h); + cleanup_args_for_failure_locked(h); // Set shutdown to true so that subsequent calls to // security_handshaker_shutdown() do nothing. h->shutdown = true; } // Invoke callback. - GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error); + GRPC_CLOSURE_SCHED(h->on_handshake_done, error); } -static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx, - security_handshaker *h, grpc_error *error) { +static void on_peer_checked_inner(security_handshaker* h, grpc_error* error) { if (error != GRPC_ERROR_NONE || h->shutdown) { - security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error)); + security_handshake_failed_locked(h, GRPC_ERROR_REF(error)); return; } // Create zero-copy frame protector, if implemented. - tsi_zero_copy_grpc_protector *zero_copy_protector = NULL; + tsi_zero_copy_grpc_protector* zero_copy_protector = nullptr; tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector( - exec_ctx, h->handshaker_result, NULL, &zero_copy_protector); + h->handshaker_result, nullptr, &zero_copy_protector); if (result != TSI_OK && result != TSI_UNIMPLEMENTED) { error = grpc_set_tsi_error_result( GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Zero-copy frame protector creation failed"), result); - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); return; } // Create frame protector if zero-copy frame protector is NULL. - tsi_frame_protector *protector = NULL; - if (zero_copy_protector == NULL) { + tsi_frame_protector* protector = nullptr; + if (zero_copy_protector == nullptr) { result = tsi_handshaker_result_create_frame_protector(h->handshaker_result, - NULL, &protector); + nullptr, &protector); if (result != TSI_OK) { error = grpc_set_tsi_error_result(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Frame protector creation failed"), result); - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); return; } } // Get unused bytes. - const unsigned char *unused_bytes = NULL; + const unsigned char* unused_bytes = nullptr; size_t unused_bytes_size = 0; result = tsi_handshaker_result_get_unused_bytes( h->handshaker_result, &unused_bytes, &unused_bytes_size); // Create secure endpoint. 
if (unused_bytes_size > 0) { grpc_slice slice = - grpc_slice_from_copied_buffer((char *)unused_bytes, unused_bytes_size); + grpc_slice_from_copied_buffer((char*)unused_bytes, unused_bytes_size); h->args->endpoint = grpc_secure_endpoint_create( protector, zero_copy_protector, h->args->endpoint, &slice, 1); - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); } else { h->args->endpoint = grpc_secure_endpoint_create( - protector, zero_copy_protector, h->args->endpoint, NULL, 0); + protector, zero_copy_protector, h->args->endpoint, nullptr, 0); } tsi_handshaker_result_destroy(h->handshaker_result); - h->handshaker_result = NULL; - // Clear out the read buffer before it gets passed to the transport. - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, h->args->read_buffer); + h->handshaker_result = nullptr; // Add auth context to channel args. grpc_arg auth_context_arg = grpc_auth_context_to_arg(h->auth_context); - grpc_channel_args *tmp_args = h->args->args; + grpc_channel_args* tmp_args = h->args->args; h->args->args = grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1); - grpc_channel_args_destroy(exec_ctx, tmp_args); + grpc_channel_args_destroy(tmp_args); // Invoke callback. - GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(h->on_handshake_done, GRPC_ERROR_NONE); // Set shutdown to true so that subsequent calls to // security_handshaker_shutdown() do nothing. h->shutdown = true; } -static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - security_handshaker *h = (security_handshaker *)arg; +static void on_peer_checked(void* arg, grpc_error* error) { + security_handshaker* h = static_cast(arg); gpr_mu_lock(&h->mu); - on_peer_checked_inner(exec_ctx, h, error); + on_peer_checked_inner(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); } -static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx, - security_handshaker *h) { +static grpc_error* check_peer_locked(security_handshaker* h) { tsi_peer peer; tsi_result result = tsi_handshaker_result_extract_peer(h->handshaker_result, &peer); @@ -210,20 +222,24 @@ static grpc_error *check_peer_locked(grpc_exec_ctx *exec_ctx, return grpc_set_tsi_error_result( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Peer extraction failed"), result); } - grpc_security_connector_check_peer(exec_ctx, h->connector, peer, - &h->auth_context, &h->on_peer_checked); + grpc_security_connector_check_peer(h->connector, peer, &h->auth_context, + &h->on_peer_checked); return GRPC_ERROR_NONE; } -static grpc_error *on_handshake_next_done_locked( - grpc_exec_ctx *exec_ctx, security_handshaker *h, tsi_result result, - const unsigned char *bytes_to_send, size_t bytes_to_send_size, - tsi_handshaker_result *handshaker_result) { - grpc_error *error = GRPC_ERROR_NONE; +static grpc_error* on_handshake_next_done_locked( + security_handshaker* h, tsi_result result, + const unsigned char* bytes_to_send, size_t bytes_to_send_size, + tsi_handshaker_result* handshaker_result) { + grpc_error* error = GRPC_ERROR_NONE; + // Handshaker was shutdown. + if (h->shutdown) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown"); + } // Read more if we need to. 
if (result == TSI_INCOMPLETE_DATA) { GPR_ASSERT(bytes_to_send_size == 0); - grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, + grpc_endpoint_read(h->args->endpoint, h->args->read_buffer, &h->on_handshake_data_received_from_peer); return error; } @@ -232,57 +248,55 @@ static grpc_error *on_handshake_next_done_locked( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake failed"), result); } // Update handshaker result. - if (handshaker_result != NULL) { - GPR_ASSERT(h->handshaker_result == NULL); + if (handshaker_result != nullptr) { + GPR_ASSERT(h->handshaker_result == nullptr); h->handshaker_result = handshaker_result; } if (bytes_to_send_size > 0) { // Send data to peer, if needed. grpc_slice to_send = grpc_slice_from_copied_buffer( - (const char *)bytes_to_send, bytes_to_send_size); - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &h->outgoing); + reinterpret_cast(bytes_to_send), bytes_to_send_size); + grpc_slice_buffer_reset_and_unref_internal(&h->outgoing); grpc_slice_buffer_add(&h->outgoing, to_send); - grpc_endpoint_write(exec_ctx, h->args->endpoint, &h->outgoing, + grpc_endpoint_write(h->args->endpoint, &h->outgoing, &h->on_handshake_data_sent_to_peer); - } else if (handshaker_result == NULL) { + } else if (handshaker_result == nullptr) { // There is nothing to send, but need to read from peer. - grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, + grpc_endpoint_read(h->args->endpoint, h->args->read_buffer, &h->on_handshake_data_received_from_peer); } else { // Handshake has finished, check peer and so on. - error = check_peer_locked(exec_ctx, h); + error = check_peer_locked(h); } return error; } static void on_handshake_next_done_grpc_wrapper( - tsi_result result, void *user_data, const unsigned char *bytes_to_send, - size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) { - security_handshaker *h = (security_handshaker *)user_data; + tsi_result result, void* user_data, const unsigned char* bytes_to_send, + size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result) { + security_handshaker* h = static_cast(user_data); // This callback will be invoked by TSI in a non-grpc thread, so it's // safe to create our own exec_ctx here. - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; gpr_mu_lock(&h->mu); - grpc_error *error = - on_handshake_next_done_locked(&exec_ctx, h, result, bytes_to_send, - bytes_to_send_size, handshaker_result); + grpc_error* error = on_handshake_next_done_locked( + h, result, bytes_to_send, bytes_to_send_size, handshaker_result); if (error != GRPC_ERROR_NONE) { - security_handshake_failed_locked(&exec_ctx, h, error); + security_handshake_failed_locked(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(&exec_ctx, h); + security_handshaker_unref(h); } else { gpr_mu_unlock(&h->mu); } - grpc_exec_ctx_finish(&exec_ctx); } -static grpc_error *do_handshaker_next_locked( - grpc_exec_ctx *exec_ctx, security_handshaker *h, - const unsigned char *bytes_received, size_t bytes_received_size) { +static grpc_error* do_handshaker_next_locked( + security_handshaker* h, const unsigned char* bytes_received, + size_t bytes_received_size) { // Invoke TSI handshaker. 
- const unsigned char *bytes_to_send = NULL; + const unsigned char* bytes_to_send = nullptr; size_t bytes_to_send_size = 0; - tsi_handshaker_result *handshaker_result = NULL; + tsi_handshaker_result* handshaker_result = nullptr; tsi_result result = tsi_handshaker_next( h->handshaker, bytes_received, bytes_received_size, &bytes_to_send, &bytes_to_send_size, &handshaker_result, @@ -294,75 +308,57 @@ static grpc_error *do_handshaker_next_locked( } // Handshaker returned synchronously. Invoke callback directly in // this thread with our existing exec_ctx. - return on_handshake_next_done_locked(exec_ctx, h, result, bytes_to_send, + return on_handshake_next_done_locked(h, result, bytes_to_send, bytes_to_send_size, handshaker_result); } -static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - security_handshaker *h = (security_handshaker *)arg; +static void on_handshake_data_received_from_peer(void* arg, grpc_error* error) { + security_handshaker* h = static_cast(arg); gpr_mu_lock(&h->mu); if (error != GRPC_ERROR_NONE || h->shutdown) { security_handshake_failed_locked( - exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Handshake read failed", &error, 1)); + h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Handshake read failed", &error, 1)); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); return; } // Copy all slices received. - size_t i; - size_t bytes_received_size = 0; - for (i = 0; i < h->args->read_buffer->count; i++) { - bytes_received_size += GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]); - } - if (bytes_received_size > h->handshake_buffer_size) { - h->handshake_buffer = - (uint8_t *)gpr_realloc(h->handshake_buffer, bytes_received_size); - h->handshake_buffer_size = bytes_received_size; - } - size_t offset = 0; - for (i = 0; i < h->args->read_buffer->count; i++) { - size_t slice_size = GPR_SLICE_LENGTH(h->args->read_buffer->slices[i]); - memcpy(h->handshake_buffer + offset, - GRPC_SLICE_START_PTR(h->args->read_buffer->slices[i]), slice_size); - offset += slice_size; - } + size_t bytes_received_size = move_read_buffer_into_handshake_buffer(h); // Call TSI handshaker. - error = do_handshaker_next_locked(exec_ctx, h, h->handshake_buffer, - bytes_received_size); + error = + do_handshaker_next_locked(h, h->handshake_buffer, bytes_received_size); if (error != GRPC_ERROR_NONE) { - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); } else { gpr_mu_unlock(&h->mu); } } -static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - security_handshaker *h = (security_handshaker *)arg; +static void on_handshake_data_sent_to_peer(void* arg, grpc_error* error) { + security_handshaker* h = static_cast(arg); gpr_mu_lock(&h->mu); if (error != GRPC_ERROR_NONE || h->shutdown) { security_handshake_failed_locked( - exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( - "Handshake write failed", &error, 1)); + h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + "Handshake write failed", &error, 1)); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); return; } // We may be done. 
- if (h->handshaker_result == NULL) { - grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer, + if (h->handshaker_result == nullptr) { + grpc_endpoint_read(h->args->endpoint, h->args->read_buffer, &h->on_handshake_data_received_from_peer); } else { - error = check_peer_locked(exec_ctx, h); + error = check_peer_locked(h); if (error != GRPC_ERROR_NONE) { - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); return; } } @@ -373,41 +369,40 @@ static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg, // public handshaker API // -static void security_handshaker_destroy(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker) { - security_handshaker *h = (security_handshaker *)handshaker; - security_handshaker_unref(exec_ctx, h); +static void security_handshaker_destroy(grpc_handshaker* handshaker) { + security_handshaker* h = reinterpret_cast(handshaker); + security_handshaker_unref(h); } -static void security_handshaker_shutdown(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker, - grpc_error *why) { - security_handshaker *h = (security_handshaker *)handshaker; +static void security_handshaker_shutdown(grpc_handshaker* handshaker, + grpc_error* why) { + security_handshaker* h = reinterpret_cast(handshaker); gpr_mu_lock(&h->mu); if (!h->shutdown) { h->shutdown = true; - grpc_endpoint_shutdown(exec_ctx, h->args->endpoint, GRPC_ERROR_REF(why)); - cleanup_args_for_failure_locked(exec_ctx, h); + grpc_endpoint_shutdown(h->args->endpoint, GRPC_ERROR_REF(why)); + cleanup_args_for_failure_locked(h); } gpr_mu_unlock(&h->mu); GRPC_ERROR_UNREF(why); } -static void security_handshaker_do_handshake(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker, - grpc_tcp_server_acceptor *acceptor, - grpc_closure *on_handshake_done, - grpc_handshaker_args *args) { - security_handshaker *h = (security_handshaker *)handshaker; +static void security_handshaker_do_handshake(grpc_handshaker* handshaker, + grpc_tcp_server_acceptor* acceptor, + grpc_closure* on_handshake_done, + grpc_handshaker_args* args) { + security_handshaker* h = reinterpret_cast(handshaker); gpr_mu_lock(&h->mu); h->args = args; h->on_handshake_done = on_handshake_done; gpr_ref(&h->refs); - grpc_error *error = do_handshaker_next_locked(exec_ctx, h, NULL, 0); + size_t bytes_received_size = move_read_buffer_into_handshake_buffer(h); + grpc_error* error = + do_handshaker_next_locked(h, h->handshake_buffer, bytes_received_size); if (error != GRPC_ERROR_NONE) { - security_handshake_failed_locked(exec_ctx, h, error); + security_handshake_failed_locked(h, error); gpr_mu_unlock(&h->mu); - security_handshaker_unref(exec_ctx, h); + security_handshaker_unref(h); return; } gpr_mu_unlock(&h->mu); @@ -415,20 +410,20 @@ static void security_handshaker_do_handshake(grpc_exec_ctx *exec_ctx, static const grpc_handshaker_vtable security_handshaker_vtable = { security_handshaker_destroy, security_handshaker_shutdown, - security_handshaker_do_handshake}; + security_handshaker_do_handshake, "security"}; -static grpc_handshaker *security_handshaker_create( - grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, - grpc_security_connector *connector) { - security_handshaker *h = - (security_handshaker *)gpr_zalloc(sizeof(security_handshaker)); +static grpc_handshaker* security_handshaker_create( + tsi_handshaker* handshaker, grpc_security_connector* connector) { + security_handshaker* h = 
static_cast( + gpr_zalloc(sizeof(security_handshaker))); grpc_handshaker_init(&security_handshaker_vtable, &h->base); h->handshaker = handshaker; h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake"); gpr_mu_init(&h->mu); gpr_ref_init(&h->refs, 1); h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE; - h->handshake_buffer = (uint8_t *)gpr_malloc(h->handshake_buffer_size); + h->handshake_buffer = + static_cast(gpr_malloc(h->handshake_buffer_size)); GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer, on_handshake_data_sent_to_peer, h, grpc_schedule_on_exec_ctx); @@ -445,33 +440,30 @@ static grpc_handshaker *security_handshaker_create( // fail_handshaker // -static void fail_handshaker_destroy(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker) { +static void fail_handshaker_destroy(grpc_handshaker* handshaker) { gpr_free(handshaker); } -static void fail_handshaker_shutdown(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker, - grpc_error *why) { +static void fail_handshaker_shutdown(grpc_handshaker* handshaker, + grpc_error* why) { GRPC_ERROR_UNREF(why); } -static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx, - grpc_handshaker *handshaker, - grpc_tcp_server_acceptor *acceptor, - grpc_closure *on_handshake_done, - grpc_handshaker_args *args) { - GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done, +static void fail_handshaker_do_handshake(grpc_handshaker* handshaker, + grpc_tcp_server_acceptor* acceptor, + grpc_closure* on_handshake_done, + grpc_handshaker_args* args) { + GRPC_CLOSURE_SCHED(on_handshake_done, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Failed to create security handshaker")); } static const grpc_handshaker_vtable fail_handshaker_vtable = { fail_handshaker_destroy, fail_handshaker_shutdown, - fail_handshaker_do_handshake}; + fail_handshaker_do_handshake, "security_fail"}; -static grpc_handshaker *fail_handshaker_create() { - grpc_handshaker *h = (grpc_handshaker *)gpr_malloc(sizeof(*h)); +static grpc_handshaker* fail_handshaker_create() { + grpc_handshaker* h = static_cast(gpr_malloc(sizeof(*h))); grpc_handshaker_init(&fail_handshaker_vtable, h); return h; } @@ -481,27 +473,27 @@ static grpc_handshaker *fail_handshaker_create() { // static void client_handshaker_factory_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory, - const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr) { - grpc_channel_security_connector *security_connector = - (grpc_channel_security_connector *)grpc_security_connector_find_in_args( - args); - grpc_channel_security_connector_add_handshakers(exec_ctx, security_connector, + grpc_handshaker_factory* handshaker_factory, const grpc_channel_args* args, + grpc_handshake_manager* handshake_mgr) { + grpc_channel_security_connector* security_connector = + reinterpret_cast( + grpc_security_connector_find_in_args(args)); + grpc_channel_security_connector_add_handshakers(security_connector, handshake_mgr); } static void server_handshaker_factory_add_handshakers( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *hf, - const grpc_channel_args *args, grpc_handshake_manager *handshake_mgr) { - grpc_server_security_connector *security_connector = - (grpc_server_security_connector *)grpc_security_connector_find_in_args( - args); - grpc_server_security_connector_add_handshakers(exec_ctx, security_connector, + grpc_handshaker_factory* hf, const grpc_channel_args* args, + grpc_handshake_manager* handshake_mgr) { + grpc_server_security_connector* security_connector = + reinterpret_cast( 
+ grpc_security_connector_find_in_args(args)); + grpc_server_security_connector_add_handshakers(security_connector, handshake_mgr); } static void handshaker_factory_destroy( - grpc_exec_ctx *exec_ctx, grpc_handshaker_factory *handshaker_factory) {} + grpc_handshaker_factory* handshaker_factory) {} static const grpc_handshaker_factory_vtable client_handshaker_factory_vtable = { client_handshaker_factory_add_handshakers, handshaker_factory_destroy}; @@ -519,15 +511,14 @@ static grpc_handshaker_factory server_handshaker_factory = { // exported functions // -grpc_handshaker *grpc_security_handshaker_create( - grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, - grpc_security_connector *connector) { +grpc_handshaker* grpc_security_handshaker_create( + tsi_handshaker* handshaker, grpc_security_connector* connector) { // If no TSI handshaker was created, return a handshaker that always fails. // Otherwise, return a real security handshaker. - if (handshaker == NULL) { + if (handshaker == nullptr) { return fail_handshaker_create(); } else { - return security_handshaker_create(exec_ctx, handshaker, connector); + return security_handshaker_create(handshaker, connector); } } diff --git a/Sources/CgRPC/src/core/lib/security/transport/security_handshaker.h b/Sources/CgRPC/src/core/lib/security/transport/security_handshaker.h index 95bf127fc..88483b02e 100644 --- a/Sources/CgRPC/src/core/lib/security/transport/security_handshaker.h +++ b/Sources/CgRPC/src/core/lib/security/transport/security_handshaker.h @@ -19,14 +19,14 @@ #ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H #define GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H +#include + #include "src/core/lib/channel/handshaker.h" -#include "src/core/lib/iomgr/exec_ctx.h" -#include "src/core/lib/security/transport/security_connector.h" +#include "src/core/lib/security/security_connector/security_connector.h" /// Creates a security handshaker using \a handshaker. -grpc_handshaker *grpc_security_handshaker_create( - grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker, - grpc_security_connector *connector); +grpc_handshaker* grpc_security_handshaker_create( + tsi_handshaker* handshaker, grpc_security_connector* connector); /// Registers security handshaker factories. 
void grpc_security_register_handshaker_factories(); diff --git a/Sources/CgRPC/src/core/lib/security/transport/server_auth_filter.c b/Sources/CgRPC/src/core/lib/security/transport/server_auth_filter.cc similarity index 56% rename from Sources/CgRPC/src/core/lib/security/transport/server_auth_filter.c rename to Sources/CgRPC/src/core/lib/security/transport/server_auth_filter.cc index 7f523c088..a560a4a02 100644 --- a/Sources/CgRPC/src/core/lib/security/transport/server_auth_filter.c +++ b/Sources/CgRPC/src/core/lib/security/transport/server_auth_filter.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -26,45 +28,47 @@ #include "src/core/lib/security/transport/auth_filters.h" #include "src/core/lib/slice/slice_internal.h" -typedef enum { +namespace { +enum async_state { STATE_INIT = 0, STATE_DONE, STATE_CANCELLED, -} async_state; +}; -typedef struct call_data { - grpc_call_combiner *call_combiner; - grpc_call_stack *owning_call; - grpc_transport_stream_op_batch *recv_initial_metadata_batch; - grpc_closure *original_recv_initial_metadata_ready; +struct call_data { + grpc_call_combiner* call_combiner; + grpc_call_stack* owning_call; + grpc_transport_stream_op_batch* recv_initial_metadata_batch; + grpc_closure* original_recv_initial_metadata_ready; grpc_closure recv_initial_metadata_ready; grpc_metadata_array md; - const grpc_metadata *consumed_md; + const grpc_metadata* consumed_md; size_t num_consumed_md; - grpc_auth_context *auth_context; + grpc_auth_context* auth_context; grpc_closure cancel_closure; gpr_atm state; // async_state -} call_data; +}; -typedef struct channel_data { - grpc_auth_context *auth_context; - grpc_server_credentials *creds; -} channel_data; +struct channel_data { + grpc_auth_context* auth_context; + grpc_server_credentials* creds; +}; +} // namespace static grpc_metadata_array metadata_batch_to_md_array( - const grpc_metadata_batch *batch) { - grpc_linked_mdelem *l; + const grpc_metadata_batch* batch) { + grpc_linked_mdelem* l; grpc_metadata_array result; grpc_metadata_array_init(&result); - for (l = batch->list.head; l != NULL; l = l->next) { - grpc_metadata *usr_md = NULL; + for (l = batch->list.head; l != nullptr; l = l->next) { + grpc_metadata* usr_md = nullptr; grpc_mdelem md = l->md; grpc_slice key = GRPC_MDKEY(md); grpc_slice value = GRPC_MDVALUE(md); if (result.count == result.capacity) { result.capacity = GPR_MAX(result.capacity + 8, result.capacity * 2); - result.metadata = - gpr_realloc(result.metadata, result.capacity * sizeof(grpc_metadata)); + result.metadata = static_cast(gpr_realloc( + result.metadata, result.capacity * sizeof(grpc_metadata))); } usr_md = &result.metadata[result.count++]; usr_md->key = grpc_slice_ref_internal(key); @@ -73,14 +77,13 @@ static grpc_metadata_array metadata_batch_to_md_array( return result; } -static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx, - void *user_data, +static grpc_filtered_mdelem remove_consumed_md(void* user_data, grpc_mdelem md) { - grpc_call_element *elem = user_data; - call_data *calld = elem->call_data; + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); size_t i; for (i = 0; i < calld->num_consumed_md; i++) { - const grpc_metadata *consumed_md = &calld->consumed_md[i]; + const grpc_metadata* consumed_md = &calld->consumed_md[i]; if (grpc_slice_eq(GRPC_MDKEY(md), consumed_md->key) && grpc_slice_eq(GRPC_MDVALUE(md), consumed_md->value)) return GRPC_FILTERED_REMOVE(); @@ -88,17 +91,16 @@ static grpc_filtered_mdelem 
remove_consumed_md(grpc_exec_ctx *exec_ctx, return GRPC_FILTERED_MDELEM(md); } -static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_metadata *consumed_md, +static void on_md_processing_done_inner(grpc_call_element* elem, + const grpc_metadata* consumed_md, size_t num_consumed_md, - const grpc_metadata *response_md, + const grpc_metadata* response_md, size_t num_response_md, - grpc_error *error) { - call_data *calld = elem->call_data; - grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch; + grpc_error* error) { + call_data* calld = static_cast(elem->call_data); + grpc_transport_stream_op_batch* batch = calld->recv_initial_metadata_batch; /* TODO(jboeuf): Implement support for response_md. */ - if (response_md != NULL && num_response_md > 0) { + if (response_md != nullptr && num_response_md > 0) { gpr_log(GPR_INFO, "response_md in auth metadata processing not supported for now. " "Ignoring..."); @@ -107,73 +109,70 @@ static void on_md_processing_done_inner(grpc_exec_ctx *exec_ctx, calld->consumed_md = consumed_md; calld->num_consumed_md = num_consumed_md; error = grpc_metadata_batch_filter( - exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata, + batch->payload->recv_initial_metadata.recv_initial_metadata, remove_consumed_md, elem, "Response metadata filtering error"); } - GRPC_CLOSURE_SCHED(exec_ctx, calld->original_recv_initial_metadata_ready, - error); + GRPC_CLOSURE_SCHED(calld->original_recv_initial_metadata_ready, error); } // Called from application code. static void on_md_processing_done( - void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md, - const grpc_metadata *response_md, size_t num_response_md, - grpc_status_code status, const char *error_details) { - grpc_call_element *elem = user_data; - call_data *calld = elem->call_data; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + void* user_data, const grpc_metadata* consumed_md, size_t num_consumed_md, + const grpc_metadata* response_md, size_t num_response_md, + grpc_status_code status, const char* error_details) { + grpc_call_element* elem = static_cast(user_data); + call_data* calld = static_cast(elem->call_data); + grpc_core::ExecCtx exec_ctx; // If the call was not cancelled while we were in flight, process the result. - if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT, - (gpr_atm)STATE_DONE)) { - grpc_error *error = GRPC_ERROR_NONE; + if (gpr_atm_full_cas(&calld->state, static_cast(STATE_INIT), + static_cast(STATE_DONE))) { + grpc_error* error = GRPC_ERROR_NONE; if (status != GRPC_STATUS_OK) { - if (error_details == NULL) { + if (error_details == nullptr) { error_details = "Authentication metadata processing failed."; } error = grpc_error_set_int( GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details), GRPC_ERROR_INT_GRPC_STATUS, status); } - on_md_processing_done_inner(&exec_ctx, elem, consumed_md, num_consumed_md, - response_md, num_response_md, error); + on_md_processing_done_inner(elem, consumed_md, num_consumed_md, response_md, + num_response_md, error); } // Clean up. 
for (size_t i = 0; i < calld->md.count; i++) { - grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key); - grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value); + grpc_slice_unref_internal(calld->md.metadata[i].key); + grpc_slice_unref_internal(calld->md.metadata[i].value); } grpc_metadata_array_destroy(&calld->md); - GRPC_CALL_STACK_UNREF(&exec_ctx, calld->owning_call, "server_auth_metadata"); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_CALL_STACK_UNREF(calld->owning_call, "server_auth_metadata"); } -static void cancel_call(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - call_data *calld = elem->call_data; +static void cancel_call(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + call_data* calld = static_cast(elem->call_data); // If the result was not already processed, invoke the callback now. if (error != GRPC_ERROR_NONE && - gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT, - (gpr_atm)STATE_CANCELLED)) { - on_md_processing_done_inner(exec_ctx, elem, NULL, 0, NULL, 0, + gpr_atm_full_cas(&calld->state, static_cast(STATE_INIT), + static_cast(STATE_CANCELLED))) { + on_md_processing_done_inner(elem, nullptr, 0, nullptr, 0, GRPC_ERROR_REF(error)); } - GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_call"); + GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_call"); } -static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)arg; - channel_data *chand = elem->channel_data; - call_data *calld = elem->call_data; - grpc_transport_stream_op_batch *batch = calld->recv_initial_metadata_batch; +static void recv_initial_metadata_ready(void* arg, grpc_error* error) { + grpc_call_element* elem = static_cast(arg); + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + grpc_transport_stream_op_batch* batch = calld->recv_initial_metadata_batch; if (error == GRPC_ERROR_NONE) { - if (chand->creds != NULL && chand->creds->processor.process != NULL) { + if (chand->creds != nullptr && chand->creds->processor.process != nullptr) { // We're calling out to the application, so we need to make sure // to drop the call combiner early if we get cancelled. GRPC_CALL_STACK_REF(calld->owning_call, "cancel_call"); GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem, grpc_schedule_on_exec_ctx); - grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, + grpc_call_combiner_set_notify_on_cancel(calld->call_combiner, &calld->cancel_closure); GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata"); calld->md = metadata_batch_to_md_array( @@ -184,14 +183,13 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg, return; } } - GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready, + GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready, GRPC_ERROR_REF(error)); } static void auth_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *batch) { - call_data *calld = elem->call_data; + grpc_call_element* elem, grpc_transport_stream_op_batch* batch) { + call_data* calld = static_cast(elem->call_data); if (batch->recv_initial_metadata) { // Inject our callback. 
calld->recv_initial_metadata_batch = batch; @@ -200,15 +198,14 @@ static void auth_start_transport_stream_op_batch( batch->payload->recv_initial_metadata.recv_initial_metadata_ready = &calld->recv_initial_metadata_ready; } - grpc_call_next_op(exec_ctx, elem, batch); + grpc_call_next_op(elem, batch); } /* Constructor for call_data */ -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = elem->call_data; - channel_data *chand = elem->channel_data; +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); calld->call_combiner = args->call_combiner; calld->owning_call = args->call_stack; GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready, @@ -216,11 +213,11 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, grpc_schedule_on_exec_ctx); // Create server security context. Set its auth context from channel // data and save it in the call context. - grpc_server_security_context *server_ctx = + grpc_server_security_context* server_ctx = grpc_server_security_context_create(); server_ctx->auth_context = grpc_auth_context_create(chand->auth_context); calld->auth_context = server_ctx->auth_context; - if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) { + if (args->context[GRPC_CONTEXT_SECURITY].value != nullptr) { args->context[GRPC_CONTEXT_SECURITY].destroy( args->context[GRPC_CONTEXT_SECURITY].value); } @@ -231,33 +228,31 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, } /* Destructor for call_data */ -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *ignored) {} +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) {} /* Constructor for channel_data */ -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { GPR_ASSERT(!args->is_last); - channel_data *chand = elem->channel_data; - grpc_auth_context *auth_context = + channel_data* chand = static_cast(elem->channel_data); + grpc_auth_context* auth_context = grpc_find_auth_context_in_args(args->channel_args); - GPR_ASSERT(auth_context != NULL); + GPR_ASSERT(auth_context != nullptr); chand->auth_context = GRPC_AUTH_CONTEXT_REF(auth_context, "server_auth_filter"); - grpc_server_credentials *creds = + grpc_server_credentials* creds = grpc_find_server_credentials_in_args(args->channel_args); chand->creds = grpc_server_credentials_ref(creds); return GRPC_ERROR_NONE; } /* Destructor for channel data */ -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { - channel_data *chand = elem->channel_data; +static void destroy_channel_elem(grpc_channel_element* elem) { + channel_data* chand = static_cast(elem->channel_data); GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "server_auth_filter"); - grpc_server_credentials_unref(exec_ctx, chand->creds); + grpc_server_credentials_unref(chand->creds); } const grpc_channel_filter grpc_server_auth_filter = { diff --git a/Sources/CgRPC/src/core/lib/security/transport/target_authority_table.cc b/Sources/CgRPC/src/core/lib/security/transport/target_authority_table.cc new file mode 100644 index 
000000000..1eeb557f6 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/transport/target_authority_table.cc @@ -0,0 +1,75 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/security/transport/target_authority_table.h" + +// Channel arg key for the mapping of target addresses to their authorities. +#define GRPC_ARG_TARGET_AUTHORITY_TABLE "grpc.target_authority_table" + +namespace grpc_core { +namespace { + +void* target_authority_table_copy(void* p) { + TargetAuthorityTable* table = static_cast(p); + // TODO(roth): When channel_args are converted to C++, pass the + // RefCountedPtr<> directly instead of managing the ref manually. + table->Ref().release(); + return p; +} +void target_authority_table_destroy(void* p) { + TargetAuthorityTable* table = static_cast(p); + table->Unref(); +} +int target_authority_table_cmp(void* a, void* b) { + return TargetAuthorityTable::Cmp( + *static_cast(a), + *static_cast(b)); +} +const grpc_arg_pointer_vtable target_authority_table_arg_vtable = { + target_authority_table_copy, target_authority_table_destroy, + target_authority_table_cmp}; + +} // namespace + +grpc_arg CreateTargetAuthorityTableChannelArg(TargetAuthorityTable* table) { + return grpc_channel_arg_pointer_create((char*)GRPC_ARG_TARGET_AUTHORITY_TABLE, + table, + &target_authority_table_arg_vtable); +} + +TargetAuthorityTable* FindTargetAuthorityTableInArgs( + const grpc_channel_args* args) { + const grpc_arg* arg = + grpc_channel_args_find(args, GRPC_ARG_TARGET_AUTHORITY_TABLE); + if (arg != nullptr) { + if (arg->type == GRPC_ARG_POINTER) { + return static_cast(arg->value.pointer.p); + } else { + gpr_log(GPR_ERROR, "value of " GRPC_ARG_TARGET_AUTHORITY_TABLE + " channel arg was not pointer type; ignoring"); + } + } + return nullptr; +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/security/transport/target_authority_table.h b/Sources/CgRPC/src/core/lib/security/transport/target_authority_table.h new file mode 100644 index 000000000..a2e7dc6ac --- /dev/null +++ b/Sources/CgRPC/src/core/lib/security/transport/target_authority_table.h @@ -0,0 +1,40 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
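A sketch, for illustration only and not part of the vendored files, of the pointer-vtable channel-arg pattern that target_authority_table.cc above uses to ship the table through grpc_channel_args: copy/destroy/cmp callbacks are bundled into a grpc_arg_pointer_vtable, the arg is built with grpc_channel_arg_pointer_create(), and readers look it up with grpc_channel_args_find() and check the arg type before casting. The payload type my_payload, the key "grpc.example.payload", and the helper names are hypothetical; only the channel-arg calls shown in the diff are assumed.

#include <grpc/grpc.h>                          // grpc_arg, grpc_channel_args
#include "src/core/lib/channel/channel_args.h"  // grpc_channel_arg_pointer_create, grpc_channel_args_find
#include "src/core/lib/gpr/useful.h"            // GPR_ICMP

#define MY_EXAMPLE_ARG_KEY "grpc.example.payload"  // hypothetical arg key

struct my_payload { int value; };  // hypothetical payload type

// Borrowed-pointer semantics: copy shares the pointer, destroy frees nothing.
static void* my_payload_copy(void* p) { return p; }
static void my_payload_destroy(void* /*p*/) {}
static int my_payload_cmp(void* a, void* b) { return GPR_ICMP(a, b); }

static const grpc_arg_pointer_vtable my_payload_arg_vtable = {
    my_payload_copy, my_payload_destroy, my_payload_cmp};

grpc_arg make_my_payload_arg(my_payload* p) {
  return grpc_channel_arg_pointer_create((char*)MY_EXAMPLE_ARG_KEY, p,
                                         &my_payload_arg_vtable);
}

my_payload* find_my_payload(const grpc_channel_args* args) {
  const grpc_arg* arg = grpc_channel_args_find(args, MY_EXAMPLE_ARG_KEY);
  if (arg != nullptr && arg->type == GRPC_ARG_POINTER) {
    return static_cast<my_payload*>(arg->value.pointer.p);
  }
  return nullptr;  // absent or not a pointer-typed arg
}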
+ * + */ + +#ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_TARGET_AUTHORITY_TABLE_H +#define GRPC_CORE_LIB_SECURITY_TRANSPORT_TARGET_AUTHORITY_TABLE_H + +#include + +#include "src/core/lib/slice/slice_hash_table.h" + +namespace grpc_core { + +/// A hash table mapping target addresses to authorities. +typedef SliceHashTable> TargetAuthorityTable; + +/// Returns a channel argument containing \a table. +grpc_arg CreateTargetAuthorityTableChannelArg(TargetAuthorityTable* table); + +/// Returns the target authority table from \a args or nullptr. +TargetAuthorityTable* FindTargetAuthorityTableInArgs( + const grpc_channel_args* args); + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_TARGET_AUTHORITY_TABLE_H */ diff --git a/Sources/CgRPC/src/core/lib/security/transport/tsi_error.c b/Sources/CgRPC/src/core/lib/security/transport/tsi_error.cc similarity index 89% rename from Sources/CgRPC/src/core/lib/security/transport/tsi_error.c rename to Sources/CgRPC/src/core/lib/security/transport/tsi_error.cc index 72f9600e8..f78bb8df3 100644 --- a/Sources/CgRPC/src/core/lib/security/transport/tsi_error.c +++ b/Sources/CgRPC/src/core/lib/security/transport/tsi_error.cc @@ -16,9 +16,11 @@ * */ +#include + #include "src/core/lib/security/transport/tsi_error.h" -grpc_error *grpc_set_tsi_error_result(grpc_error *error, tsi_result result) { +grpc_error* grpc_set_tsi_error_result(grpc_error* error, tsi_result result) { return grpc_error_set_int( grpc_error_set_str( error, GRPC_ERROR_STR_TSI_ERROR, diff --git a/Sources/CgRPC/src/core/lib/security/transport/tsi_error.h b/Sources/CgRPC/src/core/lib/security/transport/tsi_error.h index 87a63a8a7..16e04f70f 100644 --- a/Sources/CgRPC/src/core/lib/security/transport/tsi_error.h +++ b/Sources/CgRPC/src/core/lib/security/transport/tsi_error.h @@ -19,9 +19,11 @@ #ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_TSI_ERROR_H #define GRPC_CORE_LIB_SECURITY_TRANSPORT_TSI_ERROR_H +#include + #include "src/core/lib/iomgr/error.h" #include "src/core/tsi/transport_security_interface.h" -grpc_error *grpc_set_tsi_error_result(grpc_error *error, tsi_result result); +grpc_error* grpc_set_tsi_error_result(grpc_error* error, tsi_result result); #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_TSI_ERROR_H */ diff --git a/Sources/CgRPC/src/core/lib/security/util/json_util.c b/Sources/CgRPC/src/core/lib/security/util/json_util.cc similarity index 60% rename from Sources/CgRPC/src/core/lib/security/util/json_util.c rename to Sources/CgRPC/src/core/lib/security/util/json_util.cc index d847addef..75512a19c 100644 --- a/Sources/CgRPC/src/core/lib/security/util/json_util.c +++ b/Sources/CgRPC/src/core/lib/security/util/json_util.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/security/util/json_util.h" #include @@ -23,24 +25,24 @@ #include #include -const char *grpc_json_get_string_property(const grpc_json *json, - const char *prop_name) { - grpc_json *child; - for (child = json->child; child != NULL; child = child->next) { +const char* grpc_json_get_string_property(const grpc_json* json, + const char* prop_name) { + grpc_json* child; + for (child = json->child; child != nullptr; child = child->next) { if (strcmp(child->key, prop_name) == 0) break; } - if (child == NULL || child->type != GRPC_JSON_STRING) { + if (child == nullptr || child->type != GRPC_JSON_STRING) { gpr_log(GPR_ERROR, "Invalid or missing %s property.", prop_name); - return NULL; + return nullptr; } return child->value; } -bool grpc_copy_json_string_property(const grpc_json *json, - const char *prop_name, - char 
**copied_value) { - const char *prop_value = grpc_json_get_string_property(json, prop_name); - if (prop_value == NULL) return false; +bool grpc_copy_json_string_property(const grpc_json* json, + const char* prop_name, + char** copied_value) { + const char* prop_value = grpc_json_get_string_property(json, prop_name); + if (prop_value == nullptr) return false; *copied_value = gpr_strdup(prop_value); return true; } diff --git a/Sources/CgRPC/src/core/lib/security/util/json_util.h b/Sources/CgRPC/src/core/lib/security/util/json_util.h index 5ea831e27..89deffcc0 100644 --- a/Sources/CgRPC/src/core/lib/security/util/json_util.h +++ b/Sources/CgRPC/src/core/lib/security/util/json_util.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SECURITY_UTIL_JSON_UTIL_H #define GRPC_CORE_LIB_SECURITY_UTIL_JSON_UTIL_H +#include + #include #include "src/core/lib/json/json.h" @@ -29,12 +31,12 @@ #define GRPC_AUTH_JSON_TYPE_AUTHORIZED_USER "authorized_user" // Gets a child property from a json node. -const char *grpc_json_get_string_property(const grpc_json *json, - const char *prop_name); +const char* grpc_json_get_string_property(const grpc_json* json, + const char* prop_name); // Copies the value of the json child property specified by prop_name. // Returns false if the property was not found. -bool grpc_copy_json_string_property(const grpc_json *json, - const char *prop_name, char **copied_value); +bool grpc_copy_json_string_property(const grpc_json* json, + const char* prop_name, char** copied_value); #endif /* GRPC_CORE_LIB_SECURITY_UTIL_JSON_UTIL_H */ diff --git a/Sources/CgRPC/src/core/lib/slice/b64.c b/Sources/CgRPC/src/core/lib/slice/b64.cc similarity index 73% rename from Sources/CgRPC/src/core/lib/slice/b64.c rename to Sources/CgRPC/src/core/lib/slice/b64.cc index 50264719a..27f272400 100644 --- a/Sources/CgRPC/src/core/lib/slice/b64.c +++ b/Sources/CgRPC/src/core/lib/slice/b64.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/slice/b64.h" #include @@ -23,8 +25,8 @@ #include #include -#include +#include "src/core/lib/gpr/useful.h" #include "src/core/lib/slice/slice_internal.h" /* --- Constants. --- */ @@ -54,11 +56,11 @@ static const char base64_url_safe_chars[] = /* --- base64 functions. --- */ -char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe, +char* grpc_base64_encode(const void* vdata, size_t data_size, int url_safe, int multiline) { size_t result_projected_size = grpc_base64_estimate_encoded_size(data_size, url_safe, multiline); - char *result = (char *)gpr_malloc(result_projected_size); + char* result = static_cast(gpr_malloc(result_projected_size)); grpc_base64_encode_core(result, vdata, data_size, url_safe, multiline); return result; } @@ -73,15 +75,15 @@ size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe, return result_projected_size; } -void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size, +void grpc_base64_encode_core(char* result, const void* vdata, size_t data_size, int url_safe, int multiline) { - const unsigned char *data = (const unsigned char *)vdata; - const char *base64_chars = + const unsigned char* data = static_cast(vdata); + const char* base64_chars = url_safe ? 
base64_url_safe_chars : base64_url_unsafe_chars; const size_t result_projected_size = grpc_base64_estimate_encoded_size(data_size, url_safe, multiline); - char *current = result; + char* current = result; size_t num_blocks = 0; size_t i = 0; @@ -122,27 +124,28 @@ void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size, result[current - result] = '\0'; } -grpc_slice grpc_base64_decode(grpc_exec_ctx *exec_ctx, const char *b64, - int url_safe) { - return grpc_base64_decode_with_len(exec_ctx, b64, strlen(b64), url_safe); +grpc_slice grpc_base64_decode(const char* b64, int url_safe) { + return grpc_base64_decode_with_len(b64, strlen(b64), url_safe); } -static void decode_one_char(const unsigned char *codes, unsigned char *result, - size_t *result_offset) { - uint32_t packed = ((uint32_t)codes[0] << 2) | ((uint32_t)codes[1] >> 4); - result[(*result_offset)++] = (unsigned char)packed; +static void decode_one_char(const unsigned char* codes, unsigned char* result, + size_t* result_offset) { + uint32_t packed = (static_cast(codes[0]) << 2) | + (static_cast(codes[1]) >> 4); + result[(*result_offset)++] = static_cast(packed); } -static void decode_two_chars(const unsigned char *codes, unsigned char *result, - size_t *result_offset) { - uint32_t packed = ((uint32_t)codes[0] << 10) | ((uint32_t)codes[1] << 4) | - ((uint32_t)codes[2] >> 2); - result[(*result_offset)++] = (unsigned char)(packed >> 8); - result[(*result_offset)++] = (unsigned char)(packed); +static void decode_two_chars(const unsigned char* codes, unsigned char* result, + size_t* result_offset) { + uint32_t packed = (static_cast(codes[0]) << 10) | + (static_cast(codes[1]) << 4) | + (static_cast(codes[2]) >> 2); + result[(*result_offset)++] = static_cast(packed >> 8); + result[(*result_offset)++] = static_cast(packed); } -static int decode_group(const unsigned char *codes, size_t num_codes, - unsigned char *result, size_t *result_offset) { +static int decode_group(const unsigned char* codes, size_t num_codes, + unsigned char* result, size_t* result_offset) { GPR_ASSERT(num_codes <= 4); /* Short end groups that may not have padding. */ @@ -176,25 +179,26 @@ static int decode_group(const unsigned char *codes, size_t num_codes, decode_two_chars(codes, result, result_offset); } else { /* No padding. 
*/ - uint32_t packed = ((uint32_t)codes[0] << 18) | ((uint32_t)codes[1] << 12) | - ((uint32_t)codes[2] << 6) | codes[3]; - result[(*result_offset)++] = (unsigned char)(packed >> 16); - result[(*result_offset)++] = (unsigned char)(packed >> 8); - result[(*result_offset)++] = (unsigned char)(packed); + uint32_t packed = (static_cast(codes[0]) << 18) | + (static_cast(codes[1]) << 12) | + (static_cast(codes[2]) << 6) | codes[3]; + result[(*result_offset)++] = static_cast(packed >> 16); + result[(*result_offset)++] = static_cast(packed >> 8); + result[(*result_offset)++] = static_cast(packed); } return 1; } -grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64, - size_t b64_len, int url_safe) { +grpc_slice grpc_base64_decode_with_len(const char* b64, size_t b64_len, + int url_safe) { grpc_slice result = GRPC_SLICE_MALLOC(b64_len); - unsigned char *current = GRPC_SLICE_START_PTR(result); + unsigned char* current = GRPC_SLICE_START_PTR(result); size_t result_size = 0; unsigned char codes[4]; size_t num_codes = 0; while (b64_len--) { - unsigned char c = (unsigned char)(*b64++); + unsigned char c = static_cast(*b64++); signed char code; if (c >= GPR_ARRAY_SIZE(base64_bytes)) continue; if (url_safe) { @@ -215,7 +219,7 @@ grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64, goto fail; } } else { - codes[num_codes++] = (unsigned char)code; + codes[num_codes++] = static_cast(code); if (num_codes == 4) { if (!decode_group(codes, num_codes, current, &result_size)) goto fail; num_codes = 0; @@ -231,6 +235,6 @@ grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64, return result; fail: - grpc_slice_unref_internal(exec_ctx, result); + grpc_slice_unref_internal(result); return grpc_empty_slice(); } diff --git a/Sources/CgRPC/src/core/lib/slice/b64.h b/Sources/CgRPC/src/core/lib/slice/b64.h index 3fd15febe..4475568c2 100644 --- a/Sources/CgRPC/src/core/lib/slice/b64.h +++ b/Sources/CgRPC/src/core/lib/slice/b64.h @@ -19,12 +19,14 @@ #ifndef GRPC_CORE_LIB_SLICE_B64_H #define GRPC_CORE_LIB_SLICE_B64_H +#include + #include /* Encodes data using base64. It is the caller's responsability to free the returned char * using gpr_free. Returns NULL on NULL input. TODO(makdharma) : change the flags to bool from int */ -char *grpc_base64_encode(const void *data, size_t data_size, int url_safe, +char* grpc_base64_encode(const void* data, size_t data_size, int url_safe, int multiline); /* estimate the upper bound on size of base64 encoded data. The actual size @@ -35,16 +37,15 @@ size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe, /* Encodes data using base64 and write it to memory pointed to by result. It is * the caller's responsiblity to allocate enough memory in |result| to fit the * encoded data. */ -void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size, +void grpc_base64_encode_core(char* result, const void* vdata, size_t data_size, int url_safe, int multiline); /* Decodes data according to the base64 specification. Returns an empty slice in case of failure. */ -grpc_slice grpc_base64_decode(grpc_exec_ctx *exec_ctx, const char *b64, - int url_safe); +grpc_slice grpc_base64_decode(const char* b64, int url_safe); /* Same as above except that the length is provided by the caller. 
*/ -grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx *exec_ctx, const char *b64, - size_t b64_len, int url_safe); +grpc_slice grpc_base64_decode_with_len(const char* b64, size_t b64_len, + int url_safe); #endif /* GRPC_CORE_LIB_SLICE_B64_H */ diff --git a/Sources/CgRPC/src/core/lib/slice/percent_encoding.c b/Sources/CgRPC/src/core/lib/slice/percent_encoding.cc similarity index 78% rename from Sources/CgRPC/src/core/lib/slice/percent_encoding.c rename to Sources/CgRPC/src/core/lib/slice/percent_encoding.cc index effc8d7ad..45cd2cc47 100644 --- a/Sources/CgRPC/src/core/lib/slice/percent_encoding.c +++ b/Sources/CgRPC/src/core/lib/slice/percent_encoding.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/slice/percent_encoding.h" #include @@ -32,19 +34,19 @@ const uint8_t grpc_compatible_percent_encoding_unreserved_bytes[256 / 8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; static bool is_unreserved_character(uint8_t c, - const uint8_t *unreserved_bytes) { + const uint8_t* unreserved_bytes) { return ((unreserved_bytes[c / 8] >> (c % 8)) & 1) != 0; } grpc_slice grpc_percent_encode_slice(grpc_slice slice, - const uint8_t *unreserved_bytes) { + const uint8_t* unreserved_bytes) { static const uint8_t hex[] = "0123456789ABCDEF"; // first pass: count the number of bytes needed to output this string size_t output_length = 0; - const uint8_t *slice_start = GRPC_SLICE_START_PTR(slice); - const uint8_t *slice_end = GRPC_SLICE_END_PTR(slice); - const uint8_t *p; + const uint8_t* slice_start = GRPC_SLICE_START_PTR(slice); + const uint8_t* slice_end = GRPC_SLICE_END_PTR(slice); + const uint8_t* p; bool any_reserved_bytes = false; for (p = slice_start; p < slice_end; p++) { bool unres = is_unreserved_character(*p, unreserved_bytes); @@ -57,7 +59,7 @@ grpc_slice grpc_percent_encode_slice(grpc_slice slice, } // second pass: actually encode grpc_slice out = GRPC_SLICE_MALLOC(output_length); - uint8_t *q = GRPC_SLICE_START_PTR(out); + uint8_t* q = GRPC_SLICE_START_PTR(out); for (p = slice_start; p < slice_end; p++) { if (is_unreserved_character(*p, unreserved_bytes)) { *q++ = *p; @@ -71,24 +73,24 @@ grpc_slice grpc_percent_encode_slice(grpc_slice slice, return out; } -static bool valid_hex(const uint8_t *p, const uint8_t *end) { +static bool valid_hex(const uint8_t* p, const uint8_t* end) { if (p >= end) return false; return (*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f') || (*p >= 'A' && *p <= 'F'); } static uint8_t dehex(uint8_t c) { - if (c >= '0' && c <= '9') return (uint8_t)(c - '0'); - if (c >= 'A' && c <= 'F') return (uint8_t)(c - 'A' + 10); - if (c >= 'a' && c <= 'f') return (uint8_t)(c - 'a' + 10); + if (c >= '0' && c <= '9') return static_cast(c - '0'); + if (c >= 'A' && c <= 'F') return static_cast(c - 'A' + 10); + if (c >= 'a' && c <= 'f') return static_cast(c - 'a' + 10); GPR_UNREACHABLE_CODE(return 255); } bool grpc_strict_percent_decode_slice(grpc_slice slice_in, - const uint8_t *unreserved_bytes, - grpc_slice *slice_out) { - const uint8_t *p = GRPC_SLICE_START_PTR(slice_in); - const uint8_t *in_end = GRPC_SLICE_END_PTR(slice_in); + const uint8_t* unreserved_bytes, + grpc_slice* slice_out) { + const uint8_t* p = GRPC_SLICE_START_PTR(slice_in); + const uint8_t* in_end = GRPC_SLICE_END_PTR(slice_in); size_t out_length = 0; bool any_percent_encoded_stuff = false; while (p != in_end) { @@ -111,10 +113,10 @@ bool grpc_strict_percent_decode_slice(grpc_slice slice_in, } p = GRPC_SLICE_START_PTR(slice_in); *slice_out = GRPC_SLICE_MALLOC(out_length); - uint8_t *q = 
GRPC_SLICE_START_PTR(*slice_out); + uint8_t* q = GRPC_SLICE_START_PTR(*slice_out); while (p != in_end) { if (*p == '%') { - *q++ = (uint8_t)(dehex(p[1]) << 4) | (dehex(p[2])); + *q++ = static_cast(dehex(p[1]) << 4) | (dehex(p[2])); p += 3; } else { *q++ = *p++; @@ -125,8 +127,8 @@ bool grpc_strict_percent_decode_slice(grpc_slice slice_in, } grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in) { - const uint8_t *p = GRPC_SLICE_START_PTR(slice_in); - const uint8_t *in_end = GRPC_SLICE_END_PTR(slice_in); + const uint8_t* p = GRPC_SLICE_START_PTR(slice_in); + const uint8_t* in_end = GRPC_SLICE_END_PTR(slice_in); size_t out_length = 0; bool any_percent_encoded_stuff = false; while (p != in_end) { @@ -149,13 +151,13 @@ grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in) { } p = GRPC_SLICE_START_PTR(slice_in); grpc_slice out = GRPC_SLICE_MALLOC(out_length); - uint8_t *q = GRPC_SLICE_START_PTR(out); + uint8_t* q = GRPC_SLICE_START_PTR(out); while (p != in_end) { if (*p == '%') { if (!valid_hex(p + 1, in_end) || !valid_hex(p + 2, in_end)) { *q++ = *p++; } else { - *q++ = (uint8_t)(dehex(p[1]) << 4) | (dehex(p[2])); + *q++ = static_cast(dehex(p[1]) << 4) | (dehex(p[2])); p += 3; } } else { diff --git a/Sources/CgRPC/src/core/lib/slice/percent_encoding.h b/Sources/CgRPC/src/core/lib/slice/percent_encoding.h index faae26a68..6b13ffc3f 100644 --- a/Sources/CgRPC/src/core/lib/slice/percent_encoding.h +++ b/Sources/CgRPC/src/core/lib/slice/percent_encoding.h @@ -26,6 +26,8 @@ and another which applies percent encoding only to non-http2 header bytes (the 'compatible' variant) */ +#include + #include #include @@ -45,7 +47,7 @@ extern const uint8_t grpc_compatible_percent_encoding_unreserved_bytes[256 / 8]; unreserved_bytes is a bitfield indicating which bytes are considered unreserved and thus do not need percent encoding */ grpc_slice grpc_percent_encode_slice(grpc_slice slice, - const uint8_t *unreserved_bytes); + const uint8_t* unreserved_bytes); /* Percent-decode a slice, strictly. If the input is legal (contains no unreserved bytes, and legal % encodings), returns true and sets *slice_out to the decoded slice. @@ -53,8 +55,8 @@ grpc_slice grpc_percent_encode_slice(grpc_slice slice, unreserved_bytes is a bitfield indicating which bytes are considered unreserved and thus do not need percent encoding */ bool grpc_strict_percent_decode_slice(grpc_slice slice_in, - const uint8_t *unreserved_bytes, - grpc_slice *slice_out); + const uint8_t* unreserved_bytes, + grpc_slice* slice_out); /* Percent-decode a slice, permissively. If a % triplet can not be decoded, pass it through verbatim. This cannot fail. 
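A usage sketch, for illustration only, of the two decoders described above: the strict variant rejects malformed %XX triplets and returns false, while the permissive variant never fails and passes bad triplets through verbatim. The caller function percent_decode_example() and its input string are hypothetical; the unreserved-byte table used is the grpc_compatible_percent_encoding_unreserved_bytes array exported by this header.

#include <grpc/slice.h>                            // grpc_slice_from_static_string, grpc_slice_unref
#include "src/core/lib/slice/percent_encoding.h"

void percent_decode_example() {
  grpc_slice in = grpc_slice_from_static_string("a%2Fb%ZZ");
  grpc_slice out;
  // Strict decoding: "%ZZ" is not a valid hex triplet, so this returns false
  // and *out is not produced.
  if (grpc_strict_percent_decode_slice(
          in, grpc_compatible_percent_encoding_unreserved_bytes, &out)) {
    grpc_slice_unref(out);  // not reached for this input
  }
  // Permissive decoding never fails: "%2F" becomes "/", while the malformed
  // "%ZZ" is copied through verbatim, yielding "a/b%ZZ".
  grpc_slice permissive = grpc_permissive_percent_decode_slice(in);
  grpc_slice_unref(permissive);
  grpc_slice_unref(in);  // no-op for a static slice, kept for symmetry
}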
*/ diff --git a/Sources/CgRPC/src/core/lib/slice/slice.c b/Sources/CgRPC/src/core/lib/slice/slice.cc similarity index 71% rename from Sources/CgRPC/src/core/lib/slice/slice.c rename to Sources/CgRPC/src/core/lib/slice/slice.cc index 0764eda05..419474129 100644 --- a/Sources/CgRPC/src/core/lib/slice/slice.c +++ b/Sources/CgRPC/src/core/lib/slice/slice.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/slice/slice_internal.h" #include @@ -26,8 +28,8 @@ #include "src/core/lib/iomgr/exec_ctx.h" -char *grpc_slice_to_c_string(grpc_slice slice) { - char *out = (char *)gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1); +char* grpc_slice_to_c_string(grpc_slice slice) { + char* out = static_cast(gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1)); memcpy(out, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice)); out[GRPC_SLICE_LENGTH(slice)] = 0; return out; @@ -35,7 +37,7 @@ char *grpc_slice_to_c_string(grpc_slice slice) { grpc_slice grpc_empty_slice(void) { grpc_slice out; - out.refcount = NULL; + out.refcount = nullptr; out.data.inlined.length = 0; return out; } @@ -54,9 +56,9 @@ grpc_slice grpc_slice_ref_internal(grpc_slice slice) { return slice; } -void grpc_slice_unref_internal(grpc_exec_ctx *exec_ctx, grpc_slice slice) { +void grpc_slice_unref_internal(grpc_slice slice) { if (slice.refcount) { - slice.refcount->vtable->unref(exec_ctx, slice.refcount); + slice.refcount->vtable->unref(slice.refcount); } } @@ -67,15 +69,18 @@ grpc_slice grpc_slice_ref(grpc_slice slice) { /* Public API */ void grpc_slice_unref(grpc_slice slice) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_unref_internal(&exec_ctx, slice); - grpc_exec_ctx_finish(&exec_ctx); + if (grpc_core::ExecCtx::Get() == nullptr) { + grpc_core::ExecCtx exec_ctx; + grpc_slice_unref_internal(slice); + } else { + grpc_slice_unref_internal(slice); + } } /* grpc_slice_from_static_string support structure - a refcount that does nothing */ -static void noop_ref(void *unused) {} -static void noop_unref(grpc_exec_ctx *exec_ctx, void *unused) {} +static void noop_ref(void* unused) {} +static void noop_unref(void* unused) {} static const grpc_slice_refcount_vtable noop_refcount_vtable = { noop_ref, noop_unref, grpc_slice_default_eq_impl, @@ -83,15 +88,15 @@ static const grpc_slice_refcount_vtable noop_refcount_vtable = { static grpc_slice_refcount noop_refcount = {&noop_refcount_vtable, &noop_refcount}; -grpc_slice grpc_slice_from_static_buffer(const void *s, size_t len) { +grpc_slice grpc_slice_from_static_buffer(const void* s, size_t len) { grpc_slice slice; slice.refcount = &noop_refcount; - slice.data.refcounted.bytes = (uint8_t *)s; + slice.data.refcounted.bytes = (uint8_t*)s; slice.data.refcounted.length = len; return slice; } -grpc_slice grpc_slice_from_static_string(const char *s) { +grpc_slice grpc_slice_from_static_string(const char* s) { return grpc_slice_from_static_buffer(s, strlen(s)); } @@ -100,17 +105,17 @@ grpc_slice grpc_slice_from_static_string(const char *s) { typedef struct new_slice_refcount { grpc_slice_refcount rc; gpr_refcount refs; - void (*user_destroy)(void *); - void *user_data; + void (*user_destroy)(void*); + void* user_data; } new_slice_refcount; -static void new_slice_ref(void *p) { - new_slice_refcount *r = (new_slice_refcount *)p; +static void new_slice_ref(void* p) { + new_slice_refcount* r = static_cast(p); gpr_ref(&r->refs); } -static void new_slice_unref(grpc_exec_ctx *exec_ctx, void *p) { - new_slice_refcount *r = (new_slice_refcount *)p; +static void new_slice_unref(void* p) { + new_slice_refcount* r = 
static_cast(p); if (gpr_unref(&r->refs)) { r->user_destroy(r->user_data); gpr_free(r); @@ -121,12 +126,12 @@ static const grpc_slice_refcount_vtable new_slice_vtable = { new_slice_ref, new_slice_unref, grpc_slice_default_eq_impl, grpc_slice_default_hash_impl}; -grpc_slice grpc_slice_new_with_user_data(void *p, size_t len, - void (*destroy)(void *), - void *user_data) { +grpc_slice grpc_slice_new_with_user_data(void* p, size_t len, + void (*destroy)(void*), + void* user_data) { grpc_slice slice; - new_slice_refcount *rc = - (new_slice_refcount *)gpr_malloc(sizeof(new_slice_refcount)); + new_slice_refcount* rc = + static_cast(gpr_malloc(sizeof(new_slice_refcount))); gpr_ref_init(&rc->refs, 1); rc->rc.vtable = &new_slice_vtable; rc->rc.sub_refcount = &rc->rc; @@ -134,12 +139,12 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len, rc->user_data = user_data; slice.refcount = &rc->rc; - slice.data.refcounted.bytes = (uint8_t *)p; + slice.data.refcounted.bytes = static_cast(p); slice.data.refcounted.length = len; return slice; } -grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *)) { +grpc_slice grpc_slice_new(void* p, size_t len, void (*destroy)(void*)) { /* Pass "p" to *destroy when the slice is no longer needed. */ return grpc_slice_new_with_user_data(p, len, destroy, p); } @@ -149,18 +154,18 @@ grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *)) { typedef struct new_with_len_slice_refcount { grpc_slice_refcount rc; gpr_refcount refs; - void *user_data; + void* user_data; size_t user_length; - void (*user_destroy)(void *, size_t); + void (*user_destroy)(void*, size_t); } new_with_len_slice_refcount; -static void new_with_len_ref(void *p) { - new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p; +static void new_with_len_ref(void* p) { + new_with_len_slice_refcount* r = static_cast(p); gpr_ref(&r->refs); } -static void new_with_len_unref(grpc_exec_ctx *exec_ctx, void *p) { - new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p; +static void new_with_len_unref(void* p) { + new_with_len_slice_refcount* r = static_cast(p); if (gpr_unref(&r->refs)) { r->user_destroy(r->user_data, r->user_length); gpr_free(r); @@ -171,11 +176,11 @@ static const grpc_slice_refcount_vtable new_with_len_vtable = { new_with_len_ref, new_with_len_unref, grpc_slice_default_eq_impl, grpc_slice_default_hash_impl}; -grpc_slice grpc_slice_new_with_len(void *p, size_t len, - void (*destroy)(void *, size_t)) { +grpc_slice grpc_slice_new_with_len(void* p, size_t len, + void (*destroy)(void*, size_t)) { grpc_slice slice; - new_with_len_slice_refcount *rc = (new_with_len_slice_refcount *)gpr_malloc( - sizeof(new_with_len_slice_refcount)); + new_with_len_slice_refcount* rc = static_cast( + gpr_malloc(sizeof(new_with_len_slice_refcount))); gpr_ref_init(&rc->refs, 1); rc->rc.vtable = &new_with_len_vtable; rc->rc.sub_refcount = &rc->rc; @@ -184,19 +189,19 @@ grpc_slice grpc_slice_new_with_len(void *p, size_t len, rc->user_length = len; slice.refcount = &rc->rc; - slice.data.refcounted.bytes = (uint8_t *)p; + slice.data.refcounted.bytes = static_cast(p); slice.data.refcounted.length = len; return slice; } -grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t length) { +grpc_slice grpc_slice_from_copied_buffer(const char* source, size_t length) { if (length == 0) return grpc_empty_slice(); grpc_slice slice = GRPC_SLICE_MALLOC(length); memcpy(GRPC_SLICE_START_PTR(slice), source, length); return slice; } -grpc_slice 
grpc_slice_from_copied_string(const char *source) { +grpc_slice grpc_slice_from_copied_string(const char* source) { return grpc_slice_from_copied_buffer(source, strlen(source)); } @@ -205,13 +210,13 @@ typedef struct { gpr_refcount refs; } malloc_refcount; -static void malloc_ref(void *p) { - malloc_refcount *r = (malloc_refcount *)p; +static void malloc_ref(void* p) { + malloc_refcount* r = static_cast(p); gpr_ref(&r->refs); } -static void malloc_unref(grpc_exec_ctx *exec_ctx, void *p) { - malloc_refcount *r = (malloc_refcount *)p; +static void malloc_unref(void* p) { + malloc_refcount* r = static_cast(p); if (gpr_unref(&r->refs)) { gpr_free(r); } @@ -233,8 +238,8 @@ grpc_slice grpc_slice_malloc_large(size_t length) { refcount is a malloc_refcount bytes is an array of bytes of the requested length Both parts are placed in the same allocation returned from gpr_malloc */ - malloc_refcount *rc = - (malloc_refcount *)gpr_malloc(sizeof(malloc_refcount) + length); + malloc_refcount* rc = static_cast( + gpr_malloc(sizeof(malloc_refcount) + length)); /* Initial refcount on rc is 1 - and it's up to the caller to release this reference. */ @@ -247,7 +252,7 @@ grpc_slice grpc_slice_malloc_large(size_t length) { /* The slices refcount points back to the allocated block. */ slice.refcount = &rc->base; /* The data bytes are placed immediately after the refcount struct */ - slice.data.refcounted.bytes = (uint8_t *)(rc + 1); + slice.data.refcounted.bytes = reinterpret_cast(rc + 1); /* And the length of the block is set to the requested length */ slice.data.refcounted.length = length; return slice; @@ -260,8 +265,8 @@ grpc_slice grpc_slice_malloc(size_t length) { return grpc_slice_malloc_large(length); } else { /* small slice: just inline the data */ - slice.refcount = NULL; - slice.data.inlined.length = (uint8_t)length; + slice.refcount = nullptr; + slice.data.inlined.length = static_cast(length); } return slice; } @@ -283,8 +288,8 @@ grpc_slice grpc_slice_sub_no_ref(grpc_slice source, size_t begin, size_t end) { } else { /* Enforce preconditions */ GPR_ASSERT(source.data.inlined.length >= end); - subset.refcount = NULL; - subset.data.inlined.length = (uint8_t)(end - begin); + subset.refcount = nullptr; + subset.data.inlined.length = static_cast(end - begin); memcpy(subset.data.inlined.bytes, source.data.inlined.bytes + begin, end - begin); } @@ -295,8 +300,8 @@ grpc_slice grpc_slice_sub(grpc_slice source, size_t begin, size_t end) { grpc_slice subset; if (end - begin <= sizeof(subset.data.inlined.bytes)) { - subset.refcount = NULL; - subset.data.inlined.length = (uint8_t)(end - begin); + subset.refcount = nullptr; + subset.data.inlined.length = static_cast(end - begin); memcpy(subset.data.inlined.bytes, GRPC_SLICE_START_PTR(source) + begin, end - begin); } else { @@ -307,26 +312,27 @@ grpc_slice grpc_slice_sub(grpc_slice source, size_t begin, size_t end) { return subset; } -grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice *source, size_t split, +grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice* source, size_t split, grpc_slice_ref_whom ref_whom) { grpc_slice tail; - if (source->refcount == NULL) { + if (source->refcount == nullptr) { /* inlined data, copy it out */ GPR_ASSERT(source->data.inlined.length >= split); - tail.refcount = NULL; - tail.data.inlined.length = (uint8_t)(source->data.inlined.length - split); + tail.refcount = nullptr; + tail.data.inlined.length = + static_cast(source->data.inlined.length - split); memcpy(tail.data.inlined.bytes, source->data.inlined.bytes + split, 
tail.data.inlined.length); - source->data.inlined.length = (uint8_t)split; + source->data.inlined.length = static_cast(split); } else { size_t tail_length = source->data.refcounted.length - split; GPR_ASSERT(source->data.refcounted.length >= split); if (tail_length < sizeof(tail.data.inlined.bytes) && ref_whom != GRPC_SLICE_REF_TAIL) { /* Copy out the bytes - it'll be cheaper than refcounting */ - tail.refcount = NULL; - tail.data.inlined.length = (uint8_t)tail_length; + tail.refcount = nullptr; + tail.data.inlined.length = static_cast(tail_length); memcpy(tail.data.inlined.bytes, source->data.refcounted.bytes + split, tail_length); source->refcount = source->refcount->sub_refcount; @@ -358,28 +364,28 @@ grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice *source, size_t split, return tail; } -grpc_slice grpc_slice_split_tail(grpc_slice *source, size_t split) { +grpc_slice grpc_slice_split_tail(grpc_slice* source, size_t split) { return grpc_slice_split_tail_maybe_ref(source, split, GRPC_SLICE_REF_BOTH); } -grpc_slice grpc_slice_split_head(grpc_slice *source, size_t split) { +grpc_slice grpc_slice_split_head(grpc_slice* source, size_t split) { grpc_slice head; - if (source->refcount == NULL) { + if (source->refcount == nullptr) { GPR_ASSERT(source->data.inlined.length >= split); - head.refcount = NULL; - head.data.inlined.length = (uint8_t)split; + head.refcount = nullptr; + head.data.inlined.length = static_cast(split); memcpy(head.data.inlined.bytes, source->data.inlined.bytes, split); source->data.inlined.length = - (uint8_t)(source->data.inlined.length - split); + static_cast(source->data.inlined.length - split); memmove(source->data.inlined.bytes, source->data.inlined.bytes + split, source->data.inlined.length); } else if (split < sizeof(head.data.inlined.bytes)) { GPR_ASSERT(source->data.refcounted.length >= split); - head.refcount = NULL; - head.data.inlined.length = (uint8_t)split; + head.refcount = nullptr; + head.data.inlined.length = static_cast(split); memcpy(head.data.inlined.bytes, source->data.refcounted.bytes, split); source->refcount = source->refcount->sub_refcount; source->data.refcounted.bytes += split; @@ -417,62 +423,63 @@ int grpc_slice_eq(grpc_slice a, grpc_slice b) { } int grpc_slice_cmp(grpc_slice a, grpc_slice b) { - int d = (int)(GRPC_SLICE_LENGTH(a) - GRPC_SLICE_LENGTH(b)); + int d = static_cast(GRPC_SLICE_LENGTH(a) - GRPC_SLICE_LENGTH(b)); if (d != 0) return d; return memcmp(GRPC_SLICE_START_PTR(a), GRPC_SLICE_START_PTR(b), GRPC_SLICE_LENGTH(a)); } -int grpc_slice_str_cmp(grpc_slice a, const char *b) { +int grpc_slice_str_cmp(grpc_slice a, const char* b) { size_t b_length = strlen(b); - int d = (int)(GRPC_SLICE_LENGTH(a) - b_length); + int d = static_cast(GRPC_SLICE_LENGTH(a) - b_length); if (d != 0) return d; return memcmp(GRPC_SLICE_START_PTR(a), b, b_length); } int grpc_slice_is_equivalent(grpc_slice a, grpc_slice b) { - if (a.refcount == NULL || b.refcount == NULL) { + if (a.refcount == nullptr || b.refcount == nullptr) { return grpc_slice_eq(a, b); } return a.data.refcounted.length == b.data.refcounted.length && a.data.refcounted.bytes == b.data.refcounted.bytes; } -int grpc_slice_buf_start_eq(grpc_slice a, const void *b, size_t len) { +int grpc_slice_buf_start_eq(grpc_slice a, const void* b, size_t len) { if (GRPC_SLICE_LENGTH(a) < len) return 0; return 0 == memcmp(GRPC_SLICE_START_PTR(a), b, len); } int grpc_slice_rchr(grpc_slice s, char c) { - const char *b = (const char *)GRPC_SLICE_START_PTR(s); + const char* b = reinterpret_cast 
GRPC_SLICE_START_PTR(s); int i; - for (i = (int)GRPC_SLICE_LENGTH(s) - 1; i != -1 && b[i] != c; i--) + for (i = static_cast GRPC_SLICE_LENGTH(s) - 1; i != -1 && b[i] != c; i--) ; return i; } int grpc_slice_chr(grpc_slice s, char c) { - const char *b = (const char *)GRPC_SLICE_START_PTR(s); - const char *p = (const char *)memchr(b, c, GRPC_SLICE_LENGTH(s)); - return p == NULL ? -1 : (int)(p - b); + const char* b = reinterpret_cast GRPC_SLICE_START_PTR(s); + const char* p = static_cast(memchr(b, c, GRPC_SLICE_LENGTH(s))); + return p == nullptr ? -1 : static_cast(p - b); } int grpc_slice_slice(grpc_slice haystack, grpc_slice needle) { size_t haystack_len = GRPC_SLICE_LENGTH(haystack); - const uint8_t *haystack_bytes = GRPC_SLICE_START_PTR(haystack); + const uint8_t* haystack_bytes = GRPC_SLICE_START_PTR(haystack); size_t needle_len = GRPC_SLICE_LENGTH(needle); - const uint8_t *needle_bytes = GRPC_SLICE_START_PTR(needle); + const uint8_t* needle_bytes = GRPC_SLICE_START_PTR(needle); if (haystack_len == 0 || needle_len == 0) return -1; if (haystack_len < needle_len) return -1; if (haystack_len == needle_len) return grpc_slice_eq(haystack, needle) ? 0 : -1; - if (needle_len == 1) return grpc_slice_chr(haystack, (char)*needle_bytes); + if (needle_len == 1) + return grpc_slice_chr(haystack, static_cast(*needle_bytes)); - const uint8_t *last = haystack_bytes + haystack_len - needle_len; - for (const uint8_t *cur = haystack_bytes; cur != last; ++cur) { + const uint8_t* last = haystack_bytes + haystack_len - needle_len; + for (const uint8_t* cur = haystack_bytes; cur != last; ++cur) { if (0 == memcmp(cur, needle_bytes, needle_len)) { - return (int)(cur - haystack_bytes); + return static_cast(cur - haystack_bytes); } } return -1; diff --git a/Sources/CgRPC/src/core/lib/slice/slice_buffer.c b/Sources/CgRPC/src/core/lib/slice/slice_buffer.cc similarity index 72% rename from Sources/CgRPC/src/core/lib/slice/slice_buffer.c rename to Sources/CgRPC/src/core/lib/slice/slice_buffer.cc index 63ffc0b00..fd5699738 100644 --- a/Sources/CgRPC/src/core/lib/slice/slice_buffer.c +++ b/Sources/CgRPC/src/core/lib/slice/slice_buffer.cc @@ -16,23 +16,25 @@ * */ -#include #include +#include + #include #include #include -#include +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/slice/slice_internal.h" /* grow a buffer; requires GRPC_SLICE_BUFFER_INLINE_ELEMENTS > 1 */ #define GROW(x) (3 * (x) / 2) -static void maybe_embiggen(grpc_slice_buffer *sb) { +static void maybe_embiggen(grpc_slice_buffer* sb) { /* How far away from sb->base_slices is sb->slices pointer */ - size_t slice_offset = (size_t)(sb->slices - sb->base_slices); + size_t slice_offset = static_cast(sb->slices - sb->base_slices); size_t slice_count = sb->count + slice_offset; if (slice_count == sb->capacity) { @@ -45,12 +47,12 @@ static void maybe_embiggen(grpc_slice_buffer *sb) { sb->capacity = GROW(sb->capacity); GPR_ASSERT(sb->capacity > slice_count); if (sb->base_slices == sb->inlined) { - sb->base_slices = - (grpc_slice *)gpr_malloc(sb->capacity * sizeof(grpc_slice)); + sb->base_slices = static_cast( + gpr_malloc(sb->capacity * sizeof(grpc_slice))); memcpy(sb->base_slices, sb->inlined, slice_count * sizeof(grpc_slice)); } else { - sb->base_slices = (grpc_slice *)gpr_realloc( - sb->base_slices, sb->capacity * sizeof(grpc_slice)); + sb->base_slices = static_cast( + gpr_realloc(sb->base_slices, sb->capacity * sizeof(grpc_slice))); } sb->slices = sb->base_slices + slice_offset; @@ -58,30 +60,32 @@ static 
void maybe_embiggen(grpc_slice_buffer *sb) { } } -void grpc_slice_buffer_init(grpc_slice_buffer *sb) { +void grpc_slice_buffer_init(grpc_slice_buffer* sb) { sb->count = 0; sb->length = 0; sb->capacity = GRPC_SLICE_BUFFER_INLINE_ELEMENTS; sb->base_slices = sb->slices = sb->inlined; } -void grpc_slice_buffer_destroy_internal(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *sb) { - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, sb); +void grpc_slice_buffer_destroy_internal(grpc_slice_buffer* sb) { + grpc_slice_buffer_reset_and_unref_internal(sb); if (sb->base_slices != sb->inlined) { gpr_free(sb->base_slices); } } -void grpc_slice_buffer_destroy(grpc_slice_buffer *sb) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_buffer_destroy_internal(&exec_ctx, sb); - grpc_exec_ctx_finish(&exec_ctx); +void grpc_slice_buffer_destroy(grpc_slice_buffer* sb) { + if (grpc_core::ExecCtx::Get() == nullptr) { + grpc_core::ExecCtx exec_ctx; + grpc_slice_buffer_destroy_internal(sb); + } else { + grpc_slice_buffer_destroy_internal(sb); + } } -uint8_t *grpc_slice_buffer_tiny_add(grpc_slice_buffer *sb, size_t n) { - grpc_slice *back; - uint8_t *out; +uint8_t* grpc_slice_buffer_tiny_add(grpc_slice_buffer* sb, size_t n) { + grpc_slice* back; + uint8_t* out; sb->length += n; @@ -91,19 +95,20 @@ uint8_t *grpc_slice_buffer_tiny_add(grpc_slice_buffer *sb, size_t n) { if ((back->data.inlined.length + n) > sizeof(back->data.inlined.bytes)) goto add_new; out = back->data.inlined.bytes + back->data.inlined.length; - back->data.inlined.length = (uint8_t)(back->data.inlined.length + n); + back->data.inlined.length = + static_cast(back->data.inlined.length + n); return out; add_new: maybe_embiggen(sb); back = &sb->slices[sb->count]; sb->count++; - back->refcount = NULL; - back->data.inlined.length = (uint8_t)n; + back->refcount = nullptr; + back->data.inlined.length = static_cast(n); return back->data.inlined.bytes; } -size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer *sb, grpc_slice s) { +size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer* sb, grpc_slice s) { size_t out = sb->count; maybe_embiggen(sb); sb->slices[out] = s; @@ -112,7 +117,7 @@ size_t grpc_slice_buffer_add_indexed(grpc_slice_buffer *sb, grpc_slice s) { return out; } -void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice s) { +void grpc_slice_buffer_add(grpc_slice_buffer* sb, grpc_slice s) { size_t n = sb->count; /* if both the last slice in the slice buffer and the slice being added are inlined (that is, that they carry their data inside the slice data @@ -120,15 +125,15 @@ void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice s) { into the back slice, preventing many small slices being passed into writes */ if (!s.refcount && n) { - grpc_slice *back = &sb->slices[n - 1]; + grpc_slice* back = &sb->slices[n - 1]; if (!back->refcount && back->data.inlined.length < GRPC_SLICE_INLINED_SIZE) { if (s.data.inlined.length + back->data.inlined.length <= GRPC_SLICE_INLINED_SIZE) { memcpy(back->data.inlined.bytes + back->data.inlined.length, s.data.inlined.bytes, s.data.inlined.length); - back->data.inlined.length = - (uint8_t)(back->data.inlined.length + s.data.inlined.length); + back->data.inlined.length = static_cast( + back->data.inlined.length + s.data.inlined.length); } else { size_t cp1 = GRPC_SLICE_INLINED_SIZE - back->data.inlined.length; memcpy(back->data.inlined.bytes + back->data.inlined.length, @@ -137,8 +142,9 @@ void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice s) { maybe_embiggen(sb); back = &sb->slices[n]; 
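A minimal sketch, not part of the vendored diff, of the recurring change in these slice and slice_buffer hunks: the explicit grpc_exec_ctx* parameter is gone, internal helpers rely on a thread-local grpc_core::ExecCtx, and public entry points such as grpc_slice_buffer_destroy() and grpc_slice_buffer_reset_and_unref() create one on the stack only when none exists. Only the ExecCtx API visible in this diff is assumed (ExecCtx::Get() returning the current thread-local instance or nullptr, and a destructor that flushes pending closures); the wrapper name public_destroy() is hypothetical.

#include "src/core/lib/iomgr/exec_ctx.h"        // grpc_core::ExecCtx
#include "src/core/lib/slice/slice_internal.h"  // grpc_slice_buffer_destroy_internal

void public_destroy(grpc_slice_buffer* sb) {
  if (grpc_core::ExecCtx::Get() == nullptr) {
    // Called from application code: no ExecCtx is on this thread yet, so
    // create one for the duration of the call; its destructor flushes any
    // closures scheduled while it was alive.
    grpc_core::ExecCtx exec_ctx;
    grpc_slice_buffer_destroy_internal(sb);
  } else {
    // Called from inside gRPC core: an ExecCtx already exists, reuse it.
    grpc_slice_buffer_destroy_internal(sb);
  }
}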
sb->count = n + 1; - back->refcount = NULL; - back->data.inlined.length = (uint8_t)(s.data.inlined.length - cp1); + back->refcount = nullptr; + back->data.inlined.length = + static_cast(s.data.inlined.length - cp1); memcpy(back->data.inlined.bytes, s.data.inlined.bytes + cp1, s.data.inlined.length - cp1); } @@ -149,40 +155,42 @@ void grpc_slice_buffer_add(grpc_slice_buffer *sb, grpc_slice s) { grpc_slice_buffer_add_indexed(sb, s); } -void grpc_slice_buffer_addn(grpc_slice_buffer *sb, grpc_slice *s, size_t n) { +void grpc_slice_buffer_addn(grpc_slice_buffer* sb, grpc_slice* s, size_t n) { size_t i; for (i = 0; i < n; i++) { grpc_slice_buffer_add(sb, s[i]); } } -void grpc_slice_buffer_pop(grpc_slice_buffer *sb) { +void grpc_slice_buffer_pop(grpc_slice_buffer* sb) { if (sb->count != 0) { size_t count = --sb->count; sb->length -= GRPC_SLICE_LENGTH(sb->slices[count]); } } -void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *sb) { +void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer* sb) { size_t i; for (i = 0; i < sb->count; i++) { - grpc_slice_unref_internal(exec_ctx, sb->slices[i]); + grpc_slice_unref_internal(sb->slices[i]); } sb->count = 0; sb->length = 0; } -void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer *sb) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, sb); - grpc_exec_ctx_finish(&exec_ctx); +void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer* sb) { + if (grpc_core::ExecCtx::Get() == nullptr) { + grpc_core::ExecCtx exec_ctx; + grpc_slice_buffer_reset_and_unref_internal(sb); + } else { + grpc_slice_buffer_reset_and_unref_internal(sb); + } } -void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b) { - size_t a_offset = (size_t)(a->slices - a->base_slices); - size_t b_offset = (size_t)(b->slices - b->base_slices); +void grpc_slice_buffer_swap(grpc_slice_buffer* a, grpc_slice_buffer* b) { + size_t a_offset = static_cast(a->slices - a->base_slices); + size_t b_offset = static_cast(b->slices - b->base_slices); size_t a_count = a->count + a_offset; size_t b_count = b->count + b_offset; @@ -207,7 +215,7 @@ void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b) { memcpy(a->base_slices, b->inlined, b_count * sizeof(grpc_slice)); } else { /* no inlining: easy swap */ - GPR_SWAP(grpc_slice *, a->base_slices, b->base_slices); + GPR_SWAP(grpc_slice*, a->base_slices, b->base_slices); } /* Update the slices pointers (cannot do a GPR_SWAP on slices fields here). @@ -222,8 +230,8 @@ void grpc_slice_buffer_swap(grpc_slice_buffer *a, grpc_slice_buffer *b) { GPR_SWAP(size_t, a->length, b->length); } -void grpc_slice_buffer_move_into(grpc_slice_buffer *src, - grpc_slice_buffer *dst) { +void grpc_slice_buffer_move_into(grpc_slice_buffer* src, + grpc_slice_buffer* dst) { /* anything to move? 
*/ if (src->count == 0) { return; @@ -239,8 +247,8 @@ void grpc_slice_buffer_move_into(grpc_slice_buffer *src, src->length = 0; } -static void slice_buffer_move_first_maybe_ref(grpc_slice_buffer *src, size_t n, - grpc_slice_buffer *dst, +static void slice_buffer_move_first_maybe_ref(grpc_slice_buffer* src, size_t n, + grpc_slice_buffer* dst, bool incref) { GPR_ASSERT(src->length >= n); if (src->length == n) { @@ -279,20 +287,19 @@ static void slice_buffer_move_first_maybe_ref(grpc_slice_buffer *src, size_t n, GPR_ASSERT(src->count > 0); } -void grpc_slice_buffer_move_first(grpc_slice_buffer *src, size_t n, - grpc_slice_buffer *dst) { +void grpc_slice_buffer_move_first(grpc_slice_buffer* src, size_t n, + grpc_slice_buffer* dst) { slice_buffer_move_first_maybe_ref(src, n, dst, true); } -void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer *src, size_t n, - grpc_slice_buffer *dst) { +void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer* src, size_t n, + grpc_slice_buffer* dst) { slice_buffer_move_first_maybe_ref(src, n, dst, false); } -void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *src, size_t n, - void *dst) { - char *dstp = (char *)dst; +void grpc_slice_buffer_move_first_into_buffer(grpc_slice_buffer* src, size_t n, + void* dst) { + char* dstp = static_cast(dst); GPR_ASSERT(src->length >= n); while (n > 0) { @@ -305,19 +312,19 @@ void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx, n = 0; } else if (slice_len == n) { memcpy(dstp, GRPC_SLICE_START_PTR(slice), n); - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); n = 0; } else { memcpy(dstp, GRPC_SLICE_START_PTR(slice), slice_len); dstp += slice_len; n -= slice_len; - grpc_slice_unref_internal(exec_ctx, slice); + grpc_slice_unref_internal(slice); } } } -void grpc_slice_buffer_trim_end(grpc_slice_buffer *sb, size_t n, - grpc_slice_buffer *garbage) { +void grpc_slice_buffer_trim_end(grpc_slice_buffer* sb, size_t n, + grpc_slice_buffer* garbage) { GPR_ASSERT(n <= sb->length); sb->length -= n; for (;;) { @@ -340,7 +347,7 @@ void grpc_slice_buffer_trim_end(grpc_slice_buffer *sb, size_t n, } } -grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer *sb) { +grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer* sb) { grpc_slice slice; GPR_ASSERT(sb->count > 0); slice = sb->slices[0]; @@ -351,7 +358,7 @@ grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer *sb) { return slice; } -void grpc_slice_buffer_undo_take_first(grpc_slice_buffer *sb, +void grpc_slice_buffer_undo_take_first(grpc_slice_buffer* sb, grpc_slice slice) { sb->slices--; sb->slices[0] = slice; diff --git a/Sources/CgRPC/src/core/lib/slice/slice_hash_table.c b/Sources/CgRPC/src/core/lib/slice/slice_hash_table.c deleted file mode 100644 index 6c2c9c201..000000000 --- a/Sources/CgRPC/src/core/lib/slice/slice_hash_table.c +++ /dev/null @@ -1,146 +0,0 @@ -// -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
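The rewritten slice-buffer wrappers above (grpc_slice_buffer_destroy, grpc_slice_buffer_reset_and_unref) illustrate the calling convention this patch adopts throughout CgRPC: instead of threading an explicit grpc_exec_ctx* through every call, a public entry point now materializes a scoped grpc_core::ExecCtx when the calling thread does not already have one. A minimal sketch of that idiom, assuming only grpc_core::ExecCtx from "src/core/lib/iomgr/exec_ctx.h"; do_internal_work() is a placeholder for any *_internal() helper, not a real gRPC function:

#include "src/core/lib/iomgr/exec_ctx.h"

void do_internal_work();  // placeholder for the *_internal() variant

void public_entry_point() {
  if (grpc_core::ExecCtx::Get() == nullptr) {
    // No context on this thread yet: create a scoped one. Its destructor
    // flushes any closures queued while it was alive.
    grpc_core::ExecCtx exec_ctx;
    do_internal_work();
  } else {
    // The caller is already inside gRPC and owns a context; just reuse it.
    do_internal_work();
  }
}
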
-// - -#include "src/core/lib/slice/slice_hash_table.h" - -#include -#include - -#include -#include - -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/transport/metadata.h" - -struct grpc_slice_hash_table { - gpr_refcount refs; - void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value); - int (*value_cmp)(void* a, void* b); - size_t size; - size_t max_num_probes; - grpc_slice_hash_table_entry* entries; -}; - -static bool is_empty(grpc_slice_hash_table_entry* entry) { - return entry->value == NULL; -} - -static void grpc_slice_hash_table_add(grpc_slice_hash_table* table, - grpc_slice key, void* value) { - GPR_ASSERT(value != NULL); - const size_t hash = grpc_slice_hash(key); - for (size_t offset = 0; offset < table->size; ++offset) { - const size_t idx = (hash + offset) % table->size; - if (is_empty(&table->entries[idx])) { - table->entries[idx].key = key; - table->entries[idx].value = value; - // Keep track of the maximum number of probes needed, since this - // provides an upper bound for lookups. - if (offset > table->max_num_probes) table->max_num_probes = offset; - return; - } - } - GPR_ASSERT(false); // Table should never be full. -} - -grpc_slice_hash_table* grpc_slice_hash_table_create( - size_t num_entries, grpc_slice_hash_table_entry* entries, - void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value), - int (*value_cmp)(void* a, void* b)) { - grpc_slice_hash_table* table = - (grpc_slice_hash_table*)gpr_zalloc(sizeof(*table)); - gpr_ref_init(&table->refs, 1); - table->destroy_value = destroy_value; - table->value_cmp = value_cmp; - // Keep load factor low to improve performance of lookups. - table->size = num_entries * 2; - const size_t entry_size = sizeof(grpc_slice_hash_table_entry) * table->size; - table->entries = (grpc_slice_hash_table_entry*)gpr_zalloc(entry_size); - for (size_t i = 0; i < num_entries; ++i) { - grpc_slice_hash_table_entry* entry = &entries[i]; - grpc_slice_hash_table_add(table, entry->key, entry->value); - } - return table; -} - -grpc_slice_hash_table* grpc_slice_hash_table_ref(grpc_slice_hash_table* table) { - if (table != NULL) gpr_ref(&table->refs); - return table; -} - -void grpc_slice_hash_table_unref(grpc_exec_ctx* exec_ctx, - grpc_slice_hash_table* table) { - if (table != NULL && gpr_unref(&table->refs)) { - for (size_t i = 0; i < table->size; ++i) { - grpc_slice_hash_table_entry* entry = &table->entries[i]; - if (!is_empty(entry)) { - grpc_slice_unref_internal(exec_ctx, entry->key); - table->destroy_value(exec_ctx, entry->value); - } - } - gpr_free(table->entries); - gpr_free(table); - } -} - -void* grpc_slice_hash_table_get(const grpc_slice_hash_table* table, - const grpc_slice key) { - const size_t hash = grpc_slice_hash(key); - // We cap the number of probes at the max number recorded when - // populating the table. - for (size_t offset = 0; offset <= table->max_num_probes; ++offset) { - const size_t idx = (hash + offset) % table->size; - if (is_empty(&table->entries[idx])) break; - if (grpc_slice_eq(table->entries[idx].key, key)) { - return table->entries[idx].value; - } - } - return NULL; // Not found. -} - -static int pointer_cmp(void* a, void* b) { return GPR_ICMP(a, b); } -int grpc_slice_hash_table_cmp(const grpc_slice_hash_table* a, - const grpc_slice_hash_table* b) { - int (*const value_cmp_fn_a)(void* a, void* b) = - a->value_cmp != NULL ? a->value_cmp : pointer_cmp; - int (*const value_cmp_fn_b)(void* a, void* b) = - b->value_cmp != NULL ? 
b->value_cmp : pointer_cmp; - // Compare value_fns - const int value_fns_cmp = - GPR_ICMP((void*)value_cmp_fn_a, (void*)value_cmp_fn_b); - if (value_fns_cmp != 0) return value_fns_cmp; - // Compare sizes - if (a->size < b->size) return -1; - if (a->size > b->size) return 1; - // Compare rows. - for (size_t i = 0; i < a->size; ++i) { - if (is_empty(&a->entries[i])) { - if (!is_empty(&b->entries[i])) { - return -1; // a empty but b non-empty - } - continue; // both empty, no need to check key or value - } else if (is_empty(&b->entries[i])) { - return 1; // a non-empty but b empty - } - // neither entry is empty - const int key_cmp = grpc_slice_cmp(a->entries[i].key, b->entries[i].key); - if (key_cmp != 0) return key_cmp; - const int value_cmp = - value_cmp_fn_a(a->entries[i].value, b->entries[i].value); - if (value_cmp != 0) return value_cmp; - } - return 0; -} diff --git a/Sources/CgRPC/src/core/lib/slice/slice_hash_table.h b/Sources/CgRPC/src/core/lib/slice/slice_hash_table.h index 339078fef..4bbcf88e8 100644 --- a/Sources/CgRPC/src/core/lib/slice/slice_hash_table.h +++ b/Sources/CgRPC/src/core/lib/slice/slice_hash_table.h @@ -17,54 +17,189 @@ #ifndef GRPC_CORE_LIB_SLICE_SLICE_HASH_TABLE_H #define GRPC_CORE_LIB_SLICE_SLICE_HASH_TABLE_H -#include "src/core/lib/transport/metadata.h" +#include -/** Hash table implementation. - * - * This implementation uses open addressing - * (https://en.wikipedia.org/wiki/Open_addressing) with linear - * probing (https://en.wikipedia.org/wiki/Linear_probing). - * - * The keys are \a grpc_slice objects. The values are arbitrary pointers - * with a common destroy function. - * - * Hash tables are intentionally immutable, to avoid the need for locking. - */ +#include + +#include +#include + +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/ref_counted.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/slice/slice_internal.h" + +/// Hash table implementation. +/// +/// This implementation uses open addressing +/// (https://en.wikipedia.org/wiki/Open_addressing) with linear +/// probing (https://en.wikipedia.org/wiki/Linear_probing). +/// +/// The keys are \a grpc_slice objects. The values can be any type. +/// +/// Hash tables are intentionally immutable, to avoid the need for locking. + +namespace grpc_core { + +template +class SliceHashTable : public RefCounted> { + public: + struct Entry { + grpc_slice key; + T value; + bool is_set; + }; + + // Function for comparing values. + // TODO(roth): Eliminate this and the Cmp() method from this API once + // grpc_channel_args is redesigned to require that keys are unique. + typedef int (*ValueCmp)(const T&, const T&); + + /// Creates a new hash table containing \a entries, which is an array + /// of length \a num_entries. Takes ownership of all keys and values in \a + /// entries. If not null, \a value_cmp will be used to compare values in + /// the context of \a Cmp(). If null, raw pointer (\a GPR_ICMP) comparison + /// will be used. + static RefCountedPtr Create(size_t num_entries, + Entry* entries, + ValueCmp value_cmp); + + /// Returns the value from the table associated with \a key. + /// Returns null if \a key is not found. + const T* Get(const grpc_slice& key) const; + + /// Compares \a a vs. \a b. + /// A table is considered "smaller" (resp. "greater") if: + /// - GPR_ICMP(a->value_cmp, b->value_cmp) < 1 (resp. > 1), + /// - else, it contains fewer (resp. more) entries, + /// - else, if strcmp(a_key, b_key) < 1 (resp. 
> 1), + /// - else, if value_cmp(a_value, b_value) < 1 (resp. > 1). + static int Cmp(const SliceHashTable& a, const SliceHashTable& b); + + private: + // So New() can call our private ctor. + template + friend T2* New(Args&&... args); + + // So Delete() can call our private dtor. + template + friend void Delete(T2*); + + SliceHashTable(size_t num_entries, Entry* entries, ValueCmp value_cmp); + virtual ~SliceHashTable(); + + void Add(grpc_slice key, T& value); + + // Default value comparison function, if none specified by caller. + static int DefaultValueCmp(const T& a, const T& b) { return GPR_ICMP(a, b); } + + const ValueCmp value_cmp_; + const size_t size_; + size_t max_num_probes_; + Entry* entries_; +}; + +// +// implementation -- no user-serviceable parts below +// + +template +RefCountedPtr> SliceHashTable::Create(size_t num_entries, + Entry* entries, + ValueCmp value_cmp) { + return MakeRefCounted>(num_entries, entries, value_cmp); +} + +template +SliceHashTable::SliceHashTable(size_t num_entries, Entry* entries, + ValueCmp value_cmp) + : value_cmp_(value_cmp), + // Keep load factor low to improve performance of lookups. + size_(num_entries * 2), + max_num_probes_(0) { + entries_ = static_cast(gpr_zalloc(sizeof(Entry) * size_)); + for (size_t i = 0; i < num_entries; ++i) { + Entry* entry = &entries[i]; + Add(entry->key, entry->value); + } +} + +template +SliceHashTable::~SliceHashTable() { + for (size_t i = 0; i < size_; ++i) { + Entry& entry = entries_[i]; + if (entry.is_set) { + grpc_slice_unref_internal(entry.key); + entry.value.~T(); + } + } + gpr_free(entries_); +} + +template +void SliceHashTable::Add(grpc_slice key, T& value) { + const size_t hash = grpc_slice_hash(key); + for (size_t offset = 0; offset < size_; ++offset) { + const size_t idx = (hash + offset) % size_; + if (!entries_[idx].is_set) { + entries_[idx].is_set = true; + entries_[idx].key = key; + entries_[idx].value = std::move(value); + // Keep track of the maximum number of probes needed, since this + // provides an upper bound for lookups. + if (offset > max_num_probes_) max_num_probes_ = offset; + return; + } + } + GPR_ASSERT(false); // Table should never be full. +} + +template +const T* SliceHashTable::Get(const grpc_slice& key) const { + const size_t hash = grpc_slice_hash(key); + // We cap the number of probes at the max number recorded when + // populating the table. + for (size_t offset = 0; offset <= max_num_probes_; ++offset) { + const size_t idx = (hash + offset) % size_; + if (!entries_[idx].is_set) break; + if (grpc_slice_eq(entries_[idx].key, key)) { + return &entries_[idx].value; + } + } + return nullptr; // Not found. +} + +template +int SliceHashTable::Cmp(const SliceHashTable& a, const SliceHashTable& b) { + ValueCmp value_cmp_a = + a.value_cmp_ != nullptr ? a.value_cmp_ : DefaultValueCmp; + ValueCmp value_cmp_b = + b.value_cmp_ != nullptr ? b.value_cmp_ : DefaultValueCmp; + // Compare value_fns + const int value_fns_cmp = GPR_ICMP((void*)value_cmp_a, (void*)value_cmp_b); + if (value_fns_cmp != 0) return value_fns_cmp; + // Compare sizes + if (a.size_ < b.size_) return -1; + if (a.size_ > b.size_) return 1; + // Compare rows. 
+ for (size_t i = 0; i < a.size_; ++i) { + if (!a.entries_[i].is_set) { + if (b.entries_[i].is_set) { + return -1; // a empty but b non-empty + } + continue; // both empty, no need to check key or value + } else if (!b.entries_[i].is_set) { + return 1; // a non-empty but b empty + } + // neither entry is empty + const int key_cmp = grpc_slice_cmp(a.entries_[i].key, b.entries_[i].key); + if (key_cmp != 0) return key_cmp; + const int value_cmp = value_cmp_a(a.entries_[i].value, b.entries_[i].value); + if (value_cmp != 0) return value_cmp; + } + return 0; +} -typedef struct grpc_slice_hash_table grpc_slice_hash_table; - -typedef struct grpc_slice_hash_table_entry { - grpc_slice key; - void *value; /* Must not be NULL. */ -} grpc_slice_hash_table_entry; - -/** Creates a new hash table of containing \a entries, which is an array - of length \a num_entries. Takes ownership of all keys and values in \a - entries. Values will be cleaned up via \a destroy_value(). If not NULL, \a - value_cmp will be used to compare values in the context of \a - grpc_slice_hash_table_cmp. If NULL, raw pointer (\a GPR_ICMP) comparison - will be used. */ -grpc_slice_hash_table *grpc_slice_hash_table_create( - size_t num_entries, grpc_slice_hash_table_entry *entries, - void (*destroy_value)(grpc_exec_ctx *exec_ctx, void *value), - int (*value_cmp)(void *a, void *b)); - -grpc_slice_hash_table *grpc_slice_hash_table_ref(grpc_slice_hash_table *table); -void grpc_slice_hash_table_unref(grpc_exec_ctx *exec_ctx, - grpc_slice_hash_table *table); - -/** Returns the value from \a table associated with \a key. - Returns NULL if \a key is not found. */ -void *grpc_slice_hash_table_get(const grpc_slice_hash_table *table, - const grpc_slice key); - -/** Compares \a a vs. \a b. - * A table is considered "smaller" (resp. "greater") if: - * - GPR_ICMP(a->value_cmp, b->value_cmp) < 1 (resp. > 1), - * - else, it contains fewer (resp. more) entries, - * - else, if strcmp(a_key, b_key) < 1 (resp. > 1), - * - else, if value_cmp(a_value, b_value) < 1 (resp. > 1). 
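The C grpc_slice_hash_table removed in this file is superseded by the grpc_core::SliceHashTable<T> template added above. A hedged usage sketch follows; the keys and const char* values are invented for illustration, and only Create()/Get(), the Entry layout, and grpc_slice_from_static_string() are taken from the sources shown here:

#include "src/core/lib/slice/slice_hash_table.h"

void slice_hash_table_example() {
  using Table = grpc_core::SliceHashTable<const char*>;
  // Entries are {key, value, is_set}; Create() takes ownership of the keys.
  Table::Entry entries[] = {
      {grpc_slice_from_static_string("GET"), "safe", false},
      {grpc_slice_from_static_string("POST"), "unsafe", false},
  };
  grpc_core::RefCountedPtr<Table> table =
      Table::Create(2, entries, /*value_cmp=*/nullptr);
  // Get() returns a pointer to the stored value, or nullptr if absent.
  const char* const* v = table->Get(grpc_slice_from_static_string("GET"));
  (void)v;
}

Because the table is immutable after Create(), lookups need no locking, and the recorded max_num_probes_ bounds how far Get() walks the probe sequence.
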
*/ -int grpc_slice_hash_table_cmp(const grpc_slice_hash_table *a, - const grpc_slice_hash_table *b); +} // namespace grpc_core #endif /* GRPC_CORE_LIB_SLICE_SLICE_HASH_TABLE_H */ diff --git a/Sources/CgRPC/src/core/lib/slice/slice_intern.c b/Sources/CgRPC/src/core/lib/slice/slice_intern.cc similarity index 77% rename from Sources/CgRPC/src/core/lib/slice/slice_intern.c rename to Sources/CgRPC/src/core/lib/slice/slice_intern.cc index ec71b3ca1..e53c040e1 100644 --- a/Sources/CgRPC/src/core/lib/slice/slice_intern.c +++ b/Sources/CgRPC/src/core/lib/slice/slice_intern.cc @@ -16,17 +16,20 @@ * */ +#include + #include "src/core/lib/slice/slice_internal.h" +#include #include #include #include +#include "src/core/lib/gpr/murmur_hash.h" #include "src/core/lib/iomgr/iomgr_internal.h" /* for iomgr_abort_on_leaks() */ #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/murmur_hash.h" #include "src/core/lib/transport/static_metadata.h" #define LOG2_SHARD_COUNT 5 @@ -42,12 +45,12 @@ typedef struct interned_slice_refcount { size_t length; gpr_atm refcnt; uint32_t hash; - struct interned_slice_refcount *bucket_next; + struct interned_slice_refcount* bucket_next; } interned_slice_refcount; typedef struct slice_shard { gpr_mu mu; - interned_slice_refcount **strs; + interned_slice_refcount** strs; size_t count; size_t capacity; } slice_shard; @@ -68,17 +71,17 @@ static static_metadata_hash_ent static uint32_t max_static_metadata_hash_probe; static uint32_t static_metadata_hash_values[GRPC_STATIC_MDSTR_COUNT]; -static void interned_slice_ref(void *p) { - interned_slice_refcount *s = (interned_slice_refcount *)p; +static void interned_slice_ref(void* p) { + interned_slice_refcount* s = static_cast(p); GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) > 0); } -static void interned_slice_destroy(interned_slice_refcount *s) { - slice_shard *shard = &g_shards[SHARD_IDX(s->hash)]; +static void interned_slice_destroy(interned_slice_refcount* s) { + slice_shard* shard = &g_shards[SHARD_IDX(s->hash)]; gpr_mu_lock(&shard->mu); GPR_ASSERT(0 == gpr_atm_no_barrier_load(&s->refcnt)); - interned_slice_refcount **prev_next; - interned_slice_refcount *cur; + interned_slice_refcount** prev_next; + interned_slice_refcount* cur; for (prev_next = &shard->strs[TABLE_IDX(s->hash, shard->capacity)], cur = *prev_next; cur != s; prev_next = &cur->bucket_next, cur = cur->bucket_next) @@ -89,24 +92,26 @@ static void interned_slice_destroy(interned_slice_refcount *s) { gpr_mu_unlock(&shard->mu); } -static void interned_slice_unref(grpc_exec_ctx *exec_ctx, void *p) { - interned_slice_refcount *s = (interned_slice_refcount *)p; +static void interned_slice_unref(void* p) { + interned_slice_refcount* s = static_cast(p); if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) { interned_slice_destroy(s); } } -static void interned_slice_sub_ref(void *p) { - interned_slice_ref(((char *)p) - offsetof(interned_slice_refcount, sub)); +static void interned_slice_sub_ref(void* p) { + interned_slice_ref((static_cast(p)) - + offsetof(interned_slice_refcount, sub)); } -static void interned_slice_sub_unref(grpc_exec_ctx *exec_ctx, void *p) { - interned_slice_unref(exec_ctx, - ((char *)p) - offsetof(interned_slice_refcount, sub)); +static void interned_slice_sub_unref(void* p) { + interned_slice_unref((static_cast(p)) - + offsetof(interned_slice_refcount, sub)); } static uint32_t interned_slice_hash(grpc_slice slice) { - interned_slice_refcount *s = (interned_slice_refcount 
*)slice.refcount; + interned_slice_refcount* s = + reinterpret_cast(slice.refcount); return s->hash; } @@ -121,16 +126,16 @@ static const grpc_slice_refcount_vtable interned_slice_sub_vtable = { interned_slice_sub_ref, interned_slice_sub_unref, grpc_slice_default_eq_impl, grpc_slice_default_hash_impl}; -static void grow_shard(slice_shard *shard) { +static void grow_shard(slice_shard* shard) { + GPR_TIMER_SCOPE("grow_strtab", 0); + size_t capacity = shard->capacity * 2; size_t i; - interned_slice_refcount **strtab; + interned_slice_refcount** strtab; interned_slice_refcount *s, *next; - GPR_TIMER_BEGIN("grow_strtab", 0); - - strtab = (interned_slice_refcount **)gpr_zalloc( - sizeof(interned_slice_refcount *) * capacity); + strtab = static_cast( + gpr_zalloc(sizeof(interned_slice_refcount*) * capacity)); for (i = 0; i < shard->capacity; i++) { for (s = shard->strs[i]; s; s = next) { @@ -140,18 +145,15 @@ static void grow_shard(slice_shard *shard) { strtab[idx] = s; } } - gpr_free(shard->strs); shard->strs = strtab; shard->capacity = capacity; - - GPR_TIMER_END("grow_strtab", 0); } -static grpc_slice materialize(interned_slice_refcount *s) { +static grpc_slice materialize(interned_slice_refcount* s) { grpc_slice slice; slice.refcount = &s->base; - slice.data.refcounted.bytes = (uint8_t *)(s + 1); + slice.data.refcounted.bytes = reinterpret_cast(s + 1); slice.data.refcounted.length = s->length; return slice; } @@ -170,12 +172,12 @@ int grpc_static_slice_eq(grpc_slice a, grpc_slice b) { } uint32_t grpc_slice_hash(grpc_slice s) { - return s.refcount == NULL ? grpc_slice_default_hash_impl(s) - : s.refcount->vtable->hash(s); + return s.refcount == nullptr ? grpc_slice_default_hash_impl(s) + : s.refcount->vtable->hash(s); } grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice, - bool *returned_slice_is_different) { + bool* returned_slice_is_different) { if (GRPC_IS_STATIC_METADATA_STRING(slice)) { return slice; } @@ -200,9 +202,8 @@ bool grpc_slice_is_interned(grpc_slice slice) { } grpc_slice grpc_slice_intern(grpc_slice slice) { - GPR_TIMER_BEGIN("grpc_slice_intern", 0); + GPR_TIMER_SCOPE("grpc_slice_intern", 0); if (GRPC_IS_STATIC_METADATA_STRING(slice)) { - GPR_TIMER_END("grpc_slice_intern", 0); return slice; } @@ -212,13 +213,12 @@ grpc_slice grpc_slice_intern(grpc_slice slice) { static_metadata_hash[(hash + i) % GPR_ARRAY_SIZE(static_metadata_hash)]; if (ent.hash == hash && ent.idx < GRPC_STATIC_MDSTR_COUNT && grpc_slice_eq(grpc_static_slice_table[ent.idx], slice)) { - GPR_TIMER_END("grpc_slice_intern", 0); return grpc_static_slice_table[ent.idx]; } } - interned_slice_refcount *s; - slice_shard *shard = &g_shards[SHARD_IDX(hash)]; + interned_slice_refcount* s; + slice_shard* shard = &g_shards[SHARD_IDX(hash)]; gpr_mu_lock(&shard->mu); @@ -235,7 +235,6 @@ grpc_slice grpc_slice_intern(grpc_slice slice) { /* and treat this as if we were never here... 
sshhh */ } else { gpr_mu_unlock(&shard->mu); - GPR_TIMER_END("grpc_slice_intern", 0); return materialize(s); } } @@ -243,8 +242,8 @@ grpc_slice grpc_slice_intern(grpc_slice slice) { /* not found: create a new string */ /* string data goes after the internal_string header */ - s = (interned_slice_refcount *)gpr_malloc(sizeof(*s) + - GRPC_SLICE_LENGTH(slice)); + s = static_cast( + gpr_malloc(sizeof(*s) + GRPC_SLICE_LENGTH(slice))); gpr_atm_rel_store(&s->refcnt, 1); s->length = GRPC_SLICE_LENGTH(slice); s->hash = hash; @@ -264,7 +263,6 @@ grpc_slice grpc_slice_intern(grpc_slice slice) { gpr_mu_unlock(&shard->mu); - GPR_TIMER_END("grpc_slice_intern", 0); return materialize(s); } @@ -275,15 +273,15 @@ void grpc_test_only_set_slice_hash_seed(uint32_t seed) { void grpc_slice_intern_init(void) { if (!g_forced_hash_seed) { - g_hash_seed = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec; + g_hash_seed = static_cast(gpr_now(GPR_CLOCK_REALTIME).tv_nsec); } for (size_t i = 0; i < SHARD_COUNT; i++) { - slice_shard *shard = &g_shards[i]; + slice_shard* shard = &g_shards[i]; gpr_mu_init(&shard->mu); shard->count = 0; shard->capacity = INITIAL_SHARD_CAPACITY; - shard->strs = (interned_slice_refcount **)gpr_zalloc(sizeof(*shard->strs) * - shard->capacity); + shard->strs = static_cast( + gpr_zalloc(sizeof(*shard->strs) * shard->capacity)); } for (size_t i = 0; i < GPR_ARRAY_SIZE(static_metadata_hash); i++) { static_metadata_hash[i].hash = 0; @@ -298,9 +296,9 @@ void grpc_slice_intern_init(void) { GPR_ARRAY_SIZE(static_metadata_hash); if (static_metadata_hash[slot].idx == GRPC_STATIC_MDSTR_COUNT) { static_metadata_hash[slot].hash = static_metadata_hash_values[i]; - static_metadata_hash[slot].idx = (uint32_t)i; + static_metadata_hash[slot].idx = static_cast(i); if (j > max_static_metadata_hash_probe) { - max_static_metadata_hash_probe = (uint32_t)j; + max_static_metadata_hash_probe = static_cast(j); } break; } @@ -310,16 +308,16 @@ void grpc_slice_intern_init(void) { void grpc_slice_intern_shutdown(void) { for (size_t i = 0; i < SHARD_COUNT; i++) { - slice_shard *shard = &g_shards[i]; + slice_shard* shard = &g_shards[i]; gpr_mu_destroy(&shard->mu); /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */ if (shard->count != 0) { gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata strings were leaked", shard->count); for (size_t j = 0; j < shard->capacity; j++) { - for (interned_slice_refcount *s = shard->strs[j]; s; + for (interned_slice_refcount* s = shard->strs[j]; s; s = s->bucket_next) { - char *text = + char* text = grpc_dump_slice(materialize(s), GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_DEBUG, "LEAKED: %s", text); gpr_free(text); diff --git a/Sources/CgRPC/src/core/lib/slice/slice_internal.h b/Sources/CgRPC/src/core/lib/slice/slice_internal.h index 6df0b4b50..5d3d26b67 100644 --- a/Sources/CgRPC/src/core/lib/slice/slice_internal.h +++ b/Sources/CgRPC/src/core/lib/slice/slice_internal.h @@ -19,17 +19,17 @@ #ifndef GRPC_CORE_LIB_SLICE_SLICE_INTERNAL_H #define GRPC_CORE_LIB_SLICE_SLICE_INTERNAL_H +#include + #include #include -#include "src/core/lib/iomgr/exec_ctx.h" - grpc_slice grpc_slice_ref_internal(grpc_slice slice); -void grpc_slice_unref_internal(grpc_exec_ctx *exec_ctx, grpc_slice slice); -void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *sb); -void grpc_slice_buffer_destroy_internal(grpc_exec_ctx *exec_ctx, - grpc_slice_buffer *sb); +void grpc_slice_unref_internal(grpc_slice slice); +void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer* sb); +void 
grpc_slice_buffer_partial_unref_internal(grpc_slice_buffer* sb, + size_t idx); +void grpc_slice_buffer_destroy_internal(grpc_slice_buffer* sb); /* Check if a slice is interned */ bool grpc_slice_is_interned(grpc_slice slice); @@ -42,7 +42,7 @@ void grpc_test_only_set_slice_hash_seed(uint32_t key); // used for surface boundaries where we might receive an un-interned static // string grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice, - bool *returned_slice_is_different); + bool* returned_slice_is_different); uint32_t grpc_static_slice_hash(grpc_slice s); int grpc_static_slice_eq(grpc_slice a, grpc_slice b); diff --git a/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.c b/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.c deleted file mode 100644 index d461c474d..000000000 --- a/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/slice/slice_string_helpers.h" - -#include - -#include - -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/string.h" - -char *grpc_dump_slice(grpc_slice s, uint32_t flags) { - return gpr_dump((const char *)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s), - flags); -} - -/** Finds the initial (\a begin) and final (\a end) offsets of the next - * substring from \a str + \a read_offset until the next \a sep or the end of \a - * str. - * - * Returns 1 and updates \a begin and \a end. Returns 0 otherwise. 
*/ -static int slice_find_separator_offset(const grpc_slice str, const char *sep, - const size_t read_offset, size_t *begin, - size_t *end) { - size_t i; - const uint8_t *str_ptr = GRPC_SLICE_START_PTR(str) + read_offset; - const size_t str_len = GRPC_SLICE_LENGTH(str) - read_offset; - const size_t sep_len = strlen(sep); - if (str_len < sep_len) { - return 0; - } - - for (i = 0; i <= str_len - sep_len; i++) { - if (memcmp(str_ptr + i, sep, sep_len) == 0) { - *begin = read_offset; - *end = read_offset + i; - return 1; - } - } - return 0; -} - -void grpc_slice_split(grpc_slice str, const char *sep, grpc_slice_buffer *dst) { - const size_t sep_len = strlen(sep); - size_t begin, end; - - GPR_ASSERT(sep_len > 0); - - if (slice_find_separator_offset(str, sep, 0, &begin, &end) != 0) { - do { - grpc_slice_buffer_add_indexed(dst, grpc_slice_sub(str, begin, end)); - } while (slice_find_separator_offset(str, sep, end + sep_len, &begin, - &end) != 0); - grpc_slice_buffer_add_indexed( - dst, grpc_slice_sub(str, end + sep_len, GRPC_SLICE_LENGTH(str))); - } else { /* no sep found, add whole input */ - grpc_slice_buffer_add_indexed(dst, grpc_slice_ref_internal(str)); - } -} - -bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t *result) { - return gpr_parse_bytes_to_uint32((const char *)GRPC_SLICE_START_PTR(str), - GRPC_SLICE_LENGTH(str), result) != 0; -} diff --git a/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.cc b/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.cc new file mode 100644 index 000000000..6af9c33eb --- /dev/null +++ b/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.cc @@ -0,0 +1,118 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/slice/slice_string_helpers.h" + +#include + +#include + +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/slice/slice_internal.h" + +char* grpc_dump_slice(grpc_slice s, uint32_t flags) { + return gpr_dump(reinterpret_cast GRPC_SLICE_START_PTR(s), + GRPC_SLICE_LENGTH(s), flags); +} + +/** Finds the initial (\a begin) and final (\a end) offsets of the next + * substring from \a str + \a read_offset until the next \a sep or the end of \a + * str. + * + * Returns 1 and updates \a begin and \a end. Returns 0 otherwise. 
*/ +static int slice_find_separator_offset(const grpc_slice str, const char* sep, + const size_t read_offset, size_t* begin, + size_t* end) { + size_t i; + const uint8_t* str_ptr = GRPC_SLICE_START_PTR(str) + read_offset; + const size_t str_len = GRPC_SLICE_LENGTH(str) - read_offset; + const size_t sep_len = strlen(sep); + if (str_len < sep_len) { + return 0; + } + + for (i = 0; i <= str_len - sep_len; i++) { + if (memcmp(str_ptr + i, sep, sep_len) == 0) { + *begin = read_offset; + *end = read_offset + i; + return 1; + } + } + return 0; +} + +static void skip_leading_trailing_spaces(const uint8_t* str_buffer, + size_t* begin, size_t* end) { + while (*begin < *end && str_buffer[*begin] == ' ') { + (*begin)++; + } + while (*begin < *end && str_buffer[*end - 1] == ' ') { + (*end)--; + } +} + +static void grpc_slice_split_inner(grpc_slice str, const char* sep, + grpc_slice_buffer* dst, bool no_space) { + const size_t sep_len = strlen(sep); + size_t begin, end; + const uint8_t* str_buffer = GRPC_SLICE_START_PTR(str); + size_t sep_pos; + + GPR_ASSERT(sep_len > 0); + + if (slice_find_separator_offset(str, sep, 0, &begin, &end) != 0) { + do { + sep_pos = end; + if (no_space) { + skip_leading_trailing_spaces(str_buffer, &begin, &end); + } + grpc_slice_buffer_add_indexed(dst, grpc_slice_sub(str, begin, end)); + } while (slice_find_separator_offset(str, sep, sep_pos + sep_len, &begin, + &end) != 0); + begin = sep_pos + sep_len; + end = GRPC_SLICE_LENGTH(str); + if (no_space) { + skip_leading_trailing_spaces(str_buffer, &begin, &end); + } + grpc_slice_buffer_add_indexed(dst, grpc_slice_sub(str, begin, end)); + } else { /* no sep found, add whole input */ + begin = 0; + end = GRPC_SLICE_LENGTH(str); + if (no_space) { + skip_leading_trailing_spaces(str_buffer, &begin, &end); + } + grpc_slice_buffer_add_indexed(dst, grpc_slice_sub(str, begin, end)); + } +} + +void grpc_slice_split(grpc_slice str, const char* sep, grpc_slice_buffer* dst) { + grpc_slice_split_inner(str, sep, dst, false); +} + +void grpc_slice_split_without_space(grpc_slice str, const char* sep, + grpc_slice_buffer* dst) { + grpc_slice_split_inner(str, sep, dst, true); +} + +bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t* result) { + return gpr_parse_bytes_to_uint32( + reinterpret_cast GRPC_SLICE_START_PTR(str), + GRPC_SLICE_LENGTH(str), result) != 0; +} diff --git a/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.h b/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.h index bcfb33bfb..976f72411 100644 --- a/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.h +++ b/Sources/CgRPC/src/core/lib/slice/slice_string_helpers.h @@ -19,30 +19,29 @@ #ifndef GRPC_CORE_LIB_SLICE_SLICE_STRING_HELPERS_H #define GRPC_CORE_LIB_SLICE_SLICE_STRING_HELPERS_H +#include + #include #include #include #include -#include - -#include "src/core/lib/support/string.h" -#ifdef __cplusplus -extern "C" { -#endif +#include "src/core/lib/gpr/string.h" /* Calls gpr_dump on a slice. */ -char *grpc_dump_slice(grpc_slice slice, uint32_t flags); +char* grpc_dump_slice(grpc_slice slice, uint32_t flags); /** Split \a str by the separator \a sep. Results are stored in \a dst, which * should be a properly initialized instance. 
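For a concrete sense of how the two split entry points in this file differ, here is a small example; the input literal is invented, while the functions and grpc_slice_buffer setup come from the code above:

#include "src/core/lib/slice/slice_string_helpers.h"

void split_example() {
  grpc_slice_buffer parts;
  grpc_slice_buffer_init(&parts);
  grpc_slice input = grpc_slice_from_static_string(" a ,b , c");

  grpc_slice_split(input, ",", &parts);
  // parts now holds " a ", "b ", " c" -- separators removed, spaces kept.

  grpc_slice_buffer_reset_and_unref(&parts);
  grpc_slice_split_without_space(input, ",", &parts);
  // parts now holds "a", "b", "c" -- leading/trailing spaces trimmed as well.

  grpc_slice_buffer_destroy(&parts);
  grpc_slice_unref(input);
}
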
*/ -void grpc_slice_split(grpc_slice str, const char *sep, grpc_slice_buffer *dst); +void grpc_slice_split(grpc_slice str, const char* sep, grpc_slice_buffer* dst); -bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t *result); +/** Split \a str by the separator \a sep and remove the leading and trailing + * spaces of each resulting token. Results are stored in \a dst, which should be + * a properly initialized instance. */ +void grpc_slice_split_without_space(grpc_slice str, const char* sep, + grpc_slice_buffer* dst); -#ifdef __cplusplus -} -#endif +bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t* result); #endif /* GRPC_CORE_LIB_SLICE_SLICE_STRING_HELPERS_H */ diff --git a/Sources/CgRPC/src/core/lib/slice/slice_weak_hash_table.h b/Sources/CgRPC/src/core/lib/slice/slice_weak_hash_table.h new file mode 100644 index 000000000..dc3ccc5da --- /dev/null +++ b/Sources/CgRPC/src/core/lib/slice/slice_weak_hash_table.h @@ -0,0 +1,109 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef GRPC_CORE_LIB_SLICE_SLICE_WEAK_HASH_TABLE_H +#define GRPC_CORE_LIB_SLICE_SLICE_WEAK_HASH_TABLE_H + +#include + +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/ref_counted.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/slice/slice_internal.h" + +/// Weak hash table implementation. +/// +/// This entries in this table are weak: an entry may be removed at any time due +/// to a number of reasons: memory pressure, hash collisions, etc. +/// +/// The keys are \a grpc_slice objects. The values are of arbitrary type. +/// +/// This class is thread unsafe. It's the caller's responsibility to ensure +/// proper locking when accessing its methods. + +namespace grpc_core { + +template +class SliceWeakHashTable : public RefCounted> { + public: + /// Creates a new table of at most \a size entries. + static RefCountedPtr Create() { + return MakeRefCounted>(); + } + + /// Add a mapping from \a key to \a value, taking ownership of \a key. This + /// operation will always succeed. It may discard older entries. + void Add(grpc_slice key, T value) { + const size_t idx = grpc_slice_hash(key) % Size; + entries_[idx].Set(key, std::move(value)); + return; + } + + /// Returns the value from the table associated with / \a key or null if not + /// found. + const T* Get(const grpc_slice key) const { + const size_t idx = grpc_slice_hash(key) % Size; + const auto& entry = entries_[idx]; + return grpc_slice_eq(entry.key(), key) ? entry.value() : nullptr; + } + + private: + // So New() can call our private ctor. + template + friend T2* New(Args&&... args); + + // So Delete() can call our private dtor. + template + friend void Delete(T2*); + + SliceWeakHashTable() = default; + ~SliceWeakHashTable() = default; + + /// The type of the table "rows". 
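// Illustrative use of SliceWeakHashTable (not part of this header): a small
// weak cache keyed by slices. An entry may be silently dropped whenever
// another key hashes to the same bucket, so Get() is strictly best-effort.
// The <T, Size> template parameters and Create()/Add()/Get() signatures are
// read off the declarations above; the value type int is an assumption made
// only for this sketch.
//
//   auto cache = grpc_core::SliceWeakHashTable<int, /*Size=*/16>::Create();
//   cache->Add(grpc_slice_from_static_string("key"), 42);
//   const int* hit = cache->Get(grpc_slice_from_static_string("key"));
//   // hit may be nullptr if a colliding Add() evicted the entry.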
+ class Entry { + public: + Entry() = default; + ~Entry() { + if (is_set_) grpc_slice_unref_internal(key_); + } + grpc_slice key() const { return key_; } + + /// Return the entry's value, or null if unset. + const T* value() const { + if (!is_set_) return nullptr; + return &value_; + } + + /// Set the \a key and \a value (which is moved) for the entry. + void Set(grpc_slice key, T&& value) { + if (is_set_) grpc_slice_unref_internal(key_); + key_ = key; + value_ = std::move(value); + is_set_ = true; + } + + private: + grpc_slice key_; + T value_; + bool is_set_ = false; + }; + + Entry entries_[Size]; +}; + +} // namespace grpc_core + +#endif /* GRPC_CORE_LIB_SLICE_SLICE_WEAK_HASH_TABLE_H */ diff --git a/Sources/CgRPC/src/core/lib/support/arena.c b/Sources/CgRPC/src/core/lib/support/arena.c deleted file mode 100644 index 9e0f73ae3..000000000 --- a/Sources/CgRPC/src/core/lib/support/arena.c +++ /dev/null @@ -1,83 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/support/arena.h" -#include -#include -#include -#include - -#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \ - (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u)) - -typedef struct zone { - size_t size_begin; - size_t size_end; - gpr_atm next_atm; -} zone; - -struct gpr_arena { - gpr_atm size_so_far; - zone initial_zone; -}; - -gpr_arena *gpr_arena_create(size_t initial_size) { - initial_size = ROUND_UP_TO_ALIGNMENT_SIZE(initial_size); - gpr_arena *a = (gpr_arena *)gpr_zalloc(sizeof(gpr_arena) + initial_size); - a->initial_zone.size_end = initial_size; - return a; -} - -size_t gpr_arena_destroy(gpr_arena *arena) { - gpr_atm size = gpr_atm_no_barrier_load(&arena->size_so_far); - zone *z = (zone *)gpr_atm_no_barrier_load(&arena->initial_zone.next_atm); - gpr_free(arena); - while (z) { - zone *next_z = (zone *)gpr_atm_no_barrier_load(&z->next_atm); - gpr_free(z); - z = next_z; - } - return (size_t)size; -} - -void *gpr_arena_alloc(gpr_arena *arena, size_t size) { - size = ROUND_UP_TO_ALIGNMENT_SIZE(size); - size_t start = - (size_t)gpr_atm_no_barrier_fetch_add(&arena->size_so_far, size); - zone *z = &arena->initial_zone; - while (start > z->size_end) { - zone *next_z = (zone *)gpr_atm_acq_load(&z->next_atm); - if (next_z == NULL) { - size_t next_z_size = (size_t)gpr_atm_no_barrier_load(&arena->size_so_far); - next_z = (zone *)gpr_zalloc(sizeof(zone) + next_z_size); - next_z->size_begin = z->size_end; - next_z->size_end = z->size_end + next_z_size; - if (!gpr_atm_rel_cas(&z->next_atm, (gpr_atm)NULL, (gpr_atm)next_z)) { - gpr_free(next_z); - next_z = (zone *)gpr_atm_acq_load(&z->next_atm); - } - } - z = next_z; - } - if (start + size > z->size_end) { - return gpr_arena_alloc(arena, size); - } - GPR_ASSERT(start >= z->size_begin); - GPR_ASSERT(start + size <= z->size_end); - return ((char *)(z + 1)) + start - z->size_begin; -} diff --git a/Sources/CgRPC/src/core/lib/support/backoff.c b/Sources/CgRPC/src/core/lib/support/backoff.c deleted file mode 
100644 index 6dc0df473..000000000 --- a/Sources/CgRPC/src/core/lib/support/backoff.c +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/support/backoff.h" - -#include - -void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout, - double multiplier, double jitter, - int64_t min_timeout_millis, int64_t max_timeout_millis) { - backoff->initial_connect_timeout = initial_connect_timeout; - backoff->multiplier = multiplier; - backoff->jitter = jitter; - backoff->min_timeout_millis = min_timeout_millis; - backoff->max_timeout_millis = max_timeout_millis; - backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec; -} - -gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now) { - backoff->current_timeout_millis = backoff->initial_connect_timeout; - const int64_t first_timeout = - GPR_MAX(backoff->current_timeout_millis, backoff->min_timeout_millis); - return gpr_time_add(now, gpr_time_from_millis(first_timeout, GPR_TIMESPAN)); -} - -/* Generate a random number between 0 and 1. */ -static double generate_uniform_random_number(uint32_t *rng_state) { - *rng_state = (1103515245 * *rng_state + 12345) % ((uint32_t)1 << 31); - return *rng_state / (double)((uint32_t)1 << 31); -} - -gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now) { - const double new_timeout_millis = - backoff->multiplier * (double)backoff->current_timeout_millis; - backoff->current_timeout_millis = - GPR_MIN((int64_t)new_timeout_millis, backoff->max_timeout_millis); - - const double jitter_range_width = backoff->jitter * new_timeout_millis; - const double jitter = - (2 * generate_uniform_random_number(&backoff->rng_state) - 1) * - jitter_range_width; - - backoff->current_timeout_millis = - (int64_t)((double)(backoff->current_timeout_millis) + jitter); - - const gpr_timespec current_deadline = gpr_time_add( - now, gpr_time_from_millis(backoff->current_timeout_millis, GPR_TIMESPAN)); - - const gpr_timespec min_deadline = gpr_time_add( - now, gpr_time_from_millis(backoff->min_timeout_millis, GPR_TIMESPAN)); - - return gpr_time_max(current_deadline, min_deadline); -} - -void gpr_backoff_reset(gpr_backoff *backoff) { - backoff->current_timeout_millis = backoff->initial_connect_timeout; -} diff --git a/Sources/CgRPC/src/core/lib/support/backoff.h b/Sources/CgRPC/src/core/lib/support/backoff.h deleted file mode 100644 index 6e0cc3a4b..000000000 --- a/Sources/CgRPC/src/core/lib/support/backoff.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_SUPPORT_BACKOFF_H -#define GRPC_CORE_LIB_SUPPORT_BACKOFF_H - -#include - -typedef struct { - /// const: how long to wait after the first failure before retrying - int64_t initial_connect_timeout; - /// const: factor with which to multiply backoff after a failed retry - double multiplier; - /// const: amount to randomize backoffs - double jitter; - /// const: minimum time between retries in milliseconds - int64_t min_timeout_millis; - /// const: maximum time between retries in milliseconds - int64_t max_timeout_millis; - - /// random number generator - uint32_t rng_state; - - /// current retry timeout in milliseconds - int64_t current_timeout_millis; -} gpr_backoff; - -/// Initialize backoff machinery - does not need to be destroyed -void gpr_backoff_init(gpr_backoff *backoff, int64_t initial_connect_timeout, - double multiplier, double jitter, - int64_t min_timeout_millis, int64_t max_timeout_millis); - -/// Begin retry loop: returns a timespec for the NEXT retry -gpr_timespec gpr_backoff_begin(gpr_backoff *backoff, gpr_timespec now); -/// Step a retry loop: returns a timespec for the NEXT retry -gpr_timespec gpr_backoff_step(gpr_backoff *backoff, gpr_timespec now); -/// Reset the backoff, so the next gpr_backoff_step will be a gpr_backoff_begin -/// instead -void gpr_backoff_reset(gpr_backoff *backoff); - -#endif /* GRPC_CORE_LIB_SUPPORT_BACKOFF_H */ diff --git a/Sources/CgRPC/src/core/lib/support/cmdline.c b/Sources/CgRPC/src/core/lib/support/cmdline.c deleted file mode 100644 index 9fb80d446..000000000 --- a/Sources/CgRPC/src/core/lib/support/cmdline.c +++ /dev/null @@ -1,330 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
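The gpr_backoff code deleted above deserves a quick worked example of the math it implemented (numbers invented): with initial_connect_timeout = 1000 ms, multiplier = 1.6, and jitter = 0.2, gpr_backoff_begin() schedules the first retry 1000 ms out; the first gpr_backoff_step() computes 1.6 * 1000 = 1600 ms, draws a jitter uniformly from +/-(0.2 * 1600) = +/-320 ms, and so lands the next retry roughly 1280 to 1920 ms out, subject to the min/max timeout clamps. Each further step multiplies the current timeout by 1.6 again before jittering.
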
- * - */ - -#include - -#include -#include -#include - -#include -#include -#include -#include "src/core/lib/support/string.h" - -typedef enum { ARGTYPE_INT, ARGTYPE_BOOL, ARGTYPE_STRING } argtype; - -typedef struct arg { - const char *name; - const char *help; - argtype type; - void *value; - struct arg *next; -} arg; - -struct gpr_cmdline { - const char *description; - arg *args; - const char *argv0; - - const char *extra_arg_name; - const char *extra_arg_help; - void (*extra_arg)(void *user_data, const char *arg); - void *extra_arg_user_data; - - int (*state)(gpr_cmdline *cl, char *arg); - arg *cur_arg; - - int survive_failure; -}; - -static int normal_state(gpr_cmdline *cl, char *arg); - -gpr_cmdline *gpr_cmdline_create(const char *description) { - gpr_cmdline *cl = (gpr_cmdline *)gpr_zalloc(sizeof(gpr_cmdline)); - - cl->description = description; - cl->state = normal_state; - - return cl; -} - -void gpr_cmdline_set_survive_failure(gpr_cmdline *cl) { - cl->survive_failure = 1; -} - -void gpr_cmdline_destroy(gpr_cmdline *cl) { - while (cl->args) { - arg *a = cl->args; - cl->args = a->next; - gpr_free(a); - } - gpr_free(cl); -} - -static void add_arg(gpr_cmdline *cl, const char *name, const char *help, - argtype type, void *value) { - arg *a; - - for (a = cl->args; a; a = a->next) { - GPR_ASSERT(0 != strcmp(a->name, name)); - } - - a = (arg *)gpr_zalloc(sizeof(arg)); - a->name = name; - a->help = help; - a->type = type; - a->value = value; - a->next = cl->args; - cl->args = a; -} - -void gpr_cmdline_add_int(gpr_cmdline *cl, const char *name, const char *help, - int *value) { - add_arg(cl, name, help, ARGTYPE_INT, value); -} - -void gpr_cmdline_add_flag(gpr_cmdline *cl, const char *name, const char *help, - int *value) { - add_arg(cl, name, help, ARGTYPE_BOOL, value); -} - -void gpr_cmdline_add_string(gpr_cmdline *cl, const char *name, const char *help, - char **value) { - add_arg(cl, name, help, ARGTYPE_STRING, value); -} - -void gpr_cmdline_on_extra_arg( - gpr_cmdline *cl, const char *name, const char *help, - void (*on_extra_arg)(void *user_data, const char *arg), void *user_data) { - GPR_ASSERT(!cl->extra_arg); - GPR_ASSERT(on_extra_arg); - - cl->extra_arg = on_extra_arg; - cl->extra_arg_user_data = user_data; - cl->extra_arg_name = name; - cl->extra_arg_help = help; -} - -/* recursively descend argument list, adding the last element - to s first - so that arguments are added in the order they were - added to the list by api calls */ -static void add_args_to_usage(gpr_strvec *s, arg *a) { - char *tmp; - - if (!a) return; - add_args_to_usage(s, a->next); - - switch (a->type) { - case ARGTYPE_BOOL: - gpr_asprintf(&tmp, " [--%s|--no-%s]", a->name, a->name); - gpr_strvec_add(s, tmp); - break; - case ARGTYPE_STRING: - gpr_asprintf(&tmp, " [--%s=string]", a->name); - gpr_strvec_add(s, tmp); - break; - case ARGTYPE_INT: - gpr_asprintf(&tmp, " [--%s=int]", a->name); - gpr_strvec_add(s, tmp); - break; - } -} - -char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0) { - /* TODO(ctiller): make this prettier */ - gpr_strvec s; - char *tmp; - const char *name = strrchr(argv0, '/'); - - if (name) { - name++; - } else { - name = argv0; - } - - gpr_strvec_init(&s); - - gpr_asprintf(&tmp, "Usage: %s", name); - gpr_strvec_add(&s, tmp); - add_args_to_usage(&s, cl->args); - if (cl->extra_arg) { - gpr_asprintf(&tmp, " [%s...]", cl->extra_arg_name); - gpr_strvec_add(&s, tmp); - } - gpr_strvec_add(&s, gpr_strdup("\n")); - - tmp = gpr_strvec_flatten(&s, NULL); - gpr_strvec_destroy(&s); - return 
tmp; -} - -static int print_usage_and_die(gpr_cmdline *cl) { - char *usage = gpr_cmdline_usage_string(cl, cl->argv0); - fprintf(stderr, "%s", usage); - gpr_free(usage); - if (!cl->survive_failure) { - exit(1); - } - return 0; -} - -static int extra_state(gpr_cmdline *cl, char *str) { - if (!cl->extra_arg) { - return print_usage_and_die(cl); - } - cl->extra_arg(cl->extra_arg_user_data, str); - return 1; -} - -static arg *find_arg(gpr_cmdline *cl, char *name) { - arg *a; - - for (a = cl->args; a; a = a->next) { - if (0 == strcmp(a->name, name)) { - break; - } - } - - if (!a) { - fprintf(stderr, "Unknown argument: %s\n", name); - return NULL; - } - - return a; -} - -static int value_state(gpr_cmdline *cl, char *str) { - long intval; - char *end; - - GPR_ASSERT(cl->cur_arg); - - switch (cl->cur_arg->type) { - case ARGTYPE_INT: - intval = strtol(str, &end, 0); - if (*end || intval < INT_MIN || intval > INT_MAX) { - fprintf(stderr, "expected integer, got '%s' for %s\n", str, - cl->cur_arg->name); - return print_usage_and_die(cl); - } - *(int *)cl->cur_arg->value = (int)intval; - break; - case ARGTYPE_BOOL: - if (0 == strcmp(str, "1") || 0 == strcmp(str, "true")) { - *(int *)cl->cur_arg->value = 1; - } else if (0 == strcmp(str, "0") || 0 == strcmp(str, "false")) { - *(int *)cl->cur_arg->value = 0; - } else { - fprintf(stderr, "expected boolean, got '%s' for %s\n", str, - cl->cur_arg->name); - return print_usage_and_die(cl); - } - break; - case ARGTYPE_STRING: - *(char **)cl->cur_arg->value = str; - break; - } - - cl->state = normal_state; - return 1; -} - -static int normal_state(gpr_cmdline *cl, char *str) { - char *eq = NULL; - char *tmp = NULL; - char *arg_name = NULL; - int r = 1; - - if (0 == strcmp(str, "-help") || 0 == strcmp(str, "--help") || - 0 == strcmp(str, "-h")) { - return print_usage_and_die(cl); - } - - cl->cur_arg = NULL; - - if (str[0] == '-') { - if (str[1] == '-') { - if (str[2] == 0) { - /* handle '--' to move to just extra args */ - cl->state = extra_state; - return 1; - } - str += 2; - } else { - str += 1; - } - /* first byte of str is now past the leading '-' or '--' */ - if (str[0] == 'n' && str[1] == 'o' && str[2] == '-') { - /* str is of the form '--no-foo' - it's a flag disable */ - str += 3; - cl->cur_arg = find_arg(cl, str); - if (cl->cur_arg == NULL) { - return print_usage_and_die(cl); - } - if (cl->cur_arg->type != ARGTYPE_BOOL) { - fprintf(stderr, "%s is not a flag argument\n", str); - return print_usage_and_die(cl); - } - *(int *)cl->cur_arg->value = 0; - return 1; /* early out */ - } - eq = strchr(str, '='); - if (eq != NULL) { - /* copy the string into a temp buffer and extract the name */ - tmp = arg_name = (char *)gpr_malloc((size_t)(eq - str + 1)); - memcpy(arg_name, str, (size_t)(eq - str)); - arg_name[eq - str] = 0; - } else { - arg_name = str; - } - cl->cur_arg = find_arg(cl, arg_name); - if (cl->cur_arg == NULL) { - return print_usage_and_die(cl); - } - if (eq != NULL) { - /* str was of the type --foo=value, parse the value */ - r = value_state(cl, eq + 1); - } else if (cl->cur_arg->type != ARGTYPE_BOOL) { - /* flag types don't have a '--foo value' variant, other types do */ - cl->state = value_state; - } else { - /* flag parameter: just set the value */ - *(int *)cl->cur_arg->value = 1; - } - } else { - r = extra_state(cl, str); - } - - gpr_free(tmp); - return r; -} - -int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv) { - int i; - - GPR_ASSERT(argc >= 1); - cl->argv0 = argv[0]; - - for (i = 1; i < argc; i++) { - if (!cl->state(cl, argv[i])) 
{ - return 0; - } - } - return 1; -} diff --git a/Sources/CgRPC/src/core/lib/support/histogram.c b/Sources/CgRPC/src/core/lib/support/histogram.c deleted file mode 100644 index 6d5ead9aa..000000000 --- a/Sources/CgRPC/src/core/lib/support/histogram.c +++ /dev/null @@ -1,228 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include - -#include -#include -#include - -#include -#include -#include -#include - -/* Histograms are stored with exponentially increasing bucket sizes. - The first bucket is [0, m) where m = 1 + resolution - Bucket n (n>=1) contains [m**n, m**(n+1)) - There are sufficient buckets to reach max_bucket_start */ - -struct gpr_histogram { - /* Sum of all values seen so far */ - double sum; - /* Sum of squares of all values seen so far */ - double sum_of_squares; - /* number of values seen so far */ - double count; - /* m in the description */ - double multiplier; - double one_on_log_multiplier; - /* minimum value seen */ - double min_seen; - /* maximum value seen */ - double max_seen; - /* maximum representable value */ - double max_possible; - /* number of buckets */ - size_t num_buckets; - /* the buckets themselves */ - uint32_t *buckets; -}; - -/* determine a bucket index given a value - does no bounds checking */ -static size_t bucket_for_unchecked(gpr_histogram *h, double x) { - return (size_t)(log(x) * h->one_on_log_multiplier); -} - -/* bounds checked version of the above */ -static size_t bucket_for(gpr_histogram *h, double x) { - size_t bucket = bucket_for_unchecked(h, GPR_CLAMP(x, 1.0, h->max_possible)); - GPR_ASSERT(bucket < h->num_buckets); - return bucket; -} - -/* at what value does a bucket start? 
*/ -static double bucket_start(gpr_histogram *h, double x) { - return pow(h->multiplier, x); -} - -gpr_histogram *gpr_histogram_create(double resolution, - double max_bucket_start) { - gpr_histogram *h = (gpr_histogram *)gpr_malloc(sizeof(gpr_histogram)); - GPR_ASSERT(resolution > 0.0); - GPR_ASSERT(max_bucket_start > resolution); - h->sum = 0.0; - h->sum_of_squares = 0.0; - h->multiplier = 1.0 + resolution; - h->one_on_log_multiplier = 1.0 / log(1.0 + resolution); - h->max_possible = max_bucket_start; - h->count = 0.0; - h->min_seen = max_bucket_start; - h->max_seen = 0.0; - h->num_buckets = bucket_for_unchecked(h, max_bucket_start) + 1; - GPR_ASSERT(h->num_buckets > 1); - GPR_ASSERT(h->num_buckets < 100000000); - h->buckets = (uint32_t *)gpr_zalloc(sizeof(uint32_t) * h->num_buckets); - return h; -} - -void gpr_histogram_destroy(gpr_histogram *h) { - gpr_free(h->buckets); - gpr_free(h); -} - -void gpr_histogram_add(gpr_histogram *h, double x) { - h->sum += x; - h->sum_of_squares += x * x; - h->count++; - if (x < h->min_seen) { - h->min_seen = x; - } - if (x > h->max_seen) { - h->max_seen = x; - } - h->buckets[bucket_for(h, x)]++; -} - -int gpr_histogram_merge(gpr_histogram *dst, const gpr_histogram *src) { - if ((dst->num_buckets != src->num_buckets) || - (dst->multiplier != src->multiplier)) { - /* Fail because these histograms don't match */ - return 0; - } - gpr_histogram_merge_contents(dst, src->buckets, src->num_buckets, - src->min_seen, src->max_seen, src->sum, - src->sum_of_squares, src->count); - return 1; -} - -void gpr_histogram_merge_contents(gpr_histogram *dst, const uint32_t *data, - size_t data_count, double min_seen, - double max_seen, double sum, - double sum_of_squares, double count) { - size_t i; - GPR_ASSERT(dst->num_buckets == data_count); - dst->sum += sum; - dst->sum_of_squares += sum_of_squares; - dst->count += count; - if (min_seen < dst->min_seen) { - dst->min_seen = min_seen; - } - if (max_seen > dst->max_seen) { - dst->max_seen = max_seen; - } - for (i = 0; i < dst->num_buckets; i++) { - dst->buckets[i] += data[i]; - } -} - -static double threshold_for_count_below(gpr_histogram *h, double count_below) { - double count_so_far; - double lower_bound; - double upper_bound; - size_t lower_idx; - size_t upper_idx; - - if (h->count == 0) { - return 0.0; - } - - if (count_below <= 0) { - return h->min_seen; - } - if (count_below >= h->count) { - return h->max_seen; - } - - /* find the lowest bucket that gets us above count_below */ - count_so_far = 0.0; - for (lower_idx = 0; lower_idx < h->num_buckets; lower_idx++) { - count_so_far += h->buckets[lower_idx]; - if (count_so_far >= count_below) { - break; - } - } - if (count_so_far == count_below) { - /* this bucket hits the threshold exactly... 
we should be midway through - any run of zero values following the bucket */ - for (upper_idx = lower_idx + 1; upper_idx < h->num_buckets; upper_idx++) { - if (h->buckets[upper_idx]) { - break; - } - } - return (bucket_start(h, (double)lower_idx) + - bucket_start(h, (double)upper_idx)) / - 2.0; - } else { - /* treat values as uniform throughout the bucket, and find where this value - should lie */ - lower_bound = bucket_start(h, (double)lower_idx); - upper_bound = bucket_start(h, (double)(lower_idx + 1)); - return GPR_CLAMP(upper_bound - - (upper_bound - lower_bound) * - (count_so_far - count_below) / - h->buckets[lower_idx], - h->min_seen, h->max_seen); - } -} - -double gpr_histogram_percentile(gpr_histogram *h, double percentile) { - return threshold_for_count_below(h, h->count * percentile / 100.0); -} - -double gpr_histogram_mean(gpr_histogram *h) { - GPR_ASSERT(h->count != 0); - return h->sum / h->count; -} - -double gpr_histogram_stddev(gpr_histogram *h) { - return sqrt(gpr_histogram_variance(h)); -} - -double gpr_histogram_variance(gpr_histogram *h) { - if (h->count == 0) return 0.0; - return (h->sum_of_squares * h->count - h->sum * h->sum) / - (h->count * h->count); -} - -double gpr_histogram_maximum(gpr_histogram *h) { return h->max_seen; } - -double gpr_histogram_minimum(gpr_histogram *h) { return h->min_seen; } - -double gpr_histogram_count(gpr_histogram *h) { return h->count; } - -double gpr_histogram_sum(gpr_histogram *h) { return h->sum; } - -double gpr_histogram_sum_of_squares(gpr_histogram *h) { - return h->sum_of_squares; -} - -const uint32_t *gpr_histogram_get_contents(gpr_histogram *h, size_t *size) { - *size = h->num_buckets; - return h->buckets; -} diff --git a/Sources/CgRPC/src/core/lib/support/memory.h b/Sources/CgRPC/src/core/lib/support/memory.h deleted file mode 100644 index dc3d32e1c..000000000 --- a/Sources/CgRPC/src/core/lib/support/memory.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_SUPPORT_MEMORY_H -#define GRPC_CORE_LIB_SUPPORT_MEMORY_H - -#include - -#include -#include - -namespace grpc_core { - -// Alternative to new, since we cannot use it (for fear of libstdc++) -template -inline T* New(Args&&... args) { - void* p = gpr_malloc(sizeof(T)); - return new (p) T(std::forward(args)...); -} - -// Alternative to delete, since we cannot use it (for fear of libstdc++) -template -inline void Delete(T* p) { - p->~T(); - gpr_free(p); -} - -template -class DefaultDelete { - public: - void operator()(T* p) { Delete(p); } -}; - -template > -using UniquePtr = std::unique_ptr; - -template -inline UniquePtr MakeUnique(Args&&... 
args) { - return UniquePtr(New(std::forward(args)...)); -} - -} // namespace grpc_core - -#endif /* GRPC_CORE_LIB_SUPPORT_MEMORY_H */ diff --git a/Sources/CgRPC/src/core/lib/support/mpscq.c b/Sources/CgRPC/src/core/lib/support/mpscq.c deleted file mode 100644 index e9f893988..000000000 --- a/Sources/CgRPC/src/core/lib/support/mpscq.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/support/mpscq.h" - -#include - -void gpr_mpscq_init(gpr_mpscq *q) { - gpr_atm_no_barrier_store(&q->head, (gpr_atm)&q->stub); - q->tail = &q->stub; - gpr_atm_no_barrier_store(&q->stub.next, (gpr_atm)NULL); -} - -void gpr_mpscq_destroy(gpr_mpscq *q) { - GPR_ASSERT(gpr_atm_no_barrier_load(&q->head) == (gpr_atm)&q->stub); - GPR_ASSERT(q->tail == &q->stub); -} - -void gpr_mpscq_push(gpr_mpscq *q, gpr_mpscq_node *n) { - gpr_atm_no_barrier_store(&n->next, (gpr_atm)NULL); - gpr_mpscq_node *prev = - (gpr_mpscq_node *)gpr_atm_full_xchg(&q->head, (gpr_atm)n); - gpr_atm_rel_store(&prev->next, (gpr_atm)n); -} - -gpr_mpscq_node *gpr_mpscq_pop(gpr_mpscq *q) { - bool empty; - return gpr_mpscq_pop_and_check_end(q, &empty); -} - -gpr_mpscq_node *gpr_mpscq_pop_and_check_end(gpr_mpscq *q, bool *empty) { - gpr_mpscq_node *tail = q->tail; - gpr_mpscq_node *next = (gpr_mpscq_node *)gpr_atm_acq_load(&tail->next); - if (tail == &q->stub) { - // indicates the list is actually (ephemerally) empty - if (next == NULL) { - *empty = true; - return NULL; - } - q->tail = next; - tail = next; - next = (gpr_mpscq_node *)gpr_atm_acq_load(&tail->next); - } - if (next != NULL) { - *empty = false; - q->tail = next; - return tail; - } - gpr_mpscq_node *head = (gpr_mpscq_node *)gpr_atm_acq_load(&q->head); - if (tail != head) { - *empty = false; - // indicates a retry is in order: we're still adding - return NULL; - } - gpr_mpscq_push(q, &q->stub); - next = (gpr_mpscq_node *)gpr_atm_acq_load(&tail->next); - if (next != NULL) { - q->tail = next; - return tail; - } - // indicates a retry is in order: we're still adding - *empty = false; - return NULL; -} diff --git a/Sources/CgRPC/src/core/lib/support/mpscq.h b/Sources/CgRPC/src/core/lib/support/mpscq.h deleted file mode 100644 index daa51768f..000000000 --- a/Sources/CgRPC/src/core/lib/support/mpscq.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#ifndef GRPC_CORE_LIB_SUPPORT_MPSCQ_H -#define GRPC_CORE_LIB_SUPPORT_MPSCQ_H - -#include -#include -#include - -// Multiple-producer single-consumer lock free queue, based upon the -// implementation from Dmitry Vyukov here: -// http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue - -// List node (include this in a data structure at the top, and add application -// fields after it - to simulate inheritance) -typedef struct gpr_mpscq_node { gpr_atm next; } gpr_mpscq_node; - -// Actual queue type -typedef struct gpr_mpscq { - gpr_atm head; - // make sure head & tail don't share a cacheline - char padding[GPR_CACHELINE_SIZE]; - gpr_mpscq_node *tail; - gpr_mpscq_node stub; -} gpr_mpscq; - -void gpr_mpscq_init(gpr_mpscq *q); -void gpr_mpscq_destroy(gpr_mpscq *q); -// Push a node -void gpr_mpscq_push(gpr_mpscq *q, gpr_mpscq_node *n); -// Pop a node (returns NULL if no node is ready - which doesn't indicate that -// the queue is empty!!) -gpr_mpscq_node *gpr_mpscq_pop(gpr_mpscq *q); -// Pop a node; sets *empty to true if the queue is empty, or false if it is not -gpr_mpscq_node *gpr_mpscq_pop_and_check_end(gpr_mpscq *q, bool *empty); - -#endif /* GRPC_CORE_LIB_SUPPORT_MPSCQ_H */ diff --git a/Sources/CgRPC/src/core/lib/support/stack_lockfree.c b/Sources/CgRPC/src/core/lib/support/stack_lockfree.c deleted file mode 100644 index 0fb64ed00..000000000 --- a/Sources/CgRPC/src/core/lib/support/stack_lockfree.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/support/stack_lockfree.h" - -#include -#include - -#include -#include -#include -#include - -/* The lockfree node structure is a single architecture-level - word that allows for an atomic CAS to set it up. */ -struct lockfree_node_contents { - /* next thing to look at. 
Actual index for head, next index otherwise */ - uint16_t index; -#ifdef GPR_ARCH_64 - uint16_t pad; - uint32_t aba_ctr; -#else -#ifdef GPR_ARCH_32 - uint16_t aba_ctr; -#else -#error Unsupported bit width architecture -#endif -#endif -}; - -/* Use a union to make sure that these are in the same bits as an atm word */ -typedef union lockfree_node { - gpr_atm atm; - struct lockfree_node_contents contents; -} lockfree_node; - -/* make sure that entries aligned to 8-bytes */ -#define ENTRY_ALIGNMENT_BITS 3 -/* reserve this entry as invalid */ -#define INVALID_ENTRY_INDEX ((1 << 16) - 1) - -struct gpr_stack_lockfree { - lockfree_node *entries; - lockfree_node head; /* An atomic entry describing curr head */ -}; - -gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries) { - gpr_stack_lockfree *stack; - stack = (gpr_stack_lockfree *)gpr_malloc(sizeof(*stack)); - /* Since we only allocate 16 bits to represent an entry number, - * make sure that we are within the desired range */ - /* Reserve the highest entry number as a dummy */ - GPR_ASSERT(entries < INVALID_ENTRY_INDEX); - stack->entries = (lockfree_node *)gpr_malloc_aligned( - entries * sizeof(stack->entries[0]), ENTRY_ALIGNMENT_BITS); - /* Clear out all entries */ - memset(stack->entries, 0, entries * sizeof(stack->entries[0])); - memset(&stack->head, 0, sizeof(stack->head)); - - GPR_ASSERT(sizeof(stack->entries->atm) == sizeof(stack->entries->contents)); - - /* Point the head at reserved dummy entry */ - stack->head.contents.index = INVALID_ENTRY_INDEX; -/* Fill in the pad and aba_ctr to avoid confusing memcheck tools */ -#ifdef GPR_ARCH_64 - stack->head.contents.pad = 0; -#endif - stack->head.contents.aba_ctr = 0; - return stack; -} - -void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack) { - gpr_free_aligned(stack->entries); - gpr_free(stack); -} - -int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) { - lockfree_node head; - lockfree_node newhead; - lockfree_node curent; - lockfree_node newent; - - /* First fill in the entry's index and aba ctr for new head */ - newhead.contents.index = (uint16_t)entry; -#ifdef GPR_ARCH_64 - /* Fill in the pad to avoid confusing memcheck tools */ - newhead.contents.pad = 0; -#endif - - /* Also post-increment the aba_ctr */ - curent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm); - newhead.contents.aba_ctr = ++curent.contents.aba_ctr; - gpr_atm_no_barrier_store(&stack->entries[entry].atm, curent.atm); - - do { - /* Atomically get the existing head value for use */ - head.atm = gpr_atm_no_barrier_load(&(stack->head.atm)); - /* Point to it */ - newent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm); - newent.contents.index = head.contents.index; - gpr_atm_no_barrier_store(&stack->entries[entry].atm, newent.atm); - } while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm)); - /* Use rel_cas above to make sure that entry index is set properly */ - return head.contents.index == INVALID_ENTRY_INDEX; -} - -int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) { - lockfree_node head; - lockfree_node newhead; - - do { - head.atm = gpr_atm_acq_load(&(stack->head.atm)); - if (head.contents.index == INVALID_ENTRY_INDEX) { - return -1; - } - newhead.atm = - gpr_atm_no_barrier_load(&(stack->entries[head.contents.index].atm)); - - } while (!gpr_atm_no_barrier_cas(&(stack->head.atm), head.atm, newhead.atm)); - - return head.contents.index; -} diff --git a/Sources/CgRPC/src/core/lib/support/stack_lockfree.h b/Sources/CgRPC/src/core/lib/support/stack_lockfree.h 
deleted file mode 100644 index 6324211b7..000000000 --- a/Sources/CgRPC/src/core/lib/support/stack_lockfree.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H -#define GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H - -#include - -typedef struct gpr_stack_lockfree gpr_stack_lockfree; - -/* This stack must specify the maximum number of entries to track. - The current implementation only allows up to 65534 entries */ -gpr_stack_lockfree *gpr_stack_lockfree_create(size_t entries); -void gpr_stack_lockfree_destroy(gpr_stack_lockfree *stack); - -/* Pass in a valid entry number for the next stack entry */ -/* Returns 1 if this is the first element on the stack, 0 otherwise */ -int gpr_stack_lockfree_push(gpr_stack_lockfree *, int entry); - -/* Returns -1 on empty or the actual entry number */ -int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack); - -#endif /* GRPC_CORE_LIB_SUPPORT_STACK_LOCKFREE_H */ diff --git a/Sources/CgRPC/src/core/lib/support/subprocess_posix.c b/Sources/CgRPC/src/core/lib/support/subprocess_posix.c deleted file mode 100644 index af75162ee..000000000 --- a/Sources/CgRPC/src/core/lib/support/subprocess_posix.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include - -#ifdef GPR_POSIX_SUBPROCESS - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -struct gpr_subprocess { - int pid; - bool joined; -}; - -const char *gpr_subprocess_binary_extension() { return ""; } - -gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) { - gpr_subprocess *r; - int pid; - char **exec_args; - - pid = fork(); - if (pid == -1) { - return NULL; - } else if (pid == 0) { - exec_args = (char **)gpr_malloc(((size_t)argc + 1) * sizeof(char *)); - memcpy(exec_args, argv, (size_t)argc * sizeof(char *)); - exec_args[argc] = NULL; - execv(exec_args[0], exec_args); - /* if we reach here, an error has occurred */ - gpr_log(GPR_ERROR, "execv '%s' failed: %s", exec_args[0], strerror(errno)); - _exit(1); - return NULL; - } else { - r = (gpr_subprocess *)gpr_zalloc(sizeof(gpr_subprocess)); - r->pid = pid; - return r; - } -} - -void gpr_subprocess_destroy(gpr_subprocess *p) { - if (!p->joined) { - kill(p->pid, SIGKILL); - gpr_subprocess_join(p); - } - gpr_free(p); -} - -int gpr_subprocess_join(gpr_subprocess *p) { - int status; -retry: - if (waitpid(p->pid, &status, 0) == -1) { - if (errno == EINTR) { - goto retry; - } - gpr_log(GPR_ERROR, "waitpid failed for pid %d: %s", p->pid, - strerror(errno)); - return -1; - } - p->joined = true; - return status; -} - -void gpr_subprocess_interrupt(gpr_subprocess *p) { - if (!p->joined) { - kill(p->pid, SIGINT); - } -} - -#endif /* GPR_POSIX_SUBPROCESS */ diff --git a/Sources/CgRPC/src/core/lib/support/subprocess_windows.c b/Sources/CgRPC/src/core/lib/support/subprocess_windows.c deleted file mode 100644 index 7412f8d34..000000000 --- a/Sources/CgRPC/src/core/lib/support/subprocess_windows.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include - -#ifdef GPR_WINDOWS_SUBPROCESS - -#include -#include -#include - -#include -#include -#include -#include "src/core/lib/support/string.h" -#include "src/core/lib/support/string_windows.h" - -struct gpr_subprocess { - PROCESS_INFORMATION pi; - int joined; - int interrupted; -}; - -const char *gpr_subprocess_binary_extension() { return ".exe"; } - -gpr_subprocess *gpr_subprocess_create(int argc, const char **argv) { - gpr_subprocess *r; - - STARTUPINFO si; - PROCESS_INFORMATION pi; - - char *args = gpr_strjoin_sep(argv, (size_t)argc, " ", NULL); - TCHAR *args_tchar; - - args_tchar = gpr_char_to_tchar(args); - gpr_free(args); - - memset(&si, 0, sizeof(si)); - si.cb = sizeof(si); - memset(&pi, 0, sizeof(pi)); - - if (!CreateProcess(NULL, args_tchar, NULL, NULL, FALSE, - CREATE_NEW_PROCESS_GROUP, NULL, NULL, &si, &pi)) { - gpr_free(args_tchar); - return NULL; - } - gpr_free(args_tchar); - - r = gpr_malloc(sizeof(gpr_subprocess)); - memset(r, 0, sizeof(*r)); - r->pi = pi; - return r; -} - -void gpr_subprocess_destroy(gpr_subprocess *p) { - if (p) { - if (!p->joined) { - gpr_subprocess_interrupt(p); - gpr_subprocess_join(p); - } - if (p->pi.hProcess) { - CloseHandle(p->pi.hProcess); - } - if (p->pi.hThread) { - CloseHandle(p->pi.hThread); - } - gpr_free(p); - } -} - -int gpr_subprocess_join(gpr_subprocess *p) { - DWORD dwExitCode; - if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) { - if (dwExitCode == STILL_ACTIVE) { - if (WaitForSingleObject(p->pi.hProcess, INFINITE) == WAIT_OBJECT_0) { - p->joined = 1; - goto getExitCode; - } - return -1; // failed to join - } else { - goto getExitCode; - } - } else { - return -1; // failed to get exit code - } - -getExitCode: - if (p->interrupted) { - return 0; - } - if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) { - return (int)dwExitCode; - } else { - return -1; // failed to get exit code - } -} - -void gpr_subprocess_interrupt(gpr_subprocess *p) { - DWORD dwExitCode; - if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) { - if (dwExitCode == STILL_ACTIVE) { - gpr_log(GPR_INFO, "sending ctrl-break"); - GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, p->pi.dwProcessId); - p->joined = 1; - p->interrupted = 1; - } - } - return; -} - -#endif /* GPR_WINDOWS_SUBPROCESS */ diff --git a/Sources/CgRPC/src/core/lib/support/thd.c b/Sources/CgRPC/src/core/lib/support/thd.c deleted file mode 100644 index ca62615d6..000000000 --- a/Sources/CgRPC/src/core/lib/support/thd.c +++ /dev/null @@ -1,49 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Posix implementation for gpr threads. 
*/ - -#include - -#include - -enum { GPR_THD_JOINABLE = 1 }; - -gpr_thd_options gpr_thd_options_default(void) { - gpr_thd_options options; - memset(&options, 0, sizeof(options)); - return options; -} - -void gpr_thd_options_set_detached(gpr_thd_options* options) { - options->flags &= ~GPR_THD_JOINABLE; -} - -void gpr_thd_options_set_joinable(gpr_thd_options* options) { - options->flags |= GPR_THD_JOINABLE; -} - -int gpr_thd_options_is_detached(const gpr_thd_options* options) { - if (!options) return 1; - return (options->flags & GPR_THD_JOINABLE) == 0; -} - -int gpr_thd_options_is_joinable(const gpr_thd_options* options) { - if (!options) return 0; - return (options->flags & GPR_THD_JOINABLE) == GPR_THD_JOINABLE; -} diff --git a/Sources/CgRPC/src/core/lib/support/thd_internal.h b/Sources/CgRPC/src/core/lib/support/thd_internal.h deleted file mode 100644 index 38bffc847..000000000 --- a/Sources/CgRPC/src/core/lib/support/thd_internal.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_CORE_LIB_SUPPORT_THD_INTERNAL_H -#define GRPC_CORE_LIB_SUPPORT_THD_INTERNAL_H - -#include - -/* Internal interfaces between modules within the gpr support library. */ -void gpr_thd_init(); - -/* Wait for all outstanding threads to finish, up to deadline */ -int gpr_await_threads(gpr_timespec deadline); - -#endif /* GRPC_CORE_LIB_SUPPORT_THD_INTERNAL_H */ diff --git a/Sources/CgRPC/src/core/lib/support/thd_posix.c b/Sources/CgRPC/src/core/lib/support/thd_posix.c deleted file mode 100644 index 219297c04..000000000 --- a/Sources/CgRPC/src/core/lib/support/thd_posix.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Posix implementation for gpr threads. */ - -#include - -#ifdef GPR_POSIX_SYNC - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "src/core/lib/support/fork.h" - -static gpr_mu g_mu; -static gpr_cv g_cv; -static int g_thread_count; -static int g_awaiting_threads; - -struct thd_arg { - void (*body)(void *arg); /* body of a thread */ - void *arg; /* argument to a thread */ -}; - -static void inc_thd_count(); -static void dec_thd_count(); - -/* Body of every thread started via gpr_thd_new. 
*/ -static void *thread_body(void *v) { - struct thd_arg a = *(struct thd_arg *)v; - free(v); - (*a.body)(a.arg); - dec_thd_count(); - return NULL; -} - -int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg, - const gpr_thd_options *options) { - int thread_started; - pthread_attr_t attr; - pthread_t p; - /* don't use gpr_malloc as we may cause an infinite recursion with - * the profiling code */ - struct thd_arg *a = (struct thd_arg *)malloc(sizeof(*a)); - GPR_ASSERT(a != NULL); - a->body = thd_body; - a->arg = arg; - inc_thd_count(); - - GPR_ASSERT(pthread_attr_init(&attr) == 0); - if (gpr_thd_options_is_detached(options)) { - GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == - 0); - } else { - GPR_ASSERT(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE) == - 0); - } - thread_started = (pthread_create(&p, &attr, &thread_body, a) == 0); - GPR_ASSERT(pthread_attr_destroy(&attr) == 0); - if (!thread_started) { - /* don't use gpr_free, as this was allocated using malloc (see above) */ - free(a); - dec_thd_count(); - } - *t = (gpr_thd_id)p; - return thread_started; -} - -gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)pthread_self(); } - -void gpr_thd_join(gpr_thd_id t) { pthread_join((pthread_t)t, NULL); } - -/***************************************** - * Only used when fork support is enabled - */ - -static void inc_thd_count() { - if (grpc_fork_support_enabled()) { - gpr_mu_lock(&g_mu); - g_thread_count++; - gpr_mu_unlock(&g_mu); - } -} - -static void dec_thd_count() { - if (grpc_fork_support_enabled()) { - gpr_mu_lock(&g_mu); - g_thread_count--; - if (g_awaiting_threads && g_thread_count == 0) { - gpr_cv_signal(&g_cv); - } - gpr_mu_unlock(&g_mu); - } -} - -void gpr_thd_init() { - gpr_mu_init(&g_mu); - gpr_cv_init(&g_cv); - g_thread_count = 0; - g_awaiting_threads = 0; -} - -int gpr_await_threads(gpr_timespec deadline) { - gpr_mu_lock(&g_mu); - g_awaiting_threads = 1; - int res = 0; - if (g_thread_count > 0) { - res = gpr_cv_wait(&g_cv, &g_mu, deadline); - } - g_awaiting_threads = 0; - gpr_mu_unlock(&g_mu); - return res == 0; -} - -#endif /* GPR_POSIX_SYNC */ diff --git a/Sources/CgRPC/src/core/lib/support/thd_windows.c b/Sources/CgRPC/src/core/lib/support/thd_windows.c deleted file mode 100644 index 4c6013bf3..000000000 --- a/Sources/CgRPC/src/core/lib/support/thd_windows.c +++ /dev/null @@ -1,104 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Windows implementation for gpr threads. 
*/ - -#include - -#ifdef GPR_WINDOWS - -#include -#include -#include -#include - -#if defined(_MSC_VER) -#define thread_local __declspec(thread) -#elif defined(__GNUC__) -#define thread_local __thread -#else -#error "Unknown compiler - please file a bug report" -#endif - -struct thd_info { - void (*body)(void *arg); /* body of a thread */ - void *arg; /* argument to a thread */ - HANDLE join_event; /* if joinable, the join event */ - int joinable; /* true if not detached */ -}; - -static thread_local struct thd_info *g_thd_info; - -/* Destroys a thread info */ -static void destroy_thread(struct thd_info *t) { - if (t->joinable) CloseHandle(t->join_event); - gpr_free(t); -} - -void gpr_thd_init(void) {} - -/* Body of every thread started via gpr_thd_new. */ -static DWORD WINAPI thread_body(void *v) { - g_thd_info = (struct thd_info *)v; - g_thd_info->body(g_thd_info->arg); - if (g_thd_info->joinable) { - BOOL ret = SetEvent(g_thd_info->join_event); - GPR_ASSERT(ret); - } else { - destroy_thread(g_thd_info); - } - return 0; -} - -int gpr_thd_new(gpr_thd_id *t, void (*thd_body)(void *arg), void *arg, - const gpr_thd_options *options) { - HANDLE handle; - struct thd_info *info = gpr_malloc(sizeof(*info)); - info->body = thd_body; - info->arg = arg; - *t = 0; - if (gpr_thd_options_is_joinable(options)) { - info->joinable = 1; - info->join_event = CreateEvent(NULL, FALSE, FALSE, NULL); - if (info->join_event == NULL) { - gpr_free(info); - return 0; - } - } else { - info->joinable = 0; - } - handle = CreateThread(NULL, 64 * 1024, thread_body, info, 0, NULL); - if (handle == NULL) { - destroy_thread(info); - } else { - *t = (gpr_thd_id)info; - CloseHandle(handle); - } - return handle != NULL; -} - -gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)g_thd_info; } - -void gpr_thd_join(gpr_thd_id t) { - struct thd_info *info = (struct thd_info *)t; - DWORD ret = WaitForSingleObject(info->join_event, INFINITE); - GPR_ASSERT(ret == WAIT_OBJECT_0); - destroy_thread(info); -} - -#endif /* GPR_WINDOWS */ diff --git a/Sources/CgRPC/src/core/lib/surface/alarm.c b/Sources/CgRPC/src/core/lib/surface/alarm.c deleted file mode 100644 index 7712f560b..000000000 --- a/Sources/CgRPC/src/core/lib/surface/alarm.c +++ /dev/null @@ -1,139 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -#include "src/core/lib/surface/alarm_internal.h" - -#include -#include -#include -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/surface/completion_queue.h" - -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_alarm_refcount = - GRPC_TRACER_INITIALIZER(false, "alarm_refcount"); -#endif - -struct grpc_alarm { - gpr_refcount refs; - grpc_timer alarm; - grpc_closure on_alarm; - grpc_cq_completion completion; - /** completion queue where events about this alarm will be posted */ - grpc_completion_queue *cq; - /** user supplied tag */ - void *tag; -}; - -static void alarm_ref(grpc_alarm *alarm) { gpr_ref(&alarm->refs); } - -static void alarm_unref(grpc_alarm *alarm) { - if (gpr_unref(&alarm->refs)) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - if (alarm->cq != NULL) { - GRPC_CQ_INTERNAL_UNREF(&exec_ctx, alarm->cq, "alarm"); - } - grpc_exec_ctx_finish(&exec_ctx); - gpr_free(alarm); - } -} - -#ifndef NDEBUG -static void alarm_ref_dbg(grpc_alarm *alarm, const char *reason, - const char *file, int line) { - if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) { - gpr_atm val = gpr_atm_no_barrier_load(&alarm->refs.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "Alarm:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", alarm, val, - val + 1, reason); - } - - alarm_ref(alarm); -} - -static void alarm_unref_dbg(grpc_alarm *alarm, const char *reason, - const char *file, int line) { - if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) { - gpr_atm val = gpr_atm_no_barrier_load(&alarm->refs.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "Alarm:%p Unref %" PRIdPTR " -> %" PRIdPTR " %s", alarm, val, - val - 1, reason); - } - - alarm_unref(alarm); -} -#endif - -static void alarm_end_completion(grpc_exec_ctx *exec_ctx, void *arg, - grpc_cq_completion *c) { - grpc_alarm *alarm = (grpc_alarm *)arg; - GRPC_ALARM_UNREF(alarm, "dequeue-end-op"); -} - -static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - grpc_alarm *alarm = (grpc_alarm *)arg; - - /* We are queuing an op on completion queue. This means, the alarm's structure - cannot be destroyed until the op is dequeued. 
Adding an extra ref - here and unref'ing when the op is dequeued will achieve this */ - GRPC_ALARM_REF(alarm, "queue-end-op"); - grpc_cq_end_op(exec_ctx, alarm->cq, alarm->tag, error, alarm_end_completion, - (void *)alarm, &alarm->completion); -} - -grpc_alarm *grpc_alarm_create(void *reserved) { - grpc_alarm *alarm = (grpc_alarm *)gpr_malloc(sizeof(grpc_alarm)); - -#ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) { - gpr_log(GPR_DEBUG, "Alarm:%p created (ref: 1)", alarm); - } -#endif - - gpr_ref_init(&alarm->refs, 1); - grpc_timer_init_unset(&alarm->alarm); - alarm->cq = NULL; - GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm, - grpc_schedule_on_exec_ctx); - return alarm; -} - -void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq, - gpr_timespec deadline, void *tag, void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - - GRPC_CQ_INTERNAL_REF(cq, "alarm"); - alarm->cq = cq; - alarm->tag = tag; - - GPR_ASSERT(grpc_cq_begin_op(cq, tag)); - grpc_timer_init(&exec_ctx, &alarm->alarm, - gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC), - &alarm->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC)); - grpc_exec_ctx_finish(&exec_ctx); -} - -void grpc_alarm_cancel(grpc_alarm *alarm, void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_timer_cancel(&exec_ctx, &alarm->alarm); - grpc_exec_ctx_finish(&exec_ctx); -} - -void grpc_alarm_destroy(grpc_alarm *alarm, void *reserved) { - grpc_alarm_cancel(alarm, reserved); - GRPC_ALARM_UNREF(alarm, "alarm_destroy"); -} diff --git a/Sources/CgRPC/src/core/lib/surface/alarm_internal.h b/Sources/CgRPC/src/core/lib/surface/alarm_internal.h deleted file mode 100644 index 7f2126c5c..000000000 --- a/Sources/CgRPC/src/core/lib/surface/alarm_internal.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Copyright 2015-2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#ifndef GRPC_CORE_LIB_SURFACE_ALARM_INTERNAL_H -#define GRPC_CORE_LIB_SURFACE_ALARM_INTERNAL_H - -#include -#include "src/core/lib/debug/trace.h" - -#ifndef NDEBUG - -extern grpc_tracer_flag grpc_trace_alarm_refcount; - -#define GRPC_ALARM_REF(a, reason) alarm_ref_dbg(a, reason, __FILE__, __LINE__) -#define GRPC_ALARM_UNREF(a, reason) \ - alarm_unref_dbg(a, reason, __FILE__, __LINE__) - -#else /* !defined(NDEBUG) */ - -#define GRPC_ALARM_REF(a, reason) alarm_ref(a) -#define GRPC_ALARM_UNREF(a, reason) alarm_unref(a) - -#endif /* defined(NDEBUG) */ - -#endif /* GRPC_CORE_LIB_SURFACE_ALARM_INTERNAL_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/api_trace.c b/Sources/CgRPC/src/core/lib/surface/api_trace.cc similarity index 88% rename from Sources/CgRPC/src/core/lib/surface/api_trace.c rename to Sources/CgRPC/src/core/lib/surface/api_trace.cc index 56973303d..bab5a7910 100644 --- a/Sources/CgRPC/src/core/lib/surface/api_trace.c +++ b/Sources/CgRPC/src/core/lib/surface/api_trace.cc @@ -16,7 +16,9 @@ * */ -#include "src/core/lib/surface/api_trace.h" +#include + #include "src/core/lib/debug/trace.h" +#include "src/core/lib/surface/api_trace.h" -grpc_tracer_flag grpc_api_trace = GRPC_TRACER_INITIALIZER(false, "api"); +grpc_core::TraceFlag grpc_api_trace(false, "api"); diff --git a/Sources/CgRPC/src/core/lib/surface/api_trace.h b/Sources/CgRPC/src/core/lib/surface/api_trace.h index 849cbaaef..72ed83055 100644 --- a/Sources/CgRPC/src/core/lib/surface/api_trace.h +++ b/Sources/CgRPC/src/core/lib/surface/api_trace.h @@ -19,10 +19,12 @@ #ifndef GRPC_CORE_LIB_SURFACE_API_TRACE_H #define GRPC_CORE_LIB_SURFACE_API_TRACE_H +#include + #include #include "src/core/lib/debug/trace.h" -extern grpc_tracer_flag grpc_api_trace; +extern grpc_core::TraceFlag grpc_api_trace; /* Provide unwrapping macros because we're in C89 and variadic macros weren't introduced until C99... */ @@ -43,7 +45,7 @@ extern grpc_tracer_flag grpc_api_trace; /* Due to the limitations of C89's preprocessor, the arity of the var-arg list 'nargs' must be specified. 
*/ #define GRPC_API_TRACE(fmt, nargs, args) \ - if (GRPC_TRACER_ON(grpc_api_trace)) { \ + if (grpc_api_trace.enabled()) { \ gpr_log(GPR_INFO, fmt GRPC_API_TRACE_UNWRAP##nargs args); \ } diff --git a/Sources/CgRPC/src/core/lib/surface/byte_buffer.c b/Sources/CgRPC/src/core/lib/surface/byte_buffer.cc similarity index 70% rename from Sources/CgRPC/src/core/lib/surface/byte_buffer.c rename to Sources/CgRPC/src/core/lib/surface/byte_buffer.cc index 7ed550ef8..6246796e4 100644 --- a/Sources/CgRPC/src/core/lib/surface/byte_buffer.c +++ b/Sources/CgRPC/src/core/lib/surface/byte_buffer.cc @@ -16,24 +16,27 @@ * */ +#include + #include #include #include +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/slice/slice_internal.h" -grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices, +grpc_byte_buffer* grpc_raw_byte_buffer_create(grpc_slice* slices, size_t nslices) { return grpc_raw_compressed_byte_buffer_create(slices, nslices, GRPC_COMPRESS_NONE); } -grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create( - grpc_slice *slices, size_t nslices, +grpc_byte_buffer* grpc_raw_compressed_byte_buffer_create( + grpc_slice* slices, size_t nslices, grpc_compression_algorithm compression) { size_t i; - grpc_byte_buffer *bb = - (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer)); + grpc_byte_buffer* bb = + static_cast(gpr_malloc(sizeof(grpc_byte_buffer))); bb->type = GRPC_BB_RAW; bb->data.raw.compression = compression; grpc_slice_buffer_init(&bb->data.raw.slice_buffer); @@ -44,10 +47,10 @@ grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create( return bb; } -grpc_byte_buffer *grpc_raw_byte_buffer_from_reader( - grpc_byte_buffer_reader *reader) { - grpc_byte_buffer *bb = - (grpc_byte_buffer *)gpr_malloc(sizeof(grpc_byte_buffer)); +grpc_byte_buffer* grpc_raw_byte_buffer_from_reader( + grpc_byte_buffer_reader* reader) { + grpc_byte_buffer* bb = + static_cast(gpr_malloc(sizeof(grpc_byte_buffer))); grpc_slice slice; bb->type = GRPC_BB_RAW; bb->data.raw.compression = GRPC_COMPRESS_NONE; @@ -59,29 +62,28 @@ grpc_byte_buffer *grpc_raw_byte_buffer_from_reader( return bb; } -grpc_byte_buffer *grpc_byte_buffer_copy(grpc_byte_buffer *bb) { +grpc_byte_buffer* grpc_byte_buffer_copy(grpc_byte_buffer* bb) { switch (bb->type) { case GRPC_BB_RAW: return grpc_raw_compressed_byte_buffer_create( bb->data.raw.slice_buffer.slices, bb->data.raw.slice_buffer.count, bb->data.raw.compression); } - GPR_UNREACHABLE_CODE(return NULL); + GPR_UNREACHABLE_CODE(return nullptr); } -void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) { +void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) { if (!bb) return; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; switch (bb->type) { case GRPC_BB_RAW: - grpc_slice_buffer_destroy_internal(&exec_ctx, &bb->data.raw.slice_buffer); + grpc_slice_buffer_destroy_internal(&bb->data.raw.slice_buffer); break; } gpr_free(bb); - grpc_exec_ctx_finish(&exec_ctx); } -size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) { +size_t grpc_byte_buffer_length(grpc_byte_buffer* bb) { switch (bb->type) { case GRPC_BB_RAW: return bb->data.raw.slice_buffer.length; diff --git a/Sources/CgRPC/src/core/lib/surface/byte_buffer_reader.c b/Sources/CgRPC/src/core/lib/surface/byte_buffer_reader.cc similarity index 75% rename from Sources/CgRPC/src/core/lib/surface/byte_buffer_reader.c rename to Sources/CgRPC/src/core/lib/surface/byte_buffer_reader.cc index 87bd3239c..1debc98ea 100644 --- a/Sources/CgRPC/src/core/lib/surface/byte_buffer_reader.c +++ 
b/Sources/CgRPC/src/core/lib/surface/byte_buffer_reader.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -27,9 +29,10 @@ #include #include "src/core/lib/compression/message_compress.h" +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/slice/slice_internal.h" -static int is_compressed(grpc_byte_buffer *buffer) { +static int is_compressed(grpc_byte_buffer* buffer) { switch (buffer->type) { case GRPC_BB_RAW: if (buffer->data.raw.compression == GRPC_COMPRESS_NONE) { @@ -40,19 +43,21 @@ static int is_compressed(grpc_byte_buffer *buffer) { return 1 /* GPR_TRUE */; } -int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, - grpc_byte_buffer *buffer) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader, + grpc_byte_buffer* buffer) { + grpc_core::ExecCtx exec_ctx; grpc_slice_buffer decompressed_slices_buffer; reader->buffer_in = buffer; switch (reader->buffer_in->type) { case GRPC_BB_RAW: grpc_slice_buffer_init(&decompressed_slices_buffer); if (is_compressed(reader->buffer_in)) { - if (grpc_msg_decompress(&exec_ctx, - reader->buffer_in->data.raw.compression, - &reader->buffer_in->data.raw.slice_buffer, - &decompressed_slices_buffer) == 0) { + if (grpc_msg_decompress( + + grpc_compression_algorithm_to_message_compression_algorithm( + reader->buffer_in->data.raw.compression), + &reader->buffer_in->data.raw.slice_buffer, + &decompressed_slices_buffer) == 0) { gpr_log(GPR_ERROR, "Unexpected error decompressing data for algorithm with enum " "value '%d'.", @@ -64,19 +69,18 @@ int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader, grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices, decompressed_slices_buffer.count); } - grpc_slice_buffer_destroy_internal(&exec_ctx, - &decompressed_slices_buffer); + grpc_slice_buffer_destroy_internal(&decompressed_slices_buffer); } else { /* not compressed, use the input buffer as output */ reader->buffer_out = reader->buffer_in; } reader->current.index = 0; break; } - grpc_exec_ctx_finish(&exec_ctx); + return 1; } -void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) { +void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader) { switch (reader->buffer_in->type) { case GRPC_BB_RAW: /* keeping the same if-else structure as in the init function */ @@ -87,11 +91,11 @@ void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) { } } -int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader, - grpc_slice *slice) { +int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader, + grpc_slice* slice) { switch (reader->buffer_in->type) { case GRPC_BB_RAW: { - grpc_slice_buffer *slice_buffer; + grpc_slice_buffer* slice_buffer; slice_buffer = &reader->buffer_out->data.raw.slice_buffer; if (reader->current.index < slice_buffer->count) { *slice = grpc_slice_ref_internal( @@ -105,21 +109,21 @@ int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader, return 0; } -grpc_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader *reader) { +grpc_slice grpc_byte_buffer_reader_readall(grpc_byte_buffer_reader* reader) { grpc_slice in_slice; size_t bytes_read = 0; const size_t input_size = grpc_byte_buffer_length(reader->buffer_out); grpc_slice out_slice = GRPC_SLICE_MALLOC(input_size); - uint8_t *const outbuf = GRPC_SLICE_START_PTR(out_slice); /* just an alias */ + uint8_t* const outbuf = GRPC_SLICE_START_PTR(out_slice); /* just an alias */ - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx 
exec_ctx; while (grpc_byte_buffer_reader_next(reader, &in_slice) != 0) { const size_t slice_length = GRPC_SLICE_LENGTH(in_slice); memcpy(&(outbuf[bytes_read]), GRPC_SLICE_START_PTR(in_slice), slice_length); bytes_read += slice_length; - grpc_slice_unref_internal(&exec_ctx, in_slice); + grpc_slice_unref_internal(in_slice); GPR_ASSERT(bytes_read <= input_size); } - grpc_exec_ctx_finish(&exec_ctx); + return out_slice; } diff --git a/Sources/CgRPC/src/core/lib/surface/call.c b/Sources/CgRPC/src/core/lib/surface/call.cc similarity index 54% rename from Sources/CgRPC/src/core/lib/surface/call.c rename to Sources/CgRPC/src/core/lib/surface/call.cc index 03f47553a..da488034c 100644 --- a/Sources/CgRPC/src/core/lib/surface/call.c +++ b/Sources/CgRPC/src/core/lib/surface/call.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include #include @@ -28,25 +30,28 @@ #include #include #include -#include #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/compression/algorithm_metadata.h" #include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/arena.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/gprpp/manual_constructor.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/arena.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/call.h" +#include "src/core/lib/surface/call_test_only.h" #include "src/core/lib/surface/channel.h" #include "src/core/lib/surface/completion_queue.h" #include "src/core/lib/surface/validate_metadata.h" #include "src/core/lib/transport/error_utils.h" #include "src/core/lib/transport/metadata.h" #include "src/core/lib/transport/static_metadata.h" +#include "src/core/lib/transport/status_metadata.h" #include "src/core/lib/transport/transport.h" /** The maximum number of concurrent batches possible. @@ -85,7 +90,7 @@ typedef enum { typedef struct { bool is_set; - grpc_error *error; + grpc_error* error; } received_status; static gpr_atm pack_received_status(received_status r) { @@ -93,16 +98,17 @@ static gpr_atm pack_received_status(received_status r) { } static received_status unpack_received_status(gpr_atm atm) { - return (atm & 1) == 0 - ? (received_status){.is_set = false, .error = GRPC_ERROR_NONE} - : (received_status){.is_set = true, - .error = (grpc_error *)(atm & ~(gpr_atm)1)}; + if ((atm & 1) == 0) { + return {false, GRPC_ERROR_NONE}; + } else { + return {true, (grpc_error*)(atm & ~static_cast(1))}; + } } #define MAX_ERRORS_PER_BATCH 4 typedef struct batch_control { - grpc_call *call; + grpc_call* call; /* Share memory for cq_completion and notify_tag as they are never needed simultaneously. Each byte used in this data structure count as six bytes per call, so any savings we can make are worthwhile, @@ -118,7 +124,7 @@ typedef struct batch_control { \a is_closure is true, \a tag indicates a closure to be invoked; otherwise, \a tag indicates the tag to be used in the notification to be sent to the completion queue. 
*/ - void *tag; + void* tag; bool is_closure; } notify_tag; } completion_data; @@ -126,7 +132,7 @@ typedef struct batch_control { grpc_closure finish_batch; gpr_refcount steps_to_complete; - grpc_error *errors[MAX_ERRORS_PER_BATCH]; + grpc_error* errors[MAX_ERRORS_PER_BATCH]; gpr_atm num_errors; grpc_transport_stream_op_batch op; @@ -134,16 +140,16 @@ typedef struct batch_control { typedef struct { gpr_mu child_list_mu; - grpc_call *first_child; + grpc_call* first_child; } parent_call; typedef struct { - grpc_call *parent; + grpc_call* parent; /** siblings: children of the same parent form a list, and this list is protected under parent->mu */ - grpc_call *sibling_next; - grpc_call *sibling_prev; + grpc_call* sibling_next; + grpc_call* sibling_prev; } child_call; #define RECV_NONE ((gpr_atm)0) @@ -151,14 +157,14 @@ typedef struct { struct grpc_call { gpr_refcount ext_ref; - gpr_arena *arena; + gpr_arena* arena; grpc_call_combiner call_combiner; - grpc_completion_queue *cq; + grpc_completion_queue* cq; grpc_polling_entity pollent; - grpc_channel *channel; + grpc_channel* channel; gpr_timespec start_time; /* parent_call* */ gpr_atm parent_call_atm; - child_call *child; + child_call* child; /* client or server call */ bool is_client; @@ -176,7 +182,7 @@ struct grpc_call { gpr_atm any_ops_sent_atm; gpr_atm received_final_op_atm; - batch_control *active_batches[MAX_CONCURRENT_BATCHES]; + batch_control* active_batches[MAX_CONCURRENT_BATCHES]; grpc_transport_stream_op_batch_payload stream_op_payload; /* first idx: is_receiving, second idx: is_trailing */ @@ -184,7 +190,7 @@ struct grpc_call { /* Buffered read metadata waiting to be returned to the application. Element 0 is initial metadata, element 1 is trailing metadata. */ - grpc_metadata_array *buffered_metadata[2]; + grpc_metadata_array* buffered_metadata[2]; grpc_metadata compression_md; @@ -199,7 +205,7 @@ struct grpc_call { grpc_call_final_info final_info; /* Compression algorithm for *incoming* data */ - grpc_compression_algorithm incoming_compression_algorithm; + grpc_message_compression_algorithm incoming_message_compression_algorithm; /* Stream compression algorithm for *incoming* data */ grpc_stream_compression_algorithm incoming_stream_compression_algorithm; /* Supported encodings (compression algorithms), a bitset */ @@ -214,12 +220,12 @@ struct grpc_call { server, it's trailing metadata */ grpc_linked_mdelem send_extra_metadata[MAX_SEND_EXTRA_METADATA_COUNT]; int send_extra_metadata_count; - gpr_timespec send_deadline; + grpc_millis send_deadline; - grpc_slice_buffer_stream sending_stream; + grpc_core::ManualConstructor sending_stream; - grpc_byte_stream *receiving_stream; - grpc_byte_buffer **receiving_buffer; + grpc_core::OrphanablePtr receiving_stream; + grpc_byte_buffer** receiving_buffer; grpc_slice receiving_slice; grpc_closure receiving_slice_ready; grpc_closure receiving_stream_ready; @@ -230,11 +236,12 @@ struct grpc_call { union { struct { - grpc_status_code *status; - grpc_slice *status_details; + grpc_status_code* status; + grpc_slice* status_details; + const char** error_string; } client; struct { - int *cancelled; + int* cancelled; } server; } final_op; @@ -257,84 +264,78 @@ struct grpc_call { gpr_atm recv_state; }; -grpc_tracer_flag grpc_call_error_trace = - GRPC_TRACER_INITIALIZER(false, "call_error"); -grpc_tracer_flag grpc_compression_trace = - GRPC_TRACER_INITIALIZER(false, "compression"); +grpc_core::TraceFlag grpc_call_error_trace(false, "call_error"); +grpc_core::TraceFlag grpc_compression_trace(false, 
"compression"); -#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call) + 1)) -#define CALL_FROM_CALL_STACK(call_stack) (((grpc_call *)(call_stack)) - 1) +#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack*)((call) + 1)) +#define CALL_FROM_CALL_STACK(call_stack) (((grpc_call*)(call_stack)) - 1) #define CALL_ELEM_FROM_CALL(call, idx) \ grpc_call_stack_element(CALL_STACK_FROM_CALL(call), idx) #define CALL_FROM_TOP_ELEM(top_elem) \ CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem)) -static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_transport_stream_op_batch *op, - grpc_closure *start_batch_closure); -static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, - status_source source, grpc_status_code status, - const char *description); -static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c, - status_source source, grpc_error *error); -static void destroy_call(grpc_exec_ctx *exec_ctx, void *call_stack, - grpc_error *error); -static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp, - grpc_error *error); -static void get_final_status(grpc_call *call, - void (*set_value)(grpc_status_code code, - void *user_data), - void *set_value_user_data, grpc_slice *details); -static void set_status_value_directly(grpc_status_code status, void *dest); -static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call, - status_source source, grpc_error *error); -static void process_data_after_md(grpc_exec_ctx *exec_ctx, batch_control *bctl); -static void post_batch_completion(grpc_exec_ctx *exec_ctx, batch_control *bctl); -static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl, - grpc_error *error, bool has_cancelled); - -static void add_init_error(grpc_error **composite, grpc_error *new_err) { +static void execute_batch(grpc_call* call, grpc_transport_stream_op_batch* op, + grpc_closure* start_batch_closure); +static void cancel_with_status(grpc_call* c, status_source source, + grpc_status_code status, + const char* description); +static void cancel_with_error(grpc_call* c, status_source source, + grpc_error* error); +static void destroy_call(void* call_stack, grpc_error* error); +static void receiving_slice_ready(void* bctlp, grpc_error* error); +static void get_final_status( + grpc_call* call, void (*set_value)(grpc_status_code code, void* user_data), + void* set_value_user_data, grpc_slice* details, const char** error_string); +static void set_status_value_directly(grpc_status_code status, void* dest); +static void set_status_from_error(grpc_call* call, status_source source, + grpc_error* error); +static void process_data_after_md(batch_control* bctl); +static void post_batch_completion(batch_control* bctl); +static void add_batch_error(batch_control* bctl, grpc_error* error, + bool has_cancelled); + +static void add_init_error(grpc_error** composite, grpc_error* new_err) { if (new_err == GRPC_ERROR_NONE) return; if (*composite == GRPC_ERROR_NONE) *composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Call creation failed"); *composite = grpc_error_add_child(*composite, new_err); } -void *grpc_call_arena_alloc(grpc_call *call, size_t size) { +void* grpc_call_arena_alloc(grpc_call* call, size_t size) { return gpr_arena_alloc(call->arena, size); } -static parent_call *get_or_create_parent_call(grpc_call *call) { - parent_call *p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm); - if (p == NULL) { - p = (parent_call *)gpr_arena_alloc(call->arena, sizeof(*p)); +static parent_call* 
get_or_create_parent_call(grpc_call* call) { + parent_call* p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm); + if (p == nullptr) { + p = static_cast<parent_call*>(gpr_arena_alloc(call->arena, sizeof(*p))); gpr_mu_init(&p->child_list_mu); - if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) { + if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm) nullptr, + (gpr_atm)p)) { gpr_mu_destroy(&p->child_list_mu); - p = (parent_call *)gpr_atm_acq_load(&call->parent_call_atm); + p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm); } } return p; } -static parent_call *get_parent_call(grpc_call *call) { - return (parent_call *)gpr_atm_acq_load(&call->parent_call_atm); +static parent_call* get_parent_call(grpc_call* call) { + return (parent_call*)gpr_atm_acq_load(&call->parent_call_atm); } -grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, - const grpc_call_create_args *args, - grpc_call **out_call) { +grpc_error* grpc_call_create(const grpc_call_create_args* args, + grpc_call** out_call) { + GPR_TIMER_SCOPE("grpc_call_create", 0); size_t i, j; - grpc_error *error = GRPC_ERROR_NONE; - grpc_channel_stack *channel_stack = + grpc_error* error = GRPC_ERROR_NONE; + grpc_channel_stack* channel_stack = grpc_channel_get_channel_stack(args->channel); - grpc_call *call; - GPR_TIMER_BEGIN("grpc_call_create", 0); + grpc_call* call; size_t initial_size = grpc_channel_get_call_size_estimate(args->channel); - GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size); - gpr_arena *arena = gpr_arena_create(initial_size); - call = (grpc_call *)gpr_arena_alloc( - arena, sizeof(grpc_call) + channel_stack->call_stack_size); + GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size); + gpr_arena* arena = gpr_arena_create(initial_size); + call = static_cast<grpc_call*>(gpr_arena_alloc( + arena, sizeof(grpc_call) + channel_stack->call_stack_size)); gpr_ref_init(&call->ext_ref, 1); call->arena = arena; grpc_call_combiner_init(&call->call_combiner); @@ -343,12 +344,12 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, call->cq = args->cq; call->start_time = gpr_now(GPR_CLOCK_MONOTONIC); /* Always support no compression */ - GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE); - call->is_client = args->server_transport_data == NULL; + GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_MESSAGE_COMPRESS_NONE); + call->is_client = args->server_transport_data == nullptr; if (call->is_client) { - GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx); + GRPC_STATS_INC_CLIENT_CALLS_CREATED(); } else { - GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx); + GRPC_STATS_INC_SERVER_CALLS_CREATED(); } call->stream_op_payload.context = call->context; grpc_slice path = grpc_empty_slice(); @@ -363,39 +364,32 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, GRPC_MDVALUE(args->add_initial_metadata[i])); } } - call->send_extra_metadata_count = (int)args->add_initial_metadata_count; + call->send_extra_metadata_count = + static_cast<int>(args->add_initial_metadata_count); } else { GPR_ASSERT(args->add_initial_metadata_count == 0); call->send_extra_metadata_count = 0; } for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { - call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); + call->metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE; } } - gpr_timespec send_deadline = - gpr_convert_clock_type(args->send_deadline, GPR_CLOCK_MONOTONIC); + grpc_millis send_deadline = args->send_deadline; bool immediately_cancel = false; - if (args->parent != NULL) { - child_call *cc = call->child = - (child_call 
*)gpr_arena_alloc(arena, sizeof(child_call)); + call->child = + static_cast<child_call*>(gpr_arena_alloc(arena, sizeof(child_call))); call->child->parent = args->parent; GRPC_CALL_INTERNAL_REF(args->parent, "child"); GPR_ASSERT(call->is_client); GPR_ASSERT(!args->parent->is_client); - parent_call *pc = get_or_create_parent_call(args->parent); - - gpr_mu_lock(&pc->child_list_mu); - if (args->propagation_mask & GRPC_PROPAGATE_DEADLINE) { - send_deadline = gpr_time_min( - gpr_convert_clock_type(send_deadline, - args->parent->send_deadline.clock_type), - args->parent->send_deadline); + send_deadline = GPR_MIN(send_deadline, args->parent->send_deadline); } /* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with * GRPC_PROPAGATE_STATS_CONTEXT */ @@ -409,7 +403,7 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, } grpc_call_context_set(call, GRPC_CONTEXT_TRACING, args->parent->context[GRPC_CONTEXT_TRACING].value, - NULL); + nullptr); } else if (args->propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT) { add_init_error(&error, GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Census context propagation requested " @@ -421,8 +415,28 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, immediately_cancel = true; } } + } + + call->send_deadline = send_deadline; - if (pc->first_child == NULL) { + GRPC_CHANNEL_INTERNAL_REF(args->channel, "call"); + /* initial refcount dropped by grpc_call_unref */ + grpc_call_element_args call_args = {CALL_STACK_FROM_CALL(call), + args->server_transport_data, + call->context, + path, + call->start_time, + send_deadline, + call->arena, + &call->call_combiner}; + add_init_error(&error, grpc_call_stack_init(channel_stack, 1, destroy_call, + call, &call_args)); + // Publish this call to parent only after the call stack has been initialized.
+ if (args->parent != nullptr) { + child_call* cc = call->child; + parent_call* pc = get_or_create_parent_call(args->parent); + gpr_mu_lock(&pc->child_list_mu); + if (pc->first_child == nullptr) { pc->first_child = call; cc->sibling_next = cc->sibling_prev = call; } else { @@ -431,115 +445,91 @@ grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, cc->sibling_next->child->sibling_prev = cc->sibling_prev->child->sibling_next = call; } - gpr_mu_unlock(&pc->child_list_mu); } - - call->send_deadline = send_deadline; - - GRPC_CHANNEL_INTERNAL_REF(args->channel, "call"); - /* initial refcount dropped by grpc_call_unref */ - grpc_call_element_args call_args = { - .call_stack = CALL_STACK_FROM_CALL(call), - .server_transport_data = args->server_transport_data, - .context = call->context, - .path = path, - .start_time = call->start_time, - .deadline = send_deadline, - .arena = call->arena, - .call_combiner = &call->call_combiner}; - add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1, - destroy_call, call, &call_args)); if (error != GRPC_ERROR_NONE) { - cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_ERROR_REF(error)); + cancel_with_error(call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error)); } if (immediately_cancel) { - cancel_with_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE, - GRPC_ERROR_CANCELLED); + cancel_with_error(call, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); } - if (args->cq != NULL) { - GPR_ASSERT( - args->pollset_set_alternative == NULL && - "Only one of 'cq' and 'pollset_set_alternative' should be non-NULL."); + if (args->cq != nullptr) { + GPR_ASSERT(args->pollset_set_alternative == nullptr && + "Only one of 'cq' and 'pollset_set_alternative' should be " + "non-nullptr."); GRPC_CQ_INTERNAL_REF(args->cq, "bind"); call->pollent = grpc_polling_entity_create_from_pollset(grpc_cq_pollset(args->cq)); } - if (args->pollset_set_alternative != NULL) { + if (args->pollset_set_alternative != nullptr) { call->pollent = grpc_polling_entity_create_from_pollset_set( args->pollset_set_alternative); } if (!grpc_polling_entity_is_empty(&call->pollent)) { - grpc_call_stack_set_pollset_or_pollset_set( - exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent); + grpc_call_stack_set_pollset_or_pollset_set(CALL_STACK_FROM_CALL(call), + &call->pollent); } - grpc_slice_unref_internal(exec_ctx, path); + grpc_slice_unref_internal(path); - GPR_TIMER_END("grpc_call_create", 0); return error; } -void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_completion_queue *cq) { +void grpc_call_set_completion_queue(grpc_call* call, + grpc_completion_queue* cq) { GPR_ASSERT(cq); - if (grpc_polling_entity_pollset_set(&call->pollent) != NULL) { + if (grpc_polling_entity_pollset_set(&call->pollent) != nullptr) { gpr_log(GPR_ERROR, "A pollset_set is already registered for this call."); abort(); } call->cq = cq; GRPC_CQ_INTERNAL_REF(cq, "bind"); call->pollent = grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq)); - grpc_call_stack_set_pollset_or_pollset_set( - exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent); + grpc_call_stack_set_pollset_or_pollset_set(CALL_STACK_FROM_CALL(call), + &call->pollent); } #ifndef NDEBUG #define REF_REASON reason -#define REF_ARG , const char *reason +#define REF_ARG , const char* reason #else #define REF_REASON "" #define REF_ARG #endif -void grpc_call_internal_ref(grpc_call *c REF_ARG) { +void grpc_call_internal_ref(grpc_call* c REF_ARG) { GRPC_CALL_STACK_REF(CALL_STACK_FROM_CALL(c), REF_REASON); } -void 
grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) { - GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON); +void grpc_call_internal_unref(grpc_call* c REF_ARG) { + GRPC_CALL_STACK_UNREF(CALL_STACK_FROM_CALL(c), REF_REASON); } -static void release_call(grpc_exec_ctx *exec_ctx, void *call, - grpc_error *error) { - grpc_call *c = (grpc_call *)call; - grpc_channel *channel = c->channel; +static void release_call(void* call, grpc_error* error) { + grpc_call* c = static_cast<grpc_call*>(call); + grpc_channel* channel = c->channel; grpc_call_combiner_destroy(&c->call_combiner); - gpr_free((char *)c->peer_string); + gpr_free((char*)c->peer_string); grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena)); - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call"); + GRPC_CHANNEL_INTERNAL_UNREF(channel, "call"); } -static void set_status_value_directly(grpc_status_code status, void *dest); -static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, - grpc_error *error) { +static void set_status_value_directly(grpc_status_code status, void* dest); +static void destroy_call(void* call, grpc_error* error) { + GPR_TIMER_SCOPE("destroy_call", 0); size_t i; int ii; - grpc_call *c = (grpc_call *)call; - GPR_TIMER_BEGIN("destroy_call", 0); + grpc_call* c = static_cast<grpc_call*>(call); for (i = 0; i < 2; i++) { grpc_metadata_batch_destroy( - exec_ctx, &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]); + &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]); } - if (c->receiving_stream != NULL) { - grpc_byte_stream_destroy(exec_ctx, c->receiving_stream); - } - parent_call *pc = get_parent_call(c); - if (pc != NULL) { + c->receiving_stream.reset(); + parent_call* pc = get_parent_call(c); + if (pc != nullptr) { gpr_mu_destroy(&pc->child_list_mu); } for (ii = 0; ii < c->send_extra_metadata_count; ii++) { - GRPC_MDELEM_UNREF(exec_ctx, c->send_extra_metadata[ii].md); + GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md); } for (i = 0; i < GRPC_CONTEXT_COUNT; i++) { if (c->context[i].destroy) { @@ -547,11 +537,11 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, } } if (c->cq) { - GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind"); + GRPC_CQ_INTERNAL_UNREF(c->cq, "bind"); } get_final_status(c, set_status_value_directly, &c->final_info.final_status, - NULL); + nullptr, c->final_info.error_string); c->final_info.stats.latency = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time); @@ -560,36 +550,36 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, unpack_received_status(gpr_atm_acq_load(&c->status[i])).error); } - grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info, + grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c), &c->final_info, GRPC_CLOSURE_INIT(&c->release_call, release_call, c, grpc_schedule_on_exec_ctx)); - GPR_TIMER_END("destroy_call", 0); } -void grpc_call_ref(grpc_call *c) { gpr_ref(&c->ext_ref); } +void grpc_call_ref(grpc_call* c) { gpr_ref(&c->ext_ref); } -void grpc_call_unref(grpc_call *c) { +void grpc_call_unref(grpc_call* c) { if (!gpr_unref(&c->ext_ref)) return; - child_call *cc = c->child; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + GPR_TIMER_SCOPE("grpc_call_unref", 0); + + child_call* cc = c->child; + grpc_core::ExecCtx exec_ctx; - GPR_TIMER_BEGIN("grpc_call_unref", 0); GRPC_API_TRACE("grpc_call_unref(c=%p)", 1, (c)); if (cc) { - parent_call *pc = get_parent_call(cc->parent); + parent_call* pc = get_parent_call(cc->parent); gpr_mu_lock(&pc->child_list_mu); if (c == pc->first_child) {
pc->first_child = cc->sibling_next; if (c == pc->first_child) { - pc->first_child = NULL; + pc->first_child = nullptr; } } cc->sibling_prev->child->sibling_next = cc->sibling_next; cc->sibling_next->child->sibling_prev = cc->sibling_prev; gpr_mu_unlock(&pc->child_list_mu); - GRPC_CALL_INTERNAL_UNREF(&exec_ctx, cc->parent, "child"); + GRPC_CALL_INTERNAL_UNREF(cc->parent, "child"); } GPR_ASSERT(!c->destroy_called); @@ -597,64 +587,59 @@ void grpc_call_unref(grpc_call *c) { bool cancel = gpr_atm_acq_load(&c->any_ops_sent_atm) != 0 && gpr_atm_acq_load(&c->received_final_op_atm) == 0; if (cancel) { - cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, - GRPC_ERROR_CANCELLED); + cancel_with_error(c, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); } else { // Unset the call combiner cancellation closure. This has the // effect of scheduling the previously set cancellation closure, if // any, so that it can release any internal references it may be // holding to the call stack. - grpc_call_combiner_set_notify_on_cancel(&exec_ctx, &c->call_combiner, NULL); + grpc_call_combiner_set_notify_on_cancel(&c->call_combiner, nullptr); } - GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy"); - grpc_exec_ctx_finish(&exec_ctx); - GPR_TIMER_END("grpc_call_unref", 0); + GRPC_CALL_INTERNAL_UNREF(c, "destroy"); } -grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) { +grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved) { GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved)); GPR_ASSERT(!reserved); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - cancel_with_error(&exec_ctx, call, STATUS_FROM_API_OVERRIDE, - GRPC_ERROR_CANCELLED); - grpc_exec_ctx_finish(&exec_ctx); + grpc_core::ExecCtx exec_ctx; + cancel_with_error(call, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); + return GRPC_CALL_OK; } // This is called via the call combiner to start sending a batch down // the filter stack. -static void execute_batch_in_call_combiner(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *ignored) { - grpc_transport_stream_op_batch *batch = (grpc_transport_stream_op_batch *)arg; - grpc_call *call = (grpc_call *)batch->handler_private.extra_arg; - GPR_TIMER_BEGIN("execute_batch", 0); - grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0); +static void execute_batch_in_call_combiner(void* arg, grpc_error* ignored) { + GPR_TIMER_SCOPE("execute_batch_in_call_combiner", 0); + grpc_transport_stream_op_batch* batch = + static_cast<grpc_transport_stream_op_batch*>(arg); + grpc_call* call = static_cast<grpc_call*>(batch->handler_private.extra_arg); + grpc_call_element* elem = CALL_ELEM_FROM_CALL(call, 0); GRPC_CALL_LOG_OP(GPR_INFO, elem, batch); - elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch); - GPR_TIMER_END("execute_batch", 0); + elem->filter->start_transport_stream_op_batch(elem, batch); } // start_batch_closure points to a caller-allocated closure to be used // for entering the call combiner.
-static void execute_batch(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_transport_stream_op_batch *batch, - grpc_closure *start_batch_closure) { +static void execute_batch(grpc_call* call, + grpc_transport_stream_op_batch* batch, + grpc_closure* start_batch_closure) { batch->handler_private.extra_arg = call; GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch, grpc_schedule_on_exec_ctx); - GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner, start_batch_closure, + GRPC_CALL_COMBINER_START(&call->call_combiner, start_batch_closure, GRPC_ERROR_NONE, "executing batch"); } -char *grpc_call_get_peer(grpc_call *call) { - char *peer_string = (char *)gpr_atm_acq_load(&call->peer_string); - if (peer_string != NULL) return gpr_strdup(peer_string); +char* grpc_call_get_peer(grpc_call* call) { + char* peer_string = (char*)gpr_atm_acq_load(&call->peer_string); + if (peer_string != nullptr) return gpr_strdup(peer_string); peer_string = grpc_channel_get_target(call->channel); - if (peer_string != NULL) return peer_string; + if (peer_string != nullptr) return peer_string; return gpr_strdup("unknown"); } -grpc_call *grpc_call_from_top_element(grpc_call_element *elem) { +grpc_call* grpc_call_from_top_element(grpc_call_element* elem) { return CALL_FROM_TOP_ELEM(elem); } @@ -662,61 +647,59 @@ grpc_call *grpc_call_from_top_element(grpc_call_element *elem) { * CANCELLATION */ -grpc_call_error grpc_call_cancel_with_status(grpc_call *c, +grpc_call_error grpc_call_cancel_with_status(grpc_call* c, grpc_status_code status, - const char *description, - void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + const char* description, + void* reserved) { + grpc_core::ExecCtx exec_ctx; GRPC_API_TRACE( "grpc_call_cancel_with_status(" "c=%p, status=%d, description=%s, reserved=%p)", 4, (c, (int)status, description, reserved)); - GPR_ASSERT(reserved == NULL); - cancel_with_status(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, status, - description); - grpc_exec_ctx_finish(&exec_ctx); + GPR_ASSERT(reserved == nullptr); + cancel_with_status(c, STATUS_FROM_API_OVERRIDE, status, description); + return GRPC_CALL_OK; } typedef struct { - grpc_call *call; + grpc_call* call; grpc_closure start_batch; grpc_closure finish_batch; } cancel_state; // The on_complete callback used when sending a cancel_stream batch down // the filter stack. Yields the call combiner when the batch is done. -static void done_termination(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - cancel_state *state = (cancel_state *)arg; - GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner, +static void done_termination(void* arg, grpc_error* error) { + cancel_state* state = static_cast<cancel_state*>(arg); + GRPC_CALL_COMBINER_STOP(&state->call->call_combiner, "on_complete for cancel_stream op"); - GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination"); + GRPC_CALL_INTERNAL_UNREF(state->call, "termination"); gpr_free(state); } -static void cancel_with_error(grpc_exec_ctx *exec_ctx, grpc_call *c, - status_source source, grpc_error *error) { +static void cancel_with_error(grpc_call* c, status_source source, + grpc_error* error) { GRPC_CALL_INTERNAL_REF(c, "termination"); // Inform the call combiner of the cancellation, so that it can cancel // any in-flight asynchronous actions that may be holding the call // combiner. This ensures that the cancel_stream batch can be sent // down the filter stack in a timely manner.
- grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error)); - set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error)); - cancel_state *state = (cancel_state *)gpr_malloc(sizeof(*state)); + grpc_call_combiner_cancel(&c->call_combiner, GRPC_ERROR_REF(error)); + set_status_from_error(c, source, GRPC_ERROR_REF(error)); + cancel_state* state = static_cast<cancel_state*>(gpr_malloc(sizeof(*state))); state->call = c; GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state, grpc_schedule_on_exec_ctx); - grpc_transport_stream_op_batch *op = + grpc_transport_stream_op_batch* op = grpc_make_transport_stream_op(&state->finish_batch); op->cancel_stream = true; op->payload->cancel_stream.cancel_error = error; - execute_batch(exec_ctx, c, op, &state->start_batch); + execute_batch(c, op, &state->start_batch); } -static grpc_error *error_from_status(grpc_status_code status, - const char *description) { +static grpc_error* error_from_status(grpc_status_code status, + const char* description) { // copying 'description' is needed to ensure the grpc_call_cancel_with_status // guarantee that can be short-lived. return grpc_error_set_int( @@ -726,11 +709,10 @@ static grpc_error *error_from_status(grpc_status_code status, GRPC_ERROR_INT_GRPC_STATUS, status); } -static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, - status_source source, grpc_status_code status, - const char *description) { - cancel_with_error(exec_ctx, c, source, - error_from_status(status, description)); +static void cancel_with_status(grpc_call* c, status_source source, + grpc_status_code status, + const char* description) { + cancel_with_error(c, source, error_from_status(status, description)); } /******************************************************************************* @@ -738,37 +720,37 @@ static void cancel_with_status(grpc_exec_ctx *exec_ctx, grpc_call *c, */ static bool get_final_status_from( - grpc_call *call, grpc_error *error, bool allow_ok_status, - void (*set_value)(grpc_status_code code, void *user_data), - void *set_value_user_data, grpc_slice *details) { + grpc_call* call, grpc_error* error, bool allow_ok_status, + void (*set_value)(grpc_status_code code, void* user_data), + void* set_value_user_data, grpc_slice* details, const char** error_string) { grpc_status_code code; grpc_slice slice = grpc_empty_slice(); - grpc_error_get_status(error, call->send_deadline, &code, &slice, NULL); + grpc_error_get_status(error, call->send_deadline, &code, &slice, nullptr, + error_string); if (code == GRPC_STATUS_OK && !allow_ok_status) { return false; } set_value(code, set_value_user_data); - if (details != NULL) { + if (details != nullptr) { *details = grpc_slice_ref_internal(slice); } return true; } -static void get_final_status(grpc_call *call, - void (*set_value)(grpc_status_code code, - void *user_data), - void *set_value_user_data, grpc_slice *details) { +static void get_final_status( + grpc_call* call, void (*set_value)(grpc_status_code code, void* user_data), + void* set_value_user_data, grpc_slice* details, const char** error_string) { int i; received_status status[STATUS_SOURCE_COUNT]; for (i = 0; i < STATUS_SOURCE_COUNT; i++) { status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i])); } - if (GRPC_TRACER_ON(grpc_call_error_trace)) { - gpr_log(GPR_DEBUG, "get_final_status %s", call->is_client ? "CLI" : "SVR"); + if (grpc_call_error_trace.enabled()) { + gpr_log(GPR_INFO, "get_final_status %s", call->is_client ? 
"CLI" : "SVR"); for (i = 0; i < STATUS_SOURCE_COUNT; i++) { if (status[i].is_set) { - gpr_log(GPR_DEBUG, " %d: %s", i, grpc_error_string(status[i].error)); + gpr_log(GPR_INFO, " %d: %s", i, grpc_error_string(status[i].error)); } } } @@ -781,7 +763,8 @@ static void get_final_status(grpc_call *call, if (status[i].is_set && grpc_error_has_clear_grpc_status(status[i].error)) { if (get_final_status_from(call, status[i].error, allow_ok_status != 0, - set_value, set_value_user_data, details)) { + set_value, set_value_user_data, details, + error_string)) { return; } } @@ -790,7 +773,8 @@ static void get_final_status(grpc_call *call, for (i = 0; i < STATUS_SOURCE_COUNT; i++) { if (status[i].is_set) { if (get_final_status_from(call, status[i].error, allow_ok_status != 0, - set_value, set_value_user_data, details)) { + set_value, set_value_user_data, details, + error_string)) { return; } } @@ -804,13 +788,11 @@ static void get_final_status(grpc_call *call, } } -static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call, - status_source source, grpc_error *error) { +static void set_status_from_error(grpc_call* call, status_source source, + grpc_error* error) { if (!gpr_atm_rel_cas(&call->status[source], - pack_received_status((received_status){ - .is_set = false, .error = GRPC_ERROR_NONE}), - pack_received_status((received_status){ - .is_set = true, .error = error}))) { + pack_received_status({false, GRPC_ERROR_NONE}), + pack_received_status({true, error}))) { GRPC_ERROR_UNREF(error); } } @@ -819,181 +801,135 @@ static void set_status_from_error(grpc_exec_ctx *exec_ctx, grpc_call *call, * COMPRESSION */ -static void set_incoming_compression_algorithm( - grpc_call *call, grpc_compression_algorithm algo) { - GPR_ASSERT(algo < GRPC_COMPRESS_ALGORITHMS_COUNT); - call->incoming_compression_algorithm = algo; +static void set_incoming_message_compression_algorithm( + grpc_call* call, grpc_message_compression_algorithm algo) { + GPR_ASSERT(algo < GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT); + call->incoming_message_compression_algorithm = algo; } static void set_incoming_stream_compression_algorithm( - grpc_call *call, grpc_stream_compression_algorithm algo) { + grpc_call* call, grpc_stream_compression_algorithm algo) { GPR_ASSERT(algo < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT); call->incoming_stream_compression_algorithm = algo; } grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm( - grpc_call *call) { - grpc_compression_algorithm algorithm; - algorithm = call->incoming_compression_algorithm; + grpc_call* call) { + grpc_compression_algorithm algorithm = GRPC_COMPRESS_NONE; + grpc_compression_algorithm_from_message_stream_compression_algorithm( + &algorithm, call->incoming_message_compression_algorithm, + call->incoming_stream_compression_algorithm); return algorithm; } static grpc_compression_algorithm compression_algorithm_for_level_locked( - grpc_call *call, grpc_compression_level level) { + grpc_call* call, grpc_compression_level level) { return grpc_compression_algorithm_for_level(level, call->encodings_accepted_by_peer); } -static grpc_stream_compression_algorithm -stream_compression_algorithm_for_level_locked( - grpc_call *call, grpc_stream_compression_level level) { - return grpc_stream_compression_algorithm_for_level( - level, call->stream_encodings_accepted_by_peer); -} - -uint32_t grpc_call_test_only_get_message_flags(grpc_call *call) { +uint32_t grpc_call_test_only_get_message_flags(grpc_call* call) { uint32_t flags; flags = call->test_only_last_message_flags; 
return flags; } -static void destroy_encodings_accepted_by_peer(void *p) { return; } +static void destroy_encodings_accepted_by_peer(void* p) { return; } -static void set_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx, - grpc_call *call, grpc_mdelem mdel) { +static void set_encodings_accepted_by_peer(grpc_call* call, grpc_mdelem mdel, + uint32_t* encodings_accepted_by_peer, + bool stream_encoding) { size_t i; - grpc_compression_algorithm algorithm; + uint32_t algorithm; grpc_slice_buffer accept_encoding_parts; grpc_slice accept_encoding_slice; - void *accepted_user_data; + void* accepted_user_data; accepted_user_data = grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer); - if (accepted_user_data != NULL) { - call->encodings_accepted_by_peer = - (uint32_t)(((uintptr_t)accepted_user_data) - 1); + if (accepted_user_data != nullptr) { + *encodings_accepted_by_peer = + static_cast<uint32_t>(((uintptr_t)accepted_user_data) - 1); return; } + *encodings_accepted_by_peer = 0; + accept_encoding_slice = GRPC_MDVALUE(mdel); grpc_slice_buffer_init(&accept_encoding_parts); - grpc_slice_split(accept_encoding_slice, ",", &accept_encoding_parts); + grpc_slice_split_without_space(accept_encoding_slice, ",", + &accept_encoding_parts); - /* No need to zero call->encodings_accepted_by_peer: grpc_call_create already - * zeroes the whole grpc_call */ - /* Always support no compression */ - GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE); + GPR_BITSET(encodings_accepted_by_peer, GRPC_COMPRESS_NONE); for (i = 0; i < accept_encoding_parts.count; i++) { + int r; grpc_slice accept_encoding_entry_slice = accept_encoding_parts.slices[i]; - if (grpc_compression_algorithm_parse(accept_encoding_entry_slice, - &algorithm)) { - GPR_BITSET(&call->encodings_accepted_by_peer, algorithm); + if (!stream_encoding) { + r = grpc_message_compression_algorithm_parse( + accept_encoding_entry_slice, + reinterpret_cast<grpc_message_compression_algorithm*>(&algorithm)); } else { - char *accept_encoding_entry_str = - grpc_slice_to_c_string(accept_encoding_entry_slice); - gpr_log(GPR_ERROR, - "Invalid entry in accept encoding metadata: '%s'. 
Ignoring.", - accept_encoding_entry_str); - gpr_free(accept_encoding_entry_str); + r = grpc_stream_compression_algorithm_parse( + accept_encoding_entry_slice, + reinterpret_cast<grpc_stream_compression_algorithm*>(&algorithm)); } - } - - grpc_slice_buffer_destroy_internal(exec_ctx, &accept_encoding_parts); - - grpc_mdelem_set_user_data( - mdel, destroy_encodings_accepted_by_peer, - (void *)(((uintptr_t)call->encodings_accepted_by_peer) + 1)); -} - -static void set_stream_encodings_accepted_by_peer(grpc_exec_ctx *exec_ctx, - grpc_call *call, - grpc_mdelem mdel) { - size_t i; - grpc_stream_compression_algorithm algorithm; - grpc_slice_buffer accept_encoding_parts; - grpc_slice accept_encoding_slice; - void *accepted_user_data; - - accepted_user_data = - grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer); - if (accepted_user_data != NULL) { - call->stream_encodings_accepted_by_peer = - (uint32_t)(((uintptr_t)accepted_user_data) - 1); - return; - } - - accept_encoding_slice = GRPC_MDVALUE(mdel); - grpc_slice_buffer_init(&accept_encoding_parts); - grpc_slice_split(accept_encoding_slice, ",", &accept_encoding_parts); - - /* Always support no compression */ - GPR_BITSET(&call->stream_encodings_accepted_by_peer, - GRPC_STREAM_COMPRESS_NONE); - for (i = 0; i < accept_encoding_parts.count; i++) { - grpc_slice accept_encoding_entry_slice = accept_encoding_parts.slices[i]; - if (grpc_stream_compression_algorithm_parse(accept_encoding_entry_slice, - &algorithm)) { - GPR_BITSET(&call->stream_encodings_accepted_by_peer, algorithm); + if (r) { + GPR_BITSET(encodings_accepted_by_peer, algorithm); } else { - char *accept_encoding_entry_str = + char* accept_encoding_entry_str = grpc_slice_to_c_string(accept_encoding_entry_slice); - gpr_log(GPR_ERROR, - "Invalid entry in accept encoding metadata: '%s'. Ignoring.", + gpr_log(GPR_DEBUG, + "Unknown entry in accept encoding metadata: '%s'. 
Ignoring.", accept_encoding_entry_str); gpr_free(accept_encoding_entry_str); } } - grpc_slice_buffer_destroy_internal(exec_ctx, &accept_encoding_parts); + grpc_slice_buffer_destroy_internal(&accept_encoding_parts); grpc_mdelem_set_user_data( mdel, destroy_encodings_accepted_by_peer, - (void *)(((uintptr_t)call->stream_encodings_accepted_by_peer) + 1)); + (void*)((static_cast<uintptr_t>(*encodings_accepted_by_peer)) + 1)); } -uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call) { +uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call* call) { uint32_t encodings_accepted_by_peer; encodings_accepted_by_peer = call->encodings_accepted_by_peer; return encodings_accepted_by_peer; } -uint32_t grpc_call_test_only_get_stream_encodings_accepted_by_peer( - grpc_call *call) { - uint32_t stream_encodings_accepted_by_peer; - stream_encodings_accepted_by_peer = call->stream_encodings_accepted_by_peer; - return stream_encodings_accepted_by_peer; -} - grpc_stream_compression_algorithm -grpc_call_test_only_get_incoming_stream_encodings(grpc_call *call) { +grpc_call_test_only_get_incoming_stream_encodings(grpc_call* call) { return call->incoming_stream_compression_algorithm; } -static grpc_linked_mdelem *linked_from_md(const grpc_metadata *md) { - return (grpc_linked_mdelem *)&md->internal_data; +static grpc_linked_mdelem* linked_from_md(const grpc_metadata* md) { + return (grpc_linked_mdelem*)&md->internal_data; } -static grpc_metadata *get_md_elem(grpc_metadata *metadata, - grpc_metadata *additional_metadata, int i, +static grpc_metadata* get_md_elem(grpc_metadata* metadata, + grpc_metadata* additional_metadata, int i, int count) { - grpc_metadata *res = + grpc_metadata* res = i < count ? &metadata[i] : &additional_metadata[i - count]; GPR_ASSERT(res); return res; } -static int prepare_application_metadata( - grpc_exec_ctx *exec_ctx, grpc_call *call, int count, - grpc_metadata *metadata, int is_trailing, int prepend_extra_metadata, - grpc_metadata *additional_metadata, int additional_metadata_count) { +static int prepare_application_metadata(grpc_call* call, int count, + grpc_metadata* metadata, + int is_trailing, + int prepend_extra_metadata, + grpc_metadata* additional_metadata, + int additional_metadata_count) { int total_count = count + additional_metadata_count; int i; - grpc_metadata_batch *batch = + grpc_metadata_batch* batch = &call->metadata_batch[0 /* is_receiving */][is_trailing]; for (i = 0; i < total_count; i++) { - const grpc_metadata *md = + const grpc_metadata* md = get_md_elem(metadata, additional_metadata, i, count); - grpc_linked_mdelem *l = linked_from_md(md); + grpc_linked_mdelem* l = linked_from_md(md); GPR_ASSERT(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data)); if (!GRPC_LOG_IF_ERROR("validate_metadata", grpc_validate_header_key_is_legal(md->key))) { @@ -1004,14 +940,14 @@ static int prepare_application_metadata( grpc_validate_header_nonbin_value_is_legal(md->value))) { break; } - l->md = grpc_mdelem_from_grpc_metadata(exec_ctx, (grpc_metadata *)md); + l->md = grpc_mdelem_from_grpc_metadata(const_cast<grpc_metadata*>(md)); } if (i != total_count) { for (int j = 0; j < i; j++) { - const grpc_metadata *md = + const grpc_metadata* md = get_md_elem(metadata, additional_metadata, j, count); - grpc_linked_mdelem *l = linked_from_md(md); - GRPC_MDELEM_UNREF(exec_ctx, l->md); + grpc_linked_mdelem* l = linked_from_md(md); + GRPC_MDELEM_UNREF(l->md); } return 0; } @@ -1022,16 +958,16 @@ static int prepare_application_metadata( for (i = 0; i < 
call->send_extra_metadata_count; i++) { GRPC_LOG_IF_ERROR("prepare_application_metadata", grpc_metadata_batch_link_tail( - exec_ctx, batch, &call->send_extra_metadata[i])); + batch, &call->send_extra_metadata[i])); } } } for (i = 0; i < total_count; i++) { - grpc_metadata *md = get_md_elem(metadata, additional_metadata, i, count); - grpc_linked_mdelem *l = linked_from_md(md); - grpc_error *error = grpc_metadata_batch_link_tail(exec_ctx, batch, l); + grpc_metadata* md = get_md_elem(metadata, additional_metadata, i, count); + grpc_linked_mdelem* l = linked_from_md(md); + grpc_error* error = grpc_metadata_batch_link_tail(batch, l); if (error != GRPC_ERROR_NONE) { - GRPC_MDELEM_UNREF(exec_ctx, l->md); + GRPC_MDELEM_UNREF(l->md); } GRPC_LOG_IF_ERROR("prepare_application_metadata", error); } @@ -1040,42 +976,18 @@ static int prepare_application_metadata( return 1; } -/* we offset status by a small amount when storing it into transport metadata - as metadata cannot store a 0 value (which is used as OK for grpc_status_codes - */ -#define STATUS_OFFSET 1 -static void destroy_status(void *ignored) {} - -static uint32_t decode_status(grpc_mdelem md) { - uint32_t status; - void *user_data; - if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_0)) return 0; - if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_1)) return 1; - if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_2)) return 2; - user_data = grpc_mdelem_get_user_data(md, destroy_status); - if (user_data != NULL) { - status = ((uint32_t)(intptr_t)user_data) - STATUS_OFFSET; - } else { - if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(md), &status)) { - status = GRPC_STATUS_UNKNOWN; /* could not parse status code */ - } - grpc_mdelem_set_user_data(md, destroy_status, - (void *)(intptr_t)(status + STATUS_OFFSET)); - } - return status; -} - -static grpc_compression_algorithm decode_compression(grpc_mdelem md) { - grpc_compression_algorithm algorithm = - grpc_compression_algorithm_from_slice(GRPC_MDVALUE(md)); - if (algorithm == GRPC_COMPRESS_ALGORITHMS_COUNT) { - char *md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md)); +static grpc_message_compression_algorithm decode_message_compression( + grpc_mdelem md) { + grpc_message_compression_algorithm algorithm = + grpc_message_compression_algorithm_from_slice(GRPC_MDVALUE(md)); + if (algorithm == GRPC_MESSAGE_COMPRESS_ALGORITHMS_COUNT) { + char* md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md)); gpr_log(GPR_ERROR, - "Invalid incoming compression algorithm: '%s'. Interpreting " - "incoming data as uncompressed.", + "Invalid incoming message compression algorithm: '%s'. " + "Interpreting incoming data as uncompressed.", md_c_str); gpr_free(md_c_str); - return GRPC_COMPRESS_NONE; + return GRPC_MESSAGE_COMPRESS_NONE; } return algorithm; } @@ -1085,7 +997,7 @@ static grpc_stream_compression_algorithm decode_stream_compression( grpc_stream_compression_algorithm algorithm = grpc_stream_compression_algorithm_from_slice(GRPC_MDVALUE(md)); if (algorithm == GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) { - char *md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md)); + char* md_c_str = grpc_slice_to_c_string(GRPC_MDVALUE(md)); gpr_log(GPR_ERROR, "Invalid incoming stream compression algorithm: '%s'. 
Interpreting " "incoming data as uncompressed.", @@ -1096,94 +1008,90 @@ static grpc_stream_compression_algorithm decode_stream_compression( return algorithm; } -static void publish_app_metadata(grpc_call *call, grpc_metadata_batch *b, +static void publish_app_metadata(grpc_call* call, grpc_metadata_batch* b, int is_trailing) { if (b->list.count == 0) return; - GPR_TIMER_BEGIN("publish_app_metadata", 0); - grpc_metadata_array *dest; - grpc_metadata *mdusr; + if (is_trailing && call->buffered_metadata[1] == nullptr) return; + GPR_TIMER_SCOPE("publish_app_metadata", 0); + grpc_metadata_array* dest; + grpc_metadata* mdusr; dest = call->buffered_metadata[is_trailing]; if (dest->count + b->list.count > dest->capacity) { dest->capacity = GPR_MAX(dest->capacity + b->list.count, dest->capacity * 3 / 2); - dest->metadata = (grpc_metadata *)gpr_realloc( - dest->metadata, sizeof(grpc_metadata) * dest->capacity); + dest->metadata = static_cast( + gpr_realloc(dest->metadata, sizeof(grpc_metadata) * dest->capacity)); } - for (grpc_linked_mdelem *l = b->list.head; l != NULL; l = l->next) { + for (grpc_linked_mdelem* l = b->list.head; l != nullptr; l = l->next) { mdusr = &dest->metadata[dest->count++]; /* we pass back borrowed slices that are valid whilst the call is valid */ mdusr->key = GRPC_MDKEY(l->md); mdusr->value = GRPC_MDVALUE(l->md); } - GPR_TIMER_END("publish_app_metadata", 0); } -static void recv_initial_filter(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_metadata_batch *b) { - if (b->idx.named.content_encoding != NULL) { - if (b->idx.named.grpc_encoding != NULL) { - gpr_log(GPR_ERROR, - "Received both content-encoding and grpc-encoding header. " - "Ignoring grpc-encoding."); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding); - } - GPR_TIMER_BEGIN("incoming_stream_compression_algorithm", 0); +static void recv_initial_filter(grpc_call* call, grpc_metadata_batch* b) { + if (b->idx.named.content_encoding != nullptr) { + GPR_TIMER_SCOPE("incoming_stream_compression_algorithm", 0); set_incoming_stream_compression_algorithm( call, decode_stream_compression(b->idx.named.content_encoding->md)); - GPR_TIMER_END("incoming_stream_compression_algorithm", 0); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_encoding); - } else if (b->idx.named.grpc_encoding != NULL) { - GPR_TIMER_BEGIN("incoming_compression_algorithm", 0); - set_incoming_compression_algorithm( - call, decode_compression(b->idx.named.grpc_encoding->md)); - GPR_TIMER_END("incoming_compression_algorithm", 0); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding); - } - if (b->idx.named.grpc_accept_encoding != NULL) { - GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0); - set_encodings_accepted_by_peer(exec_ctx, call, - b->idx.named.grpc_accept_encoding->md); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_accept_encoding); - GPR_TIMER_END("encodings_accepted_by_peer", 0); - } - if (b->idx.named.accept_encoding != NULL) { - GPR_TIMER_BEGIN("stream_encodings_accepted_by_peer", 0); - set_stream_encodings_accepted_by_peer(exec_ctx, call, - b->idx.named.accept_encoding->md); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.accept_encoding); - GPR_TIMER_END("stream_encodings_accepted_by_peer", 0); - } + grpc_metadata_batch_remove(b, b->idx.named.content_encoding); + } + if (b->idx.named.grpc_encoding != nullptr) { + GPR_TIMER_SCOPE("incoming_message_compression_algorithm", 0); + set_incoming_message_compression_algorithm( + call, 
decode_message_compression(b->idx.named.grpc_encoding->md)); + grpc_metadata_batch_remove(b, b->idx.named.grpc_encoding); + } + uint32_t message_encodings_accepted_by_peer = 1u; + uint32_t stream_encodings_accepted_by_peer = 1u; + if (b->idx.named.grpc_accept_encoding != nullptr) { + GPR_TIMER_SCOPE("encodings_accepted_by_peer", 0); + set_encodings_accepted_by_peer(call, b->idx.named.grpc_accept_encoding->md, + &message_encodings_accepted_by_peer, false); + grpc_metadata_batch_remove(b, b->idx.named.grpc_accept_encoding); + } + if (b->idx.named.accept_encoding != nullptr) { + GPR_TIMER_SCOPE("stream_encodings_accepted_by_peer", 0); + set_encodings_accepted_by_peer(call, b->idx.named.accept_encoding->md, + &stream_encodings_accepted_by_peer, true); + grpc_metadata_batch_remove(b, b->idx.named.accept_encoding); + } + call->encodings_accepted_by_peer = + grpc_compression_bitset_from_message_stream_compression_bitset( + message_encodings_accepted_by_peer, + stream_encodings_accepted_by_peer); publish_app_metadata(call, b, false); } -static void recv_trailing_filter(grpc_exec_ctx *exec_ctx, void *args, - grpc_metadata_batch *b) { - grpc_call *call = (grpc_call *)args; - if (b->idx.named.grpc_status != NULL) { - uint32_t status_code = decode_status(b->idx.named.grpc_status->md); - grpc_error *error = +static void recv_trailing_filter(void* args, grpc_metadata_batch* b) { + grpc_call* call = static_cast(args); + if (b->idx.named.grpc_status != nullptr) { + grpc_status_code status_code = + grpc_get_status_code_from_metadata(b->idx.named.grpc_status->md); + grpc_error* error = status_code == GRPC_STATUS_OK ? GRPC_ERROR_NONE : grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Error received from peer"), GRPC_ERROR_INT_GRPC_STATUS, - (intptr_t)status_code); - if (b->idx.named.grpc_message != NULL) { + static_cast(status_code)); + if (b->idx.named.grpc_message != nullptr) { error = grpc_error_set_str( error, GRPC_ERROR_STR_GRPC_MESSAGE, grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.grpc_message->md))); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_message); + grpc_metadata_batch_remove(b, b->idx.named.grpc_message); } else if (error != GRPC_ERROR_NONE) { error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE, grpc_empty_slice()); } - set_status_from_error(exec_ctx, call, STATUS_FROM_WIRE, error); - grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_status); + set_status_from_error(call, STATUS_FROM_WIRE, error); + grpc_metadata_batch_remove(b, b->idx.named.grpc_status); } publish_app_metadata(call, b, true); } -grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) { +grpc_call_stack* grpc_call_get_call_stack(grpc_call* call) { return CALL_STACK_FROM_CALL(call); } @@ -1191,12 +1099,12 @@ grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) { * BATCH API IMPLEMENTATION */ -static void set_status_value_directly(grpc_status_code status, void *dest) { - *(grpc_status_code *)dest = status; +static void set_status_value_directly(grpc_status_code status, void* dest) { + *static_cast(dest) = status; } -static void set_cancelled_value(grpc_status_code status, void *dest) { - *(int *)dest = (status != GRPC_STATUS_OK); +static void set_cancelled_value(grpc_status_code status, void* dest) { + *static_cast(dest) = (status != GRPC_STATUS_OK); } static bool are_write_flags_valid(uint32_t flags) { @@ -1236,18 +1144,18 @@ static int batch_slot_for_op(grpc_op_type type) { GPR_UNREACHABLE_CODE(return 123456789); } -static batch_control *allocate_batch_control(grpc_call 
*call, - const grpc_op *ops, +static batch_control* allocate_batch_control(grpc_call* call, + const grpc_op* ops, size_t num_ops) { int slot = batch_slot_for_op(ops[0].op); - batch_control **pslot = &call->active_batches[slot]; - if (*pslot == NULL) { - *pslot = - (batch_control *)gpr_arena_alloc(call->arena, sizeof(batch_control)); + batch_control** pslot = &call->active_batches[slot]; + if (*pslot == nullptr) { + *pslot = static_cast( + gpr_arena_alloc(call->arena, sizeof(batch_control))); } - batch_control *bctl = *pslot; - if (bctl->call != NULL) { - return NULL; + batch_control* bctl = *pslot; + if (bctl->call != nullptr) { + return nullptr; } memset(bctl, 0, sizeof(*bctl)); bctl->call = call; @@ -1255,44 +1163,43 @@ static batch_control *allocate_batch_control(grpc_call *call, return bctl; } -static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_cq_completion *storage) { - batch_control *bctl = (batch_control *)user_data; - grpc_call *call = bctl->call; - bctl->call = NULL; - GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion"); +static void finish_batch_completion(void* user_data, + grpc_cq_completion* storage) { + batch_control* bctl = static_cast(user_data); + grpc_call* call = bctl->call; + bctl->call = nullptr; + GRPC_CALL_INTERNAL_UNREF(call, "completion"); } -static grpc_error *consolidate_batch_errors(batch_control *bctl) { - size_t n = (size_t)gpr_atm_acq_load(&bctl->num_errors); +static grpc_error* consolidate_batch_errors(batch_control* bctl) { + size_t n = static_cast(gpr_atm_acq_load(&bctl->num_errors)); if (n == 0) { return GRPC_ERROR_NONE; } else if (n == 1) { /* Skip creating a composite error in the case that only one error was logged */ - grpc_error *e = bctl->errors[0]; - bctl->errors[0] = NULL; + grpc_error* e = bctl->errors[0]; + bctl->errors[0] = nullptr; return e; } else { - grpc_error *error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( + grpc_error* error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Call batch failed", bctl->errors, n); for (size_t i = 0; i < n; i++) { GRPC_ERROR_UNREF(bctl->errors[i]); - bctl->errors[i] = NULL; + bctl->errors[i] = nullptr; } return error; } } -static void post_batch_completion(grpc_exec_ctx *exec_ctx, - batch_control *bctl) { - grpc_call *next_child_call; - grpc_call *call = bctl->call; - grpc_error *error = consolidate_batch_errors(bctl); +static void post_batch_completion(batch_control* bctl) { + grpc_call* next_child_call; + grpc_call* call = bctl->call; + grpc_error* error = consolidate_batch_errors(bctl); if (bctl->op.send_initial_metadata) { grpc_metadata_batch_destroy( - exec_ctx, + &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]); } if (bctl->op.send_message) { @@ -1300,29 +1207,29 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, } if (bctl->op.send_trailing_metadata) { grpc_metadata_batch_destroy( - exec_ctx, + &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]); } if (bctl->op.recv_trailing_metadata) { - grpc_metadata_batch *md = + grpc_metadata_batch* md = &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */]; - recv_trailing_filter(exec_ctx, call, md); + recv_trailing_filter(call, md); /* propagate cancellation to any interested children */ gpr_atm_rel_store(&call->received_final_op_atm, 1); - parent_call *pc = get_parent_call(call); - if (pc != NULL) { - grpc_call *child; + parent_call* pc = get_parent_call(call); + if (pc != nullptr) { + grpc_call* child; gpr_mu_lock(&pc->child_list_mu); child = 
pc->first_child; - if (child != NULL) { + if (child != nullptr) { do { next_child_call = child->child->sibling_next; if (child->cancellation_is_inherited) { GRPC_CALL_INTERNAL_REF(child, "propagate_cancel"); - cancel_with_error(exec_ctx, child, STATUS_FROM_API_OVERRIDE, + cancel_with_error(child, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED); - GRPC_CALL_INTERNAL_UNREF(exec_ctx, child, "propagate_cancel"); + GRPC_CALL_INTERNAL_UNREF(child, "propagate_cancel"); } child = next_child_call; } while (child != pc->first_child); @@ -1333,64 +1240,65 @@ static void post_batch_completion(grpc_exec_ctx *exec_ctx, if (call->is_client) { get_final_status(call, set_status_value_directly, call->final_op.client.status, - call->final_op.client.status_details); + call->final_op.client.status_details, + call->final_op.client.error_string); } else { get_final_status(call, set_cancelled_value, - call->final_op.server.cancelled, NULL); + call->final_op.server.cancelled, nullptr, nullptr); } GRPC_ERROR_UNREF(error); error = GRPC_ERROR_NONE; } + if (error != GRPC_ERROR_NONE && bctl->op.recv_message && + *call->receiving_buffer != nullptr) { + grpc_byte_buffer_destroy(*call->receiving_buffer); + *call->receiving_buffer = nullptr; + } if (bctl->completion_data.notify_tag.is_closure) { /* unrefs bctl->error */ - bctl->call = NULL; - GRPC_CLOSURE_RUN( - exec_ctx, (grpc_closure *)bctl->completion_data.notify_tag.tag, error); - GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion"); + bctl->call = nullptr; + GRPC_CLOSURE_RUN((grpc_closure*)bctl->completion_data.notify_tag.tag, + error); + GRPC_CALL_INTERNAL_UNREF(call, "completion"); } else { /* unrefs bctl->error */ - grpc_cq_end_op( - exec_ctx, bctl->call->cq, bctl->completion_data.notify_tag.tag, error, - finish_batch_completion, bctl, &bctl->completion_data.cq_completion); + grpc_cq_end_op(bctl->call->cq, bctl->completion_data.notify_tag.tag, error, + finish_batch_completion, bctl, + &bctl->completion_data.cq_completion); } } -static void finish_batch_step(grpc_exec_ctx *exec_ctx, batch_control *bctl) { +static void finish_batch_step(batch_control* bctl) { if (gpr_unref(&bctl->steps_to_complete)) { - post_batch_completion(exec_ctx, bctl); + post_batch_completion(bctl); } } -static void continue_receiving_slices(grpc_exec_ctx *exec_ctx, - batch_control *bctl) { - grpc_error *error; - grpc_call *call = bctl->call; +static void continue_receiving_slices(batch_control* bctl) { + grpc_error* error; + grpc_call* call = bctl->call; for (;;) { - size_t remaining = call->receiving_stream->length - + size_t remaining = call->receiving_stream->length() - (*call->receiving_buffer)->data.raw.slice_buffer.length; if (remaining == 0) { call->receiving_message = 0; - grpc_byte_stream_destroy(exec_ctx, call->receiving_stream); - call->receiving_stream = NULL; - finish_batch_step(exec_ctx, bctl); + call->receiving_stream.reset(); + finish_batch_step(bctl); return; } - if (grpc_byte_stream_next(exec_ctx, call->receiving_stream, remaining, - &call->receiving_slice_ready)) { - error = grpc_byte_stream_pull(exec_ctx, call->receiving_stream, - &call->receiving_slice); + if (call->receiving_stream->Next(remaining, &call->receiving_slice_ready)) { + error = call->receiving_stream->Pull(&call->receiving_slice); if (error == GRPC_ERROR_NONE) { grpc_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer, call->receiving_slice); } else { - grpc_byte_stream_destroy(exec_ctx, call->receiving_stream); - call->receiving_stream = NULL; + call->receiving_stream.reset(); 
grpc_byte_buffer_destroy(*call->receiving_buffer); - *call->receiving_buffer = NULL; + *call->receiving_buffer = nullptr; call->receiving_message = 0; - finish_batch_step(exec_ctx, bctl); + finish_batch_step(bctl); return; } } else { @@ -1399,228 +1307,193 @@ static void continue_receiving_slices(grpc_exec_ctx *exec_ctx, } } -static void receiving_slice_ready(grpc_exec_ctx *exec_ctx, void *bctlp, - grpc_error *error) { - batch_control *bctl = (batch_control *)bctlp; - grpc_call *call = bctl->call; - grpc_byte_stream *bs = call->receiving_stream; +static void receiving_slice_ready(void* bctlp, grpc_error* error) { + batch_control* bctl = static_cast<batch_control*>(bctlp); + grpc_call* call = bctl->call; bool release_error = false; if (error == GRPC_ERROR_NONE) { grpc_slice slice; - error = grpc_byte_stream_pull(exec_ctx, bs, &slice); + error = call->receiving_stream->Pull(&slice); if (error == GRPC_ERROR_NONE) { grpc_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer, slice); - continue_receiving_slices(exec_ctx, bctl); + continue_receiving_slices(bctl); } else { - /* Error returned by grpc_byte_stream_pull needs to be released manually - */ + /* Error returned by ByteStream::Pull() needs to be released manually */ release_error = true; } } if (error != GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_trace_operation_failures)) { + if (grpc_trace_operation_failures.enabled()) { GRPC_LOG_IF_ERROR("receiving_slice_ready", GRPC_ERROR_REF(error)); } - grpc_byte_stream_destroy(exec_ctx, call->receiving_stream); - call->receiving_stream = NULL; + call->receiving_stream.reset(); grpc_byte_buffer_destroy(*call->receiving_buffer); - *call->receiving_buffer = NULL; + *call->receiving_buffer = nullptr; call->receiving_message = 0; - finish_batch_step(exec_ctx, bctl); + finish_batch_step(bctl); if (release_error) { GRPC_ERROR_UNREF(error); } } } -static void process_data_after_md(grpc_exec_ctx *exec_ctx, - batch_control *bctl) { - grpc_call *call = bctl->call; - if (call->receiving_stream == NULL) { - *call->receiving_buffer = NULL; +static void process_data_after_md(batch_control* bctl) { + grpc_call* call = bctl->call; + if (call->receiving_stream == nullptr) { + *call->receiving_buffer = nullptr; call->receiving_message = 0; - finish_batch_step(exec_ctx, bctl); + finish_batch_step(bctl); } else { - call->test_only_last_message_flags = call->receiving_stream->flags; - if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) && - (call->incoming_compression_algorithm > GRPC_COMPRESS_NONE)) { - *call->receiving_buffer = grpc_raw_compressed_byte_buffer_create( - NULL, 0, call->incoming_compression_algorithm); + call->test_only_last_message_flags = call->receiving_stream->flags(); + if ((call->receiving_stream->flags() & GRPC_WRITE_INTERNAL_COMPRESS) && + (call->incoming_message_compression_algorithm > + GRPC_MESSAGE_COMPRESS_NONE)) { + grpc_compression_algorithm algo; + GPR_ASSERT( + grpc_compression_algorithm_from_message_stream_compression_algorithm( + &algo, call->incoming_message_compression_algorithm, + (grpc_stream_compression_algorithm)0)); + *call->receiving_buffer = + grpc_raw_compressed_byte_buffer_create(nullptr, 0, algo); } else { - *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0); + *call->receiving_buffer = grpc_raw_byte_buffer_create(nullptr, 0); } GRPC_CLOSURE_INIT(&call->receiving_slice_ready, receiving_slice_ready, bctl, grpc_schedule_on_exec_ctx); - continue_receiving_slices(exec_ctx, bctl); + continue_receiving_slices(bctl); } } -static void 
receiving_stream_ready(grpc_exec_ctx *exec_ctx, void *bctlp, - grpc_error *error) { - batch_control *bctl = (batch_control *)bctlp; - grpc_call *call = bctl->call; +static void receiving_stream_ready(void* bctlp, grpc_error* error) { + batch_control* bctl = static_cast<batch_control*>(bctlp); + grpc_call* call = bctl->call; if (error != GRPC_ERROR_NONE) { - if (call->receiving_stream != NULL) { - grpc_byte_stream_destroy(exec_ctx, call->receiving_stream); - call->receiving_stream = NULL; - } - add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), true); - cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_ERROR_REF(error)); + call->receiving_stream.reset(); + add_batch_error(bctl, GRPC_ERROR_REF(error), true); + cancel_with_error(call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error)); } /* If recv_state is RECV_NONE, we will save the batch_control * object with rel_cas, and will not use it after the cas. Its corresponding * acq_load is in receiving_initial_metadata_ready() */ - if (error != GRPC_ERROR_NONE || call->receiving_stream == NULL || + if (error != GRPC_ERROR_NONE || call->receiving_stream == nullptr || !gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) { - process_data_after_md(exec_ctx, bctl); + process_data_after_md(bctl); } } // The recv_message_ready callback used when sending a batch containing // a recv_message op down the filter stack. Yields the call combiner // before processing the received message. -static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx *exec_ctx, - void *bctlp, - grpc_error *error) { - batch_control *bctl = (batch_control *)bctlp; - grpc_call *call = bctl->call; - GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready"); - receiving_stream_ready(exec_ctx, bctlp, error); -} - -static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, - batch_control *bctl) { - grpc_call *call = bctl->call; - /* validate compression algorithms */ +static void receiving_stream_ready_in_call_combiner(void* bctlp, + grpc_error* error) { + batch_control* bctl = static_cast<batch_control*>(bctlp); + grpc_call* call = bctl->call; + GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_message_ready"); + receiving_stream_ready(bctlp, error); +} + +static void validate_filtered_metadata(batch_control* bctl) { + grpc_compression_algorithm compression_algorithm; + grpc_call* call = bctl->call; if (call->incoming_stream_compression_algorithm != - GRPC_STREAM_COMPRESS_NONE) { - const grpc_stream_compression_algorithm algo = - call->incoming_stream_compression_algorithm; - char *error_msg = NULL; - const grpc_compression_options compression_options = - grpc_channel_compression_options(call->channel); - if (algo >= GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) { - gpr_asprintf(&error_msg, - "Invalid stream compression algorithm value '%d'.", algo); - gpr_log(GPR_ERROR, "%s", error_msg); - cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_STATUS_UNIMPLEMENTED, error_msg); - } else if (grpc_compression_options_is_stream_compression_algorithm_enabled( - &compression_options, algo) == 0) { - /* check if algorithm is supported by current channel config */ - const char *algo_name = NULL; - grpc_stream_compression_algorithm_name(algo, &algo_name); - gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.", - algo_name); - gpr_log(GPR_ERROR, "%s", error_msg); - cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_STATUS_UNIMPLEMENTED, error_msg); - } + GRPC_STREAM_COMPRESS_NONE && + call->incoming_message_compression_algorithm != + 
GRPC_MESSAGE_COMPRESS_NONE) { + char* error_msg = nullptr; + gpr_asprintf(&error_msg, + "Incoming stream has both stream compression (%d) and message " + "compression (%d).", + call->incoming_stream_compression_algorithm, + call->incoming_message_compression_algorithm); + gpr_log(GPR_ERROR, "%s", error_msg); + cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_INTERNAL, + error_msg); gpr_free(error_msg); - - GPR_ASSERT(call->stream_encodings_accepted_by_peer != 0); - if (!GPR_BITGET(call->stream_encodings_accepted_by_peer, - call->incoming_stream_compression_algorithm)) { - if (GRPC_TRACER_ON(grpc_compression_trace)) { - const char *algo_name = NULL; - grpc_stream_compression_algorithm_name( - call->incoming_stream_compression_algorithm, &algo_name); - gpr_log( - GPR_ERROR, - "Stream compression algorithm (content-encoding = '%s') not " - "present in the bitset of accepted encodings (accept-encodings: " - "'0x%x')", - algo_name, call->stream_encodings_accepted_by_peer); - } - } - } else if (call->incoming_compression_algorithm != GRPC_COMPRESS_NONE) { - const grpc_compression_algorithm algo = - call->incoming_compression_algorithm; - char *error_msg = NULL; + } else if ( + grpc_compression_algorithm_from_message_stream_compression_algorithm( + &compression_algorithm, call->incoming_message_compression_algorithm, + call->incoming_stream_compression_algorithm) == 0) { + char* error_msg = nullptr; + gpr_asprintf(&error_msg, + "Error in incoming message compression (%d) or stream " + "compression (%d).", + call->incoming_stream_compression_algorithm, + call->incoming_message_compression_algorithm); + cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_INTERNAL, + error_msg); + gpr_free(error_msg); + } else { + char* error_msg = nullptr; const grpc_compression_options compression_options = grpc_channel_compression_options(call->channel); - /* check if algorithm is known */ - if (algo >= GRPC_COMPRESS_ALGORITHMS_COUNT) { + if (compression_algorithm >= GRPC_COMPRESS_ALGORITHMS_COUNT) { gpr_asprintf(&error_msg, "Invalid compression algorithm value '%d'.", - algo); + compression_algorithm); gpr_log(GPR_ERROR, "%s", error_msg); - cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_STATUS_UNIMPLEMENTED, error_msg); + cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED, + error_msg); } else if (grpc_compression_options_is_algorithm_enabled( - &compression_options, algo) == 0) { + &compression_options, compression_algorithm) == 0) { /* check if algorithm is supported by current channel config */ - const char *algo_name = NULL; - grpc_compression_algorithm_name(algo, &algo_name); + const char* algo_name = nullptr; + grpc_compression_algorithm_name(compression_algorithm, &algo_name); gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.", algo_name); gpr_log(GPR_ERROR, "%s", error_msg); - cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE, - GRPC_STATUS_UNIMPLEMENTED, error_msg); - } else { - call->incoming_compression_algorithm = algo; + cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED, + error_msg); } gpr_free(error_msg); GPR_ASSERT(call->encodings_accepted_by_peer != 0); - if (!GPR_BITGET(call->encodings_accepted_by_peer, - call->incoming_compression_algorithm)) { - if (GRPC_TRACER_ON(grpc_compression_trace)) { - const char *algo_name = NULL; - grpc_compression_algorithm_name(call->incoming_compression_algorithm, - &algo_name); + if (!GPR_BITGET(call->encodings_accepted_by_peer, compression_algorithm)) { + if 
(grpc_compression_trace.enabled()) { + const char* algo_name = nullptr; + grpc_compression_algorithm_name(compression_algorithm, &algo_name); gpr_log(GPR_ERROR, - "Compression algorithm (grpc-encoding = '%s') not present in " - "the bitset of accepted encodings (grpc-accept-encodings: " - "'0x%x')", + "Compression algorithm ('%s') not present in the bitset of " + "accepted encodings ('0x%x')", algo_name, call->encodings_accepted_by_peer); } } } } -static void add_batch_error(grpc_exec_ctx *exec_ctx, batch_control *bctl, - grpc_error *error, bool has_cancelled) { +static void add_batch_error(batch_control* bctl, grpc_error* error, + bool has_cancelled) { if (error == GRPC_ERROR_NONE) return; - int idx = (int)gpr_atm_full_fetch_add(&bctl->num_errors, 1); + int idx = static_cast(gpr_atm_full_fetch_add(&bctl->num_errors, 1)); if (idx == 0 && !has_cancelled) { - cancel_with_error(exec_ctx, bctl->call, STATUS_FROM_CORE, - GRPC_ERROR_REF(error)); + cancel_with_error(bctl->call, STATUS_FROM_CORE, GRPC_ERROR_REF(error)); } bctl->errors[idx] = error; } -static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, - void *bctlp, grpc_error *error) { - batch_control *bctl = (batch_control *)bctlp; - grpc_call *call = bctl->call; +static void receiving_initial_metadata_ready(void* bctlp, grpc_error* error) { + batch_control* bctl = static_cast(bctlp); + grpc_call* call = bctl->call; - GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, - "recv_initial_metadata_ready"); + GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_initial_metadata_ready"); - add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false); + add_batch_error(bctl, GRPC_ERROR_REF(error), false); if (error == GRPC_ERROR_NONE) { - grpc_metadata_batch *md = + grpc_metadata_batch* md = &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */]; - recv_initial_filter(exec_ctx, call, md); + recv_initial_filter(call, md); /* TODO(ctiller): this could be moved into recv_initial_filter now */ - GPR_TIMER_BEGIN("validate_filtered_metadata", 0); - validate_filtered_metadata(exec_ctx, bctl); - GPR_TIMER_END("validate_filtered_metadata", 0); - - if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) != - 0 && - !call->is_client) { - call->send_deadline = - gpr_convert_clock_type(md->deadline, GPR_CLOCK_MONOTONIC); + GPR_TIMER_SCOPE("validate_filtered_metadata", 0); + validate_filtered_metadata(bctl); + + if (md->deadline != GRPC_MILLIS_INF_FUTURE && !call->is_client) { + call->send_deadline = md->deadline; } } - grpc_closure *saved_rsr_closure = NULL; + grpc_closure* saved_rsr_closure = nullptr; while (true) { gpr_atm rsr_bctlp = gpr_atm_acq_load(&call->recv_state); /* Should only receive initial metadata once */ @@ -1637,70 +1510,68 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx, } } else { /* Already received messages */ - saved_rsr_closure = GRPC_CLOSURE_CREATE(receiving_stream_ready, - (batch_control *)rsr_bctlp, - grpc_schedule_on_exec_ctx); + saved_rsr_closure = + GRPC_CLOSURE_CREATE(receiving_stream_ready, (batch_control*)rsr_bctlp, + grpc_schedule_on_exec_ctx); /* No need to modify recv_state */ break; } } - if (saved_rsr_closure != NULL) { - GRPC_CLOSURE_RUN(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error)); + if (saved_rsr_closure != nullptr) { + GRPC_CLOSURE_RUN(saved_rsr_closure, GRPC_ERROR_REF(error)); } - finish_batch_step(exec_ctx, bctl); + finish_batch_step(bctl); } -static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, - grpc_error *error) { - batch_control 
*bctl = (batch_control *)bctlp; - grpc_call *call = bctl->call; - GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete"); - add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false); - finish_batch_step(exec_ctx, bctl); +static void finish_batch(void* bctlp, grpc_error* error) { + batch_control* bctl = static_cast(bctlp); + grpc_call* call = bctl->call; + GRPC_CALL_COMBINER_STOP(&call->call_combiner, "on_complete"); + add_batch_error(bctl, GRPC_ERROR_REF(error), false); + finish_batch_step(bctl); } -static void free_no_op_completion(grpc_exec_ctx *exec_ctx, void *p, - grpc_cq_completion *completion) { +static void free_no_op_completion(void* p, grpc_cq_completion* completion) { gpr_free(completion); } -static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, - grpc_call *call, const grpc_op *ops, - size_t nops, void *notify_tag, +static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops, + size_t nops, void* notify_tag, int is_notify_tag_closure) { + GPR_TIMER_SCOPE("call_start_batch", 0); + size_t i; - const grpc_op *op; - batch_control *bctl; + const grpc_op* op; + batch_control* bctl; int num_completion_callbacks_needed = 1; grpc_call_error error = GRPC_CALL_OK; - grpc_transport_stream_op_batch *stream_op; - grpc_transport_stream_op_batch_payload *stream_op_payload; + grpc_transport_stream_op_batch* stream_op; + grpc_transport_stream_op_batch_payload* stream_op_payload; - GPR_TIMER_BEGIN("grpc_call_start_batch", 0); GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag); if (nops == 0) { if (!is_notify_tag_closure) { GPR_ASSERT(grpc_cq_begin_op(call->cq, notify_tag)); - grpc_cq_end_op( - exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE, - free_no_op_completion, NULL, - (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion))); + grpc_cq_end_op(call->cq, notify_tag, GRPC_ERROR_NONE, + free_no_op_completion, nullptr, + static_cast( + gpr_malloc(sizeof(grpc_cq_completion)))); } else { - GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure *)notify_tag, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED((grpc_closure*)notify_tag, GRPC_ERROR_NONE); } error = GRPC_CALL_OK; goto done; } bctl = allocate_batch_control(call, ops, nops); - if (bctl == NULL) { + if (bctl == nullptr) { return GRPC_CALL_ERROR_TOO_MANY_OPERATIONS; } bctl->completion_data.notify_tag.tag = notify_tag; bctl->completion_data.notify_tag.is_closure = - (uint8_t)(is_notify_tag_closure != 0); + static_cast(is_notify_tag_closure != 0); stream_op = &bctl->op; stream_op_payload = &call->stream_op_payload; @@ -1708,7 +1579,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, /* rewrite batch ops into a transport op */ for (i = 0; i < nops; i++) { op = &ops[i]; - if (op->reserved != NULL) { + if (op->reserved != nullptr) { error = GRPC_CALL_ERROR; goto done_with_error; } @@ -1728,56 +1599,28 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, size_t additional_metadata_count = 0; grpc_compression_level effective_compression_level = GRPC_COMPRESS_LEVEL_NONE; - grpc_stream_compression_level effective_stream_compression_level = - GRPC_STREAM_COMPRESS_LEVEL_NONE; bool level_set = false; - bool stream_compression = false; - if (op->data.send_initial_metadata.maybe_stream_compression_level - .is_set) { - effective_stream_compression_level = - op->data.send_initial_metadata.maybe_stream_compression_level - .level; - level_set = true; - stream_compression = true; - } else if (op->data.send_initial_metadata.maybe_compression_level - .is_set) { + if 
(op->data.send_initial_metadata.maybe_compression_level.is_set) { effective_compression_level = op->data.send_initial_metadata.maybe_compression_level.level; level_set = true; } else { const grpc_compression_options copts = grpc_channel_compression_options(call->channel); - if (copts.default_stream_compression_level.is_set) { - level_set = true; - effective_stream_compression_level = - copts.default_stream_compression_level.level; - stream_compression = true; - } else if (copts.default_level.is_set) { + if (copts.default_level.is_set) { level_set = true; effective_compression_level = copts.default_level.level; } } if (level_set && !call->is_client) { - if (stream_compression) { - const grpc_stream_compression_algorithm calgo = - stream_compression_algorithm_for_level_locked( - call, effective_stream_compression_level); - call->compression_md.key = - GRPC_MDSTR_GRPC_INTERNAL_STREAM_ENCODING_REQUEST; - call->compression_md.value = - grpc_stream_compression_algorithm_slice(calgo); - } else { - const grpc_compression_algorithm calgo = - compression_algorithm_for_level_locked( - call, effective_compression_level); - /* the following will be picked up by the compress filter and used - * as the call's compression algorithm. */ - call->compression_md.key = - GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST; - call->compression_md.value = - grpc_compression_algorithm_slice(calgo); - additional_metadata_count++; - } + const grpc_compression_algorithm calgo = + compression_algorithm_for_level_locked( + call, effective_compression_level); + /* the following will be picked up by the compress filter and used + * as the call's compression algorithm. */ + call->compression_md.key = GRPC_MDSTR_GRPC_INTERNAL_ENCODING_REQUEST; + call->compression_md.value = grpc_compression_algorithm_slice(calgo); + additional_metadata_count++; } if (op->data.send_initial_metadata.count + additional_metadata_count > @@ -1788,9 +1631,10 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, stream_op->send_initial_metadata = true; call->sent_initial_metadata = true; if (!prepare_application_metadata( - exec_ctx, call, (int)op->data.send_initial_metadata.count, + call, static_cast(op->data.send_initial_metadata.count), op->data.send_initial_metadata.metadata, 0, call->is_client, - &call->compression_md, (int)additional_metadata_count)) { + &call->compression_md, + static_cast(additional_metadata_count))) { error = GRPC_CALL_ERROR_INVALID_METADATA; goto done_with_error; } @@ -1813,7 +1657,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, error = GRPC_CALL_ERROR_INVALID_FLAGS; goto done_with_error; } - if (op->data.send_message.send_message == NULL) { + if (op->data.send_message.send_message == nullptr) { error = GRPC_CALL_ERROR_INVALID_MESSAGE; goto done_with_error; } @@ -1821,21 +1665,20 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS; goto done_with_error; } - stream_op->send_message = true; - call->sending_message = true; - grpc_slice_buffer_stream_init( - &call->sending_stream, - &op->data.send_message.send_message->data.raw.slice_buffer, - op->flags); + uint32_t flags = op->flags; /* If the outgoing buffer is already compressed, mark it as so in the flags. These will be picked up by the compression filter and further (wasteful) attempts at compression skipped. 
*/ if (op->data.send_message.send_message->data.raw.compression > GRPC_COMPRESS_NONE) { - call->sending_stream.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS; + flags |= GRPC_WRITE_INTERNAL_COMPRESS; } - stream_op_payload->send_message.send_message = - &call->sending_stream.base; + stream_op->send_message = true; + call->sending_message = true; + call->sending_stream.Init( + &op->data.send_message.send_message->data.raw.slice_buffer, flags); + stream_op_payload->send_message.send_message.reset( + call->sending_stream.get()); break; } case GRPC_OP_SEND_CLOSE_FROM_CLIENT: { @@ -1882,36 +1725,37 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, GPR_ASSERT(call->send_extra_metadata_count == 0); call->send_extra_metadata_count = 1; call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem( - exec_ctx, call->channel, op->data.send_status_from_server.status); + call->channel, op->data.send_status_from_server.status); { - grpc_error *override_error = GRPC_ERROR_NONE; + grpc_error* override_error = GRPC_ERROR_NONE; if (op->data.send_status_from_server.status != GRPC_STATUS_OK) { - override_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Error from server send status"); + override_error = + error_from_status(op->data.send_status_from_server.status, + "Returned non-ok status"); } - if (op->data.send_status_from_server.status_details != NULL) { + if (op->data.send_status_from_server.status_details != nullptr) { call->send_extra_metadata[1].md = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_GRPC_MESSAGE, + GRPC_MDSTR_GRPC_MESSAGE, grpc_slice_ref_internal( *op->data.send_status_from_server.status_details)); call->send_extra_metadata_count++; - char *msg = grpc_slice_to_c_string( + char* msg = grpc_slice_to_c_string( GRPC_MDVALUE(call->send_extra_metadata[1].md)); override_error = grpc_error_set_str(override_error, GRPC_ERROR_STR_GRPC_MESSAGE, grpc_slice_from_copied_string(msg)); gpr_free(msg); } - set_status_from_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE, - override_error); + set_status_from_error(call, STATUS_FROM_API_OVERRIDE, override_error); } if (!prepare_application_metadata( - exec_ctx, call, - (int)op->data.send_status_from_server.trailing_metadata_count, - op->data.send_status_from_server.trailing_metadata, 1, 1, NULL, - 0)) { + call, + static_cast( + op->data.send_status_from_server.trailing_metadata_count), + op->data.send_status_from_server.trailing_metadata, 1, 1, + nullptr, 0)) { for (int n = 0; n < call->send_extra_metadata_count; n++) { - GRPC_MDELEM_UNREF(exec_ctx, call->send_extra_metadata[n].md); + GRPC_MDELEM_UNREF(call->send_extra_metadata[n].md); } call->send_extra_metadata_count = 0; error = GRPC_CALL_ERROR_INVALID_METADATA; @@ -1991,6 +1835,8 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, call->final_op.client.status = op->data.recv_status_on_client.status; call->final_op.client.status_details = op->data.recv_status_on_client.status_details; + call->final_op.client.error_string = + op->data.recv_status_on_client.error_string; stream_op->recv_trailing_metadata = true; stream_op->collect_stats = true; stream_op_payload->recv_trailing_metadata.recv_trailing_metadata = @@ -2038,25 +1884,24 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, stream_op->on_complete = &bctl->finish_batch; gpr_atm_rel_store(&call->any_ops_sent_atm, 1); - execute_batch(exec_ctx, call, stream_op, &bctl->start_batch); + execute_batch(call, stream_op, &bctl->start_batch); done: - GPR_TIMER_END("grpc_call_start_batch", 0); return error; 
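/* Note: the hunks above consistently drop the explicit grpc_exec_ctx* parameter
 * from every surface function and callback (receiving_stream_ready, add_batch_error,
 * call_start_batch, ...). What follows is only a minimal standalone sketch of the
 * underlying idea -- a scoped, thread-local execution context that callees reach
 * implicitly instead of through an argument -- using standard C++ and hypothetical
 * names (ExecCtxSketch, Get, Queue, Flush). It is not the actual grpc_core::ExecCtx
 * implementation; it just illustrates why the parameter can disappear from the
 * signatures rewritten in this patch. */
#include <cassert>
#include <functional>
#include <utility>
#include <vector>

class ExecCtxSketch {
 public:
  ExecCtxSketch() {
    assert(current_ == nullptr);  // one scoped context per thread at a time
    current_ = this;
  }
  ~ExecCtxSketch() {
    Flush();              // run work queued while the context was on the stack
    current_ = nullptr;
  }
  static ExecCtxSketch* Get() { return current_; }  // what callees use implicitly
  void Queue(std::function<void()> fn) { pending_.push_back(std::move(fn)); }
  void Flush() {
    while (!pending_.empty()) {
      std::function<void()> fn = std::move(pending_.back());
      pending_.pop_back();
      fn();
    }
  }

 private:
  static thread_local ExecCtxSketch* current_;
  std::vector<std::function<void()>> pending_;
};
thread_local ExecCtxSketch* ExecCtxSketch::current_ = nullptr;

/* Usage mirrors the shape of the rewritten entry points: the public API function
 * instantiates the context on its own stack, and everything it calls can defer
 * work via ExecCtxSketch::Get() without an extra parameter being threaded through. */
void some_entry_point() {
  ExecCtxSketch exec_ctx;
  ExecCtxSketch::Get()->Queue([] { /* deferred closure */ });
}  // queued closures are flushed when exec_ctx leaves scope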
done_with_error: /* reverse any mutations that occured */ if (stream_op->send_initial_metadata) { call->sent_initial_metadata = false; - grpc_metadata_batch_clear(exec_ctx, &call->metadata_batch[0][0]); + grpc_metadata_batch_clear(&call->metadata_batch[0][0]); } if (stream_op->send_message) { call->sending_message = false; - grpc_byte_stream_destroy(exec_ctx, &call->sending_stream.base); + call->sending_stream->Orphan(); } if (stream_op->send_trailing_metadata) { call->sent_final_op = false; - grpc_metadata_batch_clear(exec_ctx, &call->metadata_batch[0][1]); + grpc_metadata_batch_clear(&call->metadata_batch[0][1]); } if (stream_op->recv_initial_metadata) { call->received_initial_metadata = false; @@ -2070,9 +1915,9 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx, goto done; } -grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, - size_t nops, void *tag, void *reserved) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +grpc_call_error grpc_call_start_batch(grpc_call* call, const grpc_op* ops, + size_t nops, void* tag, void* reserved) { + grpc_core::ExecCtx exec_ctx; grpc_call_error err; GRPC_API_TRACE( @@ -2080,26 +1925,24 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops, "reserved=%p)", 5, (call, ops, (unsigned long)nops, tag, reserved)); - if (reserved != NULL) { + if (reserved != nullptr) { err = GRPC_CALL_ERROR; } else { - err = call_start_batch(&exec_ctx, call, ops, nops, tag, 0); + err = call_start_batch(call, ops, nops, tag, 0); } - grpc_exec_ctx_finish(&exec_ctx); return err; } -grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx, - grpc_call *call, - const grpc_op *ops, +grpc_call_error grpc_call_start_batch_and_execute(grpc_call* call, + const grpc_op* ops, size_t nops, - grpc_closure *closure) { - return call_start_batch(exec_ctx, call, ops, nops, closure, 1); + grpc_closure* closure) { + return call_start_batch(call, ops, nops, closure, 1); } -void grpc_call_context_set(grpc_call *call, grpc_context_index elem, - void *value, void (*destroy)(void *value)) { +void grpc_call_context_set(grpc_call* call, grpc_context_index elem, + void* value, void (*destroy)(void* value)) { if (call->context[elem].destroy) { call->context[elem].destroy(call->context[elem].value); } @@ -2107,20 +1950,20 @@ void grpc_call_context_set(grpc_call *call, grpc_context_index elem, call->context[elem].destroy = destroy; } -void *grpc_call_context_get(grpc_call *call, grpc_context_index elem) { +void* grpc_call_context_get(grpc_call* call, grpc_context_index elem) { return call->context[elem].value; } -uint8_t grpc_call_is_client(grpc_call *call) { return call->is_client; } +uint8_t grpc_call_is_client(grpc_call* call) { return call->is_client; } grpc_compression_algorithm grpc_call_compression_for_level( - grpc_call *call, grpc_compression_level level) { + grpc_call* call, grpc_compression_level level) { grpc_compression_algorithm algo = compression_algorithm_for_level_locked(call, level); return algo; } -const char *grpc_call_error_to_string(grpc_call_error error) { +const char* grpc_call_error_to_string(grpc_call_error error) { switch (error) { case GRPC_CALL_ERROR: return "GRPC_CALL_ERROR"; diff --git a/Sources/CgRPC/src/core/lib/surface/call.h b/Sources/CgRPC/src/core/lib/surface/call.h index c680139cf..793cce4ef 100644 --- a/Sources/CgRPC/src/core/lib/surface/call.h +++ b/Sources/CgRPC/src/core/lib/surface/call.h @@ -19,9 +19,7 @@ #ifndef GRPC_CORE_LIB_SURFACE_CALL_H #define GRPC_CORE_LIB_SURFACE_CALL_H -#ifdef 
__cplusplus -extern "C" { -#endif +#include #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/channel/context.h" @@ -30,93 +28,82 @@ extern "C" { #include #include -typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx *exec_ctx, - grpc_call *call, int success, - void *user_data); +typedef void (*grpc_ioreq_completion_func)(grpc_call* call, int success, + void* user_data); typedef struct grpc_call_create_args { - grpc_channel *channel; + grpc_channel* channel; - grpc_call *parent; + grpc_call* parent; uint32_t propagation_mask; - grpc_completion_queue *cq; + grpc_completion_queue* cq; /* if not NULL, it'll be used in lieu of cq */ - grpc_pollset_set *pollset_set_alternative; + grpc_pollset_set* pollset_set_alternative; - const void *server_transport_data; + const void* server_transport_data; - grpc_mdelem *add_initial_metadata; + grpc_mdelem* add_initial_metadata; size_t add_initial_metadata_count; - gpr_timespec send_deadline; + grpc_millis send_deadline; } grpc_call_create_args; /* Create a new call based on \a args. Regardless of success or failure, always returns a valid new call into *call */ -grpc_error *grpc_call_create(grpc_exec_ctx *exec_ctx, - const grpc_call_create_args *args, - grpc_call **call); +grpc_error* grpc_call_create(const grpc_call_create_args* args, + grpc_call** call); -void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call, - grpc_completion_queue *cq); +void grpc_call_set_completion_queue(grpc_call* call, grpc_completion_queue* cq); #ifndef NDEBUG -void grpc_call_internal_ref(grpc_call *call, const char *reason); -void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call, - const char *reason); +void grpc_call_internal_ref(grpc_call* call, const char* reason); +void grpc_call_internal_unref(grpc_call* call, const char* reason); #define GRPC_CALL_INTERNAL_REF(call, reason) \ grpc_call_internal_ref(call, reason) -#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \ - grpc_call_internal_unref(exec_ctx, call, reason) +#define GRPC_CALL_INTERNAL_UNREF(call, reason) \ + grpc_call_internal_unref(call, reason) #else -void grpc_call_internal_ref(grpc_call *call); -void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *call); +void grpc_call_internal_ref(grpc_call* call); +void grpc_call_internal_unref(grpc_call* call); #define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call) -#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \ - grpc_call_internal_unref(exec_ctx, call) +#define GRPC_CALL_INTERNAL_UNREF(call, reason) grpc_call_internal_unref(call) #endif -grpc_call_stack *grpc_call_get_call_stack(grpc_call *call); +grpc_call_stack* grpc_call_get_call_stack(grpc_call* call); -grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx *exec_ctx, - grpc_call *call, - const grpc_op *ops, +grpc_call_error grpc_call_start_batch_and_execute(grpc_call* call, + const grpc_op* ops, size_t nops, - grpc_closure *closure); + grpc_closure* closure); /* Given the top call_element, get the call object. */ -grpc_call *grpc_call_from_top_element(grpc_call_element *surface_element); +grpc_call* grpc_call_from_top_element(grpc_call_element* surface_element); -void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity, - grpc_call *call, const grpc_op *ops, size_t nops, - void *tag); +void grpc_call_log_batch(const char* file, int line, gpr_log_severity severity, + grpc_call* call, const grpc_op* ops, size_t nops, + void* tag); /* Set a context pointer. 
No thread safety guarantees are made wrt this value. */ /* TODO(#9731): add exec_ctx to destroy */ -void grpc_call_context_set(grpc_call *call, grpc_context_index elem, - void *value, void (*destroy)(void *value)); +void grpc_call_context_set(grpc_call* call, grpc_context_index elem, + void* value, void (*destroy)(void* value)); /* Get a context pointer. */ -void *grpc_call_context_get(grpc_call *call, grpc_context_index elem); +void* grpc_call_context_get(grpc_call* call, grpc_context_index elem); #define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \ - if (GRPC_TRACER_ON(grpc_api_trace)) \ - grpc_call_log_batch(sev, call, ops, nops, tag) + if (grpc_api_trace.enabled()) grpc_call_log_batch(sev, call, ops, nops, tag) -uint8_t grpc_call_is_client(grpc_call *call); +uint8_t grpc_call_is_client(grpc_call* call); /* Return an appropriate compression algorithm for the requested compression \a * level in the context of \a call. */ grpc_compression_algorithm grpc_call_compression_for_level( - grpc_call *call, grpc_compression_level level); - -extern grpc_tracer_flag grpc_call_error_trace; -extern grpc_tracer_flag grpc_compression_trace; + grpc_call* call, grpc_compression_level level); -#ifdef __cplusplus -} -#endif +extern grpc_core::TraceFlag grpc_call_error_trace; +extern grpc_core::TraceFlag grpc_compression_trace; #endif /* GRPC_CORE_LIB_SURFACE_CALL_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/call_details.c b/Sources/CgRPC/src/core/lib/surface/call_details.cc similarity index 86% rename from Sources/CgRPC/src/core/lib/surface/call_details.c rename to Sources/CgRPC/src/core/lib/surface/call_details.cc index ea9208c7e..7f20b1dae 100644 --- a/Sources/CgRPC/src/core/lib/surface/call_details.c +++ b/Sources/CgRPC/src/core/lib/surface/call_details.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -34,8 +36,7 @@ void grpc_call_details_init(grpc_call_details* cd) { void grpc_call_details_destroy(grpc_call_details* cd) { GRPC_API_TRACE("grpc_call_details_destroy(cd=%p)", 1, (cd)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_slice_unref_internal(&exec_ctx, cd->method); - grpc_slice_unref_internal(&exec_ctx, cd->host); - grpc_exec_ctx_finish(&exec_ctx); + grpc_core::ExecCtx exec_ctx; + grpc_slice_unref_internal(cd->method); + grpc_slice_unref_internal(cd->host); } diff --git a/Sources/CgRPC/src/core/lib/surface/call_log_batch.c b/Sources/CgRPC/src/core/lib/surface/call_log_batch.cc similarity index 86% rename from Sources/CgRPC/src/core/lib/surface/call_log_batch.c rename to Sources/CgRPC/src/core/lib/surface/call_log_batch.cc index 4a1c26581..f0c82c035 100644 --- a/Sources/CgRPC/src/core/lib/surface/call_log_batch.c +++ b/Sources/CgRPC/src/core/lib/surface/call_log_batch.cc @@ -16,16 +16,20 @@ * */ +#include + #include "src/core/lib/surface/call.h" +#include + #include #include +#include "src/core/lib/gpr/string.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" -static void add_metadata(gpr_strvec *b, const grpc_metadata *md, size_t count) { +static void add_metadata(gpr_strvec* b, const grpc_metadata* md, size_t count) { size_t i; - if (md == NULL) { + if (md == nullptr) { gpr_strvec_add(b, gpr_strdup("(nil)")); return; } @@ -39,9 +43,9 @@ static void add_metadata(gpr_strvec *b, const grpc_metadata *md, size_t count) { } } -char *grpc_op_string(const grpc_op *op) { - char *tmp; - char *out; +char* grpc_op_string(const grpc_op* op) { + char* tmp; + char* out; gpr_strvec b; gpr_strvec_init(&b); @@ -64,7 +68,7 @@ char 
*grpc_op_string(const grpc_op *op) { gpr_asprintf(&tmp, "SEND_STATUS_FROM_SERVER status=%d details=", op->data.send_status_from_server.status); gpr_strvec_add(&b, tmp); - if (op->data.send_status_from_server.status_details != NULL) { + if (op->data.send_status_from_server.status_details != nullptr) { gpr_strvec_add(&b, grpc_dump_slice( *op->data.send_status_from_server.status_details, GPR_DUMP_ASCII)); @@ -97,16 +101,16 @@ char *grpc_op_string(const grpc_op *op) { op->data.recv_close_on_server.cancelled); gpr_strvec_add(&b, tmp); } - out = gpr_strvec_flatten(&b, NULL); + out = gpr_strvec_flatten(&b, nullptr); gpr_strvec_destroy(&b); return out; } -void grpc_call_log_batch(const char *file, int line, gpr_log_severity severity, - grpc_call *call, const grpc_op *ops, size_t nops, - void *tag) { - char *tmp; +void grpc_call_log_batch(const char* file, int line, gpr_log_severity severity, + grpc_call* call, const grpc_op* ops, size_t nops, + void* tag) { + char* tmp; size_t i; for (i = 0; i < nops; i++) { tmp = grpc_op_string(&ops[i]); diff --git a/Sources/CgRPC/src/core/lib/surface/call_test_only.h b/Sources/CgRPC/src/core/lib/surface/call_test_only.h index a5a01b367..dbd1a866c 100644 --- a/Sources/CgRPC/src/core/lib/surface/call_test_only.h +++ b/Sources/CgRPC/src/core/lib/surface/call_test_only.h @@ -19,43 +19,25 @@ #ifndef GRPC_CORE_LIB_SURFACE_CALL_TEST_ONLY_H #define GRPC_CORE_LIB_SURFACE_CALL_TEST_ONLY_H -#include +#include -#ifdef __cplusplus -extern "C" { -#endif +#include -/** Return the compression algorithm from \a call. +/** Return the message compression algorithm from \a call. * * \warning This function should \b only be used in test code. */ grpc_compression_algorithm grpc_call_test_only_get_compression_algorithm( - grpc_call *call); + grpc_call* call); /** Return the message flags from \a call. * * \warning This function should \b only be used in test code. */ -uint32_t grpc_call_test_only_get_message_flags(grpc_call *call); +uint32_t grpc_call_test_only_get_message_flags(grpc_call* call); /** Returns a bitset for the encodings (compression algorithms) supported by \a * call's peer. * * To be indexed by grpc_compression_algorithm enum values. */ -uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call *call); - -/** Returns a bitset for the stream encodings (stream compression algorithms) - * supported by \a call's peer. - * - * To be indexed by grpc_stream_compression_algorithm enum values. */ -uint32_t grpc_call_test_only_get_stream_encodings_accepted_by_peer( - grpc_call *call); - -/** Returns the incoming stream compression algorithm (content-encoding header) - * received by a call. */ -grpc_stream_compression_algorithm -grpc_call_test_only_get_incoming_stream_encodings(grpc_call *call); - -#ifdef __cplusplus -} -#endif +uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call* call); #endif /* GRPC_CORE_LIB_SURFACE_CALL_TEST_ONLY_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/channel.c b/Sources/CgRPC/src/core/lib/surface/channel.c deleted file mode 100644 index 48962e5e4..000000000 --- a/Sources/CgRPC/src/core/lib/surface/channel.c +++ /dev/null @@ -1,454 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/surface/channel.h" - -#include -#include - -#include -#include -#include -#include - -#include "src/core/lib/channel/channel_args.h" -#include "src/core/lib/debug/stats.h" -#include "src/core/lib/iomgr/iomgr.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/string.h" -#include "src/core/lib/surface/api_trace.h" -#include "src/core/lib/surface/call.h" -#include "src/core/lib/surface/channel_init.h" -#include "src/core/lib/transport/static_metadata.h" - -/** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS. - * Avoids needing to take a metadata context lock for sending status - * if the status code is <= NUM_CACHED_STATUS_ELEMS. - * Sized to allow the most commonly used codes to fit in - * (OK, Cancelled, Unknown). */ -#define NUM_CACHED_STATUS_ELEMS 3 - -typedef struct registered_call { - grpc_mdelem path; - grpc_mdelem authority; - struct registered_call *next; -} registered_call; - -struct grpc_channel { - int is_client; - grpc_compression_options compression_options; - grpc_mdelem default_authority; - - gpr_atm call_size_estimate; - - gpr_mu registered_call_mu; - registered_call *registered_calls; - - char *target; -}; - -#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1)) -#define CHANNEL_FROM_CHANNEL_STACK(channel_stack) \ - (((grpc_channel *)(channel_stack)) - 1) -#define CHANNEL_FROM_TOP_ELEM(top_elem) \ - CHANNEL_FROM_CHANNEL_STACK(grpc_channel_stack_from_top_element(top_elem)) - -static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error); - -grpc_channel *grpc_channel_create_with_builder( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, - grpc_channel_stack_type channel_stack_type) { - char *target = gpr_strdup(grpc_channel_stack_builder_get_target(builder)); - grpc_channel_args *args = grpc_channel_args_copy( - grpc_channel_stack_builder_get_channel_arguments(builder)); - grpc_channel *channel; - if (channel_stack_type == GRPC_SERVER_CHANNEL) { - GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx); - } else { - GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx); - } - grpc_error *error = grpc_channel_stack_builder_finish( - exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, NULL, - (void **)&channel); - if (error != GRPC_ERROR_NONE) { - gpr_log(GPR_ERROR, "channel stack builder failed: %s", - grpc_error_string(error)); - GRPC_ERROR_UNREF(error); - gpr_free(target); - goto done; - } - - memset(channel, 0, sizeof(*channel)); - channel->target = target; - channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type); - gpr_mu_init(&channel->registered_call_mu); - channel->registered_calls = NULL; - - gpr_atm_no_barrier_store( - &channel->call_size_estimate, - (gpr_atm)CHANNEL_STACK_FROM_CHANNEL(channel)->call_stack_size); - - grpc_compression_options_init(&channel->compression_options); - for (size_t i = 0; i < args->num_args; i++) { - if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) { - if (args->args[i].type != GRPC_ARG_STRING) { - gpr_log(GPR_ERROR, "%s ignored: it must be a string", - 
GRPC_ARG_DEFAULT_AUTHORITY); - } else { - if (!GRPC_MDISNULL(channel->default_authority)) { - /* setting this takes precedence over anything else */ - GRPC_MDELEM_UNREF(exec_ctx, channel->default_authority); - } - channel->default_authority = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_AUTHORITY, - grpc_slice_intern( - grpc_slice_from_static_string(args->args[i].value.string))); - } - } else if (0 == - strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) { - if (args->args[i].type != GRPC_ARG_STRING) { - gpr_log(GPR_ERROR, "%s ignored: it must be a string", - GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); - } else { - if (!GRPC_MDISNULL(channel->default_authority)) { - /* other ways of setting this (notably ssl) take precedence */ - gpr_log(GPR_ERROR, - "%s ignored: default host already set some other way", - GRPC_SSL_TARGET_NAME_OVERRIDE_ARG); - } else { - channel->default_authority = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_AUTHORITY, - grpc_slice_intern( - grpc_slice_from_static_string(args->args[i].value.string))); - } - } - } else if (0 == strcmp(args->args[i].key, - GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) { - channel->compression_options.default_level.is_set = true; - channel->compression_options.default_level.level = - (grpc_compression_level)grpc_channel_arg_get_integer( - &args->args[i], - (grpc_integer_options){GRPC_COMPRESS_LEVEL_NONE, - GRPC_COMPRESS_LEVEL_NONE, - GRPC_COMPRESS_LEVEL_COUNT - 1}); - } else if (0 == strcmp(args->args[i].key, - GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) { - channel->compression_options.default_stream_compression_level.is_set = - true; - channel->compression_options.default_stream_compression_level.level = - (grpc_stream_compression_level)grpc_channel_arg_get_integer( - &args->args[i], - (grpc_integer_options){GRPC_STREAM_COMPRESS_LEVEL_NONE, - GRPC_STREAM_COMPRESS_LEVEL_NONE, - GRPC_STREAM_COMPRESS_LEVEL_COUNT - 1}); - } else if (0 == strcmp(args->args[i].key, - GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) { - channel->compression_options.default_algorithm.is_set = true; - channel->compression_options.default_algorithm.algorithm = - (grpc_compression_algorithm)grpc_channel_arg_get_integer( - &args->args[i], - (grpc_integer_options){GRPC_COMPRESS_NONE, GRPC_COMPRESS_NONE, - GRPC_COMPRESS_ALGORITHMS_COUNT - 1}); - } else if (0 == strcmp(args->args[i].key, - GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) { - channel->compression_options.default_stream_compression_algorithm.is_set = - true; - channel->compression_options.default_stream_compression_algorithm - .algorithm = - (grpc_stream_compression_algorithm)grpc_channel_arg_get_integer( - &args->args[i], - (grpc_integer_options){ - GRPC_STREAM_COMPRESS_NONE, GRPC_STREAM_COMPRESS_NONE, - GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT - 1}); - } else if (0 == - strcmp(args->args[i].key, - GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) { - channel->compression_options.enabled_algorithms_bitset = - (uint32_t)args->args[i].value.integer | - 0x1; /* always support no compression */ - } else if (0 == - strcmp( - args->args[i].key, - GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) { - channel->compression_options - .enabled_stream_compression_algorithms_bitset = - (uint32_t)args->args[i].value.integer | - 0x1; /* always support no compression */ - } - } - -done: - grpc_channel_args_destroy(exec_ctx, args); - return channel; -} - -grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target, - const grpc_channel_args *input_args, - grpc_channel_stack_type 
channel_stack_type, - grpc_transport *optional_transport) { - grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create(); - grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder, - input_args); - grpc_channel_stack_builder_set_target(builder, target); - grpc_channel_stack_builder_set_transport(builder, optional_transport); - if (!grpc_channel_init_create_stack(exec_ctx, builder, channel_stack_type)) { - grpc_channel_stack_builder_destroy(exec_ctx, builder); - return NULL; - } - return grpc_channel_create_with_builder(exec_ctx, builder, - channel_stack_type); -} - -size_t grpc_channel_get_call_size_estimate(grpc_channel *channel) { -#define ROUND_UP_SIZE 256 - /* We round up our current estimate to the NEXT value of ROUND_UP_SIZE. - This ensures: - 1. a consistent size allocation when our estimate is drifting slowly - (which is common) - which tends to help most allocators reuse memory - 2. a small amount of allowed growth over the estimate without hitting - the arena size doubling case, reducing overall memory usage */ - return ((size_t)gpr_atm_no_barrier_load(&channel->call_size_estimate) + - 2 * ROUND_UP_SIZE) & - ~(size_t)(ROUND_UP_SIZE - 1); -} - -void grpc_channel_update_call_size_estimate(grpc_channel *channel, - size_t size) { - size_t cur = (size_t)gpr_atm_no_barrier_load(&channel->call_size_estimate); - if (cur < size) { - /* size grew: update estimate */ - gpr_atm_no_barrier_cas(&channel->call_size_estimate, (gpr_atm)cur, - (gpr_atm)size); - /* if we lose: never mind, something else will likely update soon enough */ - } else if (cur == size) { - /* no change: holding pattern */ - } else if (cur > 0) { - /* size shrank: decrease estimate */ - gpr_atm_no_barrier_cas( - &channel->call_size_estimate, (gpr_atm)cur, - (gpr_atm)(GPR_MIN(cur - 1, (255 * cur + size) / 256))); - /* if we lose: never mind, something else will likely update soon enough */ - } -} - -char *grpc_channel_get_target(grpc_channel *channel) { - GRPC_API_TRACE("grpc_channel_get_target(channel=%p)", 1, (channel)); - return gpr_strdup(channel->target); -} - -void grpc_channel_get_info(grpc_channel *channel, - const grpc_channel_info *channel_info) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_channel_element *elem = - grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0); - elem->filter->get_channel_info(&exec_ctx, elem, channel_info); - grpc_exec_ctx_finish(&exec_ctx); -} - -static grpc_call *grpc_channel_create_call_internal( - grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call, - uint32_t propagation_mask, grpc_completion_queue *cq, - grpc_pollset_set *pollset_set_alternative, grpc_mdelem path_mdelem, - grpc_mdelem authority_mdelem, gpr_timespec deadline) { - grpc_mdelem send_metadata[2]; - size_t num_metadata = 0; - - GPR_ASSERT(channel->is_client); - GPR_ASSERT(!(cq != NULL && pollset_set_alternative != NULL)); - - send_metadata[num_metadata++] = path_mdelem; - if (!GRPC_MDISNULL(authority_mdelem)) { - send_metadata[num_metadata++] = authority_mdelem; - } else if (!GRPC_MDISNULL(channel->default_authority)) { - send_metadata[num_metadata++] = GRPC_MDELEM_REF(channel->default_authority); - } - - grpc_call_create_args args; - memset(&args, 0, sizeof(args)); - args.channel = channel; - args.parent = parent_call; - args.propagation_mask = propagation_mask; - args.cq = cq; - args.pollset_set_alternative = pollset_set_alternative; - args.server_transport_data = NULL; - args.add_initial_metadata = send_metadata; - args.add_initial_metadata_count = 
num_metadata; - args.send_deadline = deadline; - - grpc_call *call; - GRPC_LOG_IF_ERROR("call_create", grpc_call_create(exec_ctx, &args, &call)); - return call; -} - -grpc_call *grpc_channel_create_call(grpc_channel *channel, - grpc_call *parent_call, - uint32_t propagation_mask, - grpc_completion_queue *cq, - grpc_slice method, const grpc_slice *host, - gpr_timespec deadline, void *reserved) { - GPR_ASSERT(!reserved); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_call *call = grpc_channel_create_call_internal( - &exec_ctx, channel, parent_call, propagation_mask, cq, NULL, - grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_PATH, - grpc_slice_ref_internal(method)), - host != NULL ? grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_AUTHORITY, - grpc_slice_ref_internal(*host)) - : GRPC_MDNULL, - deadline); - grpc_exec_ctx_finish(&exec_ctx); - return call; -} - -grpc_call *grpc_channel_create_pollset_set_call( - grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call, - uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method, - const grpc_slice *host, gpr_timespec deadline, void *reserved) { - GPR_ASSERT(!reserved); - return grpc_channel_create_call_internal( - exec_ctx, channel, parent_call, propagation_mask, NULL, pollset_set, - grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH, - grpc_slice_ref_internal(method)), - host != NULL ? grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_AUTHORITY, - grpc_slice_ref_internal(*host)) - : GRPC_MDNULL, - deadline); -} - -void *grpc_channel_register_call(grpc_channel *channel, const char *method, - const char *host, void *reserved) { - registered_call *rc = (registered_call *)gpr_malloc(sizeof(registered_call)); - GRPC_API_TRACE( - "grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)", - 4, (channel, method, host, reserved)); - GPR_ASSERT(!reserved); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - - rc->path = grpc_mdelem_from_slices( - &exec_ctx, GRPC_MDSTR_PATH, - grpc_slice_intern(grpc_slice_from_static_string(method))); - rc->authority = - host ? 
grpc_mdelem_from_slices( - &exec_ctx, GRPC_MDSTR_AUTHORITY, - grpc_slice_intern(grpc_slice_from_static_string(host))) - : GRPC_MDNULL; - gpr_mu_lock(&channel->registered_call_mu); - rc->next = channel->registered_calls; - channel->registered_calls = rc; - gpr_mu_unlock(&channel->registered_call_mu); - grpc_exec_ctx_finish(&exec_ctx); - return rc; -} - -grpc_call *grpc_channel_create_registered_call( - grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask, - grpc_completion_queue *completion_queue, void *registered_call_handle, - gpr_timespec deadline, void *reserved) { - registered_call *rc = (registered_call *)registered_call_handle; - GRPC_API_TRACE( - "grpc_channel_create_registered_call(" - "channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, " - "registered_call_handle=%p, " - "deadline=gpr_timespec { tv_sec: %" PRId64 - ", tv_nsec: %d, clock_type: %d }, " - "reserved=%p)", - 9, (channel, parent_call, (unsigned)propagation_mask, completion_queue, - registered_call_handle, deadline.tv_sec, deadline.tv_nsec, - (int)deadline.clock_type, reserved)); - GPR_ASSERT(!reserved); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_call *call = grpc_channel_create_call_internal( - &exec_ctx, channel, parent_call, propagation_mask, completion_queue, NULL, - GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority), deadline); - grpc_exec_ctx_finish(&exec_ctx); - return call; -} - -#ifndef NDEBUG -#define REF_REASON reason -#define REF_ARG , const char *reason -#else -#define REF_REASON "" -#define REF_ARG -#endif -void grpc_channel_internal_ref(grpc_channel *c REF_ARG) { - GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON); -} - -void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, - grpc_channel *c REF_ARG) { - GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON); -} - -static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_channel *channel = (grpc_channel *)arg; - grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel)); - while (channel->registered_calls) { - registered_call *rc = channel->registered_calls; - channel->registered_calls = rc->next; - GRPC_MDELEM_UNREF(exec_ctx, rc->path); - GRPC_MDELEM_UNREF(exec_ctx, rc->authority); - gpr_free(rc); - } - GRPC_MDELEM_UNREF(exec_ctx, channel->default_authority); - gpr_mu_destroy(&channel->registered_call_mu); - gpr_free(channel->target); - gpr_free(channel); -} - -void grpc_channel_destroy(grpc_channel *channel) { - grpc_transport_op *op = grpc_make_transport_op(NULL); - grpc_channel_element *elem; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel)); - op->disconnect_with_error = - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Destroyed"); - elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0); - elem->filter->start_transport_op(&exec_ctx, elem, op); - - GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "channel"); - - grpc_exec_ctx_finish(&exec_ctx); -} - -grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel) { - return CHANNEL_STACK_FROM_CHANNEL(channel); -} - -grpc_compression_options grpc_channel_compression_options( - const grpc_channel *channel) { - return channel->compression_options; -} - -grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx, - grpc_channel *channel, int i) { - char tmp[GPR_LTOA_MIN_BUFSIZE]; - switch (i) { - case 0: - return GRPC_MDELEM_GRPC_STATUS_0; - case 1: - 
return GRPC_MDELEM_GRPC_STATUS_1; - case 2: - return GRPC_MDELEM_GRPC_STATUS_2; - } - gpr_ltoa(i, tmp); - return grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_STATUS, - grpc_slice_from_copied_string(tmp)); -} diff --git a/Sources/CgRPC/src/core/lib/surface/channel.cc b/Sources/CgRPC/src/core/lib/surface/channel.cc new file mode 100644 index 000000000..d740ebd41 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/surface/channel.cc @@ -0,0 +1,450 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/surface/channel.h" + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/channel/channel_trace.h" +#include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/manual_constructor.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/iomgr.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/surface/api_trace.h" +#include "src/core/lib/surface/call.h" +#include "src/core/lib/surface/channel_init.h" +#include "src/core/lib/transport/static_metadata.h" + +/** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS. + * Avoids needing to take a metadata context lock for sending status + * if the status code is <= NUM_CACHED_STATUS_ELEMS. + * Sized to allow the most commonly used codes to fit in + * (OK, Cancelled, Unknown). 
*/ +#define NUM_CACHED_STATUS_ELEMS 3 + +typedef struct registered_call { + grpc_mdelem path; + grpc_mdelem authority; + struct registered_call* next; +} registered_call; + +struct grpc_channel { + int is_client; + grpc_compression_options compression_options; + + gpr_atm call_size_estimate; + + gpr_mu registered_call_mu; + registered_call* registered_calls; + + grpc_core::RefCountedPtr tracer; + + char* target; +}; + +#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack*)((c) + 1)) +#define CHANNEL_FROM_CHANNEL_STACK(channel_stack) \ + (((grpc_channel*)(channel_stack)) - 1) +#define CHANNEL_FROM_TOP_ELEM(top_elem) \ + CHANNEL_FROM_CHANNEL_STACK(grpc_channel_stack_from_top_element(top_elem)) + +static void destroy_channel(void* arg, grpc_error* error); + +grpc_channel* grpc_channel_create_with_builder( + grpc_channel_stack_builder* builder, + grpc_channel_stack_type channel_stack_type) { + char* target = gpr_strdup(grpc_channel_stack_builder_get_target(builder)); + grpc_channel_args* args = grpc_channel_args_copy( + grpc_channel_stack_builder_get_channel_arguments(builder)); + grpc_channel* channel; + if (channel_stack_type == GRPC_SERVER_CHANNEL) { + GRPC_STATS_INC_SERVER_CHANNELS_CREATED(); + } else { + GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(); + } + grpc_error* error = grpc_channel_stack_builder_finish( + builder, sizeof(grpc_channel), 1, destroy_channel, nullptr, + reinterpret_cast(&channel)); + if (error != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "channel stack builder failed: %s", + grpc_error_string(error)); + GRPC_ERROR_UNREF(error); + gpr_free(target); + grpc_channel_args_destroy(args); + return channel; + } + + memset(channel, 0, sizeof(*channel)); + channel->target = target; + channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type); + size_t channel_tracer_max_nodes = 0; // default to off + gpr_mu_init(&channel->registered_call_mu); + channel->registered_calls = nullptr; + + gpr_atm_no_barrier_store( + &channel->call_size_estimate, + (gpr_atm)CHANNEL_STACK_FROM_CHANNEL(channel)->call_stack_size); + + grpc_compression_options_init(&channel->compression_options); + for (size_t i = 0; i < args->num_args; i++) { + if (0 == + strcmp(args->args[i].key, GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) { + channel->compression_options.default_level.is_set = true; + channel->compression_options.default_level.level = + static_cast(grpc_channel_arg_get_integer( + &args->args[i], + {GRPC_COMPRESS_LEVEL_NONE, GRPC_COMPRESS_LEVEL_NONE, + GRPC_COMPRESS_LEVEL_COUNT - 1})); + } else if (0 == strcmp(args->args[i].key, + GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) { + channel->compression_options.default_algorithm.is_set = true; + channel->compression_options.default_algorithm.algorithm = + static_cast(grpc_channel_arg_get_integer( + &args->args[i], {GRPC_COMPRESS_NONE, GRPC_COMPRESS_NONE, + GRPC_COMPRESS_ALGORITHMS_COUNT - 1})); + } else if (0 == + strcmp(args->args[i].key, + GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) { + channel->compression_options.enabled_algorithms_bitset = + static_cast(args->args[i].value.integer) | + 0x1; /* always support no compression */ + } else if (0 == strcmp(args->args[i].key, + GRPC_ARG_MAX_CHANNEL_TRACE_EVENTS_PER_NODE)) { + GPR_ASSERT(channel_tracer_max_nodes == 0); + // max_nodes defaults to 0 (which is off), clamped between 0 and INT_MAX + const grpc_integer_options options = {0, 0, INT_MAX}; + channel_tracer_max_nodes = + (size_t)grpc_channel_arg_get_integer(&args->args[i], options); + } + } + + grpc_channel_args_destroy(args); + 
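/* Note: the channel-argument loop above funnels each integer arg through
 * grpc_channel_arg_get_integer() with a {default, min, max} options initializer
 * (for example {0, 0, INT_MAX} for the trace-node count, "0 means off"). A small
 * self-contained sketch of that clamp-with-default idea, using hypothetical names
 * (IntOptions, clamped_int_arg) rather than the real gRPC helpers or types. */
#include <climits>
#include <cstdio>

struct IntOptions {
  int default_value;  // used when the arg is absent (or not an integer)
  int min_value;
  int max_value;
};

static int clamped_int_arg(bool present, int raw_value, IntOptions opts) {
  if (!present) return opts.default_value;
  if (raw_value < opts.min_value) return opts.min_value;
  if (raw_value > opts.max_value) return opts.max_value;
  return raw_value;
}

int main() {
  const IntOptions trace_nodes = {0, 0, INT_MAX};  // mirrors the initializer above
  printf("%d\n", clamped_int_arg(false, 123, trace_nodes));  // 0: absent -> default (off)
  printf("%d\n", clamped_int_arg(true, -5, trace_nodes));    // 0: clamped to min
  printf("%d\n", clamped_int_arg(true, 42, trace_nodes));    // 42: in range, kept
  return 0;
}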
channel->tracer = grpc_core::MakeRefCounted( + channel_tracer_max_nodes); + channel->tracer->AddTraceEvent( + grpc_core::ChannelTrace::Severity::Info, + grpc_slice_from_static_string("Channel created")); + return channel; +} + +static grpc_core::UniquePtr get_default_authority( + const grpc_channel_args* input_args) { + bool has_default_authority = false; + char* ssl_override = nullptr; + grpc_core::UniquePtr default_authority; + const size_t num_args = input_args != nullptr ? input_args->num_args : 0; + for (size_t i = 0; i < num_args; ++i) { + if (0 == strcmp(input_args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) { + has_default_authority = true; + } else if (0 == strcmp(input_args->args[i].key, + GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) { + ssl_override = grpc_channel_arg_get_string(&input_args->args[i]); + } + } + if (!has_default_authority && ssl_override != nullptr) { + default_authority.reset(gpr_strdup(ssl_override)); + } + return default_authority; +} + +static grpc_channel_args* build_channel_args( + const grpc_channel_args* input_args, char* default_authority) { + grpc_arg new_args[1]; + size_t num_new_args = 0; + if (default_authority != nullptr) { + new_args[num_new_args++] = grpc_channel_arg_string_create( + const_cast(GRPC_ARG_DEFAULT_AUTHORITY), default_authority); + } + return grpc_channel_args_copy_and_add(input_args, new_args, num_new_args); +} + +char* grpc_channel_get_trace(grpc_channel* channel) { + return channel->tracer->RenderTrace(); +} + +intptr_t grpc_channel_get_uuid(grpc_channel* channel) { + return channel->tracer->GetUuid(); +} + +grpc_channel* grpc_channel_create(const char* target, + const grpc_channel_args* input_args, + grpc_channel_stack_type channel_stack_type, + grpc_transport* optional_transport) { + grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create(); + const grpc_core::UniquePtr default_authority = + get_default_authority(input_args); + grpc_channel_args* args = + build_channel_args(input_args, default_authority.get()); + grpc_channel_stack_builder_set_channel_arguments(builder, args); + grpc_channel_args_destroy(args); + grpc_channel_stack_builder_set_target(builder, target); + grpc_channel_stack_builder_set_transport(builder, optional_transport); + if (!grpc_channel_init_create_stack(builder, channel_stack_type)) { + grpc_channel_stack_builder_destroy(builder); + return nullptr; + } + return grpc_channel_create_with_builder(builder, channel_stack_type); +} + +size_t grpc_channel_get_call_size_estimate(grpc_channel* channel) { +#define ROUND_UP_SIZE 256 + /* We round up our current estimate to the NEXT value of ROUND_UP_SIZE. + This ensures: + 1. a consistent size allocation when our estimate is drifting slowly + (which is common) - which tends to help most allocators reuse memory + 2. 
a small amount of allowed growth over the estimate without hitting + the arena size doubling case, reducing overall memory usage */ + return (static_cast( + gpr_atm_no_barrier_load(&channel->call_size_estimate)) + + 2 * ROUND_UP_SIZE) & + ~static_cast(ROUND_UP_SIZE - 1); +} + +void grpc_channel_update_call_size_estimate(grpc_channel* channel, + size_t size) { + size_t cur = static_cast( + gpr_atm_no_barrier_load(&channel->call_size_estimate)); + if (cur < size) { + /* size grew: update estimate */ + gpr_atm_no_barrier_cas(&channel->call_size_estimate, + static_cast(cur), + static_cast(size)); + /* if we lose: never mind, something else will likely update soon enough */ + } else if (cur == size) { + /* no change: holding pattern */ + } else if (cur > 0) { + /* size shrank: decrease estimate */ + gpr_atm_no_barrier_cas( + &channel->call_size_estimate, static_cast(cur), + static_cast(GPR_MIN(cur - 1, (255 * cur + size) / 256))); + /* if we lose: never mind, something else will likely update soon enough */ + } +} + +char* grpc_channel_get_target(grpc_channel* channel) { + GRPC_API_TRACE("grpc_channel_get_target(channel=%p)", 1, (channel)); + return gpr_strdup(channel->target); +} + +void grpc_channel_get_info(grpc_channel* channel, + const grpc_channel_info* channel_info) { + grpc_core::ExecCtx exec_ctx; + grpc_channel_element* elem = + grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0); + elem->filter->get_channel_info(elem, channel_info); +} + +static grpc_call* grpc_channel_create_call_internal( + grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask, + grpc_completion_queue* cq, grpc_pollset_set* pollset_set_alternative, + grpc_mdelem path_mdelem, grpc_mdelem authority_mdelem, + grpc_millis deadline) { + grpc_mdelem send_metadata[2]; + size_t num_metadata = 0; + + GPR_ASSERT(channel->is_client); + GPR_ASSERT(!(cq != nullptr && pollset_set_alternative != nullptr)); + + send_metadata[num_metadata++] = path_mdelem; + if (!GRPC_MDISNULL(authority_mdelem)) { + send_metadata[num_metadata++] = authority_mdelem; + } + + grpc_call_create_args args; + memset(&args, 0, sizeof(args)); + args.channel = channel; + args.parent = parent_call; + args.propagation_mask = propagation_mask; + args.cq = cq; + args.pollset_set_alternative = pollset_set_alternative; + args.server_transport_data = nullptr; + args.add_initial_metadata = send_metadata; + args.add_initial_metadata_count = num_metadata; + args.send_deadline = deadline; + + grpc_call* call; + GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call)); + return call; +} + +grpc_call* grpc_channel_create_call(grpc_channel* channel, + grpc_call* parent_call, + uint32_t propagation_mask, + grpc_completion_queue* cq, + grpc_slice method, const grpc_slice* host, + gpr_timespec deadline, void* reserved) { + GPR_ASSERT(!reserved); + grpc_core::ExecCtx exec_ctx; + grpc_call* call = grpc_channel_create_call_internal( + channel, parent_call, propagation_mask, cq, nullptr, + grpc_mdelem_from_slices(GRPC_MDSTR_PATH, grpc_slice_ref_internal(method)), + host != nullptr ? 
grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY, + grpc_slice_ref_internal(*host)) + : GRPC_MDNULL, + grpc_timespec_to_millis_round_up(deadline)); + + return call; +} + +grpc_call* grpc_channel_create_pollset_set_call( + grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask, + grpc_pollset_set* pollset_set, grpc_slice method, const grpc_slice* host, + grpc_millis deadline, void* reserved) { + GPR_ASSERT(!reserved); + return grpc_channel_create_call_internal( + channel, parent_call, propagation_mask, nullptr, pollset_set, + grpc_mdelem_from_slices(GRPC_MDSTR_PATH, grpc_slice_ref_internal(method)), + host != nullptr ? grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY, + grpc_slice_ref_internal(*host)) + : GRPC_MDNULL, + deadline); +} + +void* grpc_channel_register_call(grpc_channel* channel, const char* method, + const char* host, void* reserved) { + registered_call* rc = + static_cast(gpr_malloc(sizeof(registered_call))); + GRPC_API_TRACE( + "grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)", + 4, (channel, method, host, reserved)); + GPR_ASSERT(!reserved); + grpc_core::ExecCtx exec_ctx; + + rc->path = grpc_mdelem_from_slices( + GRPC_MDSTR_PATH, + grpc_slice_intern(grpc_slice_from_static_string(method))); + rc->authority = + host ? grpc_mdelem_from_slices( + GRPC_MDSTR_AUTHORITY, + grpc_slice_intern(grpc_slice_from_static_string(host))) + : GRPC_MDNULL; + gpr_mu_lock(&channel->registered_call_mu); + rc->next = channel->registered_calls; + channel->registered_calls = rc; + gpr_mu_unlock(&channel->registered_call_mu); + + return rc; +} + +grpc_call* grpc_channel_create_registered_call( + grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask, + grpc_completion_queue* completion_queue, void* registered_call_handle, + gpr_timespec deadline, void* reserved) { + registered_call* rc = static_cast(registered_call_handle); + GRPC_API_TRACE( + "grpc_channel_create_registered_call(" + "channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, " + "registered_call_handle=%p, " + "deadline=gpr_timespec { tv_sec: %" PRId64 + ", tv_nsec: %d, clock_type: %d }, " + "reserved=%p)", + 9, + (channel, parent_call, (unsigned)propagation_mask, completion_queue, + registered_call_handle, deadline.tv_sec, deadline.tv_nsec, + (int)deadline.clock_type, reserved)); + GPR_ASSERT(!reserved); + grpc_core::ExecCtx exec_ctx; + grpc_call* call = grpc_channel_create_call_internal( + channel, parent_call, propagation_mask, completion_queue, nullptr, + GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority), + grpc_timespec_to_millis_round_up(deadline)); + + return call; +} + +#ifndef NDEBUG +#define REF_REASON reason +#define REF_ARG , const char* reason +#else +#define REF_REASON "" +#define REF_ARG +#endif +void grpc_channel_internal_ref(grpc_channel* c REF_ARG) { + GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON); +} + +void grpc_channel_internal_unref(grpc_channel* c REF_ARG) { + GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON); +} + +static void destroy_channel(void* arg, grpc_error* error) { + grpc_channel* channel = static_cast(arg); + grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel)); + while (channel->registered_calls) { + registered_call* rc = channel->registered_calls; + channel->registered_calls = rc->next; + GRPC_MDELEM_UNREF(rc->path); + GRPC_MDELEM_UNREF(rc->authority); + gpr_free(rc); + } + channel->tracer.reset(); + gpr_mu_destroy(&channel->registered_call_mu); + gpr_free(channel->target); + 
gpr_free(channel); +} + +void grpc_channel_destroy(grpc_channel* channel) { + grpc_transport_op* op = grpc_make_transport_op(nullptr); + grpc_channel_element* elem; + grpc_core::ExecCtx exec_ctx; + GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel)); + op->disconnect_with_error = + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Destroyed"); + elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0); + elem->filter->start_transport_op(elem, op); + + GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel"); +} + +grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel) { + return CHANNEL_STACK_FROM_CHANNEL(channel); +} + +grpc_compression_options grpc_channel_compression_options( + const grpc_channel* channel) { + return channel->compression_options; +} + +grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_channel* channel, int i) { + char tmp[GPR_LTOA_MIN_BUFSIZE]; + switch (i) { + case 0: + return GRPC_MDELEM_GRPC_STATUS_0; + case 1: + return GRPC_MDELEM_GRPC_STATUS_1; + case 2: + return GRPC_MDELEM_GRPC_STATUS_2; + } + gpr_ltoa(i, tmp); + return grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_STATUS, + grpc_slice_from_copied_string(tmp)); +} diff --git a/Sources/CgRPC/src/core/lib/surface/channel.h b/Sources/CgRPC/src/core/lib/surface/channel.h index 528bb868e..288313951 100644 --- a/Sources/CgRPC/src/core/lib/surface/channel.h +++ b/Sources/CgRPC/src/core/lib/surface/channel.h @@ -19,17 +19,19 @@ #ifndef GRPC_CORE_LIB_SURFACE_CHANNEL_H #define GRPC_CORE_LIB_SURFACE_CHANNEL_H +#include + #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/channel/channel_stack_builder.h" #include "src/core/lib/surface/channel_stack_type.h" -grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target, - const grpc_channel_args *args, +grpc_channel* grpc_channel_create(const char* target, + const grpc_channel_args* args, grpc_channel_stack_type channel_stack_type, - grpc_transport *optional_transport); + grpc_transport* optional_transport); -grpc_channel *grpc_channel_create_with_builder( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, +grpc_channel* grpc_channel_create_with_builder( + grpc_channel_stack_builder* builder, grpc_channel_stack_type channel_stack_type); /** Create a call given a grpc_channel, in order to call \a method. @@ -40,45 +42,42 @@ grpc_channel *grpc_channel_create_with_builder( non-NULL, it must be a server-side call. It will be used to propagate properties from the server call to this new client call, depending on the value of \a propagation_mask (see propagation_bits.h for possible values) */ -grpc_call *grpc_channel_create_pollset_set_call( - grpc_exec_ctx *exec_ctx, grpc_channel *channel, grpc_call *parent_call, - uint32_t propagation_mask, grpc_pollset_set *pollset_set, grpc_slice method, - const grpc_slice *host, gpr_timespec deadline, void *reserved); +grpc_call* grpc_channel_create_pollset_set_call( + grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask, + grpc_pollset_set* pollset_set, grpc_slice method, const grpc_slice* host, + grpc_millis deadline, void* reserved); /** Get a (borrowed) pointer to this channels underlying channel stack */ -grpc_channel_stack *grpc_channel_get_channel_stack(grpc_channel *channel); +grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel); /** Get a grpc_mdelem of grpc-status: X where X is the numeric value of status_code. The returned elem is owned by the caller. 
*/ -grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx *exec_ctx, - grpc_channel *channel, +grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_channel* channel, int status_code); -size_t grpc_channel_get_call_size_estimate(grpc_channel *channel); -void grpc_channel_update_call_size_estimate(grpc_channel *channel, size_t size); +size_t grpc_channel_get_call_size_estimate(grpc_channel* channel); +void grpc_channel_update_call_size_estimate(grpc_channel* channel, size_t size); #ifndef NDEBUG -void grpc_channel_internal_ref(grpc_channel *channel, const char *reason); -void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel, - const char *reason); +void grpc_channel_internal_ref(grpc_channel* channel, const char* reason); +void grpc_channel_internal_unref(grpc_channel* channel, const char* reason); #define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \ grpc_channel_internal_ref(channel, reason) -#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \ - grpc_channel_internal_unref(exec_ctx, channel, reason) +#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \ + grpc_channel_internal_unref(channel, reason) #else -void grpc_channel_internal_ref(grpc_channel *channel); -void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, - grpc_channel *channel); +void grpc_channel_internal_ref(grpc_channel* channel); +void grpc_channel_internal_unref(grpc_channel* channel); #define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \ grpc_channel_internal_ref(channel) -#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \ - grpc_channel_internal_unref(exec_ctx, channel) +#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \ + grpc_channel_internal_unref(channel) #endif /** Return the channel's compression options. */ grpc_compression_options grpc_channel_compression_options( - const grpc_channel *channel); + const grpc_channel* channel); #endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/channel_init.c b/Sources/CgRPC/src/core/lib/surface/channel_init.cc similarity index 74% rename from Sources/CgRPC/src/core/lib/surface/channel_init.c rename to Sources/CgRPC/src/core/lib/surface/channel_init.cc index 33f444b89..62eb1c3f9 100644 --- a/Sources/CgRPC/src/core/lib/surface/channel_init.c +++ b/Sources/CgRPC/src/core/lib/surface/channel_init.cc @@ -16,20 +16,21 @@ * */ +#include + #include "src/core/lib/surface/channel_init.h" #include -#include typedef struct stage_slot { grpc_channel_init_stage fn; - void *arg; + void* arg; int priority; size_t insertion_order; } stage_slot; typedef struct stage_slots { - stage_slot *slots; + stage_slot* slots; size_t num_slots; size_t cap_slots; } stage_slots; @@ -39,7 +40,7 @@ static bool g_finalized; void grpc_channel_init_init(void) { for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) { - g_slots[i].slots = NULL; + g_slots[i].slots = nullptr; g_slots[i].num_slots = 0; g_slots[i].cap_slots = 0; } @@ -49,24 +50,24 @@ void grpc_channel_init_init(void) { void grpc_channel_init_register_stage(grpc_channel_stack_type type, int priority, grpc_channel_init_stage stage, - void *stage_arg) { + void* stage_arg) { GPR_ASSERT(!g_finalized); if (g_slots[type].cap_slots == g_slots[type].num_slots) { g_slots[type].cap_slots = GPR_MAX(8, 3 * g_slots[type].cap_slots / 2); - g_slots[type].slots = (stage_slot *)gpr_realloc( - g_slots[type].slots, - g_slots[type].cap_slots * sizeof(*g_slots[type].slots)); + g_slots[type].slots = static_cast( + gpr_realloc(g_slots[type].slots, + 
g_slots[type].cap_slots * sizeof(*g_slots[type].slots))); } - stage_slot *s = &g_slots[type].slots[g_slots[type].num_slots++]; + stage_slot* s = &g_slots[type].slots[g_slots[type].num_slots++]; s->insertion_order = g_slots[type].num_slots; s->priority = priority; s->fn = stage; s->arg = stage_arg; } -static int compare_slots(const void *a, const void *b) { - const stage_slot *sa = (const stage_slot *)a; - const stage_slot *sb = (const stage_slot *)b; +static int compare_slots(const void* a, const void* b) { + const stage_slot* sa = static_cast(a); + const stage_slot* sb = static_cast(b); int c = GPR_ICMP(sa->priority, sb->priority); if (c != 0) return c; @@ -85,12 +86,12 @@ void grpc_channel_init_finalize(void) { void grpc_channel_init_shutdown(void) { for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) { gpr_free(g_slots[i].slots); - g_slots[i].slots = (stage_slot *)(void *)(uintptr_t)0xdeadbeef; + g_slots[i].slots = + static_cast((void*)static_cast(0xdeadbeef)); } } -bool grpc_channel_init_create_stack(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, +bool grpc_channel_init_create_stack(grpc_channel_stack_builder* builder, grpc_channel_stack_type type) { GPR_ASSERT(g_finalized); @@ -98,8 +99,8 @@ bool grpc_channel_init_create_stack(grpc_exec_ctx *exec_ctx, grpc_channel_stack_type_string(type)); for (size_t i = 0; i < g_slots[type].num_slots; i++) { - const stage_slot *slot = &g_slots[type].slots[i]; - if (!slot->fn(exec_ctx, builder, slot->arg)) { + const stage_slot* slot = &g_slots[type].slots[i]; + if (!slot->fn(builder, slot->arg)) { return false; } } diff --git a/Sources/CgRPC/src/core/lib/surface/channel_init.h b/Sources/CgRPC/src/core/lib/surface/channel_init.h index 5f109332a..f01852473 100644 --- a/Sources/CgRPC/src/core/lib/surface/channel_init.h +++ b/Sources/CgRPC/src/core/lib/surface/channel_init.h @@ -19,16 +19,14 @@ #ifndef GRPC_CORE_LIB_SURFACE_CHANNEL_INIT_H #define GRPC_CORE_LIB_SURFACE_CHANNEL_INIT_H +#include + #include "src/core/lib/channel/channel_stack_builder.h" #include "src/core/lib/surface/channel_stack_type.h" #include "src/core/lib/transport/transport.h" #define GRPC_CHANNEL_INIT_BUILTIN_PRIORITY 10000 -#ifdef __cplusplus -extern "C" { -#endif - /// This module provides a way for plugins (and the grpc core library itself) /// to register mutators for channel stacks. /// It also provides a universal entry path to run those mutators to build @@ -36,9 +34,8 @@ extern "C" { /// One stage of mutation: call functions against \a builder to influence the /// finally constructed channel stack -typedef bool (*grpc_channel_init_stage)(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, - void *arg); +typedef bool (*grpc_channel_init_stage)(grpc_channel_stack_builder* builder, + void* arg); /// Global initialization of the system void grpc_channel_init_init(void); @@ -51,7 +48,7 @@ void grpc_channel_init_init(void); void grpc_channel_init_register_stage(grpc_channel_stack_type type, int priority, grpc_channel_init_stage stage_fn, - void *stage_arg); + void* stage_arg); /// Finalize registration. No more calls to grpc_channel_init_register_stage are /// allowed. 
@@ -70,12 +67,7 @@ void grpc_channel_init_shutdown(void); /// \a optional_transport is either NULL or a constructed transport object /// Returns a pointer to the base of the memory allocated (the actual channel /// stack object will be prefix_bytes past that pointer) -bool grpc_channel_init_create_stack(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, +bool grpc_channel_init_create_stack(grpc_channel_stack_builder* builder, grpc_channel_stack_type type); -#ifdef __cplusplus -} -#endif - #endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_INIT_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/channel_ping.c b/Sources/CgRPC/src/core/lib/surface/channel_ping.cc similarity index 60% rename from Sources/CgRPC/src/core/lib/surface/channel_ping.c rename to Sources/CgRPC/src/core/lib/surface/channel_ping.cc index f45b56895..bae945942 100644 --- a/Sources/CgRPC/src/core/lib/surface/channel_ping.c +++ b/Sources/CgRPC/src/core/lib/surface/channel_ping.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/surface/channel.h" #include @@ -28,38 +30,36 @@ typedef struct { grpc_closure closure; - void *tag; - grpc_completion_queue *cq; + void* tag; + grpc_completion_queue* cq; grpc_cq_completion completion_storage; } ping_result; -static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg, - grpc_cq_completion *storage) { +static void ping_destroy(void* arg, grpc_cq_completion* storage) { gpr_free(arg); } -static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { - ping_result *pr = (ping_result *)arg; - grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy, - pr, &pr->completion_storage); +static void ping_done(void* arg, grpc_error* error) { + ping_result* pr = static_cast(arg); + grpc_cq_end_op(pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy, pr, + &pr->completion_storage); } -void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq, - void *tag, void *reserved) { +void grpc_channel_ping(grpc_channel* channel, grpc_completion_queue* cq, + void* tag, void* reserved) { GRPC_API_TRACE("grpc_channel_ping(channel=%p, cq=%p, tag=%p, reserved=%p)", 4, (channel, cq, tag, reserved)); - grpc_transport_op *op = grpc_make_transport_op(NULL); - ping_result *pr = (ping_result *)gpr_malloc(sizeof(*pr)); - grpc_channel_element *top_elem = + grpc_transport_op* op = grpc_make_transport_op(nullptr); + ping_result* pr = static_cast(gpr_malloc(sizeof(*pr))); + grpc_channel_element* top_elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GPR_ASSERT(reserved == NULL); + grpc_core::ExecCtx exec_ctx; + GPR_ASSERT(reserved == nullptr); pr->tag = tag; pr->cq = cq; GRPC_CLOSURE_INIT(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx); - op->send_ping = &pr->closure; + op->send_ping.on_ack = &pr->closure; op->bind_pollset = grpc_cq_pollset(cq); GPR_ASSERT(grpc_cq_begin_op(cq, tag)); - top_elem->filter->start_transport_op(&exec_ctx, top_elem, op); - grpc_exec_ctx_finish(&exec_ctx); + top_elem->filter->start_transport_op(top_elem, op); } diff --git a/Sources/CgRPC/src/core/lib/surface/channel_stack_type.c b/Sources/CgRPC/src/core/lib/surface/channel_stack_type.cc similarity index 96% rename from Sources/CgRPC/src/core/lib/surface/channel_stack_type.c rename to Sources/CgRPC/src/core/lib/surface/channel_stack_type.cc index 5f5c87772..fcf96ddc9 100644 --- a/Sources/CgRPC/src/core/lib/surface/channel_stack_type.c +++ b/Sources/CgRPC/src/core/lib/surface/channel_stack_type.cc @@ 
-16,10 +16,11 @@ * */ -#include "src/core/lib/surface/channel_stack_type.h" -#include #include +#include +#include "src/core/lib/surface/channel_stack_type.h" + bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type) { switch (type) { case GRPC_CLIENT_CHANNEL: @@ -38,7 +39,7 @@ bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type) { GPR_UNREACHABLE_CODE(return true;); } -const char *grpc_channel_stack_type_string(grpc_channel_stack_type type) { +const char* grpc_channel_stack_type_string(grpc_channel_stack_type type) { switch (type) { case GRPC_CLIENT_CHANNEL: return "CLIENT_CHANNEL"; diff --git a/Sources/CgRPC/src/core/lib/surface/channel_stack_type.h b/Sources/CgRPC/src/core/lib/surface/channel_stack_type.h index 3f0e14ffc..8a3c08e1c 100644 --- a/Sources/CgRPC/src/core/lib/surface/channel_stack_type.h +++ b/Sources/CgRPC/src/core/lib/surface/channel_stack_type.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SURFACE_CHANNEL_STACK_TYPE_H #define GRPC_CORE_LIB_SURFACE_CHANNEL_STACK_TYPE_H +#include + #include typedef enum { @@ -40,6 +42,6 @@ typedef enum { bool grpc_channel_stack_type_is_client(grpc_channel_stack_type type); -const char *grpc_channel_stack_type_string(grpc_channel_stack_type type); +const char* grpc_channel_stack_type_string(grpc_channel_stack_type type); #endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_STACK_TYPE_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/completion_queue.c b/Sources/CgRPC/src/core/lib/surface/completion_queue.c deleted file mode 100644 index fed66e3a2..000000000 --- a/Sources/CgRPC/src/core/lib/surface/completion_queue.c +++ /dev/null @@ -1,1249 +0,0 @@ -/* - * - * Copyright 2015-2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -#include "src/core/lib/surface/completion_queue.h" - -#include -#include - -#include -#include -#include -#include -#include - -#include "src/core/lib/debug/stats.h" -#include "src/core/lib/iomgr/pollset.h" -#include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/support/spinlock.h" -#include "src/core/lib/support/string.h" -#include "src/core/lib/surface/api_trace.h" -#include "src/core/lib/surface/call.h" -#include "src/core/lib/surface/event_string.h" - -grpc_tracer_flag grpc_trace_operation_failures = - GRPC_TRACER_INITIALIZER(false, "op_failure"); -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_pending_tags = - GRPC_TRACER_INITIALIZER(false, "pending_tags"); -grpc_tracer_flag grpc_trace_cq_refcount = - GRPC_TRACER_INITIALIZER(false, "cq_refcount"); -#endif - -typedef struct { - grpc_pollset_worker **worker; - void *tag; -} plucker; - -typedef struct { - bool can_get_pollset; - bool can_listen; - size_t (*size)(void); - void (*init)(grpc_pollset *pollset, gpr_mu **mu); - grpc_error *(*kick)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *specific_worker); - grpc_error *(*work)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker **worker, gpr_timespec now, - gpr_timespec deadline); - void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_closure *closure); - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset); -} cq_poller_vtable; - -typedef struct non_polling_worker { - gpr_cv cv; - bool kicked; - struct non_polling_worker *next; - struct non_polling_worker *prev; -} non_polling_worker; - -typedef struct { - gpr_mu mu; - non_polling_worker *root; - grpc_closure *shutdown; -} non_polling_poller; - -static size_t non_polling_poller_size(void) { - return sizeof(non_polling_poller); -} - -static void non_polling_poller_init(grpc_pollset *pollset, gpr_mu **mu) { - non_polling_poller *npp = (non_polling_poller *)pollset; - gpr_mu_init(&npp->mu); - *mu = &npp->mu; -} - -static void non_polling_poller_destroy(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset) { - non_polling_poller *npp = (non_polling_poller *)pollset; - gpr_mu_destroy(&npp->mu); -} - -static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, - grpc_pollset_worker **worker, - gpr_timespec now, - gpr_timespec deadline) { - non_polling_poller *npp = (non_polling_poller *)pollset; - if (npp->shutdown) return GRPC_ERROR_NONE; - non_polling_worker w; - gpr_cv_init(&w.cv); - if (worker != NULL) *worker = (grpc_pollset_worker *)&w; - if (npp->root == NULL) { - npp->root = w.next = w.prev = &w; - } else { - w.next = npp->root; - w.prev = w.next->prev; - w.next->prev = w.prev->next = &w; - } - w.kicked = false; - while (!npp->shutdown && !w.kicked && !gpr_cv_wait(&w.cv, &npp->mu, deadline)) - ; - if (&w == npp->root) { - npp->root = w.next; - if (&w == npp->root) { - if (npp->shutdown) { - GRPC_CLOSURE_SCHED(exec_ctx, npp->shutdown, GRPC_ERROR_NONE); - } - npp->root = NULL; - } - } - w.next->prev = w.prev; - w.prev->next = w.next; - gpr_cv_destroy(&w.cv); - if (worker != NULL) *worker = NULL; - return GRPC_ERROR_NONE; -} - -static grpc_error *non_polling_poller_kick( - grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, - grpc_pollset_worker *specific_worker) { - non_polling_poller *p = (non_polling_poller *)pollset; - if (specific_worker == NULL) specific_worker = (grpc_pollset_worker *)p->root; - if (specific_worker != NULL) { - non_polling_worker *w = (non_polling_worker 
*)specific_worker; - if (!w->kicked) { - w->kicked = true; - gpr_cv_signal(&w->cv); - } - } - return GRPC_ERROR_NONE; -} - -static void non_polling_poller_shutdown(grpc_exec_ctx *exec_ctx, - grpc_pollset *pollset, - grpc_closure *closure) { - non_polling_poller *p = (non_polling_poller *)pollset; - GPR_ASSERT(closure != NULL); - p->shutdown = closure; - if (p->root == NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE); - } else { - non_polling_worker *w = p->root; - do { - gpr_cv_signal(&w->cv); - w = w->next; - } while (w != p->root); - } -} - -static const cq_poller_vtable g_poller_vtable_by_poller_type[] = { - /* GRPC_CQ_DEFAULT_POLLING */ - {.can_get_pollset = true, - .can_listen = true, - .size = grpc_pollset_size, - .init = grpc_pollset_init, - .kick = grpc_pollset_kick, - .work = grpc_pollset_work, - .shutdown = grpc_pollset_shutdown, - .destroy = grpc_pollset_destroy}, - /* GRPC_CQ_NON_LISTENING */ - {.can_get_pollset = true, - .can_listen = false, - .size = grpc_pollset_size, - .init = grpc_pollset_init, - .kick = grpc_pollset_kick, - .work = grpc_pollset_work, - .shutdown = grpc_pollset_shutdown, - .destroy = grpc_pollset_destroy}, - /* GRPC_CQ_NON_POLLING */ - {.can_get_pollset = false, - .can_listen = false, - .size = non_polling_poller_size, - .init = non_polling_poller_init, - .kick = non_polling_poller_kick, - .work = non_polling_poller_work, - .shutdown = non_polling_poller_shutdown, - .destroy = non_polling_poller_destroy}, -}; - -typedef struct cq_vtable { - grpc_cq_completion_type cq_completion_type; - size_t data_size; - void (*init)(void *data); - void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq); - void (*destroy)(void *data); - bool (*begin_op)(grpc_completion_queue *cq, void *tag); - void (*end_op)(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq, void *tag, - grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg, - grpc_cq_completion *storage), - void *done_arg, grpc_cq_completion *storage); - grpc_event (*next)(grpc_completion_queue *cq, gpr_timespec deadline, - void *reserved); - grpc_event (*pluck)(grpc_completion_queue *cq, void *tag, - gpr_timespec deadline, void *reserved); -} cq_vtable; - -/* Queue that holds the cq_completion_events. Internally uses gpr_mpscq queue - * (a lockfree multiproducer single consumer queue). It uses a queue_lock - * to support multiple consumers. - * Only used in completion queues whose completion_type is GRPC_CQ_NEXT */ -typedef struct grpc_cq_event_queue { - /* Spinlock to serialize consumers i.e pop() operations */ - gpr_spinlock queue_lock; - - gpr_mpscq queue; - - /* A lazy counter of number of items in the queue. This is NOT atomically - incremented/decremented along with push/pop operations and hence is only - eventually consistent */ - gpr_atm num_queue_items; -} grpc_cq_event_queue; - -typedef struct cq_next_data { - /** Completed events for completion-queues of type GRPC_CQ_NEXT */ - grpc_cq_event_queue queue; - - /** Counter of how many things have ever been queued on this completion queue - useful for avoiding locks to check the queue */ - gpr_atm things_queued_ever; - - /* Number of outstanding events (+1 if not shut down) */ - gpr_atm pending_events; - - /** 0 initially. 
1 once we initiated shutdown */ - bool shutdown_called; -} cq_next_data; - -typedef struct cq_pluck_data { - /** Completed events for completion-queues of type GRPC_CQ_PLUCK */ - grpc_cq_completion completed_head; - grpc_cq_completion *completed_tail; - - /** Number of pending events (+1 if we're not shutdown) */ - gpr_atm pending_events; - - /** Counter of how many things have ever been queued on this completion queue - useful for avoiding locks to check the queue */ - gpr_atm things_queued_ever; - - /** 0 initially. 1 once we completed shutting */ - /* TODO: (sreek) This is not needed since (shutdown == 1) if and only if - * (pending_events == 0). So consider removing this in future and use - * pending_events */ - gpr_atm shutdown; - - /** 0 initially. 1 once we initiated shutdown */ - bool shutdown_called; - - int num_pluckers; - plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS]; -} cq_pluck_data; - -/* Completion queue structure */ -struct grpc_completion_queue { - /** Once owning_refs drops to zero, we will destroy the cq */ - gpr_refcount owning_refs; - - gpr_mu *mu; - - const cq_vtable *vtable; - const cq_poller_vtable *poller_vtable; - -#ifndef NDEBUG - void **outstanding_tags; - size_t outstanding_tag_count; - size_t outstanding_tag_capacity; -#endif - - grpc_closure pollset_shutdown_done; - int num_polls; -}; - -/* Forward declarations */ -static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq); -static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq); -static void cq_shutdown_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq); -static void cq_shutdown_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq); - -static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag); -static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag); - -static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq, void *tag, - grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, - void *done_arg, - grpc_cq_completion *storage), - void *done_arg, grpc_cq_completion *storage); - -static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq, void *tag, - grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, - void *done_arg, - grpc_cq_completion *storage), - void *done_arg, grpc_cq_completion *storage); - -static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, - void *reserved); - -static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, - gpr_timespec deadline, void *reserved); - -static void cq_init_next(void *data); -static void cq_init_pluck(void *data); -static void cq_destroy_next(void *data); -static void cq_destroy_pluck(void *data); - -/* Completion queue vtables based on the completion-type */ -static const cq_vtable g_cq_vtable[] = { - /* GRPC_CQ_NEXT */ - {GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next, - cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next, NULL}, - /* GRPC_CQ_PLUCK */ - {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck, - cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, NULL, - cq_pluck}, -}; - -#define DATA_FROM_CQ(cq) ((void *)(cq + 1)) -#define POLLSET_FROM_CQ(cq) \ - ((grpc_pollset *)(cq->vtable->data_size + (char *)DATA_FROM_CQ(cq))) - -grpc_tracer_flag grpc_cq_pluck_trace = - GRPC_TRACER_INITIALIZER(true, "queue_pluck"); -grpc_tracer_flag grpc_cq_event_timeout_trace = - GRPC_TRACER_INITIALIZER(true, 
"queue_timeout"); - -#define GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, event) \ - if (GRPC_TRACER_ON(grpc_api_trace) && \ - (GRPC_TRACER_ON(grpc_cq_pluck_trace) || \ - (event)->type != GRPC_QUEUE_TIMEOUT)) { \ - char *_ev = grpc_event_string(event); \ - gpr_log(GPR_INFO, "RETURN_EVENT[%p]: %s", cq, _ev); \ - gpr_free(_ev); \ - } - -static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *cq, - grpc_error *error); - -static void cq_event_queue_init(grpc_cq_event_queue *q) { - gpr_mpscq_init(&q->queue); - q->queue_lock = GPR_SPINLOCK_INITIALIZER; - gpr_atm_no_barrier_store(&q->num_queue_items, 0); -} - -static void cq_event_queue_destroy(grpc_cq_event_queue *q) { - gpr_mpscq_destroy(&q->queue); -} - -static bool cq_event_queue_push(grpc_cq_event_queue *q, grpc_cq_completion *c) { - gpr_mpscq_push(&q->queue, (gpr_mpscq_node *)c); - return gpr_atm_no_barrier_fetch_add(&q->num_queue_items, 1) == 0; -} - -static grpc_cq_completion *cq_event_queue_pop(grpc_cq_event_queue *q) { - grpc_cq_completion *c = NULL; - if (gpr_spinlock_trylock(&q->queue_lock)) { - c = (grpc_cq_completion *)gpr_mpscq_pop(&q->queue); - gpr_spinlock_unlock(&q->queue_lock); - } - - if (c) { - gpr_atm_no_barrier_fetch_add(&q->num_queue_items, -1); - } - - return c; -} - -/* Note: The counter is not incremented/decremented atomically with push/pop. - * The count is only eventually consistent */ -static long cq_event_queue_num_items(grpc_cq_event_queue *q) { - return (long)gpr_atm_no_barrier_load(&q->num_queue_items); -} - -grpc_completion_queue *grpc_completion_queue_create_internal( - grpc_cq_completion_type completion_type, - grpc_cq_polling_type polling_type) { - grpc_completion_queue *cq; - - GPR_TIMER_BEGIN("grpc_completion_queue_create_internal", 0); - - GRPC_API_TRACE( - "grpc_completion_queue_create_internal(completion_type=%d, " - "polling_type=%d)", - 2, (completion_type, polling_type)); - - const cq_vtable *vtable = &g_cq_vtable[completion_type]; - const cq_poller_vtable *poller_vtable = - &g_poller_vtable_by_poller_type[polling_type]; - - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_STATS_INC_CQS_CREATED(&exec_ctx); - grpc_exec_ctx_finish(&exec_ctx); - - cq = (grpc_completion_queue *)gpr_zalloc(sizeof(grpc_completion_queue) + - vtable->data_size + - poller_vtable->size()); - - cq->vtable = vtable; - cq->poller_vtable = poller_vtable; - - /* One for destroy(), one for pollset_shutdown */ - gpr_ref_init(&cq->owning_refs, 2); - - poller_vtable->init(POLLSET_FROM_CQ(cq), &cq->mu); - vtable->init(DATA_FROM_CQ(cq)); - - GRPC_CLOSURE_INIT(&cq->pollset_shutdown_done, on_pollset_shutdown_done, cq, - grpc_schedule_on_exec_ctx); - - GPR_TIMER_END("grpc_completion_queue_create_internal", 0); - - return cq; -} - -static void cq_init_next(void *ptr) { - cq_next_data *cqd = (cq_next_data *)ptr; - /* Initial count is dropped by grpc_completion_queue_shutdown */ - gpr_atm_no_barrier_store(&cqd->pending_events, 1); - cqd->shutdown_called = false; - gpr_atm_no_barrier_store(&cqd->things_queued_ever, 0); - cq_event_queue_init(&cqd->queue); -} - -static void cq_destroy_next(void *ptr) { - cq_next_data *cqd = (cq_next_data *)ptr; - GPR_ASSERT(cq_event_queue_num_items(&cqd->queue) == 0); - cq_event_queue_destroy(&cqd->queue); -} - -static void cq_init_pluck(void *ptr) { - cq_pluck_data *cqd = (cq_pluck_data *)ptr; - /* Initial count is dropped by grpc_completion_queue_shutdown */ - gpr_atm_no_barrier_store(&cqd->pending_events, 1); - cqd->completed_tail = &cqd->completed_head; - cqd->completed_head.next = 
(uintptr_t)cqd->completed_tail; - gpr_atm_no_barrier_store(&cqd->shutdown, 0); - cqd->shutdown_called = false; - cqd->num_pluckers = 0; - gpr_atm_no_barrier_store(&cqd->things_queued_ever, 0); -} - -static void cq_destroy_pluck(void *ptr) { - cq_pluck_data *cqd = (cq_pluck_data *)ptr; - GPR_ASSERT(cqd->completed_head.next == (uintptr_t)&cqd->completed_head); -} - -grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue *cq) { - return cq->vtable->cq_completion_type; -} - -int grpc_get_cq_poll_num(grpc_completion_queue *cq) { - int cur_num_polls; - gpr_mu_lock(cq->mu); - cur_num_polls = cq->num_polls; - gpr_mu_unlock(cq->mu); - return cur_num_polls; -} - -#ifndef NDEBUG -void grpc_cq_internal_ref(grpc_completion_queue *cq, const char *reason, - const char *file, int line) { - if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) { - gpr_atm val = gpr_atm_no_barrier_load(&cq->owning_refs.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "CQ:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", cq, val, val + 1, - reason); - } -#else -void grpc_cq_internal_ref(grpc_completion_queue *cq) { -#endif - gpr_ref(&cq->owning_refs); -} - -static void on_pollset_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_completion_queue *cq = (grpc_completion_queue *)arg; - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "pollset_destroy"); -} - -#ifndef NDEBUG -void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq, - const char *reason, const char *file, int line) { - if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) { - gpr_atm val = gpr_atm_no_barrier_load(&cq->owning_refs.count); - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "CQ:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", cq, val, val - 1, - reason); - } -#else -void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { -#endif - if (gpr_unref(&cq->owning_refs)) { - cq->vtable->destroy(DATA_FROM_CQ(cq)); - cq->poller_vtable->destroy(exec_ctx, POLLSET_FROM_CQ(cq)); -#ifndef NDEBUG - gpr_free(cq->outstanding_tags); -#endif - gpr_free(cq); - } -} - -#ifndef NDEBUG -static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) { - int found = 0; - if (lock_cq) { - gpr_mu_lock(cq->mu); - } - - for (int i = 0; i < (int)cq->outstanding_tag_count; i++) { - if (cq->outstanding_tags[i] == tag) { - cq->outstanding_tag_count--; - GPR_SWAP(void *, cq->outstanding_tags[i], - cq->outstanding_tags[cq->outstanding_tag_count]); - found = 1; - break; - } - } - - if (lock_cq) { - gpr_mu_unlock(cq->mu); - } - - GPR_ASSERT(found); -} -#else -static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {} -#endif - -/* Atomically increments a counter only if the counter is not zero. Returns - * true if the increment was successful; false if the counter is zero */ -static bool atm_inc_if_nonzero(gpr_atm *counter) { - while (true) { - gpr_atm count = gpr_atm_acq_load(counter); - /* If zero, we are done. If not, we must to a CAS (instead of an atomic - * increment) to maintain the contract: do not increment the counter if it - * is zero. 
*/ - if (count == 0) { - return false; - } else if (gpr_atm_full_cas(counter, count, count + 1)) { - break; - } - } - - return true; -} - -static bool cq_begin_op_for_next(grpc_completion_queue *cq, void *tag) { - cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); - return atm_inc_if_nonzero(&cqd->pending_events); -} - -static bool cq_begin_op_for_pluck(grpc_completion_queue *cq, void *tag) { - cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); - return atm_inc_if_nonzero(&cqd->pending_events); -} - -bool grpc_cq_begin_op(grpc_completion_queue *cq, void *tag) { -#ifndef NDEBUG - gpr_mu_lock(cq->mu); - if (cq->outstanding_tag_count == cq->outstanding_tag_capacity) { - cq->outstanding_tag_capacity = GPR_MAX(4, 2 * cq->outstanding_tag_capacity); - cq->outstanding_tags = (void **)gpr_realloc( - cq->outstanding_tags, - sizeof(*cq->outstanding_tags) * cq->outstanding_tag_capacity); - } - cq->outstanding_tags[cq->outstanding_tag_count++] = tag; - gpr_mu_unlock(cq->mu); -#endif - return cq->vtable->begin_op(cq, tag); -} - -/* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a - * completion - * type of GRPC_CQ_NEXT) */ -static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq, void *tag, - grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, - void *done_arg, - grpc_cq_completion *storage), - void *done_arg, grpc_cq_completion *storage) { - GPR_TIMER_BEGIN("cq_end_op_for_next", 0); - - if (GRPC_TRACER_ON(grpc_api_trace) || - (GRPC_TRACER_ON(grpc_trace_operation_failures) && - error != GRPC_ERROR_NONE)) { - const char *errmsg = grpc_error_string(error); - GRPC_API_TRACE( - "cq_end_op_for_next(exec_ctx=%p, cq=%p, tag=%p, error=%s, " - "done=%p, done_arg=%p, storage=%p)", - 7, (exec_ctx, cq, tag, errmsg, done, done_arg, storage)); - if (GRPC_TRACER_ON(grpc_trace_operation_failures) && - error != GRPC_ERROR_NONE) { - gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg); - } - } - - cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); - int is_success = (error == GRPC_ERROR_NONE); - - storage->tag = tag; - storage->done = done; - storage->done_arg = done_arg; - storage->next = (uintptr_t)(is_success); - - cq_check_tag(cq, tag, true); /* Used in debug builds only */ - - /* Add the completion to the queue */ - bool is_first = cq_event_queue_push(&cqd->queue, storage); - gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1); - - /* Since we do not hold the cq lock here, it is important to do an 'acquire' - load here (instead of a 'no_barrier' load) to match with the release store - (done via gpr_atm_full_fetch_add(pending_events, -1)) in cq_shutdown_next - */ - bool will_definitely_shutdown = gpr_atm_acq_load(&cqd->pending_events) == 1; - - if (!will_definitely_shutdown) { - /* Only kick if this is the first item queued */ - if (is_first) { - gpr_mu_lock(cq->mu); - grpc_error *kick_error = - cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), NULL); - gpr_mu_unlock(cq->mu); - - if (kick_error != GRPC_ERROR_NONE) { - const char *msg = grpc_error_string(kick_error); - gpr_log(GPR_ERROR, "Kick failed: %s", msg); - GRPC_ERROR_UNREF(kick_error); - } - } - if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { - GRPC_CQ_INTERNAL_REF(cq, "shutting_down"); - gpr_mu_lock(cq->mu); - cq_finish_shutdown_next(exec_ctx, cq); - gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down"); - } - } else { - GRPC_CQ_INTERNAL_REF(cq, "shutting_down"); - gpr_atm_rel_store(&cqd->pending_events, 0); - gpr_mu_lock(cq->mu); - 
cq_finish_shutdown_next(exec_ctx, cq); - gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down"); - } - - GPR_TIMER_END("cq_end_op_for_next", 0); - - GRPC_ERROR_UNREF(error); -} - -/* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a - * completion - * type of GRPC_CQ_PLUCK) */ -static void cq_end_op_for_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq, void *tag, - grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, - void *done_arg, - grpc_cq_completion *storage), - void *done_arg, grpc_cq_completion *storage) { - cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); - int is_success = (error == GRPC_ERROR_NONE); - - GPR_TIMER_BEGIN("cq_end_op_for_pluck", 0); - - if (GRPC_TRACER_ON(grpc_api_trace) || - (GRPC_TRACER_ON(grpc_trace_operation_failures) && - error != GRPC_ERROR_NONE)) { - const char *errmsg = grpc_error_string(error); - GRPC_API_TRACE( - "cq_end_op_for_pluck(exec_ctx=%p, cq=%p, tag=%p, error=%s, " - "done=%p, done_arg=%p, storage=%p)", - 7, (exec_ctx, cq, tag, errmsg, done, done_arg, storage)); - if (GRPC_TRACER_ON(grpc_trace_operation_failures) && - error != GRPC_ERROR_NONE) { - gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg); - } - } - - storage->tag = tag; - storage->done = done; - storage->done_arg = done_arg; - storage->next = ((uintptr_t)&cqd->completed_head) | ((uintptr_t)(is_success)); - - gpr_mu_lock(cq->mu); - cq_check_tag(cq, tag, false); /* Used in debug builds only */ - - /* Add to the list of completions */ - gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1); - cqd->completed_tail->next = - ((uintptr_t)storage) | (1u & (uintptr_t)cqd->completed_tail->next); - cqd->completed_tail = storage; - - if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { - cq_finish_shutdown_pluck(exec_ctx, cq); - gpr_mu_unlock(cq->mu); - } else { - grpc_pollset_worker *pluck_worker = NULL; - for (int i = 0; i < cqd->num_pluckers; i++) { - if (cqd->pluckers[i].tag == tag) { - pluck_worker = *cqd->pluckers[i].worker; - break; - } - } - - grpc_error *kick_error = - cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), pluck_worker); - - gpr_mu_unlock(cq->mu); - - if (kick_error != GRPC_ERROR_NONE) { - const char *msg = grpc_error_string(kick_error); - gpr_log(GPR_ERROR, "Kick failed: %s", msg); - - GRPC_ERROR_UNREF(kick_error); - } - } - - GPR_TIMER_END("cq_end_op_for_pluck", 0); - - GRPC_ERROR_UNREF(error); -} - -void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cq, - void *tag, grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg, - grpc_cq_completion *storage), - void *done_arg, grpc_cq_completion *storage) { - cq->vtable->end_op(exec_ctx, cq, tag, error, done, done_arg, storage); -} - -typedef struct { - gpr_atm last_seen_things_queued_ever; - grpc_completion_queue *cq; - gpr_timespec deadline; - grpc_cq_completion *stolen_completion; - void *tag; /* for pluck */ - bool first_loop; -} cq_is_finished_arg; - -static bool cq_is_next_finished(grpc_exec_ctx *exec_ctx, void *arg) { - cq_is_finished_arg *a = (cq_is_finished_arg *)arg; - grpc_completion_queue *cq = a->cq; - cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); - GPR_ASSERT(a->stolen_completion == NULL); - - gpr_atm current_last_seen_things_queued_ever = - gpr_atm_no_barrier_load(&cqd->things_queued_ever); - - if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) { - a->last_seen_things_queued_ever = - gpr_atm_no_barrier_load(&cqd->things_queued_ever); - - /* Pop a 
cq_completion from the queue. Returns NULL if the queue is empty - * might return NULL in some cases even if the queue is not empty; but - * that - * is ok and doesn't affect correctness. Might effect the tail latencies a - * bit) */ - a->stolen_completion = cq_event_queue_pop(&cqd->queue); - if (a->stolen_completion != NULL) { - return true; - } - } - return !a->first_loop && - gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0; -} - -#ifndef NDEBUG -static void dump_pending_tags(grpc_completion_queue *cq) { - if (!GRPC_TRACER_ON(grpc_trace_pending_tags)) return; - - gpr_strvec v; - gpr_strvec_init(&v); - gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:")); - gpr_mu_lock(cq->mu); - for (size_t i = 0; i < cq->outstanding_tag_count; i++) { - char *s; - gpr_asprintf(&s, " %p", cq->outstanding_tags[i]); - gpr_strvec_add(&v, s); - } - gpr_mu_unlock(cq->mu); - char *out = gpr_strvec_flatten(&v, NULL); - gpr_strvec_destroy(&v); - gpr_log(GPR_DEBUG, "%s", out); - gpr_free(out); -} -#else -static void dump_pending_tags(grpc_completion_queue *cq) {} -#endif - -static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, - void *reserved) { - grpc_event ret; - gpr_timespec now; - cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); - - GPR_TIMER_BEGIN("grpc_completion_queue_next", 0); - - GRPC_API_TRACE( - "grpc_completion_queue_next(" - "cq=%p, " - "deadline=gpr_timespec { tv_sec: %" PRId64 - ", tv_nsec: %d, clock_type: %d }, " - "reserved=%p)", - 5, (cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type, - reserved)); - GPR_ASSERT(!reserved); - - dump_pending_tags(cq); - - deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - - GRPC_CQ_INTERNAL_REF(cq, "next"); - - cq_is_finished_arg is_finished_arg = { - .last_seen_things_queued_ever = - gpr_atm_no_barrier_load(&cqd->things_queued_ever), - .cq = cq, - .deadline = deadline, - .stolen_completion = NULL, - .tag = NULL, - .first_loop = true}; - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, cq_is_next_finished, &is_finished_arg); - - for (;;) { - gpr_timespec iteration_deadline = deadline; - - if (is_finished_arg.stolen_completion != NULL) { - grpc_cq_completion *c = is_finished_arg.stolen_completion; - is_finished_arg.stolen_completion = NULL; - ret.type = GRPC_OP_COMPLETE; - ret.success = c->next & 1u; - ret.tag = c->tag; - c->done(&exec_ctx, c->done_arg, c); - break; - } - - grpc_cq_completion *c = cq_event_queue_pop(&cqd->queue); - - if (c != NULL) { - ret.type = GRPC_OP_COMPLETE; - ret.success = c->next & 1u; - ret.tag = c->tag; - c->done(&exec_ctx, c->done_arg, c); - break; - } else { - /* If c == NULL it means either the queue is empty OR in an transient - inconsistent state. If it is the latter, we shold do a 0-timeout poll - so that the thread comes back quickly from poll to make a second - attempt at popping. Not doing this can potentially deadlock this - thread forever (if the deadline is infinity) */ - if (cq_event_queue_num_items(&cqd->queue) > 0) { - iteration_deadline = gpr_time_0(GPR_CLOCK_MONOTONIC); - } - } - - if (gpr_atm_acq_load(&cqd->pending_events) == 0) { - /* Before returning, check if the queue has any items left over (since - gpr_mpscq_pop() can sometimes return NULL even if the queue is not - empty. If so, keep retrying but do not return GRPC_QUEUE_SHUTDOWN */ - if (cq_event_queue_num_items(&cqd->queue) > 0) { - /* Go to the beginning of the loop. 
No point doing a poll because - (cq->shutdown == true) is only possible when there is no pending - work (i.e cq->pending_events == 0) and any outstanding completion - events should have already been queued on this cq */ - continue; - } - - memset(&ret, 0, sizeof(ret)); - ret.type = GRPC_QUEUE_SHUTDOWN; - break; - } - - now = gpr_now(GPR_CLOCK_MONOTONIC); - if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) { - memset(&ret, 0, sizeof(ret)); - ret.type = GRPC_QUEUE_TIMEOUT; - dump_pending_tags(cq); - break; - } - - /* The main polling work happens in grpc_pollset_work */ - gpr_mu_lock(cq->mu); - cq->num_polls++; - grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq), - NULL, now, iteration_deadline); - gpr_mu_unlock(cq->mu); - - if (err != GRPC_ERROR_NONE) { - const char *msg = grpc_error_string(err); - gpr_log(GPR_ERROR, "Completion queue next failed: %s", msg); - - GRPC_ERROR_UNREF(err); - memset(&ret, 0, sizeof(ret)); - ret.type = GRPC_QUEUE_TIMEOUT; - dump_pending_tags(cq); - break; - } - is_finished_arg.first_loop = false; - } - - if (cq_event_queue_num_items(&cqd->queue) > 0 && - gpr_atm_acq_load(&cqd->pending_events) > 0) { - gpr_mu_lock(cq->mu); - cq->poller_vtable->kick(&exec_ctx, POLLSET_FROM_CQ(cq), NULL); - gpr_mu_unlock(cq->mu); - } - - GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret); - GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "next"); - grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(is_finished_arg.stolen_completion == NULL); - - GPR_TIMER_END("grpc_completion_queue_next", 0); - - return ret; -} - -/* Finishes the completion queue shutdown. This means that there are no more - completion events / tags expected from the completion queue - - Must be called under completion queue lock - - Must be called only once in completion queue's lifetime - - grpc_completion_queue_shutdown() MUST have been called before calling - this function */ -static void cq_finish_shutdown_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { - cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); - - GPR_ASSERT(cqd->shutdown_called); - GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0); - - cq->poller_vtable->shutdown(exec_ctx, POLLSET_FROM_CQ(cq), - &cq->pollset_shutdown_done); -} - -static void cq_shutdown_next(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { - cq_next_data *cqd = (cq_next_data *)DATA_FROM_CQ(cq); - - /* Need an extra ref for cq here because: - * We call cq_finish_shutdown_next() below, that would call pollset shutdown. - * Pollset shutdown decrements the cq ref count which can potentially destroy - * the cq (if that happens to be the last ref). 
- * Creating an extra ref here prevents the cq from getting destroyed while - * this function is still active */ - GRPC_CQ_INTERNAL_REF(cq, "shutting_down"); - gpr_mu_lock(cq->mu); - if (cqd->shutdown_called) { - gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down"); - return; - } - cqd->shutdown_called = true; - /* Doing a full_fetch_add (i.e acq/release) here to match with - * cq_begin_op_for_next and and cq_end_op_for_next functions which read/write - * on this counter without necessarily holding a lock on cq */ - if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { - cq_finish_shutdown_next(exec_ctx, cq); - } - gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down"); -} - -grpc_event grpc_completion_queue_next(grpc_completion_queue *cq, - gpr_timespec deadline, void *reserved) { - return cq->vtable->next(cq, deadline, reserved); -} - -static int add_plucker(grpc_completion_queue *cq, void *tag, - grpc_pollset_worker **worker) { - cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); - if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) { - return 0; - } - cqd->pluckers[cqd->num_pluckers].tag = tag; - cqd->pluckers[cqd->num_pluckers].worker = worker; - cqd->num_pluckers++; - return 1; -} - -static void del_plucker(grpc_completion_queue *cq, void *tag, - grpc_pollset_worker **worker) { - cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); - for (int i = 0; i < cqd->num_pluckers; i++) { - if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) { - cqd->num_pluckers--; - GPR_SWAP(plucker, cqd->pluckers[i], cqd->pluckers[cqd->num_pluckers]); - return; - } - } - GPR_UNREACHABLE_CODE(return ); -} - -static bool cq_is_pluck_finished(grpc_exec_ctx *exec_ctx, void *arg) { - cq_is_finished_arg *a = (cq_is_finished_arg *)arg; - grpc_completion_queue *cq = a->cq; - cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); - - GPR_ASSERT(a->stolen_completion == NULL); - gpr_atm current_last_seen_things_queued_ever = - gpr_atm_no_barrier_load(&cqd->things_queued_ever); - if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) { - gpr_mu_lock(cq->mu); - a->last_seen_things_queued_ever = - gpr_atm_no_barrier_load(&cqd->things_queued_ever); - grpc_cq_completion *c; - grpc_cq_completion *prev = &cqd->completed_head; - while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) != - &cqd->completed_head) { - if (c->tag == a->tag) { - prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1); - if (c == cqd->completed_tail) { - cqd->completed_tail = prev; - } - gpr_mu_unlock(cq->mu); - a->stolen_completion = c; - return true; - } - prev = c; - } - gpr_mu_unlock(cq->mu); - } - return !a->first_loop && - gpr_time_cmp(a->deadline, gpr_now(a->deadline.clock_type)) < 0; -} - -static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag, - gpr_timespec deadline, void *reserved) { - grpc_event ret; - grpc_cq_completion *c; - grpc_cq_completion *prev; - grpc_pollset_worker *worker = NULL; - gpr_timespec now; - cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); - - GPR_TIMER_BEGIN("grpc_completion_queue_pluck", 0); - - if (GRPC_TRACER_ON(grpc_cq_pluck_trace)) { - GRPC_API_TRACE( - "grpc_completion_queue_pluck(" - "cq=%p, tag=%p, " - "deadline=gpr_timespec { tv_sec: %" PRId64 - ", tv_nsec: %d, clock_type: %d }, " - "reserved=%p)", - 6, (cq, tag, deadline.tv_sec, deadline.tv_nsec, - (int)deadline.clock_type, reserved)); - } - GPR_ASSERT(!reserved); - - dump_pending_tags(cq); - - deadline = 
gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); - - GRPC_CQ_INTERNAL_REF(cq, "pluck"); - gpr_mu_lock(cq->mu); - cq_is_finished_arg is_finished_arg = { - .last_seen_things_queued_ever = - gpr_atm_no_barrier_load(&cqd->things_queued_ever), - .cq = cq, - .deadline = deadline, - .stolen_completion = NULL, - .tag = tag, - .first_loop = true}; - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, cq_is_pluck_finished, &is_finished_arg); - for (;;) { - if (is_finished_arg.stolen_completion != NULL) { - gpr_mu_unlock(cq->mu); - c = is_finished_arg.stolen_completion; - is_finished_arg.stolen_completion = NULL; - ret.type = GRPC_OP_COMPLETE; - ret.success = c->next & 1u; - ret.tag = c->tag; - c->done(&exec_ctx, c->done_arg, c); - break; - } - prev = &cqd->completed_head; - while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) != - &cqd->completed_head) { - if (c->tag == tag) { - prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1); - if (c == cqd->completed_tail) { - cqd->completed_tail = prev; - } - gpr_mu_unlock(cq->mu); - ret.type = GRPC_OP_COMPLETE; - ret.success = c->next & 1u; - ret.tag = c->tag; - c->done(&exec_ctx, c->done_arg, c); - goto done; - } - prev = c; - } - if (gpr_atm_no_barrier_load(&cqd->shutdown)) { - gpr_mu_unlock(cq->mu); - memset(&ret, 0, sizeof(ret)); - ret.type = GRPC_QUEUE_SHUTDOWN; - break; - } - if (!add_plucker(cq, tag, &worker)) { - gpr_log(GPR_DEBUG, - "Too many outstanding grpc_completion_queue_pluck calls: maximum " - "is %d", - GRPC_MAX_COMPLETION_QUEUE_PLUCKERS); - gpr_mu_unlock(cq->mu); - memset(&ret, 0, sizeof(ret)); - /* TODO(ctiller): should we use a different result here */ - ret.type = GRPC_QUEUE_TIMEOUT; - dump_pending_tags(cq); - break; - } - now = gpr_now(GPR_CLOCK_MONOTONIC); - if (!is_finished_arg.first_loop && gpr_time_cmp(now, deadline) >= 0) { - del_plucker(cq, tag, &worker); - gpr_mu_unlock(cq->mu); - memset(&ret, 0, sizeof(ret)); - ret.type = GRPC_QUEUE_TIMEOUT; - dump_pending_tags(cq); - break; - } - - cq->num_polls++; - grpc_error *err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq), - &worker, now, deadline); - if (err != GRPC_ERROR_NONE) { - del_plucker(cq, tag, &worker); - gpr_mu_unlock(cq->mu); - const char *msg = grpc_error_string(err); - gpr_log(GPR_ERROR, "Completion queue pluck failed: %s", msg); - - GRPC_ERROR_UNREF(err); - memset(&ret, 0, sizeof(ret)); - ret.type = GRPC_QUEUE_TIMEOUT; - dump_pending_tags(cq); - break; - } - is_finished_arg.first_loop = false; - del_plucker(cq, tag, &worker); - } -done: - GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret); - GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "pluck"); - grpc_exec_ctx_finish(&exec_ctx); - GPR_ASSERT(is_finished_arg.stolen_completion == NULL); - - GPR_TIMER_END("grpc_completion_queue_pluck", 0); - - return ret; -} - -grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag, - gpr_timespec deadline, void *reserved) { - return cq->vtable->pluck(cq, tag, deadline, reserved); -} - -static void cq_finish_shutdown_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { - cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); - - GPR_ASSERT(cqd->shutdown_called); - GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown)); - gpr_atm_no_barrier_store(&cqd->shutdown, 1); - - cq->poller_vtable->shutdown(exec_ctx, POLLSET_FROM_CQ(cq), - &cq->pollset_shutdown_done); -} - -/* NOTE: This function is almost exactly identical to cq_shutdown_next() but - * merging them is a bit tricky and probably not worth it */ -static void 
cq_shutdown_pluck(grpc_exec_ctx *exec_ctx, - grpc_completion_queue *cq) { - cq_pluck_data *cqd = (cq_pluck_data *)DATA_FROM_CQ(cq); - - /* Need an extra ref for cq here because: - * We call cq_finish_shutdown_pluck() below, that would call pollset shutdown. - * Pollset shutdown decrements the cq ref count which can potentially destroy - * the cq (if that happens to be the last ref). - * Creating an extra ref here prevents the cq from getting destroyed while - * this function is still active */ - GRPC_CQ_INTERNAL_REF(cq, "shutting_down (pluck cq)"); - gpr_mu_lock(cq->mu); - if (cqd->shutdown_called) { - gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down (pluck cq)"); - return; - } - cqd->shutdown_called = true; - if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { - cq_finish_shutdown_pluck(exec_ctx, cq); - } - gpr_mu_unlock(cq->mu); - GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down (pluck cq)"); -} - -/* Shutdown simply drops a ref that we reserved at creation time; if we drop - to zero here, then enter shutdown mode and wake up any waiters */ -void grpc_completion_queue_shutdown(grpc_completion_queue *cq) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0); - GRPC_API_TRACE("grpc_completion_queue_shutdown(cq=%p)", 1, (cq)); - cq->vtable->shutdown(&exec_ctx, cq); - grpc_exec_ctx_finish(&exec_ctx); - GPR_TIMER_END("grpc_completion_queue_shutdown", 0); -} - -void grpc_completion_queue_destroy(grpc_completion_queue *cq) { - GRPC_API_TRACE("grpc_completion_queue_destroy(cq=%p)", 1, (cq)); - GPR_TIMER_BEGIN("grpc_completion_queue_destroy", 0); - grpc_completion_queue_shutdown(cq); - - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "destroy"); - grpc_exec_ctx_finish(&exec_ctx); - GPR_TIMER_END("grpc_completion_queue_destroy", 0); -} - -grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cq) { - return cq->poller_vtable->can_get_pollset ? POLLSET_FROM_CQ(cq) : NULL; -} - -bool grpc_cq_can_listen(grpc_completion_queue *cq) { - return cq->poller_vtable->can_listen; -} diff --git a/Sources/CgRPC/src/core/lib/surface/completion_queue.cc b/Sources/CgRPC/src/core/lib/surface/completion_queue.cc new file mode 100644 index 000000000..d0363917b --- /dev/null +++ b/Sources/CgRPC/src/core/lib/surface/completion_queue.cc @@ -0,0 +1,1262 @@ +/* + * + * Copyright 2015-2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +#include + +#include "src/core/lib/surface/completion_queue.h" + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/spinlock.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gpr/tls.h" +#include "src/core/lib/iomgr/pollset.h" +#include "src/core/lib/iomgr/timer.h" +#include "src/core/lib/profiling/timers.h" +#include "src/core/lib/surface/api_trace.h" +#include "src/core/lib/surface/call.h" +#include "src/core/lib/surface/event_string.h" + +grpc_core::TraceFlag grpc_trace_operation_failures(false, "op_failure"); +grpc_core::DebugOnlyTraceFlag grpc_trace_pending_tags(false, "pending_tags"); +grpc_core::DebugOnlyTraceFlag grpc_trace_cq_refcount(false, "cq_refcount"); + +// Specifies a cq thread local cache. +// The first event that occurs on a thread +// with a cq cache will go into that cache, and +// will only be returned on the thread that initialized the cache. +// NOTE: Only one event will ever be cached. +GPR_TLS_DECL(g_cached_event); +GPR_TLS_DECL(g_cached_cq); + +typedef struct { + grpc_pollset_worker** worker; + void* tag; +} plucker; + +typedef struct { + bool can_get_pollset; + bool can_listen; + size_t (*size)(void); + void (*init)(grpc_pollset* pollset, gpr_mu** mu); + grpc_error* (*kick)(grpc_pollset* pollset, + grpc_pollset_worker* specific_worker); + grpc_error* (*work)(grpc_pollset* pollset, grpc_pollset_worker** worker, + grpc_millis deadline); + void (*shutdown)(grpc_pollset* pollset, grpc_closure* closure); + void (*destroy)(grpc_pollset* pollset); +} cq_poller_vtable; + +typedef struct non_polling_worker { + gpr_cv cv; + bool kicked; + struct non_polling_worker* next; + struct non_polling_worker* prev; +} non_polling_worker; + +typedef struct { + gpr_mu mu; + non_polling_worker* root; + grpc_closure* shutdown; +} non_polling_poller; + +static size_t non_polling_poller_size(void) { + return sizeof(non_polling_poller); +} + +static void non_polling_poller_init(grpc_pollset* pollset, gpr_mu** mu) { + non_polling_poller* npp = reinterpret_cast(pollset); + gpr_mu_init(&npp->mu); + *mu = &npp->mu; +} + +static void non_polling_poller_destroy(grpc_pollset* pollset) { + non_polling_poller* npp = reinterpret_cast(pollset); + gpr_mu_destroy(&npp->mu); +} + +static grpc_error* non_polling_poller_work(grpc_pollset* pollset, + grpc_pollset_worker** worker, + grpc_millis deadline) { + non_polling_poller* npp = reinterpret_cast(pollset); + if (npp->shutdown) return GRPC_ERROR_NONE; + non_polling_worker w; + gpr_cv_init(&w.cv); + if (worker != nullptr) *worker = reinterpret_cast(&w); + if (npp->root == nullptr) { + npp->root = w.next = w.prev = &w; + } else { + w.next = npp->root; + w.prev = w.next->prev; + w.next->prev = w.prev->next = &w; + } + w.kicked = false; + gpr_timespec deadline_ts = + grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC); + while (!npp->shutdown && !w.kicked && + !gpr_cv_wait(&w.cv, &npp->mu, deadline_ts)) + ; + grpc_core::ExecCtx::Get()->InvalidateNow(); + if (&w == npp->root) { + npp->root = w.next; + if (&w == npp->root) { + if (npp->shutdown) { + GRPC_CLOSURE_SCHED(npp->shutdown, GRPC_ERROR_NONE); + } + npp->root = nullptr; + } + } + w.next->prev = w.prev; + w.prev->next = w.next; + gpr_cv_destroy(&w.cv); + if (worker != nullptr) *worker = nullptr; + return GRPC_ERROR_NONE; +} + +static grpc_error* non_polling_poller_kick( + grpc_pollset* pollset, grpc_pollset_worker* specific_worker) { + non_polling_poller* p = 
reinterpret_cast<non_polling_poller*>(pollset); + if (specific_worker == nullptr) + specific_worker = reinterpret_cast<grpc_pollset_worker*>(p->root); + if (specific_worker != nullptr) { + non_polling_worker* w = + reinterpret_cast<non_polling_worker*>(specific_worker); + if (!w->kicked) { + w->kicked = true; + gpr_cv_signal(&w->cv); + } + } + return GRPC_ERROR_NONE; +} + +static void non_polling_poller_shutdown(grpc_pollset* pollset, + grpc_closure* closure) { + non_polling_poller* p = reinterpret_cast<non_polling_poller*>(pollset); + GPR_ASSERT(closure != nullptr); + p->shutdown = closure; + if (p->root == nullptr) { + GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE); + } else { + non_polling_worker* w = p->root; + do { + gpr_cv_signal(&w->cv); + w = w->next; + } while (w != p->root); + } +} + +static const cq_poller_vtable g_poller_vtable_by_poller_type[] = { + /* GRPC_CQ_DEFAULT_POLLING */ + {true, true, grpc_pollset_size, grpc_pollset_init, grpc_pollset_kick, + grpc_pollset_work, grpc_pollset_shutdown, grpc_pollset_destroy}, + /* GRPC_CQ_NON_LISTENING */ + {true, false, grpc_pollset_size, grpc_pollset_init, grpc_pollset_kick, + grpc_pollset_work, grpc_pollset_shutdown, grpc_pollset_destroy}, + /* GRPC_CQ_NON_POLLING */ + {false, false, non_polling_poller_size, non_polling_poller_init, + non_polling_poller_kick, non_polling_poller_work, + non_polling_poller_shutdown, non_polling_poller_destroy}, +}; + +typedef struct cq_vtable { + grpc_cq_completion_type cq_completion_type; + size_t data_size; + void (*init)(void* data); + void (*shutdown)(grpc_completion_queue* cq); + void (*destroy)(void* data); + bool (*begin_op)(grpc_completion_queue* cq, void* tag); + void (*end_op)(grpc_completion_queue* cq, void* tag, grpc_error* error, + void (*done)(void* done_arg, grpc_cq_completion* storage), + void* done_arg, grpc_cq_completion* storage); + grpc_event (*next)(grpc_completion_queue* cq, gpr_timespec deadline, + void* reserved); + grpc_event (*pluck)(grpc_completion_queue* cq, void* tag, + gpr_timespec deadline, void* reserved); +} cq_vtable; + +/* Queue that holds the cq_completion_events. Internally uses gpr_mpscq queue + * (a lockfree multiproducer single consumer queue). It uses a queue_lock + * to support multiple consumers. + * Only used in completion queues whose completion_type is GRPC_CQ_NEXT */ +typedef struct grpc_cq_event_queue { + /* Spinlock to serialize consumers i.e pop() operations */ + gpr_spinlock queue_lock; + + gpr_mpscq queue; + + /* A lazy counter of number of items in the queue. This is NOT atomically + incremented/decremented along with push/pop operations and hence is only + eventually consistent */ + gpr_atm num_queue_items; +} grpc_cq_event_queue; + +typedef struct cq_next_data { + /** Completed events for completion-queues of type GRPC_CQ_NEXT */ + grpc_cq_event_queue queue; + + /** Counter of how many things have ever been queued on this completion queue + useful for avoiding locks to check the queue */ + gpr_atm things_queued_ever; + + /* Number of outstanding events (+1 if not shut down) */ + gpr_atm pending_events; + + /** 0 initially.
1 once we initiated shutdown */ + bool shutdown_called; +} cq_next_data; + +typedef struct cq_pluck_data { + /** Completed events for completion-queues of type GRPC_CQ_PLUCK */ + grpc_cq_completion completed_head; + grpc_cq_completion* completed_tail; + + /** Number of pending events (+1 if we're not shutdown) */ + gpr_atm pending_events; + + /** Counter of how many things have ever been queued on this completion queue + useful for avoiding locks to check the queue */ + gpr_atm things_queued_ever; + + /** 0 initially. 1 once we completed shutting */ + /* TODO: (sreek) This is not needed since (shutdown == 1) if and only if + * (pending_events == 0). So consider removing this in future and use + * pending_events */ + gpr_atm shutdown; + + /** 0 initially. 1 once we initiated shutdown */ + bool shutdown_called; + + int num_pluckers; + plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS]; +} cq_pluck_data; + +/* Completion queue structure */ +struct grpc_completion_queue { + /** Once owning_refs drops to zero, we will destroy the cq */ + gpr_refcount owning_refs; + + gpr_mu* mu; + + const cq_vtable* vtable; + const cq_poller_vtable* poller_vtable; + +#ifndef NDEBUG + void** outstanding_tags; + size_t outstanding_tag_count; + size_t outstanding_tag_capacity; +#endif + + grpc_closure pollset_shutdown_done; + int num_polls; +}; + +/* Forward declarations */ +static void cq_finish_shutdown_next(grpc_completion_queue* cq); +static void cq_finish_shutdown_pluck(grpc_completion_queue* cq); +static void cq_shutdown_next(grpc_completion_queue* cq); +static void cq_shutdown_pluck(grpc_completion_queue* cq); + +static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag); +static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag); + +static void cq_end_op_for_next(grpc_completion_queue* cq, void* tag, + grpc_error* error, + void (*done)(void* done_arg, + grpc_cq_completion* storage), + void* done_arg, grpc_cq_completion* storage); + +static void cq_end_op_for_pluck(grpc_completion_queue* cq, void* tag, + grpc_error* error, + void (*done)(void* done_arg, + grpc_cq_completion* storage), + void* done_arg, grpc_cq_completion* storage); + +static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline, + void* reserved); + +static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag, + gpr_timespec deadline, void* reserved); + +static void cq_init_next(void* data); +static void cq_init_pluck(void* data); +static void cq_destroy_next(void* data); +static void cq_destroy_pluck(void* data); + +/* Completion queue vtables based on the completion-type */ +static const cq_vtable g_cq_vtable[] = { + /* GRPC_CQ_NEXT */ + {GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next, + cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next, + nullptr}, + /* GRPC_CQ_PLUCK */ + {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck, + cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, nullptr, + cq_pluck}, +}; + +#define DATA_FROM_CQ(cq) ((void*)(cq + 1)) +#define POLLSET_FROM_CQ(cq) \ + ((grpc_pollset*)(cq->vtable->data_size + (char*)DATA_FROM_CQ(cq))) + +grpc_core::TraceFlag grpc_cq_pluck_trace(true, "queue_pluck"); +grpc_core::TraceFlag grpc_cq_event_timeout_trace(true, "queue_timeout"); + +#define GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, event) \ + if (grpc_api_trace.enabled() && (grpc_cq_pluck_trace.enabled() || \ + (event)->type != GRPC_QUEUE_TIMEOUT)) { \ + char* _ev = grpc_event_string(event); \ + gpr_log(GPR_INFO, 
"RETURN_EVENT[%p]: %s", cq, _ev); \ + gpr_free(_ev); \ + } + +static void on_pollset_shutdown_done(void* cq, grpc_error* error); + +void grpc_cq_global_init() { + gpr_tls_init(&g_cached_event); + gpr_tls_init(&g_cached_cq); +} + +void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue* cq) { + if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == nullptr) { + gpr_tls_set(&g_cached_event, (intptr_t)0); + gpr_tls_set(&g_cached_cq, (intptr_t)cq); + } +} + +int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq, + void** tag, int* ok) { + grpc_cq_completion* storage = + (grpc_cq_completion*)gpr_tls_get(&g_cached_event); + int ret = 0; + if (storage != nullptr && + (grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq) { + *tag = storage->tag; + grpc_core::ExecCtx exec_ctx; + *ok = (storage->next & static_cast(1)) == 1; + storage->done(storage->done_arg, storage); + ret = 1; + cq_next_data* cqd = static_cast DATA_FROM_CQ(cq); + if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { + GRPC_CQ_INTERNAL_REF(cq, "shutting_down"); + gpr_mu_lock(cq->mu); + cq_finish_shutdown_next(cq); + gpr_mu_unlock(cq->mu); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down"); + } + } + gpr_tls_set(&g_cached_event, (intptr_t)0); + gpr_tls_set(&g_cached_cq, (intptr_t)0); + + return ret; +} + +static void cq_event_queue_init(grpc_cq_event_queue* q) { + gpr_mpscq_init(&q->queue); + q->queue_lock = GPR_SPINLOCK_INITIALIZER; + gpr_atm_no_barrier_store(&q->num_queue_items, 0); +} + +static void cq_event_queue_destroy(grpc_cq_event_queue* q) { + gpr_mpscq_destroy(&q->queue); +} + +static bool cq_event_queue_push(grpc_cq_event_queue* q, grpc_cq_completion* c) { + gpr_mpscq_push(&q->queue, reinterpret_cast(c)); + return gpr_atm_no_barrier_fetch_add(&q->num_queue_items, 1) == 0; +} + +static grpc_cq_completion* cq_event_queue_pop(grpc_cq_event_queue* q) { + grpc_cq_completion* c = nullptr; + grpc_core::ExecCtx exec_ctx; + + if (gpr_spinlock_trylock(&q->queue_lock)) { + GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES(); + + bool is_empty = false; + c = reinterpret_cast( + gpr_mpscq_pop_and_check_end(&q->queue, &is_empty)); + gpr_spinlock_unlock(&q->queue_lock); + + if (c == nullptr && !is_empty) { + GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES(); + } + } else { + GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES(); + } + + if (c) { + gpr_atm_no_barrier_fetch_add(&q->num_queue_items, -1); + } + + return c; +} + +/* Note: The counter is not incremented/decremented atomically with push/pop. 
+ * The count is only eventually consistent */ +static long cq_event_queue_num_items(grpc_cq_event_queue* q) { + return static_cast(gpr_atm_no_barrier_load(&q->num_queue_items)); +} + +grpc_completion_queue* grpc_completion_queue_create_internal( + grpc_cq_completion_type completion_type, + grpc_cq_polling_type polling_type) { + GPR_TIMER_SCOPE("grpc_completion_queue_create_internal", 0); + + grpc_completion_queue* cq; + + GRPC_API_TRACE( + "grpc_completion_queue_create_internal(completion_type=%d, " + "polling_type=%d)", + 2, (completion_type, polling_type)); + + const cq_vtable* vtable = &g_cq_vtable[completion_type]; + const cq_poller_vtable* poller_vtable = + &g_poller_vtable_by_poller_type[polling_type]; + + grpc_core::ExecCtx exec_ctx; + GRPC_STATS_INC_CQS_CREATED(); + + cq = static_cast( + gpr_zalloc(sizeof(grpc_completion_queue) + vtable->data_size + + poller_vtable->size())); + + cq->vtable = vtable; + cq->poller_vtable = poller_vtable; + + /* One for destroy(), one for pollset_shutdown */ + gpr_ref_init(&cq->owning_refs, 2); + + poller_vtable->init(POLLSET_FROM_CQ(cq), &cq->mu); + vtable->init(DATA_FROM_CQ(cq)); + + GRPC_CLOSURE_INIT(&cq->pollset_shutdown_done, on_pollset_shutdown_done, cq, + grpc_schedule_on_exec_ctx); + return cq; +} + +static void cq_init_next(void* ptr) { + cq_next_data* cqd = static_cast(ptr); + /* Initial count is dropped by grpc_completion_queue_shutdown */ + gpr_atm_no_barrier_store(&cqd->pending_events, 1); + cqd->shutdown_called = false; + gpr_atm_no_barrier_store(&cqd->things_queued_ever, 0); + cq_event_queue_init(&cqd->queue); +} + +static void cq_destroy_next(void* ptr) { + cq_next_data* cqd = static_cast(ptr); + GPR_ASSERT(cq_event_queue_num_items(&cqd->queue) == 0); + cq_event_queue_destroy(&cqd->queue); +} + +static void cq_init_pluck(void* ptr) { + cq_pluck_data* cqd = static_cast(ptr); + /* Initial count is dropped by grpc_completion_queue_shutdown */ + gpr_atm_no_barrier_store(&cqd->pending_events, 1); + cqd->completed_tail = &cqd->completed_head; + cqd->completed_head.next = (uintptr_t)cqd->completed_tail; + gpr_atm_no_barrier_store(&cqd->shutdown, 0); + cqd->shutdown_called = false; + cqd->num_pluckers = 0; + gpr_atm_no_barrier_store(&cqd->things_queued_ever, 0); +} + +static void cq_destroy_pluck(void* ptr) { + cq_pluck_data* cqd = static_cast(ptr); + GPR_ASSERT(cqd->completed_head.next == (uintptr_t)&cqd->completed_head); +} + +grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cq) { + return cq->vtable->cq_completion_type; +} + +int grpc_get_cq_poll_num(grpc_completion_queue* cq) { + int cur_num_polls; + gpr_mu_lock(cq->mu); + cur_num_polls = cq->num_polls; + gpr_mu_unlock(cq->mu); + return cur_num_polls; +} + +#ifndef NDEBUG +void grpc_cq_internal_ref(grpc_completion_queue* cq, const char* reason, + const char* file, int line) { + if (grpc_trace_cq_refcount.enabled()) { + gpr_atm val = gpr_atm_no_barrier_load(&cq->owning_refs.count); + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, + "CQ:%p ref %" PRIdPTR " -> %" PRIdPTR " %s", cq, val, val + 1, + reason); + } +#else +void grpc_cq_internal_ref(grpc_completion_queue* cq) { +#endif + gpr_ref(&cq->owning_refs); +} + +static void on_pollset_shutdown_done(void* arg, grpc_error* error) { + grpc_completion_queue* cq = static_cast(arg); + GRPC_CQ_INTERNAL_UNREF(cq, "pollset_destroy"); +} + +#ifndef NDEBUG +void grpc_cq_internal_unref(grpc_completion_queue* cq, const char* reason, + const char* file, int line) { + if (grpc_trace_cq_refcount.enabled()) { + gpr_atm val = 
gpr_atm_no_barrier_load(&cq->owning_refs.count); + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, + "CQ:%p unref %" PRIdPTR " -> %" PRIdPTR " %s", cq, val, val - 1, + reason); + } +#else +void grpc_cq_internal_unref(grpc_completion_queue* cq) { +#endif + if (gpr_unref(&cq->owning_refs)) { + cq->vtable->destroy(DATA_FROM_CQ(cq)); + cq->poller_vtable->destroy(POLLSET_FROM_CQ(cq)); +#ifndef NDEBUG + gpr_free(cq->outstanding_tags); +#endif + gpr_free(cq); + } +} + +#ifndef NDEBUG +static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) { + int found = 0; + if (lock_cq) { + gpr_mu_lock(cq->mu); + } + + for (int i = 0; i < static_cast(cq->outstanding_tag_count); i++) { + if (cq->outstanding_tags[i] == tag) { + cq->outstanding_tag_count--; + GPR_SWAP(void*, cq->outstanding_tags[i], + cq->outstanding_tags[cq->outstanding_tag_count]); + found = 1; + break; + } + } + + if (lock_cq) { + gpr_mu_unlock(cq->mu); + } + + GPR_ASSERT(found); +} +#else +static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) {} +#endif + +/* Atomically increments a counter only if the counter is not zero. Returns + * true if the increment was successful; false if the counter is zero */ +static bool atm_inc_if_nonzero(gpr_atm* counter) { + while (true) { + gpr_atm count = gpr_atm_acq_load(counter); + /* If zero, we are done. If not, we must to a CAS (instead of an atomic + * increment) to maintain the contract: do not increment the counter if it + * is zero. */ + if (count == 0) { + return false; + } else if (gpr_atm_full_cas(counter, count, count + 1)) { + break; + } + } + + return true; +} + +static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag) { + cq_next_data* cqd = static_cast DATA_FROM_CQ(cq); + return atm_inc_if_nonzero(&cqd->pending_events); +} + +static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag) { + cq_pluck_data* cqd = static_cast DATA_FROM_CQ(cq); + return atm_inc_if_nonzero(&cqd->pending_events); +} + +bool grpc_cq_begin_op(grpc_completion_queue* cq, void* tag) { +#ifndef NDEBUG + gpr_mu_lock(cq->mu); + if (cq->outstanding_tag_count == cq->outstanding_tag_capacity) { + cq->outstanding_tag_capacity = GPR_MAX(4, 2 * cq->outstanding_tag_capacity); + cq->outstanding_tags = static_cast(gpr_realloc( + cq->outstanding_tags, + sizeof(*cq->outstanding_tags) * cq->outstanding_tag_capacity)); + } + cq->outstanding_tags[cq->outstanding_tag_count++] = tag; + gpr_mu_unlock(cq->mu); +#endif + return cq->vtable->begin_op(cq, tag); +} + +/* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a + * completion + * type of GRPC_CQ_NEXT) */ +static void cq_end_op_for_next(grpc_completion_queue* cq, void* tag, + grpc_error* error, + void (*done)(void* done_arg, + grpc_cq_completion* storage), + void* done_arg, grpc_cq_completion* storage) { + GPR_TIMER_SCOPE("cq_end_op_for_next", 0); + + if (grpc_api_trace.enabled() || + (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE)) { + const char* errmsg = grpc_error_string(error); + GRPC_API_TRACE( + "cq_end_op_for_next(cq=%p, tag=%p, error=%s, " + "done=%p, done_arg=%p, storage=%p)", + 6, (cq, tag, errmsg, done, done_arg, storage)); + if (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg); + } + } + cq_next_data* cqd = static_cast DATA_FROM_CQ(cq); + int is_success = (error == GRPC_ERROR_NONE); + + storage->tag = tag; + storage->done = done; + storage->done_arg = done_arg; + storage->next = 
static_cast(is_success); + + cq_check_tag(cq, tag, true); /* Used in debug builds only */ + + if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq && + (grpc_cq_completion*)gpr_tls_get(&g_cached_event) == nullptr) { + gpr_tls_set(&g_cached_event, (intptr_t)storage); + } else { + /* Add the completion to the queue */ + bool is_first = cq_event_queue_push(&cqd->queue, storage); + gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1); + + /* Since we do not hold the cq lock here, it is important to do an 'acquire' + load here (instead of a 'no_barrier' load) to match with the release + store + (done via gpr_atm_full_fetch_add(pending_events, -1)) in cq_shutdown_next + */ + bool will_definitely_shutdown = gpr_atm_acq_load(&cqd->pending_events) == 1; + + if (!will_definitely_shutdown) { + /* Only kick if this is the first item queued */ + if (is_first) { + gpr_mu_lock(cq->mu); + grpc_error* kick_error = + cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), nullptr); + gpr_mu_unlock(cq->mu); + + if (kick_error != GRPC_ERROR_NONE) { + const char* msg = grpc_error_string(kick_error); + gpr_log(GPR_ERROR, "Kick failed: %s", msg); + GRPC_ERROR_UNREF(kick_error); + } + } + if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { + GRPC_CQ_INTERNAL_REF(cq, "shutting_down"); + gpr_mu_lock(cq->mu); + cq_finish_shutdown_next(cq); + gpr_mu_unlock(cq->mu); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down"); + } + } else { + GRPC_CQ_INTERNAL_REF(cq, "shutting_down"); + gpr_atm_rel_store(&cqd->pending_events, 0); + gpr_mu_lock(cq->mu); + cq_finish_shutdown_next(cq); + gpr_mu_unlock(cq->mu); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down"); + } + } + + GRPC_ERROR_UNREF(error); +} + +/* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a + * completion + * type of GRPC_CQ_PLUCK) */ +static void cq_end_op_for_pluck(grpc_completion_queue* cq, void* tag, + grpc_error* error, + void (*done)(void* done_arg, + grpc_cq_completion* storage), + void* done_arg, grpc_cq_completion* storage) { + GPR_TIMER_SCOPE("cq_end_op_for_pluck", 0); + + cq_pluck_data* cqd = static_cast DATA_FROM_CQ(cq); + int is_success = (error == GRPC_ERROR_NONE); + + if (grpc_api_trace.enabled() || + (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE)) { + const char* errmsg = grpc_error_string(error); + GRPC_API_TRACE( + "cq_end_op_for_pluck(cq=%p, tag=%p, error=%s, " + "done=%p, done_arg=%p, storage=%p)", + 6, (cq, tag, errmsg, done, done_arg, storage)); + if (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg); + } + } + + storage->tag = tag; + storage->done = done; + storage->done_arg = done_arg; + storage->next = + ((uintptr_t)&cqd->completed_head) | (static_cast(is_success)); + + gpr_mu_lock(cq->mu); + cq_check_tag(cq, tag, false); /* Used in debug builds only */ + + /* Add to the list of completions */ + gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1); + cqd->completed_tail->next = + ((uintptr_t)storage) | (1u & cqd->completed_tail->next); + cqd->completed_tail = storage; + + if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { + cq_finish_shutdown_pluck(cq); + gpr_mu_unlock(cq->mu); + } else { + grpc_pollset_worker* pluck_worker = nullptr; + for (int i = 0; i < cqd->num_pluckers; i++) { + if (cqd->pluckers[i].tag == tag) { + pluck_worker = *cqd->pluckers[i].worker; + break; + } + } + + grpc_error* kick_error = + cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker); + + gpr_mu_unlock(cq->mu); + + 
if (kick_error != GRPC_ERROR_NONE) { + const char* msg = grpc_error_string(kick_error); + gpr_log(GPR_ERROR, "Kick failed: %s", msg); + + GRPC_ERROR_UNREF(kick_error); + } + } + + GRPC_ERROR_UNREF(error); +} + +void grpc_cq_end_op(grpc_completion_queue* cq, void* tag, grpc_error* error, + void (*done)(void* done_arg, grpc_cq_completion* storage), + void* done_arg, grpc_cq_completion* storage) { + cq->vtable->end_op(cq, tag, error, done, done_arg, storage); +} + +typedef struct { + gpr_atm last_seen_things_queued_ever; + grpc_completion_queue* cq; + grpc_millis deadline; + grpc_cq_completion* stolen_completion; + void* tag; /* for pluck */ + bool first_loop; +} cq_is_finished_arg; + +class ExecCtxNext : public grpc_core::ExecCtx { + public: + ExecCtxNext(void* arg) : ExecCtx(0), check_ready_to_finish_arg_(arg) {} + + bool CheckReadyToFinish() override { + cq_is_finished_arg* a = + static_cast(check_ready_to_finish_arg_); + grpc_completion_queue* cq = a->cq; + cq_next_data* cqd = static_cast DATA_FROM_CQ(cq); + GPR_ASSERT(a->stolen_completion == nullptr); + + gpr_atm current_last_seen_things_queued_ever = + gpr_atm_no_barrier_load(&cqd->things_queued_ever); + + if (current_last_seen_things_queued_ever != + a->last_seen_things_queued_ever) { + a->last_seen_things_queued_ever = + gpr_atm_no_barrier_load(&cqd->things_queued_ever); + + /* Pop a cq_completion from the queue. Returns NULL if the queue is empty + * might return NULL in some cases even if the queue is not empty; but + * that + * is ok and doesn't affect correctness. Might effect the tail latencies a + * bit) */ + a->stolen_completion = cq_event_queue_pop(&cqd->queue); + if (a->stolen_completion != nullptr) { + return true; + } + } + return !a->first_loop && a->deadline < grpc_core::ExecCtx::Get()->Now(); + } + + private: + void* check_ready_to_finish_arg_; +}; + +#ifndef NDEBUG +static void dump_pending_tags(grpc_completion_queue* cq) { + if (!grpc_trace_pending_tags.enabled()) return; + + gpr_strvec v; + gpr_strvec_init(&v); + gpr_strvec_add(&v, gpr_strdup("PENDING TAGS:")); + gpr_mu_lock(cq->mu); + for (size_t i = 0; i < cq->outstanding_tag_count; i++) { + char* s; + gpr_asprintf(&s, " %p", cq->outstanding_tags[i]); + gpr_strvec_add(&v, s); + } + gpr_mu_unlock(cq->mu); + char* out = gpr_strvec_flatten(&v, nullptr); + gpr_strvec_destroy(&v); + gpr_log(GPR_DEBUG, "%s", out); + gpr_free(out); +} +#else +static void dump_pending_tags(grpc_completion_queue* cq) {} +#endif + +static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline, + void* reserved) { + GPR_TIMER_SCOPE("grpc_completion_queue_next", 0); + + grpc_event ret; + cq_next_data* cqd = static_cast DATA_FROM_CQ(cq); + + GRPC_API_TRACE( + "grpc_completion_queue_next(" + "cq=%p, " + "deadline=gpr_timespec { tv_sec: %" PRId64 + ", tv_nsec: %d, clock_type: %d }, " + "reserved=%p)", + 5, + (cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type, + reserved)); + GPR_ASSERT(!reserved); + + dump_pending_tags(cq); + + GRPC_CQ_INTERNAL_REF(cq, "next"); + + grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline); + cq_is_finished_arg is_finished_arg = { + gpr_atm_no_barrier_load(&cqd->things_queued_ever), + cq, + deadline_millis, + nullptr, + nullptr, + true}; + ExecCtxNext exec_ctx(&is_finished_arg); + for (;;) { + grpc_millis iteration_deadline = deadline_millis; + + if (is_finished_arg.stolen_completion != nullptr) { + grpc_cq_completion* c = is_finished_arg.stolen_completion; + is_finished_arg.stolen_completion = nullptr; + ret.type = 
GRPC_OP_COMPLETE; + ret.success = c->next & 1u; + ret.tag = c->tag; + c->done(c->done_arg, c); + break; + } + + grpc_cq_completion* c = cq_event_queue_pop(&cqd->queue); + + if (c != nullptr) { + ret.type = GRPC_OP_COMPLETE; + ret.success = c->next & 1u; + ret.tag = c->tag; + c->done(c->done_arg, c); + break; + } else { + /* If c == NULL it means either the queue is empty OR in an transient + inconsistent state. If it is the latter, we shold do a 0-timeout poll + so that the thread comes back quickly from poll to make a second + attempt at popping. Not doing this can potentially deadlock this + thread forever (if the deadline is infinity) */ + if (cq_event_queue_num_items(&cqd->queue) > 0) { + iteration_deadline = 0; + } + } + + if (gpr_atm_acq_load(&cqd->pending_events) == 0) { + /* Before returning, check if the queue has any items left over (since + gpr_mpscq_pop() can sometimes return NULL even if the queue is not + empty. If so, keep retrying but do not return GRPC_QUEUE_SHUTDOWN */ + if (cq_event_queue_num_items(&cqd->queue) > 0) { + /* Go to the beginning of the loop. No point doing a poll because + (cq->shutdown == true) is only possible when there is no pending + work (i.e cq->pending_events == 0) and any outstanding completion + events should have already been queued on this cq */ + continue; + } + + memset(&ret, 0, sizeof(ret)); + ret.type = GRPC_QUEUE_SHUTDOWN; + break; + } + + if (!is_finished_arg.first_loop && + grpc_core::ExecCtx::Get()->Now() >= deadline_millis) { + memset(&ret, 0, sizeof(ret)); + ret.type = GRPC_QUEUE_TIMEOUT; + dump_pending_tags(cq); + break; + } + + /* The main polling work happens in grpc_pollset_work */ + gpr_mu_lock(cq->mu); + cq->num_polls++; + grpc_error* err = cq->poller_vtable->work(POLLSET_FROM_CQ(cq), nullptr, + iteration_deadline); + gpr_mu_unlock(cq->mu); + + if (err != GRPC_ERROR_NONE) { + const char* msg = grpc_error_string(err); + gpr_log(GPR_ERROR, "Completion queue next failed: %s", msg); + + GRPC_ERROR_UNREF(err); + memset(&ret, 0, sizeof(ret)); + ret.type = GRPC_QUEUE_TIMEOUT; + dump_pending_tags(cq); + break; + } + is_finished_arg.first_loop = false; + } + + if (cq_event_queue_num_items(&cqd->queue) > 0 && + gpr_atm_acq_load(&cqd->pending_events) > 0) { + gpr_mu_lock(cq->mu); + cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), nullptr); + gpr_mu_unlock(cq->mu); + } + + GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret); + GRPC_CQ_INTERNAL_UNREF(cq, "next"); + + GPR_ASSERT(is_finished_arg.stolen_completion == nullptr); + + return ret; +} + +/* Finishes the completion queue shutdown. This means that there are no more + completion events / tags expected from the completion queue + - Must be called under completion queue lock + - Must be called only once in completion queue's lifetime + - grpc_completion_queue_shutdown() MUST have been called before calling + this function */ +static void cq_finish_shutdown_next(grpc_completion_queue* cq) { + cq_next_data* cqd = static_cast DATA_FROM_CQ(cq); + + GPR_ASSERT(cqd->shutdown_called); + GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0); + + cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done); +} + +static void cq_shutdown_next(grpc_completion_queue* cq) { + cq_next_data* cqd = static_cast DATA_FROM_CQ(cq); + + /* Need an extra ref for cq here because: + * We call cq_finish_shutdown_next() below, that would call pollset shutdown. + * Pollset shutdown decrements the cq ref count which can potentially destroy + * the cq (if that happens to be the last ref). 
+ * Creating an extra ref here prevents the cq from getting destroyed while + * this function is still active */ + GRPC_CQ_INTERNAL_REF(cq, "shutting_down"); + gpr_mu_lock(cq->mu); + if (cqd->shutdown_called) { + gpr_mu_unlock(cq->mu); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down"); + return; + } + cqd->shutdown_called = true; + /* Doing a full_fetch_add (i.e acq/release) here to match with + * cq_begin_op_for_next and and cq_end_op_for_next functions which read/write + * on this counter without necessarily holding a lock on cq */ + if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { + cq_finish_shutdown_next(cq); + } + gpr_mu_unlock(cq->mu); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down"); +} + +grpc_event grpc_completion_queue_next(grpc_completion_queue* cq, + gpr_timespec deadline, void* reserved) { + return cq->vtable->next(cq, deadline, reserved); +} + +static int add_plucker(grpc_completion_queue* cq, void* tag, + grpc_pollset_worker** worker) { + cq_pluck_data* cqd = static_cast DATA_FROM_CQ(cq); + if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) { + return 0; + } + cqd->pluckers[cqd->num_pluckers].tag = tag; + cqd->pluckers[cqd->num_pluckers].worker = worker; + cqd->num_pluckers++; + return 1; +} + +static void del_plucker(grpc_completion_queue* cq, void* tag, + grpc_pollset_worker** worker) { + cq_pluck_data* cqd = static_cast DATA_FROM_CQ(cq); + for (int i = 0; i < cqd->num_pluckers; i++) { + if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) { + cqd->num_pluckers--; + GPR_SWAP(plucker, cqd->pluckers[i], cqd->pluckers[cqd->num_pluckers]); + return; + } + } + GPR_UNREACHABLE_CODE(return ); +} + +class ExecCtxPluck : public grpc_core::ExecCtx { + public: + ExecCtxPluck(void* arg) : ExecCtx(0), check_ready_to_finish_arg_(arg) {} + + bool CheckReadyToFinish() override { + cq_is_finished_arg* a = + static_cast(check_ready_to_finish_arg_); + grpc_completion_queue* cq = a->cq; + cq_pluck_data* cqd = static_cast DATA_FROM_CQ(cq); + + GPR_ASSERT(a->stolen_completion == nullptr); + gpr_atm current_last_seen_things_queued_ever = + gpr_atm_no_barrier_load(&cqd->things_queued_ever); + if (current_last_seen_things_queued_ever != + a->last_seen_things_queued_ever) { + gpr_mu_lock(cq->mu); + a->last_seen_things_queued_ever = + gpr_atm_no_barrier_load(&cqd->things_queued_ever); + grpc_cq_completion* c; + grpc_cq_completion* prev = &cqd->completed_head; + while ((c = (grpc_cq_completion*)(prev->next & + ~static_cast(1))) != + &cqd->completed_head) { + if (c->tag == a->tag) { + prev->next = (prev->next & static_cast(1)) | + (c->next & ~static_cast(1)); + if (c == cqd->completed_tail) { + cqd->completed_tail = prev; + } + gpr_mu_unlock(cq->mu); + a->stolen_completion = c; + return true; + } + prev = c; + } + gpr_mu_unlock(cq->mu); + } + return !a->first_loop && a->deadline < grpc_core::ExecCtx::Get()->Now(); + } + + private: + void* check_ready_to_finish_arg_; +}; + +static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag, + gpr_timespec deadline, void* reserved) { + GPR_TIMER_SCOPE("grpc_completion_queue_pluck", 0); + + grpc_event ret; + grpc_cq_completion* c; + grpc_cq_completion* prev; + grpc_pollset_worker* worker = nullptr; + cq_pluck_data* cqd = static_cast DATA_FROM_CQ(cq); + + if (grpc_cq_pluck_trace.enabled()) { + GRPC_API_TRACE( + "grpc_completion_queue_pluck(" + "cq=%p, tag=%p, " + "deadline=gpr_timespec { tv_sec: %" PRId64 + ", tv_nsec: %d, clock_type: %d }, " + "reserved=%p)", + 6, + (cq, tag, deadline.tv_sec, deadline.tv_nsec, 
(int)deadline.clock_type, + reserved)); + } + GPR_ASSERT(!reserved); + + dump_pending_tags(cq); + + GRPC_CQ_INTERNAL_REF(cq, "pluck"); + gpr_mu_lock(cq->mu); + grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline); + cq_is_finished_arg is_finished_arg = { + gpr_atm_no_barrier_load(&cqd->things_queued_ever), + cq, + deadline_millis, + nullptr, + tag, + true}; + ExecCtxPluck exec_ctx(&is_finished_arg); + for (;;) { + if (is_finished_arg.stolen_completion != nullptr) { + gpr_mu_unlock(cq->mu); + c = is_finished_arg.stolen_completion; + is_finished_arg.stolen_completion = nullptr; + ret.type = GRPC_OP_COMPLETE; + ret.success = c->next & 1u; + ret.tag = c->tag; + c->done(c->done_arg, c); + break; + } + prev = &cqd->completed_head; + while ( + (c = (grpc_cq_completion*)(prev->next & ~static_cast(1))) != + &cqd->completed_head) { + if (c->tag == tag) { + prev->next = (prev->next & static_cast(1)) | + (c->next & ~static_cast(1)); + if (c == cqd->completed_tail) { + cqd->completed_tail = prev; + } + gpr_mu_unlock(cq->mu); + ret.type = GRPC_OP_COMPLETE; + ret.success = c->next & 1u; + ret.tag = c->tag; + c->done(c->done_arg, c); + goto done; + } + prev = c; + } + if (gpr_atm_no_barrier_load(&cqd->shutdown)) { + gpr_mu_unlock(cq->mu); + memset(&ret, 0, sizeof(ret)); + ret.type = GRPC_QUEUE_SHUTDOWN; + break; + } + if (!add_plucker(cq, tag, &worker)) { + gpr_log(GPR_DEBUG, + "Too many outstanding grpc_completion_queue_pluck calls: maximum " + "is %d", + GRPC_MAX_COMPLETION_QUEUE_PLUCKERS); + gpr_mu_unlock(cq->mu); + memset(&ret, 0, sizeof(ret)); + /* TODO(ctiller): should we use a different result here */ + ret.type = GRPC_QUEUE_TIMEOUT; + dump_pending_tags(cq); + break; + } + if (!is_finished_arg.first_loop && + grpc_core::ExecCtx::Get()->Now() >= deadline_millis) { + del_plucker(cq, tag, &worker); + gpr_mu_unlock(cq->mu); + memset(&ret, 0, sizeof(ret)); + ret.type = GRPC_QUEUE_TIMEOUT; + dump_pending_tags(cq); + break; + } + cq->num_polls++; + grpc_error* err = + cq->poller_vtable->work(POLLSET_FROM_CQ(cq), &worker, deadline_millis); + if (err != GRPC_ERROR_NONE) { + del_plucker(cq, tag, &worker); + gpr_mu_unlock(cq->mu); + const char* msg = grpc_error_string(err); + gpr_log(GPR_ERROR, "Completion queue pluck failed: %s", msg); + + GRPC_ERROR_UNREF(err); + memset(&ret, 0, sizeof(ret)); + ret.type = GRPC_QUEUE_TIMEOUT; + dump_pending_tags(cq); + break; + } + is_finished_arg.first_loop = false; + del_plucker(cq, tag, &worker); + } +done: + GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret); + GRPC_CQ_INTERNAL_UNREF(cq, "pluck"); + + GPR_ASSERT(is_finished_arg.stolen_completion == nullptr); + + return ret; +} + +grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, void* tag, + gpr_timespec deadline, void* reserved) { + return cq->vtable->pluck(cq, tag, deadline, reserved); +} + +static void cq_finish_shutdown_pluck(grpc_completion_queue* cq) { + cq_pluck_data* cqd = static_cast DATA_FROM_CQ(cq); + + GPR_ASSERT(cqd->shutdown_called); + GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown)); + gpr_atm_no_barrier_store(&cqd->shutdown, 1); + + cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done); +} + +/* NOTE: This function is almost exactly identical to cq_shutdown_next() but + * merging them is a bit tricky and probably not worth it */ +static void cq_shutdown_pluck(grpc_completion_queue* cq) { + cq_pluck_data* cqd = static_cast DATA_FROM_CQ(cq); + + /* Need an extra ref for cq here because: + * We call cq_finish_shutdown_pluck() below, that would call 
pollset shutdown. + * Pollset shutdown decrements the cq ref count which can potentially destroy + * the cq (if that happens to be the last ref). + * Creating an extra ref here prevents the cq from getting destroyed while + * this function is still active */ + GRPC_CQ_INTERNAL_REF(cq, "shutting_down (pluck cq)"); + gpr_mu_lock(cq->mu); + if (cqd->shutdown_called) { + gpr_mu_unlock(cq->mu); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)"); + return; + } + cqd->shutdown_called = true; + if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { + cq_finish_shutdown_pluck(cq); + } + gpr_mu_unlock(cq->mu); + GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)"); +} + +/* Shutdown simply drops a ref that we reserved at creation time; if we drop + to zero here, then enter shutdown mode and wake up any waiters */ +void grpc_completion_queue_shutdown(grpc_completion_queue* cq) { + GPR_TIMER_SCOPE("grpc_completion_queue_shutdown", 0); + grpc_core::ExecCtx exec_ctx; + GRPC_API_TRACE("grpc_completion_queue_shutdown(cq=%p)", 1, (cq)); + cq->vtable->shutdown(cq); +} + +void grpc_completion_queue_destroy(grpc_completion_queue* cq) { + GPR_TIMER_SCOPE("grpc_completion_queue_destroy", 0); + GRPC_API_TRACE("grpc_completion_queue_destroy(cq=%p)", 1, (cq)); + grpc_completion_queue_shutdown(cq); + + grpc_core::ExecCtx exec_ctx; + GRPC_CQ_INTERNAL_UNREF(cq, "destroy"); +} + +grpc_pollset* grpc_cq_pollset(grpc_completion_queue* cq) { + return cq->poller_vtable->can_get_pollset ? POLLSET_FROM_CQ(cq) : nullptr; +} + +bool grpc_cq_can_listen(grpc_completion_queue* cq) { + return cq->poller_vtable->can_listen; +} diff --git a/Sources/CgRPC/src/core/lib/surface/completion_queue.h b/Sources/CgRPC/src/core/lib/surface/completion_queue.h index 69d144bd9..c9dc2d93c 100644 --- a/Sources/CgRPC/src/core/lib/surface/completion_queue.h +++ b/Sources/CgRPC/src/core/lib/surface/completion_queue.h @@ -21,82 +21,73 @@ /* Internal API for completion queues */ +#include + #include #include "src/core/lib/debug/trace.h" #include "src/core/lib/iomgr/pollset.h" /* These trace flags default to 1. 
The corresponding lines are only traced if grpc_api_trace is also truthy */ -extern grpc_tracer_flag grpc_cq_pluck_trace; -extern grpc_tracer_flag grpc_cq_event_timeout_trace; -extern grpc_tracer_flag grpc_trace_operation_failures; - -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_pending_tags; -extern grpc_tracer_flag grpc_trace_cq_refcount; -#endif - -#ifdef __cplusplus -extern "C" { -#endif +extern grpc_core::TraceFlag grpc_cq_pluck_trace; +extern grpc_core::TraceFlag grpc_cq_event_timeout_trace; +extern grpc_core::TraceFlag grpc_trace_operation_failures; +extern grpc_core::DebugOnlyTraceFlag grpc_trace_pending_tags; +extern grpc_core::DebugOnlyTraceFlag grpc_trace_cq_refcount; typedef struct grpc_cq_completion { gpr_mpscq_node node; /** user supplied tag */ - void *tag; + void* tag; /** done callback - called when this queue element is no longer needed by the completion queue */ - void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg, - struct grpc_cq_completion *c); - void *done_arg; + void (*done)(void* done_arg, struct grpc_cq_completion* c); + void* done_arg; /** next pointer; low bit is used to indicate success or not */ uintptr_t next; } grpc_cq_completion; #ifndef NDEBUG -void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason, - const char *file, int line); -void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc, - const char *reason, const char *file, int line); +void grpc_cq_internal_ref(grpc_completion_queue* cc, const char* reason, + const char* file, int line); +void grpc_cq_internal_unref(grpc_completion_queue* cc, const char* reason, + const char* file, int line); #define GRPC_CQ_INTERNAL_REF(cc, reason) \ grpc_cq_internal_ref(cc, reason, __FILE__, __LINE__) -#define GRPC_CQ_INTERNAL_UNREF(ec, cc, reason) \ - grpc_cq_internal_unref(ec, cc, reason, __FILE__, __LINE__) +#define GRPC_CQ_INTERNAL_UNREF(cc, reason) \ + grpc_cq_internal_unref(cc, reason, __FILE__, __LINE__) #else -void grpc_cq_internal_ref(grpc_completion_queue *cc); -void grpc_cq_internal_unref(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc); +void grpc_cq_internal_ref(grpc_completion_queue* cc); +void grpc_cq_internal_unref(grpc_completion_queue* cc); #define GRPC_CQ_INTERNAL_REF(cc, reason) grpc_cq_internal_ref(cc) -#define GRPC_CQ_INTERNAL_UNREF(ec, cc, reason) grpc_cq_internal_unref(ec, cc) +#define GRPC_CQ_INTERNAL_UNREF(cc, reason) grpc_cq_internal_unref(cc) #endif +/* Initializes global variables used by completion queues */ +void grpc_cq_global_init(); + /* Flag that an operation is beginning: the completion channel will not finish shutdown until a corrensponding grpc_cq_end_* call is made. \a tag is currently used only in debug builds. Return true on success, and false if completion_queue has been shutdown. 
*/ -bool grpc_cq_begin_op(grpc_completion_queue *cc, void *tag); +bool grpc_cq_begin_op(grpc_completion_queue* cc, void* tag); /* Queue a GRPC_OP_COMPLETED operation; tag must correspond to the tag passed to grpc_cq_begin_op */ -void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc, - void *tag, grpc_error *error, - void (*done)(grpc_exec_ctx *exec_ctx, void *done_arg, - grpc_cq_completion *storage), - void *done_arg, grpc_cq_completion *storage); +void grpc_cq_end_op(grpc_completion_queue* cc, void* tag, grpc_error* error, + void (*done)(void* done_arg, grpc_cq_completion* storage), + void* done_arg, grpc_cq_completion* storage); -grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc); +grpc_pollset* grpc_cq_pollset(grpc_completion_queue* cc); -bool grpc_cq_can_listen(grpc_completion_queue *cc); +bool grpc_cq_can_listen(grpc_completion_queue* cc); -grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue *cc); +grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cc); -int grpc_get_cq_poll_num(grpc_completion_queue *cc); +int grpc_get_cq_poll_num(grpc_completion_queue* cc); -grpc_completion_queue *grpc_completion_queue_create_internal( +grpc_completion_queue* grpc_completion_queue_create_internal( grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type); -#ifdef __cplusplus -} -#endif - #endif /* GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/completion_queue_factory.c b/Sources/CgRPC/src/core/lib/surface/completion_queue_factory.cc similarity index 96% rename from Sources/CgRPC/src/core/lib/surface/completion_queue_factory.c rename to Sources/CgRPC/src/core/lib/surface/completion_queue_factory.cc index aeecff530..51c1183c5 100644 --- a/Sources/CgRPC/src/core/lib/surface/completion_queue_factory.c +++ b/Sources/CgRPC/src/core/lib/surface/completion_queue_factory.cc @@ -16,8 +16,10 @@ * */ -#include "src/core/lib/surface/completion_queue_factory.h" +#include + #include "src/core/lib/surface/completion_queue.h" +#include "src/core/lib/surface/completion_queue_factory.h" #include @@ -35,7 +37,7 @@ static grpc_completion_queue* default_create( static grpc_completion_queue_factory_vtable default_vtable = {default_create}; static const grpc_completion_queue_factory g_default_cq_factory = { - "Default Factory", NULL, &default_vtable}; + "Default Factory", nullptr, &default_vtable}; /* * == Completion queue factory APIs diff --git a/Sources/CgRPC/src/core/lib/surface/completion_queue_factory.h b/Sources/CgRPC/src/core/lib/surface/completion_queue_factory.h index 89be8f821..d2b30a9ce 100644 --- a/Sources/CgRPC/src/core/lib/surface/completion_queue_factory.h +++ b/Sources/CgRPC/src/core/lib/surface/completion_queue_factory.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_FACTORY_H #define GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_FACTORY_H +#include + #include #include "src/core/lib/surface/completion_queue.h" diff --git a/Sources/CgRPC/src/core/lib/surface/event_string.c b/Sources/CgRPC/src/core/lib/surface/event_string.cc similarity index 76% rename from Sources/CgRPC/src/core/lib/surface/event_string.c rename to Sources/CgRPC/src/core/lib/surface/event_string.cc index f236272e2..d639baec4 100644 --- a/Sources/CgRPC/src/core/lib/surface/event_string.c +++ b/Sources/CgRPC/src/core/lib/surface/event_string.cc @@ -16,33 +16,35 @@ * */ +#include + #include "src/core/lib/surface/event_string.h" #include #include #include -#include "src/core/lib/support/string.h" 
+#include "src/core/lib/gpr/string.h" -static void addhdr(gpr_strvec *buf, grpc_event *ev) { - char *tmp; +static void addhdr(gpr_strvec* buf, grpc_event* ev) { + char* tmp; gpr_asprintf(&tmp, "tag:%p", ev->tag); gpr_strvec_add(buf, tmp); } -static const char *errstr(int success) { return success ? "OK" : "ERROR"; } +static const char* errstr(int success) { return success ? "OK" : "ERROR"; } -static void adderr(gpr_strvec *buf, int success) { - char *tmp; +static void adderr(gpr_strvec* buf, int success) { + char* tmp; gpr_asprintf(&tmp, " %s", errstr(success)); gpr_strvec_add(buf, tmp); } -char *grpc_event_string(grpc_event *ev) { - char *out; +char* grpc_event_string(grpc_event* ev) { + char* out; gpr_strvec buf; - if (ev == NULL) return gpr_strdup("null"); + if (ev == nullptr) return gpr_strdup("null"); gpr_strvec_init(&buf); @@ -60,7 +62,7 @@ char *grpc_event_string(grpc_event *ev) { break; } - out = gpr_strvec_flatten(&buf, NULL); + out = gpr_strvec_flatten(&buf, nullptr); gpr_strvec_destroy(&buf); return out; } diff --git a/Sources/CgRPC/src/core/lib/surface/event_string.h b/Sources/CgRPC/src/core/lib/surface/event_string.h index f00efca7f..e6095705e 100644 --- a/Sources/CgRPC/src/core/lib/surface/event_string.h +++ b/Sources/CgRPC/src/core/lib/surface/event_string.h @@ -19,9 +19,11 @@ #ifndef GRPC_CORE_LIB_SURFACE_EVENT_STRING_H #define GRPC_CORE_LIB_SURFACE_EVENT_STRING_H +#include + #include /* Returns a string describing an event. Must be later freed with gpr_free() */ -char *grpc_event_string(grpc_event *ev); +char* grpc_event_string(grpc_event* ev); #endif /* GRPC_CORE_LIB_SURFACE_EVENT_STRING_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/init.c b/Sources/CgRPC/src/core/lib/surface/init.cc similarity index 62% rename from Sources/CgRPC/src/core/lib/surface/init.c rename to Sources/CgRPC/src/core/lib/surface/init.cc index b04fa3d15..bd436d685 100644 --- a/Sources/CgRPC/src/core/lib/surface/init.c +++ b/Sources/CgRPC/src/core/lib/surface/init.cc @@ -27,10 +27,13 @@ #include #include #include "src/core/lib/channel/channel_stack.h" +#include "src/core/lib/channel/channel_trace_registry.h" #include "src/core/lib/channel/connected_channel.h" #include "src/core/lib/channel/handshaker_registry.h" #include "src/core/lib/debug/stats.h" #include "src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/fork.h" +#include "src/core/lib/gprpp/thd.h" #include "src/core/lib/http/parser.h" #include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/combiner.h" @@ -40,9 +43,6 @@ #include "src/core/lib/iomgr/timer_manager.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/fork.h" -#include "src/core/lib/support/thd_internal.h" -#include "src/core/lib/surface/alarm_internal.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/call.h" #include "src/core/lib/surface/channel_init.h" @@ -68,37 +68,36 @@ static void do_basic_init(void) { grpc_fork_support_init(); gpr_mu_init(&g_init_mu); grpc_register_built_in_plugins(); + grpc_cq_global_init(); g_initializations = 0; grpc_fork_handlers_auto_register(); } -static bool append_filter(grpc_exec_ctx *exec_ctx, - grpc_channel_stack_builder *builder, void *arg) { +static bool append_filter(grpc_channel_stack_builder* builder, void* arg) { return grpc_channel_stack_builder_append_filter( - builder, (const grpc_channel_filter *)arg, NULL, NULL); + builder, static_cast(arg), nullptr, nullptr); } -static bool prepend_filter(grpc_exec_ctx 
*exec_ctx, - grpc_channel_stack_builder *builder, void *arg) { +static bool prepend_filter(grpc_channel_stack_builder* builder, void* arg) { return grpc_channel_stack_builder_prepend_filter( - builder, (const grpc_channel_filter *)arg, NULL, NULL); + builder, static_cast(arg), nullptr, nullptr); } static void register_builtin_channel_init() { grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - grpc_add_connected_filter, NULL); + grpc_add_connected_filter, nullptr); grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - grpc_add_connected_filter, NULL); + grpc_add_connected_filter, nullptr); grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - grpc_add_connected_filter, NULL); + grpc_add_connected_filter, nullptr); grpc_channel_init_register_stage(GRPC_CLIENT_LAME_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, - append_filter, (void *)&grpc_lame_filter); + append_filter, (void*)&grpc_lame_filter); grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, prepend_filter, - (void *)&grpc_server_top_filter); + (void*)&grpc_server_top_filter); } typedef struct grpc_plugin { @@ -111,7 +110,7 @@ static int g_number_of_plugins = 0; void grpc_register_plugin(void (*init)(void), void (*destroy)(void)) { GRPC_API_TRACE("grpc_register_plugin(init=%p, destroy=%p)", 2, - ((void *)(intptr_t)init, (void *)(intptr_t)destroy)); + ((void*)(intptr_t)init, (void*)(intptr_t)destroy)); GPR_ASSERT(g_number_of_plugins != MAX_PLUGINS); g_all_of_the_plugins[g_number_of_plugins].init = init; g_all_of_the_plugins[g_number_of_plugins].destroy = destroy; @@ -122,46 +121,23 @@ void grpc_init(void) { int i; gpr_once_init(&g_basic_init, do_basic_init); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_mu_lock(&g_init_mu); if (++g_initializations == 1) { gpr_time_init(); - gpr_thd_init(); + grpc_core::Thread::Init(); grpc_stats_init(); grpc_slice_intern_init(); grpc_mdctx_global_init(); grpc_channel_init_init(); - grpc_register_tracer(&grpc_api_trace); - grpc_register_tracer(&grpc_trace_channel); - grpc_register_tracer(&grpc_connectivity_state_trace); - grpc_register_tracer(&grpc_trace_channel_stack_builder); - grpc_register_tracer(&grpc_http1_trace); - grpc_register_tracer(&grpc_cq_pluck_trace); // default on - grpc_register_tracer(&grpc_call_combiner_trace); - grpc_register_tracer(&grpc_combiner_trace); - grpc_register_tracer(&grpc_server_channel_trace); - grpc_register_tracer(&grpc_bdp_estimator_trace); - grpc_register_tracer(&grpc_cq_event_timeout_trace); // default on - grpc_register_tracer(&grpc_trace_operation_failures); - grpc_register_tracer(&grpc_resource_quota_trace); - grpc_register_tracer(&grpc_call_error_trace); -#ifndef NDEBUG - grpc_register_tracer(&grpc_trace_pending_tags); - grpc_register_tracer(&grpc_trace_alarm_refcount); - grpc_register_tracer(&grpc_trace_cq_refcount); - grpc_register_tracer(&grpc_trace_closure); - grpc_register_tracer(&grpc_trace_error_refcount); - grpc_register_tracer(&grpc_trace_stream_refcount); - grpc_register_tracer(&grpc_trace_fd_refcount); - grpc_register_tracer(&grpc_trace_metadata); -#endif + grpc_channel_trace_registry_init(); grpc_security_pre_init(); - grpc_iomgr_init(&exec_ctx); + grpc_core::ExecCtx::GlobalInit(); + grpc_iomgr_init(); gpr_timers_global_init(); grpc_handshaker_factory_registry_init(); grpc_security_init(); for (i = 0; i < g_number_of_plugins; i++) { - if (g_all_of_the_plugins[i].init != NULL) { + if (g_all_of_the_plugins[i].init 
!= nullptr) { g_all_of_the_plugins[i].init(); } } @@ -172,37 +148,42 @@ void grpc_init(void) { grpc_tracer_init("GRPC_TRACE"); /* no more changes to channel init pipelines */ grpc_channel_init_finalize(); - grpc_iomgr_start(&exec_ctx); + grpc_iomgr_start(); } gpr_mu_unlock(&g_init_mu); - grpc_exec_ctx_finish(&exec_ctx); + GRPC_API_TRACE("grpc_init(void)", 0, ()); } void grpc_shutdown(void) { int i; GRPC_API_TRACE("grpc_shutdown(void)", 0, ()); - grpc_exec_ctx exec_ctx = - GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL); gpr_mu_lock(&g_init_mu); if (--g_initializations == 0) { - grpc_executor_shutdown(&exec_ctx); - grpc_timer_manager_set_threading(false); // shutdown timer_manager thread - for (i = g_number_of_plugins; i >= 0; i--) { - if (g_all_of_the_plugins[i].destroy != NULL) { - g_all_of_the_plugins[i].destroy(); + { + grpc_core::ExecCtx exec_ctx(0); + { + grpc_timer_manager_set_threading( + false); // shutdown timer_manager thread + grpc_executor_shutdown(); + for (i = g_number_of_plugins; i >= 0; i--) { + if (g_all_of_the_plugins[i].destroy != nullptr) { + g_all_of_the_plugins[i].destroy(); + } + } } + grpc_iomgr_shutdown(); + gpr_timers_global_destroy(); + grpc_tracer_shutdown(); + grpc_mdctx_global_shutdown(); + grpc_handshaker_factory_registry_shutdown(); + grpc_slice_intern_shutdown(); + grpc_channel_trace_registry_shutdown(); + grpc_stats_shutdown(); } - grpc_iomgr_shutdown(&exec_ctx); - gpr_timers_global_destroy(); - grpc_tracer_shutdown(); - grpc_mdctx_global_shutdown(&exec_ctx); - grpc_handshaker_factory_registry_shutdown(&exec_ctx); - grpc_slice_intern_shutdown(); - grpc_stats_shutdown(); + grpc_core::ExecCtx::GlobalShutdown(); } gpr_mu_unlock(&g_init_mu); - grpc_exec_ctx_finish(&exec_ctx); } int grpc_is_initialized(void) { diff --git a/Sources/CgRPC/src/core/lib/surface/init_secure.c b/Sources/CgRPC/src/core/lib/surface/init_secure.cc similarity index 72% rename from Sources/CgRPC/src/core/lib/surface/init_secure.c rename to Sources/CgRPC/src/core/lib/surface/init_secure.cc index 8fbde3d1b..28c6f7b12 100644 --- a/Sources/CgRPC/src/core/lib/surface/init_secure.c +++ b/Sources/CgRPC/src/core/lib/surface/init_secure.cc @@ -24,37 +24,27 @@ #include #include "src/core/lib/debug/trace.h" +#include "src/core/lib/security/context/security_context.h" #include "src/core/lib/security/credentials/credentials.h" #include "src/core/lib/security/credentials/plugin/plugin_credentials.h" +#include "src/core/lib/security/security_connector/security_connector.h" #include "src/core/lib/security/transport/auth_filters.h" #include "src/core/lib/security/transport/secure_endpoint.h" -#include "src/core/lib/security/transport/security_connector.h" #include "src/core/lib/security/transport/security_handshaker.h" #include "src/core/lib/surface/channel_init.h" #include "src/core/tsi/transport_security_interface.h" -#ifndef NDEBUG -#include "src/core/lib/security/context/security_context.h" -#endif - -void grpc_security_pre_init(void) { - grpc_register_tracer(&grpc_trace_secure_endpoint); - grpc_register_tracer(&tsi_tracing_enabled); -#ifndef NDEBUG - grpc_register_tracer(&grpc_trace_auth_context_refcount); - grpc_register_tracer(&grpc_trace_security_connector_refcount); -#endif -} +void grpc_security_pre_init(void) {} static bool maybe_prepend_client_auth_filter( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) { - const grpc_channel_args *args = + grpc_channel_stack_builder* builder, void* arg) { + const grpc_channel_args* args = 
grpc_channel_stack_builder_get_channel_arguments(builder); if (args) { for (size_t i = 0; i < args->num_args; i++) { if (0 == strcmp(GRPC_ARG_SECURITY_CONNECTOR, args->args[i].key)) { return grpc_channel_stack_builder_prepend_filter( - builder, &grpc_client_auth_filter, NULL, NULL); + builder, &grpc_client_auth_filter, nullptr, nullptr); } } } @@ -62,14 +52,14 @@ static bool maybe_prepend_client_auth_filter( } static bool maybe_prepend_server_auth_filter( - grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) { - const grpc_channel_args *args = + grpc_channel_stack_builder* builder, void* arg) { + const grpc_channel_args* args = grpc_channel_stack_builder_get_channel_arguments(builder); if (args) { for (size_t i = 0; i < args->num_args; i++) { if (0 == strcmp(GRPC_SERVER_CREDENTIALS_ARG, args->args[i].key)) { return grpc_channel_stack_builder_prepend_filter( - builder, &grpc_server_auth_filter, NULL, NULL); + builder, &grpc_server_auth_filter, nullptr, nullptr); } } } @@ -77,15 +67,15 @@ static bool maybe_prepend_server_auth_filter( } void grpc_register_security_filters(void) { - grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX, - maybe_prepend_client_auth_filter, NULL); - grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX, - maybe_prepend_client_auth_filter, NULL); + // Register the auth client with a priority < INT_MAX to allow the authority + // filter -on which the auth filter depends- to be higher on the channel + // stack. + grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX - 1, + maybe_prepend_client_auth_filter, nullptr); + grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX - 1, + maybe_prepend_client_auth_filter, nullptr); grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, - maybe_prepend_server_auth_filter, NULL); + maybe_prepend_server_auth_filter, nullptr); } -void grpc_security_init() { - grpc_security_register_handshaker_factories(); - grpc_register_tracer(&grpc_plugin_credentials_trace); -} +void grpc_security_init() { grpc_security_register_handshaker_factories(); } diff --git a/Sources/CgRPC/src/core/lib/surface/lame_client.cc b/Sources/CgRPC/src/core/lib/surface/lame_client.cc index 6286f9159..5a84428b0 100644 --- a/Sources/CgRPC/src/core/lib/surface/lame_client.cc +++ b/Sources/CgRPC/src/core/lib/surface/lame_client.cc @@ -16,6 +16,8 @@ * */ +#include + #include #include @@ -23,24 +25,22 @@ #include #include -#include "src/core/lib/support/atomic.h" +#include "src/core/lib/gprpp/atomic.h" -extern "C" { #include "src/core/lib/channel/channel_stack.h" -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/call.h" #include "src/core/lib/surface/channel.h" #include "src/core/lib/surface/lame_client.h" #include "src/core/lib/transport/static_metadata.h" -} namespace grpc_core { namespace { struct CallData { - grpc_call_combiner *call_combiner; + grpc_call_combiner* call_combiner; grpc_linked_mdelem status; grpc_linked_mdelem details; grpc_core::atomic filled_metadata; @@ -48,105 +48,102 @@ struct CallData { struct ChannelData { grpc_status_code error_code; - const char *error_message; + const char* error_message; }; -static void fill_metadata(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_metadata_batch *mdb) { - CallData *calld = reinterpret_cast(elem->call_data); +static void fill_metadata(grpc_call_element* elem, grpc_metadata_batch* mdb) { + CallData* calld = 
static_cast(elem->call_data); bool expected = false; if (!calld->filled_metadata.compare_exchange_strong( expected, true, grpc_core::memory_order_relaxed, grpc_core::memory_order_relaxed)) { return; } - ChannelData *chand = reinterpret_cast(elem->channel_data); + ChannelData* chand = static_cast(elem->channel_data); char tmp[GPR_LTOA_MIN_BUFSIZE]; gpr_ltoa(chand->error_code, tmp); calld->status.md = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(tmp)); + GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(tmp)); calld->details.md = grpc_mdelem_from_slices( - exec_ctx, GRPC_MDSTR_GRPC_MESSAGE, + GRPC_MDSTR_GRPC_MESSAGE, grpc_slice_from_copied_string(chand->error_message)); - calld->status.prev = calld->details.next = NULL; + calld->status.prev = calld->details.next = nullptr; calld->status.next = &calld->details; calld->details.prev = &calld->status; mdb->list.head = &calld->status; mdb->list.tail = &calld->details; mdb->list.count = 2; - mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + mdb->deadline = GRPC_MILLIS_INF_FUTURE; } static void lame_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - CallData *calld = reinterpret_cast(elem->call_data); + grpc_call_element* elem, grpc_transport_stream_op_batch* op) { + CallData* calld = static_cast(elem->call_data); if (op->recv_initial_metadata) { - fill_metadata(exec_ctx, elem, + fill_metadata(elem, op->payload->recv_initial_metadata.recv_initial_metadata); } else if (op->recv_trailing_metadata) { - fill_metadata(exec_ctx, elem, + fill_metadata(elem, op->payload->recv_trailing_metadata.recv_trailing_metadata); } grpc_transport_stream_op_batch_finish_with_failure( - exec_ctx, op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"), + op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"), calld->call_combiner); } -static void lame_get_channel_info(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - const grpc_channel_info *channel_info) {} +static void lame_get_channel_info(grpc_channel_element* elem, + const grpc_channel_info* channel_info) {} -static void lame_start_transport_op(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_transport_op *op) { +static void lame_start_transport_op(grpc_channel_element* elem, + grpc_transport_op* op) { if (op->on_connectivity_state_change) { GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN); *op->connectivity_state = GRPC_CHANNEL_SHUTDOWN; - GRPC_CLOSURE_SCHED(exec_ctx, op->on_connectivity_state_change, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(op->on_connectivity_state_change, GRPC_ERROR_NONE); + } + if (op->send_ping.on_initiate != nullptr) { + GRPC_CLOSURE_SCHED( + op->send_ping.on_initiate, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel")); } - if (op->send_ping != NULL) { + if (op->send_ping.on_ack != nullptr) { GRPC_CLOSURE_SCHED( - exec_ctx, op->send_ping, + op->send_ping.on_ack, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel")); } GRPC_ERROR_UNREF(op->disconnect_with_error); - if (op->on_consumed != NULL) { - GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE); + if (op->on_consumed != nullptr) { + GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE); } } -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - CallData *calld = reinterpret_cast(elem->call_data); +static grpc_error* init_call_elem(grpc_call_element* elem, + 
const grpc_call_element_args* args) { + CallData* calld = static_cast(elem->call_data); calld->call_combiner = args->call_combiner; return GRPC_ERROR_NONE; } -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *then_schedule_closure) { - GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE); +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* then_schedule_closure) { + GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE); } -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { GPR_ASSERT(args->is_first); GPR_ASSERT(args->is_last); return GRPC_ERROR_NONE; } -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) {} +static void destroy_channel_elem(grpc_channel_element* elem) {} } // namespace } // namespace grpc_core -extern "C" const grpc_channel_filter grpc_lame_filter = { +const grpc_channel_filter grpc_lame_filter = { grpc_core::lame_start_transport_stream_op_batch, grpc_core::lame_start_transport_op, sizeof(grpc_core::CallData), @@ -160,24 +157,24 @@ extern "C" const grpc_channel_filter grpc_lame_filter = { "lame-client", }; -#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack *)((c) + 1)) +#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack*)((c) + 1)) -grpc_channel *grpc_lame_client_channel_create(const char *target, +grpc_channel* grpc_lame_client_channel_create(const char* target, grpc_status_code error_code, - const char *error_message) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - grpc_channel_element *elem; - grpc_channel *channel = grpc_channel_create(&exec_ctx, target, NULL, - GRPC_CLIENT_LAME_CHANNEL, NULL); + const char* error_message) { + grpc_core::ExecCtx exec_ctx; + grpc_channel_element* elem; + grpc_channel* channel = + grpc_channel_create(target, nullptr, GRPC_CLIENT_LAME_CHANNEL, nullptr); elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0); GRPC_API_TRACE( "grpc_lame_client_channel_create(target=%s, error_code=%d, " "error_message=%s)", 3, (target, (int)error_code, error_message)); GPR_ASSERT(elem->filter == &grpc_lame_filter); - auto chand = reinterpret_cast(elem->channel_data); + auto chand = static_cast(elem->channel_data); chand->error_code = error_code; chand->error_message = error_message; - grpc_exec_ctx_finish(&exec_ctx); + return channel; } diff --git a/Sources/CgRPC/src/core/lib/surface/lame_client.h b/Sources/CgRPC/src/core/lib/surface/lame_client.h index 3ce353f10..aefa67c24 100644 --- a/Sources/CgRPC/src/core/lib/surface/lame_client.h +++ b/Sources/CgRPC/src/core/lib/surface/lame_client.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SURFACE_LAME_CLIENT_H #define GRPC_CORE_LIB_SURFACE_LAME_CLIENT_H +#include + #include "src/core/lib/channel/channel_stack.h" extern const grpc_channel_filter grpc_lame_filter; diff --git a/Sources/CgRPC/src/core/lib/surface/metadata_array.c b/Sources/CgRPC/src/core/lib/surface/metadata_array.cc similarity index 96% rename from Sources/CgRPC/src/core/lib/surface/metadata_array.c rename to Sources/CgRPC/src/core/lib/surface/metadata_array.cc index 0afb8b4b8..f794a2bb9 100644 --- a/Sources/CgRPC/src/core/lib/surface/metadata_array.c +++ b/Sources/CgRPC/src/core/lib/surface/metadata_array.cc @@ -16,6 +16,8 @@ * */ 
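// Context for the grpc_lame_filter changes above, seen from the public C-core
// API: a "lame" channel never creates a transport and fails every RPC
// immediately with a fixed status, which is exactly what fill_metadata()
// synthesizes. A minimal sketch only; make_unavailable_channel() and the error
// text are illustrative and not part of this patch.
#include <grpc/grpc.h>
#include <grpc/status.h>

grpc_channel* make_unavailable_channel(const char* target) {
  // Calls on the returned channel are terminated by grpc_lame_filter with
  // GRPC_STATUS_UNAVAILABLE and the message below; no connection is attempted.
  return grpc_lame_client_channel_create(
      target, GRPC_STATUS_UNAVAILABLE, "backend address could not be resolved");
}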
+#include + #include #include diff --git a/Sources/CgRPC/src/core/lib/surface/server.c b/Sources/CgRPC/src/core/lib/surface/server.cc similarity index 53% rename from Sources/CgRPC/src/core/lib/surface/server.c rename to Sources/CgRPC/src/core/lib/surface/server.cc index 1d0fd472d..cb34def74 100644 --- a/Sources/CgRPC/src/core/lib/surface/server.c +++ b/Sources/CgRPC/src/core/lib/surface/server.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/surface/server.h" #include @@ -25,16 +27,16 @@ #include #include #include -#include #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/channel/connected_channel.h" #include "src/core/lib/debug/stats.h" +#include "src/core/lib/gpr/mpscq.h" +#include "src/core/lib/gpr/spinlock.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/iomgr.h" #include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/support/stack_lockfree.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/call.h" #include "src/core/lib/surface/channel.h" @@ -43,63 +45,61 @@ #include "src/core/lib/transport/metadata.h" #include "src/core/lib/transport/static_metadata.h" -typedef struct listener { - void *arg; - void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_pollset **pollsets, size_t pollset_count); - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_closure *closure); - struct listener *next; - grpc_closure destroy_done; -} listener; +grpc_core::TraceFlag grpc_server_channel_trace(false, "server_channel"); -typedef struct call_data call_data; -typedef struct channel_data channel_data; -typedef struct registered_method registered_method; +namespace { +struct listener { + void* arg; + void (*start)(grpc_server* server, void* arg, grpc_pollset** pollsets, + size_t pollset_count); + void (*destroy)(grpc_server* server, void* arg, grpc_closure* closure); + struct listener* next; + grpc_closure destroy_done; +}; -typedef enum { BATCH_CALL, REGISTERED_CALL } requested_call_type; +enum requested_call_type { BATCH_CALL, REGISTERED_CALL }; -grpc_tracer_flag grpc_server_channel_trace = - GRPC_TRACER_INITIALIZER(false, "server_channel"); +struct registered_method; -typedef struct requested_call { +struct requested_call { + gpr_mpscq_node request_link; /* must be first */ requested_call_type type; size_t cq_idx; - void *tag; - grpc_server *server; - grpc_completion_queue *cq_bound_to_call; - grpc_call **call; + void* tag; + grpc_server* server; + grpc_completion_queue* cq_bound_to_call; + grpc_call** call; grpc_cq_completion completion; - grpc_metadata_array *initial_metadata; + grpc_metadata_array* initial_metadata; union { struct { - grpc_call_details *details; + grpc_call_details* details; } batch; struct { - registered_method *method; - gpr_timespec *deadline; - grpc_byte_buffer **optional_payload; + registered_method* method; + gpr_timespec* deadline; + grpc_byte_buffer** optional_payload; } registered; } data; -} requested_call; +}; -typedef struct channel_registered_method { - registered_method *server_registered_method; +struct channel_registered_method { + registered_method* server_registered_method; uint32_t flags; bool has_host; grpc_slice method; grpc_slice host; -} channel_registered_method; +}; struct channel_data { - grpc_server *server; + grpc_server* server; grpc_connectivity_state connectivity_state; - grpc_channel *channel; + grpc_channel* channel; size_t 
cq_idx; /* linked list of all channels on a server */ - channel_data *next; - channel_data *prev; - channel_registered_method *registered_methods; + channel_data* next; + channel_data* prev; + channel_registered_method* registered_methods; uint32_t registered_method_slots; uint32_t registered_method_max_probes; grpc_closure finish_destroy_channel_closure; @@ -107,8 +107,8 @@ struct channel_data { }; typedef struct shutdown_tag { - void *tag; - grpc_completion_queue *cq; + void* tag; + grpc_completion_queue* cq; grpc_cq_completion completion; } shutdown_tag; @@ -126,65 +126,63 @@ typedef enum { typedef struct request_matcher request_matcher; struct call_data { - grpc_call *call; + grpc_call* call; - /** protects state */ - gpr_mu mu_state; - /** the current state of a call - see call_state */ - call_state state; + gpr_atm state; bool path_set; bool host_set; grpc_slice path; grpc_slice host; - gpr_timespec deadline; + grpc_millis deadline; - grpc_completion_queue *cq_new; + grpc_completion_queue* cq_new; - grpc_metadata_batch *recv_initial_metadata; + grpc_metadata_batch* recv_initial_metadata; uint32_t recv_initial_metadata_flags; grpc_metadata_array initial_metadata; - request_matcher *matcher; - grpc_byte_buffer *payload; + request_matcher* matcher; + grpc_byte_buffer* payload; grpc_closure got_initial_metadata; grpc_closure server_on_recv_initial_metadata; grpc_closure kill_zombie_closure; - grpc_closure *on_done_recv_initial_metadata; + grpc_closure* on_done_recv_initial_metadata; grpc_closure publish; - call_data *pending_next; + call_data* pending_next; }; struct request_matcher { - grpc_server *server; - call_data *pending_head; - call_data *pending_tail; - gpr_stack_lockfree **requests_per_cq; + grpc_server* server; + call_data* pending_head; + call_data* pending_tail; + gpr_locked_mpscq* requests_per_cq; }; struct registered_method { - char *method; - char *host; + char* method; + char* host; grpc_server_register_method_payload_handling payload_handling; uint32_t flags; /* one request matcher per method */ request_matcher matcher; - registered_method *next; + registered_method* next; }; typedef struct { - grpc_channel **channels; + grpc_channel** channels; size_t num_channels; } channel_broadcaster; +} // namespace struct grpc_server { - grpc_channel_args *channel_args; + grpc_channel_args* channel_args; - grpc_completion_queue **cqs; - grpc_pollset **pollsets; + grpc_completion_queue** cqs; + grpc_pollset** pollsets; size_t cq_count; size_t pollset_count; bool started; @@ -204,23 +202,18 @@ struct grpc_server { bool starting; gpr_cv starting_cv; - registered_method *registered_methods; + registered_method* registered_methods; /** one request matcher for unregistered methods */ request_matcher unregistered_request_matcher; - /** free list of available requested_calls_per_cq indices */ - gpr_stack_lockfree **request_freelist_per_cq; - /** requested call backing data */ - requested_call **requested_calls_per_cq; - int max_requested_calls_per_cq; gpr_atm shutdown_flag; uint8_t shutdown_published; size_t num_shutdown_tags; - shutdown_tag *shutdown_tags; + shutdown_tag* shutdown_tags; channel_data root_channel_data; - listener *listeners; + listener* listeners; int listeners_destroyed; gpr_refcount internal_refcount; @@ -229,30 +222,29 @@ struct grpc_server { }; #define SERVER_FROM_CALL_ELEM(elem) \ - (((channel_data *)(elem)->channel_data)->server) + (((channel_data*)(elem)->channel_data)->server) -static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *calld, - grpc_error *error); 
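// Why requested_call now begins with `gpr_mpscq_node request_link; /* must be
// first */`: the locked MPSC queue stores bare node pointers, and the pop paths
// later in this file cast a popped node straight back to the enclosing
// requested_call. That cast is only valid because the node is the first member,
// so the two addresses coincide. A standalone sketch of the idiom with
// hypothetical types (node/request/from_link are illustrative, not the
// gpr_mpscq API):
#include <cassert>

struct node { node* next = nullptr; };
struct request {
  node link;     // must be first: &r and &r.link share an address
  int payload = 0;
};
static request* from_link(node* n) {
  return reinterpret_cast<request*>(n);  // mirrors the reinterpret_cast on pop
}

int main() {
  request r;
  assert(from_link(&r.link) == &r);  // holds because link is the first member
  return 0;
}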
-static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server, - size_t cq_idx, requested_call *rc, grpc_error *error); +static void publish_new_rpc(void* calld, grpc_error* error); +static void fail_call(grpc_server* server, size_t cq_idx, requested_call* rc, + grpc_error* error); /* Before calling maybe_finish_shutdown, we must hold mu_global and not hold mu_call */ -static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_server *server); +static void maybe_finish_shutdown(grpc_server* server); /* * channel broadcaster */ /* assumes server locked */ -static void channel_broadcaster_init(grpc_server *s, channel_broadcaster *cb) { - channel_data *c; +static void channel_broadcaster_init(grpc_server* s, channel_broadcaster* cb) { + channel_data* c; size_t count = 0; for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) { count++; } cb->num_channels = count; - cb->channels = - (grpc_channel **)gpr_malloc(sizeof(*cb->channels) * cb->num_channels); + cb->channels = static_cast( + gpr_malloc(sizeof(*cb->channels) * cb->num_channels)); count = 0; for (c = s->root_channel_data.next; c != &s->root_channel_data; c = c->next) { cb->channels[count++] = c->channel; @@ -265,21 +257,21 @@ struct shutdown_cleanup_args { grpc_slice slice; }; -static void shutdown_cleanup(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - struct shutdown_cleanup_args *a = (struct shutdown_cleanup_args *)arg; - grpc_slice_unref_internal(exec_ctx, a->slice); +static void shutdown_cleanup(void* arg, grpc_error* error) { + struct shutdown_cleanup_args* a = + static_cast(arg); + grpc_slice_unref_internal(a->slice); gpr_free(a); } -static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel, - bool send_goaway, grpc_error *send_disconnect) { - struct shutdown_cleanup_args *sc = - (struct shutdown_cleanup_args *)gpr_malloc(sizeof(*sc)); +static void send_shutdown(grpc_channel* channel, bool send_goaway, + grpc_error* send_disconnect) { + struct shutdown_cleanup_args* sc = + static_cast(gpr_malloc(sizeof(*sc))); GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc, grpc_schedule_on_exec_ctx); - grpc_transport_op *op = grpc_make_transport_op(&sc->closure); - grpc_channel_element *elem; + grpc_transport_op* op = grpc_make_transport_op(&sc->closure); + grpc_channel_element* elem; op->goaway_error = send_goaway ? 
grpc_error_set_int( @@ -291,19 +283,18 @@ static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel, op->disconnect_with_error = send_disconnect; elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0); - elem->filter->start_transport_op(exec_ctx, elem, op); + elem->filter->start_transport_op(elem, op); } -static void channel_broadcaster_shutdown(grpc_exec_ctx *exec_ctx, - channel_broadcaster *cb, +static void channel_broadcaster_shutdown(channel_broadcaster* cb, bool send_goaway, - grpc_error *force_disconnect) { + grpc_error* force_disconnect) { size_t i; for (i = 0; i < cb->num_channels; i++) { - send_shutdown(exec_ctx, cb->channels[i], send_goaway, + send_shutdown(cb->channels[i], send_goaway, GRPC_ERROR_REF(force_disconnect)); - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, cb->channels[i], "broadcast"); + GRPC_CHANNEL_INTERNAL_UNREF(cb->channels[i], "broadcast"); } gpr_free(cb->channels); GRPC_ERROR_UNREF(force_disconnect); @@ -313,57 +304,50 @@ static void channel_broadcaster_shutdown(grpc_exec_ctx *exec_ctx, * request_matcher */ -static void request_matcher_init(request_matcher *rm, size_t entries, - grpc_server *server) { +static void request_matcher_init(request_matcher* rm, grpc_server* server) { memset(rm, 0, sizeof(*rm)); rm->server = server; - rm->requests_per_cq = (gpr_stack_lockfree **)gpr_malloc( - sizeof(*rm->requests_per_cq) * server->cq_count); + rm->requests_per_cq = static_cast( + gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count)); for (size_t i = 0; i < server->cq_count; i++) { - rm->requests_per_cq[i] = gpr_stack_lockfree_create(entries); + gpr_locked_mpscq_init(&rm->requests_per_cq[i]); } } -static void request_matcher_destroy(request_matcher *rm) { +static void request_matcher_destroy(request_matcher* rm) { for (size_t i = 0; i < rm->server->cq_count; i++) { - GPR_ASSERT(gpr_stack_lockfree_pop(rm->requests_per_cq[i]) == -1); - gpr_stack_lockfree_destroy(rm->requests_per_cq[i]); + GPR_ASSERT(gpr_locked_mpscq_pop(&rm->requests_per_cq[i]) == nullptr); + gpr_locked_mpscq_destroy(&rm->requests_per_cq[i]); } gpr_free(rm->requests_per_cq); } -static void kill_zombie(grpc_exec_ctx *exec_ctx, void *elem, - grpc_error *error) { - grpc_call_unref(grpc_call_from_top_element((grpc_call_element *)elem)); +static void kill_zombie(void* elem, grpc_error* error) { + grpc_call_unref( + grpc_call_from_top_element(static_cast(elem))); } -static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx *exec_ctx, - request_matcher *rm) { +static void request_matcher_zombify_all_pending_calls(request_matcher* rm) { while (rm->pending_head) { - call_data *calld = rm->pending_head; + call_data* calld = rm->pending_head; rm->pending_head = calld->pending_next; - gpr_mu_lock(&calld->mu_state); - calld->state = ZOMBIED; - gpr_mu_unlock(&calld->mu_state); + gpr_atm_no_barrier_store(&calld->state, ZOMBIED); GRPC_CLOSURE_INIT( &calld->kill_zombie_closure, kill_zombie, grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0), grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE); } } -static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx, - grpc_server *server, - request_matcher *rm, - grpc_error *error) { - int request_id; +static void request_matcher_kill_requests(grpc_server* server, + request_matcher* rm, + grpc_error* error) { + requested_call* rc; for (size_t i = 0; i < server->cq_count; i++) { - while 
((request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[i])) != - -1) { - fail_call(exec_ctx, server, i, - &server->requested_calls_per_cq[i][request_id], - GRPC_ERROR_REF(error)); + while ((rc = reinterpret_cast( + gpr_locked_mpscq_pop(&rm->requests_per_cq[i]))) != nullptr) { + fail_call(server, i, rc, GRPC_ERROR_REF(error)); } } GRPC_ERROR_UNREF(error); @@ -373,18 +357,18 @@ static void request_matcher_kill_requests(grpc_exec_ctx *exec_ctx, * server proper */ -static void server_ref(grpc_server *server) { +static void server_ref(grpc_server* server) { gpr_ref(&server->internal_refcount); } -static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) { - registered_method *rm; +static void server_delete(grpc_server* server) { + registered_method* rm; size_t i; - grpc_channel_args_destroy(exec_ctx, server->channel_args); + grpc_channel_args_destroy(server->channel_args); gpr_mu_destroy(&server->mu_global); gpr_mu_destroy(&server->mu_call); gpr_cv_destroy(&server->starting_cv); - while ((rm = server->registered_methods) != NULL) { + while ((rm = server->registered_methods) != nullptr) { server->registered_methods = rm->next; if (server->started) { request_matcher_destroy(&rm->matcher); @@ -397,92 +381,68 @@ static void server_delete(grpc_exec_ctx *exec_ctx, grpc_server *server) { request_matcher_destroy(&server->unregistered_request_matcher); } for (i = 0; i < server->cq_count; i++) { - GRPC_CQ_INTERNAL_UNREF(exec_ctx, server->cqs[i], "server"); - if (server->started) { - gpr_stack_lockfree_destroy(server->request_freelist_per_cq[i]); - gpr_free(server->requested_calls_per_cq[i]); - } + GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server"); } - gpr_free(server->request_freelist_per_cq); - gpr_free(server->requested_calls_per_cq); gpr_free(server->cqs); gpr_free(server->pollsets); gpr_free(server->shutdown_tags); gpr_free(server); } -static void server_unref(grpc_exec_ctx *exec_ctx, grpc_server *server) { +static void server_unref(grpc_server* server) { if (gpr_unref(&server->internal_refcount)) { - server_delete(exec_ctx, server); + server_delete(server); } } -static int is_channel_orphaned(channel_data *chand) { +static int is_channel_orphaned(channel_data* chand) { return chand->next == chand; } -static void orphan_channel(channel_data *chand) { +static void orphan_channel(channel_data* chand) { chand->next->prev = chand->prev; chand->prev->next = chand->next; chand->next = chand->prev = chand; } -static void finish_destroy_channel(grpc_exec_ctx *exec_ctx, void *cd, - grpc_error *error) { - channel_data *chand = (channel_data *)cd; - grpc_server *server = chand->server; - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server"); - server_unref(exec_ctx, server); +static void finish_destroy_channel(void* cd, grpc_error* error) { + channel_data* chand = static_cast(cd); + grpc_server* server = chand->server; + GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server"); + server_unref(server); } -static void destroy_channel(grpc_exec_ctx *exec_ctx, channel_data *chand, - grpc_error *error) { +static void destroy_channel(channel_data* chand, grpc_error* error) { if (is_channel_orphaned(chand)) return; - GPR_ASSERT(chand->server != NULL); + GPR_ASSERT(chand->server != nullptr); orphan_channel(chand); server_ref(chand->server); - maybe_finish_shutdown(exec_ctx, chand->server); + maybe_finish_shutdown(chand->server); GRPC_CLOSURE_INIT(&chand->finish_destroy_channel_closure, finish_destroy_channel, chand, grpc_schedule_on_exec_ctx); - if (GRPC_TRACER_ON(grpc_server_channel_trace) && 
error != GRPC_ERROR_NONE) { - const char *msg = grpc_error_string(error); + if (grpc_server_channel_trace.enabled() && error != GRPC_ERROR_NONE) { + const char* msg = grpc_error_string(error); gpr_log(GPR_INFO, "Disconnected client: %s", msg); } GRPC_ERROR_UNREF(error); - grpc_transport_op *op = + grpc_transport_op* op = grpc_make_transport_op(&chand->finish_destroy_channel_closure); op->set_accept_stream = true; - grpc_channel_next_op(exec_ctx, - grpc_channel_stack_element( + grpc_channel_next_op(grpc_channel_stack_element( grpc_channel_get_channel_stack(chand->channel), 0), op); } -static void done_request_event(grpc_exec_ctx *exec_ctx, void *req, - grpc_cq_completion *c) { - requested_call *rc = (requested_call *)req; - grpc_server *server = rc->server; - - if (rc >= server->requested_calls_per_cq[rc->cq_idx] && - rc < server->requested_calls_per_cq[rc->cq_idx] + - server->max_requested_calls_per_cq) { - GPR_ASSERT(rc - server->requested_calls_per_cq[rc->cq_idx] <= INT_MAX); - gpr_stack_lockfree_push( - server->request_freelist_per_cq[rc->cq_idx], - (int)(rc - server->requested_calls_per_cq[rc->cq_idx])); - } else { - gpr_free(req); - } - - server_unref(exec_ctx, server); +static void done_request_event(void* req, grpc_cq_completion* c) { + gpr_free(req); } -static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server, - call_data *calld, size_t cq_idx, requested_call *rc) { - grpc_call_set_completion_queue(exec_ctx, calld->call, rc->cq_bound_to_call); - grpc_call *call = calld->call; +static void publish_call(grpc_server* server, call_data* calld, size_t cq_idx, + requested_call* rc) { + grpc_call_set_completion_queue(calld->call, rc->cq_bound_to_call); + grpc_call* call = calld->call; *rc->call = call; calld->cq_new = server->cqs[cq_idx]; GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, calld->initial_metadata); @@ -492,94 +452,101 @@ static void publish_call(grpc_exec_ctx *exec_ctx, grpc_server *server, GPR_ASSERT(calld->path_set); rc->data.batch.details->host = grpc_slice_ref_internal(calld->host); rc->data.batch.details->method = grpc_slice_ref_internal(calld->path); - rc->data.batch.details->deadline = calld->deadline; + rc->data.batch.details->deadline = + grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC); rc->data.batch.details->flags = calld->recv_initial_metadata_flags; break; case REGISTERED_CALL: - *rc->data.registered.deadline = calld->deadline; + *rc->data.registered.deadline = + grpc_millis_to_timespec(calld->deadline, GPR_CLOCK_MONOTONIC); if (rc->data.registered.optional_payload) { *rc->data.registered.optional_payload = calld->payload; - calld->payload = NULL; + calld->payload = nullptr; } break; default: GPR_UNREACHABLE_CODE(return ); } - grpc_call_element *elem = - grpc_call_stack_element(grpc_call_get_call_stack(call), 0); - channel_data *chand = (channel_data *)elem->channel_data; - server_ref(chand->server); - grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE, - done_request_event, rc, &rc->completion); + grpc_cq_end_op(calld->cq_new, rc->tag, GRPC_ERROR_NONE, done_request_event, + rc, &rc->completion); } -static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - grpc_call_element *call_elem = (grpc_call_element *)arg; - call_data *calld = (call_data *)call_elem->call_data; - channel_data *chand = (channel_data *)call_elem->channel_data; - request_matcher *rm = calld->matcher; - grpc_server *server = rm->server; +static void publish_new_rpc(void* arg, grpc_error* error) { + grpc_call_element* 
call_elem = static_cast(arg); + call_data* calld = static_cast(call_elem->call_data); + channel_data* chand = static_cast(call_elem->channel_data); + request_matcher* rm = calld->matcher; + grpc_server* server = rm->server; if (error != GRPC_ERROR_NONE || gpr_atm_acq_load(&server->shutdown_flag)) { - gpr_mu_lock(&calld->mu_state); - calld->state = ZOMBIED; - gpr_mu_unlock(&calld->mu_state); + gpr_atm_no_barrier_store(&calld->state, ZOMBIED); GRPC_CLOSURE_INIT( &calld->kill_zombie_closure, kill_zombie, grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0), grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, - GRPC_ERROR_REF(error)); + GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_REF(error)); return; } for (size_t i = 0; i < server->cq_count; i++) { size_t cq_idx = (chand->cq_idx + i) % server->cq_count; - int request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[cq_idx]); - if (request_id == -1) { + requested_call* rc = reinterpret_cast( + gpr_locked_mpscq_try_pop(&rm->requests_per_cq[cq_idx])); + if (rc == nullptr) { continue; } else { - GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i); - gpr_mu_lock(&calld->mu_state); - calld->state = ACTIVATED; - gpr_mu_unlock(&calld->mu_state); - publish_call(exec_ctx, server, calld, cq_idx, - &server->requested_calls_per_cq[cq_idx][request_id]); + GRPC_STATS_INC_SERVER_CQS_CHECKED(i); + gpr_atm_no_barrier_store(&calld->state, ACTIVATED); + publish_call(server, calld, cq_idx, rc); return; /* early out */ } } /* no cq to take the request found: queue it on the slow list */ - GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx); + GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(); gpr_mu_lock(&server->mu_call); - gpr_mu_lock(&calld->mu_state); - calld->state = PENDING; - gpr_mu_unlock(&calld->mu_state); - if (rm->pending_head == NULL) { + + // We need to ensure that all the queues are empty. We do this under + // the server mu_call lock to ensure that if something is added to + // an empty request queue, it will block until the call is actually + // added to the pending list. 
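// The comment above describes a "re-check under the lock" pattern: the earlier
// lock-free scan of the per-CQ request queues found nothing, so the call takes
// server->mu_call and scans once more before parking itself on the pending
// list; otherwise a request pushed in between could be stranded. A simplified
// standalone sketch of the same idea (std::mutex and std::deque stand in for
// mu_call and gpr_locked_mpscq; Request/Waiter/match_or_park are illustrative
// names, not part of this patch):
#include <deque>
#include <mutex>

struct Request {};
struct Waiter {};

std::mutex mu_call;             // plays the role of server->mu_call
std::deque<Request*> requests;  // plays the role of rm->requests_per_cq[i]
std::deque<Waiter*> pending;    // plays the role of rm->pending_head/tail

void match_or_park(Waiter* w) {
  // A first, lock-free pass (not shown) found `requests` empty. Re-check under
  // mu_call: a producer that pushed onto an empty queue will take this same
  // lock before scanning the pending list, so either we see its request here
  // or it will see us on `pending`.
  std::lock_guard<std::mutex> lock(mu_call);
  if (!requests.empty()) {
    requests.pop_front();       // a request raced in: match it right away
    return;
  }
  pending.push_back(w);         // genuinely empty: park the call as pending
}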
+ for (size_t i = 0; i < server->cq_count; i++) { + size_t cq_idx = (chand->cq_idx + i) % server->cq_count; + requested_call* rc = reinterpret_cast( + gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx])); + if (rc == nullptr) { + continue; + } else { + gpr_mu_unlock(&server->mu_call); + GRPC_STATS_INC_SERVER_CQS_CHECKED(i + server->cq_count); + gpr_atm_no_barrier_store(&calld->state, ACTIVATED); + publish_call(server, calld, cq_idx, rc); + return; /* early out */ + } + } + + gpr_atm_no_barrier_store(&calld->state, PENDING); + if (rm->pending_head == nullptr) { rm->pending_tail = rm->pending_head = calld; } else { rm->pending_tail->pending_next = calld; rm->pending_tail = calld; } - calld->pending_next = NULL; + calld->pending_next = nullptr; gpr_mu_unlock(&server->mu_call); } static void finish_start_new_rpc( - grpc_exec_ctx *exec_ctx, grpc_server *server, grpc_call_element *elem, - request_matcher *rm, + grpc_server* server, grpc_call_element* elem, request_matcher* rm, grpc_server_register_method_payload_handling payload_handling) { - call_data *calld = (call_data *)elem->call_data; + call_data* calld = static_cast(elem->call_data); if (gpr_atm_acq_load(&server->shutdown_flag)) { - gpr_mu_lock(&calld->mu_state); - calld->state = ZOMBIED; - gpr_mu_unlock(&calld->mu_state); + gpr_atm_no_barrier_store(&calld->state, ZOMBIED); GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem, grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE); return; } @@ -587,7 +554,7 @@ static void finish_start_new_rpc( switch (payload_handling) { case GRPC_SRM_PAYLOAD_NONE: - publish_new_rpc(exec_ctx, elem, GRPC_ERROR_NONE); + publish_new_rpc(elem, GRPC_ERROR_NONE); break; case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER: { grpc_op op; @@ -596,20 +563,19 @@ static void finish_start_new_rpc( op.data.recv_message.recv_message = &calld->payload; GRPC_CLOSURE_INIT(&calld->publish, publish_new_rpc, elem, grpc_schedule_on_exec_ctx); - grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1, - &calld->publish); + grpc_call_start_batch_and_execute(calld->call, &op, 1, &calld->publish); break; } } } -static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; - grpc_server *server = chand->server; +static void start_new_rpc(grpc_call_element* elem) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); + grpc_server* server = chand->server; uint32_t i; uint32_t hash; - channel_registered_method *rm; + channel_registered_method* rm; if (chand->registered_methods && calld->path_set && calld->host_set) { /* TODO(ctiller): unify these two searches */ @@ -628,8 +594,7 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) { continue; } - finish_start_new_rpc(exec_ctx, server, elem, - &rm->server_registered_method->matcher, + finish_start_new_rpc(server, elem, &rm->server_registered_method->matcher, rm->server_registered_method->payload_handling); return; } @@ -646,19 +611,17 @@ static void start_new_rpc(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) { GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) { continue; } - finish_start_new_rpc(exec_ctx, server, elem, - &rm->server_registered_method->matcher, + finish_start_new_rpc(server, elem, 
&rm->server_registered_method->matcher, rm->server_registered_method->payload_handling); return; } } - finish_start_new_rpc(exec_ctx, server, elem, - &server->unregistered_request_matcher, + finish_start_new_rpc(server, elem, &server->unregistered_request_matcher, GRPC_SRM_PAYLOAD_NONE); } -static int num_listeners(grpc_server *server) { - listener *l; +static int num_listeners(grpc_server* server) { + listener* l; int n = 0; for (l = server->listeners; l; l = l->next) { n++; @@ -666,13 +629,12 @@ static int num_listeners(grpc_server *server) { return n; } -static void done_shutdown_event(grpc_exec_ctx *exec_ctx, void *server, - grpc_cq_completion *completion) { - server_unref(exec_ctx, (grpc_server *)server); +static void done_shutdown_event(void* server, grpc_cq_completion* completion) { + server_unref(static_cast(server)); } -static int num_channels(grpc_server *server) { - channel_data *chand; +static int num_channels(grpc_server* server) { + channel_data* chand; int n = 0; for (chand = server->root_channel_data.next; chand != &server->root_channel_data; chand = chand->next) { @@ -681,34 +643,30 @@ static int num_channels(grpc_server *server) { return n; } -static void kill_pending_work_locked(grpc_exec_ctx *exec_ctx, - grpc_server *server, grpc_error *error) { +static void kill_pending_work_locked(grpc_server* server, grpc_error* error) { if (server->started) { - request_matcher_kill_requests(exec_ctx, server, - &server->unregistered_request_matcher, + request_matcher_kill_requests(server, &server->unregistered_request_matcher, GRPC_ERROR_REF(error)); request_matcher_zombify_all_pending_calls( - exec_ctx, &server->unregistered_request_matcher); - for (registered_method *rm = server->registered_methods; rm; + &server->unregistered_request_matcher); + for (registered_method* rm = server->registered_methods; rm; rm = rm->next) { - request_matcher_kill_requests(exec_ctx, server, &rm->matcher, + request_matcher_kill_requests(server, &rm->matcher, GRPC_ERROR_REF(error)); - request_matcher_zombify_all_pending_calls(exec_ctx, &rm->matcher); + request_matcher_zombify_all_pending_calls(&rm->matcher); } } GRPC_ERROR_UNREF(error); } -static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, - grpc_server *server) { +static void maybe_finish_shutdown(grpc_server* server) { size_t i; if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) { return; } kill_pending_work_locked( - exec_ctx, server, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); + server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); if (server->root_channel_data.next != &server->root_channel_data || server->listeners_destroyed < num_listeners(server)) { @@ -728,58 +686,56 @@ static void maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, server->shutdown_published = 1; for (i = 0; i < server->num_shutdown_tags; i++) { server_ref(server); - grpc_cq_end_op(exec_ctx, server->shutdown_tags[i].cq, - server->shutdown_tags[i].tag, GRPC_ERROR_NONE, - done_shutdown_event, server, + grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag, + GRPC_ERROR_NONE, done_shutdown_event, server, &server->shutdown_tags[i].completion); } } -static void server_on_recv_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)ptr; - call_data *calld = (call_data *)elem->call_data; - gpr_timespec op_deadline; +static void server_on_recv_initial_metadata(void* ptr, grpc_error* error) { + grpc_call_element* elem = static_cast(ptr); + 
call_data* calld = static_cast(elem->call_data); + grpc_millis op_deadline; if (error == GRPC_ERROR_NONE) { - GPR_ASSERT(calld->recv_initial_metadata->idx.named.path != NULL); - GPR_ASSERT(calld->recv_initial_metadata->idx.named.authority != NULL); + GPR_ASSERT(calld->recv_initial_metadata->idx.named.path != nullptr); + GPR_ASSERT(calld->recv_initial_metadata->idx.named.authority != nullptr); calld->path = grpc_slice_ref_internal( GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.path->md)); calld->host = grpc_slice_ref_internal( GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.authority->md)); calld->path_set = true; calld->host_set = true; - grpc_metadata_batch_remove(exec_ctx, calld->recv_initial_metadata, + grpc_metadata_batch_remove(calld->recv_initial_metadata, calld->recv_initial_metadata->idx.named.path); grpc_metadata_batch_remove( - exec_ctx, calld->recv_initial_metadata, + calld->recv_initial_metadata, calld->recv_initial_metadata->idx.named.authority); } else { GRPC_ERROR_REF(error); } op_deadline = calld->recv_initial_metadata->deadline; - if (0 != gpr_time_cmp(op_deadline, gpr_inf_future(op_deadline.clock_type))) { + if (op_deadline != GRPC_MILLIS_INF_FUTURE) { calld->deadline = op_deadline; } if (calld->host_set && calld->path_set) { /* do nothing */ } else { - grpc_error *src_error = error; + grpc_error* src_error = error; error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Missing :authority or :path", &error, 1); GRPC_ERROR_UNREF(src_error); } - GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_initial_metadata, error); + GRPC_CLOSURE_RUN(calld->on_done_recv_initial_metadata, error); } -static void server_mutate_op(grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - call_data *calld = (call_data *)elem->call_data; +static void server_mutate_op(grpc_call_element* elem, + grpc_transport_stream_op_batch* op) { + call_data* calld = static_cast(elem->call_data); if (op->recv_initial_metadata) { - GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags == NULL); + GPR_ASSERT(op->payload->recv_initial_metadata.recv_flags == nullptr); calld->recv_initial_metadata = op->payload->recv_initial_metadata.recv_initial_metadata; calld->on_done_recv_initial_metadata = @@ -792,58 +748,47 @@ static void server_mutate_op(grpc_call_element *elem, } static void server_start_transport_stream_op_batch( - grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { + grpc_call_element* elem, grpc_transport_stream_op_batch* op) { server_mutate_op(elem, op); - grpc_call_next_op(exec_ctx, elem, op); + grpc_call_next_op(elem, op); } -static void got_initial_metadata(grpc_exec_ctx *exec_ctx, void *ptr, - grpc_error *error) { - grpc_call_element *elem = (grpc_call_element *)ptr; - call_data *calld = (call_data *)elem->call_data; +static void got_initial_metadata(void* ptr, grpc_error* error) { + grpc_call_element* elem = static_cast(ptr); + call_data* calld = static_cast(elem->call_data); if (error == GRPC_ERROR_NONE) { - start_new_rpc(exec_ctx, elem); + start_new_rpc(elem); } else { - gpr_mu_lock(&calld->mu_state); - if (calld->state == NOT_STARTED) { - calld->state = ZOMBIED; - gpr_mu_unlock(&calld->mu_state); + if (gpr_atm_full_cas(&calld->state, NOT_STARTED, ZOMBIED)) { GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem, grpc_schedule_on_exec_ctx); - GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, - GRPC_ERROR_NONE); - } else if (calld->state == PENDING) { - calld->state = ZOMBIED; - gpr_mu_unlock(&calld->mu_state); + 
GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE); + } else if (gpr_atm_full_cas(&calld->state, PENDING, ZOMBIED)) { /* zombied call will be destroyed when it's removed from the pending queue... later */ - } else { - gpr_mu_unlock(&calld->mu_state); } } } -static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd, - grpc_transport *transport, - const void *transport_server_data) { - channel_data *chand = (channel_data *)cd; +static void accept_stream(void* cd, grpc_transport* transport, + const void* transport_server_data) { + channel_data* chand = static_cast(cd); /* create a call */ grpc_call_create_args args; memset(&args, 0, sizeof(args)); args.channel = chand->channel; args.server_transport_data = transport_server_data; - args.send_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - grpc_call *call; - grpc_error *error = grpc_call_create(exec_ctx, &args, &call); - grpc_call_element *elem = + args.send_deadline = GRPC_MILLIS_INF_FUTURE; + grpc_call* call; + grpc_error* error = grpc_call_create(&args, &call); + grpc_call_element* elem = grpc_call_stack_element(grpc_call_get_call_stack(call), 0); if (error != GRPC_ERROR_NONE) { - got_initial_metadata(exec_ctx, elem, error); + got_initial_metadata(elem, error); GRPC_ERROR_UNREF(error); return; } - call_data *calld = (call_data *)elem->call_data; + call_data* calld = static_cast(elem->call_data); grpc_op op; memset(&op, 0, sizeof(op)); op.op = GRPC_OP_RECV_INITIAL_METADATA; @@ -851,39 +796,34 @@ static void accept_stream(grpc_exec_ctx *exec_ctx, void *cd, &calld->initial_metadata; GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem, grpc_schedule_on_exec_ctx); - grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1, - &calld->got_initial_metadata); + grpc_call_start_batch_and_execute(call, &op, 1, &calld->got_initial_metadata); } -static void channel_connectivity_changed(grpc_exec_ctx *exec_ctx, void *cd, - grpc_error *error) { - channel_data *chand = (channel_data *)cd; - grpc_server *server = chand->server; +static void channel_connectivity_changed(void* cd, grpc_error* error) { + channel_data* chand = static_cast(cd); + grpc_server* server = chand->server; if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) { - grpc_transport_op *op = grpc_make_transport_op(NULL); - op->on_connectivity_state_change = &chand->channel_connectivity_changed, + grpc_transport_op* op = grpc_make_transport_op(nullptr); + op->on_connectivity_state_change = &chand->channel_connectivity_changed; op->connectivity_state = &chand->connectivity_state; - grpc_channel_next_op(exec_ctx, - grpc_channel_stack_element( + grpc_channel_next_op(grpc_channel_stack_element( grpc_channel_get_channel_stack(chand->channel), 0), op); } else { gpr_mu_lock(&server->mu_global); - destroy_channel(exec_ctx, chand, GRPC_ERROR_REF(error)); + destroy_channel(chand, GRPC_ERROR_REF(error)); gpr_mu_unlock(&server->mu_global); - GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "connectivity"); + GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "connectivity"); } } -static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, - grpc_call_element *elem, - const grpc_call_element_args *args) { - call_data *calld = (call_data *)elem->call_data; - channel_data *chand = (channel_data *)elem->channel_data; +static grpc_error* init_call_elem(grpc_call_element* elem, + const grpc_call_element_args* args) { + call_data* calld = static_cast(elem->call_data); + channel_data* chand = static_cast(elem->channel_data); memset(calld, 0, sizeof(call_data)); - 
calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + calld->deadline = GRPC_MILLIS_INF_FUTURE; calld->call = grpc_call_from_top_element(elem); - gpr_mu_init(&calld->mu_state); GRPC_CLOSURE_INIT(&calld->server_on_recv_initial_metadata, server_on_recv_initial_metadata, elem, @@ -893,38 +833,35 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, - const grpc_call_final_info *final_info, - grpc_closure *ignored) { - channel_data *chand = (channel_data *)elem->channel_data; - call_data *calld = (call_data *)elem->call_data; +static void destroy_call_elem(grpc_call_element* elem, + const grpc_call_final_info* final_info, + grpc_closure* ignored) { + channel_data* chand = static_cast(elem->channel_data); + call_data* calld = static_cast(elem->call_data); GPR_ASSERT(calld->state != PENDING); if (calld->host_set) { - grpc_slice_unref_internal(exec_ctx, calld->host); + grpc_slice_unref_internal(calld->host); } if (calld->path_set) { - grpc_slice_unref_internal(exec_ctx, calld->path); + grpc_slice_unref_internal(calld->path); } grpc_metadata_array_destroy(&calld->initial_metadata); grpc_byte_buffer_destroy(calld->payload); - gpr_mu_destroy(&calld->mu_state); - - server_unref(exec_ctx, chand->server); + server_unref(chand->server); } -static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem, - grpc_channel_element_args *args) { - channel_data *chand = (channel_data *)elem->channel_data; +static grpc_error* init_channel_elem(grpc_channel_element* elem, + grpc_channel_element_args* args) { + channel_data* chand = static_cast(elem->channel_data); GPR_ASSERT(args->is_first); GPR_ASSERT(!args->is_last); - chand->server = NULL; - chand->channel = NULL; + chand->server = nullptr; + chand->channel = nullptr; chand->next = chand->prev = chand; - chand->registered_methods = NULL; + chand->registered_methods = nullptr; chand->connectivity_state = GRPC_CHANNEL_IDLE; GRPC_CLOSURE_INIT(&chand->channel_connectivity_changed, channel_connectivity_changed, chand, @@ -932,15 +869,14 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx, return GRPC_ERROR_NONE; } -static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, - grpc_channel_element *elem) { +static void destroy_channel_elem(grpc_channel_element* elem) { size_t i; - channel_data *chand = (channel_data *)elem->channel_data; + channel_data* chand = static_cast(elem->channel_data); if (chand->registered_methods) { for (i = 0; i < chand->registered_method_slots; i++) { - grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].method); + grpc_slice_unref_internal(chand->registered_methods[i].method); if (chand->registered_methods[i].has_host) { - grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].host); + grpc_slice_unref_internal(chand->registered_methods[i].host); } } gpr_free(chand->registered_methods); @@ -950,9 +886,9 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx, chand->next->prev = chand->prev; chand->prev->next = chand->next; chand->next = chand->prev = chand; - maybe_finish_shutdown(exec_ctx, chand->server); + maybe_finish_shutdown(chand->server); gpr_mu_unlock(&chand->server->mu_global); - server_unref(exec_ctx, chand->server); + server_unref(chand->server); } } @@ -970,9 +906,9 @@ const grpc_channel_filter grpc_server_top_filter = { "server", }; -static void register_completion_queue(grpc_server *server, - grpc_completion_queue *cq, - void *reserved) { 
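// For orientation, the public C-core calls that drive the code in this file,
// in the order an application would use them. A minimal sketch only: the port
// string and tag value are illustrative, error handling and the
// grpc_server_request_call event loop are omitted.
#include <grpc/grpc.h>
#include <grpc/support/time.h>

int main(void) {
  grpc_init();
  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
  grpc_server* server = grpc_server_create(nullptr, nullptr);
  grpc_server_register_completion_queue(server, cq, nullptr);  // before start
  grpc_server_add_insecure_http2_port(server, "0.0.0.0:50051");
  grpc_server_start(server);

  // ... request calls with grpc_server_request_call and pump
  //     grpc_completion_queue_next here ...

  void* shutdown_tag = reinterpret_cast<void*>(1);
  grpc_server_shutdown_and_notify(server, cq, shutdown_tag);
  grpc_event ev;
  do {  // wait for the shutdown tag published by maybe_finish_shutdown()
    ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    nullptr);
  } while (ev.tag != shutdown_tag);
  grpc_server_destroy(server);

  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    nullptr)
             .type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  return 0;
}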
+static void register_completion_queue(grpc_server* server, + grpc_completion_queue* cq, + void* reserved) { size_t i, n; GPR_ASSERT(!reserved); for (i = 0; i < server->cq_count; i++) { @@ -981,14 +917,14 @@ static void register_completion_queue(grpc_server *server, GRPC_CQ_INTERNAL_REF(cq, "server"); n = server->cq_count++; - server->cqs = (grpc_completion_queue **)gpr_realloc( - server->cqs, server->cq_count * sizeof(grpc_completion_queue *)); + server->cqs = static_cast(gpr_realloc( + server->cqs, server->cq_count * sizeof(grpc_completion_queue*))); server->cqs[n] = cq; } -void grpc_server_register_completion_queue(grpc_server *server, - grpc_completion_queue *cq, - void *reserved) { +void grpc_server_register_completion_queue(grpc_server* server, + grpc_completion_queue* cq, + void* reserved) { GRPC_API_TRACE( "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3, (server, cq, reserved)); @@ -1004,10 +940,11 @@ void grpc_server_register_completion_queue(grpc_server *server, register_completion_queue(server, cq, reserved); } -grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) { +grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) { GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved)); - grpc_server *server = (grpc_server *)gpr_zalloc(sizeof(grpc_server)); + grpc_server* server = + static_cast(gpr_zalloc(sizeof(grpc_server))); gpr_mu_init(&server->mu_global); gpr_mu_init(&server->mu_call); @@ -1018,25 +955,23 @@ grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) { server->root_channel_data.next = server->root_channel_data.prev = &server->root_channel_data; - /* TODO(ctiller): expose a channel_arg for this */ - server->max_requested_calls_per_cq = 32768; server->channel_args = grpc_channel_args_copy(args); return server; } -static int streq(const char *a, const char *b) { - if (a == NULL && b == NULL) return 1; - if (a == NULL) return 0; - if (b == NULL) return 0; +static int streq(const char* a, const char* b) { + if (a == nullptr && b == nullptr) return 1; + if (a == nullptr) return 0; + if (b == nullptr) return 0; return 0 == strcmp(a, b); } -void *grpc_server_register_method( - grpc_server *server, const char *method, const char *host, +void* grpc_server_register_method( + grpc_server* server, const char* method, const char* host, grpc_server_register_method_payload_handling payload_handling, uint32_t flags) { - registered_method *m; + registered_method* m; GRPC_API_TRACE( "grpc_server_register_method(server=%p, method=%s, host=%s, " "flags=0x%08x)", @@ -1044,21 +979,21 @@ void *grpc_server_register_method( if (!method) { gpr_log(GPR_ERROR, "grpc_server_register_method method string cannot be NULL"); - return NULL; + return nullptr; } for (m = server->registered_methods; m; m = m->next) { if (streq(m->method, method) && streq(m->host, host)) { gpr_log(GPR_ERROR, "duplicate registration for %s@%s", method, host ? 
host : "*"); - return NULL; + return nullptr; } } if ((flags & ~GRPC_INITIAL_METADATA_USED_MASK) != 0) { gpr_log(GPR_ERROR, "grpc_server_register_method invalid flags 0x%08x", flags); - return NULL; + return nullptr; } - m = (registered_method *)gpr_zalloc(sizeof(registered_method)); + m = static_cast(gpr_zalloc(sizeof(registered_method))); m->method = gpr_strdup(method); m->host = gpr_strdup(host); m->next = server->registered_methods; @@ -1068,11 +1003,10 @@ void *grpc_server_register_method( return m; } -static void start_listeners(grpc_exec_ctx *exec_ctx, void *s, - grpc_error *error) { - grpc_server *server = (grpc_server *)s; - for (listener *l = server->listeners; l; l = l->next) { - l->start(exec_ctx, server, l->arg, server->pollsets, server->pollset_count); +static void start_listeners(void* s, grpc_error* error) { + grpc_server* server = static_cast(s); + for (listener* l = server->listeners; l; l = l->next) { + l->start(server, l->arg, server->pollsets, server->pollset_count); } gpr_mu_lock(&server->mu_global); @@ -1080,82 +1014,63 @@ static void start_listeners(grpc_exec_ctx *exec_ctx, void *s, gpr_cv_signal(&server->starting_cv); gpr_mu_unlock(&server->mu_global); - server_unref(exec_ctx, server); + server_unref(server); } -void grpc_server_start(grpc_server *server) { +void grpc_server_start(grpc_server* server) { size_t i; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server)); server->started = true; server->pollset_count = 0; - server->pollsets = - (grpc_pollset **)gpr_malloc(sizeof(grpc_pollset *) * server->cq_count); - server->request_freelist_per_cq = (gpr_stack_lockfree **)gpr_malloc( - sizeof(*server->request_freelist_per_cq) * server->cq_count); - server->requested_calls_per_cq = (requested_call **)gpr_malloc( - sizeof(*server->requested_calls_per_cq) * server->cq_count); + server->pollsets = static_cast( + gpr_malloc(sizeof(grpc_pollset*) * server->cq_count)); for (i = 0; i < server->cq_count; i++) { if (grpc_cq_can_listen(server->cqs[i])) { server->pollsets[server->pollset_count++] = grpc_cq_pollset(server->cqs[i]); } - server->request_freelist_per_cq[i] = - gpr_stack_lockfree_create((size_t)server->max_requested_calls_per_cq); - for (int j = 0; j < server->max_requested_calls_per_cq; j++) { - gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j); - } - server->requested_calls_per_cq[i] = (requested_call *)gpr_malloc( - (size_t)server->max_requested_calls_per_cq * - sizeof(*server->requested_calls_per_cq[i])); } - request_matcher_init(&server->unregistered_request_matcher, - (size_t)server->max_requested_calls_per_cq, server); - for (registered_method *rm = server->registered_methods; rm; rm = rm->next) { - request_matcher_init(&rm->matcher, - (size_t)server->max_requested_calls_per_cq, server); + request_matcher_init(&server->unregistered_request_matcher, server); + for (registered_method* rm = server->registered_methods; rm; rm = rm->next) { + request_matcher_init(&rm->matcher, server); } server_ref(server); server->starting = true; GRPC_CLOSURE_SCHED( - &exec_ctx, GRPC_CLOSURE_CREATE(start_listeners, server, grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)), GRPC_ERROR_NONE); - - grpc_exec_ctx_finish(&exec_ctx); } -void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets, - size_t *pollset_count) { +void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets, + size_t* pollset_count) { *pollset_count = server->pollset_count; *pollsets = 
server->pollsets; } -void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, - grpc_transport *transport, - grpc_pollset *accepting_pollset, - const grpc_channel_args *args) { +void grpc_server_setup_transport(grpc_server* s, grpc_transport* transport, + grpc_pollset* accepting_pollset, + const grpc_channel_args* args) { size_t num_registered_methods; size_t alloc; - registered_method *rm; - channel_registered_method *crm; - grpc_channel *channel; - channel_data *chand; + registered_method* rm; + channel_registered_method* crm; + grpc_channel* channel; + channel_data* chand; uint32_t hash; size_t slots; uint32_t probes; uint32_t max_probes = 0; - grpc_transport_op *op = NULL; + grpc_transport_op* op = nullptr; - channel = - grpc_channel_create(exec_ctx, NULL, args, GRPC_SERVER_CHANNEL, transport); - chand = (channel_data *)grpc_channel_stack_element( - grpc_channel_get_channel_stack(channel), 0) - ->channel_data; + channel = grpc_channel_create(nullptr, args, GRPC_SERVER_CHANNEL, transport); + chand = static_cast( + grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0) + ->channel_data); chand->server = s; server_ref(s); chand->channel = channel; @@ -1166,7 +1081,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, } if (cq_idx == s->cq_count) { /* completion queue not found: pick a random one to publish new calls to */ - cq_idx = (size_t)rand() % s->cq_count; + cq_idx = static_cast(rand()) % s->cq_count; } chand->cq_idx = cq_idx; @@ -1179,12 +1094,13 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, if (num_registered_methods > 0) { slots = 2 * num_registered_methods; alloc = sizeof(channel_registered_method) * slots; - chand->registered_methods = (channel_registered_method *)gpr_zalloc(alloc); + chand->registered_methods = + static_cast(gpr_zalloc(alloc)); for (rm = s->registered_methods; rm; rm = rm->next) { grpc_slice host; bool has_host; grpc_slice method; - if (rm->host != NULL) { + if (rm->host != nullptr) { host = grpc_slice_intern(grpc_slice_from_static_string(rm->host)); has_host = true; } else { @@ -1194,7 +1110,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, hash = GRPC_MDSTR_KV_HASH(has_host ? 
grpc_slice_hash(host) : 0, grpc_slice_hash(method)); for (probes = 0; chand->registered_methods[(hash + probes) % slots] - .server_registered_method != NULL; + .server_registered_method != nullptr; probes++) ; if (probes > max_probes) max_probes = probes; @@ -1208,7 +1124,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, crm->method = method; } GPR_ASSERT(slots <= UINT32_MAX); - chand->registered_method_slots = (uint32_t)slots; + chand->registered_method_slots = static_cast(slots); chand->registered_method_max_probes = max_probes; } @@ -1219,7 +1135,7 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, gpr_mu_unlock(&s->mu_global); GRPC_CHANNEL_INTERNAL_REF(channel, "connectivity"); - op = grpc_make_transport_op(NULL); + op = grpc_make_transport_op(nullptr); op->set_accept_stream = true; op->set_accept_stream_fn = accept_stream; op->set_accept_stream_user_data = chand; @@ -1229,30 +1145,44 @@ void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *s, op->disconnect_with_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"); } - grpc_transport_perform_op(exec_ctx, transport, op); + grpc_transport_perform_op(transport, op); } -void done_published_shutdown(grpc_exec_ctx *exec_ctx, void *done_arg, - grpc_cq_completion *storage) { +void done_published_shutdown(void* done_arg, grpc_cq_completion* storage) { (void)done_arg; gpr_free(storage); } -static void listener_destroy_done(grpc_exec_ctx *exec_ctx, void *s, - grpc_error *error) { - grpc_server *server = (grpc_server *)s; +static void listener_destroy_done(void* s, grpc_error* error) { + grpc_server* server = static_cast(s); gpr_mu_lock(&server->mu_global); server->listeners_destroyed++; - maybe_finish_shutdown(exec_ctx, server); + maybe_finish_shutdown(server); gpr_mu_unlock(&server->mu_global); } -void grpc_server_shutdown_and_notify(grpc_server *server, - grpc_completion_queue *cq, void *tag) { - listener *l; - shutdown_tag *sdt; +/* + - Kills all pending requests-for-incoming-RPC-calls (i.e the requests made via + grpc_server_request_call and grpc_server_request_registered call will now be + cancelled). See 'kill_pending_work_locked()' + + - Shuts down the listeners (i.e the server will no longer listen on the port + for new incoming channels). + + - Iterates through all channels on the server and sends shutdown msg (see + 'channel_broadcaster_shutdown()' for details) to the clients via the + transport layer. 
The transport layer then guarantees the following: + -- Sends shutdown to the client (for eg: HTTP2 transport sends GOAWAY) + -- If the server has outstanding calls that are in the process, the + connection is NOT closed until the server is done with all those calls + -- Once, there are no more calls in progress, the channel is closed + */ +void grpc_server_shutdown_and_notify(grpc_server* server, + grpc_completion_queue* cq, void* tag) { + listener* l; + shutdown_tag* sdt; channel_broadcaster broadcaster; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3, (server, cq, tag)); @@ -1261,27 +1191,27 @@ void grpc_server_shutdown_and_notify(grpc_server *server, gpr_mu_lock(&server->mu_global); while (server->starting) { gpr_cv_wait(&server->starting_cv, &server->mu_global, - gpr_inf_future(GPR_CLOCK_REALTIME)); + gpr_inf_future(GPR_CLOCK_MONOTONIC)); } /* stay locked, and gather up some stuff to do */ GPR_ASSERT(grpc_cq_begin_op(cq, tag)); if (server->shutdown_published) { - grpc_cq_end_op( - &exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL, - (grpc_cq_completion *)gpr_malloc(sizeof(grpc_cq_completion))); + grpc_cq_end_op(cq, tag, GRPC_ERROR_NONE, done_published_shutdown, nullptr, + static_cast( + gpr_malloc(sizeof(grpc_cq_completion)))); gpr_mu_unlock(&server->mu_global); - goto done; + return; } - server->shutdown_tags = (shutdown_tag *)gpr_realloc( - server->shutdown_tags, - sizeof(shutdown_tag) * (server->num_shutdown_tags + 1)); + server->shutdown_tags = static_cast( + gpr_realloc(server->shutdown_tags, + sizeof(shutdown_tag) * (server->num_shutdown_tags + 1))); sdt = &server->shutdown_tags[server->num_shutdown_tags++]; sdt->tag = tag; sdt->cq = cq; if (gpr_atm_acq_load(&server->shutdown_flag)) { gpr_mu_unlock(&server->mu_global); - goto done; + return; } server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME); @@ -1293,30 +1223,26 @@ void grpc_server_shutdown_and_notify(grpc_server *server, /* collect all unregistered then registered calls */ gpr_mu_lock(&server->mu_call); kill_pending_work_locked( - &exec_ctx, server, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); + server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); gpr_mu_unlock(&server->mu_call); - maybe_finish_shutdown(&exec_ctx, server); + maybe_finish_shutdown(server); gpr_mu_unlock(&server->mu_global); /* Shutdown listeners */ for (l = server->listeners; l; l = l->next) { GRPC_CLOSURE_INIT(&l->destroy_done, listener_destroy_done, server, grpc_schedule_on_exec_ctx); - l->destroy(&exec_ctx, server, l->arg, &l->destroy_done); + l->destroy(server, l->arg, &l->destroy_done); } - channel_broadcaster_shutdown(&exec_ctx, &broadcaster, true /* send_goaway */, + channel_broadcaster_shutdown(&broadcaster, true /* send_goaway */, GRPC_ERROR_NONE); - -done: - grpc_exec_ctx_finish(&exec_ctx); } -void grpc_server_cancel_all_calls(grpc_server *server) { +void grpc_server_cancel_all_calls(grpc_server* server) { channel_broadcaster broadcaster; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + grpc_core::ExecCtx exec_ctx; GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server)); @@ -1325,14 +1251,13 @@ void grpc_server_cancel_all_calls(grpc_server *server) { gpr_mu_unlock(&server->mu_global); channel_broadcaster_shutdown( - &exec_ctx, &broadcaster, false /* send_goaway */, + &broadcaster, false /* send_goaway */, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Cancelling all calls")); - 
grpc_exec_ctx_finish(&exec_ctx); } -void grpc_server_destroy(grpc_server *server) { - listener *l; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; +void grpc_server_destroy(grpc_server* server) { + listener* l; + grpc_core::ExecCtx exec_ctx; GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server)); @@ -1348,17 +1273,16 @@ void grpc_server_destroy(grpc_server *server) { gpr_mu_unlock(&server->mu_global); - server_unref(&exec_ctx, server); - grpc_exec_ctx_finish(&exec_ctx); + server_unref(server); } -void grpc_server_add_listener( - grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_pollset **pollsets, size_t pollset_count), - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_closure *on_done)) { - listener *l = (listener *)gpr_malloc(sizeof(listener)); +void grpc_server_add_listener(grpc_server* server, void* arg, + void (*start)(grpc_server* server, void* arg, + grpc_pollset** pollsets, + size_t pollset_count), + void (*destroy)(grpc_server* server, void* arg, + grpc_closure* on_done)) { + listener* l = static_cast(gpr_malloc(sizeof(listener))); l->arg = arg; l->start = start; l->destroy = destroy; @@ -1366,26 +1290,15 @@ void grpc_server_add_listener( server->listeners = l; } -static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx, - grpc_server *server, size_t cq_idx, - requested_call *rc) { - call_data *calld = NULL; - request_matcher *rm = NULL; - int request_id; +static grpc_call_error queue_call_request(grpc_server* server, size_t cq_idx, + requested_call* rc) { + call_data* calld = nullptr; + request_matcher* rm = nullptr; if (gpr_atm_acq_load(&server->shutdown_flag)) { - fail_call(exec_ctx, server, cq_idx, rc, + fail_call(server, cq_idx, rc, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown")); return GRPC_CALL_OK; } - request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]); - if (request_id == -1) { - /* out of request ids: just fail this one */ - fail_call(exec_ctx, server, cq_idx, rc, - grpc_error_set_int( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Out of request ids"), - GRPC_ERROR_INT_LIMIT, server->max_requested_calls_per_cq)); - return GRPC_CALL_OK; - } switch (rc->type) { case BATCH_CALL: rm = &server->unregistered_request_matcher; @@ -1394,32 +1307,25 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx, rm = &rc->data.registered.method->matcher; break; } - server->requested_calls_per_cq[cq_idx][request_id] = *rc; - gpr_free(rc); - if (gpr_stack_lockfree_push(rm->requests_per_cq[cq_idx], request_id)) { + if (gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link)) { /* this was the first queued request: we need to lock and start matching calls */ gpr_mu_lock(&server->mu_call); - while ((calld = rm->pending_head) != NULL) { - request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[cq_idx]); - if (request_id == -1) break; + while ((calld = rm->pending_head) != nullptr) { + rc = reinterpret_cast( + gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx])); + if (rc == nullptr) break; rm->pending_head = calld->pending_next; gpr_mu_unlock(&server->mu_call); - gpr_mu_lock(&calld->mu_state); - if (calld->state == ZOMBIED) { - gpr_mu_unlock(&calld->mu_state); + if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) { + // Zombied Call GRPC_CLOSURE_INIT( &calld->kill_zombie_closure, kill_zombie, grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0), grpc_schedule_on_exec_ctx); - 
GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, - GRPC_ERROR_NONE); + GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE); } else { - GPR_ASSERT(calld->state == PENDING); - calld->state = ACTIVATED; - gpr_mu_unlock(&calld->mu_state); - publish_call(exec_ctx, server, calld, cq_idx, - &server->requested_calls_per_cq[cq_idx][request_id]); + publish_call(server, calld, cq_idx, rc); } gpr_mu_lock(&server->mu_call); } @@ -1429,20 +1335,21 @@ static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx, } grpc_call_error grpc_server_request_call( - grpc_server *server, grpc_call **call, grpc_call_details *details, - grpc_metadata_array *initial_metadata, - grpc_completion_queue *cq_bound_to_call, - grpc_completion_queue *cq_for_notification, void *tag) { + grpc_server* server, grpc_call** call, grpc_call_details* details, + grpc_metadata_array* initial_metadata, + grpc_completion_queue* cq_bound_to_call, + grpc_completion_queue* cq_for_notification, void* tag) { grpc_call_error error; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc)); - GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx); + grpc_core::ExecCtx exec_ctx; + requested_call* rc = static_cast(gpr_malloc(sizeof(*rc))); + GRPC_STATS_INC_SERVER_REQUESTED_CALLS(); GRPC_API_TRACE( "grpc_server_request_call(" "server=%p, call=%p, details=%p, initial_metadata=%p, " "cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)", - 7, (server, call, details, initial_metadata, cq_bound_to_call, - cq_for_notification, tag)); + 7, + (server, call, details, initial_metadata, cq_bound_to_call, + cq_for_notification, tag)); size_t cq_idx; for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) { if (server->cqs[cq_idx] == cq_for_notification) { @@ -1459,7 +1366,7 @@ grpc_call_error grpc_server_request_call( error = GRPC_CALL_ERROR_COMPLETION_QUEUE_SHUTDOWN; goto done; } - details->reserved = NULL; + details->reserved = nullptr; rc->cq_idx = cq_idx; rc->type = BATCH_CALL; rc->server = server; @@ -1468,29 +1375,30 @@ grpc_call_error grpc_server_request_call( rc->call = call; rc->data.batch.details = details; rc->initial_metadata = initial_metadata; - error = queue_call_request(&exec_ctx, server, cq_idx, rc); + error = queue_call_request(server, cq_idx, rc); done: - grpc_exec_ctx_finish(&exec_ctx); + return error; } grpc_call_error grpc_server_request_registered_call( - grpc_server *server, void *rmp, grpc_call **call, gpr_timespec *deadline, - grpc_metadata_array *initial_metadata, grpc_byte_buffer **optional_payload, - grpc_completion_queue *cq_bound_to_call, - grpc_completion_queue *cq_for_notification, void *tag) { + grpc_server* server, void* rmp, grpc_call** call, gpr_timespec* deadline, + grpc_metadata_array* initial_metadata, grpc_byte_buffer** optional_payload, + grpc_completion_queue* cq_bound_to_call, + grpc_completion_queue* cq_for_notification, void* tag) { grpc_call_error error; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc)); - registered_method *rm = (registered_method *)rmp; - GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx); + grpc_core::ExecCtx exec_ctx; + requested_call* rc = static_cast(gpr_malloc(sizeof(*rc))); + registered_method* rm = static_cast(rmp); + GRPC_STATS_INC_SERVER_REQUESTED_CALLS(); GRPC_API_TRACE( "grpc_server_request_registered_call(" "server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, " "optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, " "tag=%p)", 
- 9, (server, rmp, call, deadline, initial_metadata, optional_payload, - cq_bound_to_call, cq_for_notification, tag)); + 9, + (server, rmp, call, deadline, initial_metadata, optional_payload, + cq_bound_to_call, cq_for_notification, tag)); size_t cq_idx; for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) { @@ -1503,7 +1411,7 @@ grpc_call_error grpc_server_request_registered_call( error = GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE; goto done; } - if ((optional_payload == NULL) != + if ((optional_payload == nullptr) != (rm->payload_handling == GRPC_SRM_PAYLOAD_NONE)) { gpr_free(rc); error = GRPC_CALL_ERROR_PAYLOAD_TYPE_MISMATCH; @@ -1524,28 +1432,27 @@ grpc_call_error grpc_server_request_registered_call( rc->data.registered.deadline = deadline; rc->initial_metadata = initial_metadata; rc->data.registered.optional_payload = optional_payload; - error = queue_call_request(&exec_ctx, server, cq_idx, rc); + error = queue_call_request(server, cq_idx, rc); done: - grpc_exec_ctx_finish(&exec_ctx); + return error; } -static void fail_call(grpc_exec_ctx *exec_ctx, grpc_server *server, - size_t cq_idx, requested_call *rc, grpc_error *error) { - *rc->call = NULL; +static void fail_call(grpc_server* server, size_t cq_idx, requested_call* rc, + grpc_error* error) { + *rc->call = nullptr; rc->initial_metadata->count = 0; GPR_ASSERT(error != GRPC_ERROR_NONE); - server_ref(server); - grpc_cq_end_op(exec_ctx, server->cqs[cq_idx], rc->tag, error, - done_request_event, rc, &rc->completion); + grpc_cq_end_op(server->cqs[cq_idx], rc->tag, error, done_request_event, rc, + &rc->completion); } -const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server) { +const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server) { return server->channel_args; } -int grpc_server_has_open_connections(grpc_server *server) { +int grpc_server_has_open_connections(grpc_server* server) { int r; gpr_mu_lock(&server->mu_global); r = server->root_channel_data.next != &server->root_channel_data; diff --git a/Sources/CgRPC/src/core/lib/surface/server.h b/Sources/CgRPC/src/core/lib/surface/server.h index dd5639d97..c617cc223 100644 --- a/Sources/CgRPC/src/core/lib/surface/server.h +++ b/Sources/CgRPC/src/core/lib/surface/server.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_SURFACE_SERVER_H #define GRPC_CORE_LIB_SURFACE_SERVER_H +#include + #include #include "src/core/lib/channel/channel_stack.h" #include "src/core/lib/debug/trace.h" @@ -27,31 +29,30 @@ extern const grpc_channel_filter grpc_server_top_filter; /** Lightweight tracing of server channel state */ -extern grpc_tracer_flag grpc_server_channel_trace; +extern grpc_core::TraceFlag grpc_server_channel_trace; /* Add a listener to the server: when the server starts, it will call start, and when it shuts down, it will call destroy */ -void grpc_server_add_listener( - grpc_exec_ctx *exec_ctx, grpc_server *server, void *listener, - void (*start)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_pollset **pollsets, size_t npollsets), - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_server *server, void *arg, - grpc_closure *on_done)); +void grpc_server_add_listener(grpc_server* server, void* listener, + void (*start)(grpc_server* server, void* arg, + grpc_pollset** pollsets, + size_t npollsets), + void (*destroy)(grpc_server* server, void* arg, + grpc_closure* on_done)); /* Setup a transport - creates a channel stack, binds the transport to the server */ -void grpc_server_setup_transport(grpc_exec_ctx *exec_ctx, grpc_server *server, - grpc_transport 
*transport, - grpc_pollset *accepting_pollset, - const grpc_channel_args *args); +void grpc_server_setup_transport(grpc_server* server, grpc_transport* transport, + grpc_pollset* accepting_pollset, + const grpc_channel_args* args); -const grpc_channel_args *grpc_server_get_channel_args(grpc_server *server); +const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server); -int grpc_server_has_open_connections(grpc_server *server); +int grpc_server_has_open_connections(grpc_server* server); /* Do not call this before grpc_server_start. Returns the pollsets and the * number of pollsets via 'pollsets' and 'pollset_count'. */ -void grpc_server_get_pollsets(grpc_server *server, grpc_pollset ***pollsets, - size_t *pollset_count); +void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets, + size_t* pollset_count); #endif /* GRPC_CORE_LIB_SURFACE_SERVER_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/validate_metadata.c b/Sources/CgRPC/src/core/lib/surface/validate_metadata.cc similarity index 83% rename from Sources/CgRPC/src/core/lib/surface/validate_metadata.c rename to Sources/CgRPC/src/core/lib/surface/validate_metadata.cc index 61209ae48..2dd18f3dd 100644 --- a/Sources/CgRPC/src/core/lib/surface/validate_metadata.c +++ b/Sources/CgRPC/src/core/lib/surface/validate_metadata.cc @@ -16,28 +16,30 @@ * */ +#include + #include #include #include #include -#include #include "src/core/lib/iomgr/error.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" +#include "src/core/lib/surface/validate_metadata.h" -static grpc_error *conforms_to(grpc_slice slice, const uint8_t *legal_bits, - const char *err_desc) { - const uint8_t *p = GRPC_SLICE_START_PTR(slice); - const uint8_t *e = GRPC_SLICE_END_PTR(slice); +static grpc_error* conforms_to(grpc_slice slice, const uint8_t* legal_bits, + const char* err_desc) { + const uint8_t* p = GRPC_SLICE_START_PTR(slice); + const uint8_t* e = GRPC_SLICE_END_PTR(slice); for (; p != e; p++) { int idx = *p; int byte = idx / 8; int bit = idx % 8; if ((legal_bits[byte] & (1 << bit)) == 0) { - char *dump = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII); - grpc_error *error = grpc_error_set_str( + char* dump = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII); + grpc_error* error = grpc_error_set_str( grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(err_desc), GRPC_ERROR_INT_OFFSET, p - GRPC_SLICE_START_PTR(slice)), @@ -49,13 +51,13 @@ static grpc_error *conforms_to(grpc_slice slice, const uint8_t *legal_bits, return GRPC_ERROR_NONE; } -static int error2int(grpc_error *error) { +static int error2int(grpc_error* error) { int r = (error == GRPC_ERROR_NONE); GRPC_ERROR_UNREF(error); return r; } -grpc_error *grpc_validate_header_key_is_legal(grpc_slice slice) { +grpc_error* grpc_validate_header_key_is_legal(grpc_slice slice) { static const uint8_t legal_header_bits[256 / 8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xff, 0x03, 0x00, 0x00, 0x00, 0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -75,7 +77,7 @@ int grpc_header_key_is_legal(grpc_slice slice) { return error2int(grpc_validate_header_key_is_legal(slice)); } -grpc_error *grpc_validate_header_nonbin_value_is_legal(grpc_slice slice) { +grpc_error* grpc_validate_header_nonbin_value_is_legal(grpc_slice slice) { static const uint8_t legal_header_bits[256 / 8] = { 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, diff --git 
a/Sources/CgRPC/src/core/lib/surface/validate_metadata.h b/Sources/CgRPC/src/core/lib/surface/validate_metadata.h index de869d89b..e87fb7bee 100644 --- a/Sources/CgRPC/src/core/lib/surface/validate_metadata.h +++ b/Sources/CgRPC/src/core/lib/surface/validate_metadata.h @@ -19,10 +19,12 @@ #ifndef GRPC_CORE_LIB_SURFACE_VALIDATE_METADATA_H #define GRPC_CORE_LIB_SURFACE_VALIDATE_METADATA_H +#include + #include #include "src/core/lib/iomgr/error.h" -grpc_error *grpc_validate_header_key_is_legal(grpc_slice slice); -grpc_error *grpc_validate_header_nonbin_value_is_legal(grpc_slice slice); +grpc_error* grpc_validate_header_key_is_legal(grpc_slice slice); +grpc_error* grpc_validate_header_nonbin_value_is_legal(grpc_slice slice); #endif /* GRPC_CORE_LIB_SURFACE_VALIDATE_METADATA_H */ diff --git a/Sources/CgRPC/src/core/lib/surface/version.c b/Sources/CgRPC/src/core/lib/surface/version.cc similarity index 81% rename from Sources/CgRPC/src/core/lib/surface/version.c rename to Sources/CgRPC/src/core/lib/surface/version.cc index 2f0610a20..b8d6a2c46 100644 --- a/Sources/CgRPC/src/core/lib/surface/version.c +++ b/Sources/CgRPC/src/core/lib/surface/version.cc @@ -19,8 +19,10 @@ /* This file is autogenerated from: templates/src/core/surface/version.c.template */ +#include + #include -const char *grpc_version_string(void) { return "5.0.0"; } +const char* grpc_version_string(void) { return "6.0.0"; } -const char *grpc_g_stands_for(void) { return "gambit"; } +const char* grpc_g_stands_for(void) { return "glorious"; } diff --git a/Sources/CgRPC/src/core/lib/transport/bdp_estimator.c b/Sources/CgRPC/src/core/lib/transport/bdp_estimator.c deleted file mode 100644 index 8b5769341..000000000 --- a/Sources/CgRPC/src/core/lib/transport/bdp_estimator.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include "src/core/lib/transport/bdp_estimator.h" - -#include - -#include -#include - -grpc_tracer_flag grpc_bdp_estimator_trace = - GRPC_TRACER_INITIALIZER(false, "bdp_estimator"); - -void grpc_bdp_estimator_init(grpc_bdp_estimator *estimator, const char *name) { - estimator->estimate = 65536; - estimator->ping_state = GRPC_BDP_PING_UNSCHEDULED; - estimator->name = name; - estimator->bw_est = 0; -} - -bool grpc_bdp_estimator_get_estimate(const grpc_bdp_estimator *estimator, - int64_t *estimate) { - *estimate = estimator->estimate; - return true; -} - -bool grpc_bdp_estimator_get_bw(const grpc_bdp_estimator *estimator, - double *bw) { - *bw = estimator->bw_est; - return true; -} - -void grpc_bdp_estimator_add_incoming_bytes(grpc_bdp_estimator *estimator, - int64_t num_bytes) { - estimator->accumulator += num_bytes; -} - -bool grpc_bdp_estimator_need_ping(const grpc_bdp_estimator *estimator) { - switch (estimator->ping_state) { - case GRPC_BDP_PING_UNSCHEDULED: - return true; - case GRPC_BDP_PING_SCHEDULED: - return false; - case GRPC_BDP_PING_STARTED: - return false; - } - GPR_UNREACHABLE_CODE(return false); -} - -void grpc_bdp_estimator_schedule_ping(grpc_bdp_estimator *estimator) { - if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, - estimator->name, estimator->accumulator, estimator->estimate); - } - GPR_ASSERT(estimator->ping_state == GRPC_BDP_PING_UNSCHEDULED); - estimator->ping_state = GRPC_BDP_PING_SCHEDULED; - estimator->accumulator = 0; -} - -void grpc_bdp_estimator_start_ping(grpc_bdp_estimator *estimator) { - if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, - estimator->name, estimator->accumulator, estimator->estimate); - } - GPR_ASSERT(estimator->ping_state == GRPC_BDP_PING_SCHEDULED); - estimator->ping_state = GRPC_BDP_PING_STARTED; - estimator->accumulator = 0; - estimator->ping_start_time = gpr_now(GPR_CLOCK_MONOTONIC); -} - -void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator) { - gpr_timespec dt_ts = - gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), estimator->ping_start_time); - double dt = (double)dt_ts.tv_sec + 1e-9 * (double)dt_ts.tv_nsec; - double bw = dt > 0 ? ((double)estimator->accumulator / dt) : 0; - if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64 - " dt=%lf bw=%lfMbs bw_est=%lfMbs", - estimator->name, estimator->accumulator, estimator->estimate, dt, - bw / 125000.0, estimator->bw_est / 125000.0); - } - GPR_ASSERT(estimator->ping_state == GRPC_BDP_PING_STARTED); - if (estimator->accumulator > 2 * estimator->estimate / 3 && - bw > estimator->bw_est) { - estimator->estimate = - GPR_MAX(estimator->accumulator, estimator->estimate * 2); - estimator->bw_est = bw; - if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) { - gpr_log(GPR_DEBUG, "bdp[%s]: estimate increased to %" PRId64, - estimator->name, estimator->estimate); - } - } - estimator->ping_state = GRPC_BDP_PING_UNSCHEDULED; - estimator->accumulator = 0; -} diff --git a/Sources/CgRPC/src/core/lib/transport/bdp_estimator.cc b/Sources/CgRPC/src/core/lib/transport/bdp_estimator.cc new file mode 100644 index 000000000..8e71f8698 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/transport/bdp_estimator.cc @@ -0,0 +1,87 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/transport/bdp_estimator.h" + +#include +#include + +#include "src/core/lib/gpr/useful.h" + +grpc_core::TraceFlag grpc_bdp_estimator_trace(false, "bdp_estimator"); + +namespace grpc_core { + +BdpEstimator::BdpEstimator(const char* name) + : ping_state_(PingState::UNSCHEDULED), + accumulator_(0), + estimate_(65536), + ping_start_time_(gpr_time_0(GPR_CLOCK_MONOTONIC)), + inter_ping_delay_(100.0), // start at 100ms + stable_estimate_count_(0), + bw_est_(0), + name_(name) {} + +grpc_millis BdpEstimator::CompletePing() { + gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); + gpr_timespec dt_ts = gpr_time_sub(now, ping_start_time_); + double dt = static_cast(dt_ts.tv_sec) + + 1e-9 * static_cast(dt_ts.tv_nsec); + double bw = dt > 0 ? (static_cast(accumulator_) / dt) : 0; + int start_inter_ping_delay = inter_ping_delay_; + if (grpc_bdp_estimator_trace.enabled()) { + gpr_log(GPR_INFO, + "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64 + " dt=%lf bw=%lfMbs bw_est=%lfMbs", + name_, accumulator_, estimate_, dt, bw / 125000.0, + bw_est_ / 125000.0); + } + GPR_ASSERT(ping_state_ == PingState::STARTED); + if (accumulator_ > 2 * estimate_ / 3 && bw > bw_est_) { + estimate_ = GPR_MAX(accumulator_, estimate_ * 2); + bw_est_ = bw; + if (grpc_bdp_estimator_trace.enabled()) { + gpr_log(GPR_INFO, "bdp[%s]: estimate increased to %" PRId64, name_, + estimate_); + } + inter_ping_delay_ /= 2; // if the ping estimate changes, + // exponentially get faster at probing + } else if (inter_ping_delay_ < 10000) { + stable_estimate_count_++; + if (stable_estimate_count_ >= 2) { + inter_ping_delay_ += + 100 + static_cast(rand() * 100.0 / + RAND_MAX); // if the ping estimate is steady, + // slowly ramp down the probe time + } + } + if (start_inter_ping_delay != inter_ping_delay_) { + stable_estimate_count_ = 0; + if (grpc_bdp_estimator_trace.enabled()) { + gpr_log(GPR_INFO, "bdp[%s]:update_inter_time to %dms", name_, + inter_ping_delay_); + } + } + ping_state_ = PingState::UNSCHEDULED; + accumulator_ = 0; + return grpc_core::ExecCtx::Get()->Now() + inter_ping_delay_; +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/transport/bdp_estimator.h b/Sources/CgRPC/src/core/lib/transport/bdp_estimator.h index 1ef0dc99d..ab13ae4be 100644 --- a/Sources/CgRPC/src/core/lib/transport/bdp_estimator.h +++ b/Sources/CgRPC/src/core/lib/transport/bdp_estimator.h @@ -19,51 +19,76 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_BDP_ESTIMATOR_H #define GRPC_CORE_LIB_TRANSPORT_BDP_ESTIMATOR_H -#include +#include + +#include #include #include + +#include +#include + #include "src/core/lib/debug/trace.h" +#include "src/core/lib/iomgr/exec_ctx.h" + +extern grpc_core::TraceFlag grpc_bdp_estimator_trace; + +namespace grpc_core { + +class BdpEstimator { + public: + explicit BdpEstimator(const char* name); + ~BdpEstimator() {} + + int64_t EstimateBdp() const { return estimate_; } + double EstimateBandwidth() const { return bw_est_; } + + void AddIncomingBytes(int64_t num_bytes) { accumulator_ += num_bytes; } + + // Schedule a ping: call in response to receiving a true from + // 
grpc_bdp_estimator_add_incoming_bytes once a ping has been scheduled by a + // transport (but not necessarily started) + void SchedulePing() { + if (grpc_bdp_estimator_trace.enabled()) { + gpr_log(GPR_INFO, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, name_, + accumulator_, estimate_); + } + GPR_ASSERT(ping_state_ == PingState::UNSCHEDULED); + ping_state_ = PingState::SCHEDULED; + accumulator_ = 0; + } + + // Start a ping: call after calling grpc_bdp_estimator_schedule_ping and + // once + // the ping is on the wire + void StartPing() { + if (grpc_bdp_estimator_trace.enabled()) { + gpr_log(GPR_INFO, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, name_, + accumulator_, estimate_); + } + GPR_ASSERT(ping_state_ == PingState::SCHEDULED); + ping_state_ = PingState::STARTED; + accumulator_ = 0; + ping_start_time_ = gpr_now(GPR_CLOCK_MONOTONIC); + } + + // Completes a previously started ping, returns when to schedule the next one + grpc_millis CompletePing(); + + private: + enum class PingState { UNSCHEDULED, SCHEDULED, STARTED }; + + PingState ping_state_; + int64_t accumulator_; + int64_t estimate_; + // when was the current ping started? + gpr_timespec ping_start_time_; + int inter_ping_delay_; + int stable_estimate_count_; + double bw_est_; + const char* name_; +}; -#define GRPC_BDP_SAMPLES 16 -#define GRPC_BDP_MIN_SAMPLES_FOR_ESTIMATE 3 - -extern grpc_tracer_flag grpc_bdp_estimator_trace; - -typedef enum { - GRPC_BDP_PING_UNSCHEDULED, - GRPC_BDP_PING_SCHEDULED, - GRPC_BDP_PING_STARTED -} grpc_bdp_estimator_ping_state; - -typedef struct grpc_bdp_estimator { - grpc_bdp_estimator_ping_state ping_state; - int64_t accumulator; - int64_t estimate; - gpr_timespec ping_start_time; - double bw_est; - const char *name; -} grpc_bdp_estimator; - -void grpc_bdp_estimator_init(grpc_bdp_estimator *estimator, const char *name); - -// Returns true if a reasonable estimate could be obtained -bool grpc_bdp_estimator_get_estimate(const grpc_bdp_estimator *estimator, - int64_t *estimate); -// Tracks new bytes read. -bool grpc_bdp_estimator_get_bw(const grpc_bdp_estimator *estimator, double *bw); -// Returns true if the user should schedule a ping -void grpc_bdp_estimator_add_incoming_bytes(grpc_bdp_estimator *estimator, - int64_t num_bytes); -// Returns true if the user should schedule a ping -bool grpc_bdp_estimator_need_ping(const grpc_bdp_estimator *estimator); -// Schedule a ping: call in response to receiving a true from -// grpc_bdp_estimator_add_incoming_bytes once a ping has been scheduled by a -// transport (but not necessarily started) -void grpc_bdp_estimator_schedule_ping(grpc_bdp_estimator *estimator); -// Start a ping: call after calling grpc_bdp_estimator_schedule_ping and once -// the ping is on the wire -void grpc_bdp_estimator_start_ping(grpc_bdp_estimator *estimator); -// Completes a previously started ping -void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator); +} // namespace grpc_core #endif /* GRPC_CORE_LIB_TRANSPORT_BDP_ESTIMATOR_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/byte_stream.c b/Sources/CgRPC/src/core/lib/transport/byte_stream.c deleted file mode 100644 index 08f61629a..000000000 --- a/Sources/CgRPC/src/core/lib/transport/byte_stream.c +++ /dev/null @@ -1,187 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/transport/byte_stream.h" - -#include -#include - -#include - -#include "src/core/lib/slice/slice_internal.h" - -bool grpc_byte_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, size_t max_size_hint, - grpc_closure *on_complete) { - return byte_stream->vtable->next(exec_ctx, byte_stream, max_size_hint, - on_complete); -} - -grpc_error *grpc_byte_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_slice *slice) { - return byte_stream->vtable->pull(exec_ctx, byte_stream, slice); -} - -void grpc_byte_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_error *error) { - byte_stream->vtable->shutdown(exec_ctx, byte_stream, error); -} - -void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream) { - byte_stream->vtable->destroy(exec_ctx, byte_stream); -} - -// grpc_slice_buffer_stream - -static bool slice_buffer_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - size_t max_size_hint, - grpc_closure *on_complete) { - grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream; - GPR_ASSERT(stream->cursor < stream->backing_buffer->count); - return true; -} - -static grpc_error *slice_buffer_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_slice *slice) { - grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream; - if (stream->shutdown_error != GRPC_ERROR_NONE) { - return GRPC_ERROR_REF(stream->shutdown_error); - } - GPR_ASSERT(stream->cursor < stream->backing_buffer->count); - *slice = - grpc_slice_ref_internal(stream->backing_buffer->slices[stream->cursor]); - stream->cursor++; - return GRPC_ERROR_NONE; -} - -static void slice_buffer_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_error *error) { - grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream; - GRPC_ERROR_UNREF(stream->shutdown_error); - stream->shutdown_error = error; -} - -static void slice_buffer_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream) { - grpc_slice_buffer_stream *stream = (grpc_slice_buffer_stream *)byte_stream; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, stream->backing_buffer); - GRPC_ERROR_UNREF(stream->shutdown_error); -} - -static const grpc_byte_stream_vtable slice_buffer_stream_vtable = { - slice_buffer_stream_next, slice_buffer_stream_pull, - slice_buffer_stream_shutdown, slice_buffer_stream_destroy}; - -void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream, - grpc_slice_buffer *slice_buffer, - uint32_t flags) { - GPR_ASSERT(slice_buffer->length <= UINT32_MAX); - stream->base.length = (uint32_t)slice_buffer->length; - stream->base.flags = flags; - stream->base.vtable = &slice_buffer_stream_vtable; - stream->backing_buffer = slice_buffer; - stream->cursor = 0; - stream->shutdown_error = GRPC_ERROR_NONE; -} - -// grpc_caching_byte_stream - -void grpc_byte_stream_cache_init(grpc_byte_stream_cache *cache, - grpc_byte_stream *underlying_stream) { - cache->underlying_stream = 
underlying_stream; - grpc_slice_buffer_init(&cache->cache_buffer); -} - -void grpc_byte_stream_cache_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream_cache *cache) { - grpc_byte_stream_destroy(exec_ctx, cache->underlying_stream); - grpc_slice_buffer_destroy_internal(exec_ctx, &cache->cache_buffer); -} - -static bool caching_byte_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - size_t max_size_hint, - grpc_closure *on_complete) { - grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream; - if (stream->shutdown_error != GRPC_ERROR_NONE) return true; - if (stream->cursor < stream->cache->cache_buffer.count) return true; - return grpc_byte_stream_next(exec_ctx, stream->cache->underlying_stream, - max_size_hint, on_complete); -} - -static grpc_error *caching_byte_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_slice *slice) { - grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream; - if (stream->shutdown_error != GRPC_ERROR_NONE) { - return GRPC_ERROR_REF(stream->shutdown_error); - } - if (stream->cursor < stream->cache->cache_buffer.count) { - *slice = grpc_slice_ref_internal( - stream->cache->cache_buffer.slices[stream->cursor]); - ++stream->cursor; - return GRPC_ERROR_NONE; - } - grpc_error *error = - grpc_byte_stream_pull(exec_ctx, stream->cache->underlying_stream, slice); - if (error == GRPC_ERROR_NONE) { - ++stream->cursor; - grpc_slice_buffer_add(&stream->cache->cache_buffer, - grpc_slice_ref_internal(*slice)); - } - return error; -} - -static void caching_byte_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_error *error) { - grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream; - GRPC_ERROR_UNREF(stream->shutdown_error); - stream->shutdown_error = GRPC_ERROR_REF(error); - grpc_byte_stream_shutdown(exec_ctx, stream->cache->underlying_stream, error); -} - -static void caching_byte_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream) { - grpc_caching_byte_stream *stream = (grpc_caching_byte_stream *)byte_stream; - GRPC_ERROR_UNREF(stream->shutdown_error); -} - -static const grpc_byte_stream_vtable caching_byte_stream_vtable = { - caching_byte_stream_next, caching_byte_stream_pull, - caching_byte_stream_shutdown, caching_byte_stream_destroy}; - -void grpc_caching_byte_stream_init(grpc_caching_byte_stream *stream, - grpc_byte_stream_cache *cache) { - memset(stream, 0, sizeof(*stream)); - stream->base.length = cache->underlying_stream->length; - stream->base.flags = cache->underlying_stream->flags; - stream->base.vtable = &caching_byte_stream_vtable; - stream->cache = cache; - stream->shutdown_error = GRPC_ERROR_NONE; -} - -void grpc_caching_byte_stream_reset(grpc_caching_byte_stream *stream) { - stream->cursor = 0; -} diff --git a/Sources/CgRPC/src/core/lib/transport/byte_stream.cc b/Sources/CgRPC/src/core/lib/transport/byte_stream.cc new file mode 100644 index 000000000..cb15a71a9 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/transport/byte_stream.cc @@ -0,0 +1,160 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/transport/byte_stream.h" + +#include +#include + +#include + +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/slice/slice_internal.h" + +namespace grpc_core { + +// +// SliceBufferByteStream +// + +SliceBufferByteStream::SliceBufferByteStream(grpc_slice_buffer* slice_buffer, + uint32_t flags) + : ByteStream(static_cast(slice_buffer->length), flags) { + GPR_ASSERT(slice_buffer->length <= UINT32_MAX); + grpc_slice_buffer_init(&backing_buffer_); + grpc_slice_buffer_swap(slice_buffer, &backing_buffer_); +} + +SliceBufferByteStream::~SliceBufferByteStream() {} + +void SliceBufferByteStream::Orphan() { + grpc_slice_buffer_destroy(&backing_buffer_); + GRPC_ERROR_UNREF(shutdown_error_); + // Note: We do not actually delete the object here, since + // SliceBufferByteStream is usually allocated as part of a larger + // object and has an OrphanablePtr of itself passed down through the + // filter stack. +} + +bool SliceBufferByteStream::Next(size_t max_size_hint, + grpc_closure* on_complete) { + GPR_ASSERT(cursor_ < backing_buffer_.count); + return true; +} + +grpc_error* SliceBufferByteStream::Pull(grpc_slice* slice) { + if (shutdown_error_ != GRPC_ERROR_NONE) { + return GRPC_ERROR_REF(shutdown_error_); + } + GPR_ASSERT(cursor_ < backing_buffer_.count); + *slice = grpc_slice_ref_internal(backing_buffer_.slices[cursor_]); + ++cursor_; + return GRPC_ERROR_NONE; +} + +void SliceBufferByteStream::Shutdown(grpc_error* error) { + GRPC_ERROR_UNREF(shutdown_error_); + shutdown_error_ = error; +} + +// +// ByteStreamCache +// + +ByteStreamCache::ByteStreamCache(OrphanablePtr underlying_stream) + : underlying_stream_(std::move(underlying_stream)), + length_(underlying_stream_->length()), + flags_(underlying_stream_->flags()) { + grpc_slice_buffer_init(&cache_buffer_); +} + +ByteStreamCache::~ByteStreamCache() { Destroy(); } + +void ByteStreamCache::Destroy() { + underlying_stream_.reset(); + if (cache_buffer_.length > 0) { + grpc_slice_buffer_destroy_internal(&cache_buffer_); + } +} + +// +// ByteStreamCache::CachingByteStream +// + +ByteStreamCache::CachingByteStream::CachingByteStream(ByteStreamCache* cache) + : ByteStream(cache->length_, cache->flags_), cache_(cache) {} + +ByteStreamCache::CachingByteStream::~CachingByteStream() {} + +void ByteStreamCache::CachingByteStream::Orphan() { + GRPC_ERROR_UNREF(shutdown_error_); + // Note: We do not actually delete the object here, since + // CachingByteStream is usually allocated as part of a larger + // object and has an OrphanablePtr of itself passed down through the + // filter stack. 
+} + +bool ByteStreamCache::CachingByteStream::Next(size_t max_size_hint, + grpc_closure* on_complete) { + if (shutdown_error_ != GRPC_ERROR_NONE) return true; + if (cursor_ < cache_->cache_buffer_.count) return true; + GPR_ASSERT(cache_->underlying_stream_ != nullptr); + return cache_->underlying_stream_->Next(max_size_hint, on_complete); +} + +grpc_error* ByteStreamCache::CachingByteStream::Pull(grpc_slice* slice) { + if (shutdown_error_ != GRPC_ERROR_NONE) { + return GRPC_ERROR_REF(shutdown_error_); + } + if (cursor_ < cache_->cache_buffer_.count) { + *slice = grpc_slice_ref_internal(cache_->cache_buffer_.slices[cursor_]); + ++cursor_; + offset_ += GRPC_SLICE_LENGTH(*slice); + return GRPC_ERROR_NONE; + } + GPR_ASSERT(cache_->underlying_stream_ != nullptr); + grpc_error* error = cache_->underlying_stream_->Pull(slice); + if (error == GRPC_ERROR_NONE) { + grpc_slice_buffer_add(&cache_->cache_buffer_, + grpc_slice_ref_internal(*slice)); + ++cursor_; + offset_ += GRPC_SLICE_LENGTH(*slice); + // Orphan the underlying stream if it's been drained. + if (offset_ == cache_->underlying_stream_->length()) { + cache_->underlying_stream_.reset(); + } + } + return error; +} + +void ByteStreamCache::CachingByteStream::Shutdown(grpc_error* error) { + GRPC_ERROR_UNREF(shutdown_error_); + shutdown_error_ = GRPC_ERROR_REF(error); + if (cache_->underlying_stream_ != nullptr) { + cache_->underlying_stream_->Shutdown(error); + } +} + +void ByteStreamCache::CachingByteStream::Reset() { + cursor_ = 0; + offset_ = 0; +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/transport/byte_stream.h b/Sources/CgRPC/src/core/lib/transport/byte_stream.h index be2a35213..eff832515 100644 --- a/Sources/CgRPC/src/core/lib/transport/byte_stream.h +++ b/Sources/CgRPC/src/core/lib/transport/byte_stream.h @@ -19,8 +19,12 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_BYTE_STREAM_H #define GRPC_CORE_LIB_TRANSPORT_BYTE_STREAM_H +#include + #include -#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/gprpp/abstract.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/iomgr/closure.h" /** Internal bit flag for grpc_begin_message's \a flags signaling the use of * compression for the message */ @@ -28,77 +32,82 @@ /** Mask of all valid internal flags. */ #define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS) -typedef struct grpc_byte_stream grpc_byte_stream; - -typedef struct { - bool (*next)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream, - size_t max_size_hint, grpc_closure *on_complete); - grpc_error *(*pull)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream, - grpc_slice *slice); - void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream, - grpc_error *error); - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_byte_stream *byte_stream); -} grpc_byte_stream_vtable; - -struct grpc_byte_stream { - uint32_t length; - uint32_t flags; - const grpc_byte_stream_vtable *vtable; +namespace grpc_core { + +class ByteStream : public Orphanable { + public: + virtual ~ByteStream() {} + + // Returns true if the bytes are available immediately (in which case + // on_complete will not be called), or false if the bytes will be available + // asynchronously (in which case on_complete will be called when they + // are available). + // + // max_size_hint can be set as a hint as to the maximum number + // of bytes that would be acceptable to read. 
+ virtual bool Next(size_t max_size_hint, + grpc_closure* on_complete) GRPC_ABSTRACT; + + // Returns the next slice in the byte stream when it is available, as + // indicated by Next(). + // + // Once a slice is returned into *slice, it is owned by the caller. + virtual grpc_error* Pull(grpc_slice* slice) GRPC_ABSTRACT; + + // Shuts down the byte stream. + // + // If there is a pending call to on_complete from Next(), it will be + // invoked with the error passed to Shutdown(). + // + // The next call to Pull() (if any) will return the error passed to + // Shutdown(). + virtual void Shutdown(grpc_error* error) GRPC_ABSTRACT; + + uint32_t length() const { return length_; } + uint32_t flags() const { return flags_; } + + void set_flags(uint32_t flags) { flags_ = flags; } + + GRPC_ABSTRACT_BASE_CLASS + + protected: + ByteStream(uint32_t length, uint32_t flags) + : length_(length), flags_(flags) {} + + private: + const uint32_t length_; + uint32_t flags_; }; -// Returns true if the bytes are available immediately (in which case -// on_complete will not be called), false if the bytes will be available -// asynchronously. -// -// max_size_hint can be set as a hint as to the maximum number -// of bytes that would be acceptable to read. -bool grpc_byte_stream_next(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, size_t max_size_hint, - grpc_closure *on_complete); - -// Returns the next slice in the byte stream when it is ready (indicated by -// either grpc_byte_stream_next returning true or on_complete passed to -// grpc_byte_stream_next is called). // -// Once a slice is returned into *slice, it is owned by the caller. -grpc_error *grpc_byte_stream_pull(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_slice *slice); - -// Shuts down the byte stream. +// SliceBufferByteStream // -// If there is a pending call to on_complete from grpc_byte_stream_next(), -// it will be invoked with the error passed to grpc_byte_stream_shutdown(). +// A ByteStream that wraps a slice buffer. // -// The next call to grpc_byte_stream_pull() (if any) will return the error -// passed to grpc_byte_stream_shutdown(). -void grpc_byte_stream_shutdown(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream, - grpc_error *error); -void grpc_byte_stream_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream *byte_stream); +class SliceBufferByteStream : public ByteStream { + public: + // Removes all slices in slice_buffer, leaving it empty. + SliceBufferByteStream(grpc_slice_buffer* slice_buffer, uint32_t flags); + + ~SliceBufferByteStream(); + + void Orphan() override; + + bool Next(size_t max_size_hint, grpc_closure* on_complete) override; + grpc_error* Pull(grpc_slice* slice) override; + void Shutdown(grpc_error* error) override; + + private: + grpc_slice_buffer backing_buffer_; + size_t cursor_ = 0; + grpc_error* shutdown_error_ = GRPC_ERROR_NONE; +}; -// grpc_slice_buffer_stream // -// A grpc_byte_stream that wraps a slice buffer. The stream takes -// ownership of the slices in the buffer, and on destruction will -// reset the contents of the buffer. 
- -typedef struct grpc_slice_buffer_stream { - grpc_byte_stream base; - grpc_slice_buffer *backing_buffer; - size_t cursor; - grpc_error *shutdown_error; -} grpc_slice_buffer_stream; - -void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream, - grpc_slice_buffer *slice_buffer, - uint32_t flags); - -// grpc_caching_byte_stream +// CachingByteStream // -// A grpc_byte_stream that that wraps an underlying byte stream but caches +// A ByteStream that that wraps an underlying byte stream but caches // the resulting slices in a slice buffer. If an initial attempt fails // without fully draining the underlying stream, a new caching stream // can be created from the same underlying cache, in which case it will @@ -106,33 +115,50 @@ void grpc_slice_buffer_stream_init(grpc_slice_buffer_stream *stream, // underlying stream. // // NOTE: No synchronization is done, so it is not safe to have multiple -// grpc_caching_byte_streams simultaneously drawing from the same underlying -// grpc_byte_stream_cache at the same time. - -typedef struct { - grpc_byte_stream *underlying_stream; - grpc_slice_buffer cache_buffer; -} grpc_byte_stream_cache; - -// Takes ownership of underlying_stream. -void grpc_byte_stream_cache_init(grpc_byte_stream_cache *cache, - grpc_byte_stream *underlying_stream); - -// Must not be called while still in use by a grpc_caching_byte_stream. -void grpc_byte_stream_cache_destroy(grpc_exec_ctx *exec_ctx, - grpc_byte_stream_cache *cache); - -typedef struct { - grpc_byte_stream base; - grpc_byte_stream_cache *cache; - size_t cursor; - grpc_error *shutdown_error; -} grpc_caching_byte_stream; - -void grpc_caching_byte_stream_init(grpc_caching_byte_stream *stream, - grpc_byte_stream_cache *cache); - -// Resets the byte stream to the start of the underlying stream. -void grpc_caching_byte_stream_reset(grpc_caching_byte_stream *stream); +// CachingByteStreams simultaneously drawing from the same underlying +// ByteStreamCache at the same time. +// + +class ByteStreamCache { + public: + class CachingByteStream : public ByteStream { + public: + explicit CachingByteStream(ByteStreamCache* cache); + + ~CachingByteStream(); + + void Orphan() override; + + bool Next(size_t max_size_hint, grpc_closure* on_complete) override; + grpc_error* Pull(grpc_slice* slice) override; + void Shutdown(grpc_error* error) override; + + // Resets the byte stream to the start of the underlying stream. + void Reset(); + + private: + ByteStreamCache* cache_; + size_t cursor_ = 0; + size_t offset_ = 0; + grpc_error* shutdown_error_ = GRPC_ERROR_NONE; + }; + + explicit ByteStreamCache(OrphanablePtr underlying_stream); + + ~ByteStreamCache(); + + // Must not be destroyed while still in use by a CachingByteStream. 
+ void Destroy(); + + grpc_slice_buffer* cache_buffer() { return &cache_buffer_; } + + private: + OrphanablePtr underlying_stream_; + uint32_t length_; + uint32_t flags_; + grpc_slice_buffer cache_buffer_; +}; + +} // namespace grpc_core #endif /* GRPC_CORE_LIB_TRANSPORT_BYTE_STREAM_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/connectivity_state.c b/Sources/CgRPC/src/core/lib/transport/connectivity_state.cc similarity index 52% rename from Sources/CgRPC/src/core/lib/transport/connectivity_state.c rename to Sources/CgRPC/src/core/lib/transport/connectivity_state.cc index f328a6cdb..db6b6c044 100644 --- a/Sources/CgRPC/src/core/lib/transport/connectivity_state.c +++ b/Sources/CgRPC/src/core/lib/transport/connectivity_state.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/transport/connectivity_state.h" #include @@ -24,13 +26,10 @@ #include #include -grpc_tracer_flag grpc_connectivity_state_trace = - GRPC_TRACER_INITIALIZER(false, "connectivity_state"); +grpc_core::TraceFlag grpc_connectivity_state_trace(false, "connectivity_state"); -const char *grpc_connectivity_state_name(grpc_connectivity_state state) { +const char* grpc_connectivity_state_name(grpc_connectivity_state state) { switch (state) { - case GRPC_CHANNEL_INIT: - return "INIT"; case GRPC_CHANNEL_IDLE: return "IDLE"; case GRPC_CHANNEL_CONNECTING: @@ -45,19 +44,18 @@ const char *grpc_connectivity_state_name(grpc_connectivity_state state) { GPR_UNREACHABLE_CODE(return "UNKNOWN"); } -void grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker, +void grpc_connectivity_state_init(grpc_connectivity_state_tracker* tracker, grpc_connectivity_state init_state, - const char *name) { + const char* name) { gpr_atm_no_barrier_store(&tracker->current_state_atm, init_state); tracker->current_error = GRPC_ERROR_NONE; - tracker->watchers = NULL; + tracker->watchers = nullptr; tracker->name = gpr_strdup(name); } -void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, - grpc_connectivity_state_tracker *tracker) { - grpc_error *error; - grpc_connectivity_state_watcher *w; +void grpc_connectivity_state_destroy(grpc_connectivity_state_tracker* tracker) { + grpc_error* error; + grpc_connectivity_state_watcher* w; while ((w = tracker->watchers)) { tracker->watchers = w->next; @@ -68,7 +66,7 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutdown connectivity owner"); } - GRPC_CLOSURE_SCHED(exec_ctx, w->notify, error); + GRPC_CLOSURE_SCHED(w->notify, error); gpr_free(w); } GRPC_ERROR_UNREF(tracker->current_error); @@ -76,65 +74,62 @@ void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, } grpc_connectivity_state grpc_connectivity_state_check( - grpc_connectivity_state_tracker *tracker) { - grpc_connectivity_state cur = - (grpc_connectivity_state)gpr_atm_no_barrier_load( - &tracker->current_state_atm); - if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) { - gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name, + grpc_connectivity_state_tracker* tracker) { + grpc_connectivity_state cur = static_cast( + gpr_atm_no_barrier_load(&tracker->current_state_atm)); + if (grpc_connectivity_state_trace.enabled()) { + gpr_log(GPR_INFO, "CONWATCH: %p %s: get %s", tracker, tracker->name, grpc_connectivity_state_name(cur)); } return cur; } grpc_connectivity_state grpc_connectivity_state_get( - grpc_connectivity_state_tracker *tracker, grpc_error **error) { - grpc_connectivity_state cur = - (grpc_connectivity_state)gpr_atm_no_barrier_load( - 
&tracker->current_state_atm); - if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) { - gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name, + grpc_connectivity_state_tracker* tracker, grpc_error** error) { + grpc_connectivity_state cur = static_cast( + gpr_atm_no_barrier_load(&tracker->current_state_atm)); + if (grpc_connectivity_state_trace.enabled()) { + gpr_log(GPR_INFO, "CONWATCH: %p %s: get %s", tracker, tracker->name, grpc_connectivity_state_name(cur)); } - if (error != NULL) { + if (error != nullptr) { *error = GRPC_ERROR_REF(tracker->current_error); } return cur; } bool grpc_connectivity_state_has_watchers( - grpc_connectivity_state_tracker *connectivity_state) { - return connectivity_state->watchers != NULL; + grpc_connectivity_state_tracker* connectivity_state) { + return connectivity_state->watchers != nullptr; } bool grpc_connectivity_state_notify_on_state_change( - grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker, - grpc_connectivity_state *current, grpc_closure *notify) { - grpc_connectivity_state cur = - (grpc_connectivity_state)gpr_atm_no_barrier_load( - &tracker->current_state_atm); - if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) { - if (current == NULL) { - gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker, + grpc_connectivity_state_tracker* tracker, grpc_connectivity_state* current, + grpc_closure* notify) { + grpc_connectivity_state cur = static_cast( + gpr_atm_no_barrier_load(&tracker->current_state_atm)); + if (grpc_connectivity_state_trace.enabled()) { + if (current == nullptr) { + gpr_log(GPR_INFO, "CONWATCH: %p %s: unsubscribe notify=%p", tracker, tracker->name, notify); } else { - gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker, + gpr_log(GPR_INFO, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker, tracker->name, grpc_connectivity_state_name(*current), grpc_connectivity_state_name(cur), notify); } } - if (current == NULL) { - grpc_connectivity_state_watcher *w = tracker->watchers; - if (w != NULL && w->notify == notify) { - GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED); + if (current == nullptr) { + grpc_connectivity_state_watcher* w = tracker->watchers; + if (w != nullptr && w->notify == notify) { + GRPC_CLOSURE_SCHED(notify, GRPC_ERROR_CANCELLED); tracker->watchers = w->next; gpr_free(w); return false; } - while (w != NULL) { - grpc_connectivity_state_watcher *rm_candidate = w->next; - if (rm_candidate != NULL && rm_candidate->notify == notify) { - GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED); + while (w != nullptr) { + grpc_connectivity_state_watcher* rm_candidate = w->next; + if (rm_candidate != nullptr && rm_candidate->notify == notify) { + GRPC_CLOSURE_SCHED(notify, GRPC_ERROR_CANCELLED); w->next = w->next->next; gpr_free(rm_candidate); return false; @@ -145,11 +140,10 @@ bool grpc_connectivity_state_notify_on_state_change( } else { if (cur != *current) { *current = cur; - GRPC_CLOSURE_SCHED(exec_ctx, notify, - GRPC_ERROR_REF(tracker->current_error)); + GRPC_CLOSURE_SCHED(notify, GRPC_ERROR_REF(tracker->current_error)); } else { - grpc_connectivity_state_watcher *w = - (grpc_connectivity_state_watcher *)gpr_malloc(sizeof(*w)); + grpc_connectivity_state_watcher* w = + static_cast(gpr_malloc(sizeof(*w))); w->current = current; w->notify = notify; w->next = tracker->watchers; @@ -159,22 +153,19 @@ bool grpc_connectivity_state_notify_on_state_change( } } -void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, - grpc_connectivity_state_tracker 
*tracker, +void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker, grpc_connectivity_state state, - grpc_error *error, const char *reason) { - grpc_connectivity_state cur = - (grpc_connectivity_state)gpr_atm_no_barrier_load( - &tracker->current_state_atm); - grpc_connectivity_state_watcher *w; - if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) { - const char *error_string = grpc_error_string(error); - gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker, + grpc_error* error, const char* reason) { + grpc_connectivity_state cur = static_cast( + gpr_atm_no_barrier_load(&tracker->current_state_atm)); + grpc_connectivity_state_watcher* w; + if (grpc_connectivity_state_trace.enabled()) { + const char* error_string = grpc_error_string(error); + gpr_log(GPR_INFO, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker, tracker->name, grpc_connectivity_state_name(cur), grpc_connectivity_state_name(state), reason, error, error_string); } switch (state) { - case GRPC_CHANNEL_INIT: case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_IDLE: case GRPC_CHANNEL_READY: @@ -192,15 +183,13 @@ void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, } GPR_ASSERT(cur != GRPC_CHANNEL_SHUTDOWN); gpr_atm_no_barrier_store(&tracker->current_state_atm, state); - while ((w = tracker->watchers) != NULL) { + while ((w = tracker->watchers) != nullptr) { *w->current = state; tracker->watchers = w->next; - if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) { - gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name, - w->notify); + if (grpc_connectivity_state_trace.enabled()) { + gpr_log(GPR_INFO, "NOTIFY: %p %s: %p", tracker, tracker->name, w->notify); } - GRPC_CLOSURE_SCHED(exec_ctx, w->notify, - GRPC_ERROR_REF(tracker->current_error)); + GRPC_CLOSURE_SCHED(w->notify, GRPC_ERROR_REF(tracker->current_error)); gpr_free(w); } } diff --git a/Sources/CgRPC/src/core/lib/transport/connectivity_state.h b/Sources/CgRPC/src/core/lib/transport/connectivity_state.h index 2fece6cc2..ecb083cfc 100644 --- a/Sources/CgRPC/src/core/lib/transport/connectivity_state.h +++ b/Sources/CgRPC/src/core/lib/transport/connectivity_state.h @@ -19,69 +19,69 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_CONNECTIVITY_STATE_H #define GRPC_CORE_LIB_TRANSPORT_CONNECTIVITY_STATE_H +#include + #include #include "src/core/lib/debug/trace.h" -#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/closure.h" typedef struct grpc_connectivity_state_watcher { /** we keep watchers in a linked list */ - struct grpc_connectivity_state_watcher *next; + struct grpc_connectivity_state_watcher* next; /** closure to notify on change */ - grpc_closure *notify; + grpc_closure* notify; /** the current state as believed by the watcher */ - grpc_connectivity_state *current; + grpc_connectivity_state* current; } grpc_connectivity_state_watcher; typedef struct { /** current grpc_connectivity_state */ gpr_atm current_state_atm; /** error associated with state */ - grpc_error *current_error; + grpc_error* current_error; /** all our watchers */ - grpc_connectivity_state_watcher *watchers; + grpc_connectivity_state_watcher* watchers; /** a name to help debugging */ - char *name; + char* name; } grpc_connectivity_state_tracker; -extern grpc_tracer_flag grpc_connectivity_state_trace; +extern grpc_core::TraceFlag grpc_connectivity_state_trace; /** enum --> string conversion */ -const char *grpc_connectivity_state_name(grpc_connectivity_state state); +const char* grpc_connectivity_state_name(grpc_connectivity_state state); -void 
grpc_connectivity_state_init(grpc_connectivity_state_tracker *tracker, +void grpc_connectivity_state_init(grpc_connectivity_state_tracker* tracker, grpc_connectivity_state init_state, - const char *name); -void grpc_connectivity_state_destroy(grpc_exec_ctx *exec_ctx, - grpc_connectivity_state_tracker *tracker); + const char* name); +void grpc_connectivity_state_destroy(grpc_connectivity_state_tracker* tracker); /** Set connectivity state; not thread safe; access must be serialized with an * external lock */ -void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx, - grpc_connectivity_state_tracker *tracker, +void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker, grpc_connectivity_state state, - grpc_error *associated_error, - const char *reason); + grpc_error* associated_error, + const char* reason); /** Return true if this connectivity state has watchers. Access must be serialized with an external lock. */ bool grpc_connectivity_state_has_watchers( - grpc_connectivity_state_tracker *tracker); + grpc_connectivity_state_tracker* tracker); /** Return the last seen connectivity state. No need to synchronize access. */ grpc_connectivity_state grpc_connectivity_state_check( - grpc_connectivity_state_tracker *tracker); + grpc_connectivity_state_tracker* tracker); /** Return the last seen connectivity state, and the associated error. Access must be serialized with an external lock. */ grpc_connectivity_state grpc_connectivity_state_get( - grpc_connectivity_state_tracker *tracker, grpc_error **error); + grpc_connectivity_state_tracker* tracker, grpc_error** error); /** Return 1 if the channel should start connecting, 0 otherwise. If current==NULL cancel notify if it is already queued (success==0 in that case). Access must be serialized with an external lock. */ bool grpc_connectivity_state_notify_on_state_change( - grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker, - grpc_connectivity_state *current, grpc_closure *notify); + grpc_connectivity_state_tracker* tracker, grpc_connectivity_state* current, + grpc_closure* notify); #endif /* GRPC_CORE_LIB_TRANSPORT_CONNECTIVITY_STATE_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/error_utils.c b/Sources/CgRPC/src/core/lib/transport/error_utils.cc similarity index 64% rename from Sources/CgRPC/src/core/lib/transport/error_utils.c rename to Sources/CgRPC/src/core/lib/transport/error_utils.cc index 5e3920b62..2eff8b291 100644 --- a/Sources/CgRPC/src/core/lib/transport/error_utils.c +++ b/Sources/CgRPC/src/core/lib/transport/error_utils.cc @@ -16,37 +16,42 @@ * */ +#include + #include "src/core/lib/transport/error_utils.h" +#include #include "src/core/lib/iomgr/error_internal.h" #include "src/core/lib/transport/status_conversion.h" -static grpc_error *recursively_find_error_with_field(grpc_error *error, +static grpc_error* recursively_find_error_with_field(grpc_error* error, grpc_error_ints which) { // If the error itself has a status code, return it. - if (grpc_error_get_int(error, which, NULL)) { + if (grpc_error_get_int(error, which, nullptr)) { return error; } - if (grpc_error_is_special(error)) return NULL; + if (grpc_error_is_special(error)) return nullptr; // Otherwise, search through its children. 
uint8_t slot = error->first_err; while (slot != UINT8_MAX) { - grpc_linked_error *lerr = (grpc_linked_error *)(error->arena + slot); - grpc_error *result = recursively_find_error_with_field(lerr->err, which); + grpc_linked_error* lerr = + reinterpret_cast<grpc_linked_error*>(error->arena + slot); + grpc_error* result = recursively_find_error_with_field(lerr->err, which); if (result) return result; slot = lerr->next; } - return NULL; + return nullptr; } -void grpc_error_get_status(grpc_error *error, gpr_timespec deadline, - grpc_status_code *code, grpc_slice *slice, - grpc_http2_error_code *http_error) { +void grpc_error_get_status(grpc_error* error, grpc_millis deadline, + grpc_status_code* code, grpc_slice* slice, + grpc_http2_error_code* http_error, + const char** error_string) { // Start with the parent error and recurse through the tree of children // until we find the first one that has a status code. - grpc_error *found_error = + grpc_error* found_error = recursively_find_error_with_field(error, GRPC_ERROR_INT_GRPC_STATUS); - if (found_error == NULL) { + if (found_error == nullptr) { /// If no grpc-status exists, retry through the tree to find a http2 error /// code found_error = @@ -55,25 +60,30 @@ void grpc_error_get_status(grpc_error *error, gpr_timespec deadline, // If we found an error with a status code above, use that; otherwise, // fall back to using the parent error. - if (found_error == NULL) found_error = error; + if (found_error == nullptr) found_error = error; grpc_status_code status = GRPC_STATUS_UNKNOWN; intptr_t integer; if (grpc_error_get_int(found_error, GRPC_ERROR_INT_GRPC_STATUS, &integer)) { - status = (grpc_status_code)integer; + status = static_cast<grpc_status_code>(integer); } else if (grpc_error_get_int(found_error, GRPC_ERROR_INT_HTTP2_ERROR, &integer)) { - status = grpc_http2_error_to_grpc_status((grpc_http2_error_code)integer, - deadline); + status = grpc_http2_error_to_grpc_status( + static_cast<grpc_http2_error_code>(integer), deadline); + } + if (code != nullptr) *code = status; + + if (error_string != nullptr && status != GRPC_STATUS_OK) { + *error_string = gpr_strdup(grpc_error_string(error)); } - if (code != NULL) *code = status; - if (http_error != NULL) { + if (http_error != nullptr) { if (grpc_error_get_int(found_error, GRPC_ERROR_INT_HTTP2_ERROR, &integer)) { - *http_error = (grpc_http2_error_code)integer; + *http_error = static_cast<grpc_http2_error_code>(integer); } else if (grpc_error_get_int(found_error, GRPC_ERROR_INT_GRPC_STATUS, &integer)) { - *http_error = grpc_status_to_http2_error((grpc_status_code)integer); + *http_error = + grpc_status_to_http2_error(static_cast<grpc_status_code>(integer)); } else { *http_error = found_error == GRPC_ERROR_NONE ? GRPC_HTTP2_NO_ERROR : GRPC_HTTP2_INTERNAL_ERROR; @@ -82,24 +92,23 @@ void grpc_error_get_status(grpc_error *error, gpr_timespec deadline, // If the error has a status message, use it. Otherwise, fall back to // the error description.
- if (slice != NULL) { + if (slice != nullptr) { if (!grpc_error_get_str(found_error, GRPC_ERROR_STR_GRPC_MESSAGE, slice)) { if (!grpc_error_get_str(found_error, GRPC_ERROR_STR_DESCRIPTION, slice)) { *slice = grpc_slice_from_static_string("unknown error"); } } } - - if (found_error == NULL) found_error = error; } -bool grpc_error_has_clear_grpc_status(grpc_error *error) { - if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, NULL)) { +bool grpc_error_has_clear_grpc_status(grpc_error* error) { + if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, nullptr)) { return true; } uint8_t slot = error->first_err; while (slot != UINT8_MAX) { - grpc_linked_error *lerr = (grpc_linked_error *)(error->arena + slot); + grpc_linked_error* lerr = + reinterpret_cast(error->arena + slot); if (grpc_error_has_clear_grpc_status(lerr->err)) { return true; } diff --git a/Sources/CgRPC/src/core/lib/transport/error_utils.h b/Sources/CgRPC/src/core/lib/transport/error_utils.h index e53088421..9a46267f3 100644 --- a/Sources/CgRPC/src/core/lib/transport/error_utils.h +++ b/Sources/CgRPC/src/core/lib/transport/error_utils.h @@ -19,23 +19,28 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H #define GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H +#include + #include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/transport/http2_errors.h" /// A utility function to get the status code and message to be returned /// to the application. If not set in the top-level message, looks /// through child errors until it finds the first one with these attributes. -/// All attributes are pulled from the same child error. If any of the -/// attributes (code, msg, http_status) are unneeded, they can be passed as +/// All attributes are pulled from the same child error. error_string will +/// be populated with the entire error string. If any of the attributes (code, +/// msg, http_status, error_string) are unneeded, they can be passed as /// NULL. -void grpc_error_get_status(grpc_error *error, gpr_timespec deadline, - grpc_status_code *code, grpc_slice *slice, - grpc_http2_error_code *http_status); +void grpc_error_get_status(grpc_error* error, grpc_millis deadline, + grpc_status_code* code, grpc_slice* slice, + grpc_http2_error_code* http_status, + const char** error_string); /// A utility function to check whether there is a clear status code that /// doesn't need to be guessed in \a error. 
This means that \a error or some /// child has GRPC_ERROR_INT_GRPC_STATUS set, or that it is GRPC_ERROR_NONE or /// GRPC_ERROR_CANCELLED -bool grpc_error_has_clear_grpc_status(grpc_error *error); +bool grpc_error_has_clear_grpc_status(grpc_error* error); #endif /* GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/metadata.c b/Sources/CgRPC/src/core/lib/transport/metadata.cc similarity index 68% rename from Sources/CgRPC/src/core/lib/transport/metadata.c rename to Sources/CgRPC/src/core/lib/transport/metadata.cc index 188b48562..d10194a2f 100644 --- a/Sources/CgRPC/src/core/lib/transport/metadata.c +++ b/Sources/CgRPC/src/core/lib/transport/metadata.cc @@ -16,9 +16,12 @@ * */ +#include + #include "src/core/lib/transport/metadata.h" #include +#include #include #include @@ -30,12 +33,12 @@ #include #include +#include "src/core/lib/gpr/murmur_hash.h" +#include "src/core/lib/gpr/string.h" #include "src/core/lib/iomgr/iomgr_internal.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/murmur_hash.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/transport/static_metadata.h" /* There are two kinds of mdelem and mdstr instances. @@ -47,9 +50,9 @@ * used to determine which kind of element a pointer refers to. */ +grpc_core::DebugOnlyTraceFlag grpc_trace_metadata(false, "metadata"); + #ifndef NDEBUG -grpc_tracer_flag grpc_trace_metadata = - GRPC_TRACER_INITIALIZER(false, "metadata"); #define DEBUG_ARGS , const char *file, int line #define FWD_DEBUG_ARGS , file, line #define REF_MD_LOCKED(shard, s) ref_md_locked((shard), (s), __FILE__, __LINE__) @@ -66,7 +69,7 @@ grpc_tracer_flag grpc_trace_metadata = #define TABLE_IDX(hash, capacity) (((hash) >> (LOG2_SHARD_COUNT)) % (capacity)) #define SHARD_IDX(hash) ((hash) & ((1 << (LOG2_SHARD_COUNT)) - 1)) -typedef void (*destroy_user_data_func)(void *user_data); +typedef void (*destroy_user_data_func)(void* user_data); /* Shadow structure for grpc_mdelem_data for interned elements */ typedef struct interned_metadata { @@ -81,7 +84,7 @@ typedef struct interned_metadata { gpr_atm destroy_user_data; gpr_atm user_data; - struct interned_metadata *bucket_next; + struct interned_metadata* bucket_next; } interned_metadata; /* Shadow structure for grpc_mdelem_data for allocated elements */ @@ -96,7 +99,7 @@ typedef struct allocated_metadata { typedef struct mdtab_shard { gpr_mu mu; - interned_metadata **elems; + interned_metadata** elems; size_t count; size_t capacity; /** Estimate of the number of unreferenced mdelems in the hash table. 
@@ -107,26 +110,26 @@ typedef struct mdtab_shard { static mdtab_shard g_shards[SHARD_COUNT]; -static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard); +static void gc_mdtab(mdtab_shard* shard); void grpc_mdctx_global_init(void) { /* initialize shards */ for (size_t i = 0; i < SHARD_COUNT; i++) { - mdtab_shard *shard = &g_shards[i]; + mdtab_shard* shard = &g_shards[i]; gpr_mu_init(&shard->mu); shard->count = 0; gpr_atm_no_barrier_store(&shard->free_estimate, 0); shard->capacity = INITIAL_SHARD_CAPACITY; - shard->elems = (interned_metadata **)gpr_zalloc(sizeof(*shard->elems) * - shard->capacity); + shard->elems = static_cast( + gpr_zalloc(sizeof(*shard->elems) * shard->capacity)); } } -void grpc_mdctx_global_shutdown(grpc_exec_ctx *exec_ctx) { +void grpc_mdctx_global_shutdown() { for (size_t i = 0; i < SHARD_COUNT; i++) { - mdtab_shard *shard = &g_shards[i]; + mdtab_shard* shard = &g_shards[i]; gpr_mu_destroy(&shard->mu); - gc_mdtab(exec_ctx, shard); + gc_mdtab(shard); /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */ if (shard->count != 0) { gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata elements were leaked", @@ -145,14 +148,14 @@ static int is_mdelem_static(grpc_mdelem e) { &grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT]; } -static void ref_md_locked(mdtab_shard *shard, - interned_metadata *md DEBUG_ARGS) { +static void ref_md_locked(mdtab_shard* shard, + interned_metadata* md DEBUG_ARGS) { #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_metadata)) { - char *key_str = grpc_slice_to_c_string(md->key); - char *value_str = grpc_slice_to_c_string(md->value); + if (grpc_trace_metadata.enabled()) { + char* key_str = grpc_slice_to_c_string(md->key); + char* value_str = grpc_slice_to_c_string(md->value); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, - "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", (void *)md, + "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", (void*)md, gpr_atm_no_barrier_load(&md->refcnt), gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str); gpr_free(key_str); @@ -164,21 +167,22 @@ static void ref_md_locked(mdtab_shard *shard, } } -static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) { +static void gc_mdtab(mdtab_shard* shard) { + GPR_TIMER_SCOPE("gc_mdtab", 0); + size_t i; - interned_metadata **prev_next; + interned_metadata** prev_next; interned_metadata *md, *next; gpr_atm num_freed = 0; - GPR_TIMER_BEGIN("gc_mdtab", 0); for (i = 0; i < shard->capacity; i++) { prev_next = &shard->elems[i]; for (md = shard->elems[i]; md; md = next) { - void *user_data = (void *)gpr_atm_no_barrier_load(&md->user_data); + void* user_data = (void*)gpr_atm_no_barrier_load(&md->user_data); next = md->bucket_next; if (gpr_atm_acq_load(&md->refcnt) == 0) { - grpc_slice_unref_internal(exec_ctx, md->key); - grpc_slice_unref_internal(exec_ctx, md->value); + grpc_slice_unref_internal(md->key); + grpc_slice_unref_internal(md->value); if (md->user_data) { ((destroy_user_data_func)gpr_atm_no_barrier_load( &md->destroy_user_data))(user_data); @@ -193,20 +197,19 @@ static void gc_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) { } } gpr_atm_no_barrier_fetch_add(&shard->free_estimate, -num_freed); - GPR_TIMER_END("gc_mdtab", 0); } -static void grow_mdtab(mdtab_shard *shard) { +static void grow_mdtab(mdtab_shard* shard) { + GPR_TIMER_SCOPE("grow_mdtab", 0); + size_t capacity = shard->capacity * 2; size_t i; - interned_metadata **mdtab; + interned_metadata** mdtab; interned_metadata *md, *next; uint32_t hash; - GPR_TIMER_BEGIN("grow_mdtab", 0); - - mdtab = - 
(interned_metadata **)gpr_zalloc(sizeof(interned_metadata *) * capacity); + mdtab = static_cast<interned_metadata**>( + gpr_zalloc(sizeof(interned_metadata*) * capacity)); for (i = 0; i < shard->capacity; i++) { for (md = shard->elems[i]; md; md = next) { @@ -219,43 +222,40 @@ static void grow_mdtab(mdtab_shard *shard) { mdtab[idx] = md; } } - gpr_free(shard->elems); shard->elems = mdtab; shard->capacity = capacity; - - GPR_TIMER_END("grow_mdtab", 0); } -static void rehash_mdtab(grpc_exec_ctx *exec_ctx, mdtab_shard *shard) { +static void rehash_mdtab(mdtab_shard* shard) { if (gpr_atm_no_barrier_load(&shard->free_estimate) > - (gpr_atm)(shard->capacity / 4)) { - gc_mdtab(exec_ctx, shard); + static_cast<gpr_atm>(shard->capacity / 4)) { + gc_mdtab(shard); } else { grow_mdtab(shard); } } grpc_mdelem grpc_mdelem_create( - grpc_exec_ctx *exec_ctx, grpc_slice key, grpc_slice value, - grpc_mdelem_data *compatible_external_backing_store) { + grpc_slice key, grpc_slice value, + grpc_mdelem_data* compatible_external_backing_store) { if (!grpc_slice_is_interned(key) || !grpc_slice_is_interned(value)) { - if (compatible_external_backing_store != NULL) { + if (compatible_external_backing_store != nullptr) { return GRPC_MAKE_MDELEM(compatible_external_backing_store, GRPC_MDELEM_STORAGE_EXTERNAL); } - allocated_metadata *allocated = - (allocated_metadata *)gpr_malloc(sizeof(*allocated)); + allocated_metadata* allocated = + static_cast<allocated_metadata*>(gpr_malloc(sizeof(*allocated))); allocated->key = grpc_slice_ref_internal(key); allocated->value = grpc_slice_ref_internal(value); gpr_atm_rel_store(&allocated->refcnt, 1); #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_metadata)) { - char *key_str = grpc_slice_to_c_string(allocated->key); - char *value_str = grpc_slice_to_c_string(allocated->value); + if (grpc_trace_metadata.enabled()) { + char* key_str = grpc_slice_to_c_string(allocated->key); + char* value_str = grpc_slice_to_c_string(allocated->value); gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%" PRIdPTR ": '%s' = '%s'", - (void *)allocated, gpr_atm_no_barrier_load(&allocated->refcnt), + (void*)allocated, gpr_atm_no_barrier_load(&allocated->refcnt), key_str, value_str); gpr_free(key_str); gpr_free(value_str); @@ -275,11 +275,11 @@ grpc_mdelem grpc_mdelem_create( uint32_t hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash(key), grpc_slice_hash(value)); - interned_metadata *md; - mdtab_shard *shard = &g_shards[SHARD_IDX(hash)]; + interned_metadata* md; + mdtab_shard* shard = &g_shards[SHARD_IDX(hash)]; size_t idx; - GPR_TIMER_BEGIN("grpc_mdelem_from_metadata_strings", 0); + GPR_TIMER_SCOPE("grpc_mdelem_from_metadata_strings", 0); gpr_mu_lock(&shard->mu); @@ -289,13 +289,12 @@ grpc_mdelem grpc_mdelem_create( if (grpc_slice_eq(key, md->key) && grpc_slice_eq(value, md->value)) { REF_MD_LOCKED(shard, md); gpr_mu_unlock(&shard->mu); - GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0); return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED); } } /* not found: create a new pair */ - md = (interned_metadata *)gpr_malloc(sizeof(interned_metadata)); + md = static_cast<interned_metadata*>(gpr_malloc(sizeof(interned_metadata))); gpr_atm_rel_store(&md->refcnt, 1); md->key = grpc_slice_ref_internal(key); md->value = grpc_slice_ref_internal(value); @@ -305,10 +304,10 @@ grpc_mdelem grpc_mdelem_create( shard->elems[idx] = md; gpr_mu_init(&md->mu_user_data); #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_metadata)) { - char *key_str = grpc_slice_to_c_string(md->key); - char *value_str = grpc_slice_to_c_string(md->value); - gpr_log(GPR_DEBUG, "ELM NEW:%p:%" PRIdPTR ": '%s' = '%s'", (void *)md, + if 
(grpc_trace_metadata.enabled()) { + char* key_str = grpc_slice_to_c_string(md->key); + char* value_str = grpc_slice_to_c_string(md->value); + gpr_log(GPR_DEBUG, "ELM NEW:%p:%" PRIdPTR ": '%s' = '%s'", (void*)md, gpr_atm_no_barrier_load(&md->refcnt), key_str, value_str); gpr_free(key_str); gpr_free(value_str); @@ -317,33 +316,30 @@ grpc_mdelem grpc_mdelem_create( shard->count++; if (shard->count > shard->capacity * 2) { - rehash_mdtab(exec_ctx, shard); + rehash_mdtab(shard); } gpr_mu_unlock(&shard->mu); - GPR_TIMER_END("grpc_mdelem_from_metadata_strings", 0); - return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED); } -grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key, - grpc_slice value) { - grpc_mdelem out = grpc_mdelem_create(exec_ctx, key, value, NULL); - grpc_slice_unref_internal(exec_ctx, key); - grpc_slice_unref_internal(exec_ctx, value); +grpc_mdelem grpc_mdelem_from_slices(grpc_slice key, grpc_slice value) { + grpc_mdelem out = grpc_mdelem_create(key, value, nullptr); + grpc_slice_unref_internal(key); + grpc_slice_unref_internal(value); return out; } -grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx *exec_ctx, - grpc_metadata *metadata) { +grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata) { bool changed = false; grpc_slice key_slice = grpc_slice_maybe_static_intern(metadata->key, &changed); grpc_slice value_slice = grpc_slice_maybe_static_intern(metadata->value, &changed); - return grpc_mdelem_create(exec_ctx, key_slice, value_slice, - changed ? NULL : (grpc_mdelem_data *)metadata); + return grpc_mdelem_create( + key_slice, value_slice, + changed ? nullptr : reinterpret_cast(metadata)); } static size_t get_base64_encoded_size(size_t raw_length) { @@ -351,11 +347,14 @@ static size_t get_base64_encoded_size(size_t raw_length) { return raw_length / 3 * 4 + tail_xtra[raw_length % 3]; } -size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem) { +size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem, + bool use_true_binary_metadata) { size_t overhead_and_key = 32 + GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)); size_t value_len = GRPC_SLICE_LENGTH(GRPC_MDVALUE(elem)); if (grpc_is_binary_header(GRPC_MDKEY(elem))) { - return overhead_and_key + get_base64_encoded_size(value_len); + return overhead_and_key + (use_true_binary_metadata + ? 
value_len + 1 + : get_base64_encoded_size(value_len)); } else { return overhead_and_key + value_len; } @@ -367,14 +366,15 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) { case GRPC_MDELEM_STORAGE_STATIC: break; case GRPC_MDELEM_STORAGE_INTERNED: { - interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd); + interned_metadata* md = + reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(gmd); #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_metadata)) { - char *key_str = grpc_slice_to_c_string(md->key); - char *value_str = grpc_slice_to_c_string(md->value); + if (grpc_trace_metadata.enabled()) { + char* key_str = grpc_slice_to_c_string(md->key); + char* value_str = grpc_slice_to_c_string(md->value); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", - (void *)md, gpr_atm_no_barrier_load(&md->refcnt), + (void*)md, gpr_atm_no_barrier_load(&md->refcnt), gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str); gpr_free(key_str); gpr_free(value_str); @@ -389,14 +389,15 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) { break; } case GRPC_MDELEM_STORAGE_ALLOCATED: { - allocated_metadata *md = (allocated_metadata *)GRPC_MDELEM_DATA(gmd); + allocated_metadata* md = + reinterpret_cast<allocated_metadata*> GRPC_MDELEM_DATA(gmd); #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_metadata)) { - char *key_str = grpc_slice_to_c_string(md->key); - char *value_str = grpc_slice_to_c_string(md->value); + if (grpc_trace_metadata.enabled()) { + char* key_str = grpc_slice_to_c_string(md->key); + char* value_str = grpc_slice_to_c_string(md->value); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", - (void *)md, gpr_atm_no_barrier_load(&md->refcnt), + (void*)md, gpr_atm_no_barrier_load(&md->refcnt), gpr_atm_no_barrier_load(&md->refcnt) + 1, key_str, value_str); gpr_free(key_str); gpr_free(value_str); @@ -413,20 +414,21 @@ grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd DEBUG_ARGS) { return gmd; } -void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) { +void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) { switch (GRPC_MDELEM_STORAGE(gmd)) { case GRPC_MDELEM_STORAGE_EXTERNAL: case GRPC_MDELEM_STORAGE_STATIC: break; case GRPC_MDELEM_STORAGE_INTERNED: { - interned_metadata *md = (interned_metadata *)GRPC_MDELEM_DATA(gmd); + interned_metadata* md = + reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(gmd); #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_metadata)) { - char *key_str = grpc_slice_to_c_string(md->key); - char *value_str = grpc_slice_to_c_string(md->value); + if (grpc_trace_metadata.enabled()) { + char* key_str = grpc_slice_to_c_string(md->key); + char* value_str = grpc_slice_to_c_string(md->value); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", - (void *)md, gpr_atm_no_barrier_load(&md->refcnt), + (void*)md, gpr_atm_no_barrier_load(&md->refcnt), gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str); gpr_free(key_str); gpr_free(value_str); @@ -439,20 +441,21 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) { if (1 == prev_refcount) { /* once the refcount hits zero, some other thread can come along and free md at any time: it's unsafe from this point on to access it */ - mdtab_shard *shard = &g_shards[SHARD_IDX(hash)]; + mdtab_shard* shard = &g_shards[SHARD_IDX(hash)]; gpr_atm_no_barrier_fetch_add(&shard->free_estimate, 1); } break; } case GRPC_MDELEM_STORAGE_ALLOCATED: { - allocated_metadata *md = (allocated_metadata
*)GRPC_MDELEM_DATA(gmd); + allocated_metadata* md = + reinterpret_cast<allocated_metadata*> GRPC_MDELEM_DATA(gmd); #ifndef NDEBUG - if (GRPC_TRACER_ON(grpc_trace_metadata)) { - char *key_str = grpc_slice_to_c_string(md->key); - char *value_str = grpc_slice_to_c_string(md->value); + if (grpc_trace_metadata.enabled()) { + char* key_str = grpc_slice_to_c_string(md->key); + char* value_str = grpc_slice_to_c_string(md->value); gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", - (void *)md, gpr_atm_no_barrier_load(&md->refcnt), + (void*)md, gpr_atm_no_barrier_load(&md->refcnt), gpr_atm_no_barrier_load(&md->refcnt) - 1, key_str, value_str); gpr_free(key_str); gpr_free(value_str); @@ -461,8 +464,8 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) { const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1); GPR_ASSERT(prev_refcount >= 1); if (1 == prev_refcount) { - grpc_slice_unref_internal(exec_ctx, md->key); - grpc_slice_unref_internal(exec_ctx, md->value); + grpc_slice_unref_internal(md->key); + grpc_slice_unref_internal(md->value); gpr_free(md); } break; @@ -470,51 +473,53 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem gmd DEBUG_ARGS) { } } -void *grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void *)) { +void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*destroy_func)(void*)) { switch (GRPC_MDELEM_STORAGE(md)) { case GRPC_MDELEM_STORAGE_EXTERNAL: case GRPC_MDELEM_STORAGE_ALLOCATED: - return NULL; + return nullptr; case GRPC_MDELEM_STORAGE_STATIC: - return (void *)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) - - grpc_static_mdelem_table]; + return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) - + grpc_static_mdelem_table]; case GRPC_MDELEM_STORAGE_INTERNED: { - interned_metadata *im = (interned_metadata *)GRPC_MDELEM_DATA(md); - void *result; + interned_metadata* im = + reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(md); + void* result; if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) { - return (void *)gpr_atm_no_barrier_load(&im->user_data); + return (void*)gpr_atm_no_barrier_load(&im->user_data); } else { - return NULL; + return nullptr; } return result; } } - GPR_UNREACHABLE_CODE(return NULL); + GPR_UNREACHABLE_CODE(return nullptr); } -void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *), - void *user_data) { +void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*), + void* user_data) { switch (GRPC_MDELEM_STORAGE(md)) { case GRPC_MDELEM_STORAGE_EXTERNAL: case GRPC_MDELEM_STORAGE_ALLOCATED: destroy_func(user_data); - return NULL; + return nullptr; case GRPC_MDELEM_STORAGE_STATIC: destroy_func(user_data); - return (void *)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) - - grpc_static_mdelem_table]; + return (void*)grpc_static_mdelem_user_data[GRPC_MDELEM_DATA(md) - + grpc_static_mdelem_table]; case GRPC_MDELEM_STORAGE_INTERNED: { - interned_metadata *im = (interned_metadata *)GRPC_MDELEM_DATA(md); + interned_metadata* im = + reinterpret_cast<interned_metadata*> GRPC_MDELEM_DATA(md); GPR_ASSERT(!is_mdelem_static(md)); - GPR_ASSERT((user_data == NULL) == (destroy_func == NULL)); + GPR_ASSERT((user_data == nullptr) == (destroy_func == nullptr)); gpr_mu_lock(&im->mu_user_data); if (gpr_atm_no_barrier_load(&im->destroy_user_data)) { /* user data can only be set once */ gpr_mu_unlock(&im->mu_user_data); - if (destroy_func != NULL) { + if (destroy_func != nullptr) { destroy_func(user_data); } - return (void
*)gpr_atm_no_barrier_load(&im->user_data); + return (void*)gpr_atm_no_barrier_load(&im->user_data); } gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data); gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func); @@ -522,7 +527,7 @@ void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *), return user_data; } } - GPR_UNREACHABLE_CODE(return NULL); + GPR_UNREACHABLE_CODE(return nullptr); } bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b) { diff --git a/Sources/CgRPC/src/core/lib/transport/metadata.h b/Sources/CgRPC/src/core/lib/transport/metadata.h index 974469e43..78df4bc3a 100644 --- a/Sources/CgRPC/src/core/lib/transport/metadata.h +++ b/Sources/CgRPC/src/core/lib/transport/metadata.h @@ -19,19 +19,15 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_METADATA_H #define GRPC_CORE_LIB_TRANSPORT_METADATA_H +#include + #include #include -#include -#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/debug/trace.h" +#include "src/core/lib/gpr/useful.h" -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_metadata; -#endif - -#ifdef __cplusplus -extern "C" { -#endif +extern grpc_core::DebugOnlyTraceFlag grpc_trace_metadata; /* This file provides a mechanism for tracking metadata through the grpc stack. It's not intended for consumption outside of the library. @@ -98,56 +94,56 @@ struct grpc_mdelem { uintptr_t payload; }; -#define GRPC_MDELEM_DATA(md) \ - ((grpc_mdelem_data *)((md).payload & ~(uintptr_t)3)) +#define GRPC_MDELEM_DATA(md) ((grpc_mdelem_data*)((md).payload & ~(uintptr_t)3)) #define GRPC_MDELEM_STORAGE(md) \ ((grpc_mdelem_data_storage)((md).payload & (uintptr_t)3)) +#ifdef __cplusplus +#define GRPC_MAKE_MDELEM(data, storage) \ + (grpc_mdelem{((uintptr_t)(data)) | ((uintptr_t)storage)}) +#else #define GRPC_MAKE_MDELEM(data, storage) \ ((grpc_mdelem){((uintptr_t)(data)) | ((uintptr_t)storage)}) +#endif #define GRPC_MDELEM_IS_INTERNED(md) \ ((grpc_mdelem_data_storage)((md).payload & \ (uintptr_t)GRPC_MDELEM_STORAGE_INTERNED_BIT)) /* Unrefs the slices. */ -grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx *exec_ctx, grpc_slice key, - grpc_slice value); +grpc_mdelem grpc_mdelem_from_slices(grpc_slice key, grpc_slice value); /* Cheaply convert a grpc_metadata to a grpc_mdelem; may use the grpc_metadata object as backing storage (so lifetimes should align) */ -grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx *exec_ctx, - grpc_metadata *metadata); +grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata); /* Does not unref the slices; if a new non-interned mdelem is needed, allocates one if compatible_external_backing_store is NULL, or uses compatible_external_backing_store if it is non-NULL (in which case it's the users responsibility to ensure that it outlives usage) */ grpc_mdelem grpc_mdelem_create( - grpc_exec_ctx *exec_ctx, grpc_slice key, grpc_slice value, - grpc_mdelem_data *compatible_external_backing_store); + grpc_slice key, grpc_slice value, + grpc_mdelem_data* compatible_external_backing_store); bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b); -size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem); +size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem, + bool use_true_binary_metadata); /* Mutator and accessor for grpc_mdelem user data. The destructor function is used as a type tag and is checked during user_data fetch. 
*/ -void *grpc_mdelem_get_user_data(grpc_mdelem md, - void (*if_destroy_func)(void *)); -void *grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void *), - void *user_data); +void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*if_destroy_func)(void*)); +void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*), + void* user_data); #ifndef NDEBUG #define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__) -#define GRPC_MDELEM_UNREF(exec_ctx, s) \ - grpc_mdelem_unref((exec_ctx), (s), __FILE__, __LINE__) -grpc_mdelem grpc_mdelem_ref(grpc_mdelem md, const char *file, int line); -void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md, - const char *file, int line); +#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s), __FILE__, __LINE__) +grpc_mdelem grpc_mdelem_ref(grpc_mdelem md, const char* file, int line); +void grpc_mdelem_unref(grpc_mdelem md, const char* file, int line); #else #define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s)) -#define GRPC_MDELEM_UNREF(exec_ctx, s) grpc_mdelem_unref((exec_ctx), (s)) +#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s)) grpc_mdelem grpc_mdelem_ref(grpc_mdelem md); -void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md); +void grpc_mdelem_unref(grpc_mdelem md); #endif #define GRPC_MDKEY(md) (GRPC_MDELEM_DATA(md)->key) @@ -164,10 +160,6 @@ void grpc_mdelem_unref(grpc_exec_ctx *exec_ctx, grpc_mdelem md); #define GRPC_MDSTR_KV_HASH(k_hash, v_hash) (GPR_ROTL((k_hash), 2) ^ (v_hash)) void grpc_mdctx_global_init(void); -void grpc_mdctx_global_shutdown(grpc_exec_ctx *exec_ctx); - -#ifdef __cplusplus -} -#endif +void grpc_mdctx_global_shutdown(); #endif /* GRPC_CORE_LIB_TRANSPORT_METADATA_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/metadata_batch.c b/Sources/CgRPC/src/core/lib/transport/metadata_batch.c deleted file mode 100644 index 54388bdcd..000000000 --- a/Sources/CgRPC/src/core/lib/transport/metadata_batch.c +++ /dev/null @@ -1,315 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
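As a reading aid only, and not part of the patch, here is a minimal sketch of how the grpc_mdelem user-data accessors declared in metadata.h above are typically used: the destroy function doubles as the type tag that grpc_mdelem_get_user_data() checks. The cached_value struct, its stand-in "parse" step, and destroy_cached_value() are hypothetical names.

typedef struct cached_value { size_t parsed_len; } cached_value;  // hypothetical payload

static void destroy_cached_value(void* p) { gpr_free(p); }

// Compute-once cache attached to an interned mdelem.
static cached_value* get_cached_value(grpc_mdelem md) {
  void* existing = grpc_mdelem_get_user_data(md, destroy_cached_value);
  if (existing != nullptr) return static_cast<cached_value*>(existing);
  cached_value* fresh = static_cast<cached_value*>(gpr_malloc(sizeof(*fresh)));
  fresh->parsed_len = GRPC_SLICE_LENGTH(GRPC_MDVALUE(md));  // stand-in for real parsing
  // If another thread set user data first (or md is not interned), the new
  // value is destroyed by the callee and the stored value is returned instead.
  return static_cast<cached_value*>(
      grpc_mdelem_set_user_data(md, destroy_cached_value, fresh));
}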
- * - */ - -#include "src/core/lib/transport/metadata_batch.h" - -#include -#include - -#include -#include - -#include "src/core/lib/profiling/timers.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" - -static void assert_valid_list(grpc_mdelem_list *list) { -#ifndef NDEBUG - grpc_linked_mdelem *l; - - GPR_ASSERT((list->head == NULL) == (list->tail == NULL)); - if (!list->head) return; - GPR_ASSERT(list->head->prev == NULL); - GPR_ASSERT(list->tail->next == NULL); - GPR_ASSERT((list->head == list->tail) == (list->head->next == NULL)); - - size_t verified_count = 0; - for (l = list->head; l; l = l->next) { - GPR_ASSERT(!GRPC_MDISNULL(l->md)); - GPR_ASSERT((l->prev == NULL) == (l == list->head)); - GPR_ASSERT((l->next == NULL) == (l == list->tail)); - if (l->next) GPR_ASSERT(l->next->prev == l); - if (l->prev) GPR_ASSERT(l->prev->next == l); - verified_count++; - } - GPR_ASSERT(list->count == verified_count); -#endif /* NDEBUG */ -} - -static void assert_valid_callouts(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch) { -#ifndef NDEBUG - for (grpc_linked_mdelem *l = batch->list.head; l != NULL; l = l->next) { - grpc_slice key_interned = grpc_slice_intern(GRPC_MDKEY(l->md)); - grpc_metadata_batch_callouts_index callout_idx = - GRPC_BATCH_INDEX_OF(key_interned); - if (callout_idx != GRPC_BATCH_CALLOUTS_COUNT) { - GPR_ASSERT(batch->idx.array[callout_idx] == l); - } - grpc_slice_unref_internal(exec_ctx, key_interned); - } -#endif -} - -#ifndef NDEBUG -void grpc_metadata_batch_assert_ok(grpc_metadata_batch *batch) { - assert_valid_list(&batch->list); -} -#endif /* NDEBUG */ - -void grpc_metadata_batch_init(grpc_metadata_batch *batch) { - memset(batch, 0, sizeof(*batch)); - batch->deadline = gpr_inf_future(GPR_CLOCK_REALTIME); -} - -void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch) { - grpc_linked_mdelem *l; - for (l = batch->list.head; l; l = l->next) { - GRPC_MDELEM_UNREF(exec_ctx, l->md); - } -} - -grpc_error *grpc_attach_md_to_error(grpc_error *src, grpc_mdelem md) { - grpc_error *out = grpc_error_set_str( - grpc_error_set_str(src, GRPC_ERROR_STR_KEY, - grpc_slice_ref_internal(GRPC_MDKEY(md))), - GRPC_ERROR_STR_VALUE, grpc_slice_ref_internal(GRPC_MDVALUE(md))); - return out; -} - -static grpc_error *maybe_link_callout(grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) - GRPC_MUST_USE_RESULT; - -static grpc_error *maybe_link_callout(grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) { - grpc_metadata_batch_callouts_index idx = - GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md)); - if (idx == GRPC_BATCH_CALLOUTS_COUNT) { - return GRPC_ERROR_NONE; - } - if (batch->idx.array[idx] == NULL) { - if (grpc_static_callout_is_default[idx]) ++batch->list.default_count; - batch->idx.array[idx] = storage; - return GRPC_ERROR_NONE; - } - return grpc_attach_md_to_error( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unallowed duplicate metadata"), - storage->md); -} - -static void maybe_unlink_callout(grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) { - grpc_metadata_batch_callouts_index idx = - GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md)); - if (idx == GRPC_BATCH_CALLOUTS_COUNT) { - return; - } - if (grpc_static_callout_is_default[idx]) --batch->list.default_count; - GPR_ASSERT(batch->idx.array[idx] != NULL); - batch->idx.array[idx] = NULL; -} - -grpc_error *grpc_metadata_batch_add_head(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, - grpc_linked_mdelem *storage, - grpc_mdelem elem_to_add) 
{ - GPR_ASSERT(!GRPC_MDISNULL(elem_to_add)); - storage->md = elem_to_add; - return grpc_metadata_batch_link_head(exec_ctx, batch, storage); -} - -static void link_head(grpc_mdelem_list *list, grpc_linked_mdelem *storage) { - assert_valid_list(list); - GPR_ASSERT(!GRPC_MDISNULL(storage->md)); - storage->prev = NULL; - storage->next = list->head; - if (list->head != NULL) { - list->head->prev = storage; - } else { - list->tail = storage; - } - list->head = storage; - list->count++; - assert_valid_list(list); -} - -grpc_error *grpc_metadata_batch_link_head(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) { - assert_valid_callouts(exec_ctx, batch); - grpc_error *err = maybe_link_callout(batch, storage); - if (err != GRPC_ERROR_NONE) { - assert_valid_callouts(exec_ctx, batch); - return err; - } - link_head(&batch->list, storage); - assert_valid_callouts(exec_ctx, batch); - return GRPC_ERROR_NONE; -} - -grpc_error *grpc_metadata_batch_add_tail(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, - grpc_linked_mdelem *storage, - grpc_mdelem elem_to_add) { - GPR_ASSERT(!GRPC_MDISNULL(elem_to_add)); - storage->md = elem_to_add; - return grpc_metadata_batch_link_tail(exec_ctx, batch, storage); -} - -static void link_tail(grpc_mdelem_list *list, grpc_linked_mdelem *storage) { - assert_valid_list(list); - GPR_ASSERT(!GRPC_MDISNULL(storage->md)); - storage->prev = list->tail; - storage->next = NULL; - storage->reserved = NULL; - if (list->tail != NULL) { - list->tail->next = storage; - } else { - list->head = storage; - } - list->tail = storage; - list->count++; - assert_valid_list(list); -} - -grpc_error *grpc_metadata_batch_link_tail(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) { - assert_valid_callouts(exec_ctx, batch); - grpc_error *err = maybe_link_callout(batch, storage); - if (err != GRPC_ERROR_NONE) { - assert_valid_callouts(exec_ctx, batch); - return err; - } - link_tail(&batch->list, storage); - assert_valid_callouts(exec_ctx, batch); - return GRPC_ERROR_NONE; -} - -static void unlink_storage(grpc_mdelem_list *list, - grpc_linked_mdelem *storage) { - assert_valid_list(list); - if (storage->prev != NULL) { - storage->prev->next = storage->next; - } else { - list->head = storage->next; - } - if (storage->next != NULL) { - storage->next->prev = storage->prev; - } else { - list->tail = storage->prev; - } - list->count--; - assert_valid_list(list); -} - -void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) { - assert_valid_callouts(exec_ctx, batch); - maybe_unlink_callout(batch, storage); - unlink_storage(&batch->list, storage); - GRPC_MDELEM_UNREF(exec_ctx, storage->md); - assert_valid_callouts(exec_ctx, batch); -} - -void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx, - grpc_linked_mdelem *storage, - grpc_slice value) { - grpc_mdelem old_mdelem = storage->md; - grpc_mdelem new_mdelem = grpc_mdelem_from_slices( - exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value); - storage->md = new_mdelem; - GRPC_MDELEM_UNREF(exec_ctx, old_mdelem); -} - -grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, - grpc_linked_mdelem *storage, - grpc_mdelem new_mdelem) { - assert_valid_callouts(exec_ctx, batch); - grpc_error *error = GRPC_ERROR_NONE; - grpc_mdelem old_mdelem = storage->md; - if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) { - maybe_unlink_callout(batch, storage); - 
storage->md = new_mdelem; - error = maybe_link_callout(batch, storage); - if (error != GRPC_ERROR_NONE) { - unlink_storage(&batch->list, storage); - GRPC_MDELEM_UNREF(exec_ctx, storage->md); - } - } else { - storage->md = new_mdelem; - } - GRPC_MDELEM_UNREF(exec_ctx, old_mdelem); - assert_valid_callouts(exec_ctx, batch); - return error; -} - -void grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch) { - grpc_metadata_batch_destroy(exec_ctx, batch); - grpc_metadata_batch_init(batch); -} - -bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch) { - return batch->list.head == NULL && - gpr_time_cmp(gpr_inf_future(batch->deadline.clock_type), - batch->deadline) == 0; -} - -size_t grpc_metadata_batch_size(grpc_metadata_batch *batch) { - size_t size = 0; - for (grpc_linked_mdelem *elem = batch->list.head; elem != NULL; - elem = elem->next) { - size += GRPC_MDELEM_LENGTH(elem->md); - } - return size; -} - -static void add_error(grpc_error **composite, grpc_error *error, - const char *composite_error_string) { - if (error == GRPC_ERROR_NONE) return; - if (*composite == GRPC_ERROR_NONE) { - *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(composite_error_string); - } - *composite = grpc_error_add_child(*composite, error); -} - -grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, - grpc_metadata_batch_filter_func func, - void *user_data, - const char *composite_error_string) { - grpc_linked_mdelem *l = batch->list.head; - grpc_error *error = GRPC_ERROR_NONE; - while (l) { - grpc_linked_mdelem *next = l->next; - grpc_filtered_mdelem new_mdelem = func(exec_ctx, user_data, l->md); - add_error(&error, new_mdelem.error, composite_error_string); - if (GRPC_MDISNULL(new_mdelem.md)) { - grpc_metadata_batch_remove(exec_ctx, batch, l); - } else if (new_mdelem.md.payload != l->md.payload) { - grpc_metadata_batch_substitute(exec_ctx, batch, l, new_mdelem.md); - } - l = next; - } - return error; -} diff --git a/Sources/CgRPC/src/core/lib/transport/metadata_batch.cc b/Sources/CgRPC/src/core/lib/transport/metadata_batch.cc new file mode 100644 index 000000000..49740fcd1 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/transport/metadata_batch.cc @@ -0,0 +1,329 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/transport/metadata_batch.h" + +#include +#include + +#include +#include + +#include "src/core/lib/profiling/timers.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" + +static void assert_valid_list(grpc_mdelem_list* list) { +#ifndef NDEBUG + grpc_linked_mdelem* l; + + GPR_ASSERT((list->head == nullptr) == (list->tail == nullptr)); + if (!list->head) return; + GPR_ASSERT(list->head->prev == nullptr); + GPR_ASSERT(list->tail->next == nullptr); + GPR_ASSERT((list->head == list->tail) == (list->head->next == nullptr)); + + size_t verified_count = 0; + for (l = list->head; l; l = l->next) { + GPR_ASSERT(!GRPC_MDISNULL(l->md)); + GPR_ASSERT((l->prev == nullptr) == (l == list->head)); + GPR_ASSERT((l->next == nullptr) == (l == list->tail)); + if (l->next) GPR_ASSERT(l->next->prev == l); + if (l->prev) GPR_ASSERT(l->prev->next == l); + verified_count++; + } + GPR_ASSERT(list->count == verified_count); +#endif /* NDEBUG */ +} + +static void assert_valid_callouts(grpc_metadata_batch* batch) { +#ifndef NDEBUG + for (grpc_linked_mdelem* l = batch->list.head; l != nullptr; l = l->next) { + grpc_slice key_interned = grpc_slice_intern(GRPC_MDKEY(l->md)); + grpc_metadata_batch_callouts_index callout_idx = + GRPC_BATCH_INDEX_OF(key_interned); + if (callout_idx != GRPC_BATCH_CALLOUTS_COUNT) { + GPR_ASSERT(batch->idx.array[callout_idx] == l); + } + grpc_slice_unref_internal(key_interned); + } +#endif +} + +#ifndef NDEBUG +void grpc_metadata_batch_assert_ok(grpc_metadata_batch* batch) { + assert_valid_list(&batch->list); +} +#endif /* NDEBUG */ + +void grpc_metadata_batch_init(grpc_metadata_batch* batch) { + memset(batch, 0, sizeof(*batch)); + batch->deadline = GRPC_MILLIS_INF_FUTURE; +} + +void grpc_metadata_batch_destroy(grpc_metadata_batch* batch) { + grpc_linked_mdelem* l; + for (l = batch->list.head; l; l = l->next) { + GRPC_MDELEM_UNREF(l->md); + } +} + +grpc_error* grpc_attach_md_to_error(grpc_error* src, grpc_mdelem md) { + grpc_error* out = grpc_error_set_str( + grpc_error_set_str(src, GRPC_ERROR_STR_KEY, + grpc_slice_ref_internal(GRPC_MDKEY(md))), + GRPC_ERROR_STR_VALUE, grpc_slice_ref_internal(GRPC_MDVALUE(md))); + return out; +} + +static grpc_error* maybe_link_callout(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage) + GRPC_MUST_USE_RESULT; + +static grpc_error* maybe_link_callout(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage) { + grpc_metadata_batch_callouts_index idx = + GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md)); + if (idx == GRPC_BATCH_CALLOUTS_COUNT) { + return GRPC_ERROR_NONE; + } + if (batch->idx.array[idx] == nullptr) { + if (grpc_static_callout_is_default[idx]) ++batch->list.default_count; + batch->idx.array[idx] = storage; + return GRPC_ERROR_NONE; + } + return grpc_attach_md_to_error( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unallowed duplicate metadata"), + storage->md); +} + +static void maybe_unlink_callout(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage) { + grpc_metadata_batch_callouts_index idx = + GRPC_BATCH_INDEX_OF(GRPC_MDKEY(storage->md)); + if (idx == GRPC_BATCH_CALLOUTS_COUNT) { + return; + } + if (grpc_static_callout_is_default[idx]) --batch->list.default_count; + GPR_ASSERT(batch->idx.array[idx] != nullptr); + batch->idx.array[idx] = nullptr; +} + +grpc_error* grpc_metadata_batch_add_head(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage, + grpc_mdelem elem_to_add) { + GPR_ASSERT(!GRPC_MDISNULL(elem_to_add)); + storage->md = 
elem_to_add; + return grpc_metadata_batch_link_head(batch, storage); +} + +static void link_head(grpc_mdelem_list* list, grpc_linked_mdelem* storage) { + assert_valid_list(list); + GPR_ASSERT(!GRPC_MDISNULL(storage->md)); + storage->prev = nullptr; + storage->next = list->head; + if (list->head != nullptr) { + list->head->prev = storage; + } else { + list->tail = storage; + } + list->head = storage; + list->count++; + assert_valid_list(list); +} + +grpc_error* grpc_metadata_batch_link_head(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage) { + assert_valid_callouts(batch); + grpc_error* err = maybe_link_callout(batch, storage); + if (err != GRPC_ERROR_NONE) { + assert_valid_callouts(batch); + return err; + } + link_head(&batch->list, storage); + assert_valid_callouts(batch); + return GRPC_ERROR_NONE; +} + +grpc_error* grpc_metadata_batch_add_tail(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage, + grpc_mdelem elem_to_add) { + GPR_ASSERT(!GRPC_MDISNULL(elem_to_add)); + storage->md = elem_to_add; + return grpc_metadata_batch_link_tail(batch, storage); +} + +static void link_tail(grpc_mdelem_list* list, grpc_linked_mdelem* storage) { + assert_valid_list(list); + GPR_ASSERT(!GRPC_MDISNULL(storage->md)); + storage->prev = list->tail; + storage->next = nullptr; + storage->reserved = nullptr; + if (list->tail != nullptr) { + list->tail->next = storage; + } else { + list->head = storage; + } + list->tail = storage; + list->count++; + assert_valid_list(list); +} + +grpc_error* grpc_metadata_batch_link_tail(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage) { + assert_valid_callouts(batch); + grpc_error* err = maybe_link_callout(batch, storage); + if (err != GRPC_ERROR_NONE) { + assert_valid_callouts(batch); + return err; + } + link_tail(&batch->list, storage); + assert_valid_callouts(batch); + return GRPC_ERROR_NONE; +} + +static void unlink_storage(grpc_mdelem_list* list, + grpc_linked_mdelem* storage) { + assert_valid_list(list); + if (storage->prev != nullptr) { + storage->prev->next = storage->next; + } else { + list->head = storage->next; + } + if (storage->next != nullptr) { + storage->next->prev = storage->prev; + } else { + list->tail = storage->prev; + } + list->count--; + assert_valid_list(list); +} + +void grpc_metadata_batch_remove(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage) { + assert_valid_callouts(batch); + maybe_unlink_callout(batch, storage); + unlink_storage(&batch->list, storage); + GRPC_MDELEM_UNREF(storage->md); + assert_valid_callouts(batch); +} + +void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage, + grpc_slice value) { + grpc_mdelem old_mdelem = storage->md; + grpc_mdelem new_mdelem = grpc_mdelem_from_slices( + grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value); + storage->md = new_mdelem; + GRPC_MDELEM_UNREF(old_mdelem); +} + +grpc_error* grpc_metadata_batch_substitute(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage, + grpc_mdelem new_mdelem) { + assert_valid_callouts(batch); + grpc_error* error = GRPC_ERROR_NONE; + grpc_mdelem old_mdelem = storage->md; + if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) { + maybe_unlink_callout(batch, storage); + storage->md = new_mdelem; + error = maybe_link_callout(batch, storage); + if (error != GRPC_ERROR_NONE) { + unlink_storage(&batch->list, storage); + GRPC_MDELEM_UNREF(storage->md); + } + } else { + storage->md = new_mdelem; + } + GRPC_MDELEM_UNREF(old_mdelem); + assert_valid_callouts(batch); + return error; +} + +void 
grpc_metadata_batch_clear(grpc_metadata_batch* batch) { + grpc_metadata_batch_destroy(batch); + grpc_metadata_batch_init(batch); +} + +bool grpc_metadata_batch_is_empty(grpc_metadata_batch* batch) { + return batch->list.head == nullptr && + batch->deadline == GRPC_MILLIS_INF_FUTURE; +} + +size_t grpc_metadata_batch_size(grpc_metadata_batch* batch) { + size_t size = 0; + for (grpc_linked_mdelem* elem = batch->list.head; elem != nullptr; + elem = elem->next) { + size += GRPC_MDELEM_LENGTH(elem->md); + } + return size; +} + +static void add_error(grpc_error** composite, grpc_error* error, + const char* composite_error_string) { + if (error == GRPC_ERROR_NONE) return; + if (*composite == GRPC_ERROR_NONE) { + *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(composite_error_string); + } + *composite = grpc_error_add_child(*composite, error); +} + +grpc_error* grpc_metadata_batch_filter(grpc_metadata_batch* batch, + grpc_metadata_batch_filter_func func, + void* user_data, + const char* composite_error_string) { + grpc_linked_mdelem* l = batch->list.head; + grpc_error* error = GRPC_ERROR_NONE; + while (l) { + grpc_linked_mdelem* next = l->next; + grpc_filtered_mdelem new_mdelem = func(user_data, l->md); + add_error(&error, new_mdelem.error, composite_error_string); + if (GRPC_MDISNULL(new_mdelem.md)) { + grpc_metadata_batch_remove(batch, l); + } else if (new_mdelem.md.payload != l->md.payload) { + grpc_metadata_batch_substitute(batch, l, new_mdelem.md); + } + l = next; + } + return error; +} + +void grpc_metadata_batch_copy(grpc_metadata_batch* src, + grpc_metadata_batch* dst, + grpc_linked_mdelem* storage) { + grpc_metadata_batch_init(dst); + dst->deadline = src->deadline; + size_t i = 0; + for (grpc_linked_mdelem* elem = src->list.head; elem != nullptr; + elem = elem->next) { + grpc_error* error = grpc_metadata_batch_add_tail(dst, &storage[i++], + GRPC_MDELEM_REF(elem->md)); + // The only way that grpc_metadata_batch_add_tail() can fail is if + // there's a duplicate entry for a callout. However, that can't be + // the case here, because we would not have been allowed to create + // a source batch that had that kind of conflict. + GPR_ASSERT(error == GRPC_ERROR_NONE); + } +} + +void grpc_metadata_batch_move(grpc_metadata_batch* src, + grpc_metadata_batch* dst) { + *dst = *src; + grpc_metadata_batch_init(src); +} diff --git a/Sources/CgRPC/src/core/lib/transport/metadata_batch.h b/Sources/CgRPC/src/core/lib/transport/metadata_batch.h index 57d298c75..7068750b6 100644 --- a/Sources/CgRPC/src/core/lib/transport/metadata_batch.h +++ b/Sources/CgRPC/src/core/lib/transport/metadata_batch.h @@ -19,31 +19,29 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_METADATA_BATCH_H #define GRPC_CORE_LIB_TRANSPORT_METADATA_BATCH_H +#include + #include #include #include -#include #include +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/transport/metadata.h" #include "src/core/lib/transport/static_metadata.h" -#ifdef __cplusplus -extern "C" { -#endif - typedef struct grpc_linked_mdelem { grpc_mdelem md; - struct grpc_linked_mdelem *next; - struct grpc_linked_mdelem *prev; - void *reserved; + struct grpc_linked_mdelem* next; + struct grpc_linked_mdelem* prev; + void* reserved; } grpc_linked_mdelem; typedef struct grpc_mdelem_list { size_t count; size_t default_count; // Number of default keys. 
- grpc_linked_mdelem *head; - grpc_linked_mdelem *tail; + grpc_linked_mdelem* head; + grpc_linked_mdelem* tail; } grpc_mdelem_list; typedef struct grpc_metadata_batch { @@ -51,34 +49,29 @@ typedef struct grpc_metadata_batch { grpc_mdelem_list list; grpc_metadata_batch_callouts idx; /** Used to calculate grpc-timeout at the point of sending, - or gpr_inf_future if this batch does not need to send a + or GRPC_MILLIS_INF_FUTURE if this batch does not need to send a grpc-timeout */ - gpr_timespec deadline; + grpc_millis deadline; } grpc_metadata_batch; -void grpc_metadata_batch_init(grpc_metadata_batch *batch); -void grpc_metadata_batch_destroy(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch); -void grpc_metadata_batch_clear(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch); -bool grpc_metadata_batch_is_empty(grpc_metadata_batch *batch); +void grpc_metadata_batch_init(grpc_metadata_batch* batch); +void grpc_metadata_batch_destroy(grpc_metadata_batch* batch); +void grpc_metadata_batch_clear(grpc_metadata_batch* batch); +bool grpc_metadata_batch_is_empty(grpc_metadata_batch* batch); /* Returns the transport size of the batch. */ -size_t grpc_metadata_batch_size(grpc_metadata_batch *batch); +size_t grpc_metadata_batch_size(grpc_metadata_batch* batch); /** Remove \a storage from the batch, unreffing the mdelem contained */ -void grpc_metadata_batch_remove(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, - grpc_linked_mdelem *storage); +void grpc_metadata_batch_remove(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage); /** Substitute a new mdelem for an old value */ -grpc_error *grpc_metadata_batch_substitute(grpc_exec_ctx *exec_ctx, - grpc_metadata_batch *batch, - grpc_linked_mdelem *storage, +grpc_error* grpc_metadata_batch_substitute(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage, grpc_mdelem new_value); -void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx, - grpc_linked_mdelem *storage, +void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage, grpc_slice value); /** Add \a storage to the beginning of \a batch. storage->md is @@ -86,17 +79,17 @@ void grpc_metadata_batch_set_value(grpc_exec_ctx *exec_ctx, \a storage is owned by the caller and must survive for the lifetime of batch. This usually means it should be around for the lifetime of the call. */ -grpc_error *grpc_metadata_batch_link_head( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) GRPC_MUST_USE_RESULT; +grpc_error* grpc_metadata_batch_link_head(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage) + GRPC_MUST_USE_RESULT; /** Add \a storage to the end of \a batch. storage->md is assumed to be valid. \a storage is owned by the caller and must survive for the lifetime of batch. This usually means it should be around for the lifetime of the call. */ -grpc_error *grpc_metadata_batch_link_tail( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_linked_mdelem *storage) GRPC_MUST_USE_RESULT; +grpc_error* grpc_metadata_batch_link_tail(grpc_metadata_batch* batch, + grpc_linked_mdelem* storage) + GRPC_MUST_USE_RESULT; /** Add \a elem_to_add as the first element in \a batch, using \a storage as backing storage for the linked list element. @@ -104,49 +97,54 @@ grpc_error *grpc_metadata_batch_link_tail( lifetime of batch. This usually means it should be around for the lifetime of the call. 
Takes ownership of \a elem_to_add */ -grpc_error *grpc_metadata_batch_add_head( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT; +grpc_error* grpc_metadata_batch_add_head( + grpc_metadata_batch* batch, grpc_linked_mdelem* storage, + grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT; /** Add \a elem_to_add as the last element in \a batch, using \a storage as backing storage for the linked list element. \a storage is owned by the caller and must survive for the lifetime of batch. This usually means it should be around for the lifetime of the call. Takes ownership of \a elem_to_add */ -grpc_error *grpc_metadata_batch_add_tail( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_linked_mdelem *storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT; +grpc_error* grpc_metadata_batch_add_tail( + grpc_metadata_batch* batch, grpc_linked_mdelem* storage, + grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT; -grpc_error *grpc_attach_md_to_error(grpc_error *src, grpc_mdelem md); +grpc_error* grpc_attach_md_to_error(grpc_error* src, grpc_mdelem md); typedef struct { - grpc_error *error; + grpc_error* error; grpc_mdelem md; } grpc_filtered_mdelem; #define GRPC_FILTERED_ERROR(error) \ - ((grpc_filtered_mdelem){(error), GRPC_MDNULL}) -#define GRPC_FILTERED_MDELEM(md) ((grpc_filtered_mdelem){GRPC_ERROR_NONE, (md)}) + { (error), GRPC_MDNULL } +#define GRPC_FILTERED_MDELEM(md) \ + { GRPC_ERROR_NONE, (md) } #define GRPC_FILTERED_REMOVE() \ - ((grpc_filtered_mdelem){GRPC_ERROR_NONE, GRPC_MDNULL}) + { GRPC_ERROR_NONE, GRPC_MDNULL } typedef grpc_filtered_mdelem (*grpc_metadata_batch_filter_func)( - grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem elem); -grpc_error *grpc_metadata_batch_filter( - grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch, - grpc_metadata_batch_filter_func func, void *user_data, - const char *composite_error_string) GRPC_MUST_USE_RESULT; + void* user_data, grpc_mdelem elem); +grpc_error* grpc_metadata_batch_filter( + grpc_metadata_batch* batch, grpc_metadata_batch_filter_func func, + void* user_data, const char* composite_error_string) GRPC_MUST_USE_RESULT; #ifndef NDEBUG -void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd); +void grpc_metadata_batch_assert_ok(grpc_metadata_batch* comd); #else #define grpc_metadata_batch_assert_ok(comd) \ do { \ } while (0) #endif -#ifdef __cplusplus -} -#endif +/// Copies \a src to \a dst. \a storage must point to an array of +/// \a grpc_linked_mdelem structs of at least the same size as \a src. +void grpc_metadata_batch_copy(grpc_metadata_batch* src, + grpc_metadata_batch* dst, + grpc_linked_mdelem* storage); + +void grpc_metadata_batch_move(grpc_metadata_batch* src, + grpc_metadata_batch* dst); #endif /* GRPC_CORE_LIB_TRANSPORT_METADATA_BATCH_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/pid_controller.c b/Sources/CgRPC/src/core/lib/transport/pid_controller.c deleted file mode 100644 index 4b304f17b..000000000 --- a/Sources/CgRPC/src/core/lib/transport/pid_controller.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
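For orientation, the metadata_batch filter callback above no longer threads a grpc_exec_ctx through. A minimal caller-side sketch against the signatures in this hunk follows; the "x-debug-info" key and the drop_debug_md() helper are hypothetical, for illustration only, not part of the gRPC change.

    // Sketch: a grpc_metadata_batch_filter_func that drops a hypothetical
    // "x-debug-info" element and keeps everything else unchanged.
    static grpc_filtered_mdelem drop_debug_md(void* /*user_data*/,
                                              grpc_mdelem md) {
      if (grpc_slice_eq(GRPC_MDKEY(md),
                        grpc_slice_from_static_string("x-debug-info"))) {
        return GRPC_FILTERED_REMOVE();  // batch unlinks and unrefs the element
      }
      return GRPC_FILTERED_MDELEM(md);  // keep the element as-is
    }

    // ...with `batch` already populated by the transport:
    grpc_error* err = grpc_metadata_batch_filter(
        batch, drop_debug_md, /*user_data=*/nullptr, "x-debug-info filter");
    if (err != GRPC_ERROR_NONE) GRPC_ERROR_UNREF(err);  // or propagate it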
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/transport/pid_controller.h" -#include - -void grpc_pid_controller_init(grpc_pid_controller *pid_controller, - grpc_pid_controller_args args) { - pid_controller->args = args; - pid_controller->last_control_value = args.initial_control_value; - grpc_pid_controller_reset(pid_controller); -} - -void grpc_pid_controller_reset(grpc_pid_controller *pid_controller) { - pid_controller->last_error = 0.0; - pid_controller->last_dc_dt = 0.0; - pid_controller->error_integral = 0.0; -} - -double grpc_pid_controller_update(grpc_pid_controller *pid_controller, - double error, double dt) { - if (dt == 0) return pid_controller->last_control_value; - /* integrate error using the trapezoid rule */ - pid_controller->error_integral += - dt * (pid_controller->last_error + error) * 0.5; - pid_controller->error_integral = GPR_CLAMP( - pid_controller->error_integral, -pid_controller->args.integral_range, - pid_controller->args.integral_range); - double diff_error = (error - pid_controller->last_error) / dt; - /* calculate derivative of control value vs time */ - double dc_dt = pid_controller->args.gain_p * error + - pid_controller->args.gain_i * pid_controller->error_integral + - pid_controller->args.gain_d * diff_error; - /* and perform trapezoidal integration */ - double new_control_value = pid_controller->last_control_value + - dt * (pid_controller->last_dc_dt + dc_dt) * 0.5; - new_control_value = - GPR_CLAMP(new_control_value, pid_controller->args.min_control_value, - pid_controller->args.max_control_value); - pid_controller->last_error = error; - pid_controller->last_dc_dt = dc_dt; - pid_controller->last_control_value = new_control_value; - return new_control_value; -} - -double grpc_pid_controller_last(grpc_pid_controller *pid_controller) { - return pid_controller->last_control_value; -} diff --git a/Sources/CgRPC/src/core/lib/transport/pid_controller.cc b/Sources/CgRPC/src/core/lib/transport/pid_controller.cc new file mode 100644 index 000000000..dbc98f491 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/transport/pid_controller.cc @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/lib/transport/pid_controller.h" + +#include "src/core/lib/gpr/useful.h" + +namespace grpc_core { + +PidController::PidController(const Args& args) + : last_control_value_(args.initial_control_value()), args_(args) {} + +double PidController::Update(double error, double dt) { + if (dt <= 0) return last_control_value_; + /* integrate error using the trapezoid rule */ + error_integral_ += dt * (last_error_ + error) * 0.5; + error_integral_ = GPR_CLAMP(error_integral_, -args_.integral_range(), + args_.integral_range()); + double diff_error = (error - last_error_) / dt; + /* calculate derivative of control value vs time */ + double dc_dt = args_.gain_p() * error + args_.gain_i() * error_integral_ + + args_.gain_d() * diff_error; + /* and perform trapezoidal integration */ + double new_control_value = + last_control_value_ + dt * (last_dc_dt_ + dc_dt) * 0.5; + new_control_value = GPR_CLAMP(new_control_value, args_.min_control_value(), + args_.max_control_value()); + last_error_ = error; + last_dc_dt_ = dc_dt; + last_control_value_ = new_control_value; + return new_control_value; +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/transport/pid_controller.h b/Sources/CgRPC/src/core/lib/transport/pid_controller.h index 9352b2643..e26205bf2 100644 --- a/Sources/CgRPC/src/core/lib/transport/pid_controller.h +++ b/Sources/CgRPC/src/core/lib/transport/pid_controller.h @@ -19,6 +19,10 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H #define GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H +#include + +#include + /* \file Simple PID controller. Implements a proportional-integral-derivative controller. Used when we want to iteratively control a variable to converge some other @@ -26,37 +30,87 @@ Gains can be set to adjust sensitivity to current error (p), the integral of error (i), and the derivative of error (d). 
*/ -typedef struct { - double gain_p; - double gain_i; - double gain_d; - double initial_control_value; - double min_control_value; - double max_control_value; - double integral_range; -} grpc_pid_controller_args; - -typedef struct { - double last_error; - double error_integral; - double last_control_value; - double last_dc_dt; - grpc_pid_controller_args args; -} grpc_pid_controller; - -/** Initialize the controller */ -void grpc_pid_controller_init(grpc_pid_controller *pid_controller, - grpc_pid_controller_args args); - -/** Reset the controller: useful when things have changed significantly */ -void grpc_pid_controller_reset(grpc_pid_controller *pid_controller); - -/** Update the controller: given a current error estimate, and the time since - the last update, returns a new control value */ -double grpc_pid_controller_update(grpc_pid_controller *pid_controller, - double error, double dt); - -/** Returns the last control value calculated */ -double grpc_pid_controller_last(grpc_pid_controller *pid_controller); +namespace grpc_core { + +class PidController { + public: + class Args { + public: + double gain_p() const { return gain_p_; } + double gain_i() const { return gain_i_; } + double gain_d() const { return gain_d_; } + double initial_control_value() const { return initial_control_value_; } + double min_control_value() const { return min_control_value_; } + double max_control_value() const { return max_control_value_; } + double integral_range() const { return integral_range_; } + + Args& set_gain_p(double gain_p) { + gain_p_ = gain_p; + return *this; + } + Args& set_gain_i(double gain_i) { + gain_i_ = gain_i; + return *this; + } + Args& set_gain_d(double gain_d) { + gain_d_ = gain_d; + return *this; + } + Args& set_initial_control_value(double initial_control_value) { + initial_control_value_ = initial_control_value; + return *this; + } + Args& set_min_control_value(double min_control_value) { + min_control_value_ = min_control_value; + return *this; + } + Args& set_max_control_value(double max_control_value) { + max_control_value_ = max_control_value; + return *this; + } + Args& set_integral_range(double integral_range) { + integral_range_ = integral_range; + return *this; + } + + private: + double gain_p_ = 0.0; + double gain_i_ = 0.0; + double gain_d_ = 0.0; + double initial_control_value_ = 0.0; + double min_control_value_ = std::numeric_limits<double>::min(); + double max_control_value_ = std::numeric_limits<double>::max(); + double integral_range_ = std::numeric_limits<double>::max(); + }; + + explicit PidController(const Args& args); + + /// Reset the controller internal state: useful when the environment has + /// changed significantly + void Reset() { + last_error_ = 0.0; + last_dc_dt_ = 0.0; + error_integral_ = 0.0; + } + + /// Update the controller: given a current error estimate, and the time since + /// the last update, returns a new control value + double Update(double error, double dt); + + /// Returns the last control value calculated + double last_control_value() const { return last_control_value_; } + + /// Returns the current error integral (mostly for testing) + double error_integral() const { return error_integral_; } + + private: + double last_error_ = 0.0; + double error_integral_ = 0.0; + double last_control_value_; + double last_dc_dt_ = 0.0; + const Args args_; +}; + +} // namespace grpc_core #endif /* GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/service_config.c b/Sources/CgRPC/src/core/lib/transport/service_config.c deleted file
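For orientation, the C pid-controller functions removed above are replaced by the grpc_core::PidController class declared in this hunk; a minimal usage sketch of the new fluent Args API follows. The gain values and the 0.1-second tick are arbitrary illustrations, not values taken from gRPC.

    #include "src/core/lib/transport/pid_controller.h"

    void ExamplePidLoop() {
      grpc_core::PidController controller(
          grpc_core::PidController::Args()
              .set_gain_p(4.0)
              .set_gain_i(8.0)
              .set_gain_d(0.0)
              .set_initial_control_value(1.0)
              .set_min_control_value(0.0)
              .set_max_control_value(10.0)
              .set_integral_range(10.0));
      const double set_point = 5.0;
      const double observed = 3.5;
      // error = set point - observed value; dt = seconds since the last update.
      double control = controller.Update(set_point - observed, /*dt=*/0.1);
      (void)control;  // feed back into whatever is being throttled
    }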
mode 100644 index 070a13a2b..000000000 --- a/Sources/CgRPC/src/core/lib/transport/service_config.c +++ /dev/null @@ -1,246 +0,0 @@ -// -// Copyright 2015 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "src/core/lib/transport/service_config.h" - -#include - -#include -#include -#include -#include - -#include "src/core/lib/json/json.h" -#include "src/core/lib/slice/slice_hash_table.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" - -// The main purpose of the code here is to parse the service config in -// JSON form, which will look like this: -// -// { -// "loadBalancingPolicy": "string", // optional -// "methodConfig": [ // array of one or more method_config objects -// { -// "name": [ // array of one or more name objects -// { -// "service": "string", // required -// "method": "string", // optional -// } -// ], -// // remaining fields are optional. -// // see https://developers.google.com/protocol-buffers/docs/proto3#json -// // for format details. -// "waitForReady": bool, -// "timeout": "duration_string", -// "maxRequestMessageBytes": "int64_string", -// "maxResponseMessageBytes": "int64_string", -// } -// ] -// } - -struct grpc_service_config { - char* json_string; // Underlying storage for json_tree. 
- grpc_json* json_tree; -}; - -grpc_service_config* grpc_service_config_create(const char* json_string) { - grpc_service_config* service_config = - (grpc_service_config*)gpr_malloc(sizeof(*service_config)); - service_config->json_string = gpr_strdup(json_string); - service_config->json_tree = - grpc_json_parse_string(service_config->json_string); - if (service_config->json_tree == NULL) { - gpr_log(GPR_INFO, "failed to parse JSON for service config"); - gpr_free(service_config->json_string); - gpr_free(service_config); - return NULL; - } - return service_config; -} - -void grpc_service_config_destroy(grpc_service_config* service_config) { - grpc_json_destroy(service_config->json_tree); - gpr_free(service_config->json_string); - gpr_free(service_config); -} - -void grpc_service_config_parse_global_params( - const grpc_service_config* service_config, - void (*process_json)(const grpc_json* json, void* arg), void* arg) { - const grpc_json* json = service_config->json_tree; - if (json->type != GRPC_JSON_OBJECT || json->key != NULL) return; - for (grpc_json* field = json->child; field != NULL; field = field->next) { - if (field->key == NULL) return; - if (strcmp(field->key, "methodConfig") == 0) continue; - process_json(field, arg); - } -} - -const char* grpc_service_config_get_lb_policy_name( - const grpc_service_config* service_config) { - const grpc_json* json = service_config->json_tree; - if (json->type != GRPC_JSON_OBJECT || json->key != NULL) return NULL; - const char* lb_policy_name = NULL; - for (grpc_json* field = json->child; field != NULL; field = field->next) { - if (field->key == NULL) return NULL; - if (strcmp(field->key, "loadBalancingPolicy") == 0) { - if (lb_policy_name != NULL) return NULL; // Duplicate. - if (field->type != GRPC_JSON_STRING) return NULL; - lb_policy_name = field->value; - } - } - return lb_policy_name; -} - -// Returns the number of names specified in the method config \a json. -static size_t count_names_in_method_config_json(grpc_json* json) { - size_t num_names = 0; - for (grpc_json* field = json->child; field != NULL; field = field->next) { - if (field->key != NULL && strcmp(field->key, "name") == 0) ++num_names; - } - return num_names; -} - -// Returns a path string for the JSON name object specified by \a json. -// Returns NULL on error. Caller takes ownership of result. -static char* parse_json_method_name(grpc_json* json) { - if (json->type != GRPC_JSON_OBJECT) return NULL; - const char* service_name = NULL; - const char* method_name = NULL; - for (grpc_json* child = json->child; child != NULL; child = child->next) { - if (child->key == NULL) return NULL; - if (child->type != GRPC_JSON_STRING) return NULL; - if (strcmp(child->key, "service") == 0) { - if (service_name != NULL) return NULL; // Duplicate. - if (child->value == NULL) return NULL; - service_name = child->value; - } else if (strcmp(child->key, "method") == 0) { - if (method_name != NULL) return NULL; // Duplicate. - if (child->value == NULL) return NULL; - method_name = child->value; - } - } - if (service_name == NULL) return NULL; // Required field. - char* path; - gpr_asprintf(&path, "/%s/%s", service_name, - method_name == NULL ? "*" : method_name); - return path; -} - -// Parses the method config from \a json. Adds an entry to \a entries for -// each name found, incrementing \a idx for each entry added. -// Returns false on error. 
-static bool parse_json_method_config( - grpc_exec_ctx* exec_ctx, grpc_json* json, - void* (*create_value)(const grpc_json* method_config_json), - grpc_slice_hash_table_entry* entries, size_t* idx) { - // Construct value. - void* method_config = create_value(json); - if (method_config == NULL) return false; - // Construct list of paths. - bool success = false; - gpr_strvec paths; - gpr_strvec_init(&paths); - for (grpc_json* child = json->child; child != NULL; child = child->next) { - if (child->key == NULL) continue; - if (strcmp(child->key, "name") == 0) { - if (child->type != GRPC_JSON_ARRAY) goto done; - for (grpc_json* name = child->child; name != NULL; name = name->next) { - char* path = parse_json_method_name(name); - gpr_strvec_add(&paths, path); - } - } - } - if (paths.count == 0) goto done; // No names specified. - // Add entry for each path. - for (size_t i = 0; i < paths.count; ++i) { - entries[*idx].key = grpc_slice_from_copied_string(paths.strs[i]); - entries[*idx].value = method_config; - ++*idx; - } - success = true; -done: - gpr_strvec_destroy(&paths); - return success; -} - -grpc_slice_hash_table* grpc_service_config_create_method_config_table( - grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config, - void* (*create_value)(const grpc_json* method_config_json), - void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value)) { - const grpc_json* json = service_config->json_tree; - // Traverse parsed JSON tree. - if (json->type != GRPC_JSON_OBJECT || json->key != NULL) return NULL; - size_t num_entries = 0; - grpc_slice_hash_table_entry* entries = NULL; - for (grpc_json* field = json->child; field != NULL; field = field->next) { - if (field->key == NULL) return NULL; - if (strcmp(field->key, "methodConfig") == 0) { - if (entries != NULL) return NULL; // Duplicate. - if (field->type != GRPC_JSON_ARRAY) return NULL; - // Find number of entries. - for (grpc_json* method = field->child; method != NULL; - method = method->next) { - num_entries += count_names_in_method_config_json(method); - } - // Populate method config table entries. - entries = (grpc_slice_hash_table_entry*)gpr_malloc( - num_entries * sizeof(grpc_slice_hash_table_entry)); - size_t idx = 0; - for (grpc_json* method = field->child; method != NULL; - method = method->next) { - if (!parse_json_method_config(exec_ctx, method, create_value, entries, - &idx)) { - return NULL; - } - } - GPR_ASSERT(idx == num_entries); - } - } - // Instantiate method config table. - grpc_slice_hash_table* method_config_table = NULL; - if (entries != NULL) { - method_config_table = - grpc_slice_hash_table_create(num_entries, entries, destroy_value, NULL); - gpr_free(entries); - } - return method_config_table; -} - -void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx, - const grpc_slice_hash_table* table, - grpc_slice path) { - void* value = grpc_slice_hash_table_get(table, path); - // If we didn't find a match for the path, try looking for a wildcard - // entry (i.e., change "/service/method" to "/service/*"). 
- if (value == NULL) { - char* path_str = grpc_slice_to_c_string(path); - const char* sep = strrchr(path_str, '/') + 1; - const size_t len = (size_t)(sep - path_str); - char* buf = (char*)gpr_malloc(len + 2); // '*' and NUL - memcpy(buf, path_str, len); - buf[len] = '*'; - buf[len + 1] = '\0'; - grpc_slice wildcard_path = grpc_slice_from_copied_string(buf); - gpr_free(buf); - value = grpc_slice_hash_table_get(table, wildcard_path); - grpc_slice_unref_internal(exec_ctx, wildcard_path); - gpr_free(path_str); - } - return value; -} diff --git a/Sources/CgRPC/src/core/lib/transport/service_config.cc b/Sources/CgRPC/src/core/lib/transport/service_config.cc new file mode 100644 index 000000000..e1a55d98a --- /dev/null +++ b/Sources/CgRPC/src/core/lib/transport/service_config.cc @@ -0,0 +1,106 @@ +// +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include + +#include "src/core/lib/transport/service_config.h" + +#include + +#include +#include +#include +#include + +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/json/json.h" +#include "src/core/lib/slice/slice_hash_table.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" + +namespace grpc_core { + +UniquePtr<ServiceConfig> ServiceConfig::Create(const char* json) { + UniquePtr<char> json_string(gpr_strdup(json)); + grpc_json* json_tree = grpc_json_parse_string(json_string.get()); + if (json_tree == nullptr) { + gpr_log(GPR_INFO, "failed to parse JSON for service config"); + return nullptr; + } + return MakeUnique<ServiceConfig>(std::move(json_string), json_tree); +} + +ServiceConfig::ServiceConfig(UniquePtr<char> json_string, grpc_json* json_tree) + : json_string_(std::move(json_string)), json_tree_(json_tree) {} + +ServiceConfig::~ServiceConfig() { grpc_json_destroy(json_tree_); } + +const char* ServiceConfig::GetLoadBalancingPolicyName() const { + if (json_tree_->type != GRPC_JSON_OBJECT || json_tree_->key != nullptr) { + return nullptr; + } + const char* lb_policy_name = nullptr; + for (grpc_json* field = json_tree_->child; field != nullptr; + field = field->next) { + if (field->key == nullptr) return nullptr; + if (strcmp(field->key, "loadBalancingPolicy") == 0) { + if (lb_policy_name != nullptr) return nullptr; // Duplicate.
+ if (field->type != GRPC_JSON_STRING) return nullptr; + lb_policy_name = field->value; + } + } + return lb_policy_name; +} + +size_t ServiceConfig::CountNamesInMethodConfig(grpc_json* json) { + size_t num_names = 0; + for (grpc_json* field = json->child; field != nullptr; field = field->next) { + if (field->key != nullptr && strcmp(field->key, "name") == 0) { + if (field->type != GRPC_JSON_ARRAY) return -1; + for (grpc_json* name = field->child; name != nullptr; name = name->next) { + if (name->type != GRPC_JSON_OBJECT) return -1; + ++num_names; + } + } + } + return num_names; +} + +UniquePtr<char> ServiceConfig::ParseJsonMethodName(grpc_json* json) { + if (json->type != GRPC_JSON_OBJECT) return nullptr; + const char* service_name = nullptr; + const char* method_name = nullptr; + for (grpc_json* child = json->child; child != nullptr; child = child->next) { + if (child->key == nullptr) return nullptr; + if (child->type != GRPC_JSON_STRING) return nullptr; + if (strcmp(child->key, "service") == 0) { + if (service_name != nullptr) return nullptr; // Duplicate. + if (child->value == nullptr) return nullptr; + service_name = child->value; + } else if (strcmp(child->key, "method") == 0) { + if (method_name != nullptr) return nullptr; // Duplicate. + if (child->value == nullptr) return nullptr; + method_name = child->value; + } + } + if (service_name == nullptr) return nullptr; // Required field. + char* path; + gpr_asprintf(&path, "/%s/%s", service_name, + method_name == nullptr ? "*" : method_name); + return UniquePtr<char>(path); +} + +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/lib/transport/service_config.h b/Sources/CgRPC/src/core/lib/transport/service_config.h index 84110abc3..a65b267d4 100644 --- a/Sources/CgRPC/src/core/lib/transport/service_config.h +++ b/Sources/CgRPC/src/core/lib/transport/service_config.h @@ -17,46 +17,233 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H #define GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H +#include + #include +#include +#include "src/core/lib/gprpp/inlined_vector.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/json/json.h" #include "src/core/lib/slice/slice_hash_table.h" -typedef struct grpc_service_config grpc_service_config; - -grpc_service_config* grpc_service_config_create(const char* json_string); -void grpc_service_config_destroy(grpc_service_config* service_config); - -/// Invokes \a process_json() for each global parameter in the service -/// config. \a arg is passed as the second argument to \a process_json(). -void grpc_service_config_parse_global_params( - const grpc_service_config* service_config, - void (*process_json)(const grpc_json* json, void* arg), void* arg); - -/// Gets the LB policy name from \a service_config. -/// Returns NULL if no LB policy name was specified. -/// Caller does NOT take ownership. -const char* grpc_service_config_get_lb_policy_name( - const grpc_service_config* service_config); - -/// Creates a method config table based on the data in \a json. -/// The table's keys are request paths. The table's value type is -/// returned by \a create_value(), based on data parsed from the JSON tree. -/// \a destroy_value is used to clean up values. -/// Returns NULL on error.
-grpc_slice_hash_table* grpc_service_config_create_method_config_table( - grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config, - void* (*create_value)(const grpc_json* method_config_json), - void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value)); - -/// A helper function for looking up values in the table returned by -/// \a grpc_service_config_create_method_config_table(). -/// Gets the method config for the specified \a path, which should be of -/// the form "/service/method". -/// Returns NULL if the method has no config. -/// Caller does NOT own a reference to the result. -void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx, - const grpc_slice_hash_table* table, - grpc_slice path); +// The main purpose of the code here is to parse the service config in +// JSON form, which will look like this: +// +// { +// "loadBalancingPolicy": "string", // optional +// "methodConfig": [ // array of one or more method_config objects +// { +// "name": [ // array of one or more name objects +// { +// "service": "string", // required +// "method": "string", // optional +// } +// ], +// // remaining fields are optional. +// // see +// https://developers.google.com/protocol-buffers/docs/proto3#json +// // for format details. +// "waitForReady": bool, +// "timeout": "duration_string", +// "maxRequestMessageBytes": "int64_string", +// "maxResponseMessageBytes": "int64_string", +// } +// ] +// } + +namespace grpc_core { + +class ServiceConfig { + public: + /// Creates a new service config from parsing \a json_string. + /// Returns null on parse error. + static UniquePtr<ServiceConfig> Create(const char* json); + + ~ServiceConfig(); + + /// Invokes \a process_json() for each global parameter in the service + /// config. \a arg is passed as the second argument to \a process_json(). + template <typename T> + using ProcessJson = void (*)(const grpc_json*, T*); + template <typename T> + void ParseGlobalParams(ProcessJson<T> process_json, T* arg) const; + + /// Gets the LB policy name from \a service_config. + /// Returns NULL if no LB policy name was specified. + /// Caller does NOT take ownership. + const char* GetLoadBalancingPolicyName() const; + + /// Creates a method config table based on the data in \a json. + /// The table's keys are request paths. The table's value type is + /// returned by \a create_value(), based on data parsed from the JSON tree. + /// Returns null on error. + template <typename T> + using CreateValue = RefCountedPtr<T> (*)(const grpc_json* method_config_json); + template <typename T> + RefCountedPtr<SliceHashTable<RefCountedPtr<T>>> CreateMethodConfigTable( + CreateValue<T> create_value); + + /// A helper function for looking up values in the table returned by + /// \a CreateMethodConfigTable(). + /// Gets the method config for the specified \a path, which should be of + /// the form "/service/method". + /// Returns null if the method has no config. + /// Caller does NOT own a reference to the result. + template <typename T> + static RefCountedPtr<T> MethodConfigTableLookup( + const SliceHashTable<RefCountedPtr<T>>& table, grpc_slice path); + + private: + // So New() can call our private ctor. + template <typename T, typename... Args> + friend T* New(Args&&... args); + + // Takes ownership of \a json_tree. + ServiceConfig(UniquePtr<char> json_string, grpc_json* json_tree); + + // Returns the number of names specified in the method config \a json. + static size_t CountNamesInMethodConfig(grpc_json* json); + + // Returns a path string for the JSON name object specified by \a json. + // Returns null on error. + static UniquePtr<char> ParseJsonMethodName(grpc_json* json); + + // Parses the method config from \a json.
Adds an entry to \a entries for + // each name found, incrementing \a idx for each entry added. + // Returns false on error. + template <typename T> + static bool ParseJsonMethodConfig( + grpc_json* json, CreateValue<T> create_value, + typename SliceHashTable<RefCountedPtr<T>>::Entry* entries, size_t* idx); + + UniquePtr<char> json_string_; // Underlying storage for json_tree. + grpc_json* json_tree_; +}; + +// +// implementation -- no user-serviceable parts below +// + +template <typename T> +void ServiceConfig::ParseGlobalParams(ProcessJson<T> process_json, + T* arg) const { + if (json_tree_->type != GRPC_JSON_OBJECT || json_tree_->key != nullptr) { + return; + } + for (grpc_json* field = json_tree_->child; field != nullptr; + field = field->next) { + if (field->key == nullptr) return; + if (strcmp(field->key, "methodConfig") == 0) continue; + process_json(field, arg); + } +} + +template <typename T> +bool ServiceConfig::ParseJsonMethodConfig( + grpc_json* json, CreateValue<T> create_value, + typename SliceHashTable<RefCountedPtr<T>>::Entry* entries, size_t* idx) { + // Construct value. + RefCountedPtr<T> method_config = create_value(json); + if (method_config == nullptr) return false; + // Construct list of paths. + InlinedVector<UniquePtr<char>, 10> paths; + for (grpc_json* child = json->child; child != nullptr; child = child->next) { + if (child->key == nullptr) continue; + if (strcmp(child->key, "name") == 0) { + if (child->type != GRPC_JSON_ARRAY) return false; + for (grpc_json* name = child->child; name != nullptr; name = name->next) { + UniquePtr<char> path = ParseJsonMethodName(name); + if (path == nullptr) return false; + paths.push_back(std::move(path)); + } + } + } + if (paths.size() == 0) return false; // No names specified. + // Add entry for each path. + for (size_t i = 0; i < paths.size(); ++i) { + entries[*idx].key = grpc_slice_from_copied_string(paths[i].get()); + entries[*idx].value = method_config; // Takes a new ref. + ++*idx; + } + // Success. + return true; +} + +template <typename T> +RefCountedPtr<SliceHashTable<RefCountedPtr<T>>> +ServiceConfig::CreateMethodConfigTable(CreateValue<T> create_value) { + // Traverse parsed JSON tree. + if (json_tree_->type != GRPC_JSON_OBJECT || json_tree_->key != nullptr) { + return nullptr; + } + size_t num_entries = 0; + typename SliceHashTable<RefCountedPtr<T>>::Entry* entries = nullptr; + for (grpc_json* field = json_tree_->child; field != nullptr; + field = field->next) { + if (field->key == nullptr) return nullptr; + if (strcmp(field->key, "methodConfig") == 0) { + if (entries != nullptr) return nullptr; // Duplicate. + if (field->type != GRPC_JSON_ARRAY) return nullptr; + // Find number of entries. + for (grpc_json* method = field->child; method != nullptr; + method = method->next) { + size_t count = CountNamesInMethodConfig(method); + if (count <= 0) return nullptr; + num_entries += count; + } + // Populate method config table entries. + entries = static_cast<typename SliceHashTable<RefCountedPtr<T>>::Entry*>( + gpr_zalloc(num_entries * + sizeof(typename SliceHashTable<RefCountedPtr<T>>::Entry))); + size_t idx = 0; + for (grpc_json* method = field->child; method != nullptr; + method = method->next) { + if (!ParseJsonMethodConfig(method, create_value, entries, &idx)) { + for (size_t i = 0; i < idx; ++i) { + grpc_slice_unref_internal(entries[i].key); + entries[i].value.reset(); + } + gpr_free(entries); + return nullptr; + } + } + GPR_ASSERT(idx == num_entries); + } + } + // Instantiate method config table.
+ RefCountedPtr<SliceHashTable<RefCountedPtr<T>>> method_config_table; + if (entries != nullptr) { + method_config_table = + SliceHashTable<RefCountedPtr<T>>::Create(num_entries, entries, nullptr); + gpr_free(entries); + } + return method_config_table; +} + +template <typename T> +RefCountedPtr<T> ServiceConfig::MethodConfigTableLookup( + const SliceHashTable<RefCountedPtr<T>>& table, grpc_slice path) { + const RefCountedPtr<T>* value = table.Get(path); + // If we didn't find a match for the path, try looking for a wildcard + // entry (i.e., change "/service/method" to "/service/*"). + if (value == nullptr) { + char* path_str = grpc_slice_to_c_string(path); + const char* sep = strrchr(path_str, '/') + 1; + const size_t len = (size_t)(sep - path_str); + char* buf = (char*)gpr_malloc(len + 2); // '*' and NUL + memcpy(buf, path_str, len); + buf[len] = '*'; + buf[len + 1] = '\0'; + grpc_slice wildcard_path = grpc_slice_from_copied_string(buf); + gpr_free(buf); + value = table.Get(wildcard_path); + grpc_slice_unref_internal(wildcard_path); + gpr_free(path_str); + } + return RefCountedPtr<T>(*value); +} + +} // namespace grpc_core #endif /* GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/static_metadata.c b/Sources/CgRPC/src/core/lib/transport/static_metadata.c deleted file mode 100644 index 472cf888e..000000000 --- a/Sources/CgRPC/src/core/lib/transport/static_metadata.c +++ /dev/null @@ -1,582 +0,0 @@ -/* - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * WARNING: Auto-generated code. - * - * To make changes to this file, change - * tools/codegen/core/gen_static_metadata.py, and then re-run it. - * - * See metadata.h for an explanation of the interface here, and metadata.c for - * an explanation of what's going on.
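For orientation, the C service-config helpers removed above are replaced by the grpc_core::ServiceConfig class shown in this hunk; a minimal usage sketch of parsing a config and reading its LB policy name follows. The JSON literal and the "round_robin" policy value are illustrative only, and the gpr_log call mirrors the one used inside Create().

    #include <grpc/support/log.h>

    #include "src/core/lib/transport/service_config.h"

    void InspectServiceConfig() {
      const char* json =
          "{\"loadBalancingPolicy\": \"round_robin\", \"methodConfig\": []}";
      grpc_core::UniquePtr<grpc_core::ServiceConfig> config =
          grpc_core::ServiceConfig::Create(json);
      if (config == nullptr) return;  // Create() returns null on parse error.
      // The returned string is owned by the ServiceConfig; do not free it.
      const char* lb_policy = config->GetLoadBalancingPolicyName();
      gpr_log(GPR_INFO, "lb policy: %s",
              lb_policy == nullptr ? "(none)" : lb_policy);
    }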
- */ - -#include "src/core/lib/transport/static_metadata.h" - -#include "src/core/lib/slice/slice_internal.h" - -static uint8_t g_bytes[] = { - 58, 112, 97, 116, 104, 58, 109, 101, 116, 104, 111, 100, 58, 115, 116, - 97, 116, 117, 115, 58, 97, 117, 116, 104, 111, 114, 105, 116, 121, 58, - 115, 99, 104, 101, 109, 101, 116, 101, 103, 114, 112, 99, 45, 109, 101, - 115, 115, 97, 103, 101, 103, 114, 112, 99, 45, 115, 116, 97, 116, 117, - 115, 103, 114, 112, 99, 45, 112, 97, 121, 108, 111, 97, 100, 45, 98, - 105, 110, 103, 114, 112, 99, 45, 101, 110, 99, 111, 100, 105, 110, 103, - 103, 114, 112, 99, 45, 97, 99, 99, 101, 112, 116, 45, 101, 110, 99, - 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 115, 101, 114, 118, 101, - 114, 45, 115, 116, 97, 116, 115, 45, 98, 105, 110, 103, 114, 112, 99, - 45, 116, 97, 103, 115, 45, 98, 105, 110, 103, 114, 112, 99, 45, 116, - 114, 97, 99, 101, 45, 98, 105, 110, 99, 111, 110, 116, 101, 110, 116, - 45, 116, 121, 112, 101, 99, 111, 110, 116, 101, 110, 116, 45, 101, 110, - 99, 111, 100, 105, 110, 103, 97, 99, 99, 101, 112, 116, 45, 101, 110, - 99, 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 105, 110, 116, 101, - 114, 110, 97, 108, 45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114, - 101, 113, 117, 101, 115, 116, 103, 114, 112, 99, 45, 105, 110, 116, 101, - 114, 110, 97, 108, 45, 115, 116, 114, 101, 97, 109, 45, 101, 110, 99, - 111, 100, 105, 110, 103, 45, 114, 101, 113, 117, 101, 115, 116, 117, 115, - 101, 114, 45, 97, 103, 101, 110, 116, 104, 111, 115, 116, 108, 98, 45, - 116, 111, 107, 101, 110, 103, 114, 112, 99, 45, 116, 105, 109, 101, 111, - 117, 116, 103, 114, 112, 99, 46, 119, 97, 105, 116, 95, 102, 111, 114, - 95, 114, 101, 97, 100, 121, 103, 114, 112, 99, 46, 116, 105, 109, 101, - 111, 117, 116, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 113, - 117, 101, 115, 116, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98, 121, - 116, 101, 115, 103, 114, 112, 99, 46, 109, 97, 120, 95, 114, 101, 115, - 112, 111, 110, 115, 101, 95, 109, 101, 115, 115, 97, 103, 101, 95, 98, - 121, 116, 101, 115, 47, 103, 114, 112, 99, 46, 108, 98, 46, 118, 49, - 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99, 101, 114, 47, 66, - 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 48, 49, 50, 105, 100, - 101, 110, 116, 105, 116, 121, 103, 122, 105, 112, 100, 101, 102, 108, 97, - 116, 101, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, 105, - 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 80, 79, 83, 84, - 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104, 116, 116, 112, 115, - 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47, 47, 105, 110, 100, - 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50, 48, 54, 51, 48, - 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101, 112, 116, 45, 99, - 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44, 32, 100, 101, 102, - 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, 103, - 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, 103, - 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, 45, - 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, 111, - 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, 117, - 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, 104, - 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, 110, - 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, 111, - 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 99, - 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99, 111, - 110, 
116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 99, - 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111, 111, - 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112, 101, - 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105, 102, - 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102, 105, - 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110, 101, - 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101, 105, - 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, - 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105, 101, - 100, 108, 98, 45, 99, 111, 115, 116, 45, 98, 105, 110, 108, 105, 110, - 107, 108, 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, 111, - 114, 119, 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, 116, - 104, 101, 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, 45, - 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, 97, - 110, 103, 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, 101, - 115, 104, 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, 101, - 114, 118, 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, 115, - 116, 114, 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, 116, - 45, 115, 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, 102, - 101, 114, 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, 121, - 118, 105, 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, 105, - 99, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, - 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 103, - 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112, - 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116, - 101, 44, 103, 122, 105, 112}; - -static void static_ref(void *unused) {} -static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {} -static const grpc_slice_refcount_vtable static_sub_vtable = { - static_ref, static_unref, grpc_slice_default_eq_impl, - grpc_slice_default_hash_impl}; -const grpc_slice_refcount_vtable grpc_static_metadata_vtable = { - static_ref, static_unref, grpc_static_slice_eq, grpc_static_slice_hash}; -static grpc_slice_refcount static_sub_refcnt = {&static_sub_vtable, - &static_sub_refcnt}; -grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = { - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - 
{&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, 
&static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, - {&grpc_static_metadata_vtable, &static_sub_refcnt}, -}; - -const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = { - {&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}}, - {&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}}, - {&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, - {&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}}, - {&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}}, - {&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}}, - {&grpc_static_metadata_refcounts[6], {{g_bytes + 38, 12}}}, - {&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}}, - {&grpc_static_metadata_refcounts[8], {{g_bytes + 61, 16}}}, - {&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}}, - {&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, - {&grpc_static_metadata_refcounts[11], {{g_bytes + 110, 21}}}, - {&grpc_static_metadata_refcounts[12], {{g_bytes + 131, 13}}}, - {&grpc_static_metadata_refcounts[13], {{g_bytes + 144, 14}}}, - {&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}}, - {&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}}, - {&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, - {&grpc_static_metadata_refcounts[17], {{g_bytes + 201, 30}}}, - {&grpc_static_metadata_refcounts[18], {{g_bytes + 231, 37}}}, - {&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}}, - {&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}}, - {&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}}, - {&grpc_static_metadata_refcounts[22], {{g_bytes + 290, 12}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}, - {&grpc_static_metadata_refcounts[24], {{g_bytes + 302, 19}}}, - {&grpc_static_metadata_refcounts[25], {{g_bytes + 321, 12}}}, - {&grpc_static_metadata_refcounts[26], {{g_bytes + 333, 30}}}, - {&grpc_static_metadata_refcounts[27], {{g_bytes + 363, 31}}}, - {&grpc_static_metadata_refcounts[28], {{g_bytes + 394, 36}}}, - {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}}, - {&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}}, - {&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}}, - {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}, - {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}, - {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}, - {&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}}, - {&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}}, - {&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}}, - {&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}}, - {&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}}, - {&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}}, - {&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}}, - {&grpc_static_metadata_refcounts[42], {{g_bytes + 
495, 4}}}, - {&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}}, - {&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}}, - {&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}}, - {&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}}, - {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}}, - {&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}}, - {&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}}, - {&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}}, - {&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}}, - {&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}}, - {&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}}, - {&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}}, - {&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}}, - {&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}}, - {&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}}, - {&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}}, - {&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}}, - {&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}}, - {&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}}, - {&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}}, - {&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}}, - {&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}}, - {&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}}, - {&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}}, - {&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}}, - {&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}}, - {&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}}, - {&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}}, - {&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}}, - {&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}}, - {&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}}, - {&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}}, - {&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}}, - {&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}}, - {&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}}, - {&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}}, - {&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}}, - {&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}}, - {&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}}, - {&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}}, - {&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}}, - {&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}}, - {&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}}, - {&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}}, - {&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}}, - {&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}}, - {&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}}, - {&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}}, - {&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}}, - {&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}}, - {&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}}, - {&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}}, - {&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}}, - {&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}}, - {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}, - {&grpc_static_metadata_refcounts[98], 
{{g_bytes + 1053, 12}}}, - {&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}}, -}; - -uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4}; - -static const int8_t elems_r[] = { - 11, 9, -3, 0, 10, 27, -74, 28, 0, 14, -7, 0, 0, 0, 18, 8, -2, - 0, 0, 13, 12, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, -50, 0, -33, -55, -56, -57, -58, -57, 0, 40, 39, 38, 37, 36, 35, 34, - 33, 32, 31, 30, 29, 28, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 22, - 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 12, 11, 0}; -static uint32_t elems_phash(uint32_t i) { - i -= 45; - uint32_t x = i % 98; - uint32_t y = i / 98; - uint32_t h = x; - if (y < GPR_ARRAY_SIZE(elems_r)) { - uint32_t delta = (uint32_t)elems_r[y]; - h += delta; - } - return h; -} - -static const uint16_t elem_keys[] = { - 1032, 1033, 1034, 247, 248, 249, 250, 251, 1623, 143, 144, 45, - 46, 440, 441, 442, 1523, 1632, 1633, 932, 933, 934, 729, 730, - 1423, 1532, 1533, 535, 731, 1923, 2023, 2123, 5223, 5523, 5623, 5723, - 5823, 1436, 1653, 5923, 6023, 6123, 6223, 6323, 6423, 6523, 6623, 6723, - 6823, 6923, 7023, 7123, 7223, 5423, 7323, 7423, 7523, 7623, 7723, 7823, - 7923, 8023, 8123, 8223, 1096, 1097, 1098, 1099, 8323, 8423, 8523, 8623, - 8723, 8823, 8923, 9023, 9123, 9223, 9323, 323, 9423, 9523, 1697, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 137, 238, 239, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0}; -static const uint8_t elem_idxs[] = { - 76, 79, 77, 19, 20, 21, 22, 23, 25, 15, 16, 17, 18, 11, - 12, 13, 38, 83, 84, 3, 4, 5, 0, 1, 43, 36, 37, 6, - 2, 72, 50, 57, 24, 28, 29, 30, 31, 7, 26, 32, 33, 34, - 35, 39, 40, 41, 42, 44, 45, 46, 47, 48, 49, 27, 51, 52, - 53, 54, 55, 56, 58, 59, 60, 61, 78, 80, 81, 82, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 73, 14, 74, 75, 85, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 8, 9, 10}; - -grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) { - if (a == -1 || b == -1) return GRPC_MDNULL; - uint32_t k = (uint32_t)(a * 100 + b); - uint32_t h = elems_phash(k); - return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k && - elem_idxs[h] != 255 - ? 
GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[elem_idxs[h]], - GRPC_MDELEM_STORAGE_STATIC) - : GRPC_MDNULL; -} - -grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { - {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}}, - {&grpc_static_metadata_refcounts[29], {{g_bytes + 430, 1}}}}, - {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}}, - {&grpc_static_metadata_refcounts[30], {{g_bytes + 431, 1}}}}, - {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}}, - {&grpc_static_metadata_refcounts[31], {{g_bytes + 432, 1}}}}, - {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}}, - {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}}, - {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}}, - {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}}, - {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}}, - {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}}, - {{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}}, - {&grpc_static_metadata_refcounts[35], {{g_bytes + 452, 8}}}}, - {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}}, - {&grpc_static_metadata_refcounts[36], {{g_bytes + 460, 16}}}}, - {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}}, - {&grpc_static_metadata_refcounts[37], {{g_bytes + 476, 4}}}}, - {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, - {&grpc_static_metadata_refcounts[38], {{g_bytes + 480, 3}}}}, - {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, - {&grpc_static_metadata_refcounts[39], {{g_bytes + 483, 3}}}}, - {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}}, - {&grpc_static_metadata_refcounts[40], {{g_bytes + 486, 4}}}}, - {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}}, - {&grpc_static_metadata_refcounts[41], {{g_bytes + 490, 5}}}}, - {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}}, - {&grpc_static_metadata_refcounts[42], {{g_bytes + 495, 4}}}}, - {{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}}, - {&grpc_static_metadata_refcounts[43], {{g_bytes + 499, 3}}}}, - {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}}, - {&grpc_static_metadata_refcounts[44], {{g_bytes + 502, 3}}}}, - {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}}, - {&grpc_static_metadata_refcounts[45], {{g_bytes + 505, 1}}}}, - {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}}, - {&grpc_static_metadata_refcounts[46], {{g_bytes + 506, 11}}}}, - {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, - {&grpc_static_metadata_refcounts[47], {{g_bytes + 517, 3}}}}, - {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, - {&grpc_static_metadata_refcounts[48], {{g_bytes + 520, 3}}}}, - {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, - {&grpc_static_metadata_refcounts[49], {{g_bytes + 523, 3}}}}, - {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, - {&grpc_static_metadata_refcounts[50], {{g_bytes + 526, 3}}}}, - {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, - {&grpc_static_metadata_refcounts[51], {{g_bytes + 529, 3}}}}, - {{&grpc_static_metadata_refcounts[52], {{g_bytes + 532, 14}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, - 
{&grpc_static_metadata_refcounts[53], {{g_bytes + 546, 13}}}}, - {{&grpc_static_metadata_refcounts[54], {{g_bytes + 559, 15}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[55], {{g_bytes + 574, 13}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[56], {{g_bytes + 587, 6}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 27}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[58], {{g_bytes + 620, 3}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[59], {{g_bytes + 623, 5}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[60], {{g_bytes + 628, 13}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[61], {{g_bytes + 641, 13}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 19}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}}, - {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}}, - {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}}, - {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}}, - {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[63], {{g_bytes + 673, 16}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[64], {{g_bytes + 689, 14}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[65], {{g_bytes + 703, 16}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[66], {{g_bytes + 719, 13}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[67], {{g_bytes + 732, 6}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[68], {{g_bytes + 738, 4}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[69], {{g_bytes + 742, 4}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[70], {{g_bytes + 746, 6}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[71], {{g_bytes + 752, 7}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[72], {{g_bytes + 759, 4}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[73], {{g_bytes + 763, 8}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[74], {{g_bytes + 771, 17}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[75], {{g_bytes + 788, 13}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - 
{{&grpc_static_metadata_refcounts[76], {{g_bytes + 801, 8}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[77], {{g_bytes + 809, 19}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[78], {{g_bytes + 828, 13}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[79], {{g_bytes + 841, 11}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[80], {{g_bytes + 852, 4}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[81], {{g_bytes + 856, 8}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[82], {{g_bytes + 864, 12}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[83], {{g_bytes + 876, 18}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[84], {{g_bytes + 894, 19}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 5}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[86], {{g_bytes + 918, 7}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 7}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[88], {{g_bytes + 932, 11}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[89], {{g_bytes + 943, 6}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[90], {{g_bytes + 949, 10}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[91], {{g_bytes + 959, 25}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[92], {{g_bytes + 984, 17}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[93], {{g_bytes + 1001, 4}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1005, 3}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1008, 16}}}, - {&grpc_static_metadata_refcounts[23], {{g_bytes + 302, 0}}}}, - {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, - {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}}, - {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, - {&grpc_static_metadata_refcounts[34], {{g_bytes + 445, 7}}}}, - {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, - {&grpc_static_metadata_refcounts[96], {{g_bytes + 1024, 16}}}}, - {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, - {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}}, - {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, - {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}}, - {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, - 
{&grpc_static_metadata_refcounts[98], {{g_bytes + 1053, 12}}}}, - {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, - {&grpc_static_metadata_refcounts[99], {{g_bytes + 1065, 21}}}}, - {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, - {&grpc_static_metadata_refcounts[32], {{g_bytes + 433, 8}}}}, - {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, - {&grpc_static_metadata_refcounts[33], {{g_bytes + 441, 4}}}}, - {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, - {&grpc_static_metadata_refcounts[97], {{g_bytes + 1040, 13}}}}, -}; -bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = { - true, // :path - true, // :method - true, // :status - true, // :authority - true, // :scheme - true, // te - true, // grpc-message - true, // grpc-status - true, // grpc-payload-bin - true, // grpc-encoding - true, // grpc-accept-encoding - true, // grpc-server-stats-bin - true, // grpc-tags-bin - true, // grpc-trace-bin - true, // content-type - true, // content-encoding - true, // accept-encoding - true, // grpc-internal-encoding-request - true, // grpc-internal-stream-encoding-request - true, // user-agent - true, // host - true, // lb-token -}; - -const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78, - 79, 80, 81, 82}; - -const uint8_t grpc_static_accept_stream_encoding_metadata[4] = {0, 83, 84, 85}; diff --git a/Sources/CgRPC/src/core/lib/transport/static_metadata.cc b/Sources/CgRPC/src/core/lib/transport/static_metadata.cc new file mode 100644 index 000000000..6a5144f21 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/transport/static_metadata.cc @@ -0,0 +1,601 @@ +/* + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * WARNING: Auto-generated code. + * + * To make changes to this file, change + * tools/codegen/core/gen_static_metadata.py, and then re-run it. + * + * See metadata.h for an explanation of the interface here, and metadata.cc for + * an explanation of what's going on. 
+ */ + +#include + +#include "src/core/lib/transport/static_metadata.h" + +#include "src/core/lib/slice/slice_internal.h" + +static uint8_t g_bytes[] = { + 58, 112, 97, 116, 104, 58, 109, 101, 116, 104, 111, 100, 58, 115, 116, + 97, 116, 117, 115, 58, 97, 117, 116, 104, 111, 114, 105, 116, 121, 58, + 115, 99, 104, 101, 109, 101, 116, 101, 103, 114, 112, 99, 45, 109, 101, + 115, 115, 97, 103, 101, 103, 114, 112, 99, 45, 115, 116, 97, 116, 117, + 115, 103, 114, 112, 99, 45, 112, 97, 121, 108, 111, 97, 100, 45, 98, + 105, 110, 103, 114, 112, 99, 45, 101, 110, 99, 111, 100, 105, 110, 103, + 103, 114, 112, 99, 45, 97, 99, 99, 101, 112, 116, 45, 101, 110, 99, + 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 115, 101, 114, 118, 101, + 114, 45, 115, 116, 97, 116, 115, 45, 98, 105, 110, 103, 114, 112, 99, + 45, 116, 97, 103, 115, 45, 98, 105, 110, 103, 114, 112, 99, 45, 116, + 114, 97, 99, 101, 45, 98, 105, 110, 99, 111, 110, 116, 101, 110, 116, + 45, 116, 121, 112, 101, 99, 111, 110, 116, 101, 110, 116, 45, 101, 110, + 99, 111, 100, 105, 110, 103, 97, 99, 99, 101, 112, 116, 45, 101, 110, + 99, 111, 100, 105, 110, 103, 103, 114, 112, 99, 45, 105, 110, 116, 101, + 114, 110, 97, 108, 45, 101, 110, 99, 111, 100, 105, 110, 103, 45, 114, + 101, 113, 117, 101, 115, 116, 103, 114, 112, 99, 45, 105, 110, 116, 101, + 114, 110, 97, 108, 45, 115, 116, 114, 101, 97, 109, 45, 101, 110, 99, + 111, 100, 105, 110, 103, 45, 114, 101, 113, 117, 101, 115, 116, 117, 115, + 101, 114, 45, 97, 103, 101, 110, 116, 104, 111, 115, 116, 108, 98, 45, + 116, 111, 107, 101, 110, 103, 114, 112, 99, 45, 112, 114, 101, 118, 105, + 111, 117, 115, 45, 114, 112, 99, 45, 97, 116, 116, 101, 109, 112, 116, + 115, 103, 114, 112, 99, 45, 114, 101, 116, 114, 121, 45, 112, 117, 115, + 104, 98, 97, 99, 107, 45, 109, 115, 103, 114, 112, 99, 45, 116, 105, + 109, 101, 111, 117, 116, 49, 50, 51, 52, 103, 114, 112, 99, 46, 119, + 97, 105, 116, 95, 102, 111, 114, 95, 114, 101, 97, 100, 121, 103, 114, + 112, 99, 46, 116, 105, 109, 101, 111, 117, 116, 103, 114, 112, 99, 46, + 109, 97, 120, 95, 114, 101, 113, 117, 101, 115, 116, 95, 109, 101, 115, + 115, 97, 103, 101, 95, 98, 121, 116, 101, 115, 103, 114, 112, 99, 46, + 109, 97, 120, 95, 114, 101, 115, 112, 111, 110, 115, 101, 95, 109, 101, + 115, 115, 97, 103, 101, 95, 98, 121, 116, 101, 115, 47, 103, 114, 112, + 99, 46, 108, 98, 46, 118, 49, 46, 76, 111, 97, 100, 66, 97, 108, + 97, 110, 99, 101, 114, 47, 66, 97, 108, 97, 110, 99, 101, 76, 111, + 97, 100, 100, 101, 102, 108, 97, 116, 101, 103, 122, 105, 112, 115, 116, + 114, 101, 97, 109, 47, 103, 122, 105, 112, 48, 105, 100, 101, 110, 116, + 105, 116, 121, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, + 105, 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 80, 79, 83, + 84, 50, 48, 48, 52, 48, 52, 104, 116, 116, 112, 104, 116, 116, 112, + 115, 103, 114, 112, 99, 71, 69, 84, 80, 85, 84, 47, 47, 105, 110, + 100, 101, 120, 46, 104, 116, 109, 108, 50, 48, 52, 50, 48, 54, 51, + 48, 52, 52, 48, 48, 53, 48, 48, 97, 99, 99, 101, 112, 116, 45, + 99, 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44, 32, 100, 101, + 102, 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, + 103, 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, + 103, 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, + 45, 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, + 111, 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, + 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, + 
104, 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, + 110, 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, + 111, 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, + 99, 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99, + 111, 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, + 99, 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111, + 111, 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112, + 101, 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105, + 102, 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102, + 105, 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110, + 101, 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101, + 105, 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, + 105, 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105, + 101, 100, 108, 98, 45, 99, 111, 115, 116, 45, 98, 105, 110, 108, 105, + 110, 107, 108, 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, + 111, 114, 119, 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, + 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, + 45, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, + 97, 110, 103, 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, + 101, 115, 104, 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, + 101, 114, 118, 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, + 115, 116, 114, 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, + 116, 45, 115, 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, + 102, 101, 114, 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, + 121, 118, 105, 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, + 105, 99, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, + 101, 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, 105, 116, 121, 44, + 103, 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, + 112, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, + 116, 101, 44, 103, 122, 105, 112}; + +static void static_ref(void* unused) {} +static void static_unref(void* unused) {} +static const grpc_slice_refcount_vtable static_sub_vtable = { + static_ref, static_unref, grpc_slice_default_eq_impl, + grpc_slice_default_hash_impl}; +const grpc_slice_refcount_vtable grpc_static_metadata_vtable = { + static_ref, static_unref, grpc_static_slice_eq, grpc_static_slice_hash}; +static grpc_slice_refcount static_sub_refcnt = {&static_sub_vtable, + &static_sub_refcnt}; +grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = { + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + 
{&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, 
&static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, + {&grpc_static_metadata_vtable, &static_sub_refcnt}, +}; + +const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = { + {&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}}, + {&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}}, + {&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, + {&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}}, + {&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}}, + {&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}}, + {&grpc_static_metadata_refcounts[6], {{g_bytes + 38, 12}}}, + {&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}}, + {&grpc_static_metadata_refcounts[8], {{g_bytes + 61, 16}}}, + {&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}}, + {&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, + {&grpc_static_metadata_refcounts[11], {{g_bytes + 110, 21}}}, + {&grpc_static_metadata_refcounts[12], {{g_bytes + 131, 13}}}, + {&grpc_static_metadata_refcounts[13], {{g_bytes + 144, 14}}}, + {&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}}, + {&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}}, + {&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, + {&grpc_static_metadata_refcounts[17], {{g_bytes + 201, 30}}}, + {&grpc_static_metadata_refcounts[18], {{g_bytes + 231, 37}}}, + {&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}}, + {&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}}, + {&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}}, + {&grpc_static_metadata_refcounts[22], {{g_bytes + 290, 26}}}, + {&grpc_static_metadata_refcounts[23], {{g_bytes + 316, 22}}}, + {&grpc_static_metadata_refcounts[24], {{g_bytes + 338, 12}}}, + {&grpc_static_metadata_refcounts[25], {{g_bytes + 350, 1}}}, + {&grpc_static_metadata_refcounts[26], {{g_bytes + 351, 1}}}, + {&grpc_static_metadata_refcounts[27], {{g_bytes + 352, 1}}}, + {&grpc_static_metadata_refcounts[28], {{g_bytes + 353, 1}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}, + {&grpc_static_metadata_refcounts[30], {{g_bytes + 354, 19}}}, + {&grpc_static_metadata_refcounts[31], {{g_bytes + 373, 12}}}, + {&grpc_static_metadata_refcounts[32], {{g_bytes + 385, 30}}}, + {&grpc_static_metadata_refcounts[33], {{g_bytes + 415, 31}}}, + 
{&grpc_static_metadata_refcounts[34], {{g_bytes + 446, 36}}}, + {&grpc_static_metadata_refcounts[35], {{g_bytes + 482, 7}}}, + {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}}, + {&grpc_static_metadata_refcounts[37], {{g_bytes + 493, 11}}}, + {&grpc_static_metadata_refcounts[38], {{g_bytes + 504, 1}}}, + {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}}, + {&grpc_static_metadata_refcounts[40], {{g_bytes + 513, 8}}}, + {&grpc_static_metadata_refcounts[41], {{g_bytes + 521, 16}}}, + {&grpc_static_metadata_refcounts[42], {{g_bytes + 537, 4}}}, + {&grpc_static_metadata_refcounts[43], {{g_bytes + 541, 3}}}, + {&grpc_static_metadata_refcounts[44], {{g_bytes + 544, 3}}}, + {&grpc_static_metadata_refcounts[45], {{g_bytes + 547, 4}}}, + {&grpc_static_metadata_refcounts[46], {{g_bytes + 551, 5}}}, + {&grpc_static_metadata_refcounts[47], {{g_bytes + 556, 4}}}, + {&grpc_static_metadata_refcounts[48], {{g_bytes + 560, 3}}}, + {&grpc_static_metadata_refcounts[49], {{g_bytes + 563, 3}}}, + {&grpc_static_metadata_refcounts[50], {{g_bytes + 566, 1}}}, + {&grpc_static_metadata_refcounts[51], {{g_bytes + 567, 11}}}, + {&grpc_static_metadata_refcounts[52], {{g_bytes + 578, 3}}}, + {&grpc_static_metadata_refcounts[53], {{g_bytes + 581, 3}}}, + {&grpc_static_metadata_refcounts[54], {{g_bytes + 584, 3}}}, + {&grpc_static_metadata_refcounts[55], {{g_bytes + 587, 3}}}, + {&grpc_static_metadata_refcounts[56], {{g_bytes + 590, 3}}}, + {&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 14}}}, + {&grpc_static_metadata_refcounts[58], {{g_bytes + 607, 13}}}, + {&grpc_static_metadata_refcounts[59], {{g_bytes + 620, 15}}}, + {&grpc_static_metadata_refcounts[60], {{g_bytes + 635, 13}}}, + {&grpc_static_metadata_refcounts[61], {{g_bytes + 648, 6}}}, + {&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 27}}}, + {&grpc_static_metadata_refcounts[63], {{g_bytes + 681, 3}}}, + {&grpc_static_metadata_refcounts[64], {{g_bytes + 684, 5}}}, + {&grpc_static_metadata_refcounts[65], {{g_bytes + 689, 13}}}, + {&grpc_static_metadata_refcounts[66], {{g_bytes + 702, 13}}}, + {&grpc_static_metadata_refcounts[67], {{g_bytes + 715, 19}}}, + {&grpc_static_metadata_refcounts[68], {{g_bytes + 734, 16}}}, + {&grpc_static_metadata_refcounts[69], {{g_bytes + 750, 14}}}, + {&grpc_static_metadata_refcounts[70], {{g_bytes + 764, 16}}}, + {&grpc_static_metadata_refcounts[71], {{g_bytes + 780, 13}}}, + {&grpc_static_metadata_refcounts[72], {{g_bytes + 793, 6}}}, + {&grpc_static_metadata_refcounts[73], {{g_bytes + 799, 4}}}, + {&grpc_static_metadata_refcounts[74], {{g_bytes + 803, 4}}}, + {&grpc_static_metadata_refcounts[75], {{g_bytes + 807, 6}}}, + {&grpc_static_metadata_refcounts[76], {{g_bytes + 813, 7}}}, + {&grpc_static_metadata_refcounts[77], {{g_bytes + 820, 4}}}, + {&grpc_static_metadata_refcounts[78], {{g_bytes + 824, 8}}}, + {&grpc_static_metadata_refcounts[79], {{g_bytes + 832, 17}}}, + {&grpc_static_metadata_refcounts[80], {{g_bytes + 849, 13}}}, + {&grpc_static_metadata_refcounts[81], {{g_bytes + 862, 8}}}, + {&grpc_static_metadata_refcounts[82], {{g_bytes + 870, 19}}}, + {&grpc_static_metadata_refcounts[83], {{g_bytes + 889, 13}}}, + {&grpc_static_metadata_refcounts[84], {{g_bytes + 902, 11}}}, + {&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 4}}}, + {&grpc_static_metadata_refcounts[86], {{g_bytes + 917, 8}}}, + {&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 12}}}, + {&grpc_static_metadata_refcounts[88], {{g_bytes + 937, 18}}}, + {&grpc_static_metadata_refcounts[89], {{g_bytes + 955, 19}}}, + 
{&grpc_static_metadata_refcounts[90], {{g_bytes + 974, 5}}}, + {&grpc_static_metadata_refcounts[91], {{g_bytes + 979, 7}}}, + {&grpc_static_metadata_refcounts[92], {{g_bytes + 986, 7}}}, + {&grpc_static_metadata_refcounts[93], {{g_bytes + 993, 11}}}, + {&grpc_static_metadata_refcounts[94], {{g_bytes + 1004, 6}}}, + {&grpc_static_metadata_refcounts[95], {{g_bytes + 1010, 10}}}, + {&grpc_static_metadata_refcounts[96], {{g_bytes + 1020, 25}}}, + {&grpc_static_metadata_refcounts[97], {{g_bytes + 1045, 17}}}, + {&grpc_static_metadata_refcounts[98], {{g_bytes + 1062, 4}}}, + {&grpc_static_metadata_refcounts[99], {{g_bytes + 1066, 3}}}, + {&grpc_static_metadata_refcounts[100], {{g_bytes + 1069, 16}}}, + {&grpc_static_metadata_refcounts[101], {{g_bytes + 1085, 16}}}, + {&grpc_static_metadata_refcounts[102], {{g_bytes + 1101, 13}}}, + {&grpc_static_metadata_refcounts[103], {{g_bytes + 1114, 12}}}, + {&grpc_static_metadata_refcounts[104], {{g_bytes + 1126, 21}}}, +}; + +uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4}; + +static const int8_t elems_r[] = { + 16, 11, -1, 0, 15, 2, -78, 24, 0, 18, -5, 0, 0, 0, 17, 14, -8, 0, + 0, 27, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, -64, 0, -44, -43, -70, 0, 34, 33, 33, 32, 31, 30, 29, 28, 27, + 27, 26, 25, 24, 23, 22, 21, 20, 20, 19, 19, 18, 17, 16, 15, 14, 13, 12, + 11, 14, 13, 12, 11, 10, 9, 9, 8, 7, 6, 5, 0}; +static uint32_t elems_phash(uint32_t i) { + i -= 50; + uint32_t x = i % 103; + uint32_t y = i / 103; + uint32_t h = x; + if (y < GPR_ARRAY_SIZE(elems_r)) { + uint32_t delta = (uint32_t)elems_r[y]; + h += delta; + } + return h; +} + +static const uint16_t elem_keys[] = { + 1085, 1086, 565, 1709, 1089, 262, 263, 264, 265, 266, 1716, + 153, 154, 1719, 760, 761, 50, 51, 465, 466, 467, 980, + 981, 1604, 1499, 984, 773, 2129, 2234, 6014, 1611, 6434, 1738, + 1614, 6539, 6644, 1511, 6749, 6854, 6959, 7064, 7169, 7274, 7379, + 2024, 7484, 7589, 7694, 7799, 7904, 8009, 8114, 8219, 6224, 8324, + 8429, 6329, 8534, 8639, 8744, 8849, 8954, 9059, 9164, 9269, 9374, + 1151, 1152, 1153, 1154, 9479, 9584, 9689, 9794, 9899, 10004, 1782, + 10109, 10214, 10319, 10424, 10529, 0, 0, 0, 0, 0, 344, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 253, 254, 147, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0}; +static const uint8_t elem_idxs[] = { + 77, 79, 6, 25, 76, 19, 20, 21, 22, 23, 84, 15, 16, 83, 1, + 2, 17, 18, 11, 12, 13, 5, 4, 38, 43, 3, 0, 50, 57, 24, + 37, 29, 26, 36, 30, 31, 7, 32, 33, 34, 35, 39, 40, 41, 72, + 42, 44, 45, 46, 47, 48, 49, 51, 27, 52, 53, 28, 54, 55, 56, + 58, 59, 60, 61, 62, 63, 78, 80, 81, 82, 64, 65, 66, 67, 68, + 69, 85, 70, 71, 73, 74, 75, 255, 255, 255, 255, 255, 14, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 9, 10, 8}; + +grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) { + if (a == -1 || b == -1) return GRPC_MDNULL; + uint32_t k = (uint32_t)(a * 105 + b); + uint32_t h = elems_phash(k); + return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k && + elem_idxs[h] != 255 + ? 
GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[elem_idxs[h]], + GRPC_MDELEM_STORAGE_STATIC) + : GRPC_MDNULL; +} + +grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { + {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}}, + {&grpc_static_metadata_refcounts[38], {{g_bytes + 504, 1}}}}, + {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}}, + {&grpc_static_metadata_refcounts[25], {{g_bytes + 350, 1}}}}, + {{&grpc_static_metadata_refcounts[7], {{g_bytes + 50, 11}}}, + {&grpc_static_metadata_refcounts[26], {{g_bytes + 351, 1}}}}, + {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}}, + {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}}}, + {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}}, + {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}}}, + {{&grpc_static_metadata_refcounts[9], {{g_bytes + 77, 13}}}, + {&grpc_static_metadata_refcounts[35], {{g_bytes + 482, 7}}}}, + {{&grpc_static_metadata_refcounts[5], {{g_bytes + 36, 2}}}, + {&grpc_static_metadata_refcounts[40], {{g_bytes + 513, 8}}}}, + {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}}, + {&grpc_static_metadata_refcounts[41], {{g_bytes + 521, 16}}}}, + {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}}, + {&grpc_static_metadata_refcounts[42], {{g_bytes + 537, 4}}}}, + {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, + {&grpc_static_metadata_refcounts[43], {{g_bytes + 541, 3}}}}, + {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, + {&grpc_static_metadata_refcounts[44], {{g_bytes + 544, 3}}}}, + {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}}, + {&grpc_static_metadata_refcounts[45], {{g_bytes + 547, 4}}}}, + {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}}, + {&grpc_static_metadata_refcounts[46], {{g_bytes + 551, 5}}}}, + {{&grpc_static_metadata_refcounts[4], {{g_bytes + 29, 7}}}, + {&grpc_static_metadata_refcounts[47], {{g_bytes + 556, 4}}}}, + {{&grpc_static_metadata_refcounts[3], {{g_bytes + 19, 10}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}}, + {&grpc_static_metadata_refcounts[48], {{g_bytes + 560, 3}}}}, + {{&grpc_static_metadata_refcounts[1], {{g_bytes + 5, 7}}}, + {&grpc_static_metadata_refcounts[49], {{g_bytes + 563, 3}}}}, + {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}}, + {&grpc_static_metadata_refcounts[50], {{g_bytes + 566, 1}}}}, + {{&grpc_static_metadata_refcounts[0], {{g_bytes + 0, 5}}}, + {&grpc_static_metadata_refcounts[51], {{g_bytes + 567, 11}}}}, + {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, + {&grpc_static_metadata_refcounts[52], {{g_bytes + 578, 3}}}}, + {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, + {&grpc_static_metadata_refcounts[53], {{g_bytes + 581, 3}}}}, + {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, + {&grpc_static_metadata_refcounts[54], {{g_bytes + 584, 3}}}}, + {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, + {&grpc_static_metadata_refcounts[55], {{g_bytes + 587, 3}}}}, + {{&grpc_static_metadata_refcounts[2], {{g_bytes + 12, 7}}}, + {&grpc_static_metadata_refcounts[56], {{g_bytes + 590, 3}}}}, + {{&grpc_static_metadata_refcounts[57], {{g_bytes + 593, 14}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, + 
{&grpc_static_metadata_refcounts[58], {{g_bytes + 607, 13}}}}, + {{&grpc_static_metadata_refcounts[59], {{g_bytes + 620, 15}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[60], {{g_bytes + 635, 13}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[61], {{g_bytes + 648, 6}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[62], {{g_bytes + 654, 27}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[63], {{g_bytes + 681, 3}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[64], {{g_bytes + 684, 5}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[65], {{g_bytes + 689, 13}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[66], {{g_bytes + 702, 13}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[67], {{g_bytes + 715, 19}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}}, + {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}}}, + {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}}, + {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}}}, + {{&grpc_static_metadata_refcounts[15], {{g_bytes + 170, 16}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[68], {{g_bytes + 734, 16}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[69], {{g_bytes + 750, 14}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[70], {{g_bytes + 764, 16}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[71], {{g_bytes + 780, 13}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[14], {{g_bytes + 158, 12}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[72], {{g_bytes + 793, 6}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[73], {{g_bytes + 799, 4}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[74], {{g_bytes + 803, 4}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[75], {{g_bytes + 807, 6}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[76], {{g_bytes + 813, 7}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[77], {{g_bytes + 820, 4}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[20], {{g_bytes + 278, 4}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[78], {{g_bytes + 824, 8}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[79], {{g_bytes + 832, 17}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[80], {{g_bytes + 849, 13}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + 
{{&grpc_static_metadata_refcounts[81], {{g_bytes + 862, 8}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[82], {{g_bytes + 870, 19}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[83], {{g_bytes + 889, 13}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[21], {{g_bytes + 282, 8}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[84], {{g_bytes + 902, 11}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[85], {{g_bytes + 913, 4}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[86], {{g_bytes + 917, 8}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[87], {{g_bytes + 925, 12}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[88], {{g_bytes + 937, 18}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[89], {{g_bytes + 955, 19}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[90], {{g_bytes + 974, 5}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[91], {{g_bytes + 979, 7}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[92], {{g_bytes + 986, 7}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[93], {{g_bytes + 993, 11}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[94], {{g_bytes + 1004, 6}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[95], {{g_bytes + 1010, 10}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[96], {{g_bytes + 1020, 25}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[97], {{g_bytes + 1045, 17}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[19], {{g_bytes + 268, 10}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[98], {{g_bytes + 1062, 4}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[99], {{g_bytes + 1066, 3}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[100], {{g_bytes + 1069, 16}}}, + {&grpc_static_metadata_refcounts[29], {{g_bytes + 354, 0}}}}, + {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, + {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}}}, + {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, + {&grpc_static_metadata_refcounts[35], {{g_bytes + 482, 7}}}}, + {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, + {&grpc_static_metadata_refcounts[101], {{g_bytes + 1085, 16}}}}, + {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, + {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}}}, + {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, + {&grpc_static_metadata_refcounts[102], {{g_bytes + 1101, 13}}}}, + {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, + 
{&grpc_static_metadata_refcounts[103], {{g_bytes + 1114, 12}}}}, + {{&grpc_static_metadata_refcounts[10], {{g_bytes + 90, 20}}}, + {&grpc_static_metadata_refcounts[104], {{g_bytes + 1126, 21}}}}, + {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, + {&grpc_static_metadata_refcounts[39], {{g_bytes + 505, 8}}}}, + {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, + {&grpc_static_metadata_refcounts[36], {{g_bytes + 489, 4}}}}, + {{&grpc_static_metadata_refcounts[16], {{g_bytes + 186, 15}}}, + {&grpc_static_metadata_refcounts[102], {{g_bytes + 1101, 13}}}}, +}; +bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = { + true, // :path + true, // :method + true, // :status + true, // :authority + true, // :scheme + true, // te + true, // grpc-message + true, // grpc-status + true, // grpc-payload-bin + true, // grpc-encoding + true, // grpc-accept-encoding + true, // grpc-server-stats-bin + true, // grpc-tags-bin + true, // grpc-trace-bin + true, // content-type + true, // content-encoding + true, // accept-encoding + true, // grpc-internal-encoding-request + true, // grpc-internal-stream-encoding-request + true, // user-agent + true, // host + true, // lb-token + true, // grpc-previous-rpc-attempts + true, // grpc-retry-pushback-ms +}; + +const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 76, 77, 78, + 79, 80, 81, 82}; + +const uint8_t grpc_static_accept_stream_encoding_metadata[4] = {0, 83, 84, 85}; diff --git a/Sources/CgRPC/src/core/lib/transport/static_metadata.h b/Sources/CgRPC/src/core/lib/transport/static_metadata.h index f03a9d23b..b3a10f587 100644 --- a/Sources/CgRPC/src/core/lib/transport/static_metadata.h +++ b/Sources/CgRPC/src/core/lib/transport/static_metadata.h @@ -20,16 +20,18 @@ * To make changes to this file, change * tools/codegen/core/gen_static_metadata.py, and then re-run it. * - * See metadata.h for an explanation of the interface here, and metadata.c for + * See metadata.h for an explanation of the interface here, and metadata.cc for * an explanation of what's going on. 
*/ #ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H #define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H +#include + #include "src/core/lib/transport/metadata.h" -#define GRPC_STATIC_MDSTR_COUNT 100 +#define GRPC_STATIC_MDSTR_COUNT 105 extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT]; /* ":path" */ #define GRPC_MDSTR_PATH (grpc_static_slice_table[0]) @@ -76,166 +78,176 @@ extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT]; #define GRPC_MDSTR_HOST (grpc_static_slice_table[20]) /* "lb-token" */ #define GRPC_MDSTR_LB_TOKEN (grpc_static_slice_table[21]) +/* "grpc-previous-rpc-attempts" */ +#define GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS (grpc_static_slice_table[22]) +/* "grpc-retry-pushback-ms" */ +#define GRPC_MDSTR_GRPC_RETRY_PUSHBACK_MS (grpc_static_slice_table[23]) /* "grpc-timeout" */ -#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[22]) +#define GRPC_MDSTR_GRPC_TIMEOUT (grpc_static_slice_table[24]) +/* "1" */ +#define GRPC_MDSTR_1 (grpc_static_slice_table[25]) +/* "2" */ +#define GRPC_MDSTR_2 (grpc_static_slice_table[26]) +/* "3" */ +#define GRPC_MDSTR_3 (grpc_static_slice_table[27]) +/* "4" */ +#define GRPC_MDSTR_4 (grpc_static_slice_table[28]) /* "" */ -#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[23]) +#define GRPC_MDSTR_EMPTY (grpc_static_slice_table[29]) /* "grpc.wait_for_ready" */ -#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[24]) +#define GRPC_MDSTR_GRPC_DOT_WAIT_FOR_READY (grpc_static_slice_table[30]) /* "grpc.timeout" */ -#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[25]) +#define GRPC_MDSTR_GRPC_DOT_TIMEOUT (grpc_static_slice_table[31]) /* "grpc.max_request_message_bytes" */ #define GRPC_MDSTR_GRPC_DOT_MAX_REQUEST_MESSAGE_BYTES \ - (grpc_static_slice_table[26]) + (grpc_static_slice_table[32]) /* "grpc.max_response_message_bytes" */ #define GRPC_MDSTR_GRPC_DOT_MAX_RESPONSE_MESSAGE_BYTES \ - (grpc_static_slice_table[27]) + (grpc_static_slice_table[33]) /* "/grpc.lb.v1.LoadBalancer/BalanceLoad" */ #define GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD \ - (grpc_static_slice_table[28]) + (grpc_static_slice_table[34]) +/* "deflate" */ +#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[35]) +/* "gzip" */ +#define GRPC_MDSTR_GZIP (grpc_static_slice_table[36]) +/* "stream/gzip" */ +#define GRPC_MDSTR_STREAM_SLASH_GZIP (grpc_static_slice_table[37]) /* "0" */ -#define GRPC_MDSTR_0 (grpc_static_slice_table[29]) -/* "1" */ -#define GRPC_MDSTR_1 (grpc_static_slice_table[30]) -/* "2" */ -#define GRPC_MDSTR_2 (grpc_static_slice_table[31]) +#define GRPC_MDSTR_0 (grpc_static_slice_table[38]) /* "identity" */ -#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[32]) -/* "gzip" */ -#define GRPC_MDSTR_GZIP (grpc_static_slice_table[33]) -/* "deflate" */ -#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[34]) +#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[39]) /* "trailers" */ -#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[35]) +#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[40]) /* "application/grpc" */ -#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[36]) +#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[41]) /* "POST" */ -#define GRPC_MDSTR_POST (grpc_static_slice_table[37]) +#define GRPC_MDSTR_POST (grpc_static_slice_table[42]) /* "200" */ -#define GRPC_MDSTR_200 (grpc_static_slice_table[38]) +#define GRPC_MDSTR_200 (grpc_static_slice_table[43]) /* "404" */ -#define GRPC_MDSTR_404 (grpc_static_slice_table[39]) +#define 
GRPC_MDSTR_404 (grpc_static_slice_table[44]) /* "http" */ -#define GRPC_MDSTR_HTTP (grpc_static_slice_table[40]) +#define GRPC_MDSTR_HTTP (grpc_static_slice_table[45]) /* "https" */ -#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[41]) +#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[46]) /* "grpc" */ -#define GRPC_MDSTR_GRPC (grpc_static_slice_table[42]) +#define GRPC_MDSTR_GRPC (grpc_static_slice_table[47]) /* "GET" */ -#define GRPC_MDSTR_GET (grpc_static_slice_table[43]) +#define GRPC_MDSTR_GET (grpc_static_slice_table[48]) /* "PUT" */ -#define GRPC_MDSTR_PUT (grpc_static_slice_table[44]) +#define GRPC_MDSTR_PUT (grpc_static_slice_table[49]) /* "/" */ -#define GRPC_MDSTR_SLASH (grpc_static_slice_table[45]) +#define GRPC_MDSTR_SLASH (grpc_static_slice_table[50]) /* "/index.html" */ -#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[46]) +#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[51]) /* "204" */ -#define GRPC_MDSTR_204 (grpc_static_slice_table[47]) +#define GRPC_MDSTR_204 (grpc_static_slice_table[52]) /* "206" */ -#define GRPC_MDSTR_206 (grpc_static_slice_table[48]) +#define GRPC_MDSTR_206 (grpc_static_slice_table[53]) /* "304" */ -#define GRPC_MDSTR_304 (grpc_static_slice_table[49]) +#define GRPC_MDSTR_304 (grpc_static_slice_table[54]) /* "400" */ -#define GRPC_MDSTR_400 (grpc_static_slice_table[50]) +#define GRPC_MDSTR_400 (grpc_static_slice_table[55]) /* "500" */ -#define GRPC_MDSTR_500 (grpc_static_slice_table[51]) +#define GRPC_MDSTR_500 (grpc_static_slice_table[56]) /* "accept-charset" */ -#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[52]) +#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[57]) /* "gzip, deflate" */ -#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[53]) +#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[58]) /* "accept-language" */ -#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[54]) +#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[59]) /* "accept-ranges" */ -#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[55]) +#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[60]) /* "accept" */ -#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[56]) +#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[61]) /* "access-control-allow-origin" */ -#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[57]) +#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[62]) /* "age" */ -#define GRPC_MDSTR_AGE (grpc_static_slice_table[58]) +#define GRPC_MDSTR_AGE (grpc_static_slice_table[63]) /* "allow" */ -#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[59]) +#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[64]) /* "authorization" */ -#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[60]) +#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[65]) /* "cache-control" */ -#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[61]) +#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[66]) /* "content-disposition" */ -#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[62]) +#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[67]) /* "content-language" */ -#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[63]) +#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[68]) /* "content-length" */ -#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[64]) +#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[69]) /* "content-location" */ -#define 
GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[65]) +#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[70]) /* "content-range" */ -#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[66]) +#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[71]) /* "cookie" */ -#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[67]) +#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[72]) /* "date" */ -#define GRPC_MDSTR_DATE (grpc_static_slice_table[68]) +#define GRPC_MDSTR_DATE (grpc_static_slice_table[73]) /* "etag" */ -#define GRPC_MDSTR_ETAG (grpc_static_slice_table[69]) +#define GRPC_MDSTR_ETAG (grpc_static_slice_table[74]) /* "expect" */ -#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[70]) +#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[75]) /* "expires" */ -#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[71]) +#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[76]) /* "from" */ -#define GRPC_MDSTR_FROM (grpc_static_slice_table[72]) +#define GRPC_MDSTR_FROM (grpc_static_slice_table[77]) /* "if-match" */ -#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[73]) +#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[78]) /* "if-modified-since" */ -#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[74]) +#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[79]) /* "if-none-match" */ -#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[75]) +#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[80]) /* "if-range" */ -#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[76]) +#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[81]) /* "if-unmodified-since" */ -#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[77]) +#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[82]) /* "last-modified" */ -#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[78]) +#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[83]) /* "lb-cost-bin" */ -#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[79]) +#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[84]) /* "link" */ -#define GRPC_MDSTR_LINK (grpc_static_slice_table[80]) +#define GRPC_MDSTR_LINK (grpc_static_slice_table[85]) /* "location" */ -#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[81]) +#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[86]) /* "max-forwards" */ -#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[82]) +#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[87]) /* "proxy-authenticate" */ -#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[83]) +#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[88]) /* "proxy-authorization" */ -#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[84]) +#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[89]) /* "range" */ -#define GRPC_MDSTR_RANGE (grpc_static_slice_table[85]) +#define GRPC_MDSTR_RANGE (grpc_static_slice_table[90]) /* "referer" */ -#define GRPC_MDSTR_REFERER (grpc_static_slice_table[86]) +#define GRPC_MDSTR_REFERER (grpc_static_slice_table[91]) /* "refresh" */ -#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[87]) +#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[92]) /* "retry-after" */ -#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[88]) +#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[93]) /* "server" */ -#define GRPC_MDSTR_SERVER (grpc_static_slice_table[89]) +#define GRPC_MDSTR_SERVER (grpc_static_slice_table[94]) /* "set-cookie" */ -#define GRPC_MDSTR_SET_COOKIE 
(grpc_static_slice_table[90]) +#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[95]) /* "strict-transport-security" */ -#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[91]) +#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[96]) /* "transfer-encoding" */ -#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[92]) +#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[97]) /* "vary" */ -#define GRPC_MDSTR_VARY (grpc_static_slice_table[93]) +#define GRPC_MDSTR_VARY (grpc_static_slice_table[98]) /* "via" */ -#define GRPC_MDSTR_VIA (grpc_static_slice_table[94]) +#define GRPC_MDSTR_VIA (grpc_static_slice_table[99]) /* "www-authenticate" */ -#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[95]) +#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[100]) /* "identity,deflate" */ -#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[96]) +#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[101]) /* "identity,gzip" */ -#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[97]) +#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[102]) /* "deflate,gzip" */ -#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[98]) +#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[103]) /* "identity,deflate,gzip" */ #define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \ - (grpc_static_slice_table[99]) + (grpc_static_slice_table[104]) extern const grpc_slice_refcount_vtable grpc_static_metadata_vtable; extern grpc_slice_refcount @@ -533,34 +545,38 @@ typedef enum { GRPC_BATCH_USER_AGENT, GRPC_BATCH_HOST, GRPC_BATCH_LB_TOKEN, + GRPC_BATCH_GRPC_PREVIOUS_RPC_ATTEMPTS, + GRPC_BATCH_GRPC_RETRY_PUSHBACK_MS, GRPC_BATCH_CALLOUTS_COUNT } grpc_metadata_batch_callouts_index; typedef union { - struct grpc_linked_mdelem *array[GRPC_BATCH_CALLOUTS_COUNT]; + struct grpc_linked_mdelem* array[GRPC_BATCH_CALLOUTS_COUNT]; struct { - struct grpc_linked_mdelem *path; - struct grpc_linked_mdelem *method; - struct grpc_linked_mdelem *status; - struct grpc_linked_mdelem *authority; - struct grpc_linked_mdelem *scheme; - struct grpc_linked_mdelem *te; - struct grpc_linked_mdelem *grpc_message; - struct grpc_linked_mdelem *grpc_status; - struct grpc_linked_mdelem *grpc_payload_bin; - struct grpc_linked_mdelem *grpc_encoding; - struct grpc_linked_mdelem *grpc_accept_encoding; - struct grpc_linked_mdelem *grpc_server_stats_bin; - struct grpc_linked_mdelem *grpc_tags_bin; - struct grpc_linked_mdelem *grpc_trace_bin; - struct grpc_linked_mdelem *content_type; - struct grpc_linked_mdelem *content_encoding; - struct grpc_linked_mdelem *accept_encoding; - struct grpc_linked_mdelem *grpc_internal_encoding_request; - struct grpc_linked_mdelem *grpc_internal_stream_encoding_request; - struct grpc_linked_mdelem *user_agent; - struct grpc_linked_mdelem *host; - struct grpc_linked_mdelem *lb_token; + struct grpc_linked_mdelem* path; + struct grpc_linked_mdelem* method; + struct grpc_linked_mdelem* status; + struct grpc_linked_mdelem* authority; + struct grpc_linked_mdelem* scheme; + struct grpc_linked_mdelem* te; + struct grpc_linked_mdelem* grpc_message; + struct grpc_linked_mdelem* grpc_status; + struct grpc_linked_mdelem* grpc_payload_bin; + struct grpc_linked_mdelem* grpc_encoding; + struct grpc_linked_mdelem* grpc_accept_encoding; + struct grpc_linked_mdelem* grpc_server_stats_bin; + struct grpc_linked_mdelem* grpc_tags_bin; + struct grpc_linked_mdelem* grpc_trace_bin; + struct 
grpc_linked_mdelem* content_type; + struct grpc_linked_mdelem* content_encoding; + struct grpc_linked_mdelem* accept_encoding; + struct grpc_linked_mdelem* grpc_internal_encoding_request; + struct grpc_linked_mdelem* grpc_internal_stream_encoding_request; + struct grpc_linked_mdelem* user_agent; + struct grpc_linked_mdelem* host; + struct grpc_linked_mdelem* lb_token; + struct grpc_linked_mdelem* grpc_previous_rpc_attempts; + struct grpc_linked_mdelem* grpc_retry_pushback_ms; } named; } grpc_metadata_batch_callouts; diff --git a/Sources/CgRPC/src/core/lib/transport/status_conversion.c b/Sources/CgRPC/src/core/lib/transport/status_conversion.cc similarity index 94% rename from Sources/CgRPC/src/core/lib/transport/status_conversion.c rename to Sources/CgRPC/src/core/lib/transport/status_conversion.cc index a40d33328..e58bef5ba 100644 --- a/Sources/CgRPC/src/core/lib/transport/status_conversion.c +++ b/Sources/CgRPC/src/core/lib/transport/status_conversion.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/lib/transport/status_conversion.h" grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) { @@ -38,7 +40,7 @@ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status) { } grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, - gpr_timespec deadline) { + grpc_millis deadline) { switch (error) { case GRPC_HTTP2_NO_ERROR: /* should never be received */ @@ -46,7 +48,7 @@ grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, case GRPC_HTTP2_CANCEL: /* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been * exceeded */ - return gpr_time_cmp(gpr_now(deadline.clock_type), deadline) >= 0 + return grpc_core::ExecCtx::Get()->Now() > deadline ? GRPC_STATUS_DEADLINE_EXCEEDED : GRPC_STATUS_CANCELLED; case GRPC_HTTP2_ENHANCE_YOUR_CALM: diff --git a/Sources/CgRPC/src/core/lib/transport/status_conversion.h b/Sources/CgRPC/src/core/lib/transport/status_conversion.h index e93f3dfd9..487f00c08 100644 --- a/Sources/CgRPC/src/core/lib/transport/status_conversion.h +++ b/Sources/CgRPC/src/core/lib/transport/status_conversion.h @@ -19,13 +19,17 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H #define GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H +#include + #include + +#include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/transport/http2_errors.h" /* Conversion of grpc status codes to http2 error codes (for RST_STREAM) */ grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status); grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error, - gpr_timespec deadline); + grpc_millis deadline); /* Conversion of HTTP status codes (:status) to grpc status codes */ grpc_status_code grpc_http2_status_to_grpc_status(int status); diff --git a/Sources/CgRPC/src/core/lib/transport/status_metadata.cc b/Sources/CgRPC/src/core/lib/transport/status_metadata.cc new file mode 100644 index 000000000..f896053e4 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/transport/status_metadata.cc @@ -0,0 +1,54 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/transport/status_metadata.h" + +#include "src/core/lib/slice/slice_string_helpers.h" +#include "src/core/lib/transport/static_metadata.h" + +/* we offset status by a small amount when storing it into transport metadata + as metadata cannot store a 0 value (which is used as OK for grpc_status_codes + */ +#define STATUS_OFFSET 1 + +static void destroy_status(void* ignored) {} + +grpc_status_code grpc_get_status_code_from_metadata(grpc_mdelem md) { + if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_0)) { + return GRPC_STATUS_OK; + } + if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_1)) { + return GRPC_STATUS_CANCELLED; + } + if (grpc_mdelem_eq(md, GRPC_MDELEM_GRPC_STATUS_2)) { + return GRPC_STATUS_UNKNOWN; + } + void* user_data = grpc_mdelem_get_user_data(md, destroy_status); + if (user_data != nullptr) { + return static_cast((intptr_t)user_data - STATUS_OFFSET); + } + uint32_t status; + if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(md), &status)) { + status = GRPC_STATUS_UNKNOWN; /* could not parse status code */ + } + grpc_mdelem_set_user_data( + md, destroy_status, (void*)static_cast(status + STATUS_OFFSET)); + return static_cast(status); +} diff --git a/Sources/CgRPC/src/core/lib/transport/status_metadata.h b/Sources/CgRPC/src/core/lib/transport/status_metadata.h new file mode 100644 index 000000000..aed9c7ac2 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/transport/status_metadata.h @@ -0,0 +1,30 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
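An illustrative aside, not part of the patch: grpc_get_status_code_from_metadata above caches the parsed status in the mdelem user-data slot, offset by STATUS_OFFSET so that GRPC_STATUS_OK (0) can still be told apart from "nothing cached yet" (nullptr). A minimal standalone sketch of the same encoding trick, with made-up helper names:

#include <cassert>
#include <cstdint>

// A void* cache slot where nullptr means "not cached yet"; a value of 0
// therefore has to be stored as 1, mirroring the STATUS_OFFSET idea above.
static void* encode_cached_status(int status) {
  return reinterpret_cast<void*>(static_cast<intptr_t>(status) + 1);
}

static bool decode_cached_status(void* cached, int* status) {
  if (cached == nullptr) return false;  // nothing cached yet
  *status = static_cast<int>(reinterpret_cast<intptr_t>(cached) - 1);
  return true;
}

int main() {
  void* slot = nullptr;
  int status = -1;
  assert(!decode_cached_status(slot, &status));  // empty slot
  slot = encode_cached_status(0);                // cache an OK-like status of 0
  assert(decode_cached_status(slot, &status) && status == 0);
  return 0;
}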
+ * + */ + +#ifndef GRPC_CORE_LIB_TRANSPORT_STATUS_METADATA_H +#define GRPC_CORE_LIB_TRANSPORT_STATUS_METADATA_H + +#include + +#include + +#include "src/core/lib/transport/metadata.h" + +grpc_status_code grpc_get_status_code_from_metadata(grpc_mdelem md); + +#endif /* GRPC_CORE_LIB_TRANSPORT_STATUS_METADATA_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/timeout_encoding.c b/Sources/CgRPC/src/core/lib/transport/timeout_encoding.cc similarity index 51% rename from Sources/CgRPC/src/core/lib/transport/timeout_encoding.c rename to Sources/CgRPC/src/core/lib/transport/timeout_encoding.cc index 02f179d6a..c37249920 100644 --- a/Sources/CgRPC/src/core/lib/transport/timeout_encoding.c +++ b/Sources/CgRPC/src/core/lib/transport/timeout_encoding.cc @@ -16,13 +16,14 @@ * */ +#include + #include "src/core/lib/transport/timeout_encoding.h" #include #include -#include -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" static int64_t round_up(int64_t x, int64_t divisor) { return (x / divisor + (x % divisor != 0)) * divisor; @@ -41,15 +42,15 @@ static int64_t round_up_to_three_sig_figs(int64_t x) { } /* encode our minimum viable timeout value */ -static void enc_tiny(char *buffer) { memcpy(buffer, "1n", 3); } +static void enc_tiny(char* buffer) { memcpy(buffer, "1n", 3); } -static void enc_ext(char *buffer, int64_t value, char ext) { +static void enc_ext(char* buffer, int64_t value, char ext) { int n = int64_ttoa(value, buffer); buffer[n] = ext; buffer[n + 1] = 0; } -static void enc_seconds(char *buffer, int64_t sec) { +static void enc_seconds(char* buffer, int64_t sec) { if (sec % 3600 == 0) { enc_ext(buffer, sec / 3600, 'H'); } else if (sec % 60 == 0) { @@ -59,84 +60,51 @@ static void enc_seconds(char *buffer, int64_t sec) { } } -static void enc_nanos(char *buffer, int64_t x) { +static void enc_millis(char* buffer, int64_t x) { x = round_up_to_three_sig_figs(x); - if (x < 100000) { - if (x % 1000 == 0) { - enc_ext(buffer, x / 1000, 'u'); - } else { - enc_ext(buffer, x, 'n'); - } - } else if (x < 100000000) { - if (x % 1000000 == 0) { - enc_ext(buffer, x / 1000000, 'm'); - } else { - enc_ext(buffer, x / 1000, 'u'); - } - } else if (x < 1000000000) { - enc_ext(buffer, x / 1000000, 'm'); + if (x < GPR_MS_PER_SEC) { + enc_ext(buffer, x, 'm'); } else { - /* note that this is only ever called with times of less than one second, - so if we reach here the time must have been rounded up to a whole second - (and no more) */ - memcpy(buffer, "1S", 3); - } -} - -static void enc_micros(char *buffer, int64_t x) { - x = round_up_to_three_sig_figs(x); - if (x < 100000) { - if (x % 1000 == 0) { - enc_ext(buffer, x / 1000, 'm'); + if (x % GPR_MS_PER_SEC == 0) { + enc_seconds(buffer, x / GPR_MS_PER_SEC); } else { - enc_ext(buffer, x, 'u'); + enc_ext(buffer, x, 'm'); } - } else if (x < 100000000) { - if (x % 1000000 == 0) { - enc_ext(buffer, x / 1000000, 'S'); - } else { - enc_ext(buffer, x / 1000, 'm'); - } - } else { - enc_ext(buffer, x / 1000000, 'S'); } } -void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer) { - if (timeout.tv_sec < 0) { +void grpc_http2_encode_timeout(grpc_millis timeout, char* buffer) { + if (timeout <= 0) { enc_tiny(buffer); - } else if (timeout.tv_sec == 0) { - enc_nanos(buffer, timeout.tv_nsec); - } else if (timeout.tv_sec < 1000 && timeout.tv_nsec != 0) { - enc_micros(buffer, - (int64_t)(timeout.tv_sec * 1000000) + - (timeout.tv_nsec / 1000 + (timeout.tv_nsec % 1000 != 0))); + } else if (timeout < 1000 * GPR_MS_PER_SEC) { + enc_millis(buffer, 
timeout); } else { - enc_seconds(buffer, timeout.tv_sec + (timeout.tv_nsec != 0)); + enc_seconds(buffer, + timeout / GPR_MS_PER_SEC + (timeout % GPR_MS_PER_SEC != 0)); } } -static int is_all_whitespace(const char *p, const char *end) { +static int is_all_whitespace(const char* p, const char* end) { while (p != end && *p == ' ') p++; return p == end; } -int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) { - int32_t x = 0; - const uint8_t *p = GRPC_SLICE_START_PTR(text); - const uint8_t *end = GRPC_SLICE_END_PTR(text); +int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout) { + grpc_millis x = 0; + const uint8_t* p = GRPC_SLICE_START_PTR(text); + const uint8_t* end = GRPC_SLICE_END_PTR(text); int have_digit = 0; /* skip whitespace */ for (; p != end && *p == ' '; p++) ; /* decode numeric part */ for (; p != end && *p >= '0' && *p <= '9'; p++) { - int32_t digit = (int32_t)(*p - (uint8_t)'0'); + int32_t digit = static_cast(*p - static_cast('0')); have_digit = 1; /* spec allows max. 8 digits, but we allow values up to 1,000,000,000 */ if (x >= (100 * 1000 * 1000)) { if (x != (100 * 1000 * 1000) || digit != 0) { - *timeout = gpr_inf_future(GPR_TIMESPAN); + *timeout = GRPC_MILLIS_INF_FUTURE; return 1; } } @@ -150,26 +118,27 @@ int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout) { /* decode unit specifier */ switch (*p) { case 'n': - *timeout = gpr_time_from_nanos(x, GPR_TIMESPAN); + *timeout = x / GPR_NS_PER_MS + (x % GPR_NS_PER_MS != 0); break; case 'u': - *timeout = gpr_time_from_micros(x, GPR_TIMESPAN); + *timeout = x / GPR_US_PER_MS + (x % GPR_US_PER_MS != 0); break; case 'm': - *timeout = gpr_time_from_millis(x, GPR_TIMESPAN); + *timeout = x; break; case 'S': - *timeout = gpr_time_from_seconds(x, GPR_TIMESPAN); + *timeout = x * GPR_MS_PER_SEC; break; case 'M': - *timeout = gpr_time_from_minutes(x, GPR_TIMESPAN); + *timeout = x * 60 * GPR_MS_PER_SEC; break; case 'H': - *timeout = gpr_time_from_hours(x, GPR_TIMESPAN); + *timeout = x * 60 * 60 * GPR_MS_PER_SEC; break; default: return 0; } p++; - return is_all_whitespace((const char *)p, (const char *)end); + return is_all_whitespace(reinterpret_cast(p), + reinterpret_cast(end)); } diff --git a/Sources/CgRPC/src/core/lib/transport/timeout_encoding.h b/Sources/CgRPC/src/core/lib/transport/timeout_encoding.h index 7ff35c408..8505e32ff 100644 --- a/Sources/CgRPC/src/core/lib/transport/timeout_encoding.h +++ b/Sources/CgRPC/src/core/lib/transport/timeout_encoding.h @@ -19,16 +19,19 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H #define GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H +#include + #include #include -#include "src/core/lib/support/string.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/exec_ctx.h" #define GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE (GPR_LTOA_MIN_BUFSIZE + 1) /* Encode/decode timeouts to the GRPC over HTTP/2 format; encoding may round up arbitrarily */ -void grpc_http2_encode_timeout(gpr_timespec timeout, char *buffer); -int grpc_http2_decode_timeout(grpc_slice text, gpr_timespec *timeout); +void grpc_http2_encode_timeout(grpc_millis timeout, char* buffer); +int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout); #endif /* GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/transport.c b/Sources/CgRPC/src/core/lib/transport/transport.c deleted file mode 100644 index 682a820b4..000000000 --- a/Sources/CgRPC/src/core/lib/transport/transport.c +++ /dev/null @@ -1,289 +0,0 @@ -/* - * - * Copyright 
2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/lib/transport/transport.h" - -#include - -#include -#include -#include -#include - -#include "src/core/lib/iomgr/executor.h" -#include "src/core/lib/slice/slice_internal.h" -#include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" -#include "src/core/lib/transport/transport_impl.h" - -#ifndef NDEBUG -grpc_tracer_flag grpc_trace_stream_refcount = - GRPC_TRACER_INITIALIZER(false, "stream_refcount"); -#endif - -#ifndef NDEBUG -void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) { - if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) { - gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count); - gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s", - refcount->object_type, refcount, refcount->destroy.cb_arg, val, - val + 1, reason); - } -#else -void grpc_stream_ref(grpc_stream_refcount *refcount) { -#endif - gpr_ref_non_zero(&refcount->refs); -} - -#ifndef NDEBUG -void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount, - const char *reason) { - if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) { - gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count); - gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s", - refcount->object_type, refcount, refcount->destroy.cb_arg, val, - val - 1, reason); - } -#else -void grpc_stream_unref(grpc_exec_ctx *exec_ctx, - grpc_stream_refcount *refcount) { -#endif - if (gpr_unref(&refcount->refs)) { - if (exec_ctx->flags & GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP) { - /* Ick. - The thread we're running on MAY be owned (indirectly) by a call-stack. - If that's the case, destroying the call-stack MAY try to destroy the - thread, which is a tangled mess that we just don't want to ever have to - cope with. - Throw this over to the executor (on a core-owned thread) and process it - there. 
*/ - refcount->destroy.scheduler = - grpc_executor_scheduler(GRPC_EXECUTOR_SHORT); - } - GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE); - } -} - -#define STREAM_REF_FROM_SLICE_REF(p) \ - ((grpc_stream_refcount *)(((uint8_t *)p) - \ - offsetof(grpc_stream_refcount, slice_refcount))) - -static void slice_stream_ref(void *p) { -#ifndef NDEBUG - grpc_stream_ref(STREAM_REF_FROM_SLICE_REF(p), "slice"); -#else - grpc_stream_ref(STREAM_REF_FROM_SLICE_REF(p)); -#endif -} - -static void slice_stream_unref(grpc_exec_ctx *exec_ctx, void *p) { -#ifndef NDEBUG - grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p), "slice"); -#else - grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p)); -#endif -} - -grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount *refcount, - void *buffer, size_t length) { - slice_stream_ref(&refcount->slice_refcount); - grpc_slice res; - res.refcount = &refcount->slice_refcount, - res.data.refcounted.bytes = (uint8_t *)buffer; - res.data.refcounted.length = length; - return res; -} - -static const grpc_slice_refcount_vtable stream_ref_slice_vtable = { - .ref = slice_stream_ref, - .unref = slice_stream_unref, - .eq = grpc_slice_default_eq_impl, - .hash = grpc_slice_default_hash_impl}; - -#ifndef NDEBUG -void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs, - grpc_iomgr_cb_func cb, void *cb_arg, - const char *object_type) { - refcount->object_type = object_type; -#else -void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs, - grpc_iomgr_cb_func cb, void *cb_arg) { -#endif - gpr_ref_init(&refcount->refs, initial_refs); - GRPC_CLOSURE_INIT(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx); - refcount->slice_refcount.vtable = &stream_ref_slice_vtable; - refcount->slice_refcount.sub_refcount = &refcount->slice_refcount; -} - -static void move64(uint64_t *from, uint64_t *to) { - *to += *from; - *from = 0; -} - -void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats *from, - grpc_transport_one_way_stats *to) { - move64(&from->framing_bytes, &to->framing_bytes); - move64(&from->data_bytes, &to->data_bytes); - move64(&from->header_bytes, &to->header_bytes); -} - -void grpc_transport_move_stats(grpc_transport_stream_stats *from, - grpc_transport_stream_stats *to) { - grpc_transport_move_one_way_stats(&from->incoming, &to->incoming); - grpc_transport_move_one_way_stats(&from->outgoing, &to->outgoing); -} - -size_t grpc_transport_stream_size(grpc_transport *transport) { - return transport->vtable->sizeof_stream; -} - -void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, - grpc_transport *transport) { - transport->vtable->destroy(exec_ctx, transport); -} - -int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, grpc_stream *stream, - grpc_stream_refcount *refcount, - const void *server_data, gpr_arena *arena) { - return transport->vtable->init_stream(exec_ctx, transport, stream, refcount, - server_data, arena); -} - -void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, - grpc_stream *stream, - grpc_transport_stream_op_batch *op) { - transport->vtable->perform_stream_op(exec_ctx, transport, stream, op); -} - -void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, - grpc_transport_op *op) { - transport->vtable->perform_op(exec_ctx, transport, op); -} - -void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport, - grpc_stream *stream, - grpc_polling_entity *pollent) { - 
grpc_pollset *pollset; - grpc_pollset_set *pollset_set; - if ((pollset = grpc_polling_entity_pollset(pollent)) != NULL) { - transport->vtable->set_pollset(exec_ctx, transport, stream, pollset); - } else if ((pollset_set = grpc_polling_entity_pollset_set(pollent)) != NULL) { - transport->vtable->set_pollset_set(exec_ctx, transport, stream, - pollset_set); - } else { - abort(); - } -} - -void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, - grpc_stream *stream, - grpc_closure *then_schedule_closure) { - transport->vtable->destroy_stream(exec_ctx, transport, stream, - then_schedule_closure); -} - -grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, - grpc_transport *transport) { - return transport->vtable->get_endpoint(exec_ctx, transport); -} - -// This comment should be sung to the tune of -// "Supercalifragilisticexpialidocious": -// -// grpc_transport_stream_op_batch_finish_with_failure -// is a function that must always unref cancel_error -// though it lives in lib, it handles transport stream ops sure -// it's grpc_transport_stream_op_batch_finish_with_failure -void grpc_transport_stream_op_batch_finish_with_failure( - grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch, - grpc_error *error, grpc_call_combiner *call_combiner) { - if (batch->send_message) { - grpc_byte_stream_destroy(exec_ctx, - batch->payload->send_message.send_message); - } - if (batch->recv_message) { - GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, - batch->payload->recv_message.recv_message_ready, - GRPC_ERROR_REF(error), - "failing recv_message_ready"); - } - if (batch->recv_initial_metadata) { - GRPC_CALL_COMBINER_START( - exec_ctx, call_combiner, - batch->payload->recv_initial_metadata.recv_initial_metadata_ready, - GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready"); - } - GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error); - if (batch->cancel_stream) { - GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error); - } -} - -typedef struct { - grpc_closure outer_on_complete; - grpc_closure *inner_on_complete; - grpc_transport_op op; -} made_transport_op; - -static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - made_transport_op *op = (made_transport_op *)arg; - GRPC_CLOSURE_SCHED(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error)); - gpr_free(op); -} - -grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) { - made_transport_op *op = (made_transport_op *)gpr_malloc(sizeof(*op)); - GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_op, op, - grpc_schedule_on_exec_ctx); - op->inner_on_complete = on_complete; - memset(&op->op, 0, sizeof(op->op)); - op->op.on_consumed = &op->outer_on_complete; - return &op->op; -} - -typedef struct { - grpc_closure outer_on_complete; - grpc_closure *inner_on_complete; - grpc_transport_stream_op_batch op; - grpc_transport_stream_op_batch_payload payload; -} made_transport_stream_op; - -static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - made_transport_stream_op *op = (made_transport_stream_op *)arg; - grpc_closure *c = op->inner_on_complete; - gpr_free(op); - GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_REF(error)); -} - -grpc_transport_stream_op_batch *grpc_make_transport_stream_op( - grpc_closure *on_complete) { - made_transport_stream_op *op = - (made_transport_stream_op *)gpr_zalloc(sizeof(*op)); - op->op.payload = &op->payload; - GRPC_CLOSURE_INIT(&op->outer_on_complete, 
destroy_made_transport_stream_op, - op, grpc_schedule_on_exec_ctx); - op->inner_on_complete = on_complete; - op->op.on_complete = &op->outer_on_complete; - return &op->op; -} diff --git a/Sources/CgRPC/src/core/lib/transport/transport.cc b/Sources/CgRPC/src/core/lib/transport/transport.cc new file mode 100644 index 000000000..6b41e4b37 --- /dev/null +++ b/Sources/CgRPC/src/core/lib/transport/transport.cc @@ -0,0 +1,278 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/lib/transport/transport.h" + +#include + +#include +#include +#include +#include + +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/iomgr/executor.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/slice_string_helpers.h" +#include "src/core/lib/transport/transport_impl.h" + +grpc_core::DebugOnlyTraceFlag grpc_trace_stream_refcount(false, + "stream_refcount"); + +#ifndef NDEBUG +void grpc_stream_ref(grpc_stream_refcount* refcount, const char* reason) { + if (grpc_trace_stream_refcount.enabled()) { + gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count); + gpr_log(GPR_DEBUG, "%s %p:%p REF %" PRIdPTR "->%" PRIdPTR " %s", + refcount->object_type, refcount, refcount->destroy.cb_arg, val, + val + 1, reason); + } +#else +void grpc_stream_ref(grpc_stream_refcount* refcount) { +#endif + gpr_ref_non_zero(&refcount->refs); +} + +#ifndef NDEBUG +void grpc_stream_unref(grpc_stream_refcount* refcount, const char* reason) { + if (grpc_trace_stream_refcount.enabled()) { + gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count); + gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s", + refcount->object_type, refcount, refcount->destroy.cb_arg, val, + val - 1, reason); + } +#else +void grpc_stream_unref(grpc_stream_refcount* refcount) { +#endif + if (gpr_unref(&refcount->refs)) { + if (grpc_core::ExecCtx::Get()->flags() & + GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP) { + /* Ick. + The thread we're running on MAY be owned (indirectly) by a call-stack. + If that's the case, destroying the call-stack MAY try to destroy the + thread, which is a tangled mess that we just don't want to ever have to + cope with. + Throw this over to the executor (on a core-owned thread) and process it + there. 
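An illustrative aside, not part of the patch: the grpc_core::ExecCtx::Get() call just below is why the explicit grpc_exec_ctx* parameter disappears from every signature in these files; the execution context is now reached through a thread-local instead of being threaded through the call graph. A rough standalone sketch of that pattern (ExecCtxLike is a stand-in, not the real class):

#include <cassert>

class ExecCtxLike {
 public:
  ExecCtxLike() { prev_ = current_; current_ = this; }  // scoped install
  ~ExecCtxLike() { current_ = prev_; }
  static ExecCtxLike* Get() { return current_; }
 private:
  ExecCtxLike* prev_ = nullptr;
  static thread_local ExecCtxLike* current_;
};
thread_local ExecCtxLike* ExecCtxLike::current_ = nullptr;

static void leaf_function() {
  // no context parameter needed; fetch the ambient one
  assert(ExecCtxLike::Get() != nullptr);
}

int main() {
  ExecCtxLike exec_ctx;  // roughly what API entry points now declare on the stack
  leaf_function();
  return 0;
}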
*/ + refcount->destroy.scheduler = + grpc_executor_scheduler(GRPC_EXECUTOR_SHORT); + } + GRPC_CLOSURE_SCHED(&refcount->destroy, GRPC_ERROR_NONE); + } +} + +#define STREAM_REF_FROM_SLICE_REF(p) \ + ((grpc_stream_refcount*)(((uint8_t*)p) - \ + offsetof(grpc_stream_refcount, slice_refcount))) + +static void slice_stream_ref(void* p) { +#ifndef NDEBUG + grpc_stream_ref(STREAM_REF_FROM_SLICE_REF(p), "slice"); +#else + grpc_stream_ref(STREAM_REF_FROM_SLICE_REF(p)); +#endif +} + +static void slice_stream_unref(void* p) { +#ifndef NDEBUG + grpc_stream_unref(STREAM_REF_FROM_SLICE_REF(p), "slice"); +#else + grpc_stream_unref(STREAM_REF_FROM_SLICE_REF(p)); +#endif +} + +grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount* refcount, + void* buffer, size_t length) { + slice_stream_ref(&refcount->slice_refcount); + grpc_slice res; + res.refcount = &refcount->slice_refcount; + res.data.refcounted.bytes = static_cast(buffer); + res.data.refcounted.length = length; + return res; +} + +static const grpc_slice_refcount_vtable stream_ref_slice_vtable = { + slice_stream_ref, /* ref */ + slice_stream_unref, /* unref */ + grpc_slice_default_eq_impl, /* eq */ + grpc_slice_default_hash_impl /* hash */ +}; + +#ifndef NDEBUG +void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs, + grpc_iomgr_cb_func cb, void* cb_arg, + const char* object_type) { + refcount->object_type = object_type; +#else +void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs, + grpc_iomgr_cb_func cb, void* cb_arg) { +#endif + gpr_ref_init(&refcount->refs, initial_refs); + GRPC_CLOSURE_INIT(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx); + refcount->slice_refcount.vtable = &stream_ref_slice_vtable; + refcount->slice_refcount.sub_refcount = &refcount->slice_refcount; +} + +static void move64(uint64_t* from, uint64_t* to) { + *to += *from; + *from = 0; +} + +void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats* from, + grpc_transport_one_way_stats* to) { + move64(&from->framing_bytes, &to->framing_bytes); + move64(&from->data_bytes, &to->data_bytes); + move64(&from->header_bytes, &to->header_bytes); +} + +void grpc_transport_move_stats(grpc_transport_stream_stats* from, + grpc_transport_stream_stats* to) { + grpc_transport_move_one_way_stats(&from->incoming, &to->incoming); + grpc_transport_move_one_way_stats(&from->outgoing, &to->outgoing); +} + +size_t grpc_transport_stream_size(grpc_transport* transport) { + return transport->vtable->sizeof_stream; +} + +void grpc_transport_destroy(grpc_transport* transport) { + transport->vtable->destroy(transport); +} + +int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream, + grpc_stream_refcount* refcount, + const void* server_data, gpr_arena* arena) { + return transport->vtable->init_stream(transport, stream, refcount, + server_data, arena); +} + +void grpc_transport_perform_stream_op(grpc_transport* transport, + grpc_stream* stream, + grpc_transport_stream_op_batch* op) { + transport->vtable->perform_stream_op(transport, stream, op); +} + +void grpc_transport_perform_op(grpc_transport* transport, + grpc_transport_op* op) { + transport->vtable->perform_op(transport, op); +} + +void grpc_transport_set_pops(grpc_transport* transport, grpc_stream* stream, + grpc_polling_entity* pollent) { + grpc_pollset* pollset; + grpc_pollset_set* pollset_set; + if ((pollset = grpc_polling_entity_pollset(pollent)) != nullptr) { + transport->vtable->set_pollset(transport, stream, pollset); + } else if ((pollset_set = 
grpc_polling_entity_pollset_set(pollent)) != + nullptr) { + transport->vtable->set_pollset_set(transport, stream, pollset_set); + } else { + abort(); + } +} + +void grpc_transport_destroy_stream(grpc_transport* transport, + grpc_stream* stream, + grpc_closure* then_schedule_closure) { + transport->vtable->destroy_stream(transport, stream, then_schedule_closure); +} + +grpc_endpoint* grpc_transport_get_endpoint(grpc_transport* transport) { + return transport->vtable->get_endpoint(transport); +} + +// This comment should be sung to the tune of +// "Supercalifragilisticexpialidocious": +// +// grpc_transport_stream_op_batch_finish_with_failure +// is a function that must always unref cancel_error +// though it lives in lib, it handles transport stream ops sure +// it's grpc_transport_stream_op_batch_finish_with_failure +void grpc_transport_stream_op_batch_finish_with_failure( + grpc_transport_stream_op_batch* batch, grpc_error* error, + grpc_call_combiner* call_combiner) { + if (batch->send_message) { + batch->payload->send_message.send_message.reset(); + } + if (batch->recv_message) { + GRPC_CALL_COMBINER_START( + call_combiner, batch->payload->recv_message.recv_message_ready, + GRPC_ERROR_REF(error), "failing recv_message_ready"); + } + if (batch->recv_initial_metadata) { + GRPC_CALL_COMBINER_START( + call_combiner, + batch->payload->recv_initial_metadata.recv_initial_metadata_ready, + GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready"); + } + GRPC_CLOSURE_SCHED(batch->on_complete, error); + if (batch->cancel_stream) { + GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error); + } +} + +typedef struct { + grpc_closure outer_on_complete; + grpc_closure* inner_on_complete; + grpc_transport_op op; +} made_transport_op; + +static void destroy_made_transport_op(void* arg, grpc_error* error) { + made_transport_op* op = static_cast(arg); + GRPC_CLOSURE_SCHED(op->inner_on_complete, GRPC_ERROR_REF(error)); + gpr_free(op); +} + +grpc_transport_op* grpc_make_transport_op(grpc_closure* on_complete) { + made_transport_op* op = + static_cast(gpr_malloc(sizeof(*op))); + GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_op, op, + grpc_schedule_on_exec_ctx); + op->inner_on_complete = on_complete; + memset(&op->op, 0, sizeof(op->op)); + op->op.on_consumed = &op->outer_on_complete; + return &op->op; +} + +typedef struct { + grpc_closure outer_on_complete; + grpc_closure* inner_on_complete; + grpc_transport_stream_op_batch op; + grpc_transport_stream_op_batch_payload payload; +} made_transport_stream_op; + +static void destroy_made_transport_stream_op(void* arg, grpc_error* error) { + made_transport_stream_op* op = static_cast(arg); + grpc_closure* c = op->inner_on_complete; + gpr_free(op); + GRPC_CLOSURE_RUN(c, GRPC_ERROR_REF(error)); +} + +grpc_transport_stream_op_batch* grpc_make_transport_stream_op( + grpc_closure* on_complete) { + made_transport_stream_op* op = + static_cast(gpr_zalloc(sizeof(*op))); + op->op.payload = &op->payload; + GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_stream_op, + op, grpc_schedule_on_exec_ctx); + op->inner_on_complete = on_complete; + op->op.on_complete = &op->outer_on_complete; + return &op->op; +} diff --git a/Sources/CgRPC/src/core/lib/transport/transport.h b/Sources/CgRPC/src/core/lib/transport/transport.h index 2cc3e379b..10e9df0f7 100644 --- a/Sources/CgRPC/src/core/lib/transport/transport.h +++ b/Sources/CgRPC/src/core/lib/transport/transport.h @@ -19,23 +19,28 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_TRANSPORT_H #define 
GRPC_CORE_LIB_TRANSPORT_TRANSPORT_H +#include + #include #include "src/core/lib/channel/context.h" +#include "src/core/lib/gpr/arena.h" #include "src/core/lib/iomgr/call_combiner.h" #include "src/core/lib/iomgr/endpoint.h" #include "src/core/lib/iomgr/polling_entity.h" #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/pollset_set.h" -#include "src/core/lib/support/arena.h" #include "src/core/lib/transport/byte_stream.h" #include "src/core/lib/transport/metadata_batch.h" -#ifdef __cplusplus -extern "C" { -#endif +/* Minimum and maximum protocol accepted versions. */ +#define GRPC_PROTOCOL_VERSION_MAX_MAJOR 2 +#define GRPC_PROTOCOL_VERSION_MAX_MINOR 1 +#define GRPC_PROTOCOL_VERSION_MIN_MAJOR 2 +#define GRPC_PROTOCOL_VERSION_MIN_MINOR 1 /* forward declarations */ + typedef struct grpc_transport grpc_transport; /* grpc_stream doesn't actually exist. It's used as a typesafe @@ -43,41 +48,38 @@ typedef struct grpc_transport grpc_transport; for a stream. */ typedef struct grpc_stream grpc_stream; -#ifndef NDEBUG -extern grpc_tracer_flag grpc_trace_stream_refcount; -#endif +extern grpc_core::DebugOnlyTraceFlag grpc_trace_stream_refcount; typedef struct grpc_stream_refcount { gpr_refcount refs; grpc_closure destroy; #ifndef NDEBUG - const char *object_type; + const char* object_type; #endif grpc_slice_refcount slice_refcount; } grpc_stream_refcount; #ifndef NDEBUG -void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs, - grpc_iomgr_cb_func cb, void *cb_arg, - const char *object_type); -void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason); -void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount, - const char *reason); +void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs, + grpc_iomgr_cb_func cb, void* cb_arg, + const char* object_type); +void grpc_stream_ref(grpc_stream_refcount* refcount, const char* reason); +void grpc_stream_unref(grpc_stream_refcount* refcount, const char* reason); #define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \ grpc_stream_ref_init(rc, ir, cb, cb_arg, objtype) #else -void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs, - grpc_iomgr_cb_func cb, void *cb_arg); -void grpc_stream_ref(grpc_stream_refcount *refcount); -void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount); +void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs, + grpc_iomgr_cb_func cb, void* cb_arg); +void grpc_stream_ref(grpc_stream_refcount* refcount); +void grpc_stream_unref(grpc_stream_refcount* refcount); #define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \ grpc_stream_ref_init(rc, ir, cb, cb_arg) #endif /* Wrap a buffer that is owned by some stream object into a slice that shares the same refcount */ -grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount *refcount, - void *buffer, size_t length); +grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount* refcount, + void* buffer, size_t length); typedef struct { uint64_t framing_bytes; @@ -90,14 +92,27 @@ typedef struct grpc_transport_stream_stats { grpc_transport_one_way_stats outgoing; } grpc_transport_stream_stats; -void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats *from, - grpc_transport_one_way_stats *to); - -void grpc_transport_move_stats(grpc_transport_stream_stats *from, - grpc_transport_stream_stats *to); - +void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats* from, + grpc_transport_one_way_stats* to); 
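An illustrative aside, not part of the patch: grpc_stream_ref/grpc_stream_unref above have different signatures depending on NDEBUG (debug builds carry a reason string for the stream_refcount tracer), so callers usually hide the difference behind a tiny macro in the same spirit as GRPC_STREAM_REF_INIT. A hypothetical example (MY_STREAM_REF/MY_STREAM_UNREF are made-up names):

#ifndef NDEBUG
#define MY_STREAM_REF(rc, reason) grpc_stream_ref((rc), (reason))
#define MY_STREAM_UNREF(rc, reason) grpc_stream_unref((rc), (reason))
#else
// release builds compile the reason string away entirely
#define MY_STREAM_REF(rc, reason) grpc_stream_ref((rc))
#define MY_STREAM_UNREF(rc, reason) grpc_stream_unref((rc))
#endif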
+ +void grpc_transport_move_stats(grpc_transport_stream_stats* from, + grpc_transport_stream_stats* to); + +// This struct (which is present in both grpc_transport_stream_op_batch +// and grpc_transport_op_batch) is a convenience to allow filters or +// transports to schedule a closure related to a particular batch without +// having to allocate memory. The general pattern is to initialize the +// closure with the callback arg set to the batch and extra_arg set to +// whatever state is associated with the handler (e.g., the call element +// or the transport stream object). +// +// Note that this can only be used by the current handler of a given +// batch on the way down the stack (i.e., whichever filter or transport is +// currently handling the batch). Once a filter or transport passes control +// of the batch to the next handler, it cannot depend on the contents of +// this struct anymore, because the next handler may reuse it. typedef struct { - void *extra_arg; + void* extra_arg; grpc_closure closure; } grpc_handler_private_op_data; @@ -110,10 +125,10 @@ typedef struct grpc_transport_stream_op_batch { /** Should be enqueued when all requested operations (excluding recv_message and recv_initial_metadata which have their own closures) in a given batch have been completed. */ - grpc_closure *on_complete; + grpc_closure* on_complete; /** Values for the stream op (fields set are determined by flags above) */ - grpc_transport_stream_op_batch_payload *payload; + grpc_transport_stream_op_batch_payload* payload; /** Send initial metadata to the peer, from the provided metadata batch. */ bool send_initial_metadata : 1; @@ -149,58 +164,69 @@ typedef struct grpc_transport_stream_op_batch { struct grpc_transport_stream_op_batch_payload { struct { - grpc_metadata_batch *send_initial_metadata; + grpc_metadata_batch* send_initial_metadata; /** Iff send_initial_metadata != NULL, flags associated with send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */ uint32_t send_initial_metadata_flags; // If non-NULL, will be set by the transport to the peer string // (a char*, which the caller takes ownership of). - gpr_atm *peer_string; + // Note: This pointer may be used by the transport after the + // send_initial_metadata op is completed. It must remain valid + // until the call is destroyed. + // Note: When a transport sets this, it must free the previous + // value, if any. + gpr_atm* peer_string; } send_initial_metadata; struct { - grpc_metadata_batch *send_trailing_metadata; + grpc_metadata_batch* send_trailing_metadata; } send_trailing_metadata; struct { // The transport (or a filter that decides to return a failure before - // the op gets down to the transport) is responsible for calling - // grpc_byte_stream_destroy() on this. + // the op gets down to the transport) takes ownership. // The batch's on_complete will not be called until after the byte - // stream is destroyed. - grpc_byte_stream *send_message; + // stream is orphaned. + grpc_core::OrphanablePtr send_message; } send_message; struct { - grpc_metadata_batch *recv_initial_metadata; - uint32_t *recv_flags; + grpc_metadata_batch* recv_initial_metadata; + // Flags are used only on the server side. If non-null, will be set to + // a bitfield of the GRPC_INITIAL_METADATA_xxx macros (e.g., to + // indicate if the call is idempotent). + uint32_t* recv_flags; /** Should be enqueued when initial metadata is ready to be processed. 
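An illustrative aside, not part of the patch: the grpc_handler_private_op_data comment above describes per-batch scratch space for whichever filter or transport currently owns the batch. A pseudocode-level sketch of the usual pattern, assuming the batch's handler_private member and the closure macros declared elsewhere in these sources (on_my_step and my_elem are hypothetical):

// Inside a filter that is currently the handler of `batch`:
static void on_my_step(void* arg, grpc_error* error) {
  grpc_transport_stream_op_batch* batch =
      static_cast<grpc_transport_stream_op_batch*>(arg);
  grpc_call_element* my_elem =
      static_cast<grpc_call_element*>(batch->handler_private.extra_arg);
  (void)my_elem;
  (void)error;
  // ... do per-batch work here, then hand the batch to the next handler ...
}

// ...before passing the batch on (the scratch space may be reused afterwards):
batch->handler_private.extra_arg = my_elem;
GRPC_CLOSURE_INIT(&batch->handler_private.closure, on_my_step, batch,
                  grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_SCHED(&batch->handler_private.closure, GRPC_ERROR_NONE);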
*/ - grpc_closure *recv_initial_metadata_ready; + grpc_closure* recv_initial_metadata_ready; // If not NULL, will be set to true if trailing metadata is // immediately available. This may be a signal that we received a // Trailers-Only response. - bool *trailing_metadata_available; + bool* trailing_metadata_available; // If non-NULL, will be set by the transport to the peer string // (a char*, which the caller takes ownership of). - gpr_atm *peer_string; + // Note: This pointer may be used by the transport after the + // recv_initial_metadata op is completed. It must remain valid + // until the call is destroyed. + // Note: When a transport sets this, it must free the previous + // value, if any. + gpr_atm* peer_string; } recv_initial_metadata; struct { // Will be set by the transport to point to the byte stream // containing a received message. - // The caller is responsible for calling grpc_byte_stream_destroy() - // on this byte stream. - grpc_byte_stream **recv_message; + // Will be NULL if trailing metadata is received instead of a message. + grpc_core::OrphanablePtr* recv_message; /** Should be enqueued when one message is ready to be processed. */ - grpc_closure *recv_message_ready; + grpc_closure* recv_message_ready; } recv_message; struct { - grpc_metadata_batch *recv_trailing_metadata; + grpc_metadata_batch* recv_trailing_metadata; } recv_trailing_metadata; struct { - grpc_transport_stream_stats *collect_stats; + grpc_transport_stream_stats* collect_stats; } collect_stats; /** Forcefully close this stream. @@ -216,43 +242,48 @@ struct grpc_transport_stream_op_batch_payload { struct { // Error contract: the transport that gets this op must cause cancel_error // to be unref'ed after processing it - grpc_error *cancel_error; + grpc_error* cancel_error; } cancel_stream; /* Indexes correspond to grpc_context_index enum values */ - grpc_call_context_element *context; + grpc_call_context_element* context; }; /** Transport op: a set of operations to perform on a transport as a whole */ typedef struct grpc_transport_op { /** Called when processing of this op is done. */ - grpc_closure *on_consumed; + grpc_closure* on_consumed; /** connectivity monitoring - set connectivity_state to NULL to unsubscribe */ - grpc_closure *on_connectivity_state_change; - grpc_connectivity_state *connectivity_state; + grpc_closure* on_connectivity_state_change; + grpc_connectivity_state* connectivity_state; /** should the transport be disconnected * Error contract: the transport that gets this op must cause * disconnect_with_error to be unref'ed after processing it */ - grpc_error *disconnect_with_error; + grpc_error* disconnect_with_error; /** what should the goaway contain? * Error contract: the transport that gets this op must cause * goaway_error to be unref'ed after processing it */ - grpc_error *goaway_error; + grpc_error* goaway_error; /** set the callback for accepting new streams; this is a permanent callback, unlike the other one-shot closures. 
If true, the callback is set to set_accept_stream_fn, with its user_data argument set to set_accept_stream_user_data */ bool set_accept_stream; - void (*set_accept_stream_fn)(grpc_exec_ctx *exec_ctx, void *user_data, - grpc_transport *transport, - const void *server_data); - void *set_accept_stream_user_data; + void (*set_accept_stream_fn)(void* user_data, grpc_transport* transport, + const void* server_data); + void* set_accept_stream_user_data; /** add this transport to a pollset */ - grpc_pollset *bind_pollset; + grpc_pollset* bind_pollset; /** add this transport to a pollset_set */ - grpc_pollset_set *bind_pollset_set; - /** send a ping, call this back if not NULL */ - grpc_closure *send_ping; + grpc_pollset_set* bind_pollset_set; + /** send a ping, if either on_initiate or on_ack is not NULL */ + struct { + /** Ping may be delayed by the transport, on_initiate callback will be + called when the ping is actually being sent. */ + grpc_closure* on_initiate; + /** Called when the ping ack is received */ + grpc_closure* on_ack; + } send_ping; /*************************************************************************** * remaining fields are initialized and used at the discretion of the @@ -263,7 +294,7 @@ typedef struct grpc_transport_op { /* Returns the amount of memory required to store a grpc_stream for this transport */ -size_t grpc_transport_stream_size(grpc_transport *transport); +size_t grpc_transport_stream_size(grpc_transport* transport); /* Initialize transport data for a stream. @@ -275,13 +306,12 @@ size_t grpc_transport_stream_size(grpc_transport *transport); stream - a pointer to uninitialized memory to initialize server_data - either NULL for a client initiated stream, or a pointer supplied from the accept_stream callback function */ -int grpc_transport_init_stream(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, grpc_stream *stream, - grpc_stream_refcount *refcount, - const void *server_data, gpr_arena *arena); +int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream, + grpc_stream_refcount* refcount, + const void* server_data, gpr_arena* arena); -void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport, - grpc_stream *stream, grpc_polling_entity *pollent); +void grpc_transport_set_pops(grpc_transport* transport, grpc_stream* stream, + grpc_polling_entity* pollent); /* Destroy transport data for a stream. 
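An illustrative aside, not part of the patch: send_ping is now a pair of closures (on_initiate fires when the ping is actually written, on_ack when the ack arrives) instead of a single closure. A rough sketch of requesting a ping with the helpers declared in this header (the closure storage and logging callback are hypothetical, and the null on_consumed is assumed to be acceptable here):

static grpc_closure g_ping_ack_closure;  // must outlive the transport op

static void on_ping_ack(void* arg, grpc_error* error) {
  (void)arg;
  gpr_log(GPR_DEBUG, "ping acked: %s", grpc_error_string(error));
}

static void request_ping(grpc_transport* transport) {
  grpc_transport_op* op = grpc_make_transport_op(nullptr);
  GRPC_CLOSURE_INIT(&g_ping_ack_closure, on_ping_ack, nullptr,
                    grpc_schedule_on_exec_ctx);
  op->send_ping.on_ack = &g_ping_ack_closure;  // on_initiate left unset here
  grpc_transport_perform_op(transport, op);
}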
@@ -293,17 +323,16 @@ void grpc_transport_set_pops(grpc_exec_ctx *exec_ctx, grpc_transport *transport, transport - the transport on which to create this stream stream - the grpc_stream to destroy (memory is still owned by the caller, but any child memory must be cleaned up) */ -void grpc_transport_destroy_stream(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, - grpc_stream *stream, - grpc_closure *then_schedule_closure); +void grpc_transport_destroy_stream(grpc_transport* transport, + grpc_stream* stream, + grpc_closure* then_schedule_closure); void grpc_transport_stream_op_batch_finish_with_failure( - grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *op, - grpc_error *error, grpc_call_combiner *call_combiner); + grpc_transport_stream_op_batch* op, grpc_error* error, + grpc_call_combiner* call_combiner); -char *grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch *op); -char *grpc_transport_op_string(grpc_transport_op *op); +char* grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch* op); +char* grpc_transport_op_string(grpc_transport_op* op); /* Send a batch of operations on a transport @@ -315,42 +344,35 @@ char *grpc_transport_op_string(grpc_transport_op *op); non-NULL and previously initialized by the same transport. op - a grpc_transport_stream_op_batch specifying the op to perform */ -void grpc_transport_perform_stream_op(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, - grpc_stream *stream, - grpc_transport_stream_op_batch *op); +void grpc_transport_perform_stream_op(grpc_transport* transport, + grpc_stream* stream, + grpc_transport_stream_op_batch* op); -void grpc_transport_perform_op(grpc_exec_ctx *exec_ctx, - grpc_transport *transport, - grpc_transport_op *op); +void grpc_transport_perform_op(grpc_transport* transport, + grpc_transport_op* op); /* Send a ping on a transport Calls cb with user data when a response is received. */ -void grpc_transport_ping(grpc_transport *transport, grpc_closure *cb); +void grpc_transport_ping(grpc_transport* transport, grpc_closure* cb); /* Advise peer of pending connection termination. 
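An illustrative aside, not part of the patch: a rough sketch of pushing a one-off cancellation batch through the declarations above, using grpc_make_transport_stream_op so the batch frees itself once the transport completes it (GRPC_ERROR_CANCELLED comes from error.h elsewhere in these sources, and per the contract above the transport unrefs the cancel error):

static void cancel_stream_sketch(grpc_transport* transport,
                                 grpc_stream* stream) {
  grpc_transport_stream_op_batch* batch =
      grpc_make_transport_stream_op(nullptr);
  batch->cancel_stream = true;
  batch->payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
  grpc_transport_perform_stream_op(transport, stream, batch);
}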
*/ -void grpc_transport_goaway(grpc_transport *transport, grpc_status_code status, +void grpc_transport_goaway(grpc_transport* transport, grpc_status_code status, grpc_slice debug_data); /* Destroy the transport */ -void grpc_transport_destroy(grpc_exec_ctx *exec_ctx, grpc_transport *transport); +void grpc_transport_destroy(grpc_transport* transport); /* Get the endpoint used by \a transport */ -grpc_endpoint *grpc_transport_get_endpoint(grpc_exec_ctx *exec_ctx, - grpc_transport *transport); +grpc_endpoint* grpc_transport_get_endpoint(grpc_transport* transport); /* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to \a on_consumed and then delete the returned transport op */ -grpc_transport_op *grpc_make_transport_op(grpc_closure *on_consumed); +grpc_transport_op* grpc_make_transport_op(grpc_closure* on_consumed); /* Allocate a grpc_transport_stream_op_batch, and preconfigure the on_consumed closure to \a on_consumed and then delete the returned transport op */ -grpc_transport_stream_op_batch *grpc_make_transport_stream_op( - grpc_closure *on_consumed); - -#ifdef __cplusplus -} -#endif +grpc_transport_stream_op_batch* grpc_make_transport_stream_op( + grpc_closure* on_consumed); #endif /* GRPC_CORE_LIB_TRANSPORT_TRANSPORT_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/transport_impl.h b/Sources/CgRPC/src/core/lib/transport/transport_impl.h index bbae69c22..ba5e05df0 100644 --- a/Sources/CgRPC/src/core/lib/transport/transport_impl.h +++ b/Sources/CgRPC/src/core/lib/transport/transport_impl.h @@ -19,6 +19,8 @@ #ifndef GRPC_CORE_LIB_TRANSPORT_TRANSPORT_IMPL_H #define GRPC_CORE_LIB_TRANSPORT_TRANSPORT_IMPL_H +#include + #include "src/core/lib/transport/transport.h" typedef struct grpc_transport_vtable { @@ -27,46 +29,43 @@ typedef struct grpc_transport_vtable { size_t sizeof_stream; /* = sizeof(transport stream) */ /* name of this transport implementation */ - const char *name; + const char* name; /* implementation of grpc_transport_init_stream */ - int (*init_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_stream_refcount *refcount, - const void *server_data, gpr_arena *arena); + int (*init_stream)(grpc_transport* self, grpc_stream* stream, + grpc_stream_refcount* refcount, const void* server_data, + gpr_arena* arena); /* implementation of grpc_transport_set_pollset */ - void (*set_pollset)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_pollset *pollset); + void (*set_pollset)(grpc_transport* self, grpc_stream* stream, + grpc_pollset* pollset); /* implementation of grpc_transport_set_pollset */ - void (*set_pollset_set)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, grpc_pollset_set *pollset_set); + void (*set_pollset_set)(grpc_transport* self, grpc_stream* stream, + grpc_pollset_set* pollset_set); /* implementation of grpc_transport_perform_stream_op */ - void (*perform_stream_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, - grpc_transport_stream_op_batch *op); + void (*perform_stream_op)(grpc_transport* self, grpc_stream* stream, + grpc_transport_stream_op_batch* op); /* implementation of grpc_transport_perform_op */ - void (*perform_op)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_transport_op *op); + void (*perform_op)(grpc_transport* self, grpc_transport_op* op); /* implementation of grpc_transport_destroy_stream */ - void (*destroy_stream)(grpc_exec_ctx *exec_ctx, grpc_transport *self, - grpc_stream *stream, - grpc_closure 
*then_schedule_closure); + void (*destroy_stream)(grpc_transport* self, grpc_stream* stream, + grpc_closure* then_schedule_closure); /* implementation of grpc_transport_destroy */ - void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_transport *self); + void (*destroy)(grpc_transport* self); /* implementation of grpc_transport_get_endpoint */ - grpc_endpoint *(*get_endpoint)(grpc_exec_ctx *exec_ctx, grpc_transport *self); + grpc_endpoint* (*get_endpoint)(grpc_transport* self); } grpc_transport_vtable; /* an instance of a grpc transport */ struct grpc_transport { /* pointer to a vtable defining operations on this transport */ - const grpc_transport_vtable *vtable; + const grpc_transport_vtable* vtable; }; #endif /* GRPC_CORE_LIB_TRANSPORT_TRANSPORT_IMPL_H */ diff --git a/Sources/CgRPC/src/core/lib/transport/transport_op_string.c b/Sources/CgRPC/src/core/lib/transport/transport_op_string.cc similarity index 71% rename from Sources/CgRPC/src/core/lib/transport/transport_op_string.c rename to Sources/CgRPC/src/core/lib/transport/transport_op_string.cc index 858664715..99af7c193 100644 --- a/Sources/CgRPC/src/core/lib/transport/transport_op_string.c +++ b/Sources/CgRPC/src/core/lib/transport/transport_op_string.cc @@ -16,23 +16,25 @@ * */ +#include + #include "src/core/lib/channel/channel_stack.h" +#include #include #include #include #include #include -#include +#include "src/core/lib/gpr/string.h" #include "src/core/lib/slice/slice_string_helpers.h" -#include "src/core/lib/support/string.h" #include "src/core/lib/transport/connectivity_state.h" /* These routines are here to facilitate debugging - they produce string representations of various transport data structures */ -static void put_metadata(gpr_strvec *b, grpc_mdelem md) { +static void put_metadata(gpr_strvec* b, grpc_mdelem md) { gpr_strvec_add(b, gpr_strdup("key=")); gpr_strvec_add( b, grpc_dump_slice(GRPC_MDKEY(md), GPR_DUMP_HEX | GPR_DUMP_ASCII)); @@ -42,24 +44,23 @@ static void put_metadata(gpr_strvec *b, grpc_mdelem md) { b, grpc_dump_slice(GRPC_MDVALUE(md), GPR_DUMP_HEX | GPR_DUMP_ASCII)); } -static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) { - grpc_linked_mdelem *m; - for (m = md.list.head; m != NULL; m = m->next) { +static void put_metadata_list(gpr_strvec* b, grpc_metadata_batch md) { + grpc_linked_mdelem* m; + for (m = md.list.head; m != nullptr; m = m->next) { if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", ")); put_metadata(b, m->md); } - if (gpr_time_cmp(md.deadline, gpr_inf_future(md.deadline.clock_type)) != 0) { - char *tmp; - gpr_asprintf(&tmp, " deadline=%" PRId64 ".%09d", md.deadline.tv_sec, - md.deadline.tv_nsec); + if (md.deadline != GRPC_MILLIS_INF_FUTURE) { + char* tmp; + gpr_asprintf(&tmp, " deadline=%" PRIdPTR, md.deadline); gpr_strvec_add(b, tmp); } } -char *grpc_transport_stream_op_batch_string( - grpc_transport_stream_op_batch *op) { - char *tmp; - char *out; +char* grpc_transport_stream_op_batch_string( + grpc_transport_stream_op_batch* op) { + char* tmp; + char* out; gpr_strvec b; gpr_strvec_init(&b); @@ -74,9 +75,16 @@ char *grpc_transport_stream_op_batch_string( if (op->send_message) { gpr_strvec_add(&b, gpr_strdup(" ")); - gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d", - op->payload->send_message.send_message->flags, - op->payload->send_message.send_message->length); + if (op->payload->send_message.send_message != nullptr) { + gpr_asprintf(&tmp, "SEND_MESSAGE:flags=0x%08x:len=%d", + op->payload->send_message.send_message->flags(), + 
op->payload->send_message.send_message->length()); + } else { + // This can happen when we check a batch after the transport has + // processed and cleared the send_message op. + tmp = + gpr_strdup("SEND_MESSAGE(flag and length unknown, already orphaned)"); + } gpr_strvec_add(&b, tmp); } @@ -105,7 +113,7 @@ char *grpc_transport_stream_op_batch_string( if (op->cancel_stream) { gpr_strvec_add(&b, gpr_strdup(" ")); - const char *msg = + const char* msg = grpc_error_string(op->payload->cancel_stream.cancel_error); gpr_asprintf(&tmp, "CANCEL:%s", msg); @@ -119,24 +127,24 @@ char *grpc_transport_stream_op_batch_string( gpr_strvec_add(&b, tmp); } - out = gpr_strvec_flatten(&b, NULL); + out = gpr_strvec_flatten(&b, nullptr); gpr_strvec_destroy(&b); return out; } -char *grpc_transport_op_string(grpc_transport_op *op) { - char *tmp; - char *out; +char* grpc_transport_op_string(grpc_transport_op* op) { + char* tmp; + char* out; bool first = true; gpr_strvec b; gpr_strvec_init(&b); - if (op->on_connectivity_state_change != NULL) { + if (op->on_connectivity_state_change != nullptr) { if (!first) gpr_strvec_add(&b, gpr_strdup(" ")); first = false; - if (op->connectivity_state != NULL) { + if (op->connectivity_state != nullptr) { gpr_asprintf(&tmp, "ON_CONNECTIVITY_STATE_CHANGE:p=%p:from=%s", op->on_connectivity_state_change, grpc_connectivity_state_name(*op->connectivity_state)); @@ -151,7 +159,7 @@ char *grpc_transport_op_string(grpc_transport_op *op) { if (op->disconnect_with_error != GRPC_ERROR_NONE) { if (!first) gpr_strvec_add(&b, gpr_strdup(" ")); first = false; - const char *err = grpc_error_string(op->disconnect_with_error); + const char* err = grpc_error_string(op->disconnect_with_error); gpr_asprintf(&tmp, "DISCONNECT:%s", err); gpr_strvec_add(&b, tmp); } @@ -159,7 +167,7 @@ char *grpc_transport_op_string(grpc_transport_op *op) { if (op->goaway_error) { if (!first) gpr_strvec_add(&b, gpr_strdup(" ")); first = false; - const char *msg = grpc_error_string(op->goaway_error); + const char* msg = grpc_error_string(op->goaway_error); gpr_asprintf(&tmp, "SEND_GOAWAY:%s", msg); gpr_strvec_add(&b, tmp); @@ -173,34 +181,34 @@ char *grpc_transport_op_string(grpc_transport_op *op) { gpr_strvec_add(&b, tmp); } - if (op->bind_pollset != NULL) { + if (op->bind_pollset != nullptr) { if (!first) gpr_strvec_add(&b, gpr_strdup(" ")); first = false; gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET")); } - if (op->bind_pollset_set != NULL) { + if (op->bind_pollset_set != nullptr) { if (!first) gpr_strvec_add(&b, gpr_strdup(" ")); first = false; gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET_SET")); } - if (op->send_ping != NULL) { + if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) { if (!first) gpr_strvec_add(&b, gpr_strdup(" ")); - first = false; + // first = false; gpr_strvec_add(&b, gpr_strdup("SEND_PING")); } - out = gpr_strvec_flatten(&b, NULL); + out = gpr_strvec_flatten(&b, nullptr); gpr_strvec_destroy(&b); return out; } -void grpc_call_log_op(const char *file, int line, gpr_log_severity severity, - grpc_call_element *elem, - grpc_transport_stream_op_batch *op) { - char *str = grpc_transport_stream_op_batch_string(op); +void grpc_call_log_op(const char* file, int line, gpr_log_severity severity, + grpc_call_element* elem, + grpc_transport_stream_op_batch* op) { + char* str = grpc_transport_stream_op_batch_string(op); gpr_log(file, line, severity, "OP[%s:%p]: %s", elem->filter->name, elem, str); gpr_free(str); } diff --git a/Sources/CgRPC/src/core/plugin_registry/grpc_plugin_registry.c 
b/Sources/CgRPC/src/core/plugin_registry/grpc_plugin_registry.cc similarity index 55% rename from Sources/CgRPC/src/core/plugin_registry/grpc_plugin_registry.c rename to Sources/CgRPC/src/core/plugin_registry/grpc_plugin_registry.cc index 9cacf3d30..e371310fa 100644 --- a/Sources/CgRPC/src/core/plugin_registry/grpc_plugin_registry.c +++ b/Sources/CgRPC/src/core/plugin_registry/grpc_plugin_registry.cc @@ -16,56 +16,58 @@ * */ +#include + #include -extern void grpc_http_filters_init(void); -extern void grpc_http_filters_shutdown(void); -extern void grpc_chttp2_plugin_init(void); -extern void grpc_chttp2_plugin_shutdown(void); -extern void grpc_tsi_gts_init(void); -extern void grpc_tsi_gts_shutdown(void); -extern void grpc_deadline_filter_init(void); -extern void grpc_deadline_filter_shutdown(void); -extern void grpc_client_channel_init(void); -extern void grpc_client_channel_shutdown(void); -extern void grpc_inproc_plugin_init(void); -extern void grpc_inproc_plugin_shutdown(void); -extern void grpc_resolver_fake_init(void); -extern void grpc_resolver_fake_shutdown(void); -extern void grpc_lb_policy_grpclb_init(void); -extern void grpc_lb_policy_grpclb_shutdown(void); -extern void grpc_lb_policy_pick_first_init(void); -extern void grpc_lb_policy_pick_first_shutdown(void); -extern void grpc_lb_policy_round_robin_init(void); -extern void grpc_lb_policy_round_robin_shutdown(void); -extern void grpc_resolver_dns_ares_init(void); -extern void grpc_resolver_dns_ares_shutdown(void); -extern void grpc_resolver_dns_native_init(void); -extern void grpc_resolver_dns_native_shutdown(void); -extern void grpc_resolver_sockaddr_init(void); -extern void grpc_resolver_sockaddr_shutdown(void); -extern void grpc_server_load_reporting_plugin_init(void); -extern void grpc_server_load_reporting_plugin_shutdown(void); -extern void census_grpc_plugin_init(void); -extern void census_grpc_plugin_shutdown(void); -extern void grpc_max_age_filter_init(void); -extern void grpc_max_age_filter_shutdown(void); -extern void grpc_message_size_filter_init(void); -extern void grpc_message_size_filter_shutdown(void); -extern void grpc_workaround_cronet_compression_filter_init(void); -extern void grpc_workaround_cronet_compression_filter_shutdown(void); +void grpc_http_filters_init(void); +void grpc_http_filters_shutdown(void); +void grpc_chttp2_plugin_init(void); +void grpc_chttp2_plugin_shutdown(void); +void grpc_deadline_filter_init(void); +void grpc_deadline_filter_shutdown(void); +void grpc_client_channel_init(void); +void grpc_client_channel_shutdown(void); +void grpc_tsi_alts_init(void); +void grpc_tsi_alts_shutdown(void); +void grpc_inproc_plugin_init(void); +void grpc_inproc_plugin_shutdown(void); +void grpc_resolver_fake_init(void); +void grpc_resolver_fake_shutdown(void); +void grpc_lb_policy_grpclb_init(void); +void grpc_lb_policy_grpclb_shutdown(void); +void grpc_lb_policy_pick_first_init(void); +void grpc_lb_policy_pick_first_shutdown(void); +void grpc_lb_policy_round_robin_init(void); +void grpc_lb_policy_round_robin_shutdown(void); +void grpc_resolver_dns_ares_init(void); +void grpc_resolver_dns_ares_shutdown(void); +void grpc_resolver_dns_native_init(void); +void grpc_resolver_dns_native_shutdown(void); +void grpc_resolver_sockaddr_init(void); +void grpc_resolver_sockaddr_shutdown(void); +void grpc_server_load_reporting_plugin_init(void); +void grpc_server_load_reporting_plugin_shutdown(void); +void grpc_max_age_filter_init(void); +void grpc_max_age_filter_shutdown(void); +void grpc_message_size_filter_init(void); 
+void grpc_message_size_filter_shutdown(void); +void grpc_client_authority_filter_init(void); +void grpc_client_authority_filter_shutdown(void); +void grpc_workaround_cronet_compression_filter_init(void); +void grpc_workaround_cronet_compression_filter_shutdown(void); void grpc_register_built_in_plugins(void) { grpc_register_plugin(grpc_http_filters_init, grpc_http_filters_shutdown); grpc_register_plugin(grpc_chttp2_plugin_init, grpc_chttp2_plugin_shutdown); - grpc_register_plugin(grpc_tsi_gts_init, - grpc_tsi_gts_shutdown); grpc_register_plugin(grpc_deadline_filter_init, grpc_deadline_filter_shutdown); grpc_register_plugin(grpc_client_channel_init, grpc_client_channel_shutdown); + grpc_register_plugin(grpc_tsi_alts_init, + grpc_tsi_alts_shutdown); grpc_register_plugin(grpc_inproc_plugin_init, grpc_inproc_plugin_shutdown); grpc_register_plugin(grpc_resolver_fake_init, @@ -84,12 +86,12 @@ void grpc_register_built_in_plugins(void) { grpc_resolver_sockaddr_shutdown); grpc_register_plugin(grpc_server_load_reporting_plugin_init, grpc_server_load_reporting_plugin_shutdown); - grpc_register_plugin(census_grpc_plugin_init, - census_grpc_plugin_shutdown); grpc_register_plugin(grpc_max_age_filter_init, grpc_max_age_filter_shutdown); grpc_register_plugin(grpc_message_size_filter_init, grpc_message_size_filter_shutdown); + grpc_register_plugin(grpc_client_authority_filter_init, + grpc_client_authority_filter_shutdown); grpc_register_plugin(grpc_workaround_cronet_compression_filter_init, grpc_workaround_cronet_compression_filter_shutdown); } diff --git a/Sources/CgRPC/src/core/tsi/alts/crypt/aes_gcm.cc b/Sources/CgRPC/src/core/tsi/alts/crypt/aes_gcm.cc new file mode 100644 index 000000000..02b1ac449 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/crypt/aes_gcm.cc @@ -0,0 +1,687 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/crypt/gsec.h" + +#include +#include +#include +#include +#include +#include + +#include + +constexpr size_t kKdfKeyLen = 32; +constexpr size_t kKdfCounterLen = 6; +constexpr size_t kKdfCounterOffset = 2; +constexpr size_t kRekeyAeadKeyLen = kAes128GcmKeyLength; + +/* Struct for additional data required if rekeying is enabled. */ +struct gsec_aes_gcm_aead_rekey_data { + uint8_t kdf_counter[kKdfCounterLen]; + uint8_t nonce_mask[kAesGcmNonceLength]; +}; + +/* Main struct for AES_GCM crypter interface. 
*/ +struct gsec_aes_gcm_aead_crypter { + gsec_aead_crypter crypter; + size_t key_length; + size_t nonce_length; + size_t tag_length; + uint8_t* key; + gsec_aes_gcm_aead_rekey_data* rekey_data; + EVP_CIPHER_CTX* ctx; +}; + +static char* aes_gcm_get_openssl_errors() { + BIO* bio = BIO_new(BIO_s_mem()); + ERR_print_errors(bio); + BUF_MEM* mem = nullptr; + char* error_msg = nullptr; + BIO_get_mem_ptr(bio, &mem); + if (mem != nullptr) { + error_msg = static_cast(gpr_malloc(mem->length + 1)); + memcpy(error_msg, mem->data, mem->length); + error_msg[mem->length] = '\0'; + } + BIO_free_all(bio); + return error_msg; +} + +static void aes_gcm_format_errors(const char* error_msg, char** error_details) { + if (error_details == nullptr) { + return; + } + unsigned long error = ERR_get_error(); + if (error == 0 && error_msg != nullptr) { + *error_details = static_cast(gpr_malloc(strlen(error_msg) + 1)); + memcpy(*error_details, error_msg, strlen(error_msg) + 1); + return; + } + char* openssl_errors = aes_gcm_get_openssl_errors(); + if (openssl_errors != nullptr && error_msg != nullptr) { + size_t len = strlen(error_msg) + strlen(openssl_errors) + 2; /* ", " */ + *error_details = static_cast(gpr_malloc(len + 1)); + snprintf(*error_details, len + 1, "%s, %s", error_msg, openssl_errors); + gpr_free(openssl_errors); + } +} + +static grpc_status_code gsec_aes_gcm_aead_crypter_max_ciphertext_and_tag_length( + const gsec_aead_crypter* crypter, size_t plaintext_length, + size_t* max_ciphertext_and_tag_length, char** error_details) { + if (max_ciphertext_and_tag_length == nullptr) { + aes_gcm_format_errors("max_ciphertext_and_tag_length is nullptr.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + gsec_aes_gcm_aead_crypter* aes_gcm_crypter = + reinterpret_cast( + const_cast(crypter)); + *max_ciphertext_and_tag_length = + plaintext_length + aes_gcm_crypter->tag_length; + return GRPC_STATUS_OK; +} + +static grpc_status_code gsec_aes_gcm_aead_crypter_max_plaintext_length( + const gsec_aead_crypter* crypter, size_t ciphertext_and_tag_length, + size_t* max_plaintext_length, char** error_details) { + if (max_plaintext_length == nullptr) { + aes_gcm_format_errors("max_plaintext_length is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + gsec_aes_gcm_aead_crypter* aes_gcm_crypter = + reinterpret_cast( + const_cast(crypter)); + if (ciphertext_and_tag_length < aes_gcm_crypter->tag_length) { + *max_plaintext_length = 0; + aes_gcm_format_errors( + "ciphertext_and_tag_length is smaller than tag_length.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + *max_plaintext_length = + ciphertext_and_tag_length - aes_gcm_crypter->tag_length; + return GRPC_STATUS_OK; +} + +static grpc_status_code gsec_aes_gcm_aead_crypter_nonce_length( + const gsec_aead_crypter* crypter, size_t* nonce_length, + char** error_details) { + if (nonce_length == nullptr) { + aes_gcm_format_errors("nonce_length is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + gsec_aes_gcm_aead_crypter* aes_gcm_crypter = + reinterpret_cast( + const_cast(crypter)); + *nonce_length = aes_gcm_crypter->nonce_length; + return GRPC_STATUS_OK; +} + +static grpc_status_code gsec_aes_gcm_aead_crypter_key_length( + const gsec_aead_crypter* crypter, size_t* key_length, + char** error_details) { + if (key_length == nullptr) { + aes_gcm_format_errors("key_length is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + gsec_aes_gcm_aead_crypter* aes_gcm_crypter = + reinterpret_cast( + 
const_cast(crypter)); + *key_length = aes_gcm_crypter->key_length; + return GRPC_STATUS_OK; +} + +static grpc_status_code gsec_aes_gcm_aead_crypter_tag_length( + const gsec_aead_crypter* crypter, size_t* tag_length, + char** error_details) { + if (tag_length == nullptr) { + aes_gcm_format_errors("tag_length is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + gsec_aes_gcm_aead_crypter* aes_gcm_crypter = + reinterpret_cast( + const_cast(crypter)); + *tag_length = aes_gcm_crypter->tag_length; + return GRPC_STATUS_OK; +} + +static void aes_gcm_mask_nonce(uint8_t* dst, const uint8_t* nonce, + const uint8_t* mask) { + uint64_t mask1; + uint32_t mask2; + memcpy(&mask1, mask, sizeof(mask1)); + memcpy(&mask2, mask + sizeof(mask1), sizeof(mask2)); + uint64_t nonce1; + uint32_t nonce2; + memcpy(&nonce1, nonce, sizeof(nonce1)); + memcpy(&nonce2, nonce + sizeof(nonce1), sizeof(nonce2)); + nonce1 ^= mask1; + nonce2 ^= mask2; + memcpy(dst, &nonce1, sizeof(nonce1)); + memcpy(dst + sizeof(nonce1), &nonce2, sizeof(nonce2)); +} + +static grpc_status_code aes_gcm_derive_aead_key(uint8_t* dst, + const uint8_t* kdf_key, + const uint8_t* kdf_counter) { + unsigned char buf[EVP_MAX_MD_SIZE]; + unsigned char ctr = 1; +#if OPENSSL_VERSION_NUMBER < 0x10100000L + HMAC_CTX hmac; + HMAC_CTX_init(&hmac); + if (!HMAC_Init_ex(&hmac, kdf_key, kKdfKeyLen, EVP_sha256(), nullptr) || + !HMAC_Update(&hmac, kdf_counter, kKdfCounterLen) || + !HMAC_Update(&hmac, &ctr, 1) || !HMAC_Final(&hmac, buf, nullptr)) { + HMAC_CTX_cleanup(&hmac); + return GRPC_STATUS_INTERNAL; + } + HMAC_CTX_cleanup(&hmac); +#else + HMAC_CTX* hmac = HMAC_CTX_new(); + if (hmac == nullptr) { + return GRPC_STATUS_INTERNAL; + } + if (!HMAC_Init_ex(hmac, kdf_key, kKdfKeyLen, EVP_sha256(), nullptr) || + !HMAC_Update(hmac, kdf_counter, kKdfCounterLen) || + !HMAC_Update(hmac, &ctr, 1) || !HMAC_Final(hmac, buf, nullptr)) { + HMAC_CTX_free(hmac); + return GRPC_STATUS_INTERNAL; + } + HMAC_CTX_free(hmac); +#endif + memcpy(dst, buf, kRekeyAeadKeyLen); + return GRPC_STATUS_OK; +} + +static grpc_status_code aes_gcm_rekey_if_required( + gsec_aes_gcm_aead_crypter* aes_gcm_crypter, const uint8_t* nonce, + char** error_details) { + // If rekey_data is nullptr, then rekeying is not supported and not required. + // If bytes 2-7 of kdf_counter differ from the (per message) nonce, then the + // encryption key is recomputed from a new kdf_counter to ensure that we don't + // encrypt more than 2^16 messages per encryption key (in each direction). 
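// A minimal standalone sketch of the rekey trigger described in the comment
// above, assuming only the nonce layout used in this file (bytes 2..7 carry
// the KDF counter, mirroring kKdfCounterOffset / kKdfCounterLen); the names
// below are illustrative, not part of this translation unit.
#include <cstdint>
#include <cstring>

enum { kSketchKdfCounterOffset = 2, kSketchKdfCounterLen = 6 };

// True once the KDF-counter bytes of the per-record nonce no longer match the
// counter the current AEAD key was derived from, i.e. after at most 2^16
// records (the two nonce bytes below the KDF counter) a fresh key is needed.
static bool sketch_needs_rekey(const uint8_t* current_kdf_counter,
                               const uint8_t* nonce) {
  return memcmp(current_kdf_counter, nonce + kSketchKdfCounterOffset,
                kSketchKdfCounterLen) != 0;
}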
+ if (aes_gcm_crypter->rekey_data == nullptr || + memcmp(aes_gcm_crypter->rekey_data->kdf_counter, + nonce + kKdfCounterOffset, kKdfCounterLen) == 0) { + return GRPC_STATUS_OK; + } + memcpy(aes_gcm_crypter->rekey_data->kdf_counter, nonce + kKdfCounterOffset, + kKdfCounterLen); + uint8_t aead_key[kRekeyAeadKeyLen]; + if (aes_gcm_derive_aead_key(aead_key, aes_gcm_crypter->key, + aes_gcm_crypter->rekey_data->kdf_counter) != + GRPC_STATUS_OK) { + aes_gcm_format_errors("Rekeying failed in key derivation.", error_details); + return GRPC_STATUS_INTERNAL; + } + if (!EVP_DecryptInit_ex(aes_gcm_crypter->ctx, nullptr, nullptr, aead_key, + nullptr)) { + aes_gcm_format_errors("Rekeying failed in context update.", error_details); + return GRPC_STATUS_INTERNAL; + } + return GRPC_STATUS_OK; +} + +static grpc_status_code gsec_aes_gcm_aead_crypter_encrypt_iovec( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const struct iovec* aad_vec, size_t aad_vec_length, + const struct iovec* plaintext_vec, size_t plaintext_vec_length, + struct iovec ciphertext_vec, size_t* ciphertext_bytes_written, + char** error_details) { + gsec_aes_gcm_aead_crypter* aes_gcm_crypter = + reinterpret_cast(crypter); + // Input checks + if (nonce == nullptr) { + aes_gcm_format_errors("Nonce buffer is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (kAesGcmNonceLength != nonce_length) { + aes_gcm_format_errors("Nonce buffer has the wrong length.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (aad_vec_length > 0 && aad_vec == nullptr) { + aes_gcm_format_errors("Non-zero aad_vec_length but aad_vec is nullptr.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (plaintext_vec_length > 0 && plaintext_vec == nullptr) { + aes_gcm_format_errors( + "Non-zero plaintext_vec_length but plaintext_vec is nullptr.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (ciphertext_bytes_written == nullptr) { + aes_gcm_format_errors("bytes_written is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + *ciphertext_bytes_written = 0; + // rekey if required + if (aes_gcm_rekey_if_required(aes_gcm_crypter, nonce, error_details) != + GRPC_STATUS_OK) { + return GRPC_STATUS_INTERNAL; + } + // mask nonce if required + const uint8_t* nonce_aead = nonce; + uint8_t nonce_masked[kAesGcmNonceLength]; + if (aes_gcm_crypter->rekey_data != nullptr) { + aes_gcm_mask_nonce(nonce_masked, aes_gcm_crypter->rekey_data->nonce_mask, + nonce); + nonce_aead = nonce_masked; + } + // init openssl context + if (!EVP_EncryptInit_ex(aes_gcm_crypter->ctx, nullptr, nullptr, nullptr, + nonce_aead)) { + aes_gcm_format_errors("Initializing nonce failed", error_details); + return GRPC_STATUS_INTERNAL; + } + // process aad + size_t i; + for (i = 0; i < aad_vec_length; i++) { + const uint8_t* aad = static_cast(aad_vec[i].iov_base); + size_t aad_length = aad_vec[i].iov_len; + if (aad_length == 0) { + continue; + } + size_t aad_bytes_read = 0; + if (aad == nullptr) { + aes_gcm_format_errors("aad is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (!EVP_EncryptUpdate(aes_gcm_crypter->ctx, nullptr, + reinterpret_cast(&aad_bytes_read), aad, + static_cast(aad_length)) || + aad_bytes_read != aad_length) { + aes_gcm_format_errors("Setting authenticated associated data failed", + error_details); + return GRPC_STATUS_INTERNAL; + } + } + uint8_t* ciphertext = static_cast(ciphertext_vec.iov_base); + size_t ciphertext_length = ciphertext_vec.iov_len; + if 
(ciphertext == nullptr) { + aes_gcm_format_errors("ciphertext is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + // process plaintext + for (i = 0; i < plaintext_vec_length; i++) { + const uint8_t* plaintext = static_cast(plaintext_vec[i].iov_base); + size_t plaintext_length = plaintext_vec[i].iov_len; + if (plaintext == nullptr) { + if (plaintext_length == 0) { + continue; + } + aes_gcm_format_errors("plaintext is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (ciphertext_length < plaintext_length) { + aes_gcm_format_errors( + "ciphertext is not large enough to hold the result.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + int bytes_written = 0; + int bytes_to_write = static_cast(plaintext_length); + if (!EVP_EncryptUpdate(aes_gcm_crypter->ctx, ciphertext, &bytes_written, + plaintext, bytes_to_write)) { + aes_gcm_format_errors("Encrypting plaintext failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + if (bytes_written > bytes_to_write) { + aes_gcm_format_errors("More bytes written than expected.", error_details); + return GRPC_STATUS_INTERNAL; + } + ciphertext += bytes_written; + ciphertext_length -= bytes_written; + } + int bytes_written_temp = 0; + if (!EVP_EncryptFinal_ex(aes_gcm_crypter->ctx, nullptr, + &bytes_written_temp)) { + aes_gcm_format_errors("Finalizing encryption failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + if (bytes_written_temp != 0) { + aes_gcm_format_errors("Openssl wrote some unexpected bytes.", + error_details); + return GRPC_STATUS_INTERNAL; + } + if (ciphertext_length < kAesGcmTagLength) { + aes_gcm_format_errors("ciphertext is too small to hold a tag.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + + if (!EVP_CIPHER_CTX_ctrl(aes_gcm_crypter->ctx, EVP_CTRL_GCM_GET_TAG, + kAesGcmTagLength, ciphertext)) { + aes_gcm_format_errors("Writing tag failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + ciphertext += kAesGcmTagLength; + ciphertext_length -= kAesGcmTagLength; + *ciphertext_bytes_written = ciphertext_vec.iov_len - ciphertext_length; + return GRPC_STATUS_OK; +} + +static grpc_status_code gsec_aes_gcm_aead_crypter_decrypt_iovec( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const struct iovec* aad_vec, size_t aad_vec_length, + const struct iovec* ciphertext_vec, size_t ciphertext_vec_length, + struct iovec plaintext_vec, size_t* plaintext_bytes_written, + char** error_details) { + gsec_aes_gcm_aead_crypter* aes_gcm_crypter = + reinterpret_cast( + const_cast(crypter)); + if (nonce == nullptr) { + aes_gcm_format_errors("Nonce buffer is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (kAesGcmNonceLength != nonce_length) { + aes_gcm_format_errors("Nonce buffer has the wrong length.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (aad_vec_length > 0 && aad_vec == nullptr) { + aes_gcm_format_errors("Non-zero aad_vec_length but aad_vec is nullptr.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (ciphertext_vec_length > 0 && ciphertext_vec == nullptr) { + aes_gcm_format_errors( + "Non-zero plaintext_vec_length but plaintext_vec is nullptr.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + // Compute the total length so we can ensure we don't pass the tag into + // EVP_decrypt. 
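// A short sketch of the length bookkeeping the comment above calls for, under
// the same assumption as the code below: the final kAesGcmTagLength (16)
// bytes of the concatenated ciphertext iovecs are the GCM tag and must not be
// passed to EVP_DecryptUpdate. Names here are illustrative.
#include <cstddef>

enum { kSketchTagLen = 16 };

// Given the individual iovec lengths, returns how many leading bytes are
// payload to decrypt (everything except the trailing tag); returns 0 when the
// input cannot even hold a full tag.
static size_t sketch_payload_bytes(const size_t* iov_lens, size_t n) {
  size_t total = 0;
  for (size_t i = 0; i < n; i++) total += iov_lens[i];
  return total < kSketchTagLen ? 0 : total - kSketchTagLen;
}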
+ size_t total_ciphertext_length = 0; + size_t i; + for (i = 0; i < ciphertext_vec_length; i++) { + total_ciphertext_length += ciphertext_vec[i].iov_len; + } + if (total_ciphertext_length < kAesGcmTagLength) { + aes_gcm_format_errors("ciphertext is too small to hold a tag.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (plaintext_bytes_written == nullptr) { + aes_gcm_format_errors("bytes_written is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + *plaintext_bytes_written = 0; + // rekey if required + if (aes_gcm_rekey_if_required(aes_gcm_crypter, nonce, error_details) != + GRPC_STATUS_OK) { + aes_gcm_format_errors("Rekeying failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + // mask nonce if required + const uint8_t* nonce_aead = nonce; + uint8_t nonce_masked[kAesGcmNonceLength]; + if (aes_gcm_crypter->rekey_data != nullptr) { + aes_gcm_mask_nonce(nonce_masked, aes_gcm_crypter->rekey_data->nonce_mask, + nonce); + nonce_aead = nonce_masked; + } + // init openssl context + if (!EVP_DecryptInit_ex(aes_gcm_crypter->ctx, nullptr, nullptr, nullptr, + nonce_aead)) { + aes_gcm_format_errors("Initializing nonce failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + // process aad + for (i = 0; i < aad_vec_length; i++) { + const uint8_t* aad = static_cast(aad_vec[i].iov_base); + size_t aad_length = aad_vec[i].iov_len; + if (aad_length == 0) { + continue; + } + size_t aad_bytes_read = 0; + if (aad == nullptr) { + aes_gcm_format_errors("aad is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (!EVP_DecryptUpdate(aes_gcm_crypter->ctx, nullptr, + reinterpret_cast(&aad_bytes_read), aad, + static_cast(aad_length)) || + aad_bytes_read != aad_length) { + aes_gcm_format_errors("Setting authenticated associated data failed.", + error_details); + return GRPC_STATUS_INTERNAL; + } + } + // process ciphertext + uint8_t* plaintext = static_cast(plaintext_vec.iov_base); + size_t plaintext_length = plaintext_vec.iov_len; + if (plaintext_length > 0 && plaintext == nullptr) { + aes_gcm_format_errors( + "plaintext is nullptr, but plaintext_length is positive.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + const uint8_t* ciphertext = nullptr; + size_t ciphertext_length = 0; + for (i = 0; + i < ciphertext_vec_length && total_ciphertext_length > kAesGcmTagLength; + i++) { + ciphertext = static_cast(ciphertext_vec[i].iov_base); + ciphertext_length = ciphertext_vec[i].iov_len; + if (ciphertext == nullptr) { + if (ciphertext_length == 0) { + continue; + } + aes_gcm_format_errors("ciphertext is nullptr.", error_details); + memset(plaintext_vec.iov_base, 0x00, plaintext_vec.iov_len); + return GRPC_STATUS_INVALID_ARGUMENT; + } + size_t bytes_written = 0; + size_t bytes_to_write = ciphertext_length; + // Don't include the tag + if (bytes_to_write > total_ciphertext_length - kAesGcmTagLength) { + bytes_to_write = total_ciphertext_length - kAesGcmTagLength; + } + if (plaintext_length < bytes_to_write) { + aes_gcm_format_errors( + "Not enough plaintext buffer to hold encrypted ciphertext.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (!EVP_DecryptUpdate(aes_gcm_crypter->ctx, plaintext, + reinterpret_cast(&bytes_written), ciphertext, + static_cast(bytes_to_write))) { + aes_gcm_format_errors("Decrypting ciphertext failed.", error_details); + memset(plaintext_vec.iov_base, 0x00, plaintext_vec.iov_len); + return GRPC_STATUS_INTERNAL; + } + if (bytes_written > ciphertext_length) { + aes_gcm_format_errors("More 
bytes written than expected.", error_details); + memset(plaintext_vec.iov_base, 0x00, plaintext_vec.iov_len); + return GRPC_STATUS_INTERNAL; + } + ciphertext += bytes_written; + ciphertext_length -= bytes_written; + total_ciphertext_length -= bytes_written; + plaintext += bytes_written; + plaintext_length -= bytes_written; + } + if (total_ciphertext_length > kAesGcmTagLength) { + aes_gcm_format_errors( + "Not enough plaintext buffer to hold encrypted ciphertext.", + error_details); + memset(plaintext_vec.iov_base, 0x00, plaintext_vec.iov_len); + return GRPC_STATUS_INVALID_ARGUMENT; + } + uint8_t tag[kAesGcmTagLength]; + uint8_t* tag_tmp = tag; + if (ciphertext_length > 0) { + memcpy(tag_tmp, ciphertext, ciphertext_length); + tag_tmp += ciphertext_length; + total_ciphertext_length -= ciphertext_length; + } + for (; i < ciphertext_vec_length; i++) { + ciphertext = static_cast(ciphertext_vec[i].iov_base); + ciphertext_length = ciphertext_vec[i].iov_len; + if (ciphertext == nullptr) { + if (ciphertext_length == 0) { + continue; + } + aes_gcm_format_errors("ciphertext is nullptr.", error_details); + memset(plaintext_vec.iov_base, 0x00, plaintext_vec.iov_len); + return GRPC_STATUS_INVALID_ARGUMENT; + } + memcpy(tag_tmp, ciphertext, ciphertext_length); + tag_tmp += ciphertext_length; + total_ciphertext_length -= ciphertext_length; + } + if (!EVP_CIPHER_CTX_ctrl(aes_gcm_crypter->ctx, EVP_CTRL_GCM_SET_TAG, + kAesGcmTagLength, reinterpret_cast(tag))) { + aes_gcm_format_errors("Setting tag failed.", error_details); + memset(plaintext_vec.iov_base, 0x00, plaintext_vec.iov_len); + return GRPC_STATUS_INTERNAL; + } + int bytes_written_temp = 0; + if (!EVP_DecryptFinal_ex(aes_gcm_crypter->ctx, nullptr, + &bytes_written_temp)) { + aes_gcm_format_errors("Checking tag failed.", error_details); + memset(plaintext_vec.iov_base, 0x00, plaintext_vec.iov_len); + return GRPC_STATUS_FAILED_PRECONDITION; + } + if (bytes_written_temp != 0) { + aes_gcm_format_errors("Openssl wrote some unexpected bytes.", + error_details); + memset(plaintext_vec.iov_base, 0x00, plaintext_vec.iov_len); + return GRPC_STATUS_INTERNAL; + } + *plaintext_bytes_written = plaintext_vec.iov_len - plaintext_length; + return GRPC_STATUS_OK; +} + +static void gsec_aes_gcm_aead_crypter_destroy(gsec_aead_crypter* crypter) { + gsec_aes_gcm_aead_crypter* aes_gcm_crypter = + reinterpret_cast( + const_cast(crypter)); + gpr_free(aes_gcm_crypter->key); + gpr_free(aes_gcm_crypter->rekey_data); + EVP_CIPHER_CTX_free(aes_gcm_crypter->ctx); +} + +static const gsec_aead_crypter_vtable vtable = { + gsec_aes_gcm_aead_crypter_encrypt_iovec, + gsec_aes_gcm_aead_crypter_decrypt_iovec, + gsec_aes_gcm_aead_crypter_max_ciphertext_and_tag_length, + gsec_aes_gcm_aead_crypter_max_plaintext_length, + gsec_aes_gcm_aead_crypter_nonce_length, + gsec_aes_gcm_aead_crypter_key_length, + gsec_aes_gcm_aead_crypter_tag_length, + gsec_aes_gcm_aead_crypter_destroy}; + +static grpc_status_code aes_gcm_new_evp_cipher_ctx( + gsec_aes_gcm_aead_crypter* aes_gcm_crypter, char** error_details) { + const EVP_CIPHER* cipher = nullptr; + bool is_rekey = aes_gcm_crypter->rekey_data != nullptr; + switch (is_rekey ? 
kRekeyAeadKeyLen : aes_gcm_crypter->key_length) { + case kAes128GcmKeyLength: + cipher = EVP_aes_128_gcm(); + break; + case kAes256GcmKeyLength: + cipher = EVP_aes_256_gcm(); + break; + } + const uint8_t* aead_key = aes_gcm_crypter->key; + uint8_t aead_key_rekey[kRekeyAeadKeyLen]; + if (is_rekey) { + if (aes_gcm_derive_aead_key(aead_key_rekey, aes_gcm_crypter->key, + aes_gcm_crypter->rekey_data->kdf_counter) != + GRPC_STATUS_OK) { + aes_gcm_format_errors("Deriving key failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + aead_key = aead_key_rekey; + } + if (!EVP_DecryptInit_ex(aes_gcm_crypter->ctx, cipher, nullptr, aead_key, + nullptr)) { + aes_gcm_format_errors("Setting key failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + if (!EVP_CIPHER_CTX_ctrl(aes_gcm_crypter->ctx, EVP_CTRL_GCM_SET_IVLEN, + static_cast(aes_gcm_crypter->nonce_length), + nullptr)) { + aes_gcm_format_errors("Setting nonce length failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + return GRPC_STATUS_OK; +} + +grpc_status_code gsec_aes_gcm_aead_crypter_create(const uint8_t* key, + size_t key_length, + size_t nonce_length, + size_t tag_length, bool rekey, + gsec_aead_crypter** crypter, + char** error_details) { + if (key == nullptr) { + aes_gcm_format_errors("key is nullptr.", error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + if (crypter == nullptr) { + aes_gcm_format_errors("crypter is nullptr.", error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + *crypter = nullptr; + if ((rekey && key_length != kAes128GcmRekeyKeyLength) || + (!rekey && key_length != kAes128GcmKeyLength && + key_length != kAes256GcmKeyLength) || + (tag_length != kAesGcmTagLength) || + (nonce_length != kAesGcmNonceLength)) { + aes_gcm_format_errors( + "Invalid key and/or nonce and/or tag length are provided at AEAD " + "crypter instance construction time.", + error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + gsec_aes_gcm_aead_crypter* aes_gcm_crypter = + static_cast( + gpr_malloc(sizeof(gsec_aes_gcm_aead_crypter))); + aes_gcm_crypter->crypter.vtable = &vtable; + aes_gcm_crypter->nonce_length = nonce_length; + aes_gcm_crypter->tag_length = tag_length; + if (rekey) { + aes_gcm_crypter->key_length = kKdfKeyLen; + aes_gcm_crypter->rekey_data = static_cast( + gpr_malloc(sizeof(gsec_aes_gcm_aead_rekey_data))); + memcpy(aes_gcm_crypter->rekey_data->nonce_mask, key + kKdfKeyLen, + kAesGcmNonceLength); + // Set kdf_counter to all-zero for initial key derivation. + memset(aes_gcm_crypter->rekey_data->kdf_counter, 0, kKdfCounterLen); + } else { + aes_gcm_crypter->key_length = key_length; + aes_gcm_crypter->rekey_data = nullptr; + } + aes_gcm_crypter->key = + static_cast(gpr_malloc(aes_gcm_crypter->key_length)); + memcpy(aes_gcm_crypter->key, key, aes_gcm_crypter->key_length); + aes_gcm_crypter->ctx = EVP_CIPHER_CTX_new(); + grpc_status_code status = + aes_gcm_new_evp_cipher_ctx(aes_gcm_crypter, error_details); + if (status != GRPC_STATUS_OK) { + gsec_aes_gcm_aead_crypter_destroy(&aes_gcm_crypter->crypter); + gpr_free(aes_gcm_crypter); + return status; + } + *crypter = &aes_gcm_crypter->crypter; + return GRPC_STATUS_OK; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/crypt/gsec.cc b/Sources/CgRPC/src/core/tsi/alts/crypt/gsec.cc new file mode 100644 index 000000000..6236591a9 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/crypt/gsec.cc @@ -0,0 +1,189 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/crypt/gsec.h" + +#include +#include + +#include + +static const char vtable_error_msg[] = + "crypter or crypter->vtable has not been initialized properly"; + +static void maybe_copy_error_msg(const char* src, char** dst) { + if (dst != nullptr && src != nullptr) { + *dst = static_cast(gpr_malloc(strlen(src) + 1)); + memcpy(*dst, src, strlen(src) + 1); + } +} + +grpc_status_code gsec_aead_crypter_encrypt( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const uint8_t* aad, size_t aad_length, const uint8_t* plaintext, + size_t plaintext_length, uint8_t* ciphertext_and_tag, + size_t ciphertext_and_tag_length, size_t* bytes_written, + char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->encrypt_iovec != nullptr) { + struct iovec aad_vec = {(void*)aad, aad_length}; + struct iovec plaintext_vec = {(void*)plaintext, plaintext_length}; + struct iovec ciphertext_vec = {ciphertext_and_tag, + ciphertext_and_tag_length}; + return crypter->vtable->encrypt_iovec( + crypter, nonce, nonce_length, &aad_vec, 1, &plaintext_vec, 1, + ciphertext_vec, bytes_written, error_details); + } + /* An error occurred. */ + maybe_copy_error_msg(vtable_error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +grpc_status_code gsec_aead_crypter_encrypt_iovec( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const struct iovec* aad_vec, size_t aad_vec_length, + const struct iovec* plaintext_vec, size_t plaintext_vec_length, + struct iovec ciphertext_vec, size_t* ciphertext_bytes_written, + char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->encrypt_iovec != nullptr) { + return crypter->vtable->encrypt_iovec( + crypter, nonce, nonce_length, aad_vec, aad_vec_length, plaintext_vec, + plaintext_vec_length, ciphertext_vec, ciphertext_bytes_written, + error_details); + } + /* An error occurred. */ + maybe_copy_error_msg(vtable_error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +grpc_status_code gsec_aead_crypter_decrypt( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const uint8_t* aad, size_t aad_length, const uint8_t* ciphertext_and_tag, + size_t ciphertext_and_tag_length, uint8_t* plaintext, + size_t plaintext_length, size_t* bytes_written, char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->decrypt_iovec != nullptr) { + struct iovec aad_vec = {(void*)aad, aad_length}; + struct iovec ciphertext_vec = {(void*)ciphertext_and_tag, + ciphertext_and_tag_length}; + struct iovec plaintext_vec = {plaintext, plaintext_length}; + return crypter->vtable->decrypt_iovec( + crypter, nonce, nonce_length, &aad_vec, 1, &ciphertext_vec, 1, + plaintext_vec, bytes_written, error_details); + } + /* An error occurred. 
*/ + maybe_copy_error_msg(vtable_error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +grpc_status_code gsec_aead_crypter_decrypt_iovec( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const struct iovec* aad_vec, size_t aad_vec_length, + const struct iovec* ciphertext_vec, size_t ciphertext_vec_length, + struct iovec plaintext_vec, size_t* plaintext_bytes_written, + char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->encrypt_iovec != nullptr) { + return crypter->vtable->decrypt_iovec( + crypter, nonce, nonce_length, aad_vec, aad_vec_length, ciphertext_vec, + ciphertext_vec_length, plaintext_vec, plaintext_bytes_written, + error_details); + } + /* An error occurred. */ + maybe_copy_error_msg(vtable_error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +grpc_status_code gsec_aead_crypter_max_ciphertext_and_tag_length( + const gsec_aead_crypter* crypter, size_t plaintext_length, + size_t* max_ciphertext_and_tag_length_to_return, char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->max_ciphertext_and_tag_length != nullptr) { + return crypter->vtable->max_ciphertext_and_tag_length( + crypter, plaintext_length, max_ciphertext_and_tag_length_to_return, + error_details); + } + /* An error occurred. */ + maybe_copy_error_msg(vtable_error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +grpc_status_code gsec_aead_crypter_max_plaintext_length( + const gsec_aead_crypter* crypter, size_t ciphertext_and_tag_length, + size_t* max_plaintext_length_to_return, char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->max_plaintext_length != nullptr) { + return crypter->vtable->max_plaintext_length( + crypter, ciphertext_and_tag_length, max_plaintext_length_to_return, + error_details); + } + /* An error occurred. */ + maybe_copy_error_msg(vtable_error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +grpc_status_code gsec_aead_crypter_nonce_length( + const gsec_aead_crypter* crypter, size_t* nonce_length_to_return, + char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->nonce_length != nullptr) { + return crypter->vtable->nonce_length(crypter, nonce_length_to_return, + error_details); + } + /* An error occurred. */ + maybe_copy_error_msg(vtable_error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +grpc_status_code gsec_aead_crypter_key_length(const gsec_aead_crypter* crypter, + size_t* key_length_to_return, + char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->key_length != nullptr) { + return crypter->vtable->key_length(crypter, key_length_to_return, + error_details); + } + /* An error occurred */ + maybe_copy_error_msg(vtable_error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +grpc_status_code gsec_aead_crypter_tag_length(const gsec_aead_crypter* crypter, + size_t* tag_length_to_return, + char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->tag_length != nullptr) { + return crypter->vtable->tag_length(crypter, tag_length_to_return, + error_details); + } + /* An error occurred. 
*/ + maybe_copy_error_msg(vtable_error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +void gsec_aead_crypter_destroy(gsec_aead_crypter* crypter) { + if (crypter != nullptr) { + if (crypter->vtable != nullptr && crypter->vtable->destruct != nullptr) { + crypter->vtable->destruct(crypter); + } + gpr_free(crypter); + } +} diff --git a/Sources/CgRPC/src/core/tsi/alts/crypt/gsec.h b/Sources/CgRPC/src/core/tsi/alts/crypt/gsec.h new file mode 100644 index 000000000..4d65caa94 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/crypt/gsec.h @@ -0,0 +1,454 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_CRYPT_GSEC_H +#define GRPC_CORE_TSI_ALTS_CRYPT_GSEC_H + +#include + +#include +#include +#include + +#include + +struct iovec { + void* iov_base; + size_t iov_len; +}; + +/** + * A gsec interface for AEAD encryption schemes. The API is thread-compatible. + * Each implementation of this interface should specify supported values for + * key, nonce, and tag lengths. + */ + +/* Key, nonce, and tag length in bytes */ +const size_t kAesGcmNonceLength = 12; +const size_t kAesGcmTagLength = 16; +const size_t kAes128GcmKeyLength = 16; +const size_t kAes256GcmKeyLength = 32; + +// The first 32 bytes are used as a KDF key and the remaining 12 bytes are used +// to mask the nonce. +const size_t kAes128GcmRekeyKeyLength = 44; + +typedef struct gsec_aead_crypter gsec_aead_crypter; + +/** + * The gsec_aead_crypter is an API for different AEAD implementations such as + * AES_GCM. It encapsulates all AEAD-related operations in the format of + * V-table that stores pointers to functions implementing those operations. + * It also provides helper functions to wrap each of those function pointers. + * + * A typical usage of this object would be: + * + *------------------------------------------------------------------------------ + * // Declare a gsec_aead_crypter object, and create and assign an instance + * // of specific AEAD implementation e.g., AES_GCM to it. We assume both + * // key and nonce contain cryptographically secure random bytes, and the key + * // can be derived from an upper-layer application. + * gsec_aead_crypter* crypter; + * char* error_in_creation; + * // User can populate the message with any 100 bytes data. + * uint8_t* message = gpr_malloc(100); + * grpc_status_code creation_status = gsec_aes_gcm_aead_crypter_create(key, + * kAes128GcmKeyLength, + * kAesGcmNonceLength, + * kAesGcmTagLength, + * &crypter, + * false, + * 0 + * &error_in_creation); + * + * if (creation_status == GRPC_STATUS_OK) { + * // Allocate a correct amount of memory to hold a ciphertext. 
+ * size_t clength = 0; + * gsec_aead_crypter_max_ciphertext_and_tag_length(crypter, 100, &clength, + * nullptr); + * uint8_t* ciphertext = gpr_malloc(clength); + * + * // Perform encryption + * size_t num_encrypted_bytes = 0; + * char* error_in_encryption = nullptr; + * grpc_status_code status = gsec_aead_crypter_encrypt(crypter, nonce, + * kAesGcmNonceLength, + * nullptr, 0, message, + * 100, ciphertext, + * clength, + * &num_encrypted_bytes, + * &error_in_encryption); + * if (status == GRPC_STATUS_OK) { + * // Allocate a correct amount of memory to hold a plaintext. + * size_t plength = 0; + * gsec_aead_crypter_max_plaintext_length(crypter, num_encrypted_bytes, + * &plength, nullptr); + * uint8_t* plaintext = gpr_malloc(plength); + * + * // Perform decryption. + * size_t num_decrypted_bytes = 0; + * char* error_in_decryption = nullptr; + * status = gsec_aead_crypter_decrypt(crypter, nonce, + * kAesGcmNonceLength, nullptr, 0, + * ciphertext, num_encrypted_bytes, + * plaintext, plength, + * &num_decrypted_bytes, + * &error_in_decryption); + * if (status != GRPC_STATUS_OK) { + * fprintf(stderr, "AEAD decrypt operation failed with error code:" + * "%d, message: %s\n", status, error_in_decryption); + * } + * ... + * gpr_free(plaintext); + * gpr_free(error_in_decryption); + * } else { + * fprintf(stderr, "AEAD encrypt operation failed with error code:" + * "%d, message: %s\n", status, error_in_encryption); + * } + * ... + * gpr_free(ciphertext); + * gpr_free(error_in_encryption); + * } else { + * fprintf(stderr, "Creation of AEAD crypter instance failed with error code:" + * "%d, message: %s\n", creation_status, error_in_creation); + * } + * + * // Destruct AEAD crypter instance. + * if (creation_status == GRPC_STATUS_OK) { + * gsec_aead_crypter_destroy(crypter); + * } + * gpr_free(error_in_creation); + * gpr_free(message); + * ----------------------------------------------------------------------------- + */ + +/* V-table for gsec AEAD operations */ +typedef struct gsec_aead_crypter_vtable { + grpc_status_code (*encrypt_iovec)( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const struct iovec* aad_vec, size_t aad_vec_length, + const struct iovec* plaintext_vec, size_t plaintext_vec_length, + struct iovec ciphertext_vec, size_t* ciphertext_bytes_written, + char** error_details); + grpc_status_code (*decrypt_iovec)( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const struct iovec* aad_vec, size_t aad_vec_length, + const struct iovec* ciphertext_vec, size_t ciphertext_vec_length, + struct iovec plaintext_vec, size_t* plaintext_bytes_written, + char** error_details); + grpc_status_code (*max_ciphertext_and_tag_length)( + const gsec_aead_crypter* crypter, size_t plaintext_length, + size_t* max_ciphertext_and_tag_length_to_return, char** error_details); + grpc_status_code (*max_plaintext_length)( + const gsec_aead_crypter* crypter, size_t ciphertext_and_tag_length, + size_t* max_plaintext_length_to_return, char** error_details); + grpc_status_code (*nonce_length)(const gsec_aead_crypter* crypter, + size_t* nonce_length_to_return, + char** error_details); + grpc_status_code (*key_length)(const gsec_aead_crypter* crypter, + size_t* key_length_to_return, + char** error_details); + grpc_status_code (*tag_length)(const gsec_aead_crypter* crypter, + size_t* tag_length_to_return, + char** error_details); + void (*destruct)(gsec_aead_crypter* crypter); +} gsec_aead_crypter_vtable; + +/* Main struct for gsec interface */ +struct gsec_aead_crypter { + 
const struct gsec_aead_crypter_vtable* vtable; +}; + +/** + * This method performs an AEAD encrypt operation. + * + * - crypter: AEAD crypter instance. + * - nonce: buffer containing a nonce with its size equal to nonce_length. + * - nonce_length: size of nonce buffer, and must be equal to the value returned + * from method gsec_aead_crypter_nonce_length. + * - aad: buffer containing data that needs to be authenticated but not + * encrypted with its size equal to aad_length. + * - aad_length: size of aad buffer, which should be zero if the buffer is + * nullptr. + * - plaintext: buffer containing data that needs to be both encrypted and + * authenticated with its size equal to plaintext_length. + * - plaintext_length: size of plaintext buffer, which should be zero if + * plaintext is nullptr. + * - ciphertext_and_tag: buffer that will contain ciphertext and tags the method + * produced. The buffer should not overlap the plaintext buffer, and pointers + * to those buffers should not be equal. Also if the ciphertext+tag buffer is + * nullptr, the plaintext_length should be zero. + * - ciphertext_and_tag_length: size of ciphertext+tag buffer, which should be + * at least as long as the one returned from method + * gsec_aead_crypter_max_ciphertext_and_tag_length. + * - bytes_written: the actual number of bytes written to the ciphertext+tag + * buffer. If bytes_written is nullptr, the plaintext_length should be zero. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On the success of encryption, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + * + */ +grpc_status_code gsec_aead_crypter_encrypt( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const uint8_t* aad, size_t aad_length, const uint8_t* plaintext, + size_t plaintext_length, uint8_t* ciphertext_and_tag, + size_t ciphertext_and_tag_length, size_t* bytes_written, + char** error_details); + +/** + * This method performs an AEAD encrypt operation. + * + * - crypter: AEAD crypter instance. + * - nonce: buffer containing a nonce with its size equal to nonce_length. + * - nonce_length: size of nonce buffer, and must be equal to the value returned + * from method gsec_aead_crypter_nonce_length. + * - aad_vec: an iovec array containing data that needs to be authenticated but + * not encrypted. + * - aad_vec_length: the array length of aad_vec. + * - plaintext_vec: an iovec array containing data that needs to be both + * encrypted and authenticated. + * - plaintext_vec_length: the array length of plaintext_vec. + * - ciphertext_vec: an iovec containing a ciphertext buffer. The buffer should + * not overlap the plaintext buffer. + * - ciphertext_bytes_written: the actual number of bytes written to + * ciphertext_vec. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On the success of encryption, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). 
+ * + */ +grpc_status_code gsec_aead_crypter_encrypt_iovec( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const struct iovec* aad_vec, size_t aad_vec_length, + const struct iovec* plaintext_vec, size_t plaintext_vec_length, + struct iovec ciphertext_vec, size_t* ciphertext_bytes_written, + char** error_details); + +/** + * This method performs an AEAD decrypt operation. + * + * - crypter: AEAD crypter instance. + * - nonce: buffer containing a nonce with its size equal to nonce_length. + * - nonce_length: size of nonce buffer, and must be equal to the value returned + * from method gsec_aead_crypter_nonce_length. + * - aad: buffer containing data that needs to be authenticated only. + * - aad_length: size of aad buffer, which should be zero if the buffer is + * nullptr. + * - ciphertext_and_tag: buffer containing ciphertext and tag. + * - ciphertext_and_tag_length: length of ciphertext and tag. It should be zero + * if any of plaintext, ciphertext_and_tag, or bytes_written is nullptr. Also, + * ciphertext_and_tag_length should be at least as large as the tag length set + * at AEAD crypter instance construction time. + * - plaintext: buffer containing decrypted and authenticated data the method + * produced. The buffer should not overlap with the ciphertext+tag buffer, and + * pointers to those buffers should not be equal. + * - plaintext_length: size of plaintext buffer, which should be at least as + * long as the one returned from gsec_aead_crypter_max_plaintext_length + * method. + * - bytes_written: the actual number of bytes written to the plaintext + * buffer. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On the success of decryption, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code gsec_aead_crypter_decrypt( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const uint8_t* aad, size_t aad_length, const uint8_t* ciphertext_and_tag, + size_t ciphertext_and_tag_length, uint8_t* plaintext, + size_t plaintext_length, size_t* bytes_written, char** error_details); + +/** + * This method performs an AEAD decrypt operation. + * + * - crypter: AEAD crypter instance. + * - nonce: buffer containing a nonce with its size equal to nonce_length. + * - nonce_length: size of nonce buffer, and must be equal to the value returned + * from method gsec_aead_crypter_nonce_length. + * - aad_vec: an iovec array containing data that needs to be authenticated but + * not encrypted. + * - aad_vec_length: the array length of aad_vec. + * - ciphertext_vec: an iovec array containing the ciphertext and tag. + * - ciphertext_vec_length: the array length of ciphertext_vec. + * - plaintext_vec: an iovec containing a plaintext buffer. The buffer should + * not overlap the ciphertext buffer. + * - plaintext_bytes_written: the actual number of bytes written to + * plaintext_vec. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On the success of decryption, the method returns GRPC_STATUS_OK. 
Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code gsec_aead_crypter_decrypt_iovec( + gsec_aead_crypter* crypter, const uint8_t* nonce, size_t nonce_length, + const struct iovec* aad_vec, size_t aad_vec_length, + const struct iovec* ciphertext_vec, size_t ciphertext_vec_length, + struct iovec plaintext_vec, size_t* plaintext_bytes_written, + char** error_details); + +/** + * This method computes the size of ciphertext+tag buffer that must be passed to + * gsec_aead_crypter_encrypt function to ensure correct encryption of a + * plaintext. The actual size of ciphertext+tag written to the buffer could be + * smaller. + * + * - crypter: AEAD crypter instance. + * - plaintext_length: length of plaintext. + * - max_ciphertext_and_tag_length_to_return: the size of ciphertext+tag buffer + * the method returns. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On the success of execution, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code gsec_aead_crypter_max_ciphertext_and_tag_length( + const gsec_aead_crypter* crypter, size_t plaintext_length, + size_t* max_ciphertext_and_tag_length_to_return, char** error_details); + +/** + * This method computes the size of plaintext buffer that must be passed to + * gsec_aead_crypter_decrypt function to ensure correct decryption of a + * ciphertext. The actual size of plaintext written to the buffer could be + * smaller. + * + * - crypter: AEAD crypter instance. + * - ciphertext_and_tag_length: length of ciphertext and tag. + * - max_plaintext_length_to_return: the size of plaintext buffer the method + * returns. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On the success of execution, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code gsec_aead_crypter_max_plaintext_length( + const gsec_aead_crypter* crypter, size_t ciphertext_and_tag_length, + size_t* max_plaintext_length_to_return, char** error_details); + +/** + * This method returns a valid size of nonce array used at the construction of + * AEAD crypter instance. It is also the size that should be passed to encrypt + * and decrypt methods executed on the instance. + * + * - crypter: AEAD crypter instance. + * - nonce_length_to_return: the length of nonce array the method returns. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On the success of execution, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). 
+ */ +grpc_status_code gsec_aead_crypter_nonce_length( + const gsec_aead_crypter* crypter, size_t* nonce_length_to_return, + char** error_details); + +/** + * This method returns a valid size of key array used at the construction of + * AEAD crypter instance. It is also the size that should be passed to encrypt + * and decrypt methods executed on the instance. + * + * - crypter: AEAD crypter instance. + * - key_length_to_return: the length of key array the method returns. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On the success of execution, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code gsec_aead_crypter_key_length(const gsec_aead_crypter* crypter, + size_t* key_length_to_return, + char** error_details); +/** + * This method returns a valid size of tag array used at the construction of + * AEAD crypter instance. It is also the size that should be passed to encrypt + * and decrypt methods executed on the instance. + * + * - crypter: AEAD crypter instance. + * - tag_length_to_return: the length of tag array the method returns. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On the success of execution, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code gsec_aead_crypter_tag_length(const gsec_aead_crypter* crypter, + size_t* tag_length_to_return, + char** error_details); + +/** + * This method destroys an AEAD crypter instance by de-allocating all of its + * occupied memory. + * + * - crypter: AEAD crypter instance that needs to be destroyed. + */ +void gsec_aead_crypter_destroy(gsec_aead_crypter* crypter); + +/** + * This method creates an AEAD crypter instance of AES-GCM encryption scheme + * which supports 16 and 32 bytes long keys, 12 and 16 bytes long nonces, and + * 16 bytes long tags. It should be noted that once the lengths of key, nonce, + * and tag are determined at construction time, they cannot be modified later. + * + * - key: buffer containing a key which is binded with AEAD crypter instance. + * - key_length: length of a key in bytes, which should be 44 if rekeying is + * enabled and 16 or 32 otherwise. + * - nonce_length: length of a nonce in bytes, which should be either 12 or 16. + * - tag_length: length of a tag in bytes, which should be always 16. + * - rekey: enable nonce-based rekeying and nonce-masking. + * - crypter: address of AES_GCM crypter instance returned from the method. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On success of instance creation, it stores the address of instance at + * crypter. Otherwise, it returns an error status code together with its details + * specified in error_details. 
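// A compact usage sketch that follows the parameter order of the declarations
// in this header (create: key, key_length, nonce_length, tag_length, rekey,
// crypter, error_details). Key and nonce contents, buffer handling, and the
// hypothetical sketch_seal name are illustrative only; error_details is
// passed as nullptr, which the contracts above permit.
#include <grpc/support/alloc.h>
#include <cstdint>

#include "src/core/tsi/alts/crypt/gsec.h"

static bool sketch_seal(const uint8_t key[kAes128GcmKeyLength],
                        const uint8_t nonce[kAesGcmNonceLength],
                        const uint8_t* msg, size_t msg_len) {
  gsec_aead_crypter* crypter = nullptr;
  if (gsec_aes_gcm_aead_crypter_create(key, kAes128GcmKeyLength,
                                       kAesGcmNonceLength, kAesGcmTagLength,
                                       /*rekey=*/false, &crypter,
                                       /*error_details=*/nullptr) !=
      GRPC_STATUS_OK) {
    return false;
  }
  // Size the ciphertext+tag buffer, then seal msg with no associated data.
  size_t ct_len = 0;
  gsec_aead_crypter_max_ciphertext_and_tag_length(crypter, msg_len, &ct_len,
                                                  nullptr);
  uint8_t* ct = static_cast<uint8_t*>(gpr_malloc(ct_len));
  size_t written = 0;
  grpc_status_code status = gsec_aead_crypter_encrypt(
      crypter, nonce, kAesGcmNonceLength, /*aad=*/nullptr, /*aad_length=*/0,
      msg, msg_len, ct, ct_len, &written, /*error_details=*/nullptr);
  gpr_free(ct);
  gsec_aead_crypter_destroy(crypter);
  return status == GRPC_STATUS_OK;
}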
+ */ +grpc_status_code gsec_aes_gcm_aead_crypter_create(const uint8_t* key, + size_t key_length, + size_t nonce_length, + size_t tag_length, bool rekey, + gsec_aead_crypter** crypter, + char** error_details); + +#endif /* GRPC_CORE_TSI_ALTS_CRYPT_GSEC_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_counter.cc b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_counter.cc new file mode 100644 index 000000000..de163e3e0 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_counter.cc @@ -0,0 +1,118 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/frame_protector/alts_counter.h" + +#include + +#include + +static void maybe_copy_error_msg(const char* src, char** dst) { + if (dst != nullptr && src != nullptr) { + *dst = static_cast(gpr_malloc(strlen(src) + 1)); + memcpy(*dst, src, strlen(src) + 1); + } +} + +grpc_status_code alts_counter_create(bool is_client, size_t counter_size, + size_t overflow_size, + alts_counter** crypter_counter, + char** error_details) { + /* Perform input sanity check. */ + if (counter_size == 0) { + const char error_msg[] = "counter_size is invalid."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (overflow_size == 0 || overflow_size >= counter_size) { + const char error_msg[] = "overflow_size is invalid."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (crypter_counter == nullptr) { + const char error_msg[] = "crypter_counter is nullptr."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + *crypter_counter = + static_cast(gpr_malloc(sizeof(**crypter_counter))); + (*crypter_counter)->size = counter_size; + (*crypter_counter)->overflow_size = overflow_size; + (*crypter_counter)->counter = + static_cast(gpr_zalloc(counter_size)); + if (is_client) { + ((*crypter_counter)->counter)[counter_size - 1] = 0x80; + } + return GRPC_STATUS_OK; +} + +grpc_status_code alts_counter_increment(alts_counter* crypter_counter, + bool* is_overflow, + char** error_details) { + /* Perform input sanity check. */ + if (crypter_counter == nullptr) { + const char error_msg[] = "crypter_counter is nullptr."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (is_overflow == nullptr) { + const char error_msg[] = "is_overflow is nullptr."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + /* Increment the internal counter. */ + size_t i = 0; + for (; i < crypter_counter->overflow_size; i++) { + (crypter_counter->counter)[i]++; + if ((crypter_counter->counter)[i] != 0x00) { + break; + } + } + /** + * If the lower overflow_size bytes are all zero, the counter has overflowed. 
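+   * The counter is stored little-endian: the loop above increments byte 0
+   * first and only carries into the next byte when the current byte wraps to
+   * 0x00. For example, with overflow_size == 2 and the two low counter bytes
+   * equal to 0xFF 0xFF, both wrap to 0x00, the loop runs to
+   * i == overflow_size, and the overflow is reported below.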
+ */ + if (i == crypter_counter->overflow_size) { + *is_overflow = true; + return GRPC_STATUS_FAILED_PRECONDITION; + } + *is_overflow = false; + return GRPC_STATUS_OK; +} + +size_t alts_counter_get_size(alts_counter* crypter_counter) { + if (crypter_counter == nullptr) { + return 0; + } + return crypter_counter->size; +} + +unsigned char* alts_counter_get_counter(alts_counter* crypter_counter) { + if (crypter_counter == nullptr) { + return nullptr; + } + return crypter_counter->counter; +} + +void alts_counter_destroy(alts_counter* crypter_counter) { + if (crypter_counter != nullptr) { + gpr_free(crypter_counter->counter); + gpr_free(crypter_counter); + } +} diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_counter.h b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_counter.h new file mode 100644 index 000000000..d705638fa --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_counter.h @@ -0,0 +1,98 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_COUNTER_H +#define GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_COUNTER_H + +#include + +#include +#include + +#include + +/* Main struct for a crypter counter managed within seal/unseal operations. */ +typedef struct alts_counter { + size_t size; + size_t overflow_size; + unsigned char* counter; +} alts_counter; + +/** + * This method creates and initializes an alts_counter instance. + * + * - is_client: a flag indicating if the alts_counter instance will be used + * at client (is_client = true) or server (is_client = false) side. + * - counter_size: size of buffer holding the counter value. + * - overflow_size: overflow size in bytes. The counter instance can be used + * to produce at most 2^(overflow_size*8) frames. + * - crypter_counter: an alts_counter instance to be returned from the method. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details and + * otherwise, the parameter should be freed with gpr_free. + * + * On success, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code alts_counter_create(bool is_client, size_t counter_size, + size_t overflow_size, + alts_counter** crypter_counter, + char** error_details); + +/** + * This method increments the internal counter. + * + * - crypter_counter: an alts_counter instance. + * - is_overflow: after incrementing the internal counter, if an overflow + * occurs, is_overflow is set to true, and no further calls to + * alts_counter_increment() should be made. Otherwise, is_overflow is set to + * false. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details and + * otherwise, the parameter should be freed with gpr_free. 
+ * + * On success, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code alts_counter_increment(alts_counter* crypter_counter, + bool* is_overflow, + char** error_details); + +/** + * This method returns the size of counter buffer. + * + * - crypter_counter: an alts_counter instance. + */ +size_t alts_counter_get_size(alts_counter* crypter_counter); + +/** + * This method returns the counter buffer. + * + * - crypter_counter: an alts_counter instance. + */ +unsigned char* alts_counter_get_counter(alts_counter* crypter_counter); + +/** + * This method de-allocates all memory allocated to an alts_coutner instance. + * - crypter_counter: an alts_counter instance. + */ +void alts_counter_destroy(alts_counter* crypter_counter); + +#endif /* GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_COUNTER_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_crypter.cc b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_crypter.cc new file mode 100644 index 000000000..56f051218 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_crypter.cc @@ -0,0 +1,66 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/frame_protector/alts_crypter.h" + +#include + +#include + +static void maybe_copy_error_msg(const char* src, char** dst) { + if (dst != nullptr && src != nullptr) { + *dst = static_cast(gpr_malloc(strlen(src) + 1)); + memcpy(*dst, src, strlen(src) + 1); + } +} + +grpc_status_code alts_crypter_process_in_place( + alts_crypter* crypter, unsigned char* data, size_t data_allocated_size, + size_t data_size, size_t* output_size, char** error_details) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->process_in_place != nullptr) { + return crypter->vtable->process_in_place(crypter, data, data_allocated_size, + data_size, output_size, + error_details); + } + /* An error occurred. */ + const char error_msg[] = + "crypter or crypter->vtable has not been initialized properly."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; +} + +size_t alts_crypter_num_overhead_bytes(const alts_crypter* crypter) { + if (crypter != nullptr && crypter->vtable != nullptr && + crypter->vtable->num_overhead_bytes != nullptr) { + return crypter->vtable->num_overhead_bytes(crypter); + } + /* An error occurred. 
*/ + return 0; +} + +void alts_crypter_destroy(alts_crypter* crypter) { + if (crypter != nullptr) { + if (crypter->vtable != nullptr && crypter->vtable->destruct != nullptr) { + crypter->vtable->destruct(crypter); + } + gpr_free(crypter); + } +} diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_crypter.h b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_crypter.h new file mode 100644 index 000000000..3140778f4 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_crypter.h @@ -0,0 +1,255 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_CRYPTER_H +#define GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_CRYPTER_H + +#include + +#include +#include + +#include + +#include "src/core/tsi/alts/crypt/gsec.h" + +/** + * An alts_crypter interface for an ALTS record protocol providing + * seal/unseal functionality. The interface is thread-compatible. + */ + +typedef struct alts_crypter alts_crypter; + +/** + * A typical usage of the interface would be + *------------------------------------------------------------------------------ + * // Perform a seal operation. We assume the gsec_aead_crypter instance - + * // client_aead_crypter is created beforehand with a 16-byte key and 12-byte + * // nonce length. + * + * alts_crypter* client = nullptr; + * char* client_error_in_creation = nullptr; + * unsigned char* data = nullptr; + * grpc_status_code client_status = + * alts_seal_crypter_create(client_aead_crypter, 1, 5, &client, + * &client_error_in_creation); + * if (client_status == GRPC_STATUS_OK) { + * size_t data_size = 100; + * size_t num_overhead_bytes = alts_crypter_num_overhead_bytes(client); + * size_t data_allocated_size = data_size + num_overhead_bytes; + * data = gpr_malloc(data_allocated_size); + * char* client_error_in_seal = nullptr; + * // Client performs a seal operation. + * client_status = alts_crypter_process_in_place(client, data, + * data_allocated_size, + * &data_size, + * &client_error_in_seal); + * if (client_status != GRPC_STATUS_OK) { + * fprintf(stderr, "seal operation failed with error code:" + * "%d, message: %s\n", client_status, + * client_error_in_seal); + * } + * gpr_free(client_error_in_seal); + * } else { + * fprintf(stderr, "alts_crypter instance creation failed with error" + * "code: %d, message: %s\n", client_status, + * client_error_in_creation); + * } + * + * ... + * + * gpr_free(client_error_in_creation); + * alts_crypter_destroy(client); + * + * ... + * + * // Perform an unseal operation. We assume the gsec_aead_crypter instance - + * // server_aead_crypter is created beforehand with a 16-byte key and 12-byte + * // nonce length. The key used in the creation of gsec_aead_crypter instances + * // at server and client sides should be identical. 
+ * + * alts_crypter* server = nullptr; + * char* server_error_in_creation = nullptr; + * grpc_status_code server_status = + * alts_unseal_crypter_create(server_aead_crypter, 0, 5, &server, + * &server_error_in_creation); + * if (server_status == GRPC_STATUS_OK) { + * size_t num_overhead_bytes = alts_crypter_num_overhead_bytes(server); + * size_t data_size = 100 + num_overhead_bytes; + * size_t data_allocated_size = data_size; + * char* server_error_in_unseal = nullptr; + * // Server performs an unseal operation. + * server_status = alts_crypter_process_in_place(server, data, + * data_allocated_size, + * &data_size, + * &server_error_in_unseal); + * if (server_status != GRPC_STATUS_OK) { + * fprintf(stderr, "unseal operation failed with error code:" + * "%d, message: %s\n", server_status, + * server_error_in_unseal); + * } + * gpr_free(server_error_in_unseal); + * } else { + * fprintf(stderr, "alts_crypter instance creation failed with error" + * "code: %d, message: %s\n", server_status, + * server_error_in_creation); + * } + * + * ... + * + * gpr_free(data); + * gpr_free(server_error_in_creation); + * alts_crypter_destroy(server); + * + * ... + *------------------------------------------------------------------------------ + */ + +/* V-table for alts_crypter operations */ +typedef struct alts_crypter_vtable { + size_t (*num_overhead_bytes)(const alts_crypter* crypter); + grpc_status_code (*process_in_place)(alts_crypter* crypter, + unsigned char* data, + size_t data_allocated_size, + size_t data_size, size_t* output_size, + char** error_details); + void (*destruct)(alts_crypter* crypter); +} alts_crypter_vtable; + +/* Main struct for alts_crypter interface */ +struct alts_crypter { + const alts_crypter_vtable* vtable; +}; + +/** + * This method gets the number of overhead bytes needed for sealing data that + * is the difference in size between the protected and raw data. The counter + * value used in a seal or unseal operation is locally maintained (not sent or + * received from the other peer) and therefore, will not be counted as part of + * overhead bytes. + * + * - crypter: an alts_crypter instance. + * + * On success, the method returns the number of overhead bytes. Otherwise, it + * returns zero. + * + */ +size_t alts_crypter_num_overhead_bytes(const alts_crypter* crypter); + +/** + * This method performs either a seal or an unseal operation depending on the + * alts_crypter instance - crypter passed to the method. If the crypter is + * an instance implementing a seal operation, the method will perform a seal + * operation. That is, it seals raw data and stores the result in-place, and the + * memory allocated for data must be at least data_length + + * alts_crypter_num_overhead_bytes(). If the crypter is an instance + * implementing an unseal operation, the method will perform an unseal + * operation. That is, it unseals protected data and stores the result in-place. + * The size of unsealed data will be data_length - + * alts_crypter_num_overhead_bytes(). Integrity tag will be verified during + * the unseal operation, and if verification fails, the data will be wiped. + * The counters used in both seal and unseal operations are managed internally. + * + * - crypter: an alts_crypter instance. + * - data: if the method performs a seal operation, the data represents raw data + * that needs to be sealed. It also plays the role of buffer to hold the + * protected data as a result of seal. 
If the method performs an unseal + * operation, the data represents protected data that needs to be unsealed. It + * also plays the role of buffer to hold raw data as a result of unseal. + * - data_allocated_size: the size of data buffer. The parameter is used to + * check whether the result of either seal or unseal can be safely written to + * the data buffer. + * - data_size: if the method performs a seal operation, data_size + * represents the size of raw data that needs to be sealed, and if the method + * performs an unseal operation, data_size represents the size of protected + * data that needs to be unsealed. + * - output_size: size of data written to the data buffer after a seal or an + * unseal operation. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is legal to pass nullptr into error_details and + * otherwise, the parameter should be freed with gpr_free. + * + * On success, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code alts_crypter_process_in_place( + alts_crypter* crypter, unsigned char* data, size_t data_allocated_size, + size_t data_size, size_t* output_size, char** error_details); + +/** + * This method creates an alts_crypter instance to be used to perform a seal + * operation, given a gsec_aead_crypter instance and a flag indicating if the + * created instance will be used at the client or server side. It takes + * ownership of gsec_aead_crypter instance. + * + * - gc: a gsec_aead_crypter instance used to perform AEAD encryption. + * - is_client: a flag indicating if the alts_crypter instance will be + * used at the client (is_client = true) or server (is_client = + * false) side. + * - overflow_size: overflow size of counter in bytes. + * - crypter: an alts_crypter instance to be returned from the method. + * - error_details: a buffer containing an error message if the method does + * not function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On success of creation, the method returns GRPC_STATUS_OK. + * Otherwise, it returns an error status code along with its details specified + * in error_details (if error_details is not nullptr). + */ +grpc_status_code alts_seal_crypter_create(gsec_aead_crypter* gc, bool is_client, + size_t overflow_size, + alts_crypter** crypter, + char** error_details); + +/** + * This method creates an alts_crypter instance used to perform an unseal + * operation, given a gsec_aead_crypter instance and a flag indicating if the + * created instance will be used at the client or server side. It takes + * ownership of gsec_aead_crypter instance. + * + * - gc: a gsec_aead_crypter instance used to perform AEAD decryption. + * - is_client: a flag indicating if the alts_crypter instance will be + * used at the client (is_client = true) or server (is_client = + * false) side. + * - overflow_size: overflow size of counter in bytes. + * - crypter: an alts_crypter instance to be returned from the method. + * - error_details: a buffer containing an error message if the method does + * not function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On success of creation, the method returns GRPC_STATUS_OK. 
+ * Otherwise, it returns an error status code along with its details specified + * in error_details (if error_details is not nullptr). + */ +grpc_status_code alts_unseal_crypter_create(gsec_aead_crypter* gc, + bool is_client, + size_t overflow_size, + alts_crypter** crypter, + char** error_details); + +/** + * This method destroys an alts_crypter instance by de-allocating all of its + * occupied memory. A gsec_aead_crypter instance passed in at alts_crypter + * instance creation time will be destroyed in this method. + * + * - crypter: an alts_crypter instance. + */ +void alts_crypter_destroy(alts_crypter* crypter); + +#endif /* GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_CRYPTER_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_frame_protector.cc b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_frame_protector.cc new file mode 100644 index 000000000..bfa0b7a72 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_frame_protector.cc @@ -0,0 +1,407 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/frame_protector/alts_frame_protector.h" + +#include +#include + +#include +#include + +#include "src/core/lib/gpr/useful.h" +#include "src/core/tsi/alts/crypt/gsec.h" +#include "src/core/tsi/alts/frame_protector/alts_crypter.h" +#include "src/core/tsi/alts/frame_protector/frame_handler.h" +#include "src/core/tsi/transport_security.h" + +constexpr size_t kMinFrameLength = 1024; +constexpr size_t kDefaultFrameLength = 16 * 1024; +constexpr size_t kMaxFrameLength = 1024 * 1024; + +// Limit k on number of frames such that at most 2^(8 * k) frames can be sent. +constexpr size_t kAltsRecordProtocolRekeyFrameLimit = 8; +constexpr size_t kAltsRecordProtocolFrameLimit = 5; + +/* Main struct for alts_frame_protector. 
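+ * It owns a seal alts_crypter and an unseal alts_crypter, a frame
+ * writer/reader pair, and two scratch buffers in which outgoing and incoming
+ * frames are sealed and unsealed in place.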
*/ +struct alts_frame_protector { + tsi_frame_protector base; + alts_crypter* seal_crypter; + alts_crypter* unseal_crypter; + alts_frame_writer* writer; + alts_frame_reader* reader; + unsigned char* in_place_protect_buffer; + unsigned char* in_place_unprotect_buffer; + size_t in_place_protect_bytes_buffered; + size_t in_place_unprotect_bytes_processed; + size_t max_protected_frame_size; + size_t max_unprotected_frame_size; + size_t overhead_length; + size_t counter_overflow; +}; + +static tsi_result seal(alts_frame_protector* impl) { + char* error_details = nullptr; + size_t output_size = 0; + grpc_status_code status = alts_crypter_process_in_place( + impl->seal_crypter, impl->in_place_protect_buffer, + impl->max_protected_frame_size, impl->in_place_protect_bytes_buffered, + &output_size, &error_details); + impl->in_place_protect_bytes_buffered = output_size; + if (status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "%s", error_details); + gpr_free(error_details); + return TSI_INTERNAL_ERROR; + } + return TSI_OK; +} + +static size_t max_encrypted_payload_bytes(alts_frame_protector* impl) { + return impl->max_protected_frame_size - kFrameHeaderSize; +} + +static tsi_result alts_protect_flush(tsi_frame_protector* self, + unsigned char* protected_output_frames, + size_t* protected_output_frames_size, + size_t* still_pending_size) { + if (self == nullptr || protected_output_frames == nullptr || + protected_output_frames_size == nullptr || + still_pending_size == nullptr) { + gpr_log(GPR_ERROR, "Invalid nullptr arguments to alts_protect_flush()."); + return TSI_INVALID_ARGUMENT; + } + alts_frame_protector* impl = reinterpret_cast(self); + /** + * If there's nothing to flush (i.e., in_place_protect_buffer is empty), + * we're done. + */ + if (impl->in_place_protect_bytes_buffered == 0) { + *protected_output_frames_size = 0; + *still_pending_size = 0; + return TSI_OK; + } + /** + * If a new frame can start being processed, we encrypt the payload and reset + * the frame writer to point to in_place_protect_buffer that holds the newly + * sealed frame. + */ + if (alts_is_frame_writer_done(impl->writer)) { + tsi_result result = seal(impl); + if (result != TSI_OK) { + return result; + } + if (!alts_reset_frame_writer(impl->writer, impl->in_place_protect_buffer, + impl->in_place_protect_bytes_buffered)) { + gpr_log(GPR_ERROR, "Couldn't reset frame writer."); + return TSI_INTERNAL_ERROR; + } + } + /** + * Write the sealed frame as much as possible to protected_output_frames. It's + * possible a frame will not be written out completely by a single flush + * (i.e., still_pending_size != 0), in which case the flush should be called + * iteratively until a complete frame has been written out. + */ + size_t written_frame_bytes = *protected_output_frames_size; + if (!alts_write_frame_bytes(impl->writer, protected_output_frames, + &written_frame_bytes)) { + gpr_log(GPR_ERROR, "Couldn't write frame bytes."); + return TSI_INTERNAL_ERROR; + } + *protected_output_frames_size = written_frame_bytes; + *still_pending_size = alts_get_num_writer_bytes_remaining(impl->writer); + /** + * If the current frame has been finished processing (i.e., sealed and written + * out completely), we empty in_place_protect_buffer. 
+ */ + if (alts_is_frame_writer_done(impl->writer)) { + impl->in_place_protect_bytes_buffered = 0; + } + return TSI_OK; +} + +static tsi_result alts_protect(tsi_frame_protector* self, + const unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size, + unsigned char* protected_output_frames, + size_t* protected_output_frames_size) { + if (self == nullptr || unprotected_bytes == nullptr || + unprotected_bytes_size == nullptr || protected_output_frames == nullptr || + protected_output_frames_size == nullptr) { + gpr_log(GPR_ERROR, "Invalid nullptr arguments to alts_protect()."); + return TSI_INVALID_ARGUMENT; + } + alts_frame_protector* impl = reinterpret_cast(self); + + /** + * If more payload can be buffered, we buffer it as much as possible to + * in_place_protect_buffer. + */ + if (impl->in_place_protect_bytes_buffered + impl->overhead_length < + max_encrypted_payload_bytes(impl)) { + size_t bytes_to_buffer = GPR_MIN(*unprotected_bytes_size, + max_encrypted_payload_bytes(impl) - + impl->in_place_protect_bytes_buffered - + impl->overhead_length); + *unprotected_bytes_size = bytes_to_buffer; + if (bytes_to_buffer > 0) { + memcpy( + impl->in_place_protect_buffer + impl->in_place_protect_bytes_buffered, + unprotected_bytes, bytes_to_buffer); + impl->in_place_protect_bytes_buffered += bytes_to_buffer; + } + } else { + *unprotected_bytes_size = 0; + } + /** + * If a full frame has been buffered, we output it. If the first condition + * holds, then there exists an unencrypted full frame. If the second + * condition holds, then there exists a full frame that has already been + * encrypted. + */ + if (max_encrypted_payload_bytes(impl) == + impl->in_place_protect_bytes_buffered + impl->overhead_length || + max_encrypted_payload_bytes(impl) == + impl->in_place_protect_bytes_buffered) { + size_t still_pending_size = 0; + return alts_protect_flush(self, protected_output_frames, + protected_output_frames_size, + &still_pending_size); + } else { + *protected_output_frames_size = 0; + return TSI_OK; + } +} + +static tsi_result unseal(alts_frame_protector* impl) { + char* error_details = nullptr; + size_t output_size = 0; + grpc_status_code status = alts_crypter_process_in_place( + impl->unseal_crypter, impl->in_place_unprotect_buffer, + impl->max_unprotected_frame_size, + alts_get_output_bytes_read(impl->reader), &output_size, &error_details); + if (status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "%s", error_details); + gpr_free(error_details); + return TSI_DATA_CORRUPTED; + } + return TSI_OK; +} + +static void ensure_buffer_size(alts_frame_protector* impl) { + if (!alts_has_read_frame_length(impl->reader)) { + return; + } + size_t buffer_space_remaining = impl->max_unprotected_frame_size - + alts_get_output_bytes_read(impl->reader); + /** + * Check if we need to resize in_place_unprotect_buffer in order to hold + * remaining bytes of a full frame. 
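+   * For example, if 4096 bytes of the current frame have already been
+   * deframed, the reader still expects another 20480 bytes, and the existing
+   * buffer cannot hold them, a new 4096 + 20480 byte buffer is allocated and
+   * the bytes read so far are copied into it.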
+ */ + if (buffer_space_remaining < alts_get_reader_bytes_remaining(impl->reader)) { + size_t buffer_len = alts_get_output_bytes_read(impl->reader) + + alts_get_reader_bytes_remaining(impl->reader); + unsigned char* buffer = static_cast(gpr_malloc(buffer_len)); + memcpy(buffer, impl->in_place_unprotect_buffer, + alts_get_output_bytes_read(impl->reader)); + impl->max_unprotected_frame_size = buffer_len; + gpr_free(impl->in_place_unprotect_buffer); + impl->in_place_unprotect_buffer = buffer; + alts_reset_reader_output_buffer( + impl->reader, buffer + alts_get_output_bytes_read(impl->reader)); + } +} + +static tsi_result alts_unprotect(tsi_frame_protector* self, + const unsigned char* protected_frames_bytes, + size_t* protected_frames_bytes_size, + unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size) { + if (self == nullptr || protected_frames_bytes == nullptr || + protected_frames_bytes_size == nullptr || unprotected_bytes == nullptr || + unprotected_bytes_size == nullptr) { + gpr_log(GPR_ERROR, "Invalid nullptr arguments to alts_unprotect()."); + return TSI_INVALID_ARGUMENT; + } + alts_frame_protector* impl = reinterpret_cast(self); + /** + * If a new frame can start being processed, we reset the frame reader to + * point to in_place_unprotect_buffer that will be used to hold deframed + * result. + */ + if (alts_is_frame_reader_done(impl->reader) && + ((alts_get_output_buffer(impl->reader) == nullptr) || + (alts_get_output_bytes_read(impl->reader) == + impl->in_place_unprotect_bytes_processed + impl->overhead_length))) { + if (!alts_reset_frame_reader(impl->reader, + impl->in_place_unprotect_buffer)) { + gpr_log(GPR_ERROR, "Couldn't reset frame reader."); + return TSI_INTERNAL_ERROR; + } + impl->in_place_unprotect_bytes_processed = 0; + } + /** + * If a full frame has not yet been read, we read more bytes from + * protected_frames_bytes until a full frame has been read. We also need to + * make sure in_place_unprotect_buffer is large enough to hold a complete + * frame. + */ + if (!alts_is_frame_reader_done(impl->reader)) { + ensure_buffer_size(impl); + *protected_frames_bytes_size = + GPR_MIN(impl->max_unprotected_frame_size - + alts_get_output_bytes_read(impl->reader), + *protected_frames_bytes_size); + size_t read_frames_bytes_size = *protected_frames_bytes_size; + if (!alts_read_frame_bytes(impl->reader, protected_frames_bytes, + &read_frames_bytes_size)) { + gpr_log(GPR_ERROR, "Failed to process frame."); + return TSI_INTERNAL_ERROR; + } + *protected_frames_bytes_size = read_frames_bytes_size; + } else { + *protected_frames_bytes_size = 0; + } + /** + * If a full frame has been read, we unseal it, and write out the + * deframed result to unprotected_bytes. 
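+   * The unsealed payload may not fit into the caller's buffer; in that case
+   * only up to *unprotected_bytes_size bytes are copied out per call, and
+   * in_place_unprotect_bytes_processed tracks how much of the unsealed frame
+   * has already been handed back.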
+ */ + if (alts_is_frame_reader_done(impl->reader)) { + if (impl->in_place_unprotect_bytes_processed == 0) { + tsi_result result = unseal(impl); + if (result != TSI_OK) { + return result; + } + } + size_t bytes_to_write = GPR_MIN( + *unprotected_bytes_size, alts_get_output_bytes_read(impl->reader) - + impl->in_place_unprotect_bytes_processed - + impl->overhead_length); + if (bytes_to_write > 0) { + memcpy(unprotected_bytes, + impl->in_place_unprotect_buffer + + impl->in_place_unprotect_bytes_processed, + bytes_to_write); + } + *unprotected_bytes_size = bytes_to_write; + impl->in_place_unprotect_bytes_processed += bytes_to_write; + return TSI_OK; + } else { + *unprotected_bytes_size = 0; + return TSI_OK; + } +} + +static void alts_destroy(tsi_frame_protector* self) { + alts_frame_protector* impl = reinterpret_cast(self); + if (impl != nullptr) { + alts_crypter_destroy(impl->seal_crypter); + alts_crypter_destroy(impl->unseal_crypter); + gpr_free(impl->in_place_protect_buffer); + gpr_free(impl->in_place_unprotect_buffer); + alts_destroy_frame_writer(impl->writer); + alts_destroy_frame_reader(impl->reader); + gpr_free(impl); + } +} + +static const tsi_frame_protector_vtable alts_frame_protector_vtable = { + alts_protect, alts_protect_flush, alts_unprotect, alts_destroy}; + +static grpc_status_code create_alts_crypters(const uint8_t* key, + size_t key_size, bool is_client, + bool is_rekey, + alts_frame_protector* impl, + char** error_details) { + grpc_status_code status; + gsec_aead_crypter* aead_crypter_seal = nullptr; + gsec_aead_crypter* aead_crypter_unseal = nullptr; + status = gsec_aes_gcm_aead_crypter_create(key, key_size, kAesGcmNonceLength, + kAesGcmTagLength, is_rekey, + &aead_crypter_seal, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + status = gsec_aes_gcm_aead_crypter_create( + key, key_size, kAesGcmNonceLength, kAesGcmTagLength, is_rekey, + &aead_crypter_unseal, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + size_t overflow_size = is_rekey ? kAltsRecordProtocolRekeyFrameLimit + : kAltsRecordProtocolFrameLimit; + status = alts_seal_crypter_create(aead_crypter_seal, is_client, overflow_size, + &impl->seal_crypter, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + status = + alts_unseal_crypter_create(aead_crypter_unseal, is_client, overflow_size, + &impl->unseal_crypter, error_details); + return status; +} + +tsi_result alts_create_frame_protector(const uint8_t* key, size_t key_size, + bool is_client, bool is_rekey, + size_t* max_protected_frame_size, + tsi_frame_protector** self) { + if (key == nullptr || self == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to alts_create_frame_protector()."); + return TSI_INTERNAL_ERROR; + } + char* error_details = nullptr; + alts_frame_protector* impl = + static_cast(gpr_zalloc(sizeof(*impl))); + grpc_status_code status = create_alts_crypters( + key, key_size, is_client, is_rekey, impl, &error_details); + if (status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "Failed to create ALTS crypters, %s.", error_details); + gpr_free(error_details); + return TSI_INTERNAL_ERROR; + } + /** + * Set maximum frame size to be used by a frame protector. If it is nullptr, a + * default frame size will be used. Otherwise, the provided frame size will be + * adjusted (if not falling into a valid frame range) and used. 
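+   * For example, a requested size of 512 is raised to kMinFrameLength (1024),
+   * and a requested size of 2 * 1024 * 1024 is lowered to kMaxFrameLength
+   * (1024 * 1024). The adjusted value is written back through
+   * max_protected_frame_size so the caller can see the size actually in use.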
+ */ + size_t max_protected_frame_size_to_set = kDefaultFrameLength; + if (max_protected_frame_size != nullptr) { + *max_protected_frame_size = + GPR_MIN(*max_protected_frame_size, kMaxFrameLength); + *max_protected_frame_size = + GPR_MAX(*max_protected_frame_size, kMinFrameLength); + max_protected_frame_size_to_set = *max_protected_frame_size; + } + impl->max_protected_frame_size = max_protected_frame_size_to_set; + impl->max_unprotected_frame_size = max_protected_frame_size_to_set; + impl->in_place_protect_bytes_buffered = 0; + impl->in_place_unprotect_bytes_processed = 0; + impl->in_place_protect_buffer = static_cast( + gpr_malloc(sizeof(unsigned char) * max_protected_frame_size_to_set)); + impl->in_place_unprotect_buffer = static_cast( + gpr_malloc(sizeof(unsigned char) * max_protected_frame_size_to_set)); + impl->overhead_length = alts_crypter_num_overhead_bytes(impl->seal_crypter); + impl->writer = alts_create_frame_writer(); + impl->reader = alts_create_frame_reader(); + impl->base.vtable = &alts_frame_protector_vtable; + *self = &impl->base; + return TSI_OK; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_frame_protector.h b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_frame_protector.h new file mode 100644 index 000000000..321bffaed --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_frame_protector.h @@ -0,0 +1,55 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_FRAME_PROTECTOR_H +#define GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_FRAME_PROTECTOR_H + +#include + +#include + +#include "src/core/tsi/transport_security_interface.h" + +typedef struct alts_frame_protector alts_frame_protector; + +/** + * TODO: Add a parameter to the interface to support the use of + * different record protocols within a frame protector. + * + * This method creates a frame protector. + * + * - key: a symmetric key used to seal/unseal frames. + * - key_size: the size of symmetric key. + * - is_client: a flag indicating if the frame protector will be used at client + * (is_client = true) or server (is_client = false) side. + * - is_rekey: a flag indicating if the frame protector will use an AEAD with + * rekeying. + * - max_protected_frame_size: an in/out parameter indicating max frame size + * to be used by the frame protector. If it is nullptr, the default frame + * size will be used. Otherwise, the provided frame size will be adjusted (if + * not falling into a valid frame range) and used. + * - self: a pointer to the frame protector returned from the method. + * + * This method returns TSI_OK on success and TSI_INTERNAL_ERROR otherwise. 
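+ *
+ * A minimal usage sketch (not part of the upstream header; the all-zero key
+ * and the variable names are illustrative only):
+ *
+ *   uint8_t key[16] = {0};  // in practice, the key negotiated by the ALTS
+ *                           // handshake
+ *   size_t max_frame_size = 16384;
+ *   tsi_frame_protector* protector = nullptr;
+ *   if (alts_create_frame_protector(key, sizeof(key), true, false,
+ *                                   &max_frame_size, &protector) == TSI_OK) {
+ *     // Drive it through the generic TSI frame-protector API
+ *     // (tsi_frame_protector_protect()/tsi_frame_protector_unprotect()),
+ *     // and finally release it with tsi_frame_protector_destroy().
+ *   }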
+ */ +tsi_result alts_create_frame_protector(const uint8_t* key, size_t key_size, + bool is_client, bool is_rekey, + size_t* max_protected_frame_size, + tsi_frame_protector** self); + +#endif /* GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_FRAME_PROTECTOR_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc new file mode 100644 index 000000000..0574ed501 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc @@ -0,0 +1,114 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h" + +#include + +static void maybe_copy_error_msg(const char* src, char** dst) { + if (dst != nullptr && src != nullptr) { + *dst = static_cast(gpr_malloc(strlen(src) + 1)); + memcpy(*dst, src, strlen(src) + 1); + } +} + +grpc_status_code input_sanity_check( + const alts_record_protocol_crypter* rp_crypter, const unsigned char* data, + size_t* output_size, char** error_details) { + if (rp_crypter == nullptr) { + maybe_copy_error_msg("alts_crypter instance is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } else if (data == nullptr) { + maybe_copy_error_msg("data is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } else if (output_size == nullptr) { + maybe_copy_error_msg("output_size is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + return GRPC_STATUS_OK; +} + +grpc_status_code increment_counter(alts_record_protocol_crypter* rp_crypter, + char** error_details) { + bool is_overflow = false; + grpc_status_code status = + alts_counter_increment(rp_crypter->ctr, &is_overflow, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + if (is_overflow) { + const char error_msg[] = + "crypter counter is wrapped. 
The connection" + "should be closed and the key should be deleted."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INTERNAL; + } + return GRPC_STATUS_OK; +} + +size_t alts_record_protocol_crypter_num_overhead_bytes(const alts_crypter* c) { + if (c != nullptr) { + size_t num_overhead_bytes = 0; + char* error_details = nullptr; + const alts_record_protocol_crypter* rp_crypter = + reinterpret_cast(c); + grpc_status_code status = gsec_aead_crypter_tag_length( + rp_crypter->crypter, &num_overhead_bytes, &error_details); + if (status == GRPC_STATUS_OK) { + return num_overhead_bytes; + } + } + return 0; +} + +void alts_record_protocol_crypter_destruct(alts_crypter* c) { + if (c != nullptr) { + alts_record_protocol_crypter* rp_crypter = + reinterpret_cast(c); + alts_counter_destroy(rp_crypter->ctr); + gsec_aead_crypter_destroy(rp_crypter->crypter); + } +} + +alts_record_protocol_crypter* alts_crypter_create_common( + gsec_aead_crypter* crypter, bool is_client, size_t overflow_size, + char** error_details) { + if (crypter != nullptr) { + auto* rp_crypter = static_cast( + gpr_malloc(sizeof(alts_record_protocol_crypter))); + size_t counter_size = 0; + grpc_status_code status = + gsec_aead_crypter_nonce_length(crypter, &counter_size, error_details); + if (status != GRPC_STATUS_OK) { + return nullptr; + } + /* Create a counter. */ + status = alts_counter_create(is_client, counter_size, overflow_size, + &rp_crypter->ctr, error_details); + if (status != GRPC_STATUS_OK) { + return nullptr; + } + rp_crypter->crypter = crypter; + return rp_crypter; + } + const char error_msg[] = "crypter is nullptr."; + maybe_copy_error_msg(error_msg, error_details); + return nullptr; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h new file mode 100644 index 000000000..682a8f7e7 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h @@ -0,0 +1,114 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_RECORD_PROTOCOL_CRYPTER_COMMON_H +#define GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_RECORD_PROTOCOL_CRYPTER_COMMON_H + +#include + +#include + +#include "src/core/tsi/alts/frame_protector/alts_counter.h" +#include "src/core/tsi/alts/frame_protector/alts_crypter.h" + +/** + * This file contains common implementation that will be used in both seal and + * unseal operations. + */ + +/** + * Main struct for alts_record_protocol_crypter that will be used in both + * seal and unseal operations. + */ +typedef struct alts_record_protocol_crypter { + alts_crypter base; + gsec_aead_crypter* crypter; + alts_counter* ctr; +} alts_record_protocol_crypter; + +/** + * This method performs input sanity checks on a subset of inputs to + * alts_crypter_process_in_place() for both seal and unseal operations. 
+ * + * - rp_crypter: an alts_record_protocol_crypter instance. + * - data: it represents raw data that needs to be sealed in a seal operation or + * protected data that needs to be unsealed in an unseal operation. + * - output_size: size of data written to the data buffer after a seal or + * unseal operation. + * - error_details: a buffer containing an error message if any of checked + * inputs is nullptr. It is legal to pass nullptr into error_details and + * otherwise, the parameter should be freed with gpr_free. + * + * On success, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code input_sanity_check( + const alts_record_protocol_crypter* rp_crypter, const unsigned char* data, + size_t* output_size, char** error_details); + +/** + * This method increments the counter within an alts_record_protocol_crypter + * instance. + * + * - rp_crypter: an alts_record_protocol_crypter instance. + * - error_details: a buffer containing an error message if the method does not + * function correctly or the counter is wrapped. It is legal to pass nullptr + * into error_details and otherwise, the parameter should be freed with + * gpr_free. + * + * On success, the method returns GRPC_STATUS_OK. Otherwise, + * it returns an error status code along with its details specified in + * error_details (if error_details is not nullptr). + */ +grpc_status_code increment_counter(alts_record_protocol_crypter* rp_crypter, + char** error_details); + +/** + * This method creates an alts_crypter instance, and populates the fields + * that are common to both seal and unseal operations. + * + * - crypter: a gsec_aead_crypter instance used to perform AEAD decryption. The + * function does not take ownership of crypter. + * - is_client: a flag indicating if the alts_crypter instance will be + * used at the client (is_client = true) or server (is_client = + * false) side. + * - overflow_size: overflow size of counter in bytes. + * - error_details: a buffer containing an error message if the method does + * not function correctly. It is legal to pass nullptr into error_details, and + * otherwise, the parameter should be freed with gpr_free. + * + * On success of creation, the method returns alts_record_protocol_crypter + * instance. Otherwise, it returns nullptr with its details specified in + * error_details (if error_details is not nullptr). + * + */ +alts_record_protocol_crypter* alts_crypter_create_common( + gsec_aead_crypter* crypter, bool is_client, size_t overflow_size, + char** error_details); + +/** + * For the following two methods, please refer to the corresponding API in + * alts_crypter.h for detailed specifications. + */ +size_t alts_record_protocol_crypter_num_overhead_bytes(const alts_crypter* c); + +void alts_record_protocol_crypter_destruct(alts_crypter* c); + +#endif /* GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_ALTS_RECORD_PROTOCOL_CRYPTER_COMMON_H \ + */ diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc new file mode 100644 index 000000000..f40783161 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc @@ -0,0 +1,105 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include + +#include "src/core/tsi/alts/frame_protector/alts_counter.h" +#include "src/core/tsi/alts/frame_protector/alts_crypter.h" +#include "src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h" + +static void maybe_copy_error_msg(const char* src, char** dst) { + if (dst != nullptr && src != nullptr) { + *dst = static_cast(gpr_malloc(strlen(src) + 1)); + memcpy(*dst, src, strlen(src) + 1); + } +} + +/* Perform input santity check for a seal operation. */ +static grpc_status_code seal_check(alts_crypter* c, const unsigned char* data, + size_t data_allocated_size, size_t data_size, + size_t* output_size, char** error_details) { + /* Do common input sanity check. */ + grpc_status_code status = input_sanity_check( + reinterpret_cast(c), data, + output_size, error_details); + if (status != GRPC_STATUS_OK) return status; + /* Do seal-specific check. */ + size_t num_overhead_bytes = + alts_crypter_num_overhead_bytes(reinterpret_cast(c)); + if (data_size == 0) { + const char error_msg[] = "data_size is zero."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (data_size + num_overhead_bytes > data_allocated_size) { + const char error_msg[] = + "data_allocated_size is smaller than sum of data_size and " + "num_overhead_bytes."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + return GRPC_STATUS_OK; +} + +static grpc_status_code alts_seal_crypter_process_in_place( + alts_crypter* c, unsigned char* data, size_t data_allocated_size, + size_t data_size, size_t* output_size, char** error_details) { + grpc_status_code status = seal_check(c, data, data_allocated_size, data_size, + output_size, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Do AEAD encryption. */ + alts_record_protocol_crypter* rp_crypter = + reinterpret_cast(c); + status = gsec_aead_crypter_encrypt( + rp_crypter->crypter, alts_counter_get_counter(rp_crypter->ctr), + alts_counter_get_size(rp_crypter->ctr), nullptr /* aad */, + 0 /* aad_length */, data, data_size, data, data_allocated_size, + output_size, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Increment the crypter counter. 
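+   * The counter value is passed as the nonce to gsec_aead_crypter_encrypt()
+   * above, so it must advance after every sealed frame to keep nonces from
+   * repeating.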
*/ + return increment_counter(rp_crypter, error_details); +} + +static const alts_crypter_vtable vtable = { + alts_record_protocol_crypter_num_overhead_bytes, + alts_seal_crypter_process_in_place, alts_record_protocol_crypter_destruct}; + +grpc_status_code alts_seal_crypter_create(gsec_aead_crypter* gc, bool is_client, + size_t overflow_size, + alts_crypter** crypter, + char** error_details) { + if (crypter == nullptr) { + const char error_msg[] = "crypter is nullptr."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + alts_record_protocol_crypter* rp_crypter = + alts_crypter_create_common(gc, !is_client, overflow_size, error_details); + if (rp_crypter == nullptr) { + return GRPC_STATUS_FAILED_PRECONDITION; + } + rp_crypter->base.vtable = &vtable; + *crypter = &rp_crypter->base; + return GRPC_STATUS_OK; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc new file mode 100644 index 000000000..51bea24f1 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc @@ -0,0 +1,103 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include + +#include "src/core/tsi/alts/frame_protector/alts_counter.h" +#include "src/core/tsi/alts/frame_protector/alts_crypter.h" +#include "src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h" + +static void maybe_copy_error_msg(const char* src, char** dst) { + if (dst != nullptr && src != nullptr) { + *dst = static_cast(gpr_malloc(strlen(src) + 1)); + memcpy(*dst, src, strlen(src) + 1); + } +} + +/* Perform input santity check. */ +static grpc_status_code unseal_check(alts_crypter* c, const unsigned char* data, + size_t data_allocated_size, + size_t data_size, size_t* output_size, + char** error_details) { + /* Do common input sanity check. */ + grpc_status_code status = input_sanity_check( + reinterpret_cast(c), data, + output_size, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Do unseal-specific input check. */ + size_t num_overhead_bytes = + alts_crypter_num_overhead_bytes(reinterpret_cast(c)); + if (num_overhead_bytes > data_size) { + const char error_msg[] = "data_size is smaller than num_overhead_bytes."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + return GRPC_STATUS_OK; +} + +static grpc_status_code alts_unseal_crypter_process_in_place( + alts_crypter* c, unsigned char* data, size_t data_allocated_size, + size_t data_size, size_t* output_size, char** error_details) { + grpc_status_code status = unseal_check(c, data, data_allocated_size, + data_size, output_size, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Do AEAD decryption. 
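+   * The local counter value is used as the nonce below; unsealing
+   * authenticates only if it matches the counter the peer used when sealing
+   * this frame.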
*/ + alts_record_protocol_crypter* rp_crypter = + reinterpret_cast(c); + status = gsec_aead_crypter_decrypt( + rp_crypter->crypter, alts_counter_get_counter(rp_crypter->ctr), + alts_counter_get_size(rp_crypter->ctr), nullptr /* aad */, + 0 /* aad_length */, data, data_size, data, data_allocated_size, + output_size, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Increment the crypter counter. */ + return increment_counter(rp_crypter, error_details); +} + +static const alts_crypter_vtable vtable = { + alts_record_protocol_crypter_num_overhead_bytes, + alts_unseal_crypter_process_in_place, + alts_record_protocol_crypter_destruct}; + +grpc_status_code alts_unseal_crypter_create(gsec_aead_crypter* gc, + bool is_client, + size_t overflow_size, + alts_crypter** crypter, + char** error_details) { + if (crypter == nullptr) { + const char error_msg[] = "crypter is nullptr."; + maybe_copy_error_msg(error_msg, error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + alts_record_protocol_crypter* rp_crypter = + alts_crypter_create_common(gc, is_client, overflow_size, error_details); + if (rp_crypter == nullptr) { + return GRPC_STATUS_FAILED_PRECONDITION; + } + rp_crypter->base.vtable = &vtable; + *crypter = &rp_crypter->base; + return GRPC_STATUS_OK; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/frame_handler.cc b/Sources/CgRPC/src/core/tsi/alts/frame_protector/frame_handler.cc new file mode 100644 index 000000000..d3fda63b3 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/frame_handler.cc @@ -0,0 +1,218 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/frame_protector/frame_handler.h" + +#include +#include +#include + +#include +#include + +#include "src/core/lib/gpr/useful.h" + +/* Use little endian to interpret a string of bytes as uint32_t. */ +static uint32_t load_32_le(const unsigned char* buffer) { + return (((uint32_t)buffer[3]) << 24) | (((uint32_t)buffer[2]) << 16) | + (((uint32_t)buffer[1]) << 8) | ((uint32_t)buffer[0]); +} + +/* Store uint32_t as a string of little endian bytes. */ +static void store_32_le(uint32_t value, unsigned char* buffer) { + buffer[3] = (unsigned char)(value >> 24) & 0xFF; + buffer[2] = (unsigned char)(value >> 16) & 0xFF; + buffer[1] = (unsigned char)(value >> 8) & 0xFF; + buffer[0] = (unsigned char)(value)&0xFF; +} + +/* Frame writer implementation. 
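+ * Each frame is laid out as a little-endian length field (written with
+ * store_32_le() above; it covers the message-type field plus the payload but
+ * not the length field itself), followed by a little-endian message type of
+ * value kFrameMessageType, and then the payload bytes.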
*/ +alts_frame_writer* alts_create_frame_writer() { + alts_frame_writer* writer = + static_cast(gpr_zalloc(sizeof(*writer))); + return writer; +} + +bool alts_reset_frame_writer(alts_frame_writer* writer, + const unsigned char* buffer, size_t length) { + if (buffer == nullptr) return false; + size_t max_input_size = SIZE_MAX - kFrameLengthFieldSize; + if (length > max_input_size) { + gpr_log(GPR_ERROR, "length must be at most %zu", max_input_size); + return false; + } + writer->input_buffer = buffer; + writer->input_size = length; + writer->input_bytes_written = 0; + writer->header_bytes_written = 0; + store_32_le( + static_cast(writer->input_size + kFrameMessageTypeFieldSize), + writer->header_buffer); + store_32_le(kFrameMessageType, writer->header_buffer + kFrameLengthFieldSize); + return true; +} + +bool alts_write_frame_bytes(alts_frame_writer* writer, unsigned char* output, + size_t* bytes_size) { + if (bytes_size == nullptr || output == nullptr) return false; + if (alts_is_frame_writer_done(writer)) { + *bytes_size = 0; + return true; + } + size_t bytes_written = 0; + /* Write some header bytes, if needed. */ + if (writer->header_bytes_written != sizeof(writer->header_buffer)) { + size_t bytes_to_write = + GPR_MIN(*bytes_size, + sizeof(writer->header_buffer) - writer->header_bytes_written); + memcpy(output, writer->header_buffer + writer->header_bytes_written, + bytes_to_write); + bytes_written += bytes_to_write; + *bytes_size -= bytes_to_write; + writer->header_bytes_written += bytes_to_write; + output += bytes_to_write; + if (writer->header_bytes_written != sizeof(writer->header_buffer)) { + *bytes_size = bytes_written; + return true; + } + } + /* Write some non-header bytes. */ + size_t bytes_to_write = + GPR_MIN(writer->input_size - writer->input_bytes_written, *bytes_size); + memcpy(output, writer->input_buffer, bytes_to_write); + writer->input_buffer += bytes_to_write; + bytes_written += bytes_to_write; + writer->input_bytes_written += bytes_to_write; + *bytes_size = bytes_written; + return true; +} + +bool alts_is_frame_writer_done(alts_frame_writer* writer) { + return writer->input_buffer == nullptr || + writer->input_size == writer->input_bytes_written; +} + +size_t alts_get_num_writer_bytes_remaining(alts_frame_writer* writer) { + return (sizeof(writer->header_buffer) - writer->header_bytes_written) + + (writer->input_size - writer->input_bytes_written); +} + +void alts_destroy_frame_writer(alts_frame_writer* writer) { gpr_free(writer); } + +/* Frame reader implementation. */ +alts_frame_reader* alts_create_frame_reader() { + alts_frame_reader* reader = + static_cast(gpr_zalloc(sizeof(*reader))); + return reader; +} + +bool alts_is_frame_reader_done(alts_frame_reader* reader) { + return reader->output_buffer == nullptr || + (reader->header_bytes_read == sizeof(reader->header_buffer) && + reader->bytes_remaining == 0); +} + +bool alts_has_read_frame_length(alts_frame_reader* reader) { + return sizeof(reader->header_buffer) == reader->header_bytes_read; +} + +size_t alts_get_reader_bytes_remaining(alts_frame_reader* reader) { + return alts_has_read_frame_length(reader) ? 
reader->bytes_remaining : 0; +} + +void alts_reset_reader_output_buffer(alts_frame_reader* reader, + unsigned char* buffer) { + reader->output_buffer = buffer; +} + +bool alts_reset_frame_reader(alts_frame_reader* reader, unsigned char* buffer) { + if (buffer == nullptr) return false; + reader->output_buffer = buffer; + reader->bytes_remaining = 0; + reader->header_bytes_read = 0; + reader->output_bytes_read = 0; + return true; +} + +bool alts_read_frame_bytes(alts_frame_reader* reader, + const unsigned char* bytes, size_t* bytes_size) { + if (bytes_size == nullptr) return false; + if (bytes == nullptr) { + *bytes_size = 0; + return false; + } + if (alts_is_frame_reader_done(reader)) { + *bytes_size = 0; + return true; + } + size_t bytes_processed = 0; + /* Process the header, if needed. */ + if (reader->header_bytes_read != sizeof(reader->header_buffer)) { + size_t bytes_to_write = GPR_MIN( + *bytes_size, sizeof(reader->header_buffer) - reader->header_bytes_read); + memcpy(reader->header_buffer + reader->header_bytes_read, bytes, + bytes_to_write); + reader->header_bytes_read += bytes_to_write; + bytes_processed += bytes_to_write; + bytes += bytes_to_write; + *bytes_size -= bytes_to_write; + if (reader->header_bytes_read != sizeof(reader->header_buffer)) { + *bytes_size = bytes_processed; + return true; + } + size_t frame_length = load_32_le(reader->header_buffer); + if (frame_length < kFrameMessageTypeFieldSize || + frame_length > kFrameMaxSize) { + gpr_log(GPR_ERROR, + "Bad frame length (should be at least %zu, and at most %zu)", + kFrameMessageTypeFieldSize, kFrameMaxSize); + *bytes_size = 0; + return false; + } + size_t message_type = + load_32_le(reader->header_buffer + kFrameLengthFieldSize); + if (message_type != kFrameMessageType) { + gpr_log(GPR_ERROR, "Unsupported message type %zu (should be %zu)", + message_type, kFrameMessageType); + *bytes_size = 0; + return false; + } + reader->bytes_remaining = frame_length - kFrameMessageTypeFieldSize; + } + /* Process the non-header bytes. */ + size_t bytes_to_write = GPR_MIN(*bytes_size, reader->bytes_remaining); + memcpy(reader->output_buffer, bytes, bytes_to_write); + reader->output_buffer += bytes_to_write; + bytes_processed += bytes_to_write; + reader->bytes_remaining -= bytes_to_write; + reader->output_bytes_read += bytes_to_write; + *bytes_size = bytes_processed; + return true; +} + +size_t alts_get_output_bytes_read(alts_frame_reader* reader) { + return reader->output_bytes_read; +} + +unsigned char* alts_get_output_buffer(alts_frame_reader* reader) { + return reader->output_buffer; +} + +void alts_destroy_frame_reader(alts_frame_reader* reader) { gpr_free(reader); } diff --git a/Sources/CgRPC/src/core/tsi/alts/frame_protector/frame_handler.h b/Sources/CgRPC/src/core/tsi/alts/frame_protector/frame_handler.h new file mode 100644 index 000000000..a703ff40d --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/frame_protector/frame_handler.h @@ -0,0 +1,236 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_FRAME_HANDLER_H +#define GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_FRAME_HANDLER_H + +#include + +#include +#include + +const size_t kFrameMessageType = 0x06; +const size_t kFrameLengthFieldSize = 4; +const size_t kFrameMessageTypeFieldSize = 4; +const size_t kFrameMaxSize = 1024 * 1024; +const size_t kFrameHeaderSize = + kFrameLengthFieldSize + kFrameMessageTypeFieldSize; + +/** + * Implementation of frame reader and frame writer. All APIs in the + * header are thread-compatible. + */ + +/** + * Main struct for a frame writer. It reads frames from an input buffer, and + * writes the contents as raw bytes. It does not own the input buffer. + */ +typedef struct alts_frame_writer { + const unsigned char* input_buffer; + unsigned char header_buffer[kFrameHeaderSize]; + size_t input_bytes_written; + size_t header_bytes_written; + size_t input_size; +} alts_frame_writer; + +/** + * Main struct for a frame reader. It reads raw bytes and puts the framed + * result into an output buffer. It does not own the output buffer. + */ +typedef struct alts_frame_reader { + unsigned char* output_buffer; + unsigned char header_buffer[kFrameHeaderSize]; + size_t header_bytes_read; + size_t output_bytes_read; + size_t bytes_remaining; +} alts_frame_reader; + +/** + * This method creates a frame writer instance and initializes its internal + * states. + */ +alts_frame_writer* alts_create_frame_writer(); + +/** + * This method resets internal states of a frame writer and prepares to write + * a single frame. It does not take ownership of payload_buffer. + * The payload_buffer must outlive the writer. + * + * - writer: a frame writer instance. + * - buffer: a buffer storing full payload data to be framed. + * - length: size of payload data. + * + * The method returns true on success and false otherwise. + */ +bool alts_reset_frame_writer(alts_frame_writer* writer, + const unsigned char* buffer, size_t length); + +/** + * This method writes up to bytes_size bytes of a frame to output. + * + * - writer: a frame writer instance. + * - output: an output buffer used to store the frame. + * - bytes_size: an in/out parameter that stores the size of output buffer + * before the call, and gets written the number of frame bytes written to the + * buffer. + * + * The method returns true on success and false otherwise. + */ +bool alts_write_frame_bytes(alts_frame_writer* writer, unsigned char* output, + size_t* bytes_size); + +/** + * This method checks if a reset can be called to write a new frame. It returns + * true if it's the first time to frame a payload, or the current frame has + * been finished processing. It returns false if it's not ready yet to start a + * new frame (e.g., more payload data needs to be accumulated to process the + * current frame). + * + * if (alts_is_frame_writer_done(writer)) { + * // a new frame can be written, call reset. + * alts_reset_frame_writer(writer, payload_buffer, payload_size); + * } else { + * // accumulate more payload data until a full frame can be written. + * } + * + * - writer: a frame writer instance. + */ +bool alts_is_frame_writer_done(alts_frame_writer* writer); + +/** + * This method returns the number of bytes left to write before a complete frame + * is formed. + * + * - writer: a frame writer instance. 
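+ *
+ * For illustration, the writer calls above typically combine as in the
+ * following sketch (payload, payload_size, out, and out_size are
+ * illustrative names, not part of this API; the payload is assumed to be
+ * non-empty):
+ *
+ * alts_frame_writer* writer = alts_create_frame_writer();
+ * if (!alts_reset_frame_writer(writer, payload, payload_size)) {
+ *   // handle the error.
+ * }
+ * while (!alts_is_frame_writer_done(writer)) {
+ *   size_t written = out_size;  // in: capacity of out, out: bytes produced.
+ *   if (!alts_write_frame_bytes(writer, out, &written)) {
+ *     // handle the error.
+ *   }
+ *   // out now holds written frame bytes; send them to the peer.
+ * }
+ * alts_destroy_frame_writer(writer);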
+ */ +size_t alts_get_num_writer_bytes_remaining(alts_frame_writer* writer); + +/** + * This method destroys a frame writer instance. + * + * - writer: a frame writer instance. + */ +void alts_destroy_frame_writer(alts_frame_writer* writer); + +/** + * This method creates a frame reader instance and initializes its internal + * states. + */ +alts_frame_reader* alts_create_frame_reader(); + +/** + * This method resets internal states of a frame reader (including setting its + * output_buffer with buffer), and prepares to write processed bytes to + * an output_buffer. It does not take ownership of buffer. The buffer must + * outlive reader. + * + * - reader: a frame reader instance. + * - buffer: an output buffer used to store deframed results. + * + * The method returns true on success and false otherwise. + */ +bool alts_reset_frame_reader(alts_frame_reader* reader, unsigned char* buffer); + +/** + * This method processes up to the number of bytes given in bytes_size. It may + * choose not to process all the bytes, if, for instance, more bytes are + * given to the method than required to complete the current frame. + * + * - reader: a frame reader instance. + * - bytes: a buffer that stores data to be processed. + * - bytes_size: an in/out parameter that stores the size of bytes before the + * call and gets written the number of bytes processed. + * + * The method returns true on success and false otherwise. + */ +bool alts_read_frame_bytes(alts_frame_reader* reader, + const unsigned char* bytes, size_t* bytes_size); + +/** + * This method checks if a frame length has been read. + * + * - reader: a frame reader instance. + * + * The method returns true if a frame length has been read and false otherwise. + */ +bool alts_has_read_frame_length(alts_frame_reader* reader); + +/** + * This method returns the number of bytes the frame reader intends to write. + * It may only be called if alts_has_read_frame_length() returns true. + * + * - reader: a frame reader instance. + */ +size_t alts_get_reader_bytes_remaining(alts_frame_reader* reader); + +/** + * This method resets output_buffer but does not otherwise modify other internal + * states of a frame reader instance. After being set, the new output_buffer + * will hold the deframed payload held by the original output_buffer. It does + * not take ownership of buffer. The buffer must outlive the reader. + * To distinguish between two reset methods on a frame reader, + * + * if (alts_fh_is_frame_reader_done(reader)) { + * // if buffer contains a full payload to be deframed, call reset. + * alts_reset_frame_reader(reader, buffer); + * } + * + * // if remaining buffer space is not enough to hold a full payload + * if (buffer_space_remaining < alts_get_reader_bytes_remaining(reader)) { + * // allocate enough space for a new buffer, copy back data processed so far, + * // and call reset. + * alts_reset_reader_output_buffer(reader, new_buffer). + * } + * + * - reader: a frame reader instance. + * - buffer: a buffer used to set reader's output_buffer. + */ +void alts_reset_reader_output_buffer(alts_frame_reader* reader, + unsigned char* buffer); + +/** + * This method checks if reset can be called to start processing a new frame. + * If true and reset was previously called, a full frame has been processed and + * the content of the frame is available in output_buffer. + + * - reader: a frame reader instance. + */ +bool alts_is_frame_reader_done(alts_frame_reader* reader); + +/** + * This method returns output_bytes_read of a frame reader instance. 
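+ * After a call such as
+ *
+ *   size_t processed = received_size;
+ *   alts_read_frame_bytes(reader, received, &processed);
+ *
+ * (received and received_size being illustrative names), it reports how many
+ * deframed payload bytes have been written into the reader's output_buffer
+ * so far.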
+ * + * - reader: a frame reader instance. + */ +size_t alts_get_output_bytes_read(alts_frame_reader* reader); + +/** + * This method returns output_buffer of a frame reader instance. + * + * - reader: a frame reader instance. + */ +unsigned char* alts_get_output_buffer(alts_frame_reader* reader); + +/** + * This method destroys a frame reader instance. + * + * - reader: a frame reader instance. + */ +void alts_destroy_frame_reader(alts_frame_reader* reader); + +#endif /* GRPC_CORE_TSI_ALTS_FRAME_PROTECTOR_FRAME_HANDLER_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_client.cc b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_client.cc new file mode 100644 index 000000000..40f30e41c --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_client.cc @@ -0,0 +1,316 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/handshaker/alts_handshaker_client.h" + +#include +#include +#include + +#include "src/core/tsi/alts/handshaker/alts_handshaker_service_api.h" + +const int kHandshakerClientOpNum = 4; + +typedef struct alts_grpc_handshaker_client { + alts_handshaker_client base; + grpc_call* call; + alts_grpc_caller grpc_caller; +} alts_grpc_handshaker_client; + +static grpc_call_error grpc_start_batch(grpc_call* call, const grpc_op* ops, + size_t nops, void* tag) { + return grpc_call_start_batch(call, ops, nops, tag, nullptr); +} + +/** + * Populate grpc operation data with the fields of ALTS TSI event and make a + * grpc call. + */ +static tsi_result make_grpc_call(alts_handshaker_client* client, + alts_tsi_event* event, bool is_start) { + GPR_ASSERT(client != nullptr && event != nullptr); + alts_grpc_handshaker_client* grpc_client = + reinterpret_cast(client); + grpc_op ops[kHandshakerClientOpNum]; + memset(ops, 0, sizeof(ops)); + grpc_op* op = ops; + if (is_start) { + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 0; + op++; + GPR_ASSERT(op - ops <= kHandshakerClientOpNum); + op->op = GRPC_OP_RECV_INITIAL_METADATA; + op->data.recv_initial_metadata.recv_initial_metadata = + &event->initial_metadata; + op++; + GPR_ASSERT(op - ops <= kHandshakerClientOpNum); + } + op->op = GRPC_OP_SEND_MESSAGE; + op->data.send_message.send_message = event->send_buffer; + op++; + GPR_ASSERT(op - ops <= kHandshakerClientOpNum); + op->op = GRPC_OP_RECV_MESSAGE; + op->data.recv_message.recv_message = &event->recv_buffer; + op++; + GPR_ASSERT(op - ops <= kHandshakerClientOpNum); + GPR_ASSERT(grpc_client->grpc_caller != nullptr); + if (grpc_client->grpc_caller(grpc_client->call, ops, + static_cast(op - ops), + (void*)event) != GRPC_CALL_OK) { + gpr_log(GPR_ERROR, "Start batch operation failed"); + return TSI_INTERNAL_ERROR; + } + return TSI_OK; +} + +/* Create and populate a client_start handshaker request, then serialize it. 
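+ * The request carries the ALTS handshake protocol, the application and
+ * record protocols, the supported RPC version range, the target name, and
+ * any target service accounts from the client options; it is returned as a
+ * grpc_byte_buffer, or nullptr if a setter or the encoding step fails.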
*/ +static grpc_byte_buffer* get_serialized_start_client(alts_tsi_event* event) { + bool ok = true; + grpc_gcp_handshaker_req* req = + grpc_gcp_handshaker_req_create(CLIENT_START_REQ); + ok &= grpc_gcp_handshaker_req_set_handshake_protocol( + req, grpc_gcp_HandshakeProtocol_ALTS); + ok &= grpc_gcp_handshaker_req_add_application_protocol( + req, ALTS_APPLICATION_PROTOCOL); + ok &= grpc_gcp_handshaker_req_add_record_protocol(req, ALTS_RECORD_PROTOCOL); + grpc_gcp_rpc_protocol_versions* versions = &event->options->rpc_versions; + ok &= grpc_gcp_handshaker_req_set_rpc_versions( + req, versions->max_rpc_version.major, versions->max_rpc_version.minor, + versions->min_rpc_version.major, versions->min_rpc_version.minor); + char* target_name = grpc_slice_to_c_string(event->target_name); + ok &= grpc_gcp_handshaker_req_set_target_name(req, target_name); + target_service_account* ptr = + (reinterpret_cast(event->options)) + ->target_account_list_head; + while (ptr != nullptr) { + grpc_gcp_handshaker_req_add_target_identity_service_account(req, ptr->data); + ptr = ptr->next; + } + grpc_slice slice; + ok &= grpc_gcp_handshaker_req_encode(req, &slice); + grpc_byte_buffer* buffer = nullptr; + if (ok) { + buffer = grpc_raw_byte_buffer_create(&slice, 1 /* number of slices */); + } + grpc_slice_unref(slice); + gpr_free(target_name); + grpc_gcp_handshaker_req_destroy(req); + return buffer; +} + +static tsi_result handshaker_client_start_client(alts_handshaker_client* client, + alts_tsi_event* event) { + if (client == nullptr || event == nullptr) { + gpr_log(GPR_ERROR, + "Invalid arguments to alts_grpc_handshaker_client_start_client()"); + return TSI_INVALID_ARGUMENT; + } + grpc_byte_buffer* buffer = get_serialized_start_client(event); + if (buffer == nullptr) { + gpr_log(GPR_ERROR, "get_serialized_start_client() failed"); + return TSI_INTERNAL_ERROR; + } + event->send_buffer = buffer; + tsi_result result = make_grpc_call(client, event, true /* is_start */); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "make_grpc_call() failed"); + } + return result; +} + +/* Create and populate a start_server handshaker request, then serialize it. 
*/ +static grpc_byte_buffer* get_serialized_start_server( + alts_tsi_event* event, grpc_slice* bytes_received) { + GPR_ASSERT(bytes_received != nullptr); + grpc_gcp_handshaker_req* req = + grpc_gcp_handshaker_req_create(SERVER_START_REQ); + bool ok = grpc_gcp_handshaker_req_add_application_protocol( + req, ALTS_APPLICATION_PROTOCOL); + ok &= grpc_gcp_handshaker_req_param_add_record_protocol( + req, grpc_gcp_HandshakeProtocol_ALTS, ALTS_RECORD_PROTOCOL); + ok &= grpc_gcp_handshaker_req_set_in_bytes( + req, reinterpret_cast GRPC_SLICE_START_PTR(*bytes_received), + GRPC_SLICE_LENGTH(*bytes_received)); + grpc_gcp_rpc_protocol_versions* versions = &event->options->rpc_versions; + ok &= grpc_gcp_handshaker_req_set_rpc_versions( + req, versions->max_rpc_version.major, versions->max_rpc_version.minor, + versions->min_rpc_version.major, versions->min_rpc_version.minor); + grpc_slice req_slice; + ok &= grpc_gcp_handshaker_req_encode(req, &req_slice); + grpc_byte_buffer* buffer = nullptr; + if (ok) { + buffer = grpc_raw_byte_buffer_create(&req_slice, 1 /* number of slices */); + } + grpc_slice_unref(req_slice); + grpc_gcp_handshaker_req_destroy(req); + return buffer; +} + +static tsi_result handshaker_client_start_server(alts_handshaker_client* client, + alts_tsi_event* event, + grpc_slice* bytes_received) { + if (client == nullptr || event == nullptr || bytes_received == nullptr) { + gpr_log(GPR_ERROR, + "Invalid arguments to alts_grpc_handshaker_client_start_server()"); + return TSI_INVALID_ARGUMENT; + } + grpc_byte_buffer* buffer = get_serialized_start_server(event, bytes_received); + if (buffer == nullptr) { + gpr_log(GPR_ERROR, "get_serialized_start_server() failed"); + return TSI_INTERNAL_ERROR; + } + event->send_buffer = buffer; + tsi_result result = make_grpc_call(client, event, true /* is_start */); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "make_grpc_call() failed"); + } + return result; +} + +/* Create and populate a next handshaker request, then serialize it. 
*/ +static grpc_byte_buffer* get_serialized_next(grpc_slice* bytes_received) { + GPR_ASSERT(bytes_received != nullptr); + grpc_gcp_handshaker_req* req = grpc_gcp_handshaker_req_create(NEXT_REQ); + bool ok = grpc_gcp_handshaker_req_set_in_bytes( + req, reinterpret_cast GRPC_SLICE_START_PTR(*bytes_received), + GRPC_SLICE_LENGTH(*bytes_received)); + grpc_slice req_slice; + ok &= grpc_gcp_handshaker_req_encode(req, &req_slice); + grpc_byte_buffer* buffer = nullptr; + if (ok) { + buffer = grpc_raw_byte_buffer_create(&req_slice, 1 /* number of slices */); + } + grpc_slice_unref(req_slice); + grpc_gcp_handshaker_req_destroy(req); + return buffer; +} + +static tsi_result handshaker_client_next(alts_handshaker_client* client, + alts_tsi_event* event, + grpc_slice* bytes_received) { + if (client == nullptr || event == nullptr || bytes_received == nullptr) { + gpr_log(GPR_ERROR, + "Invalid arguments to alts_grpc_handshaker_client_next()"); + return TSI_INVALID_ARGUMENT; + } + grpc_byte_buffer* buffer = get_serialized_next(bytes_received); + if (buffer == nullptr) { + gpr_log(GPR_ERROR, "get_serialized_next() failed"); + return TSI_INTERNAL_ERROR; + } + event->send_buffer = buffer; + tsi_result result = make_grpc_call(client, event, false /* is_start */); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "make_grpc_call() failed"); + } + return result; +} + +static void handshaker_client_destruct(alts_handshaker_client* client) { + if (client == nullptr) { + return; + } + alts_grpc_handshaker_client* grpc_client = + reinterpret_cast(client); + grpc_call_unref(grpc_client->call); +} + +static const alts_handshaker_client_vtable vtable = { + handshaker_client_start_client, handshaker_client_start_server, + handshaker_client_next, handshaker_client_destruct}; + +alts_handshaker_client* alts_grpc_handshaker_client_create( + grpc_channel* channel, grpc_completion_queue* queue, + const char* handshaker_service_url) { + if (channel == nullptr || queue == nullptr || + handshaker_service_url == nullptr) { + gpr_log(GPR_ERROR, "Invalid arguments to alts_handshaker_client_create()"); + return nullptr; + } + alts_grpc_handshaker_client* client = + static_cast(gpr_zalloc(sizeof(*client))); + client->grpc_caller = grpc_start_batch; + grpc_slice slice = grpc_slice_from_copied_string(handshaker_service_url); + client->call = grpc_channel_create_call( + channel, nullptr, GRPC_PROPAGATE_DEFAULTS, queue, + grpc_slice_from_static_string(ALTS_SERVICE_METHOD), &slice, + gpr_inf_future(GPR_CLOCK_REALTIME), nullptr); + client->base.vtable = &vtable; + grpc_slice_unref(slice); + return &client->base; +} + +namespace grpc_core { +namespace internal { + +void alts_handshaker_client_set_grpc_caller_for_testing( + alts_handshaker_client* client, alts_grpc_caller caller) { + GPR_ASSERT(client != nullptr && caller != nullptr); + alts_grpc_handshaker_client* grpc_client = + reinterpret_cast(client); + grpc_client->grpc_caller = caller; +} + +} // namespace internal +} // namespace grpc_core + +tsi_result alts_handshaker_client_start_client(alts_handshaker_client* client, + alts_tsi_event* event) { + if (client != nullptr && client->vtable != nullptr && + client->vtable->client_start != nullptr) { + return client->vtable->client_start(client, event); + } + gpr_log(GPR_ERROR, + "client or client->vtable has not been initialized properly"); + return TSI_INVALID_ARGUMENT; +} + +tsi_result alts_handshaker_client_start_server(alts_handshaker_client* client, + alts_tsi_event* event, + grpc_slice* bytes_received) { + if (client != nullptr && 
client->vtable != nullptr && + client->vtable->server_start != nullptr) { + return client->vtable->server_start(client, event, bytes_received); + } + gpr_log(GPR_ERROR, + "client or client->vtable has not been initialized properly"); + return TSI_INVALID_ARGUMENT; +} + +tsi_result alts_handshaker_client_next(alts_handshaker_client* client, + alts_tsi_event* event, + grpc_slice* bytes_received) { + if (client != nullptr && client->vtable != nullptr && + client->vtable->next != nullptr) { + return client->vtable->next(client, event, bytes_received); + } + gpr_log(GPR_ERROR, + "client or client->vtable has not been initialized properly"); + return TSI_INVALID_ARGUMENT; +} + +void alts_handshaker_client_destroy(alts_handshaker_client* client) { + if (client != nullptr) { + if (client->vtable != nullptr && client->vtable->destruct != nullptr) { + client->vtable->destruct(client); + } + gpr_free(client); + } +} diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_client.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_client.h new file mode 100644 index 000000000..fb2d2cf68 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_client.h @@ -0,0 +1,137 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_CLIENT_H +#define GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_CLIENT_H + +#include + +#include + +#include "src/core/tsi/alts/handshaker/alts_tsi_event.h" + +#define ALTS_SERVICE_METHOD "/grpc.gcp.HandshakerService/DoHandshake" +#define ALTS_APPLICATION_PROTOCOL "grpc" +#define ALTS_RECORD_PROTOCOL "ALTSRP_GCM_AES128_REKEY" + +const size_t kAltsAes128GcmRekeyKeyLength = 44; + +/** + * A ALTS handshaker client interface. It is used to communicate with + * ALTS handshaker service by scheduling a handshaker request that could be one + * of client_start, server_start, and next handshaker requests. All APIs in the + * header are thread-compatible. + */ +typedef struct alts_handshaker_client alts_handshaker_client; + +/* A function that makes the grpc call to the handshaker service. */ +typedef grpc_call_error (*alts_grpc_caller)(grpc_call* call, const grpc_op* ops, + size_t nops, void* tag); + +/* V-table for ALTS handshaker client operations. */ +typedef struct alts_handshaker_client_vtable { + tsi_result (*client_start)(alts_handshaker_client* client, + alts_tsi_event* event); + tsi_result (*server_start)(alts_handshaker_client* client, + alts_tsi_event* event, grpc_slice* bytes_received); + tsi_result (*next)(alts_handshaker_client* client, alts_tsi_event* event, + grpc_slice* bytes_received); + void (*destruct)(alts_handshaker_client* client); +} alts_handshaker_client_vtable; + +struct alts_handshaker_client { + const alts_handshaker_client_vtable* vtable; +}; + +/** + * This method schedules a client_start handshaker request to ALTS handshaker + * service. + * + * - client: ALTS handshaker client instance. 
+ * - event: ALTS TSI event instance. + * + * It returns TSI_OK on success and an error status code on failure. + */ +tsi_result alts_handshaker_client_start_client(alts_handshaker_client* client, + alts_tsi_event* event); + +/** + * This method schedules a server_start handshaker request to ALTS handshaker + * service. + * + * - client: ALTS handshaker client instance. + * - event: ALTS TSI event instance. + * - bytes_received: bytes in out_frames returned from the peer's handshaker + * response. + * + * It returns TSI_OK on success and an error status code on failure. + */ +tsi_result alts_handshaker_client_start_server(alts_handshaker_client* client, + alts_tsi_event* event, + grpc_slice* bytes_received); + +/** + * This method schedules a next handshaker request to ALTS handshaker service. + * + * - client: ALTS handshaker client instance. + * - event: ALTS TSI event instance. + * - bytes_received: bytes in out_frames returned from the peer's handshaker + * response. + * + * It returns TSI_OK on success and an error status code on failure. + */ +tsi_result alts_handshaker_client_next(alts_handshaker_client* client, + alts_tsi_event* event, + grpc_slice* bytes_received); + +/** + * This method destroys a ALTS handshaker client. + * + * - client: a ALTS handshaker client instance. + */ +void alts_handshaker_client_destroy(alts_handshaker_client* client); + +/** + * This method creates a ALTS handshaker client. + * + * - channel: grpc channel to ALTS handshaker service. + * - queue: grpc completion queue. + * - handshaker_service_url: address of ALTS handshaker service in the format of + * "host:port". + * + * It returns the created ALTS handshaker client on success, and NULL on + * failure. + */ +alts_handshaker_client* alts_grpc_handshaker_client_create( + grpc_channel* channel, grpc_completion_queue* queue, + const char* handshaker_service_url); + +namespace grpc_core { +namespace internal { + +/** + * Unsafe, use for testing only. It allows the caller to change the way that + * GRPC calls are made to the handshaker service. + */ +void alts_handshaker_client_set_grpc_caller_for_testing( + alts_handshaker_client* client, alts_grpc_caller caller); + +} // namespace internal +} // namespace grpc_core + +#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_CLIENT_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api.cc b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api.cc new file mode 100644 index 000000000..256e414ae --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api.cc @@ -0,0 +1,520 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/tsi/alts/handshaker/alts_handshaker_service_api.h" + +#include +#include + +#include "src/core/tsi/alts/handshaker/transport_security_common_api.h" + +/* HandshakerReq */ +grpc_gcp_handshaker_req* grpc_gcp_handshaker_req_create( + grpc_gcp_handshaker_req_type type) { + grpc_gcp_handshaker_req* req = + static_cast(gpr_zalloc(sizeof(*req))); + switch (type) { + case CLIENT_START_REQ: + req->has_client_start = true; + break; + case SERVER_START_REQ: + req->has_server_start = true; + break; + case NEXT_REQ: + req->has_next = true; + break; + } + return req; +} + +void grpc_gcp_handshaker_req_destroy(grpc_gcp_handshaker_req* req) { + if (req == nullptr) { + return; + } + if (req->has_client_start) { + /* Destroy client_start request. */ + destroy_repeated_field_list_identity( + static_cast(req->client_start.target_identities.arg)); + destroy_repeated_field_list_string(static_cast( + req->client_start.application_protocols.arg)); + destroy_repeated_field_list_string( + static_cast(req->client_start.record_protocols.arg)); + if (req->client_start.has_local_identity) { + destroy_slice(static_cast( + req->client_start.local_identity.hostname.arg)); + destroy_slice(static_cast( + req->client_start.local_identity.service_account.arg)); + } + if (req->client_start.has_local_endpoint) { + destroy_slice(static_cast( + req->client_start.local_endpoint.ip_address.arg)); + } + if (req->client_start.has_remote_endpoint) { + destroy_slice(static_cast( + req->client_start.remote_endpoint.ip_address.arg)); + } + destroy_slice(static_cast(req->client_start.target_name.arg)); + } else if (req->has_server_start) { + /* Destroy server_start request. */ + size_t i = 0; + for (i = 0; i < req->server_start.handshake_parameters_count; i++) { + destroy_repeated_field_list_identity( + static_cast(req->server_start.handshake_parameters[i] + .value.local_identities.arg)); + destroy_repeated_field_list_string( + static_cast(req->server_start.handshake_parameters[i] + .value.record_protocols.arg)); + } + destroy_repeated_field_list_string(static_cast( + req->server_start.application_protocols.arg)); + if (req->server_start.has_local_endpoint) { + destroy_slice(static_cast( + req->server_start.local_endpoint.ip_address.arg)); + } + if (req->server_start.has_remote_endpoint) { + destroy_slice(static_cast( + req->server_start.remote_endpoint.ip_address.arg)); + } + destroy_slice(static_cast(req->server_start.in_bytes.arg)); + } else { + /* Destroy next request. 
*/ + destroy_slice(static_cast(req->next.in_bytes.arg)); + } + gpr_free(req); +} + +bool grpc_gcp_handshaker_req_set_handshake_protocol( + grpc_gcp_handshaker_req* req, + grpc_gcp_handshake_protocol handshake_protocol) { + if (req == nullptr || !req->has_client_start) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_set_handshake_protocol()."); + return false; + } + req->client_start.has_handshake_security_protocol = true; + req->client_start.handshake_security_protocol = handshake_protocol; + return true; +} + +bool grpc_gcp_handshaker_req_set_target_name(grpc_gcp_handshaker_req* req, + const char* target_name) { + if (req == nullptr || target_name == nullptr || !req->has_client_start) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_set_target_name()."); + return false; + } + grpc_slice* slice = create_slice(target_name, strlen(target_name)); + req->client_start.target_name.arg = slice; + req->client_start.target_name.funcs.encode = encode_string_or_bytes_cb; + return true; +} + +bool grpc_gcp_handshaker_req_add_application_protocol( + grpc_gcp_handshaker_req* req, const char* application_protocol) { + if (req == nullptr || application_protocol == nullptr || req->has_next) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_add_application_protocol()."); + return false; + } + grpc_slice* slice = + create_slice(application_protocol, strlen(application_protocol)); + if (req->has_client_start) { + add_repeated_field(reinterpret_cast( + &req->client_start.application_protocols.arg), + slice); + req->client_start.application_protocols.funcs.encode = + encode_repeated_string_cb; + } else { + add_repeated_field(reinterpret_cast( + &req->server_start.application_protocols.arg), + slice); + req->server_start.application_protocols.funcs.encode = + encode_repeated_string_cb; + } + return true; +} + +bool grpc_gcp_handshaker_req_add_record_protocol(grpc_gcp_handshaker_req* req, + const char* record_protocol) { + if (req == nullptr || record_protocol == nullptr || !req->has_client_start) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_add_record_protocol()."); + return false; + } + grpc_slice* slice = create_slice(record_protocol, strlen(record_protocol)); + add_repeated_field(reinterpret_cast( + &req->client_start.record_protocols.arg), + slice); + req->client_start.record_protocols.funcs.encode = encode_repeated_string_cb; + return true; +} + +static void set_identity_hostname(grpc_gcp_identity* identity, + const char* hostname) { + grpc_slice* slice = create_slice(hostname, strlen(hostname)); + identity->hostname.arg = slice; + identity->hostname.funcs.encode = encode_string_or_bytes_cb; +} + +static void set_identity_service_account(grpc_gcp_identity* identity, + const char* service_account) { + grpc_slice* slice = create_slice(service_account, strlen(service_account)); + identity->service_account.arg = slice; + identity->service_account.funcs.encode = encode_string_or_bytes_cb; +} + +bool grpc_gcp_handshaker_req_add_target_identity_hostname( + grpc_gcp_handshaker_req* req, const char* hostname) { + if (req == nullptr || hostname == nullptr || !req->has_client_start) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to " + "grpc_gcp_handshaker_req_add_target_identity_hostname()."); + return false; + } + grpc_gcp_identity* target_identity = + static_cast(gpr_zalloc(sizeof(*target_identity))); + set_identity_hostname(target_identity, hostname); + 
req->client_start.target_identities.funcs.encode = + encode_repeated_identity_cb; + add_repeated_field(reinterpret_cast( + &req->client_start.target_identities.arg), + target_identity); + return true; +} + +bool grpc_gcp_handshaker_req_add_target_identity_service_account( + grpc_gcp_handshaker_req* req, const char* service_account) { + if (req == nullptr || service_account == nullptr || !req->has_client_start) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to " + "grpc_gcp_handshaker_req_add_target_identity_service_account()."); + return false; + } + grpc_gcp_identity* target_identity = + static_cast(gpr_zalloc(sizeof(*target_identity))); + set_identity_service_account(target_identity, service_account); + req->client_start.target_identities.funcs.encode = + encode_repeated_identity_cb; + add_repeated_field(reinterpret_cast( + &req->client_start.target_identities.arg), + target_identity); + return true; +} + +bool grpc_gcp_handshaker_req_set_local_identity_hostname( + grpc_gcp_handshaker_req* req, const char* hostname) { + if (req == nullptr || hostname == nullptr || !req->has_client_start) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to " + "grpc_gcp_handshaker_req_set_local_identity_hostname()."); + return false; + } + req->client_start.has_local_identity = true; + set_identity_hostname(&req->client_start.local_identity, hostname); + return true; +} + +bool grpc_gcp_handshaker_req_set_local_identity_service_account( + grpc_gcp_handshaker_req* req, const char* service_account) { + if (req == nullptr || service_account == nullptr || !req->has_client_start) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to " + "grpc_gcp_handshaker_req_set_local_identity_service_account()."); + return false; + } + req->client_start.has_local_identity = true; + set_identity_service_account(&req->client_start.local_identity, + service_account); + return true; +} + +static void set_endpoint(grpc_gcp_endpoint* endpoint, const char* ip_address, + size_t port, grpc_gcp_network_protocol protocol) { + grpc_slice* slice = create_slice(ip_address, strlen(ip_address)); + endpoint->ip_address.arg = slice; + endpoint->ip_address.funcs.encode = encode_string_or_bytes_cb; + endpoint->has_port = true; + endpoint->port = static_cast(port); + endpoint->has_protocol = true; + endpoint->protocol = protocol; +} + +bool grpc_gcp_handshaker_req_set_rpc_versions(grpc_gcp_handshaker_req* req, + uint32_t max_major, + uint32_t max_minor, + uint32_t min_major, + uint32_t min_minor) { + if (req == nullptr || req->has_next) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_set_rpc_versions()."); + return false; + } + if (req->has_client_start) { + req->client_start.has_rpc_versions = true; + grpc_gcp_rpc_protocol_versions_set_max(&req->client_start.rpc_versions, + max_major, max_minor); + grpc_gcp_rpc_protocol_versions_set_min(&req->client_start.rpc_versions, + min_major, min_minor); + } else { + req->server_start.has_rpc_versions = true; + grpc_gcp_rpc_protocol_versions_set_max(&req->server_start.rpc_versions, + max_major, max_minor); + grpc_gcp_rpc_protocol_versions_set_min(&req->server_start.rpc_versions, + min_major, min_minor); + } + return true; +} + +bool grpc_gcp_handshaker_req_set_local_endpoint( + grpc_gcp_handshaker_req* req, const char* ip_address, size_t port, + grpc_gcp_network_protocol protocol) { + if (req == nullptr || ip_address == nullptr || port > 65535 || + req->has_next) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_set_local_endpoint()."); + 
return false; + } + if (req->has_client_start) { + req->client_start.has_local_endpoint = true; + set_endpoint(&req->client_start.local_endpoint, ip_address, port, protocol); + } else { + req->server_start.has_local_endpoint = true; + set_endpoint(&req->server_start.local_endpoint, ip_address, port, protocol); + } + return true; +} + +bool grpc_gcp_handshaker_req_set_remote_endpoint( + grpc_gcp_handshaker_req* req, const char* ip_address, size_t port, + grpc_gcp_network_protocol protocol) { + if (req == nullptr || ip_address == nullptr || port > 65535 || + req->has_next) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_set_remote_endpoint()."); + return false; + } + if (req->has_client_start) { + req->client_start.has_remote_endpoint = true; + set_endpoint(&req->client_start.remote_endpoint, ip_address, port, + protocol); + } else { + req->server_start.has_remote_endpoint = true; + set_endpoint(&req->server_start.remote_endpoint, ip_address, port, + protocol); + } + return true; +} + +bool grpc_gcp_handshaker_req_set_in_bytes(grpc_gcp_handshaker_req* req, + const char* in_bytes, size_t size) { + if (req == nullptr || in_bytes == nullptr || req->has_client_start) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_set_in_bytes()."); + return false; + } + grpc_slice* slice = create_slice(in_bytes, size); + if (req->has_next) { + req->next.in_bytes.arg = slice; + req->next.in_bytes.funcs.encode = &encode_string_or_bytes_cb; + } else { + req->server_start.in_bytes.arg = slice; + req->server_start.in_bytes.funcs.encode = &encode_string_or_bytes_cb; + } + return true; +} + +static grpc_gcp_server_handshake_parameters* server_start_find_param( + grpc_gcp_handshaker_req* req, int32_t key) { + size_t i = 0; + for (i = 0; i < req->server_start.handshake_parameters_count; i++) { + if (req->server_start.handshake_parameters[i].key == key) { + return &req->server_start.handshake_parameters[i].value; + } + } + req->server_start + .handshake_parameters[req->server_start.handshake_parameters_count] + .has_key = true; + req->server_start + .handshake_parameters[req->server_start.handshake_parameters_count] + .has_value = true; + req->server_start + .handshake_parameters[req->server_start.handshake_parameters_count++] + .key = key; + return &req->server_start + .handshake_parameters + [req->server_start.handshake_parameters_count - 1] + .value; +} + +bool grpc_gcp_handshaker_req_param_add_record_protocol( + grpc_gcp_handshaker_req* req, grpc_gcp_handshake_protocol key, + const char* record_protocol) { + if (req == nullptr || record_protocol == nullptr || !req->has_server_start) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_param_add_record_protocol()."); + return false; + } + grpc_gcp_server_handshake_parameters* param = + server_start_find_param(req, key); + grpc_slice* slice = create_slice(record_protocol, strlen(record_protocol)); + add_repeated_field( + reinterpret_cast(¶m->record_protocols.arg), slice); + param->record_protocols.funcs.encode = &encode_repeated_string_cb; + return true; +} + +bool grpc_gcp_handshaker_req_param_add_local_identity_hostname( + grpc_gcp_handshaker_req* req, grpc_gcp_handshake_protocol key, + const char* hostname) { + if (req == nullptr || hostname == nullptr || !req->has_server_start) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_param_add_local_identity_hostname()."); + return false; + } + grpc_gcp_server_handshake_parameters* param = + 
server_start_find_param(req, key); + grpc_gcp_identity* local_identity = + static_cast(gpr_zalloc(sizeof(*local_identity))); + set_identity_hostname(local_identity, hostname); + add_repeated_field( + reinterpret_cast(¶m->local_identities.arg), + local_identity); + param->local_identities.funcs.encode = &encode_repeated_identity_cb; + return true; +} + +bool grpc_gcp_handshaker_req_param_add_local_identity_service_account( + grpc_gcp_handshaker_req* req, grpc_gcp_handshake_protocol key, + const char* service_account) { + if (req == nullptr || service_account == nullptr || !req->has_server_start) { + gpr_log( + GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_handshaker_req_param_add_local_identity_service_account()."); + return false; + } + grpc_gcp_server_handshake_parameters* param = + server_start_find_param(req, key); + grpc_gcp_identity* local_identity = + static_cast(gpr_zalloc(sizeof(*local_identity))); + set_identity_service_account(local_identity, service_account); + add_repeated_field( + reinterpret_cast(¶m->local_identities.arg), + local_identity); + param->local_identities.funcs.encode = &encode_repeated_identity_cb; + return true; +} + +bool grpc_gcp_handshaker_req_encode(grpc_gcp_handshaker_req* req, + grpc_slice* slice) { + if (req == nullptr || slice == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to grpc_gcp_handshaker_req_encode()."); + return false; + } + pb_ostream_t size_stream; + memset(&size_stream, 0, sizeof(pb_ostream_t)); + if (!pb_encode(&size_stream, grpc_gcp_HandshakerReq_fields, req)) { + gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&size_stream)); + return false; + } + size_t encoded_length = size_stream.bytes_written; + *slice = grpc_slice_malloc(encoded_length); + pb_ostream_t output_stream = + pb_ostream_from_buffer(GRPC_SLICE_START_PTR(*slice), encoded_length); + if (!pb_encode(&output_stream, grpc_gcp_HandshakerReq_fields, req) != 0) { + gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&output_stream)); + return false; + } + return true; +} + +/* HandshakerResp. 
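+ * Responses are allocated with grpc_gcp_handshaker_resp_create(), populated
+ * from the wire with grpc_gcp_handshaker_resp_decode(), which installs the
+ * string/bytes decode callbacks before running pb_decode, and released with
+ * grpc_gcp_handshaker_resp_destroy(), which frees the slices those callbacks
+ * allocated.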
*/ +grpc_gcp_handshaker_resp* grpc_gcp_handshaker_resp_create(void) { + grpc_gcp_handshaker_resp* resp = + static_cast(gpr_zalloc(sizeof(*resp))); + return resp; +} + +void grpc_gcp_handshaker_resp_destroy(grpc_gcp_handshaker_resp* resp) { + if (resp != nullptr) { + destroy_slice(static_cast(resp->out_frames.arg)); + if (resp->has_status) { + destroy_slice(static_cast(resp->status.details.arg)); + } + if (resp->has_result) { + destroy_slice( + static_cast(resp->result.application_protocol.arg)); + destroy_slice(static_cast(resp->result.record_protocol.arg)); + destroy_slice(static_cast(resp->result.key_data.arg)); + if (resp->result.has_local_identity) { + destroy_slice( + static_cast(resp->result.local_identity.hostname.arg)); + destroy_slice(static_cast( + resp->result.local_identity.service_account.arg)); + } + if (resp->result.has_peer_identity) { + destroy_slice( + static_cast(resp->result.peer_identity.hostname.arg)); + destroy_slice(static_cast( + resp->result.peer_identity.service_account.arg)); + } + } + gpr_free(resp); + } +} + +bool grpc_gcp_handshaker_resp_decode(grpc_slice encoded_handshaker_resp, + grpc_gcp_handshaker_resp* resp) { + if (resp == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr argument to grpc_gcp_handshaker_resp_decode()."); + return false; + } + pb_istream_t stream = + pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_handshaker_resp), + GRPC_SLICE_LENGTH(encoded_handshaker_resp)); + resp->out_frames.funcs.decode = decode_string_or_bytes_cb; + resp->status.details.funcs.decode = decode_string_or_bytes_cb; + resp->result.application_protocol.funcs.decode = decode_string_or_bytes_cb; + resp->result.record_protocol.funcs.decode = decode_string_or_bytes_cb; + resp->result.key_data.funcs.decode = decode_string_or_bytes_cb; + resp->result.peer_identity.hostname.funcs.decode = decode_string_or_bytes_cb; + resp->result.peer_identity.service_account.funcs.decode = + decode_string_or_bytes_cb; + resp->result.local_identity.hostname.funcs.decode = decode_string_or_bytes_cb; + resp->result.local_identity.service_account.funcs.decode = + decode_string_or_bytes_cb; + if (!pb_decode(&stream, grpc_gcp_HandshakerResp_fields, resp)) { + gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream)); + return false; + } + return true; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api.h new file mode 100644 index 000000000..5df56a86f --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api.h @@ -0,0 +1,323 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_SERVICE_API_H +#define GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_SERVICE_API_H + +#include + +#include "src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h" + +/** + * An implementation of nanopb thin wrapper used to set/get and + * serialize/de-serialize of ALTS handshake requests and responses. + * + * All APIs in the header are thread-compatible. A typical usage of this API at + * the client side is as follows: + * + * ----------------------------------------------------------------------------- + * // Create, populate, and serialize an ALTS client_start handshake request to + * // send to the server. + * grpc_gcp_handshaker_req* req = + * grpc_gcp_handshaker_req_create(CLIENT_START_REQ); + * grpc_gcp_handshaker_req_set_handshake_protocol( + req, grpc_gcp_HandshakeProtocol_ALTS); + * grpc_gcp_handshaker_req_add_application_protocol(req, "grpc"); + * grpc_gcp_handshaker_req_add_record_protocol(req, "ALTSRP_GCM_AES128"); + * grpc_slice client_slice; + * if (!grpc_gcp_handshaker_req_encode(req, &client_slice)) { + * fprintf(stderr, "ALTS handshake request encoding failed."; + * } + * + * // De-serialize a data stream received from the server, and store the result + * // at ALTS handshake response. + * grpc_gcp_handshaker_resp* resp = grpc_gcp_handshaker_resp_create(); + * if (!grpc_gcp_handshaker_resp_decode(server_slice, resp)) { + * fprintf(stderr, "ALTS handshake response decoding failed."); + * } + * // To access a variable-length datatype field (i.e., pb_callback_t), + * // access its "arg" subfield (if it has been set). + * if (resp->out_frames.arg != nullptr) { + * grpc_slice* slice = resp->out_frames.arg; + * } + * // To access a fixed-length datatype field (i.e., not pb_calback_t), + * // access the field directly (if it has been set). + * if (resp->has_status && resp->status->has_code) { + * uint32_t code = resp->status->code; + * } + *------------------------------------------------------------------------------ + */ + +/** + * This method creates an ALTS handshake request. + * + * - type: an enum type value that can be either CLIENT_START_REQ, + * SERVER_START_REQ, or NEXT_REQ to indicate the created instance will be + * client_start, server_start, and next handshake request message + * respectively. + * + * The method returns a pointer to the created instance. + */ +grpc_gcp_handshaker_req* grpc_gcp_handshaker_req_create( + grpc_gcp_handshaker_req_type type); + +/** + * This method sets the value for handshake_security_protocol field of ALTS + * client_start handshake request. + * + * - req: an ALTS handshake request. + * - handshake_protocol: a enum type value representing the handshake security + * protocol. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_set_handshake_protocol( + grpc_gcp_handshaker_req* req, + grpc_gcp_handshake_protocol handshake_protocol); + +/** + * This method sets the value for target_name field of ALTS client_start + * handshake request. + * + * - req: an ALTS handshake request. + * - target_name: a target name to be set. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_set_target_name(grpc_gcp_handshaker_req* req, + const char* target_name); + +/** + * This method adds an application protocol supported by the server (or + * client) to ALTS server_start (or client_start) handshake request. + * + * - req: an ALTS handshake request. 
+ * - application_protocol: an application protocol (e.g., grpc) to be added. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_add_application_protocol( + grpc_gcp_handshaker_req* req, const char* application_protocol); + +/** + * This method adds a record protocol supported by the client to ALTS + * client_start handshake request. + * + * - req: an ALTS handshake request. + * - record_protocol: a record protocol (e.g., ALTSRP_GCM_AES128) to be + * added. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_add_record_protocol(grpc_gcp_handshaker_req* req, + const char* record_protocol); + +/** + * This method adds a target server identity represented as hostname and + * acceptable by a client to ALTS client_start handshake request. + * + * - req: an ALTS handshake request. + * - hostname: a string representation of hostname at the connection + * endpoint to be added. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_add_target_identity_hostname( + grpc_gcp_handshaker_req* req, const char* hostname); + +/** + * This method adds a target server identity represented as service account and + * acceptable by a client to ALTS client_start handshake request. + * + * - req: an ALTS handshake request. + * - service_account: a string representation of service account at the + * connection endpoint to be added. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_add_target_identity_service_account( + grpc_gcp_handshaker_req* req, const char* service_account); + +/** + * This method sets the hostname for local_identity field of ALTS client_start + * handshake request. + * + * - req: an ALTS handshake request. + * - hostname: a string representation of hostname. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_set_local_identity_hostname( + grpc_gcp_handshaker_req* req, const char* hostname); + +/** + * This method sets the service account for local_identity field of ALTS + * client_start handshake request. + * + * - req: an ALTS handshake request. + * - service_account: a string representation of service account. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_set_local_identity_service_account( + grpc_gcp_handshaker_req* req, const char* service_account); + +/** + * This method sets the value for local_endpoint field of either ALTS + * client_start or server_start handshake request. + * + * - req: an ALTS handshake request. + * - ip_address: a string representation of ip address associated with the + * local endpoint, that could be either IPv4 or IPv6. + * - port: a port number associated with the local endpoint. + * - protocol: a network protocol (e.g., TCP or UDP) associated with the + * local endpoint. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_set_local_endpoint( + grpc_gcp_handshaker_req* req, const char* ip_address, size_t port, + grpc_gcp_network_protocol protocol); + +/** + * This method sets the value for remote_endpoint field of either ALTS + * client_start or server_start handshake request. + * + * - req: an ALTS handshake request. + * - ip_address: a string representation of ip address associated with the + * remote endpoint, that could be either IPv4 or IPv6. + * - port: a port number associated with the remote endpoint. 
+ * - protocol: a network protocol (e.g., TCP or UDP) associated with the + * remote endpoint. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_set_remote_endpoint( + grpc_gcp_handshaker_req* req, const char* ip_address, size_t port, + grpc_gcp_network_protocol protocol); + +/** + * This method sets the value for in_bytes field of either ALTS server_start or + * next handshake request. + * + * - req: an ALTS handshake request. + * - in_bytes: a buffer containing bytes taken from out_frames of the peer's + * ALTS handshake response. It is possible that the peer's out_frames are + * split into multiple handshake request messages. + * - size: size of in_bytes buffer. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_set_in_bytes(grpc_gcp_handshaker_req* req, + const char* in_bytes, size_t size); + +/** + * This method adds a record protocol to handshake parameters mapped by the + * handshake protocol for ALTS server_start handshake request. + * + * - req: an ALTS handshake request. + * - key: an enum type value representing a handshake security protocol. + * - record_protocol: a record protocol to be added. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_param_add_record_protocol( + grpc_gcp_handshaker_req* req, grpc_gcp_handshake_protocol key, + const char* record_protocol); + +/** + * This method adds a local identity represented as hostname to handshake + * parameters mapped by the handshake protocol for ALTS server_start handshake + * request. + * + * - req: an ALTS handshake request. + * - key: an enum type value representing a handshake security protocol. + * - hostname: a string representation of hostname to be added. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_param_add_local_identity_hostname( + grpc_gcp_handshaker_req* req, grpc_gcp_handshake_protocol key, + const char* hostname); + +/** + * This method adds a local identity represented as service account to handshake + * parameters mapped by the handshake protocol for ALTS server_start handshake + * request. + * + * - req: an ALTS handshake request. + * - key: an enum type value representing a handshake security protocol. + * - service_account: a string representation of service account to be added. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_param_add_local_identity_service_account( + grpc_gcp_handshaker_req* req, grpc_gcp_handshake_protocol key, + const char* service_account); + +/** + * This method sets the value for rpc_versions field of either ALTS + * client_start or server_start handshake request. + * + * - req: an ALTS handshake request. + * - max_major: a major version of maximum supported RPC version. + * - max_minor: a minor version of maximum supported RPC version. + * - min_major: a major version of minimum supported RPC version. + * - min_minor: a minor version of minimum supported RPC version. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_set_rpc_versions(grpc_gcp_handshaker_req* req, + uint32_t max_major, + uint32_t max_minor, + uint32_t min_major, + uint32_t min_minor); + +/** + * This method serializes an ALTS handshake request and returns a data stream. + * + * - req: an ALTS handshake request. + * - slice: a data stream where the serialized result will be written. 
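+ *
+ * For example, a server_start request can be populated and then serialized as
+ * follows (a usage sketch built only from the functions declared in this
+ * header; in_bytes and in_bytes_size are assumed to hold the out_frames taken
+ * from the peer's handshake messages):
+ *
+ *   grpc_gcp_handshaker_req* req =
+ *       grpc_gcp_handshaker_req_create(SERVER_START_REQ);
+ *   grpc_gcp_handshaker_req_add_application_protocol(req, "grpc");
+ *   grpc_gcp_handshaker_req_param_add_record_protocol(
+ *       req, grpc_gcp_HandshakeProtocol_ALTS, "ALTSRP_GCM_AES128");
+ *   grpc_gcp_handshaker_req_set_in_bytes(req, in_bytes, in_bytes_size);
+ *   grpc_slice server_slice;
+ *   if (!grpc_gcp_handshaker_req_encode(req, &server_slice)) {
+ *     fprintf(stderr, "ALTS handshake request encoding failed.");
+ *   }
+ *   grpc_gcp_handshaker_req_destroy(req);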
+ * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_req_encode(grpc_gcp_handshaker_req* req, + grpc_slice* slice); + +/* This method destroys an ALTS handshake request. */ +void grpc_gcp_handshaker_req_destroy(grpc_gcp_handshaker_req* req); + +/* This method creates an ALTS handshake response. */ +grpc_gcp_handshaker_resp* grpc_gcp_handshaker_resp_create(void); + +/** + * This method de-serializes a data stream and stores the result + * in an ALTS handshake response. + * + * - slice: a data stream containing a serialized ALTS handshake response. + * - resp: an ALTS handshake response used to hold de-serialized result. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_handshaker_resp_decode(grpc_slice slice, + grpc_gcp_handshaker_resp* resp); + +/* This method destroys an ALTS handshake response. */ +void grpc_gcp_handshaker_resp_destroy(grpc_gcp_handshaker_resp* resp); + +#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_SERVICE_API_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.cc b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.cc new file mode 100644 index 000000000..e0e418468 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.cc @@ -0,0 +1,143 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h" + +void add_repeated_field(repeated_field** head, const void* data) { + repeated_field* field = + static_cast(gpr_zalloc(sizeof(*field))); + field->data = data; + if (*head == nullptr) { + *head = field; + (*head)->next = nullptr; + } else { + field->next = *head; + *head = field; + } +} + +void destroy_repeated_field_list_identity(repeated_field* head) { + repeated_field* field = head; + while (field != nullptr) { + repeated_field* next_field = field->next; + const grpc_gcp_identity* identity = + static_cast(field->data); + destroy_slice(static_cast(identity->hostname.arg)); + destroy_slice(static_cast(identity->service_account.arg)); + gpr_free((void*)identity); + gpr_free(field); + field = next_field; + } +} + +void destroy_repeated_field_list_string(repeated_field* head) { + repeated_field* field = head; + while (field != nullptr) { + repeated_field* next_field = field->next; + destroy_slice((grpc_slice*)field->data); + gpr_free(field); + field = next_field; + } +} + +grpc_slice* create_slice(const char* data, size_t size) { + grpc_slice slice = grpc_slice_from_copied_buffer(data, size); + grpc_slice* cb_slice = + static_cast(gpr_zalloc(sizeof(*cb_slice))); + memcpy(cb_slice, &slice, sizeof(*cb_slice)); + return cb_slice; +} + +void destroy_slice(grpc_slice* slice) { + if (slice != nullptr) { + grpc_slice_unref(*slice); + gpr_free(slice); + } +} + +bool encode_string_or_bytes_cb(pb_ostream_t* stream, const pb_field_t* field, + void* const* arg) { + grpc_slice* slice = static_cast(*arg); + if (!pb_encode_tag_for_field(stream, field)) return false; + return pb_encode_string(stream, GRPC_SLICE_START_PTR(*slice), + GRPC_SLICE_LENGTH(*slice)); +} + +bool encode_repeated_identity_cb(pb_ostream_t* stream, const pb_field_t* field, + void* const* arg) { + repeated_field* var = static_cast(*arg); + while (var != nullptr) { + if (!pb_encode_tag_for_field(stream, field)) return false; + if (!pb_encode_submessage(stream, grpc_gcp_Identity_fields, + (grpc_gcp_identity*)var->data)) + return false; + var = var->next; + } + return true; +} + +bool encode_repeated_string_cb(pb_ostream_t* stream, const pb_field_t* field, + void* const* arg) { + repeated_field* var = static_cast(*arg); + while (var != nullptr) { + if (!pb_encode_tag_for_field(stream, field)) return false; + const grpc_slice* slice = static_cast(var->data); + if (!pb_encode_string(stream, GRPC_SLICE_START_PTR(*slice), + GRPC_SLICE_LENGTH(*slice))) + return false; + var = var->next; + } + return true; +} + +bool decode_string_or_bytes_cb(pb_istream_t* stream, const pb_field_t* field, + void** arg) { + grpc_slice slice = grpc_slice_malloc(stream->bytes_left); + grpc_slice* cb_slice = + static_cast(gpr_zalloc(sizeof(*cb_slice))); + memcpy(cb_slice, &slice, sizeof(*cb_slice)); + if (!pb_read(stream, GRPC_SLICE_START_PTR(*cb_slice), stream->bytes_left)) + return false; + *arg = cb_slice; + return true; +} + +bool decode_repeated_identity_cb(pb_istream_t* stream, const pb_field_t* field, + void** arg) { + grpc_gcp_identity* identity = + static_cast(gpr_zalloc(sizeof(*identity))); + identity->hostname.funcs.decode = decode_string_or_bytes_cb; + identity->service_account.funcs.decode = decode_string_or_bytes_cb; + add_repeated_field(reinterpret_cast(arg), identity); + if (!pb_decode(stream, grpc_gcp_Identity_fields, identity)) return false; + return true; +} + +bool decode_repeated_string_cb(pb_istream_t* stream, const pb_field_t* field, + void** 
arg) { + grpc_slice slice = grpc_slice_malloc(stream->bytes_left); + grpc_slice* cb_slice = + static_cast(gpr_zalloc(sizeof(*cb_slice))); + memcpy(cb_slice, &slice, sizeof(grpc_slice)); + if (!pb_read(stream, GRPC_SLICE_START_PTR(*cb_slice), stream->bytes_left)) + return false; + add_repeated_field(reinterpret_cast(arg), cb_slice); + return true; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h new file mode 100644 index 000000000..8fe8f73f8 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h @@ -0,0 +1,149 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_SERVICE_API_UTIL_H +#define GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_SERVICE_API_UTIL_H + +#include + +#include "third_party/nanopb/pb_decode.h" +#include "third_party/nanopb/pb_encode.h" + +#include +#include +#include +#include + +#include "src/core/tsi/alts/handshaker/handshaker.pb.h" + +/** + * An implementation of utility functions used to serialize/ + * de-serialize ALTS handshake requests/responses. All APIs in the header + * are thread-compatible. + */ + +/* Renaming of message/field structs generated by nanopb compiler. */ +typedef grpc_gcp_HandshakeProtocol grpc_gcp_handshake_protocol; +typedef grpc_gcp_NetworkProtocol grpc_gcp_network_protocol; +typedef grpc_gcp_Identity grpc_gcp_identity; +typedef grpc_gcp_NextHandshakeMessageReq grpc_gcp_next_handshake_message_req; +typedef grpc_gcp_ServerHandshakeParameters grpc_gcp_server_handshake_parameters; +typedef grpc_gcp_Endpoint grpc_gcp_endpoint; +typedef grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry + grpc_gcp_handshake_parameters_entry; +typedef grpc_gcp_StartClientHandshakeReq grpc_gcp_start_client_handshake_req; +typedef grpc_gcp_StartServerHandshakeReq grpc_gcp_start_server_handshake_req; +typedef grpc_gcp_HandshakerReq grpc_gcp_handshaker_req; +typedef grpc_gcp_HandshakerResult grpc_gcp_handshaker_result; +typedef grpc_gcp_HandshakerStatus grpc_gcp_handshaker_status; +typedef grpc_gcp_HandshakerResp grpc_gcp_handshaker_resp; + +typedef enum { + CLIENT_START_REQ = 0, /* StartClientHandshakeReq. */ + SERVER_START_REQ = 1, /* StartServerHandshakeReq. */ + NEXT_REQ = 2, /* NextHandshakeMessageReq. */ +} grpc_gcp_handshaker_req_type; + +/** + * A struct representing a repeated field. The struct is used to organize all + * instances of a specific repeated field into a linked list, which then will + * be used at encode/decode phase. For instance at the encode phase, the encode + * function will iterate through the list, encode each field, and then output + * the result to the stream. + */ +typedef struct repeated_field_ { + struct repeated_field_* next; + const void* data; +} repeated_field; + +/** + * This method adds a repeated field to the head of repeated field list. 
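+ * For instance (a sketch using only the helpers declared in this header), a
+ * list of string-type repeated fields can be built up and later released as:
+ *
+ *   repeated_field* head = nullptr;
+ *   add_repeated_field(&head, create_slice("grpc", 4));
+ *   add_repeated_field(&head, create_slice("h2", 2));
+ *   // encode_repeated_string_cb() walks this list when nanopb serializes the
+ *   // corresponding repeated field.
+ *   destroy_repeated_field_list_string(head);
+ *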
+ * + * - head: a head of repeated field list. + * - field: a repeated field to be added to the list. + */ +void add_repeated_field(repeated_field** head, const void* field); + +/** + * This method destroys a repeated field list that consists of string type + * fields. + * + * - head: a head of repeated field list. + */ +void destroy_repeated_field_list_string(repeated_field* head); + +/** + * This method destroys a repeated field list that consists of + * grpc_gcp_identity type fields. + * + * - head: a head of repeated field list. + */ +void destroy_repeated_field_list_identity(repeated_field* head); + +/** + * This method creates a grpc_slice instance by copying a data buffer. It is + * similar to grpc_slice_from_copied_buffer() except that it returns an instance + * allocated from the heap. + * + * - data: a data buffer to be copied to grpc_slice instance. + * - size: size of data buffer. + */ +grpc_slice* create_slice(const char* data, size_t size); + +/* This method destroys a grpc_slice instance. */ +void destroy_slice(grpc_slice* slice); + +/** + * The following encode/decode functions will be assigned to encode/decode + * function pointers of pb_callback_t struct (defined in + * //third_party/nanopb/pb.h), that represent a repeated field with a dynamic + * length (e.g., a string type or repeated field). + */ + +/* This method is an encode callback function for a string or byte array. */ +bool encode_string_or_bytes_cb(pb_ostream_t* stream, const pb_field_t* field, + void* const* arg); + +/** + * This method is an encode callback function for a repeated grpc_gcp_identity + * field. + */ +bool encode_repeated_identity_cb(pb_ostream_t* stream, const pb_field_t* field, + void* const* arg); + +/* This method is an encode callback function for a repeated string field. */ +bool encode_repeated_string_cb(pb_ostream_t* stream, const pb_field_t* field, + void* const* arg); + +/** + * This method is a decode callback function for a string or byte array field. + */ +bool decode_string_or_bytes_cb(pb_istream_t* stream, const pb_field_t* field, + void** arg); +/** + * This method is a decode callback function for a repeated grpc_gcp_identity + * field. + */ +bool decode_repeated_identity_cb(pb_istream_t* stream, const pb_field_t* field, + void** arg); + +/* This method is a decode callback function for a repeated string field. */ +bool decode_repeated_string_cb(pb_istream_t* stream, const pb_field_t* field, + void** arg); + +#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_HANDSHAKER_SERVICE_API_UTIL_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_event.cc b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_event.cc new file mode 100644 index 000000000..ec0bf12b9 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_event.cc @@ -0,0 +1,73 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/tsi/alts/handshaker/alts_tsi_event.h" + +#include +#include +#include + +tsi_result alts_tsi_event_create(alts_tsi_handshaker* handshaker, + tsi_handshaker_on_next_done_cb cb, + void* user_data, + grpc_alts_credentials_options* options, + grpc_slice target_name, + alts_tsi_event** event) { + if (event == nullptr || handshaker == nullptr || cb == nullptr) { + gpr_log(GPR_ERROR, "Invalid arguments to alts_tsi_event_create()"); + return TSI_INVALID_ARGUMENT; + } + alts_tsi_event* e = static_cast(gpr_zalloc(sizeof(*e))); + e->handshaker = handshaker; + e->cb = cb; + e->user_data = user_data; + e->options = grpc_alts_credentials_options_copy(options); + e->target_name = grpc_slice_copy(target_name); + grpc_metadata_array_init(&e->initial_metadata); + grpc_metadata_array_init(&e->trailing_metadata); + *event = e; + return TSI_OK; +} + +void alts_tsi_event_dispatch_to_handshaker(alts_tsi_event* event, bool is_ok) { + if (event == nullptr) { + gpr_log( + GPR_ERROR, + "ALTS TSI event is nullptr in alts_tsi_event_dispatch_to_handshaker()"); + return; + } + alts_tsi_handshaker_handle_response(event->handshaker, event->recv_buffer, + event->status, &event->details, event->cb, + event->user_data, is_ok); +} + +void alts_tsi_event_destroy(alts_tsi_event* event) { + if (event == nullptr) { + return; + } + grpc_byte_buffer_destroy(event->send_buffer); + grpc_byte_buffer_destroy(event->recv_buffer); + grpc_metadata_array_destroy(&event->initial_metadata); + grpc_metadata_array_destroy(&event->trailing_metadata); + grpc_slice_unref(event->details); + grpc_slice_unref(event->target_name); + grpc_alts_credentials_options_destroy(event->options); + gpr_free(event); +} diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_event.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_event.h new file mode 100644 index 000000000..043e75d4a --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_event.h @@ -0,0 +1,93 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_EVENT_H +#define GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_EVENT_H + +#include + +#include +#include + +#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h" +#include "src/core/tsi/transport_security_interface.h" + +/** + * A ALTS TSI event interface. In asynchronous implementation of + * tsi_handshaker_next(), the function will exit after scheduling a handshaker + * request to ALTS handshaker service without waiting for response to return. + * The event is used to link the scheduled handshaker request with the + * corresponding response so that enough context information can be inferred + * from it to handle the response. All APIs in the header are thread-compatible. + */ + +/** + * Main struct for ALTS TSI event. It retains ownership on send_buffer and + * recv_buffer, but not on handshaker. 
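+ *
+ * A typical lifecycle (a sketch; handshaker, cb, user_data, options and
+ * target_name are assumed to be supplied by the calling TSI handshaker):
+ *
+ *   alts_tsi_event* event = nullptr;
+ *   if (alts_tsi_event_create(handshaker, cb, user_data, options,
+ *                             target_name, &event) != TSI_OK) {
+ *     // handle the error
+ *   }
+ *   // The event pointer is used as the completion-queue tag of the call made
+ *   // to the ALTS handshaker service; once the call completes, the TSI thread
+ *   // recovers the tag, then dispatches and destroys the event:
+ *   alts_tsi_event_dispatch_to_handshaker(event, true);  // is_ok
+ *   alts_tsi_event_destroy(event);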
+ */ +typedef struct alts_tsi_event { + alts_tsi_handshaker* handshaker; + grpc_byte_buffer* send_buffer; + grpc_byte_buffer* recv_buffer; + grpc_status_code status; + grpc_slice details; + grpc_metadata_array initial_metadata; + grpc_metadata_array trailing_metadata; + tsi_handshaker_on_next_done_cb cb; + void* user_data; + grpc_alts_credentials_options* options; + grpc_slice target_name; +} alts_tsi_event; + +/** + * This method creates a ALTS TSI event. + * + * - handshaker: ALTS TSI handshaker instance associated with the event to be + * created. The created event does not own the handshaker instance. + * - cb: callback function to be called when handling data received from ALTS + * handshaker service. + * - user_data: argument to callback function. + * - options: ALTS credentials options. + * - target_name: name of endpoint used for secure naming check. + * - event: address of ALTS TSI event instance to be returned from the method. + * + * It returns TSI_OK on success and an error status code on failure. + */ +tsi_result alts_tsi_event_create(alts_tsi_handshaker* handshaker, + tsi_handshaker_on_next_done_cb cb, + void* user_data, + grpc_alts_credentials_options* options, + grpc_slice target_name, + alts_tsi_event** event); + +/** + * This method dispatches a ALTS TSI event received from the handshaker service, + * and a boolean flag indicating if the event is valid to read to ALTS TSI + * handshaker to process. It is called by TSI thread. + * + * - event: ALTS TSI event instance. + * - is_ok: a boolean value indicating if the event is valid to read. + */ +void alts_tsi_event_dispatch_to_handshaker(alts_tsi_event* event, bool is_ok); + +/** + * This method destroys the ALTS TSI event. + */ +void alts_tsi_event_destroy(alts_tsi_event* event); + +#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_EVENT_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc new file mode 100644 index 000000000..529f2103c --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc @@ -0,0 +1,483 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/handshaker/alts_tsi_handshaker.h" + +#include +#include +#include + +#include +#include +#include +#include + +#include "src/core/lib/gpr/host_port.h" +#include "src/core/lib/gprpp/thd.h" +#include "src/core/tsi/alts/frame_protector/alts_frame_protector.h" +#include "src/core/tsi/alts/handshaker/alts_handshaker_client.h" +#include "src/core/tsi/alts/handshaker/alts_tsi_utils.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h" +#include "src/core/tsi/alts_transport_security.h" + +#define TSI_ALTS_INITIAL_BUFFER_SIZE 256 + +static alts_shared_resource* kSharedResource = alts_get_shared_resource(); + +/* Main struct for ALTS TSI handshaker. 
*/ +typedef struct alts_tsi_handshaker { + tsi_handshaker base; + alts_handshaker_client* client; + grpc_slice recv_bytes; + grpc_slice target_name; + unsigned char* buffer; + size_t buffer_size; + bool is_client; + bool has_sent_start_message; + grpc_alts_credentials_options* options; +} alts_tsi_handshaker; + +/* Main struct for ALTS TSI handshaker result. */ +typedef struct alts_tsi_handshaker_result { + tsi_handshaker_result base; + char* peer_identity; + char* key_data; + unsigned char* unused_bytes; + size_t unused_bytes_size; + grpc_slice rpc_versions; + bool is_client; +} alts_tsi_handshaker_result; + +static tsi_result handshaker_result_extract_peer( + const tsi_handshaker_result* self, tsi_peer* peer) { + if (self == nullptr || peer == nullptr) { + gpr_log(GPR_ERROR, "Invalid argument to handshaker_result_extract_peer()"); + return TSI_INVALID_ARGUMENT; + } + alts_tsi_handshaker_result* result = + reinterpret_cast( + const_cast(self)); + GPR_ASSERT(kTsiAltsNumOfPeerProperties == 3); + tsi_result ok = tsi_construct_peer(kTsiAltsNumOfPeerProperties, peer); + int index = 0; + if (ok != TSI_OK) { + gpr_log(GPR_ERROR, "Failed to construct tsi peer"); + return ok; + } + GPR_ASSERT(&peer->properties[index] != nullptr); + ok = tsi_construct_string_peer_property_from_cstring( + TSI_CERTIFICATE_TYPE_PEER_PROPERTY, TSI_ALTS_CERTIFICATE_TYPE, + &peer->properties[index]); + if (ok != TSI_OK) { + tsi_peer_destruct(peer); + gpr_log(GPR_ERROR, "Failed to set tsi peer property"); + return ok; + } + index++; + GPR_ASSERT(&peer->properties[index] != nullptr); + ok = tsi_construct_string_peer_property_from_cstring( + TSI_ALTS_SERVICE_ACCOUNT_PEER_PROPERTY, result->peer_identity, + &peer->properties[index]); + if (ok != TSI_OK) { + tsi_peer_destruct(peer); + gpr_log(GPR_ERROR, "Failed to set tsi peer property"); + } + index++; + GPR_ASSERT(&peer->properties[index] != nullptr); + ok = tsi_construct_string_peer_property( + TSI_ALTS_RPC_VERSIONS, + reinterpret_cast(GRPC_SLICE_START_PTR(result->rpc_versions)), + GRPC_SLICE_LENGTH(result->rpc_versions), &peer->properties[2]); + if (ok != TSI_OK) { + tsi_peer_destruct(peer); + gpr_log(GPR_ERROR, "Failed to set tsi peer property"); + } + GPR_ASSERT(++index == kTsiAltsNumOfPeerProperties); + return ok; +} + +static tsi_result handshaker_result_create_zero_copy_grpc_protector( + const tsi_handshaker_result* self, size_t* max_output_protected_frame_size, + tsi_zero_copy_grpc_protector** protector) { + if (self == nullptr || protector == nullptr) { + gpr_log(GPR_ERROR, + "Invalid arguments to create_zero_copy_grpc_protector()"); + return TSI_INVALID_ARGUMENT; + } + alts_tsi_handshaker_result* result = + reinterpret_cast( + const_cast(self)); + tsi_result ok = alts_zero_copy_grpc_protector_create( + reinterpret_cast(result->key_data), + kAltsAes128GcmRekeyKeyLength, /*is_rekey=*/true, result->is_client, + /*is_integrity_only=*/false, max_output_protected_frame_size, protector); + if (ok != TSI_OK) { + gpr_log(GPR_ERROR, "Failed to create zero-copy grpc protector"); + } + return ok; +} + +static tsi_result handshaker_result_create_frame_protector( + const tsi_handshaker_result* self, size_t* max_output_protected_frame_size, + tsi_frame_protector** protector) { + if (self == nullptr || protector == nullptr) { + gpr_log(GPR_ERROR, + "Invalid arguments to handshaker_result_create_frame_protector()"); + return TSI_INVALID_ARGUMENT; + } + alts_tsi_handshaker_result* result = + reinterpret_cast( + const_cast(self)); + tsi_result ok = alts_create_frame_protector( + 
reinterpret_cast(result->key_data), + kAltsAes128GcmRekeyKeyLength, result->is_client, /*is_rekey=*/true, + max_output_protected_frame_size, protector); + if (ok != TSI_OK) { + gpr_log(GPR_ERROR, "Failed to create frame protector"); + } + return ok; +} + +static tsi_result handshaker_result_get_unused_bytes( + const tsi_handshaker_result* self, const unsigned char** bytes, + size_t* bytes_size) { + if (self == nullptr || bytes == nullptr || bytes_size == nullptr) { + gpr_log(GPR_ERROR, + "Invalid arguments to handshaker_result_get_unused_bytes()"); + return TSI_INVALID_ARGUMENT; + } + alts_tsi_handshaker_result* result = + reinterpret_cast( + const_cast(self)); + *bytes = result->unused_bytes; + *bytes_size = result->unused_bytes_size; + return TSI_OK; +} + +static void handshaker_result_destroy(tsi_handshaker_result* self) { + if (self == nullptr) { + return; + } + alts_tsi_handshaker_result* result = + reinterpret_cast( + const_cast(self)); + gpr_free(result->peer_identity); + gpr_free(result->key_data); + gpr_free(result->unused_bytes); + grpc_slice_unref(result->rpc_versions); + gpr_free(result); +} + +static const tsi_handshaker_result_vtable result_vtable = { + handshaker_result_extract_peer, + handshaker_result_create_zero_copy_grpc_protector, + handshaker_result_create_frame_protector, + handshaker_result_get_unused_bytes, handshaker_result_destroy}; + +static tsi_result create_handshaker_result(grpc_gcp_handshaker_resp* resp, + bool is_client, + tsi_handshaker_result** self) { + if (self == nullptr || resp == nullptr) { + gpr_log(GPR_ERROR, "Invalid arguments to create_handshaker_result()"); + return TSI_INVALID_ARGUMENT; + } + grpc_slice* key = static_cast(resp->result.key_data.arg); + GPR_ASSERT(key != nullptr); + grpc_slice* identity = + static_cast(resp->result.peer_identity.service_account.arg); + if (identity == nullptr) { + gpr_log(GPR_ERROR, "Invalid service account"); + return TSI_FAILED_PRECONDITION; + } + if (GRPC_SLICE_LENGTH(*key) < kAltsAes128GcmRekeyKeyLength) { + gpr_log(GPR_ERROR, "Bad key length"); + return TSI_FAILED_PRECONDITION; + } + alts_tsi_handshaker_result* result = + static_cast(gpr_zalloc(sizeof(*result))); + result->key_data = + static_cast(gpr_zalloc(kAltsAes128GcmRekeyKeyLength)); + memcpy(result->key_data, GRPC_SLICE_START_PTR(*key), + kAltsAes128GcmRekeyKeyLength); + result->peer_identity = grpc_slice_to_c_string(*identity); + if (!resp->result.has_peer_rpc_versions) { + gpr_log(GPR_ERROR, "Peer does not set RPC protocol versions."); + return TSI_FAILED_PRECONDITION; + } + if (!grpc_gcp_rpc_protocol_versions_encode(&resp->result.peer_rpc_versions, + &result->rpc_versions)) { + gpr_log(GPR_ERROR, "Failed to serialize peer's RPC protocol versions."); + return TSI_FAILED_PRECONDITION; + } + result->is_client = is_client; + result->base.vtable = &result_vtable; + *self = &result->base; + return TSI_OK; +} + +static tsi_result handshaker_next( + tsi_handshaker* self, const unsigned char* received_bytes, + size_t received_bytes_size, const unsigned char** bytes_to_send, + size_t* bytes_to_send_size, tsi_handshaker_result** result, + tsi_handshaker_on_next_done_cb cb, void* user_data) { + if (self == nullptr || cb == nullptr) { + gpr_log(GPR_ERROR, "Invalid arguments to handshaker_next()"); + return TSI_INVALID_ARGUMENT; + } + alts_tsi_handshaker* handshaker = + reinterpret_cast(self); + tsi_result ok = TSI_OK; + alts_tsi_event* event = nullptr; + ok = alts_tsi_event_create(handshaker, cb, user_data, handshaker->options, + handshaker->target_name, 
&event); + if (ok != TSI_OK) { + gpr_log(GPR_ERROR, "Failed to create ALTS TSI event"); + return ok; + } + grpc_slice slice = (received_bytes == nullptr || received_bytes_size == 0) + ? grpc_empty_slice() + : grpc_slice_from_copied_buffer( + reinterpret_cast(received_bytes), + received_bytes_size); + if (!handshaker->has_sent_start_message) { + ok = handshaker->is_client + ? alts_handshaker_client_start_client(handshaker->client, event) + : alts_handshaker_client_start_server(handshaker->client, event, + &slice); + handshaker->has_sent_start_message = true; + } else { + if (!GRPC_SLICE_IS_EMPTY(handshaker->recv_bytes)) { + grpc_slice_unref(handshaker->recv_bytes); + } + handshaker->recv_bytes = grpc_slice_ref(slice); + ok = alts_handshaker_client_next(handshaker->client, event, &slice); + } + grpc_slice_unref(slice); + if (ok != TSI_OK) { + gpr_log(GPR_ERROR, "Failed to schedule ALTS handshaker requests"); + return ok; + } + return TSI_ASYNC; +} + +static void handshaker_destroy(tsi_handshaker* self) { + if (self == nullptr) { + return; + } + alts_tsi_handshaker* handshaker = + reinterpret_cast(self); + alts_handshaker_client_destroy(handshaker->client); + grpc_slice_unref(handshaker->recv_bytes); + grpc_slice_unref(handshaker->target_name); + grpc_alts_credentials_options_destroy(handshaker->options); + gpr_free(handshaker->buffer); + gpr_free(handshaker); +} + +static const tsi_handshaker_vtable handshaker_vtable = { + nullptr, nullptr, nullptr, nullptr, nullptr, handshaker_destroy, + handshaker_next}; + +static void thread_worker(void* arg) { + while (true) { + grpc_event event = grpc_completion_queue_next( + kSharedResource->cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr); + GPR_ASSERT(event.type != GRPC_QUEUE_TIMEOUT); + if (event.type == GRPC_QUEUE_SHUTDOWN) { + /* signal alts_tsi_shutdown() to destroy completion queue. */ + grpc_tsi_alts_signal_for_cq_destroy(); + break; + } + /* event.type == GRPC_OP_COMPLETE. 
*/ + alts_tsi_event* alts_event = static_cast(event.tag); + alts_tsi_event_dispatch_to_handshaker(alts_event, event.success); + alts_tsi_event_destroy(alts_event); + } +} + +static void init_shared_resources(const char* handshaker_service_url) { + GPR_ASSERT(handshaker_service_url != nullptr); + gpr_mu_lock(&kSharedResource->mu); + if (kSharedResource->channel == nullptr) { + gpr_cv_init(&kSharedResource->cv); + kSharedResource->channel = + grpc_insecure_channel_create(handshaker_service_url, nullptr, nullptr); + kSharedResource->cq = grpc_completion_queue_create_for_next(nullptr); + kSharedResource->thread = + grpc_core::Thread("alts_tsi_handshaker", &thread_worker, nullptr); + kSharedResource->thread.Start(); + } + gpr_mu_unlock(&kSharedResource->mu); +} + +tsi_result alts_tsi_handshaker_create( + const grpc_alts_credentials_options* options, const char* target_name, + const char* handshaker_service_url, bool is_client, tsi_handshaker** self) { + if (handshaker_service_url == nullptr || self == nullptr || + options == nullptr || (is_client && target_name == nullptr)) { + gpr_log(GPR_ERROR, "Invalid arguments to alts_tsi_handshaker_create()"); + return TSI_INVALID_ARGUMENT; + } + init_shared_resources(handshaker_service_url); + alts_handshaker_client* client = alts_grpc_handshaker_client_create( + kSharedResource->channel, kSharedResource->cq, handshaker_service_url); + if (client == nullptr) { + gpr_log(GPR_ERROR, "Failed to create ALTS handshaker client"); + return TSI_FAILED_PRECONDITION; + } + alts_tsi_handshaker* handshaker = + static_cast(gpr_zalloc(sizeof(*handshaker))); + handshaker->client = client; + handshaker->buffer_size = TSI_ALTS_INITIAL_BUFFER_SIZE; + handshaker->buffer = + static_cast(gpr_zalloc(handshaker->buffer_size)); + handshaker->is_client = is_client; + handshaker->has_sent_start_message = false; + handshaker->target_name = target_name == nullptr + ? grpc_empty_slice() + : grpc_slice_from_static_string(target_name); + handshaker->options = grpc_alts_credentials_options_copy(options); + handshaker->base.vtable = &handshaker_vtable; + *self = &handshaker->base; + return TSI_OK; +} + +static bool is_handshake_finished_properly(grpc_gcp_handshaker_resp* resp) { + GPR_ASSERT(resp != nullptr); + if (resp->has_result) { + return true; + } + return false; +} + +static void set_unused_bytes(tsi_handshaker_result* self, + grpc_slice* recv_bytes, size_t bytes_consumed) { + GPR_ASSERT(recv_bytes != nullptr && self != nullptr); + if (GRPC_SLICE_LENGTH(*recv_bytes) == bytes_consumed) { + return; + } + alts_tsi_handshaker_result* result = + reinterpret_cast(self); + result->unused_bytes_size = GRPC_SLICE_LENGTH(*recv_bytes) - bytes_consumed; + result->unused_bytes = + static_cast(gpr_zalloc(result->unused_bytes_size)); + memcpy(result->unused_bytes, + GRPC_SLICE_START_PTR(*recv_bytes) + bytes_consumed, + result->unused_bytes_size); +} + +void alts_tsi_handshaker_handle_response(alts_tsi_handshaker* handshaker, + grpc_byte_buffer* recv_buffer, + grpc_status_code status, + grpc_slice* details, + tsi_handshaker_on_next_done_cb cb, + void* user_data, bool is_ok) { + /* Invalid input check. */ + if (cb == nullptr) { + gpr_log(GPR_ERROR, + "cb is nullptr in alts_tsi_handshaker_handle_response()"); + return; + } + if (handshaker == nullptr || recv_buffer == nullptr) { + gpr_log(GPR_ERROR, + "Invalid arguments to alts_tsi_handshaker_handle_response()"); + cb(TSI_INTERNAL_ERROR, user_data, nullptr, 0, nullptr); + return; + } + /* Failed grpc call check. 
*/ + if (!is_ok || status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "grpc call made to handshaker service failed"); + if (details != nullptr) { + char* error_details = grpc_slice_to_c_string(*details); + gpr_log(GPR_ERROR, "error details:%s", error_details); + gpr_free(error_details); + } + cb(TSI_INTERNAL_ERROR, user_data, nullptr, 0, nullptr); + return; + } + grpc_gcp_handshaker_resp* resp = + alts_tsi_utils_deserialize_response(recv_buffer); + /* Invalid handshaker response check. */ + if (resp == nullptr) { + gpr_log(GPR_ERROR, "alts_tsi_utils_deserialize_response() failed"); + cb(TSI_DATA_CORRUPTED, user_data, nullptr, 0, nullptr); + return; + } + grpc_slice* slice = static_cast(resp->out_frames.arg); + unsigned char* bytes_to_send = nullptr; + size_t bytes_to_send_size = 0; + if (slice != nullptr) { + bytes_to_send_size = GRPC_SLICE_LENGTH(*slice); + while (bytes_to_send_size > handshaker->buffer_size) { + handshaker->buffer_size *= 2; + handshaker->buffer = static_cast( + gpr_realloc(handshaker->buffer, handshaker->buffer_size)); + } + memcpy(handshaker->buffer, GRPC_SLICE_START_PTR(*slice), + bytes_to_send_size); + bytes_to_send = handshaker->buffer; + } + tsi_handshaker_result* result = nullptr; + if (is_handshake_finished_properly(resp)) { + create_handshaker_result(resp, handshaker->is_client, &result); + set_unused_bytes(result, &handshaker->recv_bytes, resp->bytes_consumed); + } + grpc_status_code code = static_cast(resp->status.code); + grpc_gcp_handshaker_resp_destroy(resp); + cb(alts_tsi_utils_convert_to_tsi_result(code), user_data, bytes_to_send, + bytes_to_send_size, result); +} + +namespace grpc_core { +namespace internal { + +bool alts_tsi_handshaker_get_has_sent_start_message_for_testing( + alts_tsi_handshaker* handshaker) { + GPR_ASSERT(handshaker != nullptr); + return handshaker->has_sent_start_message; +} + +bool alts_tsi_handshaker_get_is_client_for_testing( + alts_tsi_handshaker* handshaker) { + GPR_ASSERT(handshaker != nullptr); + return handshaker->is_client; +} + +void alts_tsi_handshaker_set_recv_bytes_for_testing( + alts_tsi_handshaker* handshaker, grpc_slice* slice) { + GPR_ASSERT(handshaker != nullptr && slice != nullptr); + handshaker->recv_bytes = grpc_slice_ref(*slice); +} + +grpc_slice alts_tsi_handshaker_get_recv_bytes_for_testing( + alts_tsi_handshaker* handshaker) { + GPR_ASSERT(handshaker != nullptr); + return handshaker->recv_bytes; +} + +void alts_tsi_handshaker_set_client_for_testing( + alts_tsi_handshaker* handshaker, alts_handshaker_client* client) { + GPR_ASSERT(handshaker != nullptr && client != nullptr); + alts_handshaker_client_destroy(handshaker->client); + handshaker->client = client; +} + +} // namespace internal +} // namespace grpc_core diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h new file mode 100644 index 000000000..227b30ce5 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h @@ -0,0 +1,83 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_HANDSHAKER_H +#define GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_HANDSHAKER_H + +#include + +#include + +#include "src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h" +#include "src/core/tsi/alts_transport_security.h" +#include "src/core/tsi/transport_security.h" +#include "src/core/tsi/transport_security_interface.h" + +#define TSI_ALTS_SERVICE_ACCOUNT_PEER_PROPERTY "service_account" +#define TSI_ALTS_CERTIFICATE_TYPE "ALTS" +#define TSI_ALTS_RPC_VERSIONS "rpc_versions" + +const size_t kTsiAltsNumOfPeerProperties = 3; + +/** + * Main struct for ALTS TSI handshaker. All APIs in the header are + * thread-compatible. + */ +typedef struct alts_tsi_handshaker alts_tsi_handshaker; + +/** + * This method creates an ALTS TSI handshaker instance. + * + * - options: ALTS credentials options containing information passed from TSI + * caller (e.g., rpc protocol versions). + * - target_name: the name of the endpoint that the channel is connecting to, + * and will be used for secure naming check. + * - handshaker_service_url: address of ALTS handshaker service in the format of + * "host:port". + * - is_client: boolean value indicating if the handshaker is used at the client + * (is_client = true) or server (is_client = false) side. + * - self: address of ALTS TSI handshaker instance to be returned from the + * method. + * + * It returns TSI_OK on success and an error status code on failure. + */ +tsi_result alts_tsi_handshaker_create( + const grpc_alts_credentials_options* options, const char* target_name, + const char* handshaker_service_url, bool is_client, tsi_handshaker** self); + +/** + * This method handles handshaker response returned from ALTS handshaker + * service. + * + * - handshaker: ALTS TSI handshaker instance. + * - recv_buffer: buffer holding data received from the handshaker service. + * - status: status of the grpc call made to the handshaker service. + * - details: error details of the grpc call made to the handshaker service. + * - cb: callback function of ALTS TSI event. + * - user_data: argument of callback function. + * - is_ok: a boolean value indicating if the handshaker response is ok to read. + * + */ +void alts_tsi_handshaker_handle_response(alts_tsi_handshaker* handshaker, + grpc_byte_buffer* recv_buffer, + grpc_status_code status, + grpc_slice* details, + tsi_handshaker_on_next_done_cb cb, + void* user_data, bool is_ok); + +#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_HANDSHAKER_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h new file mode 100644 index 000000000..9b7b9bb6b --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h @@ -0,0 +1,52 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_HANDSHAKER_PRIVATE_H +#define GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_HANDSHAKER_PRIVATE_H + +#include + +#include "src/core/tsi/alts/handshaker/alts_handshaker_client.h" + +namespace grpc_core { +namespace internal { + +/** + * Unsafe, use for testing only. It allows the caller to change the way the + * ALTS TSI handshaker schedules handshaker requests. + */ +void alts_tsi_handshaker_set_client_for_testing(alts_tsi_handshaker* handshaker, + alts_handshaker_client* client); + +/* For testing only. */ +bool alts_tsi_handshaker_get_has_sent_start_message_for_testing( + alts_tsi_handshaker* handshaker); + +bool alts_tsi_handshaker_get_is_client_for_testing( + alts_tsi_handshaker* handshaker); + +void alts_tsi_handshaker_set_recv_bytes_for_testing( + alts_tsi_handshaker* handshaker, grpc_slice* slice); + +grpc_slice alts_tsi_handshaker_get_recv_bytes_for_testing( + alts_tsi_handshaker* handshaker); + +} // namespace internal +} // namespace grpc_core + +#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_HANDSHAKER_PRIVATE_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_utils.cc b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_utils.cc new file mode 100644 index 000000000..d9b5e6c94 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_utils.cc @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/handshaker/alts_tsi_utils.h" + +#include + +tsi_result alts_tsi_utils_convert_to_tsi_result(grpc_status_code code) { + switch (code) { + case GRPC_STATUS_OK: + return TSI_OK; + case GRPC_STATUS_UNKNOWN: + return TSI_UNKNOWN_ERROR; + case GRPC_STATUS_INVALID_ARGUMENT: + return TSI_INVALID_ARGUMENT; + case GRPC_STATUS_NOT_FOUND: + return TSI_NOT_FOUND; + case GRPC_STATUS_INTERNAL: + return TSI_INTERNAL_ERROR; + default: + return TSI_UNKNOWN_ERROR; + } +} + +grpc_gcp_handshaker_resp* alts_tsi_utils_deserialize_response( + grpc_byte_buffer* resp_buffer) { + GPR_ASSERT(resp_buffer != nullptr); + grpc_byte_buffer_reader bbr; + grpc_byte_buffer_reader_init(&bbr, resp_buffer); + grpc_slice slice = grpc_byte_buffer_reader_readall(&bbr); + grpc_gcp_handshaker_resp* resp = grpc_gcp_handshaker_resp_create(); + bool ok = grpc_gcp_handshaker_resp_decode(slice, resp); + grpc_slice_unref(slice); + grpc_byte_buffer_reader_destroy(&bbr); + if (!ok) { + grpc_gcp_handshaker_resp_destroy(resp); + gpr_log(GPR_ERROR, "grpc_gcp_handshaker_resp_decode() failed"); + return nullptr; + } + return resp; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_utils.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_utils.h new file mode 100644 index 000000000..9ef649de2 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/alts_tsi_utils.h @@ -0,0 +1,52 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_UTILS_H +#define GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_UTILS_H + +#include + +#include +#include + +#include "src/core/tsi/alts/handshaker/alts_handshaker_service_api.h" +#include "src/core/tsi/transport_security_interface.h" + +/** + * This method converts grpc_status_code code to the corresponding tsi_result + * code. + * + * - code: grpc_status_code code. + * + * It returns the converted tsi_result code. + */ +tsi_result alts_tsi_utils_convert_to_tsi_result(grpc_status_code code); + +/** + * This method deserializes a handshaker response returned from ALTS handshaker + * service. + * + * - bytes_received: data returned from ALTS handshaker service. + * + * It returns a deserialized handshaker response on success and nullptr on + * failure. + */ +grpc_gcp_handshaker_resp* alts_tsi_utils_deserialize_response( + grpc_byte_buffer* resp_buffer); + +#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_ALTS_TSI_UTILS_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/altscontext.pb.c b/Sources/CgRPC/src/core/tsi/alts/handshaker/altscontext.pb.c new file mode 100644 index 000000000..81a82f599 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/altscontext.pb.c @@ -0,0 +1,48 @@ +/* Automatically generated nanopb constant definitions */ +/* Generated by nanopb-0.3.7-dev */ + +#include "src/core/tsi/alts/handshaker/altscontext.pb.h" + +/* @@protoc_insertion_point(includes) */ +#if PB_PROTO_HEADER_VERSION != 30 +#error Regenerate this file with the current version of nanopb generator. +#endif + + + +const pb_field_t grpc_gcp_AltsContext_fields[7] = { + PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, grpc_gcp_AltsContext, application_protocol, application_protocol, 0), + PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, grpc_gcp_AltsContext, record_protocol, application_protocol, 0), + PB_FIELD( 3, UENUM , OPTIONAL, STATIC , OTHER, grpc_gcp_AltsContext, security_level, record_protocol, 0), + PB_FIELD( 4, STRING , OPTIONAL, CALLBACK, OTHER, grpc_gcp_AltsContext, peer_service_account, security_level, 0), + PB_FIELD( 5, STRING , OPTIONAL, CALLBACK, OTHER, grpc_gcp_AltsContext, local_service_account, peer_service_account, 0), + PB_FIELD( 6, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_AltsContext, peer_rpc_versions, local_service_account, &grpc_gcp_RpcProtocolVersions_fields), + PB_LAST_FIELD +}; + + +/* Check that field information fits in pb_field_t */ +#if !defined(PB_FIELD_32BIT) +/* If you get an error here, it means that you need to define PB_FIELD_32BIT + * compile-time option. You can do that in pb.h or on compiler command line. + * + * The reason you need to do this is that some of your messages contain tag + * numbers or field sizes that are larger than what can fit in 8 or 16 bit + * field descriptors. 
+ */ +PB_STATIC_ASSERT((pb_membersize(grpc_gcp_AltsContext, peer_rpc_versions) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_gcp_AltsContext) +#endif + +#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT) +/* If you get an error here, it means that you need to define PB_FIELD_16BIT + * compile-time option. You can do that in pb.h or on compiler command line. + * + * The reason you need to do this is that some of your messages contain tag + * numbers or field sizes that are larger than what can fit in the default + * 8 bit descriptors. + */ +PB_STATIC_ASSERT((pb_membersize(grpc_gcp_AltsContext, peer_rpc_versions) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_gcp_AltsContext) +#endif + + +/* @@protoc_insertion_point(eof) */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/altscontext.pb.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/altscontext.pb.h new file mode 100644 index 000000000..3e72d7f67 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/altscontext.pb.h @@ -0,0 +1,64 @@ +/* Automatically generated nanopb header */ +/* Generated by nanopb-0.3.7-dev */ + +#ifndef PB_GRPC_GCP_ALTSCONTEXT_PB_H_INCLUDED +#define PB_GRPC_GCP_ALTSCONTEXT_PB_H_INCLUDED +#include "third_party/nanopb/pb.h" +#include "src/core/tsi/alts/handshaker/transport_security_common.pb.h" + +/* @@protoc_insertion_point(includes) */ +#if PB_PROTO_HEADER_VERSION != 30 +#error Regenerate this file with the current version of nanopb generator. +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Struct definitions */ +typedef struct _grpc_gcp_AltsContext { + pb_callback_t application_protocol; + pb_callback_t record_protocol; + bool has_security_level; + grpc_gcp_SecurityLevel security_level; + pb_callback_t peer_service_account; + pb_callback_t local_service_account; + bool has_peer_rpc_versions; + grpc_gcp_RpcProtocolVersions peer_rpc_versions; +/* @@protoc_insertion_point(struct:grpc_gcp_AltsContext) */ +} grpc_gcp_AltsContext; + +/* Default values for struct fields */ + +/* Initializer values for message structs */ +#define grpc_gcp_AltsContext_init_default {{{NULL}, NULL}, {{NULL}, NULL}, false, (grpc_gcp_SecurityLevel)0, {{NULL}, NULL}, {{NULL}, NULL}, false, grpc_gcp_RpcProtocolVersions_init_default} +#define grpc_gcp_AltsContext_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, false, (grpc_gcp_SecurityLevel)0, {{NULL}, NULL}, {{NULL}, NULL}, false, grpc_gcp_RpcProtocolVersions_init_zero} + +/* Field tags (for use in manual encoding/decoding) */ +#define grpc_gcp_AltsContext_application_protocol_tag 1 +#define grpc_gcp_AltsContext_record_protocol_tag 2 +#define grpc_gcp_AltsContext_security_level_tag 3 +#define grpc_gcp_AltsContext_peer_service_account_tag 4 +#define grpc_gcp_AltsContext_local_service_account_tag 5 +#define grpc_gcp_AltsContext_peer_rpc_versions_tag 6 + +/* Struct field encoding specification for nanopb */ +extern const pb_field_t grpc_gcp_AltsContext_fields[7]; + +/* Maximum encoded size of messages (where known) */ +/* grpc_gcp_AltsContext_size depends on runtime parameters */ + +/* Message IDs (where set with "msgid" option) */ +#ifdef PB_MSGID + +#define ALTSCONTEXT_MESSAGES \ + + +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif +/* @@protoc_insertion_point(eof) */ + +#endif diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/handshaker.pb.c b/Sources/CgRPC/src/core/tsi/alts/handshaker/handshaker.pb.c new file mode 100644 index 000000000..bd992dfa4 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/handshaker.pb.c @@ -0,0 
+1,123 @@ +/* Automatically generated nanopb constant definitions */ +/* Generated by nanopb-0.3.7-dev */ + +#include "src/core/tsi/alts/handshaker/handshaker.pb.h" + +/* @@protoc_insertion_point(includes) */ +#if PB_PROTO_HEADER_VERSION != 30 +#error Regenerate this file with the current version of nanopb generator. +#endif + + + +const pb_field_t grpc_gcp_Endpoint_fields[4] = { + PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, grpc_gcp_Endpoint, ip_address, ip_address, 0), + PB_FIELD( 2, INT32 , OPTIONAL, STATIC , OTHER, grpc_gcp_Endpoint, port, ip_address, 0), + PB_FIELD( 3, UENUM , OPTIONAL, STATIC , OTHER, grpc_gcp_Endpoint, protocol, port, 0), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_Identity_fields[3] = { + PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, grpc_gcp_Identity, service_account, service_account, 0), + PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, grpc_gcp_Identity, hostname, service_account, 0), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_StartClientHandshakeReq_fields[10] = { + PB_FIELD( 1, UENUM , OPTIONAL, STATIC , FIRST, grpc_gcp_StartClientHandshakeReq, handshake_security_protocol, handshake_security_protocol, 0), + PB_FIELD( 2, STRING , REPEATED, CALLBACK, OTHER, grpc_gcp_StartClientHandshakeReq, application_protocols, handshake_security_protocol, 0), + PB_FIELD( 3, STRING , REPEATED, CALLBACK, OTHER, grpc_gcp_StartClientHandshakeReq, record_protocols, application_protocols, 0), + PB_FIELD( 4, MESSAGE , REPEATED, CALLBACK, OTHER, grpc_gcp_StartClientHandshakeReq, target_identities, record_protocols, &grpc_gcp_Identity_fields), + PB_FIELD( 5, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_StartClientHandshakeReq, local_identity, target_identities, &grpc_gcp_Identity_fields), + PB_FIELD( 6, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_StartClientHandshakeReq, local_endpoint, local_identity, &grpc_gcp_Endpoint_fields), + PB_FIELD( 7, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_StartClientHandshakeReq, remote_endpoint, local_endpoint, &grpc_gcp_Endpoint_fields), + PB_FIELD( 8, STRING , OPTIONAL, CALLBACK, OTHER, grpc_gcp_StartClientHandshakeReq, target_name, remote_endpoint, 0), + PB_FIELD( 9, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_StartClientHandshakeReq, rpc_versions, target_name, &grpc_gcp_RpcProtocolVersions_fields), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_ServerHandshakeParameters_fields[3] = { + PB_FIELD( 1, STRING , REPEATED, CALLBACK, FIRST, grpc_gcp_ServerHandshakeParameters, record_protocols, record_protocols, 0), + PB_FIELD( 2, MESSAGE , REPEATED, CALLBACK, OTHER, grpc_gcp_ServerHandshakeParameters, local_identities, record_protocols, &grpc_gcp_Identity_fields), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_StartServerHandshakeReq_fields[7] = { + PB_FIELD( 1, STRING , REPEATED, CALLBACK, FIRST, grpc_gcp_StartServerHandshakeReq, application_protocols, application_protocols, 0), + PB_FIELD( 2, MESSAGE , REPEATED, STATIC , OTHER, grpc_gcp_StartServerHandshakeReq, handshake_parameters, application_protocols, &grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_fields), + PB_FIELD( 3, BYTES , OPTIONAL, CALLBACK, OTHER, grpc_gcp_StartServerHandshakeReq, in_bytes, handshake_parameters, 0), + PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_StartServerHandshakeReq, local_endpoint, in_bytes, &grpc_gcp_Endpoint_fields), + PB_FIELD( 5, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_StartServerHandshakeReq, remote_endpoint, local_endpoint, &grpc_gcp_Endpoint_fields), + PB_FIELD( 6, MESSAGE , OPTIONAL, STATIC , OTHER, 
grpc_gcp_StartServerHandshakeReq, rpc_versions, remote_endpoint, &grpc_gcp_RpcProtocolVersions_fields), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_fields[3] = { + PB_FIELD( 1, INT32 , OPTIONAL, STATIC , FIRST, grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry, key, key, 0), + PB_FIELD( 2, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry, value, key, &grpc_gcp_ServerHandshakeParameters_fields), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_NextHandshakeMessageReq_fields[2] = { + PB_FIELD( 1, BYTES , OPTIONAL, CALLBACK, FIRST, grpc_gcp_NextHandshakeMessageReq, in_bytes, in_bytes, 0), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_HandshakerReq_fields[4] = { + PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, grpc_gcp_HandshakerReq, client_start, client_start, &grpc_gcp_StartClientHandshakeReq_fields), + PB_FIELD( 2, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_HandshakerReq, server_start, client_start, &grpc_gcp_StartServerHandshakeReq_fields), + PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_HandshakerReq, next, server_start, &grpc_gcp_NextHandshakeMessageReq_fields), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_HandshakerResult_fields[8] = { + PB_FIELD( 1, STRING , OPTIONAL, CALLBACK, FIRST, grpc_gcp_HandshakerResult, application_protocol, application_protocol, 0), + PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, grpc_gcp_HandshakerResult, record_protocol, application_protocol, 0), + PB_FIELD( 3, BYTES , OPTIONAL, CALLBACK, OTHER, grpc_gcp_HandshakerResult, key_data, record_protocol, 0), + PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_HandshakerResult, peer_identity, key_data, &grpc_gcp_Identity_fields), + PB_FIELD( 5, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_HandshakerResult, local_identity, peer_identity, &grpc_gcp_Identity_fields), + PB_FIELD( 6, BOOL , OPTIONAL, STATIC , OTHER, grpc_gcp_HandshakerResult, keep_channel_open, local_identity, 0), + PB_FIELD( 7, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_HandshakerResult, peer_rpc_versions, keep_channel_open, &grpc_gcp_RpcProtocolVersions_fields), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_HandshakerStatus_fields[3] = { + PB_FIELD( 1, UINT32 , OPTIONAL, STATIC , FIRST, grpc_gcp_HandshakerStatus, code, code, 0), + PB_FIELD( 2, STRING , OPTIONAL, CALLBACK, OTHER, grpc_gcp_HandshakerStatus, details, code, 0), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_HandshakerResp_fields[5] = { + PB_FIELD( 1, BYTES , OPTIONAL, CALLBACK, FIRST, grpc_gcp_HandshakerResp, out_frames, out_frames, 0), + PB_FIELD( 2, UINT32 , OPTIONAL, STATIC , OTHER, grpc_gcp_HandshakerResp, bytes_consumed, out_frames, 0), + PB_FIELD( 3, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_HandshakerResp, result, bytes_consumed, &grpc_gcp_HandshakerResult_fields), + PB_FIELD( 4, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_HandshakerResp, status, result, &grpc_gcp_HandshakerStatus_fields), + PB_LAST_FIELD +}; + + +/* Check that field information fits in pb_field_t */ +#if !defined(PB_FIELD_32BIT) +/* If you get an error here, it means that you need to define PB_FIELD_32BIT + * compile-time option. You can do that in pb.h or on compiler command line. + * + * The reason you need to do this is that some of your messages contain tag + * numbers or field sizes that are larger than what can fit in 8 or 16 bit + * field descriptors. 
+ */ +PB_STATIC_ASSERT((pb_membersize(grpc_gcp_StartClientHandshakeReq, target_identities) < 65536 && pb_membersize(grpc_gcp_StartClientHandshakeReq, local_identity) < 65536 && pb_membersize(grpc_gcp_StartClientHandshakeReq, local_endpoint) < 65536 && pb_membersize(grpc_gcp_StartClientHandshakeReq, remote_endpoint) < 65536 && pb_membersize(grpc_gcp_StartClientHandshakeReq, rpc_versions) < 65536 && pb_membersize(grpc_gcp_ServerHandshakeParameters, local_identities) < 65536 && pb_membersize(grpc_gcp_StartServerHandshakeReq, handshake_parameters[0]) < 65536 && pb_membersize(grpc_gcp_StartServerHandshakeReq, local_endpoint) < 65536 && pb_membersize(grpc_gcp_StartServerHandshakeReq, remote_endpoint) < 65536 && pb_membersize(grpc_gcp_StartServerHandshakeReq, rpc_versions) < 65536 && pb_membersize(grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry, value) < 65536 && pb_membersize(grpc_gcp_HandshakerReq, client_start) < 65536 && pb_membersize(grpc_gcp_HandshakerReq, server_start) < 65536 && pb_membersize(grpc_gcp_HandshakerReq, next) < 65536 && pb_membersize(grpc_gcp_HandshakerResult, peer_identity) < 65536 && pb_membersize(grpc_gcp_HandshakerResult, local_identity) < 65536 && pb_membersize(grpc_gcp_HandshakerResult, peer_rpc_versions) < 65536 && pb_membersize(grpc_gcp_HandshakerResp, result) < 65536 && pb_membersize(grpc_gcp_HandshakerResp, status) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_gcp_Endpoint_grpc_gcp_Identity_grpc_gcp_StartClientHandshakeReq_grpc_gcp_ServerHandshakeParameters_grpc_gcp_StartServerHandshakeReq_grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_grpc_gcp_NextHandshakeMessageReq_grpc_gcp_HandshakerReq_grpc_gcp_HandshakerResult_grpc_gcp_HandshakerStatus_grpc_gcp_HandshakerResp) +#endif + +#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT) +/* If you get an error here, it means that you need to define PB_FIELD_16BIT + * compile-time option. You can do that in pb.h or on compiler command line. + * + * The reason you need to do this is that some of your messages contain tag + * numbers or field sizes that are larger than what can fit in the default + * 8 bit descriptors. 
+ */ +PB_STATIC_ASSERT((pb_membersize(grpc_gcp_StartClientHandshakeReq, target_identities) < 256 && pb_membersize(grpc_gcp_StartClientHandshakeReq, local_identity) < 256 && pb_membersize(grpc_gcp_StartClientHandshakeReq, local_endpoint) < 256 && pb_membersize(grpc_gcp_StartClientHandshakeReq, remote_endpoint) < 256 && pb_membersize(grpc_gcp_StartClientHandshakeReq, rpc_versions) < 256 && pb_membersize(grpc_gcp_ServerHandshakeParameters, local_identities) < 256 && pb_membersize(grpc_gcp_StartServerHandshakeReq, handshake_parameters[0]) < 256 && pb_membersize(grpc_gcp_StartServerHandshakeReq, local_endpoint) < 256 && pb_membersize(grpc_gcp_StartServerHandshakeReq, remote_endpoint) < 256 && pb_membersize(grpc_gcp_StartServerHandshakeReq, rpc_versions) < 256 && pb_membersize(grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry, value) < 256 && pb_membersize(grpc_gcp_HandshakerReq, client_start) < 256 && pb_membersize(grpc_gcp_HandshakerReq, server_start) < 256 && pb_membersize(grpc_gcp_HandshakerReq, next) < 256 && pb_membersize(grpc_gcp_HandshakerResult, peer_identity) < 256 && pb_membersize(grpc_gcp_HandshakerResult, local_identity) < 256 && pb_membersize(grpc_gcp_HandshakerResult, peer_rpc_versions) < 256 && pb_membersize(grpc_gcp_HandshakerResp, result) < 256 && pb_membersize(grpc_gcp_HandshakerResp, status) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_gcp_Endpoint_grpc_gcp_Identity_grpc_gcp_StartClientHandshakeReq_grpc_gcp_ServerHandshakeParameters_grpc_gcp_StartServerHandshakeReq_grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_grpc_gcp_NextHandshakeMessageReq_grpc_gcp_HandshakerReq_grpc_gcp_HandshakerResult_grpc_gcp_HandshakerStatus_grpc_gcp_HandshakerResp) +#endif + + +/* @@protoc_insertion_point(eof) */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/handshaker.pb.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/handshaker.pb.h new file mode 100644 index 000000000..0805a144d --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/handshaker.pb.h @@ -0,0 +1,255 @@ +/* Automatically generated nanopb header */ +/* Generated by nanopb-0.3.7-dev */ + +#ifndef PB_GRPC_GCP_HANDSHAKER_PB_H_INCLUDED +#define PB_GRPC_GCP_HANDSHAKER_PB_H_INCLUDED +#include "third_party/nanopb/pb.h" +#include "src/core/tsi/alts/handshaker/transport_security_common.pb.h" + +/* @@protoc_insertion_point(includes) */ +#if PB_PROTO_HEADER_VERSION != 30 +#error Regenerate this file with the current version of nanopb generator. 
+#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Enum definitions */ +typedef enum _grpc_gcp_HandshakeProtocol { + grpc_gcp_HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED = 0, + grpc_gcp_HandshakeProtocol_TLS = 1, + grpc_gcp_HandshakeProtocol_ALTS = 2 +} grpc_gcp_HandshakeProtocol; +#define _grpc_gcp_HandshakeProtocol_MIN grpc_gcp_HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED +#define _grpc_gcp_HandshakeProtocol_MAX grpc_gcp_HandshakeProtocol_ALTS +#define _grpc_gcp_HandshakeProtocol_ARRAYSIZE ((grpc_gcp_HandshakeProtocol)(grpc_gcp_HandshakeProtocol_ALTS+1)) + +typedef enum _grpc_gcp_NetworkProtocol { + grpc_gcp_NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED = 0, + grpc_gcp_NetworkProtocol_TCP = 1, + grpc_gcp_NetworkProtocol_UDP = 2 +} grpc_gcp_NetworkProtocol; +#define _grpc_gcp_NetworkProtocol_MIN grpc_gcp_NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED +#define _grpc_gcp_NetworkProtocol_MAX grpc_gcp_NetworkProtocol_UDP +#define _grpc_gcp_NetworkProtocol_ARRAYSIZE ((grpc_gcp_NetworkProtocol)(grpc_gcp_NetworkProtocol_UDP+1)) + +/* Struct definitions */ +typedef struct _grpc_gcp_Identity { + pb_callback_t service_account; + pb_callback_t hostname; +/* @@protoc_insertion_point(struct:grpc_gcp_Identity) */ +} grpc_gcp_Identity; + +typedef struct _grpc_gcp_NextHandshakeMessageReq { + pb_callback_t in_bytes; +/* @@protoc_insertion_point(struct:grpc_gcp_NextHandshakeMessageReq) */ +} grpc_gcp_NextHandshakeMessageReq; + +typedef struct _grpc_gcp_ServerHandshakeParameters { + pb_callback_t record_protocols; + pb_callback_t local_identities; +/* @@protoc_insertion_point(struct:grpc_gcp_ServerHandshakeParameters) */ +} grpc_gcp_ServerHandshakeParameters; + +typedef struct _grpc_gcp_Endpoint { + pb_callback_t ip_address; + bool has_port; + int32_t port; + bool has_protocol; + grpc_gcp_NetworkProtocol protocol; +/* @@protoc_insertion_point(struct:grpc_gcp_Endpoint) */ +} grpc_gcp_Endpoint; + +typedef struct _grpc_gcp_HandshakerResult { + pb_callback_t application_protocol; + pb_callback_t record_protocol; + pb_callback_t key_data; + bool has_peer_identity; + grpc_gcp_Identity peer_identity; + bool has_local_identity; + grpc_gcp_Identity local_identity; + bool has_keep_channel_open; + bool keep_channel_open; + bool has_peer_rpc_versions; + grpc_gcp_RpcProtocolVersions peer_rpc_versions; +/* @@protoc_insertion_point(struct:grpc_gcp_HandshakerResult) */ +} grpc_gcp_HandshakerResult; + +typedef struct _grpc_gcp_HandshakerStatus { + bool has_code; + uint32_t code; + pb_callback_t details; +/* @@protoc_insertion_point(struct:grpc_gcp_HandshakerStatus) */ +} grpc_gcp_HandshakerStatus; + +typedef struct _grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry { + bool has_key; + int32_t key; + bool has_value; + grpc_gcp_ServerHandshakeParameters value; +/* @@protoc_insertion_point(struct:grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry) */ +} grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry; + +typedef struct _grpc_gcp_HandshakerResp { + pb_callback_t out_frames; + bool has_bytes_consumed; + uint32_t bytes_consumed; + bool has_result; + grpc_gcp_HandshakerResult result; + bool has_status; + grpc_gcp_HandshakerStatus status; +/* @@protoc_insertion_point(struct:grpc_gcp_HandshakerResp) */ +} grpc_gcp_HandshakerResp; + +typedef struct _grpc_gcp_StartClientHandshakeReq { + bool has_handshake_security_protocol; + grpc_gcp_HandshakeProtocol handshake_security_protocol; + pb_callback_t application_protocols; + pb_callback_t record_protocols; + pb_callback_t target_identities; + bool 
has_local_identity; + grpc_gcp_Identity local_identity; + bool has_local_endpoint; + grpc_gcp_Endpoint local_endpoint; + bool has_remote_endpoint; + grpc_gcp_Endpoint remote_endpoint; + pb_callback_t target_name; + bool has_rpc_versions; + grpc_gcp_RpcProtocolVersions rpc_versions; +/* @@protoc_insertion_point(struct:grpc_gcp_StartClientHandshakeReq) */ +} grpc_gcp_StartClientHandshakeReq; + +typedef struct _grpc_gcp_StartServerHandshakeReq { + pb_callback_t application_protocols; + pb_size_t handshake_parameters_count; + grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry handshake_parameters[3]; + pb_callback_t in_bytes; + bool has_local_endpoint; + grpc_gcp_Endpoint local_endpoint; + bool has_remote_endpoint; + grpc_gcp_Endpoint remote_endpoint; + bool has_rpc_versions; + grpc_gcp_RpcProtocolVersions rpc_versions; +/* @@protoc_insertion_point(struct:grpc_gcp_StartServerHandshakeReq) */ +} grpc_gcp_StartServerHandshakeReq; + +typedef struct _grpc_gcp_HandshakerReq { + bool has_client_start; + grpc_gcp_StartClientHandshakeReq client_start; + bool has_server_start; + grpc_gcp_StartServerHandshakeReq server_start; + bool has_next; + grpc_gcp_NextHandshakeMessageReq next; +/* @@protoc_insertion_point(struct:grpc_gcp_HandshakerReq) */ +} grpc_gcp_HandshakerReq; + +/* Default values for struct fields */ + +/* Initializer values for message structs */ +#define grpc_gcp_Endpoint_init_default {{{NULL}, NULL}, false, 0, false, (grpc_gcp_NetworkProtocol)0} +#define grpc_gcp_Identity_init_default {{{NULL}, NULL}, {{NULL}, NULL}} +#define grpc_gcp_StartClientHandshakeReq_init_default {false, (grpc_gcp_HandshakeProtocol)0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, grpc_gcp_Identity_init_default, false, grpc_gcp_Endpoint_init_default, false, grpc_gcp_Endpoint_init_default, {{NULL}, NULL}, false, grpc_gcp_RpcProtocolVersions_init_default} +#define grpc_gcp_ServerHandshakeParameters_init_default {{{NULL}, NULL}, {{NULL}, NULL}} +#define grpc_gcp_StartServerHandshakeReq_init_default {{{NULL}, NULL}, 0, {grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_init_default, grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_init_default, grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_init_default}, {{NULL}, NULL}, false, grpc_gcp_Endpoint_init_default, false, grpc_gcp_Endpoint_init_default, false, grpc_gcp_RpcProtocolVersions_init_default} +#define grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_init_default {false, 0, false, grpc_gcp_ServerHandshakeParameters_init_default} +#define grpc_gcp_NextHandshakeMessageReq_init_default {{{NULL}, NULL}} +#define grpc_gcp_HandshakerReq_init_default {false, grpc_gcp_StartClientHandshakeReq_init_default, false, grpc_gcp_StartServerHandshakeReq_init_default, false, grpc_gcp_NextHandshakeMessageReq_init_default} +#define grpc_gcp_HandshakerResult_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, grpc_gcp_Identity_init_default, false, grpc_gcp_Identity_init_default, false, 0, false, grpc_gcp_RpcProtocolVersions_init_default} +#define grpc_gcp_HandshakerStatus_init_default {false, 0, {{NULL}, NULL}} +#define grpc_gcp_HandshakerResp_init_default {{{NULL}, NULL}, false, 0, false, grpc_gcp_HandshakerResult_init_default, false, grpc_gcp_HandshakerStatus_init_default} +#define grpc_gcp_Endpoint_init_zero {{{NULL}, NULL}, false, 0, false, (grpc_gcp_NetworkProtocol)0} +#define grpc_gcp_Identity_init_zero {{{NULL}, NULL}, {{NULL}, NULL}} +#define grpc_gcp_StartClientHandshakeReq_init_zero {false, 
(grpc_gcp_HandshakeProtocol)0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, grpc_gcp_Identity_init_zero, false, grpc_gcp_Endpoint_init_zero, false, grpc_gcp_Endpoint_init_zero, {{NULL}, NULL}, false, grpc_gcp_RpcProtocolVersions_init_zero} +#define grpc_gcp_ServerHandshakeParameters_init_zero {{{NULL}, NULL}, {{NULL}, NULL}} +#define grpc_gcp_StartServerHandshakeReq_init_zero {{{NULL}, NULL}, 0, {grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_init_zero, grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_init_zero, grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_init_zero}, {{NULL}, NULL}, false, grpc_gcp_Endpoint_init_zero, false, grpc_gcp_Endpoint_init_zero, false, grpc_gcp_RpcProtocolVersions_init_zero} +#define grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_init_zero {false, 0, false, grpc_gcp_ServerHandshakeParameters_init_zero} +#define grpc_gcp_NextHandshakeMessageReq_init_zero {{{NULL}, NULL}} +#define grpc_gcp_HandshakerReq_init_zero {false, grpc_gcp_StartClientHandshakeReq_init_zero, false, grpc_gcp_StartServerHandshakeReq_init_zero, false, grpc_gcp_NextHandshakeMessageReq_init_zero} +#define grpc_gcp_HandshakerResult_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, false, grpc_gcp_Identity_init_zero, false, grpc_gcp_Identity_init_zero, false, 0, false, grpc_gcp_RpcProtocolVersions_init_zero} +#define grpc_gcp_HandshakerStatus_init_zero {false, 0, {{NULL}, NULL}} +#define grpc_gcp_HandshakerResp_init_zero {{{NULL}, NULL}, false, 0, false, grpc_gcp_HandshakerResult_init_zero, false, grpc_gcp_HandshakerStatus_init_zero} + +/* Field tags (for use in manual encoding/decoding) */ +#define grpc_gcp_Identity_service_account_tag 1 +#define grpc_gcp_Identity_hostname_tag 2 +#define grpc_gcp_NextHandshakeMessageReq_in_bytes_tag 1 +#define grpc_gcp_ServerHandshakeParameters_record_protocols_tag 1 +#define grpc_gcp_ServerHandshakeParameters_local_identities_tag 2 +#define grpc_gcp_Endpoint_ip_address_tag 1 +#define grpc_gcp_Endpoint_port_tag 2 +#define grpc_gcp_Endpoint_protocol_tag 3 +#define grpc_gcp_HandshakerResult_application_protocol_tag 1 +#define grpc_gcp_HandshakerResult_record_protocol_tag 2 +#define grpc_gcp_HandshakerResult_key_data_tag 3 +#define grpc_gcp_HandshakerResult_peer_identity_tag 4 +#define grpc_gcp_HandshakerResult_local_identity_tag 5 +#define grpc_gcp_HandshakerResult_keep_channel_open_tag 6 +#define grpc_gcp_HandshakerResult_peer_rpc_versions_tag 7 +#define grpc_gcp_HandshakerStatus_code_tag 1 +#define grpc_gcp_HandshakerStatus_details_tag 2 +#define grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_key_tag 1 +#define grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_value_tag 2 +#define grpc_gcp_HandshakerResp_out_frames_tag 1 +#define grpc_gcp_HandshakerResp_bytes_consumed_tag 2 +#define grpc_gcp_HandshakerResp_result_tag 3 +#define grpc_gcp_HandshakerResp_status_tag 4 +#define grpc_gcp_StartClientHandshakeReq_handshake_security_protocol_tag 1 +#define grpc_gcp_StartClientHandshakeReq_application_protocols_tag 2 +#define grpc_gcp_StartClientHandshakeReq_record_protocols_tag 3 +#define grpc_gcp_StartClientHandshakeReq_target_identities_tag 4 +#define grpc_gcp_StartClientHandshakeReq_local_identity_tag 5 +#define grpc_gcp_StartClientHandshakeReq_local_endpoint_tag 6 +#define grpc_gcp_StartClientHandshakeReq_remote_endpoint_tag 7 +#define grpc_gcp_StartClientHandshakeReq_target_name_tag 8 +#define grpc_gcp_StartClientHandshakeReq_rpc_versions_tag 9 +#define 
grpc_gcp_StartServerHandshakeReq_application_protocols_tag 1 +#define grpc_gcp_StartServerHandshakeReq_handshake_parameters_tag 2 +#define grpc_gcp_StartServerHandshakeReq_in_bytes_tag 3 +#define grpc_gcp_StartServerHandshakeReq_local_endpoint_tag 4 +#define grpc_gcp_StartServerHandshakeReq_remote_endpoint_tag 5 +#define grpc_gcp_StartServerHandshakeReq_rpc_versions_tag 6 +#define grpc_gcp_HandshakerReq_client_start_tag 1 +#define grpc_gcp_HandshakerReq_server_start_tag 2 +#define grpc_gcp_HandshakerReq_next_tag 3 + +/* Struct field encoding specification for nanopb */ +extern const pb_field_t grpc_gcp_Endpoint_fields[4]; +extern const pb_field_t grpc_gcp_Identity_fields[3]; +extern const pb_field_t grpc_gcp_StartClientHandshakeReq_fields[10]; +extern const pb_field_t grpc_gcp_ServerHandshakeParameters_fields[3]; +extern const pb_field_t grpc_gcp_StartServerHandshakeReq_fields[7]; +extern const pb_field_t grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_fields[3]; +extern const pb_field_t grpc_gcp_NextHandshakeMessageReq_fields[2]; +extern const pb_field_t grpc_gcp_HandshakerReq_fields[4]; +extern const pb_field_t grpc_gcp_HandshakerResult_fields[8]; +extern const pb_field_t grpc_gcp_HandshakerStatus_fields[3]; +extern const pb_field_t grpc_gcp_HandshakerResp_fields[5]; + +/* Maximum encoded size of messages (where known) */ +/* grpc_gcp_Endpoint_size depends on runtime parameters */ +/* grpc_gcp_Identity_size depends on runtime parameters */ +/* grpc_gcp_StartClientHandshakeReq_size depends on runtime parameters */ +/* grpc_gcp_ServerHandshakeParameters_size depends on runtime parameters */ +/* grpc_gcp_StartServerHandshakeReq_size depends on runtime parameters */ +#define grpc_gcp_StartServerHandshakeReq_HandshakeParametersEntry_size (17 + grpc_gcp_ServerHandshakeParameters_size) +/* grpc_gcp_NextHandshakeMessageReq_size depends on runtime parameters */ +#define grpc_gcp_HandshakerReq_size (18 + grpc_gcp_StartClientHandshakeReq_size + grpc_gcp_StartServerHandshakeReq_size + grpc_gcp_NextHandshakeMessageReq_size) +/* grpc_gcp_HandshakerResult_size depends on runtime parameters */ +/* grpc_gcp_HandshakerStatus_size depends on runtime parameters */ +/* grpc_gcp_HandshakerResp_size depends on runtime parameters */ + +/* Message IDs (where set with "msgid" option) */ +#ifdef PB_MSGID + +#define HANDSHAKER_MESSAGES \ + + +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif +/* @@protoc_insertion_point(eof) */ + +#endif diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common.pb.c b/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common.pb.c new file mode 100644 index 000000000..6063c7625 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common.pb.c @@ -0,0 +1,50 @@ +/* Automatically generated nanopb constant definitions */ +/* Generated by nanopb-0.3.7-dev */ + +#include "src/core/tsi/alts/handshaker/transport_security_common.pb.h" + +/* @@protoc_insertion_point(includes) */ +#if PB_PROTO_HEADER_VERSION != 30 +#error Regenerate this file with the current version of nanopb generator. 
+#endif + + + +const pb_field_t grpc_gcp_RpcProtocolVersions_fields[3] = { + PB_FIELD( 1, MESSAGE , OPTIONAL, STATIC , FIRST, grpc_gcp_RpcProtocolVersions, max_rpc_version, max_rpc_version, &grpc_gcp_RpcProtocolVersions_Version_fields), + PB_FIELD( 2, MESSAGE , OPTIONAL, STATIC , OTHER, grpc_gcp_RpcProtocolVersions, min_rpc_version, max_rpc_version, &grpc_gcp_RpcProtocolVersions_Version_fields), + PB_LAST_FIELD +}; + +const pb_field_t grpc_gcp_RpcProtocolVersions_Version_fields[3] = { + PB_FIELD( 1, UINT32 , OPTIONAL, STATIC , FIRST, grpc_gcp_RpcProtocolVersions_Version, major, major, 0), + PB_FIELD( 2, UINT32 , OPTIONAL, STATIC , OTHER, grpc_gcp_RpcProtocolVersions_Version, minor, major, 0), + PB_LAST_FIELD +}; + + +/* Check that field information fits in pb_field_t */ +#if !defined(PB_FIELD_32BIT) +/* If you get an error here, it means that you need to define PB_FIELD_32BIT + * compile-time option. You can do that in pb.h or on compiler command line. + * + * The reason you need to do this is that some of your messages contain tag + * numbers or field sizes that are larger than what can fit in 8 or 16 bit + * field descriptors. + */ +PB_STATIC_ASSERT((pb_membersize(grpc_gcp_RpcProtocolVersions, max_rpc_version) < 65536 && pb_membersize(grpc_gcp_RpcProtocolVersions, min_rpc_version) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_gcp_RpcProtocolVersions_grpc_gcp_RpcProtocolVersions_Version) +#endif + +#if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT) +/* If you get an error here, it means that you need to define PB_FIELD_16BIT + * compile-time option. You can do that in pb.h or on compiler command line. + * + * The reason you need to do this is that some of your messages contain tag + * numbers or field sizes that are larger than what can fit in the default + * 8 bit descriptors. + */ +PB_STATIC_ASSERT((pb_membersize(grpc_gcp_RpcProtocolVersions, max_rpc_version) < 256 && pb_membersize(grpc_gcp_RpcProtocolVersions, min_rpc_version) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_gcp_RpcProtocolVersions_grpc_gcp_RpcProtocolVersions_Version) +#endif + + +/* @@protoc_insertion_point(eof) */ diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common.pb.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common.pb.h new file mode 100644 index 000000000..49096dffa --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common.pb.h @@ -0,0 +1,78 @@ +/* Automatically generated nanopb header */ +/* Generated by nanopb-0.3.7-dev */ + +#ifndef PB_GRPC_GCP_TRANSPORT_SECURITY_COMMON_PB_H_INCLUDED +#define PB_GRPC_GCP_TRANSPORT_SECURITY_COMMON_PB_H_INCLUDED +#include "third_party/nanopb/pb.h" +/* @@protoc_insertion_point(includes) */ +#if PB_PROTO_HEADER_VERSION != 30 +#error Regenerate this file with the current version of nanopb generator. 
+#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* Enum definitions */ +typedef enum _grpc_gcp_SecurityLevel { + grpc_gcp_SecurityLevel_SECURITY_NONE = 0, + grpc_gcp_SecurityLevel_INTEGRITY_ONLY = 1, + grpc_gcp_SecurityLevel_INTEGRITY_AND_PRIVACY = 2 +} grpc_gcp_SecurityLevel; +#define _grpc_gcp_SecurityLevel_MIN grpc_gcp_SecurityLevel_SECURITY_NONE +#define _grpc_gcp_SecurityLevel_MAX grpc_gcp_SecurityLevel_INTEGRITY_AND_PRIVACY +#define _grpc_gcp_SecurityLevel_ARRAYSIZE ((grpc_gcp_SecurityLevel)(grpc_gcp_SecurityLevel_INTEGRITY_AND_PRIVACY+1)) + +/* Struct definitions */ +typedef struct _grpc_gcp_RpcProtocolVersions_Version { + bool has_major; + uint32_t major; + bool has_minor; + uint32_t minor; +/* @@protoc_insertion_point(struct:grpc_gcp_RpcProtocolVersions_Version) */ +} grpc_gcp_RpcProtocolVersions_Version; + +typedef struct _grpc_gcp_RpcProtocolVersions { + bool has_max_rpc_version; + grpc_gcp_RpcProtocolVersions_Version max_rpc_version; + bool has_min_rpc_version; + grpc_gcp_RpcProtocolVersions_Version min_rpc_version; +/* @@protoc_insertion_point(struct:grpc_gcp_RpcProtocolVersions) */ +} grpc_gcp_RpcProtocolVersions; + +/* Default values for struct fields */ + +/* Initializer values for message structs */ +#define grpc_gcp_RpcProtocolVersions_init_default {false, grpc_gcp_RpcProtocolVersions_Version_init_default, false, grpc_gcp_RpcProtocolVersions_Version_init_default} +#define grpc_gcp_RpcProtocolVersions_Version_init_default {false, 0, false, 0} +#define grpc_gcp_RpcProtocolVersions_init_zero {false, grpc_gcp_RpcProtocolVersions_Version_init_zero, false, grpc_gcp_RpcProtocolVersions_Version_init_zero} +#define grpc_gcp_RpcProtocolVersions_Version_init_zero {false, 0, false, 0} + +/* Field tags (for use in manual encoding/decoding) */ +#define grpc_gcp_RpcProtocolVersions_Version_major_tag 1 +#define grpc_gcp_RpcProtocolVersions_Version_minor_tag 2 +#define grpc_gcp_RpcProtocolVersions_max_rpc_version_tag 1 +#define grpc_gcp_RpcProtocolVersions_min_rpc_version_tag 2 + +/* Struct field encoding specification for nanopb */ +extern const pb_field_t grpc_gcp_RpcProtocolVersions_fields[3]; +extern const pb_field_t grpc_gcp_RpcProtocolVersions_Version_fields[3]; + +/* Maximum encoded size of messages (where known) */ +#define grpc_gcp_RpcProtocolVersions_size 28 +#define grpc_gcp_RpcProtocolVersions_Version_size 12 + +/* Message IDs (where set with "msgid" option) */ +#ifdef PB_MSGID + +#define TRANSPORT_SECURITY_COMMON_MESSAGES \ + + +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif +/* @@protoc_insertion_point(eof) */ + +#endif diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common_api.cc b/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common_api.cc new file mode 100644 index 000000000..8a7edb53d --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common_api.cc @@ -0,0 +1,196 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/tsi/alts/handshaker/transport_security_common_api.h" + +bool grpc_gcp_rpc_protocol_versions_set_max( + grpc_gcp_rpc_protocol_versions* versions, uint32_t max_major, + uint32_t max_minor) { + if (versions == nullptr) { + gpr_log(GPR_ERROR, + "versions is nullptr in " + "grpc_gcp_rpc_protocol_versions_set_max()."); + return false; + } + versions->has_max_rpc_version = true; + versions->max_rpc_version.has_major = true; + versions->max_rpc_version.has_minor = true; + versions->max_rpc_version.major = max_major; + versions->max_rpc_version.minor = max_minor; + return true; +} + +bool grpc_gcp_rpc_protocol_versions_set_min( + grpc_gcp_rpc_protocol_versions* versions, uint32_t min_major, + uint32_t min_minor) { + if (versions == nullptr) { + gpr_log(GPR_ERROR, + "versions is nullptr in " + "grpc_gcp_rpc_protocol_versions_set_min()."); + return false; + } + versions->has_min_rpc_version = true; + versions->min_rpc_version.has_major = true; + versions->min_rpc_version.has_minor = true; + versions->min_rpc_version.major = min_major; + versions->min_rpc_version.minor = min_minor; + return true; +} + +size_t grpc_gcp_rpc_protocol_versions_encode_length( + const grpc_gcp_rpc_protocol_versions* versions) { + if (versions == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to " + "grpc_gcp_rpc_protocol_versions_encode_length()."); + return 0; + } + pb_ostream_t size_stream; + memset(&size_stream, 0, sizeof(pb_ostream_t)); + if (!pb_encode(&size_stream, grpc_gcp_RpcProtocolVersions_fields, versions)) { + gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&size_stream)); + return 0; + } + return size_stream.bytes_written; +} + +bool grpc_gcp_rpc_protocol_versions_encode_to_raw_bytes( + const grpc_gcp_rpc_protocol_versions* versions, uint8_t* bytes, + size_t bytes_length) { + if (versions == nullptr || bytes == nullptr || bytes_length == 0) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to " + "grpc_gcp_rpc_protocol_versions_encode_to_raw_bytes()."); + return false; + } + pb_ostream_t output_stream = pb_ostream_from_buffer(bytes, bytes_length); + if (!pb_encode(&output_stream, grpc_gcp_RpcProtocolVersions_fields, + versions)) { + gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&output_stream)); + return false; + } + return true; +} + +bool grpc_gcp_rpc_protocol_versions_encode( + const grpc_gcp_rpc_protocol_versions* versions, grpc_slice* slice) { + if (versions == nullptr || slice == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to " + "grpc_gcp_rpc_protocol_versions_encode()."); + return false; + } + size_t encoded_length = + grpc_gcp_rpc_protocol_versions_encode_length(versions); + if (encoded_length == 0) return false; + *slice = grpc_slice_malloc(encoded_length); + return grpc_gcp_rpc_protocol_versions_encode_to_raw_bytes( + versions, GRPC_SLICE_START_PTR(*slice), encoded_length); +} + +bool grpc_gcp_rpc_protocol_versions_decode( + grpc_slice slice, grpc_gcp_rpc_protocol_versions* versions) { + if (versions == nullptr) { + gpr_log(GPR_ERROR, + "version is nullptr in " + "grpc_gcp_rpc_protocol_versions_decode()."); + return false; + } + pb_istream_t stream = pb_istream_from_buffer(GRPC_SLICE_START_PTR(slice), + GRPC_SLICE_LENGTH(slice)); + if (!pb_decode(&stream, grpc_gcp_RpcProtocolVersions_fields, versions)) { + gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream)); + return false; + } + return true; +} + +bool grpc_gcp_rpc_protocol_versions_copy( + const grpc_gcp_rpc_protocol_versions* src, + 
grpc_gcp_rpc_protocol_versions* dst) { + if ((src == nullptr && dst != nullptr) || + (src != nullptr && dst == nullptr)) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_rpc_protocol_versions_copy()."); + return false; + } + if (src == nullptr) { + return true; + } + grpc_gcp_rpc_protocol_versions_set_max(dst, src->max_rpc_version.major, + src->max_rpc_version.minor); + grpc_gcp_rpc_protocol_versions_set_min(dst, src->min_rpc_version.major, + src->min_rpc_version.minor); + return true; +} + +namespace grpc_core { +namespace internal { + +int grpc_gcp_rpc_protocol_version_compare( + const grpc_gcp_rpc_protocol_versions_version* v1, + const grpc_gcp_rpc_protocol_versions_version* v2) { + if ((v1->major > v2->major) || + (v1->major == v2->major && v1->minor > v2->minor)) { + return 1; + } + if ((v1->major < v2->major) || + (v1->major == v2->major && v1->minor < v2->minor)) { + return -1; + } + return 0; +} + +} // namespace internal +} // namespace grpc_core + +bool grpc_gcp_rpc_protocol_versions_check( + const grpc_gcp_rpc_protocol_versions* local_versions, + const grpc_gcp_rpc_protocol_versions* peer_versions, + grpc_gcp_rpc_protocol_versions_version* highest_common_version) { + if (local_versions == nullptr || peer_versions == nullptr) { + gpr_log(GPR_ERROR, + "Invalid arguments to " + "grpc_gcp_rpc_protocol_versions_check()."); + return false; + } + /* max_common_version is MIN(local.max, peer.max) */ + const grpc_gcp_rpc_protocol_versions_version* max_common_version = + grpc_core::internal::grpc_gcp_rpc_protocol_version_compare( + &local_versions->max_rpc_version, &peer_versions->max_rpc_version) > 0 + ? &peer_versions->max_rpc_version + : &local_versions->max_rpc_version; + /* min_common_version is MAX(local.min, peer.min) */ + const grpc_gcp_rpc_protocol_versions_version* min_common_version = + grpc_core::internal::grpc_gcp_rpc_protocol_version_compare( + &local_versions->min_rpc_version, &peer_versions->min_rpc_version) > 0 + ? &local_versions->min_rpc_version + : &peer_versions->min_rpc_version; + bool result = grpc_core::internal::grpc_gcp_rpc_protocol_version_compare( + max_common_version, min_common_version) >= 0 + ? true + : false; + if (result && highest_common_version != nullptr) { + memcpy(highest_common_version, max_common_version, + sizeof(grpc_gcp_rpc_protocol_versions_version)); + } + return result; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common_api.h b/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common_api.h new file mode 100644 index 000000000..68228cb3b --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/handshaker/transport_security_common_api.h @@ -0,0 +1,163 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_TSI_ALTS_HANDSHAKER_TRANSPORT_SECURITY_COMMON_API_H +#define GRPC_CORE_TSI_ALTS_HANDSHAKER_TRANSPORT_SECURITY_COMMON_API_H + +#include + +#include "third_party/nanopb/pb_decode.h" +#include "third_party/nanopb/pb_encode.h" + +#include +#include +#include +#include + +#include "src/core/tsi/alts/handshaker/transport_security_common.pb.h" + +typedef grpc_gcp_RpcProtocolVersions grpc_gcp_rpc_protocol_versions; + +typedef grpc_gcp_RpcProtocolVersions_Version + grpc_gcp_rpc_protocol_versions_version; + +/** + * This method sets the value for max_rpc_versions field of rpc protocol + * versions. + * + * - versions: an rpc protocol version instance. + * - max_major: a major version of maximum supported RPC version. + * - max_minor: a minor version of maximum supported RPC version. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_rpc_protocol_versions_set_max( + grpc_gcp_rpc_protocol_versions* versions, uint32_t max_major, + uint32_t max_minor); + +/** + * This method sets the value for min_rpc_versions field of rpc protocol + * versions. + * + * - versions: an rpc protocol version instance. + * - min_major: a major version of minimum supported RPC version. + * - min_minor: a minor version of minimum supported RPC version. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_rpc_protocol_versions_set_min( + grpc_gcp_rpc_protocol_versions* versions, uint32_t min_major, + uint32_t min_minor); + +/** + * This method computes serialized byte length of rpc protocol versions. + * + * - versions: an rpc protocol versions instance. + * + * The method returns serialized byte length. It returns 0 on failure. + */ +size_t grpc_gcp_rpc_protocol_versions_encode_length( + const grpc_gcp_rpc_protocol_versions* versions); + +/** + * This method serializes rpc protocol versions and writes the result to + * the memory buffer provided by the caller. Caller is responsible for + * allocating sufficient memory to store the serialized data. + * + * - versions: an rpc protocol versions instance. + * - bytes: bytes buffer where the result will be written to. + * - bytes_length: length of the bytes buffer. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_rpc_protocol_versions_encode_to_raw_bytes( + const grpc_gcp_rpc_protocol_versions* versions, uint8_t* bytes, + size_t bytes_length); + +/** + * This method serializes an rpc protocol version and returns serialized rpc + * versions in grpc slice. + * + * - versions: an rpc protocol versions instance. + * - slice: grpc slice where the serialized result will be written. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_rpc_protocol_versions_encode( + const grpc_gcp_rpc_protocol_versions* versions, grpc_slice* slice); + +/** + * This method de-serializes input in grpc slice form and stores the result + * in rpc protocol versions. + * + * - slice: a data stream containing a serialized rpc protocol version. + * - versions: an rpc protocol version instance used to hold de-serialized + * result. + * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_rpc_protocol_versions_decode( + grpc_slice slice, grpc_gcp_rpc_protocol_versions* versions); + +/** + * This method performs a deep copy operation on rpc protocol versions + * instance. + * + * - src: rpc protocol versions instance that needs to be copied. + * - dst: rpc protocol versions instance that stores the copied result. 
+ * + * The method returns true on success and false otherwise. + */ +bool grpc_gcp_rpc_protocol_versions_copy( + const grpc_gcp_rpc_protocol_versions* src, + grpc_gcp_rpc_protocol_versions* dst); + +/** + * This method performs a version check between local and peer rpc protocol + * versions. + * + * - local_versions: local rpc protocol versions instance. + * - peer_versions: peer rpc protocol versions instance. + * - highest_common_version: an output parameter that will store the highest + * common rpc protocol version both parties agreed on. + * + * The method returns true if the check passes which means both parties agreed + * on a common rpc protocol to use, and false otherwise. + */ +bool grpc_gcp_rpc_protocol_versions_check( + const grpc_gcp_rpc_protocol_versions* local_versions, + const grpc_gcp_rpc_protocol_versions* peer_versions, + grpc_gcp_rpc_protocol_versions_version* highest_common_version); + +namespace grpc_core { +namespace internal { + +/** + * Exposed for testing only. + * The method returns 0 if v1 = v2, + * returns 1 if v1 > v2, + * returns -1 if v1 < v2. + */ +int grpc_gcp_rpc_protocol_version_compare( + const grpc_gcp_rpc_protocol_versions_version* v1, + const grpc_gcp_rpc_protocol_versions_version* v2); + +} // namespace internal +} // namespace grpc_core + +#endif /* GRPC_CORE_TSI_ALTS_HANDSHAKER_TRANSPORT_SECURITY_COMMON_API_H */ diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc new file mode 100644 index 000000000..7ba03eb7f --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc @@ -0,0 +1,180 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h" + +#include +#include + +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h" + +/* Main struct for alts_grpc_integrity_only_record_protocol. */ +typedef struct alts_grpc_integrity_only_record_protocol { + alts_grpc_record_protocol base; + grpc_slice_buffer data_sb; + unsigned char* tag_buf; +} alts_grpc_integrity_only_record_protocol; + +/* --- alts_grpc_record_protocol methods implementation. --- */ + +static tsi_result alts_grpc_integrity_only_protect( + alts_grpc_record_protocol* rp, grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices) { + /* Input sanity check. */ + if (rp == nullptr || unprotected_slices == nullptr || + protected_slices == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to alts_grpc_record_protocol protect."); + return TSI_INVALID_ARGUMENT; + } + /* Allocates memory for header and tag slices. 
 */ + grpc_slice header_slice = GRPC_SLICE_MALLOC(rp->header_length); + grpc_slice tag_slice = GRPC_SLICE_MALLOC(rp->tag_length); + /* Calls alts_iovec_record_protocol protect. */ + char* error_details = nullptr; + iovec_t header_iovec = {GRPC_SLICE_START_PTR(header_slice), + GRPC_SLICE_LENGTH(header_slice)}; + iovec_t tag_iovec = {GRPC_SLICE_START_PTR(tag_slice), + GRPC_SLICE_LENGTH(tag_slice)}; + alts_grpc_record_protocol_convert_slice_buffer_to_iovec(rp, + unprotected_slices); + grpc_status_code status = alts_iovec_record_protocol_integrity_only_protect( + rp->iovec_rp, rp->iovec_buf, unprotected_slices->count, header_iovec, + tag_iovec, &error_details); + if (status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "Failed to protect, %s", error_details); + gpr_free(error_details); + return TSI_INTERNAL_ERROR; + } + /* Appends result to protected_slices. */ + grpc_slice_buffer_add(protected_slices, header_slice); + grpc_slice_buffer_move_into(unprotected_slices, protected_slices); + grpc_slice_buffer_add(protected_slices, tag_slice); + return TSI_OK; +} + +static tsi_result alts_grpc_integrity_only_unprotect( + alts_grpc_record_protocol* rp, grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices) { + /* Input sanity check. */ + if (rp == nullptr || protected_slices == nullptr || + unprotected_slices == nullptr) { + gpr_log( + GPR_ERROR, + "Invalid nullptr arguments to alts_grpc_record_protocol unprotect."); + return TSI_INVALID_ARGUMENT; + } + if (protected_slices->length < rp->header_length + rp->tag_length) { + gpr_log(GPR_ERROR, "Protected slices do not have sufficient data."); + return TSI_INVALID_ARGUMENT; + } + /* In this method, rp points to alts_grpc_record_protocol struct + * and integrity_only_record_protocol points to + * alts_grpc_integrity_only_record_protocol struct. */ + alts_grpc_integrity_only_record_protocol* integrity_only_record_protocol = + reinterpret_cast<alts_grpc_integrity_only_record_protocol*>(rp); + /* Strips frame header from protected slices. */ + grpc_slice_buffer_reset_and_unref_internal(&rp->header_sb); + grpc_slice_buffer_move_first(protected_slices, rp->header_length, + &rp->header_sb); + GPR_ASSERT(rp->header_sb.length == rp->header_length); + iovec_t header_iovec = alts_grpc_record_protocol_get_header_iovec(rp); + /* Moves protected slices data to data_sb and leaves the remaining tag. */ + grpc_slice_buffer_reset_and_unref_internal( + &integrity_only_record_protocol->data_sb); + grpc_slice_buffer_move_first(protected_slices, + protected_slices->length - rp->tag_length, + &integrity_only_record_protocol->data_sb); + GPR_ASSERT(protected_slices->length == rp->tag_length); + iovec_t tag_iovec = {nullptr, rp->tag_length}; + if (protected_slices->count == 1) { + tag_iovec.iov_base = GRPC_SLICE_START_PTR(protected_slices->slices[0]); + } else { + /* Frame tag is in multiple slices, copies the tag bytes from slice + * buffer to a single flat buffer. */ + alts_grpc_record_protocol_copy_slice_buffer( + protected_slices, integrity_only_record_protocol->tag_buf); + tag_iovec.iov_base = integrity_only_record_protocol->tag_buf; + } + /* Calls alts_iovec_record_protocol unprotect. 
 */ + char* error_details = nullptr; + alts_grpc_record_protocol_convert_slice_buffer_to_iovec( + rp, &integrity_only_record_protocol->data_sb); + grpc_status_code status = alts_iovec_record_protocol_integrity_only_unprotect( + rp->iovec_rp, rp->iovec_buf, + integrity_only_record_protocol->data_sb.count, header_iovec, tag_iovec, + &error_details); + if (status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "Failed to unprotect, %s", error_details); + gpr_free(error_details); + return TSI_INTERNAL_ERROR; + } + grpc_slice_buffer_reset_and_unref_internal(&rp->header_sb); + grpc_slice_buffer_reset_and_unref_internal(protected_slices); + grpc_slice_buffer_move_into(&integrity_only_record_protocol->data_sb, + unprotected_slices); + return TSI_OK; +} + +static void alts_grpc_integrity_only_destruct(alts_grpc_record_protocol* rp) { + if (rp == nullptr) { + return; + } + alts_grpc_integrity_only_record_protocol* integrity_only_rp = + reinterpret_cast<alts_grpc_integrity_only_record_protocol*>(rp); + grpc_slice_buffer_destroy_internal(&integrity_only_rp->data_sb); + gpr_free(integrity_only_rp->tag_buf); +} + +static const alts_grpc_record_protocol_vtable + alts_grpc_integrity_only_record_protocol_vtable = { + alts_grpc_integrity_only_protect, alts_grpc_integrity_only_unprotect, + alts_grpc_integrity_only_destruct}; + +tsi_result alts_grpc_integrity_only_record_protocol_create( + gsec_aead_crypter* crypter, size_t overflow_size, bool is_client, + bool is_protect, alts_grpc_record_protocol** rp) { + if (crypter == nullptr || rp == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to alts_grpc_record_protocol create."); + return TSI_INVALID_ARGUMENT; + } + alts_grpc_integrity_only_record_protocol* impl = + static_cast<alts_grpc_integrity_only_record_protocol*>( + gpr_zalloc(sizeof(alts_grpc_integrity_only_record_protocol))); + /* Calls alts_grpc_record_protocol init. */ + tsi_result result = alts_grpc_record_protocol_init( + &impl->base, crypter, overflow_size, is_client, + /*is_integrity_only=*/true, is_protect); + if (result != TSI_OK) { + gpr_free(impl); + return result; + } + /* Initializes slice buffer for data_sb. */ + grpc_slice_buffer_init(&impl->data_sb); + /* Allocates tag buffer. */ + impl->tag_buf = + static_cast<unsigned char*>(gpr_malloc(impl->base.tag_length)); + impl->base.vtable = &alts_grpc_integrity_only_record_protocol_vtable; + *rp = &impl->base; + return TSI_OK; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h new file mode 100644 index 000000000..8d68b27e0 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h @@ -0,0 +1,52 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_INTEGRITY_ONLY_RECORD_PROTOCOL_H +#define GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_INTEGRITY_ONLY_RECORD_PROTOCOL_H + +#include + +#include + +#include "src/core/tsi/alts/crypt/gsec.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h" + +/** + * This method creates an integrity-only alts_grpc_record_protocol instance, + * given a gsec_aead_crypter instance and a flag indicating if the created + * instance will be used at the client or server side. The ownership of + * gsec_aead_crypter instance is transferred to this new object. + * + * - crypter: a gsec_aead_crypter instance used to perform AEAD decryption. + * - overflow_size: overflow size of counter in bytes. + * - is_client: a flag indicating if the alts_grpc_record_protocol instance will + * be used at the client or server side. + * - is_protect: a flag indicating if the alts_grpc_record_protocol instance + * will be used for protect or unprotect. + * - rp: an alts_grpc_record_protocol instance to be returned from + * the method. + * + * This method returns TSI_OK in case of success or a specific error code in + * case of failure. + */ +tsi_result alts_grpc_integrity_only_record_protocol_create( + gsec_aead_crypter* crypter, size_t overflow_size, bool is_client, + bool is_protect, alts_grpc_record_protocol** rp); + +#endif /* GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_INTEGRITY_ONLY_RECORD_PROTOCOL_H \ + */ diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc new file mode 100644 index 000000000..d4fd88d1e --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc @@ -0,0 +1,144 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.h" + +#include +#include + +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h" + +/* Privacy-integrity alts_grpc_record_protocol object uses the same struct + * defined in alts_grpc_record_protocol_common.h. */ + +/* --- alts_grpc_record_protocol methods implementation. --- */ + +static tsi_result alts_grpc_privacy_integrity_protect( + alts_grpc_record_protocol* rp, grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices) { + /* Input sanity check. 
*/ + if (rp == nullptr || unprotected_slices == nullptr || + protected_slices == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to alts_grpc_record_protocol protect."); + return TSI_INVALID_ARGUMENT; + } + /* Allocates memory for output frame. In privacy-integrity protect, the + * protected frame is stored in a newly allocated buffer. */ + size_t protected_frame_size = + unprotected_slices->length + rp->header_length + + alts_iovec_record_protocol_get_tag_length(rp->iovec_rp); + grpc_slice protected_slice = GRPC_SLICE_MALLOC(protected_frame_size); + iovec_t protected_iovec = {GRPC_SLICE_START_PTR(protected_slice), + GRPC_SLICE_LENGTH(protected_slice)}; + /* Calls alts_iovec_record_protocol protect. */ + char* error_details = nullptr; + alts_grpc_record_protocol_convert_slice_buffer_to_iovec(rp, + unprotected_slices); + grpc_status_code status = + alts_iovec_record_protocol_privacy_integrity_protect( + rp->iovec_rp, rp->iovec_buf, unprotected_slices->count, + protected_iovec, &error_details); + if (status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "Failed to protect, %s", error_details); + gpr_free(error_details); + grpc_slice_unref(protected_slice); + return TSI_INTERNAL_ERROR; + } + grpc_slice_buffer_add(protected_slices, protected_slice); + grpc_slice_buffer_reset_and_unref_internal(unprotected_slices); + return TSI_OK; +} + +static tsi_result alts_grpc_privacy_integrity_unprotect( + alts_grpc_record_protocol* rp, grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices) { + /* Input sanity check. */ + if (rp == nullptr || protected_slices == nullptr || + unprotected_slices == nullptr) { + gpr_log( + GPR_ERROR, + "Invalid nullptr arguments to alts_grpc_record_protocol unprotect."); + return TSI_INVALID_ARGUMENT; + } + /* Allocates memory for output frame. In privacy-integrity unprotect, the + * unprotected data are stored in a newly allocated buffer. */ + if (protected_slices->length < rp->header_length + rp->tag_length) { + gpr_log(GPR_ERROR, "Protected slices do not have sufficient data."); + return TSI_INVALID_ARGUMENT; + } + size_t unprotected_frame_size = + protected_slices->length - rp->header_length - rp->tag_length; + grpc_slice unprotected_slice = GRPC_SLICE_MALLOC(unprotected_frame_size); + iovec_t unprotected_iovec = {GRPC_SLICE_START_PTR(unprotected_slice), + GRPC_SLICE_LENGTH(unprotected_slice)}; + /* Strips frame header from protected slices. */ + grpc_slice_buffer_reset_and_unref_internal(&rp->header_sb); + grpc_slice_buffer_move_first(protected_slices, rp->header_length, + &rp->header_sb); + iovec_t header_iovec = alts_grpc_record_protocol_get_header_iovec(rp); + /* Calls alts_iovec_record_protocol unprotect. 
 */ + char* error_details = nullptr; + alts_grpc_record_protocol_convert_slice_buffer_to_iovec(rp, protected_slices); + grpc_status_code status = + alts_iovec_record_protocol_privacy_integrity_unprotect( + rp->iovec_rp, header_iovec, rp->iovec_buf, protected_slices->count, + unprotected_iovec, &error_details); + if (status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "Failed to unprotect, %s", error_details); + gpr_free(error_details); + grpc_slice_unref(unprotected_slice); + return TSI_INTERNAL_ERROR; + } + grpc_slice_buffer_reset_and_unref_internal(&rp->header_sb); + grpc_slice_buffer_reset_and_unref_internal(protected_slices); + grpc_slice_buffer_add(unprotected_slices, unprotected_slice); + return TSI_OK; +} + +static const alts_grpc_record_protocol_vtable + alts_grpc_privacy_integrity_record_protocol_vtable = { + alts_grpc_privacy_integrity_protect, + alts_grpc_privacy_integrity_unprotect, nullptr}; + +tsi_result alts_grpc_privacy_integrity_record_protocol_create( + gsec_aead_crypter* crypter, size_t overflow_size, bool is_client, + bool is_protect, alts_grpc_record_protocol** rp) { + if (crypter == nullptr || rp == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to alts_grpc_record_protocol create."); + return TSI_INVALID_ARGUMENT; + } + auto* impl = static_cast<alts_grpc_record_protocol*>( + gpr_zalloc(sizeof(alts_grpc_record_protocol))); + /* Calls alts_grpc_record_protocol init. */ + tsi_result result = + alts_grpc_record_protocol_init(impl, crypter, overflow_size, is_client, + /*is_integrity_only=*/false, is_protect); + if (result != TSI_OK) { + gpr_free(impl); + return result; + } + impl->vtable = &alts_grpc_privacy_integrity_record_protocol_vtable; + *rp = impl; + return TSI_OK; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.h b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.h new file mode 100644 index 000000000..1e34aef2d --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.h @@ -0,0 +1,49 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_PRIVACY_INTEGRITY_RECORD_PROTOCOL_H +#define GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_PRIVACY_INTEGRITY_RECORD_PROTOCOL_H + +#include + +#include + +#include "src/core/tsi/alts/crypt/gsec.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h" + +/** + * This method creates a privacy-integrity alts_grpc_record_protocol instance, + * given a gsec_aead_crypter instance and a flag indicating if the created + * instance will be used at the client or server side. The ownership of + * gsec_aead_crypter instance is transferred to this new object. + * + * - crypter: a gsec_aead_crypter instance used to perform AEAD decryption. 
+ * - is_client: a flag indicating if the alts_grpc_record_protocol instance will + * be used at the client or server side. + * - rp: an alts_grpc_record_protocol instance to be returned from + * the method. + * + * This method returns TSI_OK in case of success or a specific error code in + * case of failure. + */ +tsi_result alts_grpc_privacy_integrity_record_protocol_create( + gsec_aead_crypter* crypter, size_t overflow_size, bool is_client, + bool is_protect, alts_grpc_record_protocol** rp); + +#endif /* GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_PRIVACY_INTEGRITY_RECORD_PROTOCOL_H \ + */ diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h new file mode 100644 index 000000000..d1e433dac --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h @@ -0,0 +1,91 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_RECORD_PROTOCOL_H +#define GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_RECORD_PROTOCOL_H + +#include + +#include + +#include "src/core/tsi/transport_security_interface.h" + +/** + * This alts_grpc_record_protocol object protects and unprotects a single frame + * stored in grpc slice buffer with zero or minimized memory copy. + * Implementations of this object must be thread compatible. + */ +typedef struct alts_grpc_record_protocol alts_grpc_record_protocol; + +/** + * This methods performs protect operation on unprotected data and appends the + * protected frame to protected_slices. The caller needs to ensure the length + * of unprotected data plus the frame overhead is less than or equal to the + * maximum frame length. The input unprotected data slice buffer will be + * cleared, although the actual unprotected data bytes are not modified. + * + * - self: an alts_grpc_record_protocol instance. + * - unprotected_slices: the unprotected data to be protected. + * - protected_slices: slice buffer where the protected frame is appended. + * + * This method returns TSI_OK in case of success or a specific error code in + * case of failure. + */ +tsi_result alts_grpc_record_protocol_protect( + alts_grpc_record_protocol* self, grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices); + +/** + * This methods performs unprotect operation on a full frame of protected data + * and appends unprotected data to unprotected_slices. It is the caller's + * responsibility to prepare a full frame of data before calling this method. + * The input protected frame slice buffer will be cleared, although the actual + * protected data bytes are not modified. + * + * - self: an alts_grpc_record_protocol instance. + * - protected_slices: a full frame of protected data in grpc slices. + * - unprotected_slices: slice buffer where unprotected data is appended. 
+ * + * This method returns TSI_OK in case of success or a specific error code in + * case of failure. + */ +tsi_result alts_grpc_record_protocol_unprotect( + alts_grpc_record_protocol* self, grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices); + +/** + * This method returns maximum allowed unprotected data size, given maximum + * protected frame size. + * + * - self: an alts_grpc_record_protocol instance. + * - max_protected_frame_size: maximum protected frame size. + * + * On success, the method returns the maximum allowed unprotected data size. + * Otherwise, it returns zero. + */ +size_t alts_grpc_record_protocol_max_unprotected_data_size( + const alts_grpc_record_protocol* self, size_t max_protected_frame_size); + +/** + * This method destroys an alts_grpc_record_protocol instance by de-allocating + * all of its occupied memory. + */ +void alts_grpc_record_protocol_destroy(alts_grpc_record_protocol* self); + +#endif /* GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_RECORD_PROTOCOL_H \ + */ diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc new file mode 100644 index 000000000..1048b600f --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc @@ -0,0 +1,174 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h" + +#include + +#include +#include + +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/slice/slice_internal.h" + +const size_t kInitialIovecBufferSize = 8; + +/* Makes sure iovec_buf in alts_grpc_record_protocol is large enough. */ +static void ensure_iovec_buf_size(alts_grpc_record_protocol* rp, + const grpc_slice_buffer* sb) { + GPR_ASSERT(rp != nullptr && sb != nullptr); + if (sb->count <= rp->iovec_buf_length) { + return; + } + /* At least double the iovec buffer size. */ + rp->iovec_buf_length = GPR_MAX(sb->count, 2 * rp->iovec_buf_length); + rp->iovec_buf = static_cast( + gpr_realloc(rp->iovec_buf, rp->iovec_buf_length * sizeof(iovec_t))); +} + +/* --- Implementation of methods defined in tsi_grpc_record_protocol_common.h. 
+ * --- */ + +void alts_grpc_record_protocol_convert_slice_buffer_to_iovec( + alts_grpc_record_protocol* rp, const grpc_slice_buffer* sb) { + GPR_ASSERT(rp != nullptr && sb != nullptr); + ensure_iovec_buf_size(rp, sb); + for (size_t i = 0; i < sb->count; i++) { + rp->iovec_buf[i].iov_base = GRPC_SLICE_START_PTR(sb->slices[i]); + rp->iovec_buf[i].iov_len = GRPC_SLICE_LENGTH(sb->slices[i]); + } +} + +void alts_grpc_record_protocol_copy_slice_buffer(const grpc_slice_buffer* src, + unsigned char* dst) { + GPR_ASSERT(src != nullptr && dst != nullptr); + for (size_t i = 0; i < src->count; i++) { + size_t slice_length = GRPC_SLICE_LENGTH(src->slices[i]); + memcpy(dst, GRPC_SLICE_START_PTR(src->slices[i]), slice_length); + dst += slice_length; + } +} + +iovec_t alts_grpc_record_protocol_get_header_iovec( + alts_grpc_record_protocol* rp) { + iovec_t header_iovec = {nullptr, 0}; + if (rp == nullptr) { + return header_iovec; + } + header_iovec.iov_len = rp->header_length; + if (rp->header_sb.count == 1) { + header_iovec.iov_base = GRPC_SLICE_START_PTR(rp->header_sb.slices[0]); + } else { + /* Frame header is in multiple slices, copies the header bytes from slice + * buffer to a single flat buffer. */ + alts_grpc_record_protocol_copy_slice_buffer(&rp->header_sb, rp->header_buf); + header_iovec.iov_base = rp->header_buf; + } + return header_iovec; +} + +tsi_result alts_grpc_record_protocol_init(alts_grpc_record_protocol* rp, + gsec_aead_crypter* crypter, + size_t overflow_size, bool is_client, + bool is_integrity_only, + bool is_protect) { + if (rp == nullptr || crypter == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to alts_grpc_record_protocol init."); + return TSI_INVALID_ARGUMENT; + } + /* Creates alts_iovec_record_protocol. */ + char* error_details = nullptr; + grpc_status_code status = alts_iovec_record_protocol_create( + crypter, overflow_size, is_client, is_integrity_only, is_protect, + &rp->iovec_rp, &error_details); + if (status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "Failed to create alts_iovec_record_protocol, %s.", + error_details); + gpr_free(error_details); + return TSI_INTERNAL_ERROR; + } + /* Allocates header slice buffer. */ + grpc_slice_buffer_init(&rp->header_sb); + /* Allocates header buffer. */ + rp->header_length = alts_iovec_record_protocol_get_header_length(); + rp->header_buf = static_cast(gpr_malloc(rp->header_length)); + rp->tag_length = alts_iovec_record_protocol_get_tag_length(rp->iovec_rp); + /* Allocates iovec buffer. */ + rp->iovec_buf_length = kInitialIovecBufferSize; + rp->iovec_buf = + static_cast(gpr_malloc(rp->iovec_buf_length * sizeof(iovec_t))); + return TSI_OK; +} + +/* --- Implementation of methods defined in tsi_grpc_record_protocol.h. 
--- */ +tsi_result alts_grpc_record_protocol_protect( + alts_grpc_record_protocol* self, grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices) { + if (grpc_core::ExecCtx::Get() == nullptr || self == nullptr || + self->vtable == nullptr || unprotected_slices == nullptr || + protected_slices == nullptr) { + return TSI_INVALID_ARGUMENT; + } + if (self->vtable->protect == nullptr) { + return TSI_UNIMPLEMENTED; + } + return self->vtable->protect(self, unprotected_slices, protected_slices); +} + +tsi_result alts_grpc_record_protocol_unprotect( + alts_grpc_record_protocol* self, grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices) { + if (grpc_core::ExecCtx::Get() == nullptr || self == nullptr || + self->vtable == nullptr || protected_slices == nullptr || + unprotected_slices == nullptr) { + return TSI_INVALID_ARGUMENT; + } + if (self->vtable->unprotect == nullptr) { + return TSI_UNIMPLEMENTED; + } + return self->vtable->unprotect(self, protected_slices, unprotected_slices); +} + +void alts_grpc_record_protocol_destroy(alts_grpc_record_protocol* self) { + if (self == nullptr) { + return; + } + if (self->vtable->destruct != nullptr) { + self->vtable->destruct(self); + } + alts_iovec_record_protocol_destroy(self->iovec_rp); + grpc_slice_buffer_destroy_internal(&self->header_sb); + gpr_free(self->header_buf); + gpr_free(self->iovec_buf); + gpr_free(self); +} + +/* Integrity-only and privacy-integrity share the same implementation. No need + * to call vtable. */ +size_t alts_grpc_record_protocol_max_unprotected_data_size( + const alts_grpc_record_protocol* self, size_t max_protected_frame_size) { + if (self == nullptr) { + return 0; + } + return alts_iovec_record_protocol_max_unprotected_data_size( + self->iovec_rp, max_protected_frame_size); +} diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h new file mode 100644 index 000000000..43b8a4a2b --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h @@ -0,0 +1,100 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_RECORD_PROTOCOL_COMMON_H +#define GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_RECORD_PROTOCOL_COMMON_H + +/** + * this file contains alts_grpc_record_protocol internals and internal-only + * helper functions. The public functions of alts_grpc_record_protocol are + * defined in the alts_grpc_record_protocol.h. + */ + +#include + +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h" + +/* V-table for alts_grpc_record_protocol implementations. 
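+ * Each concrete record protocol defines one static instance of this v-table and
+ * points alts_grpc_record_protocol::vtable at it once init succeeds. As an
+ * illustrative sketch only (mirroring the privacy-integrity implementation
+ * earlier in this patch; my_protect and my_unprotect are hypothetical names):
+ *
+ *   static const alts_grpc_record_protocol_vtable my_vtable = {
+ *       my_protect, my_unprotect, nullptr};  // nullptr destruct is allowed
+ *   ...
+ *   impl->vtable = &my_vtable;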
*/ +typedef struct { + tsi_result (*protect)(alts_grpc_record_protocol* self, + grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices); + tsi_result (*unprotect)(alts_grpc_record_protocol* self, + grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices); + void (*destruct)(alts_grpc_record_protocol* self); +} alts_grpc_record_protocol_vtable; + +/* Main struct for alts_grpc_record_protocol implementation, shared by both + * integrity-only record protocol and privacy-integrity record protocol. + * Integrity-only record protocol has additional data elements. + * Privacy-integrity record protocol uses this struct directly. */ +struct alts_grpc_record_protocol { + const alts_grpc_record_protocol_vtable* vtable; + alts_iovec_record_protocol* iovec_rp; + grpc_slice_buffer header_sb; + unsigned char* header_buf; + size_t header_length; + size_t tag_length; + iovec_t* iovec_buf; + size_t iovec_buf_length; +}; + +/** + * Converts the slices of input sb into iovec_t's and puts the result into + * rp->iovec_buf. Note that the actual data are not copied, only + * pointers and lengths are copied. + */ +void alts_grpc_record_protocol_convert_slice_buffer_to_iovec( + alts_grpc_record_protocol* rp, const grpc_slice_buffer* sb); + +/** + * Copies bytes from slice buffer to destination buffer. Caller is responsible + * for allocating enough memory of destination buffer. This method is used for + * copying frame header and tag in case they are stored in multiple slices. + */ +void alts_grpc_record_protocol_copy_slice_buffer(const grpc_slice_buffer* src, + unsigned char* dst); + +/** + * This method returns an iovec object pointing to the frame header stored in + * rp->header_sb. If the frame header is stored in multiple slices, + * this method will copy the bytes in rp->header_sb to + * rp->header_buf, and return an iovec object pointing to + * rp->header_buf. + */ +iovec_t alts_grpc_record_protocol_get_header_iovec( + alts_grpc_record_protocol* rp); + +/** + * Initializes an alts_grpc_record_protocol object, given a gsec_aead_crypter + * instance, the overflow size of the counter in bytes, a flag indicating if the + * object is used for client or server side, a flag indicating if it is used for + * integrity-only or privacy-integrity mode, and a flag indicating if it is for + * protect or unprotect. The ownership of gsec_aead_crypter object is + * transferred to the alts_grpc_record_protocol object. + */ +tsi_result alts_grpc_record_protocol_init(alts_grpc_record_protocol* rp, + gsec_aead_crypter* crypter, + size_t overflow_size, bool is_client, + bool is_integrity_only, + bool is_protect); + +#endif /* GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_GRPC_RECORD_PROTOCOL_COMMON_H \ + */ diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc new file mode 100644 index 000000000..6a548e50d --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc @@ -0,0 +1,476 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h" + +#include +#include + +#include +#include + +#include "src/core/tsi/alts/frame_protector/alts_counter.h" + +struct alts_iovec_record_protocol { + alts_counter* ctr; + gsec_aead_crypter* crypter; + size_t tag_length; + bool is_integrity_only; + bool is_protect; +}; + +/* Copies error message to destination. */ +static void maybe_copy_error_msg(const char* src, char** dst) { + if (dst != nullptr && src != nullptr) { + *dst = static_cast(gpr_malloc(strlen(src) + 1)); + memcpy(*dst, src, strlen(src) + 1); + } +} + +/* Appends error message to destination. */ +static void maybe_append_error_msg(const char* appendix, char** dst) { + if (dst != nullptr && appendix != nullptr) { + int dst_len = static_cast(strlen(*dst)); + *dst = static_cast(realloc(*dst, dst_len + strlen(appendix) + 1)); + assert(*dst != nullptr); + memcpy(*dst + dst_len, appendix, strlen(appendix) + 1); + } +} + +/* Use little endian to interpret a string of bytes as uint32_t. */ +static uint32_t load_32_le(const unsigned char* buffer) { + return (((uint32_t)buffer[3]) << 24) | (((uint32_t)buffer[2]) << 16) | + (((uint32_t)buffer[1]) << 8) | ((uint32_t)buffer[0]); +} + +/* Store uint32_t as a string of little endian bytes. */ +static void store_32_le(uint32_t value, unsigned char* buffer) { + buffer[3] = (unsigned char)(value >> 24) & 0xFF; + buffer[2] = (unsigned char)(value >> 16) & 0xFF; + buffer[1] = (unsigned char)(value >> 8) & 0xFF; + buffer[0] = (unsigned char)(value)&0xFF; +} + +/* Ensures header and tag iovec have sufficient length. */ +static grpc_status_code ensure_header_and_tag_length( + const alts_iovec_record_protocol* rp, iovec_t header, iovec_t tag, + char** error_details) { + if (rp == nullptr) { + return GRPC_STATUS_FAILED_PRECONDITION; + } + if (header.iov_base == nullptr) { + maybe_copy_error_msg("Header is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (header.iov_len != alts_iovec_record_protocol_get_header_length()) { + maybe_copy_error_msg("Header length is incorrect.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (tag.iov_base == nullptr) { + maybe_copy_error_msg("Tag is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (tag.iov_len != rp->tag_length) { + maybe_copy_error_msg("Tag length is incorrect.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + return GRPC_STATUS_OK; +} + +/* Increments crypter counter and checks overflow. */ +static grpc_status_code increment_counter(alts_counter* counter, + char** error_details) { + if (counter == nullptr) { + return GRPC_STATUS_FAILED_PRECONDITION; + } + bool is_overflow = false; + grpc_status_code status = + alts_counter_increment(counter, &is_overflow, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + if (is_overflow) { + maybe_copy_error_msg("Crypter counter is overflowed.", error_details); + return GRPC_STATUS_INTERNAL; + } + return GRPC_STATUS_OK; +} + +/* Given an array of iovec, computes the total length of buffer. 
*/ +static size_t get_total_length(const iovec_t* vec, size_t vec_length) { + size_t total_length = 0; + for (size_t i = 0; i < vec_length; ++i) { + total_length += vec[i].iov_len; + } + return total_length; +} + +/* Writes frame header given data and tag length. */ +static grpc_status_code write_frame_header(size_t data_length, + unsigned char* header, + char** error_details) { + if (header == nullptr) { + maybe_copy_error_msg("Header is nullptr.", error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + size_t frame_length = kZeroCopyFrameMessageTypeFieldSize + data_length; + store_32_le(static_cast(frame_length), header); + store_32_le(kZeroCopyFrameMessageType, + header + kZeroCopyFrameLengthFieldSize); + return GRPC_STATUS_OK; +} + +/* Verifies frame header given protected data length. */ +static grpc_status_code verify_frame_header(size_t data_length, + unsigned char* header, + char** error_details) { + if (header == nullptr) { + maybe_copy_error_msg("Header is nullptr.", error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + size_t frame_length = load_32_le(header); + if (frame_length != kZeroCopyFrameMessageTypeFieldSize + data_length) { + maybe_copy_error_msg("Bad frame length.", error_details); + return GRPC_STATUS_INTERNAL; + } + size_t message_type = load_32_le(header + kZeroCopyFrameLengthFieldSize); + if (message_type != kZeroCopyFrameMessageType) { + maybe_copy_error_msg("Unsupported message type.", error_details); + return GRPC_STATUS_INTERNAL; + } + return GRPC_STATUS_OK; +} + +/* --- alts_iovec_record_protocol methods implementation. --- */ + +size_t alts_iovec_record_protocol_get_header_length() { + return kZeroCopyFrameHeaderSize; +} + +size_t alts_iovec_record_protocol_get_tag_length( + const alts_iovec_record_protocol* rp) { + if (rp != nullptr) { + return rp->tag_length; + } + return 0; +} + +size_t alts_iovec_record_protocol_max_unprotected_data_size( + const alts_iovec_record_protocol* rp, size_t max_protected_frame_size) { + if (rp == nullptr) { + return 0; + } + size_t overhead_bytes_size = + kZeroCopyFrameMessageTypeFieldSize + rp->tag_length; + if (max_protected_frame_size <= overhead_bytes_size) return 0; + return max_protected_frame_size - overhead_bytes_size; +} + +grpc_status_code alts_iovec_record_protocol_integrity_only_protect( + alts_iovec_record_protocol* rp, const iovec_t* unprotected_vec, + size_t unprotected_vec_length, iovec_t header, iovec_t tag, + char** error_details) { + /* Input sanity checks. */ + if (rp == nullptr) { + maybe_copy_error_msg("Input iovec_record_protocol is nullptr.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (!rp->is_integrity_only) { + maybe_copy_error_msg( + "Integrity-only operations are not allowed for this object.", + error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + if (!rp->is_protect) { + maybe_copy_error_msg("Protect operations are not allowed for this object.", + error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + grpc_status_code status = + ensure_header_and_tag_length(rp, header, tag, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Unprotected data should not be zero length. */ + size_t data_length = + get_total_length(unprotected_vec, unprotected_vec_length); + /* Sets frame header. */ + status = write_frame_header(data_length + rp->tag_length, + static_cast(header.iov_base), + error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Computes frame tag by calling AEAD crypter. 
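+ * In integrity-only mode the unprotected iovecs are passed to the crypter as
+ * associated data and no plaintext is supplied, so the only bytes the crypter
+ * writes are the tag itself (hence the bytes_written == tag_length check below).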
*/ + size_t bytes_written = 0; + status = gsec_aead_crypter_encrypt_iovec( + rp->crypter, alts_counter_get_counter(rp->ctr), + alts_counter_get_size(rp->ctr), unprotected_vec, unprotected_vec_length, + /* plaintext_vec = */ nullptr, /* plaintext_vec_length = */ 0, tag, + &bytes_written, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + if (bytes_written != rp->tag_length) { + maybe_copy_error_msg("Bytes written expects to be the same as tag length.", + error_details); + return GRPC_STATUS_INTERNAL; + } + /* Increments the crypter counter. */ + return increment_counter(rp->ctr, error_details); +} + +grpc_status_code alts_iovec_record_protocol_integrity_only_unprotect( + alts_iovec_record_protocol* rp, const iovec_t* protected_vec, + size_t protected_vec_length, iovec_t header, iovec_t tag, + char** error_details) { + /* Input sanity checks. */ + if (rp == nullptr) { + maybe_copy_error_msg("Input iovec_record_protocol is nullptr.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (!rp->is_integrity_only) { + maybe_copy_error_msg( + "Integrity-only operations are not allowed for this object.", + error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + if (rp->is_protect) { + maybe_copy_error_msg( + "Unprotect operations are not allowed for this object.", error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + grpc_status_code status = + ensure_header_and_tag_length(rp, header, tag, error_details); + if (status != GRPC_STATUS_OK) return status; + /* Protected data should not be zero length. */ + size_t data_length = get_total_length(protected_vec, protected_vec_length); + /* Verifies frame header. */ + status = verify_frame_header(data_length + rp->tag_length, + static_cast(header.iov_base), + error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Verifies frame tag by calling AEAD crypter. */ + iovec_t plaintext = {nullptr, 0}; + size_t bytes_written = 0; + status = gsec_aead_crypter_decrypt_iovec( + rp->crypter, alts_counter_get_counter(rp->ctr), + alts_counter_get_size(rp->ctr), protected_vec, protected_vec_length, &tag, + 1, plaintext, &bytes_written, error_details); + if (status != GRPC_STATUS_OK || bytes_written != 0) { + maybe_append_error_msg(" Frame tag verification failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + /* Increments the crypter counter. */ + return increment_counter(rp->ctr, error_details); +} + +grpc_status_code alts_iovec_record_protocol_privacy_integrity_protect( + alts_iovec_record_protocol* rp, const iovec_t* unprotected_vec, + size_t unprotected_vec_length, iovec_t protected_frame, + char** error_details) { + /* Input sanity checks. */ + if (rp == nullptr) { + maybe_copy_error_msg("Input iovec_record_protocol is nullptr.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (rp->is_integrity_only) { + maybe_copy_error_msg( + "Privacy-integrity operations are not allowed for this object.", + error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + if (!rp->is_protect) { + maybe_copy_error_msg("Protect operations are not allowed for this object.", + error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + /* Unprotected data should not be zero length. */ + size_t data_length = + get_total_length(unprotected_vec, unprotected_vec_length); + /* Ensures protected frame iovec has sufficient size. 
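+ * The caller must supply exactly header-plus-data-plus-tag bytes, i.e.
+ * alts_iovec_record_protocol_get_header_length() + data_length + rp->tag_length,
+ * which is what the size check below enforces.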
*/ + if (protected_frame.iov_base == nullptr) { + maybe_copy_error_msg("Protected frame is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (protected_frame.iov_len != + alts_iovec_record_protocol_get_header_length() + data_length + + rp->tag_length) { + maybe_copy_error_msg("Protected frame size is incorrect.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + /* Writer frame header. */ + grpc_status_code status = write_frame_header( + data_length + rp->tag_length, + static_cast(protected_frame.iov_base), error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Encrypt unprotected data by calling AEAD crypter. */ + unsigned char* ciphertext_buffer = + static_cast(protected_frame.iov_base) + + alts_iovec_record_protocol_get_header_length(); + iovec_t ciphertext = {ciphertext_buffer, data_length + rp->tag_length}; + size_t bytes_written = 0; + status = gsec_aead_crypter_encrypt_iovec( + rp->crypter, alts_counter_get_counter(rp->ctr), + alts_counter_get_size(rp->ctr), /* aad_vec = */ nullptr, + /* aad_vec_length = */ 0, unprotected_vec, unprotected_vec_length, + ciphertext, &bytes_written, error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + if (bytes_written != data_length + rp->tag_length) { + maybe_copy_error_msg( + "Bytes written expects to be data length plus tag length.", + error_details); + return GRPC_STATUS_INTERNAL; + } + /* Increments the crypter counter. */ + return increment_counter(rp->ctr, error_details); +} + +grpc_status_code alts_iovec_record_protocol_privacy_integrity_unprotect( + alts_iovec_record_protocol* rp, iovec_t header, + const iovec_t* protected_vec, size_t protected_vec_length, + iovec_t unprotected_data, char** error_details) { + /* Input sanity checks. */ + if (rp == nullptr) { + maybe_copy_error_msg("Input iovec_record_protocol is nullptr.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (rp->is_integrity_only) { + maybe_copy_error_msg( + "Privacy-integrity operations are not allowed for this object.", + error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + if (rp->is_protect) { + maybe_copy_error_msg( + "Unprotect operations are not allowed for this object.", error_details); + return GRPC_STATUS_FAILED_PRECONDITION; + } + /* Protected data size should be no less than tag size. */ + size_t protected_data_length = + get_total_length(protected_vec, protected_vec_length); + if (protected_data_length < rp->tag_length) { + maybe_copy_error_msg( + "Protected data length should be more than the tag length.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + /* Ensures header has sufficient size. */ + if (header.iov_base == nullptr) { + maybe_copy_error_msg("Header is nullptr.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + if (header.iov_len != alts_iovec_record_protocol_get_header_length()) { + maybe_copy_error_msg("Header length is incorrect.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + /* Ensures unprotected data iovec has sufficient size. */ + if (unprotected_data.iov_len != protected_data_length - rp->tag_length) { + maybe_copy_error_msg("Unprotected data size is incorrect.", error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + /* Verify frame header. */ + grpc_status_code status = verify_frame_header( + protected_data_length, static_cast(header.iov_base), + error_details); + if (status != GRPC_STATUS_OK) { + return status; + } + /* Decrypt protected data by calling AEAD crypter. 
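+ * protected_vec carries the ciphertext followed by the tag; the crypter verifies
+ * the tag as part of decryption and, on success, writes exactly
+ * protected_data_length - tag_length plaintext bytes into unprotected_data.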
*/ + size_t bytes_written = 0; + status = gsec_aead_crypter_decrypt_iovec( + rp->crypter, alts_counter_get_counter(rp->ctr), + alts_counter_get_size(rp->ctr), /* aad_vec = */ nullptr, + /* aad_vec_length = */ 0, protected_vec, protected_vec_length, + unprotected_data, &bytes_written, error_details); + if (status != GRPC_STATUS_OK) { + maybe_append_error_msg(" Frame decryption failed.", error_details); + return GRPC_STATUS_INTERNAL; + } + if (bytes_written != protected_data_length - rp->tag_length) { + maybe_copy_error_msg( + "Bytes written expects to be protected data length minus tag length.", + error_details); + return GRPC_STATUS_INTERNAL; + } + /* Increments the crypter counter. */ + return increment_counter(rp->ctr, error_details); +} + +grpc_status_code alts_iovec_record_protocol_create( + gsec_aead_crypter* crypter, size_t overflow_size, bool is_client, + bool is_integrity_only, bool is_protect, alts_iovec_record_protocol** rp, + char** error_details) { + if (crypter == nullptr || rp == nullptr) { + maybe_copy_error_msg( + "Invalid nullptr arguments to alts_iovec_record_protocol create.", + error_details); + return GRPC_STATUS_INVALID_ARGUMENT; + } + alts_iovec_record_protocol* impl = static_cast( + gpr_zalloc(sizeof(alts_iovec_record_protocol))); + /* Gets counter length. */ + size_t counter_length = 0; + grpc_status_code status = + gsec_aead_crypter_nonce_length(crypter, &counter_length, error_details); + if (status != GRPC_STATUS_OK) { + goto cleanup; + } + /* Creates counters. */ + status = + alts_counter_create(is_protect ? !is_client : is_client, counter_length, + overflow_size, &impl->ctr, error_details); + if (status != GRPC_STATUS_OK) { + goto cleanup; + } + /* Gets tag length. */ + status = + gsec_aead_crypter_tag_length(crypter, &impl->tag_length, error_details); + if (status != GRPC_STATUS_OK) { + goto cleanup; + } + impl->crypter = crypter; + impl->is_integrity_only = is_integrity_only; + impl->is_protect = is_protect; + *rp = impl; + return GRPC_STATUS_OK; +cleanup: + alts_counter_destroy(impl->ctr); + gpr_free(impl); + return GRPC_STATUS_FAILED_PRECONDITION; +} + +void alts_iovec_record_protocol_destroy(alts_iovec_record_protocol* rp) { + if (rp != nullptr) { + alts_counter_destroy(rp->ctr); + gsec_aead_crypter_destroy(rp->crypter); + gpr_free(rp); + } +} diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h new file mode 100644 index 000000000..0b7d1bf5b --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h @@ -0,0 +1,199 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_IOVEC_RECORD_PROTOCOL_H +#define GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_IOVEC_RECORD_PROTOCOL_H + +#include + +#include + +#include "src/core/tsi/alts/crypt/gsec.h" + +constexpr size_t kZeroCopyFrameMessageType = 0x06; +constexpr size_t kZeroCopyFrameLengthFieldSize = 4; +constexpr size_t kZeroCopyFrameMessageTypeFieldSize = 4; +constexpr size_t kZeroCopyFrameHeaderSize = + kZeroCopyFrameLengthFieldSize + kZeroCopyFrameMessageTypeFieldSize; + +// Limit k on number of frames such that at most 2^(8 * k) frames can be sent. +constexpr size_t kAltsRecordProtocolRekeyFrameLimit = 8; +constexpr size_t kAltsRecordProtocolFrameLimit = 5; + +/* An implementation of alts record protocol. The API is thread-compatible. */ + +typedef struct iovec iovec_t; + +typedef struct alts_iovec_record_protocol alts_iovec_record_protocol; + +/** + * This method gets the length of record protocol frame header. + */ +size_t alts_iovec_record_protocol_get_header_length(); + +/** + * This method gets the length of record protocol frame tag. + * + * - rp: an alts_iovec_record_protocol instance. + * + * On success, the method returns the length of record protocol frame tag. + * Otherwise, it returns zero. + */ +size_t alts_iovec_record_protocol_get_tag_length( + const alts_iovec_record_protocol* rp); + +/** + * This method returns maximum allowed unprotected data size, given maximum + * protected frame size. + * + * - rp: an alts_iovec_record_protocol instance. + * - max_protected_frame_size: maximum protected frame size. + * + * On success, the method returns the maximum allowed unprotected data size. + * Otherwise, it returns zero. + */ +size_t alts_iovec_record_protocol_max_unprotected_data_size( + const alts_iovec_record_protocol* rp, size_t max_protected_frame_size); + +/** + * This method performs integrity-only protect operation on a + * alts_iovec_record_protocol instance, i.e., compute frame header and tag. The + * caller needs to allocate the memory for header and tag prior to calling this + * method. + * + * - rp: an alts_iovec_record_protocol instance. + * - unprotected_vec: an iovec array containing unprotected data. + * - unprotected_vec_length: the array length of unprotected_vec. + * - header: an iovec containing the output frame header. + * - tag: an iovec containing the output frame tag. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is OK to pass nullptr into error_details. + * + * On success, the method returns GRPC_STATUS_OK. Otherwise, it returns an + * error status code along with its details specified in error_details (if + * error_details is not nullptr). + */ +grpc_status_code alts_iovec_record_protocol_integrity_only_protect( + alts_iovec_record_protocol* rp, const iovec_t* unprotected_vec, + size_t unprotected_vec_length, iovec_t header, iovec_t tag, + char** error_details); + +/** + * This method performs integrity-only unprotect operation on a + * alts_iovec_record_protocol instance, i.e., verify frame header and tag. + * + * - rp: an alts_iovec_record_protocol instance. + * - protected_vec: an iovec array containing protected data. + * - protected_vec_length: the array length of protected_vec. + * - header: an iovec containing the frame header. + * - tag: an iovec containing the frame tag. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is OK to pass nullptr into error_details. 
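+ *
+ * Illustrative call sketch only (not upstream documentation; rp, vecs and
+ * vec_count are assumed to exist, and error_details is omitted here):
+ *
+ *   unsigned char hdr[kZeroCopyFrameHeaderSize];
+ *   unsigned char* tag_buf = static_cast<unsigned char*>(
+ *       gpr_malloc(alts_iovec_record_protocol_get_tag_length(rp)));
+ *   iovec_t header = {hdr, alts_iovec_record_protocol_get_header_length()};
+ *   iovec_t tag = {tag_buf, alts_iovec_record_protocol_get_tag_length(rp)};
+ *   grpc_status_code s = alts_iovec_record_protocol_integrity_only_protect(
+ *       rp, vecs, vec_count, header, tag, nullptr);
+ *   // ... send header, data and tag on the wire, then gpr_free(tag_buf).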
+ * + * On success, the method returns GRPC_STATUS_OK. Otherwise, it returns an + * error status code along with its details specified in error_details (if + * error_details is not nullptr). + */ +grpc_status_code alts_iovec_record_protocol_integrity_only_unprotect( + alts_iovec_record_protocol* rp, const iovec_t* protected_vec, + size_t protected_vec_length, iovec_t header, iovec_t tag, + char** error_details); + +/** + * This method performs privacy-integrity protect operation on a + * alts_iovec_record_protocol instance, i.e., compute a protected frame. The + * caller needs to allocate the memory for the protected frame prior to calling + * this method. + * + * - rp: an alts_iovec_record_protocol instance. + * - unprotected_vec: an iovec array containing unprotected data. + * - unprotected_vec_length: the array length of unprotected_vec. + * - protected_frame: an iovec containing the output protected frame. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is OK to pass nullptr into error_details. + * + * On success, the method returns GRPC_STATUS_OK. Otherwise, it returns an + * error status code along with its details specified in error_details (if + * error_details is not nullptr). + */ +grpc_status_code alts_iovec_record_protocol_privacy_integrity_protect( + alts_iovec_record_protocol* rp, const iovec_t* unprotected_vec, + size_t unprotected_vec_length, iovec_t protected_frame, + char** error_details); + +/** + * This method performs privacy-integrity unprotect operation on a + * alts_iovec_record_protocol instance given a full protected frame, i.e., + * compute the unprotected data. The caller needs to allocated the memory for + * the unprotected data prior to calling this method. + * + * - rp: an alts_iovec_record_protocol instance. + * - header: an iovec containing the frame header. + * - protected_vec: an iovec array containing protected data including the tag. + * - protected_vec_length: the array length of protected_vec. + * - unprotected_data: an iovec containing the output unprotected data. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is OK to pass nullptr into error_details. + * + * On success, the method returns GRPC_STATUS_OK. Otherwise, it returns an + * error status code along with its details specified in error_details (if + * error_details is not nullptr). + */ +grpc_status_code alts_iovec_record_protocol_privacy_integrity_unprotect( + alts_iovec_record_protocol* rp, iovec_t header, + const iovec_t* protected_vec, size_t protected_vec_length, + iovec_t unprotected_data, char** error_details); + +/** + * This method creates an alts_iovec_record_protocol instance, given a + * gsec_aead_crypter instance, a flag indicating if the created instance will be + * used at the client or server side, and a flag indicating if the created + * instance will be used for integrity-only mode or privacy-integrity mode. The + * ownership of gsec_aead_crypter instance is transferred to this new object. + * + * - crypter: a gsec_aead_crypter instance used to perform AEAD decryption. + * - overflow_size: overflow size of counter in bytes. + * - is_client: a flag indicating if the alts_iovec_record_protocol instance + * will be used at the client or server side. + * - is_integrity_only: a flag indicating if the alts_iovec_record_protocol + * instance will be used for integrity-only or privacy-integrity mode. 
+ * - is_protect: a flag indicating if the alts_grpc_record_protocol instance + * will be used for protect or unprotect. + * - rp: an alts_iovec_record_protocol instance to be returned from + * the method. + * - error_details: a buffer containing an error message if the method does not + * function correctly. It is OK to pass nullptr into error_details. + * + * On success, the method returns GRPC_STATUS_OK. Otherwise, it returns an + * error status code along with its details specified in error_details (if + * error_details is not nullptr). + */ +grpc_status_code alts_iovec_record_protocol_create( + gsec_aead_crypter* crypter, size_t overflow_size, bool is_client, + bool is_integrity_only, bool is_protect, alts_iovec_record_protocol** rp, + char** error_details); + +/** + * This method destroys an alts_iovec_record_protocol instance by de-allocating + * all of its occupied memory. A gsec_aead_crypter instance passed in at + * gsec_alts_crypter instance creation time will be destroyed in this method. + */ +void alts_iovec_record_protocol_destroy(alts_iovec_record_protocol* rp); + +#endif /* GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_IOVEC_RECORD_PROTOCOL_H \ + */ diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc new file mode 100644 index 000000000..608213745 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc @@ -0,0 +1,296 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h" + +#include + +#include +#include + +#include "src/core/lib/gpr/useful.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/tsi/alts/crypt/gsec.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h" +#include "src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h" +#include "src/core/tsi/transport_security_grpc.h" + +constexpr size_t kMinFrameLength = 1024; +constexpr size_t kDefaultFrameLength = 16 * 1024; +constexpr size_t kMaxFrameLength = 1024 * 1024; + +/** + * Main struct for alts_zero_copy_grpc_protector. + * We choose to have two alts_grpc_record_protocol objects and two sets of slice + * buffers: one for protect and the other for unprotect, so that protect and + * unprotect can be executed in parallel. Implementations of this object must be + * thread compatible. 
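+ *
+ * Illustrative usage sketch only (not part of the upstream source; key and
+ * key_size are caller-supplied, and an active grpc_core::ExecCtx is required):
+ *
+ *   tsi_zero_copy_grpc_protector* protector = nullptr;
+ *   size_t max_frame = 16 * 1024;  // clamped into [kMinFrameLength, kMaxFrameLength]
+ *   // args: key, key_size, is_rekey, is_client, is_integrity_only,
+ *   //       max_protected_frame_size (in/out), protector (out).
+ *   tsi_result rc = alts_zero_copy_grpc_protector_create(
+ *       key, key_size, false, true, true, &max_frame, &protector);
+ *   if (rc == TSI_OK) {
+ *     // Drive protect/unprotect through the tsi_zero_copy_grpc_protector
+ *     // interface from transport_security_grpc.h; destroy it on shutdown.
+ *   }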
+ */ +typedef struct alts_zero_copy_grpc_protector { + tsi_zero_copy_grpc_protector base; + alts_grpc_record_protocol* record_protocol; + alts_grpc_record_protocol* unrecord_protocol; + size_t max_protected_frame_size; + size_t max_unprotected_data_size; + grpc_slice_buffer unprotected_staging_sb; + grpc_slice_buffer protected_sb; + grpc_slice_buffer protected_staging_sb; + uint32_t parsed_frame_size; +} alts_zero_copy_grpc_protector; + +/** + * Given a slice buffer, parses the first 4 bytes little-endian unsigned frame + * size and returns the total frame size including the frame field. Caller + * needs to make sure the input slice buffer has at least 4 bytes. Returns true + * on success and false on failure. + */ +static bool read_frame_size(const grpc_slice_buffer* sb, + uint32_t* total_frame_size) { + if (sb == nullptr || sb->length < kZeroCopyFrameLengthFieldSize) { + return false; + } + uint8_t frame_size_buffer[kZeroCopyFrameLengthFieldSize]; + uint8_t* buf = frame_size_buffer; + /* Copies the first 4 bytes to a temporary buffer. */ + size_t remaining = kZeroCopyFrameLengthFieldSize; + for (size_t i = 0; i < sb->count; i++) { + size_t slice_length = GRPC_SLICE_LENGTH(sb->slices[i]); + if (remaining <= slice_length) { + memcpy(buf, GRPC_SLICE_START_PTR(sb->slices[i]), remaining); + remaining = 0; + break; + } else { + memcpy(buf, GRPC_SLICE_START_PTR(sb->slices[i]), slice_length); + buf += slice_length; + remaining -= slice_length; + } + } + GPR_ASSERT(remaining == 0); + /* Gets little-endian frame size. */ + uint32_t frame_size = (((uint32_t)frame_size_buffer[3]) << 24) | + (((uint32_t)frame_size_buffer[2]) << 16) | + (((uint32_t)frame_size_buffer[1]) << 8) | + ((uint32_t)frame_size_buffer[0]); + if (frame_size > kMaxFrameLength) { + gpr_log(GPR_ERROR, "Frame size is larger than maximum frame size"); + return false; + } + /* Returns frame size including frame length field. */ + *total_frame_size = + static_cast(frame_size + kZeroCopyFrameLengthFieldSize); + return true; +} + +/** + * Creates an alts_grpc_record_protocol object, given key, key size, and flags + * to indicate whether the record_protocol object uses the rekeying AEAD, + * whether the object is for client or server, whether the object is for + * integrity-only or privacy-integrity mode, and whether the object is is used + * for protect or unprotect. + */ +static tsi_result create_alts_grpc_record_protocol( + const uint8_t* key, size_t key_size, bool is_rekey, bool is_client, + bool is_integrity_only, bool is_protect, + alts_grpc_record_protocol** record_protocol) { + if (key == nullptr || record_protocol == nullptr) { + return TSI_INVALID_ARGUMENT; + } + grpc_status_code status; + gsec_aead_crypter* crypter = nullptr; + char* error_details = nullptr; + status = gsec_aes_gcm_aead_crypter_create(key, key_size, kAesGcmNonceLength, + kAesGcmTagLength, is_rekey, + &crypter, &error_details); + if (status != GRPC_STATUS_OK) { + gpr_log(GPR_ERROR, "Failed to create AEAD crypter, %s", error_details); + gpr_free(error_details); + return TSI_INTERNAL_ERROR; + } + size_t overflow_limit = is_rekey ? kAltsRecordProtocolRekeyFrameLimit + : kAltsRecordProtocolFrameLimit; + /* Creates alts_grpc_record_protocol with AEAD crypter ownership transferred. + */ + tsi_result result = + is_integrity_only + ? 
alts_grpc_integrity_only_record_protocol_create( + crypter, overflow_limit, is_client, is_protect, record_protocol) + : alts_grpc_privacy_integrity_record_protocol_create( + crypter, overflow_limit, is_client, is_protect, + record_protocol); + if (result != TSI_OK) { + gsec_aead_crypter_destroy(crypter); + return result; + } + return TSI_OK; +} + +/* --- tsi_zero_copy_grpc_protector methods implementation. --- */ + +static tsi_result alts_zero_copy_grpc_protector_protect( + tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices) { + if (self == nullptr || unprotected_slices == nullptr || + protected_slices == nullptr) { + gpr_log(GPR_ERROR, "Invalid nullptr arguments to zero-copy grpc protect."); + return TSI_INVALID_ARGUMENT; + } + alts_zero_copy_grpc_protector* protector = + reinterpret_cast(self); + /* Calls alts_grpc_record_protocol protect repeatly. */ + while (unprotected_slices->length > protector->max_unprotected_data_size) { + grpc_slice_buffer_move_first(unprotected_slices, + protector->max_unprotected_data_size, + &protector->unprotected_staging_sb); + tsi_result status = alts_grpc_record_protocol_protect( + protector->record_protocol, &protector->unprotected_staging_sb, + protected_slices); + if (status != TSI_OK) { + return status; + } + } + return alts_grpc_record_protocol_protect( + protector->record_protocol, unprotected_slices, protected_slices); +} + +static tsi_result alts_zero_copy_grpc_protector_unprotect( + tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices) { + if (self == nullptr || unprotected_slices == nullptr || + protected_slices == nullptr) { + gpr_log(GPR_ERROR, + "Invalid nullptr arguments to zero-copy grpc unprotect."); + return TSI_INVALID_ARGUMENT; + } + alts_zero_copy_grpc_protector* protector = + reinterpret_cast(self); + grpc_slice_buffer_move_into(protected_slices, &protector->protected_sb); + /* Keep unprotecting each frame if possible. */ + while (protector->protected_sb.length >= kZeroCopyFrameLengthFieldSize) { + if (protector->parsed_frame_size == 0) { + /* We have not parsed frame size yet. Parses frame size. */ + if (!read_frame_size(&protector->protected_sb, + &protector->parsed_frame_size)) { + grpc_slice_buffer_reset_and_unref_internal(&protector->protected_sb); + return TSI_DATA_CORRUPTED; + } + } + if (protector->protected_sb.length < protector->parsed_frame_size) break; + /* At this point, protected_sb contains at least one frame of data. 
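+ * If the buffer holds exactly one frame it is handed to the record protocol
+ * directly; otherwise the first parsed_frame_size bytes are moved into
+ * protected_staging_sb first, so the record protocol always sees a single frame.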
*/ + tsi_result status; + if (protector->protected_sb.length == protector->parsed_frame_size) { + status = alts_grpc_record_protocol_unprotect(protector->unrecord_protocol, + &protector->protected_sb, + unprotected_slices); + } else { + grpc_slice_buffer_move_first(&protector->protected_sb, + protector->parsed_frame_size, + &protector->protected_staging_sb); + status = alts_grpc_record_protocol_unprotect( + protector->unrecord_protocol, &protector->protected_staging_sb, + unprotected_slices); + } + protector->parsed_frame_size = 0; + if (status != TSI_OK) { + grpc_slice_buffer_reset_and_unref_internal(&protector->protected_sb); + return status; + } + } + return TSI_OK; +} + +static void alts_zero_copy_grpc_protector_destroy( + tsi_zero_copy_grpc_protector* self) { + if (self == nullptr) { + return; + } + alts_zero_copy_grpc_protector* protector = + reinterpret_cast(self); + alts_grpc_record_protocol_destroy(protector->record_protocol); + alts_grpc_record_protocol_destroy(protector->unrecord_protocol); + grpc_slice_buffer_destroy_internal(&protector->unprotected_staging_sb); + grpc_slice_buffer_destroy_internal(&protector->protected_sb); + grpc_slice_buffer_destroy_internal(&protector->protected_staging_sb); + gpr_free(protector); +} + +static const tsi_zero_copy_grpc_protector_vtable + alts_zero_copy_grpc_protector_vtable = { + alts_zero_copy_grpc_protector_protect, + alts_zero_copy_grpc_protector_unprotect, + alts_zero_copy_grpc_protector_destroy}; + +tsi_result alts_zero_copy_grpc_protector_create( + const uint8_t* key, size_t key_size, bool is_rekey, bool is_client, + bool is_integrity_only, size_t* max_protected_frame_size, + tsi_zero_copy_grpc_protector** protector) { + if (grpc_core::ExecCtx::Get() == nullptr || key == nullptr || + protector == nullptr) { + gpr_log( + GPR_ERROR, + "Invalid nullptr arguments to alts_zero_copy_grpc_protector create."); + return TSI_INVALID_ARGUMENT; + } + /* Creates alts_zero_copy_protector. */ + alts_zero_copy_grpc_protector* impl = + static_cast( + gpr_zalloc(sizeof(alts_zero_copy_grpc_protector))); + /* Creates alts_grpc_record_protocol objects. */ + tsi_result status = create_alts_grpc_record_protocol( + key, key_size, is_rekey, is_client, is_integrity_only, + /*is_protect=*/true, &impl->record_protocol); + if (status == TSI_OK) { + status = create_alts_grpc_record_protocol( + key, key_size, is_rekey, is_client, is_integrity_only, + /*is_protect=*/false, &impl->unrecord_protocol); + if (status == TSI_OK) { + /* Sets maximum frame size. */ + size_t max_protected_frame_size_to_set = kDefaultFrameLength; + if (max_protected_frame_size != nullptr) { + *max_protected_frame_size = + GPR_MIN(*max_protected_frame_size, kMaxFrameLength); + *max_protected_frame_size = + GPR_MAX(*max_protected_frame_size, kMinFrameLength); + max_protected_frame_size_to_set = *max_protected_frame_size; + } + impl->max_protected_frame_size = max_protected_frame_size_to_set; + impl->max_unprotected_data_size = + alts_grpc_record_protocol_max_unprotected_data_size( + impl->record_protocol, max_protected_frame_size_to_set); + GPR_ASSERT(impl->max_unprotected_data_size > 0); + /* Allocates internal slice buffers. */ + grpc_slice_buffer_init(&impl->unprotected_staging_sb); + grpc_slice_buffer_init(&impl->protected_sb); + grpc_slice_buffer_init(&impl->protected_staging_sb); + impl->parsed_frame_size = 0; + impl->base.vtable = &alts_zero_copy_grpc_protector_vtable; + *protector = &impl->base; + return TSI_OK; + } + } + + /* Cleanup if create failed. 
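+ * alts_grpc_record_protocol_destroy() is a no-op on nullptr and impl was
+ * zero-allocated, so both destroy calls below are safe no matter which create
+ * step failed.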
*/ + alts_grpc_record_protocol_destroy(impl->record_protocol); + alts_grpc_record_protocol_destroy(impl->unrecord_protocol); + gpr_free(impl); + return TSI_INTERNAL_ERROR; +} diff --git a/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h new file mode 100644 index 000000000..71e953cfc --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h @@ -0,0 +1,52 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_ZERO_COPY_GRPC_PROTECTOR_H +#define GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_ZERO_COPY_GRPC_PROTECTOR_H + +#include + +#include + +#include "src/core/tsi/transport_security_grpc.h" + +/** + * This method creates an ALTS zero-copy grpc protector. + * + * - key: a symmetric key used to seal/unseal frames. + * - key_size: the size of symmetric key. + * - is_rekey: use rekeying AEAD crypter. + * - is_client: a flag indicating if the protector will be used at client or + * server side. + * - is_integrity_only: a flag indicating if the protector instance will be + * used for integrity-only or privacy-integrity mode. + * - max_protected_frame_size: an in/out parameter indicating max frame size + * to be used by the protector. If it is nullptr, the default frame size will + * be used. Otherwise, the provided frame size will be adjusted (if not + * falling into a valid frame range) and used. + * - protector: a pointer to the zero-copy protector returned from the method. + * + * This method returns TSI_OK on success or a specific error code otherwise. + */ +tsi_result alts_zero_copy_grpc_protector_create( + const uint8_t* key, size_t key_size, bool is_rekey, bool is_client, + bool is_integrity_only, size_t* max_protected_frame_size, + tsi_zero_copy_grpc_protector** protector); + +#endif /* GRPC_CORE_TSI_ALTS_ZERO_COPY_FRAME_PROTECTOR_ALTS_ZERO_COPY_GRPC_PROTECTOR_H \ + */ diff --git a/Sources/CgRPC/src/core/tsi/alts_transport_security.cc b/Sources/CgRPC/src/core/tsi/alts_transport_security.cc new file mode 100644 index 000000000..2fd408103 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts_transport_security.cc @@ -0,0 +1,63 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include "src/core/tsi/alts_transport_security.h" + +#include + +static alts_shared_resource g_alts_resource; + +alts_shared_resource* alts_get_shared_resource(void) { + return &g_alts_resource; +} + +static void grpc_tsi_alts_wait_for_cq_drain() { + gpr_mu_lock(&g_alts_resource.mu); + while (!g_alts_resource.is_cq_drained) { + gpr_cv_wait(&g_alts_resource.cv, &g_alts_resource.mu, + gpr_inf_future(GPR_CLOCK_REALTIME)); + } + gpr_mu_unlock(&g_alts_resource.mu); +} + +void grpc_tsi_alts_signal_for_cq_destroy() { + gpr_mu_lock(&g_alts_resource.mu); + g_alts_resource.is_cq_drained = true; + gpr_cv_signal(&g_alts_resource.cv); + gpr_mu_unlock(&g_alts_resource.mu); +} + +void grpc_tsi_alts_init() { + memset(&g_alts_resource, 0, sizeof(alts_shared_resource)); + gpr_mu_init(&g_alts_resource.mu); + gpr_cv_init(&g_alts_resource.cv); +} + +void grpc_tsi_alts_shutdown() { + if (g_alts_resource.cq != nullptr) { + grpc_completion_queue_shutdown(g_alts_resource.cq); + grpc_tsi_alts_wait_for_cq_drain(); + grpc_completion_queue_destroy(g_alts_resource.cq); + grpc_channel_destroy(g_alts_resource.channel); + g_alts_resource.thread.Join(); + } + gpr_cv_destroy(&g_alts_resource.cv); + gpr_mu_destroy(&g_alts_resource.mu); +} diff --git a/Sources/CgRPC/src/core/tsi/alts_transport_security.h b/Sources/CgRPC/src/core/tsi/alts_transport_security.h new file mode 100644 index 000000000..d6b8e1113 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/alts_transport_security.h @@ -0,0 +1,47 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_ALTS_TRANSPORT_SECURITY_H +#define GRPC_CORE_TSI_ALTS_TRANSPORT_SECURITY_H + +#include + +#include +#include + +#include "src/core/lib/gprpp/thd.h" + +typedef struct alts_shared_resource { + grpc_core::Thread thread; + grpc_channel* channel; + grpc_completion_queue* cq; + gpr_mu mu; + gpr_cv cv; + bool is_cq_drained; +} alts_shared_resource; + +/* This method returns the address of alts_shared_resource object shared by all + * TSI handshakes. */ +alts_shared_resource* alts_get_shared_resource(void); + +/* This method signals the thread that invokes grpc_tsi_alts_shutdown() to + * continue with destroying the cq as a part of shutdown process. 
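+ * It sets is_cq_drained and signals the condition variable on which
+ * grpc_tsi_alts_shutdown() blocks before destroying the completion queue.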
*/ + +void grpc_tsi_alts_signal_for_cq_destroy(void); + +#endif /* GRPC_CORE_TSI_ALTS_TRANSPORT_SECURITY_H */ diff --git a/Sources/CgRPC/src/core/tsi/fake_transport_security.c b/Sources/CgRPC/src/core/tsi/fake_transport_security.cc similarity index 69% rename from Sources/CgRPC/src/core/tsi/fake_transport_security.c rename to Sources/CgRPC/src/core/tsi/fake_transport_security.cc index 64043fea0..ad08b50ed 100644 --- a/Sources/CgRPC/src/core/tsi/fake_transport_security.c +++ b/Sources/CgRPC/src/core/tsi/fake_transport_security.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/tsi/fake_transport_security.h" #include @@ -23,8 +25,8 @@ #include #include -#include -#include + +#include "src/core/lib/gpr/useful.h" #include "src/core/lib/slice/slice_internal.h" #include "src/core/tsi/transport_security_grpc.h" @@ -41,7 +43,7 @@ where the size field value is the size of the size field plus the size of the data encoded in little endian on 4 bytes. */ typedef struct { - unsigned char *data; + unsigned char* data; size_t size; size_t allocated_size; size_t offset; @@ -63,7 +65,7 @@ typedef struct { int needs_incoming_message; tsi_fake_frame incoming_frame; tsi_fake_frame outgoing_frame; - unsigned char *outgoing_bytes_buffer; + unsigned char* outgoing_bytes_buffer; size_t outgoing_bytes_buffer_size; tsi_result result; } tsi_fake_handshaker; @@ -85,10 +87,10 @@ typedef struct { /* --- Utils. ---*/ -static const char *tsi_fake_handshake_message_strings[] = { +static const char* tsi_fake_handshake_message_strings[] = { "CLIENT_INIT", "SERVER_INIT", "CLIENT_FINISHED", "SERVER_FINISHED"}; -static const char *tsi_fake_handshake_message_to_string(int msg) { +static const char* tsi_fake_handshake_message_to_string(int msg) { if (msg < 0 || msg >= TSI_FAKE_HANDSHAKE_MESSAGE_MAX) { gpr_log(GPR_ERROR, "Invalid message %d", msg); return "UNKNOWN"; @@ -97,12 +99,11 @@ static const char *tsi_fake_handshake_message_to_string(int msg) { } static tsi_result tsi_fake_handshake_message_from_string( - const char *msg_string, tsi_fake_handshake_message *msg) { - tsi_fake_handshake_message i; - for (i = 0; i < TSI_FAKE_HANDSHAKE_MESSAGE_MAX; i++) { + const char* msg_string, tsi_fake_handshake_message* msg) { + for (int i = 0; i < TSI_FAKE_HANDSHAKE_MESSAGE_MAX; i++) { if (strncmp(msg_string, tsi_fake_handshake_message_strings[i], strlen(tsi_fake_handshake_message_strings[i])) == 0) { - *msg = i; + *msg = static_cast(i); return TSI_OK; } } @@ -110,22 +111,23 @@ static tsi_result tsi_fake_handshake_message_from_string( return TSI_DATA_CORRUPTED; } -static uint32_t load32_little_endian(const unsigned char *buf) { - return ((uint32_t)(buf[0]) | (uint32_t)(buf[1] << 8) | - (uint32_t)(buf[2] << 16) | (uint32_t)(buf[3] << 24)); +static uint32_t load32_little_endian(const unsigned char* buf) { + return (static_cast(buf[0]) | static_cast(buf[1] << 8) | + static_cast(buf[2] << 16) | + static_cast(buf[3] << 24)); } -static void store32_little_endian(uint32_t value, unsigned char *buf) { - buf[3] = (unsigned char)((value >> 24) & 0xFF); - buf[2] = (unsigned char)((value >> 16) & 0xFF); - buf[1] = (unsigned char)((value >> 8) & 0xFF); - buf[0] = (unsigned char)((value)&0xFF); +static void store32_little_endian(uint32_t value, unsigned char* buf) { + buf[3] = static_cast((value >> 24) & 0xFF); + buf[2] = static_cast((value >> 16) & 0xFF); + buf[1] = static_cast((value >> 8) & 0xFF); + buf[0] = static_cast((value)&0xFF); } -static uint32_t read_frame_size(const grpc_slice_buffer *sb) { - GPR_ASSERT(sb != NULL && sb->length >= 
TSI_FAKE_FRAME_HEADER_SIZE); +static uint32_t read_frame_size(const grpc_slice_buffer* sb) { + GPR_ASSERT(sb != nullptr && sb->length >= TSI_FAKE_FRAME_HEADER_SIZE); uint8_t frame_size_buffer[TSI_FAKE_FRAME_HEADER_SIZE]; - uint8_t *buf = frame_size_buffer; + uint8_t* buf = frame_size_buffer; /* Copies the first 4 bytes to a temporary buffer. */ size_t remaining = TSI_FAKE_FRAME_HEADER_SIZE; for (size_t i = 0; i < sb->count; i++) { @@ -144,7 +146,7 @@ static uint32_t read_frame_size(const grpc_slice_buffer *sb) { return load32_little_endian(frame_size_buffer); } -static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) { +static void tsi_fake_frame_reset(tsi_fake_frame* frame, int needs_draining) { frame->offset = 0; frame->needs_draining = needs_draining; if (!needs_draining) frame->size = 0; @@ -152,12 +154,14 @@ static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) { /* Checks if the frame's allocated size is at least frame->size, and reallocs * more memory if necessary. */ -static void tsi_fake_frame_ensure_size(tsi_fake_frame *frame) { - if (frame->data == NULL) { +static void tsi_fake_frame_ensure_size(tsi_fake_frame* frame) { + if (frame->data == nullptr) { frame->allocated_size = frame->size; - frame->data = gpr_malloc(frame->allocated_size); + frame->data = + static_cast(gpr_malloc(frame->allocated_size)); } else if (frame->size > frame->allocated_size) { - unsigned char *new_data = gpr_realloc(frame->data, frame->size); + unsigned char* new_data = + static_cast(gpr_realloc(frame->data, frame->size)); frame->data = new_data; frame->allocated_size = frame->size; } @@ -166,17 +170,18 @@ static void tsi_fake_frame_ensure_size(tsi_fake_frame *frame) { /* Decodes the serialized fake frame contained in incoming_bytes, and fills * frame with the contents of the decoded frame. * This method should not be called if frame->needs_framing is not 0. 
*/ -static tsi_result tsi_fake_frame_decode(const unsigned char *incoming_bytes, - size_t *incoming_bytes_size, - tsi_fake_frame *frame) { +static tsi_result tsi_fake_frame_decode(const unsigned char* incoming_bytes, + size_t* incoming_bytes_size, + tsi_fake_frame* frame) { size_t available_size = *incoming_bytes_size; size_t to_read_size = 0; - const unsigned char *bytes_cursor = incoming_bytes; + const unsigned char* bytes_cursor = incoming_bytes; if (frame->needs_draining) return TSI_INTERNAL_ERROR; - if (frame->data == NULL) { + if (frame->data == nullptr) { frame->allocated_size = TSI_FAKE_FRAME_INITIAL_ALLOCATED_SIZE; - frame->data = gpr_malloc(frame->allocated_size); + frame->data = + static_cast(gpr_malloc(frame->allocated_size)); } if (frame->offset < TSI_FAKE_FRAME_HEADER_SIZE) { @@ -186,7 +191,7 @@ static tsi_result tsi_fake_frame_decode(const unsigned char *incoming_bytes, memcpy(frame->data + frame->offset, bytes_cursor, available_size); bytes_cursor += available_size; frame->offset += available_size; - *incoming_bytes_size = (size_t)(bytes_cursor - incoming_bytes); + *incoming_bytes_size = static_cast(bytes_cursor - incoming_bytes); return TSI_INCOMPLETE_DATA; } memcpy(frame->data + frame->offset, bytes_cursor, to_read_size); @@ -202,12 +207,12 @@ static tsi_result tsi_fake_frame_decode(const unsigned char *incoming_bytes, memcpy(frame->data + frame->offset, bytes_cursor, available_size); frame->offset += available_size; bytes_cursor += available_size; - *incoming_bytes_size = (size_t)(bytes_cursor - incoming_bytes); + *incoming_bytes_size = static_cast(bytes_cursor - incoming_bytes); return TSI_INCOMPLETE_DATA; } memcpy(frame->data + frame->offset, bytes_cursor, to_read_size); bytes_cursor += to_read_size; - *incoming_bytes_size = (size_t)(bytes_cursor - incoming_bytes); + *incoming_bytes_size = static_cast(bytes_cursor - incoming_bytes); tsi_fake_frame_reset(frame, 1 /* needs_draining */); return TSI_OK; } @@ -215,9 +220,9 @@ static tsi_result tsi_fake_frame_decode(const unsigned char *incoming_bytes, /* Encodes a fake frame into its wire format and places the result in * outgoing_bytes. outgoing_bytes_size indicates the size of the encoded frame. * This method should not be called if frame->needs_framing is 0. */ -static tsi_result tsi_fake_frame_encode(unsigned char *outgoing_bytes, - size_t *outgoing_bytes_size, - tsi_fake_frame *frame) { +static tsi_result tsi_fake_frame_encode(unsigned char* outgoing_bytes, + size_t* outgoing_bytes_size, + tsi_fake_frame* frame) { size_t to_write_size = frame->size - frame->offset; if (!frame->needs_draining) return TSI_INTERNAL_ERROR; if (*outgoing_bytes_size < to_write_size) { @@ -233,36 +238,37 @@ static tsi_result tsi_fake_frame_encode(unsigned char *outgoing_bytes, /* Sets the payload of a fake frame to contain the given data blob, where * data_size indicates the size of data. */ -static tsi_result tsi_fake_frame_set_data(unsigned char *data, size_t data_size, - tsi_fake_frame *frame) { +static tsi_result tsi_fake_frame_set_data(unsigned char* data, size_t data_size, + tsi_fake_frame* frame) { frame->offset = 0; frame->size = data_size + TSI_FAKE_FRAME_HEADER_SIZE; tsi_fake_frame_ensure_size(frame); - store32_little_endian((uint32_t)frame->size, frame->data); + store32_little_endian(static_cast(frame->size), frame->data); memcpy(frame->data + TSI_FAKE_FRAME_HEADER_SIZE, data, data_size); tsi_fake_frame_reset(frame, 1 /* needs draining */); return TSI_OK; } /* Destroys the contents of a fake frame. 
*/ -static void tsi_fake_frame_destruct(tsi_fake_frame *frame) { - if (frame->data != NULL) gpr_free(frame->data); +static void tsi_fake_frame_destruct(tsi_fake_frame* frame) { + if (frame->data != nullptr) gpr_free(frame->data); } /* --- tsi_frame_protector methods implementation. ---*/ -static tsi_result fake_protector_protect(tsi_frame_protector *self, - const unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size, - unsigned char *protected_output_frames, - size_t *protected_output_frames_size) { +static tsi_result fake_protector_protect(tsi_frame_protector* self, + const unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size, + unsigned char* protected_output_frames, + size_t* protected_output_frames_size) { tsi_result result = TSI_OK; - tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self; + tsi_fake_frame_protector* impl = + reinterpret_cast(self); unsigned char frame_header[TSI_FAKE_FRAME_HEADER_SIZE]; - tsi_fake_frame *frame = &impl->protect_frame; + tsi_fake_frame* frame = &impl->protect_frame; size_t saved_output_size = *protected_output_frames_size; size_t drained_size = 0; - size_t *num_bytes_written = protected_output_frames_size; + size_t* num_bytes_written = protected_output_frames_size; *num_bytes_written = 0; /* Try to drain first. */ @@ -286,7 +292,8 @@ static tsi_result fake_protector_protect(tsi_frame_protector *self, if (frame->size == 0) { /* New frame, create a header. */ size_t written_in_frame_size = 0; - store32_little_endian((uint32_t)impl->max_frame_size, frame_header); + store32_little_endian(static_cast(impl->max_frame_size), + frame_header); written_in_frame_size = TSI_FAKE_FRAME_HEADER_SIZE; result = tsi_fake_frame_decode(frame_header, &written_in_frame_size, frame); if (result != TSI_INCOMPLETE_DATA) { @@ -313,17 +320,18 @@ static tsi_result fake_protector_protect(tsi_frame_protector *self, } static tsi_result fake_protector_protect_flush( - tsi_frame_protector *self, unsigned char *protected_output_frames, - size_t *protected_output_frames_size, size_t *still_pending_size) { + tsi_frame_protector* self, unsigned char* protected_output_frames, + size_t* protected_output_frames_size, size_t* still_pending_size) { tsi_result result = TSI_OK; - tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self; - tsi_fake_frame *frame = &impl->protect_frame; + tsi_fake_frame_protector* impl = + reinterpret_cast(self); + tsi_fake_frame* frame = &impl->protect_frame; if (!frame->needs_draining) { /* Create a short frame. */ frame->size = frame->offset; frame->offset = 0; frame->needs_draining = 1; - store32_little_endian((uint32_t)frame->size, + store32_little_endian(static_cast(frame->size), frame->data); /* Overwrite header. 
*/ } result = tsi_fake_frame_encode(protected_output_frames, @@ -334,15 +342,16 @@ static tsi_result fake_protector_protect_flush( } static tsi_result fake_protector_unprotect( - tsi_frame_protector *self, const unsigned char *protected_frames_bytes, - size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size) { + tsi_frame_protector* self, const unsigned char* protected_frames_bytes, + size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size) { tsi_result result = TSI_OK; - tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self; - tsi_fake_frame *frame = &impl->unprotect_frame; + tsi_fake_frame_protector* impl = + reinterpret_cast(self); + tsi_fake_frame* frame = &impl->unprotect_frame; size_t saved_output_size = *unprotected_bytes_size; size_t drained_size = 0; - size_t *num_bytes_written = unprotected_bytes_size; + size_t* num_bytes_written = unprotected_bytes_size; *num_bytes_written = 0; /* Try to drain first. */ @@ -382,36 +391,40 @@ static tsi_result fake_protector_unprotect( return result; } -static void fake_protector_destroy(tsi_frame_protector *self) { - tsi_fake_frame_protector *impl = (tsi_fake_frame_protector *)self; +static void fake_protector_destroy(tsi_frame_protector* self) { + tsi_fake_frame_protector* impl = + reinterpret_cast(self); tsi_fake_frame_destruct(&impl->protect_frame); tsi_fake_frame_destruct(&impl->unprotect_frame); gpr_free(self); } static const tsi_frame_protector_vtable frame_protector_vtable = { - fake_protector_protect, fake_protector_protect_flush, - fake_protector_unprotect, fake_protector_destroy, + fake_protector_protect, + fake_protector_protect_flush, + fake_protector_unprotect, + fake_protector_destroy, }; /* --- tsi_zero_copy_grpc_protector methods implementation. ---*/ static tsi_result fake_zero_copy_grpc_protector_protect( - grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, - grpc_slice_buffer *unprotected_slices, - grpc_slice_buffer *protected_slices) { - if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) { + tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices) { + if (self == nullptr || unprotected_slices == nullptr || + protected_slices == nullptr) { return TSI_INVALID_ARGUMENT; } - tsi_fake_zero_copy_grpc_protector *impl = - (tsi_fake_zero_copy_grpc_protector *)self; + tsi_fake_zero_copy_grpc_protector* impl = + reinterpret_cast(self); /* Protects each frame. 
*/ while (unprotected_slices->length > 0) { size_t frame_length = GPR_MIN(impl->max_frame_size, unprotected_slices->length + TSI_FAKE_FRAME_HEADER_SIZE); grpc_slice slice = GRPC_SLICE_MALLOC(TSI_FAKE_FRAME_HEADER_SIZE); - store32_little_endian((uint32_t)frame_length, GRPC_SLICE_START_PTR(slice)); + store32_little_endian(static_cast(frame_length), + GRPC_SLICE_START_PTR(slice)); grpc_slice_buffer_add(protected_slices, slice); size_t data_length = frame_length - TSI_FAKE_FRAME_HEADER_SIZE; grpc_slice_buffer_move_first(unprotected_slices, data_length, @@ -421,14 +434,14 @@ static tsi_result fake_zero_copy_grpc_protector_protect( } static tsi_result fake_zero_copy_grpc_protector_unprotect( - grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, - grpc_slice_buffer *protected_slices, - grpc_slice_buffer *unprotected_slices) { - if (self == NULL || unprotected_slices == NULL || protected_slices == NULL) { + tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices) { + if (self == nullptr || unprotected_slices == nullptr || + protected_slices == nullptr) { return TSI_INVALID_ARGUMENT; } - tsi_fake_zero_copy_grpc_protector *impl = - (tsi_fake_zero_copy_grpc_protector *)self; + tsi_fake_zero_copy_grpc_protector* impl = + reinterpret_cast(self); grpc_slice_buffer_move_into(protected_slices, &impl->protected_sb); /* Unprotect each frame, if we get a full frame. */ while (impl->protected_sb.length >= TSI_FAKE_FRAME_HEADER_SIZE) { @@ -450,18 +463,18 @@ static tsi_result fake_zero_copy_grpc_protector_unprotect( impl->parsed_frame_size - TSI_FAKE_FRAME_HEADER_SIZE, unprotected_slices); impl->parsed_frame_size = 0; - grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &impl->header_sb); + grpc_slice_buffer_reset_and_unref_internal(&impl->header_sb); } return TSI_OK; } static void fake_zero_copy_grpc_protector_destroy( - grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self) { - if (self == NULL) return; - tsi_fake_zero_copy_grpc_protector *impl = - (tsi_fake_zero_copy_grpc_protector *)self; - grpc_slice_buffer_destroy_internal(exec_ctx, &impl->header_sb); - grpc_slice_buffer_destroy_internal(exec_ctx, &impl->protected_sb); + tsi_zero_copy_grpc_protector* self) { + if (self == nullptr) return; + tsi_fake_zero_copy_grpc_protector* impl = + reinterpret_cast(self); + grpc_slice_buffer_destroy_internal(&impl->header_sb); + grpc_slice_buffer_destroy_internal(&impl->protected_sb); gpr_free(impl); } @@ -476,12 +489,12 @@ static const tsi_zero_copy_grpc_protector_vtable typedef struct { tsi_handshaker_result base; - unsigned char *unused_bytes; + unsigned char* unused_bytes; size_t unused_bytes_size; } fake_handshaker_result; static tsi_result fake_handshaker_result_extract_peer( - const tsi_handshaker_result *self, tsi_peer *peer) { + const tsi_handshaker_result* self, tsi_peer* peer) { /* Construct a tsi_peer with 1 property: certificate type. 
*/ tsi_result result = tsi_construct_peer(1, peer); if (result != TSI_OK) return result; @@ -493,32 +506,32 @@ static tsi_result fake_handshaker_result_extract_peer( } static tsi_result fake_handshaker_result_create_zero_copy_grpc_protector( - void *exec_ctx, const tsi_handshaker_result *self, - size_t *max_output_protected_frame_size, - tsi_zero_copy_grpc_protector **protector) { + const tsi_handshaker_result* self, size_t* max_output_protected_frame_size, + tsi_zero_copy_grpc_protector** protector) { *protector = tsi_create_fake_zero_copy_grpc_protector(max_output_protected_frame_size); return TSI_OK; } static tsi_result fake_handshaker_result_create_frame_protector( - const tsi_handshaker_result *self, size_t *max_output_protected_frame_size, - tsi_frame_protector **protector) { + const tsi_handshaker_result* self, size_t* max_output_protected_frame_size, + tsi_frame_protector** protector) { *protector = tsi_create_fake_frame_protector(max_output_protected_frame_size); return TSI_OK; } static tsi_result fake_handshaker_result_get_unused_bytes( - const tsi_handshaker_result *self, const unsigned char **bytes, - size_t *bytes_size) { - fake_handshaker_result *result = (fake_handshaker_result *)self; + const tsi_handshaker_result* self, const unsigned char** bytes, + size_t* bytes_size) { + fake_handshaker_result* result = (fake_handshaker_result*)self; *bytes_size = result->unused_bytes_size; *bytes = result->unused_bytes; return TSI_OK; } -static void fake_handshaker_result_destroy(tsi_handshaker_result *self) { - fake_handshaker_result *result = (fake_handshaker_result *)self; +static void fake_handshaker_result_destroy(tsi_handshaker_result* self) { + fake_handshaker_result* result = + reinterpret_cast(self); gpr_free(result->unused_bytes); gpr_free(self); } @@ -532,16 +545,18 @@ static const tsi_handshaker_result_vtable handshaker_result_vtable = { }; static tsi_result fake_handshaker_result_create( - const unsigned char *unused_bytes, size_t unused_bytes_size, - tsi_handshaker_result **handshaker_result) { - if ((unused_bytes_size > 0 && unused_bytes == NULL) || - handshaker_result == NULL) { + const unsigned char* unused_bytes, size_t unused_bytes_size, + tsi_handshaker_result** handshaker_result) { + if ((unused_bytes_size > 0 && unused_bytes == nullptr) || + handshaker_result == nullptr) { return TSI_INVALID_ARGUMENT; } - fake_handshaker_result *result = gpr_zalloc(sizeof(*result)); + fake_handshaker_result* result = + static_cast(gpr_zalloc(sizeof(*result))); result->base.vtable = &handshaker_result_vtable; if (unused_bytes_size > 0) { - result->unused_bytes = gpr_malloc(unused_bytes_size); + result->unused_bytes = + static_cast(gpr_malloc(unused_bytes_size)); memcpy(result->unused_bytes, unused_bytes, unused_bytes_size); } result->unused_bytes_size = unused_bytes_size; @@ -552,8 +567,8 @@ static tsi_result fake_handshaker_result_create( /* --- tsi_handshaker methods implementation. 
---*/ static tsi_result fake_handshaker_get_bytes_to_send_to_peer( - tsi_handshaker *self, unsigned char *bytes, size_t *bytes_size) { - tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self; + tsi_handshaker* self, unsigned char* bytes, size_t* bytes_size) { + tsi_fake_handshaker* impl = reinterpret_cast(self); tsi_result result = TSI_OK; if (impl->needs_incoming_message || impl->result == TSI_OK) { *bytes_size = 0; @@ -561,16 +576,16 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer( } if (!impl->outgoing_frame.needs_draining) { tsi_fake_handshake_message next_message_to_send = - impl->next_message_to_send + 2; - const char *msg_string = + static_cast(impl->next_message_to_send + 2); + const char* msg_string = tsi_fake_handshake_message_to_string(impl->next_message_to_send); - result = tsi_fake_frame_set_data((unsigned char *)msg_string, + result = tsi_fake_frame_set_data((unsigned char*)msg_string, strlen(msg_string), &impl->outgoing_frame); if (result != TSI_OK) return result; if (next_message_to_send > TSI_FAKE_HANDSHAKE_MESSAGE_MAX) { next_message_to_send = TSI_FAKE_HANDSHAKE_MESSAGE_MAX; } - if (GRPC_TRACER_ON(tsi_tracing_enabled)) { + if (tsi_tracing_enabled.enabled()) { gpr_log(GPR_INFO, "%s prepared %s.", impl->is_client ? "Client" : "Server", tsi_fake_handshake_message_to_string(impl->next_message_to_send)); @@ -582,7 +597,7 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer( if (!impl->is_client && impl->next_message_to_send == TSI_FAKE_HANDSHAKE_MESSAGE_MAX) { /* We're done. */ - if (GRPC_TRACER_ON(tsi_tracing_enabled)) { + if (tsi_tracing_enabled.enabled()) { gpr_log(GPR_INFO, "Server is done."); } impl->result = TSI_OK; @@ -593,10 +608,11 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer( } static tsi_result fake_handshaker_process_bytes_from_peer( - tsi_handshaker *self, const unsigned char *bytes, size_t *bytes_size) { + tsi_handshaker* self, const unsigned char* bytes, size_t* bytes_size) { tsi_result result = TSI_OK; - tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self; - tsi_fake_handshake_message expected_msg = impl->next_message_to_send - 1; + tsi_fake_handshaker* impl = reinterpret_cast(self); + tsi_fake_handshake_message expected_msg = + static_cast(impl->next_message_to_send - 1); tsi_fake_handshake_message received_msg; if (!impl->needs_incoming_message || impl->result == TSI_OK) { @@ -608,7 +624,8 @@ static tsi_result fake_handshaker_process_bytes_from_peer( /* We now have a complete frame. */ result = tsi_fake_handshake_message_from_string( - (const char *)impl->incoming_frame.data + TSI_FAKE_FRAME_HEADER_SIZE, + reinterpret_cast(impl->incoming_frame.data) + + TSI_FAKE_FRAME_HEADER_SIZE, &received_msg); if (result != TSI_OK) { impl->result = result; @@ -619,7 +636,7 @@ static tsi_result fake_handshaker_process_bytes_from_peer( tsi_fake_handshake_message_to_string(received_msg), tsi_fake_handshake_message_to_string(expected_msg)); } - if (GRPC_TRACER_ON(tsi_tracing_enabled)) { + if (tsi_tracing_enabled.enabled()) { gpr_log(GPR_INFO, "%s received %s.", impl->is_client ? "Client" : "Server", tsi_fake_handshake_message_to_string(received_msg)); } @@ -627,7 +644,7 @@ static tsi_result fake_handshaker_process_bytes_from_peer( impl->needs_incoming_message = 0; if (impl->next_message_to_send == TSI_FAKE_HANDSHAKE_MESSAGE_MAX) { /* We're done. */ - if (GRPC_TRACER_ON(tsi_tracing_enabled)) { + if (tsi_tracing_enabled.enabled()) { gpr_log(GPR_INFO, "%s is done.", impl->is_client ? 
"Client" : "Server"); } impl->result = TSI_OK; @@ -635,13 +652,13 @@ static tsi_result fake_handshaker_process_bytes_from_peer( return TSI_OK; } -static tsi_result fake_handshaker_get_result(tsi_handshaker *self) { - tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self; +static tsi_result fake_handshaker_get_result(tsi_handshaker* self) { + tsi_fake_handshaker* impl = reinterpret_cast(self); return impl->result; } -static void fake_handshaker_destroy(tsi_handshaker *self) { - tsi_fake_handshaker *impl = (tsi_fake_handshaker *)self; +static void fake_handshaker_destroy(tsi_handshaker* self) { + tsi_fake_handshaker* impl = reinterpret_cast(self); tsi_fake_frame_destruct(&impl->incoming_frame); tsi_fake_frame_destruct(&impl->outgoing_frame); gpr_free(impl->outgoing_bytes_buffer); @@ -649,17 +666,18 @@ static void fake_handshaker_destroy(tsi_handshaker *self) { } static tsi_result fake_handshaker_next( - tsi_handshaker *self, const unsigned char *received_bytes, - size_t received_bytes_size, const unsigned char **bytes_to_send, - size_t *bytes_to_send_size, tsi_handshaker_result **handshaker_result, - tsi_handshaker_on_next_done_cb cb, void *user_data) { + tsi_handshaker* self, const unsigned char* received_bytes, + size_t received_bytes_size, const unsigned char** bytes_to_send, + size_t* bytes_to_send_size, tsi_handshaker_result** handshaker_result, + tsi_handshaker_on_next_done_cb cb, void* user_data) { /* Sanity check the arguments. */ - if ((received_bytes_size > 0 && received_bytes == NULL) || - bytes_to_send == NULL || bytes_to_send_size == NULL || - handshaker_result == NULL) { + if ((received_bytes_size > 0 && received_bytes == nullptr) || + bytes_to_send == nullptr || bytes_to_send_size == nullptr || + handshaker_result == nullptr) { return TSI_INVALID_ARGUMENT; } - tsi_fake_handshaker *handshaker = (tsi_fake_handshaker *)self; + tsi_fake_handshaker* handshaker = + reinterpret_cast(self); tsi_result result = TSI_OK; /* Decode and process a handshake frame from the peer. */ @@ -680,9 +698,9 @@ static tsi_result fake_handshaker_next( offset += sent_bytes_size; if (result == TSI_INCOMPLETE_DATA) { handshaker->outgoing_bytes_buffer_size *= 2; - handshaker->outgoing_bytes_buffer = + handshaker->outgoing_bytes_buffer = static_cast( gpr_realloc(handshaker->outgoing_bytes_buffer, - handshaker->outgoing_bytes_buffer_size); + handshaker->outgoing_bytes_buffer_size)); } } while (result == TSI_INCOMPLETE_DATA); if (result != TSI_OK) return result; @@ -691,10 +709,10 @@ static tsi_result fake_handshaker_next( /* Check if the handshake was completed. */ if (fake_handshaker_get_result(self) == TSI_HANDSHAKE_IN_PROGRESS) { - *handshaker_result = NULL; + *handshaker_result = nullptr; } else { /* Calculate the unused bytes. 
*/ - const unsigned char *unused_bytes = NULL; + const unsigned char* unused_bytes = nullptr; size_t unused_bytes_size = received_bytes_size - consumed_bytes_size; if (unused_bytes_size > 0) { unused_bytes = received_bytes + consumed_bytes_size; @@ -713,23 +731,25 @@ static tsi_result fake_handshaker_next( } static const tsi_handshaker_vtable handshaker_vtable = { - NULL, /* get_bytes_to_send_to_peer -- deprecated */ - NULL, /* process_bytes_from_peer -- deprecated */ - NULL, /* get_result -- deprecated */ - NULL, /* extract_peer -- deprecated */ - NULL, /* create_frame_protector -- deprecated */ + nullptr, /* get_bytes_to_send_to_peer -- deprecated */ + nullptr, /* process_bytes_from_peer -- deprecated */ + nullptr, /* get_result -- deprecated */ + nullptr, /* extract_peer -- deprecated */ + nullptr, /* create_frame_protector -- deprecated */ fake_handshaker_destroy, fake_handshaker_next, }; -tsi_handshaker *tsi_create_fake_handshaker(int is_client) { - tsi_fake_handshaker *impl = gpr_zalloc(sizeof(*impl)); +tsi_handshaker* tsi_create_fake_handshaker(int is_client) { + tsi_fake_handshaker* impl = + static_cast(gpr_zalloc(sizeof(*impl))); impl->base.vtable = &handshaker_vtable; impl->is_client = is_client; impl->result = TSI_HANDSHAKE_IN_PROGRESS; impl->outgoing_bytes_buffer_size = TSI_FAKE_HANDSHAKER_OUTGOING_BUFFER_INITIAL_SIZE; - impl->outgoing_bytes_buffer = gpr_malloc(impl->outgoing_bytes_buffer_size); + impl->outgoing_bytes_buffer = + static_cast(gpr_malloc(impl->outgoing_bytes_buffer_size)); if (is_client) { impl->needs_incoming_message = 0; impl->next_message_to_send = TSI_FAKE_CLIENT_INIT; @@ -740,22 +760,25 @@ tsi_handshaker *tsi_create_fake_handshaker(int is_client) { return &impl->base; } -tsi_frame_protector *tsi_create_fake_frame_protector( - size_t *max_protected_frame_size) { - tsi_fake_frame_protector *impl = gpr_zalloc(sizeof(*impl)); - impl->max_frame_size = (max_protected_frame_size == NULL) +tsi_frame_protector* tsi_create_fake_frame_protector( + size_t* max_protected_frame_size) { + tsi_fake_frame_protector* impl = + static_cast(gpr_zalloc(sizeof(*impl))); + impl->max_frame_size = (max_protected_frame_size == nullptr) ? TSI_FAKE_DEFAULT_FRAME_SIZE : *max_protected_frame_size; impl->base.vtable = &frame_protector_vtable; return &impl->base; } -tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector( - size_t *max_protected_frame_size) { - tsi_fake_zero_copy_grpc_protector *impl = gpr_zalloc(sizeof(*impl)); +tsi_zero_copy_grpc_protector* tsi_create_fake_zero_copy_grpc_protector( + size_t* max_protected_frame_size) { + tsi_fake_zero_copy_grpc_protector* impl = + static_cast( + gpr_zalloc(sizeof(*impl))); grpc_slice_buffer_init(&impl->header_sb); grpc_slice_buffer_init(&impl->protected_sb); - impl->max_frame_size = (max_protected_frame_size == NULL) + impl->max_frame_size = (max_protected_frame_size == nullptr) ? 
TSI_FAKE_DEFAULT_FRAME_SIZE : *max_protected_frame_size; impl->parsed_frame_size = 0; diff --git a/Sources/CgRPC/src/core/tsi/fake_transport_security.h b/Sources/CgRPC/src/core/tsi/fake_transport_security.h index 6159708a8..37791827e 100644 --- a/Sources/CgRPC/src/core/tsi/fake_transport_security.h +++ b/Sources/CgRPC/src/core/tsi/fake_transport_security.h @@ -19,11 +19,9 @@ #ifndef GRPC_CORE_TSI_FAKE_TRANSPORT_SECURITY_H #define GRPC_CORE_TSI_FAKE_TRANSPORT_SECURITY_H -#include "src/core/tsi/transport_security_interface.h" +#include -#ifdef __cplusplus -extern "C" { -#endif +#include "src/core/tsi/transport_security_interface.h" /* Value for the TSI_CERTIFICATE_TYPE_PEER_PROPERTY property for FAKE certs. */ #define TSI_FAKE_CERTIFICATE_TYPE "FAKE" @@ -33,19 +31,15 @@ extern "C" { No cryptography is performed in these objects. They just simulate handshake messages going back and forth for the handshaker and do some framing on cleartext data for the protector. */ -tsi_handshaker *tsi_create_fake_handshaker(int is_client); +tsi_handshaker* tsi_create_fake_handshaker(int is_client); /* Creates a protector directly without going through the handshake phase. */ -tsi_frame_protector *tsi_create_fake_frame_protector( - size_t *max_protected_frame_size); +tsi_frame_protector* tsi_create_fake_frame_protector( + size_t* max_protected_frame_size); /* Creates a zero-copy protector directly without going through the handshake * phase. */ -tsi_zero_copy_grpc_protector *tsi_create_fake_zero_copy_grpc_protector( - size_t *max_protected_frame_size); - -#ifdef __cplusplus -} -#endif +tsi_zero_copy_grpc_protector* tsi_create_fake_zero_copy_grpc_protector( + size_t* max_protected_frame_size); #endif /* GRPC_CORE_TSI_FAKE_TRANSPORT_SECURITY_H */ diff --git a/Sources/CgRPC/src/core/tsi/gts_transport_security.c b/Sources/CgRPC/src/core/tsi/gts_transport_security.c deleted file mode 100644 index e2ac685e4..000000000 --- a/Sources/CgRPC/src/core/tsi/gts_transport_security.c +++ /dev/null @@ -1,40 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/tsi/gts_transport_security.h" - -#include - -static gts_shared_resource g_gts_resource; - -gts_shared_resource *gts_get_shared_resource(void) { return &g_gts_resource; } - -void grpc_tsi_gts_init() { - memset(&g_gts_resource, 0, sizeof(gts_shared_resource)); - gpr_mu_init(&g_gts_resource.mu); -} - -void grpc_tsi_gts_shutdown() { - gpr_mu_destroy(&g_gts_resource.mu); - if (g_gts_resource.cq == NULL) { - return; - } - grpc_completion_queue_destroy(g_gts_resource.cq); - grpc_channel_destroy(g_gts_resource.channel); - gpr_thd_join(g_gts_resource.thread_id); -} diff --git a/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session.h b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session.h new file mode 100644 index 000000000..115221ec0 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session.h @@ -0,0 +1,73 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_SSL_SESSION_CACHE_SSL_SESSION_H +#define GRPC_CORE_TSI_SSL_SESSION_CACHE_SSL_SESSION_H + +#include + +#include + +extern "C" { +#include +} + +#include "src/core/lib/gprpp/ref_counted.h" + +// The main purpose of code here is to provide means to cache SSL sessions +// in a way that they can be shared between connections. +// +// SSL_SESSION stands for single instance of session and is not generally safe +// to share between SSL contexts with different lifetimes. It happens because +// not all SSL implementations guarantee immutability of SSL_SESSION object. +// See SSL_SESSION documentation in BoringSSL and OpenSSL for more details. + +namespace tsi { + +struct SslSessionDeleter { + void operator()(SSL_SESSION* session) { SSL_SESSION_free(session); } +}; + +typedef std::unique_ptr SslSessionPtr; + +/// SslCachedSession is an immutable thread-safe storage for single session +/// representation. It provides means to share SSL session data (e.g. TLS +/// ticket) between encrypted connections regardless of SSL context lifetime. +class SslCachedSession { + public: + // Not copyable nor movable. + SslCachedSession(const SslCachedSession&) = delete; + SslCachedSession& operator=(const SslCachedSession&) = delete; + + /// Create single cached instance of \a session. + static grpc_core::UniquePtr Create(SslSessionPtr session); + + virtual ~SslCachedSession() = default; + + /// Returns a copy of previously cached session. + virtual SslSessionPtr CopySession() const GRPC_ABSTRACT; + + GRPC_ABSTRACT_BASE_CLASS + + protected: + SslCachedSession() = default; +}; + +} // namespace tsi + +#endif /* GRPC_CORE_TSI_SSL_SESSION_CACHE_SSL_SESSION_H */ diff --git a/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc new file mode 100644 index 000000000..0da5a9616 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/ssl/session_cache/ssl_session.h" + +#ifdef OPENSSL_IS_BORINGSSL + +// BoringSSL allows SSL_SESSION to outlive SSL and SSL_CTX objects which are +// re-created by gRPC on every certificate rotation or subchannel creation. 
+// BoringSSL guarantees that SSL_SESSION is immutable so it's safe to share +// the same original session object between different threads and connections. + +namespace tsi { +namespace { + +class BoringSslCachedSession : public SslCachedSession { + public: + BoringSslCachedSession(SslSessionPtr session) + : session_(std::move(session)) {} + + SslSessionPtr CopySession() const override { + // SslSessionPtr will dereference on destruction. + SSL_SESSION_up_ref(session_.get()); + return SslSessionPtr(session_.get()); + } + + private: + SslSessionPtr session_; +}; + +} // namespace + +grpc_core::UniquePtr SslCachedSession::Create( + SslSessionPtr session) { + return grpc_core::UniquePtr( + grpc_core::New(std::move(session))); +} + +} // namespace tsi + +#endif /* OPENSSL_IS_BORINGSSL */ diff --git a/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_cache.cc b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_cache.cc new file mode 100644 index 000000000..fe4f83a13 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_cache.cc @@ -0,0 +1,211 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/ssl/session_cache/ssl_session_cache.h" + +#include "src/core/tsi/ssl/session_cache/ssl_session.h" + +#include +#include + +namespace tsi { + +static void cache_key_avl_destroy(void* key, void* unused) {} + +static void* cache_key_avl_copy(void* key, void* unused) { return key; } + +static long cache_key_avl_compare(void* key1, void* key2, void* unused) { + return grpc_slice_cmp(*static_cast(key1), + *static_cast(key2)); +} + +static void cache_value_avl_destroy(void* value, void* unused) {} + +static void* cache_value_avl_copy(void* value, void* unused) { return value; } + +// AVL only stores pointers, ownership belonges to the linked list. +static const grpc_avl_vtable cache_avl_vtable = { + cache_key_avl_destroy, cache_key_avl_copy, cache_key_avl_compare, + cache_value_avl_destroy, cache_value_avl_copy, +}; + +/// Node for single cached session. +class SslSessionLRUCache::Node { + public: + Node(const grpc_slice& key, SslSessionPtr session) : key_(key) { + SetSession(std::move(session)); + } + + ~Node() { grpc_slice_unref(key_); } + + // Not copyable nor movable. + Node(const Node&) = delete; + Node& operator=(const Node&) = delete; + + void* AvlKey() { return &key_; } + + /// Returns a copy of the node's cache session. + SslSessionPtr CopySession() const { return session_->CopySession(); } + + /// Set the \a session (which is moved) for the node. 
+ void SetSession(SslSessionPtr session) { + session_ = SslCachedSession::Create(std::move(session)); + } + + private: + friend class SslSessionLRUCache; + + grpc_slice key_; + grpc_core::UniquePtr session_; + + Node* next_ = nullptr; + Node* prev_ = nullptr; +}; + +SslSessionLRUCache::SslSessionLRUCache(size_t capacity) : capacity_(capacity) { + GPR_ASSERT(capacity > 0); + gpr_mu_init(&lock_); + entry_by_key_ = grpc_avl_create(&cache_avl_vtable); +} + +SslSessionLRUCache::~SslSessionLRUCache() { + Node* node = use_order_list_head_; + while (node) { + Node* next = node->next_; + grpc_core::Delete(node); + node = next; + } + grpc_avl_unref(entry_by_key_, nullptr); + gpr_mu_destroy(&lock_); +} + +size_t SslSessionLRUCache::Size() { + grpc_core::mu_guard guard(&lock_); + return use_order_list_size_; +} + +SslSessionLRUCache::Node* SslSessionLRUCache::FindLocked( + const grpc_slice& key) { + void* value = + grpc_avl_get(entry_by_key_, const_cast(&key), nullptr); + if (value == nullptr) { + return nullptr; + } + Node* node = static_cast(value); + // Move to the beginning. + Remove(node); + PushFront(node); + AssertInvariants(); + return node; +} + +void SslSessionLRUCache::Put(const char* key, SslSessionPtr session) { + grpc_core::mu_guard guard(&lock_); + Node* node = FindLocked(grpc_slice_from_static_string(key)); + if (node != nullptr) { + node->SetSession(std::move(session)); + return; + } + grpc_slice key_slice = grpc_slice_from_copied_string(key); + node = grpc_core::New(key_slice, std::move(session)); + PushFront(node); + entry_by_key_ = grpc_avl_add(entry_by_key_, node->AvlKey(), node, nullptr); + AssertInvariants(); + if (use_order_list_size_ > capacity_) { + GPR_ASSERT(use_order_list_tail_); + node = use_order_list_tail_; + Remove(node); + // Order matters, key is destroyed after deleting node. + entry_by_key_ = grpc_avl_remove(entry_by_key_, node->AvlKey(), nullptr); + grpc_core::Delete(node); + AssertInvariants(); + } +} + +SslSessionPtr SslSessionLRUCache::Get(const char* key) { + grpc_core::mu_guard guard(&lock_); + // Key is only used for lookups. 
+ grpc_slice key_slice = grpc_slice_from_static_string(key); + Node* node = FindLocked(key_slice); + if (node == nullptr) { + return nullptr; + } + return node->CopySession(); +} + +void SslSessionLRUCache::Remove(SslSessionLRUCache::Node* node) { + if (node->prev_ == nullptr) { + use_order_list_head_ = node->next_; + } else { + node->prev_->next_ = node->next_; + } + if (node->next_ == nullptr) { + use_order_list_tail_ = node->prev_; + } else { + node->next_->prev_ = node->prev_; + } + GPR_ASSERT(use_order_list_size_ >= 1); + use_order_list_size_--; +} + +void SslSessionLRUCache::PushFront(SslSessionLRUCache::Node* node) { + if (use_order_list_head_ == nullptr) { + use_order_list_head_ = node; + use_order_list_tail_ = node; + node->next_ = nullptr; + node->prev_ = nullptr; + } else { + node->next_ = use_order_list_head_; + node->next_->prev_ = node; + use_order_list_head_ = node; + node->prev_ = nullptr; + } + use_order_list_size_++; +} + +#ifndef NDEBUG +static size_t calculate_tree_size(grpc_avl_node* node) { + if (node == nullptr) { + return 0; + } + return 1 + calculate_tree_size(node->left) + calculate_tree_size(node->right); +} + +void SslSessionLRUCache::AssertInvariants() { + size_t size = 0; + Node* prev = nullptr; + Node* current = use_order_list_head_; + while (current != nullptr) { + size++; + GPR_ASSERT(current->prev_ == prev); + void* node = grpc_avl_get(entry_by_key_, current->AvlKey(), nullptr); + GPR_ASSERT(node == current); + prev = current; + current = current->next_; + } + GPR_ASSERT(prev == use_order_list_tail_); + GPR_ASSERT(size == use_order_list_size_); + GPR_ASSERT(calculate_tree_size(entry_by_key_.root) == use_order_list_size_); +} +#else +void SslSessionLRUCache::AssertInvariants() {} +#endif + +} // namespace tsi diff --git a/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_cache.h b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_cache.h new file mode 100644 index 000000000..a90cca1a2 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_cache.h @@ -0,0 +1,97 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_TSI_SSL_SESSION_CACHE_SSL_SESSION_CACHE_H +#define GRPC_CORE_TSI_SSL_SESSION_CACHE_SSL_SESSION_CACHE_H + +#include + +#include +#include + +extern "C" { +#include +} + +#include "src/core/lib/avl/avl.h" +#include "src/core/lib/gprpp/memory.h" +#include "src/core/lib/gprpp/ref_counted.h" +#include "src/core/tsi/ssl/session_cache/ssl_session.h" + +/// Cache for SSL sessions for sessions resumption. +/// +/// Older sessions may be evicted from the cache using LRU policy if capacity +/// limit is hit. All sessions are associated with some key, usually server +/// name. Note that servers are required to share session ticket encryption keys +/// in order for cache to be effective. +/// +/// This class is thread safe. 
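For reference, the LRU session cache declared below is driven roughly as follows. This is a minimal sketch, assuming the SSL_SESSION comes from an OpenSSL/BoringSSL new-session callback; the helper name, the capacity and the key string are illustrative only and not part of this patch (in the patch itself the cache is held by tsi_ssl_client_handshaker_factory):

#include "src/core/tsi/ssl/session_cache/ssl_session_cache.h"

// Sketch only: `fresh_session` is assumed to come from an SSL new-session
// callback; it is not produced anywhere in this snippet.
void cache_usage_sketch(tsi::SslSessionPtr fresh_session) {
  // Keep at most 16 sessions; the least recently used entry is evicted first.
  grpc_core::RefCountedPtr<tsi::SslSessionLRUCache> cache =
      tsi::SslSessionLRUCache::Create(16);
  // Sessions are keyed by server name, so a later connection to the same
  // target can resume instead of doing a full handshake.
  cache->Put("example.com:443", std::move(fresh_session));
  // On the next connection attempt, look the session up again.
  tsi::SslSessionPtr resumable = cache->Get("example.com:443");
  if (resumable != nullptr) {
    // Hand it to SSL_set_session(ssl, resumable.get()) before starting the
    // handshake (standard OpenSSL/BoringSSL resumption API).
  }
}
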
+ +namespace tsi { + +class SslSessionLRUCache : public grpc_core::RefCounted { + public: + /// Create new LRU cache with the given capacity. + static grpc_core::RefCountedPtr Create(size_t capacity) { + return grpc_core::MakeRefCounted(capacity); + } + + // Not copyable nor movable. + SslSessionLRUCache(const SslSessionLRUCache&) = delete; + SslSessionLRUCache& operator=(const SslSessionLRUCache&) = delete; + + /// Returns current number of sessions in the cache. + size_t Size(); + /// Add \a session in the cache using \a key. This operation may discard older + /// sessions. + void Put(const char* key, SslSessionPtr session); + /// Returns the session from the cache associated with \a key or null if not + /// found. + SslSessionPtr Get(const char* key); + + private: + // So New() can call our private ctor. + template + friend T* grpc_core::New(Args&&... args); + + // So Delete() can call our private dtor. + template + friend void grpc_core::Delete(T*); + + class Node; + + explicit SslSessionLRUCache(size_t capacity); + ~SslSessionLRUCache(); + + Node* FindLocked(const grpc_slice& key); + void Remove(Node* node); + void PushFront(Node* node); + void AssertInvariants(); + + gpr_mu lock_; + size_t capacity_; + + Node* use_order_list_head_ = nullptr; + Node* use_order_list_tail_ = nullptr; + size_t use_order_list_size_ = 0; + grpc_avl entry_by_key_; +}; + +} // namespace tsi + +#endif /* GRPC_CORE_TSI_SSL_SESSION_CACHE_SSL_SESSION_CACHE_H */ diff --git a/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc new file mode 100644 index 000000000..61c036c7e --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc @@ -0,0 +1,76 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/ssl/session_cache/ssl_session.h" + +#include + +#ifndef OPENSSL_IS_BORINGSSL + +// OpenSSL invalidates SSL_SESSION on SSL destruction making it pointless +// to cache sessions. The workaround is to serialize (relatively expensive) +// session into binary blob and re-create it from blob on every handshake. +// Note that it's safe to keep serialized session outside of SSL lifetime +// as openssl performs all necessary validation while attempting to use a +// session and creates a new one if something is wrong (e.g. server changed +// set of allowed codecs). 
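The serialize-and-restore round trip described in the comment above is the standard i2d/d2i pair; a minimal sketch using plain OpenSSL calls (the grpc_slice bookkeeping used by the real implementation below is omitted):

#include <openssl/ssl.h>
#include <vector>

// Serialize a session to a byte blob that can safely outlive the SSL object.
std::vector<unsigned char> serialize_session(SSL_SESSION* session) {
  int len = i2d_SSL_SESSION(session, nullptr);  // first call: size only
  if (len <= 0) return {};
  std::vector<unsigned char> bytes(static_cast<size_t>(len));
  unsigned char* out = bytes.data();
  i2d_SSL_SESSION(session, &out);  // second call: actual encoding
  return bytes;
}

// Rebuild a fresh SSL_SESSION from the blob for resumption. The caller owns
// the returned session and must SSL_SESSION_free() it (nullptr on bad input).
SSL_SESSION* restore_session(const std::vector<unsigned char>& bytes) {
  const unsigned char* in = bytes.data();
  return d2i_SSL_SESSION(nullptr, &in, static_cast<long>(bytes.size()));
}
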
+ +namespace tsi { +namespace { + +class OpenSslCachedSession : public SslCachedSession { + public: + OpenSslCachedSession(SslSessionPtr session) { + int size = i2d_SSL_SESSION(session.get(), nullptr); + GPR_ASSERT(size > 0); + grpc_slice slice = grpc_slice_malloc(size_t(size)); + unsigned char* start = GRPC_SLICE_START_PTR(slice); + int second_size = i2d_SSL_SESSION(session.get(), &start); + GPR_ASSERT(size == second_size); + serialized_session_ = slice; + } + + virtual ~OpenSslCachedSession() { grpc_slice_unref(serialized_session_); } + + SslSessionPtr CopySession() const override { + const unsigned char* data = GRPC_SLICE_START_PTR(serialized_session_); + size_t length = GRPC_SLICE_LENGTH(serialized_session_); + SSL_SESSION* session = d2i_SSL_SESSION(nullptr, &data, length); + if (session == nullptr) { + return SslSessionPtr(); + } + return SslSessionPtr(session); + } + + private: + grpc_slice serialized_session_; +}; + +} // namespace + +grpc_core::UniquePtr SslCachedSession::Create( + SslSessionPtr session) { + return grpc_core::UniquePtr( + grpc_core::New(std::move(session))); +} + +} // namespace tsi + +#endif /* OPENSSL_IS_BORINGSSL */ diff --git a/Sources/CgRPC/src/core/tsi/ssl_transport_security.c b/Sources/CgRPC/src/core/tsi/ssl_transport_security.cc similarity index 56% rename from Sources/CgRPC/src/core/tsi/ssl_transport_security.c rename to Sources/CgRPC/src/core/tsi/ssl_transport_security.cc index 7ebf9dd96..0ba658767 100644 --- a/Sources/CgRPC/src/core/tsi/ssl_transport_security.c +++ b/Sources/CgRPC/src/core/tsi/ssl_transport_security.cc @@ -16,10 +16,10 @@ * */ -#include "src/core/tsi/ssl_transport_security.h" - #include +#include "src/core/tsi/ssl_transport_security.h" + #include #include @@ -35,17 +35,21 @@ #include #include +#include #include -#include -#include +#include +extern "C" { #include #include /* For OPENSSL_free */ #include #include #include #include +} +#include "src/core/lib/gpr/useful.h" +#include "src/core/tsi/ssl/session_cache/ssl_session_cache.h" #include "src/core/tsi/ssl_types.h" #include "src/core/tsi/transport_security.h" @@ -67,16 +71,21 @@ /* --- Structure definitions. ---*/ +struct tsi_ssl_root_certs_store { + X509_STORE* store; +}; + struct tsi_ssl_handshaker_factory { - const tsi_ssl_handshaker_factory_vtable *vtable; + const tsi_ssl_handshaker_factory_vtable* vtable; gpr_refcount refcount; }; struct tsi_ssl_client_handshaker_factory { tsi_ssl_handshaker_factory base; - SSL_CTX *ssl_context; - unsigned char *alpn_protocol_list; + SSL_CTX* ssl_context; + unsigned char* alpn_protocol_list; size_t alpn_protocol_list_length; + grpc_core::RefCountedPtr session_cache; }; struct tsi_ssl_server_handshaker_factory { @@ -84,47 +93,50 @@ struct tsi_ssl_server_handshaker_factory { The tsi_peer array contains the subject names of the server certificates associated with the contexts at the same index. 
*/ tsi_ssl_handshaker_factory base; - SSL_CTX **ssl_contexts; - tsi_peer *ssl_context_x509_subject_names; + SSL_CTX** ssl_contexts; + tsi_peer* ssl_context_x509_subject_names; size_t ssl_context_count; - unsigned char *alpn_protocol_list; + unsigned char* alpn_protocol_list; size_t alpn_protocol_list_length; }; typedef struct { tsi_handshaker base; - SSL *ssl; - BIO *into_ssl; - BIO *from_ssl; + SSL* ssl; + BIO* network_io; tsi_result result; - tsi_ssl_handshaker_factory *factory_ref; + tsi_ssl_handshaker_factory* factory_ref; } tsi_ssl_handshaker; typedef struct { tsi_frame_protector base; - SSL *ssl; - BIO *into_ssl; - BIO *from_ssl; - unsigned char *buffer; + SSL* ssl; + BIO* network_io; + unsigned char* buffer; size_t buffer_size; size_t buffer_offset; } tsi_ssl_frame_protector; /* --- Library Initialization. ---*/ -static gpr_once init_openssl_once = GPR_ONCE_INIT; -static gpr_mu *openssl_mutexes = NULL; +static gpr_once g_init_openssl_once = GPR_ONCE_INIT; +static gpr_mu* g_openssl_mutexes = nullptr; +static int g_ssl_ctx_ex_factory_index = -1; +static void openssl_locking_cb(int mode, int type, const char* file, + int line) GRPC_UNUSED; +static unsigned long openssl_thread_id_cb(void) GRPC_UNUSED; +static const unsigned char kSslSessionIdContext[] = {'g', 'r', 'p', 'c'}; -static void openssl_locking_cb(int mode, int type, const char *file, int line) { +static void openssl_locking_cb(int mode, int type, const char* file, int line) { if (mode & CRYPTO_LOCK) { - gpr_mu_lock(&openssl_mutexes[type]); + gpr_mu_lock(&g_openssl_mutexes[type]); } else { - gpr_mu_unlock(&openssl_mutexes[type]); + gpr_mu_unlock(&g_openssl_mutexes[type]); } } static unsigned long openssl_thread_id_cb(void) { - return (unsigned long)gpr_thd_currentid(); + return static_cast(gpr_thd_currentid()); } static void init_openssl(void) { @@ -135,17 +147,21 @@ static void init_openssl(void) { OpenSSL_add_all_algorithms(); num_locks = CRYPTO_num_locks(); GPR_ASSERT(num_locks > 0); - openssl_mutexes = gpr_malloc((size_t)num_locks * sizeof(gpr_mu)); + g_openssl_mutexes = static_cast( + gpr_malloc(static_cast(num_locks) * sizeof(gpr_mu))); for (i = 0; i < CRYPTO_num_locks(); i++) { - gpr_mu_init(&openssl_mutexes[i]); + gpr_mu_init(&g_openssl_mutexes[i]); } CRYPTO_set_locking_callback(openssl_locking_cb); CRYPTO_set_id_callback(openssl_thread_id_cb); + g_ssl_ctx_ex_factory_index = + SSL_CTX_get_ex_new_index(0, nullptr, nullptr, nullptr, nullptr); + GPR_ASSERT(g_ssl_ctx_ex_factory_index != -1); } /* --- Ssl utils. ---*/ -static const char *ssl_error_string(int error) { +static const char* ssl_error_string(int error) { switch (error) { case SSL_ERROR_NONE: return "SSL_ERROR_NONE"; @@ -171,16 +187,16 @@ static const char *ssl_error_string(int error) { } /* TODO(jboeuf): Remove when we are past the debugging phase with this code. */ -static void ssl_log_where_info(const SSL *ssl, int where, int flag, - const char *msg) { - if ((where & flag) && GRPC_TRACER_ON(tsi_tracing_enabled)) { +static void ssl_log_where_info(const SSL* ssl, int where, int flag, + const char* msg) { + if ((where & flag) && tsi_tracing_enabled.enabled()) { gpr_log(GPR_INFO, "%20.20s - %30.30s - %5.10s", msg, SSL_state_string_long(ssl), SSL_state_string(ssl)); } } /* Used for debugging. TODO(jboeuf): Remove when code is mature enough. 
*/ -static void ssl_info_callback(const SSL *ssl, int where, int ret) { +static void ssl_info_callback(const SSL* ssl, int where, int ret) { if (ret == 0) { gpr_log(GPR_ERROR, "ssl_info_callback: error occured.\n"); return; @@ -193,7 +209,7 @@ static void ssl_info_callback(const SSL *ssl, int where, int ret) { /* Returns 1 if name looks like an IP address, 0 otherwise. This is a very rough heuristic, and only handles IPv6 in hexadecimal form. */ -static int looks_like_ip_address(const char *name) { +static int looks_like_ip_address(const char* name) { size_t i; size_t dot_count = 0; size_t num_size = 0; @@ -218,14 +234,14 @@ static int looks_like_ip_address(const char *name) { } /* Gets the subject CN from an X509 cert. */ -static tsi_result ssl_get_x509_common_name(X509 *cert, unsigned char **utf8, - size_t *utf8_size) { +static tsi_result ssl_get_x509_common_name(X509* cert, unsigned char** utf8, + size_t* utf8_size) { int common_name_index = -1; - X509_NAME_ENTRY *common_name_entry = NULL; - ASN1_STRING *common_name_asn1 = NULL; - X509_NAME *subject_name = X509_get_subject_name(cert); + X509_NAME_ENTRY* common_name_entry = nullptr; + ASN1_STRING* common_name_asn1 = nullptr; + X509_NAME* subject_name = X509_get_subject_name(cert); int utf8_returned_size = 0; - if (subject_name == NULL) { + if (subject_name == nullptr) { gpr_log(GPR_ERROR, "Could not get subject name from certificate."); return TSI_NOT_FOUND; } @@ -237,12 +253,12 @@ static tsi_result ssl_get_x509_common_name(X509 *cert, unsigned char **utf8, return TSI_NOT_FOUND; } common_name_entry = X509_NAME_get_entry(subject_name, common_name_index); - if (common_name_entry == NULL) { + if (common_name_entry == nullptr) { gpr_log(GPR_ERROR, "Could not get common name entry from certificate."); return TSI_INTERNAL_ERROR; } common_name_asn1 = X509_NAME_ENTRY_get_data(common_name_entry); - if (common_name_asn1 == NULL) { + if (common_name_asn1 == nullptr) { gpr_log(GPR_ERROR, "Could not get common name entry asn1 from certificate."); return TSI_INTERNAL_ERROR; @@ -252,20 +268,20 @@ static tsi_result ssl_get_x509_common_name(X509 *cert, unsigned char **utf8, gpr_log(GPR_ERROR, "Could not extract utf8 from asn1 string."); return TSI_OUT_OF_RESOURCES; } - *utf8_size = (size_t)utf8_returned_size; + *utf8_size = static_cast(utf8_returned_size); return TSI_OK; } /* Gets the subject CN of an X509 cert as a tsi_peer_property. */ static tsi_result peer_property_from_x509_common_name( - X509 *cert, tsi_peer_property *property) { - unsigned char *common_name; + X509* cert, tsi_peer_property* property) { + unsigned char* common_name; size_t common_name_size; tsi_result result = ssl_get_x509_common_name(cert, &common_name, &common_name_size); if (result != TSI_OK) { if (result == TSI_NOT_FOUND) { - common_name = NULL; + common_name = nullptr; common_name_size = 0; } else { return result; @@ -273,35 +289,35 @@ static tsi_result peer_property_from_x509_common_name( } result = tsi_construct_string_peer_property( TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY, - common_name == NULL ? "" : (const char *)common_name, common_name_size, - property); + common_name == nullptr ? "" : reinterpret_cast(common_name), + common_name_size, property); OPENSSL_free(common_name); return result; } /* Gets the X509 cert in PEM format as a tsi_peer_property. 
*/ -static tsi_result add_pem_certificate(X509 *cert, tsi_peer_property *property) { - BIO *bio = BIO_new(BIO_s_mem()); +static tsi_result add_pem_certificate(X509* cert, tsi_peer_property* property) { + BIO* bio = BIO_new(BIO_s_mem()); if (!PEM_write_bio_X509(bio, cert)) { BIO_free(bio); return TSI_INTERNAL_ERROR; } - char *contents; + char* contents; long len = BIO_get_mem_data(bio, &contents); if (len <= 0) { BIO_free(bio); return TSI_INTERNAL_ERROR; } tsi_result result = tsi_construct_string_peer_property( - TSI_X509_PEM_CERT_PROPERTY, (const char *)contents, (size_t)len, - property); + TSI_X509_PEM_CERT_PROPERTY, (const char*)contents, + static_cast(len), property); BIO_free(bio); return result; } /* Gets the subject SANs from an X509 cert as a tsi_peer_property. */ static tsi_result add_subject_alt_names_properties_to_peer( - tsi_peer *peer, GENERAL_NAMES *subject_alt_names, + tsi_peer* peer, GENERAL_NAMES* subject_alt_names, size_t subject_alt_name_count) { size_t i; tsi_result result = TSI_OK; @@ -310,11 +326,11 @@ static tsi_result add_subject_alt_names_properties_to_peer( peer->property_count -= subject_alt_name_count; for (i = 0; i < subject_alt_name_count; i++) { - GENERAL_NAME *subject_alt_name = + GENERAL_NAME* subject_alt_name = sk_GENERAL_NAME_value(subject_alt_names, TSI_SIZE_AS_SIZE(i)); /* Filter out the non-dns entries names. */ if (subject_alt_name->type == GEN_DNS) { - unsigned char *name = NULL; + unsigned char* name = nullptr; int name_size; name_size = ASN1_STRING_to_UTF8(&name, subject_alt_name->d.dNSName); if (name_size < 0) { @@ -323,8 +339,9 @@ static tsi_result add_subject_alt_names_properties_to_peer( break; } result = tsi_construct_string_peer_property( - TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, (const char *)name, - (size_t)name_size, &peer->properties[peer->property_count++]); + TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY, + reinterpret_cast(name), static_cast(name_size), + &peer->properties[peer->property_count++]); OPENSSL_free(name); } else if (subject_alt_name->type == GEN_IPADD) { char ntop_buf[INET6_ADDRSTRLEN]; @@ -339,9 +356,9 @@ static tsi_result add_subject_alt_names_properties_to_peer( result = TSI_INTERNAL_ERROR; break; } - const char *name = inet_ntop(af, subject_alt_name->d.iPAddress->data, + const char* name = inet_ntop(af, subject_alt_name->d.iPAddress->data, ntop_buf, INET6_ADDRSTRLEN); - if (name == NULL) { + if (name == nullptr) { gpr_log(GPR_ERROR, "Could not get IP string from asn1 octet."); result = TSI_INTERNAL_ERROR; break; @@ -357,20 +374,21 @@ static tsi_result add_subject_alt_names_properties_to_peer( } /* Gets information about the peer's X509 cert as a tsi_peer object. */ -static tsi_result peer_from_x509(X509 *cert, int include_certificate_type, - tsi_peer *peer) { +static tsi_result peer_from_x509(X509* cert, int include_certificate_type, + tsi_peer* peer) { /* TODO(jboeuf): Maybe add more properties. */ - GENERAL_NAMES *subject_alt_names = - X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0); - int subject_alt_name_count = (subject_alt_names != NULL) - ? (int)sk_GENERAL_NAME_num(subject_alt_names) - : 0; + GENERAL_NAMES* subject_alt_names = static_cast( + X509_get_ext_d2i(cert, NID_subject_alt_name, nullptr, nullptr)); + int subject_alt_name_count = + (subject_alt_names != nullptr) + ? static_cast(sk_GENERAL_NAME_num(subject_alt_names)) + : 0; size_t property_count; tsi_result result; GPR_ASSERT(subject_alt_name_count >= 0); - property_count = (include_certificate_type ? 
(size_t)1 : 0) + + property_count = (include_certificate_type ? static_cast(1) : 0) + 2 /* common name, certificate */ + - (size_t)subject_alt_name_count; + static_cast(subject_alt_name_count); result = tsi_construct_peer(property_count, peer); if (result != TSI_OK) return result; do { @@ -390,12 +408,12 @@ static tsi_result peer_from_x509(X509 *cert, int include_certificate_type, if (subject_alt_name_count != 0) { result = add_subject_alt_names_properties_to_peer( - peer, subject_alt_names, (size_t)subject_alt_name_count); + peer, subject_alt_names, static_cast(subject_alt_name_count)); if (result != TSI_OK) break; } } while (0); - if (subject_alt_names != NULL) { + if (subject_alt_names != nullptr) { sk_GENERAL_NAME_pop_free(subject_alt_names, GENERAL_NAME_free); } if (result != TSI_OK) tsi_peer_destruct(peer); @@ -407,18 +425,18 @@ static void log_ssl_error_stack(void) { unsigned long err; while ((err = ERR_get_error()) != 0) { char details[256]; - ERR_error_string_n((uint32_t)err, details, sizeof(details)); + ERR_error_string_n(static_cast(err), details, sizeof(details)); gpr_log(GPR_ERROR, "%s", details); } } /* Performs an SSL_read and handle errors. */ -static tsi_result do_ssl_read(SSL *ssl, unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size) { +static tsi_result do_ssl_read(SSL* ssl, unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size) { int read_from_ssl; GPR_ASSERT(*unprotected_bytes_size <= INT_MAX); - read_from_ssl = - SSL_read(ssl, unprotected_bytes, (int)*unprotected_bytes_size); + read_from_ssl = SSL_read(ssl, unprotected_bytes, + static_cast(*unprotected_bytes_size)); if (read_from_ssl <= 0) { read_from_ssl = SSL_get_error(ssl, read_from_ssl); switch (read_from_ssl) { @@ -441,17 +459,17 @@ static tsi_result do_ssl_read(SSL *ssl, unsigned char *unprotected_bytes, return TSI_PROTOCOL_FAILURE; } } - *unprotected_bytes_size = (size_t)read_from_ssl; + *unprotected_bytes_size = static_cast(read_from_ssl); return TSI_OK; } /* Performs an SSL_write and handle errors. */ -static tsi_result do_ssl_write(SSL *ssl, unsigned char *unprotected_bytes, +static tsi_result do_ssl_write(SSL* ssl, unsigned char* unprotected_bytes, size_t unprotected_bytes_size) { int ssl_write_result; GPR_ASSERT(unprotected_bytes_size <= INT_MAX); - ssl_write_result = - SSL_write(ssl, unprotected_bytes, (int)unprotected_bytes_size); + ssl_write_result = SSL_write(ssl, unprotected_bytes, + static_cast(unprotected_bytes_size)); if (ssl_write_result < 0) { ssl_write_result = SSL_get_error(ssl, ssl_write_result); if (ssl_write_result == SSL_ERROR_WANT_READ) { @@ -468,19 +486,20 @@ static tsi_result do_ssl_write(SSL *ssl, unsigned char *unprotected_bytes, } /* Loads an in-memory PEM certificate chain into the SSL context. 
*/ -static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX *context, - const char *pem_cert_chain, +static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX* context, + const char* pem_cert_chain, size_t pem_cert_chain_size) { tsi_result result = TSI_OK; - X509 *certificate = NULL; - BIO *pem; + X509* certificate = nullptr; + BIO* pem; GPR_ASSERT(pem_cert_chain_size <= INT_MAX); - pem = BIO_new_mem_buf((void *)pem_cert_chain, (int)pem_cert_chain_size); - if (pem == NULL) return TSI_OUT_OF_RESOURCES; + pem = BIO_new_mem_buf((void*)pem_cert_chain, + static_cast(pem_cert_chain_size)); + if (pem == nullptr) return TSI_OUT_OF_RESOURCES; do { - certificate = PEM_read_bio_X509_AUX(pem, NULL, NULL, ""); - if (certificate == NULL) { + certificate = PEM_read_bio_X509_AUX(pem, nullptr, nullptr, (void*)""); + if (certificate == nullptr) { result = TSI_INVALID_ARGUMENT; break; } @@ -489,8 +508,9 @@ static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX *context, break; } while (1) { - X509 *certificate_authority = PEM_read_bio_X509(pem, NULL, NULL, ""); - if (certificate_authority == NULL) { + X509* certificate_authority = + PEM_read_bio_X509(pem, nullptr, nullptr, (void*)""); + if (certificate_authority == nullptr) { ERR_clear_error(); break; /* Done reading. */ } @@ -505,23 +525,23 @@ static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX *context, } } while (0); - if (certificate != NULL) X509_free(certificate); + if (certificate != nullptr) X509_free(certificate); BIO_free(pem); return result; } /* Loads an in-memory PEM private key into the SSL context. */ -static tsi_result ssl_ctx_use_private_key(SSL_CTX *context, const char *pem_key, +static tsi_result ssl_ctx_use_private_key(SSL_CTX* context, const char* pem_key, size_t pem_key_size) { tsi_result result = TSI_OK; - EVP_PKEY *private_key = NULL; - BIO *pem; + EVP_PKEY* private_key = nullptr; + BIO* pem; GPR_ASSERT(pem_key_size <= INT_MAX); - pem = BIO_new_mem_buf((void *)pem_key, (int)pem_key_size); - if (pem == NULL) return TSI_OUT_OF_RESOURCES; + pem = BIO_new_mem_buf((void*)pem_key, static_cast(pem_key_size)); + if (pem == nullptr) return TSI_OUT_OF_RESOURCES; do { - private_key = PEM_read_bio_PrivateKey(pem, NULL, NULL, ""); - if (private_key == NULL) { + private_key = PEM_read_bio_PrivateKey(pem, nullptr, nullptr, (void*)""); + if (private_key == nullptr) { result = TSI_INVALID_ARGUMENT; break; } @@ -530,56 +550,53 @@ static tsi_result ssl_ctx_use_private_key(SSL_CTX *context, const char *pem_key, break; } } while (0); - if (private_key != NULL) EVP_PKEY_free(private_key); + if (private_key != nullptr) EVP_PKEY_free(private_key); BIO_free(pem); return result; } /* Loads in-memory PEM verification certs into the SSL context and optionally returns the verification cert names (root_names can be NULL). 
*/ -static tsi_result ssl_ctx_load_verification_certs(SSL_CTX *context, - const char *pem_roots, - size_t pem_roots_size, - STACK_OF(X509_NAME) * - *root_names) { +static tsi_result x509_store_load_certs(X509_STORE* cert_store, + const char* pem_roots, + size_t pem_roots_size, + STACK_OF(X509_NAME) * *root_names) { tsi_result result = TSI_OK; size_t num_roots = 0; - X509 *root = NULL; - X509_NAME *root_name = NULL; - BIO *pem; - X509_STORE *root_store; + X509* root = nullptr; + X509_NAME* root_name = nullptr; + BIO* pem; GPR_ASSERT(pem_roots_size <= INT_MAX); - pem = BIO_new_mem_buf((void *)pem_roots, (int)pem_roots_size); - root_store = SSL_CTX_get_cert_store(context); - if (root_store == NULL) return TSI_INVALID_ARGUMENT; - if (pem == NULL) return TSI_OUT_OF_RESOURCES; - if (root_names != NULL) { + pem = BIO_new_mem_buf((void*)pem_roots, static_cast(pem_roots_size)); + if (cert_store == nullptr) return TSI_INVALID_ARGUMENT; + if (pem == nullptr) return TSI_OUT_OF_RESOURCES; + if (root_names != nullptr) { *root_names = sk_X509_NAME_new_null(); - if (*root_names == NULL) return TSI_OUT_OF_RESOURCES; + if (*root_names == nullptr) return TSI_OUT_OF_RESOURCES; } while (1) { - root = PEM_read_bio_X509_AUX(pem, NULL, NULL, ""); - if (root == NULL) { + root = PEM_read_bio_X509_AUX(pem, nullptr, nullptr, (void*)""); + if (root == nullptr) { ERR_clear_error(); break; /* We're at the end of stream. */ } - if (root_names != NULL) { + if (root_names != nullptr) { root_name = X509_get_subject_name(root); - if (root_name == NULL) { + if (root_name == nullptr) { gpr_log(GPR_ERROR, "Could not get name from root certificate."); result = TSI_INVALID_ARGUMENT; break; } root_name = X509_NAME_dup(root_name); - if (root_name == NULL) { + if (root_name == nullptr) { result = TSI_OUT_OF_RESOURCES; break; } sk_X509_NAME_push(*root_names, root_name); - root_name = NULL; + root_name = nullptr; } - if (!X509_STORE_add_cert(root_store, root)) { + if (!X509_STORE_add_cert(cert_store, root)) { gpr_log(GPR_ERROR, "Could not add root certificate to ssl context."); result = TSI_INTERNAL_ERROR; break; @@ -594,25 +611,35 @@ static tsi_result ssl_ctx_load_verification_certs(SSL_CTX *context, } if (result != TSI_OK) { - if (root != NULL) X509_free(root); - if (root_names != NULL) { + if (root != nullptr) X509_free(root); + if (root_names != nullptr) { sk_X509_NAME_pop_free(*root_names, X509_NAME_free); - *root_names = NULL; - if (root_name != NULL) X509_NAME_free(root_name); + *root_names = nullptr; + if (root_name != nullptr) X509_NAME_free(root_name); } } BIO_free(pem); return result; } +static tsi_result ssl_ctx_load_verification_certs(SSL_CTX* context, + const char* pem_roots, + size_t pem_roots_size, + STACK_OF(X509_NAME) * + *root_name) { + X509_STORE* cert_store = SSL_CTX_get_cert_store(context); + return x509_store_load_certs(cert_store, pem_roots, pem_roots_size, + root_name); +} + /* Populates the SSL context with a private key and a cert chain, and sets the cipher list and the ephemeral ECDH key. 
*/ static tsi_result populate_ssl_context( - SSL_CTX *context, const tsi_ssl_pem_key_cert_pair *key_cert_pair, - const char *cipher_list) { + SSL_CTX* context, const tsi_ssl_pem_key_cert_pair* key_cert_pair, + const char* cipher_list) { tsi_result result = TSI_OK; - if (key_cert_pair != NULL) { - if (key_cert_pair->cert_chain != NULL) { + if (key_cert_pair != nullptr) { + if (key_cert_pair->cert_chain != nullptr) { result = ssl_ctx_use_certificate_chain(context, key_cert_pair->cert_chain, strlen(key_cert_pair->cert_chain)); if (result != TSI_OK) { @@ -620,7 +647,7 @@ static tsi_result populate_ssl_context( return result; } } - if (key_cert_pair->private_key != NULL) { + if (key_cert_pair->private_key != nullptr) { result = ssl_ctx_use_private_key(context, key_cert_pair->private_key, strlen(key_cert_pair->private_key)); if (result != TSI_OK || !SSL_CTX_check_private_key(context)) { @@ -629,12 +656,13 @@ static tsi_result populate_ssl_context( } } } - if ((cipher_list != NULL) && !SSL_CTX_set_cipher_list(context, cipher_list)) { + if ((cipher_list != nullptr) && + !SSL_CTX_set_cipher_list(context, cipher_list)) { gpr_log(GPR_ERROR, "Invalid cipher list: %s.", cipher_list); return TSI_INVALID_ARGUMENT; } { - EC_KEY *ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); + EC_KEY* ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); if (!SSL_CTX_set_tmp_ecdh(context, ecdh)) { gpr_log(GPR_ERROR, "Could not set ephemeral ECDH key."); EC_KEY_free(ecdh); @@ -647,55 +675,58 @@ static tsi_result populate_ssl_context( } /* Extracts the CN and the SANs from an X509 cert as a peer object. */ -static tsi_result extract_x509_subject_names_from_pem_cert(const char *pem_cert, - tsi_peer *peer) { +static tsi_result extract_x509_subject_names_from_pem_cert(const char* pem_cert, + tsi_peer* peer) { tsi_result result = TSI_OK; - X509 *cert = NULL; - BIO *pem; - pem = BIO_new_mem_buf((void *)pem_cert, (int)strlen(pem_cert)); - if (pem == NULL) return TSI_OUT_OF_RESOURCES; + X509* cert = nullptr; + BIO* pem; + pem = BIO_new_mem_buf((void*)pem_cert, static_cast(strlen(pem_cert))); + if (pem == nullptr) return TSI_OUT_OF_RESOURCES; - cert = PEM_read_bio_X509(pem, NULL, NULL, ""); - if (cert == NULL) { + cert = PEM_read_bio_X509(pem, nullptr, nullptr, (void*)""); + if (cert == nullptr) { gpr_log(GPR_ERROR, "Invalid certificate"); result = TSI_INVALID_ARGUMENT; } else { result = peer_from_x509(cert, 0, peer); } - if (cert != NULL) X509_free(cert); + if (cert != nullptr) X509_free(cert); BIO_free(pem); return result; } /* Builds the alpn protocol name list according to rfc 7301. */ static tsi_result build_alpn_protocol_name_list( - const char **alpn_protocols, uint16_t num_alpn_protocols, - unsigned char **protocol_name_list, size_t *protocol_name_list_length) { + const char** alpn_protocols, uint16_t num_alpn_protocols, + unsigned char** protocol_name_list, size_t* protocol_name_list_length) { uint16_t i; - unsigned char *current; - *protocol_name_list = NULL; + unsigned char* current; + *protocol_name_list = nullptr; *protocol_name_list_length = 0; if (num_alpn_protocols == 0) return TSI_INVALID_ARGUMENT; for (i = 0; i < num_alpn_protocols; i++) { - size_t length = alpn_protocols[i] == NULL ? 0 : strlen(alpn_protocols[i]); + size_t length = + alpn_protocols[i] == nullptr ? 
0 : strlen(alpn_protocols[i]); if (length == 0 || length > 255) { - gpr_log(GPR_ERROR, "Invalid protocol name length: %d.", (int)length); + gpr_log(GPR_ERROR, "Invalid protocol name length: %d.", + static_cast(length)); return TSI_INVALID_ARGUMENT; } *protocol_name_list_length += length + 1; } - *protocol_name_list = gpr_malloc(*protocol_name_list_length); - if (*protocol_name_list == NULL) return TSI_OUT_OF_RESOURCES; + *protocol_name_list = + static_cast(gpr_malloc(*protocol_name_list_length)); + if (*protocol_name_list == nullptr) return TSI_OUT_OF_RESOURCES; current = *protocol_name_list; for (i = 0; i < num_alpn_protocols; i++) { size_t length = strlen(alpn_protocols[i]); - *(current++) = (uint8_t)length; /* max checked above. */ + *(current++) = static_cast(length); /* max checked above. */ memcpy(current, alpn_protocols[i], length); current += length; } /* Safety check. */ if ((current < *protocol_name_list) || - ((uintptr_t)(current - *protocol_name_list) != + (static_cast(current - *protocol_name_list) != *protocol_name_list_length)) { return TSI_INTERNAL_ERROR; } @@ -706,35 +737,90 @@ static tsi_result build_alpn_protocol_name_list( // the server's certificate, but we need to pull it anyway, in case a higher // layer wants to look at it. In this case the verification may fail, but // we don't really care. -static int NullVerifyCallback(int preverify_ok, X509_STORE_CTX *ctx) { +static int NullVerifyCallback(int preverify_ok, X509_STORE_CTX* ctx) { return 1; } +/* --- tsi_ssl_root_certs_store methods implementation. ---*/ + +tsi_ssl_root_certs_store* tsi_ssl_root_certs_store_create( + const char* pem_roots) { + if (pem_roots == nullptr) { + gpr_log(GPR_ERROR, "The root certificates are empty."); + return nullptr; + } + tsi_ssl_root_certs_store* root_store = static_cast( + gpr_zalloc(sizeof(tsi_ssl_root_certs_store))); + if (root_store == nullptr) { + gpr_log(GPR_ERROR, "Could not allocate buffer for ssl_root_certs_store."); + return nullptr; + } + root_store->store = X509_STORE_new(); + if (root_store->store == nullptr) { + gpr_log(GPR_ERROR, "Could not allocate buffer for X509_STORE."); + gpr_free(root_store); + return nullptr; + } + tsi_result result = x509_store_load_certs(root_store->store, pem_roots, + strlen(pem_roots), nullptr); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Could not load root certificates."); + X509_STORE_free(root_store->store); + gpr_free(root_store); + return nullptr; + } + return root_store; +} + +void tsi_ssl_root_certs_store_destroy(tsi_ssl_root_certs_store* self) { + if (self == nullptr) return; + X509_STORE_free(self->store); + gpr_free(self); +} + +/* --- tsi_ssl_session_cache methods implementation. ---*/ + +tsi_ssl_session_cache* tsi_ssl_session_cache_create_lru(size_t capacity) { + /* Pointer will be dereferenced by unref call. */ + return reinterpret_cast( + tsi::SslSessionLRUCache::Create(capacity).release()); +} + +void tsi_ssl_session_cache_ref(tsi_ssl_session_cache* cache) { + /* Pointer will be dereferenced by unref call. */ + reinterpret_cast(cache)->Ref().release(); +} + +void tsi_ssl_session_cache_unref(tsi_ssl_session_cache* cache) { + reinterpret_cast(cache)->Unref(); +} + /* --- tsi_frame_protector methods implementation. 
---*/ -static tsi_result ssl_protector_protect(tsi_frame_protector *self, - const unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size, - unsigned char *protected_output_frames, - size_t *protected_output_frames_size) { - tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self; +static tsi_result ssl_protector_protect(tsi_frame_protector* self, + const unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size, + unsigned char* protected_output_frames, + size_t* protected_output_frames_size) { + tsi_ssl_frame_protector* impl = + reinterpret_cast(self); int read_from_ssl; size_t available; tsi_result result = TSI_OK; /* First see if we have some pending data in the SSL BIO. */ - int pending_in_ssl = (int)BIO_pending(impl->from_ssl); + int pending_in_ssl = static_cast(BIO_pending(impl->network_io)); if (pending_in_ssl > 0) { *unprotected_bytes_size = 0; GPR_ASSERT(*protected_output_frames_size <= INT_MAX); - read_from_ssl = BIO_read(impl->from_ssl, protected_output_frames, - (int)*protected_output_frames_size); + read_from_ssl = BIO_read(impl->network_io, protected_output_frames, + static_cast(*protected_output_frames_size)); if (read_from_ssl < 0) { gpr_log(GPR_ERROR, "Could not read from BIO even though some data is pending"); return TSI_INTERNAL_ERROR; } - *protected_output_frames_size = (size_t)read_from_ssl; + *protected_output_frames_size = static_cast(read_from_ssl); return TSI_OK; } @@ -755,23 +841,24 @@ static tsi_result ssl_protector_protect(tsi_frame_protector *self, if (result != TSI_OK) return result; GPR_ASSERT(*protected_output_frames_size <= INT_MAX); - read_from_ssl = BIO_read(impl->from_ssl, protected_output_frames, - (int)*protected_output_frames_size); + read_from_ssl = BIO_read(impl->network_io, protected_output_frames, + static_cast(*protected_output_frames_size)); if (read_from_ssl < 0) { gpr_log(GPR_ERROR, "Could not read from BIO after SSL_write."); return TSI_INTERNAL_ERROR; } - *protected_output_frames_size = (size_t)read_from_ssl; + *protected_output_frames_size = static_cast(read_from_ssl); *unprotected_bytes_size = available; impl->buffer_offset = 0; return TSI_OK; } static tsi_result ssl_protector_protect_flush( - tsi_frame_protector *self, unsigned char *protected_output_frames, - size_t *protected_output_frames_size, size_t *still_pending_size) { + tsi_frame_protector* self, unsigned char* protected_output_frames, + size_t* protected_output_frames_size, size_t* still_pending_size) { tsi_result result = TSI_OK; - tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self; + tsi_ssl_frame_protector* impl = + reinterpret_cast(self); int read_from_ssl = 0; int pending; @@ -781,34 +868,35 @@ static tsi_result ssl_protector_protect_flush( impl->buffer_offset = 0; } - pending = (int)BIO_pending(impl->from_ssl); + pending = static_cast(BIO_pending(impl->network_io)); GPR_ASSERT(pending >= 0); - *still_pending_size = (size_t)pending; + *still_pending_size = static_cast(pending); if (*still_pending_size == 0) return TSI_OK; GPR_ASSERT(*protected_output_frames_size <= INT_MAX); - read_from_ssl = BIO_read(impl->from_ssl, protected_output_frames, - (int)*protected_output_frames_size); + read_from_ssl = BIO_read(impl->network_io, protected_output_frames, + static_cast(*protected_output_frames_size)); if (read_from_ssl <= 0) { gpr_log(GPR_ERROR, "Could not read from BIO after SSL_write."); return TSI_INTERNAL_ERROR; } - *protected_output_frames_size = (size_t)read_from_ssl; - pending = (int)BIO_pending(impl->from_ssl); + 
*protected_output_frames_size = static_cast(read_from_ssl); + pending = static_cast(BIO_pending(impl->network_io)); GPR_ASSERT(pending >= 0); - *still_pending_size = (size_t)pending; + *still_pending_size = static_cast(pending); return TSI_OK; } static tsi_result ssl_protector_unprotect( - tsi_frame_protector *self, const unsigned char *protected_frames_bytes, - size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size) { + tsi_frame_protector* self, const unsigned char* protected_frames_bytes, + size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size) { tsi_result result = TSI_OK; int written_into_ssl = 0; size_t output_bytes_size = *unprotected_bytes_size; size_t output_bytes_offset = 0; - tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self; + tsi_ssl_frame_protector* impl = + reinterpret_cast(self); /* First, try to read remaining data from ssl. */ result = do_ssl_read(impl->ssl, unprotected_bytes, unprotected_bytes_size); @@ -824,14 +912,14 @@ static tsi_result ssl_protector_unprotect( /* Then, try to write some data to ssl. */ GPR_ASSERT(*protected_frames_bytes_size <= INT_MAX); - written_into_ssl = BIO_write(impl->into_ssl, protected_frames_bytes, - (int)*protected_frames_bytes_size); + written_into_ssl = BIO_write(impl->network_io, protected_frames_bytes, + static_cast(*protected_frames_bytes_size)); if (written_into_ssl < 0) { gpr_log(GPR_ERROR, "Sending protected frame to ssl failed with %d", written_into_ssl); return TSI_INTERNAL_ERROR; } - *protected_frames_bytes_size = (size_t)written_into_ssl; + *protected_frames_bytes_size = static_cast(written_into_ssl); /* Now try to read some data again. */ result = do_ssl_read(impl->ssl, unprotected_bytes, unprotected_bytes_size); @@ -842,25 +930,29 @@ static tsi_result ssl_protector_unprotect( return result; } -static void ssl_protector_destroy(tsi_frame_protector *self) { - tsi_ssl_frame_protector *impl = (tsi_ssl_frame_protector *)self; - if (impl->buffer != NULL) gpr_free(impl->buffer); - if (impl->ssl != NULL) SSL_free(impl->ssl); +static void ssl_protector_destroy(tsi_frame_protector* self) { + tsi_ssl_frame_protector* impl = + reinterpret_cast(self); + if (impl->buffer != nullptr) gpr_free(impl->buffer); + if (impl->ssl != nullptr) SSL_free(impl->ssl); + if (impl->network_io != nullptr) BIO_free(impl->network_io); gpr_free(self); } static const tsi_frame_protector_vtable frame_protector_vtable = { - ssl_protector_protect, ssl_protector_protect_flush, ssl_protector_unprotect, + ssl_protector_protect, + ssl_protector_protect_flush, + ssl_protector_unprotect, ssl_protector_destroy, }; /* --- tsi_server_handshaker_factory methods implementation. --- */ static void tsi_ssl_handshaker_factory_destroy( - tsi_ssl_handshaker_factory *self) { - if (self == NULL) return; + tsi_ssl_handshaker_factory* self) { + if (self == nullptr) return; - if (self->vtable != NULL && self->vtable->destroy != NULL) { + if (self->vtable != nullptr && self->vtable->destroy != nullptr) { self->vtable->destroy(self); } /* Note, we don't free(self) here because this object is always directly @@ -868,28 +960,28 @@ static void tsi_ssl_handshaker_factory_destroy( * any memory, it should be free'd here. 
*/ } -static tsi_ssl_handshaker_factory *tsi_ssl_handshaker_factory_ref( - tsi_ssl_handshaker_factory *self) { - if (self == NULL) return NULL; +static tsi_ssl_handshaker_factory* tsi_ssl_handshaker_factory_ref( + tsi_ssl_handshaker_factory* self) { + if (self == nullptr) return nullptr; gpr_refn(&self->refcount, 1); return self; } -static void tsi_ssl_handshaker_factory_unref(tsi_ssl_handshaker_factory *self) { - if (self == NULL) return; +static void tsi_ssl_handshaker_factory_unref(tsi_ssl_handshaker_factory* self) { + if (self == nullptr) return; if (gpr_unref(&self->refcount)) { tsi_ssl_handshaker_factory_destroy(self); } } -static tsi_ssl_handshaker_factory_vtable handshaker_factory_vtable = {NULL}; +static tsi_ssl_handshaker_factory_vtable handshaker_factory_vtable = {nullptr}; /* Initializes a tsi_ssl_handshaker_factory object. Caller is responsible for * allocating memory for the factory. */ static void tsi_ssl_handshaker_factory_init( - tsi_ssl_handshaker_factory *factory) { - GPR_ASSERT(factory != NULL); + tsi_ssl_handshaker_factory* factory) { + GPR_ASSERT(factory != nullptr); factory->vtable = &handshaker_factory_vtable; gpr_ref_init(&factory->refcount, 1); @@ -897,32 +989,33 @@ static void tsi_ssl_handshaker_factory_init( /* --- tsi_handshaker methods implementation. ---*/ -static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self, - unsigned char *bytes, - size_t *bytes_size) { - tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self; +static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self, + unsigned char* bytes, + size_t* bytes_size) { + tsi_ssl_handshaker* impl = reinterpret_cast(self); int bytes_read_from_ssl = 0; - if (bytes == NULL || bytes_size == NULL || *bytes_size == 0 || + if (bytes == nullptr || bytes_size == nullptr || *bytes_size == 0 || *bytes_size > INT_MAX) { return TSI_INVALID_ARGUMENT; } GPR_ASSERT(*bytes_size <= INT_MAX); - bytes_read_from_ssl = BIO_read(impl->from_ssl, bytes, (int)*bytes_size); + bytes_read_from_ssl = + BIO_read(impl->network_io, bytes, static_cast(*bytes_size)); if (bytes_read_from_ssl < 0) { *bytes_size = 0; - if (!BIO_should_retry(impl->from_ssl)) { + if (!BIO_should_retry(impl->network_io)) { impl->result = TSI_INTERNAL_ERROR; return impl->result; } else { return TSI_OK; } } - *bytes_size = (size_t)bytes_read_from_ssl; - return BIO_pending(impl->from_ssl) == 0 ? TSI_OK : TSI_INCOMPLETE_DATA; + *bytes_size = static_cast(bytes_read_from_ssl); + return BIO_pending(impl->network_io) == 0 ? 
TSI_OK : TSI_INCOMPLETE_DATA; } -static tsi_result ssl_handshaker_get_result(tsi_handshaker *self) { - tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self; +static tsi_result ssl_handshaker_get_result(tsi_handshaker* self) { + tsi_ssl_handshaker* impl = reinterpret_cast(self); if ((impl->result == TSI_HANDSHAKE_IN_PROGRESS) && SSL_is_init_finished(impl->ssl)) { impl->result = TSI_OK; @@ -931,21 +1024,21 @@ static tsi_result ssl_handshaker_get_result(tsi_handshaker *self) { } static tsi_result ssl_handshaker_process_bytes_from_peer( - tsi_handshaker *self, const unsigned char *bytes, size_t *bytes_size) { - tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self; + tsi_handshaker* self, const unsigned char* bytes, size_t* bytes_size) { + tsi_ssl_handshaker* impl = reinterpret_cast(self); int bytes_written_into_ssl_size = 0; - if (bytes == NULL || bytes_size == 0 || *bytes_size > INT_MAX) { + if (bytes == nullptr || bytes_size == nullptr || *bytes_size > INT_MAX) { return TSI_INVALID_ARGUMENT; } GPR_ASSERT(*bytes_size <= INT_MAX); bytes_written_into_ssl_size = - BIO_write(impl->into_ssl, bytes, (int)*bytes_size); + BIO_write(impl->network_io, bytes, static_cast(*bytes_size)); if (bytes_written_into_ssl_size < 0) { gpr_log(GPR_ERROR, "Could not write to memory BIO."); impl->result = TSI_INTERNAL_ERROR; return impl->result; } - *bytes_size = (size_t)bytes_written_into_ssl_size; + *bytes_size = static_cast(bytes_written_into_ssl_size); if (!tsi_handshaker_is_in_progress(self)) { impl->result = TSI_OK; @@ -956,7 +1049,7 @@ static tsi_result ssl_handshaker_process_bytes_from_peer( ssl_result = SSL_get_error(impl->ssl, ssl_result); switch (ssl_result) { case SSL_ERROR_WANT_READ: - if (BIO_pending(impl->from_ssl) == 0) { + if (BIO_pending(impl->network_io) == 0) { /* We need more data. */ return TSI_INCOMPLETE_DATA; } else { @@ -976,14 +1069,14 @@ static tsi_result ssl_handshaker_process_bytes_from_peer( } } -static tsi_result ssl_handshaker_extract_peer(tsi_handshaker *self, - tsi_peer *peer) { +static tsi_result ssl_handshaker_extract_peer(tsi_handshaker* self, + tsi_peer* peer) { tsi_result result = TSI_OK; - const unsigned char *alpn_selected = NULL; + const unsigned char* alpn_selected = nullptr; unsigned int alpn_selected_len; - tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self; - X509 *peer_cert = SSL_get_peer_certificate(impl->ssl); - if (peer_cert != NULL) { + tsi_ssl_handshaker* impl = reinterpret_cast(self); + X509* peer_cert = SSL_get_peer_certificate(impl->ssl); + if (peer_cert != nullptr) { result = peer_from_x509(peer_cert, 1, peer); X509_free(peer_cert); if (result != TSI_OK) return result; @@ -991,41 +1084,53 @@ static tsi_result ssl_handshaker_extract_peer(tsi_handshaker *self, #if TSI_OPENSSL_ALPN_SUPPORT SSL_get0_alpn_selected(impl->ssl, &alpn_selected, &alpn_selected_len); #endif /* TSI_OPENSSL_ALPN_SUPPORT */ - if (alpn_selected == NULL) { + if (alpn_selected == nullptr) { /* Try npn. */ SSL_get0_next_proto_negotiated(impl->ssl, &alpn_selected, &alpn_selected_len); } - if (alpn_selected != NULL) { - size_t i; - tsi_peer_property *new_properties = - gpr_zalloc(sizeof(*new_properties) * (peer->property_count + 1)); - for (i = 0; i < peer->property_count; i++) { - new_properties[i] = peer->properties[i]; - } + + // 1 is for session reused property. 
+ size_t new_property_count = peer->property_count + 1; + if (alpn_selected != nullptr) new_property_count++; + tsi_peer_property* new_properties = static_cast( + gpr_zalloc(sizeof(*new_properties) * new_property_count)); + for (size_t i = 0; i < peer->property_count; i++) { + new_properties[i] = peer->properties[i]; + } + if (peer->properties != nullptr) gpr_free(peer->properties); + peer->properties = new_properties; + + if (alpn_selected != nullptr) { result = tsi_construct_string_peer_property( - TSI_SSL_ALPN_SELECTED_PROTOCOL, (const char *)alpn_selected, - alpn_selected_len, &new_properties[peer->property_count]); - if (result != TSI_OK) { - gpr_free(new_properties); - return result; - } - if (peer->properties != NULL) gpr_free(peer->properties); + TSI_SSL_ALPN_SELECTED_PROTOCOL, + reinterpret_cast(alpn_selected), alpn_selected_len, + &peer->properties[peer->property_count]); + if (result != TSI_OK) return result; peer->property_count++; - peer->properties = new_properties; } + + const char* session_reused = SSL_session_reused(impl->ssl) ? "true" : "false"; + result = tsi_construct_string_peer_property( + TSI_SSL_SESSION_REUSED_PEER_PROPERTY, session_reused, + strlen(session_reused) + 1, &peer->properties[peer->property_count]); + if (result != TSI_OK) return result; + peer->property_count++; + return result; } static tsi_result ssl_handshaker_create_frame_protector( - tsi_handshaker *self, size_t *max_output_protected_frame_size, - tsi_frame_protector **protector) { + tsi_handshaker* self, size_t* max_output_protected_frame_size, + tsi_frame_protector** protector) { size_t actual_max_output_protected_frame_size = TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND; - tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self; - tsi_ssl_frame_protector *protector_impl = gpr_zalloc(sizeof(*protector_impl)); + tsi_ssl_handshaker* impl = reinterpret_cast(self); + tsi_ssl_frame_protector* protector_impl = + static_cast( + gpr_zalloc(sizeof(*protector_impl))); - if (max_output_protected_frame_size != NULL) { + if (max_output_protected_frame_size != nullptr) { if (*max_output_protected_frame_size > TSI_SSL_MAX_PROTECTED_FRAME_SIZE_UPPER_BOUND) { *max_output_protected_frame_size = @@ -1039,29 +1144,32 @@ static tsi_result ssl_handshaker_create_frame_protector( } protector_impl->buffer_size = actual_max_output_protected_frame_size - TSI_SSL_MAX_PROTECTION_OVERHEAD; - protector_impl->buffer = gpr_malloc(protector_impl->buffer_size); - if (protector_impl->buffer == NULL) { + protector_impl->buffer = + static_cast(gpr_malloc(protector_impl->buffer_size)); + if (protector_impl->buffer == nullptr) { gpr_log(GPR_ERROR, "Could not allocated buffer for tsi_ssl_frame_protector."); gpr_free(protector_impl); return TSI_INTERNAL_ERROR; } - /* Transfer ownership of ssl to the frame protector. It is OK as the caller - * cannot call anything else but destroy on the handshaker after this call. */ + /* Transfer ownership of ssl and network_io to the frame protector. It is OK + * as the caller cannot call anything else but destroy on the handshaker + * after this call. 
*/ protector_impl->ssl = impl->ssl; - impl->ssl = NULL; - protector_impl->into_ssl = impl->into_ssl; - protector_impl->from_ssl = impl->from_ssl; + impl->ssl = nullptr; + protector_impl->network_io = impl->network_io; + impl->network_io = nullptr; protector_impl->base.vtable = &frame_protector_vtable; *protector = &protector_impl->base; return TSI_OK; } -static void ssl_handshaker_destroy(tsi_handshaker *self) { - tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self; - SSL_free(impl->ssl); /* The BIO objects are owned by ssl */ +static void ssl_handshaker_destroy(tsi_handshaker* self) { + tsi_ssl_handshaker* impl = reinterpret_cast(self); + SSL_free(impl->ssl); + BIO_free(impl->network_io); tsi_ssl_handshaker_factory_unref(impl->factory_ref); gpr_free(impl); } @@ -1073,50 +1181,66 @@ static const tsi_handshaker_vtable handshaker_vtable = { ssl_handshaker_extract_peer, ssl_handshaker_create_frame_protector, ssl_handshaker_destroy, - NULL, + nullptr, }; /* --- tsi_ssl_handshaker_factory common methods. --- */ -static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client, - const char *server_name_indication, - tsi_ssl_handshaker_factory *factory, - tsi_handshaker **handshaker) { - SSL *ssl = SSL_new(ctx); - BIO *into_ssl = NULL; - BIO *from_ssl = NULL; - tsi_ssl_handshaker *impl = NULL; - *handshaker = NULL; - if (ctx == NULL) { +static void tsi_ssl_handshaker_resume_session( + SSL* ssl, tsi::SslSessionLRUCache* session_cache) { + const char* server_name = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name); + if (server_name == nullptr) { + return; + } + tsi::SslSessionPtr session = session_cache->Get(server_name); + if (session != nullptr) { + // SSL_set_session internally increments reference counter. + SSL_set_session(ssl, session.get()); + } +} + +static tsi_result create_tsi_ssl_handshaker(SSL_CTX* ctx, int is_client, + const char* server_name_indication, + tsi_ssl_handshaker_factory* factory, + tsi_handshaker** handshaker) { + SSL* ssl = SSL_new(ctx); + BIO* network_io = nullptr; + BIO* ssl_io = nullptr; + tsi_ssl_handshaker* impl = nullptr; + *handshaker = nullptr; + if (ctx == nullptr) { gpr_log(GPR_ERROR, "SSL Context is null. 
Should never happen."); return TSI_INTERNAL_ERROR; } - if (ssl == NULL) { + if (ssl == nullptr) { return TSI_OUT_OF_RESOURCES; } SSL_set_info_callback(ssl, ssl_info_callback); - into_ssl = BIO_new(BIO_s_mem()); - from_ssl = BIO_new(BIO_s_mem()); - if (into_ssl == NULL || from_ssl == NULL) { - gpr_log(GPR_ERROR, "BIO_new failed."); + if (!BIO_new_bio_pair(&network_io, 0, &ssl_io, 0)) { + gpr_log(GPR_ERROR, "BIO_new_bio_pair failed."); SSL_free(ssl); - if (into_ssl != NULL) BIO_free(into_ssl); - if (from_ssl != NULL) BIO_free(into_ssl); return TSI_OUT_OF_RESOURCES; } - SSL_set_bio(ssl, into_ssl, from_ssl); + SSL_set_bio(ssl, ssl_io, ssl_io); if (is_client) { int ssl_result; SSL_set_connect_state(ssl); - if (server_name_indication != NULL) { + if (server_name_indication != nullptr) { if (!SSL_set_tlsext_host_name(ssl, server_name_indication)) { gpr_log(GPR_ERROR, "Invalid server name indication %s.", server_name_indication); SSL_free(ssl); + BIO_free(network_io); return TSI_INTERNAL_ERROR; } } + tsi_ssl_client_handshaker_factory* client_factory = + reinterpret_cast(factory); + if (client_factory->session_cache != nullptr) { + tsi_ssl_handshaker_resume_session(ssl, + client_factory->session_cache.get()); + } ssl_result = SSL_do_handshake(ssl); ssl_result = SSL_get_error(ssl, ssl_result); if (ssl_result != SSL_ERROR_WANT_READ) { @@ -1124,16 +1248,16 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client, "Unexpected error received from first SSL_do_handshake call: %s", ssl_error_string(ssl_result)); SSL_free(ssl); + BIO_free(network_io); return TSI_INTERNAL_ERROR; } } else { SSL_set_accept_state(ssl); } - impl = gpr_zalloc(sizeof(*impl)); + impl = static_cast(gpr_zalloc(sizeof(*impl))); impl->ssl = ssl; - impl->into_ssl = into_ssl; - impl->from_ssl = from_ssl; + impl->network_io = network_io; impl->result = TSI_HANDSHAKE_IN_PROGRESS; impl->base.vtable = &handshaker_vtable; impl->factory_ref = tsi_ssl_handshaker_factory_ref(factory); @@ -1142,18 +1266,20 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client, return TSI_OK; } -static int select_protocol_list(const unsigned char **out, - unsigned char *outlen, - const unsigned char *client_list, +static int select_protocol_list(const unsigned char** out, + unsigned char* outlen, + const unsigned char* client_list, size_t client_list_len, - const unsigned char *server_list, + const unsigned char* server_list, size_t server_list_len) { - const unsigned char *client_current = client_list; - while ((unsigned int)(client_current - client_list) < client_list_len) { + const unsigned char* client_current = client_list; + while (static_cast(client_current - client_list) < + client_list_len) { unsigned char client_current_len = *(client_current++); - const unsigned char *server_current = server_list; + const unsigned char* server_current = server_list; while ((server_current >= server_list) && - (uintptr_t)(server_current - server_list) < server_list_len) { + static_cast(server_current - server_list) < + server_list_len) { unsigned char server_current_len = *(server_current++); if ((client_current_len == server_current_len) && !memcmp(client_current, server_current, server_current_len)) { @@ -1171,36 +1297,37 @@ static int select_protocol_list(const unsigned char **out, /* --- tsi_ssl_client_handshaker_factory methods implementation. 
--- */ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker( - tsi_ssl_client_handshaker_factory *self, const char *server_name_indication, - tsi_handshaker **handshaker) { + tsi_ssl_client_handshaker_factory* self, const char* server_name_indication, + tsi_handshaker** handshaker) { return create_tsi_ssl_handshaker(self->ssl_context, 1, server_name_indication, &self->base, handshaker); } void tsi_ssl_client_handshaker_factory_unref( - tsi_ssl_client_handshaker_factory *self) { - if (self == NULL) return; + tsi_ssl_client_handshaker_factory* self) { + if (self == nullptr) return; tsi_ssl_handshaker_factory_unref(&self->base); } static void tsi_ssl_client_handshaker_factory_destroy( - tsi_ssl_handshaker_factory *factory) { - if (factory == NULL) return; - tsi_ssl_client_handshaker_factory *self = - (tsi_ssl_client_handshaker_factory *)factory; - if (self->ssl_context != NULL) SSL_CTX_free(self->ssl_context); - if (self->alpn_protocol_list != NULL) gpr_free(self->alpn_protocol_list); + tsi_ssl_handshaker_factory* factory) { + if (factory == nullptr) return; + tsi_ssl_client_handshaker_factory* self = + reinterpret_cast(factory); + if (self->ssl_context != nullptr) SSL_CTX_free(self->ssl_context); + if (self->alpn_protocol_list != nullptr) gpr_free(self->alpn_protocol_list); + self->session_cache.reset(); gpr_free(self); } -static int client_handshaker_factory_npn_callback(SSL *ssl, unsigned char **out, - unsigned char *outlen, - const unsigned char *in, +static int client_handshaker_factory_npn_callback(SSL* ssl, unsigned char** out, + unsigned char* outlen, + const unsigned char* in, unsigned int inlen, - void *arg) { - tsi_ssl_client_handshaker_factory *factory = - (tsi_ssl_client_handshaker_factory *)arg; - return select_protocol_list((const unsigned char **)out, outlen, + void* arg) { + tsi_ssl_client_handshaker_factory* factory = + static_cast(arg); + return select_protocol_list((const unsigned char**)out, outlen, factory->alpn_protocol_list, factory->alpn_protocol_list_length, in, inlen); } @@ -1208,44 +1335,44 @@ static int client_handshaker_factory_npn_callback(SSL *ssl, unsigned char **out, /* --- tsi_ssl_server_handshaker_factory methods implementation. --- */ tsi_result tsi_ssl_server_handshaker_factory_create_handshaker( - tsi_ssl_server_handshaker_factory *self, tsi_handshaker **handshaker) { + tsi_ssl_server_handshaker_factory* self, tsi_handshaker** handshaker) { if (self->ssl_context_count == 0) return TSI_INVALID_ARGUMENT; /* Create the handshaker with the first context. We will switch if needed because of SNI in ssl_server_handshaker_factory_servername_callback. 
*/ - return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, &self->base, - handshaker); + return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, nullptr, + &self->base, handshaker); } void tsi_ssl_server_handshaker_factory_unref( - tsi_ssl_server_handshaker_factory *self) { - if (self == NULL) return; + tsi_ssl_server_handshaker_factory* self) { + if (self == nullptr) return; tsi_ssl_handshaker_factory_unref(&self->base); } static void tsi_ssl_server_handshaker_factory_destroy( - tsi_ssl_handshaker_factory *factory) { - if (factory == NULL) return; - tsi_ssl_server_handshaker_factory *self = - (tsi_ssl_server_handshaker_factory *)factory; + tsi_ssl_handshaker_factory* factory) { + if (factory == nullptr) return; + tsi_ssl_server_handshaker_factory* self = + reinterpret_cast(factory); size_t i; for (i = 0; i < self->ssl_context_count; i++) { - if (self->ssl_contexts[i] != NULL) { + if (self->ssl_contexts[i] != nullptr) { SSL_CTX_free(self->ssl_contexts[i]); tsi_peer_destruct(&self->ssl_context_x509_subject_names[i]); } } - if (self->ssl_contexts != NULL) gpr_free(self->ssl_contexts); - if (self->ssl_context_x509_subject_names != NULL) { + if (self->ssl_contexts != nullptr) gpr_free(self->ssl_contexts); + if (self->ssl_context_x509_subject_names != nullptr) { gpr_free(self->ssl_context_x509_subject_names); } - if (self->alpn_protocol_list != NULL) gpr_free(self->alpn_protocol_list); + if (self->alpn_protocol_list != nullptr) gpr_free(self->alpn_protocol_list); gpr_free(self); } -static int does_entry_match_name(const char *entry, size_t entry_length, - const char *name) { - const char *dot; - const char *name_subdomain = NULL; +static int does_entry_match_name(const char* entry, size_t entry_length, + const char* name) { + const char* dot; + const char* name_subdomain = nullptr; size_t name_length = strlen(name); size_t name_subdomain_length; if (entry_length == 0) return 0; @@ -1271,7 +1398,7 @@ static int does_entry_match_name(const char *entry, size_t entry_length, return 0; } name_subdomain = strchr(name, '.'); - if (name_subdomain == NULL) return 0; + if (name_subdomain == nullptr) return 0; name_subdomain_length = strlen(name_subdomain); if (name_subdomain_length < 2) return 0; name_subdomain++; /* Starts after the dot. */ @@ -1279,7 +1406,7 @@ static int does_entry_match_name(const char *entry, size_t entry_length, entry += 2; /* Remove *. 
*/ entry_length -= 2; dot = strchr(name_subdomain, '.'); - if ((dot == NULL) || (dot == &name_subdomain[name_subdomain_length - 1])) { + if ((dot == nullptr) || (dot == &name_subdomain[name_subdomain_length - 1])) { gpr_log(GPR_ERROR, "Invalid toplevel subdomain: %s", name_subdomain); return 0; } @@ -1290,13 +1417,13 @@ static int does_entry_match_name(const char *entry, size_t entry_length, strncmp(entry, name_subdomain, entry_length) == 0); } -static int ssl_server_handshaker_factory_servername_callback(SSL *ssl, int *ap, - void *arg) { - tsi_ssl_server_handshaker_factory *impl = - (tsi_ssl_server_handshaker_factory *)arg; +static int ssl_server_handshaker_factory_servername_callback(SSL* ssl, int* ap, + void* arg) { + tsi_ssl_server_handshaker_factory* impl = + static_cast(arg); size_t i = 0; - const char *servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name); - if (servername == NULL || strlen(servername) == 0) { + const char* servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name); + if (servername == nullptr || strlen(servername) == 0) { return SSL_TLSEXT_ERR_NOACK; } @@ -1313,10 +1440,10 @@ static int ssl_server_handshaker_factory_servername_callback(SSL *ssl, int *ap, #if TSI_OPENSSL_ALPN_SUPPORT static int server_handshaker_factory_alpn_callback( - SSL *ssl, const unsigned char **out, unsigned char *outlen, - const unsigned char *in, unsigned int inlen, void *arg) { - tsi_ssl_server_handshaker_factory *factory = - (tsi_ssl_server_handshaker_factory *)arg; + SSL* ssl, const unsigned char** out, unsigned char* outlen, + const unsigned char* in, unsigned int inlen, void* arg) { + tsi_ssl_server_handshaker_factory* factory = + static_cast(arg); return select_protocol_list(out, outlen, in, inlen, factory->alpn_protocol_list, factory->alpn_protocol_list_length); @@ -1324,62 +1451,123 @@ static int server_handshaker_factory_alpn_callback( #endif /* TSI_OPENSSL_ALPN_SUPPORT */ static int server_handshaker_factory_npn_advertised_callback( - SSL *ssl, const unsigned char **out, unsigned int *outlen, void *arg) { - tsi_ssl_server_handshaker_factory *factory = - (tsi_ssl_server_handshaker_factory *)arg; + SSL* ssl, const unsigned char** out, unsigned int* outlen, void* arg) { + tsi_ssl_server_handshaker_factory* factory = + static_cast(arg); *out = factory->alpn_protocol_list; GPR_ASSERT(factory->alpn_protocol_list_length <= UINT_MAX); - *outlen = (unsigned int)factory->alpn_protocol_list_length; + *outlen = static_cast(factory->alpn_protocol_list_length); return SSL_TLSEXT_ERR_OK; } +/// This callback is called when new \a session is established and ready to +/// be cached. This session can be reused for new connections to similar +/// servers at later point of time. +/// It's intended to be used with SSL_CTX_sess_set_new_cb function. +/// +/// It returns 1 if callback takes ownership over \a session and 0 otherwise. +static int server_handshaker_factory_new_session_callback( + SSL* ssl, SSL_SESSION* session) { + SSL_CTX* ssl_context = SSL_get_SSL_CTX(ssl); + if (ssl_context == nullptr) { + return 0; + } + void* arg = SSL_CTX_get_ex_data(ssl_context, g_ssl_ctx_ex_factory_index); + tsi_ssl_client_handshaker_factory* factory = + static_cast(arg); + const char* server_name = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name); + if (server_name == nullptr) { + return 0; + } + factory->session_cache->Put(server_name, tsi::SslSessionPtr(session)); + // Return 1 to indicate transfered ownership over the given session. 
+ return 1; +} + /* --- tsi_ssl_handshaker_factory constructors. --- */ static tsi_ssl_handshaker_factory_vtable client_handshaker_factory_vtable = { tsi_ssl_client_handshaker_factory_destroy}; tsi_result tsi_create_ssl_client_handshaker_factory( - const tsi_ssl_pem_key_cert_pair *pem_key_cert_pair, - const char *pem_root_certs, const char *cipher_suites, - const char **alpn_protocols, uint16_t num_alpn_protocols, - tsi_ssl_client_handshaker_factory **factory) { - SSL_CTX *ssl_context = NULL; - tsi_ssl_client_handshaker_factory *impl = NULL; + const tsi_ssl_pem_key_cert_pair* pem_key_cert_pair, + const char* pem_root_certs, const char* cipher_suites, + const char** alpn_protocols, uint16_t num_alpn_protocols, + tsi_ssl_client_handshaker_factory** factory) { + tsi_ssl_client_handshaker_options options; + memset(&options, 0, sizeof(options)); + options.pem_key_cert_pair = pem_key_cert_pair; + options.pem_root_certs = pem_root_certs; + options.cipher_suites = cipher_suites; + options.alpn_protocols = alpn_protocols; + options.num_alpn_protocols = num_alpn_protocols; + return tsi_create_ssl_client_handshaker_factory_with_options(&options, + factory); +} + +tsi_result tsi_create_ssl_client_handshaker_factory_with_options( + const tsi_ssl_client_handshaker_options* options, + tsi_ssl_client_handshaker_factory** factory) { + SSL_CTX* ssl_context = nullptr; + tsi_ssl_client_handshaker_factory* impl = nullptr; tsi_result result = TSI_OK; - gpr_once_init(&init_openssl_once, init_openssl); + gpr_once_init(&g_init_openssl_once, init_openssl); - if (factory == NULL) return TSI_INVALID_ARGUMENT; - *factory = NULL; - if (pem_root_certs == NULL) return TSI_INVALID_ARGUMENT; + if (factory == nullptr) return TSI_INVALID_ARGUMENT; + *factory = nullptr; + if (options->pem_root_certs == nullptr && options->root_store == nullptr) { + return TSI_INVALID_ARGUMENT; + } ssl_context = SSL_CTX_new(TLSv1_2_method()); - if (ssl_context == NULL) { + if (ssl_context == nullptr) { gpr_log(GPR_ERROR, "Could not create ssl context."); return TSI_INVALID_ARGUMENT; } - impl = gpr_zalloc(sizeof(*impl)); + impl = static_cast( + gpr_zalloc(sizeof(*impl))); tsi_ssl_handshaker_factory_init(&impl->base); impl->base.vtable = &client_handshaker_factory_vtable; - impl->ssl_context = ssl_context; + if (options->session_cache != nullptr) { + // Unref is called manually on factory destruction. + impl->session_cache = + reinterpret_cast(options->session_cache) + ->Ref(); + SSL_CTX_set_ex_data(ssl_context, g_ssl_ctx_ex_factory_index, impl); + SSL_CTX_sess_set_new_cb(ssl_context, + server_handshaker_factory_new_session_callback); + SSL_CTX_set_session_cache_mode(ssl_context, SSL_SESS_CACHE_CLIENT); + } do { - result = - populate_ssl_context(ssl_context, pem_key_cert_pair, cipher_suites); + result = populate_ssl_context(ssl_context, options->pem_key_cert_pair, + options->cipher_suites); if (result != TSI_OK) break; - result = ssl_ctx_load_verification_certs(ssl_context, pem_root_certs, - strlen(pem_root_certs), NULL); - if (result != TSI_OK) { - gpr_log(GPR_ERROR, "Cannot load server root certificates."); - break; + +#if OPENSSL_VERSION_NUMBER >= 0x10100000 + // X509_STORE_up_ref is only available since OpenSSL 1.1. 
+ if (options->root_store != nullptr) { + X509_STORE_up_ref(options->root_store->store); + SSL_CTX_set_cert_store(ssl_context, options->root_store->store); + } +#endif + if (OPENSSL_VERSION_NUMBER < 0x10100000 || options->root_store == nullptr) { + result = ssl_ctx_load_verification_certs( + ssl_context, options->pem_root_certs, strlen(options->pem_root_certs), + nullptr); + if (result != TSI_OK) { + gpr_log(GPR_ERROR, "Cannot load server root certificates."); + break; + } } - if (num_alpn_protocols != 0) { - result = build_alpn_protocol_name_list(alpn_protocols, num_alpn_protocols, - &impl->alpn_protocol_list, - &impl->alpn_protocol_list_length); + if (options->num_alpn_protocols != 0) { + result = build_alpn_protocol_name_list( + options->alpn_protocols, options->num_alpn_protocols, + &impl->alpn_protocol_list, &impl->alpn_protocol_list_length); if (result != TSI_OK) { gpr_log(GPR_ERROR, "Building alpn list failed with error %s.", tsi_result_to_string(result)); @@ -1389,7 +1577,7 @@ tsi_result tsi_create_ssl_client_handshaker_factory( GPR_ASSERT(impl->alpn_protocol_list_length < UINT_MAX); if (SSL_CTX_set_alpn_protos( ssl_context, impl->alpn_protocol_list, - (unsigned int)impl->alpn_protocol_list_length)) { + static_cast(impl->alpn_protocol_list_length))) { gpr_log(GPR_ERROR, "Could not set alpn protocol list to context."); result = TSI_INVALID_ARGUMENT; break; @@ -1403,7 +1591,7 @@ tsi_result tsi_create_ssl_client_handshaker_factory( tsi_ssl_handshaker_factory_unref(&impl->base); return result; } - SSL_CTX_set_verify(ssl_context, SSL_VERIFY_PEER, NULL); + SSL_CTX_set_verify(ssl_context, SSL_VERIFY_PEER, nullptr); /* TODO(jboeuf): Add revocation verification. */ *factory = impl; @@ -1414,11 +1602,11 @@ static tsi_ssl_handshaker_factory_vtable server_handshaker_factory_vtable = { tsi_ssl_server_handshaker_factory_destroy}; tsi_result tsi_create_ssl_server_handshaker_factory( - const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs, - size_t num_key_cert_pairs, const char *pem_client_root_certs, - int force_client_auth, const char *cipher_suites, - const char **alpn_protocols, uint16_t num_alpn_protocols, - tsi_ssl_server_handshaker_factory **factory) { + const tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs, const char* pem_client_root_certs, + int force_client_auth, const char* cipher_suites, + const char** alpn_protocols, uint16_t num_alpn_protocols, + tsi_ssl_server_handshaker_factory** factory) { return tsi_create_ssl_server_handshaker_factory_ex( pem_key_cert_pairs, num_key_cert_pairs, pem_client_root_certs, force_client_auth ? 
TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY @@ -1427,79 +1615,122 @@ tsi_result tsi_create_ssl_server_handshaker_factory( } tsi_result tsi_create_ssl_server_handshaker_factory_ex( - const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs, - size_t num_key_cert_pairs, const char *pem_client_root_certs, + const tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs, const char* pem_client_root_certs, tsi_client_certificate_request_type client_certificate_request, - const char *cipher_suites, const char **alpn_protocols, - uint16_t num_alpn_protocols, tsi_ssl_server_handshaker_factory **factory) { - tsi_ssl_server_handshaker_factory *impl = NULL; + const char* cipher_suites, const char** alpn_protocols, + uint16_t num_alpn_protocols, tsi_ssl_server_handshaker_factory** factory) { + tsi_ssl_server_handshaker_options options; + memset(&options, 0, sizeof(options)); + options.pem_key_cert_pairs = pem_key_cert_pairs; + options.num_key_cert_pairs = num_key_cert_pairs; + options.pem_client_root_certs = pem_client_root_certs; + options.client_certificate_request = client_certificate_request; + options.cipher_suites = cipher_suites; + options.alpn_protocols = alpn_protocols; + options.num_alpn_protocols = num_alpn_protocols; + return tsi_create_ssl_server_handshaker_factory_with_options(&options, + factory); +} + +tsi_result tsi_create_ssl_server_handshaker_factory_with_options( + const tsi_ssl_server_handshaker_options* options, + tsi_ssl_server_handshaker_factory** factory) { + tsi_ssl_server_handshaker_factory* impl = nullptr; tsi_result result = TSI_OK; size_t i = 0; - gpr_once_init(&init_openssl_once, init_openssl); + gpr_once_init(&g_init_openssl_once, init_openssl); - if (factory == NULL) return TSI_INVALID_ARGUMENT; - *factory = NULL; - if (num_key_cert_pairs == 0 || pem_key_cert_pairs == NULL) { + if (factory == nullptr) return TSI_INVALID_ARGUMENT; + *factory = nullptr; + if (options->num_key_cert_pairs == 0 || + options->pem_key_cert_pairs == nullptr) { return TSI_INVALID_ARGUMENT; } - impl = gpr_zalloc(sizeof(*impl)); + impl = static_cast( + gpr_zalloc(sizeof(*impl))); tsi_ssl_handshaker_factory_init(&impl->base); impl->base.vtable = &server_handshaker_factory_vtable; - impl->ssl_contexts = gpr_zalloc(num_key_cert_pairs * sizeof(SSL_CTX *)); - impl->ssl_context_x509_subject_names = - gpr_zalloc(num_key_cert_pairs * sizeof(tsi_peer)); - if (impl->ssl_contexts == NULL || - impl->ssl_context_x509_subject_names == NULL) { + impl->ssl_contexts = static_cast( + gpr_zalloc(options->num_key_cert_pairs * sizeof(SSL_CTX*))); + impl->ssl_context_x509_subject_names = static_cast( + gpr_zalloc(options->num_key_cert_pairs * sizeof(tsi_peer))); + if (impl->ssl_contexts == nullptr || + impl->ssl_context_x509_subject_names == nullptr) { tsi_ssl_handshaker_factory_unref(&impl->base); return TSI_OUT_OF_RESOURCES; } - impl->ssl_context_count = num_key_cert_pairs; + impl->ssl_context_count = options->num_key_cert_pairs; - if (num_alpn_protocols > 0) { - result = build_alpn_protocol_name_list(alpn_protocols, num_alpn_protocols, - &impl->alpn_protocol_list, - &impl->alpn_protocol_list_length); + if (options->num_alpn_protocols > 0) { + result = build_alpn_protocol_name_list( + options->alpn_protocols, options->num_alpn_protocols, + &impl->alpn_protocol_list, &impl->alpn_protocol_list_length); if (result != TSI_OK) { tsi_ssl_handshaker_factory_unref(&impl->base); return result; } } - for (i = 0; i < num_key_cert_pairs; i++) { + for (i = 0; i < options->num_key_cert_pairs; i++) { do { 
impl->ssl_contexts[i] = SSL_CTX_new(TLSv1_2_method()); - if (impl->ssl_contexts[i] == NULL) { + if (impl->ssl_contexts[i] == nullptr) { gpr_log(GPR_ERROR, "Could not create ssl context."); result = TSI_OUT_OF_RESOURCES; break; } result = populate_ssl_context(impl->ssl_contexts[i], - &pem_key_cert_pairs[i], cipher_suites); + &options->pem_key_cert_pairs[i], + options->cipher_suites); if (result != TSI_OK) break; - if (pem_client_root_certs != NULL) { - STACK_OF(X509_NAME) *root_names = NULL; + // TODO(elessar): Provide ability to disable session ticket keys. + + // Allow client cache sessions (it's needed for OpenSSL only). + int set_sid_ctx_result = SSL_CTX_set_session_id_context( + impl->ssl_contexts[i], kSslSessionIdContext, + GPR_ARRAY_SIZE(kSslSessionIdContext)); + if (set_sid_ctx_result == 0) { + gpr_log(GPR_ERROR, "Failed to set session id context."); + result = TSI_INTERNAL_ERROR; + break; + } + + if (options->session_ticket_key != nullptr) { + if (SSL_CTX_set_tlsext_ticket_keys( + impl->ssl_contexts[i], + const_cast(options->session_ticket_key), + options->session_ticket_key_size) == 0) { + gpr_log(GPR_ERROR, "Invalid STEK size."); + result = TSI_INVALID_ARGUMENT; + break; + } + } + + if (options->pem_client_root_certs != nullptr) { + STACK_OF(X509_NAME)* root_names = nullptr; result = ssl_ctx_load_verification_certs( - impl->ssl_contexts[i], pem_client_root_certs, - strlen(pem_client_root_certs), &root_names); + impl->ssl_contexts[i], options->pem_client_root_certs, + strlen(options->pem_client_root_certs), &root_names); if (result != TSI_OK) { gpr_log(GPR_ERROR, "Invalid verification certs."); break; } SSL_CTX_set_client_CA_list(impl->ssl_contexts[i], root_names); - switch (client_certificate_request) { + switch (options->client_certificate_request) { case TSI_DONT_REQUEST_CLIENT_CERTIFICATE: - SSL_CTX_set_verify(impl->ssl_contexts[i], SSL_VERIFY_NONE, NULL); + SSL_CTX_set_verify(impl->ssl_contexts[i], SSL_VERIFY_NONE, nullptr); break; case TSI_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: SSL_CTX_set_verify(impl->ssl_contexts[i], SSL_VERIFY_PEER, NullVerifyCallback); break; case TSI_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: - SSL_CTX_set_verify(impl->ssl_contexts[i], SSL_VERIFY_PEER, NULL); + SSL_CTX_set_verify(impl->ssl_contexts[i], SSL_VERIFY_PEER, nullptr); break; case TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: SSL_CTX_set_verify( @@ -1510,14 +1741,14 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex( case TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: SSL_CTX_set_verify( impl->ssl_contexts[i], - SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL); + SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr); break; } /* TODO(jboeuf): Add revocation verification. */ } result = extract_x509_subject_names_from_pem_cert( - pem_key_cert_pairs[i].cert_chain, + options->pem_key_cert_pairs[i].cert_chain, &impl->ssl_context_x509_subject_names[i]); if (result != TSI_OK) break; @@ -1546,16 +1777,16 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex( /* --- tsi_ssl utils. --- */ -int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) { +int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name) { size_t i = 0; size_t san_count = 0; - const tsi_peer_property *cn_property = NULL; + const tsi_peer_property* cn_property = nullptr; int like_ip = looks_like_ip_address(name); /* Check the SAN first. 
*/ for (i = 0; i < peer->property_count; i++) { - const tsi_peer_property *property = &peer->properties[i]; - if (property->name == NULL) continue; + const tsi_peer_property* property = &peer->properties[i]; + if (property->name == nullptr) continue; if (strcmp(property->name, TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY) == 0) { san_count++; @@ -1577,7 +1808,7 @@ int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) { } /* If there's no SAN, try the CN, but only if its not like an IP Address */ - if (san_count == 0 && cn_property != NULL && !like_ip) { + if (san_count == 0 && cn_property != nullptr && !like_ip) { if (does_entry_match_name(cn_property->value.data, cn_property->value.length, name)) { return 1; @@ -1588,13 +1819,13 @@ int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) { } /* --- Testing support. --- */ -const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable( - tsi_ssl_handshaker_factory *factory, - tsi_ssl_handshaker_factory_vtable *new_vtable) { - GPR_ASSERT(factory != NULL); - GPR_ASSERT(factory->vtable != NULL); +const tsi_ssl_handshaker_factory_vtable* tsi_ssl_handshaker_factory_swap_vtable( + tsi_ssl_handshaker_factory* factory, + tsi_ssl_handshaker_factory_vtable* new_vtable) { + GPR_ASSERT(factory != nullptr); + GPR_ASSERT(factory->vtable != nullptr); - const tsi_ssl_handshaker_factory_vtable *orig_vtable = factory->vtable; + const tsi_ssl_handshaker_factory_vtable* orig_vtable = factory->vtable; factory->vtable = new_vtable; return orig_vtable; } diff --git a/Sources/CgRPC/src/core/tsi/ssl_transport_security.h b/Sources/CgRPC/src/core/tsi/ssl_transport_security.h index 3abfdf5ed..cabf58309 100644 --- a/Sources/CgRPC/src/core/tsi/ssl_transport_security.h +++ b/Sources/CgRPC/src/core/tsi/ssl_transport_security.h @@ -19,11 +19,9 @@ #ifndef GRPC_CORE_TSI_SSL_TRANSPORT_SECURITY_H #define GRPC_CORE_TSI_SSL_TRANSPORT_SECURITY_H -#include "src/core/tsi/transport_security_interface.h" +#include -#ifdef __cplusplus -extern "C" { -#endif +#include "src/core/tsi/transport_security_interface.h" /* Value for the TSI_CERTIFICATE_TYPE_PEER_PROPERTY property for X509 certs. */ #define TSI_X509_CERTIFICATE_TYPE "X509" @@ -32,11 +30,41 @@ extern "C" { #define TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY "x509_subject_common_name" #define TSI_X509_SUBJECT_ALTERNATIVE_NAME_PEER_PROPERTY \ "x509_subject_alternative_name" +#define TSI_SSL_SESSION_REUSED_PEER_PROPERTY "ssl_session_reused" #define TSI_X509_PEM_CERT_PROPERTY "x509_pem_cert" #define TSI_SSL_ALPN_SELECTED_PROTOCOL "ssl_alpn_selected_protocol" +/* --- tsi_ssl_root_certs_store object --- + + This object stores SSL root certificates. It can be shared by multiple SSL + context. */ +typedef struct tsi_ssl_root_certs_store tsi_ssl_root_certs_store; + +/* Given a NULL-terminated string containing the PEM encoding of the root + certificates, creates a tsi_ssl_root_certs_store object. */ +tsi_ssl_root_certs_store* tsi_ssl_root_certs_store_create( + const char* pem_roots); + +/* Destroys the tsi_ssl_root_certs_store object. */ +void tsi_ssl_root_certs_store_destroy(tsi_ssl_root_certs_store* self); + +/* --- tsi_ssl_session_cache object --- + + Cache for SSL sessions for sessions resumption. */ + +typedef struct tsi_ssl_session_cache tsi_ssl_session_cache; + +/* Create LRU cache for SSL sessions with \a capacity. */ +tsi_ssl_session_cache* tsi_ssl_session_cache_create_lru(size_t capacity); + +/* Increment reference counter of \a cache. 
*/ +void tsi_ssl_session_cache_ref(tsi_ssl_session_cache* cache); + +/* Decrement reference counter of \a cache. */ +void tsi_ssl_session_cache_unref(tsi_ssl_session_cache* cache); + /* --- tsi_ssl_client_handshaker_factory object --- This object creates a client tsi_handshaker objects implemented in terms of @@ -49,20 +77,20 @@ typedef struct tsi_ssl_client_handshaker_factory typedef struct { /* private_key is the NULL-terminated string containing the PEM encoding of the client's private key. */ - const char *private_key; + const char* private_key; /* cert_chain is the NULL-terminated string containing the PEM encoding of the client's certificate chain. */ - const char *cert_chain; + const char* cert_chain; } tsi_ssl_pem_key_cert_pair; -/* Creates a client handshaker factory. +/* TO BE DEPRECATED. + Creates a client handshaker factory. - pem_key_cert_pair is a pointer to the object containing client's private key and certificate chain. This parameter can be NULL if the client does not have such a key/cert pair. - pem_roots_cert is the NULL-terminated string containing the PEM encoding of - the client root certificates. This parameter may be NULL if the server does - not want the client to be authenticated with SSL. + the server root certificates. - cipher_suites contains an optional list of the ciphers that the client supports. The format of this string is described in: https://www.openssl.org/docs/apps/ciphers.html. @@ -78,10 +106,51 @@ typedef struct { - This method returns TSI_OK on success or TSI_INVALID_PARAMETER in the case where a parameter is invalid. */ tsi_result tsi_create_ssl_client_handshaker_factory( - const tsi_ssl_pem_key_cert_pair *pem_key_cert_pair, - const char *pem_root_certs, const char *cipher_suites, - const char **alpn_protocols, uint16_t num_alpn_protocols, - tsi_ssl_client_handshaker_factory **factory); + const tsi_ssl_pem_key_cert_pair* pem_key_cert_pair, + const char* pem_root_certs, const char* cipher_suites, + const char** alpn_protocols, uint16_t num_alpn_protocols, + tsi_ssl_client_handshaker_factory** factory); + +typedef struct { + /* pem_key_cert_pair is a pointer to the object containing client's private + key and certificate chain. This parameter can be NULL if the client does + not have such a key/cert pair. */ + const tsi_ssl_pem_key_cert_pair* pem_key_cert_pair; + /* pem_roots_cert is the NULL-terminated string containing the PEM encoding of + the client root certificates. */ + const char* pem_root_certs; + /* root_store is a pointer to the ssl_root_certs_store object. If root_store + is not nullptr and SSL implementation permits, root_store will be used as + root certificates. Otherwise, pem_roots_cert will be used to load server + root certificates. */ + const tsi_ssl_root_certs_store* root_store; + /* cipher_suites contains an optional list of the ciphers that the client + supports. The format of this string is described in: + https://www.openssl.org/docs/apps/ciphers.html. + This parameter can be set to NULL to use the default set of ciphers. + TODO(jboeuf): Revisit the format of this parameter. */ + const char* cipher_suites; + /* alpn_protocols is an array containing the NULL terminated protocol names + that the handshakers created with this factory support. This parameter can + be NULL. */ + const char** alpn_protocols; + /* num_alpn_protocols is the number of alpn protocols and associated lengths + specified. If this parameter is 0, the other alpn parameters must be + NULL. 
*/ + size_t num_alpn_protocols; + /* ssl_session_cache is a cache for reusable client-side sessions. */ + tsi_ssl_session_cache* session_cache; +} tsi_ssl_client_handshaker_options; + +/* Creates a client handshaker factory. + - options is the options used to create a factory. + - factory is the address of the factory pointer to be created. + + - This method returns TSI_OK on success or TSI_INVALID_PARAMETER in the case + where a parameter is invalid. */ +tsi_result tsi_create_ssl_client_handshaker_factory_with_options( + const tsi_ssl_client_handshaker_options* options, + tsi_ssl_client_handshaker_factory** factory); /* Creates a client handshaker. - self is the factory from which the handshaker will be created. @@ -93,13 +162,13 @@ tsi_result tsi_create_ssl_client_handshaker_factory( - This method returns TSI_OK on success or TSI_INVALID_PARAMETER in the case where a parameter is invalid. */ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker( - tsi_ssl_client_handshaker_factory *self, const char *server_name_indication, - tsi_handshaker **handshaker); + tsi_ssl_client_handshaker_factory* self, const char* server_name_indication, + tsi_handshaker** handshaker); /* Decrements reference count of the handshaker factory. Handshaker factory will * be destroyed once no references exist. */ void tsi_ssl_client_handshaker_factory_unref( - tsi_ssl_client_handshaker_factory *factory); + tsi_ssl_client_handshaker_factory* factory); /* --- tsi_ssl_server_handshaker_factory object --- @@ -109,12 +178,14 @@ void tsi_ssl_client_handshaker_factory_unref( typedef struct tsi_ssl_server_handshaker_factory tsi_ssl_server_handshaker_factory; -/* Creates a server handshaker factory. +/* TO BE DEPRECATED. + Creates a server handshaker factory. - pem_key_cert_pairs is an array private key / certificate chains of the server. - num_key_cert_pairs is the number of items in the pem_key_cert_pairs array. - pem_root_certs is the NULL-terminated string containing the PEM encoding - of the server root certificates. + of the client root certificates. This parameter may be NULL if the server + does not want the client to be authenticated with SSL. - cipher_suites contains an optional list of the ciphers that the server supports. The format of this string is described in: https://www.openssl.org/docs/apps/ciphers.html. @@ -130,24 +201,71 @@ typedef struct tsi_ssl_server_handshaker_factory - This method returns TSI_OK on success or TSI_INVALID_PARAMETER in the case where a parameter is invalid. */ tsi_result tsi_create_ssl_server_handshaker_factory( - const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs, - size_t num_key_cert_pairs, const char *pem_client_root_certs, - int force_client_auth, const char *cipher_suites, - const char **alpn_protocols, uint16_t num_alpn_protocols, - tsi_ssl_server_handshaker_factory **factory); - -/* Same as tsi_create_ssl_server_handshaker_factory method except uses + const tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs, const char* pem_client_root_certs, + int force_client_auth, const char* cipher_suites, + const char** alpn_protocols, uint16_t num_alpn_protocols, + tsi_ssl_server_handshaker_factory** factory); + +/* TO BE DEPRECATED. + Same as tsi_create_ssl_server_handshaker_factory method except uses tsi_client_certificate_request_type to support more ways to handle client certificate authentication. - client_certificate_request, if set to non-zero will force the client to authenticate with an SSL cert. 
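For reference, a minimal sketch (not part of the upstream patch) of how a caller might populate the new tsi_ssl_client_handshaker_options rather than the deprecated positional factory; the helper name and the pem_roots buffer are illustrative assumptions:

#include <string.h>

#include "src/core/tsi/ssl_transport_security.h"

// Hypothetical helper: build a client factory from a PEM root bundle and an
// optional session cache for client-side session resumption.
tsi_result create_client_factory(const char* pem_roots,
                                 tsi_ssl_session_cache* cache,
                                 tsi_ssl_client_handshaker_factory** factory) {
  tsi_ssl_client_handshaker_options options;
  memset(&options, 0, sizeof(options));
  options.pem_root_certs = pem_roots;  // server roots, PEM, NULL-terminated
  options.session_cache = cache;       // may be nullptr to disable resumption
  return tsi_create_ssl_client_handshaker_factory_with_options(&options,
                                                               factory);
}

The cache would typically come from tsi_ssl_session_cache_create_lru() and be released with tsi_ssl_session_cache_unref() once it is no longer needed.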
Note that this option is ignored if pem_client_root_certs is NULL or pem_client_roots_certs_size is 0 */ tsi_result tsi_create_ssl_server_handshaker_factory_ex( - const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs, - size_t num_key_cert_pairs, const char *pem_client_root_certs, + const tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs, + size_t num_key_cert_pairs, const char* pem_client_root_certs, tsi_client_certificate_request_type client_certificate_request, - const char *cipher_suites, const char **alpn_protocols, - uint16_t num_alpn_protocols, tsi_ssl_server_handshaker_factory **factory); + const char* cipher_suites, const char** alpn_protocols, + uint16_t num_alpn_protocols, tsi_ssl_server_handshaker_factory** factory); + +typedef struct { + /* pem_key_cert_pairs is an array private key / certificate chains of the + server. */ + const tsi_ssl_pem_key_cert_pair* pem_key_cert_pairs; + /* num_key_cert_pairs is the number of items in the pem_key_cert_pairs + array. */ + size_t num_key_cert_pairs; + /* pem_root_certs is the NULL-terminated string containing the PEM encoding + of the server root certificates. This parameter may be NULL if the server + does not want the client to be authenticated with SSL. */ + const char* pem_client_root_certs; + /* client_certificate_request, if set to non-zero will force the client to + authenticate with an SSL cert. Note that this option is ignored if + pem_client_root_certs is NULL or pem_client_roots_certs_size is 0. */ + tsi_client_certificate_request_type client_certificate_request; + /* cipher_suites contains an optional list of the ciphers that the server + supports. The format of this string is described in: + https://www.openssl.org/docs/apps/ciphers.html. + This parameter can be set to NULL to use the default set of ciphers. + TODO(jboeuf): Revisit the format of this parameter. */ + const char* cipher_suites; + /* alpn_protocols is an array containing the NULL terminated protocol names + that the handshakers created with this factory support. This parameter can + be NULL. */ + const char** alpn_protocols; + /* num_alpn_protocols is the number of alpn protocols and associated lengths + specified. If this parameter is 0, the other alpn parameters must be + NULL. */ + uint16_t num_alpn_protocols; + /* session_ticket_key is optional key for encrypting session keys. If paramter + is not specified it must be NULL. */ + const char* session_ticket_key; + /* session_ticket_key_size is a size of session ticket encryption key. */ + size_t session_ticket_key_size; +} tsi_ssl_server_handshaker_options; + +/* Creates a server handshaker factory. + - options is the options used to create a factory. + - factory is the address of the factory pointer to be created. + + - This method returns TSI_OK on success or TSI_INVALID_PARAMETER in the case + where a parameter is invalid. */ +tsi_result tsi_create_ssl_server_handshaker_factory_with_options( + const tsi_ssl_server_handshaker_options* options, + tsi_ssl_server_handshaker_factory** factory); /* Creates a server handshaker. - self is the factory from which the handshaker will be created. @@ -156,19 +274,19 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex( - This method returns TSI_OK on success or TSI_INVALID_PARAMETER in the case where a parameter is invalid. 
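A corresponding illustrative sketch for the server side (helper name and inputs are hypothetical); the session ticket key is only set when the deployment actually shares a STEK across server instances:

#include <string.h>

#include "src/core/tsi/ssl_transport_security.h"

// Hypothetical helper: one key/cert pair, mutual TLS, optional shared STEK.
tsi_result create_server_factory(
    const tsi_ssl_pem_key_cert_pair* key_cert_pair,
    const char* pem_client_roots, const char* ticket_key,
    size_t ticket_key_size, tsi_ssl_server_handshaker_factory** factory) {
  tsi_ssl_server_handshaker_options options;
  memset(&options, 0, sizeof(options));
  options.pem_key_cert_pairs = key_cert_pair;
  options.num_key_cert_pairs = 1;
  options.pem_client_root_certs = pem_client_roots;
  options.client_certificate_request =
      TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY;
  // Optional session ticket encryption key; a size the SSL library rejects
  // makes the factory fail with TSI_INVALID_ARGUMENT ("Invalid STEK size").
  options.session_ticket_key = ticket_key;
  options.session_ticket_key_size = ticket_key_size;
  return tsi_create_ssl_server_handshaker_factory_with_options(&options,
                                                               factory);
}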
*/ tsi_result tsi_ssl_server_handshaker_factory_create_handshaker( - tsi_ssl_server_handshaker_factory *self, tsi_handshaker **handshaker); + tsi_ssl_server_handshaker_factory* self, tsi_handshaker** handshaker); /* Decrements reference count of the handshaker factory. Handshaker factory will * be destroyed once no references exist. */ void tsi_ssl_server_handshaker_factory_unref( - tsi_ssl_server_handshaker_factory *self); + tsi_ssl_server_handshaker_factory* self); /* Util that checks that an ssl peer matches a specific name. Still TODO(jboeuf): - handle mixed case. - handle %encoded chars. - handle public suffix wildchar more strictly (e.g. *.co.uk) */ -int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name); +int tsi_ssl_peer_matches_name(const tsi_peer* peer, const char* name); /* --- Testing support. --- @@ -180,7 +298,7 @@ typedef struct tsi_ssl_handshaker_factory tsi_ssl_handshaker_factory; /* Function pointer to handshaker_factory destructor. */ typedef void (*tsi_ssl_handshaker_factory_destructor)( - tsi_ssl_handshaker_factory *factory); + tsi_ssl_handshaker_factory* factory); /* Virtual table for tsi_ssl_handshaker_factory. */ typedef struct { @@ -189,12 +307,8 @@ typedef struct { /* Set destructor of handshaker_factory to new_destructor, returns previous destructor. */ -const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable( - tsi_ssl_handshaker_factory *factory, - tsi_ssl_handshaker_factory_vtable *new_vtable); - -#ifdef __cplusplus -} -#endif +const tsi_ssl_handshaker_factory_vtable* tsi_ssl_handshaker_factory_swap_vtable( + tsi_ssl_handshaker_factory* factory, + tsi_ssl_handshaker_factory_vtable* new_vtable); #endif /* GRPC_CORE_TSI_SSL_TRANSPORT_SECURITY_H */ diff --git a/Sources/CgRPC/src/core/tsi/ssl_types.h b/Sources/CgRPC/src/core/tsi/ssl_types.h index 378864335..b15d02be3 100644 --- a/Sources/CgRPC/src/core/tsi/ssl_types.h +++ b/Sources/CgRPC/src/core/tsi/ssl_types.h @@ -27,6 +27,8 @@ * function */ +#include + #include #ifdef OPENSSL_IS_BORINGSSL diff --git a/Sources/CgRPC/src/core/tsi/transport_security.c b/Sources/CgRPC/src/core/tsi/transport_security.cc similarity index 57% rename from Sources/CgRPC/src/core/tsi/transport_security.c rename to Sources/CgRPC/src/core/tsi/transport_security.cc index 76213072a..129533f77 100644 --- a/Sources/CgRPC/src/core/tsi/transport_security.c +++ b/Sources/CgRPC/src/core/tsi/transport_security.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/tsi/transport_security.h" #include @@ -26,11 +28,11 @@ /* --- Tracing. --- */ -grpc_tracer_flag tsi_tracing_enabled = GRPC_TRACER_INITIALIZER(false, "tsi"); +grpc_core::TraceFlag tsi_tracing_enabled(false, "tsi"); /* --- tsi_result common implementation. --- */ -const char *tsi_result_to_string(tsi_result result) { +const char* tsi_result_to_string(tsi_result result) { switch (result) { case TSI_OK: return "TSI_OK"; @@ -69,52 +71,56 @@ const char *tsi_result_to_string(tsi_result result) { Calls specific implementation after state/input validation. 
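Since tsi_ssl_peer_matches_name() is the piece most callers touch after a handshake, a short illustrative check follows (the helper name and expected_host are assumptions; the peer would come from tsi_handshaker_result_extract_peer()):

#include <string.h>

#include "src/core/tsi/ssl_transport_security.h"

// Hypothetical helper: verify the peer identity and inspect the ALPN result.
bool server_identity_ok(const tsi_peer* peer, const char* expected_host) {
  // SANs are checked first; the CN is only consulted when there is no SAN and
  // expected_host does not look like an IP address.
  if (tsi_ssl_peer_matches_name(peer, expected_host) == 0) return false;
  for (size_t i = 0; i < peer->property_count; i++) {
    const tsi_peer_property* prop = &peer->properties[i];
    if (prop->name != nullptr &&
        strcmp(prop->name, TSI_SSL_ALPN_SELECTED_PROTOCOL) == 0) {
      // prop->value.data / prop->value.length hold the negotiated protocol
      // (not necessarily NUL-terminated).
    }
  }
  return true;
}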
*/ -tsi_result tsi_frame_protector_protect(tsi_frame_protector *self, - const unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size, - unsigned char *protected_output_frames, - size_t *protected_output_frames_size) { - if (self == NULL || self->vtable == NULL || unprotected_bytes == NULL || - unprotected_bytes_size == NULL || protected_output_frames == NULL || - protected_output_frames_size == NULL) { +tsi_result tsi_frame_protector_protect(tsi_frame_protector* self, + const unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size, + unsigned char* protected_output_frames, + size_t* protected_output_frames_size) { + if (self == nullptr || self->vtable == nullptr || + unprotected_bytes == nullptr || unprotected_bytes_size == nullptr || + protected_output_frames == nullptr || + protected_output_frames_size == nullptr) { return TSI_INVALID_ARGUMENT; } - if (self->vtable->protect == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->protect == nullptr) return TSI_UNIMPLEMENTED; return self->vtable->protect(self, unprotected_bytes, unprotected_bytes_size, protected_output_frames, protected_output_frames_size); } tsi_result tsi_frame_protector_protect_flush( - tsi_frame_protector *self, unsigned char *protected_output_frames, - size_t *protected_output_frames_size, size_t *still_pending_size) { - if (self == NULL || self->vtable == NULL || protected_output_frames == NULL || - protected_output_frames_size == NULL || still_pending_size == NULL) { + tsi_frame_protector* self, unsigned char* protected_output_frames, + size_t* protected_output_frames_size, size_t* still_pending_size) { + if (self == nullptr || self->vtable == nullptr || + protected_output_frames == nullptr || + protected_output_frames_size == nullptr || + still_pending_size == nullptr) { return TSI_INVALID_ARGUMENT; } - if (self->vtable->protect_flush == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->protect_flush == nullptr) return TSI_UNIMPLEMENTED; return self->vtable->protect_flush(self, protected_output_frames, protected_output_frames_size, still_pending_size); } tsi_result tsi_frame_protector_unprotect( - tsi_frame_protector *self, const unsigned char *protected_frames_bytes, - size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size) { - if (self == NULL || self->vtable == NULL || protected_frames_bytes == NULL || - protected_frames_bytes_size == NULL || unprotected_bytes == NULL || - unprotected_bytes_size == NULL) { + tsi_frame_protector* self, const unsigned char* protected_frames_bytes, + size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size) { + if (self == nullptr || self->vtable == nullptr || + protected_frames_bytes == nullptr || + protected_frames_bytes_size == nullptr || unprotected_bytes == nullptr || + unprotected_bytes_size == nullptr) { return TSI_INVALID_ARGUMENT; } - if (self->vtable->unprotect == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->unprotect == nullptr) return TSI_UNIMPLEMENTED; return self->vtable->unprotect(self, protected_frames_bytes, protected_frames_bytes_size, unprotected_bytes, unprotected_bytes_size); } -void tsi_frame_protector_destroy(tsi_frame_protector *self) { - if (self == NULL) return; +void tsi_frame_protector_destroy(tsi_frame_protector* self) { + if (self == nullptr) return; self->vtable->destroy(self); } @@ -122,39 +128,41 @@ void tsi_frame_protector_destroy(tsi_frame_protector *self) { Calls specific implementation after state/input validation. 
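The in/out size convention of the frame protector is easiest to see in the canonical calling pattern; the sketch below is illustrative, with send_frame() standing in for whatever the caller does with each protected frame:

#include "src/core/tsi/transport_security_interface.h"

// Hypothetical helper: protect a buffer, then flush the protector's tail.
tsi_result protect_and_send(tsi_frame_protector* protector,
                            const unsigned char* data, size_t data_size) {
  unsigned char frame[1024];
  while (data_size > 0) {
    size_t consumed = data_size;        // in: bytes available, out: consumed
    size_t frame_size = sizeof(frame);  // in: capacity, out: bytes produced
    tsi_result result = tsi_frame_protector_protect(protector, data, &consumed,
                                                    frame, &frame_size);
    if (result != TSI_OK) return result;
    // send_frame(frame, frame_size);   // hypothetical transport write
    data += consumed;
    data_size -= consumed;
  }
  size_t still_pending = 0;
  do {  // Flush whatever the protector has buffered internally.
    size_t frame_size = sizeof(frame);
    tsi_result result = tsi_frame_protector_protect_flush(
        protector, frame, &frame_size, &still_pending);
    if (result != TSI_OK) return result;
    // send_frame(frame, frame_size);
  } while (still_pending > 0);
  return TSI_OK;
}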
*/ -tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self, - unsigned char *bytes, - size_t *bytes_size) { - if (self == NULL || self->vtable == NULL || bytes == NULL || - bytes_size == NULL) { +tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self, + unsigned char* bytes, + size_t* bytes_size) { + if (self == nullptr || self->vtable == nullptr || bytes == nullptr || + bytes_size == nullptr) { return TSI_INVALID_ARGUMENT; } if (self->frame_protector_created) return TSI_FAILED_PRECONDITION; - if (self->vtable->get_bytes_to_send_to_peer == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->get_bytes_to_send_to_peer == nullptr) + return TSI_UNIMPLEMENTED; return self->vtable->get_bytes_to_send_to_peer(self, bytes, bytes_size); } -tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self, - const unsigned char *bytes, - size_t *bytes_size) { - if (self == NULL || self->vtable == NULL || bytes == NULL || - bytes_size == NULL) { +tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker* self, + const unsigned char* bytes, + size_t* bytes_size) { + if (self == nullptr || self->vtable == nullptr || bytes == nullptr || + bytes_size == nullptr) { return TSI_INVALID_ARGUMENT; } if (self->frame_protector_created) return TSI_FAILED_PRECONDITION; - if (self->vtable->process_bytes_from_peer == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->process_bytes_from_peer == nullptr) + return TSI_UNIMPLEMENTED; return self->vtable->process_bytes_from_peer(self, bytes, bytes_size); } -tsi_result tsi_handshaker_get_result(tsi_handshaker *self) { - if (self == NULL || self->vtable == NULL) return TSI_INVALID_ARGUMENT; +tsi_result tsi_handshaker_get_result(tsi_handshaker* self) { + if (self == nullptr || self->vtable == nullptr) return TSI_INVALID_ARGUMENT; if (self->frame_protector_created) return TSI_FAILED_PRECONDITION; - if (self->vtable->get_result == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->get_result == nullptr) return TSI_UNIMPLEMENTED; return self->vtable->get_result(self); } -tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer) { - if (self == NULL || self->vtable == NULL || peer == NULL) { +tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer) { + if (self == nullptr || self->vtable == nullptr || peer == nullptr) { return TSI_INVALID_ARGUMENT; } memset(peer, 0, sizeof(tsi_peer)); @@ -162,20 +170,20 @@ tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer) { if (tsi_handshaker_get_result(self) != TSI_OK) { return TSI_FAILED_PRECONDITION; } - if (self->vtable->extract_peer == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->extract_peer == nullptr) return TSI_UNIMPLEMENTED; return self->vtable->extract_peer(self, peer); } tsi_result tsi_handshaker_create_frame_protector( - tsi_handshaker *self, size_t *max_protected_frame_size, - tsi_frame_protector **protector) { + tsi_handshaker* self, size_t* max_protected_frame_size, + tsi_frame_protector** protector) { tsi_result result; - if (self == NULL || self->vtable == NULL || protector == NULL) { + if (self == nullptr || self->vtable == nullptr || protector == nullptr) { return TSI_INVALID_ARGUMENT; } if (self->frame_protector_created) return TSI_FAILED_PRECONDITION; if (tsi_handshaker_get_result(self) != TSI_OK) return TSI_FAILED_PRECONDITION; - if (self->vtable->create_frame_protector == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->create_frame_protector == nullptr) return TSI_UNIMPLEMENTED; result = 
self->vtable->create_frame_protector(self, max_protected_frame_size, protector); if (result == TSI_OK) { @@ -185,59 +193,59 @@ tsi_result tsi_handshaker_create_frame_protector( } tsi_result tsi_handshaker_next( - tsi_handshaker *self, const unsigned char *received_bytes, - size_t received_bytes_size, const unsigned char **bytes_to_send, - size_t *bytes_to_send_size, tsi_handshaker_result **handshaker_result, - tsi_handshaker_on_next_done_cb cb, void *user_data) { - if (self == NULL || self->vtable == NULL) return TSI_INVALID_ARGUMENT; + tsi_handshaker* self, const unsigned char* received_bytes, + size_t received_bytes_size, const unsigned char** bytes_to_send, + size_t* bytes_to_send_size, tsi_handshaker_result** handshaker_result, + tsi_handshaker_on_next_done_cb cb, void* user_data) { + if (self == nullptr || self->vtable == nullptr) return TSI_INVALID_ARGUMENT; if (self->handshaker_result_created) return TSI_FAILED_PRECONDITION; - if (self->vtable->next == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->next == nullptr) return TSI_UNIMPLEMENTED; return self->vtable->next(self, received_bytes, received_bytes_size, bytes_to_send, bytes_to_send_size, handshaker_result, cb, user_data); } -void tsi_handshaker_destroy(tsi_handshaker *self) { - if (self == NULL) return; +void tsi_handshaker_destroy(tsi_handshaker* self) { + if (self == nullptr) return; self->vtable->destroy(self); } /* --- tsi_handshaker_result implementation. --- */ -tsi_result tsi_handshaker_result_extract_peer(const tsi_handshaker_result *self, - tsi_peer *peer) { - if (self == NULL || self->vtable == NULL || peer == NULL) { +tsi_result tsi_handshaker_result_extract_peer(const tsi_handshaker_result* self, + tsi_peer* peer) { + if (self == nullptr || self->vtable == nullptr || peer == nullptr) { return TSI_INVALID_ARGUMENT; } memset(peer, 0, sizeof(tsi_peer)); - if (self->vtable->extract_peer == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->extract_peer == nullptr) return TSI_UNIMPLEMENTED; return self->vtable->extract_peer(self, peer); } tsi_result tsi_handshaker_result_create_frame_protector( - const tsi_handshaker_result *self, size_t *max_protected_frame_size, - tsi_frame_protector **protector) { - if (self == NULL || self->vtable == NULL || protector == NULL) { + const tsi_handshaker_result* self, size_t* max_protected_frame_size, + tsi_frame_protector** protector) { + if (self == nullptr || self->vtable == nullptr || protector == nullptr) { return TSI_INVALID_ARGUMENT; } - if (self->vtable->create_frame_protector == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->create_frame_protector == nullptr) return TSI_UNIMPLEMENTED; return self->vtable->create_frame_protector(self, max_protected_frame_size, protector); } tsi_result tsi_handshaker_result_get_unused_bytes( - const tsi_handshaker_result *self, const unsigned char **bytes, - size_t *bytes_size) { - if (self == NULL || self->vtable == NULL || bytes == NULL || - bytes_size == NULL) { + const tsi_handshaker_result* self, const unsigned char** bytes, + size_t* bytes_size) { + if (self == nullptr || self->vtable == nullptr || bytes == nullptr || + bytes_size == nullptr) { return TSI_INVALID_ARGUMENT; } - if (self->vtable->get_unused_bytes == NULL) return TSI_UNIMPLEMENTED; + if (self->vtable->get_unused_bytes == nullptr) return TSI_UNIMPLEMENTED; return self->vtable->get_unused_bytes(self, bytes, bytes_size); } -void tsi_handshaker_result_destroy(tsi_handshaker_result *self) { - if (self == NULL) return; +void 
tsi_handshaker_result_destroy(tsi_handshaker_result* self) { + if (self == nullptr) return; self->vtable->destroy(self); } @@ -249,7 +257,7 @@ tsi_peer_property tsi_init_peer_property(void) { return property; } -static void tsi_peer_destroy_list_property(tsi_peer_property *children, +static void tsi_peer_destroy_list_property(tsi_peer_property* children, size_t child_count) { size_t i; for (i = 0; i < child_count; i++) { @@ -258,46 +266,46 @@ static void tsi_peer_destroy_list_property(tsi_peer_property *children, gpr_free(children); } -void tsi_peer_property_destruct(tsi_peer_property *property) { - if (property->name != NULL) { +void tsi_peer_property_destruct(tsi_peer_property* property) { + if (property->name != nullptr) { gpr_free(property->name); } - if (property->value.data != NULL) { + if (property->value.data != nullptr) { gpr_free(property->value.data); } *property = tsi_init_peer_property(); /* Reset everything to 0. */ } -void tsi_peer_destruct(tsi_peer *self) { - if (self == NULL) return; - if (self->properties != NULL) { +void tsi_peer_destruct(tsi_peer* self) { + if (self == nullptr) return; + if (self->properties != nullptr) { tsi_peer_destroy_list_property(self->properties, self->property_count); - self->properties = NULL; + self->properties = nullptr; } self->property_count = 0; } tsi_result tsi_construct_allocated_string_peer_property( - const char *name, size_t value_length, tsi_peer_property *property) { + const char* name, size_t value_length, tsi_peer_property* property) { *property = tsi_init_peer_property(); - if (name != NULL) property->name = gpr_strdup(name); + if (name != nullptr) property->name = gpr_strdup(name); if (value_length > 0) { - property->value.data = gpr_zalloc(value_length); + property->value.data = static_cast(gpr_zalloc(value_length)); property->value.length = value_length; } return TSI_OK; } tsi_result tsi_construct_string_peer_property_from_cstring( - const char *name, const char *value, tsi_peer_property *property) { + const char* name, const char* value, tsi_peer_property* property) { return tsi_construct_string_peer_property(name, value, strlen(value), property); } -tsi_result tsi_construct_string_peer_property(const char *name, - const char *value, +tsi_result tsi_construct_string_peer_property(const char* name, + const char* value, size_t value_length, - tsi_peer_property *property) { + tsi_peer_property* property) { tsi_result result = tsi_construct_allocated_string_peer_property( name, value_length, property); if (result != TSI_OK) return result; @@ -307,10 +315,11 @@ tsi_result tsi_construct_string_peer_property(const char *name, return TSI_OK; } -tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer) { +tsi_result tsi_construct_peer(size_t property_count, tsi_peer* peer) { memset(peer, 0, sizeof(tsi_peer)); if (property_count > 0) { - peer->properties = gpr_zalloc(property_count * sizeof(tsi_peer_property)); + peer->properties = static_cast( + gpr_zalloc(property_count * sizeof(tsi_peer_property))); peer->property_count = property_count; } return TSI_OK; diff --git a/Sources/CgRPC/src/core/tsi/transport_security.h b/Sources/CgRPC/src/core/tsi/transport_security.h index 3bba38149..b1ec82d3f 100644 --- a/Sources/CgRPC/src/core/tsi/transport_security.h +++ b/Sources/CgRPC/src/core/tsi/transport_security.h @@ -19,66 +19,64 @@ #ifndef GRPC_CORE_TSI_TRANSPORT_SECURITY_H #define GRPC_CORE_TSI_TRANSPORT_SECURITY_H +#include + #include #include "src/core/lib/debug/trace.h" #include "src/core/tsi/transport_security_interface.h" 
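These constructors are what concrete TSI implementations use to publish peer attributes; a small illustrative sketch (the property values are placeholders) might look like:

#include "src/core/tsi/ssl_transport_security.h"
#include "src/core/tsi/transport_security.h"

// Hypothetical helper: build a two-property peer, cleaning up on failure.
tsi_result build_example_peer(tsi_peer* peer) {
  tsi_result result = tsi_construct_peer(2, peer);
  if (result != TSI_OK) return result;
  result = tsi_construct_string_peer_property_from_cstring(
      TSI_CERTIFICATE_TYPE_PEER_PROPERTY, TSI_X509_CERTIFICATE_TYPE,
      &peer->properties[0]);
  if (result == TSI_OK) {
    result = tsi_construct_string_peer_property_from_cstring(
        TSI_X509_SUBJECT_COMMON_NAME_PEER_PROPERTY, "example.internal",
        &peer->properties[1]);
  }
  if (result != TSI_OK) tsi_peer_destruct(peer);  // safe on partially built peers
  return result;
}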
-#ifdef __cplusplus -extern "C" { -#endif - -extern grpc_tracer_flag tsi_tracing_enabled; +extern grpc_core::TraceFlag tsi_tracing_enabled; /* Base for tsi_frame_protector implementations. See transport_security_interface.h for documentation. */ typedef struct { - tsi_result (*protect)(tsi_frame_protector *self, - const unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size, - unsigned char *protected_output_frames, - size_t *protected_output_frames_size); - tsi_result (*protect_flush)(tsi_frame_protector *self, - unsigned char *protected_output_frames, - size_t *protected_output_frames_size, - size_t *still_pending_size); - tsi_result (*unprotect)(tsi_frame_protector *self, - const unsigned char *protected_frames_bytes, - size_t *protected_frames_bytes_size, - unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size); - void (*destroy)(tsi_frame_protector *self); + tsi_result (*protect)(tsi_frame_protector* self, + const unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size, + unsigned char* protected_output_frames, + size_t* protected_output_frames_size); + tsi_result (*protect_flush)(tsi_frame_protector* self, + unsigned char* protected_output_frames, + size_t* protected_output_frames_size, + size_t* still_pending_size); + tsi_result (*unprotect)(tsi_frame_protector* self, + const unsigned char* protected_frames_bytes, + size_t* protected_frames_bytes_size, + unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size); + void (*destroy)(tsi_frame_protector* self); } tsi_frame_protector_vtable; struct tsi_frame_protector { - const tsi_frame_protector_vtable *vtable; + const tsi_frame_protector_vtable* vtable; }; /* Base for tsi_handshaker implementations. See transport_security_interface.h for documentation. 
*/ typedef struct { - tsi_result (*get_bytes_to_send_to_peer)(tsi_handshaker *self, - unsigned char *bytes, - size_t *bytes_size); - tsi_result (*process_bytes_from_peer)(tsi_handshaker *self, - const unsigned char *bytes, - size_t *bytes_size); - tsi_result (*get_result)(tsi_handshaker *self); - tsi_result (*extract_peer)(tsi_handshaker *self, tsi_peer *peer); - tsi_result (*create_frame_protector)(tsi_handshaker *self, - size_t *max_protected_frame_size, - tsi_frame_protector **protector); - void (*destroy)(tsi_handshaker *self); - tsi_result (*next)(tsi_handshaker *self, const unsigned char *received_bytes, + tsi_result (*get_bytes_to_send_to_peer)(tsi_handshaker* self, + unsigned char* bytes, + size_t* bytes_size); + tsi_result (*process_bytes_from_peer)(tsi_handshaker* self, + const unsigned char* bytes, + size_t* bytes_size); + tsi_result (*get_result)(tsi_handshaker* self); + tsi_result (*extract_peer)(tsi_handshaker* self, tsi_peer* peer); + tsi_result (*create_frame_protector)(tsi_handshaker* self, + size_t* max_protected_frame_size, + tsi_frame_protector** protector); + void (*destroy)(tsi_handshaker* self); + tsi_result (*next)(tsi_handshaker* self, const unsigned char* received_bytes, size_t received_bytes_size, - const unsigned char **bytes_to_send, - size_t *bytes_to_send_size, - tsi_handshaker_result **handshaker_result, - tsi_handshaker_on_next_done_cb cb, void *user_data); + const unsigned char** bytes_to_send, + size_t* bytes_to_send_size, + tsi_handshaker_result** handshaker_result, + tsi_handshaker_on_next_done_cb cb, void* user_data); } tsi_handshaker_vtable; struct tsi_handshaker { - const tsi_handshaker_vtable *vtable; + const tsi_handshaker_vtable* vtable; bool frame_protector_created; bool handshaker_result_created; }; @@ -92,42 +90,38 @@ struct tsi_handshaker { needs to compile in other applications, where grpc_exec_ctx is not defined. */ typedef struct { - tsi_result (*extract_peer)(const tsi_handshaker_result *self, tsi_peer *peer); + tsi_result (*extract_peer)(const tsi_handshaker_result* self, tsi_peer* peer); tsi_result (*create_zero_copy_grpc_protector)( - void *exec_ctx, const tsi_handshaker_result *self, - size_t *max_output_protected_frame_size, - tsi_zero_copy_grpc_protector **protector); - tsi_result (*create_frame_protector)(const tsi_handshaker_result *self, - size_t *max_output_protected_frame_size, - tsi_frame_protector **protector); - tsi_result (*get_unused_bytes)(const tsi_handshaker_result *self, - const unsigned char **bytes, - size_t *bytes_size); - void (*destroy)(tsi_handshaker_result *self); + const tsi_handshaker_result* self, + size_t* max_output_protected_frame_size, + tsi_zero_copy_grpc_protector** protector); + tsi_result (*create_frame_protector)(const tsi_handshaker_result* self, + size_t* max_output_protected_frame_size, + tsi_frame_protector** protector); + tsi_result (*get_unused_bytes)(const tsi_handshaker_result* self, + const unsigned char** bytes, + size_t* bytes_size); + void (*destroy)(tsi_handshaker_result* self); } tsi_handshaker_result_vtable; struct tsi_handshaker_result { - const tsi_handshaker_result_vtable *vtable; + const tsi_handshaker_result_vtable* vtable; }; /* Peer and property construction/destruction functions. 
*/ -tsi_result tsi_construct_peer(size_t property_count, tsi_peer *peer); +tsi_result tsi_construct_peer(size_t property_count, tsi_peer* peer); tsi_peer_property tsi_init_peer_property(void); -void tsi_peer_property_destruct(tsi_peer_property *property); -tsi_result tsi_construct_string_peer_property(const char *name, - const char *value, +void tsi_peer_property_destruct(tsi_peer_property* property); +tsi_result tsi_construct_string_peer_property(const char* name, + const char* value, size_t value_length, - tsi_peer_property *property); + tsi_peer_property* property); tsi_result tsi_construct_allocated_string_peer_property( - const char *name, size_t value_length, tsi_peer_property *property); + const char* name, size_t value_length, tsi_peer_property* property); tsi_result tsi_construct_string_peer_property_from_cstring( - const char *name, const char *value, tsi_peer_property *property); + const char* name, const char* value, tsi_peer_property* property); /* Utils. */ -char *tsi_strdup(const char *src); /* Sadly, no strdup in C89. */ - -#ifdef __cplusplus -} -#endif +char* tsi_strdup(const char* src); /* Sadly, no strdup in C89. */ #endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_H */ diff --git a/Sources/CgRPC/src/core/tsi/transport_security_adapter.c b/Sources/CgRPC/src/core/tsi/transport_security_adapter.cc similarity index 62% rename from Sources/CgRPC/src/core/tsi/transport_security_adapter.c rename to Sources/CgRPC/src/core/tsi/transport_security_adapter.cc index 1c2a57b3b..25608f065 100644 --- a/Sources/CgRPC/src/core/tsi/transport_security_adapter.c +++ b/Sources/CgRPC/src/core/tsi/transport_security_adapter.cc @@ -16,6 +16,8 @@ * */ +#include + #include "src/core/tsi/transport_security_adapter.h" #include @@ -30,36 +32,37 @@ typedef struct { tsi_handshaker_result base; - tsi_handshaker *wrapped; - unsigned char *unused_bytes; + tsi_handshaker* wrapped; + unsigned char* unused_bytes; size_t unused_bytes_size; } tsi_adapter_handshaker_result; -static tsi_result adapter_result_extract_peer(const tsi_handshaker_result *self, - tsi_peer *peer) { - tsi_adapter_handshaker_result *impl = (tsi_adapter_handshaker_result *)self; +static tsi_result adapter_result_extract_peer(const tsi_handshaker_result* self, + tsi_peer* peer) { + tsi_adapter_handshaker_result* impl = (tsi_adapter_handshaker_result*)self; return tsi_handshaker_extract_peer(impl->wrapped, peer); } static tsi_result adapter_result_create_frame_protector( - const tsi_handshaker_result *self, size_t *max_output_protected_frame_size, - tsi_frame_protector **protector) { - tsi_adapter_handshaker_result *impl = (tsi_adapter_handshaker_result *)self; + const tsi_handshaker_result* self, size_t* max_output_protected_frame_size, + tsi_frame_protector** protector) { + tsi_adapter_handshaker_result* impl = (tsi_adapter_handshaker_result*)self; return tsi_handshaker_create_frame_protector( impl->wrapped, max_output_protected_frame_size, protector); } static tsi_result adapter_result_get_unused_bytes( - const tsi_handshaker_result *self, const unsigned char **bytes, - size_t *byte_size) { - tsi_adapter_handshaker_result *impl = (tsi_adapter_handshaker_result *)self; + const tsi_handshaker_result* self, const unsigned char** bytes, + size_t* byte_size) { + tsi_adapter_handshaker_result* impl = (tsi_adapter_handshaker_result*)self; *bytes = impl->unused_bytes; *byte_size = impl->unused_bytes_size; return TSI_OK; } -static void adapter_result_destroy(tsi_handshaker_result *self) { - tsi_adapter_handshaker_result *impl = 
(tsi_adapter_handshaker_result *)self; +static void adapter_result_destroy(tsi_handshaker_result* self) { + tsi_adapter_handshaker_result* impl = + reinterpret_cast(self); tsi_handshaker_destroy(impl->wrapped); gpr_free(impl->unused_bytes); gpr_free(self); @@ -67,7 +70,7 @@ static void adapter_result_destroy(tsi_handshaker_result *self) { static const tsi_handshaker_result_vtable result_vtable = { adapter_result_extract_peer, - NULL, /* create_zero_copy_grpc_protector */ + nullptr, /* create_zero_copy_grpc_protector */ adapter_result_create_frame_protector, adapter_result_get_unused_bytes, adapter_result_destroy, @@ -75,20 +78,23 @@ static const tsi_handshaker_result_vtable result_vtable = { /* Ownership of wrapped tsi_handshaker is transferred to the result object. */ static tsi_result tsi_adapter_create_handshaker_result( - tsi_handshaker *wrapped, const unsigned char *unused_bytes, - size_t unused_bytes_size, tsi_handshaker_result **handshaker_result) { - if (wrapped == NULL || (unused_bytes_size > 0 && unused_bytes == NULL)) { + tsi_handshaker* wrapped, const unsigned char* unused_bytes, + size_t unused_bytes_size, tsi_handshaker_result** handshaker_result) { + if (wrapped == nullptr || + (unused_bytes_size > 0 && unused_bytes == nullptr)) { return TSI_INVALID_ARGUMENT; } - tsi_adapter_handshaker_result *impl = gpr_zalloc(sizeof(*impl)); + tsi_adapter_handshaker_result* impl = + static_cast(gpr_zalloc(sizeof(*impl))); impl->base.vtable = &result_vtable; impl->wrapped = wrapped; impl->unused_bytes_size = unused_bytes_size; if (unused_bytes_size > 0) { - impl->unused_bytes = gpr_malloc(unused_bytes_size); + impl->unused_bytes = + static_cast(gpr_malloc(unused_bytes_size)); memcpy(impl->unused_bytes, unused_bytes, unused_bytes_size); } else { - impl->unused_bytes = NULL; + impl->unused_bytes = nullptr; } *handshaker_result = &impl->base; return TSI_OK; @@ -98,63 +104,65 @@ static tsi_result tsi_adapter_create_handshaker_result( typedef struct { tsi_handshaker base; - tsi_handshaker *wrapped; - unsigned char *adapter_buffer; + tsi_handshaker* wrapped; + unsigned char* adapter_buffer; size_t adapter_buffer_size; } tsi_adapter_handshaker; -static tsi_result adapter_get_bytes_to_send_to_peer(tsi_handshaker *self, - unsigned char *bytes, - size_t *bytes_size) { +static tsi_result adapter_get_bytes_to_send_to_peer(tsi_handshaker* self, + unsigned char* bytes, + size_t* bytes_size) { return tsi_handshaker_get_bytes_to_send_to_peer( tsi_adapter_handshaker_get_wrapped(self), bytes, bytes_size); } -static tsi_result adapter_process_bytes_from_peer(tsi_handshaker *self, - const unsigned char *bytes, - size_t *bytes_size) { +static tsi_result adapter_process_bytes_from_peer(tsi_handshaker* self, + const unsigned char* bytes, + size_t* bytes_size) { return tsi_handshaker_process_bytes_from_peer( tsi_adapter_handshaker_get_wrapped(self), bytes, bytes_size); } -static tsi_result adapter_get_result(tsi_handshaker *self) { +static tsi_result adapter_get_result(tsi_handshaker* self) { return tsi_handshaker_get_result(tsi_adapter_handshaker_get_wrapped(self)); } -static tsi_result adapter_extract_peer(tsi_handshaker *self, tsi_peer *peer) { +static tsi_result adapter_extract_peer(tsi_handshaker* self, tsi_peer* peer) { return tsi_handshaker_extract_peer(tsi_adapter_handshaker_get_wrapped(self), peer); } static tsi_result adapter_create_frame_protector( - tsi_handshaker *self, size_t *max_protected_frame_size, - tsi_frame_protector **protector) { + tsi_handshaker* self, size_t* max_protected_frame_size, + 
tsi_frame_protector** protector) { return tsi_handshaker_create_frame_protector( tsi_adapter_handshaker_get_wrapped(self), max_protected_frame_size, protector); } -static void adapter_destroy(tsi_handshaker *self) { - tsi_adapter_handshaker *impl = (tsi_adapter_handshaker *)self; +static void adapter_destroy(tsi_handshaker* self) { + tsi_adapter_handshaker* impl = + reinterpret_cast(self); tsi_handshaker_destroy(impl->wrapped); gpr_free(impl->adapter_buffer); gpr_free(self); } static tsi_result adapter_next( - tsi_handshaker *self, const unsigned char *received_bytes, - size_t received_bytes_size, const unsigned char **bytes_to_send, - size_t *bytes_to_send_size, tsi_handshaker_result **handshaker_result, - tsi_handshaker_on_next_done_cb cb, void *user_data) { + tsi_handshaker* self, const unsigned char* received_bytes, + size_t received_bytes_size, const unsigned char** bytes_to_send, + size_t* bytes_to_send_size, tsi_handshaker_result** handshaker_result, + tsi_handshaker_on_next_done_cb cb, void* user_data) { /* Input sanity check. */ - if ((received_bytes_size > 0 && received_bytes == NULL) || - bytes_to_send == NULL || bytes_to_send_size == NULL || - handshaker_result == NULL) { + if ((received_bytes_size > 0 && received_bytes == nullptr) || + bytes_to_send == nullptr || bytes_to_send_size == nullptr || + handshaker_result == nullptr) { return TSI_INVALID_ARGUMENT; } /* If there are received bytes, process them first. */ - tsi_adapter_handshaker *impl = (tsi_adapter_handshaker *)self; + tsi_adapter_handshaker* impl = + reinterpret_cast(self); tsi_result status = TSI_OK; size_t bytes_consumed = received_bytes_size; if (received_bytes_size > 0) { @@ -172,8 +180,8 @@ static tsi_result adapter_next( offset += to_send_size; if (status == TSI_INCOMPLETE_DATA) { impl->adapter_buffer_size *= 2; - impl->adapter_buffer = - gpr_realloc(impl->adapter_buffer, impl->adapter_buffer_size); + impl->adapter_buffer = static_cast( + gpr_realloc(impl->adapter_buffer, impl->adapter_buffer_size)); } } while (status == TSI_INCOMPLETE_DATA); if (status != TSI_OK) return status; @@ -182,16 +190,16 @@ static tsi_result adapter_next( /* If handshake completes, create tsi_handshaker_result. */ if (tsi_handshaker_is_in_progress(impl->wrapped)) { - *handshaker_result = NULL; + *handshaker_result = nullptr; } else { size_t unused_bytes_size = received_bytes_size - bytes_consumed; - const unsigned char *unused_bytes = - unused_bytes_size == 0 ? NULL : received_bytes + bytes_consumed; + const unsigned char* unused_bytes = + unused_bytes_size == 0 ? 
nullptr : received_bytes + bytes_consumed; status = tsi_adapter_create_handshaker_result( impl->wrapped, unused_bytes, unused_bytes_size, handshaker_result); if (status == TSI_OK) { impl->base.handshaker_result_created = true; - impl->wrapped = NULL; + impl->wrapped = nullptr; } } return status; @@ -207,18 +215,21 @@ static const tsi_handshaker_vtable handshaker_vtable = { adapter_next, }; -tsi_handshaker *tsi_create_adapter_handshaker(tsi_handshaker *wrapped) { - GPR_ASSERT(wrapped != NULL); - tsi_adapter_handshaker *impl = gpr_zalloc(sizeof(*impl)); +tsi_handshaker* tsi_create_adapter_handshaker(tsi_handshaker* wrapped) { + GPR_ASSERT(wrapped != nullptr); + tsi_adapter_handshaker* impl = + static_cast(gpr_zalloc(sizeof(*impl))); impl->base.vtable = &handshaker_vtable; impl->wrapped = wrapped; impl->adapter_buffer_size = TSI_ADAPTER_INITIAL_BUFFER_SIZE; - impl->adapter_buffer = gpr_malloc(impl->adapter_buffer_size); + impl->adapter_buffer = + static_cast(gpr_malloc(impl->adapter_buffer_size)); return &impl->base; } -tsi_handshaker *tsi_adapter_handshaker_get_wrapped(tsi_handshaker *adapter) { - if (adapter == NULL) return NULL; - tsi_adapter_handshaker *impl = (tsi_adapter_handshaker *)adapter; +tsi_handshaker* tsi_adapter_handshaker_get_wrapped(tsi_handshaker* adapter) { + if (adapter == nullptr) return nullptr; + tsi_adapter_handshaker* impl = + reinterpret_cast(adapter); return impl->wrapped; } diff --git a/Sources/CgRPC/src/core/tsi/transport_security_adapter.h b/Sources/CgRPC/src/core/tsi/transport_security_adapter.h index 02f33d4c1..f83ecc53e 100644 --- a/Sources/CgRPC/src/core/tsi/transport_security_adapter.h +++ b/Sources/CgRPC/src/core/tsi/transport_security_adapter.h @@ -19,11 +19,9 @@ #ifndef GRPC_CORE_TSI_TRANSPORT_SECURITY_ADAPTER_H #define GRPC_CORE_TSI_TRANSPORT_SECURITY_ADAPTER_H -#include "src/core/tsi/transport_security_interface.h" +#include -#ifdef __cplusplus -extern "C" { -#endif +#include "src/core/tsi/transport_security_interface.h" /* Create a tsi handshaker that takes an implementation of old interface and converts into an implementation of new interface. In the old interface, @@ -33,15 +31,11 @@ extern "C" { this tsi adapter handshaker is temporary. It will be removed once TSI has been fully migrated to the new interface. Ownership of input tsi_handshaker is transferred to this new adapter. */ -tsi_handshaker *tsi_create_adapter_handshaker(tsi_handshaker *wrapped); +tsi_handshaker* tsi_create_adapter_handshaker(tsi_handshaker* wrapped); /* Given a tsi adapter handshaker, return the original wrapped handshaker. The adapter still owns the wrapped handshaker which should not be destroyed by the caller. */ -tsi_handshaker *tsi_adapter_handshaker_get_wrapped(tsi_handshaker *adapter); - -#ifdef __cplusplus -} -#endif +tsi_handshaker* tsi_adapter_handshaker_get_wrapped(tsi_handshaker* adapter); #endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_ADAPTER_H */ diff --git a/Sources/CgRPC/src/core/tsi/transport_security_grpc.c b/Sources/CgRPC/src/core/tsi/transport_security_grpc.c deleted file mode 100644 index affd99523..000000000 --- a/Sources/CgRPC/src/core/tsi/transport_security_grpc.c +++ /dev/null @@ -1,71 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include "src/core/tsi/transport_security_grpc.h" - -/* This method creates a tsi_zero_copy_grpc_protector object. */ -tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector( - grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self, - size_t *max_output_protected_frame_size, - tsi_zero_copy_grpc_protector **protector) { - if (exec_ctx == NULL || self == NULL || self->vtable == NULL || - protector == NULL) { - return TSI_INVALID_ARGUMENT; - } - if (self->vtable->create_zero_copy_grpc_protector == NULL) { - return TSI_UNIMPLEMENTED; - } - return self->vtable->create_zero_copy_grpc_protector( - exec_ctx, self, max_output_protected_frame_size, protector); -} - -/* --- tsi_zero_copy_grpc_protector common implementation. --- - - Calls specific implementation after state/input validation. */ - -tsi_result tsi_zero_copy_grpc_protector_protect( - grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, - grpc_slice_buffer *unprotected_slices, - grpc_slice_buffer *protected_slices) { - if (exec_ctx == NULL || self == NULL || self->vtable == NULL || - unprotected_slices == NULL || protected_slices == NULL) { - return TSI_INVALID_ARGUMENT; - } - if (self->vtable->protect == NULL) return TSI_UNIMPLEMENTED; - return self->vtable->protect(exec_ctx, self, unprotected_slices, - protected_slices); -} - -tsi_result tsi_zero_copy_grpc_protector_unprotect( - grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, - grpc_slice_buffer *protected_slices, - grpc_slice_buffer *unprotected_slices) { - if (exec_ctx == NULL || self == NULL || self->vtable == NULL || - protected_slices == NULL || unprotected_slices == NULL) { - return TSI_INVALID_ARGUMENT; - } - if (self->vtable->unprotect == NULL) return TSI_UNIMPLEMENTED; - return self->vtable->unprotect(exec_ctx, self, protected_slices, - unprotected_slices); -} - -void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx, - tsi_zero_copy_grpc_protector *self) { - if (self == NULL) return; - self->vtable->destroy(exec_ctx, self); -} diff --git a/Sources/CgRPC/src/core/tsi/transport_security_grpc.cc b/Sources/CgRPC/src/core/tsi/transport_security_grpc.cc new file mode 100644 index 000000000..c73a6e303 --- /dev/null +++ b/Sources/CgRPC/src/core/tsi/transport_security_grpc.cc @@ -0,0 +1,66 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include + +#include "src/core/tsi/transport_security_grpc.h" + +/* This method creates a tsi_zero_copy_grpc_protector object. 
*/ +tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector( + const tsi_handshaker_result* self, size_t* max_output_protected_frame_size, + tsi_zero_copy_grpc_protector** protector) { + if (self == nullptr || self->vtable == nullptr || protector == nullptr) { + return TSI_INVALID_ARGUMENT; + } + if (self->vtable->create_zero_copy_grpc_protector == nullptr) { + return TSI_UNIMPLEMENTED; + } + return self->vtable->create_zero_copy_grpc_protector( + self, max_output_protected_frame_size, protector); +} + +/* --- tsi_zero_copy_grpc_protector common implementation. --- + + Calls specific implementation after state/input validation. */ + +tsi_result tsi_zero_copy_grpc_protector_protect( + tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices) { + if (self == nullptr || self->vtable == nullptr || + unprotected_slices == nullptr || protected_slices == nullptr) { + return TSI_INVALID_ARGUMENT; + } + if (self->vtable->protect == nullptr) return TSI_UNIMPLEMENTED; + return self->vtable->protect(self, unprotected_slices, protected_slices); +} + +tsi_result tsi_zero_copy_grpc_protector_unprotect( + tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices) { + if (self == nullptr || self->vtable == nullptr || + protected_slices == nullptr || unprotected_slices == nullptr) { + return TSI_INVALID_ARGUMENT; + } + if (self->vtable->unprotect == nullptr) return TSI_UNIMPLEMENTED; + return self->vtable->unprotect(self, protected_slices, unprotected_slices); +} + +void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector* self) { + if (self == nullptr) return; + self->vtable->destroy(self); +} diff --git a/Sources/CgRPC/src/core/tsi/transport_security_grpc.h b/Sources/CgRPC/src/core/tsi/transport_security_grpc.h index ca6755c12..d3bb04d07 100644 --- a/Sources/CgRPC/src/core/tsi/transport_security_grpc.h +++ b/Sources/CgRPC/src/core/tsi/transport_security_grpc.h @@ -19,20 +19,17 @@ #ifndef GRPC_CORE_TSI_TRANSPORT_SECURITY_GRPC_H #define GRPC_CORE_TSI_TRANSPORT_SECURITY_GRPC_H +#include + #include #include "src/core/tsi/transport_security.h" -#ifdef __cplusplus -extern "C" { -#endif - /* This method creates a tsi_zero_copy_grpc_protector object. It return TSI_OK assuming there is no fatal error. The caller is responsible for destroying the protector. */ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector( - grpc_exec_ctx *exec_ctx, const tsi_handshaker_result *self, - size_t *max_output_protected_frame_size, - tsi_zero_copy_grpc_protector **protector); + const tsi_handshaker_result* self, size_t* max_output_protected_frame_size, + tsi_zero_copy_grpc_protector** protector); /* -- tsi_zero_copy_grpc_protector object -- */ @@ -43,8 +40,8 @@ tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector( - This method returns TSI_OK in case of success or a specific error code in case of failure. */ tsi_result tsi_zero_copy_grpc_protector_protect( - grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, - grpc_slice_buffer *unprotected_slices, grpc_slice_buffer *protected_slices); + tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices); /* Outputs unprotected bytes. - protected_slices is the bytes of protected frames. @@ -53,32 +50,25 @@ tsi_result tsi_zero_copy_grpc_protector_protect( there is not enough data to output in which case unprotected_slices has 0 bytes. 
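With the grpc_exec_ctx plumbing gone, the slice-buffer based flow reduces to the following illustrative sketch (names are hypothetical; hs_result is a completed handshaker result):

#include <grpc/slice_buffer.h>

#include "src/core/tsi/transport_security_grpc.h"

// Hypothetical helper: wrap plaintext slices into protected frames.
tsi_result protect_slices(const tsi_handshaker_result* hs_result,
                          grpc_slice_buffer* plaintext,
                          grpc_slice_buffer* wire) {
  tsi_zero_copy_grpc_protector* protector = nullptr;
  // Passing nullptr for the frame size asks the implementation to use its
  // default (an assumption about the concrete protector).
  tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector(
      hs_result, nullptr, &protector);
  if (result != TSI_OK) return result;
  result = tsi_zero_copy_grpc_protector_protect(protector, plaintext, wire);
  tsi_zero_copy_grpc_protector_destroy(protector);
  return result;
}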
*/ tsi_result tsi_zero_copy_grpc_protector_unprotect( - grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self, - grpc_slice_buffer *protected_slices, grpc_slice_buffer *unprotected_slices); + tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices); /* Destroys the tsi_zero_copy_grpc_protector object. */ -void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx *exec_ctx, - tsi_zero_copy_grpc_protector *self); +void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector* self); /* Base for tsi_zero_copy_grpc_protector implementations. */ typedef struct { - tsi_result (*protect)(grpc_exec_ctx *exec_ctx, - tsi_zero_copy_grpc_protector *self, - grpc_slice_buffer *unprotected_slices, - grpc_slice_buffer *protected_slices); - tsi_result (*unprotect)(grpc_exec_ctx *exec_ctx, - tsi_zero_copy_grpc_protector *self, - grpc_slice_buffer *protected_slices, - grpc_slice_buffer *unprotected_slices); - void (*destroy)(grpc_exec_ctx *exec_ctx, tsi_zero_copy_grpc_protector *self); + tsi_result (*protect)(tsi_zero_copy_grpc_protector* self, + grpc_slice_buffer* unprotected_slices, + grpc_slice_buffer* protected_slices); + tsi_result (*unprotect)(tsi_zero_copy_grpc_protector* self, + grpc_slice_buffer* protected_slices, + grpc_slice_buffer* unprotected_slices); + void (*destroy)(tsi_zero_copy_grpc_protector* self); } tsi_zero_copy_grpc_protector_vtable; struct tsi_zero_copy_grpc_protector { - const tsi_zero_copy_grpc_protector_vtable *vtable; + const tsi_zero_copy_grpc_protector_vtable* vtable; }; -#ifdef __cplusplus -} -#endif - #endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_GRPC_H */ diff --git a/Sources/CgRPC/src/core/tsi/transport_security_interface.h b/Sources/CgRPC/src/core/tsi/transport_security_interface.h index 80c426bbd..8c1086693 100644 --- a/Sources/CgRPC/src/core/tsi/transport_security_interface.h +++ b/Sources/CgRPC/src/core/tsi/transport_security_interface.h @@ -19,15 +19,13 @@ #ifndef GRPC_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H #define GRPC_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H +#include + #include #include #include "src/core/lib/debug/trace.h" -#ifdef __cplusplus -extern "C" { -#endif - /* --- tsi result --- */ typedef enum { @@ -56,11 +54,11 @@ typedef enum { TSI_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY, } tsi_client_certificate_request_type; -const char *tsi_result_to_string(tsi_result result); +const char* tsi_result_to_string(tsi_result result); /* --- tsi tracing --- */ -extern grpc_tracer_flag tsi_tracing_enabled; +extern grpc_core::TraceFlag tsi_tracing_enabled; /* -- tsi_zero_copy_grpc_protector object -- @@ -131,11 +129,11 @@ typedef struct tsi_frame_protector tsi_frame_protector; if (result != TSI_OK) HandleError(result); ------------------------------------------------------------------------ */ -tsi_result tsi_frame_protector_protect(tsi_frame_protector *self, - const unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size, - unsigned char *protected_output_frames, - size_t *protected_output_frames_size); +tsi_result tsi_frame_protector_protect(tsi_frame_protector* self, + const unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size, + unsigned char* protected_output_frames, + size_t* protected_output_frames_size); /* Indicates that we need to flush the bytes buffered in the protector and get the resulting frame. 
@@ -146,8 +144,8 @@ tsi_result tsi_frame_protector_protect(tsi_frame_protector *self, - still_pending_bytes is an output parameter indicating the number of bytes that still need to be flushed from the protector.*/ tsi_result tsi_frame_protector_protect_flush( - tsi_frame_protector *self, unsigned char *protected_output_frames, - size_t *protected_output_frames_size, size_t *still_pending_size); + tsi_frame_protector* self, unsigned char* protected_output_frames, + size_t* protected_output_frames_size, size_t* still_pending_size); /* Outputs unprotected bytes. - protected_frames_bytes is an input only parameter and points to the @@ -172,12 +170,12 @@ tsi_result tsi_frame_protector_protect_flush( needs to be read before new protected data can be processed in which case protected_frames_size will be set to 0. */ tsi_result tsi_frame_protector_unprotect( - tsi_frame_protector *self, const unsigned char *protected_frames_bytes, - size_t *protected_frames_bytes_size, unsigned char *unprotected_bytes, - size_t *unprotected_bytes_size); + tsi_frame_protector* self, const unsigned char* protected_frames_bytes, + size_t* protected_frames_bytes_size, unsigned char* unprotected_bytes, + size_t* unprotected_bytes_size); /* Destroys the tsi_frame_protector object. */ -void tsi_frame_protector_destroy(tsi_frame_protector *self); +void tsi_frame_protector_destroy(tsi_frame_protector* self); /* --- tsi_peer objects --- @@ -189,20 +187,20 @@ void tsi_frame_protector_destroy(tsi_frame_protector *self); /* Property values may contain NULL characters just like C++ strings. The length field gives the length of the string. */ typedef struct tsi_peer_property { - char *name; + char* name; struct { - char *data; + char* data; size_t length; } value; } tsi_peer_property; typedef struct { - tsi_peer_property *properties; + tsi_peer_property* properties; size_t property_count; } tsi_peer; /* Destructs the tsi_peer object. */ -void tsi_peer_destruct(tsi_peer *self); +void tsi_peer_destruct(tsi_peer* self); /* --- tsi_handshaker_result object --- @@ -215,27 +213,27 @@ typedef struct tsi_handshaker_result tsi_handshaker_result; /* This method extracts tsi peer. It returns TSI_OK assuming there is no fatal error. The caller is responsible for destructing the peer. */ -tsi_result tsi_handshaker_result_extract_peer(const tsi_handshaker_result *self, - tsi_peer *peer); +tsi_result tsi_handshaker_result_extract_peer(const tsi_handshaker_result* self, + tsi_peer* peer); /* This method creates a tsi_frame_protector object. It returns TSI_OK assuming there is no fatal error. The caller is responsible for destroying the protector. */ tsi_result tsi_handshaker_result_create_frame_protector( - const tsi_handshaker_result *self, size_t *max_output_protected_frame_size, - tsi_frame_protector **protector); + const tsi_handshaker_result* self, size_t* max_output_protected_frame_size, + tsi_frame_protector** protector); /* This method returns the unused bytes from the handshake. It returns TSI_OK assuming there is no fatal error. Ownership of the bytes is retained by the handshaker result. As a consequence, the caller must not free the bytes. */ tsi_result tsi_handshaker_result_get_unused_bytes( - const tsi_handshaker_result *self, const unsigned char **bytes, - size_t *byte_size); + const tsi_handshaker_result* self, const unsigned char** bytes, + size_t* byte_size); /* This method releases the tsi_handshaker_handshaker object. After this method is called, no other method can be called on the object. 
*/ -void tsi_handshaker_result_destroy(tsi_handshaker_result *self); +void tsi_handshaker_result_destroy(tsi_handshaker_result* self); /* --- tsi_handshaker objects ---- @@ -346,9 +344,9 @@ typedef struct tsi_handshaker tsi_handshaker; needs to be called again to get all the bytes to send to the peer (there was more data to write than the specified bytes_size). In case of a fatal error in the handshake, another specific error code is returned. */ -tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self, - unsigned char *bytes, - size_t *bytes_size); +tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker* self, + unsigned char* bytes, + size_t* bytes_size); /* TO BE DEPRECATED SOON. Use tsi_handshaker_next instead. Processes bytes received from the peer. @@ -360,9 +358,9 @@ tsi_result tsi_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self, needs to be called again to complete the data needed for processing. In case of a fatal error in the handshake, another specific error code is returned. */ -tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self, - const unsigned char *bytes, - size_t *bytes_size); +tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker* self, + const unsigned char* bytes, + size_t* bytes_size); /* TO BE DEPRECATED SOON. Gets the result of the handshaker. @@ -370,7 +368,7 @@ tsi_result tsi_handshaker_process_bytes_from_peer(tsi_handshaker *self, errors. Returns TSI_HANDSHAKE_IN_PROGRESS if the handshaker is not done yet but no error has been encountered so far. Otherwise the handshaker failed with the returned error. */ -tsi_result tsi_handshaker_get_result(tsi_handshaker *self); +tsi_result tsi_handshaker_get_result(tsi_handshaker* self); /* TO BE DEPRECATED SOON. Returns 1 if the handshake is in progress, 0 otherwise. */ @@ -382,7 +380,7 @@ tsi_result tsi_handshaker_get_result(tsi_handshaker *self); tsi_handshaker_is_in_progress returns 1, it returns TSI_OK otherwise assuming the handshaker is not in a fatal error state. The caller is responsible for destructing the peer. */ -tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer); +tsi_result tsi_handshaker_extract_peer(tsi_handshaker* self, tsi_peer* peer); /* TO BE DEPRECATED SOON. Use tsi_handshaker_result_create_frame_protector instead. @@ -403,8 +401,8 @@ tsi_result tsi_handshaker_extract_peer(tsi_handshaker *self, tsi_peer *peer); the handshaker is not in a fatal error state. The caller is responsible for destroying the protector. */ tsi_result tsi_handshaker_create_frame_protector( - tsi_handshaker *self, size_t *max_output_protected_frame_size, - tsi_frame_protector **protector); + tsi_handshaker* self, size_t* max_output_protected_frame_size, + tsi_frame_protector** protector); /* Callback function definition for tsi_handshaker_next. - status indicates the status of the next operation. @@ -414,8 +412,8 @@ tsi_result tsi_handshaker_create_frame_protector( - handshaker_result is the result of handshake when the handshake completes, is NULL otherwise. */ typedef void (*tsi_handshaker_on_next_done_cb)( - tsi_result status, void *user_data, const unsigned char *bytes_to_send, - size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result); + tsi_result status, void* user_data, const unsigned char* bytes_to_send, + size_t bytes_to_send_size, tsi_handshaker_result* handshaker_result); /* Conduct a next step of the handshake. - received_bytes is the buffer containing the data received from the peer. 
@@ -437,14 +435,14 @@ typedef void (*tsi_handshaker_on_next_done_cb)( the caller should not free bytes_to_send, as the buffer is owned by the tsi_handshaker object. */ tsi_result tsi_handshaker_next( - tsi_handshaker *self, const unsigned char *received_bytes, - size_t received_bytes_size, const unsigned char **bytes_to_send, - size_t *bytes_to_send_size, tsi_handshaker_result **handshaker_result, - tsi_handshaker_on_next_done_cb cb, void *user_data); + tsi_handshaker* self, const unsigned char* received_bytes, + size_t received_bytes_size, const unsigned char** bytes_to_send, + size_t* bytes_to_send_size, tsi_handshaker_result** handshaker_result, + tsi_handshaker_on_next_done_cb cb, void* user_data); /* This method releases the tsi_handshaker object. After this method is called, no other method can be called on the object. */ -void tsi_handshaker_destroy(tsi_handshaker *self); +void tsi_handshaker_destroy(tsi_handshaker* self); /* This method initializes the necessary shared objects used for tsi implementation. */ @@ -453,8 +451,4 @@ void tsi_init(); /* This method destroys the shared objects created by tsi_init. */ void tsi_destroy(); -#ifdef __cplusplus -} -#endif - #endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H */ diff --git a/Sources/CgRPC/third_party/nanopb/pb.h b/Sources/CgRPC/third_party/nanopb/pb.h index 4576f79ab..62dca73f4 100644 --- a/Sources/CgRPC/third_party/nanopb/pb.h +++ b/Sources/CgRPC/third_party/nanopb/pb.h @@ -25,7 +25,7 @@ /* #define PB_FIELD_16BIT 1 */ /* Add support for tag numbers > 65536 and fields larger than 65536 bytes. */ -/* #define PB_FIELD_32BIT 1 */ +/* #define PB_FIELD_32BIT 1 */ /* Disable support for error messages in order to save some code space. */ /* #define PB_NO_ERRMSG 1 */ diff --git a/Sources/SwiftGRPC/Core/Roots.swift b/Sources/SwiftGRPC/Core/Roots.swift index aa395f0a8..6c0adff18 100644 --- a/Sources/SwiftGRPC/Core/Roots.swift +++ b/Sources/SwiftGRPC/Core/Roots.swift @@ -37,11 +37,12 @@ import Foundation func roots_pem() -> String? { - let roots = - "# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
# Label: "GlobalSign Root CA"
# Serial: 4835703278459707669005204
# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
-----BEGIN CERTIFICATE-----
MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
-----END CERTIFICATE-----

# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
# Label: "GlobalSign Root CA - R2"
# Serial: 4835703278459682885658125
# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30
# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe
# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e
-----BEGIN CERTIFICATE-----
MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
-----END CERTIFICATE-----

# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
# Label: "Verisign Class 3 Public Primary Certification Authority - G3"
# Serial: 206684696279472310254277870180966723415
# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09
# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6
# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44
-----BEGIN CERTIFICATE-----
MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
-----END CERTIFICATE-----

# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Label: "Entrust.net Premium 2048 Secure Server CA"
# Serial: 946069240
# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
-----BEGIN CERTIFICATE-----
MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
fF6adulZkMV8gzURZVE=
-----END CERTIFICATE-----

# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
# Label: "Baltimore CyberTrust Root"
# Serial: 33554617
# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
-----BEGIN CERTIFICATE-----
MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
-----END CERTIFICATE-----

# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
# Label: "AddTrust External Root"
# Serial: 1
# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f
# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68
# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2
-----BEGIN CERTIFICATE-----
MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
-----END CERTIFICATE-----

# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
# Label: "Entrust Root Certification Authority"
# Serial: 1164660820
# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
-----BEGIN CERTIFICATE-----
MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
0vdXcDazv/wor3ElhVsT/h5/WrQ8
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc.
# Subject: CN=GeoTrust Global CA O=GeoTrust Inc.
# Label: "GeoTrust Global CA"
# Serial: 144470
# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5
# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12
# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a
-----BEGIN CERTIFICATE-----
MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc.
# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc.
# Label: "GeoTrust Universal CA"
# Serial: 1
# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48
# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79
# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12
-----BEGIN CERTIFICATE-----
MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy
c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE
BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0
IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV
VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8
cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT
QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh
F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v
c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w
mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd
VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX
teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ
f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe
Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+
nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB
/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY
MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG
9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX
IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn
ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z
uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN
Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja
QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW
koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9
ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt
DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm
bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw=
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
# Label: "GeoTrust Universal CA 2"
# Serial: 1
# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7
# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79
# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b
-----BEGIN CERTIFICATE-----
MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy
c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD
VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1
c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81
WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG
FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq
XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL
se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb
KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd
IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73
y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt
hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc
QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4
Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV
HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ
KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ
L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr
Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo
ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY
T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz
GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m
1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV
OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX
QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
-----END CERTIFICATE-----

# Issuer: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
# Subject: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
# Label: "Visa eCommerce Root"
# Serial: 25952180776285836048024890241505565794
# MD5 Fingerprint: fc:11:b8:d8:08:93:30:00:6d:23:f9:7e:eb:52:1e:02
# SHA1 Fingerprint: 70:17:9b:86:8c:00:a4:fa:60:91:52:22:3f:9f:3e:32:bd:e0:05:62
# SHA256 Fingerprint: 69:fa:c9:bd:55:fb:0a:c7:8d:53:bb:ee:5c:f1:d5:97:98:9f:d0:aa:ab:20:a2:51:51:bd:f1:73:3e:e7:d1:22
-----BEGIN CERTIFICATE-----
MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr
MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl
cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv
bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw
CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h
dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l
cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h
2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E
lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV
ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq
299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t
vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL
dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF
AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR
zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3
LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd
7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw
++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt
398znM/jra6O1I7mT1GvFpLgXPYHDw==
-----END CERTIFICATE-----

# Issuer: CN=Certum CA O=Unizeto Sp. z o.o.
# Subject: CN=Certum CA O=Unizeto Sp. z o.o.
# Label: "Certum Root CA"
# Serial: 65568
# MD5 Fingerprint: 2c:8f:9f:66:1d:18:90:b1:47:26:9d:8e:86:82:8c:a9
# SHA1 Fingerprint: 62:52:dc:40:f7:11:43:a2:2f:de:9e:f7:34:8e:06:42:51:b1:81:18
# SHA256 Fingerprint: d8:e0:fe:bc:1d:b2:e3:8d:00:94:0f:37:d2:7d:41:34:4d:99:3e:73:4b:99:d5:65:6d:97:78:d4:d8:14:36:24
-----BEGIN CERTIFICATE-----
MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM
MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD
QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM
MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD
QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E
jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo
ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI
ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu
Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg
AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7
HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA
uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa
TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg
xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q
CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x
O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs
6GAqm4VKQPNriiTsBhYscw==
-----END CERTIFICATE-----

# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
# Subject: CN=AAA Certificate Services O=Comodo CA Limited
# Label: "Comodo AAA Services root"
# Serial: 1
# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
-----BEGIN CERTIFICATE-----
MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
# Label: "QuoVadis Root CA"
# Serial: 985026699
# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24
# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9
# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73
-----BEGIN CERTIFICATE-----
MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC
TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0
aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0
aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz
MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw
IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR
dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp
li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D
rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ
WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug
F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU
xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC
Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv
dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw
ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl
IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh
c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy
ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh
Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI
KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T
KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq
y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p
dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD
VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL
MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk
fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8
7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R
cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y
mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW
xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK
SnQ2+Q==
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
# Label: "QuoVadis Root CA 2"
# Serial: 1289
# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
-----BEGIN CERTIFICATE-----
MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
+ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
# Label: "QuoVadis Root CA 3"
# Serial: 1478
# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
-----BEGIN CERTIFICATE-----
MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
+LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
4SVhM7JZG+Ju1zdXtg2pEto=
-----END CERTIFICATE-----

# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1
# Subject: O=SECOM Trust.net OU=Security Communication RootCA1
# Label: "Security Communication Root CA"
# Serial: 0
# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a
# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7
# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c
-----BEGIN CERTIFICATE-----
MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
-----END CERTIFICATE-----

# Issuer: CN=Sonera Class2 CA O=Sonera
# Subject: CN=Sonera Class2 CA O=Sonera
# Label: "Sonera Class 2 Root CA"
# Serial: 29
# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb
# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27
# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27
-----BEGIN CERTIFICATE-----
MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP
MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx
MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV
BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o
Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt
5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s
3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej
vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu
8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw
DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG
MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil
zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/
3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD
FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6
Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2
ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M
-----END CERTIFICATE-----

# Issuer: CN=Chambers of Commerce Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org
# Subject: CN=Chambers of Commerce Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org
# Label: "Camerfirma Chambers of Commerce Root"
# Serial: 0
# MD5 Fingerprint: b0:01:ee:14:d9:af:29:18:94:76:8e:f1:69:33:2a:84
# SHA1 Fingerprint: 6e:3a:55:a4:19:0c:19:5c:93:84:3c:c0:db:72:2e:31:30:61:f0:b1
# SHA256 Fingerprint: 0c:25:8a:12:a5:67:4a:ef:25:f2:8b:a7:dc:fa:ec:ee:a3:48:e5:41:e6:f5:cc:4e:e6:3b:71:b3:61:60:6a:c3
-----BEGIN CERTIFICATE-----
MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEn
MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL
ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMg
b2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAxNjEzNDNaFw0zNzA5MzAxNjEzNDRa
MH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZpcm1hIFNBIENJRiBB
ODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3JnMSIw
IAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0B
AQEFAAOCAQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtb
unXF/KGIJPov7coISjlUxFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0d
BmpAPrMMhe5cG3nCYsS4No41XQEMIwRHNaqbYE6gZj3LJgqcQKH0XZi/caulAGgq
7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jWDA+wWFjbw2Y3npuRVDM3
0pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFVd9oKDMyX
roDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIG
A1UdEwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5j
aGFtYmVyc2lnbi5vcmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p
26EpW1eLTXYGduHRooowDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIA
BzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hhbWJlcnNpZ24ub3JnMCcGA1Ud
EgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYDVR0gBFEwTzBN
BgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz
aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEB
AAxBl8IahsAifJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZd
p0AJPaxJRUXcLo0waLIJuvvDL8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi
1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wNUPf6s+xCX6ndbcj0dc97wXImsQEc
XCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/nADydb47kMgkdTXg0
eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1erfu
tGWaIZDgqtCYvDi1czyL+Nw=
-----END CERTIFICATE-----

# Issuer: CN=Global Chambersign Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org
# Subject: CN=Global Chambersign Root O=AC Camerfirma SA CIF A82743287 OU=http://www.chambersign.org
# Label: "Camerfirma Global Chambersign Root"
# Serial: 0
# MD5 Fingerprint: c5:e6:7b:bf:06:d0:4f:43:ed:c4:7a:65:8a:fb:6b:19
# SHA1 Fingerprint: 33:9b:6b:14:50:24:9b:55:7a:01:87:72:84:d9:e0:2f:c3:d2:d8:e9
# SHA256 Fingerprint: ef:3c:b4:17:fc:8e:bf:6f:97:87:6c:9e:4e:ce:39:de:1e:a5:fe:64:91:41:d1:02:8b:7d:11:c0:b2:29:8c:ed
-----BEGIN CERTIFICATE-----
MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEn
MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL
ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENo
YW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYxNDE4WhcNMzcwOTMwMTYxNDE4WjB9
MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgy
NzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4G
A1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUA
A4IBDQAwggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0
Mi+ITaFgCPS3CU6gSS9J1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/s
QJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8Oby4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpV
eAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl6DJWk0aJqCWKZQbua795
B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c8lCrEqWh
z0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0T
AQH/BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1i
ZXJzaWduLm9yZy9jaGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4w
TcbOX60Qq+UDpfqpFDAOBgNVHQ8BAf8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAH
MCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBjaGFtYmVyc2lnbi5vcmcwKgYD
VR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9yZzBbBgNVHSAE
VDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh
bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0B
AQUFAAOCAQEAPDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUM
bKGKfKX0j//U2K0X1S0E0T9YgOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXi
ryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJPJ7oKXqJ1/6v/2j1pReQvayZzKWG
VwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4IBHNfTIzSJRUTN3c
ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/
AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A==
-----END CERTIFICATE-----

# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
# Label: "XRamp Global CA Root"
# Serial: 107108908803651509692980124233745014957
# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
-----BEGIN CERTIFICATE-----
MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
O+7ETPTsJ3xCwnR8gooJybQDJbw=
-----END CERTIFICATE-----

# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
# Label: "Go Daddy Class 2 CA"
# Serial: 0
# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
-----BEGIN CERTIFICATE-----
MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
ReYNnyicsbkqWletNw+vHX/bvZ8=
-----END CERTIFICATE-----

# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
# Label: "Starfield Class 2 CA"
# Serial: 0
# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
-----BEGIN CERTIFICATE-----
MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
+lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
-----END CERTIFICATE-----

# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
# Label: "StartCom Certification Authority"
# Serial: 1
# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16
# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f
# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea
-----BEGIN CERTIFICATE-----
MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
-----END CERTIFICATE-----

# Issuer: O=Government Root Certification Authority
# Subject: O=Government Root Certification Authority
# Label: "Taiwan GRCA"
# Serial: 42023070807708724159991140556527066870
# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e
# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9
# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3
-----BEGIN CERTIFICATE-----
MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/
MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj
YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow
PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp
Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR
IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q
gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy
yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts
F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2
jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx
ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC
VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK
YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH
EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN
Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud
DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE
MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK
UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ
TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf
qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK
ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE
JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7
hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1
EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm
nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX
udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz
ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe
LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl
pYYsfPQS
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Assured ID Root CA"
# Serial: 17154717934120587862167794914071425081
# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
-----BEGIN CERTIFICATE-----
MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
+o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Global Root CA"
# Serial: 10944719598952040374951832963794454346
# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
-----BEGIN CERTIFICATE-----
MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
-----END CERTIFICATE-----

# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert High Assurance EV Root CA"
# Serial: 3553400076410547919724730734378100087
# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
+9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
+OkuE6N36B9K
-----END CERTIFICATE-----

# Issuer: CN=Class 2 Primary CA O=Certplus
# Subject: CN=Class 2 Primary CA O=Certplus
# Label: "Certplus Class 2 Primary CA"
# Serial: 177770208045934040241468760488327595043
# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b
# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb
# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb
-----BEGIN CERTIFICATE-----
MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw
PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz
cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9
MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz
IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ
ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR
VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL
kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd
EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas
H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0
HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud
DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4
QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu
Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/
AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8
yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR
FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA
ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB
kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7
l7+ijrRU
-----END CERTIFICATE-----

# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co.
# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co.
# Label: "DST Root CA X3"
# Serial: 91299735575339953335919266965803778155
# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5
# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13
# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39
-----BEGIN CERTIFICATE-----
MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/
MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow
PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O
rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq
OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b
xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw
7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD
aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG
SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69
ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr
AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz
R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5
JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo
Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
-----END CERTIFICATE-----

# Issuer: CN=DST ACES CA X6 O=Digital Signature Trust OU=DST ACES
# Subject: CN=DST ACES CA X6 O=Digital Signature Trust OU=DST ACES
# Label: "DST ACES CA X6"
# Serial: 17771143917277623872238992636097467865
# MD5 Fingerprint: 21:d8:4c:82:2b:99:09:33:a2:eb:14:24:8d:8e:5f:e8
# SHA1 Fingerprint: 40:54:da:6f:1c:3f:40:74:ac:ed:0f:ec:cd:db:79:d1:53:fb:90:1d
# SHA256 Fingerprint: 76:7c:95:5a:76:41:2c:89:af:68:8e:90:a1:c7:0f:55:6c:fd:6b:60:25:db:ea:10:41:6d:7e:b6:83:1f:8c:40
-----BEGIN CERTIFICATE-----
MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBb
MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3Qx
ETAPBgNVBAsTCERTVCBBQ0VTMRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0w
MzExMjAyMTE5NThaFw0xNzExMjAyMTE5NThaMFsxCzAJBgNVBAYTAlVTMSAwHgYD
VQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UECxMIRFNUIEFDRVMx
FzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPu
ktKe1jzIDZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7
gLFViYsx+tC3dr5BPTCapCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZH
fAjIgrrep4c9oW24MFbCswKBXy314powGCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4a
ahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPyMjwmR/onJALJfh1biEIT
ajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1UdEwEB/wQF
MAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rk
c3QuY29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjto
dHRwOi8vd3d3LnRydXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMt
aW5kZXguaHRtbDAdBgNVHQ4EFgQUCXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZI
hvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V25FYrnJmQ6AgwbN99Pe7lv7Uk
QIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6tFr8hlxCBPeP/
h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq
nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpR
rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2
9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis=
-----END CERTIFICATE-----

# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
# Label: "SwissSign Gold CA - G2"
# Serial: 13492815561806991280
# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
-----BEGIN CERTIFICATE-----
MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
-----END CERTIFICATE-----

# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
# Label: "SwissSign Silver CA - G2"
# Serial: 5700383053117599563
# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
-----BEGIN CERTIFICATE-----
MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
# Label: "GeoTrust Primary Certification Authority"
# Serial: 32798226551256963324313806436981982369
# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf
# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96
# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c
-----BEGIN CERTIFICATE-----
MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
-----END CERTIFICATE-----

# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
# Label: "thawte Primary Root CA"
# Serial: 69529181992039203566298953787712940909
# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12
# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81
# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f
-----BEGIN CERTIFICATE-----
MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
jVaMaA==
-----END CERTIFICATE-----

# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
# Label: "VeriSign Class 3 Public Primary Certification Authority - G5"
# Serial: 33037644167568058970164719475676101450
# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c
# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5
# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df
-----BEGIN CERTIFICATE-----
MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
-----END CERTIFICATE-----

# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
# Subject: CN=SecureTrust CA O=SecureTrust Corporation
# Label: "SecureTrust CA"
# Serial: 17199774589125277788362757014266862032
# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
-----BEGIN CERTIFICATE-----
MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
-----END CERTIFICATE-----

# Issuer: CN=Secure Global CA O=SecureTrust Corporation
# Subject: CN=Secure Global CA O=SecureTrust Corporation
# Label: "Secure Global CA"
# Serial: 9751836167731051554232119481456978597
# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
-----BEGIN CERTIFICATE-----
MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
-----END CERTIFICATE-----

# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
# Label: "COMODO Certification Authority"
# Serial: 104350513648249232941998508985834464573
# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
-----BEGIN CERTIFICATE-----
MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
ZQ==
-----END CERTIFICATE-----

# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
# Label: "Network Solutions Certificate Authority"
# Serial: 116697915152937497490437556386812487904
# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e
# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce
# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c
-----BEGIN CERTIFICATE-----
MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
-----END CERTIFICATE-----

# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
# Label: "COMODO ECC Certification Authority"
# Serial: 41578283867086692638256921589707938090
# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
-----BEGIN CERTIFICATE-----
MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
-----END CERTIFICATE-----

# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication EV RootCA1
# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication EV RootCA1
# Label: "Security Communication EV RootCA1"
# Serial: 0
# MD5 Fingerprint: 22:2d:a6:01:ea:7c:0a:f7:f0:6c:56:43:3f:77:76:d3
# SHA1 Fingerprint: fe:b8:c4:32:dc:f9:76:9a:ce:ae:3d:d8:90:8f:fd:28:86:65:64:7d
# SHA256 Fingerprint: a2:2d:ba:68:1e:97:37:6e:2d:39:7d:72:8a:ae:3a:9b:62:96:b9:fd:ba:60:bc:2e:11:f6:47:f2:c6:75:fb:37
-----BEGIN CERTIFICATE-----
MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDEl
MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMh
U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIz
MloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09N
IFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNlY3VyaXR5IENvbW11
bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSE
RMqm4miO/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gO
zXppFodEtZDkBp2uoQSXWHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5
bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4zZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDF
MxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4bepJz11sS6/vmsJWXMY1
VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK9U2vP9eC
OKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G
CSqGSIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HW
tWS3irO4G8za+6xmiEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZ
q51ihPZRwSzJIxXYKLerJRO1RuGGAv8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDb
EJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnWmHyojf6GPgcWkuF75x3sM3Z+
Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEWT1MKZPlO9L9O
VL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490
-----END CERTIFICATE-----

# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
# Label: "OISTE WISeKey Global Root GA CA"
# Serial: 86718877871133159090080555911823548314
# MD5 Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93
# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9
# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5
-----BEGIN CERTIFICATE-----
MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB
ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly
aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl
ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w
NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G
A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD
VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX
SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR
VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2
w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF
mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg
4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9
4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw
DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw
EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx
SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2
ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8
vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa
hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi
Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ
/L7fCg0=
-----END CERTIFICATE-----

# Issuer: CN=Certigna O=Dhimyotis
# Subject: CN=Certigna O=Dhimyotis
# Label: "Certigna"
# Serial: 18364802974209362175
# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
-----BEGIN CERTIFICATE-----
MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
-----END CERTIFICATE-----

# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
# Label: "Deutsche Telekom Root CA 2"
# Serial: 38
# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08
# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf
# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3
-----BEGIN CERTIFICATE-----
MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc
MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj
IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB
IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE
RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl
U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290
IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU
ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC
QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr
rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S
NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc
QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH
txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP
BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC
AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp
tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa
IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl
6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+
xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU
Cm26OWMohpLzGITY+9HPBVZkVw==
-----END CERTIFICATE-----

# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc
# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc
# Label: "Cybertrust Global Root"
# Serial: 4835703278459682877484360
# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1
# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6
# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3
-----BEGIN CERTIFICATE-----
MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG
A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh
bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE
ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS
b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5
7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS
J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y
HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP
t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz
FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY
XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw
hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js
MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA
A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj
Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx
XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o
omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc
A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
WL1WMRJOEcgh4LMRkWXbtKaIOM5V
-----END CERTIFICATE-----

# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
# Label: "ePKI Root Certification Authority"
# Serial: 28956088682735189655030529057352760477
# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
-----BEGIN CERTIFICATE-----
MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
-----END CERTIFICATE-----

# Issuer: CN=TÜBİTAK UEKAE Kök Sertifika Hizmet Sağlayıcısı - Sürüm 3 O=Türkiye Bilimsel ve Teknolojik Araştırma Kurumu - TÜBİTAK OU=Ulusal Elektronik ve Kriptoloji Araştırma Enstitüsü - UEKAE/Kamu Sertifikasyon Merkezi
# Subject: CN=TÜBİTAK UEKAE Kök Sertifika Hizmet Sağlayıcısı - Sürüm 3 O=Türkiye Bilimsel ve Teknolojik Araştırma Kurumu - TÜBİTAK OU=Ulusal Elektronik ve Kriptoloji Araştırma Enstitüsü - UEKAE/Kamu Sertifikasyon Merkezi
# Label: "T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3"
# Serial: 17
# MD5 Fingerprint: ed:41:f5:8c:50:c5:2b:9c:73:e6:ee:6c:eb:c2:a8:26
# SHA1 Fingerprint: 1b:4b:39:61:26:27:6b:64:91:a2:68:6d:d7:02:43:21:2d:1f:1d:96
# SHA256 Fingerprint: e4:c7:34:30:d7:a5:b5:09:25:df:43:37:0a:0d:21:6e:9a:79:b9:d6:db:83:73:a0:c6:9e:b1:cc:31:c7:c5:2a
-----BEGIN CERTIFICATE-----
MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS
MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp
bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw
VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy
YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy
dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2
ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe
Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx
GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls
aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU
QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh
xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0
aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr
IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h
gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK
O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO
fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw
lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL
hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID
AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP
NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t
wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM
7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh
gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n
oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs
yZyQ2uypQjyttgI=
-----END CERTIFICATE-----

# Issuer: O=certSIGN OU=certSIGN ROOT CA
# Subject: O=certSIGN OU=certSIGN ROOT CA
# Label: "certSIGN ROOT CA"
# Serial: 35210227249154
# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
-----BEGIN CERTIFICATE-----
MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
9u6wWk5JRFRYX0KD
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
# Label: "GeoTrust Primary Certification Authority - G3"
# Serial: 28809105769928564313984085209975885599
# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
-----BEGIN CERTIFICATE-----
MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
spki4cErx5z481+oghLrGREt
-----END CERTIFICATE-----

# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
# Label: "thawte Primary Root CA - G2"
# Serial: 71758320672825410020661621085256472406
# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
-----BEGIN CERTIFICATE-----
MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
-----END CERTIFICATE-----

# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
# Label: "thawte Primary Root CA - G3"
# Serial: 127614157056681299805556476275995414779
# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
-----BEGIN CERTIFICATE-----
MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
MdRAGmI0Nj81Aa6sY6A=
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
# Label: "GeoTrust Primary Certification Authority - G2"
# Serial: 80682863203381065782177908751794619243
# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
-----BEGIN CERTIFICATE-----
MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
rD6ogRLQy7rQkgu2npaqBA+K
-----END CERTIFICATE-----

# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
# Label: "VeriSign Universal Root Certification Authority"
# Serial: 85209574734084581917763752644031726877
# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
-----BEGIN CERTIFICATE-----
MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
7M2CYfE45k+XmCpajQ==
-----END CERTIFICATE-----

# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
# Serial: 63143484348153506665311985501458640051
# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
-----BEGIN CERTIFICATE-----
MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
-----END CERTIFICATE-----

# Issuer: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
# Subject: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
# Label: "NetLock Arany (Class Gold) Főtanúsítvány"
# Serial: 80544274841616
# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
-----BEGIN CERTIFICATE-----
MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
-----END CERTIFICATE-----

# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
# Label: "Staat der Nederlanden Root CA - G2"
# Serial: 10000012
# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
-----BEGIN CERTIFICATE-----
MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov
L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG
SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS
CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen
5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897
IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK
gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL
+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL
vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm
bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk
N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC
Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z
ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ==
-----END CERTIFICATE-----

# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
# Label: "Hongkong Post Root CA 1"
# Serial: 1000
# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca
# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58
# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2
-----BEGIN CERTIFICATE-----
MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
AmvZWg==
-----END CERTIFICATE-----

# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
# Label: "SecureSign RootCA11"
# Serial: 1
# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
-----BEGIN CERTIFICATE-----
MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
QSdJQO7e5iNEOdyhIta6A/I=
-----END CERTIFICATE-----

# Issuer: CN=ACEDICOM Root O=EDICOM OU=PKI
# Subject: CN=ACEDICOM Root O=EDICOM OU=PKI
# Label: "ACEDICOM Root"
# Serial: 7029493972724711941
# MD5 Fingerprint: 42:81:a0:e2:1c:e3:55:10:de:55:89:42:65:96:22:e6
# SHA1 Fingerprint: e0:b4:32:2e:b2:f6:a5:68:b6:54:53:84:48:18:4a:50:36:87:43:84
# SHA256 Fingerprint: 03:95:0f:b4:9a:53:1f:3e:19:91:94:23:98:df:a9:e0:ea:32:d7:ba:1c:dd:9b:c8:5d:b5:7e:d9:40:0b:43:4a
-----BEGIN CERTIFICATE-----
MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UE
AwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00x
CzAJBgNVBAYTAkVTMB4XDTA4MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEW
MBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZF
RElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
AgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHkWLn7
09gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7
XBZXehuDYAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5P
Grjm6gSSrj0RuVFCPYewMYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAK
t0SdE3QrwqXrIhWYENiLxQSfHY9g5QYbm8+5eaA9oiM/Qj9r+hwDezCNzmzAv+Yb
X79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbkHQl/Sog4P75n/TSW9R28
MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTTxKJxqvQU
fecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI
2Sf23EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyH
K9caUPgn6C9D4zq92Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEae
ZAwUswdbxcJzbPEHXEUkFDWug/FqTYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAP
BgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz4SsrSbbXc6GqlPUB53NlTKxQ
MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU9QHnc2VMrFAw
RAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv
bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWIm
fQwng4/F9tqgaHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3
gvoFNTPhNahXwOf9jU8/kzJPeGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKe
I6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1PwkzQSulgUV1qzOMPPKC8W64iLgpq0i
5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1ThCojz2GuHURwCRi
ipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oIKiMn
MCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZ
o5NjEFIqnxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6
zqylfDJKZ0DcMDQj3dcEI2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacN
GHk0vFQYXlPKNFHtRQrmjseCNj6nOGOpMCwXEGCSn1WHElkQwg9naRHMTh5+Spqt
r0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3otkYNbn5XOmeUwssfnHdK
Z05phkOTOPu220+DkdRgfks+KzgHVZhepA==
-----END CERTIFICATE-----

# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
# Label: "Microsec e-Szigno Root CA 2009"
# Serial: 14014712776195784473
# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
-----BEGIN CERTIFICATE-----
MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
+rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
-----END CERTIFICATE-----

# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
# Label: "GlobalSign Root CA - R3"
# Serial: 4835703278459759426209954
# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
-----BEGIN CERTIFICATE-----
MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
WD9f
-----END CERTIFICATE-----

# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
# Serial: 6047274297262753887
# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3
# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa
# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef
-----BEGIN CERTIFICATE-----
MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
-----END CERTIFICATE-----

# Issuer: CN=Izenpe.com O=IZENPE S.A.
# Subject: CN=Izenpe.com O=IZENPE S.A.
# Label: "Izenpe.com"
# Serial: 917563065490389241595536686991402621
# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
-----BEGIN CERTIFICATE-----
MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
-----END CERTIFICATE-----

# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
# Label: "Chambers of Commerce Root - 2008"
# Serial: 11806822484801597146
# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7
# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c
# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0
-----BEGIN CERTIFICATE-----
MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD
VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz
IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz
MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj
dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw
EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp
MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G
CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9
28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq
VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q
DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR
5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL
ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a
Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl
UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s
+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5
Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx
hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV
HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1
+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN
YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t
L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy
ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt
IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV
HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w
DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW
PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF
5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1
glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH
FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2
pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD
xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG
tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq
jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De
fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ
d0jQ
-----END CERTIFICATE-----

# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
# Label: "Global Chambersign Root - 2008"
# Serial: 14541511773111788494
# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3
# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c
# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca
-----BEGIN CERTIFICATE-----
MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD
VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD
aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx
MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy
cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG
A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl
BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI
hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed
KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7
G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2
zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4
ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG
HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2
Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V
yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e
beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r
6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh
wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog
zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW
BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr
ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp
ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk
cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt
YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC
CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow
KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI
hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ
UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz
X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x
fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz
a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd
Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd
SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O
AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso
M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge
v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z
09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B
-----END CERTIFICATE-----

# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
# Label: "Go Daddy Root Certificate Authority - G2"
# Serial: 0
# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
4uJEvlz36hz1
-----END CERTIFICATE-----

# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
# Label: "Starfield Root Certificate Authority - G2"
# Serial: 0
# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
-----BEGIN CERTIFICATE-----
MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
-----END CERTIFICATE-----

# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
# Label: "Starfield Services Root Certificate Authority - G2"
# Serial: 0
# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
-----BEGIN CERTIFICATE-----
MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
sSi6
-----END CERTIFICATE-----

# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
# Subject: CN=AffirmTrust Commercial O=AffirmTrust
# Label: "AffirmTrust Commercial"
# Serial: 8608355977964138876
# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
-----BEGIN CERTIFICATE-----
MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
-----END CERTIFICATE-----

# Issuer: CN=AffirmTrust Networking O=AffirmTrust
# Subject: CN=AffirmTrust Networking O=AffirmTrust
# Label: "AffirmTrust Networking"
# Serial: 8957382827206547757
# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
-----BEGIN CERTIFICATE-----
MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
-----END CERTIFICATE-----

# Issuer: CN=AffirmTrust Premium O=AffirmTrust
# Subject: CN=AffirmTrust Premium O=AffirmTrust
# Label: "AffirmTrust Premium"
# Serial: 7893706540734352110
# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
-----BEGIN CERTIFICATE-----
MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
+jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
KeC2uAloGRwYQw==
-----END CERTIFICATE-----

# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
# Label: "AffirmTrust Premium ECC"
# Serial: 8401224907861490260
# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
-----BEGIN CERTIFICATE-----
MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
-----END CERTIFICATE-----

# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
# Label: "Certum Trusted Network CA"
# Serial: 279744
# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
-----BEGIN CERTIFICATE-----
MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
-----END CERTIFICATE-----

# Issuer: CN=Certinomis - Autorité Racine O=Certinomis OU=0002 433998903
# Subject: CN=Certinomis - Autorité Racine O=Certinomis OU=0002 433998903
# Label: "Certinomis - Autorité Racine"
# Serial: 1
# MD5 Fingerprint: 7f:30:78:8c:03:e3:ca:c9:0a:e2:c9:ea:1e:aa:55:1a
# SHA1 Fingerprint: 2e:14:da:ec:28:f0:fa:1e:8e:38:9a:4e:ab:eb:26:c0:0a:d3:83:c3
# SHA256 Fingerprint: fc:bf:e2:88:62:06:f7:2b:27:59:3c:8b:07:02:97:e1:2d:76:9e:d1:0e:d7:93:07:05:a8:09:8e:ff:c1:4d:17
-----BEGIN CERTIFICATE-----
MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET
MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk
BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4
Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl
cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0
aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY
F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N
8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe
rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K
/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu
7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC
28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6
lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E
nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB
0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09
5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj
WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN
jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ
KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s
ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM
OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q
619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn
2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj
o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v
nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG
5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq
pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb
dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0
BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5
-----END CERTIFICATE-----

# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
# Label: "TWCA Root Certification Authority"
# Serial: 1
# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
-----BEGIN CERTIFICATE-----
MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
-----END CERTIFICATE-----

# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
# Label: "Security Communication RootCA2"
# Serial: 0
# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
-----BEGIN CERTIFICATE-----
MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
-----END CERTIFICATE-----

# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
# Label: "EC-ACC"
# Serial: -23701579247955709139626555126524820479
# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09
# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8
# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99
-----BEGIN CERTIFICATE-----
MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB
8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy
dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1
YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3
dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh
IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD
LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG
EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g
KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD
ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu
bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg
ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R
85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm
4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV
HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd
QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t
lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB
o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E
BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4
opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo
dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW
ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN
AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y
/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k
SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy
Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS
Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl
nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI=
-----END CERTIFICATE-----

# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
# Label: "Hellenic Academic and Research Institutions RootCA 2011"
# Serial: 0
# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9
# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d
# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71
-----BEGIN CERTIFICATE-----
MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix
RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p
YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw
NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK
EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl
cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz
dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ
fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns
bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD
75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP
FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV
HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp
5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu
b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA
A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p
6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7
dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys
Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI
l7WdmplNsDz4SgCbZN2fOUvRJ9e4
-----END CERTIFICATE-----

# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
# Label: "Actalis Authentication Root CA"
# Serial: 6271844772424770508
# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
-----BEGIN CERTIFICATE-----
MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
-----END CERTIFICATE-----

# Issuer: O=Trustis Limited OU=Trustis FPS Root CA
# Subject: O=Trustis Limited OU=Trustis FPS Root CA
# Label: "Trustis FPS Root CA"
# Serial: 36053640375399034304724988975563710553
# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d
# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04
# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d
-----BEGIN CERTIFICATE-----
MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF
MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL
ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx
MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc
MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+
AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH
iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj
vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA
0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB
OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/
BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E
FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01
GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW
zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4
1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE
f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F
jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN
ZetX2fNXlrtIzYE=
-----END CERTIFICATE-----

# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing
# Label: "StartCom Certification Authority"
# Serial: 45
# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16
# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0
# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11
-----BEGIN CERTIFICATE-----
MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW
MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9
MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul
F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC
ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w
ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk
aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0
YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg
c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0
aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93
d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG
CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1
dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF
wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS
Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst
0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc
pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl
CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF
P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK
1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm
KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE
JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ
8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm
fyWl8kgAwKQB2j8=
-----END CERTIFICATE-----

# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd.
# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd.
# Label: "StartCom Certification Authority G2"
# Serial: 59
# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64
# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17
# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95
-----BEGIN CERTIFICATE-----
MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW
MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm
aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1
OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG
A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G
CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ
JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD
vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo
D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/
Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW
RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK
HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN
nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM
0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i
UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9
Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg
TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE
AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL
BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K
2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX
UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl
6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK
9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ
HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI
wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY
XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l
IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo
hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr
so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI
-----END CERTIFICATE-----

# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
# Label: "Buypass Class 2 Root CA"
# Serial: 2
# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29
# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99
# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48
-----BEGIN CERTIFICATE-----
MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
+fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
-----END CERTIFICATE-----

# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
# Label: "Buypass Class 3 Root CA"
# Serial: 2
# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec
# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57
# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d
-----BEGIN CERTIFICATE-----
MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
-----END CERTIFICATE-----

# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
# Label: "T-TeleSec GlobalRoot Class 3"
# Serial: 1
# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef
# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1
# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd
-----BEGIN CERTIFICATE-----
MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
TpPDpFQUWw==
-----END CERTIFICATE-----

# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
# Label: "EE Certification Centre Root CA"
# Serial: 112324828676200291871926431888494945866
# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f
# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7
# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76
-----BEGIN CERTIFICATE-----
MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1
MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1
czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG
CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy
MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl
ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS
b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy
euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO
bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw
WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d
MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE
1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD
VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/
zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB
BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF
BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV
v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG
E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u
uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW
iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v
GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0=
-----END CERTIFICATE-----

# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Aralık 2007
# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Aralık 2007
# Label: "TURKTRUST Certificate Services Provider Root 2007"
# Serial: 1
# MD5 Fingerprint: 2b:70:20:56:86:82:a0:18:c8:07:53:12:28:70:21:72
# SHA1 Fingerprint: f1:7f:6f:b6:31:dc:99:e3:a3:c8:7f:fe:1c:f1:81:10:88:d9:60:33
# SHA256 Fingerprint: 97:8c:d9:66:f2:fa:a0:7b:a7:aa:95:00:d9:c0:2e:9d:77:f2:cd:ad:a6:ad:6b:a7:4a:f4:b9:1c:66:59:3c:50
-----BEGIN CERTIFICATE-----
MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc
UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx
c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS
S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg
SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx
OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry
b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC
VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE
sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F
ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY
KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG
+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG
HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P
IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M
733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk
Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G
CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW
AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I
aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5
mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa
XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ
qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9
-----END CERTIFICATE-----

# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
# Label: "D-TRUST Root Class 3 CA 2 2009"
# Serial: 623603
# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f
# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0
# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1
-----BEGIN CERTIFICATE-----
MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
Johw1+qRzT65ysCQblrGXnRl11z+o+I=
-----END CERTIFICATE-----

# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
# Label: "D-TRUST Root Class 3 CA 2 EV 2009"
# Serial: 623604
# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6
# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83
# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81
-----BEGIN CERTIFICATE-----
MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
-----END CERTIFICATE-----

# Issuer: CN=Autoridad de Certificacion Raiz del Estado Venezolano O=Sistema Nacional de Certificacion Electronica OU=Superintendencia de Servicios de Certificacion Electronica
# Subject: CN=PSCProcert O=Sistema Nacional de Certificacion Electronica OU=Proveedor de Certificados PROCERT
# Label: "PSCProcert"
# Serial: 11
# MD5 Fingerprint: e6:24:e9:12:01:ae:0c:de:8e:85:c4:ce:a3:12:dd:ec
# SHA1 Fingerprint: 70:c1:8d:74:b4:28:81:0a:e4:fd:a5:75:d7:01:9f:99:b0:3d:50:74
# SHA256 Fingerprint: 3c:fc:3c:14:d1:f6:84:ff:17:e3:8c:43:ca:44:0c:00:b9:67:ec:93:3e:8b:fe:06:4c:a1:d7:2c:90:f2:ad:b0
-----BEGIN CERTIFICATE-----
MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1
dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s
YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz
dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0
aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh
IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ
KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEw
MFoXDTIwMTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHBy
b2NlcnQubmV0LnZlMQ8wDQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGEx
KjAoBgNVBAsTIVByb3ZlZWRvciBkZSBDZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQG
A1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9u
aWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIwDQYJKoZI
hvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo9
7BVCwfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74
BCXfgI8Qhd19L3uA3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38G
ieU89RLAu9MLmV+QfI4tL3czkkohRqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9
JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmOEO8GqQKJ/+MMbpfg353bIdD0
PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG20qCZyFSTXai2
0b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH
0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/
6mnbVSKVUyqUtd+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1m
v6JpIzi4mWCZDlZTOpx+FIywBm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7
K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvpr2uKGcfLFFb14dq12fy/czja+eev
bqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/AgEBMDcGA1UdEgQw
MC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0w
MB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFD
gBStuyIdxuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0
b3JpZGFkIGRlIENlcnRpZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xh
bm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0
cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRp
ZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEg
ZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkq
hkiG9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQD
AgEGME0GA1UdEQRGMESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0w
MDAwMDKgGwYFYIZeAgKgEgwQUklGLUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEag
RKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9sY3IvQ0VSVElGSUNBRE8t
UkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNyYWl6LnN1c2Nl
cnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v
Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsG
AQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcN
AQELBQADggIBACtZ6yKZu4SqT96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS
1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmNg7+mvTV+LFwxNG9s2/NkAZiqlCxB
3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4quxtxj7mkoP3Yldmv
Wb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1n8Gh
HVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHm
pHmJWhSnFFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXz
sOfIt+FTvZLm8wyWuevo5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bE
qCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq3TNWOByyrYDT13K9mmyZY+gAu0F2Bbdb
mRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5poLWccret9W6aAjtmcz9
opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3YeMLEYC/H
YvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km
-----END CERTIFICATE-----

# Issuer: CN=CA Disig Root R1 O=Disig a.s.
# Subject: CN=CA Disig Root R1 O=Disig a.s.
# Label: "CA Disig Root R1"
# Serial: 14052245610670616104
# MD5 Fingerprint: be:ec:11:93:9a:f5:69:21:bc:d7:c1:c0:67:89:cc:2a
# SHA1 Fingerprint: 8e:1c:74:f8:a6:20:b9:e5:8a:f4:61:fa:ec:2b:47:56:51:1a:52:c6
# SHA256 Fingerprint: f9:6f:23:f4:c3:e7:9c:07:7a:46:98:8d:5a:f5:90:06:76:a0:f0:39:cb:64:5d:d1:75:49:b2:16:c8:24:40:ce
-----BEGIN CERTIFICATE-----
MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNV
BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQy
MDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjEw
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy3QRk
D2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/o
OI7bm+V8u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3A
fQ+lekLZWnDZv6fXARz2m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJe
IgpFy4QxTaz+29FHuvlglzmxZcfe+5nkCiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8n
oc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTaYVKvJrT1cU/J19IG32PK
/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6vpmumwKj
rckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD
3AjLLhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE
7cderVC6xkGbrPAXZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkC
yC2fg69naQanMVXVz0tv/wQFx1isXxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLd
qvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
DwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ04IwDQYJKoZI
hvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR
xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaA
SfX8MPWbTx9BLxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXo
HqJPYNcHKfyyo6SdbhWSVhlMCrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpB
emOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5GfbVSUZP/3oNn6z4eGBrxEWi1CXYBmC
AMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85YmLLW1AL14FABZyb
7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKSds+x
DzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvk
F7mGnjixlAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqF
a3qdnom2piiZk4hA9z7NUaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsT
Q6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJa7+h89n07eLw4+1knj0vllJPgFOL
-----END CERTIFICATE-----

# Issuer: CN=CA Disig Root R2 O=Disig a.s.
# Subject: CN=CA Disig Root R2 O=Disig a.s.
# Label: "CA Disig Root R2"
# Serial: 10572350602393338211
# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
-----BEGIN CERTIFICATE-----
MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
-----END CERTIFICATE-----

# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
# Label: "ACCVRAIZ1"
# Serial: 6828503384748696800
# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
-----BEGIN CERTIFICATE-----
MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
-----END CERTIFICATE-----

# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
# Label: "TWCA Global Root CA"
# Serial: 3262
# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
-----BEGIN CERTIFICATE-----
MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
KwbQBM0=
-----END CERTIFICATE-----

# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera
# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera
# Label: "TeliaSonera Root CA v1"
# Serial: 199041966741090107964904287217786801558
# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c
# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37
# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89
-----BEGIN CERTIFICATE-----
MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
-----END CERTIFICATE-----

# Issuer: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi
# Subject: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi
# Label: "E-Tugra Certification Authority"
# Serial: 7667447206703254355
# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49
# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39
# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c
-----BEGIN CERTIFICATE-----
MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
+GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
-----END CERTIFICATE-----

# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
# Label: "T-TeleSec GlobalRoot Class 2"
# Serial: 1
# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a
# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9
# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52
-----BEGIN CERTIFICATE-----
MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
BSeOE6Fuwg==
-----END CERTIFICATE-----

# Issuer: CN=Atos TrustedRoot 2011 O=Atos
# Subject: CN=Atos TrustedRoot 2011 O=Atos
# Label: "Atos TrustedRoot 2011"
# Serial: 6643877497813316402
# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56
# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21
# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74
-----BEGIN CERTIFICATE-----
MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
# Label: "QuoVadis Root CA 1 G3"
# Serial: 687049649626669250736271037606554624078720034195
# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
-----BEGIN CERTIFICATE-----
MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
# Label: "QuoVadis Root CA 2 G3"
# Serial: 390156079458959257446133169266079962026824725800
# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
-----BEGIN CERTIFICATE-----
MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
# Label: "QuoVadis Root CA 3 G3"
# Serial: 268090761170461462463995952157327242137089239581
# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
-----BEGIN CERTIFICATE-----
MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Assured ID Root G2"
# Serial: 15385348160840213938643033620894905419
# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d
# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f
# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85
-----BEGIN CERTIFICATE-----
MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
IhNzbM8m9Yop5w==
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Assured ID Root G3"
# Serial: 15459312981008553731928384953135426796
# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb
# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89
# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2
-----BEGIN CERTIFICATE-----
MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
6pZjamVFkpUBtA==
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Global Root G2"
# Serial: 4293743540046975378534879503202253541
# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44
# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4
# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f
-----BEGIN CERTIFICATE-----
MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
MrY=
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Global Root G3"
# Serial: 7089244469030293291760083333884364146
# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca
# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e
# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0
-----BEGIN CERTIFICATE-----
MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
sycX
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Trusted Root G4"
# Serial: 7451500558977370777930084869016614236
# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49
# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4
# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88
-----BEGIN CERTIFICATE-----
MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
+SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
-----END CERTIFICATE-----

# Issuer: CN=Certification Authority of WoSign O=WoSign CA Limited
# Subject: CN=Certification Authority of WoSign O=WoSign CA Limited
# Label: "WoSign"
# Serial: 125491772294754854453622855443212256657
# MD5 Fingerprint: a1:f2:f9:b5:d2:c8:7a:74:b8:f3:05:f1:d7:e1:84:8d
# SHA1 Fingerprint: b9:42:94:bf:91:ea:8f:b6:4b:e6:10:97:c7:fb:00:13:59:b6:76:cb
# SHA256 Fingerprint: 4b:22:d5:a6:ae:c9:9f:3c:db:79:aa:5e:c0:68:38:47:9c:d5:ec:ba:71:64:f7:f2:2d:c1:d6:5f:63:d8:57:08
-----BEGIN CERTIFICATE-----
MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBV
MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNV
BAMTIUNlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgw
MTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFX
b1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNhdGlvbiBBdXRob3Jp
dHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvcqN
rLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1U
fcIiePyOCbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcScc
f+Hb0v1naMQFXQoOXXDX2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2
ZjC1vt7tj/id07sBMOby8w7gLJKA84X5KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4M
x1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR+ScPewavVIMYe+HdVHpR
aG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ezEC8wQjch
zDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDar
uHqklWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221K
mYo0SLwX3OSACCK28jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvA
Sh0JWzko/amrzgD5LkhLJuYwTKVYyrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWv
HYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0CAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R8bNLtwYgFP6H
EtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1
LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJ
MuYhOZO9sxXqT2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2e
JXLOC62qx1ViC777Y7NhRCOjy+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VN
g64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC2nz4SNAzqfkHx5Xh9T71XXG68pWp
dIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes5cVAWubXbHssw1ab
R80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/EaEQ
PkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGce
xGATVdVhmVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+
J7x6v+Db9NpSvd4MVHAxkUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMl
OtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGikpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWT
ee5Ehr7XHuQe+w==
-----END CERTIFICATE-----

# Issuer: CN=CA 沃通根证书 O=WoSign CA Limited
# Subject: CN=CA 沃通根证书 O=WoSign CA Limited
# Label: "WoSign China"
# Serial: 106921963437422998931660691310149453965
# MD5 Fingerprint: 78:83:5b:52:16:76:c4:24:3b:83:78:e8:ac:da:9a:93
# SHA1 Fingerprint: 16:32:47:8d:89:f9:21:3a:92:00:85:63:f5:a4:a7:d3:12:40:8a:d6
# SHA256 Fingerprint: d6:f0:34:bd:94:aa:23:3f:02:97:ec:a4:24:5b:28:39:73:e4:47:aa:59:0f:31:0c:77:f4:8f:df:83:11:22:54
-----BEGIN CERTIFICATE-----
MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBG
MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNV
BAMMEkNBIOayg+mAmuagueivgeS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgw
MTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRl
ZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjANBgkqhkiG9w0BAQEF
AAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k8H/r
D195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld1
9AXbbQs5uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExf
v5RxadmWPgxDT74wwJ85dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnk
UkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+L
NVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFyb7Ao65vh4YOhn0pdr8yb
+gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc76DbT52V
qyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6K
yX2m+Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0G
AbQOXDBGVWCvOGU6yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaK
J/kR8slC/k7e3x9cxKSGhxYzoacXGKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwEC
AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
BBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUAA4ICAQBqinA4
WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6
yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj
/feTZU7n85iYr83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6
jBAyvd0zaziGfjk9DgNyp115j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2
ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0AkLppRQjbbpCBhqcqBT/mhDn4t/lX
X0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97qA4bLJyuQHCH2u2n
FoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Yjj4D
u9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10l
O1Hm13ZBONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Le
ie2uPAmvylezkolwQOQvT8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR1
2KvxAmLBsX5VYc8T1yaw15zLKYs4SgsOkI26oQ==
-----END CERTIFICATE-----

# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited
# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited
# Label: "COMODO RSA Certification Authority"
# Serial: 101909084537582093308941363524873193117
# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18
# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4
# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34
-----BEGIN CERTIFICATE-----
MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
+pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
NVOFBkpdn627G190
-----END CERTIFICATE-----

# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
# Label: "USERTrust RSA Certification Authority"
# Serial: 2645093764781058787591871645665788717
# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5
# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e
# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2
-----BEGIN CERTIFICATE-----
MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
jjxDah2nGN59PRbxYvnKkKj9
-----END CERTIFICATE-----

# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
# Label: "USERTrust ECC Certification Authority"
# Serial: 123013823720199481456569720443997572134
# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1
# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0
# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a
-----BEGIN CERTIFICATE-----
MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
-----END CERTIFICATE-----

# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
# Label: "GlobalSign ECC Root CA - R4"
# Serial: 14367148294922964480859022125800977897474
# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e
# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb
# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c
-----BEGIN CERTIFICATE-----
MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk
MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH
bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ
FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw
DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F
uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX
kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs
ewv4n4Q=
-----END CERTIFICATE-----

# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
# Label: "GlobalSign ECC Root CA - R5"
# Serial: 32785792099990507226680698011560947931244
# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08
# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa
# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24
-----BEGIN CERTIFICATE-----
MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
xwy8p2Fp8fc74SrL+SvzZpA3
-----END CERTIFICATE-----

# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
# Label: "Staat der Nederlanden Root CA - G3"
# Serial: 10003001
# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37
# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc
# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28
-----BEGIN CERTIFICATE-----
MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX
DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP
cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW
IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX
xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy
KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR
9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az
5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8
6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7
Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP
bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt
BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt
XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF
MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd
INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD
U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp
LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8
Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp
gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh
/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw
0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A
fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq
4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR
1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/
QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM
94B7IWcnMFk=
-----END CERTIFICATE-----

# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
# Label: "Staat der Nederlanden EV Root CA"
# Serial: 10000013
# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba
# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb
# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a
-----BEGIN CERTIFICATE-----
MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO
TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh
dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y
MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg
TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS
b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS
M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC
UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d
Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p
rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l
pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb
j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC
KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS
/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X
cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH
1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP
px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7
MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u
2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS
v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC
wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy
CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e
vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6
Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa
Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL
eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8
FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc
7uzXLg==
-----END CERTIFICATE-----

# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
# Label: "IdenTrust Commercial Root CA 1"
# Serial: 13298821034946342390520003877796839426
# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7
# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25
# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae
-----BEGIN CERTIFICATE-----
MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
+ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
-----END CERTIFICATE-----

# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
# Label: "IdenTrust Public Sector Root CA 1"
# Serial: 13298821034946342390521976156843933698
# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba
# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd
# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f
-----BEGIN CERTIFICATE-----
MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
-----END CERTIFICATE-----

# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
# Label: "Entrust Root Certification Authority - G2"
# Serial: 1246989352
# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
-----BEGIN CERTIFICATE-----
MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
-----END CERTIFICATE-----

# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
# Label: "Entrust Root Certification Authority - EC1"
# Serial: 51543124481930649114116133369
# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
-----BEGIN CERTIFICATE-----
MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
-----END CERTIFICATE-----

# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
# Label: "CFCA EV ROOT"
# Serial: 407555286
# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
-----BEGIN CERTIFICATE-----
MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
-----END CERTIFICATE-----

# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş.
# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş.
# Label: "TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5"
# Serial: 156233699172481
# MD5 Fingerprint: da:70:8e:f0:22:df:93:26:f6:5f:9f:d3:15:06:52:4e
# SHA1 Fingerprint: c4:18:f6:4d:46:d1:df:00:3d:27:30:13:72:43:a9:12:11:c6:75:fb
# SHA256 Fingerprint: 49:35:1b:90:34:44:c1:85:cc:dc:5c:69:3d:24:d8:55:5c:b2:08:d6:a8:14:13:07:69:9f:4a:f0:63:19:9d:78
-----BEGIN CERTIFICATE-----
MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UE
BhMCVFIxDzANBgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxn
aSDEsGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkg
QS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1QgRWxla3Ryb25payBTZXJ0aWZpa2Eg
SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAwODA3MDFaFw0yMzA0
MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0wSwYD
VQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8
dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF
bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApCUZ4WWe60ghUEoI5RHwWrom
/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537jVJp45wnEFPzpALFp/kR
Gml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1mep5Fimh3
4khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z
5UNP9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0
hO8EuPbJbKoCPrZV4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QID
AQABo0IwQDAdBgNVHQ4EFgQUVpkHHtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJ5FdnsX
SDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPoBP5yCccLqh0l
VX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq
URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nf
peYVhDfwwvJllpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CF
Yv4HAqGEVka+lgqaE9chTLd8B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW
+qtB4Uu2NQvAmxU=
-----END CERTIFICATE-----

# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
# Label: "Certinomis - Root CA"
# Serial: 1
# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f
# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8
# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58
-----BEGIN CERTIFICATE-----
MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET
MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb
BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz
MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx
FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g
Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2
fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl
LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV
WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF
TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb
5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc
CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri
wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ
wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG
m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4
F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng
WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB
BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0
2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF
AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/
0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw
F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS
g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj
qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN
h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/
ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V
btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj
Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ
8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW
gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE=
-----END CERTIFICATE-----

# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Label: "OISTE WISeKey Global Root GB CA"
# Serial: 157768595616588414422159278966750757568
# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
-----BEGIN CERTIFICATE-----
MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
-----END CERTIFICATE-----

# Issuer: CN=Certification Authority of WoSign G2 O=WoSign CA Limited
# Subject: CN=Certification Authority of WoSign G2 O=WoSign CA Limited
# Label: "Certification Authority of WoSign G2"
# Serial: 142423943073812161787490648904721057092
# MD5 Fingerprint: c8:1c:7d:19:aa:cb:71:93:f2:50:f8:52:a8:1e:ba:60
# SHA1 Fingerprint: fb:ed:dc:90:65:b7:27:20:37:bc:55:0c:9c:56:de:bb:f2:78:94:e1
# SHA256 Fingerprint: d4:87:a5:6f:83:b0:74:82:e8:5e:96:33:94:c1:ec:c2:c9:e5:1d:09:03:ee:94:6b:02:c3:01:58:1e:d9:9e:16
-----BEGIN CERTIFICATE-----
MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBY
MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNV
BAMTJENlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDEx
MDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgxCzAJBgNVBAYTAkNOMRowGAYDVQQK
ExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlmaWNhdGlvbiBBdXRo
b3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPX
JYY1kBaiXW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgO
gHzKtB0TiGsOqCR3A9DuW/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg
5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg95k4ot+vElbGs/V6r+kHLXZ1L3PR8du9n
fwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BKv0mUYQs4kI9dJGwlezt5
2eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJ
KoZIhvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8
fHulwqZm46qwtyeYP0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G
3CE4Q3RM+zD4F3LBMvzIkRfEzFg3TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yy
SrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu+sif/a+RZQp4OBXllxcU3fng
LDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+7Q9LGOHSJDy7
XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg=
-----END CERTIFICATE-----

# Issuer: CN=CA WoSign ECC Root O=WoSign CA Limited
# Subject: CN=CA WoSign ECC Root O=WoSign CA Limited
# Label: "CA WoSign ECC Root"
# Serial: 138625735294506723296996289575837012112
# MD5 Fingerprint: 80:c6:53:ee:61:82:28:72:f0:ff:21:b9:17:ca:b2:20
# SHA1 Fingerprint: d2:7a:d2:be:ed:94:c0:a1:3c:c7:25:21:ea:5d:71:be:81:19:f3:2b
# SHA256 Fingerprint: 8b:45:da:1c:06:f7:91:eb:0c:ab:f2:6b:e5:88:f5:fb:23:16:5c:2e:61:4b:f8:85:56:2d:0d:ce:50:b2:9b:02
-----BEGIN CERTIFICATE-----
MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQsw
CQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMT
EkNBIFdvU2lnbiBFQ0MgUm9vdDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4
NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEb
MBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACID
YgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiUt5v8
KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES
1ns2o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
FgQUqv3VWqP2h4syhf3RMluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB
1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0Daupn75OcsqF1NnstTJFGG+rrQIwfcf3
aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYua/GRspBl9JrmkO5K
-----END CERTIFICATE-----

# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
# Label: "SZAFIR ROOT CA2"
# Serial: 357043034767186914217277344587386743377558296292
# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99
# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de
# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe
-----BEGIN CERTIFICATE-----
MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL
BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6
ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw
NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg
Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN
QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT
3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw
3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6
3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5
BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN
XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF
AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw
8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG
nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP
oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy
d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg
LvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
-----END CERTIFICATE-----

# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
# Label: "Certum Trusted Network CA 2"
# Serial: 44979900017204383099463764357512596969
# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2
# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92
# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04
-----BEGIN CERTIFICATE-----
MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
DrW5viSP
-----END CERTIFICATE-----

# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
# Label: "Hellenic Academic and Research Institutions RootCA 2015"
# Serial: 0
# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce
# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6
# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36
-----BEGIN CERTIFICATE-----
MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix
DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k
IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT
N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v
dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG
A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh
ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx
QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA
4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0
AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10
4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C
ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV
9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD
gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6
Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq
NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko
LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV
HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd
ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I
XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI
M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot
9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V
Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea
j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh
X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ
l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf
bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4
pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK
e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0
vm9qp/UsQu0yrbYhnr68
-----END CERTIFICATE-----

# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015"
# Serial: 0
# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef
# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66
# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33
-----BEGIN CERTIFICATE-----
MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN
BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl
bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv
b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ
BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj
YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5
MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0
dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg
QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa
jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC
MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi
C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep
lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof
TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
-----END CERTIFICATE-----

# Issuer: CN=Certplus Root CA G1 O=Certplus
# Subject: CN=Certplus Root CA G1 O=Certplus
# Label: "Certplus Root CA G1"
# Serial: 1491911565779898356709731176965615564637713
# MD5 Fingerprint: 7f:09:9c:f7:d9:b9:5c:69:69:56:d5:37:3e:14:0d:42
# SHA1 Fingerprint: 22:fd:d0:b7:fd:a2:4e:0d:ac:49:2c:a0:ac:a6:7b:6a:1f:e3:f7:66
# SHA256 Fingerprint: 15:2a:40:2b:fc:df:2c:d5:48:05:4d:22:75:b3:9c:7f:ca:3e:c0:97:80:78:b0:f0:ea:76:e5:61:a6:c7:43:3e
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUA
MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy
dHBsdXMgUm9vdCBDQSBHMTAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBa
MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy
dHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
ANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHNr49a
iZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt
6kuJPKNxQv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP
0FG7Yn2ksYyy/yARujVjBYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f
6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTvLRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDE
EW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2z4QTd28n6v+WZxcIbekN
1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc4nBvCGrc
h2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCT
mehd4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV
4EJQeIQEQWGw9CEjjy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPO
WftwenMGE9nTdDckQQoRb5fc5+R+ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSowcCbkahDFXxd
Bie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHYlwuBsTANBgkq
hkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh
66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7
/SMNkPX0XtPGYX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BS
S7CTKtQ+FjPlnsZlFT5kOwQ/2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j
2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F6ALEUz65noe8zDUa3qHpimOHZR4R
Kttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilXCNQ314cnrUlZp5Gr
RHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWetUNy
6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEV
V/xuZDDCVRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5
g4VCXA9DO2pJNdWY9BW/+mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl
++O/QmueD6i9a5jc2NvLi6Td11n0bt3+qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo=
-----END CERTIFICATE-----

# Issuer: CN=Certplus Root CA G2 O=Certplus
# Subject: CN=Certplus Root CA G2 O=Certplus
# Label: "Certplus Root CA G2"
# Serial: 1492087096131536844209563509228951875861589
# MD5 Fingerprint: a7:ee:c4:78:2d:1b:ee:2d:b9:29:ce:d6:a7:96:32:31
# SHA1 Fingerprint: 4f:65:8e:1f:e9:06:d8:28:02:e9:54:47:41:c9:54:25:5d:69:cc:1a
# SHA256 Fingerprint: 6c:c0:50:41:e6:44:5e:74:69:6c:4c:fb:c9:f8:0f:54:3b:7e:ab:bb:44:b4:ce:6f:78:7c:6a:99:71:c4:2f:17
-----BEGIN CERTIFICATE-----
MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4x
CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs
dXMgUm9vdCBDQSBHMjAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4x
CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs
dXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABM0PW1aC3/BFGtat
93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uNAm8x
Ik0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0P
AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwj
FNiPwyCrKGBZMB8GA1UdIwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqG
SM49BAMDA2gAMGUCMHD+sAvZ94OX7PNVHdTcswYO/jOYnYs5kGuUIe22113WTNch
p+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjlvPl5adytRSv3tjFzzAal
U5ORGpOucGpnutee5WEaXw==
-----END CERTIFICATE-----

# Issuer: CN=OpenTrust Root CA G1 O=OpenTrust
# Subject: CN=OpenTrust Root CA G1 O=OpenTrust
# Label: "OpenTrust Root CA G1"
# Serial: 1492036577811947013770400127034825178844775
# MD5 Fingerprint: 76:00:cc:81:29:cd:55:5e:88:6a:7a:2e:f7:4d:39:da
# SHA1 Fingerprint: 79:91:e8:34:f7:e2:ee:dd:08:95:01:52:e9:55:2d:14:e9:58:d5:7e
# SHA256 Fingerprint: 56:c7:71:28:d9:8c:18:d9:1b:4c:fd:ff:bc:25:ee:91:03:d4:75:8e:a2:ab:ad:82:6a:90:f3:45:7d:46:0e:b4
-----BEGIN CERTIFICATE-----
MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUA
MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w
ZW5UcnVzdCBSb290IENBIEcxMB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAw
MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU
T3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7faYp6b
wiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX
/uMftk87ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR0
77F9jAHiOH3BX2pfJLKOYheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGP
uY4zbGneWK2gDqdkVBFpRGZPTBKnjix9xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLx
p2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO9z0M+Yo0FMT7MzUj8czx
Kselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq3ywgsNw2
TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+W
G+Oin6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPw
vFEVVJSmdz7QdFG9URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYY
EQRVzXR7z2FwefR7LFxckvzluFqrTJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUl0YhVyE1
2jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/PxN3DlCPaTKbYw
DQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E
PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kf
gLMtMrpkZ2CvuVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbS
FXJfLkur1J1juONI5f6ELlgKn0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0
V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLhX4SPgPL0DTatdrOjteFkdjpY3H1P
XlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80nR14SohWZ25g/4/I
i+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcmGS3t
TAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L91
09S5zvE/bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/Ky
Pu1svf0OnWZzsD2097+o4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJ
AwSQiumPv+i2tCqjI40cHLI5kqiPAlxAOXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj
1oxx
-----END CERTIFICATE-----

# Issuer: CN=OpenTrust Root CA G2 O=OpenTrust
# Subject: CN=OpenTrust Root CA G2 O=OpenTrust
# Label: "OpenTrust Root CA G2"
# Serial: 1492012448042702096986875987676935573415441
# MD5 Fingerprint: 57:24:b6:59:24:6b:ae:c8:fe:1c:0c:20:f2:c0:4e:eb
# SHA1 Fingerprint: 79:5f:88:60:c5:ab:7c:3d:92:e6:cb:f4:8d:e1:45:cd:11:ef:60:0b
# SHA256 Fingerprint: 27:99:58:29:fe:6a:75:15:c1:bf:e8:48:f9:c4:76:1d:b1:6c:22:59:29:25:7b:f4:0d:08:94:f2:9e:a8:ba:f2
-----BEGIN CERTIFICATE-----
MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUA
MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w
ZW5UcnVzdCBSb290IENBIEcyMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAw
MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU
T3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+Ntmh
/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78e
CbY2albz4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/6
1UWY0jUJ9gNDlP7ZvyCVeYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fE
FY8ElggGQgT4hNYdvJGmQr5J1WqIP7wtUdGejeBSzFfdNTVY27SPJIjki9/ca1TS
gSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz3GIZ38i1MH/1PCZ1Eb3X
G7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj3CzMpSZy
YhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaH
vGOz9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4
t/bQWVyJ98LVtZR00dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/
gh7PU3+06yzbXfZqfUAkBXKJOAGTy3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUajn6QiL3
5okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59M4PLuG53hq8w
DQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz
Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0
nXGEL8pZ0keImUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qT
RmTFAHneIWv2V6CG1wZy7HBGS4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpT
wm+bREx50B1ws9efAvSyB7DH5fitIw6mVskpEndI2S9G/Tvw/HRwkqWOOAgfZDC2
t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ6e18CL13zSdkzJTa
TkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97krgCf2
o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU
3jg9CcCoSmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eA
iN1nE28daCSLT7d0geX0YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14f
WKGVyasvc0rQLW6aWQ9VGHgtPFGml4vmu7JwqkwR3v98KzfUetF3NI/n+UL3PIEM
S1IK
-----END CERTIFICATE-----

# Issuer: CN=OpenTrust Root CA G3 O=OpenTrust
# Subject: CN=OpenTrust Root CA G3 O=OpenTrust
# Label: "OpenTrust Root CA G3"
# Serial: 1492104908271485653071219941864171170455615
# MD5 Fingerprint: 21:37:b4:17:16:92:7b:67:46:70:a9:96:d7:a8:13:24
# SHA1 Fingerprint: 6e:26:64:f3:56:bf:34:55:bf:d1:93:3f:7c:01:de:d8:13:da:8a:a6
# SHA256 Fingerprint: b7:c3:62:31:70:6e:81:07:8c:36:7c:b8:96:19:8f:1e:32:08:dd:92:69:49:dd:8f:57:09:a4:10:f7:5b:62:92
-----BEGIN CERTIFICATE-----
MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAx
CzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5U
cnVzdCBSb290IENBIEczMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFow
QDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwUT3Bl
blRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARK7liuTcpm
3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5Bta1d
oYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4G
A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5
DMlv4VBN0BBY3JWIbTAfBgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAK
BggqhkjOPQQDAwNpADBmAjEAj6jcnboMBBf6Fek9LykBl7+BFjNAk2z8+e2AcG+q
j9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta3U1fJAuwACEl74+nBCZx
4nxp5V2a+EEfOzmTk51V6s2N8fvB
-----END CERTIFICATE-----

# Issuer: CN=ISRG Root X1 O=Internet Security Research Group
# Subject: CN=ISRG Root X1 O=Internet Security Research Group
# Label: "ISRG Root X1"
# Serial: 172886928669790476064670243504169061120
# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e
# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8
# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
-----END CERTIFICATE-----

# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
# Label: "AC RAIZ FNMT-RCM"
# Serial: 485876308206448804701554682760554759
# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d
# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20
# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa
-----BEGIN CERTIFICATE-----
MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx
CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ
WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ
BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG
Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/
yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf
BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz
WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF
tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z
374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC
IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL
mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7
wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS
MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2
ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet
UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw
AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H
YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3
LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1
RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM
LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf
77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N
JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm
fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp
6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp
1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B
9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok
RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv
uu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
-----END CERTIFICATE-----

# Issuer: CN=Amazon Root CA 1 O=Amazon
# Subject: CN=Amazon Root CA 1 O=Amazon
# Label: "Amazon Root CA 1"
# Serial: 143266978916655856878034712317230054538369994
# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6
# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16
# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e
-----BEGIN CERTIFICATE-----
MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
rqXRfboQnoZsG4q5WTP468SQvvG5
-----END CERTIFICATE-----

# Issuer: CN=Amazon Root CA 2 O=Amazon
# Subject: CN=Amazon Root CA 2 O=Amazon
# Label: "Amazon Root CA 2"
# Serial: 143266982885963551818349160658925006970653239
# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66
# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a
# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4
-----BEGIN CERTIFICATE-----
MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
+XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
+gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
4PsJYGw=
-----END CERTIFICATE-----

# Issuer: CN=Amazon Root CA 3 O=Amazon
# Subject: CN=Amazon Root CA 3 O=Amazon
# Label: "Amazon Root CA 3"
# Serial: 143266986699090766294700635381230934788665930
# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87
# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e
# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4
-----BEGIN CERTIFICATE-----
MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
YyRIHN8wfdVoOw==
-----END CERTIFICATE-----

# Issuer: CN=Amazon Root CA 4 O=Amazon
# Subject: CN=Amazon Root CA 4 O=Amazon
# Label: "Amazon Root CA 4"
# Serial: 143266989758080763974105200630763877849284878
# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd
# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be
# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92
-----BEGIN CERTIFICATE-----
MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
1KyLa2tJElMzrdfkviT8tQp21KW8EA==
-----END CERTIFICATE-----

# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
# Label: "LuxTrust Global Root 2"
# Serial: 59914338225734147123941058376788110305822489521
# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c
# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f
# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5
-----BEGIN CERTIFICATE-----
MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL
BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV
BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw
MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B
LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN
AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F
ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem
hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1
EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn
Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4
zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ
96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m
j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g
DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+
8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j
X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH
hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB
KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0
Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
+Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL
BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9
BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO
jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9
loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c
qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+
2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/
JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre
zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf
LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+
x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6
oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
-----END CERTIFICATE-----

# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
# Serial: 1
# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49
# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca
# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16
-----BEGIN CERTIFICATE-----
MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx
GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp
bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w
KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0
BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy
dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG
EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll
IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU
QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT
TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg
LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7
a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr
LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr
N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X
YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/
iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f
AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH
V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf
IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4
lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c
8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf
lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
-----END CERTIFICATE-----
" - if let data = Data(base64Encoded: roots, options: []) { - return String(data: data, encoding: .utf8) + let roots = + "# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
# Label: "GlobalSign Root CA"
# Serial: 4835703278459707669005204
# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
-----BEGIN CERTIFICATE-----
MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
-----END CERTIFICATE-----

# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
# Label: "GlobalSign Root CA - R2"
# Serial: 4835703278459682885658125
# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30
# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe
# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e
-----BEGIN CERTIFICATE-----
MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
-----END CERTIFICATE-----

# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
# Label: "Verisign Class 3 Public Primary Certification Authority - G3"
# Serial: 206684696279472310254277870180966723415
# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09
# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6
# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44
-----BEGIN CERTIFICATE-----
MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
-----END CERTIFICATE-----

# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Label: "Entrust.net Premium 2048 Secure Server CA"
# Serial: 946069240
# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
-----BEGIN CERTIFICATE-----
MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
fF6adulZkMV8gzURZVE=
-----END CERTIFICATE-----

# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
# Label: "Baltimore CyberTrust Root"
# Serial: 33554617
# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
-----BEGIN CERTIFICATE-----
MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
-----END CERTIFICATE-----

# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
# Label: "AddTrust External Root"
# Serial: 1
# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f
# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68
# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2
-----BEGIN CERTIFICATE-----
MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
-----END CERTIFICATE-----

# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
# Label: "Entrust Root Certification Authority"
# Serial: 1164660820
# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
-----BEGIN CERTIFICATE-----
MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
0vdXcDazv/wor3ElhVsT/h5/WrQ8
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc.
# Subject: CN=GeoTrust Global CA O=GeoTrust Inc.
# Label: "GeoTrust Global CA"
# Serial: 144470
# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5
# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12
# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a
-----BEGIN CERTIFICATE-----
MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc.
# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc.
# Label: "GeoTrust Universal CA"
# Serial: 1
# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48
# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79
# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12
-----BEGIN CERTIFICATE-----
MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy
c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE
BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0
IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV
VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8
cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT
QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh
F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v
c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w
mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd
VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX
teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ
f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe
Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+
nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB
/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY
MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG
9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX
IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn
ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z
uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN
Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja
QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW
koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9
ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt
DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm
bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw=
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
# Label: "GeoTrust Universal CA 2"
# Serial: 1
# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7
# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79
# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b
-----BEGIN CERTIFICATE-----
MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW
MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy
c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD
VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1
c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81
WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG
FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq
XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL
se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb
KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd
IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73
y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt
hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc
QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4
Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV
HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ
KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ
L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr
Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo
ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY
T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz
GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m
1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV
OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX
QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
-----END CERTIFICATE-----

# Issuer: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
# Subject: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
# Label: "Visa eCommerce Root"
# Serial: 25952180776285836048024890241505565794
# MD5 Fingerprint: fc:11:b8:d8:08:93:30:00:6d:23:f9:7e:eb:52:1e:02
# SHA1 Fingerprint: 70:17:9b:86:8c:00:a4:fa:60:91:52:22:3f:9f:3e:32:bd:e0:05:62
# SHA256 Fingerprint: 69:fa:c9:bd:55:fb:0a:c7:8d:53:bb:ee:5c:f1:d5:97:98:9f:d0:aa:ab:20:a2:51:51:bd:f1:73:3e:e7:d1:22
-----BEGIN CERTIFICATE-----
MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr
MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl
cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv
bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw
CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h
dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l
cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h
2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E
lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV
ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq
299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t
vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL
dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF
AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR
zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3
LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd
7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw
++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt
398znM/jra6O1I7mT1GvFpLgXPYHDw==
-----END CERTIFICATE-----

# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
# Subject: CN=AAA Certificate Services O=Comodo CA Limited
# Label: "Comodo AAA Services root"
# Serial: 1
# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
-----BEGIN CERTIFICATE-----
MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
# Label: "QuoVadis Root CA"
# Serial: 985026699
# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24
# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9
# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73
-----BEGIN CERTIFICATE-----
MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC
TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0
aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0
aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz
MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw
IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR
dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp
li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D
rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ
WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug
F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU
xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC
Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv
dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw
ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl
IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh
c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy
ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh
Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI
KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T
KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq
y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p
dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD
VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL
MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk
fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8
7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R
cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y
mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW
xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK
SnQ2+Q==
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
# Label: "QuoVadis Root CA 2"
# Serial: 1289
# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
-----BEGIN CERTIFICATE-----
MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
+ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
# Label: "QuoVadis Root CA 3"
# Serial: 1478
# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
-----BEGIN CERTIFICATE-----
MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
+LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
4SVhM7JZG+Ju1zdXtg2pEto=
-----END CERTIFICATE-----

# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1
# Subject: O=SECOM Trust.net OU=Security Communication RootCA1
# Label: "Security Communication Root CA"
# Serial: 0
# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a
# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7
# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c
-----BEGIN CERTIFICATE-----
MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
-----END CERTIFICATE-----

# Issuer: CN=Sonera Class2 CA O=Sonera
# Subject: CN=Sonera Class2 CA O=Sonera
# Label: "Sonera Class 2 Root CA"
# Serial: 29
# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb
# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27
# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27
-----BEGIN CERTIFICATE-----
MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP
MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx
MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV
BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o
Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt
5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s
3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej
vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu
8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw
DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG
MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil
zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/
3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD
FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6
Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2
ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M
-----END CERTIFICATE-----

# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
# Label: "XRamp Global CA Root"
# Serial: 107108908803651509692980124233745014957
# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
-----BEGIN CERTIFICATE-----
MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
O+7ETPTsJ3xCwnR8gooJybQDJbw=
-----END CERTIFICATE-----

# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
# Label: "Go Daddy Class 2 CA"
# Serial: 0
# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
-----BEGIN CERTIFICATE-----
MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
ReYNnyicsbkqWletNw+vHX/bvZ8=
-----END CERTIFICATE-----

# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
# Label: "Starfield Class 2 CA"
# Serial: 0
# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
-----BEGIN CERTIFICATE-----
MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
+lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
-----END CERTIFICATE-----

# Issuer: O=Government Root Certification Authority
# Subject: O=Government Root Certification Authority
# Label: "Taiwan GRCA"
# Serial: 42023070807708724159991140556527066870
# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e
# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9
# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3
-----BEGIN CERTIFICATE-----
MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/
MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj
YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow
PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp
Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR
IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q
gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy
yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts
F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2
jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx
ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC
VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK
YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH
EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN
Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud
DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE
MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK
UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ
TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf
qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK
ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE
JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7
hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1
EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm
nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX
udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz
ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe
LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl
pYYsfPQS
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Assured ID Root CA"
# Serial: 17154717934120587862167794914071425081
# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
-----BEGIN CERTIFICATE-----
MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
+o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Global Root CA"
# Serial: 10944719598952040374951832963794454346
# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
-----BEGIN CERTIFICATE-----
MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
-----END CERTIFICATE-----

# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert High Assurance EV Root CA"
# Serial: 3553400076410547919724730734378100087
# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
+9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
+OkuE6N36B9K
-----END CERTIFICATE-----

# Issuer: CN=Class 2 Primary CA O=Certplus
# Subject: CN=Class 2 Primary CA O=Certplus
# Label: "Certplus Class 2 Primary CA"
# Serial: 177770208045934040241468760488327595043
# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b
# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb
# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb
-----BEGIN CERTIFICATE-----
MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw
PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz
cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9
MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz
IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ
ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR
VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL
kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd
EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas
H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0
HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud
DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4
QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu
Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/
AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8
yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR
FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA
ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB
kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7
l7+ijrRU
-----END CERTIFICATE-----

# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co.
# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co.
# Label: "DST Root CA X3"
# Serial: 91299735575339953335919266965803778155
# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5
# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13
# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39
-----BEGIN CERTIFICATE-----
MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/
MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow
PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O
rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq
OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b
xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw
7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD
aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG
SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69
ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr
AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz
R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5
JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo
Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
-----END CERTIFICATE-----

# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
# Label: "SwissSign Gold CA - G2"
# Serial: 13492815561806991280
# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
-----BEGIN CERTIFICATE-----
MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
-----END CERTIFICATE-----

# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
# Label: "SwissSign Silver CA - G2"
# Serial: 5700383053117599563
# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
-----BEGIN CERTIFICATE-----
MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
# Label: "GeoTrust Primary Certification Authority"
# Serial: 32798226551256963324313806436981982369
# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf
# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96
# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c
-----BEGIN CERTIFICATE-----
MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
-----END CERTIFICATE-----

# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
# Label: "thawte Primary Root CA"
# Serial: 69529181992039203566298953787712940909
# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12
# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81
# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f
-----BEGIN CERTIFICATE-----
MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
jVaMaA==
-----END CERTIFICATE-----

# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
# Label: "VeriSign Class 3 Public Primary Certification Authority - G5"
# Serial: 33037644167568058970164719475676101450
# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c
# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5
# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df
-----BEGIN CERTIFICATE-----
MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
-----END CERTIFICATE-----

# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
# Subject: CN=SecureTrust CA O=SecureTrust Corporation
# Label: "SecureTrust CA"
# Serial: 17199774589125277788362757014266862032
# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
-----BEGIN CERTIFICATE-----
MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
-----END CERTIFICATE-----

# Issuer: CN=Secure Global CA O=SecureTrust Corporation
# Subject: CN=Secure Global CA O=SecureTrust Corporation
# Label: "Secure Global CA"
# Serial: 9751836167731051554232119481456978597
# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
-----BEGIN CERTIFICATE-----
MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
-----END CERTIFICATE-----

# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
# Label: "COMODO Certification Authority"
# Serial: 104350513648249232941998508985834464573
# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
-----BEGIN CERTIFICATE-----
MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
ZQ==
-----END CERTIFICATE-----

# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
# Label: "Network Solutions Certificate Authority"
# Serial: 116697915152937497490437556386812487904
# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e
# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce
# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c
-----BEGIN CERTIFICATE-----
MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
-----END CERTIFICATE-----

# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
# Label: "COMODO ECC Certification Authority"
# Serial: 41578283867086692638256921589707938090
# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
-----BEGIN CERTIFICATE-----
MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
-----END CERTIFICATE-----

# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
# Label: "OISTE WISeKey Global Root GA CA"
# Serial: 86718877871133159090080555911823548314
# MD5 Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93
# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9
# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5
-----BEGIN CERTIFICATE-----
MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB
ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly
aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl
ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w
NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G
A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD
VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX
SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR
VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2
w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF
mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg
4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9
4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw
DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw
EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx
SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2
ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8
vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa
hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi
Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ
/L7fCg0=
-----END CERTIFICATE-----

# Issuer: CN=Certigna O=Dhimyotis
# Subject: CN=Certigna O=Dhimyotis
# Label: "Certigna"
# Serial: 18364802974209362175
# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
-----BEGIN CERTIFICATE-----
MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
-----END CERTIFICATE-----

# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
# Label: "Deutsche Telekom Root CA 2"
# Serial: 38
# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08
# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf
# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3
-----BEGIN CERTIFICATE-----
MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc
MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj
IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB
IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE
RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl
U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290
IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU
ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC
QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr
rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S
NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc
QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH
txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP
BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC
AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp
tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa
IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl
6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+
xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU
Cm26OWMohpLzGITY+9HPBVZkVw==
-----END CERTIFICATE-----

# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc
# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc
# Label: "Cybertrust Global Root"
# Serial: 4835703278459682877484360
# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1
# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6
# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3
-----BEGIN CERTIFICATE-----
MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG
A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh
bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE
ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS
b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5
7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS
J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y
HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP
t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz
FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY
XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw
hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js
MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA
A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj
Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx
XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o
omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc
A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
WL1WMRJOEcgh4LMRkWXbtKaIOM5V
-----END CERTIFICATE-----

# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
# Label: "ePKI Root Certification Authority"
# Serial: 28956088682735189655030529057352760477
# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
-----BEGIN CERTIFICATE-----
MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
-----END CERTIFICATE-----

# Issuer: O=certSIGN OU=certSIGN ROOT CA
# Subject: O=certSIGN OU=certSIGN ROOT CA
# Label: "certSIGN ROOT CA"
# Serial: 35210227249154
# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
-----BEGIN CERTIFICATE-----
MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
9u6wWk5JRFRYX0KD
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
# Label: "GeoTrust Primary Certification Authority - G3"
# Serial: 28809105769928564313984085209975885599
# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
-----BEGIN CERTIFICATE-----
MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
spki4cErx5z481+oghLrGREt
-----END CERTIFICATE-----

# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
# Label: "thawte Primary Root CA - G2"
# Serial: 71758320672825410020661621085256472406
# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
-----BEGIN CERTIFICATE-----
MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
-----END CERTIFICATE-----

# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
# Label: "thawte Primary Root CA - G3"
# Serial: 127614157056681299805556476275995414779
# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
-----BEGIN CERTIFICATE-----
MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
MdRAGmI0Nj81Aa6sY6A=
-----END CERTIFICATE-----

# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
# Label: "GeoTrust Primary Certification Authority - G2"
# Serial: 80682863203381065782177908751794619243
# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
-----BEGIN CERTIFICATE-----
MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
rD6ogRLQy7rQkgu2npaqBA+K
-----END CERTIFICATE-----

# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
# Label: "VeriSign Universal Root Certification Authority"
# Serial: 85209574734084581917763752644031726877
# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
-----BEGIN CERTIFICATE-----
MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
7M2CYfE45k+XmCpajQ==
-----END CERTIFICATE-----

# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
# Serial: 63143484348153506665311985501458640051
# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
-----BEGIN CERTIFICATE-----
MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
-----END CERTIFICATE-----

# Issuer: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
# Subject: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
# Label: "NetLock Arany (Class Gold) Főtanúsítvány"
# Serial: 80544274841616
# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
-----BEGIN CERTIFICATE-----
MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
-----END CERTIFICATE-----

# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
# Label: "Staat der Nederlanden Root CA - G2"
# Serial: 10000012
# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
-----BEGIN CERTIFICATE-----
MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov
L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG
SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS
CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen
5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897
IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK
gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL
+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL
vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm
bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk
N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC
Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z
ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ==
-----END CERTIFICATE-----

# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
# Label: "Hongkong Post Root CA 1"
# Serial: 1000
# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca
# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58
# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2
-----BEGIN CERTIFICATE-----
MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
AmvZWg==
-----END CERTIFICATE-----

# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
# Label: "SecureSign RootCA11"
# Serial: 1
# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
-----BEGIN CERTIFICATE-----
MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
QSdJQO7e5iNEOdyhIta6A/I=
-----END CERTIFICATE-----

# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
# Label: "Microsec e-Szigno Root CA 2009"
# Serial: 14014712776195784473
# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
-----BEGIN CERTIFICATE-----
MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
+rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
-----END CERTIFICATE-----

# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
# Label: "GlobalSign Root CA - R3"
# Serial: 4835703278459759426209954
# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
-----BEGIN CERTIFICATE-----
MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
WD9f
-----END CERTIFICATE-----

# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
# Serial: 6047274297262753887
# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3
# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa
# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef
-----BEGIN CERTIFICATE-----
MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
-----END CERTIFICATE-----

# Issuer: CN=Izenpe.com O=IZENPE S.A.
# Subject: CN=Izenpe.com O=IZENPE S.A.
# Label: "Izenpe.com"
# Serial: 917563065490389241595536686991402621
# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
-----BEGIN CERTIFICATE-----
MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
-----END CERTIFICATE-----

# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
# Label: "Chambers of Commerce Root - 2008"
# Serial: 11806822484801597146
# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7
# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c
# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0
-----BEGIN CERTIFICATE-----
MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD
VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz
IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz
MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj
dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw
EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp
MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G
CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9
28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq
VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q
DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR
5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL
ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a
Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl
UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s
+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5
Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx
hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV
HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1
+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN
YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t
L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy
ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt
IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV
HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w
DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW
PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF
5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1
glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH
FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2
pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD
xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG
tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq
jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De
fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ
d0jQ
-----END CERTIFICATE-----

# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
# Label: "Global Chambersign Root - 2008"
# Serial: 14541511773111788494
# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3
# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c
# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca
-----BEGIN CERTIFICATE-----
MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD
VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD
aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx
MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy
cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG
A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl
BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI
hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed
KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7
G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2
zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4
ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG
HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2
Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V
yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e
beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r
6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh
wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog
zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW
BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr
ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp
ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk
cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt
YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC
CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow
KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI
hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ
UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz
X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x
fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz
a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd
Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd
SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O
AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso
M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge
v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z
09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B
-----END CERTIFICATE-----

# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
# Label: "Go Daddy Root Certificate Authority - G2"
# Serial: 0
# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
4uJEvlz36hz1
-----END CERTIFICATE-----

# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
# Label: "Starfield Root Certificate Authority - G2"
# Serial: 0
# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
-----BEGIN CERTIFICATE-----
MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
-----END CERTIFICATE-----

# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
# Label: "Starfield Services Root Certificate Authority - G2"
# Serial: 0
# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
-----BEGIN CERTIFICATE-----
MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
sSi6
-----END CERTIFICATE-----

# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
# Subject: CN=AffirmTrust Commercial O=AffirmTrust
# Label: "AffirmTrust Commercial"
# Serial: 8608355977964138876
# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
-----BEGIN CERTIFICATE-----
MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
-----END CERTIFICATE-----

# Issuer: CN=AffirmTrust Networking O=AffirmTrust
# Subject: CN=AffirmTrust Networking O=AffirmTrust
# Label: "AffirmTrust Networking"
# Serial: 8957382827206547757
# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
-----BEGIN CERTIFICATE-----
MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
-----END CERTIFICATE-----

# Issuer: CN=AffirmTrust Premium O=AffirmTrust
# Subject: CN=AffirmTrust Premium O=AffirmTrust
# Label: "AffirmTrust Premium"
# Serial: 7893706540734352110
# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
-----BEGIN CERTIFICATE-----
MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
+jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
KeC2uAloGRwYQw==
-----END CERTIFICATE-----

# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
# Label: "AffirmTrust Premium ECC"
# Serial: 8401224907861490260
# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
-----BEGIN CERTIFICATE-----
MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
-----END CERTIFICATE-----

# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
# Label: "Certum Trusted Network CA"
# Serial: 279744
# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
-----BEGIN CERTIFICATE-----
MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
-----END CERTIFICATE-----

# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
# Label: "TWCA Root Certification Authority"
# Serial: 1
# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
-----BEGIN CERTIFICATE-----
MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
-----END CERTIFICATE-----

# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
# Label: "Security Communication RootCA2"
# Serial: 0
# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
-----BEGIN CERTIFICATE-----
MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
-----END CERTIFICATE-----

# Issuer: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
# Subject: CN=EC-ACC O=Agencia Catalana de Certificacio (NIF Q-0801176-I) OU=Serveis Publics de Certificacio/Vegeu https://www.catcert.net/verarrel (c)03/Jerarquia Entitats de Certificacio Catalanes
# Label: "EC-ACC"
# Serial: -23701579247955709139626555126524820479
# MD5 Fingerprint: eb:f5:9d:29:0d:61:f9:42:1f:7c:c2:ba:6d:e3:15:09
# SHA1 Fingerprint: 28:90:3a:63:5b:52:80:fa:e6:77:4c:0b:6d:a7:d6:ba:a6:4a:f2:e8
# SHA256 Fingerprint: 88:49:7f:01:60:2f:31:54:24:6a:e2:8c:4d:5a:ef:10:f1:d8:7e:bb:76:62:6f:4a:e0:b7:f9:5b:a7:96:87:99
-----BEGIN CERTIFICATE-----
MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB
8zELMAkGA1UEBhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2Vy
dGlmaWNhY2lvIChOSUYgUS0wODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1
YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYDVQQLEyxWZWdldSBodHRwczovL3d3
dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UECxMsSmVyYXJxdWlh
IEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMTBkVD
LUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQG
EwJFUzE7MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8g
KE5JRiBRLTA4MDExNzYtSSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBD
ZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZlZ2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQu
bmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJhcnF1aWEgRW50aXRhdHMg
ZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUNDMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R
85iKw5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm
4CgPukLjbo73FCeTae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaV
HMf5NLWUhdWZXqBIoH7nF2W4onW4HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNd
QlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0aE9jD2z3Il3rucO2n5nzbcc8t
lGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw0JDnJwIDAQAB
o4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E
BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4
opvpXY0wfwYDVR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBo
dHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidW
ZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAwDQYJKoZIhvcN
AQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJlF7W2u++AVtd0x7Y
/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNaAl6k
SBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhy
Rp/7SNVel+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOS
Agu+TGbrIP65y7WZf+a2E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xl
nJ2lYJU6Un/10asIbvPuW/mIPX64b24D5EI=
-----END CERTIFICATE-----

# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
# Label: "Hellenic Academic and Research Institutions RootCA 2011"
# Serial: 0
# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9
# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d
# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71
-----BEGIN CERTIFICATE-----
MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix
RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p
YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw
NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK
EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl
cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz
dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ
fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns
bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD
75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP
FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV
HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp
5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu
b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA
A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p
6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7
dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys
Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI
l7WdmplNsDz4SgCbZN2fOUvRJ9e4
-----END CERTIFICATE-----

# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
# Label: "Actalis Authentication Root CA"
# Serial: 6271844772424770508
# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
-----BEGIN CERTIFICATE-----
MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
-----END CERTIFICATE-----

# Issuer: O=Trustis Limited OU=Trustis FPS Root CA
# Subject: O=Trustis Limited OU=Trustis FPS Root CA
# Label: "Trustis FPS Root CA"
# Serial: 36053640375399034304724988975563710553
# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d
# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04
# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d
-----BEGIN CERTIFICATE-----
MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF
MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL
ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx
MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc
MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+
AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH
iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj
vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA
0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB
OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/
BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E
FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01
GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW
zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4
1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE
f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F
jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN
ZetX2fNXlrtIzYE=
-----END CERTIFICATE-----

# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
# Label: "Buypass Class 2 Root CA"
# Serial: 2
# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29
# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99
# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48
-----BEGIN CERTIFICATE-----
MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
+fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
-----END CERTIFICATE-----

# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
# Label: "Buypass Class 3 Root CA"
# Serial: 2
# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec
# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57
# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d
-----BEGIN CERTIFICATE-----
MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
-----END CERTIFICATE-----

# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
# Label: "T-TeleSec GlobalRoot Class 3"
# Serial: 1
# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef
# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1
# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd
-----BEGIN CERTIFICATE-----
MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
TpPDpFQUWw==
-----END CERTIFICATE-----

# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
# Label: "EE Certification Centre Root CA"
# Serial: 112324828676200291871926431888494945866
# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f
# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7
# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76
-----BEGIN CERTIFICATE-----
MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1
MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1
czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG
CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy
MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl
ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS
b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy
euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO
bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw
WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d
MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE
1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD
VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/
zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB
BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF
BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV
v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG
E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u
uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW
iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v
GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0=
-----END CERTIFICATE-----

# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
# Label: "D-TRUST Root Class 3 CA 2 2009"
# Serial: 623603
# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f
# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0
# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1
-----BEGIN CERTIFICATE-----
MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
Johw1+qRzT65ysCQblrGXnRl11z+o+I=
-----END CERTIFICATE-----

# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
# Label: "D-TRUST Root Class 3 CA 2 EV 2009"
# Serial: 623604
# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6
# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83
# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81
-----BEGIN CERTIFICATE-----
MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
-----END CERTIFICATE-----

# Issuer: CN=CA Disig Root R2 O=Disig a.s.
# Subject: CN=CA Disig Root R2 O=Disig a.s.
# Label: "CA Disig Root R2"
# Serial: 10572350602393338211
# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
-----BEGIN CERTIFICATE-----
MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
-----END CERTIFICATE-----

# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
# Label: "ACCVRAIZ1"
# Serial: 6828503384748696800
# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
-----BEGIN CERTIFICATE-----
MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
-----END CERTIFICATE-----

# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
# Label: "TWCA Global Root CA"
# Serial: 3262
# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
-----BEGIN CERTIFICATE-----
MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
KwbQBM0=
-----END CERTIFICATE-----

# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera
# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera
# Label: "TeliaSonera Root CA v1"
# Serial: 199041966741090107964904287217786801558
# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c
# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37
# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89
-----BEGIN CERTIFICATE-----
MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
-----END CERTIFICATE-----

# Issuer: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi
# Subject: CN=E-Tugra Certification Authority O=E-Tuğra EBG Bilişim Teknolojileri ve Hizmetleri A.Ş. OU=E-Tugra Sertifikasyon Merkezi
# Label: "E-Tugra Certification Authority"
# Serial: 7667447206703254355
# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49
# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39
# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c
-----BEGIN CERTIFICATE-----
MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
+GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
-----END CERTIFICATE-----

# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
# Label: "T-TeleSec GlobalRoot Class 2"
# Serial: 1
# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a
# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9
# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52
-----BEGIN CERTIFICATE-----
MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
BSeOE6Fuwg==
-----END CERTIFICATE-----

# Issuer: CN=Atos TrustedRoot 2011 O=Atos
# Subject: CN=Atos TrustedRoot 2011 O=Atos
# Label: "Atos TrustedRoot 2011"
# Serial: 6643877497813316402
# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56
# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21
# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74
-----BEGIN CERTIFICATE-----
MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
# Label: "QuoVadis Root CA 1 G3"
# Serial: 687049649626669250736271037606554624078720034195
# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
-----BEGIN CERTIFICATE-----
MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
# Label: "QuoVadis Root CA 2 G3"
# Serial: 390156079458959257446133169266079962026824725800
# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
-----BEGIN CERTIFICATE-----
MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
-----END CERTIFICATE-----

# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
# Label: "QuoVadis Root CA 3 G3"
# Serial: 268090761170461462463995952157327242137089239581
# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
-----BEGIN CERTIFICATE-----
MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Assured ID Root G2"
# Serial: 15385348160840213938643033620894905419
# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d
# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f
# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85
-----BEGIN CERTIFICATE-----
MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
IhNzbM8m9Yop5w==
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Assured ID Root G3"
# Serial: 15459312981008553731928384953135426796
# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb
# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89
# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2
-----BEGIN CERTIFICATE-----
MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
6pZjamVFkpUBtA==
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Global Root G2"
# Serial: 4293743540046975378534879503202253541
# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44
# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4
# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f
-----BEGIN CERTIFICATE-----
MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
MrY=
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Global Root G3"
# Serial: 7089244469030293291760083333884364146
# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca
# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e
# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0
-----BEGIN CERTIFICATE-----
MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
sycX
-----END CERTIFICATE-----

# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
# Label: "DigiCert Trusted Root G4"
# Serial: 7451500558977370777930084869016614236
# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49
# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4
# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88
-----BEGIN CERTIFICATE-----
MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
+SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
-----END CERTIFICATE-----

# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited
# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited
# Label: "COMODO RSA Certification Authority"
# Serial: 101909084537582093308941363524873193117
# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18
# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4
# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34
-----BEGIN CERTIFICATE-----
MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
+pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
NVOFBkpdn627G190
-----END CERTIFICATE-----

# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
# Label: "USERTrust RSA Certification Authority"
# Serial: 2645093764781058787591871645665788717
# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5
# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e
# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2
-----BEGIN CERTIFICATE-----
MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
jjxDah2nGN59PRbxYvnKkKj9
-----END CERTIFICATE-----

# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
# Label: "USERTrust ECC Certification Authority"
# Serial: 123013823720199481456569720443997572134
# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1
# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0
# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a
-----BEGIN CERTIFICATE-----
MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
-----END CERTIFICATE-----

# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
# Label: "GlobalSign ECC Root CA - R4"
# Serial: 14367148294922964480859022125800977897474
# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e
# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb
# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c
-----BEGIN CERTIFICATE-----
MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk
MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH
bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ
FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw
DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F
uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX
kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs
ewv4n4Q=
-----END CERTIFICATE-----

# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
# Label: "GlobalSign ECC Root CA - R5"
# Serial: 32785792099990507226680698011560947931244
# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08
# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa
# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24
-----BEGIN CERTIFICATE-----
MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
xwy8p2Fp8fc74SrL+SvzZpA3
-----END CERTIFICATE-----

# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
# Label: "Staat der Nederlanden Root CA - G3"
# Serial: 10003001
# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37
# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc
# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28
-----BEGIN CERTIFICATE-----
MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX
DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP
cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW
IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX
xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy
KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR
9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az
5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8
6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7
Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP
bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt
BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt
XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF
MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd
INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD
U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp
LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8
Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp
gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh
/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw
0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A
fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq
4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR
1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/
QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM
94B7IWcnMFk=
-----END CERTIFICATE-----

# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
# Label: "Staat der Nederlanden EV Root CA"
# Serial: 10000013
# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba
# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb
# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a
-----BEGIN CERTIFICATE-----
MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO
TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh
dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y
MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg
TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS
b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS
M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC
UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d
Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p
rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l
pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb
j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC
KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS
/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X
cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH
1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP
px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7
MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u
2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS
v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC
wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy
CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e
vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6
Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa
Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL
eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8
FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc
7uzXLg==
-----END CERTIFICATE-----

# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
# Label: "IdenTrust Commercial Root CA 1"
# Serial: 13298821034946342390520003877796839426
# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7
# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25
# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae
-----BEGIN CERTIFICATE-----
MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
+ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
-----END CERTIFICATE-----

# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
# Label: "IdenTrust Public Sector Root CA 1"
# Serial: 13298821034946342390521976156843933698
# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba
# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd
# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f
-----BEGIN CERTIFICATE-----
MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
-----END CERTIFICATE-----

# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
# Label: "Entrust Root Certification Authority - G2"
# Serial: 1246989352
# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
-----BEGIN CERTIFICATE-----
MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
-----END CERTIFICATE-----

# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
# Label: "Entrust Root Certification Authority - EC1"
# Serial: 51543124481930649114116133369
# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
-----BEGIN CERTIFICATE-----
MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
-----END CERTIFICATE-----

# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
# Label: "CFCA EV ROOT"
# Serial: 407555286
# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
-----BEGIN CERTIFICATE-----
MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
-----END CERTIFICATE-----

# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
# Label: "Certinomis - Root CA"
# Serial: 1
# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f
# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8
# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58
-----BEGIN CERTIFICATE-----
MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET
MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb
BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz
MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx
FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g
Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2
fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl
LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV
WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF
TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb
5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc
CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri
wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ
wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG
m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4
F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng
WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB
BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0
2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF
AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/
0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw
F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS
g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj
qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN
h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/
ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V
btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj
Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ
8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW
gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE=
-----END CERTIFICATE-----

# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Label: "OISTE WISeKey Global Root GB CA"
# Serial: 157768595616588414422159278966750757568
# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
-----BEGIN CERTIFICATE-----
MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
-----END CERTIFICATE-----

# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
# Label: "SZAFIR ROOT CA2"
# Serial: 357043034767186914217277344587386743377558296292
# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99
# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de
# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe
-----BEGIN CERTIFICATE-----
MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL
BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6
ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw
NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg
Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN
QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT
3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw
3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6
3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5
BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN
XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF
AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw
8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG
nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP
oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy
d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg
LvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
-----END CERTIFICATE-----

# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
# Label: "Certum Trusted Network CA 2"
# Serial: 44979900017204383099463764357512596969
# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2
# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92
# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04
-----BEGIN CERTIFICATE-----
MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
DrW5viSP
-----END CERTIFICATE-----

# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
# Label: "Hellenic Academic and Research Institutions RootCA 2015"
# Serial: 0
# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce
# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6
# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36
-----BEGIN CERTIFICATE-----
MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix
DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k
IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT
N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v
dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG
A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh
ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx
QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA
4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0
AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10
4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C
ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV
9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD
gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6
Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq
NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko
LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV
HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd
ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I
XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI
M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot
9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V
Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea
j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh
X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ
l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf
bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4
pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK
e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0
vm9qp/UsQu0yrbYhnr68
-----END CERTIFICATE-----

# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015"
# Serial: 0
# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef
# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66
# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33
-----BEGIN CERTIFICATE-----
MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN
BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl
bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv
b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ
BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj
YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5
MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0
dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg
QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa
jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC
MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi
C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep
lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof
TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
-----END CERTIFICATE-----

# Issuer: CN=Certplus Root CA G1 O=Certplus
# Subject: CN=Certplus Root CA G1 O=Certplus
# Label: "Certplus Root CA G1"
# Serial: 1491911565779898356709731176965615564637713
# MD5 Fingerprint: 7f:09:9c:f7:d9:b9:5c:69:69:56:d5:37:3e:14:0d:42
# SHA1 Fingerprint: 22:fd:d0:b7:fd:a2:4e:0d:ac:49:2c:a0:ac:a6:7b:6a:1f:e3:f7:66
# SHA256 Fingerprint: 15:2a:40:2b:fc:df:2c:d5:48:05:4d:22:75:b3:9c:7f:ca:3e:c0:97:80:78:b0:f0:ea:76:e5:61:a6:c7:43:3e
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUA
MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy
dHBsdXMgUm9vdCBDQSBHMTAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBa
MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy
dHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
ANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHNr49a
iZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt
6kuJPKNxQv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP
0FG7Yn2ksYyy/yARujVjBYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f
6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTvLRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDE
EW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2z4QTd28n6v+WZxcIbekN
1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc4nBvCGrc
h2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCT
mehd4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV
4EJQeIQEQWGw9CEjjy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPO
WftwenMGE9nTdDckQQoRb5fc5+R+ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSowcCbkahDFXxd
Bie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHYlwuBsTANBgkq
hkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh
66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7
/SMNkPX0XtPGYX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BS
S7CTKtQ+FjPlnsZlFT5kOwQ/2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j
2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F6ALEUz65noe8zDUa3qHpimOHZR4R
Kttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilXCNQ314cnrUlZp5Gr
RHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWetUNy
6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEV
V/xuZDDCVRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5
g4VCXA9DO2pJNdWY9BW/+mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl
++O/QmueD6i9a5jc2NvLi6Td11n0bt3+qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo=
-----END CERTIFICATE-----

# Issuer: CN=Certplus Root CA G2 O=Certplus
# Subject: CN=Certplus Root CA G2 O=Certplus
# Label: "Certplus Root CA G2"
# Serial: 1492087096131536844209563509228951875861589
# MD5 Fingerprint: a7:ee:c4:78:2d:1b:ee:2d:b9:29:ce:d6:a7:96:32:31
# SHA1 Fingerprint: 4f:65:8e:1f:e9:06:d8:28:02:e9:54:47:41:c9:54:25:5d:69:cc:1a
# SHA256 Fingerprint: 6c:c0:50:41:e6:44:5e:74:69:6c:4c:fb:c9:f8:0f:54:3b:7e:ab:bb:44:b4:ce:6f:78:7c:6a:99:71:c4:2f:17
-----BEGIN CERTIFICATE-----
MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4x
CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs
dXMgUm9vdCBDQSBHMjAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4x
CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs
dXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABM0PW1aC3/BFGtat
93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uNAm8x
Ik0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0P
AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwj
FNiPwyCrKGBZMB8GA1UdIwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqG
SM49BAMDA2gAMGUCMHD+sAvZ94OX7PNVHdTcswYO/jOYnYs5kGuUIe22113WTNch
p+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjlvPl5adytRSv3tjFzzAal
U5ORGpOucGpnutee5WEaXw==
-----END CERTIFICATE-----

# Issuer: CN=OpenTrust Root CA G1 O=OpenTrust
# Subject: CN=OpenTrust Root CA G1 O=OpenTrust
# Label: "OpenTrust Root CA G1"
# Serial: 1492036577811947013770400127034825178844775
# MD5 Fingerprint: 76:00:cc:81:29:cd:55:5e:88:6a:7a:2e:f7:4d:39:da
# SHA1 Fingerprint: 79:91:e8:34:f7:e2:ee:dd:08:95:01:52:e9:55:2d:14:e9:58:d5:7e
# SHA256 Fingerprint: 56:c7:71:28:d9:8c:18:d9:1b:4c:fd:ff:bc:25:ee:91:03:d4:75:8e:a2:ab:ad:82:6a:90:f3:45:7d:46:0e:b4
-----BEGIN CERTIFICATE-----
MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUA
MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w
ZW5UcnVzdCBSb290IENBIEcxMB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAw
MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU
T3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7faYp6b
wiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX
/uMftk87ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR0
77F9jAHiOH3BX2pfJLKOYheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGP
uY4zbGneWK2gDqdkVBFpRGZPTBKnjix9xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLx
p2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO9z0M+Yo0FMT7MzUj8czx
Kselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq3ywgsNw2
TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+W
G+Oin6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPw
vFEVVJSmdz7QdFG9URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYY
EQRVzXR7z2FwefR7LFxckvzluFqrTJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUl0YhVyE1
2jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/PxN3DlCPaTKbYw
DQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E
PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kf
gLMtMrpkZ2CvuVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbS
FXJfLkur1J1juONI5f6ELlgKn0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0
V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLhX4SPgPL0DTatdrOjteFkdjpY3H1P
XlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80nR14SohWZ25g/4/I
i+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcmGS3t
TAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L91
09S5zvE/bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/Ky
Pu1svf0OnWZzsD2097+o4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJ
AwSQiumPv+i2tCqjI40cHLI5kqiPAlxAOXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj
1oxx
-----END CERTIFICATE-----

# Issuer: CN=OpenTrust Root CA G2 O=OpenTrust
# Subject: CN=OpenTrust Root CA G2 O=OpenTrust
# Label: "OpenTrust Root CA G2"
# Serial: 1492012448042702096986875987676935573415441
# MD5 Fingerprint: 57:24:b6:59:24:6b:ae:c8:fe:1c:0c:20:f2:c0:4e:eb
# SHA1 Fingerprint: 79:5f:88:60:c5:ab:7c:3d:92:e6:cb:f4:8d:e1:45:cd:11:ef:60:0b
# SHA256 Fingerprint: 27:99:58:29:fe:6a:75:15:c1:bf:e8:48:f9:c4:76:1d:b1:6c:22:59:29:25:7b:f4:0d:08:94:f2:9e:a8:ba:f2
-----BEGIN CERTIFICATE-----
MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUA
MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w
ZW5UcnVzdCBSb290IENBIEcyMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAw
MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU
T3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+Ntmh
/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78e
CbY2albz4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/6
1UWY0jUJ9gNDlP7ZvyCVeYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fE
FY8ElggGQgT4hNYdvJGmQr5J1WqIP7wtUdGejeBSzFfdNTVY27SPJIjki9/ca1TS
gSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz3GIZ38i1MH/1PCZ1Eb3X
G7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj3CzMpSZy
YhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaH
vGOz9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4
t/bQWVyJ98LVtZR00dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/
gh7PU3+06yzbXfZqfUAkBXKJOAGTy3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUajn6QiL3
5okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59M4PLuG53hq8w
DQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz
Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0
nXGEL8pZ0keImUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qT
RmTFAHneIWv2V6CG1wZy7HBGS4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpT
wm+bREx50B1ws9efAvSyB7DH5fitIw6mVskpEndI2S9G/Tvw/HRwkqWOOAgfZDC2
t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ6e18CL13zSdkzJTa
TkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97krgCf2
o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU
3jg9CcCoSmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eA
iN1nE28daCSLT7d0geX0YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14f
WKGVyasvc0rQLW6aWQ9VGHgtPFGml4vmu7JwqkwR3v98KzfUetF3NI/n+UL3PIEM
S1IK
-----END CERTIFICATE-----

# Issuer: CN=OpenTrust Root CA G3 O=OpenTrust
# Subject: CN=OpenTrust Root CA G3 O=OpenTrust
# Label: "OpenTrust Root CA G3"
# Serial: 1492104908271485653071219941864171170455615
# MD5 Fingerprint: 21:37:b4:17:16:92:7b:67:46:70:a9:96:d7:a8:13:24
# SHA1 Fingerprint: 6e:26:64:f3:56:bf:34:55:bf:d1:93:3f:7c:01:de:d8:13:da:8a:a6
# SHA256 Fingerprint: b7:c3:62:31:70:6e:81:07:8c:36:7c:b8:96:19:8f:1e:32:08:dd:92:69:49:dd:8f:57:09:a4:10:f7:5b:62:92
-----BEGIN CERTIFICATE-----
MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAx
CzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5U
cnVzdCBSb290IENBIEczMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFow
QDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwUT3Bl
blRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARK7liuTcpm
3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5Bta1d
oYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4G
A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5
DMlv4VBN0BBY3JWIbTAfBgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAK
BggqhkjOPQQDAwNpADBmAjEAj6jcnboMBBf6Fek9LykBl7+BFjNAk2z8+e2AcG+q
j9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta3U1fJAuwACEl74+nBCZx
4nxp5V2a+EEfOzmTk51V6s2N8fvB
-----END CERTIFICATE-----

# Issuer: CN=ISRG Root X1 O=Internet Security Research Group
# Subject: CN=ISRG Root X1 O=Internet Security Research Group
# Label: "ISRG Root X1"
# Serial: 172886928669790476064670243504169061120
# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e
# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8
# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
-----END CERTIFICATE-----

# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
# Label: "AC RAIZ FNMT-RCM"
# Serial: 485876308206448804701554682760554759
# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d
# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20
# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa
-----BEGIN CERTIFICATE-----
MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx
CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ
WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ
BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG
Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/
yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf
BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz
WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF
tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z
374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC
IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL
mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7
wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS
MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2
ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet
UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw
AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H
YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3
LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1
RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM
LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf
77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N
JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm
fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp
6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp
1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B
9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok
RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv
uu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
-----END CERTIFICATE-----

# Issuer: CN=Amazon Root CA 1 O=Amazon
# Subject: CN=Amazon Root CA 1 O=Amazon
# Label: "Amazon Root CA 1"
# Serial: 143266978916655856878034712317230054538369994
# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6
# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16
# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e
-----BEGIN CERTIFICATE-----
MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
rqXRfboQnoZsG4q5WTP468SQvvG5
-----END CERTIFICATE-----

# Issuer: CN=Amazon Root CA 2 O=Amazon
# Subject: CN=Amazon Root CA 2 O=Amazon
# Label: "Amazon Root CA 2"
# Serial: 143266982885963551818349160658925006970653239
# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66
# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a
# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4
-----BEGIN CERTIFICATE-----
MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
+XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
+gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
4PsJYGw=
-----END CERTIFICATE-----

# Issuer: CN=Amazon Root CA 3 O=Amazon
# Subject: CN=Amazon Root CA 3 O=Amazon
# Label: "Amazon Root CA 3"
# Serial: 143266986699090766294700635381230934788665930
# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87
# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e
# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4
-----BEGIN CERTIFICATE-----
MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
YyRIHN8wfdVoOw==
-----END CERTIFICATE-----

# Issuer: CN=Amazon Root CA 4 O=Amazon
# Subject: CN=Amazon Root CA 4 O=Amazon
# Label: "Amazon Root CA 4"
# Serial: 143266989758080763974105200630763877849284878
# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd
# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be
# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92
-----BEGIN CERTIFICATE-----
MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
1KyLa2tJElMzrdfkviT8tQp21KW8EA==
-----END CERTIFICATE-----

# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
# Label: "LuxTrust Global Root 2"
# Serial: 59914338225734147123941058376788110305822489521
# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c
# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f
# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5
-----BEGIN CERTIFICATE-----
MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL
BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV
BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw
MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B
LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN
AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F
ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem
hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1
EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn
Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4
zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ
96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m
j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g
DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+
8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j
X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH
hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB
KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0
Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
+Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL
BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9
BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO
jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9
loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c
qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+
2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/
JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre
zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf
LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+
x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6
oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
-----END CERTIFICATE-----

# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
# Serial: 1
# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49
# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca
# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16
-----BEGIN CERTIFICATE-----
MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx
GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp
bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w
KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0
BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy
dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG
EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll
IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU
QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT
TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg
LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7
a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr
LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr
N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X
YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/
iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f
AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH
V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf
IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4
lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c
8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf
lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
-----END CERTIFICATE-----

# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
# Label: "GDCA TrustAUTH R5 ROOT"
# Serial: 9009899650740120186
# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4
# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4
# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93
-----BEGIN CERTIFICATE-----
MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE
BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ
IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0
MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV
BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w
HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF
AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj
Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj
TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u
KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj
qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm
MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12
ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP
zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk
L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC
jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA
HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC
AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB
/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg
p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm
DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5
COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry
L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf
JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg
IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io
2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV
09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ
XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq
T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe
MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
-----END CERTIFICATE-----

# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
# Label: "TrustCor RootCert CA-1"
# Serial: 15752444095811006489
# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45
# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a
# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c
-----BEGIN CERTIFICATE-----
MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD
VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y
IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB
pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h
IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG
A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU
cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid
RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V
seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme
9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV
EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW
hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/
DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw
DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD
ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I
/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf
ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ
yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts
L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN
zl/HHk484IkzlQsPpTLWPFp5LBk=
-----END CERTIFICATE-----

# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
# Label: "TrustCor RootCert CA-2"
# Serial: 2711694510199101698
# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64
# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0
# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65
-----BEGIN CERTIFICATE-----
MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV
BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig
Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk
MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg
Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD
VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy
dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+
QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq
1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp
2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK
DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape
az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF
3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88
oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM
g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3
mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh
8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd
BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U
nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw
DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX
dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+
MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL
/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX
CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa
ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW
2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7
N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3
Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB
As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp
5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu
1uwJ
-----END CERTIFICATE-----

# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
# Label: "TrustCor ECA-1"
# Serial: 9548242946988625984
# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c
# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd
# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c
-----BEGIN CERTIFICATE-----
MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD
VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y
IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV
BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig
RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb
3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA
BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5
3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou
owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/
wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF
ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf
BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/
MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv
civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2
AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F
hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50
soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI
WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi
tJ/X5g==
-----END CERTIFICATE-----

# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
# Label: "SSL.com Root Certification Authority RSA"
# Serial: 8875640296558310041
# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29
# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb
# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69
-----BEGIN CERTIFICATE-----
MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE
BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK
DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp
Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz
OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN
AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R
xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX
qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC
C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3
6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh
/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF
YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E
JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc
US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8
ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm
+Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi
M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G
A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV
cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc
Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs
PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/
q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0
cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr
a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I
H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y
K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu
nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf
oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY
Ic2wBlX7Jz9TkHCpBB5XJ7k=
-----END CERTIFICATE-----

# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
# Label: "SSL.com Root Certification Authority ECC"
# Serial: 8495723813297216424
# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e
# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a
# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65
-----BEGIN CERTIFICATE-----
MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC
VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0
aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz
WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0
b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS
b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI
7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg
CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud
EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD
VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T
kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+
gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl
-----END CERTIFICATE-----

# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
# Label: "SSL.com EV Root Certification Authority RSA R2"
# Serial: 6248227494352943350
# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95
# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a
# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c
-----BEGIN CERTIFICATE-----
MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV
BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE
CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy
dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy
MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G
A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD
DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq
M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf
OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa
4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9
HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR
aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA
b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ
Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV
PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO
pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu
UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY
MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV
HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4
9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW
s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5
Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg
cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM
79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz
/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt
ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm
Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK
QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ
w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi
S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07
mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w==
-----END CERTIFICATE-----

# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
# Label: "SSL.com EV Root Certification Authority ECC"
# Serial: 3182246526754555285
# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90
# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d
# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8
-----BEGIN CERTIFICATE-----
MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC
VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp
Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx
NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv
bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49
AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA
VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku
WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP
MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX
5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ
ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg
h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg==
-----END CERTIFICATE-----
" + if let data = Data(base64Encoded: roots, options:[]) { + return String(data:data, encoding:.utf8) } else { return nil } } + diff --git a/SwiftGRPC.podspec b/SwiftGRPC.podspec index 4e7a3954e..0d32be66d 100644 --- a/SwiftGRPC.podspec +++ b/SwiftGRPC.podspec @@ -39,7 +39,7 @@ Pod::Spec.new do |s| s.source_files = 'Sources/SwiftGRPC/*.swift', 'Sources/SwiftGRPC/**/*.swift', 'Sources/CgRPC/shim/*.[ch]' s.public_header_files = 'Sources/CgRPC/shim/cgrpc.h' - s.dependency 'gRPC-Core', '~> 1.11.0' + s.dependency 'gRPC-Core', '~> 1.12.0' s.dependency 'BoringSSL', '~> 10.0' s.dependency 'SwiftProtobuf', '~> 1.0.3' end diff --git a/fix-indentation-settings.rb b/fix-indentation-settings.rb deleted file mode 100644 index 43c6f355f..000000000 --- a/fix-indentation-settings.rb +++ /dev/null @@ -1,7 +0,0 @@ -require 'xcodeproj' -project_path = './SwiftGRPC.xcodeproj' -project = Xcodeproj::Project.open(project_path) -project.main_group.uses_tabs = '0' -project.main_group.tab_width = '2' -project.main_group.indent_width = '2' -project.save diff --git a/fix-project-settings.rb b/fix-project-settings.rb new file mode 100644 index 000000000..38b1058a7 --- /dev/null +++ b/fix-project-settings.rb @@ -0,0 +1,15 @@ +require 'xcodeproj' +project_path = './SwiftGRPC.xcodeproj' +project = Xcodeproj::Project.open(project_path) + +project.main_group.uses_tabs = '0' +project.main_group.tab_width = '2' +project.main_group.indent_width = '2' + +cgrpc = project.targets.select { |t| t.name == 'CgRPC' }.first +cgrpc.build_configurations.each do |config| + config.build_settings['CLANG_CXX_LANGUAGE_STANDARD'] = 'c++0x' + config.build_settings['OTHER_CFLAGS'] = '-DPB_FIELD_16BIT=1' +end + +project.save diff --git a/vendor-boringssl.sh b/vendor-boringssl.sh index c9ec9c50e..e56d0d870 100755 --- a/vendor-boringssl.sh +++ b/vendor-boringssl.sh @@ -38,11 +38,15 @@ rm -rf $DSTROOT/err_data.c PATTERNS=( 'include/openssl/*.h' 'ssl/*.h' -'ssl/*.c' +'ssl/*.cc' 'crypto/*.h' 'crypto/*.c' -'crypto/**/*.h' -'crypto/**/*.c' +'crypto/*/*.h' +'crypto/*/*.c' +'crypto/*/*/*.h' +'crypto/*/*/*.c' +'third_party/fiat/*.h' +'third_party/fiat/*.c' ) EXCLUDES=( @@ -54,7 +58,6 @@ EXCLUDES=( for pattern in "${PATTERNS[@]}" do - echo "COPYING $pattern" for i in $SRCROOT/$pattern; do path=${i#$SRCROOT} dest="$DSTROOT$path" @@ -73,6 +76,12 @@ do find $DSTROOT -d -name "$exclude" -exec rm -rf {} \; done +echo "GENERATING err_data.c" +go run $SRCROOT/crypto/err/err_data_generate.go > $DSTROOT/crypto/err/err_data.c + +echo "DELETING crypto/fipsmodule/bcm.c" +rm -f $DSTROOT/crypto/fipsmodule/bcm.c + # # edit the BoringSSL headers to disable dependency on assembly language helpers. # From 470fb2fbe31d042e3f5cfd89a2683b649b11feb3 Mon Sep 17 00:00:00 2001 From: Daniel Alm Date: Tue, 29 May 2018 14:05:23 +0200 Subject: [PATCH 2/9] Update .travis-install.sh to Swift 4.1.1. 
---
 .travis-install.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis-install.sh b/.travis-install.sh
index eb201bc0b..36b416933 100755
--- a/.travis-install.sh
+++ b/.travis-install.sh
@@ -32,7 +32,7 @@ if [ "$TRAVIS_OS_NAME" == "osx" ]; then
   PROTOC_URL=https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-osx-x86_64.zip
 else
   # Install swift
-  SWIFT_URL=https://swift.org/builds/swift-4.0.3-release/ubuntu1404/swift-4.0.3-RELEASE/swift-4.0.3-RELEASE-ubuntu14.04.tar.gz
+  SWIFT_URL=https://swift.org/builds/swift-4.1.1-release/ubuntu1404/swift-4.1.1-RELEASE/swift-4.1.1-RELEASE-ubuntu14.04.tar.gz
   echo $SWIFT_URL
   curl -fSsL $SWIFT_URL -o swift.tar.gz
   tar -xzf swift.tar.gz --strip-components=2 --directory=local

From 070170d1230bba639391edce28bfa12062ab3ea5 Mon Sep 17 00:00:00 2001
From: Daniel Alm
Date: Tue, 29 May 2018 14:11:02 +0200
Subject: [PATCH 3/9] Also update the Mac Travis system images to Xcode 9.3.

---
 .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index e59fb0573..84de43c24 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -28,7 +28,7 @@ cache:
 # Use Ubuntu 14.04
 dist: trusty
 
-osx_image: xcode9
+osx_image: xcode9.3
 
 sudo: false
 

From 0368de9e8b501b5d0229cfd9490062e18fa164a9 Mon Sep 17 00:00:00 2001
From: Daniel Alm
Date: Tue, 29 May 2018 14:24:25 +0200
Subject: [PATCH 4/9] Re-add swift-nio-zlib-support for macOS builds.

---
 Package.swift | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)

diff --git a/Package.swift b/Package.swift
index fa723a4d2..4b96c192a 100644
--- a/Package.swift
+++ b/Package.swift
@@ -17,30 +17,16 @@
  */
 import PackageDescription
 
-var dependencies: [Package.Dependency] = [
-  .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.0.2"),
-  .package(url: "https://github.com/kylef/Commander.git", from: "0.8.0")
-]
-
-/*
- * `swift-nio-zlib-support` uses `pkgConfig` to find `zlib` on
- * non-Apple platforms. Details here:
- * https://github.com/apple/swift-nio-zlib-support/issues/2#issuecomment-384681975
- *
- * This doesn't play well with Macports, so require it only for non-Apple
- * platforms, until there is a better solution.
- * Issue: https://github.com/grpc/grpc-swift/issues/220
- */
-#if !os(macOS)
-dependencies.append(.package(url: "https://github.com/apple/swift-nio-zlib-support.git", from: "1.0.0"))
-#endif
-
 let package = Package(
   name: "SwiftGRPC",
   products: [
     .library(name: "SwiftGRPC", targets: ["SwiftGRPC"]),
   ],
-  dependencies: dependencies,
+  dependencies: [
+    .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.0.2"),
+    .package(url: "https://github.com/kylef/Commander.git", from: "0.8.0"),
+    .package(url: "https://github.com/apple/swift-nio-zlib-support.git", from: "1.0.0")
+  ],
   targets: [
     .target(name: "SwiftGRPC", dependencies: ["CgRPC", "SwiftProtobuf"]),

From 4a16c13ebbbe94b5f37bfcfb955a4f931cc8a02d Mon Sep 17 00:00:00 2001
From: Daniel Alm
Date: Wed, 30 May 2018 09:34:21 +0200
Subject: [PATCH 5/9] Move a compilation flag out of the Makefile and fix-project-settings.rb and into vendor-grpc.sh, which adds them to Sources/CgRPC/third_party/nanopb/pb.h.

---
 Makefile                              | 4 ++--
 Sources/CgRPC/third_party/nanopb/pb.h | 2 +-
 fix-project-settings.rb               | 6 ------
 vendor-grpc.sh                        | 3 +++
 4 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/Makefile b/Makefile
index c84f063a5..d0ea82740 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
-CFLAGS = -Xcc -ISources/BoringSSL/include -Xcc -DPB_FIELD_16BIT=1
+CFLAGS = -Xcc -ISources/BoringSSL/include
 
 all:
 	swift build -v $(CFLAGS)
@@ -8,7 +8,7 @@ all:
 	cp .build/debug/protoc-gen-swiftgrpc .
 
 project:
 	swift package generate-xcodeproj
-	@ruby fix-project-settings.rb || echo "ERROR: Please install Ruby and the 'xcodeproj' gem to automatically fix the Xcode project's settings."
+	@-ruby fix-project-settings.rb || echo "Consider running 'sudo gem install xcodeproj' to automatically set correct indentation settings for the generated project."
 
 test: all
 	swift test -v $(CFLAGS)
diff --git a/Sources/CgRPC/third_party/nanopb/pb.h b/Sources/CgRPC/third_party/nanopb/pb.h
index 62dca73f4..a8e1bf7aa 100644
--- a/Sources/CgRPC/third_party/nanopb/pb.h
+++ b/Sources/CgRPC/third_party/nanopb/pb.h
@@ -22,7 +22,7 @@
 /* #define PB_MAX_REQUIRED_FIELDS 256 */
 
 /* Add support for tag numbers > 255 and fields larger than 255 bytes. */
-/* #define PB_FIELD_16BIT 1 */
+#define PB_FIELD_16BIT 1
 
 /* Add support for tag numbers > 65536 and fields larger than 65536 bytes. */
 /* #define PB_FIELD_32BIT 1 */
diff --git a/fix-project-settings.rb b/fix-project-settings.rb
index 38b1058a7..43e496d03 100644
--- a/fix-project-settings.rb
+++ b/fix-project-settings.rb
@@ -6,10 +6,4 @@
 project.main_group.tab_width = '2'
 project.main_group.indent_width = '2'
 
-cgrpc = project.targets.select { |t| t.name == 'CgRPC' }.first
-cgrpc.build_configurations.each do |config|
-  config.build_settings['CLANG_CXX_LANGUAGE_STANDARD'] = 'c++0x'
-  config.build_settings['OTHER_CFLAGS'] = '-DPB_FIELD_16BIT=1'
-end
-
 project.save
diff --git a/vendor-grpc.sh b/vendor-grpc.sh
index 718a72e18..c54a38383 100755
--- a/vendor-grpc.sh
+++ b/vendor-grpc.sh
@@ -72,6 +72,9 @@ done
 echo "COPYING additional nanopb headers"
 cp third_party/grpc/third_party/nanopb/*.h Sources/CgRPC/third_party/nanopb/
 
+echo "ADDING additional compiler flags to nanopb/pb.h"
+perl -pi -e 's/\/\* #define PB_FIELD_16BIT 1 \*\//#define PB_FIELD_16BIT 1/' Sources/CgRPC/third_party/nanopb/pb.h
+
 echo "DISABLING ARES"
 perl -pi -e 's/#define GRPC_ARES 1/#define GRPC_ARES 0/' Sources/CgRPC/include/grpc/impl/codegen/port_platform.h
 

From c12125be6fbcd2e3b162eb6c3ffdc71270a28709 Mon Sep 17 00:00:00 2001
From: Daniel Alm
Date: Tue, 29 May 2018 14:24:25 +0200
Subject: [PATCH 6/9] Re-add swift-nio-zlib-support for macOS builds.

---
 Package.swift | 28 ++++++----------------------
 1 file changed, 6 insertions(+), 22 deletions(-)

diff --git a/Package.swift b/Package.swift
index ad4ced6e8..a7f9abb2b 100644
--- a/Package.swift
+++ b/Package.swift
@@ -17,39 +17,23 @@
  */
 import PackageDescription
 
-var dependencies: [Package.Dependency] = [
-  .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.0.2"),
-  .package(url: "https://github.com/kylef/Commander.git", from: "0.8.0")
-]
-
 var cGRPCDependencies: [Target.Dependency] = []
-#if os(Linux)
+#if !os(Linux)
 // On Linux, Foundation links with openssl, so we'll need to use that instead of BoringSSL.
 // See https://github.com/apple/swift-nio-ssl/issues/16#issuecomment-392705505 for details.
-dependencies.append(.package(url: "https://github.com/apple/swift-nio-ssl-support.git", from: "1.0.0"))
-#else
 cGRPCDependencies.append("BoringSSL")
 #endif
 
-/*
- * `swift-nio-zlib-support` uses `pkgConfig` to find `zlib` on
- * non-Apple platforms. Details here:
- * https://github.com/apple/swift-nio-zlib-support/issues/2#issuecomment-384681975
- *
- * This doesn't play well with Macports, so require it only for non-Apple
- * platforms, until there is a better solution.
- * Issue: https://github.com/grpc/grpc-swift/issues/220
- */
-#if !os(macOS)
-dependencies.append(.package(url: "https://github.com/apple/swift-nio-zlib-support.git", from: "1.0.0"))
-#endif
-
 let package = Package(
   name: "SwiftGRPC",
   products: [
     .library(name: "SwiftGRPC", targets: ["SwiftGRPC"]),
   ],
-  dependencies: dependencies,
+  dependencies: [
+    .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.0.2"),
+    .package(url: "https://github.com/kylef/Commander.git", from: "0.8.0"),
+    .package(url: "https://github.com/apple/swift-nio-zlib-support.git", from: "1.0.0")
+  ],
   targets: [
     .target(name: "SwiftGRPC", dependencies: ["CgRPC", "SwiftProtobuf"]),

From 03f43093fbe080849b408a6aca85ff75351ab440 Mon Sep 17 00:00:00 2001
From: Daniel Alm
Date: Mon, 4 Jun 2018 09:30:30 +0200
Subject: [PATCH 7/9] Move another compiler directive into vendor-grpc.sh.

---
 Makefile                                             | 1 -
 Sources/CgRPC/src/core/tsi/ssl_transport_security.cc | 2 +-
 vendor-grpc.sh                                       | 3 +++
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index dba5c68ee..d7c7cc2c6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,6 @@
 UNAME_S = $(shell uname -s)
 
 ifeq ($(UNAME_S),Linux)
-  CFLAGS = -Xcc -DTSI_OPENSSL_ALPN_SUPPORT=0
 else
   CFLAGS = -Xcc -ISources/BoringSSL/include
 endif
diff --git a/Sources/CgRPC/src/core/tsi/ssl_transport_security.cc b/Sources/CgRPC/src/core/tsi/ssl_transport_security.cc
index 0ba658767..7662a5538 100644
--- a/Sources/CgRPC/src/core/tsi/ssl_transport_security.cc
+++ b/Sources/CgRPC/src/core/tsi/ssl_transport_security.cc
@@ -62,7 +62,7 @@ extern "C" {
    bad practice.
    TODO(jboeuf): refactor all the #if / #endif in a separate module. */
 #ifndef TSI_OPENSSL_ALPN_SUPPORT
-#define TSI_OPENSSL_ALPN_SUPPORT 1
+#define TSI_OPENSSL_ALPN_SUPPORT 0
 #endif
 
 /* TODO(jboeuf): I have not found a way to get this number dynamically from the
diff --git a/vendor-grpc.sh b/vendor-grpc.sh
index c54a38383..05a5a79c5 100755
--- a/vendor-grpc.sh
+++ b/vendor-grpc.sh
@@ -75,6 +75,9 @@ cp third_party/grpc/third_party/nanopb/*.h Sources/CgRPC/third_party/nanopb/
 echo "ADDING additional compiler flags to nanopb/pb.h"
 perl -pi -e 's/\/\* #define PB_FIELD_16BIT 1 \*\//#define PB_FIELD_16BIT 1/' Sources/CgRPC/third_party/nanopb/pb.h
 
+echo "ADDING additional compiler flags to tsi/ssl_transport_security.cc"
+perl -pi -e 's/#define TSI_OPENSSL_ALPN_SUPPORT 1/#define TSI_OPENSSL_ALPN_SUPPORT 0/' Sources/CgRPC/src/core/tsi/ssl_transport_security.cc
+
 echo "DISABLING ARES"
 perl -pi -e 's/#define GRPC_ARES 1/#define GRPC_ARES 0/' Sources/CgRPC/include/grpc/impl/codegen/port_platform.h
 

From 8ef093b2b3cc571c0750d9d2a5d3704dc9636000 Mon Sep 17 00:00:00 2001
From: Daniel Alm
Date: Mon, 4 Jun 2018 09:38:43 +0200
Subject: [PATCH 8/9] Fix linking with OpenSSL on Linux.

---
 Package.swift | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/Package.swift b/Package.swift
index a7f9abb2b..d343b84cd 100644
--- a/Package.swift
+++ b/Package.swift
@@ -17,10 +17,18 @@
  */
 import PackageDescription
 
+var packageDependencies: [Package.Dependency] = [
+  .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.0.2"),
+  .package(url: "https://github.com/kylef/Commander.git", from: "0.8.0"),
+  .package(url: "https://github.com/apple/swift-nio-zlib-support.git", from: "1.0.0")
+]
+
 var cGRPCDependencies: [Target.Dependency] = []
-#if !os(Linux)
+#if os(Linux)
 // On Linux, Foundation links with openssl, so we'll need to use that instead of BoringSSL.
 // See https://github.com/apple/swift-nio-ssl/issues/16#issuecomment-392705505 for details.
+packageDependencies.append(.package(url: "https://github.com/apple/swift-nio-ssl-support.git", from: "1.0.0"))
+#else
 cGRPCDependencies.append("BoringSSL")
 #endif
 
@@ -29,11 +37,7 @@ let package = Package(
   products: [
     .library(name: "SwiftGRPC", targets: ["SwiftGRPC"]),
   ],
-  dependencies: [
-    .package(url: "https://github.com/apple/swift-protobuf.git", from: "1.0.2"),
-    .package(url: "https://github.com/kylef/Commander.git", from: "0.8.0"),
-    .package(url: "https://github.com/apple/swift-nio-zlib-support.git", from: "1.0.0")
-  ],
+  dependencies: packageDependencies,
   targets: [
     .target(name: "SwiftGRPC", dependencies: ["CgRPC", "SwiftProtobuf"]),

From 56ecb721c198e854be9b55685d8ebc1476b227ad Mon Sep 17 00:00:00 2001
From: Daniel Alm
Date: Mon, 4 Jun 2018 12:54:05 +0200
Subject: [PATCH 9/9] Fix building with a clean Xcode and add a CI run to ensure just that.

---
 .gitignore  | 1 +
 .travis.yml | 1 +
 Makefile    | 9 ++++++---
 3 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index 2a233870a..7a977da4b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@ project.xcworkspace
 xcuserdata
 DerivedData/
 .build
+build
 /protoc-gen-swift
 /protoc-gen-swiftgrpc
 third_party/**
diff --git a/.travis.yml b/.travis.yml
index 84de43c24..ba8b0321f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -57,3 +57,4 @@ script:
   - make test
   - make test-plugin
   - make test-echo
+  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then make xcodebuild; fi
diff --git a/Makefile b/Makefile
index d7c7cc2c6..528e6ec8c 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ UNAME_S = $(shell uname -s)
 
 ifeq ($(UNAME_S),Linux)
 else
-  CFLAGS = -Xcc -ISources/BoringSSL/include
+  CFLAGS = -Xcc -ISources/BoringSSL/include -Xlinker -lz
 endif
 
 all:
@@ -11,7 +11,7 @@ all:
 	cp .build/debug/protoc-gen-swiftgrpc .
 
 project:
-	swift package generate-xcodeproj
+	swift package -v $(CFLAGS) generate-xcodeproj
 	@-ruby fix-project-settings.rb || echo "Consider running 'sudo gem install xcodeproj' to automatically set correct indentation settings for the generated project."
 
 test: all
@@ -32,9 +32,12 @@ test-plugin:
 	protoc Sources/Examples/Echo/echo.proto --proto_path=Sources/Examples/Echo --plugin=.build/debug/protoc-gen-swift --plugin=.build/debug/protoc-gen-swiftgrpc --swiftgrpc_out=/tmp --swiftgrpc_opt=TestStubs=true
 	diff -u /tmp/echo.grpc.swift Sources/Examples/Echo/Generated/echo.grpc.swift
 
+xcodebuild: project
+	xcodebuild -configuration "Debug" -parallelizeTargets -target SwiftGRPC -target Echo -target Simple -target protoc-gen-swiftgrpc build
+
 clean:
 	rm -rf Packages
-	rm -rf .build
+	rm -rf .build build
 	rm -rf SwiftGRPC.xcodeproj
 	rm -rf Package.pins Package.resolved
 	rm -rf protoc-gen-swift protoc-gen-swiftgrpc